Application Properties File

The application properties file is the main configuration file for the Analytics Backend. The following file shows example parameter usage, descriptions, and standard default settings.

Note: The application.properties file is located in the following directory: <Automic>/Automation.Platform/Analytics/backend

####################################

##Analytics Datastore connection##

####################################

#JDBC connection string pointing at the Analytics Datastore

#Note: user names and passwords are case-sensitive

 

datasource.analytics.url=jdbc:postgresql://localhost:5434/analytics

 

#Username used to connect to the Analytics Datastore

datasource.analytics.username=analytics

 

#Password for the Analytics Datastore user

datasource.analytics.password=--10061863562315215D747BFFD1E1AD49AB

 

###########################

## External data sources ##

###########################

#Ensure users provided for the JDBC connections have minimum rights on the target DB (ideally read-only). You can specify additional datasource properties like this:

 

#<datasource>.additional_properties[<property>]=<value>
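
#For illustration only, a hypothetical additional property passed through to the JDBC driver (property name and value are placeholders, adjust to your driver):
#datasource.ae.additional_properties[socketTimeout]=30000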

 

#The following lines are example JDBC connection strings for different types of databases. Replace <datasource> with the data source that is being configured

 

#Note: Lower case names and passwords are required

 

#MSSQL:

#<datasource>.url=jdbc:sqlserver://192.168.0.1:1433;databaseName=automic

 

#MSSQL2012 with named instance:

#<datasource>.url=jdbc:sqlserver://192.168.0.1;instanceName=automic;databaseName=automic

 

#MSSQL2014 with named instance:

#<datasource>.url=jdbc:sqlserver://192.168.0.1:1433;instanceName=automic;databaseName=automic

#Make sure TCP/IP is enabled for the named instance in the MSSQL server configuration

#See https://msdn.microsoft.com/en-us/ms177440.aspx for details

 

#Oracle:

#<datasource>.url=jdbc:oracle:thin:@mydbcluster:1521/AUTOMIC

 

#DB2:

#<datasource>.url=jdbc:db2://host:50000/automic
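
#For illustration, the MSSQL example above filled in for the Automation Engine data source (host, port and database name are placeholders):
#datasource.ae.url=jdbc:sqlserver://192.168.0.1:1433;databaseName=automic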

 

## Automation Engine ##

#######################

#JDBC connection string pointing at the Automation Engine database

datasource.ae.url=jdbc:sqlserver://localhost:1433;instanceName=AUTOMIC;databaseName=Automic_AE

 

#Username used to connect to the Automation Engine database

datasource.ae.username=sledgehammer

 

#Password corresponding to the Automation Engine database user

datasource.ae.password=--1080AFBF6E8C6D48DC7034FF8FD2418C61

 

#Specify an initial SQL file that will be executed before querying data

 

#datasource.ae.schema=file:ae_schema.sql

 

## Application Release Automation ##

###################################

#Make sure to uncomment and set these settings when collecting data from a CDA instance.

 

#JDBC connection string pointing at the CDA database for a given client

#Appeared in Analytics v1.0

#datasource.ara[100].url=

 

#Username used to connect to the CDA database

#Appeared in Analytics v1.0

#datasource.ara[100].username=

 

#Password for the CDA database user

#Appeared in Analytics v1.0

#datasource.ara[100].password=

 

#Reporting tables suffix

#Used to identify the reporting tables for analytics if more than one CDA instance shares the same report DB.

#Appeared in Analytics v1.0

#datasource.ara[100].table_suffix=

 

#Specify an initial SQL file that will be executed before querying data

#Appeared in Analytics v1.0

#datasource.ara[100].schema=file:ara_schema.sql
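
#For illustration, a hypothetical CDA configuration for client 100 (all values are placeholders):
#datasource.ara[100].url=jdbc:sqlserver://192.168.0.1:1433;databaseName=cda
#datasource.ara[100].username=cda_reader
#datasource.ara[100].password=<password>
#datasource.ara[100].table_suffix=_100
#datasource.ara[100].schema=file:ara_schema.sql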

 

################

## Collectors ##

################

#Enable/disable all data collectors globally; takes precedence over collector-specific switches
collectors.enabled=true

 

#The following settings can be configured for collectors. Replace <collector> with the collector that is being configured. The values shown are the default values for those settings.

 

#Enable/disable the collection of source data

#<collector>.enabled=true

 

#Safety margin for interval calculation

#<collector>.safety_margin_seconds=60

 

#Normal sampling period for data collection interval

#<collector>.normal_sampling_period_minutes=5

 

#Large sampling period, used when the last collected interval lies further in the past than the normal sampling period

#<collector>.large_sampling_period_days=1

 

#Initial start for data collection in days before now

#<collector>.initial_collect_before_now_days=32
#Note: The collector name must match the data source name. Example: if your data source parameters are called datasource.ae.*,
#the corresponding collector name has to be collector.ae.*; the correct parameter in this case is collector.ae.initial_collect_before_now_days=

 

#Maximum number of rows to be inserted at once within data collection

#<collector>.chunk_size_rows=10000

 

#Maximum number of rows to be enriched at once

#<collector>.enricher_chunk_size_rows=1000
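
#For illustration, a hypothetical override of the defaults for the Automation Engine collector (values chosen arbitrarily):
#collector.ae.enabled=true
#collector.ae.normal_sampling_period_minutes=10
#collector.ae.initial_collect_before_now_days=60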

 

##Automation Engine##

#####################

#Enable/disable the collection of source data

#collector.ae.enabled=true

 

#Application Release Automation#

################################

#Enable/disable the collection of source data

#collector.ara.enabled=true

 

########################

# Collector scheduling #

########################

#How many threads to use for the scheduler

#scheduler.pool_size=30

 

#Specify the timeout in seconds to wait for the collector thread pool task scheduler to shut down

#scheduler.await_termination_seconds=10

 

#####################

# Events ingestion #

#####################

#Enable/disable event ingestion

collector.events.enabled=true

 

##########################

## Rule Engine settings ##

##########################

#Flink job manager - The JobManager coordinates every Flink deployment. It is responsible for both scheduling and resource management.

flink.host=localhost

 

#flink.port=6124

 

#flink.web_port=8081

 

#Use SSL for connecting to Flink

#flink.use_ssl=false

 

#Verify SSL certificate

#flink.disable_self_signed_certificates=true

 

#Job monitoring interval

#flink.monitoring_interval_seconds=60

 

#Heartbeat interval to check for abandoned jobs

#flink.monitoring_heartbeat_interval_minutes=3
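
#For illustration, a hypothetical configuration for a remote, SSL-enabled JobManager (host name is a placeholder):
#flink.host=flink.example.com
#flink.use_ssl=true
#flink.disable_self_signed_certificates=true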

 

#Zookeeper#

###########

#Specify Zookeeper hosts

zookeeper.bootstrap_servers=localhost:2181

 

#zookeeper.session_timeout_ms=30000

 

#zookeeper.connection_timeout_ms=10000

 

#zookeeper.is_secure_kafka_cluster=false
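
#For illustration, a hypothetical three-node Zookeeper ensemble (host names are placeholders), assuming the standard comma-separated Zookeeper connect string is accepted:
#zookeeper.bootstrap_servers=zk1.example.com:2181,zk2.example.com:2181,zk3.example.com:2181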

 

#########################

## Cache configuration ##

#########################

#Enable caching of charts

#service_cache.enabled=false

 

#Request cache expiration

#service_cache.expiration_minutes=15

 

#Maximum number of requests to keep in the cache

#service_cache.size=100
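
#For illustration, a hypothetical configuration enabling the chart cache with a longer expiration (values chosen arbitrarily):
#service_cache.enabled=true
#service_cache.expiration_minutes=30
#service_cache.size=200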

 

#######################

## Log configuration ##

#######################

#Base directory where all Backend logs are stored

#Note: An absolute path is required. Using backslashes ("\") causes the Backend to crash because it cannot write the log files.

 

#Example: Windows c:/temp/logs (use forward slashes, not backslashes)

#Example: Linux /temp/logs

logging.basedir=temp

 

#Application logging level

#Possible values: TRACE, DEBUG, INFO, WARN, ERROR, FATAL, OFF

#TRACE - The highest level of trace information. Use this level for a verbose trace.

#DEBUG - Provides basic debug information

#INFO - Provides standard log information

#WARN - Provides warnings of likely problems

#ERROR - Reports known errors

#FATAL - Reports fatal errors

#OFF - Turns off Logging

logging.level.=INFO

logging.level.org.apache.kafka=ERROR
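
#For illustration, per-package levels can be set individually; a hypothetical example raising Spring framework logging to DEBUG:
#logging.level.org.springframework=DEBUG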

 

#Log output file

#Writes to the specified log file. Names can be an exact location or relative to the current directory.

logging.file=${logging.basedir}/analytics_backend.log

logging.config=logging.xml

 

#Log configuration for embedded Tomcat

server.tomcat.accesslog.enabled=true

#Patterns for the logged message may include constant text or any of the following replacement strings, for which the corresponding information from the specified Response is substituted:

#%a - Remote IP address

#%h - Remote host name (or IP address if enableLookups for the connector is false)

#%l - Remote logical username from identd (always returns '-')

#%u - Remote user that was authenticated

#%t - Date and time, in Common Log Format

#%r - First line of the request

#%s - HTTP status code of the response

#%b - Bytes sent, excluding HTTP headers, or '-' if no bytes were sent

#%D - Time taken to process the request, in millis

server.tomcat.accesslog.pattern=%a %h %l %u %t "%r" %s %b %D %{X-Forwarded-Port}i %{X-Forwarded-Host}i %{X-Forwarded-Ssl}i %{X-Forwarded-For}i %{X-Forwarded-Proto}i %{X-Forwarded-Server}i

server.tomcat.basedir=${logging.basedir}/tomcat-logs

#See https://tomcat.apache.org/tomcat-7.0-doc/api/org/apache/catalina/valves/AccessLogValve.html for further information regarding access logs.

 

##########################

## HTTP server settings ##

##########################

#Backend port

server.port=8090

 

#######################

## SSL Configuration ##

#######################

#By enabling the server.ssl.* settings, the Backend will only accept HTTPS connections. The insecure HTTP connection is disabled automatically. Change the port to 8443 if required by setting

#server.port=8443

 

#Path to the key store that holds the SSL certificate (typically a .jks file)

#server.ssl.key-store=/path/to/keystore.jks

 

#Password used to access the key store

#server.ssl.key-store-password=

 

#Password used to access the key in the key store

#server.ssl.key-password=
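
#For illustration, a hypothetical complete HTTPS setup (path and passwords are placeholders):
#server.port=8443
#server.ssl.key-store=/opt/automic/analytics/keystore.jks
#server.ssl.key-store-password=<keystore password>
#server.ssl.key-password=<key password>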

 

##############################

## Internal Spring settings ##

##############################

#The following information is for internal use only

#Change only after consulting with a member of CA Automic Support

 

spring.main.banner-mode=off

spring.jmx.enabled=false

flyway.baseline-on-migrate=true

flyway.baseline-version=0

loader.path=lib,jdbc

flink.port=6124

flink.web_port=8081