#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Log level
logging.level.root=info
logging.level.org.apache.inlong.manager=debug
spring.datasource.druid.url=jdbc:mysql://127.0.0.1:3306/apache_inlong_manager?useSSL=false&allowPublicKeyRetrieval=true&characterEncoding=UTF-8&nullCatalogMeansCurrent=true&serverTimezone=GMT%2b8
spring.datasource.druid.username=root
spring.datasource.druid.password=inlong
# Datasource config, set org.postgresql.Driver if using PostgreSQL (see the commented-out example below)
spring.datasource.druid.driver-class-name=com.mysql.cj.jdbc.Driver
spring.datasource.druid.validationQuery=SELECT 'x'
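# A commented-out PostgreSQL example for reference; the host, port, and database name are
# placeholders and should be adjusted to your environment:
# spring.datasource.druid.url=jdbc:postgresql://127.0.0.1:5432/apache_inlong_manager
# spring.datasource.druid.driver-class-name=org.postgresql.Driver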
# Initial size, minimum idle, and maximum active connections of the pool
spring.datasource.druid.initialSize=20
spring.datasource.druid.minIdle=20
spring.datasource.druid.maxActive=300
# Maximum wait time to acquire a connection from the pool, in milliseconds
spring.datasource.druid.maxWait=600000
# Minimum time a connection must stay idle in the pool before it becomes eligible for eviction, in milliseconds
spring.datasource.druid.minEvictableIdleTimeMillis=3600000
# Validate connections that have been idle when they are borrowed; recommended to set to true, as it does not affect performance and ensures the connection is valid
spring.datasource.druid.testWhileIdle=true
# Validate a connection every time it is borrowed from the pool; recommended to keep disabled, as it affects performance
spring.datasource.druid.testOnBorrow=false
# Validate a connection every time it is returned to the pool; recommended to keep disabled, as it affects performance
spring.datasource.druid.testOnReturn=false
# Filters for monitoring and interception: stat (monitoring statistics), log4j (logging), wall (SQL injection defense)
spring.datasource.druid.filters=stat,wall
# Enable SQL merging and slow SQL logging (queries slower than 5000 ms) via connectionProperties
spring.datasource.druid.connectionProperties=druid.stat.mergeSql=true;druid.stat.slowSqlMillis=5000
# Exclude ElasticsearchRestClientAutoConfiguration
spring.autoconfigure.exclude=org.springframework.boot.autoconfigure.elasticsearch.ElasticsearchRestClientAutoConfiguration
# Audit configuration
# Audit query source that decides which data source to query, currently only supports [MYSQL|ELASTICSEARCH|CLICKHOUSE]
audit.query.source=MYSQL
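# For example, to query audit data from ClickHouse instead, switch the source as follows
# (assuming the ClickHouse config below points at a reachable instance):
# audit.query.source=CLICKHOUSE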
# Elasticsearch config
# Elasticsearch hosts, comma-separated if there is more than one, such as 'host1,host2'
es.index.search.hostname=127.0.0.1
# Elasticsearch port
es.index.search.port=9200
# Whether Elasticsearch authentication is enabled
es.auth.enable=false
# Elasticsearch authentication user
es.auth.user=admin
# Elasticsearch authentication password
es.auth.password=inlong
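# A commented-out example of a multi-host, authenticated setup; 'es-node1' and 'es-node2'
# are placeholder host names used for illustration only:
# es.index.search.hostname=es-node1,es-node2
# es.auth.enable=true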
# ClickHouse config
# ClickHouse jdbcUrl
audit.ck.jdbcUrl=jdbc:clickhouse://127.0.0.1:8123/apache_inlong_audit
# ClickHouse username
audit.ck.username=default
# ClickHouse password
audit.ck.password=
# Database cleansing
# If turned on, logically deleted data will be collected and permanently deleted periodically
data.cleansing.enabled=false
# The interval (in seconds) between the end of one execution and the start of the next, default is 1800s (0.5 hour)
data.cleansing.interval.seconds=1800
# Only clean data whose latest modification time is more than this many days old, default is 10 days
data.cleansing.before.days=10
# The maximum number of records to delete in one batch, default is 100
data.cleansing.batchSize=100
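# A commented-out example that enables cleansing, runs it hourly, and keeps logically deleted
# data for 30 days; the values are illustrative only:
# data.cleansing.enabled=true
# data.cleansing.interval.seconds=3600
# data.cleansing.before.days=30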
# Whether to use ZooKeeper to manage the Sort task config, default is false, which means not using ZooKeeper
sort.enable.zookeeper=false
# If turned on, the source status is updated automatically when the agent heartbeat times out
source.update.enabled=false
source.update.before.seconds=60
source.update.interval=60
# If turned on, source tasks in an incorrect state are periodically deleted
source.cleansing.enabled=false
source.cleansing.interval=600
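# A commented-out example that turns both maintenance jobs on while keeping the timings above;
# illustrative only:
# source.update.enabled=true
# source.cleansing.enabled=true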