cluster.name | The name of the cluster the Log Feeder program runs in. | EMPTY | cl1 |
hadoop.security.credential.provider.path | The jceks file that provides passwords. | EMPTY | jceks://file/etc/ambari-logsearch-logfeeder/conf/logfeeder.jceks |
logfeeder.cache.dedup.interval | Maximum number of milliseconds between two identical messages to be filtered out. | 1000 | 500 |
logfeeder.cache.enabled | Enables the usage of a cache to avoid duplications. | false | true |
logfeeder.cache.key.field | The field whose value should be cached and should be checked for repetitions. | log_message | some_field_prone_to_repeating_value |
logfeeder.cache.last.dedup.enabled | Enable filtering directly repeating log entries irrelevant of the time spent between them. | false | true |
logfeeder.cache.size | The number of log entries to cache in order to avoid duplications. | 100 | 50 |
logfeeder.checkpoint.extension | The extension used for checkpoint files. | .cp | ckp |
logfeeder.checkpoint.folder | The folder where checkpoint files are stored. | EMPTY | /usr/lib/ambari-logsearch-logfeeder/conf/checkpoints |
logfeeder.cloud.rollover.archive.base.dir | Location where the active and archives logs will be stored. Beware, it could require a large amount of space, use mounted disks if it is possible. | /tmp | /var/lib/ambari-logsearch-logfeeder/data |
logfeeder.cloud.rollover.immediate.flush | Immediately flush temporal cloud logs (to active location). | true | false |
logfeeder.cloud.rollover.max.files | The number of max backup log files for rolled over logs. | 10 | 50 |
logfeeder.cloud.rollover.on.shutdown | Rollover temporal cloud log files on shutdown | true | false |
logfeeder.cloud.rollover.on.startup | Rollover temporal cloud log files on startup | true | false |
logfeeder.cloud.rollover.threshold.min | Rollover cloud log files after an interval (minutes) | 60 | 1 |
logfeeder.cloud.rollover.threshold.size | Rollover cloud log files after the log file size reach this limit | 80 | 1024 |
logfeeder.cloud.rollover.threshold.size.unit | Rollover cloud log file size unit (e.g: KB, MB etc.) | MB | KB |
logfeeder.cloud.rollover.use.gzip | Use GZip on archived logs. | true | false |
logfeeder.cloud.storage.base.path | Base path prefix for storing logs (cloud storage / hdfs), could be an absolute path or URI. (if a URI is used, that will override fs.defaultFS for the HDFS client) | /apps/logsearch | /user/logsearch/mypath s3a:///user/logsearch |
logfeeder.cloud.storage.bucket | Amazon S3 bucket. | logfeeder | logs |
logfeeder.cloud.storage.bucket.bootstrap | Create bucket on startup. | true | false |
logfeeder.cloud.storage.custom.fs | If it is not empty, override fs.defaultFS for HDFS client. Can be useful to write data to a different bucket (from other services) if the bucket address is read from core-site.xml | EMPTY | s3a://anotherbucket |
logfeeder.cloud.storage.destination | Type of storage that is the destination for cloud output logs. | none | hdfs s3 |
logfeeder.cloud.storage.mode | Option to support sending logs to cloud storage. You can choose between supporting only cloud storage, non-cloud storage or both | default | default cloud hybrid |
logfeeder.cloud.storage.upload.on.shutdown | Try to upload archived files on shutdown | false | true |
logfeeder.cloud.storage.uploader.interval.seconds | Interval in seconds, used to check whether there are any files to upload to cloud storage or not. | 60 | 10 |
logfeeder.cloud.storage.uploader.timeout.minutes | Timeout value for uploading task to cloud storage in minutes. | 60 | 10 |
logfeeder.cloud.storage.use.filters | Use filters for inputs (with filters the output format will be JSON) | false | true |
logfeeder.cloud.storage.use.hdfs.client | Use hdfs client with cloud connectors instead of the core clients for shipping data to cloud storage | false | true |
logfeeder.config.dir | The directory where shipper configuration files are looked for. | /usr/lib/ambari-logsearch-logfeeder/conf | /usr/lib/ambari-logsearch-logfeeder/conf |
logfeeder.config.files | Comma separated list of the config files containing global / output configurations. | EMPTY | global.json,output.json /usr/lib/ambari-logsearch-logfeeder/conf/global.config.json |
logfeeder.configs.filter.solr.enabled | Use solr as a log level filter storage | false | true |
logfeeder.configs.filter.solr.monitor.enabled | Monitor log level filters (in solr) periodically - used for checking updates. | true | false |
logfeeder.configs.filter.solr.monitor.interval | Time interval (in seconds) between monitoring input config filter definitions from Solr. | 30 | 60 |
logfeeder.configs.filter.zk.enabled | Use zk as a log level filter storage (works only with local config) | false | true |
logfeeder.configs.local.enabled | Monitor local input.config-*.json files (do not upload them to zookeeper or solr) | false | true |
logfeeder.docker.registry.enabled | Enable to monitor docker containers and store their metadata in an in-memory registry. | false | true |
logfeeder.hdfs.file.permissions | Default permissions for created files on HDFS | 640 | 600 |
logfeeder.hdfs.host | HDFS Name Node host. | EMPTY | mynamenodehost |
logfeeder.hdfs.kerberos | Enable kerberos support for HDFS | false | true |
logfeeder.hdfs.keytab | Kerberos keytab location for Log Feeder for communicating with secure HDFS. | /etc/security/keytabs/logfeeder.service.keytab | /etc/security/keytabs/mykeytab.keytab |
logfeeder.hdfs.port | HDFS Name Node port | EMPTY | 9000 |
logfeeder.hdfs.principal | Kerberos principal for Log Feeder for communicating with secure HDFS. | logfeeder/_HOST | mylogfeeder/myhost1@EXAMPLE.COM |
logfeeder.hdfs.user | Overrides HADOOP_USER_NAME variable at runtime | EMPTY | hdfs |
logfeeder.include.default.level | Comma separated list of the default log levels to be enabled by the filtering. | EMPTY | FATAL,ERROR,WARN |
logfeeder.log.filter.enable | Enables the filtering of the log entries by log level filters. | false | true |
logfeeder.metrics.collector.hosts | Comma separated list of metric collector hosts. | EMPTY | c6401.ambari.apache.org,c6402.ambari.apache.org |
logfeeder.metrics.collector.path | The path used by metric collectors. | EMPTY | /ws/v1/timeline/metrics |
logfeeder.metrics.collector.port | The port used by metric collectors. | EMPTY | 6188 |
logfeeder.metrics.collector.protocol | The protocol used by metric collectors. | EMPTY | http https |
logfeeder.s3.access.key | Amazon S3 secret access key. | EMPTY | MySecretAccessKey |
logfeeder.s3.access.key.file | Amazon S3 secret access key file (that contains only the key). | EMPTY | /my/path/access_key |
logfeeder.s3.credentials.file.enabled | Enable to get Amazon S3 secret/access keys from files. | EMPTY | true |
logfeeder.s3.credentials.hadoop.access.ref | Amazon S3 access key reference in Hadoop credential store. | logfeeder.s3.access.key | logfeeder.s3.access.key |
logfeeder.s3.credentials.hadoop.enabled | Enable to get Amazon S3 secret/access keys from Hadoop credential store API. | EMPTY | true |
logfeeder.s3.credentials.hadoop.secret.ref | Amazon S3 secret access key reference in Hadoop credential store. | logfeeder.s3.secret.key | logfeeder.s3.secret.key |
logfeeder.s3.endpoint | Amazon S3 endpoint. | https://s3.amazonaws.com | https://s3.amazonaws.com |
logfeeder.s3.multiobject.delete.enable | When enabled, multiple single-object delete requests are replaced by a single 'delete multiple objects' request, reducing the number of requests. | true | false |
logfeeder.s3.object.acl | Amazon S3 ACLs for new objects. | private | logs |
logfeeder.s3.path.style.access | Enable S3 path style access will disable the default virtual hosting behaviour (DNS). | false | true |
logfeeder.s3.region | Amazon S3 region. | EMPTY | us-east-2 |
logfeeder.s3.secret.key | Amazon S3 secret key. | EMPTY | MySecretKey |
logfeeder.s3.secret.key.file | Amazon S3 secret key file (that contains only the key). | EMPTY | /my/path/secret_key |
logfeeder.simulate.input_number | The number of the simulator instances to run with. 0 means no simulation. | 0 | 10 |
logfeeder.simulate.log_ids | The comma separated list of log ids for which to create the simulated log entries. | The log ids of the installed services in the cluster | ambari_server,zookeeper,infra_solr,logsearch_app |
logfeeder.simulate.log_level | The log level to create the simulated log entries with. | WARN | INFO |
logfeeder.simulate.max_log_words | The maximum number of words in a simulated log entry. | 5 | 8 |
logfeeder.simulate.min_log_words | The minimum number of words in a simulated log entry. | 5 | 3 |
logfeeder.simulate.number_of_words | The size of the set of words that may be used to create the simulated log entries with. | 1000 | 100 |
logfeeder.simulate.sleep_milliseconds | The milliseconds to sleep between creating two simulated log entries. | 10000 | 5000 |
logfeeder.solr.cloud.client.discover | On startup, with a Solr Cloud client, the Solr nodes will be discovered, then LBHttpClient will be built from that. | false | true |
logfeeder.solr.implicit.routing | Use implicit routing for Solr Collections. | false | true |
logfeeder.solr.jaas.file | The jaas file used for solr. | /etc/security/keytabs/logsearch_solr.service.keytab | /usr/lib/ambari-logsearch-logfeeder/conf/logfeeder_jaas.conf |
logfeeder.solr.kerberos.enable | Enables using kerberos for accessing solr. | false | true |
logfeeder.solr.metadata.collection | Metadata collection name that could contain log level filters or input configurations. | EMPTY | logsearch_metadata |
logfeeder.solr.urls | Comma separated solr urls (with protocol and port), override logfeeder.solr.zk_connect_string config | EMPTY | https://localhost1:8983/solr,https://localhost2:8983 |
logfeeder.solr.zk_connect_string | Zookeeper connection string for Solr. | EMPTY | localhost1:2181,localhost2:2181/mysolr_znode |
logfeeder.tmp.dir | The tmp dir used for creating temporary files. | java.io.tmpdir | /tmp/ |
logsearch.config.zk_acls | ZooKeeper ACLs for handling configs. (read & write) | world:anyone:cdrwa | world:anyone:r,sasl:solr:cdrwa,sasl:logsearch:cdrwa |
logsearch.config.zk_connect_string | ZooKeeper connection string. | EMPTY | localhost1:2181,localhost2:2181/znode |
logsearch.config.zk_connection_retry_time_out_ms | The maximum elapsed time for connecting to ZooKeeper in milliseconds. 0 means retrying forever. | EMPTY | 1200000 |
logsearch.config.zk_connection_time_out_ms | ZooKeeper connection timeout in milliseconds | EMPTY | 30000 |
logsearch.config.zk_root | ZooKeeper root node where the shippers are stored. (added to the connection string) | EMPTY | /logsearch |
logsearch.config.zk_session_time_out_ms | ZooKeeper session timeout in milliseconds | EMPTY | 60000 |