| # Licensed to the Apache Software Foundation (ASF) under one |
| # or more contributor license agreements. See the NOTICE file |
| # distributed with this work for additional information |
| # regarding copyright ownership. The ASF licenses this file |
| # to you under the Apache License, Version 2.0 (the |
| # "License"); you may not use this file except in compliance |
| # with the License. You may obtain a copy of the License at |
| # |
| # http://www.apache.org/licenses/LICENSE-2.0 |
| # |
| # Unless required by applicable law or agreed to in writing, software |
| # distributed under the License is distributed on an "AS IS" BASIS, |
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| # See the License for the specific language governing permissions and |
| # limitations under the License. |
| |
| |
########### All of the settings below have the default values shown.
########### Site-specific overrides go into storm.yaml.
| |
| java.library.path: "/usr/local/lib:/opt/local/lib:/usr/lib" |
| |
| ### storm.* configs are general configurations |
# the local dir is where the daemons keep local state, such as uploaded jars
| storm.local.dir: "storm-local" |
| storm.log4j2.conf.dir: "log4j2" |
| storm.zookeeper.servers: |
| - "localhost" |
| storm.zookeeper.port: 2181 |
| storm.zookeeper.root: "/storm" |
| storm.zookeeper.session.timeout: 20000 |
| storm.zookeeper.connection.timeout: 15000 |
| storm.zookeeper.retry.times: 5 |
| storm.zookeeper.retry.interval: 1000 |
| storm.zookeeper.retry.intervalceiling.millis: 30000 |
| storm.zookeeper.auth.user: null |
| storm.zookeeper.auth.password: null |
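
# Example (storm.yaml override; the hostnames are hypothetical): a production
# cluster typically points at a multi-node ZooKeeper ensemble:
# storm.zookeeper.servers:
#     - "zk1.example.com"
#     - "zk2.example.com"
#     - "zk3.example.com"
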
| storm.cluster.mode: "distributed" # can be distributed or local |
| storm.local.mode.zmq: false |
| storm.thrift.transport: "backtype.storm.security.auth.SimpleTransportPlugin" |
| storm.principal.tolocal: "backtype.storm.security.auth.DefaultPrincipalToLocal" |
| storm.group.mapping.service: "backtype.storm.security.auth.ShellBasedGroupsMapping" |
| storm.group.mapping.service.params: null |
| storm.messaging.transport: "backtype.storm.messaging.netty.Context" |
| storm.nimbus.retry.times: 5 |
| storm.nimbus.retry.interval.millis: 2000 |
| storm.nimbus.retry.intervalceiling.millis: 60000 |
| storm.auth.simple-white-list.users: [] |
| storm.auth.simple-acl.users: [] |
| storm.auth.simple-acl.users.commands: [] |
| storm.auth.simple-acl.admins: [] |
| storm.cluster.state.store: "backtype.storm.cluster_state.zookeeper_state_factory" |
| storm.meta.serialization.delegate: "backtype.storm.serialization.GzipThriftSerializationDelegate" |
| storm.codedistributor.class: "backtype.storm.codedistributor.LocalFileSystemCodeDistributor" |
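# The supervisor periodically runs the scripts found in the directory below; a
# script that fails or runs past the timeout marks the node as unhealthy.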
| storm.health.check.dir: "healthchecks" |
| storm.health.check.timeout.ms: 5000 |
| |
| ### nimbus.* configs are for the master |
nimbus.seeds: ["localhost"]
| nimbus.thrift.port: 6627 |
| nimbus.thrift.threads: 64 |
| nimbus.thrift.max_buffer_size: 1048576 |
| nimbus.childopts: "-Xmx1024m" |
| nimbus.task.timeout.secs: 30 |
| nimbus.supervisor.timeout.secs: 60 |
| nimbus.monitor.freq.secs: 10 |
| nimbus.cleanup.inbox.freq.secs: 600 |
| nimbus.inbox.jar.expiration.secs: 3600 |
| nimbus.code.sync.freq.secs: 300 |
| nimbus.task.launch.secs: 120 |
| nimbus.file.copy.expiration.secs: 600 |
| nimbus.topology.validator: "backtype.storm.nimbus.DefaultTopologyValidator" |
| topology.min.replication.count: 1 |
| topology.max.replication.wait.time.sec: 60 |
| nimbus.credential.renewers.freq.secs: 600 |
| nimbus.impersonation.authorizer: "backtype.storm.security.auth.authorizer.ImpersonationAuthorizer" |
| |
| scheduler.display.resource: false |
| |
### ui.* configs are for the Storm UI daemon
| ui.host: 0.0.0.0 |
| ui.port: 8080 |
| ui.childopts: "-Xmx768m" |
| ui.actions.enabled: true |
| ui.filter: null |
| ui.filter.params: null |
| ui.users: null |
| ui.header.buffer.bytes: 4096 |
ui.http.creds.plugin: "backtype.storm.security.auth.DefaultHttpCredentialsPlugin"
| |
| logviewer.port: 8000 |
| logviewer.childopts: "-Xmx128m" |
| logviewer.cleanup.age.mins: 10080 |
| logviewer.appender.name: "A1" |
| logviewer.max.sum.worker.logs.size.mb: 4096 |
| logviewer.max.per.worker.logs.size.mb: 2048 |
| |
| logs.users: null |
| |
| drpc.port: 3772 |
| drpc.worker.threads: 64 |
| drpc.max_buffer_size: 1048576 |
| drpc.queue.size: 128 |
| drpc.invocations.port: 3773 |
| drpc.invocations.threads: 64 |
| drpc.request.timeout.secs: 600 |
| drpc.childopts: "-Xmx768m" |
| drpc.http.port: 3774 |
| drpc.https.port: -1 |
| drpc.https.keystore.password: "" |
| drpc.https.keystore.type: "JKS" |
drpc.http.creds.plugin: "backtype.storm.security.auth.DefaultHttpCredentialsPlugin"
| drpc.authorizer.acl.filename: "drpc-auth-acl.yaml" |
| drpc.authorizer.acl.strict: false |
| |
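# For transactional state, null servers/port means fall back to the main
# storm.zookeeper.* ensemble.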
| transactional.zookeeper.root: "/transactional" |
| transactional.zookeeper.servers: null |
| transactional.zookeeper.port: null |
| |
| ### supervisor.* configs are for node supervisors |
# Define the number of workers that can run on this machine. Each worker is assigned a port to use for communication (see the example override after this list)
| supervisor.slots.ports: |
| - 6700 |
| - 6701 |
| - 6702 |
| - 6703 |
| supervisor.childopts: "-Xmx256m" |
| supervisor.run.worker.as.user: false |
# how long the supervisor will wait for a worker process to start before giving up
| supervisor.worker.start.timeout.secs: 120 |
# how long without a heartbeat before the supervisor considers a worker dead and restarts it
| supervisor.worker.timeout.secs: 30 |
# how many seconds to sleep before shutting down a worker's threads
| supervisor.worker.shutdown.sleep.secs: 1 |
# how frequently the supervisor checks the status of the processes it monitors, restarting them if necessary
| supervisor.monitor.frequency.secs: 3 |
| #how frequently the supervisor heartbeats to the cluster state (for nimbus) |
| supervisor.heartbeat.frequency.secs: 5 |
| supervisor.enable: true |
| supervisor.supervisors: [] |
| supervisor.supervisors.commands: [] |
| supervisor.memory.capacity.mb: 3072.0 |
# By convention one CPU core scores about 100, though this can be adjusted if
# needed; using 100 makes it simple to set the desired value directly from the
# measured capacity of a single-threaded bolt
| supervisor.cpu.capacity: 400.0 |
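# Example: with the 100-per-core convention, the default of 400.0 corresponds to
# 4 cores; an 8-core machine would be set to 800.0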
| |
| ### worker.* configs are for task workers |
| worker.heap.memory.mb: 768 |
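# Note: %HEAP-MEM% in worker.childopts is substituted at launch time with the
# worker's assigned on-heap memory in MB (worker.heap.memory.mb unless the
# scheduler assigns a different amount)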
| worker.childopts: "-Xmx%HEAP-MEM%m -XX:+PrintGCDetails -Xloggc:artifacts/gc.log -XX:+PrintGCDateStamps -XX:+PrintGCTimeStamps -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=1M -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=artifacts/heapdump" |
| worker.gc.childopts: "" |
| worker.profiler.childopts: "-XX:+UnlockCommercialFeatures -XX:+FlightRecorder" |
| worker.profiler.enabled: true |
| worker.profiler.command: "flight.bash" |
| worker.heartbeat.frequency.secs: 1 |
| |
# how often workers poll to check whether dynamically-set log levels should be reset (e.g., from DEBUG back to INFO)
| worker.log.level.reset.poll.secs: 30 |
| |
# how many receiver threads to run per worker
| topology.worker.receiver.thread.count: 1 |
| |
| task.heartbeat.frequency.secs: 3 |
| task.refresh.poll.secs: 10 |
| task.credentials.poll.secs: 30 |
| |
# backpressure throttles spouts when an executor's receive queue fills past the high watermark and releases once it drains below the low watermark
| topology.backpressure.enable: true |
| backpressure.disruptor.high.watermark: 0.9 |
| backpressure.disruptor.low.watermark: 0.4 |
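# For example, with the default executor receive buffer of 1024 entries, the
# high watermark corresponds to roughly 0.9 * 1024 ≈ 922 pending tuples and the
# low watermark to roughly 0.4 * 1024 ≈ 410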
| |
| zmq.threads: 1 |
| zmq.linger.millis: 5000 |
| zmq.hwm: 0 |
| |
| |
| storm.messaging.netty.server_worker_threads: 1 |
| storm.messaging.netty.client_worker_threads: 1 |
storm.messaging.netty.buffer_size: 5242880 # 5MB buffer
# Since nimbus.task.launch.secs and supervisor.worker.start.timeout.secs are both 120,
# a worker should keep retrying for at least that long before giving up on connecting
# to another worker. The reconnection period must also be longer than
# storm.zookeeper.session.timeout (default 20s), so that reconnection can be aborted
# once the target worker is known to be dead.
| storm.messaging.netty.max_retries: 300 |
| storm.messaging.netty.max_wait_ms: 1000 |
| storm.messaging.netty.min_wait_ms: 100 |
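# Rough upper bound with these defaults: 300 retries at up to 1000 ms each gives
# on the order of 300 s of reconnection attempts, comfortably longer than the
# 120 s launch/start timeouts noted above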
| |
# If the Netty messaging layer is busy (i.e., its internal buffer is not writable),
# the Netty client batches as many messages as possible, up to
# storm.messaging.netty.transfer.batch.size bytes; otherwise it flushes each message
# as soon as possible to reduce latency.
| storm.messaging.netty.transfer.batch.size: 262144 |
# The backlog value (maximum queue length of pending connections) used when the server channel binds to a local address
| storm.messaging.netty.socket.backlog: 500 |
| |
# Netty SASL authentication is disabled by default; users can override this and enable it for a specific topology.
| storm.messaging.netty.authentication: false |
| |
| # Default plugin to use for automatic network topology discovery |
storm.network.topography.plugin: "backtype.storm.networktopography.DefaultRackDNSToSwitchMapping"
| |
# default number of seconds the group mapping service will cache a user's groups
| storm.group.mapping.service.cache.duration.secs: 120 |
| |
### topology.* configs are for specific executing topologies
| topology.enable.message.timeouts: true |
| topology.debug: false |
| topology.workers: 1 |
| topology.acker.executors: null |
| topology.eventlogger.executors: null |
| topology.tasks: null |
| # maximum amount of time a message has to complete before it's considered failed |
| topology.message.timeout.secs: 30 |
| topology.multilang.serializer: "backtype.storm.multilang.JsonSerializer" |
| topology.skip.missing.kryo.registrations: false |
| topology.max.task.parallelism: null |
| topology.max.spout.pending: null |
| topology.state.synchronization.timeout.secs: 60 |
| topology.stats.sample.rate: 0.05 |
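# A rate of 0.05 means roughly 1 in 20 tuples is sampled for stats, with counts
# scaled back up by 1/rate when reported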
| topology.builtin.metrics.bucket.size.secs: 60 |
| topology.fall.back.on.java.serialization: true |
| topology.worker.childopts: null |
| topology.worker.logwriter.childopts: "-Xmx64m" |
topology.executor.receive.buffer.size: 1024 # batched
topology.executor.send.buffer.size: 1024 # individual messages
| topology.transfer.buffer.size: 1024 # batched |
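# These three buffer sizes are ring-buffer capacities and should be kept at
# powers of two, as required by the underlying LMAX Disruptor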
| topology.tick.tuple.freq.secs: null |
| topology.worker.shared.thread.pool.size: 4 |
| topology.spout.wait.strategy: "backtype.storm.spout.SleepSpoutWaitStrategy" |
| topology.sleep.spout.wait.strategy.time.ms: 1 |
| topology.error.throttle.interval.secs: 10 |
| topology.max.error.report.per.interval: 5 |
| topology.kryo.factory: "backtype.storm.serialization.DefaultKryoFactory" |
| topology.tuple.serializer: "backtype.storm.serialization.types.ListDelegateSerializer" |
| topology.trident.batch.emit.interval.millis: 500 |
| topology.testing.always.try.serialize: false |
| topology.classpath: null |
| topology.environment: null |
| topology.bolts.outgoing.overflow.buffer.enable: false |
| topology.disruptor.wait.timeout.millis: 1000 |
| topology.disruptor.batch.size: 100 |
| topology.disruptor.batch.timeout.millis: 1 |
| topology.disable.loadaware: false |
| |
| # Configs for Resource Aware Scheduler |
| topology.component.resources.onheap.memory.mb: 128.0 |
| topology.component.resources.offheap.memory.mb: 0.0 |
| topology.component.cpu.pcore.percent: 10.0 |
| topology.worker.max.heap.size.mb: 768.0 |
| |
| dev.zookeeper.path: "/tmp/dev-storm-zookeeper" |
| |
| pacemaker.host: "localhost" |
| pacemaker.port: 6699 |
| pacemaker.base.threads: 10 |
| pacemaker.max.threads: 50 |
| pacemaker.thread.timeout: 10 |
| pacemaker.childopts: "-Xmx1024m" |
| pacemaker.auth.method: "NONE" |
| pacemaker.kerberos.users: [] |