#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# kafka global settings
#
[kafka-global]
# Initial list of brokers as a CSV list of broker host or host:port.
# Type: string
metadata.broker.list = localhost:9092
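# Multiple brokers may be listed as a CSV list; for example (hypothetical host names,
# not part of this deployment):
#metadata.broker.list = kafka-broker-1:9092,kafka-broker-2:9092,kafka-broker-3:9092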
# Client identifier.
# Type: string
# Default: rdkafka
client.id = fastcapa
# Compression codec to use for compressing message sets. This is the default value
# for all topics and may be overridden by the topic configuration property compression.codec.
# Type: enum value { none, gzip, snappy, lz4 }
# Default: none
compression.codec = snappy
# Maximum number of messages batched in one MessageSet. The total MessageSet size is
# also limited by message.max.bytes. Increase for better compression.
# Type: integer
# Default: 10000
batch.num.messages = 500000
# Maximum number of messages allowed on the producer queue.
# Type: integer
# Default: 100000
#queue.buffering.max.messages = 5000000
# Maximum total size, in kilobytes, of messages allowed on the producer queue.
# Type: integer
#queue.buffering.max.kbytes = 2097151
# Maximum time, in milliseconds, for buffering data on the producer queue.
# Type: integer
# Default: 1000
#queue.buffering.max.ms = 5000
# Maximum size for message to be copied to buffer. Messages larger than this will be
# passed by reference (zero-copy) at the expense of larger iovecs.
# Type: integer
# Default: 65535
#message.copy.max.bytes = 65535
# Maximum transmit message size.
# Type: integer
# Default: 1000000
#message.max.bytes = 1000000000
# How many times to retry sending a failing MessageSet. Note: retrying may cause reordering.
# Type: integer
# Default: 2
#message.send.max.retries = 5
# The backoff time in milliseconds before retrying a message send.
# Type: integer
# Default: 100
#retry.backoff.ms = 500
# Statistics emit interval. The application also needs to register a stats callback
# using rd_kafka_conf_set_stats_cb(). The granularity is 1000 ms. A value of 0 disables statistics.
# Type: integer
# Default: 0
#statistics.interval.ms = 5000
# Protocol used to communicate with brokers.
# Type: enum value { plaintext, ssl, sasl_plaintext, sasl_ssl }
# Default: plaintext
#security.protocol = plaintext
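# If an SSL-based protocol were selected above, librdkafka would typically also need
# the ssl.* properties sketched below; the file paths are placeholders, not part of
# this deployment.
#ssl.ca.location = /path/to/ca-cert.pem
#ssl.certificate.location = /path/to/client-cert.pem
#ssl.key.location = /path/to/client-key.pem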
# Timeout for network requests.
# Type: integer
# Default: 60000
socket.timeout.ms = 6000
# Only provide delivery reports for failed messages.
# Type: boolean
# Default: false
#delivery.report.only.error = false
#
# kafka topic settings
#
[kafka-topic]
# This field indicates how many acknowledgements the leader broker must receive from ISR brokers
# before responding to the request:
# 0=Broker does not send any response/ack to the client,
# 1=Only the leader broker needs to ack the message,
# -1 or all=Broker will block until the message is committed by all in-sync replicas (ISRs);
#  if the ISR set is smaller than the broker's min.insync.replicas setting, the request fails.
# Type: integer
#request.required.acks = 1
# Local message timeout. This value is only enforced locally and limits the time a produced message
# waits for successful delivery. A time of 0 is infinite.
# Type: integer
# Default: 300000
#message.timeout.ms = 300000
# Report offset of produced message back to application. The application must use the
# dr_msg_cb callback to retrieve the offset from rd_kafka_message_t.offset.
# Type: boolean
# Default: false
#produce.offset.report = false