| # Licensed to the Apache Software Foundation (ASF) under one or more |
| # contributor license agreements. See the NOTICE file distributed with |
| # this work for additional information regarding copyright ownership. |
| # The ASF licenses this file to You under the Apache License, Version 2.0 |
| # (the "License"); you may not use this file except in compliance with |
| # the License. You may obtain a copy of the License at |
| # |
| # http://www.apache.org/licenses/LICENSE-2.0 |
| # |
| # Unless required by applicable law or agreed to in writing, software |
| # distributed under the License is distributed on an "AS IS" BASIS, |
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| # See the License for the specific language governing permissions and |
| # limitations under the License. |
| |
| # See org.apache.kafka.clients.consumer.ConsumerConfig for more details. |
| # Consider using environment variables or external configuration management |
| # for sensitive information like passwords and environment-specific settings. |
| |
| ##################### Consumer Basics ####################### |
| |
# List of Kafka brokers used for initial cluster discovery and metadata retrieval.
# Format: host1:port1,host2:port2,host3:port3
# Include several brokers for high availability; the client discovers the rest of
# the cluster from whichever broker it reaches first.
| bootstrap.servers=localhost:9092 |
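# For example, a multi-broker list might look like the commented line below
# (hostnames and ports are placeholders for your own brokers):
#bootstrap.servers=broker1.example.com:9092,broker2.example.com:9092,broker3.example.com:9092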
| |
| # Client identifier for logging and metrics. |
| # Helps with debugging and monitoring. |
| client.id=test-consumer |
| |
| ##################### Transaction Support ##################### |
| |
# Isolation level for reading messages.
# Options: read_uncommitted (default), read_committed (returns only committed
# transactional messages; required for end-to-end exactly-once pipelines).
| isolation.level=read_uncommitted |
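# For example, a consumer in a transactional pipeline would typically use:
#isolation.level=read_committed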
| |
| ##################### Consumer Group Configuration ##################### |
| |
| # Unique identifier for this consumer group. |
| # All consumers with the same group.id will share partition consumption. |
| group.id=test-consumer-group |
| |
| # What to do when there is no initial offset or if the current offset no longer exists. |
| # Options: earliest (from beginning), latest (from end), none (throw exception). |
| # Use 'earliest' to avoid data loss on first run. |
| auto.offset.reset=earliest |
| |
| ##################### Partition Assignment Strategy ##################### |
| |
| # Strategy for assigning partitions to consumers in a group. |
| # Options: RangeAssignor, RoundRobinAssignor, StickyAssignor, CooperativeStickyAssignor. |
| # CooperativeStickyAssignor is recommended (requires Kafka 2.4+). |
| partition.assignment.strategy=org.apache.kafka.clients.consumer.CooperativeStickyAssignor |
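# Note: when migrating a live group from an eager assignor, the usual path is a
# rolling restart that temporarily lists both the new and the old assignor, e.g.:
#partition.assignment.strategy=org.apache.kafka.clients.consumer.CooperativeStickyAssignor,org.apache.kafka.clients.consumer.RangeAssignor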
| |
| ##################### Deserialization ##################### |
| |
# Deserializer class for message keys.
# Common options: StringDeserializer, ByteArrayDeserializer, or a schema-based
# deserializer such as Confluent's KafkaAvroDeserializer.
| key.deserializer=org.apache.kafka.common.serialization.StringDeserializer |
| |
| # Deserializer class for message values. |
| value.deserializer=org.apache.kafka.common.serialization.StringDeserializer |
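# For binary payloads decoded by the application itself, a byte-array
# deserializer is a common alternative, for example:
#value.deserializer=org.apache.kafka.common.serialization.ByteArrayDeserializer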
| |
| ##################### Offset Management ##################### |
| |
| # Whether to automatically commit offsets in the background. |
| # Set to false for manual offset management and exactly-once processing. |
| enable.auto.commit=true |
| |
| # Frequency (in milliseconds) at which offsets are auto-committed. |
# Lower values shrink the window of records re-consumed after a failure, but
# generate more commit requests to the broker.
| auto.commit.interval.ms=5000 |
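# For manual offset management (see enable.auto.commit above), disable auto-commit
# and have the application call commitSync()/commitAsync() after processing:
#enable.auto.commit=false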
| |
| ##################### Classic Group Session Management ##################### |
| |
| # Timeout for detecting consumer failures when using group management. |
| # Must be between group.min.session.timeout.ms and group.max.session.timeout.ms (broker config). |
| session.timeout.ms=30000 |
| |
| # Expected time between heartbeats when using group management. |
| # Should be lower than session.timeout.ms (typically 1/3 of session timeout). |
| heartbeat.interval.ms=10000 |
| |
| # Maximum time between successive calls to poll(). |
# If exceeded, the consumer is considered failed and the group rebalances its partitions.
| max.poll.interval.ms=300000 |
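# If record processing regularly takes longer than this, a common adjustment is to
# raise the limit and/or fetch fewer records per poll (illustrative values only):
#max.poll.interval.ms=600000
#max.poll.records=100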
| |
| ##################### Retry And Error Handling ##################### |
| |
| # Initial and max time to wait for failed request retries. |
| # The retry.backoff.ms is the initial backoff value and will increase exponentially |
| # for each failed request, up to the retry.backoff.max.ms value. |
| retry.backoff.ms=100 |
| retry.backoff.max.ms=1000 |
| |
# Maximum time the client will wait for the response to a request before
# retrying it or failing.
| request.timeout.ms=40000 |
| |
| # Close idle connections after this many milliseconds. |
| connections.max.idle.ms=540000 |
| |
| ##################### Security Configuration ##################### |
| |
| # Security protocol for communication with brokers. |
| # Options: PLAINTEXT, SSL, SASL_PLAINTEXT, SASL_SSL |
| #security.protocol=SASL_SSL |
| |
| # SSL configuration. |
| #ssl.truststore.location=/path/to/truststore.jks |
| #ssl.truststore.password=truststore-password |
| |
| # SASL configuration. |
| #sasl.mechanism=PLAIN |
| #sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required \ |
| # username="your-username" \ |
| # password="your-password"; |
| |
| ##################### Performance And Throughput ##################### |
| |
# Minimum amount of data (bytes) the broker should return for a fetch request,
# and the maximum time (ms) it will wait for that much data to accumulate.
# Whichever condition is met first triggers the fetch response.
| # Balances response latency against message batching efficiency. |
| # For remote partition fetching, configure remote.fetch.max.wait.ms instead. |
| fetch.min.bytes=1 |
| fetch.max.wait.ms=500 |
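# Higher-throughput, batch-oriented consumers often trade latency for larger
# batches, for example (illustrative values only):
#fetch.min.bytes=65536
#fetch.max.wait.ms=1000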
| |
# Soft limits on the amount of data returned per fetch request and per partition.
# Both max.partition.fetch.bytes and fetch.max.bytes can be exceeded when the
# first record batch in the first non-empty partition is larger than the limit,
# so that the consumer can always make progress.
# Keeping message.max.bytes (broker config) or max.message.bytes (topic config)
# at or below fetch.max.bytes prevents oversized fetch responses.
| fetch.max.bytes=52428800 |
| max.partition.fetch.bytes=1048576 |
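# If topics carry messages larger than the 1 MB per-partition default, raise this
# limit in step with the broker/topic max message size, for example (illustrative):
#max.partition.fetch.bytes=5242880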
| |
| # Maximum number of records returned in a single poll() call. |
| # Higher values increase throughput but may cause longer processing delays. |
| max.poll.records=500 |