 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-# see org.apache.kafka.clients.producer.ProducerConfig for more details

-############################# Producer Basics #############################
+# See org.apache.kafka.clients.producer.ProducerConfig for more details.
+# Consider using environment variables or external configuration management
+# for sensitive information like passwords and environment-specific settings.

-# list of brokers used for bootstrapping knowledge about the rest of the cluster
-# format: host1:port1,host2:port2 ...
+##################### Producer Basics #####################
+
+# List of Kafka brokers used for initial cluster discovery and metadata retrieval.
+# Format: host1:port1,host2:port2,host3:port3
+# Include all brokers for high availability.
 bootstrap.servers=localhost:9092

-# specify the compression codec for all data generated: none, gzip, snappy, lz4, zstd
-compression.type=none
+# Client identifier for logging and metrics.
+# Helps with debugging and monitoring.
+client.id=test-producer
+
+##################### Transaction Support #####################
+
+# Transactional ID for the producer.
+# Must be unique across all producer instances.
+# Enables exactly-once semantics across multiple partitions/topics.
+#transactional.id=test-transactional-id
+
+# Maximum amount of time in milliseconds that a transaction will remain open.
+# Only applies when transactional.id is set.
+transaction.timeout.ms=60000
+
+##################### Partitioning #####################
+
+# Name of the partitioner class for partitioning records.
+# The default uses "sticky" partitioning, which improves throughput by filling batches.
+# Options: DefaultPartitioner, RoundRobinPartitioner, UniformStickyPartitioner.
+#partitioner.class=org.apache.kafka.clients.producer.RoundRobinPartitioner
+
+##################### Serialization #####################
+
+# Serializer class for message keys.
+# Common options: StringSerializer, ByteArraySerializer, AvroSerializer.
+key.serializer=org.apache.kafka.common.serialization.StringSerializer
+
+# Serializer class for message values.
+value.serializer=org.apache.kafka.common.serialization.StringSerializer
+
+##################### Reliability And Durability #####################
+
+# Number of acknowledgments the producer requires the leader to have received.
+# Options: 0 (no ack), 1 (leader only), all/-1 (all in-sync replicas).
+# Use 'all' for maximum durability.
+acks=all

-# name of the partitioner class for partitioning records;
-# The default uses "sticky" partitioning logic which spreads the load evenly between partitions, but improves throughput by attempting to fill the batches sent to each partition.
-#partitioner.class=
+# Number of retries for failed sends.
+# Set to a high value or Integer.MAX_VALUE for maximum reliability.
+retries=2147483647

-# the maximum amount of time the client will wait for the response of a request
-#request.timeout.ms=
+# Initial and maximum time to wait before retrying a failed request.
+# retry.backoff.ms is the initial backoff value; it increases exponentially
+# for each failed request, up to retry.backoff.max.ms.
+retry.backoff.ms=100
+retry.backoff.max.ms=1000

-# how long `KafkaProducer.send` and `KafkaProducer.partitionsFor` will block for
-#max.block.ms=
+# Enable the idempotent producer to prevent duplicate messages.
+# Ensures exactly-once delivery semantics when combined with proper consumer settings.
+enable.idempotence=true
+
+# Maximum number of unacknowledged requests the client will send on a single connection.
+# Must be <= 5 when enable.idempotence=true to maintain ordering guarantees.
+max.in.flight.requests.per.connection=5
+
+##################### Timeouts And Blocking #####################
+
+# Maximum amount of time the client will wait for the response of a request.
+# Should be higher than replica.lag.time.max.ms (a broker config).
+request.timeout.ms=30000
+
+# How long KafkaProducer.send() and KafkaProducer.partitionsFor() will block.
+# Should be higher than request.timeout.ms.
+max.block.ms=60000
+
+# Upper bound on the time to report success or failure after a call to send() returns.
+# Should be greater than or equal to the sum of request.timeout.ms and linger.ms.
+delivery.timeout.ms=120000
+
+##################### Security Configuration #####################
+
+# Security protocol for communication with brokers.
+# Options: PLAINTEXT, SSL, SASL_PLAINTEXT, SASL_SSL
+#security.protocol=SASL_SSL
+
+# SSL configuration.
+#ssl.truststore.location=/path/to/truststore.jks
+#ssl.truststore.password=truststore-password
+
+# SASL configuration.
+#sasl.mechanism=PLAIN
+#sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required \
+#  username="your-username" \
+#  password="your-password";
+
+##################### Performance And Throughput #####################
+
+# Compression codec for all data generated.
+# Options: none, gzip, snappy, lz4, zstd.
+# Can greatly improve throughput at the cost of increased CPU usage.
+compression.type=none

-# the producer will wait for up to the given delay to allow other records to be sent so that the sends can be batched together
-#linger.ms=
+# The producer will wait up to this delay to batch records together.
+# Higher values increase throughput but add latency.
+# Set to 0 for lowest latency, 5-100 ms for balanced throughput/latency.
+linger.ms=5

-# the maximum size of a request in bytes
-#max.request.size=
+# Default batch size in bytes when batching multiple records sent to a partition.
+# Larger batches improve throughput but use more memory.
+# 16KB is a good starting point; adjust based on message size and throughput needs.
+batch.size=16384

-# the default batch size in bytes when batching multiple records sent to a partition
-#batch.size=
+# Total bytes of memory the producer can use to buffer records waiting to be sent.
+# Should be larger than batch.size multiplied by the number of partitions being written to.
+# 32MB is reasonable for most use cases.
+buffer.memory=33554432

-# the total bytes of memory the producer can use to buffer records waiting to be sent to the server
-#buffer.memory=
+# Maximum size of a request in bytes.
+# Should accommodate your largest batch size plus overhead.
+# 1MB is the default and suitable for most cases.
+max.request.size=1048576
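For context on how this file would be consumed, here is a minimal Java sketch that loads the properties and runs a single transaction. The file path (`config/producer.properties`), the topic name (`test-topic`), and the class name are placeholders, and `transactional.id` (left commented out above) is set programmatically so the transactional calls work.

```java
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.KafkaException;

import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.Properties;

public class TransactionalProducerExample {
    public static void main(String[] args) throws IOException {
        // Load the properties file from this change; the path is a placeholder.
        Properties props = new Properties();
        try (InputStream in = new FileInputStream("config/producer.properties")) {
            props.load(in);
        }
        // transactional.id is commented out in the file, so set it here for the demo.
        props.put("transactional.id", "test-transactional-id");

        try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
            producer.initTransactions();
            try {
                producer.beginTransaction();
                // "test-topic" is a placeholder topic name.
                producer.send(new ProducerRecord<>("test-topic", "key-1", "value-1"));
                producer.send(new ProducerRecord<>("test-topic", "key-2", "value-2"));
                producer.commitTransaction();
            } catch (KafkaException e) {
                // Abort so neither record becomes visible to read_committed consumers.
                producer.abortTransaction();
                throw e;
            }
        }
    }
}
```

Because the file already sets enable.idempotence=true, acks=all, and retries=2147483647, the two sends above are committed atomically and become visible only to consumers reading with isolation.level=read_committed.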
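The header comment recommends keeping secrets out of the checked-in file. One possible sketch of that approach, assuming hypothetical KAFKA_SASL_USERNAME and KAFKA_SASL_PASSWORD environment variables, overlays the commented-out SASL_SSL settings at startup:

```java
import java.util.Properties;

public class SecureProducerConfig {

    // Overlay SASL_SSL credentials from the environment onto properties loaded
    // from producer.properties, so secrets never live in the checked-in file.
    // KAFKA_SASL_USERNAME / KAFKA_SASL_PASSWORD are placeholder variable names.
    public static Properties withSaslFromEnv(Properties props) {
        String username = System.getenv("KAFKA_SASL_USERNAME");
        String password = System.getenv("KAFKA_SASL_PASSWORD");
        if (username != null && password != null) {
            props.put("security.protocol", "SASL_SSL");
            props.put("sasl.mechanism", "PLAIN");
            props.put("sasl.jaas.config",
                    "org.apache.kafka.common.security.plain.PlainLoginModule required "
                            + "username=\"" + username + "\" password=\"" + password + "\";");
        }
        return props;
    }
}
```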