Kafka server.properties

# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# See kafka.server.KafkaConfig for additional details and defaults

############################# Server Basics #############################

# The id of each broker in the cluster must be unique; the only requirement is that it is a non-negative integer. If the server's IP address changes but broker.id stays the same, consumers are not affected.
broker.id=106

# Note that the delete command does not actually remove a topic; it only marks the topic for deletion in ZooKeeper. The delete.topic.enable switch must be turned on in advance, otherwise the deletion will never be carried out. A sketch of the command follows.
delete.topic.enable=true
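# A minimal sketch of marking a topic for deletion with the bundled CLI,
# assuming the ZooKeeper chroot configured below (the topic name "test-topic"
# is hypothetical); without delete.topic.enable=true the broker only records
# the deletion marker in ZooKeeper and never removes the data:
#   kafka-topics.sh --zookeeper node106.yinzhengjie.org.cn:2181/kafka01 --delete --topic test-topic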

# Whether to allow automatic topic creation. If false, topics must be created explicitly with the command-line tool (a sketch follows).
auto.create.topics.enable=false
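# A minimal sketch of creating a topic by hand, assuming the bundled CLI and
# the ZooKeeper chroot configured below (the topic name "test-topic" is
# hypothetical; the partition and replica counts reuse num.partitions and
# default.replication.factor from this file):
#   kafka-topics.sh --zookeeper node106.yinzhengjie.org.cn:2181/kafka01 --create --topic test-topic --partitions 20 --replication-factor 2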

############################# Socket Server Settings #############################

# The address the socket server listens on. It will get the value returned from
# java.net.InetAddress.getCanonicalHostName() if not configured.
#   FORMAT:
#     listeners = listener_name://host_name:port
#   EXAMPLE:
#     listeners = PLAINTEXT://your.host.name:9092

listeners=PLAINTEXT://node106.yinzhengjie.org.cn:9092

# The port the broker service listens on
port=9092

# The broker's host address. If set, the broker binds only to this address; if not set, it binds to all interfaces and publishes one of them to ZooKeeper. Usually left unset.
host.name=node106.yinzhengjie.org.cn
# Hostname and port the broker will advertise to producers and consumers. If not set,
# it uses the value for "listeners" if configured. Otherwise, it will use the value
# returned from java.net.InetAddress.getCanonicalHostName().

# Kafka 0.9.x added the advertised.listeners configuration; in 0.9.x and later, advertised.host.name and advertised.host.port are deprecated and should no longer be used. If advertised.listeners is not configured, the value of "listeners" is used if set; otherwise the value returned from java.net.InetAddress.getCanonicalHostName() is used.
#advertised.listeners=PLAINTEXT://your.host.name:9092


# Map from listener names to security protocols; by default they are the same. See the configuration documentation for more details.
#listener.security.protocol.map=PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL


# The maximum number of threads for handling network requests
num.network.threads=30

# The number of threads doing disk I/O
num.io.threads=30

 
# The send buffer (SO_SNDBUF) used by the socket server
socket.send.buffer.bytes=5242880

# The receive buffer (SO_RCVBUF) used by the socket server
socket.receive.buffer.bytes=5242880

# The maximum size of a request the socket server will accept (protection against OOM)
socket.request.max.bytes=104857600

# The maximum number of requests allowed to wait in the queue for the I/O threads; beyond this number, the network threads stop accepting new requests. It acts as a self-protection mechanism.
queued.max.requests=1000

############################# Log Basics #############################

# Log storage directories, separated by commas. If you have multiple disks, configuring one directory per disk is recommended to improve I/O efficiency.
log.dirs=/home/data/kafka/logs,/home/data/kafka/logs2,/home/data/kafka/logs3

# The default number of partitions per topic. If not specified at creation time, this value is used; parameters given when the topic is created override it.
num.partitions=20

# The number of threads per data directory used for log recovery at startup and for flushing logs at shutdown. Default is 1.
num.recovery.threads.per.data.dir=30


# The default number of replicas
default.replication.factor=2

# The maximum size of a single message the server will accept, i.e. the maximum size of the message body, in bytes. A per-topic override is sketched below.
message.max.bytes=104857600
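# A hedged sketch: the same limit can also be overridden per topic with the
# bundled kafka-configs.sh tool (the topic name "test-topic" is hypothetical;
# note the topic-level key is max.message.bytes rather than message.max.bytes):
#   kafka-configs.sh --zookeeper node106.yinzhengjie.org.cn:2181/kafka01 --alter --entity-type topics --entity-name test-topic --add-config max.message.bytes=104857600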

# Automatic leader rebalancing. If true, a controller thread periodically checks and rebalances partition leadership across all brokers, handing leadership back to the preferred replica.
#auto.leader.rebalance.enable=false


############################# Log Flush Policy #############################

# The number of messages accumulated on a log partition before they are forcibly fsync'd to disk. Lower values sync data to disk more often and hurt performance. It is usually recommended to rely on replication for durability rather than on fsync alone, although using both gives extra safety. Default is 10000.
#log.flush.interval.messages=10000

# The maximum interval between two fsync calls, in ms. Even if log.flush.interval.messages has not been reached, fsync is still called once this much time has passed. Default is 3000 ms.
#log.flush.interval.ms=10000

############################# Log Retention Policy #############################


# Log retention time in hours (minute- and ms-based variants also exist); the default is 7 days (168 hours). Data older than this is handled according to the cleanup policy. The bytes and time limits both apply: whichever is reached first triggers cleanup.
log.retention.hours=168

# The maximum number of bytes of log data to retain. Data beyond this is handled according to the cleanup policy.
#log.retention.bytes=1073741824

# Controls the size of each log segment file; once a segment exceeds this size, a new segment file is created (-1 means no limit)
log.segment.bytes=536870912

# When this much time has passed, a new segment is forcibly rolled even if the size limit has not been reached
#log.roll.hours=168

# The interval at which log segments are checked to see whether they have hit the deletion policies (log.retention.hours or log.retention.bytes)
log.retention.check.interval.ms=600000
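# A quick, hedged way to watch segments roll on disk, assuming the log.dirs
# above (the partition directory "test-topic-0" is hypothetical); each .log
# file is capped at ~512 MB by log.segment.bytes, with a matching .index file:
#   ls -lh /home/data/kafka/logs/test-topic-0/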

# Whether the log cleaner (log compaction) is enabled
#log.cleaner.enable=false

# Log cleanup policy choices: delete (mainly for expired data) or compact. Applied when data exceeds the retention limits; can be overridden by parameters given when the topic is created.
#log.cleanup.policy=delete

# The number of threads running log compaction
#log.cleaner.threads=2


# How long deleted records are retained in compacted logs
#log.cleaner.delete.retention.ms=3600000


############################# Zookeeper #############################

# ZooKeeper cluster address; there can be several hosts, separated by commas. The trailing /kafka01 is a chroot path under which all of this cluster's znodes are stored.
zookeeper.connect=node106.yinzhengjie.org.cn:2181,node107.yinzhengjie.org.cn:2181,node108.yinzhengjie.org.cn:2181/kafka01
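# A minimal, hedged check with the bundled zookeeper-shell.sh: given the
# /kafka01 chroot above, live brokers register themselves under
# /kafka01/brokers/ids, so this should list ids such as 106:
#   zookeeper-shell.sh node106.yinzhengjie.org.cn:2181 ls /kafka01/brokers/ids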

# The ZooKeeper session timeout; heartbeats must arrive within this interval, otherwise the broker is considered dead. Should not be set too large.
zookeeper.session.timeout.ms=180000

# The maximum time to wait when establishing a connection to ZooKeeper. Note that consumers commit offsets to ZooKeeper on a time interval rather than per message, so after an abnormal exit and restart, some already-consumed messages may be fetched again.
zookeeper.connection.timeout.ms=6000

# The maximum size of a request in bytes. This also effectively caps the maximum record size. Note that the server has its own limit on record size, which may differ from this setting. This limits the number of record batches a producer sends in a single request, to avoid oversized requests.
max.request.size=104857600

# The maximum number of bytes to fetch per partition in each fetch request. These bytes are buffered in memory for every partition, so this setting controls how much memory the consumer uses. The fetch request size must be at least as large as the maximum message size the server allows; otherwise the producer could send messages bigger than the consumer is able to consume.
fetch.message.max.bytes=104857600

# The synchronization time between ZooKeeper leader and followers; in other words, how far a ZK follower may lag behind the leader.
#zookeeper.sync.time.ms=2000


############################# Replica Basics #############################

# Timeout for the leader waiting on a follower's "fetch request"; default is 10 seconds.
#replica.lag.time.max.ms=30000

# The maximum number of messages a follower may lag behind the leader. If a replica falls this far behind, the leader considers it failed and removes it from the ISR. Under normal circumstances, network delays and the like always cause some synchronization lag; if a replica lags badly, the leader assumes it has high network latency or limited throughput. In environments with few brokers or a weak network, increasing this value is advised. The parameter is global for the broker: set too large, it delays removing genuinely "lagging" followers; set too small, followers churn in and out of the ISR. Since no single value of replica.lag.max.messages fits all cases, newer versions of Kafka are said to have removed this parameter.
#replica.lag.max.messages=4000

# Socket timeout between follower and leader
#replica.socket.timeout.ms=30000

# The maximum size of data a follower fetches per request
replica.fetch.max.bytes=104857600

# The maximum wait time before a follower's fetch request times out and is retried
replica.fetch.wait.max.ms=2000

# The minimum amount of data a fetch request will return
#replica.fetch.min.bytes=1

# Since version 0.11.0, the default of unclean.leader.election.enable changed from true to false, turning off unclean leader election: a replica that is not in the ISR (in-sync replica) list can no longer be promoted to partition leader. This values the cluster's durability over its availability; if a partition has no other replica left in the ISR, it becomes unreadable and unwritable.
unclean.leader.election.enable=false

# The number of fetcher threads a follower opens, balancing synchronization speed against system load
num.replica.fetchers=5

# Socket timeout for communication between the partition leader (controller) and replicas
#controller.socket.timeout.ms=30000

# The size of the message queue used when the controller synchronizes data between partition leader and replicas
#controller.message.queue.size=10

# Specifies which version of the inter-broker protocol to use. Typically bumped only after all brokers have been upgraded to the new version; set this when upgrading.
#inter.broker.protocol.version=0.10.1

# Specifies the message format version the broker uses when appending messages to log files. The value should be a valid ApiVersion, for example: 0.8.2, 0.9.0.0, 0.10.0. By pinning a specific message format version, you assert that all existing messages on disk are at or below that version. Setting this value incorrectly breaks consumers on older versions, because they will receive messages in a format they do not understand.
#log.message.format.version=0.10.1

 
