Three-node kafka cluster configuration

Deploy zookeeper

1. Create a data directory

mkdir -p /data/zookeeper

2. Create a myid file on each of the three servers, writing 1, 2, and 3 respectively (run one command per server, not all three on one machine)

echo "1" > /data/zookeeper/myid
echo "2" > /data/zookeeper/myid
echo "3" > /data/zookeeper/myid

3. Configure hosts

vim /etc/hosts

192.168.44.180 node1
192.168.44.182 node2
192.168.44.184 node3

4. Configure the zookeeper configuration file

cp zoo_sample.cfg zoo.cfg

vim zoo.cfg

tickTime=2000
dataDir=/data/zookeeper/
clientPort=2181
initLimit=5
syncLimit=2
server.1=node1:2888:3888
server.2=node2:2888:3888
server.3=node3:2888:3888

5. Start zookeeper

 bin/zkServer.sh start

View the logs (kafka1, kafka2, and kafka3 are the hostnames of the three servers)

tail -200f logs/zookeeper-root-server-kafka1.out
tail -200f logs/zookeeper-root-server-kafka2.out
tail -200f logs/zookeeper-root-server-kafka3.out

6. View zookeeper status

Check out the first

#依次启动时,第一台会先提示失败,启动第二台成功后,再次查看第一台会提示成功
[root@kafka1 bin]# ./zkServer.sh status
/usr/bin/java
ZooKeeper JMX enabled by default
Using config: /root/apache-zookeeper-3.7.1-bin/bin/../conf/zoo.cfg
Client port found: 2181. Client address: localhost. Client SSL: false.
Error contacting service. It is probably not running.

#启动第二台成功后再次查看
[root@kafka1 bin]# ./zkServer.sh status
/usr/bin/java
ZooKeeper JMX enabled by default
Using config: /root/apache-zookeeper-3.7.1-bin/bin/../conf/zoo.cfg
Client port found: 2181. Client address: localhost. Client SSL: false.
Mode: follower

Check out the second

[root@kafka2 bin]# ./zkServer.sh status
/usr/bin/java
ZooKeeper JMX enabled by default
Using config: /root/apache-zookeeper-3.7.1-bin/bin/../conf/zoo.cfg
Client port found: 2181. Client address: localhost. Client SSL: false.
Mode: leader

Check out the third

[root@kafka3 bin]# ./zkServer.sh status
ZooKeeper JMX enabled by default
Using config: /root/apache-zookeeper-3.7.1-bin/bin/../conf/zoo.cfg
Client port found: 2181. Client address: localhost. Client SSL: false.
Mode: follower

Deploy kafka

1. Create a data directory

mkdir -p /data/kafka

2. The hosts file has been configured when configuring zookeeper

3. Configure kafka configuration file

server 1

 #当前服务在集群的唯一标识
 broker.id=0

#当前服务的ip和端口
 listeners=PLAINTEXT://192.168.44.180:9092

#数据存储目录 
log.dirs=/data/kafka/kafka-logs

#zookeeper地址
zookeeper.connect=node1:2181,node2:2181,node3:2181

server 2

#当前服务在集群的唯一标识
broker.id=1

#当前服务的ip和端口
listeners=PLAINTEXT://192.168.44.182:9092

#数据存储目录 
log.dirs=/data/kafka/kafka-logs

#zookeeper地址
zookeeper.connect=node1:2181,node2:2181,node3:2181

server 3

 #当前服务在集群的唯一标识
 broker.id=2

#当前服务的ip和端口
 listeners=PLAINTEXT://192.168.44.184:9092

#数据存储目录 
log.dirs=/data/kafka/kafka-logs

#zookeeper地址
zookeeper.connect=node1:2181,node2:2181,node3:2181

4. Start kafka

bin/kafka-server-start.sh -daemon config/server.properties

View kafka command script

cd bin
ll
-rwxr-xr-x 1 root root  1423 21 02:02 connect-distributed.sh
-rwxr-xr-x 1 root root  1396 21 02:02 connect-mirror-maker.sh
-rwxr-xr-x 1 root root  1420 21 02:02 connect-standalone.sh
-rwxr-xr-x 1 root root   861 21 02:02 kafka-acls.sh
-rwxr-xr-x 1 root root   873 21 02:02 kafka-broker-api-versions.sh
-rwxr-xr-x 1 root root   860 21 02:02 kafka-cluster.sh
-rwxr-xr-x 1 root root   864 21 02:02 kafka-configs.sh
-rwxr-xr-x 1 root root   945 21 02:02 kafka-console-consumer.sh
-rwxr-xr-x 1 root root   944 21 02:02 kafka-console-producer.sh
-rwxr-xr-x 1 root root   871 21 02:02 kafka-consumer-groups.sh
-rwxr-xr-x 1 root root   948 21 02:02 kafka-consumer-perf-test.sh
-rwxr-xr-x 1 root root   871 21 02:02 kafka-delegation-tokens.sh
-rwxr-xr-x 1 root root   869 21 02:02 kafka-delete-records.sh
-rwxr-xr-x 1 root root   866 21 02:02 kafka-dump-log.sh
-rwxr-xr-x 1 root root   863 21 02:02 kafka-features.sh
-rwxr-xr-x 1 root root   865 21 02:02 kafka-get-offsets.sh
-rwxr-xr-x 1 root root   870 21 02:02 kafka-leader-election.sh
-rwxr-xr-x 1 root root   863 21 02:02 kafka-log-dirs.sh
-rwxr-xr-x 1 root root   881 21 02:02 kafka-metadata-quorum.sh
-rwxr-xr-x 1 root root   873 21 02:02 kafka-metadata-shell.sh
-rwxr-xr-x 1 root root   862 21 02:02 kafka-mirror-maker.sh
-rwxr-xr-x 1 root root   959 21 02:02 kafka-producer-perf-test.sh
-rwxr-xr-x 1 root root   874 21 02:02 kafka-reassign-partitions.sh
-rwxr-xr-x 1 root root   874 21 02:02 kafka-replica-verification.sh
-rwxr-xr-x 1 root root 10884 21 02:02 kafka-run-class.sh
-rwxr-xr-x 1 root root  1376 21 02:02 kafka-server-start.sh
-rwxr-xr-x 1 root root  1361 21 02:02 kafka-server-stop.sh
-rwxr-xr-x 1 root root   860 21 02:02 kafka-storage.sh
-rwxr-xr-x 1 root root   945 21 02:02 kafka-streams-application-reset.sh
-rwxr-xr-x 1 root root   863 21 02:02 kafka-topics.sh
-rwxr-xr-x 1 root root   879 21 02:02 kafka-transactions.sh
-rwxr-xr-x 1 root root   958 21 02:02 kafka-verifiable-consumer.sh
-rwxr-xr-x 1 root root   958 21 02:02 kafka-verifiable-producer.sh
-rwxr-xr-x 1 root root  1714 21 02:02 trogdor.sh
drwxr-xr-x 2 root root  4096 21 02:02 windows
-rwxr-xr-x 1 root root   867 21 02:02 zookeeper-security-migration.sh
-rwxr-xr-x 1 root root  1393 21 02:02 zookeeper-server-start.sh
-rwxr-xr-x 1 root root  1366 21 02:02 zookeeper-server-stop.sh
-rwxr-xr-x 1 root root  1019 21 02:02 zookeeper-shell.sh

5. Operate topic

#操作topic的脚本
kafka-topics.sh

[root@kafka1 bin]# ./kafka-topics.sh --help
This tool helps to create, delete, describe, or change a topic.
Option                                   Description
------                                   -----------
--alter                                  Alter the number of partitions,
                                           replica assignment, and/or
                                           configuration for the topic.
--at-min-isr-partitions                  if set when describing topics, only
                                           show partitions whose isr count is
                                           equal to the configured minimum.
--bootstrap-server <String: server to    REQUIRED: The Kafka server to connect
  connect to>                              to.
--command-config <String: command        Property file containing configs to be
  config property file>                    passed to Admin Client. This is used
                                           only with --bootstrap-server option
                                           for describing and altering broker
                                           configs.
--config <String: name=value>            A topic configuration override for the
                                           topic being created or altered. The
                                           following is a list of valid
                                           configurations:
                                                cleanup.policy
                                                compression.type
                                                delete.retention.ms
                                                file.delete.delay.ms
                                                flush.messages
                                                flush.ms
                                                follower.replication.throttled.
                                           replicas
                                                index.interval.bytes
                                                leader.replication.throttled.replicas
                                                local.retention.bytes
                                                local.retention.ms
                                                max.compaction.lag.ms
                                                max.message.bytes
                                                message.downconversion.enable
                                                message.format.version
                                                message.timestamp.difference.max.ms
                                                message.timestamp.type
                                                min.cleanable.dirty.ratio
                                                min.compaction.lag.ms
                                                min.insync.replicas
                                                preallocate
                                                remote.storage.enable
                                                retention.bytes
                                                retention.ms
                                                segment.bytes
                                                segment.index.bytes
                                                segment.jitter.ms
                                                segment.ms
                                                unclean.leader.election.enable
                                         See the Kafka documentation for full
                                           details on the topic configs. It is
                                           supported only in combination with --
                                           create if --bootstrap-server option
                                           is used (the kafka-configs CLI
                                           supports altering topic configs with
                                           a --bootstrap-server option).
--create                                 Create a new topic.
--delete                                 Delete a topic
--delete-config <String: name>           A topic configuration override to be
                                           removed for an existing topic (see
                                           the list of configurations under the
                                           --config option). Not supported with
                                           the --bootstrap-server option.
--describe                               List details for the given topics.
--disable-rack-aware                     Disable rack aware replica assignment
--exclude-internal                       exclude internal topics when running
                                           list or describe command. The
                                           internal topics will be listed by
                                           default
--help                                   Print usage information.
--if-exists                              if set when altering or deleting or
                                           describing topics, the action will
                                           only execute if the topic exists.
--if-not-exists                          if set when creating topics, the
                                           action will only execute if the
                                           topic does not already exist.
--list                                   List all available topics.
--partitions <Integer: # of partitions>  The number of partitions for the topic
                                           being created or altered (WARNING:
                                           If partitions are increased for a
                                           topic that has a key, the partition
                                           logic or ordering of the messages
                                           will be affected). If not supplied
                                           for create, defaults to the cluster
                                           default.
--replica-assignment <String:            A list of manual partition-to-broker
  broker_id_for_part1_replica1 :           assignments for the topic being
  broker_id_for_part1_replica2 ,           created or altered.
  broker_id_for_part2_replica1 :
  broker_id_for_part2_replica2 , ...>
--replication-factor <Integer:           The replication factor for each
  replication factor>                      partition in the topic being
                                           created. If not supplied, defaults
                                           to the cluster default.
--topic <String: topic>                  The topic to create, alter, describe
                                           or delete. It also accepts a regular
                                           expression, except for --create
                                           option. Put topic name in double
                                           quotes and use the '\' prefix to
                                           escape regular expression symbols; e.
                                           g. "test\.topic".
--topic-id <String: topic-id>            The topic-id to describe.This is used
                                           only with --bootstrap-server option
                                           for describing topics.
--topics-with-overrides                  if set when describing topics, only
                                           show topics that have overridden
                                           configs
--unavailable-partitions                 if set when describing topics, only
                                           show partitions whose leader is not
                                           available
--under-min-isr-partitions               if set when describing topics, only
                                           show partitions whose isr count is
                                           less than the configured minimum.
--under-replicated-partitions            if set when describing topics, only
                                           show under replicated partitions
--version                                Display Kafka version.

Topic operation examples

#创建主题,名字叫做firstTopic ,分区1,副本3
[root@kafka1 bin]# ./kafka-topics.sh --bootstrap-server node1:9092 --topic firstTopic --create --partitions 1 --replication-factor 3
Created topic firstTopic.

#查看全部主题,此处两个是因为多创建了一个
[root@kafka1 bin]# ./kafka-topics.sh --bootstrap-server node1:9092 --list
firstTopic
first_topic

#查看主题详细信息
#topic名字 firstTopic
#PartitionCount: 1  分区数 1
#ReplicationFactor: 3 副本数 3
#Partition: 0    数据在0号分区,分区即把大文件切分成多份
#Leader: 0       数据主备,leader为主,分别存放集群不同服务器上。此处leader存放在broker.id=0的服务器上
#Replicas: 0,2,1 Isr: 0,2,1 副本存放在集群broker.id=0,1,2的主机,Isr显示集群运行正常的节点信息,节点运行不正常时会被从Isr中踢出
[root@kafka1 bin]# ./kafka-topics.sh --bootstrap-server node1:9092 --topic firstTopic --describe
Topic: firstTopic       TopicId: HqizKYTWQ06Ed1_IF4B3Jw PartitionCount: 1       ReplicationFactor: 3    Configs:
        Topic: firstTopic       Partition: 0    Leader: 0       Replicas: 0,2,1 Isr: 0,2,1

#修改主题分区为3,分区数只能增加不能减少
[root@kafka1 bin]# ./kafka-topics.sh --bootstrap-server node1:9092 --topic firstTopic --alter --partitions 3

#再次查看主题详细信息
[root@kafka1 bin]# ./kafka-topics.sh --bootstrap-server node1:9092 --topic firstTopic --describe
Topic: firstTopic       TopicId: HqizKYTWQ06Ed1_IF4B3Jw PartitionCount: 3       ReplicationFactor: 3    Configs:
        Topic: firstTopic       Partition: 0    Leader: 0       Replicas: 0,2,1 Isr: 0,2,1
        Topic: firstTopic       Partition: 1    Leader: 1       Replicas: 1,2,0 Isr: 1,2,0
        Topic: firstTopic       Partition: 2    Leader: 2       Replicas: 2,0,1 Isr: 2,0,1

6. Operation Producer

#生产者使用的脚本
kafka-console-producer.sh

[root@kafka1 bin]# ./kafka-console-producer.sh
Missing required option(s) [bootstrap-server]
Option                                   Description
------                                   -----------
--batch-size <Integer: size>             Number of messages to send in a single
                                           batch if they are not being sent
                                           synchronously. please note that this
                                           option will be replaced if max-
                                           partition-memory-bytes is also set
                                           (default: 16384)
--bootstrap-server <String: server to    REQUIRED unless --broker-list
  connect to>                              (deprecated) is specified. The server
                                           (s) to connect to. The broker list
                                           string in the form HOST1:PORT1,HOST2:
                                           PORT2.
--broker-list <String: broker-list>      DEPRECATED, use --bootstrap-server
                                           instead; ignored if --bootstrap-
                                           server is specified.  The broker
                                           list string in the form HOST1:PORT1,
                                           HOST2:PORT2.
--compression-codec [String:             The compression codec: either 'none',
  compression-codec]                       'gzip', 'snappy', 'lz4', or 'zstd'.
                                           If specified without value, then it
                                           defaults to 'gzip'
--help                                   Print usage information.
--line-reader <String: reader_class>     The class name of the class to use for
                                           reading lines from standard in. By
                                           default each line is read as a
                                           separate message. (default: kafka.
                                           tools.
                                           ConsoleProducer$LineMessageReader)
--max-block-ms <Long: max block on       The max time that the producer will
  send>                                    block for during a send request.
                                           (default: 60000)
--max-memory-bytes <Long: total memory   The total memory used by the producer
  in bytes>                                to buffer records waiting to be sent
                                           to the server. This is the option to
                                           control `buffer.memory` in producer
                                           configs. (default: 33554432)
--max-partition-memory-bytes <Integer:   The buffer size allocated for a
  memory in bytes per partition>           partition. When records are received
                                           which are smaller than this size the
                                           producer will attempt to
                                           optimistically group them together
                                           until this size is reached. This is
                                           the option to control `batch.size`
                                           in producer configs. (default: 16384)
--message-send-max-retries <Integer>     Brokers can fail receiving the message
                                           for multiple reasons, and being
                                           unavailable transiently is just one
                                           of them. This property specifies the
                                           number of retries before the
                                           producer give up and drop this
                                           message. This is the option to
                                           control `retries` in producer
                                           configs. (default: 3)
--metadata-expiry-ms <Long: metadata     The period of time in milliseconds
  expiration interval>                     after which we force a refresh of
                                           metadata even if we haven't seen any
                                           leadership changes. This is the
                                           option to control `metadata.max.age.
                                           ms` in producer configs. (default:
                                           300000)
--producer-property <String:             A mechanism to pass user-defined
  producer_prop>                           properties in the form key=value to
                                           the producer.
--producer.config <String: config file>  Producer config properties file. Note
                                           that [producer-property] takes
                                           precedence over this config.
--property <String: prop>                A mechanism to pass user-defined
                                           properties in the form key=value to
                                           the message reader. This allows
                                           custom configuration for a user-
                                           defined message reader.
                                         Default properties include:
                                          parse.key=false
                                          parse.headers=false
                                          ignore.error=false
                                          key.separator=\t
                                          headers.delimiter=\t
                                          headers.separator=,
                                          headers.key.separator=:
                                          null.marker=   When set, any fields
                                           (key, value and headers) equal to
                                           this will be replaced by null
                                         Default parsing pattern when:
                                          parse.headers=true and parse.key=true:
                                           "h1:v1,h2:v2...\tkey\tvalue"
                                          parse.key=true:
                                           "key\tvalue"
                                          parse.headers=true:
                                           "h1:v1,h2:v2...\tvalue"
--reader-config <String: config file>    Config properties file for the message
                                           reader. Note that [property] takes
                                           precedence over this config.
--request-required-acks <String:         The required `acks` of the producer
  request required acks>                   requests (default: -1)
--request-timeout-ms <Integer: request   The ack timeout of the producer
  timeout ms>                              requests. Value must be non-negative
                                           and non-zero. (default: 1500)
--retry-backoff-ms <Long>                Before each retry, the producer
                                           refreshes the metadata of relevant
                                           topics. Since leader election takes
                                           a bit of time, this property
                                           specifies the amount of time that
                                           the producer waits before refreshing
                                           the metadata. This is the option to
                                           control `retry.backoff.ms` in
                                           producer configs. (default: 100)
--socket-buffer-size <Integer: size>     The size of the tcp RECV size. This is
                                           the option to control `send.buffer.
                                           bytes` in producer configs.
                                           (default: 102400)
--sync                                   If set message send requests to the
                                           brokers are synchronously, one at a
                                           time as they arrive.
--timeout <Long: timeout_ms>             If set and the producer is running in
                                           asynchronous mode, this gives the
                                           maximum amount of time a message
                                           will queue awaiting sufficient batch
                                           size. The value is given in ms. This
                                           is the option to control `linger.ms`
                                           in producer configs. (default: 1000)
--topic <String: topic>                  REQUIRED: The topic id to produce
                                           messages to.
--version                                Display Kafka version.

Create a producer

bin/kafka-console-producer.sh --bootstrap-server node1:9092 --topic firstTopic

7. Operating consumers

Create a consumer

bin/kafka-console-consumer.sh --bootstrap-server node1:9092 --topic firstTopic

#消费者默认从最新的数据开始消费,可以指定从头开始消费
bin/kafka-console-consumer.sh --bootstrap-server node1:9092 --topic firstTopic --from-beginning

Guess you like

Origin blog.csdn.net/qq_44659804/article/details/130464207