Building a Docker-based Kafka cluster with Kafka Manager monitoring

Kafka cluster

I have recently been looking into using a Kafka cluster as a message queue to increase message throughput. During testing I used Docker to build a test environment, which is shared here for reference:

version: '3.1'
services:
  zookeeper1:
    image: zookeeper
    container_name: kafka-zookeeper1
    restart: always
    hostname: zookeeper1
    ports:
      - 2181:2181
    environment:
      ZOO_MY_ID: 1
      ZOO_SERVERS: server.1=0.0.0.0:2888:3888 server.2=zookeeper2:2888:3888 server.3=zookeeper3:2888:3888
  zookeeper2:
    image: zookeeper
    container_name: kafka-zookeeper2
    restart: always
    hostname: zookeeper2
    ports:
      - 2182:2181
    environment:
      ZOO_MY_ID: 2
      ZOO_SERVERS: server.1=zookeeper1:2888:3888 server.2=0.0.0.0:2888:3888 server.3=zookeeper3:2888:3888
  zookeeper3:
    image: zookeeper
    container_name: kafka-zookeeper3
    restart: always
    hostname: zookeeper3
    ports:
      - 2183:2181
    environment:
      ZOO_MY_ID: 3
      ZOO_SERVERS: server.1=zookeeper1:2888:3888 server.2=zookeeper2:2888:3888 server.3=0.0.0.0:2888:3888
  kafka1:
    image: xuxiangwork/kafka
    container_name: kafka-kafka1
    ports:
      - "9092:9092"
      - "1099:1099"
    depends_on:
      - zookeeper1
      - zookeeper2
      - zookeeper3
    environment:
      KAFKA_BROKER_ID: 1
      KAFKA_ADVERTISED_HOST_NAME: your_ip_addr (e.g. 192.168.14.10)
      KAFKA_ZOOKEEPER_CONNECT: zookeeper1:2181,zookeeper2:2181,zookeeper3:2181
      CUSTOM_JMX_PORT: "1099"
      JVM_XMS: "256M"
      JVM_XMX: "512M"
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
      - /home/work/data/kafka1:/kafka
    restart: always
  kafka2:
    image: xuxiangwork/kafka
    container_name: kafka-kafka2
    ports:
      - "9093:9092"
      - "1100:1100"
    depends_on:
      - zookeeper1
      - zookeeper2
      - zookeeper3
    environment:
      KAFKA_BROKER_ID: 2
      KAFKA_ADVERTISED_HOST_NAME: your_ip_addr (e.g. 192.168.14.10)
      KAFKA_ZOOKEEPER_CONNECT: zookeeper1:2181,zookeeper2:2181,zookeeper3:2181
      CUSTOM_JMX_PORT: "1100"
      JVM_XMS: "256M"
      JVM_XMX: "512M"
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
      - /home/work/data/kafka2:/kafka
    restart: always
  kafka3:
    image: xuxiangwork/kafka
    container_name: kafka-kafka3
    ports:
      - "9094:9092"
      - "1101:1101"
    depends_on:
      - zookeeper1
      - zookeeper2
      - zookeeper3
    environment:
      KAFKA_BROKER_ID: 3
      KAFKA_ADVERTISED_HOST_NAME: your_ip_addr (e.g. 192.168.14.10)
      KAFKA_ZOOKEEPER_CONNECT: zookeeper1:2181,zookeeper2:2181,zookeeper3:2181
      CUSTOM_JMX_PORT: "1101"
      JVM_XMS: "256M"
      JVM_XMX: "512M"
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
      - /home/work/data/kafka3:/kafka
    restart: always
  kafka4:
    image: xuxiangwork/kafka
    container_name: kafka-kafka4
    ports:
      - "9095:9092"
      - "1102:1102"
    depends_on:
      - zookeeper1
      - zookeeper2
      - zookeeper3
    environment:
      KAFKA_BROKER_ID: 4
      KAFKA_ADVERTISED_HOST_NAME: your_ip_addr (e.g. 192.168.14.10)
      KAFKA_ZOOKEEPER_CONNECT: zookeeper1:2181,zookeeper2:2181,zookeeper3:2181
      CUSTOM_JMX_PORT: "1102"
      JVM_XMS: "256M"
      JVM_XMX: "512M"
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
      - /home/work/data/kafka4:/kafka
    restart: always
  kafka5:
    image: xuxiangwork/kafka
    container_name: kafka-kafka5
    ports:
      - "9096:9092"
      - "1103:1103"
    depends_on:
      - zookeeper1
      - zookeeper2
      - zookeeper3
    environment:
      KAFKA_BROKER_ID: 5
      KAFKA_ADVERTISED_HOST_NAME: your_ip_addr (e.g. 192.168.14.10)
      KAFKA_ZOOKEEPER_CONNECT: zookeeper1:2181,zookeeper2:2181,zookeeper3:2181
      CUSTOM_JMX_PORT: "1103"
      JVM_XMS: "256M"
      JVM_XMX: "512M"
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
      - /home/work/data/kafka5:/kafka
    restart: always

The compose file above defines a 3-node ZooKeeper ensemble and a 5-broker Kafka cluster; see Docker Hub for the full image documentation. A few points deserve attention (a quick smoke test follows the list):

  • KAFKA_ADVERTISED_HOST_NAME
    Set this to the Docker host's IP address. If you intend to run multiple brokers, do not set it to localhost or 127.0.0.1. To find the host's IP address, look up how to use ifconfig.
  • CUSTOM_JMX_PORT
    This parameter exposes JMX metrics from the broker so the cluster can be monitored; tools such as Kafka Manager read broker information from this port. See the referenced write-up on using Kafka Manager for details.
  • JVM_XMS and JVM_XMX
    These two parameters control the heap memory allocated to each Kafka broker; both default to 1G. In a test environment, size them according to the host's available memory. For background on the flags, see the JVM series article on JVM parameter settings and analysis.
  • Disk mounts
    In the example YAML above, each broker's data directory is mounted to the host via entries such as /home/work/data/kafka5:/kafka; change the host-side paths as needed.
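
To confirm the cluster is reachable from outside the containers, a minimal smoke test can be run against the advertised addresses. The sketch below uses the kafka-python client, which is not part of the setup above, and assumes the example host IP 192.168.14.10 with brokers published on ports 9092-9094; adjust the bootstrap list to match your environment.

from kafka import KafkaProducer, KafkaConsumer

# Bootstrap servers assume the example host IP and the published ports from the compose file.
BOOTSTRAP = ["192.168.14.10:9092", "192.168.14.10:9093", "192.168.14.10:9094"]

# Send one test message; with default broker settings the topic is auto-created on first use.
producer = KafkaProducer(bootstrap_servers=BOOTSTRAP)
producer.send("smoke-test", b"hello from the docker kafka cluster")
producer.flush()
producer.close()

# Read the message back from the beginning of the topic.
consumer = KafkaConsumer(
    "smoke-test",
    bootstrap_servers=BOOTSTRAP,
    auto_offset_reset="earliest",
    consumer_timeout_ms=10000,  # give up after 10 s if nothing arrives
)
for record in consumer:
    print(record.topic, record.partition, record.offset, record.value)
    break
consumer.close()

If the producer times out or the consumer receives nothing, the most common cause is a wrong KAFKA_ADVERTISED_HOST_NAME, since clients connect to whatever address the brokers advertise.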

Kafka Manager

Kafka Manager is currently a mainstream tool for monitoring and managing Kafka; a Docker-based setup is likewise provided here for reference:

version: '3.1'
services:
  kafka-manager:
    container_name: kafka-manager
    image: xuxiangwork/kafka-manager
    ports:
      - "9000:9000"
    environment:
      ZK_HOSTS: your_zookeeper_host:port (e.g. 192.168.10.11:2181)
      APPLICATION_SECRET: letmein
      KAFKA_MANAGER_AUTH_ENABLED: "true"
      KAFKA_MANAGER_USERNAME: admin
      KAFKA_MANAGER_PASSWORD: password
    restart: always

Detailed documentation is available on Docker Hub. Points to note (a quick availability check follows the list):

  • ZK_HOSTS
    The ZooKeeper addresses used to store Kafka Manager's own state; multiple addresses can be joined with commas.
  • KAFKA_MANAGER_AUTH_ENABLED
    Whether to enable Kafka Manager's authentication.
  • KAFKA_MANAGER_USERNAME and KAFKA_MANAGER_PASSWORD
    The account name and password for logging in to Kafka Manager.
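
Once the container is up, Kafka Manager's web UI should answer on port 9000. Below is a minimal availability check using only the Python standard library; the host address is a placeholder matching the ZK_HOSTS example, and the Basic-auth header reflects the credentials above, assuming the image enforces HTTP Basic authentication when KAFKA_MANAGER_AUTH_ENABLED is true.

import base64
import urllib.request

# Placeholder address and the credentials configured in the compose file above.
URL = "http://192.168.10.11:9000/"
USERNAME, PASSWORD = "admin", "password"

request = urllib.request.Request(URL)
# Kafka Manager's built-in auth is assumed to be HTTP Basic; drop this header if auth is disabled.
token = base64.b64encode(f"{USERNAME}:{PASSWORD}".encode()).decode()
request.add_header("Authorization", f"Basic {token}")

with urllib.request.urlopen(request, timeout=5) as response:
    # HTTP 200 means the UI is reachable and the credentials were accepted.
    print("kafka-manager responded with HTTP", response.status)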

Reposted from blog.csdn.net/weixin_34191845/article/details/86891256