SpringBoot2 + Kafka Integration

1: Building a simple Docker environment

Reference for setting up the environment (personally tested and working): deploying a Kafka cluster with Docker Compose on CentOS.

Create the docker-compose.yml file:

version: '2'
services:
  zoo1:
    image: wurstmeister/zookeeper
    restart: unless-stopped
    hostname: zoo1
    ports:
      - "2181:2181"
    container_name: zookeeper
  kafka1:
    image: wurstmeister/kafka
    ports:
      - "9092:9092"
    environment:
      KAFKA_ADVERTISED_HOST_NAME: 192.168.189.131                     
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://192.168.189.131:9092 
      KAFKA_ZOOKEEPER_CONNECT: "zoo1:2181"
      KAFKA_ADVERTISED_PORT: 9092
      KAFKA_BROKER_ID: 1
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
    depends_on:
      - zoo1
    container_name: kafka1
  kafka-manager:
    image: sheepkiller/kafka-manager             
    environment:
      ZK_HOSTS: 192.168.189.131
    ports:
      - "9000:9000"

 

2: The Spring Boot project in IDEA

  • 2.1: Add the pom dependency

<dependency>
   <groupId>org.springframework.kafka</groupId>
   <artifactId>spring-kafka</artifactId>
</dependency>
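The sample code below also uses @RestController and Lombok annotations, so spring-boot-starter-web and org.projectlombok:lombok are assumed to be on the classpath as well; with the Spring Boot parent POM, none of these need an explicit version:

<dependency>
   <groupId>org.springframework.boot</groupId>
   <artifactId>spring-boot-starter-web</artifactId>
</dependency>
<dependency>
   <groupId>org.projectlombok</groupId>
   <artifactId>lombok</artifactId>
</dependency>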
  • 2.2: Edit application.yml

spring:
  kafka:
    bootstrap-servers: 192.168.189.131:9092 # Kafka server address; for a cluster, list several addresses separated by commas
    producer:
      retries: 0
      key-serializer: org.apache.kafka.common.serialization.StringSerializer
      # Custom serializer class, convenient for sending entity objects or Lists
      value-serializer: com.zhou.wei.config.EncodeingKafka
      batch-size: 16384
      buffer-memory: 33554432
      acks: 1
    consumer:
      # Consumer group ID
      group-id: default_consumer_group
      # If true, the consumer's offsets are committed periodically in the background; default is true
      enable-auto-commit: true
      # If 'enable.auto.commit' is true, the frequency (in milliseconds) at which consumer offsets are auto-committed to Kafka; default is 5000
      auto-commit-interval: PT5S
      key-deserializer: org.apache.kafka.common.serialization.StringDeserializer
      # Custom deserializer class, convenient for receiving entity objects or Lists
      value-deserializer: com.zhou.wei.config.DecodeingKafka
server:
  port: 8500
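Note that value-serializer and value-deserializer point at the custom classes EncodeingKafka and DecodeingKafka, which are written in step two of section 2.3 below; the com.zhou.wei.config package must match wherever you actually place them.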
  • 2.3: The code

Step one: the bootstrap class

 

@SpringBootApplication
@RestController
public class KafkaApplication {

    public static void main(String[] args){
        SpringApplication.run(KafkaApplication.class,args);
    }

    @Autowired
    private KafkaTemplate kafkaTemplate;

    /**
     * Simple type: send a plain string message
     * @param msg
     * @return
     */
    @RequestMapping(value = "/send/{msg}",method = RequestMethod.GET)
    public String send(@PathVariable("msg") String msg){
        kafkaTemplate.send("demo", msg); // send the message via the Kafka template
        return "success";
    }

    @KafkaListener(topics = "demo")
    public void listen(String record){
        System.out.println("Consumed message: " + record);
    }

    /**
     * Complex type: send a custom object
     */
    @RequestMapping(value = "/sendObj",method = RequestMethod.GET)
    public String sendObj(){
        kafkaTemplate.send("topic_001", new Student("胖哥",27)); // send the object via the Kafka template
        return "success";
    }

    @KafkaListener(topics = "topic_001")
    public void listenObj(Student record){
        System.out.println("Consumed message: " + record);
    }

    @AllArgsConstructor
    @Data
    static class Student implements Serializable{
        private String name;
        private Integer age;
    }

}
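With the application running on port 8500 (per application.yml), the endpoints can be exercised from a browser or curl: http://localhost:8500/send/hello publishes a plain string to the demo topic, and http://localhost:8500/sendObj publishes a Student object to topic_001; each listener then prints the consumed message to the console.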

Step two: the serialization and deserialization utilities

EncodeingKafka (the custom value serializer):
public class EncodeingKafka implements Serializer<Object> {
    @Override
    public void configure(Map<String, ?> map, boolean b) {

    }

    @Override
    public byte[] serialize(String s, Object o) {
        return BeanUtils.ObjectToBytes(o);
    }

    @Override
    public void close() {

    }
}
DecodeingKafka (the custom value deserializer):
public class DecodeingKafka implements Deserializer<Object> {
    @Override
    public void configure(Map<String, ?> map, boolean b) {

    }

    @Override
    public Object deserialize(String s, byte[] bytes) {
        return BeanUtils.BytesToObject(bytes);
    }

    @Override
    public void close() {

    }
}
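As an aside: Java native serialization ties producer and consumer to exactly the same classes and is not human-readable. A minimal alternative sketch, using the JSON (de)serializers that ship with spring-kafka in place of the custom classes above (the trusted-packages property is needed so the deserializer is allowed to instantiate your types):

spring:
  kafka:
    producer:
      value-serializer: org.springframework.kafka.support.serializer.JsonSerializer
    consumer:
      value-deserializer: org.springframework.kafka.support.serializer.JsonDeserializer
      properties:
        spring.json.trusted.packages: "com.zhou.wei.*"

Either approach works for the demo; the rest of this article sticks with the custom Java-serialization classes.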

BeanUtils (the Java-serialization helper used by both):

public class BeanUtils {

    /**
     * Convert an object to a byte array
     * @param obj
     * @return
     */
    public static byte[] ObjectToBytes(Object obj){
        byte[] bytes = null;
        ByteArrayOutputStream bo = null;
        ObjectOutputStream oo = null;
        try {
            bo = new ByteArrayOutputStream();
            oo = new ObjectOutputStream(bo);
            oo.writeObject(obj);
            oo.flush(); // flush the ObjectOutputStream's buffer, otherwise the tail of the object may be missing
            bytes = bo.toByteArray();

        } catch (IOException e) {
            e.printStackTrace();
        }finally {
            try {
                if(bo!=null){
                    bo.close();
                }
                if(oo!=null){
                    oo.close();
                }
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
        return bytes;
    }


    /**
     * Convert a byte array back to an object
     * @param bytes
     * @return
     */
    public static Object BytesToObject(byte[] bytes){
        Object obj = null;
        ByteArrayInputStream bi = null;
        ObjectInputStream oi = null;
        try {
            bi =new ByteArrayInputStream(bytes);
            oi =new ObjectInputStream(bi);
            obj = oi.readObject();

        } catch (Exception e) {
            e.printStackTrace();
        }finally {
            try {
                if(bi!=null){
                    bi.close();
                }
                if(oi!=null){
                    oi.close();
                }
            } catch (IOException e) {
                e.printStackTrace();
            }
        }

        return obj;
    }


}
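A quick round-trip sanity check of these helpers — a minimal sketch, assuming the Student class from section 2.3 (it is a static nested class there, so qualify it as KafkaApplication.Student or move it to its own file):

Student original = new Student("胖哥", 27);
byte[] bytes = BeanUtils.ObjectToBytes(original);
Student copy = (Student) BeanUtils.BytesToObject(bytes);
System.out.println(copy); // Lombok's @Data provides the toString()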
 
 
 
  • 2.4: Bonus: a fuller reference for the Kafka configuration options

spring:
  kafka:
    # Comma-separated list of host:port pairs used to establish the initial connection to the Kafka cluster
    bootstrap-servers:
    consumer:
      # A unique string identifying the consumer group this consumer belongs to
      group-id: group-id-1
      # An ID passed to the server when making requests, used for server-side logging
      client-id: client-id-1
      # If true, the consumer's offsets are committed periodically in the background; default is true
      enable-auto-commit: true
      # If 'enable.auto.commit' is true, the frequency (in milliseconds) at which consumer offsets are auto-committed to Kafka; default is 5000
      auto-commit-interval: PT5S
      # What to do when there is no initial offset in Kafka, or the current offset no longer exists on the server; default is latest,
      # meaning the offset is automatically reset to the latest one; valid values: latest, earliest, none
      auto-offset-reset: earliest
      # The maximum time (in milliseconds) the server will block before answering a fetch request if there is not enough data
      # to immediately satisfy the requirement given by 'fetch.min.bytes'; default is 500
      fetch-max-wait: PT0.5S
      # The minimum amount of data, in bytes, the server should return for a fetch request; default is 1; maps to Kafka's fetch.min.bytes
      fetch-min-size: 1
      # The expected time (in milliseconds) between heartbeats to the consumer coordinator; default is 3000
      heartbeat-interval: PT3S
      # Deserializer class for keys, implementing org.apache.kafka.common.serialization.Deserializer
      key-deserializer: org.apache.kafka.common.serialization.StringDeserializer
      # Deserializer class for values, implementing org.apache.kafka.common.serialization.Deserializer
      value-deserializer: org.apache.kafka.common.serialization.StringDeserializer
      # The maximum number of records returned in a single call to poll(); default is 500
      max-poll-records: 500
    producer:
      # An ID passed to the server when making requests, used for server-side logging
      client-id:
      # The number of acknowledgements the producer requires the leader to have received before considering a request complete;
      # this controls the durability of records sent to the server:
      # acks = 0: the producer does not wait for any acknowledgement from the server; the record is added to the socket buffer and considered sent immediately.
      # acks = 1: the leader writes the record to its local log but responds without waiting for full acknowledgement from all replicas.
      # acks = all: the leader waits for the full set of in-sync replicas to acknowledge the record; the record is not lost as long as
      # at least one in-sync replica remains alive. This is the strongest guarantee, equivalent to acks = -1.
      # Valid values: all, -1, 0, 1
      acks: 1
      # Whenever multiple records are sent to the same partition, the producer attempts to batch them together into fewer requests,
      # which improves performance on both client and server; this setting controls the default batch size in bytes; default is 16384
      batch-size: 16384
      # The total bytes of memory the producer can use to buffer records waiting to be sent to the server; default is 33554432
      buffer-memory: 33554432
      # Compression type for all data generated by the producer; accepts the standard codecs ('gzip', 'snappy', 'lz4')
      # plus 'uncompressed' (no compression) and 'producer' (keep the original codec set by the producer);
      # default is producer
      compression-type: producer
      # Serializer class for keys, implementing org.apache.kafka.common.serialization.Serializer
      key-serializer: org.apache.kafka.common.serialization.StringSerializer
      # Serializer class for values, implementing org.apache.kafka.common.serialization.Serializer
      value-serializer: org.apache.kafka.common.serialization.StringSerializer

 

Source: blog.csdn.net/WandaZw/article/details/105319352