Kafka notes: consumer usage and manual offset management

Copyright notice: This is the blogger's original article and may not be reproduced without permission. https://blog.csdn.net/lyzx_in_csdn/article/details/82846492
package com.lyzx.kafka;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;
import org.junit.Test;

import java.util.*;

public class ConsumerTest {
    private static String ips = "192.168.29.164:9092";


    /**
     * Basic usage of the Kafka consumer.
     * 1. group.id          the consumer group name
     * 2. auto.offset.reset where to start consuming when the group has no committed offset yet
     *    earliest  start from the earliest offset still present in the log; note that this is not
     *              necessarily 0, because old segments may already have been deleted by retention.
     *              If the group has previously committed an offset (e.g. 1000), consumption simply
     *              resumes from 1000 regardless of this setting, even if the log now holds 5000 messages.
     *    latest    start from the end of the log,
     *              e.g. if there are 5000 messages, start reading from offset 5000.
     *
     * max.poll.records     the maximum number of records a single poll() may return
     * consumer.poll(time)  time is the maximum time poll() may block waiting for data
     * poll() returns when either
     * 1. records are available (at most max.poll.records of them), or
     * 2. the blocking time exceeds time,
     * whichever happens first.
     */
    @Test
    public void basicTest(){
        Properties props = new Properties();
        props.put("bootstrap.servers",ips);
        props.put("group.id","test2");
        props.put("auto.offset.reset","earliest");
        props.put("max.poll.records","10");
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
        consumer.subscribe(Arrays.asList("yh1"));
        while (true){
            ConsumerRecords<String, String> records = consumer.poll(100);
            for (ConsumerRecord<String, String> record : records)
                System.out.printf("offset = %d, key = %s, value = %s%n", record.offset(), record.key(), record.value());
        }
    }


    /**
     * The first way to manage offsets manually: coarse-grained management.
     * First set enable.auto.commit=false to turn off the automatic commit mechanism,
     * then call consumer.commitSync() manually after each batch has been processed.
     * consumer.commitSync()   synchronous: blocks until the commit completes
     * consumer.commitAsync()  asynchronous, non-blocking: it does not start another thread to
     *                         commit; the call simply returns without waiting for the broker's
     *                         response, which is why it is called asynchronous.
     * If the program crashes after poll() has returned data but before commitSync()/commitAsync()
     * is called, the uncommitted records are redelivered on restart and may be processed again
     * (duplicates, i.e. at-least-once semantics), rather than being lost.
     */
    @Test
    public void autoManageOffset1(){
        Properties props = new Properties();
        props.put("bootstrap.servers",ips);
        props.put("group.id","group_autoManageOffset2");
        props.put("auto.offset.reset","earliest");
        props.put("enable.auto.commit","false");
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
        consumer.subscribe(Arrays.asList("yh1"));

        while(true){
            System.out.println("===============================");
            ConsumerRecords<String, String> records = consumer.poll(100);
            for (ConsumerRecord<String, String> record : records){
                System.out.println(record.offset()+"     "+record.key()+"    "+record.value());
            }
            consumer.commitSync();
        }
    }
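
    /**
     * Editor's sketch, not part of the original post: a minimal example of committing
     * asynchronously with a callback, assuming the same broker address and topic ("yh1")
     * as above; the group name below is hypothetical. commitAsync() returns immediately,
     * and the callback later reports whether the commit succeeded, so failed commits can
     * at least be logged.
     */
    @Test
    public void commitAsyncWithCallbackSketch(){
        Properties props = new Properties();
        props.put("bootstrap.servers",ips);
        props.put("group.id","group_commitAsyncSketch");   //hypothetical group name
        props.put("auto.offset.reset","earliest");
        props.put("enable.auto.commit","false");
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
        consumer.subscribe(Arrays.asList("yh1"));

        while(true){
            ConsumerRecords<String, String> records = consumer.poll(100);
            for (ConsumerRecord<String, String> record : records){
                System.out.println(record.offset()+"     "+record.key()+"     "+record.value());
            }
            //non-blocking commit; the callback runs on this consumer thread during a later poll()
            consumer.commitAsync((offsets, exception) -> {
                if (exception != null) {
                    System.err.println("async commit failed for " + offsets + ": " + exception);
                }
            });
        }
    }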


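    /**
     * The second way to manage offsets manually: fine-grained, per-partition management.
     * Each batch returned by poll() is split up by partition; after the records of one
     * partition have been processed, the offset of its last record + 1 is committed for
     * that partition only, via commitSync(Map<TopicPartition, OffsetAndMetadata>).
     */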
    @Test
    public void autoManageOffset2(){
        Properties props = new Properties();
        props.put("bootstrap.servers",ips);
        props.put("group.id","group_autoManageOffset3");
        props.put("auto.offset.reset","earliest");
        props.put("enable.auto.commit","false");
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
        consumer.subscribe(Arrays.asList("yh1"));

        while(true){
            ConsumerRecords<String, String> records = consumer.poll(1000);

            //find out which partitions the records in this batch came from
            Set<TopicPartition> partitions = records.partitions();
            for(TopicPartition tp : partitions){

                //take out and process the records that belong to this specific partition
                List<ConsumerRecord<String, String>> partitionRecords = records.records(tp);
                for(ConsumerRecord<String, String> r : partitionRecords){
                    System.out.println(r.offset() + "     " + r.key() + "     " + r.value());
                }
                //take the offset of the last record in this partition and add 1; that is the offset to commit, i.e. the position the next fetch should resume from
                long newOffset = partitionRecords.get(partitionRecords.size() - 1).offset() + 1;
                consumer.commitSync(Collections.singletonMap(tp,new OffsetAndMetadata(newOffset)));
            }
        }
    }
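
    /**
     * Editor's sketch, not part of the original post: a minimal example of resuming from an
     * offset stored outside Kafka (e.g. in a database), assuming partition 0 of topic "yh1"
     * and a hypothetical externally saved offset of 100. assign() + seek() bypasses the
     * group's committed offsets entirely, so the application is responsible for persisting
     * its own position.
     */
    @Test
    public void seekToStoredOffsetSketch(){
        Properties props = new Properties();
        props.put("bootstrap.servers",ips);
        props.put("enable.auto.commit","false");
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);

        //assign the partition manually instead of subscribing, so no group rebalancing is involved
        TopicPartition tp = new TopicPartition("yh1", 0);
        consumer.assign(Collections.singletonList(tp));

        long storedOffset = 100L;   //hypothetical offset loaded from external storage
        consumer.seek(tp, storedOffset);

        while(true){
            ConsumerRecords<String, String> records = consumer.poll(100);
            for (ConsumerRecord<String, String> record : records){
                System.out.println(record.offset()+"     "+record.key()+"     "+record.value());
                //after processing, the application would save record.offset() + 1 back to external storage
            }
        }
    }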

}
