Kafka Java API: Manually Maintaining Offsets

package com.kafka;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;
import org.junit.Test;

import java.util.*;

public class ConsumerManageOffset {

    // Broker addresses.
    // Unlike the old Kafka consumer, which stored offsets in ZooKeeper, the new
    // consumer stores offsets in the brokers themselves, so the broker addresses
    // must be configured when reading data.
    private static String ips = "192.168.136.150:9092,192.168.136.151:9092,192.168.136.152:9092";

    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers",ips);
        props.put("group.id","test02");
        props.put("auto.offset.reset","earliest");
        props.put("max.poll.records","10"); 
        props.put("key.deserializer","org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer","org.apache.kafka.common.serialization.StringDeserializer");
        KafkaConsumer<String,String> consumer = new KafkaConsumer<>(props);
        consumer.subscribe(Arrays.asList("my-topic"));
        System.out.println("---------------------");
        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(10);
            System.out.println("+++++++++++++++++++++++");
            for (ConsumerRecord<String, String> record : records) {
                System.out.println("---");
                System.out.printf("offset=%d,key=%s,value=%s%n", record.offset(),
                        record.key(), record.value());
            }
        }
    }

    // Manually maintain (commit) offsets
    @Test
    public void manualManageOffset() {
        Properties props = new Properties();
        // Broker addresses
        props.put("bootstrap.servers", ips);
        // Consumer group id
        props.put("group.id", "groupPP");
        // Where to start when the group has no committed offset: resume from the
        // committed offset if there is one, otherwise start from the beginning
        props.put("auto.offset.reset", "earliest");
        // Disable automatic offset commits
        props.put("enable.auto.commit", "false");
        // Key and value deserializers
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        // Create the consumer
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
        // Subscribe to the topic to consume
        consumer.subscribe(Arrays.asList("my-topic"));

        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(1000);

            // Find out which partitions this batch of records came from
            Set<TopicPartition> partitions = records.partitions();
            for (TopicPartition tp : partitions) {

                // Take out and process the records belonging to this partition
                List<ConsumerRecord<String, String>> partitionRecords = records.records(tp);

                for (ConsumerRecord<String, String> r : partitionRecords) {
                    System.out.println(r.offset() + "     " + r.key() + "     " + r.value());
                }

                // The offset of the last record in this partition plus 1 is the
                // position to commit: the offset of the next record to be read
                long newOffset = partitionRecords.get(partitionRecords.size() - 1).offset() + 1;
                consumer.commitSync(Collections.singletonMap(tp, new OffsetAndMetadata(newOffset)));
            }
        }
    }
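
    // A variant of the manual-commit loop above: commitAsync() sends the commit
    // without blocking the poll loop, at the cost of weaker ordering guarantees.
    // This is a minimal sketch assuming the same "my-topic" topic; the group id
    // "groupAsync" is made up for illustration.
    @Test
    public void manualManageOffsetAsync() {
        Properties props = new Properties();
        props.put("bootstrap.servers", ips);
        props.put("group.id", "groupAsync");
        props.put("auto.offset.reset", "earliest");
        props.put("enable.auto.commit", "false");
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
        consumer.subscribe(Arrays.asList("my-topic"));
        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(1000);
            for (TopicPartition tp : records.partitions()) {
                List<ConsumerRecord<String, String>> partitionRecords = records.records(tp);
                for (ConsumerRecord<String, String> r : partitionRecords) {
                    System.out.println(r.offset() + "     " + r.key() + "     " + r.value());
                }
                long newOffset = partitionRecords.get(partitionRecords.size() - 1).offset() + 1;
                // Asynchronous commit; the callback only logs failures. Async
                // commits can complete out of order, so a failed commit should
                // not be retried blindly.
                consumer.commitAsync(Collections.singletonMap(tp, new OffsetAndMetadata(newOffset)),
                        (offsets, exception) -> {
                            if (exception != null) {
                                System.err.println("commit failed for " + offsets + ": " + exception);
                            }
                        });
            }
        }
    }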
}
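
To check that the synchronous commits above actually took effect, you can read the committed offset back and seek to it before polling. The snippet below is a minimal sketch for illustration: it assumes partition 0 of my-topic exists, and it uses assign() instead of subscribe() so the consumer manages its own partition positions; committed() and seek() are standard KafkaConsumer methods.

Properties props = new Properties();
props.put("bootstrap.servers", ips);
props.put("group.id", "groupPP");   // same group as manualManageOffset()
props.put("enable.auto.commit", "false");
props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
TopicPartition tp = new TopicPartition("my-topic", 0);   // assumed partition
consumer.assign(Arrays.asList(tp));                      // manual partition assignment
OffsetAndMetadata committed = consumer.committed(tp);
if (committed != null) {
    consumer.seek(tp, committed.offset());               // resume exactly where we committed
}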

Reprinted from blog.csdn.net/Lu_Xiao_Yue/article/details/85082568