Kafka Java API Operations

Reference: the official Kafka documentation pages for the producer and consumer clients.

Producer code:

import java.util.Properties;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;

// Named ProducerDemo to avoid clashing with the Kafka Producer interface imported above
public class ProducerDemo {
    public static void main(String[] args) {
        // Connection properties for Kafka
        Properties props = new Properties();
        // Broker addresses
        props.put("bootstrap.servers", "hadoop01:9092,hadoop02:9092,hadoop03:9092");
        // Ack mechanism:
        // 0: the producer sends and forgets; data may be lost (best performance)
        // 1: the write only has to reach the leader replica before the broker responds (a compromise)
        // all or -1: both the leader and the follower replicas must receive the data (safest)
        props.put("acks", "all");
        // Number of retries
        props.put("retries", 0);
        // Batch size in bytes
        props.put("batch.size", 16384);
        // Max time to wait before sending a batch (milliseconds)
        props.put("linger.ms", 1);
        // Total buffer memory in bytes
        props.put("buffer.memory", 33554432);
        // Serializers for key and value
        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        // Custom partitioner (see KafkaCustomPartitioner below)
//        props.put("partitioner.class", KafkaCustomPartitioner.class.getName());
        // Build the producer
        Producer<String, String> producer = new KafkaProducer<>(props);
        for (int i = 0; i < 100; i++) {
            // Send data
            // 1. With no key, records are distributed across the partitions round-robin
            producer.send(new ProducerRecord<>("test", "test-" + i));
            // 2. With a fixed key, the partition is hash(key) % numPartitions, so every
            //    record lands on the same partition, causing data skew and disk pressure
//            producer.send(new ProducerRecord<>("test", "key", "test-" + i));
            // 3. With an explicit partition; pinning one partition of a multi-partition
            //    topic creates a hotspot
//            producer.send(new ProducerRecord<>("test", 2, "key", "test-" + i));
            // 4. With the custom partitioner configured via partitioner.class above
//            producer.send(new ProducerRecord<>("test", "key", "test-" + i));
        }
        producer.close();
    }
}
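As a side note, send() also accepts a Callback, which is the usual way to observe whether each record was acknowledged under the configured acks setting. A minimal sketch, reusing the topic name "test" from the example above:

producer.send(new ProducerRecord<>("test", "test-0"), (metadata, exception) -> {
    if (exception != null) {
        // The record was not acknowledged; with retries=0 this failure is final
        exception.printStackTrace();
    } else {
        System.out.println("acked: partition=" + metadata.partition()
                + ", offset=" + metadata.offset());
    }
});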

Custom partitioner:

import java.util.List;
import java.util.Map;
import java.util.Random;

import org.apache.kafka.clients.producer.Partitioner;
import org.apache.kafka.common.Cluster;
import org.apache.kafka.common.PartitionInfo;

public class KafkaCustomPartitioner implements Partitioner {

	// Reused across calls; creating a new Random per record would be wasteful
	private final Random random = new Random();

	@Override
	public void configure(Map<String, ?> configs) {
	}

	@Override
	public int partition(String topic, Object key, byte[] keyBytes, Object value, byte[] valueBytes, Cluster cluster) {
		// Pick a random partition of the topic
		List<PartitionInfo> partitions = cluster.partitionsForTopic(topic);
		int partitionNum = partitions.size();
		return random.nextInt(partitionNum);
	}

	@Override
	public void close() {
	}

}
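To activate the partitioner, register its class name on the producer configuration before constructing the producer, as in the commented-out line in the producer example:

Properties props = new Properties();
// ...other producer settings as above...
props.put("partitioner.class", KafkaCustomPartitioner.class.getName());
Producer<String, String> producer = new KafkaProducer<>(props);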

Consumer code:

import java.util.Arrays;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

public class Consumer {
    public static void main(String[] args) {
        // Consumer properties
        Properties props = new Properties();
        // Broker addresses
        props.put("bootstrap.servers", "hadoop01:9092,hadoop02:9092,hadoop03:9092");
        // Consumer group id
        props.put("group.id", "test");
        // Commit offsets automatically
        props.put("enable.auto.commit", "true");
        // To commit offsets manually instead, disable auto-commit:
//        props.put("enable.auto.commit", "false");
        // Interval between automatic offset commits (milliseconds)
//        props.put("auto.commit.interval.ms", "1000");
        // Deserializers for key and value
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        // Build the consumer
        KafkaConsumer<String, String> kafkaConsumer = new KafkaConsumer<>(props);
        // Subscribe to the topic
        kafkaConsumer.subscribe(Arrays.asList("test"));

        // Consume in a loop
        while (true) {
            // Poll for data, blocking up to 100 ms
            ConsumerRecords<String, String> poll = kafkaConsumer.poll(100);
            // Iterate over the fetched records
            for (ConsumerRecord<String, String> record : poll) {
                long offset = record.offset();      // offset
                String value = record.value();      // value
                String key = record.key();          // key
                int partition = record.partition(); // partition
                System.out.println("partition:" + partition + ",offset:" + offset +
                        ",key:" + key + ",value:" + value);
            }

            // Synchronous manual commit (only meaningful with enable.auto.commit=false)
//            kafkaConsumer.commitSync();
        }
    }
}
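Note that the poll(long) overload used here is deprecated as of Kafka clients 2.0. Assuming a recent client library, the Duration-based overload does the same thing:

// Requires java.time.Duration; available since Kafka clients 2.0
ConsumerRecords<String, String> poll = kafkaConsumer.poll(Duration.ofMillis(100));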

Manual offset commit:

import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.Set;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

public class Consumer2 {

    public static void main(String[] args) {

        Properties props = new Properties();
        props.put("bootstrap.servers", "hadoop01:9092,hadoop02:9092,hadoop03:9092");
        props.put("group.id", "test");
        // Disable auto-commit; offsets are committed manually per partition below
        props.put("enable.auto.commit", "false");

        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
        consumer.subscribe(Arrays.asList("test"));
        while (true) {
            ConsumerRecords<String, String> poll = consumer.poll(100);
            // Only partitions that actually returned records
            Set<TopicPartition> partitions = poll.partitions();
            for (TopicPartition partition : partitions) {
                List<ConsumerRecord<String, String>> records = poll.records(partition);
                // Track the offset of the last record processed in this partition
                long lastOffset = -1;
                for (ConsumerRecord<String, String> record : records) {
                    long off = record.offset();
                    int par = record.partition();
                    String key = record.key();
                    String value = record.value();
                    System.out.println(off + " " + par + " " + key + " " + value);
                    lastOffset = off;
                }
                // Commit lastOffset + 1: the committed offset is the position of the
                // next record the group will read from this partition
                Map<TopicPartition, OffsetAndMetadata> offsets =
                        Collections.singletonMap(partition, new OffsetAndMetadata(lastOffset + 1));
                consumer.commitSync(offsets);
            }
        }

    }

}
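If committing synchronously after every partition batch adds too much latency, commitAsync() with a callback is a common non-blocking alternative. A minimal sketch, reusing the offsets map built in the loop above:

consumer.commitAsync(offsets, (committedOffsets, exception) -> {
    if (exception != null) {
        // The commit failed; a later commit (or a final commitSync() on shutdown)
        // will advance the group's position instead
        exception.printStackTrace();
    }
});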

Source: blog.csdn.net/qq_45094921/article/details/104488647