package com.yy.fastcustom;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;

import java.util.Arrays;
import java.util.Properties;

/**
 * Minimal Kafka produce/consume demo (kafka-clients 0.10.x).
 *
 * <p>Usage: assign {@link #producer} and {@link #customer} from the factory
 * methods {@link #producer()} / {@link #customer()} before calling
 * {@link #read(String)} or starting this {@code Runnable} on a thread.
 * Neither field is initialized here, so both are {@code null} until the
 * caller wires them up.
 *
 * <p>Created by zzq on 2019/6/13/013.
 */
public class KafkaTest implements Runnable {

    /** Producer used by {@link #read(String)}; must be assigned before use. */
    public volatile Producer<String, String> producer;

    /** Consumer polled by {@link #run()}; must be assigned before use. */
    public volatile KafkaConsumer<String, String> customer;

    /**
     * Publishes {@code clientId} as the value of a record on the "read" topic
     * and echoes it to stdout.
     *
     * @param clientId message payload to send
     */
    public void read(String clientId) {
        producer.send(new ProducerRecord<String, String>("read", clientId));
        System.out.println("=========" + clientId);
        System.out.println();
    }

    /**
     * Consumption loop: subscribes to the "read" topic and polls forever,
     * printing the offset and value of every record received. Never returns.
     *
     * <p>FIX: the method was declared {@code RUN()}, which does not override
     * {@link Runnable#run()} and fails to compile under {@code @Override};
     * renamed to {@code run()}.
     */
    @Override
    public void run() {
        customer.subscribe(Arrays.asList("read"));
        for (;;) {
            ConsumerRecords<String, String> records = customer.poll(100);
            for (ConsumerRecord<String, String> record : records) {
                // FIX: original format "offset =% d,% S value =" is malformed —
                // "% S" throws FormatFlagsConversionMismatchException at runtime.
                System.out.printf("offset = %d, value = %s%n",
                        record.offset(), record.value());
            }
        }
    }

    /**
     * Builds a String/String producer for broker 10.10.210.123:9092.
     *
     * @return a configured {@link KafkaProducer}
     */
    public Producer<String, String> producer() {
        Properties properties = new Properties();
        properties.put("bootstrap.servers", "10.10.210.123:9092");
        // acks: how many broker acknowledgements the producer waits for:
        //   "0"   - none; highest throughput, data may be lost
        //   "1"   - leader has written to its local log (followers may lag)
        //   "all" - leader waits for all in-sync replicas to persist the record
        // FIX: Kafka config keys are case-sensitive — "ACKs" was silently ignored.
        properties.put("acks", "0");
        properties.put("retries", 0); // number of send retries on transient failure
        // batch.size (bytes): records are batched per partition to reduce the
        // number of requests; 0 disables batching, too large wastes memory.
        properties.put("batch.size", 16384);
        // FIX: was "Properties.put(...)" — a static call on the class, which
        // does not compile.
        properties.put("linger.ms", 1); // ms to wait for more records before sending a batch
        properties.put("buffer.memory", 33554432); // total producer buffer size, bytes
        properties.put("key.serializer",
                "org.apache.kafka.common.serialization.StringSerializer");
        properties.put("value.serializer",
                "org.apache.kafka.common.serialization.StringSerializer");
        return new KafkaProducer<String, String>(properties);
    }

    /**
     * Builds a String/String consumer in group "read" for broker
     * 10.10.210.123:9092.
     *
     * @return a configured {@link KafkaConsumer}
     */
    public KafkaConsumer<String, String> customer() {
        Properties properties = new Properties();
        properties.put("bootstrap.servers", "10.10.210.123:9092");
        properties.put("group.id", "read");
        // FIX: value was the garbled string "to true", which is not a valid
        // boolean. With auto-commit on, consumed offsets are committed to the
        // broker at the interval below (the new consumer API does not use
        // ZooKeeper for offsets, despite the original comment).
        properties.put("enable.auto.commit", "true");
        properties.put("auto.commit.interval.ms", "1000"); // auto-commit frequency, ms
        // FIX: valid values are lowercase; "Earliest" is rejected at startup
        // with a ConfigException. With no committed offset, start from the
        // beginning of the log.
        properties.put("auto.offset.reset", "earliest");
        properties.put("session.timeout.ms", "30000");
        properties.put("key.deserializer",
                "org.apache.kafka.common.serialization.StringDeserializer");
        properties.put("value.deserializer",
                "org.apache.kafka.common.serialization.StringDeserializer");
        return new KafkaConsumer<>(properties);
    }
}
/*
 * Required Maven dependency:
 *
 * <dependency>
 *     <groupId>org.apache.kafka</groupId>
 *     <artifactId>kafka-clients</artifactId>
 *     <version>0.10.2.1</version>
 * </dependency>
 */