Background
To improve development efficiency, it is useful to have minimal producer/consumer demos for testing and understanding Kafka configuration.
Dependency
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka-clients</artifactId>
<version>3.2.0</version>
</dependency>
Producer
package com.example.demo.mqtt;
import com.alibaba.fastjson.JSON;
import org.apache.kafka.clients.producer.*;
import org.apache.kafka.common.serialization.StringSerializer;
import java.util.Properties;
import java.util.concurrent.ExecutionException;
/**
* @date 2022/11/2 14:39
*/
/**
 * Minimal Kafka producer demo: sends one JSON-serialized {@code Order} to
 * topic "my-topic" and blocks until the broker acknowledges the write.
 */
public class KafkaProducerDemo {
    public static void main(String[] args) {
        // 1. Producer configuration.
        Properties props = new Properties();
        // 2. Broker address (bootstrap.servers).
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.1.111:9092");
        // Key/value serializers are mandatory: turn String keys/values into byte arrays.
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        // 3. Create the producer. try-with-resources guarantees close() runs even
        //    when send() fails — the original leaked the producer on that path.
        try (Producer<String, String> producer = new KafkaProducer<>(props)) {
            Order order = new Order((long) 1, 100);
            ProducerRecord<String, String> producerRecord =
                    new ProducerRecord<>("my-topic", JSON.toJSONString(order));
            // 4. Synchronous send: get() blocks until the broker acknowledges.
            RecordMetadata metadata = producer.send(producerRecord).get();
            System.out.println("同步方式发送消息结果:" + "topic-" + metadata.topic() + "|partition-" + metadata.partition() + "|offset-" + metadata.offset());
        } catch (InterruptedException e) {
            // Restore the interrupt flag so callers can still observe the interruption.
            Thread.currentThread().interrupt();
            throw new RuntimeException(e);
        } catch (ExecutionException e) {
            throw new RuntimeException(e);
        }
    }
}
Consumer
package com.example.demo.mqtt;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;
import java.time.Duration;
import java.util.ArrayList;
import java.util.List;
import java.util.Properties;
/**
* @date 2022/10/11 19:26
*/
/**
 * Minimal Kafka consumer demo: manually assigns partition 0 of topic
 * "my-topic" (no consumer-group rebalancing) and polls forever, printing
 * each received record.
 */
public class KafkaConsumerDemo {
    public static void main(String[] args) {
        Properties properties = new Properties();
        // Broker address.
        properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.1.111:9092");
        // Key/value deserializers (mandatory).
        properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        // Consumer group id (mandatory).
        properties.put(ConsumerConfig.GROUP_ID_CONFIG, "group11111");
        // earliest: start from the oldest available offset; latest (default): only new messages.
        properties.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        // Minimum amount of data per fetch; default 1 byte.
        properties.put(ConsumerConfig.FETCH_MIN_BYTES_CONFIG, 1);
        // Maximum amount of data per fetch; default 50 MB.
        properties.put(ConsumerConfig.FETCH_MAX_BYTES_CONFIG, 50 * 1024 * 1024);
        // Max wait when the minimum fetch size has not been reached; default 500 ms.
        properties.put(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, 500);
        // Maximum number of records returned by a single poll(); default 500.
        properties.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, 500);
        // Parameterized consumer (the original used raw types, an unchecked
        // conversion) inside try-with-resources so it is closed if poll() throws.
        try (KafkaConsumer<String, String> kafkaConsumer = new KafkaConsumer<>(properties)) {
            // A. Explicitly assign the partitions to consume.
            List<TopicPartition> topicPartitions = new ArrayList<>();
            topicPartitions.add(new TopicPartition("my-topic", 0));
            kafkaConsumer.assign(topicPartitions);
            while (true) {
                ConsumerRecords<String, String> records = kafkaConsumer.poll(Duration.ofSeconds(2L));
                for (ConsumerRecord<String, String> record : records) {
                    System.out.println("消费数据。topic: " + record.topic() + "|partition:" + record.partition() + "|数据:" + record.value());
                }
            }
        }
    }
}