KAFKA生产者消费者API学习

前期准备

  • 启动一个broker(可以启动多个,这次测试只启动一个)
> bin/kafka-server-start.sh config/server.properties & 
  • 创建一个topic
> bin/kafka-topics.sh --create --zookeeper localhost:2181 
     --replication-factor 1 --partitions 1 --topic test
  • 保险起见,查看一下刚刚创建的topic
> bin/kafka-topics.sh --describe --zookeeper localhost:2181 --topic test

创建项目

  • 为了方便管理,使用maven创建管理我们的项目
修改maven项目中的核心配置文件pom.xml:
<dependency>
    <groupId>org.apache.kafka</groupId>
    <artifactId>kafka-clients</artifactId>
    <version>0.10.1.0</version>
</dependency>

生产者客户端

kafka生产者客户端API:http://orchome.com/303
通过阅读生产者API,得知其核心内容是创建生产者,并使用生产者发送消息到主题中。

// Configure the producer via a Properties object, then create the producer.
 Properties props = new Properties();
 props.put("bootstrap.servers", "localhost:9092");
 props.put("acks", "all");
 props.put("retries", 0);
 props.put("batch.size", 16384);
 props.put("linger.ms", 1);
 props.put("buffer.memory", 33554432);
 props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
 props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
 Producer<String, String> producer = new KafkaProducer<>(props);
// send() delivers a record to a topic. The relevant signatures are:
//   public ProducerRecord(String topic, K key, V value)
//   public Future<RecordMetadata> send(ProducerRecord<K,V> record, Callback callback)
  • 总体实现代码
import java.util.Properties;
import java.util.concurrent.TimeUnit;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;

/*
 * write message into topic test
 */

/*
 * Writes ten numbered messages into the given Kafka topic
 * (topic "test" when run via main).
 */
public class kafkaProducer extends Thread {

    // Topic every message is sent to; set once in the constructor.
    private final String topic;

    /**
     * Creates a producer thread targeting the given topic.
     *
     * @param topic the Kafka topic messages are written to
     */
    public kafkaProducer(String topic) {
        super();
        this.topic = topic;
    }

    // The new producer is thread-safe; sharing a single instance across
    // threads is usually faster than one instance per thread. Kept as a
    // private factory for now; could be refactored into a singleton later.
    private Producer<String, String> createProducer() {
        // Configure the producer via a Properties object.
        Properties properties = new Properties();
        properties.put("bootstrap.servers", "localhost:9092");
        properties.put("acks", "all");
        properties.put("retries", 0);
        properties.put("batch.size", 16384);
        properties.put("linger.ms", 1);
        properties.put("buffer.memory", 33554432);
        properties.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        properties.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");

        return new KafkaProducer<String, String>(properties);
    }

    @Override
    public void run() {
        // try-with-resources flushes and closes the producer on exit, so the
        // original fixed 1-second sleep (used to let buffered sends complete)
        // is no longer needed.
        try (Producer<String, String> producer = createProducer()) {
            for (int i = 0; i < 10; i++) {
                producer.send(new ProducerRecord<String, String>(this.topic, "times", Integer.toString(i)));
            }
        }
    }

    public static void main(String[] args) {
        // start() runs the producer on its own thread, matching the
        // consumer example; the JVM waits for this non-daemon thread.
        new kafkaProducer("test").start();
    }
}

消费者客户端

  • 偏移量(offset)
    偏移量是主题分区中一条消息的唯一标识符,对于消费者而言始终指向下一个待访问消息。偏移量可以自动提交也可以消费者手动控制。

  • 消费者组
    组中包含多个消费进程,通过进程池瓜分消费和处理消息的工作。
    每个消费进程通过subscribe API订阅一个主题列表,并和组内进程平衡主题分区。
    消费组中的成员动态维护,不论什么原因增加减少都会重新平衡分配。

  • 简单示例

import java.util.Arrays;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

public class kafkaConsumer extends Thread{

    private String topic;

    public kafkaConsumer(String topic) {
        this.topic = topic;
    }

    private KafkaConsumer<String, String> createConsumer() {
        Properties properties = new Properties();
        //指定一个或多个broker,可自动集群中其余broker
        properties.put("bootstrap.servers", "localhost:9092");
        //设置消费者组
        properties.put("group.id", "group-test");
        //设置自动提交offset
        properties.put("enable.auto.commit", "true");
        //设置自动提交频率间隔
        properties.put("auto.commit.interval.ms", "1000");
        //deserializer用于将byte转换成Object,StringDeserializer是一个String解析器
        properties.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        properties.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        return new KafkaConsumer<String, String>(properties);
    }

    @Override
    public void run() {
        KafkaConsumer<String, String> consumer = createConsumer();
        //设置订阅的主题列表
        consumer.subscribe(Arrays.asList(this.topic));
        while(true) {
        //poll()方法持续接收消息,获得一个消息Map
        //timeout参数:等待可用消息的时间ms
        ConsumerRecords<String, String> records = consumer.poll(2000);
        for(ConsumerRecord<String, String> record : records){
        System.out.println(record.offset() + "  " + record.key() + "  " + record.value());
            }           
        }
    }

    public static void main(String[] args) {
        new kafkaConsumer("test").start();
    }

猜你喜欢

转载自blog.csdn.net/baidu_22153679/article/details/77679743