Kafka consumer (old version) source code: opening demo


Old (high-level) consumer example:

You specify the topic and the number of consumer threads.

Each topic maps to one or more message streams, and each stream is consumed by a single thread ==> Map<String, List<KafkaStream>>.

So when consuming messages you must specify both the topic and the number of threads: requesting N threads gives you N message streams.
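To make that mapping concrete, here is a minimal sketch of how the high-level API hands back one stream per requested thread. The topic name and thread count are placeholders, and it assumes the same imports and ConsumerConnector instance as the full demo below:

// ask for 3 streams of topic "test"; the connector then hands back exactly 3 KafkaStreams
Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
topicCountMap.put("test", 3);
Map<String, List<KafkaStream<byte[], byte[]>>> streamsByTopic =
        consumer.createMessageStreams(topicCountMap);
List<KafkaStream<byte[], byte[]>> streams = streamsByTopic.get("test"); // one stream per requested thread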

The demo below proceeds in three steps:

 1. Create the Kafka consumer connector (currently there is only one kind of connector).

 2. Use the connector to create the Kafka message streams.

 3. Iterate over the streams to consume the messages.

package kafka.old.consumer;
import kafka.consumer.ConsumerConfig;
import kafka.consumer.ConsumerIterator;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;

import java.util.*;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class HighLevelConsumerExample {
    private final ConsumerConnector consumer;
    private final String topic;
    private  ExecutorService executor;

    public HighLevelConsumerExample(String a_zookeeper, String a_groupId, String a_topic) {
        consumer = kafka.consumer.Consumer.createJavaConsumerConnector( // create the connector; note the config built in createConsumerConfig below
                createConsumerConfig(a_zookeeper, a_groupId));
        this.topic = a_topic;
    }

    public void shutdown() {
        if (consumer != null) consumer.shutdown();
        if (executor != null) executor.shutdown();
    }

    public void run(int a_numThreads) { // create the concurrent consumers
        Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
        topicCountMap.put(topic, a_numThreads); // which topic to read and how many threads to read it with
        Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = consumer.createMessageStreams(topicCountMap); // create the streams

        List<KafkaStream<byte[], byte[]>> streams = consumerMap.get(topic); // one KafkaStream per consuming thread

        // now launch all the threads
        executor = Executors.newFixedThreadPool(a_numThreads);

        // now create an object to consume the messages
        int threadNumber = 0;
        for (final KafkaStream<byte[], byte[]> stream : streams) {
            executor.submit(new ConsumerTest(stream, threadNumber)); // start one consumer thread per stream
            threadNumber++;
        }
    }

    private static ConsumerConfig createConsumerConfig(String a_zookeeper, String a_groupId) {
        Properties props = new Properties();
        props.put("zookeeper.connect", a_zookeeper);
        props.put("group.id", a_groupId);
        props.put("zookeeper.session.timeout.ms", "400");
        props.put("zookeeper.sync.time.ms", "200");
        props.put("auto.commit.interval.ms", "1000");

        return new ConsumerConfig(props);
    }

    public static void main(String[] args) {
        args = new String[]{"localhost:2181","test-group","test","10"};

        String zooKeeper = args[0];
        String groupId = args[1];
        String topic = args[2];
        int threads = Integer.parseInt(args[3]);

        HighLevelConsumerExample example = new HighLevelConsumerExample(zooKeeper, groupId, topic);
        example.run(threads);

        try {
            Thread.sleep(100000); // let the consumer threads run for a while
        } catch (InterruptedException ie) {
            Thread.currentThread().interrupt(); // restore the interrupt flag instead of swallowing it
        }
        example.shutdown();
    }

    class ConsumerTest implements Runnable {
        private KafkaStream<byte[], byte[]> m_stream;
        private int m_threadNumber;

        public ConsumerTest(KafkaStream<byte[], byte[]> a_stream, int a_threadNumber) {
            m_threadNumber = a_threadNumber;
            m_stream = a_stream;
        }

        public void run() {
            ConsumerIterator<byte[], byte[]> it = m_stream.iterator();
            while (it.hasNext())
                System.out.println("["+new Date()+"]"+"Thread " + m_threadNumber + ": " + new String(it.next().message()));
            System.out.println("Shutting down Thread: " + m_threadNumber);
        }
    }
}
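
One thing the demo glosses over: shutdown() returns immediately, so worker threads may be cut off mid-message. A sketch of a slightly more graceful variant (same fields as above, plus an import of java.util.concurrent.TimeUnit; the 5-second timeout is an arbitrary choice) would wait for the executor to drain:

    public void shutdown() {
        if (consumer != null) consumer.shutdown(); // closes the streams, so the iterators stop blocking
        if (executor != null) executor.shutdown(); // stop accepting new tasks
        try {
            // give the worker threads a moment to finish the message they are processing
            if (executor != null && !executor.awaitTermination(5, TimeUnit.SECONDS)) {
                System.out.println("Timed out waiting for consumer threads to exit");
            }
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt(); // restore the interrupt flag
        }
    }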

The follow-up posts will start analyzing the Kafka source code, mainly based on version 0.10.2.1.
