The High-Level Consumer in a Multi-Threaded Application (Kafka)

Sometimes the logic that reads messages from Kafka does not care about handling message offsets; it just wants the data. The high-level consumer therefore abstracts away most of the details of consuming events from Kafka.

The threading model revolves around the number of partitions in the topic, and there are some specific rules:

  • If you provide more threads than there are partitions on the topic, some threads will never see a message.

  • If you have more partitions than threads, some threads will receive data from multiple partitions.

  • If you have multiple partitions per thread, there is no guarantee about the order in which you receive messages, other than that offsets within a single partition are sequential. For example, you may receive 5 messages from partition 1 and then 6 from partition 2, then 5 more from partition 1 followed by 5 more from partition 1, even if partition 2 has data available.

  • Adding more processes/threads will cause Kafka to rebalance, possibly changing which partitions are assigned to which threads. (A toy sketch of the first two rules follows this list.)
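
To make the first two rules concrete, here is a toy sketch that distributes partition ids over threads round-robin. It is purely illustrative (Kafka's actual assignor may distribute partitions differently), but the idle-thread and multi-partition cases fall out the same way:

import java.util.ArrayList;
import java.util.List;

public class AssignmentSketch {
    // Toy round-robin distribution of partition ids over consumer threads.
    static List<List<Integer>> assign(int numPartitions, int numThreads) {
        List<List<Integer>> perThread = new ArrayList<List<Integer>>();
        for (int t = 0; t < numThreads; t++)
            perThread.add(new ArrayList<Integer>());
        for (int p = 0; p < numPartitions; p++)
            perThread.get(p % numThreads).add(p);
        return perThread;
    }

    public static void main(String[] args) {
        // 3 partitions, 5 threads: threads 3 and 4 get nothing (rule 1).
        System.out.println(assign(3, 5));  // [[0], [1], [2], [], []]
        // 5 partitions, 2 threads: both threads read several partitions (rule 2).
        System.out.println(assign(5, 2));  // [[0, 2, 4], [1, 3]]
    }
}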

The approach:

First, get an iterator from Kafka; this iterator may block if there are no new messages available.
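
If blocking forever is undesirable, the old high-level consumer can instead be configured to time out. A sketch of the idea, meant to be spliced into the examples below (consumer.timeout.ms defaults to -1, which means block forever):

// In createConsumerConfig() below: give up instead of blocking forever
// when no new message arrives within 10 seconds.
props.put("consumer.timeout.ms", "10000");

// In the consuming thread, hasNext() then throws instead of blocking:
try {
    while (it.hasNext())
        System.out.println(new String(it.next().message()));
} catch (kafka.consumer.ConsumerTimeoutException e) {
    // No message within 10s: decide whether to keep polling or shut down.
}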

  • First, we create a Map that tells Kafka how many threads we are providing for which topics. createMessageStreams is how we pass this information to Kafka. The return value is a map of KafkaStream lists, one entry per topic, to listen on. (Note that here we ask Kafka for only one topic, but we could ask for several by adding another element to the Map.)
  • Second, we create a thread pool and hand each thread a new ConsumerTest object as its business logic.
  • Finally, we clean up: shutdown and error handling.

ConsumerTest

import kafka.consumer.ConsumerIterator;
import kafka.consumer.KafkaStream;
 
public class ConsumerTest implements Runnable {
    private KafkaStream<byte[], byte[]> m_stream;
    private int m_threadNumber;

    public ConsumerTest(KafkaStream<byte[], byte[]> a_stream, int a_threadNumber) {
        m_threadNumber = a_threadNumber;
        m_stream = a_stream;
    }

    public void run() {
        // The iterator blocks while there are no new messages; hasNext() only
        // returns false once the consumer connector is shut down.
        ConsumerIterator<byte[], byte[]> it = m_stream.iterator();
        while (it.hasNext())
            System.out.println("Consumer Thread " + m_threadNumber + ": " + new String(it.next().message()));
        System.out.println("Consumer Shutting down Thread: " + m_threadNumber);
    }
}

ConsumerGroupExample

import kafka.consumer.ConsumerConfig;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
 
public class ConsumerGroupExample {
    private final ConsumerConnector consumer;
    private final String topic;
    private ExecutorService executor;
 
    public ConsumerGroupExample(String a_zookeeper, String a_groupId, String a_topic) {
        consumer = kafka.consumer.Consumer.createJavaConsumerConnector(
                createConsumerConfig(a_zookeeper, a_groupId));
        this.topic = a_topic;
    }
 
    public void shutdown() {
        if (consumer != null) consumer.shutdown();
        if (executor != null) executor.shutdown();
        try {
            // Guard against run() never having been called, and give the worker
            // threads a grace period to drain before reporting an unclean exit.
            if (executor != null && !executor.awaitTermination(5000, TimeUnit.MILLISECONDS)) {
                System.out.println("Consumer Timed out waiting for consumer threads to shut down, exiting uncleanly");
            }
        } catch (InterruptedException e) {
            System.out.println("Consumer Interrupted during shutdown, exiting uncleanly");
        }
    }
 
    public void run(int a_numThreads) {
        // Tell Kafka how many streams (threads) we want for this topic.
        Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
        topicCountMap.put(topic, a_numThreads);
        Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = consumer.createMessageStreams(topicCountMap);
        List<KafkaStream<byte[], byte[]>> streams = consumerMap.get(topic);

        // now launch all the threads
        executor = Executors.newFixedThreadPool(a_numThreads);

        // now create an object to consume the messages
        int threadNumber = 0;
        for (final KafkaStream<byte[], byte[]> stream : streams) {
            executor.submit(new ConsumerTest(stream, threadNumber));
            threadNumber++;
        }
    }
 
    private static ConsumerConfig createConsumerConfig(String a_zookeeper, String a_groupId) {
        Properties props = new Properties();
        props.put("zookeeper.connect", a_zookeeper);
        props.put("group.id", a_groupId);
        props.put("zookeeper.session.timeout.ms", "400");
        props.put("zookeeper.sync.time.ms", "200");
        props.put("auto.commit.interval.ms", "1000");
        return new ConsumerConfig(props);
    }
 
    public static void main(String[] args) {
        String zooKeeper = "192.168.1.41:2181";
        String groupId = "consumer";
        String topic = "topic_test_consumer";
        int threads = 5;
 
        ConsumerGroupExample example = new ConsumerGroupExample(zooKeeper, groupId, topic);
        example.run(threads);
 
        try {
            // Let the consumer threads drain some messages before shutting down.
            Thread.sleep(10000);
        } catch (InterruptedException ie) {
            // Ignored: we are about to shut down anyway.
        }
        example.shutdown();
    }
}
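
Note that the example above uses the old ZooKeeper-based high-level consumer (kafka.consumer.*), which was deprecated and later removed from Kafka, while the producer below already uses the new client API. For reference, here is a minimal sketch of the same multi-threaded pattern with the modern org.apache.kafka.clients.consumer.KafkaConsumer, assuming kafka-clients 2.0+ (for poll(Duration)) and reusing the broker address and topic from the examples in this post:

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

public class NewConsumerGroupExample {
    public static void main(String[] args) {
        int numThreads = 5;
        ExecutorService executor = Executors.newFixedThreadPool(numThreads);
        for (int t = 0; t < numThreads; t++) {
            final int threadNumber = t;
            executor.submit(() -> {
                Properties props = new Properties();
                props.put("bootstrap.servers", "192.168.1.41:9092");
                props.put("group.id", "consumer");
                props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
                props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
                // KafkaConsumer is not thread-safe: each thread owns one instance,
                // and the group coordinator spreads partitions across them,
                // following the same partition/thread rules described above.
                try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
                    consumer.subscribe(Collections.singletonList("topic_test_consumer"));
                    while (!Thread.currentThread().isInterrupted()) {
                        ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(500));
                        for (ConsumerRecord<String, String> record : records)
                            System.out.println("Consumer Thread " + threadNumber + ": " + record.value());
                    }
                }
            });
        }
    }
}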

ProduceTest

import java.util.Properties;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;

public class ProduceTest {

    private final KafkaProducer<String, String> producer;
    private final static String TOPIC = "topic_test_consumer";

    public ProduceTest() {
        Properties props = new Properties();
        props.put("bootstrap.servers", "192.168.1.41:9092");
        props.put("acks", "all");  // wait for the full ISR to acknowledge each record
        props.put("retries", 0);   // do not retry transient send failures
        props.put("batch.size", 16384);
        props.put("linger.ms", 1);
        props.put("buffer.memory", 33554432);
        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        // Records are partitioned by key: the default partitioner hashes the key.
        producer = new KafkaProducer<String, String>(props);
    }

    public void produce() {
        for (int i = 1; i < 1001; i++) {
            String key = String.valueOf(i);
            String data = "produce kafka message:" + key;
            producer.send(new ProducerRecord<String, String>(TOPIC, key, data));
            System.out.println(data);
        }
        producer.close(); // flushes any buffered records before closing
    }

    public static void main(String[] args) {
        new ProduceTest().produce();
    }
}
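
The send() call above is fire-and-forget: a broker-side failure only surfaces through the Future it returns, which this example never inspects. A minimal sketch of asynchronous error handling, as a drop-in replacement for the send line in produce() (the Callback interface is part of the new producer API; the log messages are illustrative):

// Drop-in replacement for producer.send(...) in produce() above.
// Requires: import org.apache.kafka.clients.producer.Callback;
//           import org.apache.kafka.clients.producer.RecordMetadata;
producer.send(new ProducerRecord<String, String>(TOPIC, key, data), new Callback() {
    public void onCompletion(RecordMetadata metadata, Exception e) {
        if (e != null)
            System.err.println("send failed for key " + key + ": " + e.getMessage());
        else
            System.out.println("sent to " + metadata.topic() + "-" + metadata.partition()
                    + " @ offset " + metadata.offset());
    }
});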
