Kafka Producer and Consumer Examples

I. Environment Setup and Configuration

1. Download Kafka from the official site

The version I downloaded is kafka_2.11-0.10.0.1.tgz; the examples below are based on this version.

2. Extract and install

tar -xzf kafka_2.11-0.10.0.1.tgz
mv kafka_2.11-0.10.0.1 /root

3. Modify the configuration files

cd /root/kafka_2.11-0.10.0.1/config
cp server.properties server1.properties 
cp server.properties server2.properties 
cp server.properties server3.properties 
Change the following three parameters in each copy:

server1.properties

broker.id=1
listeners=PLAINTEXT://:9092
log.dirs=/tmp/kafka-logs-1
server2.properties

broker.id=2
listeners=PLAINTEXT://:9093
log.dirs=/tmp/kafka-logs-2

server3.properties

broker.id=3
listeners=PLAINTEXT://:9094
log.dirs=/tmp/kafka-logs-3
4. Write the startup script kafkacluster

#!/bin/sh
# Start ZooKeeper first, give it a moment, then start the three brokers.
bin/zookeeper-server-start.sh config/zookeeper.properties &
sleep 3s
bin/kafka-server-start.sh config/server1.properties &
bin/kafka-server-start.sh config/server2.properties &
bin/kafka-server-start.sh config/server3.properties &
To start the Kafka cluster, make the script executable (chmod +x kafkacluster) and run ./kafkacluster.

Here is also a companion script that stops Kafka and ZooKeeper:

#!/bin/sh
PIDS=$(ps ax | grep -i 'kafka\.Kafka' | grep java | grep -v grep | awk '{print $1}')

if [ -z "$PIDS" ]; then
    echo "No kafka server to stop"
else 
    for pid in $PIDS
    do  
        echo "$pid"
        kill -9 "$pid"  # force kill; the broker gets no chance to shut down cleanly
    done
fi

sleep 3s

PIDS=$(ps ax | grep java | grep -i QuorumPeerMain | grep -v grep | awk '{print $1}')

if [ -z "$PIDS" ]; then
    echo "No zookeeper server to stop"
    exit 1
else
    kill -s TERM $PIDS
fi
5. Create a topic

bin/kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 3 --partitions 3 --topic test
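To verify the partition and replica assignment, the same tool can describe the topic:

bin/kafka-topics.sh --describe --zookeeper localhost:2181 --topic test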

II. Code

pom.xml

	<properties>
		<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
		<spring.version>3.2.14.RELEASE</spring.version>
		<kafka.version>0.10.0.1</kafka.version>
	</properties>

	<dependencies>
		<dependency>
			<groupId>log4j</groupId>
			<artifactId>log4j</artifactId>
			<version>1.2.17</version>
		</dependency>
		<dependency>
			<groupId>org.slf4j</groupId>
			<artifactId>slf4j-log4j12</artifactId>
			<version>1.7.12</version>
		</dependency>
		<dependency>
			<groupId>org.slf4j</groupId>
			<artifactId>slf4j-api</artifactId>
			<version>1.7.12</version>
		</dependency>
		<dependency>
			<groupId>junit</groupId>
			<artifactId>junit</artifactId>
			<version>3.8.1</version>
			<scope>test</scope>
		</dependency>

		<dependency>
			<groupId>org.springframework</groupId>
			<artifactId>spring-context</artifactId>
			<version>${spring.version}</version>
		</dependency>


		<dependency>
			<groupId>org.apache.kafka</groupId>
			<artifactId>kafka_2.11</artifactId>
			<version>${kafka.version}</version>
			<scope>compile</scope>
			<exclusions>
				<exclusion>
					<artifactId>jmxri</artifactId>
					<groupId>com.sun.jmx</groupId>
				</exclusion>
				<exclusion>
					<artifactId>jms</artifactId>
					<groupId>javax.jms</groupId>
				</exclusion>
				<exclusion>
					<artifactId>jmxtools</artifactId>
					<groupId>com.sun.jdmk</groupId>
				</exclusion>
			</exclusions>
		</dependency>
		<dependency>
			<groupId>org.apache.kafka</groupId>
			<artifactId>kafka-clients</artifactId>
			<version>${kafka.version}</version>
		</dependency>
	</dependencies>
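
The project also needs a log4j configuration on the classpath, since slf4j-log4j12 routes all logging to log4j. That file is not shown in the original post; a minimal log4j.properties matching the console output format in section III could look like this (an assumed sketch, not the original file):

log4j.rootLogger=INFO, stdout
log4j.appender.stdout=org.apache.log4j.ConsoleAppender
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss,SSS} %5p %c{1}:%L - %m%n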

1. Producer

Producer code: ProducerTest.java

/*
 * @(#) ProducerTest.java 2016/04/28
 * 
 * Copyright 2016 snow.com, Inc. All rights reserved.
 */
package com.snow.kafka.producer;

import java.util.Properties;
import java.util.concurrent.TimeUnit;

import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Component;

/**
 * Example using the kafka-clients API
 * @author hzwanghuiqi
 * @version 2016/04/28
 */
@Component
public class ProducerTest {
    private static final Logger logger = LoggerFactory.getLogger(ProducerTest.class);

    public Properties createProperties() {
        Properties properties = new Properties();
        properties.put("bootstrap.servers", "localhost:9092,localhost:9093,localhost:9094");
        properties.put("acks", "all");
        properties.put("retries", 0); // 消息发送请求失败重试次数
        properties.put("batch.size", 2000);
        properties.put("linger.ms", 1); // 消息逗留在缓冲区的时间,等待更多的消息进入缓冲区一起发送,减少请求发送次数
        properties.put("buffer.memory", 33554432); // 内存缓冲区的总量
        // 如果发送到不同分区,并且不想采用默认的Utils.abs(key.hashCode) % numPartitions分区方式,则需要自己自定义分区逻辑
        properties.put("partitioner.class", "com.snow.kafka.producer.SimplePartitioner");
        properties.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        properties.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        return properties;
    }
    
    public void sendMessage() {
        Properties properties = createProperties();
        Producer<String, String> producer = new KafkaProducer<String, String>(properties);
        int i = 0;
        try {
            while (true) {
                TimeUnit.SECONDS.sleep(2);
                String key = Integer.toString(i);
                String value = "times: " + key;
                ProducerRecord<String, String> record = new ProducerRecord<String, String>("test", key, value);
                producer.send(record, new Callback() {

                    @Override
                    public void onCompletion(RecordMetadata metadata, Exception e) {
                        if (e != null) {
                            logger.warn("send record error", e);
                        } else {
                            // metadata is null when the send failed, so only read it on success
                            logger.info("offset: {}, partition: {}", metadata.offset(), metadata.partition());
                        }
                    }
                });
                i++;
            }
        } catch (Exception e) {
            logger.warn("{}", e);
        } finally {
            producer.close();
        }

    }
}
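
The example above sends asynchronously and reports the result through a callback. Since producer.send() returns a Future<RecordMetadata>, a synchronous variant is also possible; this sketch (not part of the original code) would replace the producer.send(record, callback) call inside sendMessage(), and the exceptions thrown by get() are already covered by the surrounding catch (Exception e):

// Block until the broker acknowledges the record; get() throws
// ExecutionException if the send failed.
RecordMetadata metadata = producer.send(record).get();
logger.info("offset: {}, partition: {}", metadata.offset(), metadata.partition());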
Custom partitioner: SimplePartitioner.java

/*
 * @(#) SimplePartitioner.java 2016/04/28
 * 
 * Copyright 2016 snow.com, Inc. All rights reserved.
 */
package com.snow.kafka.producer;

import java.util.Map;

import org.apache.kafka.clients.producer.Partitioner;
import org.apache.kafka.common.Cluster;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * @author hzwanghuiqi
 * @version 2016/04/28
 */
public class SimplePartitioner implements Partitioner {
    public static final Logger logger = LoggerFactory.getLogger(SimplePartitioner.class);


    @Override
    public int partition(String topic, Object key, byte[] keyBytes, Object value, byte[] valueBytes, Cluster cluster) {
        int partition = 0;
        int keyInt = Integer.parseInt((String) key); // keys arrive as stringified integers
        if (keyInt >= 0) {
            partition = keyInt % cluster.partitionCountForTopic(topic);
            // logger.info("key {}, partition {}", key, partition);
        }
        return partition;
    }

    @Override
    public void close() {
        // nothing to clean up

    }

    @Override
    public void configure(Map<String, ?> configs) {
        // no additional configuration needed

    }
}
Producer main class: ProducerMainTest.java

/*
 * @(#) ProducerMainTest.java 2016/09/23
 * 
 * Copyright 2016 snow.com, Inc. All rights reserved.
 */
package com.snow.kafka.producer;

import org.springframework.context.ApplicationContext;
import org.springframework.context.support.ClassPathXmlApplicationContext;
import org.springframework.stereotype.Component;

/**
 * @author hzwanghuiqi
 * @version 2016/09/23
 */
@Component
public class ProducerMainTest {

    public static void main(String[] args) {
        ApplicationContext context = new ClassPathXmlApplicationContext(new String[] {"application-context.xml"});
        
        ProducerTest producerTest = context.getBean(ProducerTest.class);
        producerTest.sendMessage();
    }


}
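
Both main classes load application-context.xml from the classpath. The file is not shown in the original post; a minimal version that simply enables component scanning for the example packages would look like this (an assumption):

<?xml version="1.0" encoding="UTF-8"?>
<beans xmlns="http://www.springframework.org/schema/beans"
       xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
       xmlns:context="http://www.springframework.org/schema/context"
       xsi:schemaLocation="http://www.springframework.org/schema/beans
           http://www.springframework.org/schema/beans/spring-beans.xsd
           http://www.springframework.org/schema/context
           http://www.springframework.org/schema/context/spring-context.xsd">

    <!-- Picks up the @Component-annotated classes above -->
    <context:component-scan base-package="com.snow.kafka" />

</beans>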
2. Consumer

Consumer code: ConsumerTest.java

/*
 * @(#) ConsumerTest.java 2016/09/26
 * 
 * Copyright 2016 snow.com, Inc. All rights reserved.
 */
package com.snow.kafka.consumer;

import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.clients.consumer.OffsetCommitCallback;
import org.apache.kafka.common.TopicPartition;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Component;

/**
 * @author hzwanghuiqi
 * @version 2016/09/26
 */
@Component
public class ConsumerTest {
    private static final Logger logger = LoggerFactory.getLogger(ConsumerTest.class);

    // The producer should be running while you consume; with the default auto.offset.reset=latest, a new consumer group only sees messages produced after it starts.
    public Properties createSimpleProperties() {
        Properties properties = new Properties();
        properties.put("bootstrap.servers", "localhost:9092,localhost:9093,localhost:9094");
        properties.put("group.id", "test");
        properties.put("enable.auto.commit", "true");
        properties.put("auto.commit.interval.ms", "60000");
        properties.put("session.timeout.ms", "30000");
        properties.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        properties.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        return properties;
    }

    // Auto-commit offsets
    public void simpleConsumer() {
        Properties properties = createSimpleProperties();

        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(properties);
        consumer.subscribe(Arrays.asList("test"));
        try {
            while (true) {
                ConsumerRecords<String, String> records = consumer.poll(10);
                for (ConsumerRecord<String, String> record : records) {
                    logger.info("partition = {}, offset = {}, key = {}, value = {}", record.partition(), record.offset(), record.key(),
                                record.value());
                }
            }
        } finally {
            consumer.close();
        }
    }


    public Properties createProperties() {
        Properties properties = new Properties();
        properties.put("bootstrap.servers", "localhost:9092,localhost:9093,localhost:9094");
        properties.put("group.id", "test");
        properties.put("enable.auto.commit", "false");
        properties.put("auto.commit.interval.ms", "60000");
        properties.put("session.timeout.ms", "30000");
        properties.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        properties.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        return properties;
    }

    /**
     * Commit offsets manually
     */
    public void consumer() {
        Properties properties = createProperties();
        KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>(properties);
        consumer.subscribe(Arrays.asList("test"));
        try {
            while (true) {
                ConsumerRecords<String, String> records = consumer.poll(10);
                for (ConsumerRecord<String, String> record : records) {
                    logger.info("partition = {}, offset = {}, key = {}, value = {}", record.partition(), record.offset(), record.key(),
                                record.value());
                }
                if (!records.isEmpty()) {
                    // commit offsets asynchronously
                    consumer.commitAsync(new OffsetCommitCallback() {

                        @Override
                        public void onComplete(Map<TopicPartition, OffsetAndMetadata> offsets, Exception exception) {
                            for (Entry<TopicPartition, OffsetAndMetadata> offset : offsets.entrySet()) {
                                logger.info("commit offset: partition = {}, offset = {}", offset.getKey(), offset.getValue().offset());
                            }
                        }
                    });
                }
            }
        } finally {
            consumer.close();
        }
    }

    /**
     * Manually commit the exact offset for each partition
     */
    public void consumerExactly() {
        Properties properties = createProperties();
        KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>(properties);
        consumer.subscribe(Arrays.asList("test"));
        try {
            while (true) {
                ConsumerRecords<String, String> records = consumer.poll(Long.MAX_VALUE);
                for (TopicPartition partition : records.partitions()) {
                    List<ConsumerRecord<String, String>> partitionRecords = records.records(partition);
                    for (ConsumerRecord<String, String> record : partitionRecords) {
                        logger.info("partition = {}, offset = {}, key = {}, value = {}", record.partition(), record.offset(), record.key(),
                                    record.value());
                    }
                    long lastOffset = partitionRecords.get(partitionRecords.size() - 1).offset();
                    consumer.commitSync(Collections.singletonMap(partition, new OffsetAndMetadata(lastOffset + 1)));
                }
            }
        } finally {
            consumer.close();
        }

    }

}
Consumer main class: ConsumerMainTest.java

/*
 * @(#) ConsumerMainTest.java 2016/09/26
 * 
 * Copyright 2016 snow.com, Inc. All rights reserved.
 */
package com.snow.kafka.consumer;

import org.springframework.context.ApplicationContext;
import org.springframework.context.support.ClassPathXmlApplicationContext;
import org.springframework.stereotype.Component;

/**
 * @author hzwanghuiqi
 * @version 2016/09/26
 */
@Component
public class ConsumerMainTest {

    public static void main(String[] args) {
        ApplicationContext context = new ClassPathXmlApplicationContext(new String[] {"application-context.xml"});
        ConsumerTest consumerTest = context.getBean(ConsumerTest.class);
        // consumerTest.simpleConsumer();
        // consumerTest.consumer();
        consumerTest.consumerExactly();
    }

}
Of the three consumer methods, simpleConsumer() is the classic example with auto-commit, consumer() commits offsets asynchronously, and consumerExactly() commits offsets synchronously per partition.
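
One caveat about the asynchronous variant: commitAsync() does not retry a failed commit. A common pattern, sketched here rather than taken from the original code (running is a hypothetical shutdown flag), is to commit asynchronously inside the loop for throughput and synchronously once on shutdown:

try {
    while (running) {
        ConsumerRecords<String, String> records = consumer.poll(100);
        // ... process records ...
        consumer.commitAsync(); // fast, but a failed commit is not retried
    }
} finally {
    try {
        consumer.commitSync(); // retries until success or an unrecoverable error
    } finally {
        consumer.close();
    }
}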

III. Running Results

The producer must be running when you start the consumer; otherwise there are no messages to consume.

Sample producer output:

2016-09-27 20:10:01,505  INFO ProducerTest:61 - offset: 0, partition: 0
2016-09-27 20:10:03,496  INFO ProducerTest:61 - offset: 0, partition: 1
2016-09-27 20:10:05,512  INFO ProducerTest:61 - offset: 0, partition: 2
2016-09-27 20:10:07,497  INFO ProducerTest:61 - offset: 1, partition: 0
2016-09-27 20:10:09,494  INFO ProducerTest:61 - offset: 1, partition: 1
2016-09-27 20:10:11,497  INFO ProducerTest:61 - offset: 1, partition: 2
2016-09-27 20:10:13,501  INFO ProducerTest:61 - offset: 2, partition: 0
2016-09-27 20:10:15,494  INFO ProducerTest:61 - offset: 2, partition: 1
2016-09-27 20:10:17,494  INFO ProducerTest:61 - offset: 2, partition: 2
2016-09-27 20:10:19,495  INFO ProducerTest:61 - offset: 3, partition: 0
2016-09-27 20:10:21,503  INFO ProducerTest:61 - offset: 3, partition: 1
2016-09-27 20:10:23,497  INFO ProducerTest:61 - offset: 3, partition: 2
2016-09-27 20:10:25,498  INFO ProducerTest:61 - offset: 4, partition: 0
2016-09-27 20:10:27,499  INFO ProducerTest:61 - offset: 4, partition: 1
2016-09-27 20:10:29,501  INFO ProducerTest:61 - offset: 4, partition: 2
2016-09-27 20:10:31,500  INFO ProducerTest:61 - offset: 5, partition: 0
2016-09-27 20:10:33,500  INFO ProducerTest:61 - offset: 5, partition: 1
2016-09-27 20:10:35,497  INFO ProducerTest:61 - offset: 5, partition: 2
2016-09-27 20:10:37,501  INFO ProducerTest:61 - offset: 6, partition: 0
Consumer output:

2016-09-27 20:10:13,507  INFO ConsumerTest:123 - partition = 0, offset = 2, key = 6, value = times: 6
2016-09-27 20:10:15,496  INFO ConsumerTest:123 - partition = 1, offset = 2, key = 7, value = times: 7
2016-09-27 20:10:17,494  INFO ConsumerTest:123 - partition = 2, offset = 2, key = 8, value = times: 8
2016-09-27 20:10:19,494  INFO ConsumerTest:123 - partition = 0, offset = 3, key = 9, value = times: 9
2016-09-27 20:10:21,503  INFO ConsumerTest:123 - partition = 1, offset = 3, key = 10, value = times: 10
2016-09-27 20:10:23,495  INFO ConsumerTest:123 - partition = 2, offset = 3, key = 11, value = times: 11
2016-09-27 20:10:25,500  INFO ConsumerTest:123 - partition = 0, offset = 4, key = 12, value = times: 12
2016-09-27 20:10:27,502  INFO ConsumerTest:123 - partition = 1, offset = 4, key = 13, value = times: 13
2016-09-27 20:10:29,496  INFO ConsumerTest:123 - partition = 2, offset = 4, key = 14, value = times: 14
2016-09-27 20:10:31,497  INFO ConsumerTest:123 - partition = 0, offset = 5, key = 15, value = times: 15
2016-09-27 20:10:33,502  INFO ConsumerTest:123 - partition = 1, offset = 5, key = 16, value = times: 16
2016-09-27 20:10:35,496  INFO ConsumerTest:123 - partition = 2, offset = 5, key = 17, value = times: 17
2016-09-27 20:10:37,496  INFO ConsumerTest:123 - partition = 0, offset = 6, key = 18, value = times: 18
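As an independent check, the console consumer bundled with Kafka can read the same topic (this version still ships the ZooKeeper-based console consumer):

bin/kafka-console-consumer.sh --zookeeper localhost:2181 --topic test --from-beginning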

Packaged Kafka download, including the custom configuration above: kafka_2.11-0.10.0.1.tar.gz

Full source code: https://github.com/xueshui/kafka-test




Originally published at blog.csdn.net/wanghuiqi2008/article/details/52683915