下载 Kafka
https://kafka.apache.org/downloads
解压后修改基本配置 config/server.properties
broker.id=1
# 日志存储路径
log.dirs=/data/kafka/logs
# 默认分区数
num.partitions=1
依次执行下面命令
pom
<!-- Kafka client library: Scala 2.11 build, version 0.10.0.0 (matches the broker used in these notes) -->
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka_2.11</artifactId>
<version>0.10.0.0</version>
</dependency>
原生api
生产者
public class MyProducer {

    /** Shared producer instance, configured once in the static initializer. */
    private static KafkaProducer<String, String> producer;

    static {
        // Build the connection configuration.
        Properties config = new Properties();
        // Broker address (host:port)
        config.put("bootstrap.servers", "127.0.0.1:9092");
        // Serializer for message keys
        config.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        // Serializer for message values
        config.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        producer = new KafkaProducer<>(config);
    }

    /**
     * Fire-and-forget send: the record is handed to the producer and the
     * result of the send is never inspected.
     */
    private static void sendMessageForgetResult() {
        // Arguments: topic, key, value
        ProducerRecord<String, String> record =
                new ProducerRecord<>("imooc-kafka-study", "name", "ForgetResult");
        producer.send(record);
        producer.close();
    }

    public static void main(String[] args) throws Exception {
        sendMessageForgetResult();
    }
}
- 同步发送消息
/**
 * Synchronous send: block on the returned Future until the broker
 * acknowledges the record, then print where it was stored.
 */
private static void sendMessageSync() throws Exception {
    ProducerRecord<String, String> record =
            new ProducerRecord<>("imooc-kafka-study", "name", "sync");
    // send() returns a Future; get() blocks until the broker responds
    RecordMetadata metadata = producer.send(record).get();
    System.out.println(metadata.topic());
    // Partition the record landed in
    System.out.println(metadata.partition());
    // Offset within that partition (unique per message)
    System.out.println(metadata.offset());
    producer.close();
}
- 异步发送消息 (一边只管发送,一边只管接收)
/**
* 定义返回消息接收类
*/
/**
 * Callback invoked by the producer when an asynchronous send completes.
 * On success the metadata describes where the record was written;
 * on failure the exception argument is non-null.
 */
public class MyProducerCallback implements Callback {
    @Override
    public void onCompletion(RecordMetadata metadata, Exception exception) {
        if (exception == null) {
            System.out.println(metadata.topic());
            System.out.println(metadata.partition());
            System.out.println(metadata.offset());
            System.out.println("Coming in MyProducerCallback");
        } else {
            exception.printStackTrace();
        }
    }
}
/**
 * Asynchronous send: hand the record to the producer together with a
 * callback and return immediately; close() still waits for in-flight sends.
 */
private static void sendMessageCallback() {
    ProducerRecord<String, String> record =
            new ProducerRecord<>("imooc-kafka-study-x", "name", "callback");
    producer.send(record, new MyProducerCallback());
    producer.close();
}
自定义分区策略
策略类
/**
 * Custom partitioning strategy:
 *  - records must carry a String key, otherwise the send is rejected;
 *  - the key "name" is always routed to the last partition;
 *  - every other key is hashed (murmur2) over the remaining partitions,
 *    so records with the same key always land in the same partition.
 */
public class CustomPartitioner implements Partitioner {
    @Override
    public int partition(String topic,
                         Object key, byte[] keyBytes,
                         Object value, byte[] valueBytes,
                         Cluster cluster) {
        // All partitions of the target topic
        List<PartitionInfo> partitionInfos = cluster.partitionsForTopic(topic);
        int numPartitions = partitionInfos.size();
        // Reject records without a String key
        if (null == keyBytes || !(key instanceof String)) {
            throw new InvalidRecordException("kafka message must have key");
        }
        // Only one partition: everything goes there
        if (numPartitions == 1) {
            return 0;
        }
        // The key "name" is pinned to the last partition
        if (key.equals("name")) {
            return numPartitions - 1;
        }
        // Hash the key over the remaining partitions.
        // FIX: use Utils.toPositive (bit-mask) instead of Math.abs —
        // Math.abs(Integer.MIN_VALUE) is still negative, so a murmur2 hash of
        // Integer.MIN_VALUE would have produced an invalid partition number.
        // This mirrors Kafka's own DefaultPartitioner.
        return Utils.toPositive(Utils.murmur2(keyBytes)) % (numPartitions - 1);
    }

    @Override
    public void close() {
        // No resources to release
    }

    @Override
    public void configure(Map<String, ?> map) {
        // No extra configuration needed
    }
}
添加策略
public class MyProducer {

    /** Producer configured to use the custom partition strategy. */
    private static KafkaProducer<String, String> producer;

    static {
        // Connection configuration
        Properties config = new Properties();
        // Broker address (host:port)
        config.put("bootstrap.servers", "127.0.0.1:9092");
        // Serializer for message keys
        config.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        // Serializer for message values
        config.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        // Plug in the custom partitioner by its fully-qualified class name
        config.put("partitioner.class", "com.imooc.kafkastudy.CustomPartitioner");
        producer = new KafkaProducer<>(config);
    }
}
消费者
理解:
- 每个消费组都会把生产者发送的消息完整地消费一次。
- 消费组内,各个成员会均衡地分摊消息的消费。
- 消费组内,当 partition 数量小于组员数量时,排在后面的组员分不到可消费的分区,得不到消息。
自动提交位移
public class MyConsumer {

    private static KafkaConsumer<String, String> consumer;
    private static Properties properties;

    static {
        properties = new Properties();
        properties.put("bootstrap.servers", "127.0.0.1:9092");
        // Deserializer for message keys
        properties.put("key.deserializer",
                "org.apache.kafka.common.serialization.StringDeserializer");
        // Deserializer for message values
        properties.put("value.deserializer",
                "org.apache.kafka.common.serialization.StringDeserializer");
        // Consumer group id: KafkaStudy
        properties.put("group.id", "KafkaStudy");
    }

    /**
     * Consume with automatic offset commits, printing each record, until a
     * record with value "done" is seen; then finish the batch and shut down.
     */
    private static void generalConsumeMessageAutoCommit() {
        // Let the client commit offsets automatically
        properties.put("enable.auto.commit", true);
        consumer = new KafkaConsumer<>(properties);
        // Subscribe to the topic the producer wrote to
        consumer.subscribe(Collections.singleton("imooc-kafka-study-x"));
        try {
            boolean keepRunning = true;
            while (keepRunning) {
                // Poll timeout in milliseconds
                ConsumerRecords<String, String> records = consumer.poll(100);
                for (ConsumerRecord<String, String> record : records) {
                    System.out.println(String.format(
                            "topic = %s, partition = %s, key = %s, value = %s",
                            record.topic(), record.partition(),
                            record.key(), record.value()
                    ));
                    // Value "done" is the stop signal; the rest of the current
                    // batch is still processed before the loop exits.
                    if (record.value().equals("done")) {
                        keepRunning = false;
                    }
                }
            }
        } finally {
            consumer.close();
        }
    }

    public static void main(String[] args) {
        generalConsumeMessageAutoCommit();
    }
}
手动提交位移:循环内用 commitAsync 异步提交(非阻塞、吞吐量高),关闭前再用 commitSync 同步提交兜底
/**
 * Manual offset management: commitAsync() inside the poll loop for
 * throughput, plus one final blocking commitSync() on shutdown so the last
 * processed offsets are not lost.
 */
@SuppressWarnings("all")
private static void mixSyncAndAsyncCommit() {
    // FIX: the correct config key is "enable.auto.commit".
    // The original used "auto.commit.offset", which is not a real Kafka
    // consumer property — auto-commit silently stayed enabled and the
    // manual commits below never actually took over offset management.
    properties.put("enable.auto.commit", false);
    consumer = new KafkaConsumer<>(properties);
    consumer.subscribe(Collections.singletonList("imooc-kafka-study-x"));
    try {
        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(100);
            for (ConsumerRecord<String, String> record : records) {
                System.out.println(String.format(
                        "topic = %s, partition = %s, key = %s, " +
                        "value = %s",
                        record.topic(), record.partition(),
                        record.key(), record.value()
                ));
            }
            // Non-blocking commit: keeps throughput high; a failed async
            // commit is retried implicitly by later commits
            consumer.commitAsync();
        }
    } catch (Exception ex) {
        System.out.println("commit async error: " + ex.getMessage());
    } finally {
        try {
            // Final synchronous commit so the last offsets are durable
            consumer.commitSync();
        } finally {
            consumer.close();
        }
    }
}