拦截器介绍
- Producer拦截器(interceptor)是在Kafka 0.10版本被引入的,主要用于实现clients端的定制化控制逻辑。
- Interceptor的接口是org.apache.kafka.clients.producer.ProducerInterceptor,其定义的方法包括:
- 1.configure(configs)
- 2.onSend(ProducerRecord)
- 该方法封装进KafkaProducer.send方法中,即它运行在用户主线程中。
Producer确保在消息被序列化以及计算分区前调用该方法。
- 用户可以在该方法中对消息做任何操作,但最好保证不要修改消息所属的topic和分区,否则会影响目标分区的计算
- 3.onAcknowledgement(RecordMetadata, Exception)
该方法会在消息被应答或消息发送失败时调用
,并且通常都是在producer回调逻辑触发之前。onAcknowledgement运行在producer的IO线程中,因此不要在该方法中放入很重的逻辑,否则会拖慢producer的消息发送效率
- 4.close:
- 关闭interceptor,
主要用于执行一些资源清理工作
- 如前所述,interceptor可能被运行在多个线程中,因此在具体实现时用户需要自行确保线程安全。另外倘若指定了多个interceptor,则producer将按照指定顺序调用它们,并仅仅是捕获每个interceptor可能抛出的异常记录到错误日志中,而非向上传递。这在使用过程中要特别留意。
需求分析
- 实现一个简单的双interceptor组成的拦截链。
- 第1个interceptor将消息中的手机号打码,如13888888888 --> 138****8888
- 第2个interceptor在消息发送后统计成功发送消息数或失败发送消息数
代码实现
拦截器1
package cn.hanjiaxiaozhi.interceptor;
import org.apache.kafka.clients.producer.ProducerInterceptor;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import java.util.Map;
/**
 * Producer interceptor #1: masks the middle digits of the phone number carried
 * in the record value, e.g. 13888888888 --> 138****8888.
 *
 * <p>onSend runs on the user thread inside KafkaProducer.send(), before the
 * record is serialized and its partition is computed.
 */
public class MyInterceptor1_Mosaic implements ProducerInterceptor<String, String> {

    /**
     * Replaces characters 3..6 (0-based) of the value with "****" and returns
     * a new record; topic, partition, timestamp and key are kept unchanged.
     *
     * @param record the outgoing record; its value is assumed to be a phone
     *               number of at least 8 characters (shorter values would
     *               throw StringIndexOutOfBoundsException — TODO confirm
     *               inputs are always valid phone numbers)
     * @return a copy of the record with the masked value
     */
    @Override
    public ProducerRecord<String, String> onSend(ProducerRecord<String, String> record) {
        // value is already a String — the original's .toString() was redundant
        String phoneNum = record.value();
        String mosaicNum = phoneNum.substring(0, 3) + "****" + phoneNum.substring(7);
        // Fixes vs. original: use the generic constructor instead of a raw
        // type, and forward record.headers(), which were silently dropped.
        return new ProducerRecord<>(record.topic(),
                record.partition(),
                record.timestamp(),
                record.key(),
                mosaicNum,
                record.headers());
    }

    /** No-op: this interceptor does nothing on acknowledgement. */
    @Override
    public void onAcknowledgement(RecordMetadata metadata, Exception exception) {
    }

    /** No resources to release. */
    @Override
    public void close() {
    }

    /** No configuration needed. */
    @Override
    public void configure(Map<String, ?> configs) {
    }
}
拦截器2
package cn.hanjiaxiaozhi.interceptor;
import org.apache.kafka.clients.producer.ProducerInterceptor;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;

import java.util.Map;
import java.util.concurrent.atomic.AtomicInteger;
/**
 * Producer interceptor #2: counts successfully and unsuccessfully sent
 * messages and prints the totals when the producer is closed.
 */
public class MyInterceptor2_Statistics implements ProducerInterceptor<String, String> {

    // onAcknowledgement runs on the producer's I/O thread, while close() is
    // invoked from the thread calling producer.close(). Plain int fields give
    // no cross-thread visibility guarantee, so use AtomicInteger.
    private final AtomicInteger successCount = new AtomicInteger();
    private final AtomicInteger failCount = new AtomicInteger();

    /** Pass-through: this interceptor does not modify outgoing records. */
    @Override
    public ProducerRecord<String, String> onSend(ProducerRecord<String, String> record) {
        return record;
    }

    /**
     * Called on the producer I/O thread when a send is acknowledged or fails;
     * a null exception means success. Keep this method lightweight — heavy
     * work here slows down the producer.
     */
    @Override
    public void onAcknowledgement(RecordMetadata metadata, Exception exception) {
        if (exception == null) {
            successCount.incrementAndGet();
        } else {
            failCount.incrementAndGet();
        }
    }

    /** Prints the accumulated success/failure totals. */
    @Override
    public void close() {
        System.out.println("发送消息成功" + successCount.get() + "次");
        System.out.println("发送消息失败" + failCount.get() + "次");
    }

    /** No configuration needed. */
    @Override
    public void configure(Map<String, ?> configs) {
    }
}
测试-生产者
package cn.hanjiaxiaozhi.producer;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import java.util.Arrays;
import java.util.Properties;
/**
 * Demo producer: sends 10 phone-number messages synchronously through the
 * interceptor chain (masking + statistics) and prints partition/offset.
 */
public class MyKafkaProducer {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "node01:9092");
        // Wait for the full ISR to acknowledge each record.
        props.setProperty("acks", "all");
        props.setProperty("retries", "2");
        // Fix: the correct config key is "retry.backoff.ms" — the original
        // "retries.backoff.ms" does not exist, so the 20 ms backoff was
        // silently ignored.
        props.setProperty("retry.backoff.ms", "20");
        props.setProperty("buffer.memory", "10240000");
        props.setProperty("batch.size", "10240");
        props.setProperty("linger.ms", "25");
        props.setProperty("max.request.size", "102400");
        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        // Interceptors run in list order: mask first, then count the result.
        props.put("interceptor.classes", Arrays.asList(
                "cn.hanjiaxiaozhi.interceptor.MyInterceptor1_Mosaic",
                "cn.hanjiaxiaozhi.interceptor.MyInterceptor2_Statistics"
        ));
        KafkaProducer<String, String> producer = new KafkaProducer<>(props);
        for (int i = 0; i < 10; i++) {
            ProducerRecord<String, String> record =
                    new ProducerRecord<>("test_topic", "key_" + i, "138888888" + i);
            // .get() blocks until the broker acknowledges — synchronous send,
            // intentional here so partition/offset can be printed per record.
            RecordMetadata metadata = producer.send(record).get();
            System.out.println("同步发送后获得分区编号和offset :" + metadata.partition() + "---" + metadata.offset());
        }
        // close() flushes pending records and triggers each interceptor's close().
        producer.close();
    }
}
测试消费者
package cn.hanjiaxiaozhi.consumer;
import org.apache.kafka.clients.CommonClientConfigs;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import java.util.Arrays;
import java.util.Properties;
/**
 * Demo consumer with auto-commit enabled: subscribes to test_topic and prints
 * partition/offset/key/value for every record, forever.
 */
public class MyKafkaConsumerAutoCommit {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, "node01:9092");
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        // Offsets are committed automatically every second.
        props.put("enable.auto.commit", "true");
        props.put("auto.commit.interval.ms", "1000");
        // Fix: the original value "latest " had a trailing space; Kafka
        // validates this setting against [latest, earliest, none], so the
        // consumer would fail with a ConfigException at construction time.
        props.put("auto.offset.reset", "latest");
        props.put("group.id", "myconsumer");
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
        consumer.subscribe(Arrays.asList("test_topic"));
        // Poll loop runs until the process is killed (demo code — no
        // shutdown hook / consumer.close()).
        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(100);
            for (ConsumerRecord<String, String> record : records) {
                System.out.println("分区:" + record.partition() + " 偏移量:" + record.offset() + " key:" + record.key() + " value:" + record.value());
            }
        }
    }
}