Spring Boot series: integrating Kafka


PS: This Spring Boot series will be updated continuously; your support is appreciated. >_<
Kafka configuration differs somewhat between versions, so adapt it to the version you actually run; individual parameters are not explained in detail here.
This example uses: kafka-clients-0.10.1.1
maven:

<dependency>
	<groupId>org.springframework.kafka</groupId>
	<artifactId>spring-kafka</artifactId>
	<version>1.0.6.RELEASE</version> <!-- for Kafka 0.10 this version tag is not needed; for 0.9 it must be added -->
</dependency>
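
For completeness, here is a minimal sketch of a Spring Boot entry point that would pick up the configuration classes below (this class is not from the original post; the name is hypothetical and it assumes spring-boot-starter is on the classpath):

import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;

// Hypothetical bootstrap class: component scanning picks up the
// KafkaConsumerConfig and KafkaRec beans defined later in this post.
@SpringBootApplication
public class KafkaDemoApplication {
	public static void main(String[] args) {
		SpringApplication.run(KafkaDemoApplication.class, args);
	}
}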

properties:

kafka.consumer.servers=cluster-address,cluster-address
kafka.consumer.enable.auto.commit=false
kafka.consumer.session.timeout=6000
kafka.consumer.auto.commit.interval=100
kafka.consumer.auto.offset.reset=earliest
kafka.consumer.topic=topic
kafka.consumer.group.id=groupid
kafka.consumer.concurrency=5

kafka.producer.servers=
kafka.producer.retries=0
kafka.producer.batch.size=4096
kafka.producer.linger=1
kafka.producer.buffer.memory=40960

Kafka consumer configuration:
KafkaConsumerConfig.java:

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.annotation.EnableKafka;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.config.KafkaListenerContainerFactory;
import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.listener.ConcurrentMessageListenerContainer;

import com.ky.rdpcs.comm.kafka.KafkaRec;

import java.util.HashMap;
import java.util.Map;

@Configuration
@EnableKafka
public class KafkaConsumerConfig {

	@Value("${kafka.consumer.servers}")
	private String servers;
	@Value("${kafka.consumer.enable.auto.commit}")
	private boolean enableAutoCommit;
	@Value("${kafka.consumer.session.timeout}")
	private String sessionTimeout;
	@Value("${kafka.consumer.auto.commit.interval}")
	private String autoCommitInterval;
	@Value("${kafka.consumer.group.id}")
	private String groupId;
	@Value("${kafka.consumer.auto.offset.reset}")
	private String autoOffsetReset;
	@Value("${kafka.consumer.concurrency}")
	private int concurrency;
	
	@Bean
	public KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<String, String>> kafkaListenerContainerFactory() {
		ConcurrentKafkaListenerContainerFactory<String, String> factory = new ConcurrentKafkaListenerContainerFactory<>();
		factory.setConsumerFactory(consumerFactory());
		factory.setConcurrency(concurrency);
		factory.getContainerProperties().setPollTimeout(1500);
		return factory;
	}
	
	public ConsumerFactory<String, String> consumerFactory() {
		return new DefaultKafkaConsumerFactory<>(consumerConfigs());
	}

	public Map<String, Object> consumerConfigs() {
		Map<String, Object> propsMap = new HashMap<>();
		// charset used by StringDeserializer (defaults to UTF-8 when unset)
		propsMap.put("key.deserializer.encoding", "gb18030");
		propsMap.put("value.deserializer.encoding", "gb18030");
		propsMap.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, servers);
		propsMap.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, enableAutoCommit);
		propsMap.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, autoCommitInterval);
		propsMap.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, sessionTimeout);
		propsMap.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, autoOffsetReset);
		propsMap.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
		propsMap.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
		propsMap.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
		// throughput tuning: 32 MB buffers and up to 1000 records per poll
		propsMap.put(ConsumerConfig.RECEIVE_BUFFER_CONFIG, "33554432");
		propsMap.put(ConsumerConfig.FETCH_MIN_BYTES_CONFIG, "33554432");
		propsMap.put(ConsumerConfig.MAX_PARTITION_FETCH_BYTES_CONFIG, "33554432");
		propsMap.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, "1000");
		return propsMap;
	}
	
	@Bean
	public KafkaRec listener() {
		return new KafkaRec();
	}

}
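
Because enable.auto.commit is false, the container commits offsets according to its AckMode (BATCH by default in spring-kafka 1.x). If explicit commits are preferred, a minimal sketch (a variant I am adding, not part of the original post) switches the factory to manual acknowledgment and takes an Acknowledgment in the listener:

import org.springframework.kafka.listener.AbstractMessageListenerContainer;
import org.springframework.kafka.support.Acknowledgment;

// inside kafkaListenerContainerFactory(), before returning the factory:
factory.getContainerProperties().setAckMode(
		AbstractMessageListenerContainer.AckMode.MANUAL_IMMEDIATE);

// the listener method then commits each record explicitly:
@KafkaListener(topics = {"topic"})
public void onMessage(ConsumerRecord<String, String> record, Acknowledgment ack) {
	// ... business logic ...
	ack.acknowledge(); // commit this record's offset
}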

KafkaRec.java:

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.stereotype.Component;

/**
 * Entry point for receiving Kafka messages.
 */
@Component
public class KafkaRec {
	/**
	 * The listener invokes this method automatically: it consumes the message,
	 * the offset is committed, and the business code runs. (The high-level API
	 * offers no offset management, so consumption cannot start from a chosen offset.)
	 * topic: a category of messages, e.g. page views or click events.
	 * partition: a physical grouping of a topic; each partition is an ordered queue.
	 * offset: a sequential number locating each message appended to a partition;
	 *         at most a 64-bit long, i.e. up to 19 decimal digits.
	 * value: the message body.
	 */
	@KafkaListener(topics = {"topic"})
	public void onMessage(ConsumerRecord<String, String> record) {
		String topic = record.topic();
		String value = record.value();
		long offset = record.offset();
		int partition = record.partition();
		long startTime = System.currentTimeMillis();
	}

}
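
To verify the listener end to end without writing any producer code yet, Kafka's bundled console producer can push a test message (the path and broker address below depend on your installation):

bin/kafka-console-producer.sh --broker-list localhost:9092 --topic topic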

Due to time constraints, this post covers only the consumer side; the producer side will follow in a later update.
