Kafka single-threaded example (Spring Kafka producer/consumer configuration)

package com.zkdj.message.common;

/**
 * Shared constants for the messaging module.
 *
 * <p>Holds the property keys used to look up Kafka settings from the
 * application configuration file (read by {@code ConfigManager}).
 */
public final class Constants {

	/** Non-instantiable constant holder. */
	private Constants() {
	}

	/**
	 * Property keys for entries in the configuration file.
	 */
	public static final class Pks {
		// Kafka connection settings
		/** Comma-separated broker list, e.g. "host1:9092,host2:9092". */
		public static final String KAFKA_SERVERS = "kafka.bootstrap.servers";
		/** Consumer group id used by this service's consumers. */
		public static final String KAFKA_CONSUMER_GROUP_ID = "kafka.consumer.group.id";
		/** Topic name key; presumably the topic this service uses — confirm against callers. */
		public static final String KAFKA_TOPIC = "kafka.topic";

		/** Non-instantiable constant holder. */
		private Pks() {
		}
	}

}
package com.zkdj.kafka.common.config;

import java.io.IOException;
import java.io.InputStream;
import java.util.Properties;

import org.apache.log4j.Logger;

import com.zkdj.kafka.utils.EmptyUtils;


/**
 * Lazily-initialized singleton that loads a .properties file from the
 * classpath and exposes simple key lookups.
 */
public class ConfigManager {
	// Class-specific logger (the original used the root logger, which makes
	// per-component log filtering impossible).
	private static final Logger _log = Logger.getLogger(ConfigManager.class);
	private static final String DEFAULT_PATH = "config.properties";
	private static ConfigManager instance;

	private Properties config;

	/** Loads the default configuration file on construction. */
	private ConfigManager() {
		config = new Properties();
		config(null);
	}

	/**
	 * Returns the shared instance, creating it on first call.
	 *
	 * <p>Synchronized: the original unsynchronized lazy check could create
	 * two instances under concurrent first access.
	 */
	public static synchronized ConfigManager getInstance() {
		if (instance == null) {
			instance = new ConfigManager();
		}
		return instance;
	}

	/**
	 * (Re)loads a properties file from the classpath. On any failure the
	 * previously loaded configuration is kept unchanged.
	 *
	 * @param path classpath-relative path; null/empty falls back to
	 *             {@value #DEFAULT_PATH}
	 */
	public void config(String path) {
		path = EmptyUtils.isEmpty(path) ? DEFAULT_PATH : path;
		// try-with-resources guarantees the stream is closed even on error.
		try (InputStream in = ConfigManager.class.getClassLoader().getResourceAsStream(path)) {
			if (in == null) {
				// getResourceAsStream returns null (not an exception) for a
				// missing resource; the original passed that null straight to
				// load() and relied on the resulting NPE being swallowed.
				_log.error("config file [" + path + "] not found on classpath");
				return;
			}
			Properties tmp = new Properties();
			tmp.load(in);
			// Swap in atomically only after a successful load.
			config = tmp;
		} catch (Exception e) {
			// load() can throw IOException or IllegalArgumentException
			// (malformed unicode escapes); keep the old config either way.
			_log.error("load config file [" + path + "] error -> " + e);
		}
	}

	/**
	 * Looks up a configuration value.
	 *
	 * @param key property key (see {@code Constants.Pks})
	 * @return the value, or null if the key is absent
	 */
	public String get(String key) {
		return config.getProperty(key);
	}
}
package com.zkdj.kafka.common.config;

import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.config.KafkaListenerContainerFactory;
import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.core.ProducerFactory;
import org.springframework.kafka.listener.ConcurrentMessageListenerContainer;
import org.springframework.kafka.listener.ContainerProperties;
import com.zkdj.kafka.common.config.Constants.Pks;




/**
 * Spring Kafka configuration.
 *
 * <p>Declares the producer factory/template and the consumer factory plus the
 * listener-container factory used by {@code @KafkaListener} endpoints. All
 * connection settings come from config.properties via {@link ConfigManager}.
 *
 * <p>NOTE(review): the import of {@code Constants.Pks} resolves to package
 * {@code com.zkdj.kafka.common.config}, while {@code Constants} above is
 * declared in {@code com.zkdj.message.common} — verify the real package.
 *
 * @author XJ
 */
@Configuration
public class KafKaConfig {

    /** Shared configuration accessor (reads config.properties). */
    protected static final ConfigManager config = ConfigManager.getInstance();

    /**
     * Producer client settings.
     *
     * @return mutable map of producer properties
     */
    public Map<String, Object> producerConfigs() {
        Map<String, Object> props = new HashMap<String, Object>();
        // Kafka cluster bootstrap servers
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, config.get(Pks.KAFKA_SERVERS));
        // Total bytes available for buffering unsent records
        props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, 40960);
        // How long send() may block when the buffer is full. NOTE(review):
        // the original comment said "default 60s" but the value is 6000 ms
        // (6 s) — confirm whether 60000 was intended.
        props.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, 6000);
        // No retries on transient send failures
        props.put(ProducerConfig.RETRIES_CONFIG, 0);
        // Per-partition batch size in bytes (ProducerBatch reuse size)
        props.put(ProducerConfig.BATCH_SIZE_CONFIG, 4096);
        // A batch is sent when full or after this linger time (ms)
        props.put(ProducerConfig.LINGER_MS_CONFIG, 1);
        // key and value serializers
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,
                "org.apache.kafka.common.serialization.StringSerializer");
        // Client id reported in broker metrics/logs
        props.put(ProducerConfig.CLIENT_ID_CONFIG, "producer.client.id.topinfo");

        return props;
    }

    /**
     * Producer factory backed by {@link #producerConfigs()}.
     *
     * @return the producer factory bean
     */
    @Bean
    public ProducerFactory<String, String> producerFactory() {
        return new DefaultKafkaProducerFactory<String, String>(producerConfigs());
    }

    /**
     * Template used by application code to send messages.
     *
     * @return the KafkaTemplate bean
     */
    @Bean
    public KafkaTemplate<String, String> kafkaTemplate() {
        return new KafkaTemplate<String, String>(producerFactory());
    }


    // ------------------------------------------------------------------------------------------------------------

    /**
     * Consumer client settings.
     *
     * @return mutable map of consumer properties
     */
    public Map<String, Object> consumerConfigs() {
        Map<String, Object> props = new HashMap<String, Object>();

        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, config.get(Pks.KAFKA_SERVERS));
        // Consumer group id
        props.put(ConsumerConfig.GROUP_ID_CONFIG, config.get(Pks.KAFKA_CONSUMER_GROUP_ID));
        // Broker-side auto commit is disabled; with Spring Kafka the listener
        // container then commits offsets itself (default ack mode) — confirm
        // against the spring-kafka version in use.
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
        // Auto-commit interval: has no effect while enable.auto.commit=false;
        // kept only for parity with the original configuration.
        props.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, 100);
        // Session timeout before the group coordinator evicts this consumer
        props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, 10000);
        // Start position when no committed offset exists or it is out of range
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");
        // key and value deserializers
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG,
                "org.apache.kafka.common.serialization.StringDeserializer");
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,
                "org.apache.kafka.common.serialization.StringDeserializer");
//        props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG,10);

        return props;
    }

    /**
     * Consumer factory backed by {@link #consumerConfigs()}.
     *
     * @return the consumer factory bean
     */
    @Bean
    public ConsumerFactory<String, String> consumerFactory() {
        return new DefaultKafkaConsumerFactory<>(consumerConfigs());
    }

    /**
     * Listener-container factory used by {@code @KafkaListener} endpoints.
     *
     * <p>The original had a stray empty statement (";") with a dangling
     * "disable ack auto-commit" comment that configured nothing; it was
     * removed. Ack mode is left at the Spring default.
     *
     * @return the listener container factory bean
     */
    @Bean
    public KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<String, String>> kafkaListenerContainerFactory() {

        ConcurrentKafkaListenerContainerFactory<String, String> factory = new ConcurrentKafkaListenerContainerFactory<>();
        // Wire in the consumer factory declared above
        factory.setConsumerFactory(consumerFactory());
        return factory;
    }

}
package com.zkdj.kafka.controller.kafka;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.support.SendResult;
import org.springframework.util.concurrent.ListenableFuture;
import org.springframework.util.concurrent.ListenableFutureCallback;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RestController;

/**
 * REST endpoint that publishes a message to the hard-coded "topinfo" topic.
 */
@RestController
@RequestMapping("/kafka")
public class KafkaProducer {

    /** SLF4J logger (replaces the original System.out error reporting). */
    private static final Logger logger = LoggerFactory.getLogger(KafkaProducer.class);

    @Autowired
    private KafkaTemplate<String, String> kafkaTemplate;

    /**
     * Sends {@code name} asynchronously to topic "topinfo".
     *
     * @param name the message payload
     * @return a fixed acknowledgement string; the send result arrives later
     *         via the callback because kafkaTemplate.send() is asynchronous
     */
    @RequestMapping("send")
    public String send(String name) {

        ListenableFuture<SendResult<String, String>> future = kafkaTemplate.send("topinfo", name);

        future.addCallback(new ListenableFutureCallback<SendResult<String, String>>() {

            @Override
            public void onSuccess(SendResult<String, String> result) {
                // Intentionally quiet on success, matching the original
                // (its success print was commented out).
            }

            @Override
            public void onFailure(Throwable ex) {
                // Log with the throwable so the full stack trace is kept;
                // the original printed only ex.getMessage() to stdout.
                logger.error("生产者-发送消息失败:" + ex.getMessage(), ex);
            }
        });

        return "test-ok";
    }

}
package com.zkdj.kafka.controller.kafka;

import java.util.List;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.support.Acknowledgment;
import org.springframework.stereotype.Component;

/**
 * Kafka consumer: logs every record received from the "topinfo" topic.
 *
 * @author sxj
 */
@Component
public class KafkaConsumer {

    private final Logger logger = LoggerFactory.getLogger(KafkaConsumer.class);


    /**
     * Handles one consumed record. Several topics can be subscribed at once
     * by listing them in the {@code topics} array.
     *
     * @param record the consumed record; key and value may be null
     *               (e.g. tombstone records), so neither is dereferenced
     */
    @KafkaListener(topics = { "topinfo" })
    public void receive(ConsumerRecord<?, ?> record) {
        // Parameterized logging avoids string concatenation and fixes the
        // NPE the original hit on record.value().toString() for null values.
        logger.info("消费得到的消息---key: {}", record.key());
        logger.info("消费得到的消息---value: {}", record.value());
    }

}

Source: reprinted from blog.csdn.net/My_SweetXue/article/details/109726196