Kafka Message Publishing
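The service class below wraps the old producer client that shipped with Kafka 0.8.x (kafka.javaapi.producer.Producer and kafka.producer.KeyedMessage, both removed in later releases). It exposes three operations: a synchronous send to a fixed partition key, an asynchronous send that scatters messages across partitions through a custom partitioner, and a method that starts a consumer group. The original post does not include the helper classes the service refers to (MessageBean, CustomEncoder, SimplePartitioner, GroupConsumerManager); hedged sketches of what they might look like follow the listing.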

package com.paile.kafka.service.impl;

import java.util.Properties;
import java.util.Random;

import kafka.javaapi.producer.Producer;
import kafka.producer.KeyedMessage;
import kafka.producer.ProducerConfig;

import org.apache.log4j.Logger;

import com.paile.kafka.bean.MessageBean;
import com.paile.kafka.service.IKafkaService;
import com.paile.utils.others.Const;

/***
 * Kafka message service class
 * 
 * @author libo
 * 
 */
public class KafkaServiceImpl implements IKafkaService {

	private Logger logger = Logger.getLogger(KafkaServiceImpl.class);
	/***
	 * Send a single message (synchronous, fixed partition key)
	 */
	public void sendSinglePartitionMessage(String brokerList, String topic, Object message)
			throws Exception {
		Producer<String, Object> producer = null;
		try {
			Properties props = new Properties();
			props.put("metadata.broker.list", brokerList);
			props.put("serializer.class", "com.paile.kafka.CustomEncoder");
			props.put("key.serializer.class", "kafka.serializer.StringEncoder");
			props.put("producer.type", "sync");// "sync" = synchronous send, "async" = asynchronous send
			props.put("request.required.acks", "1");
			producer = new Producer<String, Object>(new ProducerConfig(props));

			// key.serializer.class is StringEncoder, so the partition key must be sent as a String.
			KeyedMessage<String, Object> data = new KeyedMessage<String, Object>(topic,
					String.valueOf(Const.defaultPartitionKey), message);
			producer.send(data);
		} catch (Exception e) {
			logger.error("Failed to send message to Kafka", e);
			throw e;
		} finally {
			if (producer != null) {
				producer.close();
			}
		}
	}
	/***
	 * Send a message distributed across multiple partitions (asynchronous, random key)
	 */
	public void sendMutilPartitionMessage(String brokerList, String topic, MessageBean message)
			throws Exception {
		Producer<String, MessageBean> producer = null;
		try {
			Properties props = new Properties();
			props.put("metadata.broker.list", brokerList);
			props.put("serializer.class", "com.paile.kafka.productor.CustomEncoder");
			props.put("key.serializer.class", "kafka.serializer.StringEncoder");
			props.put("producer.type", "async");// "sync" = synchronous send, "async" = asynchronous send
			props.put("partitioner.class", "com.paile.kafka.productor.SimplePartitioner");// custom partitioning class
			props.put("request.required.acks", "1");
			producer = new Producer<String, MessageBean>(new ProducerConfig(props));

			// Use a random numeric key; the partitioner maps it to a partition.
			Random random = new Random();
			int partitionKey = random.nextInt(255);
			KeyedMessage<String, MessageBean> data = new KeyedMessage<String, MessageBean>(topic,
					String.valueOf(partitionKey), message);
			producer.send(data);
		} catch (Exception e) {
			// Rethrow the original exception so the stack trace is preserved.
			logger.error("Failed to send message to Kafka", e);
			throw e;
		} finally {
			if (producer != null) {
				producer.close();
			}
		}
	}
	/***
	 * Receive messages: start a consumer group for the given topic
	 */
	@Override
	public void startConsumer(String zookeeperConnect, String groupId, String topic, int threads)
			throws Exception {
		GroupConsumerManager example = new GroupConsumerManager(zookeeperConnect, groupId, topic);
		try {
			example.run(threads);
		} catch (Exception e) {
			logger.error("Failed to start Kafka consumer", e);
		}
		try {
			// Let the consumer threads run for a while before shutting down (demo only).
			Thread.sleep(10000);
		} catch (InterruptedException ie) {
			Thread.currentThread().interrupt();
		}
		example.shutdown();
	}
	
	public static void main(String[] args) {
		KafkaServiceImpl service = new KafkaServiceImpl();
		try {
			for (int i = 0; i < 10; i++) {
				MessageBean bean = new MessageBean();
				bean.setId("00" + i);
				bean.setData("111111111111111");
				bean.setImg(new byte[0]);
				service.sendMutilPartitionMessage("192.168.1.101:9092", "paile01", bean);
			}
			System.out.println("All test messages sent.");
		} catch (Exception e) {
			e.printStackTrace();
		}
	}

}
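The post does not show MessageBean or the custom encoder configured via serializer.class (the two producer methods even reference it under slightly different package names, com.paile.kafka.CustomEncoder and com.paile.kafka.productor.CustomEncoder). Below is a minimal, hypothetical sketch assuming the latter package name and plain Java serialization; the old producer instantiates encoders reflectively and expects a constructor that takes kafka.utils.VerifiableProperties.

package com.paile.kafka.bean;

import java.io.Serializable;

/***
 * Hypothetical sketch of the message bean used above; it must be Serializable
 * if the encoder below relies on Java serialization.
 */
public class MessageBean implements Serializable {

	private static final long serialVersionUID = 1L;

	private String id;
	private String data;
	private byte[] img;

	public String getId() { return id; }
	public void setId(String id) { this.id = id; }
	public String getData() { return data; }
	public void setData(String data) { this.data = data; }
	public byte[] getImg() { return img; }
	public void setImg(byte[] img) { this.img = img; }
}

And the encoder itself:

package com.paile.kafka.productor;

import java.io.ByteArrayOutputStream;
import java.io.ObjectOutputStream;

import kafka.serializer.Encoder;
import kafka.utils.VerifiableProperties;

import com.paile.kafka.bean.MessageBean;

/***
 * Hypothetical sketch of the encoder configured via serializer.class:
 * turns a MessageBean into bytes with plain Java serialization.
 */
public class CustomEncoder implements Encoder<MessageBean> {

	// The old producer creates encoders reflectively through this constructor.
	public CustomEncoder(VerifiableProperties props) {
	}

	@Override
	public byte[] toBytes(MessageBean bean) {
		try {
			ByteArrayOutputStream bos = new ByteArrayOutputStream();
			ObjectOutputStream oos = new ObjectOutputStream(bos);
			oos.writeObject(bean);
			oos.close();
			return bos.toByteArray();
		} catch (Exception e) {
			throw new RuntimeException("Failed to serialize MessageBean", e);
		}
	}
}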

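partitioner.class points at com.paile.kafka.productor.SimplePartitioner, which is also not shown. A minimal sketch, assuming Kafka 0.8.1+ where kafka.producer.Partitioner takes an Object key: since the keys sent above are numeric strings (0-254), the partition is simply the key value modulo the partition count.

package com.paile.kafka.productor;

import kafka.producer.Partitioner;
import kafka.utils.VerifiableProperties;

/***
 * Hypothetical sketch of the partitioner configured via partitioner.class.
 */
public class SimplePartitioner implements Partitioner {

	// Required reflective constructor, as with the encoder.
	public SimplePartitioner(VerifiableProperties props) {
	}

	@Override
	public int partition(Object key, int numPartitions) {
		try {
			// Keys produced above are numeric strings, e.g. "42".
			return Integer.parseInt(String.valueOf(key)) % numPartitions;
		} catch (NumberFormatException e) {
			// Fall back to a non-negative hash for any other key.
			return (String.valueOf(key).hashCode() & 0x7fffffff) % numPartitions;
		}
	}
}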

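Finally, startConsumer() relies on a GroupConsumerManager that the post leaves out as well. Its constructor, run(int) and shutdown() calls match the standard high-level (ZooKeeper based) consumer group pattern of Kafka 0.8, so a sketch along those lines might look like this; the timeout values and the println handling are illustrative only.

package com.paile.kafka.service.impl;

import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

import kafka.consumer.ConsumerConfig;
import kafka.consumer.ConsumerIterator;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;

/***
 * Hypothetical sketch of the consumer manager used by startConsumer().
 */
public class GroupConsumerManager {

	private final ConsumerConnector consumer;
	private final String topic;
	private ExecutorService executor;

	public GroupConsumerManager(String zookeeperConnect, String groupId, String topic) {
		Properties props = new Properties();
		props.put("zookeeper.connect", zookeeperConnect);
		props.put("group.id", groupId);
		props.put("zookeeper.session.timeout.ms", "4000");
		props.put("auto.commit.interval.ms", "1000");
		this.consumer = kafka.consumer.Consumer.createJavaConsumerConnector(new ConsumerConfig(props));
		this.topic = topic;
	}

	public void run(int threads) {
		Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
		topicCountMap.put(topic, threads);
		Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = consumer.createMessageStreams(topicCountMap);
		List<KafkaStream<byte[], byte[]>> streams = consumerMap.get(topic);

		// One consuming thread per stream.
		executor = Executors.newFixedThreadPool(threads);
		for (final KafkaStream<byte[], byte[]> stream : streams) {
			executor.submit(new Runnable() {
				public void run() {
					ConsumerIterator<byte[], byte[]> it = stream.iterator();
					while (it.hasNext()) {
						// The payload is the Java-serialized MessageBean; deserialize as needed.
						byte[] payload = it.next().message();
						System.out.println("Received " + payload.length + " bytes");
					}
				}
			});
		}
	}

	public void shutdown() {
		if (consumer != null) consumer.shutdown();
		if (executor != null) executor.shutdown();
	}
}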
Reposted from littie1987.iteye.com/blog/2193132