In real business scenarios — for example, publishing a message when a specific task completes and consuming exactly that completion message — simply specifying the topic and letting Kafka assign partitions automatically no longer meets our needs. Instead, we need to produce to and consume from an explicitly specified partition. Without further ado, the configuration code below describes the producer and the consumer in detail.
producer Code
Note: the producer side needs two classes — one class that specifies the partitioner, and one class that is the actual producer.
Specifies the partitioner class
package com.wshare.common;
import kafka.javaapi.producer.Producer;
import kafka.producer.KeyedMessage;
import kafka.producer.ProducerConfig;
import java.util.Properties;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingDeque;
/**
 * Utility that produces keyed messages to a fixed Kafka topic using the old
 * (Scala-based) producer API, routing each message to a partition chosen by
 * the custom partitioner "com.wshare.common.CidPartitioner" configured below.
 *
 * Created by flyfy on 2017/11/15.
 */
public class KafkaPartitionerProducerUtil {
    private final Producer<String, String> producer;
    private final String topic = "test";
    private final String kafkaQueueHost = "1.1.1.1";
    private final int kafkaQueuePort = 9092;
    private final Properties props = new Properties();

    public KafkaPartitionerProducerUtil() {
        // String encoder for both key and value (old producer API).
        props.put("serializer.class", "kafka.serializer.StringEncoder");
        props.put("metadata.broker.list", kafkaQueueHost + ":" + kafkaQueuePort);
        // Route messages with our custom partitioner instead of Kafka's
        // default hash partitioner.
        props.put("partitioner.class", "com.wshare.common.CidPartitioner");
        this.producer = new Producer<String, String>(new ProducerConfig(props));
    }

    /**
     * Sends one message; the key is handed to the configured partitioner,
     * which picks the destination partition from it.
     *
     * @param key partitioning key
     * @param msg message payload
     */
    public void send(String key, String msg) {
        producer.send(new KeyedMessage<String, String>(topic, key, msg));
    }

    /**
     * Releases the producer's network resources. The original class never
     * closed the producer (connection leak); call this when done sending.
     */
    public void close() {
        producer.close();
    }
}
The producer class
package com.wshare.common;
import kafka.javaapi.producer.Producer;
import kafka.producer.KeyedMessage;
import kafka.producer.ProducerConfig;
import java.util.Properties;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingDeque;
/**
* Created by flyfy on 2017/11/15.
*/
public class KafkaPartitionerProducerUtil {
private final Producer<String, String> producer;
private final String topic = "test";
private final String kafkaQueueHost = "1.1.1.1";
private final int kafkaQueuePort = 9092;
private Properties props = new Properties();
private BlockingQueue<KeyedMessage<String, String>> blockingQueue = new LinkedBlockingDeque<KeyedMessage<String, String>>();
public KafkaPartitionerProducerUtil() {
props.put("serializer.class", "kafka.serializer.StringEncoder");
props.put("metadata.broker.list", kafkaQueueHost + ":" + kafkaQueuePort);
props.put("partitioner.class","com.wshare.common.CidPartitioner");
this.producer = new Producer<String, String>(new ProducerConfig(props));
}
Send void public (String Key, String MSG) {
producer.send (new new KeyedMessage <String, String> (Topic, Key, MSG));
}
}
Note that props.put("partitioner.class", "com.wshare.common.CidPartitioner") is what registers the partitioner class we defined.
Consumer code
package com.lyf.scandatatofile.Utils;
import kafka.cluster.Partition;
import kafka.consumer.ConsumerConfig;
import kafka.consumer.ConsumerIterator;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;
import java.io.File;
import java.io.FileWriter;
import java.text.SimpleDateFormat;
import java.util.*;
import java.util.concurrent.BlockingQueue;
/**
 * Consumer that reads from an explicitly assigned topic partition using the
 * new org.apache.kafka.clients consumer API, printing each record and
 * forwarding its value to the caller-supplied queue.
 *
 * Created by flyfy on 2017/10/7.
 */
public class KafkaConsumerUtil {
    final String topic = "test";
    final Properties props = new Properties();

    /**
     * Polls partition 0 of "kafka.wshare.match_meta_data.topic" forever
     * (this method never returns).
     *
     * @param groupid consumer group id to register under
     * @param bq      queue that receives each consumed record's value
     */
    public void execute(String groupid, BlockingQueue<String> bq) {
        props.put("bootstrap.servers", "1.1.1.1:9092");
        // Bug fix: the caller-supplied group id was previously ignored and
        // hard-coded to "test".
        props.put("group.id", groupid);
        props.put("auto.offset.reset", "earliest");
        props.put("enable.auto.commit", "true");
        props.put("auto.commit.interval.ms", "1000");
        props.put("session.timeout.ms", "30000");
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
        // subscribe() (broker-assigned partitions) and assign() (user-chosen
        // partition) are mutually exclusive; this class pins partition 0.
        //consumer.subscribe(Arrays.asList("IP_REGISTER_yangkai"));
        consumer.assign(Arrays.asList(new TopicPartition("kafka.wshare.match_meta_data.topic", 0)));
        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(100);
            for (ConsumerRecord<String, String> record : records) {
                System.out.println("offset" + record.offset() + "key"
                        + record.key() + "value" + record.value());
                // Bug fix: bq was accepted but never used; hand each value to
                // the caller's queue (non-blocking offer so the poll loop is
                // never stalled by a full queue).
                bq.offer(record.value());
            }
        }
    }
}
Consumers require special attention: consumer.subscribe(Arrays.asList("IP_REGISTER_yangkai")) and consumer.assign(Arrays.asList(new TopicPartition("kafka.wshare.match_meta_data.topic", 0))) are mutually exclusive in Kafka — only one of the two may be used. The former is the subscription mode for a specified topic, where the partition allocation policy is decided by Kafka; the latter is the consumption mode for a specified partition, where the partition assignment is defined by the user.