Kafka: pausing and resuming consumer consumption

// Pause kafka consumer suspend distribution partition 
consumer.unsubscribe (); // here will not unsubscribe pause too long subscription time-out error of
consumer.pause (consumer.assignment ());



// Re-create the consumer so that re-partitioning does not fail on reassignment.
this.open(null, null, null);
if (consumer == null) {
    Properties props = new Properties();
    props.put("bootstrap.servers", PropertiesUtil.getValue("bootstrap.servers"));
    // Consumer group id (e.g. "Spider2").
    props.put("group.id", constant.kafka_groupName);
    // Disable auto-commit: offsets are committed manually after processing.
    props.put("enable.auto.commit", "false");
    // max.poll.interval.ms is the maximum delay allowed between calls to
    // poll(). It puts an upper bound on how long the consumer may stay idle
    // before fetching more records. If poll() is not invoked before this
    // timeout expires, the consumer is considered failed and its group
    // rebalances, reassigning the partitions to other consumers. That is
    // exactly what happens when processing time (e.g. a Thread.sleep(6000))
    // exceeds max.poll.interval.ms: by the time we commit manually, the
    // partitions have already been handed to another consumer in the group.
    props.put("auto.commit.interval.ms", "3000");

    props.put("session.timeout.ms", "100000");
    props.put("request.timeout.ms", "200000");
    // Limit each poll() to a small batch of records.
    props.put("max.poll.records", "2");
    props.put("key.deserializer", StringDeserializer.class.getName());
    props.put("value.deserializer", StringDeserializer.class.getName());
    // NOTE(review): "group.name" is not a recognized Kafka consumer config
    // (the group is set via "group.id" above); Kafka will only log an
    // "unknown configuration" warning — confirm whether this line is needed.
    props.put("group.name", UUID.randomUUID().toString().replaceAll("-", ""));
    consumer = new KafkaConsumer<String, String>(props);

    // Topic subscription happens below, together with the rebalance listener.
}
// Register a Kafka ConsumerRebalanceListener: commit tracked offsets before
// partitions are revoked, and reset tracking when partitions are assigned.
listener = new ConsumerRebalanceListener() {
    @Override
    public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
        // printf needs %d for the thread id — "{}" is an SLF4J placeholder
        // and would be printed literally by printf.
        System.out.printf("threadId = %d, onPartitionsRevoked.%n",
                Thread.currentThread().getId());
        // Flush the offsets tracked so far before losing the partitions.
        consumer.commitSync(offsetsMap);
        consumer.commitSync();
    }

    @Override
    public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
        System.out.printf("threadId = %d, onPartitionsAssigned.%n",
                Thread.currentThread().getId());
        consumer.commitSync();
        // Start tracking offsets afresh for the newly assigned partitions.
        offsetsMap.clear();
    }
};

// Subscribe to every topic in the comma-separated list. The original indexed
// only entries [0], [1], [2], which throws ArrayIndexOutOfBoundsException when
// fewer than three topics are configured and silently ignores any extras.
consumer.subscribe(Arrays.asList(topicName.split(",")), listener);
// Resume delivery on whatever partitions end up assigned.
consumer.resume(consumer.assignment());

Originally published at www.cnblogs.com/yaohaitao/p/12172867.html