[Kafka] Setting Kafka offset values in code


1. Overview

Reposted from: https://www.cnblogs.com/jinniezheng/p/6379639.html

The utility class below fetches and commits consumer-group offsets with the legacy kafka.javaapi OffsetFetchRequest / OffsetCommitRequest over a BlockingChannel.

package com.kafka.consumer.offset.update;

import kafka.common.*;
import kafka.common.OffsetAndMetadata;
import kafka.javaapi.*;
import kafka.network.BlockingChannel;
import org.apache.kafka.common.TopicPartition;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.*;
import java.util.Map.Entry;

public class KafkaUtilsV2 {
    
    
    private static Logger LOG = LoggerFactory.getLogger(KafkaUtilsV2.class);
    private static final int correlationId = 2;
    private static final String clientId = "internalConsumer";

    public static BlockingChannel createBlockingChannel(String bootstrapServers1) {
        List<String> hosts = new ArrayList<>();
        int port = 9092;
        BlockingChannel channel = null;
        String bootstrapServers = bootstrapServers1.replaceAll(" ", "");
        if (!bootstrapServers.isEmpty()) {
            String[] hostsAndPort = bootstrapServers.split(",");
            for (int i = 0; i < hostsAndPort.length; i++) {
    
    
                String host = hostsAndPort[i].split(":")[0];
                port = Integer.parseInt(hostsAndPort[i].split(":")[1]);
                hosts.add(host);
            }

            String[] hostsArray = new String[hosts.size()];
            for (int k = 0; k < hosts.size(); k++) {
    
    
                hostsArray[k] = hosts.get(k);
            }

            for (int j = 0; (j < hostsArray.length) && ((channel == null) || (!channel.isConnected())); j++)
                try {
    
    
                    //LOG.info("###testbug001: try to create BlockingChannel in {} times", Integer.valueOf(j + 1));
                    channel = new BlockingChannel(hostsArray[j].trim(), port,
                            BlockingChannel.UseDefaultBufferSize(),
                            BlockingChannel.UseDefaultBufferSize(), 5000);
                    channel.connect();
                } catch (Exception e) {
    
    
                    LOG.info("###>:channel connect but failed with the exception {}", e.getMessage());
                }
        } else {
    
    
            LOG.info("###>: bootstrapServers is null, so can not create blockingChannel");
        }
        return channel;
    }
    // Fetch the committed offsets for the given partitions of a topic

    public static Map<Integer, Long> getOffsetFromKafka(String bootstrapServers, String groupId, String topic, List<Integer> partitionsIds) {
    
    
        Map<Integer, Long> offsetMap = new HashMap<>();
        BlockingChannel channel = createBlockingChannel(bootstrapServers);
        if (channel.isConnected()) {
            List<TopicAndPartition> partitions = new ArrayList<>();
            for (Integer i : partitionsIds) {
                partitions.add(new TopicAndPartition(topic, i.intValue()));
            }
            OffsetFetchRequest fetchRequest = new OffsetFetchRequest(groupId, partitions,
                    (short) 1, correlationId, clientId);
            try {
    
    
                channel.send(fetchRequest.underlying());
                OffsetFetchResponse fetchResponse = OffsetFetchResponse.readFrom(channel.receive().payload());
                Map<TopicAndPartition, OffsetMetadataAndError> result = fetchResponse.offsets();
                for (Entry<TopicAndPartition, OffsetMetadataAndError> entry : result.entrySet()) {
    
    
                    TopicAndPartition topicAndPartition = entry.getKey();
                    OffsetMetadataAndError offsetMetadataAndError = entry.getValue();
                    int partition = topicAndPartition.partition();
                    long retriveOffset = offsetMetadataAndError.offset();
                    offsetMap.put(partition, retriveOffset);
                }
            } catch (Exception e) {
    
    
                LOG.warn("###>: send offsetFetchRequest with exception: {}", e.getMessage());
                e.printStackTrace();
            } finally {
    
    
                channel.disconnect();
            }
        } else {
    
    
            LOG.info("###>: BlockingChannel is not connected!");
        }
        return offsetMap;
    }

    public static Map<TopicPartition, Long> getOffsetFromKafkaByTopicAndMetadata(String bootstrapServers, String groupId,
                                                                                 Set<TopicAndPartition> topicPartitions) {
    
    
        Map<TopicPartition, Long> topicPartitionLongMap = new HashMap<>();
        BlockingChannel channel = createBlockingChannel(bootstrapServers);
        if (channel.isConnected()) {
    
    
            List<TopicAndPartition> partitions = new ArrayList<>(topicPartitions);
            OffsetFetchRequest fetchRequest = new OffsetFetchRequest(groupId, partitions,
                    (short) 1, correlationId, clientId);
            try {
    
    
                channel.send(fetchRequest.underlying());
                OffsetFetchResponse fetchResponse = OffsetFetchResponse.readFrom(channel.receive().payload());
                Map<TopicAndPartition, OffsetMetadataAndError> result = fetchResponse.offsets();
                for (Entry<TopicAndPartition, OffsetMetadataAndError> entry : result.entrySet()) {
    
    
                    TopicAndPartition topicAndPartition = entry.getKey();
                    TopicPartition topicPartition = new TopicPartition(topicAndPartition.topic(), topicAndPartition.partition());
                    OffsetMetadataAndError offsetMetadataAndError = entry.getValue();
                    long retriveOffset = offsetMetadataAndError.offset();
                    topicPartitionLongMap.put(topicPartition, retriveOffset);
                }
            } catch (Exception e) {
    
    
                LOG.warn("###>: send offsetFetchRequest with exception: {}", e.getMessage());
                e.printStackTrace();
            } finally {
    
    
                channel.disconnect();
            }
        } else {
    
    
            LOG.info("###>: BlockingChannel is not connected!");
        }
        return topicPartitionLongMap;
    }

    public static Map<Integer, Long> getOffsetFromKafkaByPartitionNum(String bootstrapServers, String groupId,
                                                                      String topic, int partitionsNum) {
    
    
        Map<Integer, Long> offsetMap = new HashMap();
        BlockingChannel channel = createBlockingChannel(bootstrapServers);
        if (channel.isConnected()) {
    
    
            List<TopicAndPartition> partitions = new ArrayList<>();
            for (int i = 0; i < partitionsNum; i++) {
    
    
                partitions.add(new TopicAndPartition(topic, i));
            }
            OffsetFetchRequest fetchRequest = new OffsetFetchRequest(groupId, partitions,
                    (short) 1, correlationId, clientId);
            try {
    
    
                channel.send(fetchRequest.underlying());
                OffsetFetchResponse fetchResponse = OffsetFetchResponse.readFrom(channel.receive().payload());
                Map<TopicAndPartition, OffsetMetadataAndError> result = fetchResponse.offsets();
                for (Entry<TopicAndPartition, OffsetMetadataAndError> entry : result.entrySet()) {
    
    
                    TopicAndPartition topicAndPartition = entry.getKey();
                    OffsetMetadataAndError offsetMetadataAndError = entry.getValue();
                    int partition = topicAndPartition.partition();
                    long retriveOffset = offsetMetadataAndError.offset();
                    offsetMap.put(partition, retriveOffset);
                }
            } catch (Exception e) {
    
    
                LOG.warn("###>: send offsetFetchRequest with exception: {}", e.getMessage());
                e.printStackTrace();
            } finally {
    
    
                channel.disconnect();
            }
        } else {
    
    
            LOG.info("###>: BlockingChannel is not connected!");
        }
        return offsetMap;
    }

    public static void commitOffsetToKafka(String bootstrapServers, String groupId, Map<TopicAndPartition, OffsetAndMetadata> offsets) {
    
    
        BlockingChannel channel = createBlockingChannel(bootstrapServers);
        if (channel.isConnected()) {
    
    
            OffsetCommitRequest commitRequest = new OffsetCommitRequest(groupId, offsets, correlationId, clientId, (short) 1);
            try {
    
    
                LOG.debug("###testbug: begin to send OffsetCommitRequest");
                channel.send(commitRequest.underlying());
                OffsetCommitResponse commitResponse = OffsetCommitResponse.readFrom(channel.receive().payload());
                if (commitResponse.hasError()) {
    
    
                    Map<TopicAndPartition, Object> result = commitResponse.errors();
                    for (Entry<TopicAndPartition, Object> entry : result.entrySet()) {
                        Short errorCode = (Short) entry.getValue();
                        if (errorCode == ErrorMapping.OffsetMetadataTooLargeCode()) {
                            // reduce the size of the metadata in the commit and retry
                        } else if (errorCode == ErrorMapping.NotCoordinatorForConsumerCode() ||
                                errorCode == ErrorMapping.ConsumerCoordinatorNotAvailableCode()) {
                            // the offset manager has moved: rediscover it and then retry the commit
                        } else {
                            // log and retry the commit
                        }
                    }
                }
                channel.disconnect();
            } catch (Exception e) {
    
    
                LOG.info("###>: commit offset request failed with exception {}", e.getMessage());
            }
        } else {
    
    
            LOG.info("###>: BlockingChannel is not connected!");
        }
    }

    public static Map<TopicAndPartition, OffsetAndMetadata> convertToCommon(Map<TopicPartition, org.apache.kafka.clients.consumer.OffsetAndMetadata> offsets) {
    
    
        Map<TopicAndPartition, OffsetAndMetadata> convertedOffsets = new HashMap<>();
        for (Map.Entry<TopicPartition, org.apache.kafka.clients.consumer.OffsetAndMetadata> offset : offsets.entrySet()) {
    
    
            TopicAndPartition topicAndPartition = new TopicAndPartition(offset.getKey().topic(), offset.getKey().partition());
            OffsetMetadata offsetMetadata = new OffsetMetadata(offset.getValue().offset(), Integer.toString(offset.getKey().partition()));
            OffsetAndMetadata offsetAndMetadata = new OffsetAndMetadata(offsetMetadata, System.currentTimeMillis(), System.currentTimeMillis() + 6 * 3600 * 3600);
            convertedOffsets.put(topicAndPartition, offsetAndMetadata);
        }
        return convertedOffsets;
    }


    public static void main(String[] args) {
    
    
        Map<TopicAndPartition, OffsetAndMetadata> offset = new HashMap<>();
        for (int i = 0; i < 1; i++) {
    
    
            TopicAndPartition topicAndPartition = new TopicAndPartition("fieldcompact02", i);
            Long offset1 = 80L;
            String metadata = Integer.toString(i);
            OffsetMetadata offsetMetadata = new OffsetMetadata(offset1, metadata);
            OffsetAndMetadata offsetAndMetadata = new OffsetAndMetadata(offsetMetadata, System.currentTimeMillis(), System.currentTimeMillis() + 2 * 60 * 60 * 1000);

            offset.put(topicAndPartition, offsetAndMetadata);
        }
        commitOffsetToKafka("hdh153:9092", "connectors-lsh-008", offset);

         /*Map<Integer, Long> test = KafkaUtilsV2.getOffsetFromKafkaByPartitionNum("lark001:9092", "hik_mac_info", "hik_mac_info", "test", 10);
         for(Entry<Integer, Long> entry : test.entrySet()) {
             Integer key = entry.getKey();
             Long value = entry.getValue();
             LOG.info("###testbug: key = {},and value = {}", key, value);
         }*/

        Map<TopicAndPartition, org.apache.kafka.clients.consumer.OffsetAndMetadata> topicPartitions = new HashMap<>();
        TopicAndPartition topicPartition = new TopicAndPartition("fieldcompact02", 0);
        Set<TopicAndPartition> topicAndPartitionSet = new HashSet<>();
        topicAndPartitionSet.add(topicPartition);
        org.apache.kafka.clients.consumer.OffsetAndMetadata offsetAndMetadata = new org.apache.kafka.clients.consumer.OffsetAndMetadata(0);
        topicPartitions.put(topicPartition, offsetAndMetadata);
        //Map<Integer, Long> offsets = KafkaUtilsV2.getOffsetFromKafkaByPartitionNum("hdh153:9092", "connectors-lsh-008", "fieldcompact02", 1);
        Map<TopicPartition, Long> offsets = KafkaUtilsV2.getOffsetFromKafkaByTopicAndMetadata("hdh153:9092", "connectors-lsh-018", topicAndPartitionSet);
        for (Map.Entry<TopicPartition, Long> offsetx : offsets.entrySet()) {
    
    
            //System.out.println("###test: topic = " + offset.getKey().topic() + ";partition = " + offset.getKey().partition() + "; offset = " + offset.getValue());
            LOG.info("###> partition = {}, offset = {}", offsetx.getKey().partition(), offsetx.getValue());
        }
    }
}
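
The class above goes through the legacy kafka.javaapi request classes. For comparison, the same "set a group's offset" operation can be done with the modern KafkaConsumer via commitSync. The sketch below is a minimal, untested equivalent; the broker address, group id, topic and offset value are simply the ones used in the main() above.

import java.util.Collections;
import java.util.Properties;

import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

public class OffsetSetter {

    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "hdh153:9092");   // same broker as the example above
        props.put("group.id", "connectors-lsh-008");     // same consumer group as the example above
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        TopicPartition tp = new TopicPartition("fieldcompact02", 0);
        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            // assign() rather than subscribe(), so no rebalance is needed before committing
            consumer.assign(Collections.singletonList(tp));
            // write offset 80 for this group/partition, the same value as in the legacy example
            consumer.commitSync(Collections.singletonMap(tp, new OffsetAndMetadata(80L)));
        }
    }
}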

Wiki page: https://cwiki.apache.org/confluence/display/KAFKA/Committing+and+fetching+consumer+offsets+in+Kafka

public class KafkaOffset {
    
    
	private String group = "gaia";
	private int correlationId = 0;
	final String clientId = "demoClientId";
 
	public void getOffset() {
    
    
		BlockingChannel channel = new BlockingChannel("192.168.40.28", 9092, BlockingChannel.UseDefaultBufferSize(), BlockingChannel.UseDefaultBufferSize(), 5000 /* read timeout in millis */);
		channel.connect();
 
		List<TopicAndPartition> partitions = Arrays.asList(0, 1, 2, 3, 4, 5, 6, 7).stream().map(i -> new TopicAndPartition("article_basic_info", i)).collect(Collectors.toList());
		// version 1 and above fetch from Kafka, version 0 fetches from ZooKeeper; in the author's test, version 0 worked while version 1 had no effect
		OffsetFetchRequest fetchRequest = new OffsetFetchRequest(group, partitions, (short) 0, correlationId, clientId);
		try {
    
    
			channel.send(fetchRequest.underlying());
			OffsetFetchResponse fetchResponse = OffsetFetchResponse.readFrom(channel.receive().buffer());
			for (TopicAndPartition partition : partitions) {
    
    
				OffsetMetadataAndError result = fetchResponse.offsets().get(partition);
				short offsetFetchErrorCode = result.error();
				if (offsetFetchErrorCode == ErrorMapping.NotCoordinatorForConsumerCode()) {
    
    
					channel.disconnect();
					// Go to step 1 and retry the offset fetch
				} else {
    
    
					long offset = result.offset();
					System.out.println(String.format("offset->%s->%d", partition.partition(), offset));
				}
			}
		}finally {
    
    
			channel.disconnect();
		}
	}
 
	public void commitOffset() {
    
    
 
		BlockingChannel channel = new BlockingChannel("192.168.40.28", 9092, BlockingChannel.UseDefaultBufferSize(), BlockingChannel.UseDefaultBufferSize(), 5000 /* read timeout in millis */);
		channel.connect();
		try {
    
    
			long now = System.currentTimeMillis();
			Map<TopicAndPartition, OffsetAndMetadata> offsets = new LinkedHashMap<>();
			offsets.put(new TopicAndPartition("article_basic_info", 0), new OffsetAndMetadata(10L, "associated metadata", now));
			offsets.put(new TopicAndPartition("article_basic_info", 1), new OffsetAndMetadata(20L, "more metadata", now));
			OffsetCommitRequest commitRequest = new OffsetCommitRequest(group, offsets, correlationId++, clientId, (short) 0 /* version */); // version 1 and above commit to Kafka, version 0 commits to ZooKeeper
			channel.send(commitRequest.underlying());
			OffsetCommitResponse commitResponse = OffsetCommitResponse.readFrom(channel.receive().buffer());
			System.out.println(String.format("commit hasError -> %s", commitResponse.hasError()));
		} finally {
    
    
			channel.disconnect();
		}
	}
 
	public static void main(String[] args) {
    
    
		KafkaOffset kafkaOffset = new KafkaOffset();
		kafkaOffset.getOffset();
		kafkaOffset.commitOffset();
		kafkaOffset.getOffset();
	}
}
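
The same fetch can be done with the modern consumer API instead of a raw OffsetFetchRequest. A minimal, untested sketch follows; the broker, group and topic names are the ones from the example above, and committed() only sees offsets stored in Kafka (the "version 1 and above" path), not ZooKeeper offsets.

import java.util.Properties;

import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

public class CommittedOffsetReader {

    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "192.168.40.28:9092");
        props.put("group.id", "gaia");
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            for (int partition = 0; partition < 8; partition++) {
                TopicPartition tp = new TopicPartition("article_basic_info", partition);
                // null means the group has no committed offset for this partition
                OffsetAndMetadata committed = consumer.committed(tp);
                System.out.println("offset->" + partition + "->"
                        + (committed == null ? "none" : committed.offset()));
            }
        }
    }
}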

Kafka monitoring: getting a partition's LogSize, Lag and BrokerId

package com.xgd.log.common;
 
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
 
import kafka.api.OffsetRequest;
import kafka.api.PartitionOffsetRequestInfo;
import kafka.cluster.Broker;
import kafka.common.TopicAndPartition;
import kafka.javaapi.OffsetResponse;
import kafka.javaapi.PartitionMetadata;
import kafka.javaapi.TopicMetadata;
import kafka.javaapi.TopicMetadataRequest;
import kafka.javaapi.TopicMetadataResponse;
import kafka.javaapi.consumer.SimpleConsumer;
 
public class KafkaHelper {
    
    
 
	/**
	 * Get the latest offset (logSize) of a kafka partition
	 * @param host
	 * @param port
	 * @param topic
	 * @param partition
	 * @return
	 */
	public static long getLogSize(String host,int port,String topic,int partition){
		String clientName = "Client_" + topic + "_" + partition;
		Broker leaderBroker = getLeaderBroker(host, port, topic, partition);
		if (leaderBroker == null) {
			System.out.println("Leader broker for the partition was not found");
			return 0;
		}
		String realHost = leaderBroker.host();
		SimpleConsumer simpleConsumer = new SimpleConsumer(realHost, port, 10000, 64*1024, clientName);
		TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partition);
		Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
		requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(OffsetRequest.LatestTime(), 1));
		kafka.javaapi.OffsetRequest request = new kafka.javaapi.OffsetRequest(requestInfo, OffsetRequest.CurrentVersion(), clientName);
		OffsetResponse response = simpleConsumer.getOffsetsBefore(request);
		if (response.hasError()) {
    
    
			System.out.println("Error fetching offset data, reason: " + response.errorCode(topic, partition));
			return 0;
		}
		long[] offsets = response.offsets(topic, partition);
		return offsets[0];
	}
	
	/**
	 * Get the id of the partition's leader broker
	 * @param host
	 * @param port
	 * @param topic
	 * @param partition
	 * @return
	 */
	public static Integer getBrokerId(String host,int port,String topic,int partition){
    
    
		Broker leaderBroker = getLeaderBroker(host, port, topic, partition);
		if (leaderBroker != null) {
    
    
			return leaderBroker.id();
		}
		return null;
	}
	/**
	 * Get the leader broker of a partition
	 * @param host
	 * @param port
	 * @param topic
	 * @param partition
	 * @return
	 */
	private static Broker getLeaderBroker(String host,int port,String topic,int partition){
    
    
		String clientName = "Client_Leader_LookUp";
		SimpleConsumer consumer = null;
		PartitionMetadata partitionMetaData = null;
		try {
    
    
			consumer = new SimpleConsumer(host, port, 10000, 64*1024, clientName);
			List<String> topics = new ArrayList<String>();
			topics.add(topic);
			TopicMetadataRequest request = new TopicMetadataRequest(topics);
			TopicMetadataResponse response = consumer.send(request);
			List<TopicMetadata> topicMetadataList = response.topicsMetadata();
			for(TopicMetadata topicMetadata : topicMetadataList){
    
    
				for(PartitionMetadata metadata : topicMetadata.partitionsMetadata()){
    
    
					if (metadata.partitionId() == partition) {
    
    
						partitionMetaData = metadata;
						break;
					}
				}
			}
			if (partitionMetaData != null) {
    
    
				return partitionMetaData.leader();
			}
		} catch (Exception e) {
			e.printStackTrace();
		} finally {
			if (consumer != null) {
				consumer.close();
			}
		}
		return null;
	}
 
}
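
A minimal usage sketch for the helper above; the host, port, topic and partition are placeholder values, not from the original article.

public class KafkaHelperDemo {

    public static void main(String[] args) {
        // latest offset (logSize) and leader broker id of partition 0 of a hypothetical topic "test"
        long logSize = KafkaHelper.getLogSize("localhost", 9092, "test", 0);
        Integer brokerId = KafkaHelper.getBrokerId("localhost", 9092, "test", 0);
        System.out.println("logSize=" + logSize + ", leader brokerId=" + brokerId);
    }
}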

Getting a consumer group's offset / logSize / lag through the Kafka Java API, for consumer-lag monitoring

package com.fengjr.elk.web.write;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
import java.util.Map.Entry;

import kafka.api.PartitionOffsetRequestInfo;
import kafka.common.ErrorMapping;
import kafka.common.OffsetMetadataAndError;
import kafka.common.TopicAndPartition;
import kafka.javaapi.*;
import kafka.javaapi.consumer.SimpleConsumer;
import kafka.network.BlockingChannel;

public class KafkaOffsetTools {
    
    

    public static void main(String[] args) {
    
    

        String topic = "app-log-all-beta";
        String broker = "10.255.73.160";
        int port = 9092;
        String group = "fengjr-elk-group-es";
        String clientId = "Client_app-log-all-beta_1";
        int correlationId = 0;
        BlockingChannel channel = new BlockingChannel(broker, port,
                BlockingChannel.UseDefaultBufferSize(),
                BlockingChannel.UseDefaultBufferSize(),
                5000 );
        channel.connect();

        List<String> seeds = new ArrayList<String>();
        seeds.add(broker);
        KafkaOffsetTools kot = new KafkaOffsetTools();

        TreeMap<Integer,PartitionMetadata> metadatas = kot.findLeader(seeds, port, topic);

        long sum = 0L;
        long sumOffset = 0L;
        long lag = 0L;
        List<TopicAndPartition> partitions = new ArrayList<TopicAndPartition>();
        for (Entry<Integer,PartitionMetadata> entry : metadatas.entrySet()) {
    
    
            int partition = entry.getKey();
            TopicAndPartition testPartition = new TopicAndPartition(topic, partition);
            partitions.add(testPartition);
        }
        OffsetFetchRequest fetchRequest = new OffsetFetchRequest(
                group,
                partitions,
                (short) 0,
                correlationId,
                clientId);
        for (Entry<Integer,PartitionMetadata> entry : metadatas.entrySet()) {
    
    
            int partition = entry.getKey();
            try {
    
    
                channel.send(fetchRequest.underlying());
                OffsetFetchResponse fetchResponse = OffsetFetchResponse.readFrom(channel.receive().payload());
                TopicAndPartition testPartition0 = new TopicAndPartition(topic, partition);
                OffsetMetadataAndError result = fetchResponse.offsets().get(testPartition0);
                short offsetFetchErrorCode = result.error();
                if (offsetFetchErrorCode == ErrorMapping.NotCoordinatorForConsumerCode()) {
    
    
                } else {
    
    
                    long retrievedOffset = result.offset();
                    sumOffset += retrievedOffset;
                }
                String leadBroker = entry.getValue().leader().host();
                String clientName = "Client_" + topic + "_" + partition;
                SimpleConsumer consumer = new SimpleConsumer(leadBroker, port, 100000,
                        64 * 1024, clientName);
                long readOffset = getLastOffset(consumer, topic, partition,
                        kafka.api.OffsetRequest.LatestTime(), clientName);
                sum += readOffset;
                System.out.println(partition+":"+readOffset);
                if(consumer!=null)consumer.close();
            } catch (Exception e) {
    
    
                channel.disconnect();
            }
        }

        System.out.println("logSize:"+sum);
        System.out.println("offset:"+sumOffset);

        lag = sum - sumOffset;
        System.out.println("lag:"+ lag);


    }

    public KafkaOffsetTools() {
    
    
    }


    public static long getLastOffset(SimpleConsumer consumer, String topic,
                                     int partition, long whichTime, String clientName) {
    
    
        TopicAndPartition topicAndPartition = new TopicAndPartition(topic,
                partition);
        Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
        requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(
                whichTime, 1));
        kafka.javaapi.OffsetRequest request = new kafka.javaapi.OffsetRequest(
                requestInfo, kafka.api.OffsetRequest.CurrentVersion(),
                clientName);
        OffsetResponse response = consumer.getOffsetsBefore(request);
        if (response.hasError()) {
    
    
            System.out
                    .println("Error fetching data Offset Data the Broker. Reason: "
                            + response.errorCode(topic, partition));
            return 0;
        }
        long[] offsets = response.offsets(topic, partition);
        return offsets[0];
    }

    private TreeMap<Integer,PartitionMetadata> findLeader(List<String> a_seedBrokers,
                                                          int a_port, String a_topic) {
    
    
        TreeMap<Integer, PartitionMetadata> map = new TreeMap<Integer, PartitionMetadata>();
        loop: for (String seed : a_seedBrokers) {
    
    
            SimpleConsumer consumer = null;
            try {
    
    
                consumer = new SimpleConsumer(seed, a_port, 100000, 64 * 1024,
                        "leaderLookup"+new Date().getTime());
                List<String> topics = Collections.singletonList(a_topic);
                TopicMetadataRequest req = new TopicMetadataRequest(topics);
                kafka.javaapi.TopicMetadataResponse resp = consumer.send(req);

                List<TopicMetadata> metaData = resp.topicsMetadata();
                for (TopicMetadata item : metaData) {
    
    
                    for (PartitionMetadata part : item.partitionsMetadata()) {
    
    
                        map.put(part.partitionId(), part);
                    }
                }
            } catch (Exception e) {
    
    
                System.out.println("Error communicating with Broker [" + seed
                        + "] to find Leader for [" + a_topic + "], Reason: " + e);
            } finally {
    
    
                if (consumer != null)
                    consumer.close();
            }
        }
        return map;
    }

}

Kafka monitoring: getting logSize, offset, lag and related metrics

import java.io.IOException;
import java.text.MessageFormat;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Properties;
import java.util.concurrent.CountDownLatch;
 
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
 
public class KafkaUtil {
    
    
	private static Logger logger = LoggerFactory.getLogger(KafkaUtil.class);
	private static final int ZOOKEEPER_TIMEOUT = 30000;
	private final CountDownLatch latch = new CountDownLatch(1);
 
	public ZooKeeper getZookeeper(String connectionString) {
    
    
		ZooKeeper zk = null;
		try {
    
    
			zk = new ZooKeeper(connectionString, ZOOKEEPER_TIMEOUT, new Watcher() {
    
    
				@Override
				public void process(WatchedEvent event) {
    
    
					if (Event.KeeperState.SyncConnected.equals(event.getState())) {
    
    
						latch.countDown();
					}
				}
			});
			latch.await();
		} catch (IOException e) {
    
    
			e.printStackTrace();
		} catch (InterruptedException e) {
    
    
			e.printStackTrace();
		}
		return zk;
	}
 
	public static Properties getConsumerProperties(String groupId, String bootstrap_servers) {
    
    
		Properties props = new Properties();
		props.put("group.id", groupId);
		props.put("bootstrap.servers", bootstrap_servers);
		props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
		props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
		return props;
	}
 
	/**
	 * Get logSize, offset and lag for a consumer group
	 * @param zk
	 * @param bootstrap_servers
	 * @param groupId
	 * @param topics pass null to query every topic the groupId has consumed
	 * @param sorted
	 * @return
	 * @throws Exception
	 */
	public List<Map<String, Object>> getLagByGroupAndTopic(ZooKeeper zk, String bootstrap_servers, String groupId,
			String[] topics, boolean sorted) throws Exception {
    
    
 
		List<Map<String, Object>> topicPatitionMapList = new ArrayList<>();
 
		// Get all topics the group has consumed
		List<String> topicList = null;
		if (topics == null || topics.length == 0) {
    
    
			try {
    
    
				topicList = zk.getChildren("/consumers/" + groupId + "/offsets", false);
			} catch (KeeperException | InterruptedException e) {
    
    
				logger.error("Failed to get topics from zookeeper: zkState: {}, groupId:{}", zk.getState(), groupId);
				throw new Exception("Failed to get topics from zookeeper");
			}
		} else {
    
    
			topicList = Arrays.asList(topics);
		}
 
		Properties consumeProps = getConsumerProperties(groupId, bootstrap_servers);
		logger.info("consumer properties:{}", consumeProps);
		KafkaConsumer<String, String> consumer = new KafkaConsumer<>(consumeProps);
 
		// Query the partitions of each topic
		for (String topic : topicList) {
    
    
			List<PartitionInfo> partitionsFor = consumer.partitionsFor(topic);
			// Query one topic at a time; because of the delay, this reduces the chance of a negative lag
			List<TopicPartition> topicPartitions = new ArrayList<>();

			// Build the TopicPartition list for this topic
			for (PartitionInfo partitionInfo : partitionsFor) {
    
    
				TopicPartition topicPartition = new TopicPartition(partitionInfo.topic(), partitionInfo.partition());
				topicPartitions.add(topicPartition);
			}
			// Query logSize (the latest end offset of each partition)
			Map<TopicPartition, Long> endOffsets = consumer.endOffsets(topicPartitions);
			for (Entry<TopicPartition, Long> entry : endOffsets.entrySet()) {
    
    
				TopicPartition partitionInfo = entry.getKey();
				// Read the committed offset from ZooKeeper
				String offsetPath = MessageFormat.format("/consumers/{0}/offsets/{1}/{2}", groupId, partitionInfo.topic(),
						partitionInfo.partition());
				byte[] data = zk.getData(offsetPath, false, null);
				long offset = Long.valueOf(new String(data));
 
				Map<String, Object> topicPatitionMap = new HashMap<>();
				topicPatitionMap.put("group", groupId);
				topicPatitionMap.put("topic", partitionInfo.topic());
				topicPatitionMap.put("partition", partitionInfo.partition());
				topicPatitionMap.put("logSize", endOffsets.get(partitionInfo));
				topicPatitionMap.put("offset", offset);
				topicPatitionMap.put("lag", endOffsets.get(partitionInfo) - offset);
				topicPatitionMapList.add(topicPatitionMap);
			}
		}
		consumer.close();
		
		if(sorted) {
    
    
			Collections.sort(topicPatitionMapList, new Comparator<Map<String,Object>>() {
    
    
				@Override
				public int compare(Map<String, Object> o1, Map<String, Object> o2) {
    
    
					if(o1.get("topic").equals(o2.get("topic"))) {
    
    
						return ((Integer)o1.get("partition")).compareTo((Integer)o2.get("partition"));
					}
					return ((String)o1.get("topic")).compareTo((String)o2.get("topic"));
				}
			});
		}
		
		return topicPatitionMapList;
	}
 
	public static void main(String[] args) throws Exception {
    
    
		String bootstrap_servers = "localhost:9092";
		String groupId = "interface-group-new";
		String[] topics = null; // e.g. {"test1", "test2", "test3"}
 
		KafkaUtil kafkaUtil = new KafkaUtil();
		String connectionString = "localhost:2181";
		ZooKeeper zk = kafkaUtil.getZookeeper(connectionString);
		if (zk == null) {
    
    
			throw new RuntimeException("Failed to connect to zookeeper");
		}
		List<Map<String, Object>> topicPatitionMapList = kafkaUtil.getLagByGroupAndTopic(zk, bootstrap_servers,
				groupId, topics, true);
 
		for (Map<String, Object> map : topicPatitionMapList) {
    
    
			System.out.println(map);
		}
		zk.close();
	}
}

Getting Kafka consumer lag in Java
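
The class above reads group offsets from ZooKeeper, which only covers consumers that commit there. With a Kafka 2.0+ client, the committed offsets of a group can be read through AdminClient and compared with the end offsets to get the lag. A minimal, untested sketch, reusing the bootstrap servers and group id from the previous example:

import java.util.Map;
import java.util.Properties;

import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;

public class ConsumerLagDemo {

    public static void main(String[] args) throws Exception {
        String bootstrapServers = "localhost:9092";
        String groupId = "interface-group-new";

        Properties props = new Properties();
        props.put("bootstrap.servers", bootstrapServers);
        props.put("group.id", groupId);
        props.put("key.deserializer", StringDeserializer.class.getName());
        props.put("value.deserializer", StringDeserializer.class.getName());

        try (AdminClient admin = AdminClient.create(props);
             KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {

            // committed offsets of every partition the group has consumed (stored in Kafka, not ZooKeeper)
            Map<TopicPartition, OffsetAndMetadata> committed =
                    admin.listConsumerGroupOffsets(groupId).partitionsToOffsetAndMetadata().get();

            // logSize: the latest end offset of each of those partitions
            Map<TopicPartition, Long> endOffsets = consumer.endOffsets(committed.keySet());

            for (Map.Entry<TopicPartition, OffsetAndMetadata> entry : committed.entrySet()) {
                if (entry.getValue() == null) {
                    continue; // partition without a committed offset
                }
                long logSize = endOffsets.get(entry.getKey());
                long offset = entry.getValue().offset();
                System.out.println(entry.getKey() + " logSize=" + logSize
                        + " offset=" + offset + " lag=" + (logSize - offset));
            }
        }
    }
}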


Reposted from blog.csdn.net/qq_21383435/article/details/111131572