Kafka monitoring metrics API

The functionality is similar to Kafka Eagle and Kafka Manager, but some of the metrics exposed here are more complete. The code targets Kafka 2.0 and needs the following dependency.

        <dependency>
            <groupId>org.apache.kafka</groupId>
            <artifactId>kafka-clients</artifactId>
            <version>2.0.0</version>
        </dependency>

Base class

/**
 * Shared base class: every client keeps the connection descriptor and is AutoCloseable,
 * so it can be used in try-with-resources; subclasses override close() to release their clients.
 */
public abstract class KafkaBase implements AutoCloseable {

    private final KafkaDataSource kafkaDataSource;

    public KafkaBase(KafkaDataSource kafkaDataSource) {
        this.kafkaDataSource = kafkaDataSource;
    }

    @Override
    public void close() {}
}

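Every client below is built from a KafkaDataSource and waits on AdminClient futures with Constant.KAFKA_GET_MAX_WAIT_TIME, but neither class is shown in the original post. A minimal sketch of what they might look like, inferred from the getters and constants used later (the field names, the Lombok annotation and the 30-second timeout are assumptions, each class in its own file):

import lombok.Data;

/** Hypothetical connection descriptor; fields are inferred from the getters used below. */
@Data
public class KafkaDataSource {
    private String bootstrapServers;   // e.g. "broker1:9092,broker2:9092"
    private String keySerializer;      // e.g. org.apache.kafka.common.serialization.StringSerializer
    private String valueSerializer;
    private String keyDeserializer;    // e.g. org.apache.kafka.common.serialization.StringDeserializer
    private String valueDeserializer;
}

/** Hypothetical constants class; the actual timeout value is not given in the post. */
public class Constant {
    /** Maximum time (seconds) to wait on AdminClient futures. */
    public static final long KAFKA_GET_MAX_WAIT_TIME = 30L;
}
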
Kafka topic metrics

public class KafkaAdmin extends KafkaBase {

    private AdminClient adminClient;

    public KafkaAdmin(KafkaDataSource kafkaDataSource) {
        super(kafkaDataSource);
        Properties props = new Properties();
        props.setProperty(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaDataSource.getBootstrapServers());
        adminClient = AdminClient.create(props);
    }

    /**
     * Create a topic.
     *
     * @param newTopic topic definition (name, partition count, replication factor)
     */
    public void createTopic(NewTopic newTopic) throws InterruptedException, ExecutionException, TimeoutException {
        adminClient.createTopics(Collections.singletonList(newTopic))
                .all()
                .get(Constant.KAFKA_GET_MAX_WAIT_TIME, TimeUnit.SECONDS);
    }

    public void deleteTopic(Set<String> topics) throws InterruptedException, ExecutionException, TimeoutException {
        adminClient.deleteTopics(topics)
                .all()
                .get(Constant.KAFKA_GET_MAX_WAIT_TIME, TimeUnit.SECONDS);
    }

    public Set<String> listTopics() throws InterruptedException, ExecutionException, TimeoutException {
        return adminClient.listTopics(new ListTopicsOptions().listInternal(true))
                .names()
                .get(Constant.KAFKA_GET_MAX_WAIT_TIME, TimeUnit.SECONDS);
    }

    public Set<TopicDescriptionDto> describeTopics(Set<String> topics) throws InterruptedException, ExecutionException, TimeoutException {
        Set<TopicDescriptionDto> topicsDesc = new HashSet<>(8);
        adminClient.describeTopics(topics)
                .all()
                .get(Constant.KAFKA_GET_MAX_WAIT_TIME, TimeUnit.SECONDS)
                .values()
                .forEach(e -> {
                    List<TopicPartitionInfoDto> list = new ArrayList<>();
                    e.partitions().forEach(p -> list.add(new TopicPartitionInfoDto(p.partition(), p.leader().toString(), p.replicas().toString(), p.isr().toString())));
                    topicsDesc.add(new TopicDescriptionDto(e.name(), e.isInternal(), list));
                });
        return topicsDesc;
    }

    /**
     * The partition count of a topic can only be increased, never decreased.
     *
     * @param topicName    topic to modify
     * @param toPartitions target total number of partitions
     */
    public void incrTopicPartitions(String topicName, int toPartitions) throws InterruptedException, ExecutionException, TimeoutException {
        adminClient.createPartitions(Collections.singletonMap(topicName, NewPartitions.increaseTo(toPartitions)))
                // To also pin the new replicas to specific brokers, pass assignments as well
                // (here 1 and 2 would be broker ids):
                // NewPartitions.increaseTo(toPartitions, Arrays.asList(Arrays.asList(1, 2)))
                .all()
                .get(Constant.KAFKA_GET_MAX_WAIT_TIME, TimeUnit.SECONDS);
    }

    public KafkaClusterDescribeDto getKafkaClusterDescribe() throws InterruptedException, ExecutionException, TimeoutException {
        DescribeClusterResult describeClusterResult = adminClient.describeCluster();
        return new KafkaClusterDescribeDto(describeClusterResult.nodes().get(Constant.KAFKA_GET_MAX_WAIT_TIME, TimeUnit.SECONDS).toString(),
                describeClusterResult.controller().get(Constant.KAFKA_GET_MAX_WAIT_TIME, TimeUnit.SECONDS).toString(),
                describeClusterResult.clusterId().get(Constant.KAFKA_GET_MAX_WAIT_TIME, TimeUnit.SECONDS));
    }

    public Map<String, String> getTopicConfigs(String topic) throws Exception {
        Map<String, String> topicConfigs = new HashMap<>(32);
        // for broker-level config, use ConfigResource.Type.BROKER with the broker id instead
        ConfigResource configResource = new ConfigResource(ConfigResource.Type.TOPIC, topic);
        DescribeConfigsResult describeConfigsResult = adminClient.describeConfigs(Arrays.asList(configResource));
        Map<ConfigResource, Config> resourceConfigMap = describeConfigsResult.all().get(Constant.KAFKA_GET_MAX_WAIT_TIME, TimeUnit.SECONDS);
        resourceConfigMap.get(configResource).entries().forEach(configEntry -> {
            topicConfigs.put(configEntry.name(), configEntry.value());
        });
        return topicConfigs;
    }

    /**
     * @param topic
     * @param topicConfigs TopicConfig
     * @throws Exception
     */
    public void alterTopicConfig(String topic, Map<String, String> topicConfigs) throws Exception {
        Map<ConfigResource, Config> configMap = new HashMap<>();
        Set<ConfigEntry> configEntries = new HashSet<>();
        topicConfigs.entrySet().forEach(entry -> configEntries.add(new ConfigEntry(entry.getKey(), entry.getValue())));
        configMap.put(new ConfigResource(ConfigResource.Type.TOPIC, topic), new Config(configEntries));
        adminClient.alterConfigs(configMap)
                .all()
                .get(Constant.KAFKA_GET_MAX_WAIT_TIME, TimeUnit.SECONDS);
    }

    /* Incremental variant (requires kafka-clients 2.3+, not the 2.0.0 dependency used here):
     public static void alterTopicConfigNew() throws Exception {
         Map<ConfigResource, Collection<AlterConfigOp>> configMap = new HashMap<>();
         ConfigResource configResource = new ConfigResource(ConfigResource.Type.TOPIC, TOPIC_NAME);
         AlterConfigOp alterConfigOp = new AlterConfigOp(new ConfigEntry("preallocate", "false"), AlterConfigOp.OpType.SET);
         configMap.put(configResource, Arrays.asList(alterConfigOp));
         AlterConfigsResult alterConfigsResult = adminClient.incrementalAlterConfigs(configMap);
         alterConfigsResult.all().get();
     }*/

    @Override
    public void close() {
        adminClient.close();
    }
}

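A short usage sketch, assuming the hypothetical KafkaDataSource from the earlier sketch; KafkaAdmin works in try-with-resources because KafkaBase implements AutoCloseable. The broker address and topic name are placeholders:

import org.apache.kafka.clients.admin.NewTopic;

public class KafkaAdminExample {
    public static void main(String[] args) throws Exception {
        KafkaDataSource ds = new KafkaDataSource();
        ds.setBootstrapServers("localhost:9092");   // assumed address
        try (KafkaAdmin admin = new KafkaAdmin(ds)) {
            // create a topic with 3 partitions and replication factor 1
            admin.createTopic(new NewTopic("demo-topic", 3, (short) 1));
            System.out.println(admin.listTopics());
            System.out.println(admin.getTopicConfigs("demo-topic"));
        }
    }
}
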

Kafka consumer group metrics

@Slf4j
public class KafkaConsumerGroup extends KafkaBase {

    private final AdminClient adminClient;

    private Consumer<String, String> consumer;

    public KafkaConsumerGroup(KafkaDataSource kafkaDataSource) {
        super(kafkaDataSource);
        Properties props = new Properties();
        props.setProperty(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaDataSource.getBootstrapServers());
        adminClient = AdminClient.create(props);

        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaDataSource.getBootstrapServers());
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, kafkaDataSource.getKeyDeserializer());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, kafkaDataSource.getValueDeserializer());
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "consumer-" + UUID.randomUUID());
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
        consumer = new org.apache.kafka.clients.consumer.KafkaConsumer<>(props);

    }

    public Set<String> getAllConsumerGroups() throws InterruptedException, ExecutionException {
        Set<String> groupIds = new HashSet<>();
        //(groupId='console-consumer-44009', isSimpleConsumerGroup=false)
        adminClient.listConsumerGroups()
                .all()
                .get()
                .forEach(consumerGroupListing -> groupIds.add(consumerGroupListing.groupId()));
        return groupIds;
    }

    public void deleteConsumerGroupId(List<String> groupIds) throws InterruptedException, ExecutionException {
        adminClient.deleteConsumerGroups(groupIds).all().get();
    }

    public Map<String, List<KafkaConsumerGroupDescribeDto>> getConsumerGroupDesc(List<String> groupIds) throws InterruptedException, ExecutionException {
        //g1 -> {ConsumerGroupDescription@2150} "(groupId=g1, isSimpleConsumerGroup=false, members=, partitionAssignor=, state=Empty, coordinator=bdpapp01:9092 (id: 0 rack: null))"
        //console-consumer-44009 -> {ConsumerGroupDescription@2155} "(groupId=console-consumer-44009, isSimpleConsumerGroup=false, members=(memberId=consumer-console-consumer-44009-1-2468e397-d96d-496c-87ff-200c02a67653, clientId=consumer-console-consumer-44009-1, host=/11.51.196.255, assignment=(topicPartitions=a1-0)), partitionAssignor=range, state=Stable, coordinator=bdpapp01:9092 (id: 0 rack: null))"
        Map<String, ConsumerGroupDescription> consumerGroupDescriptionMap = adminClient.describeConsumerGroups(groupIds).all().get();

        Map<String, List<KafkaConsumerGroupDescribeDto>> listMap = new HashMap<>(5);
        List<KafkaConsumerGroupDescribeDto> list;
        Map<TopicPartition, OffsetAndMetadata> offsetAndMetadataMap;
        for (ConsumerGroupDescription desc : consumerGroupDescriptionMap.values()) {
            list = new ArrayList<>();
            // eg:{TopicPartition@2378} "a1-0" -> {OffsetAndMetadata@2379} "OffsetAndMetadata{offset=3, metadata=''}"
            offsetAndMetadataMap = adminClient.listConsumerGroupOffsets(desc.groupId()).partitionsToOffsetAndMetadata().get();

            switch (desc.state()) {
                case STABLE:
                    // the group has live members and committed offsets
                    list.addAll(getGroupDescWithMembers(offsetAndMetadataMap, desc));
                    break;
                case EMPTY:
                    // no live members, but committed offsets still exist
                    list.addAll(getGroupDescWithoutMembers(offsetAndMetadataMap, desc));
                    break;
                case DEAD:
                    list.add(new KafkaConsumerGroupDescribeDto(desc.groupId(), desc.coordinator().toString(), ConsumerGroupState.DEAD));
                    break;
                default:
                    // UNKNOWN, PREPARING_REBALANCE, COMPLETING_REBALANCE
                    if (desc.members().size() > 0) { // handled like STABLE
                        list.addAll(getGroupDescWithMembers(offsetAndMetadataMap, desc));
                    } else if (desc.members().size() == 0 && offsetAndMetadataMap.size() != 0) { // handled like EMPTY
                        list.addAll(getGroupDescWithoutMembers(offsetAndMetadataMap, desc));
                    } else if (offsetAndMetadataMap.size() == 0) { // handled like DEAD
                        list.add(new KafkaConsumerGroupDescribeDto(desc.groupId(), desc.coordinator().toString(), desc.state()));
                    }
            }
            log.info("consumer groups: {}", Arrays.asList(list));
            listMap.put(desc.groupId(), list);
        }
        return listMap;
    }

    private List<KafkaConsumerGroupDescribeDto> getGroupDescWithMembers(Map<TopicPartition, OffsetAndMetadata> offsetAndMetadataMap, ConsumerGroupDescription desc) {
        Collection<MemberDescription> members = desc.members();
        List<KafkaConsumerGroupDescribeDto> list = new ArrayList<>();
        for (MemberDescription member : members) {
            for (TopicPartition topicPartition : member.assignment().topicPartitions()) {
                Map<TopicPartition, Long> endOffsets = consumer.endOffsets(Collections.singleton(topicPartition));
                long currentOffset = ObjectUtils.allNotNull(offsetAndMetadataMap.get(topicPartition)) ? offsetAndMetadataMap.get(topicPartition).offset() : -1L;
                long logEndOffset = endOffsets.get(topicPartition);
                list.add(new KafkaConsumerGroupDescribeDto(desc.groupId(),
                        topicPartition.topic(),
                        topicPartition.partition(),
                        currentOffset,
                        logEndOffset,
                        currentOffset == -1 ? -1 : logEndOffset - currentOffset,
                        member.consumerId(),
                        member.host(),
                        member.clientId(),
                        desc.coordinator().toString(),
                        desc.state()));
            }
        }
        return list;
    }

    private List<KafkaConsumerGroupDescribeDto> getGroupDescWithoutMembers(Map<TopicPartition, OffsetAndMetadata> offsetAndMetadataMap, ConsumerGroupDescription desc) {
        Set<TopicPartition> tps = offsetAndMetadataMap.keySet();
        List<KafkaConsumerGroupDescribeDto> list = new ArrayList<>();
        for (TopicPartition topicPartition : tps) {
            Map<TopicPartition, Long> endOffsets = consumer.endOffsets(Collections.singleton(topicPartition));
            long currentOffset = ObjectUtils.allNotNull(offsetAndMetadataMap.get(topicPartition)) ? offsetAndMetadataMap.get(topicPartition).offset() : -1L;
            long logEndOffset = endOffsets.get(topicPartition);
            list.add(new KafkaConsumerGroupDescribeDto(desc.groupId(),
                    topicPartition.topic(),
                    topicPartition.partition(),
                    currentOffset,
                    logEndOffset,
                    currentOffset == -1 ? -1 : logEndOffset - currentOffset,
                    desc.coordinator().toString(),
                    desc.state()));
        }
        return list;
    }

    @Override
    public void close() {
        adminClient.close();
        consumer.close();
    }
}
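
A hedged usage sketch for lag monitoring: list all groups and print each group's per-partition current offset, log end offset and lag (again assuming the hypothetical KafkaDataSource sketched earlier; the address and deserializers are placeholders):

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Set;

public class ConsumerGroupLagExample {
    public static void main(String[] args) throws Exception {
        KafkaDataSource ds = new KafkaDataSource();
        ds.setBootstrapServers("localhost:9092");                                              // assumed address
        ds.setKeyDeserializer("org.apache.kafka.common.serialization.StringDeserializer");     // assumed deserializers
        ds.setValueDeserializer("org.apache.kafka.common.serialization.StringDeserializer");
        try (KafkaConsumerGroup groups = new KafkaConsumerGroup(ds)) {
            Set<String> groupIds = groups.getAllConsumerGroups();
            Map<String, List<KafkaConsumerGroupDescribeDto>> desc =
                    groups.getConsumerGroupDesc(new ArrayList<>(groupIds));
            // one entry per group; each DTO carries topic, partition, current offset, end offset and lag
            desc.forEach((groupId, partitions) -> System.out.println(groupId + " -> " + partitions));
        }
    }
}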

Kafka consumer: simulated consumption

@Slf4j
public class KafkaConsumer extends KafkaBase {

    private KafkaConsumerDtoNew kafkaConsumerDto;

    private Consumer<String, String> consumer;

    private String consumerGroup;

    public KafkaConsumer(KafkaDataSource kafkaDataSource, KafkaConsumerDtoNew kafkaConsumerDto) {
        super(kafkaDataSource);
        this.kafkaConsumerDto = kafkaConsumerDto;
        if (StringUtils.isNotBlank(kafkaConsumerDto.getGroupId())) {
            this.consumerGroup = kafkaConsumerDto.getGroupId();
        } else {
            this.consumerGroup = "consumer-" + kafkaConsumerDto.getLoginUserName() + "-" + ZonedDateTime.now().format(DateTimeFormatter.ofPattern("yyyyMMddHHmmss.SSS"));
        }

        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaDataSource.getBootstrapServers());
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, kafkaDataSource.getKeyDeserializer());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, kafkaDataSource.getValueDeserializer());
        props.put(ConsumerConfig.GROUP_ID_CONFIG, this.consumerGroup);
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, kafkaConsumerDto.getOffset());//earliest,latest
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
        props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "30000");
        props.put(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, "10000");
        consumer = new org.apache.kafka.clients.consumer.KafkaConsumer<>(props);
    }

    public List<ConsumerRecordDtoNew> consumer() {
        consumer.subscribe(Collections.singleton(kafkaConsumerDto.getTopic()));
        List<ConsumerRecordDtoNew> list = new ArrayList<>(5);
        long t1 = System.currentTimeMillis();
        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(5));
            for (ConsumerRecord<String, String> record : records) {
                if (kafkaConsumerDto.getPartition() < 0) {
                    list.add(new ConsumerRecordDtoNew(record.topic(), record.partition(), record.offset(), record.timestamp(), record.key(), record.value(), this.consumerGroup));
                } else {
                    if (kafkaConsumerDto.getPartition() == record.partition()) {
                        list.add(new ConsumerRecordDtoNew(record.topic(), record.partition(), record.offset(), record.timestamp(), record.key(), record.value(), this.consumerGroup));
                    }
                }
                if (list.size() >= 5) {
                    break;
                }
            }
            long t2 = System.currentTimeMillis();
            if (list.size() >= 5 || (t2 - t1) > 10000) {
                break;
            }
        }

        consumer.commitSync();
        log.info("consumer records: " + Arrays.asList(list));
        return list;
    }

    public List<ConsumerRecordDtoNew> consumerByTimestamp(long timestamp) {
        // Seek every partition of the topic to the first offset whose record timestamp is >= the given timestamp.
        // assign() and seek()/position() must be used together, otherwise consumer.assignment() would be empty.
        List<PartitionInfo> partitionInfos = consumer.partitionsFor(kafkaConsumerDto.getTopic());
        if (null != partitionInfos && partitionInfos.size() > 0) {
            Map<TopicPartition, Long> map = new HashMap<>();
            for (PartitionInfo p : partitionInfos) {
                map.put(new TopicPartition(p.topic(), p.partition()), timestamp);
            }
            // assign all partitions up front so the seeks below apply to every one of them
            consumer.assign(map.keySet());
            Map<TopicPartition, OffsetAndTimestamp> offsetTimestamp = consumer.offsetsForTimes(map);
            for (Map.Entry<TopicPartition, OffsetAndTimestamp> entry : offsetTimestamp.entrySet()) {
                TopicPartition key = entry.getKey();
                OffsetAndTimestamp value = entry.getValue();
                // derive the offset from the requested timestamp
                long position;
                if (value != null) {
                    position = value.offset();
                } else {
                    // offsetsForTimes returns null when the timestamp is newer than the partition's latest record
                    consumer.seekToEnd(Collections.singleton(key));
                    position = consumer.position(key);
                }
                consumer.seek(key, position);
            }
        }
        List<ConsumerRecordDtoNew> list = new ArrayList<>(5);
        long start = System.currentTimeMillis();
        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(5));
            for (ConsumerRecord<String, String> record : records) {
                list.add(new ConsumerRecordDtoNew(record.topic(), record.partition(), record.offset(), record.timestamp(), record.key(), record.value(), this.consumerGroup));
            }
            // stop after 5 records, or give up after 10 seconds so the call cannot loop forever
            if (list.size() >= 5 || (System.currentTimeMillis() - start) > 10000) {
                break;
            }
        }
        consumer.commitSync();
        log.info("consumer records: {}", list);
        return list;
    }

    public void resetOffsetToEnd() {
        Map<TopicPartition, OffsetAndMetadata> offset = new HashMap<>();
        List<PartitionInfo> partitionInfos = consumer.partitionsFor(kafkaConsumerDto.getTopic());
        if (null != partitionInfos && partitionInfos.size() > 0) {
            for (PartitionInfo p : partitionInfos) {
                TopicPartition tp = new TopicPartition(p.topic(), p.partition());
                consumer.assign(Collections.singleton(tp));
                // move to the latest offset
                consumer.seekToEnd(Collections.singleton(tp));
                // to move to the earliest offset instead:
                //consumer.seekToBeginning(Collections.singleton(tp));
                // read back the partition's end offset
                long position = consumer.position(tp);
                offset.put(tp, new OffsetAndMetadata(position));
            }
        }
        consumer.commitSync(offset);
    }

    public void resetOffsetByTimestamps(long timestamp) {
        Map<TopicPartition, OffsetAndMetadata> offset = new HashMap<>();
        // assign() and seek()/position() must be used together, otherwise consumer.assignment() would be empty
        List<PartitionInfo> partitionInfos = consumer.partitionsFor(kafkaConsumerDto.getTopic());
        if (null != partitionInfos && partitionInfos.size() > 0) {
            Map<TopicPartition, Long> map = new HashMap<>();
            for (PartitionInfo p : partitionInfos) {
                map.put(new TopicPartition(p.topic(), p.partition()), timestamp);
            }
            Map<TopicPartition, OffsetAndTimestamp> offsetTimestamp = consumer.offsetsForTimes(map);
            for (Map.Entry<TopicPartition, OffsetAndTimestamp> entry : offsetTimestamp.entrySet()) {
                TopicPartition key = entry.getKey();
                OffsetAndTimestamp value = entry.getValue();
                // derive the offset from the requested timestamp
                long position = 0;
                if (value != null) {
                    position = value.offset();
                } else {
                    // offsetsForTimes returns null when the timestamp is newer than the partition's latest record
                    consumer.assign(Collections.singleton(key));
                    consumer.seekToEnd(Collections.singleton(key));
                    position = consumer.position(key);
                }
                offset.put(key, new OffsetAndMetadata(position));
                // to start consuming from that offset instead of just committing it:
                //consumer.seek(entry.getKey(), position);
            }
        }
        consumer.commitSync(offset);
    }

    @Override
    public void close() {
        consumer.close();
    }

}

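A hedged usage sketch: peek at up to five records from a topic. KafkaConsumerDtoNew is not shown in the original post, so the setters below are assumptions derived from the getters that KafkaConsumer calls (getTopic, getPartition, getOffset, getGroupId, getLoginUserName); the address, deserializers and topic are placeholders:

import java.util.List;

public class KafkaConsumerExample {
    public static void main(String[] args) {
        KafkaDataSource ds = new KafkaDataSource();
        ds.setBootstrapServers("localhost:9092");                                              // assumed address
        ds.setKeyDeserializer("org.apache.kafka.common.serialization.StringDeserializer");     // assumed deserializers
        ds.setValueDeserializer("org.apache.kafka.common.serialization.StringDeserializer");

        KafkaConsumerDtoNew dto = new KafkaConsumerDtoNew();   // hypothetical DTO, setters assumed
        dto.setTopic("demo-topic");
        dto.setPartition(-1);              // negative value = do not filter by partition
        dto.setOffset("earliest");         // becomes auto.offset.reset
        dto.setLoginUserName("demo-user"); // used to build the temporary consumer group name

        try (KafkaConsumer consumer = new KafkaConsumer(ds, dto)) {
            List<ConsumerRecordDtoNew> sample = consumer.consumer();   // returns after 5 records or ~10 s
            sample.forEach(System.out::println);
        }
    }
}
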

Kafka producer: simulated production

public class KafkaProducer extends KafkaBase {


    private Producer<String, String> producer;

    public KafkaProducer(KafkaDataSource kafkaDataSource) {
        super(kafkaDataSource);
        Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaDataSource.getBootstrapServers());
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, kafkaDataSource.getKeySerializer());
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, kafkaDataSource.getValueSerializer());
        producer = new org.apache.kafka.clients.producer.KafkaProducer<>(props);
    }

    public void send(List<ProducerRecordDto> records) {
        records.forEach(record ->
                producer.send(new ProducerRecord<>(record.getTopic(),
                        record.getPartition(),
                        record.getTimestamp(),
                        record.getKey(),
                        record.getValue())));

        producer.flush();
    }

    @Override
    public void close() {
        producer.close();
    }
}

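A hedged usage sketch: send a couple of test records. ProducerRecordDto is not shown in the post; the all-args constructor below (topic, partition, timestamp, key, value) is an assumption matching the getters used in send(), and a null partition/timestamp lets Kafka pick them:

import java.util.Arrays;

public class KafkaProducerExample {
    public static void main(String[] args) {
        KafkaDataSource ds = new KafkaDataSource();
        ds.setBootstrapServers("localhost:9092");                                          // assumed address
        ds.setKeySerializer("org.apache.kafka.common.serialization.StringSerializer");     // assumed serializers
        ds.setValueSerializer("org.apache.kafka.common.serialization.StringSerializer");

        try (KafkaProducer producer = new KafkaProducer(ds)) {
            // hypothetical DTO constructor: (topic, partition, timestamp, key, value)
            producer.send(Arrays.asList(
                    new ProducerRecordDto("demo-topic", null, null, "k1", "hello"),
                    new ProducerRecordDto("demo-topic", null, null, "k2", "world")));
        }
    }
}
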

Reprinted from juejin.im/post/7130232373962080293