Kafka producer API operations

1. Preparation

  • Create a Maven project in the IDE and add the kafka-clients dependency to the pom file
<!-- https://mvnrepository.com/artifact/org.apache.kafka/kafka-clients -->
<dependency>
    <groupId>org.apache.kafka</groupId>
    <artifactId>kafka-clients</artifactId>
    <version>1.1.1</version>
</dependency>
  • Start the ZooKeeper cluster
bin/zkServer.sh start
  • Start the Kafka cluster
bin/kafka-server-start.sh -daemon config/server.properties
  • Start a console consumer on the Kafka cluster
bin/kafka-console-consumer.sh --bootstrap-server localhost:9092 --topic bigdata --from-beginning
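
The console consumer assumes the topic bigdata already exists; if it does not, it can be created first. A sketch for this Kafka version, assuming a three-broker cluster with ZooKeeper reachable on localhost:2181:

bin/kafka-topics.sh --zookeeper localhost:2181 --create --topic bigdata --partitions 3 --replication-factor 3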

2. Create an ordinary producer

import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;

public class MyProducer {

    public static void main(String[] args) {

        /* Properties is effectively a map of configuration keys to values */
        Properties properties = new Properties();

        /* Kafka cluster, broker-list */
        properties.put("bootstrap.servers", "centos7-1:9092");

        /* Wait for acknowledgement from all replica nodes */
        properties.put("acks", "all");

        /* Number of retries */
        properties.put("retries", 3);

        /* Batch size */
        properties.put("batch.size", 16384);

        /* Linger time */
        properties.put("linger.ms", 1);

        /* RecordAccumulator buffer size */
        properties.put("buffer.memory", 33554432);

        /* Key serializer */
        properties.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        /* Value serializer */
        properties.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");

        /* Create the producer */
        KafkaProducer<String, String> kafkaProducer = new KafkaProducer<>(properties);

        /* Send data */
        for (int i = 0; i < 10; i++) {
            /* Topic, key, and value to send */
            kafkaProducer.send(new ProducerRecord<>("bigdata", "jh", "jh==" + i));
        }

        /* Release resources */
        kafkaProducer.close();
    }
}

Result: the console consumer started in the preparation step prints the ten messages jh==0 through jh==9 (screenshot in the original).
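
Note that send() returns a Future<RecordMetadata>, so the loop above is fully asynchronous. If a per-record acknowledgement is needed, the call can be made synchronous by blocking on the Future. A minimal sketch, to be placed inside the same main method (get() throws InterruptedException and ExecutionException, which must then be declared or caught, and RecordMetadata needs an extra import):

    /* Blocking on the returned Future turns the send into a synchronous call */
    RecordMetadata metadata =
            kafkaProducer.send(new ProducerRecord<>("bigdata", "jh", "sync message")).get();
    /* At this point the broker has acknowledged, so partition and offset are known */
    System.out.println(metadata.partition() + "--" + metadata.offset());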

3. Create a producer with a callback function

The callback is invoked when the producer receives the ack, and it runs asynchronously. It takes two parameters, RecordMetadata and Exception: if Exception is null the message was sent successfully; if it is not null, the send failed.

Note: a failed send is retried automatically (up to the configured number of retries), so there is no need to retry manually in the callback.

import java.util.Properties;
import org.apache.kafka.clients.producer.*;

public class CallBackProducer {

    public static void main(String[] args) {

        Properties properties = new Properties();

        /* Kafka cluster, broker-list */
        properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "centos7-1:9092");

        /* Wait for acknowledgement from all replica nodes ("-1" is equivalent to "all") */
        properties.put(ProducerConfig.ACKS_CONFIG, "-1");

        /* Number of retries */
        properties.put(ProducerConfig.RETRIES_CONFIG, 3);

        /* Batch size */
        properties.put(ProducerConfig.BATCH_SIZE_CONFIG, 16384);

        /* Linger time */
        properties.put(ProducerConfig.LINGER_MS_CONFIG, 1);

        /* RecordAccumulator buffer size */
        properties.put(ProducerConfig.BUFFER_MEMORY_CONFIG, 33554432);

        /* Key serializer */
        properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");

        /* Value serializer */
        properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");

        /* Create the producer */
        KafkaProducer<String, String> kafkaProducer = new KafkaProducer<>(properties);

        /* Send data */
        for (int i = 0; i < 100000; i++) {
            /* Topic, key, value, and the callback */
            kafkaProducer.send(new ProducerRecord<>("backtest", "jh", "jh===>" + i), new Callback() {
                @Override
                public void onCompletion(RecordMetadata metadata, Exception exception) {
                    /* On success, print the partition and offset */
                    if (exception == null) {
                        System.out.println(metadata.partition() + "--" + metadata.offset());
                    } else {
                        /* On failure, print the stack trace */
                        exception.printStackTrace();
                    }
                }
            });
        }

        /* Release resources */
        kafkaProducer.close();
    }
}

Source code of the send method: the target partition can be specified on the ProducerRecord

@Override
public Future<RecordMetadata> send(ProducerRecord<K, V> record, Callback callback) {
    // intercept the record, which can be potentially modified; this method does not throw exceptions
    ProducerRecord<K, V> interceptedRecord = this.interceptors.onSend(record);
    return doSend(interceptedRecord, callback);
}

public class ProducerRecord<K, V> {

    private final String topic;
    /* The target partition can be specified here */
    private final Integer partition;
    private final Headers headers;
    private final K key;
    private final V value;
    private final Long timestamp;

    // ...
}
After running it, the default partitioner hashes the key to pick a partition, and the output shows that records are ordered within each partition:

0--188
0--189
0--190
0--191
0--192
0--193
2--2467
2--2468
2--2469
2--2470
2--2471
2--2472
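
Because ProducerRecord carries an optional partition field, a record can also be pinned to a fixed partition instead of relying on the key hash. A minimal sketch using the (topic, partition, key, value) constructor, assuming the topic has at least two partitions:

    /* Send straight to partition 1, bypassing the partitioning logic */
    kafkaProducer.send(new ProducerRecord<>("backtest", 1, "jh", "pinned message"));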

4. Create a producer with a custom partitioner

Create a class that implements the Partitioner interface:

import org.apache.kafka.clients.producer.Partitioner;
import org.apache.kafka.common.Cluster;
import java.util.Map;

public class Mypartition implements Partitioner {

    @Override
    public int partition(String topic, Object key, byte[] keyBytes, Object value, byte[] valueBytes, Cluster cluster) {
        /* Put your own business logic here, for example: */
        // List<PartitionInfo> mydata = cluster.availablePartitionsForTopic("mydata");
        // Integer mydata1 = cluster.partitionCountForTopic("mydata");

        return 0;
    }

    @Override
    public void close() {
    }

    @Override
    public void configure(Map<String, ?> configs) {
    }
}
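
For reference, a minimal sketch of what the business logic could look like: a partitioner that spreads keyed records across all partitions of the topic by key hash. The hashing rule here is an illustrative assumption, not logic from the original post:

import java.util.Arrays;
import java.util.Map;
import org.apache.kafka.clients.producer.Partitioner;
import org.apache.kafka.common.Cluster;

public class KeyHashPartitioner implements Partitioner {

    @Override
    public int partition(String topic, Object key, byte[] keyBytes, Object value, byte[] valueBytes, Cluster cluster) {
        /* Assumes metadata for the topic is available in the cluster view */
        int numPartitions = cluster.partitionCountForTopic(topic);

        /* Records without a key all land in partition 0 */
        if (keyBytes == null) {
            return 0;
        }

        /* Mask the sign bit so the modulo result is never negative */
        return (Arrays.hashCode(keyBytes) & Integer.MAX_VALUE) % numPartitions;
    }

    @Override
    public void close() {
    }

    @Override
    public void configure(Map<String, ?> configs) {
    }
}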
Then register the custom partitioner on the producer through the partitioner.class property:

import java.util.Properties;
import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;

public class MyProducerPartition {

    public static void main(String[] args) {

        Properties properties = new Properties();

        /* Kafka cluster, broker-list */
        properties.put("bootstrap.servers", "centos7-1:9092");

        /* Wait for acknowledgement from all replica nodes */
        properties.put("acks", "all");

        /* Number of retries */
        properties.put("retries", 3);

        /* Batch size */
        properties.put("batch.size", 16384);

        /* Linger time */
        properties.put("linger.ms", 1);

        /* RecordAccumulator buffer size */
        properties.put("buffer.memory", 33554432);

        /* Key serializer */
        properties.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        /* Value serializer */
        properties.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");

        /* Register the custom partitioner */
        properties.put("partitioner.class", "com.gu.Mypartition");

        /* Create the producer */
        KafkaProducer<String, String> kafkaProducer = new KafkaProducer<>(properties);

        /* Send data */
        for (int i = 0; i < 10; i++) {
            /* Topic and value to send (no key), plus the callback */
            kafkaProducer.send(new ProducerRecord<>("bigdata", "jh==" + i), new Callback() {
                @Override
                public void onCompletion(RecordMetadata metadata, Exception exception) {
                    /* On success, print the partition and offset */
                    if (exception == null) {
                        System.out.println(metadata.partition() + "--" + metadata.offset());
                    } else {
                        /* On failure, print the stack trace */
                        exception.printStackTrace();
                    }
                }
            });
        }

        /* Release resources */
        kafkaProducer.close();
    }
}
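
Since Mypartition always returns 0, the callback output now shows partition 0 for every record.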

Origin: blog.csdn.net/weixin_46122692/article/details/109275559