Kafka API and Kafka Streams API

pom.xml

    <dependencies>
        <!-- https://mvnrepository.com/artifact/org.apache.kafka/kafka-clients -->
        <dependency>
            <groupId>org.apache.kafka</groupId>
            <artifactId>kafka-clients</artifactId>
            <version>1.0.0</version>
        </dependency>
        <dependency>
            <groupId>org.apache.kafka</groupId>
            <artifactId>kafka-streams</artifactId>
            <version>1.0.0</version>
        </dependency>

    </dependencies>

    <build>
        <plugins>
            <!-- Java compiler plugin -->
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-compiler-plugin</artifactId>
                <version>3.2</version>
                <configuration>
                    <source>1.8</source>
                    <target>1.8</target>
                    <encoding>UTF-8</encoding>
                </configuration>
            </plugin>
        </plugins>
    </build>

Producer code

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;

import java.util.Properties;

/**
 * @author dell
 * @version v 1.0
 * @date 2020.3.30
 */
public class OrderProducer {
    public static void main(String[] args) {
        //1. Configure the connection to the Kafka cluster (producer settings)
        Properties p = new Properties();
        // Kafka broker addresses
        p.put("bootstrap.servers", "192.168.100.100:9092,192.168.100.101:9092,192.168.100.102:9092");
        // Acknowledgment level: "all" waits for the full set of in-sync replicas
        p.put("acks", "all");
        // Number of retries on transient send failures
        p.put("retries", 0);
        // Batch size in bytes per partition
        p.put("batch.size", 16384);
        // How long to wait (ms) for more records before sending a batch
        p.put("linger.ms", 1);
        // Total memory (bytes) available for buffering unsent records
        p.put("buffer.memory", 33554432);
        // Serializers for message keys and values
        p.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        p.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");

        // Custom partitioner: fully qualified class name (package.ClassName)
        p.put("partitioner.class", "KafkaCustomPartitioner");

        //2. Instantiate a producer
        KafkaProducer<String, String> producer = new KafkaProducer<>(p);

        //3. Send data to the Kafka cluster through the producer
        for (int i = 0; i < 10; i++) {
            // ProducerRecord constructor arguments:
            //   topic (required)
            //   partition (optional): when a partition is given, a key must be given too;
            //     every record goes to that partition and the key no longer affects placement
            //   key (optional): with no explicit partition, the key is hashed and taken modulo
            //     the partition count to choose the target partition
            //   value (required): the payload to send
            //ProducerRecord<String, String> record = new ProducerRecord<>("18BD12_2", 0, "test", "message " + i);
            ProducerRecord<String, String> record = new ProducerRecord<>("18BD12_2", "message " + i);
            // Send the record (fire-and-forget)
            producer.send(record);
        }
        // 4. Close the producer, flushing any pending records
        producer.close();
    }
}
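
The plain send(record) above is fire-and-forget from the caller's point of view: delivery errors only surface through the returned Future. The same kafka-clients API also accepts a callback; as a minimal sketch, a drop-in replacement for the producer.send(record) line in the loop above could check the result:

producer.send(record, (metadata, exception) -> {
    if (exception != null) {
        // The broker rejected the record or all retries were exhausted
        exception.printStackTrace();
    } else {
        // metadata reports where the record was actually written
        System.out.println("partition = " + metadata.partition() + ", offset = " + metadata.offset());
    }
});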

Custom partitioner code

import org.apache.kafka.clients.producer.Partitioner;
import org.apache.kafka.common.Cluster;
import org.apache.kafka.common.PartitionInfo;

import java.util.List;
import java.util.Map;
import java.util.Random;

/**
 * @author dell
 */
public class KafkaCustomPartitioner implements Partitioner {
	// One Random instance reused across calls instead of allocating one per record
	private final Random random = new Random();

	@Override
	public void configure(Map<String, ?> configs) {
	}

	@Override
	public int partition(String topic, Object key, byte[] keyBytes, Object value, byte[] valueBytes, Cluster cluster) {
		// Look up the partitions of the topic
		List<PartitionInfo> partitions = cluster.partitionsForTopic(topic);
		// Number of partitions
		int partitionNum = partitions.size();
		// Pick a partition at random
		return random.nextInt(partitionNum);
	}

	@Override
	public void close() {
	}
}
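
Random placement spreads load evenly but gives up per-key ordering. If records with the same key must land on the same partition, a deterministic variant can hash the key bytes instead; a minimal sketch (the class name KeyHashPartitioner is hypothetical, same imports as above plus java.util.Arrays):

public class KeyHashPartitioner implements Partitioner {
    @Override
    public void configure(Map<String, ?> configs) {
    }

    @Override
    public int partition(String topic, Object key, byte[] keyBytes, Object value, byte[] valueBytes, Cluster cluster) {
        int partitionNum = cluster.partitionsForTopic(topic).size();
        // Records without a key all go to partition 0 in this sketch
        if (keyBytes == null) {
            return 0;
        }
        // Mask the sign bit so the modulo result is never negative
        return (Arrays.hashCode(keyBytes) & 0x7fffffff) % partitionNum;
    }

    @Override
    public void close() {
    }
}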

Consumer code: pulling records partition by partition

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

import java.util.Collections;
import java.util.List;
import java.util.Properties;
import java.util.Set;

/**
 * @author dell
 * @version v 1.0
 * @date 2020.3.30
 */
public class ConsumerPartitioner {
    public static void main(String[] args) {
        //1. Consumer configuration
        Properties props = new Properties();
        props.put("bootstrap.servers", "192.168.100.100:9092,192.168.100.101:9092,192.168.100.102:9092");
        props.put("group.id", "test");

        // Commit offsets manually instead of auto-committing (recommended)
        props.put("enable.auto.commit", "false");

        // Deserializers for message keys and values
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        //2. Instantiate the consumer
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);

        //3. Subscribe to the topic
        consumer.subscribe(Collections.singletonList("18BD12_2"));

        //4. Poll the records and print them
        while (true) {
            // Fetch the next batch of records (timeout in milliseconds)
            ConsumerRecords<String, String> records = consumer.poll(1000);
            // Partitions that returned data in this batch
            Set<TopicPartition> partitions = records.partitions();
            // Handle each partition separately
            for (TopicPartition partition : partitions) {
                // All records of this partition in the batch
                List<ConsumerRecord<String, String>> list = records.records(partition);
                // Walk through the records one by one
                for (ConsumerRecord<String, String> record : list) {
                    // Offset of the record within its partition
                    long offset = record.offset();
                    // Message value
                    String value = record.value();
                    System.out.println("partition = " + partition);
                    System.out.println("offset = " + offset);
                    System.out.println("value = " + value);
                }
                // Commit the offset for this partition only, after its records are processed;
                // the committed value is the position of the next record to read, hence +1
                long lastOffset = list.get(list.size() - 1).offset();
                consumer.commitSync(Collections.singletonMap(partition, new OffsetAndMetadata(lastOffset + 1)));
            }
        }
    }
}
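
subscribe() leaves partition assignment to the consumer group coordinator. When a consumer should read specific partitions, or replay from a chosen offset, the same client offers assign() and seek(); a minimal sketch against the consumer above (partition number 0 and offset 0 are arbitrary choices here):

// Take partition 0 of the topic directly, bypassing group rebalancing
TopicPartition partition0 = new TopicPartition("18BD12_2", 0);
consumer.assign(Collections.singletonList(partition0));
// Rewind to the beginning of the partition and re-read everything
consumer.seek(partition0, 0);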

Kafka Streams API

import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.Topology;


import java.util.Properties;

/**
 * @author dell
 * Reads records from test01, uppercases the values, and writes them to test02
 */
public class KafkaStream {

    public static void main(String[] args) {
        Properties props = new Properties();
        // Unique identifier of this Streams application
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "wordcount-application12");
        // Kafka cluster to connect to
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "hadoop01:9092,hadoop02:9092,hadoop03:9092");
        // Default serdes (serializer/deserializer pairs) for keys and values
        props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass());
        props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass());


        // Define the processing logic
        StreamsBuilder streamsBuilder = new StreamsBuilder();
        // stream(...) names the topic to read from, to(...) the topic to write to
        streamsBuilder.stream("test01").mapValues(line -> line.toString().toUpperCase()).to("test02");


        // Build the Topology (the processing graph)
        final Topology topology = streamsBuilder.build();


        // Instantiate the Kafka Streams client
        KafkaStreams streams = new KafkaStreams(topology, props);
        // Start the stream processing
        streams.start();

        // Close the client cleanly when the JVM shuts down
        Runtime.getRuntime().addShutdownHook(new Thread(streams::close));
    }
}
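
The application id above says "wordcount" even though this topology only uppercases values. For reference, a minimal word-count sketch in the same Streams DSL, runnable with the same props; the source topic test01 is reused from above, while the output topic test03 and the class name are assumptions:

import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.kstream.KTable;
import org.apache.kafka.streams.kstream.Produced;

import java.util.Arrays;

public class WordCountTopology {
    public static StreamsBuilder build() {
        StreamsBuilder builder = new StreamsBuilder();
        // Count how often each word occurs across all input lines
        KTable<String, Long> counts = builder.<String, String>stream("test01")
                // Split each value into lowercase words
                .flatMapValues(line -> Arrays.asList(line.toLowerCase().split("\\s+")))
                // Re-key every record by the word itself
                .groupBy((key, word) -> word)
                // Maintain a running count per word in a state store
                .count();
        // Emit the counts; values are Long, so an explicit value serde is needed
        counts.toStream().to("test03", Produced.with(Serdes.String(), Serdes.Long()));
        return builder;
    }
}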

Reposted from blog.csdn.net/hongchenshijie/article/details/105207861