pom.xml — Maven dependencies and build configuration
<dependencies>
<!-- https://mvnrepository.com/artifact/org.apache.kafka/kafka-clients -->
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka-clients</artifactId>
<version>1.0.0</version>
</dependency>
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka-streams</artifactId>
<version>1.0.0</version>
</dependency>
</dependencies>
<build>
<plugins>
<!-- Java compiler plugin -->
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
<version>3.2</version>
<configuration>
<source>1.8</source>
<target>1.8</target>
<encoding>UTF-8</encoding>
</configuration>
</plugin>
</plugins>
</build>
Producer code
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import java.util.Properties;
public class OrderProducer {
    /**
     * Sends ten string messages to topic "18BD12_2", routed by the custom
     * partitioner configured via "partitioner.class".
     *
     * <p>Uses try-with-resources so the producer is always closed — and its
     * buffered records flushed — even if a send() throws; the original leaked
     * the producer on any exception before close().
     */
    public static void main(String[] args) {
        Properties p = new Properties();
        p.put("bootstrap.servers", "192.168.100.100:9092,192.168.100.101:9092,192.168.100.102:9092");
        p.put("acks", "all");   // wait for the full ISR to acknowledge each record
        p.put("retries", 0);    // no retries: a transient broker error loses the record
        p.put("batch.size", 16384);
        p.put("linger.ms", 1);
        p.put("buffer.memory", 33554432);
        p.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        p.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        // NOTE(review): the unqualified name only resolves if KafkaCustomPartitioner
        // lives in the default package — confirm, or use its fully-qualified name.
        p.put("partitioner.class", "KafkaCustomPartitioner");
        // KafkaProducer implements Closeable; close() also flushes pending sends.
        try (KafkaProducer<String, String> producer = new KafkaProducer<>(p)) {
            for (int i = 0; i < 10; i++) {
                // No key is supplied, so the partitioner sees keyBytes == null.
                producer.send(new ProducerRecord<>("18BD12_2", "发送的数据:" + i));
            }
        }
    }
}
Custom partitioner code
import org.apache.kafka.clients.producer.Partitioner;
import org.apache.kafka.common.Cluster;
import org.apache.kafka.common.PartitionInfo;
import java.util.List;
import java.util.Map;
import java.util.Random;
public class KafkaCustomPartitioner implements Partitioner {

    /** No configuration is needed for random partitioning. */
    @Override
    public void configure(Map<String, ?> configs) {
    }

    /**
     * Spreads records uniformly at random across the topic's partitions.
     *
     * <p>The key bytes are ignored, so records sharing a key do NOT land on
     * the same partition — per-key ordering is not preserved.
     */
    @Override
    public int partition(String topic, Object arg1, byte[] keyBytes, Object arg3, byte[] arg4, Cluster cluster) {
        List<PartitionInfo> partitions = cluster.partitionsForTopic(topic);
        // ThreadLocalRandom avoids allocating (and re-seeding) a fresh Random
        // on every single record, which the original did.
        return java.util.concurrent.ThreadLocalRandom.current().nextInt(partitions.size());
    }

    /** Nothing to release. */
    @Override
    public void close() {
    }
}
Consumer code: pulling and processing records partition by partition
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;
import java.util.Collections;
import java.util.List;
import java.util.Properties;
import java.util.Set;
public class ConsumerPartitioner {
public static void main(String[] args) {
Properties props = new Properties();
props.put("bootstrap.servers", "192.168.100.100:9092,192.168.100.101:9092,192.168.100.102:9092");
props.put("group.id", "test");
props.put("enable.auto.commit", "false");
props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
consumer.subscribe(Collections.singletonList("18BD12_2"));
while (true) {
ConsumerRecords<String, String> records = consumer.poll(1000);
Set<TopicPartition> partitions = records.partitions();
for (TopicPartition partition : partitions) {
List<ConsumerRecord<String, String>> list = records.records(partition);
for (ConsumerRecord<String, String> record : list) {
long offset = record.offset();
String value = record.value();
System.out.println("partition = " + partition);
System.out.println("offset = " + offset);
System.out.println("value = " + value);
}
consumer.commitAsync();
}
}
}
}
Kafka Streams API example
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.Topology;
import java.util.Properties;
public class KafkaStream {
    /**
     * Minimal Kafka Streams topology: reads values from topic "test01",
     * upper-cases each one, and writes the result to topic "test02".
     */
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "wordcount-application12");
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "hadoop01:9092,hadoop02:9092,hadoop03:9092");
        props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass());
        props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass());

        StreamsBuilder streamsBuilder = new StreamsBuilder();
        // stream() is untyped here (KStream<Object, Object>), so toString()
        // converts the value before upper-casing; the default serdes are String.
        streamsBuilder.stream("test01")
                .mapValues(line -> line.toString().toUpperCase())
                .to("test02");

        final Topology topology = streamsBuilder.build();
        final KafkaStreams streams = new KafkaStreams(topology, props);
        // Release the client's threads and state stores on JVM shutdown;
        // the original started the streams instance but never closed it.
        Runtime.getRuntime().addShutdownHook(new Thread(streams::close));
        streams.start();
    }
}