Dependencies (add to pom.xml):
<!-- Spring Boot web starter plus the Spring for Apache Kafka integration. -->
<dependency>
    <groupId>org.springframework.boot</groupId>
    <artifactId>spring-boot-starter-web</artifactId>
</dependency>
<dependency>
    <groupId>org.springframework.kafka</groupId>
    <artifactId>spring-kafka</artifactId>
</dependency>
application.properties:
### Kafka configuration
spring.kafka.bootstrap-servers=10.160.3.70:9092

# Consumer: manual offset commit, read from earliest on a new group,
# up to 2000 records per poll (batch listener consumes these in one call).
spring.kafka.consumer.group-id=sea-test
spring.kafka.consumer.enable-auto-commit=false
spring.kafka.consumer.auto-offset-reset=earliest
spring.kafka.consumer.max-poll-records=2000
#spring.kafka.consumer.key-deserializer=org.apache.kafka.common.serialization.StringDeserializer
#spring.kafka.consumer.value-deserializer=org.apache.kafka.common.serialization.StringDeserializer

# Producer: retries, batching and buffer sizing.
# NOTE: `spring.kafka.producer.linger` is a custom key read via @Value in
# KafkaConfig, not a standard Spring Boot property.
spring.kafka.producer.retries=3
spring.kafka.producer.batch-size=16384
spring.kafka.producer.buffer-memory=33554432
spring.kafka.producer.linger=10
#spring.kafka.producer.key-serializer=org.apache.kafka.common.serialization.StringSerializer
#spring.kafka.producer.value-serializer=org.apache.kafka.common.serialization.StringSerializer
KafkaConfig.java:
package com.icil.topic.config;

import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.kafka.common.serialization.StringSerializer;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.annotation.EnableKafka;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.config.KafkaListenerContainerFactory;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
import org.springframework.kafka.core.KafkaAdmin;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.core.ProducerFactory;
import org.springframework.kafka.listener.ContainerProperties;

import com.google.common.collect.Maps;

/**
 * Kafka producer, consumer, admin and batch-listener configuration.
 *
 * <p>Reference: https://blog.csdn.net/tmeng521/article/details/90901925
 */
@Configuration
@EnableKafka
public class KafkaConfig {

    @Value("${spring.kafka.bootstrap-servers}")
    private String bootstrapServers;

    @Value("${spring.kafka.consumer.group-id}")
    private String groupId;

    @Value("${spring.kafka.consumer.enable-auto-commit}")
    private Boolean autoCommit;

    @Value("${spring.kafka.consumer.auto-offset-reset}")
    private String autoOffsetReset;

    @Value("${spring.kafka.consumer.max-poll-records}")
    private Integer maxPollRecords;

    @Value("${spring.kafka.producer.linger}")
    private int linger;

    @Value("${spring.kafka.producer.retries}")
    private Integer retries;

    @Value("${spring.kafka.producer.batch-size}")
    private Integer batchSize;

    @Value("${spring.kafka.producer.buffer-memory}")
    private Integer bufferMemory;

    /**
     * Producer client properties.
     *
     * @return the configuration map used to build the {@link ProducerFactory}
     */
    public Map<String, Object> producerConfigs() {
        Map<String, Object> props = new HashMap<>();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        // Number of send retries.
        props.put(ProducerConfig.RETRIES_CONFIG, retries);
        // Send when a batch reaches this size (bytes) ...
        props.put(ProducerConfig.BATCH_SIZE_CONFIG, batchSize);
        // ... or when this many milliseconds have elapsed, whichever is first.
        props.put(ProducerConfig.LINGER_MS_CONFIG, linger);
        // Total memory (bytes) available for buffering unsent records.
        props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, bufferMemory);
        // Serializers for record keys and values.
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        // Acknowledgement level: "-1" (== "all") waits for all in-sync replicas.
        // FIX: acks must be a String; the original passed the int -1, which
        // Kafka's config validation rejects.
        props.put(ProducerConfig.ACKS_CONFIG, "-1");
        // Maximum size of a single request in bytes (default 1048576).
        // FIX: the original mistakenly reused LINGER_MS_CONFIG here, which both
        // lost this setting and silently raised linger to ~3 hours.
        props.put(ProducerConfig.MAX_REQUEST_SIZE_CONFIG, 10485760);
        // Consider a send failed if the broker does not acknowledge within 60s.
        props.put(ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG, 60000);
        // TODO(review): the blog's interceptor class com.te.handler.KafkaProducerInterceptor
        // does not exist in this project (com.icil.topic); enabling it would make
        // producer creation fail with ClassNotFoundException. Re-enable once a
        // local interceptor is implemented.
        // props.put(ProducerConfig.INTERCEPTOR_CLASSES_CONFIG, "com.te.handler.KafkaProducerInterceptor");
        // Compression algorithm (default: none). FIX: Kafka only accepts the
        // lowercase names "none", "gzip", "snappy", "lz4", "zstd" — "LZ4" is rejected.
        props.put(ProducerConfig.COMPRESSION_TYPE_CONFIG, "lz4");
        return props;
    }

    /**
     * Kafka admin bean (the analogue of RabbitMQ's rabbitAdmin). Without this
     * bean the custom {@code adminClient} cannot be created to manage topics.
     */
    @Bean
    public KafkaAdmin kafkaAdmin() {
        Map<String, Object> props = new HashMap<>();
        // Broker address (the Kafka bootstrap servers, not ZooKeeper).
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        return new KafkaAdmin(props);
    }

    /**
     * AdminClient bean; inject it to create topics programmatically
     * (in a cluster, with the desired replication factor).
     */
    @Bean
    public AdminClient adminClient() {
        return AdminClient.create(kafkaAdmin().getConfig());
    }

    /** Producer factory backed by {@link #producerConfigs()}. */
    @Bean
    public ProducerFactory<String, String> producerFactory() {
        return new DefaultKafkaProducerFactory<>(producerConfigs());
    }

    /** Template for sending String key/value records. */
    @Bean
    public KafkaTemplate<String, String> kafkaTemplate() {
        return new KafkaTemplate<>(producerFactory());
    }

    /**
     * Consumer client properties.
     *
     * @return the configuration map used to build the consumer factory
     */
    @Bean
    public Map<String, Object> consumerConfigs() {
        Map<String, Object> props = Maps.newHashMap();
        props.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, autoCommit);
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, autoOffsetReset);
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, maxPollRecords);
        // props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, 180000);
        // props.put(ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG, 900000);
        // props.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, 900000);
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        return props;
    }

    /**
     * Batch listener container factory with manual offset acknowledgement.
     * Batch size per poll is governed by ConsumerConfig.MAX_POLL_RECORDS_CONFIG.
     */
    @Bean
    public KafkaListenerContainerFactory<?> batchFactory() {
        // FIX: typed <String, String> to match the StringDeserializer used for
        // keys; the original declared <Integer, String>.
        ConcurrentKafkaListenerContainerFactory<String, String> factory =
                new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(new DefaultKafkaConsumerFactory<>(consumerConfigs()));
        // Deliver records to the listener in batches.
        factory.setBatchListener(true);
        // Retry template hook, left disabled as in the original:
        // factory.setRetryTemplate(retryTemplate());
        // Offsets are committed manually by the listener (auto-commit is off).
        factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL);
        return factory;
    }
}
If you need topics initialized at application startup, you can configure them as follows (reference: https://blog.csdn.net/tmeng521/article/details/90901925):
@Configuration public class KafkaInitialConfiguration { // create TopicName to topic.quick.initial of Topic and set the number of partitions to 8, and number of copies to 1 @Bean // by bean creation (the name for the bean initialTopic) public NewTopic initialTopic () { return new new NewTopic ( "topic.quick.initial", 8, ( Short ) 1 ); } / ** * @Bean this way, if the topic of the same name, it overwrites the previous * @return * / // // modified | number of partitions attention will turn 11 the number of partitions can only increase not decrease @Bean public NewTopic initialTopic2 () { return new newNewTopic ( "topic.quick.initial", 11, ( Short ) 1 ); } @Bean // create a kafka management class, management class rabbitAdmin rabbitMQ equivalent, without this bean can not use self-defined adminClient create Topic public KafkaAdmin kafkaAdmin () { the Map <String, Object> the props = new new the HashMap <> (); // configuration example of a connection address Kafka // Kafka address, not ZooKeeper props.put (AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "127.0.0.1:9092" ); KafkaAdmin ADMIN = new new KafkaAdmin (The props); return ADMIN; } @Bean // may be injected and then create topic kafka client, the bean created in the spring public the AdminClient AdminClient () { return AdminClient.create (kafkaAdmin () getConfig ().); } }
Test: creating a topic by hand and listing all existing topics.
@Autowired // adminClien needs its own configuration to generate the bean Private the AdminClient AdminClient; @Autowired Private KafkaTemplate <String, String> kafkaTemplate; @Test // custom partitions and manually create topic public void testCreateTopic () throws InterruptedException { // this is a manual Creating // 10 partitions, a copy of the // partition many benefits that can quickly deal with concurrency, but also according to the configuration of the machine NewTopic Topic = new new NewTopic ( "topic.manual.create", 10, ( Short ) 1 ); adminClient.createTopics (Arrays.asList (Topic)); the Thread.sleep (1000); } /** * 获取所有的topic * @throws Exception */ @Test public void getAllTopic() throws Exception { ListTopicsResult listTopics = adminClient.listTopics(); Set<String> topics = listTopics.names().get(); for (String topic : topics) { System.err.println(topic); } }