kafka 使用

先关闭防火墙
~ service iptables stop

再来启动zookeeper
~ ./zkServer.sh start

然后后台静默启动kafka
~ nohup bin/kafka-server-start.sh config/server.properties > /dev/null 2>&1 &

在zookeeper的一个节点上创建一个topic
~ bin/kafka-topics.sh --create --zookeeper 192.168.1.123:2181 --replication-factor 1 --partitions 1 --topic first

查看topic创建是否成功
~ bin/kafka-topics.sh --describe --zookeeper 192.168.1.123:2181 --topic first

打开消费者窗口,来监听消息变化(注意:下方 Java 示例代码使用的 topic 是 message,而这里监听的是 first,两者并不一致)
~ bin/kafka-console-consumer.sh --bootstrap-server localhost:9092 --topic first


producer:
import java .util.Properties ;

import org .apache.kafka.clients.producer.KafkaProducer ;
import org .apache.kafka.clients.producer.Producer ;
import org .apache.kafka.clients.producer.ProducerRecord ;

public class ProducerTest {

    /**
     * Sends 100 string records ("key-i" / "value-i", i = 0..99) to the
     * "message" topic and exits.
     *
     * NOTE(review): the shell steps earlier in this document create a topic
     * named "first", but this producer writes to "message" — confirm which
     * topic is actually intended.
     */
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "master:9092,master:9093");
        props.put("acks", "all");       // wait for the full in-sync replica set to ack
        props.put("retries", 0);        // fail fast instead of retrying transient errors
        props.put("batch.size", 16384);
        props.put("linger.ms", 1);      // small delay to allow batching
        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");

        // try-with-resources: the original only closed the producer on the
        // happy path, so an exception in send() leaked the client and dropped
        // buffered records. close() also flushes pending sends.
        try (Producer<String, String> producer = new KafkaProducer<>(props)) {
            for (int i = 0; i < 100; i++) {
                producer.send(new ProducerRecord<>("message", "key-" + i, "value-" + i));
            }
        }
    }

}
consumer:
import java .util.Arrays ;
import java .util.Properties ;

import org .apache.kafka.clients.consumer.Consumer ;
import org .apache.kafka.clients.consumer.ConsumerRecord ;
import org .apache.kafka.clients.consumer.ConsumerRecords ;
import org .apache.kafka.clients.consumer.KafkaConsumer ;

public class ConsumerTest {

    /**
     * Subscribes to the "message" topic and polls forever, printing each
     * record's offset, key, and value to stdout. Offsets are auto-committed
     * every second ("enable.auto.commit" / "auto.commit.interval.ms").
     */
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "master:9092,master:9093");
        props.put("group.id", "test");              // consumer group id
        props.put("enable.auto.commit", "true");    // commit offsets automatically
        props.put("auto.commit.interval.ms", "1000");
        props.put("session.timeout.ms", "30000");
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        // try-with-resources: the original never closed the consumer, so on
        // any exception the group coordinator had to wait for the session
        // timeout instead of seeing a clean leave. The loop itself is
        // intentionally infinite; close() runs only if poll()/printing throws.
        try (Consumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Arrays.asList("message"));
            while (true) {
                ConsumerRecords<String, String> records = consumer.poll(10);
                for (ConsumerRecord<String, String> record : records) {
                    System.out.println("offset=" + record.offset()
                            + ",--key=" + record.key()
                            + ",--value=" + record.value());
                }
            }
        }
    }
}

猜你喜欢

转载自blog.csdn.net/singgel/article/details/79999477