Reading Kafka Data with Spark Streaming

package kafka

import org.apache.spark.SparkConf
import org.apache.spark.streaming.kafka010.KafkaUtils
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.streaming.kafka010.LocationStrategies.PreferConsistent
import org.apache.spark.streaming.kafka010.ConsumerStrategies.Subscribe
/* Remember to add the dependency:
<dependency>
  <groupId>org.apache.spark</groupId>
  <artifactId>spark-streaming-kafka-0-10_2.11</artifactId>
  <version>2.2.0</version>
</dependency>*/

object KafkaSparkStreaming {

  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setMaster("local[*]").setAppName("data from kafka")
    // Duration(5) would mean a 5-millisecond batch interval; use Seconds(5) instead
    val ssc = new StreamingContext(conf, Seconds(5))

    // Kafka consumer configuration
    val kafkaParams = Map[String, Object](
      "bootstrap.servers" -> "master:9092,master:9093",
      "key.deserializer" -> "org.apache.kafka.common.serialization.StringDeserializer",
      "value.deserializer" -> "org.apache.kafka.common.serialization.StringDeserializer",
      "group.id" -> "kafkaTest",
      "enable.auto.commit" -> "false"
    )

    // Topics to subscribe to (the topic must already exist in Kafka)
    val topics = Set("spark")
    val kafkaDstream = KafkaUtils.createDirectStream[String, String](
      ssc,
      PreferConsistent,                               // location strategy
      Subscribe[String, String](topics, kafkaParams)  // consumer strategy
    )

    // Word count over the message values of each batch
    kafkaDstream.map(_.value())
      .flatMap(_.split("\\s+"))
      .map((_, 1))
      .reduceByKey(_ + _)
      .print()
    
    ssc.start()
    ssc.awaitTermination()
  }
}
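
Since enable.auto.commit is set to false, the job above never records its consumed offsets back to Kafka. The kafka-0-10 integration exposes CanCommitOffsets for committing the offset ranges of each batch yourself. A minimal sketch (it would take the place of the print() step, operating on the raw stream returned by createDirectStream):

import org.apache.spark.streaming.kafka010.{CanCommitOffsets, HasOffsetRanges}

// Offset ranges are only available on the stream returned by createDirectStream,
// before any transformation changes the underlying RDD type.
kafkaDstream.foreachRDD { rdd =>
  val offsetRanges = rdd.asInstanceOf[HasOffsetRanges].offsetRanges
  // ... process rdd here ...
  // Commit asynchronously once the batch has been handled
  kafkaDstream.asInstanceOf[CanCommitOffsets].commitAsync(offsetRanges)
}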

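To feed the word count some input, you can publish a few lines to the spark topic with the plain kafka-clients producer. A minimal sketch, assuming the same broker address as the consumer config above:

import java.util.Properties
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord}

object TestProducer {
  def main(args: Array[String]): Unit = {
    val props = new Properties()
    props.put("bootstrap.servers", "master:9092")
    props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer")
    props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer")

    val producer = new KafkaProducer[String, String](props)
    // Whitespace-separated words, matching the split("\\s+") in the streaming job
    Seq("hello spark", "hello kafka", "spark streaming").foreach { line =>
      producer.send(new ProducerRecord[String, String]("spark", line))
    }
    producer.close()
  }
}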