IP Geolocation Statistics II: Optimization (Broadcast Variable)

This post optimizes the previous one, IP Geolocation Statistics I, by distributing the IP rules with a broadcast variable.
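
The driver program below relies on the helper object IpFromUtils from the previous post. Its source is not repeated here, so the following is a minimal sketch of what its three methods could look like. The '|'-delimited rule-file layout (decimal range start and end in fields 2 and 3, province in field 6) and the assumption that the rule file is already sorted by range start are guesses; adjust the indices to your actual data.

package com.ws.spark

/**
  * Minimal sketch of the helper used by IpFromCount2 (the real version lives
  * in the previous post). Field indices are assumptions about the rule file.
  */
object IpFromUtils {

  // Parse one rule line into (startIp, endIp, province).
  // Assumed layout: fields(2) = decimal range start, fields(3) = decimal
  // range end, fields(6) = province. Adjust to match your rule file.
  def generalRules(line: String): (Long, Long, String) = {
    val fields = line.split("[|]")
    (fields(2).toLong, fields(3).toLong, fields(6))
  }

  // Convert a dotted-decimal IP such as "125.213.100.123" to its Long value
  def ipToLong(ip: String): Long = {
    ip.split("[.]").foldLeft(0L)((num, part) => (num << 8) | part.toLong)
  }

  // Binary search over rules sorted by range start; returns the index of the
  // range that contains ipNum, or -1 if no range matches
  def binarySearch(rules: Array[(Long, Long, String)], ipNum: Long): Int = {
    var low = 0
    var high = rules.length - 1
    while (low <= high) {
      val mid = (low + high) >>> 1
      if (ipNum >= rules(mid)._1 && ipNum <= rules(mid)._2) return mid
      else if (ipNum < rules(mid)._1) high = mid - 1
      else low = mid + 1
    }
    -1
  }
}

With that helper in place, the optimized driver looks like this: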

package com.ws.spark
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
  * Counts how many times each IP's home province appears in the log (optimized version)
  */
object IpFromCount2 {

  def main(args: Array[String]): Unit = {

    val conf = new SparkConf().setAppName("IpFromCount2").setMaster("local[4]")

    val sc = new SparkContext(conf)

    // Read the IP rules from HDFS
    val rulesHDFS: RDD[String] = sc.textFile(args(0))

    val rules: RDD[(Long, Long, String)] = rulesHDFS.map(IpFromUtils.generalRules)

    // Collect the IP rules from all Executors back to the Driver
    val allRules: Array[(Long, Long, String)] = rules.collect()

    // Broadcast the Driver-side data to the Executors; broadCast holds the broadcast variable's reference (still on the Driver)
    val broadCast: Broadcast[Array[(Long, Long, String)]] = sc.broadcast(allRules)
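    // Why broadcast: without it, the rules array captured in the map closure
    // below would be serialized and shipped with every task; with a broadcast
    // variable each Executor fetches the data once and all of its tasks share
    // a single read-only copy.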

    // Read the log data
    val data: RDD[String] = sc.textFile(args(1))

    // Clean the data: map each log line to a (province, 1) pair
    val provinceData: RDD[(String, Int)] = data.map(line => {
      // This function runs inside the Executors
      val lineArr: Array[String] = line.split("[|]")
      val ip = lineArr(1)
      // Convert the dotted-decimal IP to its decimal (Long) value
      val ipNum: Long = IpFromUtils.ipToLong(ip)

      // Using the broadcast variable: how does a Driver-side variable reach the Executors?
      // Tasks are generated on the Driver, and the broadcast variable's reference is sent to the Executors along with each task.
      val broadCastValue: Array[(Long, Long, String)] = broadCast.value

      val index: Int = IpFromUtils.binarySearch(broadCastValue, ipNum)

      var province = "未知地区" // default: "unknown region"

      if (index != -1) {
        province = broadCastValue(index)._3
      }
      (province, 1)
    })

    // Aggregate: sum the counts per province
    val reduce: RDD[(String, Int)] = provinceData.reduceByKey(_ + _)
    // Sort by count in descending order
    val sort: RDD[(String, Int)] = reduce.sortBy(_._2, false)

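    // collect() pulls the small, already-aggregated result back to the Driver for printing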
    println(sort.collect().toBuffer)

    sc.stop()
  }
}
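
To run the job, pass the rule file as args(0) and the log file as args(1). A hypothetical invocation follows; the jar name and HDFS paths are placeholders, and note that the master is hardcoded to local[4], so the job runs locally:

spark-submit --class com.ws.spark.IpFromCount2 ip-count.jar hdfs://node-1:9000/iprules/ip.txt hdfs://node-1:9000/access.log

Each log line is '|'-delimited with the client IP in its second field, and the program prints the (province, count) pairs in descending order of count.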

Reposted from blog.csdn.net/bb23417274/article/details/82935264