Spark exercise: counting visits by IP geolocation


IP address geolocation exercise

Input data: user access logs.

**Requirements:**
Using the IP address in each access log entry, determine the visitor's home region, aggregate the visit counts by province, and write the results to MySQL.

**Implementation steps**
1. Load the IP geolocation dictionary and split out the IP range and province fields (converting the IP addresses to decimal Longs for fast lookup later; see the worked example after this list)
2. Broadcast the IP/province data so it is cached in memory on every node (in each Executor's memory)
3. Parse the IP address out of each access log entry and match it against the IP ranges to find the region, i.e. the province
4. For each log entry, pair the resolved province with 1 and return the tuple
5. Aggregate by province name
6. Write the aggregated results to MySQL
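
The decimal conversion in step 1 just shifts each octet into its own byte of a Long. A minimal worked example of the same loop used by ip2Long below:

// 192.168.1.1 -> ((192*256 + 168)*256 + 1)*256 + 1 = 3232235777
// each iteration shifts the accumulated value left 8 bits and ORs in the next octet
val ip = "192.168.1.1"
var ipNum = 0L
for (fragment <- ip.split("[.]")) {
  ipNum = fragment.toLong | ipNum << 8L
}
println(ipNum) // 3232235777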

// (1) The basic RDD approach
import java.sql.{Connection, Date, DriverManager, PreparedStatement}

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}
//D:\数据\IPSearch
object IPSearch {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("IPSearch").setMaster("local[2]")
    val sc = new SparkContext(conf)
    val ipInfo = sc.textFile("D:\\数据\\IPSearch\\ip.txt")
    // split each dictionary line into its fields
    val splitIPInfo = ipInfo.map(x => {
      val fields = x.split("\\|")
      val startIP = fields(2)   // start of the IP range
      val endIP = fields(3)     // end of the IP range
      val province = fields(6)  // province the range maps to
      (startIP, endIP, province)
    })

    // To use a broadcast variable, first pull the RDD's data to the driver
    // with an action, then broadcast it from the driver to the workers
    val arrIPInfo = splitIPInfo.collect()

    // define the broadcast variable
    val broadcastIPInfo = sc.broadcast(arrIPInfo)
    // read the user clickstream log and resolve each entry's province
    val provinceAndOne = sc.textFile("D:\\数据\\IPSearch\\http.log").map(line => {
      val fields = line.split("\\|")
      val ip = fields(1)                     // the user's IP
      val ipToLong = ip2Long(ip)             // the IP as a decimal Long
      val arrIPInfo = broadcastIPInfo.value  // the broadcast IP dictionary
      val index = binarySearch(arrIPInfo, ipToLong)
      // guard against IPs outside every range (binarySearch returns -1)
      val province = if (index != -1) arrIPInfo(index)._3 else "未知"
      (province, 1)
    })

    // 5. aggregate visit counts by province name
    val res: RDD[(String, Int)] = provinceAndOne.reduceByKey(_ + _)
    // 6. write the aggregated results to the database
    res.foreachPartition(data2MySql)
    sc.stop()

  }

  // convert an IP string to a decimal Long
  def ip2Long(ip: String): Long = {
    val fragments: Array[String] = ip.split("[.]")
    var ipNum = 0L
    for (i <- 0 until fragments.length) {
      ipNum = fragments(i).toLong | ipNum << 8L
    }
    ipNum
  }
  // binary search: return the index of the range containing the IP, or -1
  def binarySearch(arr: Array[(String, String, String)], ip: Long): Int = {
    var low = 0
    var high = arr.length - 1
    while (low <= high) {
      val middle = (low + high) / 2
      if (ip >= arr(middle)._1.toLong && ip <= arr(middle)._2.toLong) {
        return middle
      }
      if (ip < arr(middle)._1.toLong) {
        high = middle - 1
      } else {
        low = middle + 1
      }
    }
    -1 // no range matched
  }
  // write one partition's results to the database
  val data2MySql = (it: Iterator[(String, Int)]) => {
    var conn: Connection = null
    var ps: PreparedStatement = null
    val sql = "insert into location_info(location, counts, access_date) values(?, ?, ?)"

    try {
      conn = DriverManager.getConnection(
        "jdbc:mysql://192.168.88.130:3306/sessioanalyze?useUnicode=true&characterEncoding=utf8",
        "root", "root")
      // prepare the statement once and reuse it for every row in the partition
      ps = conn.prepareStatement(sql)
      it.foreach(line => {
        ps.setString(1, line._1)
        ps.setInt(2, line._2)
        ps.setDate(3, new Date(System.currentTimeMillis()))
        ps.executeUpdate()
      })
    } finally {
      if (ps != null) ps.close()
      if (conn != null) conn.close()
    }
  }
}
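
Both versions of data2MySql assume the location_info table already exists. A minimal sketch of the one-time setup; the column types and lengths below are assumptions inferred from the INSERT statement, not taken from the original post:

import java.sql.DriverManager

object CreateLocationTable {
  def main(args: Array[String]): Unit = {
    val conn = DriverManager.getConnection(
      "jdbc:mysql://192.168.88.130:3306/sessioanalyze?useUnicode=true&characterEncoding=utf8",
      "root", "root")
    val stmt = conn.createStatement()
    // schema inferred from insert into location_info(location, counts, access_date)
    stmt.executeUpdate(
      """create table if not exists location_info (
        |  location    varchar(64),
        |  counts      int,
        |  access_date date
        |)""".stripMargin)
    stmt.close()
    conn.close()
  }
}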

Approach 2: using DataFrames

First, define a utility object:

import java.sql.{Connection, Date, DriverManager, PreparedStatement}

object utils {
  // convert an IP string to a decimal Long
  def ip2Long(ip: String): Long = {
    val fragments: Array[String] = ip.split("[.]")
    var ipNum = 0L
    for (i <- 0 until fragments.length) {
      ipNum = fragments(i).toLong | ipNum << 8L
    }
    ipNum
  }
  // binary search: return the index of the range containing the IP, or -1
  def binarySearch(arr: Array[(String, String, String)], ip: Long): Int = {
    var low = 0
    var high = arr.length - 1
    while (low <= high) {
      val middle = (low + high) / 2
      if (ip >= arr(middle)._1.toLong && ip <= arr(middle)._2.toLong) {
        return middle
      }
      if (ip < arr(middle)._1.toLong) {
        high = middle - 1
      } else {
        low = middle + 1
      }
    }
    -1 // no range matched
  }

  // write one partition's results to the database
  val data2MySql = (it: Iterator[(String, Int)]) => {
    var conn: Connection = null
    var ps: PreparedStatement = null
    val sql = "insert into location_info(location, counts, access_date) values(?, ?, ?)"

    try {
      conn = DriverManager.getConnection(
        "jdbc:mysql://192.168.88.130:3306/sessioanalyze?useUnicode=true&characterEncoding=utf8",
        "root", "root")
      // prepare the statement once and reuse it for every row in the partition
      ps = conn.prepareStatement(sql)
      it.foreach(line => {
        ps.setString(1, line._1)
        ps.setInt(2, line._2)
        ps.setDate(3, new Date(System.currentTimeMillis()))
        ps.executeUpdate()
      })
    } finally {
      if (ps != null) ps.close()
      if (conn != null) conn.close()
    }
  }

}
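
A quick sanity check for the two helpers, using a tiny made-up dictionary; the ranges and province names here are illustrative, not taken from the real ip.txt:

object UtilsDemo {
  def main(args: Array[String]): Unit = {
    // two fabricated ranges; real ip.txt rows carry many more fields
    val dict = Array(
      ("16777216", "16777471", "ProvinceA"), // 1.0.0.0 - 1.0.0.255
      ("16777472", "16778239", "ProvinceB")) // 1.0.1.0 - 1.0.3.255
    val ipNum = utils.ip2Long("1.0.2.7")     // 16777735, inside the second range
    val index = utils.binarySearch(dict, ipNum)
    println(if (index != -1) dict(index)._3 else "未知") // prints ProvinceB
  }
}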

Now the DataFrame version:

import org.apache.spark.sql.{DataFrame, Dataset, SparkSession}

// load the dictionary and the logs into two DataFrames, register them as two tables, and join them to get the result
object Exercese2 {
  def main(args: Array[String]): Unit = {
    // create a SparkSession
    val sparkSession = SparkSession.builder().appName("Exercese2").master("local[*]").getOrCreate()

    // load and parse the dictionary data
    val dictds = sparkSession.read.textFile("D:\\数据\\IPSearch\\ip.txt")

    // import the implicit conversions
    import sparkSession.implicits._
    val dictdf: DataFrame = dictds.map(line=>{
      val fields = line.split("[|]")
      val start=fields(2).toLong
      val stop=fields(3).toLong
      val province = fields(6)
      (start,stop,province)
    }).toDF("start","stop","province")

    val logds = sparkSession.read.textFile("D:\\数据\\IPSearch\\http.log")
    val logdf = logds.map(line => {
      val fields = line.split("[|]")
      val ip = fields(1)
      utils.ip2Long(ip) // the numeric IP is the only column
    }).toDF("ip_Num")

    // register both DataFrames as temporary views
    dictdf.createTempView("v_dic")
    logdf.createTempView("v_log")

    // The SQL below gets the right answer, but a pure range join like this
    // needs a lot of memory and is relatively slow (see the broadcast-hint sketch after the output)
    val res = sparkSession.sql("select province,count(*) counts from v_dic join v_log " +
      "on(ip_Num>=start AND ip_Num<=stop) group by province order by counts desc")
    res.show()

    sparkSession.stop()
  }
}
/* Output:
+--------+------+
|province|counts|
+--------+------+
|      陕西|  1824|
|      北京|  1535|
|      重庆|   868|
|      河北|   383|
|      云南|   126|
+--------+------+
 */
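As hinted above, one common mitigation for the slow range join is to tell Spark to broadcast the small dictionary side with the broadcast hint from org.apache.spark.sql.functions, so every executor joins against an in-memory copy. A sketch of the DataFrame-API form, reusing dictdf and logdf from Exercese2's main in place of the SQL:

import org.apache.spark.sql.functions.{broadcast, desc}

// the same range join, with the small dictionary side explicitly broadcast
val res2 = logdf.join(broadcast(dictdf),
    logdf("ip_Num") >= dictdf("start") && logdf("ip_Num") <= dictdf("stop"))
  .groupBy("province")
  .count()                // note: the count column here is named "count"
  .orderBy(desc("count"))
res2.show()
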
Approach 3: using a user-defined function (UDF)
import org.apache.spark.sql.{DataFrame, SparkSession}
object Exercese3 {

  // (3) the UDF approach
  def main(args: Array[String]): Unit = {
    // create a SparkSession
    val sparkSession = SparkSession.builder().appName("Exercese3").master("local[*]").getOrCreate()
    // load the IP dictionary data
    val dictds = sparkSession.read.textFile("D:\\数据\\IPSearch\\ip.txt")

    // import the implicit conversions
    import sparkSession.implicits._
    val dictdf = dictds.map(line=>{
      val fields = line.split("[|]")
      val start=fields(2)
      val stop=fields(3)
      val province = fields(6)
      (start,stop,province)
    })
    // collect the dictionary to the driver
    val dictinfo = dictdf.collect()
    // broadcast it to the workers
    val broadcast = sparkSession.sparkContext.broadcast(dictinfo)
    // read the access logs
    val logds = sparkSession.read.textFile("D:\\数据\\IPSearch\\http.log")
    // parse out the numeric IP
    val logdf = logds.map(line => {
      val fields = line.split("[|]")
      val ip = fields(1)
      utils.ip2Long(ip)
    }).toDF("ip_num")
    // register the view
    logdf.createTempView("v_log")
    // a UDF that maps a numeric IP to a province
    val iptoprovince = (ipnum:Long)=>{
      // read the broadcast dictionary
      val ipdict = broadcast.value
      val index=utils.binarySearch(ipdict,ipnum)
      var province="未知"
      if (index != -1){
        province=ipdict(index)._3
      }
      province
    }
    // register the UDF (when concatenating the SQL string across lines, keep the trailing space)
    sparkSession.udf.register("iptoprovince", iptoprovince)
    val res = sparkSession.sql("select iptoprovince(ip_num) province, count(*) counts from v_log " +
      "group by province order by counts desc")


    res.show()
    sparkSession.stop()
  }
}
/* Output:
+--------+------+
|province|counts|
+--------+------+
|      陕西|  1824|
|      北京|  1535|
|      重庆|   868|
|      河北|   383|
|      云南|   126|
+--------+------+
 */
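
For reference, the same lookup can also be expressed without registering a SQL function, using the udf helper from org.apache.spark.sql.functions. A sketch to splice into Exercese3's main in place of the SQL version, reusing broadcast and logdf from above:

import org.apache.spark.sql.functions.{udf, desc}

// wrap the lookup in a DataFrame-API UDF instead of a SQL-registered one
val ipToProvince = udf((ipnum: Long) => {
  val ipdict = broadcast.value
  val index = utils.binarySearch(ipdict, ipnum)
  if (index != -1) ipdict(index)._3 else "未知"
})
val res2 = logdf.withColumn("province", ipToProvince(logdf("ip_num")))
  .groupBy("province")
  .count()
  .orderBy(desc("count"))
res2.show()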

Summary: compared with the plain SQL range join of approach 2, the third approach (broadcast dictionary plus UDF) is also far more efficient.
