Getting Started with Big Data: Spark + Kudu Advertising-Business Project Notes (Part 2)

Click here to jump to Part 1.

1. Developing Feature 2

Compute the distribution of record counts across provinces and cities by grouping on provincename and cityname:

package com.imooc.bigdata.cp08.business

import com.imooc.bigdata.cp08.utils.SQLUtils
import org.apache.spark.sql.SparkSession

object ProvinceCityStatApp {

  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .master("local[2]")
      .appName("ProvinceCityStatApp")
      .getOrCreate()

    // Read the source data from the Kudu ods table, then group it by province and city
    val sourceTableName = "ods"
    val masterAddress = "hadoop000"

    val odsDF = spark.read.format("org.apache.kudu.spark.kudu")
      .option("kudu.table", sourceTableName)
      .option("kudu.master", masterAddress)
      .load()
    //odsDF.show(false)

    odsDF.createOrReplaceTempView("ods")
    val result = spark.sql(SQLUtils.PROVINCE_CITY_SQL)
    result.show(false)

    spark.stop()
    
  }

}

The SQL statement lives in SQLUtils:

  lazy val PROVINCE_CITY_SQL = "select provincename,cityname,count(1) as cnt from ods group by provincename,cityname"
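
For reference, SQLUtils can be as small as an object holding that one statement. A minimal sketch (the course version may define additional constants):

package com.imooc.bigdata.cp08.utils

object SQLUtils {

  // Count rows per (provincename, cityname) pair in the ODS table
  lazy val PROVINCE_CITY_SQL = "select provincename, cityname, count(1) as cnt " +
    "from ods group by provincename, cityname"

}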

Running the app prints the grouped counts.

2. Sinking the Data to Kudu

The implementation of KuduUtils.sink is covered in the previous article (a sketch also follows the schema below):

    // Sink the aggregated result into a Kudu table hash-partitioned on provincename
    val sinkTableName = "province_city_stat"
    val partitionId = "provincename"
    val schema = SchemaUtils.ProvinceCitySchema

    KuduUtils.sink(result, sinkTableName, masterAddress, schema, partitionId)

The schema is defined as follows:

  import org.apache.kudu.{Schema, Type}
  import org.apache.kudu.ColumnSchema.ColumnSchemaBuilder
  import scala.collection.JavaConverters._

  lazy val ProvinceCitySchema: Schema = {
    val columns = List(
      new ColumnSchemaBuilder("provincename", Type.STRING).nullable(false).key(true).build(),
      new ColumnSchemaBuilder("cityname", Type.STRING).nullable(false).key(true).build(),
      // cnt is part of the primary key here too, matching the course code;
      // (provincename, cityname) alone would already be unique after the group-by
      new ColumnSchemaBuilder("cnt", Type.INT64).nullable(false).key(true).build()
    ).asJava

    new Schema(columns)
  }
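
Since KuduUtils.sink itself is only referenced in this post, here is a minimal sketch of what such a helper could look like. The drop-and-recreate semantics, the three hash buckets, and the replication factor of 1 are assumptions; the actual version from the previous article may differ:

package com.imooc.bigdata.cp08.utils

import org.apache.kudu.Schema
import org.apache.kudu.client.CreateTableOptions
import org.apache.kudu.spark.kudu.KuduContext
import org.apache.spark.sql.DataFrame

import scala.collection.JavaConverters._

object KuduUtils {

  // Recreate the target table from scratch, then write the DataFrame into it
  def sink(data: DataFrame, tableName: String, master: String,
           schema: Schema, partitionId: String): Unit = {
    val kuduContext = new KuduContext(master, data.sparkSession.sparkContext)

    // Dropping an existing table keeps repeated runs idempotent
    if (kuduContext.tableExists(tableName)) {
      kuduContext.deleteTable(tableName)
    }

    val options = new CreateTableOptions()
      .setNumReplicas(1)                             // assumption: single-node setup
      .addHashPartitions(Seq(partitionId).asJava, 3) // assumption: 3 hash buckets

    kuduContext.createTable(tableName, schema, options)
    kuduContext.insertRows(data, tableName)
  }

}

Recreating the table on every run trades history for idempotence; an upsert-based variant would keep the table and call kuduContext.upsertRows instead.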

After the write, read the table back to verify:

    spark.read.format("org.apache.kudu.spark.kudu")
      .option("kudu.master", masterAddress)
      .option("kudu.table", sinkTableName)
      .load().show()
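
Beyond a plain show(), you can sort by the count column to eyeball the largest groups; a small sketch reusing the same read:

import org.apache.spark.sql.functions.desc

val statDF = spark.read.format("org.apache.kudu.spark.kudu")
  .option("kudu.master", masterAddress)
  .option("kudu.table", sinkTableName)
  .load()

// Largest (province, city) groups first
statDF.orderBy(desc("cnt")).show(10, false)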

If the data is there, we're all set!

 



Source: blog.csdn.net/qq_36329973/article/details/104513468