Spark UDAF Usage (2)

```scala
package com.dt.spark

import org.apache.spark.sql.expressions.{MutableAggregationBuffer, UserDefinedAggregateFunction}
import org.apache.spark.sql.types._
import org.apache.spark.sql.{Row, SQLContext}
import org.apache.spark.{SparkConf, SparkContext}

object SparkSQLUDFUDAF {
  def main(args: Array[String]) {
    System.setProperty("hadoop.home.dir", "G:/datarguru spark/tool/hadoop-2.6.0")
    val conf = new SparkConf()
    conf.setAppName("SparkSQLUDFUDAF")
    conf.setMaster("local")
    val sc = new SparkContext(conf)
    val sqlContext = new SQLContext(sc)

    // Simulated input data
    val bigData = Array("Spark", "Spark", "Hadoop", "Spark", "Hadoop", "Spark", "Spark", "Hadoop", "Spark", "Hadoop")

    // Build a DataFrame from the data above
    val bigDataRDD = sc.parallelize(bigData)
    val bigDataRow = bigDataRDD.map(item => Row(item))
    val structType = StructType(Array(StructField("word", StringType, true)))
    val bigDataDF = sqlContext.createDataFrame(bigDataRow, structType)
    bigDataDF.registerTempTable("bigDataTable") // register as a temporary table

    // Register a UDF through SQLContext; in Scala 2.10.x a UDF can take at most 22 input parameters
    sqlContext.udf.register("computeLength", (input: String) => input.length)

    // Use the UDF directly in a SQL statement, just like a built-in SQL function
    sqlContext.sql("select word, computeLength(word) as length from bigDataTable").show()

    // Register the UDAF and use it in a grouped query
    sqlContext.udf.register("wordCount", new MyUDAF)
    sqlContext.sql("select word, wordCount(word) as count, computeLength(word) " +
      "as length from bigDataTable group by word").show()

    while (true) {} // keep the driver alive
  }
}
```
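With the ten sample strings above, the first query should return a length of 5 for every "Spark" row and 6 for every "Hadoop" row, and the grouped query should report a wordCount of 6 for "Spark" and 4 for "Hadoop". For reference, the registered UDF can also be called through the DataFrame API instead of SQL; a minimal sketch, assuming the same `bigDataDF` and `computeLength` registration as above:

```scala
import org.apache.spark.sql.functions.{callUDF, col}

// Equivalent to the first SQL query above, expressed with the DataFrame API
bigDataDF.select(col("word"), callUDF("computeLength", col("word")).as("length")).show()
```

The UDAF itself is implemented as a separate class: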
```scala
class MyUDAF extends UserDefinedAggregateFunction { // Ctrl+I in the IDE generates the override stubs

  /**
   * Specifies the type of the input data.
   */
  override def inputSchema: StructType = StructType(Array(StructField("input", StringType, true)))

  /**
   * The type of the intermediate buffer used while aggregating.
   */
  override def bufferSchema: StructType = StructType(Array(StructField("count", IntegerType, true)))

  /**
   * The result type returned by the UDAF.
   */
  override def dataType: DataType = IntegerType

  override def deterministic: Boolean = true

  /**
   * Initializes each group's buffer before aggregation starts.
   */
  override def initialize(buffer: MutableAggregationBuffer): Unit = { buffer(0) = 0 }

  /**
   * Called for every new input value of a group; defines how the grouped aggregation
   * is computed. This is the local (partition-level) aggregation, analogous to a
   * Combiner in the Hadoop MapReduce model (the Row here is unrelated to the DataFrame's Row).
   */
  override def update(buffer: MutableAggregationBuffer, input: Row): Unit = {
    buffer(0) = buffer.getAs[Int](0) + 1
  }

  /**
   * After the local reduce finishes on the distributed nodes, merges the partial
   * buffers into the global result.
   */
  override def merge(buffer1: MutableAggregationBuffer, buffer2: Row): Unit = {
    buffer1(0) = buffer1.getAs[Int](0) + buffer2.getAs[Int](0)
  }

  /**
   * Returns the final result of the UDAF.
   */
  override def evaluate(buffer: Row): Any = buffer.getAs[Int](0)
}
```
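Note that `UserDefinedAggregateFunction` has been deprecated since Spark 3.0. A minimal sketch of the same word-count aggregation using the `Aggregator` API that replaced it (assumes Spark 3.x and a `SparkSession` named `spark`; not part of the original example):

```scala
import org.apache.spark.sql.expressions.Aggregator
import org.apache.spark.sql.functions.udaf
import org.apache.spark.sql.{Encoder, Encoders}

// Counts the rows in each group, equivalent to MyUDAF above
object WordCountAggregator extends Aggregator[String, Int, Int] {
  def zero: Int = 0                                        // initial buffer value
  def reduce(buffer: Int, word: String): Int = buffer + 1  // per-partition update
  def merge(b1: Int, b2: Int): Int = b1 + b2               // merge partial buffers
  def finish(reduction: Int): Int = reduction              // final result
  def bufferEncoder: Encoder[Int] = Encoders.scalaInt
  def outputEncoder: Encoder[Int] = Encoders.scalaInt
}

// Hypothetical registration on Spark 3.x:
// spark.udf.register("wordCount", udaf(WordCountAggregator))
```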

Reprinted from blog.csdn.net/seareal1/article/details/80325610