package com.dt.spark

import org.apache.spark.sql.expressions.{MutableAggregationBuffer, UserDefinedAggregateFunction}
import org.apache.spark.sql.types._
import org.apache.spark.sql.{Row, SQLContext}
import org.apache.spark.{SparkConf, SparkContext}

object SparkSQLUDFUDAF {

  def main(args: Array[String]): Unit = {
    System.setProperty("hadoop.home.dir", "G:/datarguru spark/tool/hadoop-2.6.0")

    val conf = new SparkConf()
    conf.setAppName("SparkSQLUDFUDAF")
    conf.setMaster("local")
    val sc = new SparkContext(conf)
    val sqlContext = new SQLContext(sc)

    // Sample data simulating real input
    val bigData = Array("Spark", "Spark", "Hadoop", "Spark", "Hadoop", "Spark", "Spark", "Hadoop", "Spark", "Hadoop")

    // Build a DataFrame from the sample data
    val bigDataRDD = sc.parallelize(bigData)
    val bigDataRow = bigDataRDD.map(item => Row(item))
    val structType = StructType(Array(StructField("word", StringType, true)))
    val bigDataDF = sqlContext.createDataFrame(bigDataRow, structType)
    bigDataDF.registerTempTable("bigDataTable") // register as a temporary table

    // Register a UDF through SQLContext; in Scala 2.10.x a UDF can accept at most 22 input parameters
    sqlContext.udf.register("computeLength", (input: String) => input.length)

    // Use the UDF directly in a SQL statement, just like a built-in SQL function
    sqlContext.sql("select word, computeLength(word) as length from bigDataTable").show()

sqlContext.udf.register("wordCount", new MyUDAF)
- 1
sqlContext.sql("select word,wordCount(word) as count,computeLength(word) " +
- 1
"as length from bigDataTable group by word").show()
- 1
while(true){}
- 1
- 1
}
- 1
- 1
}
- 1
- 1
// Custom UDAF; in IntelliJ IDEA, Ctrl+I generates stubs for the methods to override
class MyUDAF extends UserDefinedAggregateFunction {

  /**
   * Specifies the type of the input data
   * @return
   */
  override def inputSchema: StructType = StructType(Array(StructField("input", StringType, true)))

  /**
   * The type of the intermediate buffer that holds partial results during aggregation
   * @return
   */
  override def bufferSchema: StructType = StructType(Array(StructField("count", IntegerType, true)))

  /**
   * The result type returned by the UDAF after the computation finishes
   * @return
   */
  override def dataType: DataType = IntegerType

  override def deterministic: Boolean = true

  /**
   * Initializes each group's buffer before aggregation starts
   * @param buffer
   */
  override def initialize(buffer: MutableAggregationBuffer): Unit = { buffer(0) = 0 }

  /**
   * Called whenever a new value arrives for a group; defines how the grouped
   * aggregation is computed. This is the node-local aggregation step, analogous
   * to the Combiner in the Hadoop MapReduce model (the Row here holds the input
   * values for this call, not a row of the original DataFrame)
   * @param buffer
   * @param input
   */
  override def update(buffer: MutableAggregationBuffer, input: Row): Unit = {
    buffer(0) = buffer.getAs[Int](0) + 1
  }

  /**
   * After the local reduce finishes on the distributed nodes, a global merge
   * combines the partial results from the different buffers
   * @param buffer1
   * @param buffer2
   */
  override def merge(buffer1: MutableAggregationBuffer, buffer2: Row): Unit = {
    buffer1(0) = buffer1.getAs[Int](0) + buffer2.getAs[Int](0)
  }

  /**
   * Returns the final result of the UDAF
   * @param buffer
   * @return
   */
  override def evaluate(buffer: Row): Any = buffer.getAs[Int](0)
}
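
For reference, with the sample data above ("Spark" appears 6 times, "Hadoop" 4 times), the grouped query should print a table along these lines; row order may vary depending on how Spark shuffles the groups:

+------+-----+------+
|  word|count|length|
+------+-----+------+
| Spark|    6|     5|
|Hadoop|    4|     6|
+------+-----+------+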
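
The registered UDAF can also be exercised through the DataFrame API instead of SQL. A minimal sketch, assuming the bigDataDF built above and a Spark version where a UserDefinedAggregateFunction instance can be applied to columns directly (1.5+); wordCountUDAF is a name introduced here for illustration:

import org.apache.spark.sql.functions.col

// Instantiate the UDAF and apply it to the grouping column
val wordCountUDAF = new MyUDAF
bigDataDF.groupBy(col("word"))
  .agg(wordCountUDAF(col("word")).as("count"))
  .show()

This should produce the same counts as the SQL query above, since both paths resolve to the same aggregate expression.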
Reposted from blog.csdn.net/seareal1/article/details/80325610