Hive UDAF Development

A UDAF is a user-defined aggregate function: it takes zero or more columns from zero or more rows as input and returns a single value. UDAFs are typically used with a GROUP BY clause, e.g. select dept, avg(score) from t group by dept. Built-in examples include sum(col), avg(col), max(col), and std(col).

Below we introduce Hive UDAFs by re-implementing Hive's avg function to compute an average.

First, add the Maven dependency:

        <dependency>
            <groupId>org.apache.hive</groupId>
            <artifactId>hive-exec</artifactId>
            <version>2.3.2</version>
        </dependency>

In older versions of Hive, the main approach was to write a class extending UDAF and declare an inner class implementing the UDAFEvaluator interface. The code is more concise than its new-style counterpart:

package blog;


import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hive.ql.exec.UDAF;
import org.apache.hadoop.hive.ql.exec.UDAFEvaluator;
import org.apache.hadoop.hive.ql.metadata.HiveException;

/**
 * @Author
 * @Description Old UDAF
 **/

public class OldUDAF extends UDAF {
    // Logger
    static final Log LOG = LogFactory.getLog(OldUDAF.class.getName());

    //A static inner class implementing UDAFEvaluator; like evaluate() in a UDF, this is where the main logic lives
    public static class AvgUDAF implements UDAFEvaluator {

        //The aggregation state for the UDAF; computing an average needs the running sum and the count
        public static class Column {
            double sum = 0;
            int count = 0;
        }

        private Column col = null;

        public AvgUDAF() {
            super();
            init();
        }

        //Initialize the evaluator and reset its internal state; a fresh Column() means no values have been aggregated yet.
        public void init() {
            LOG.debug("Initialize evaluator");
            col = new Column();
        }

        //Called whenever there is a new value to aggregate; returning true indicates the input was valid.
        public boolean iterate(double value) throws HiveException {
            LOG.debug("Iterating over each value for aggregation");
            if (col == null)
                throw new HiveException("Item is not initialized");
            col.sum = col.sum + value;
            col.count = col.count + 1;
            return true;
        }

        //Called when Hive wants the result of a partial aggregation. It must return an object encapsulating the aggregation state.
        public Column terminatePartial() {
            LOG.debug("Return partially aggregated results");
            return col;
        }

        //Called when Hive decides to merge one partial aggregation with another.
        public boolean merge(Column other) {
            LOG.debug("merging by combining partial aggregation");
            if (other == null) {
                return true;
            }
            col.sum += other.sum;
            col.count += other.count;
            return true;
        }

        //Called when the final result of the aggregation is needed.
        //Returning Double (rather than double) lets us return null for an empty group instead of dividing by zero.
        public Double terminate() {
            LOG.debug("At the end of last record of the group - returning final result");
            if (col == null || col.count == 0) {
                return null;
            }
            return col.sum / col.count;
        }
    }
}
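Because the old-style evaluator is an ordinary Java class, it is easy to sanity-check outside Hive. Below is a minimal local sketch (the OldUDAFLocalTest class name is just for illustration) that walks the evaluator through its lifecycle: the constructor calls init(), iterate() consumes one value per row, and terminate() returns the average:

package blog;

public class OldUDAFLocalTest {
    public static void main(String[] args) throws Exception {
        // Constructor calls init(), so the aggregation state starts empty
        OldUDAF.AvgUDAF eval = new OldUDAF.AvgUDAF();
        // Feed the five sample scores used later in this post
        for (double v : new double[]{99, 88, 77, 66, 55}) {
            eval.iterate(v);
        }
        System.out.println(eval.terminate()); // prints 77.0
    }
}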

The approach above is now deprecated. The current mainstream way to write a Hive UDAF is to extend AbstractGenericUDAFResolver and declare an evaluator class that extends GenericUDAFEvaluator:

package blog;

import java.util.ArrayList;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hive.ql.exec.UDFArgumentTypeException;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.parse.SemanticException;
import org.apache.hadoop.hive.ql.udf.generic.AbstractGenericUDAFResolver;
import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator;
import org.apache.hadoop.hive.serde2.io.DoubleWritable;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory;
import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.StructField;
import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.DoubleObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.LongObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorUtils;
import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.util.StringUtils;

/**
 * @Author Daniel
 * @Description New UDAF
 **/

public class NewUDAF extends AbstractGenericUDAFResolver {

    static final Log LOG = LogFactory.getLog(NewUDAF.class.getName());

    //Override getEvaluator to validate the arguments passed in
    @Override
    public GenericUDAFEvaluator getEvaluator(TypeInfo[] parameters)
            throws SemanticException {
        if (parameters.length != 1) {
            throw new UDFArgumentTypeException(parameters.length - 1,
                    "Exactly one argument is expected.");
        }

        if (parameters[0].getCategory() != ObjectInspector.Category.PRIMITIVE) {
            throw new UDFArgumentTypeException(0,
                    "Only primitive type arguments are accepted but "
                            + parameters[0].getTypeName() + " is passed.");
        }
        switch (((PrimitiveTypeInfo) parameters[0]).getPrimitiveCategory()) {
            case BYTE:
            case SHORT:
            case INT:
            case LONG:
            case FLOAT:
            case DOUBLE:
            case STRING:
            case TIMESTAMP:
                return new GenericUDAFAverageEvaluator();
            case BOOLEAN:
            default:
                throw new UDFArgumentTypeException(0,
                        "Only numeric or string type arguments are accepted but "
                                + parameters[0].getTypeName() + " is passed.");
        }
    }


    //Define a class extending GenericUDAFEvaluator to implement the main logic
    public static class GenericUDAFAverageEvaluator extends GenericUDAFEvaluator {

        //Global ObjectInspector (OI) instances describing the input and output data types, used to parse input and output values
        // input For PARTIAL1 and COMPLETE
        PrimitiveObjectInspector inputOI;

        // input For PARTIAL2 and FINAL
        // output For PARTIAL1 and PARTIAL2
        StructObjectInspector soi;
        StructField countField;
        StructField sumField;
        LongObjectInspector countFieldOI;
        DoubleObjectInspector sumFieldOI;

        //Global output buffers that hold the actual data
        // output For PARTIAL1 and PARTIAL2
        Object[] partialResult;

        // output For FINAL and COMPLETE
        DoubleWritable result;

        @Override
        public ObjectInspector init(Mode mode, ObjectInspector[] parameters)
                throws HiveException {
            assert (parameters.length == 1);
            super.init(mode, parameters);

            // init input
            if (mode == Mode.PARTIAL1 || mode == Mode.COMPLETE) {
                inputOI = (PrimitiveObjectInspector) parameters[0];
            } else {
                //When partial aggregates are the input, a struct OI specifies the input data type and parses it
                soi = (StructObjectInspector) parameters[0];
                countField = soi.getStructFieldRef("count");
                sumField = soi.getStructFieldRef("sum");
                //Each element of the struct is parsed with its own primitive OI
                countFieldOI = (LongObjectInspector) countField.getFieldObjectInspector();
                sumFieldOI = (DoubleObjectInspector) sumField.getFieldObjectInspector();
            }

            // init output
            if (mode == Mode.PARTIAL1 || mode == Mode.PARTIAL2) {

                //The partial aggregation result is an array of two fields
                partialResult = new Object[2];
                partialResult[0] = new LongWritable(0);
                partialResult[1] = new DoubleWritable(0);
                //Build a struct OI describing the type of the partial-result array; it is constructed from a list of field names and a list of field OIs
                ArrayList<String> fname = new ArrayList<String>();
                fname.add("count");
                fname.add("sum");
                ArrayList<ObjectInspector> foi = new ArrayList<ObjectInspector>();
                //These two OIs describe the two elements of partialResult[], so they match the writables above
                foi.add(PrimitiveObjectInspectorFactory.writableLongObjectInspector);
                foi.add(PrimitiveObjectInspectorFactory.writableDoubleObjectInspector);
                return ObjectInspectorFactory.getStandardStructObjectInspector(fname, foi);
            } else {
                //FINAL: the final result is a single value (a Hadoop writable type), described by a primitive OI
                result = new DoubleWritable(0);
                return PrimitiveObjectInspectorFactory.writableDoubleObjectInspector;
            }
        }

        //The in-memory buffer that caches the aggregation state
        static class AverageAgg implements AggregationBuffer {
            long count;
            double sum;
        }

        //Create a new aggregation buffer
        @Override
        public AggregationBuffer getNewAggregationBuffer() throws HiveException {
            AverageAgg result = new AverageAgg();
            reset(result);
            return result;
        }

        //Reset an aggregation buffer
        @Override
        public void reset(AggregationBuffer agg) throws HiveException {
            AverageAgg myagg = (AverageAgg) agg;
            myagg.count = 0;
            myagg.sum = 0;
        }

        boolean warned = false;

        //Iterate over the raw input rows
        @Override
        public void iterate(AggregationBuffer agg, Object[] parameters)
                throws HiveException {
            assert (parameters.length == 1);
            Object p = parameters[0];
            if (p != null) {
                AverageAgg myagg = (AverageAgg) agg;
                try {
                    //Parse the value of Object p via the primitive input OI
                    double v = PrimitiveObjectInspectorUtils.getDouble(p, inputOI);
                    myagg.count++;
                    myagg.sum += v;
                } catch (NumberFormatException e) {
                    if (!warned) {
                        warned = true;
                        LOG.warn(getClass().getSimpleName() + " "
                                + StringUtils.stringifyException(e));
                        LOG.warn(getClass().getSimpleName()
                                + " ignoring similar exceptions.");
                    }
                }
            }
        }

        //Produce the partial aggregation result
        @Override
        public Object terminatePartial(AggregationBuffer agg) throws HiveException {
            AverageAgg myagg = (AverageAgg) agg;
            ((LongWritable) partialResult[0]).set(myagg.count);
            ((DoubleWritable) partialResult[1]).set(myagg.sum);
            return partialResult;
        }

        //Merge a partial aggregation result into the buffer
        @Override
        public void merge(AggregationBuffer agg, Object partial)
                throws HiveException {
            if (partial != null) {
                AverageAgg myagg = (AverageAgg) agg;
                //Use the StandardStructObjectInspector instance to extract each element of the partial array
                Object partialCount = soi.getStructFieldData(partial, countField);
                Object partialSum = soi.getStructFieldData(partial, sumField);
                //Parse each Object's value via its primitive OI instance
                myagg.count += countFieldOI.get(partialCount);
                myagg.sum += sumFieldOI.get(partialSum);
            }
        }

        //Produce the final aggregation result
        @Override
        public Object terminate(AggregationBuffer agg) throws HiveException {
            AverageAgg myagg = (AverageAgg) agg;
            if (myagg.count == 0) {
                return null;
            } else {
                result.set(myagg.sum / myagg.count);
                return result;
            }
        }
    }

}

Notes on the Mode enum inside GenericUDAFEvaluator

Mode tells an evaluator instance which phase of the aggregation it is running in:

  • PARTIAL1: map phase; consumes raw rows and calls iterate() and terminatePartial()
  • PARTIAL2: map-side combine; consumes partial aggregates and calls merge() and terminatePartial()
  • FINAL: reduce phase; consumes partial aggregates and calls merge() and terminate()
  • COMPLETE: map-only aggregation; consumes raw rows and calls iterate() and terminate()

init() runs for every mode: it extracts the input type OIs for that mode and returns the output type OI.

  1. Input parameters:

    • For PARTIAL1 and COMPLETE, the input is raw data (a single value).
      This sets the input type OI for iterate():
      • an instance of WritableDoubleObjectInspector (an implementation of PrimitiveObjectInspector);
        the input values are parsed through this OI
    • For PARTIAL2 and FINAL, the input is partial aggregation data (two values, count and sum).
      This sets the input type OI for merge():
      • an instance of StandardStructObjectInspector (an implementation of StructObjectInspector); the input values are parsed through this OI
  2. Return value OI:

    • For PARTIAL1 and PARTIAL2, init() sets the OI for the value returned by terminatePartial(): the output OI is an instance of StandardStructObjectInspector (an implementation of StructObjectInspector)
    • For FINAL and COMPLETE, init() sets the OI for the value returned by terminate(): the output OI is an instance of WritableDoubleObjectInspector (an implementation of PrimitiveObjectInspector)
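To make the mode flow concrete, here is a minimal local sketch that drives two evaluator instances through the PARTIAL1 (map side) and FINAL (reduce side) paths by hand, roughly the way Hive does across a shuffle. The NewUDAFLocalTest class name and the hard-coded sample values are just for illustration:

package blog;

import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator;
import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator.AggregationBuffer;
import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator.Mode;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;

public class NewUDAFLocalTest {
    public static void main(String[] args) throws Exception {
        ObjectInspector doubleOI =
                PrimitiveObjectInspectorFactory.javaDoubleObjectInspector;

        // Map side (PARTIAL1): consume raw values, emit a partial {count, sum} struct
        GenericUDAFEvaluator mapEval = new NewUDAF.GenericUDAFAverageEvaluator();
        ObjectInspector partialOI =
                mapEval.init(Mode.PARTIAL1, new ObjectInspector[]{doubleOI});
        AggregationBuffer mapBuf = mapEval.getNewAggregationBuffer();
        for (double v : new double[]{99, 88, 77, 66, 55}) {
            mapEval.iterate(mapBuf, new Object[]{v});
        }
        Object partial = mapEval.terminatePartial(mapBuf);

        // Reduce side (FINAL): merge the partial struct, emit the final average
        GenericUDAFEvaluator reduceEval = new NewUDAF.GenericUDAFAverageEvaluator();
        reduceEval.init(Mode.FINAL, new ObjectInspector[]{partialOI});
        AggregationBuffer reduceBuf = reduceEval.getNewAggregationBuffer();
        reduceEval.merge(reduceBuf, partial);
        System.out.println(reduceEval.terminate(reduceBuf)); // prints 77.0
    }
}

Note how the struct OI returned by init() in PARTIAL1 mode is exactly what the FINAL-mode evaluator needs to parse the partial result, which is why partialOI is passed straight through.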

Package the classes into a jar (e.g. with mvn package) and upload it to the cluster.

The sample data file student_info contains:

1,liu,99
2,li,88
3,zhang,77
4,wang,66
5,huang,55

HQL:

add jar /home/hadoop/hive_jar/udaf.jar;
create temporary function oldavg as "blog.OldUDAF";
create temporary function newavg as "blog.NewUDAF";
create table if not exists student_info(id int, name string, score double)
row format delimited fields terminated by ',';
load data local inpath '/home/hadoop/hive_data/student_info' into table student_info;
select * from student_info;
select oldavg(score) from student_info;
select newavg(score) from student_info;

Results:

select * returns the five rows above, and both oldavg(score) and newavg(score) return 77.0, the average of the five scores.


Reposted from blog.csdn.net/a805814077/article/details/104645447