[The Eighteen Palms ● Martial Arts Chapter] Palm 7: group in MapReduce

This post is part of the [Big Data Technology ● Eighteen Dragon-Subduing Palms] series; see the series table of contents: Big Data Technology ● Eighteen Dragon-Subduing Palms.


group is a common operation when processing data with MapReduce. A group operation takes one or more fields of the dataset as the key, partitions the records into groups by that key, and then performs an aggregation over each group (maximum, minimum, sum, average, median, variance, standard deviation).
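Conceptually this is the same operation as a SQL GROUP BY. For the order data used in Example 1 below, a query along the lines of SELECT product_id, MAX(qty), MIN(qty), COUNT(*), AVG(qty) FROM orders GROUP BY product_id (table and column names are only illustrative) would produce the same results that the MapReduce job computes.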

1、How MapReduce implements group

  • In the map function, read each record and extract the grouping fields and the related numeric fields.
  • The mapper's output key should consist of all the grouping fields, and the value should contain only the related numeric fields. Dropping the unrelated fields from the value reduces the amount of data shipped to the reduce side.
  • For many aggregations a combiner can be used to reduce the amount of data sent to the reducers. Only aggregations that satisfy the commutative and associative laws can use a combiner: sum(), min() and max() can, but aggregations such as avg(), variance and standard deviation cannot be combined directly.
  • Use a suitable partitioner to distribute the keys. A well-balanced partitioner avoids data skew and improves efficiency (a minimal sketch follows this list).
  • On the reduce side, aggregate the values that belong to the same group and write the results to the output files.
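The examples in this post rely on Hadoop's default HashPartitioner, so no custom partitioner appears in them. As a reference only, a minimal partitioner sketch is given below; the class name ProductIdPartitioner is hypothetical, ValueWritable is the map output value type defined in Example 1, and it would be registered with job.setPartitionerClass(ProductIdPartitioner.class). A custom implementation like this is only worth writing when the default hash distribution leads to skewed reducers.

package mapreduce.group;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Partitioner;

/**
 * A minimal partitioner sketch (hypothetical, not used by the examples below).
 * It spreads the product-ID keys across the reducers by hash, which is what the
 * default HashPartitioner already does; skew-avoiding logic would go here.
 */
public class ProductIdPartitioner extends Partitioner<Text, ValueWritable> {
    @Override
    public int getPartition(Text key, ValueWritable value, int numPartitions) {
        // Mask off the sign bit so the result is non-negative, then take it modulo the number of reducers
        return (key.hashCode() & Integer.MAX_VALUE) % numPartitions;
    }
}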

2、Example 1: maximum, minimum, count and average

This example demonstrates computing, after a group by style grouping, the maximum, the minimum, the number of records in each group, and the average of each group.

=========================

The source data is shown below. The first column is the product ID, the second is the order number, and the third is the quantity ordered:
P105,T30001,2
P106,T30001,3
P107,T30001,3
P101,T30001,6
P102,T30001,6
P103,T30001,5
P104,T30001,3
P108,T30001,7
P110,T30002,6
P101,T30002,6
P106,T30002,2
P107,T30002,4
P104,T30002,1
P105,T30002,2
P102,T30002,5
P109,T30002,4
P108,T30002,5

============================

The requirement: group by product ID and, for each group, compute the maximum and minimum order quantity, the total quantity ordered for that product, and the average quantity per order.
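For example, with the data above, product P101 appears in orders T30001 and T30002 with quantities 6 and 6, so its expected result is max 6, min 6, count 2, sum 12 and average 6.0; product P106 appears with quantities 3 and 2, giving max 3, min 2, count 2, sum 5 and average 2.5.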

In this example, the per-group maximum, minimum, count and sum all satisfy the commutative and associative laws, so a combiner can be used as an optimization. The average does not, so it cannot be computed directly by a combiner; however, the average is simply the sum divided by the count, and both sum and count can be combined, so we first accumulate the sum and the count and only divide them in the final reduce step to obtain the average.
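To see why the average itself cannot be combined: if one map-side combiner pre-aggregates the values 2, 3 and 4 into (sum=9, count=3) and another pre-aggregates 2 and 3 into (sum=5, count=2), the reducer receives sum=14 and count=5 and computes the correct average 14 / 5 = 2.8, whereas naively averaging the two partial averages 3.0 and 2.5 would give 2.75.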


package mapreduce.group;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.ToolRunner;

/**
 * Created by 鸣宇淳 on 2018/1/12.
 * Launch command:
 * hadoop jar ~/chybin/orderdemo-1.0-SNAPSHOT.jar mapreduce.group.MinMaxCountMapMain hdfs://ClusterTest/chybin/group.log hdfs://ClusterTest/chybin/out/2 2
 */
public class MinMaxCountMapMain {
    public static void main(String[] args) throws Exception {
        Configuration configuration = new Configuration();
        int status = ToolRunner.run(configuration, new MinMaxCountMapReduce(), args);
        System.exit(status);
    }
}

package mapreduce.group;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.Tool;

import java.io.IOException;

/**
 * Created by 鸣宇淳 on 2018/1/11.
 */
public class MinMaxCountMapReduce extends Configured implements Tool {
    @Override
    public int run(String[] args) throws Exception {
        // Get the configuration
        Configuration configuration = this.getConf();
        // Create the job
        Job job = Job.getInstance(configuration, MinMaxCountMapReduce.class.getSimpleName());
        // Set the main class of the MapReduce job
        job.setJarByClass(MinMaxCountMapReduce.class);
        // Input path
        FileInputFormat.addInputPath(job, new Path(args[0]));
        job.setMapperClass(MinMaxCountMapper.class);
        // Output path
        Path outpath = new Path(args[1]);
        FileOutputFormat.setOutputPath(job, outpath);
        // Map output types
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(ValueWritable.class);
        // Reducer class
        job.setReducerClass(MinMaxCountReducer.class);
        // Final output key type
        job.setOutputKeyClass(Text.class);
        // Final output value type is ValueWritable
        job.setOutputValueClass(ValueWritable.class);
        // Number of reduce tasks
        job.setNumReduceTasks(Integer.valueOf(args[2]));

        // Use the reducer as the combiner as well
        job.setCombinerClass(MinMaxCountReducer.class);

        boolean isSuccess = job.waitForCompletion(true);
        return isSuccess ? 0 : 1;
    }

    public static class MinMaxCountMapper extends Mapper<Object, Text, Text, ValueWritable> {
        @Override
        protected void map(Object key, Text value, Context context) throws IOException, InterruptedException {
            System.out.println("读取");
            System.out.println(value);

            String[] list = value.toString().split(",");
            if (list.length != 3) {
                // Discard records that do not have the expected format
                return;
            }
            ValueWritable mapOutValue = new ValueWritable();
            int num = Integer.parseInt(list[2]);

            // For each input line, initialize the max and min to the order quantity of the current line
            mapOutValue.setMax(num);
            mapOutValue.setMin(num);
            // The count contributed by a single line is initially 1
            mapOutValue.setCount(1);
            mapOutValue.setSum(num);

            System.out.println("map输出:");
            System.out.println(mapOutValue);

            context.write(new Text(list[0]), mapOutValue);
        }
    }

    public static class MinMaxCountReducer extends Reducer<Text, ValueWritable, Text, ValueWritable> {
        @Override
        protected void reduce(Text key, Iterable<ValueWritable> values, Context context) throws IOException, InterruptedException {

            // Initialize the output value
            ValueWritable result = new ValueWritable();
            result.setMin(null);
            result.setMax(null);
            result.setCount(0);
            result.setSum(0);

            // Compute the min, max, count and sum for this group
            for (ValueWritable item : values) {
                System.out.println("reduce读取:" + item);
                if (result.getMin() == null || item.getMin().compareTo(result.getMin()) < 0) {
                    result.setMin(item.getMin());
                }
                if (result.getMax() == null || item.getMax().compareTo(result.getMax()) > 0) {
                    result.setMax(item.getMax());
                }
                result.setCount(result.getCount() + item.getCount());
                result.setSum(result.getSum() + item.getSum());
            }
            //Compute the average last. When this method runs as the combiner, this value is not used;
            //when it runs as the reducer, the average written out is the final result.
            result.setAvg((double) result.getSum() / (double) result.getCount());
            System.out.println("值输出:");
            System.out.println(key);
            System.out.println(result);
            context.write(key, result);
        }
    }
}
package mapreduce.group;

import org.apache.hadoop.io.Writable;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

/**
 * Created by 鸣宇淳 on 2018/1/11.
 * This class is used as the output value type of both map and reduce.
 * Custom Hadoop types must implement the Writable interface.
 * The map and reduce output value could also be a plain string, but wrapping it in a class is the better approach.
 */
public class ValueWritable implements Writable {

    // Per-group maximum, minimum, count, and total quantity ordered
    private Integer max = 0;
    private Integer min = 0;
    private Integer count = 0;
    private Integer sum = 0;
    private double avg = 0;

    @Override
    public void write(DataOutput out) throws IOException {
        out.writeInt(max);
        out.writeInt(min);
        out.writeInt(count);
        out.writeInt(sum);
        out.writeDouble(avg);
    }

    @Override
    public void readFields(DataInput in) throws IOException {
        max = in.readInt();
        min = in.readInt();
        count = in.readInt();
        sum = in.readInt();
        avg = in.readDouble();
    }

    public String toString() {
        return "min:" + min + ";max:" + max + ";count:" + count + ";sum:" + sum + ";avg:" + avg;
    }

    public Integer getMax() {
        return max;
    }

    public void setMax(Integer max) {
        this.max = max;
    }

    public Integer getMin() {
        return min;
    }

    public void setMin(Integer min) {
        this.min = min;
    }

    public Integer getCount() {
        return count;
    }

    public void setCount(Integer count) {
        this.count = count;
    }

    public Integer getSum() {
        return sum;
    }

    public void setSum(Integer sum) {
        this.sum = sum;
    }

    public double getAvg() {
        return avg;
    }

    public void setAvg(double avg) {
        this.avg = avg;
    }
}

3、Example 2: median and standard deviation

Median and standard deviation do not satisfy the commutative and associative laws, so a combiner cannot be applied as easily.
The median of a list is the value that splits the sorted list into two equal halves: sort the elements first; if the number of elements is odd, the median is the middle value, and if it is even, the median is the average of the two middle values.
The standard deviation measures how spread out the elements of a list are around their mean. To compute it, first compute the mean, then sum the squared differences between each element and the mean, divide that sum by the number of elements to get the variance, and finally take the square root of the variance to get the standard deviation.
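As a small worked example with the data above: product P106 is ordered with quantities 3 and 2, so the sorted list is (2, 3), the median is (2 + 3) / 2 = 2.5, the mean is 2.5, the sum of squared deviations is (3 - 2.5)² + (2 - 2.5)² = 0.5, the variance is 0.5 / 2 = 0.25, and the standard deviation is √0.25 = 0.5 (the code below uses the population standard deviation, i.e. it divides by the count).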

The following example uses the same data source as Example 1 to compute, after grouping, the median and standard deviation of each group.

The requirement: group by product ID and compute the median and standard deviation of the order quantities in each group.

package mapreduce.group;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.ToolRunner;

/**
 * Created by 鸣宇淳 on 2018/1/16.
 * hadoop jar ~/chybin/orderdemo-1.0-SNAPSHOT.jar mapreduce.group.MedStdMain hdfs://ClusterTest/chybin/group.log hdfs://ClusterTest/chybin/out/2 2
 */
public class MedStdMain {
    public static void main(String[] args) throws Exception {
        Configuration configuration = new Configuration();
        int status = ToolRunner.run(configuration, new MedStdMapReduce(), args);
        System.exit(status);
    }
}
package mapreduce.group;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.Tool;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

/**
 * Created by 鸣宇淳 on 2018/1/16.
 */
public class MedStdMapReduce extends Configured implements Tool {
    @Override
    public int run(String[] args) throws Exception {
        // Get the configuration
        Configuration configuration = this.getConf();
        // Create the job
        Job job = Job.getInstance(configuration, MedStdMapReduce.class.getSimpleName());
        // Set the main class of the MapReduce job
        job.setJarByClass(MedStdMapReduce.class);
        // Input path
        FileInputFormat.addInputPath(job, new Path(args[0]));
        job.setMapperClass(MedStdMapper.class);
        // Output path
        Path outpath = new Path(args[1]);
        FileOutputFormat.setOutputPath(job, outpath);
        // Map output types
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);
        // Reducer class
        job.setReducerClass(MedStdReducer.class);
        // Final output key type
        job.setOutputKeyClass(Text.class);
        // Final output value type
        job.setOutputValueClass(MedStdValueWritable.class);
        // Number of reduce tasks
        job.setNumReduceTasks(Integer.valueOf(args[2]));

        boolean isSuccess = job.waitForCompletion(true);
        return isSuccess ? 0 : 1;
    }

    public static class MedStdMapper extends Mapper<Object, Text, Text, IntWritable> {
        @Override
        protected void map(Object key, Text value, Context context) throws IOException, InterruptedException {
            String[] list = value.toString().split(",");
            if (list.length != 3) {
                // Discard records that do not have the expected format
                return;
            }
            // Get the order quantity
            int num = Integer.parseInt(list[2]);
            // The map output key is the product ID and the value is the order quantity
            context.write(new Text(list[0]), new IntWritable(num));
        }
    }

    public static class MedStdReducer extends Reducer<Text, IntWritable, Text, MedStdValueWritable> {
        @Override
        protected void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException {
            System.out.println("reduce输入:key:" + key);
            List<Integer> valList = new ArrayList<Integer>();
            int count = 0;
            int sum = 0;
            double med = 0;
            // Copy the order quantities of this group into a list
            for (IntWritable item : values) {
                System.out.println("value:" + item);
                valList.add(item.get());
                count++;
                sum += item.get();
            }
            // Sort the values
            Collections.sort(valList);
            // Median
            if (count % 2 == 1) {
                // Odd count: take the middle value
                med = valList.get(count / 2);
            } else {
                // Even count: take the average of the two middle values
                med = (valList.get((count / 2) - 1) + valList.get(count / 2)) / (double) 2;
            }
            // Mean
            double avg = (double) sum / (double) count;
            double sumOfList = 0;
            for (int i : valList) {
                // Sum of squared differences between each element and the mean
                sumOfList += (i - avg) * (i - avg);
            }
            // Standard deviation
            double std = Math.sqrt((double) sumOfList / (double) count);

            MedStdValueWritable result = new MedStdValueWritable();
            result.setMed(med);
            result.setStd(std);
            System.out.println("reduce输出:key:" + key + "平均值:" + avg + ";value:" + result);
            context.write(key, result);
        }
    }
}
package mapreduce.group;

import org.apache.hadoop.io.Writable;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

/**
 * Created by 鸣宇淳 on 2018/1/15.
 */
public class MedStdValueWritable implements Writable {
    private Double med;//median
    private Double std;//standard deviation

    @Override
    public void write(DataOutput out) throws IOException {
        out.writeDouble(med);
        out.writeDouble(std);
    }

    @Override
    public void readFields(DataInput in) throws IOException {
        med = in.readDouble();
        std = in.readDouble();
    }

    public Double getMed() {
        return med;
    }

    public void setMed(Double med) {
        this.med = med;
    }

    public Double getStd() {
        return std;
    }

    public void setStd(Double std) {
        this.std = std;
    }

    @Override
    public String toString() {
        return "中位数:" + med + ";标准差:" + std;
    }
}

4、Example 3: improved median and standard deviation

The previous example has two problems. First, because median and standard deviation do not satisfy the commutative and associative laws, a combiner cannot simply be used to do part of the work on the map side. Second, the reducer has to load the entire list of values of each group into memory, which consumes a lot of memory. Both problems can be addressed with the following improvement.

On the map side the list of order quantities is stored roughly as (2, 1, 3, 2, 5, 3, 1, 1, 1), one element per input record, so the number of elements equals the number of records. It can instead be represented as ((1,4), (2,2), (3,2), (5,1)), where each element is a (value, count) pair. The number of elements then depends on the number of distinct values rather than on the number of records, which greatly reduces memory consumption for large datasets, and it also makes it possible to run a combiner on the map side that merges entries with the same value.
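The median can still be located without expanding the list: ((1,4), (2,2), (3,2), (5,1)) describes 4 + 2 + 2 + 1 = 9 values in total, so the median is the 5th smallest value; walking the pairs in ascending order, the pair (1,4) covers positions 1-4 and the pair (2,2) covers positions 5-6, so the median is 2. The standard deviation can likewise be computed by weighting each value's squared deviation by its count.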

The implementation:


package mapreduce.group;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.ToolRunner;

/**
 * Created by 鸣宇淳 on 2018/1/12.
 * Launch command:
 * hadoop jar ~/chybin/orderdemo-1.0-SNAPSHOT.jar mapreduce.group.MedStdWithCombinerMain hdfs://ClusterTest/chybin/group.log hdfs://ClusterTest/chybin/out/2 2
 */
public class MedStdWithCombinerMain {
    public static void main(String[] args) throws Exception {
        Configuration configuration = new Configuration();
        int status = ToolRunner.run(configuration, new MedStdWithCombinerMR(), args);
        System.exit(status);
    }
}

package mapreduce.group;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.*;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.Tool;

import java.io.IOException;
import java.util.Map;
import java.util.TreeMap;

/**
 * Created by 鸣宇淳 on 2018/1/16.
 */
public class MedStdWithCombinerMR extends Configured implements Tool {
    @Override
    public int run(String[] args) throws Exception {
        // Get the configuration
        Configuration configuration = this.getConf();
        // Create the job
        Job job = Job.getInstance(configuration, MedStdWithCombinerMR.class.getSimpleName());
        // Set the main class of the MapReduce job
        job.setJarByClass(MedStdWithCombinerMR.class);
        // Input path
        FileInputFormat.addInputPath(job, new Path(args[0]));
        job.setMapperClass(MedStdWithCombinerMR.MedStdMapper.class);
        // Output path
        Path outpath = new Path(args[1]);
        FileOutputFormat.setOutputPath(job, outpath);
        // Map output types
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(SortedMapWritable.class);
        // Reducer class
        job.setReducerClass(MedStdWithCombinerMR.MedStdReducer.class);
        // Final output key type
        job.setOutputKeyClass(Text.class);
        // Final output value type
        job.setOutputValueClass(MedStdValueWritable.class);
        // Number of reduce tasks
        job.setNumReduceTasks(Integer.valueOf(args[2]));

        // Register the combiner so that identical values are merged on the map side
        job.setCombinerClass(MedStdCombiner.class);
        boolean isSuccess = job.waitForCompletion(true);
        return isSuccess ? 0 : 1;
    }

    public static class MedStdMapper extends Mapper<Object, Text, Text, SortedMapWritable> {
        private final LongWritable ONE = new LongWritable(1);

        @Override
        protected void map(Object key, Text value, Context context) throws IOException, InterruptedException {
            String[] list = value.toString().split(",");
            if (list.length != 3) {
                // Discard records that do not have the expected format
                return;
            }
            // Get the order quantity
            int num = Integer.parseInt(list[2]);
            SortedMapWritable sortedMapWritable = new SortedMapWritable();
            // The map key is the order quantity and the value is its count, which starts at 1 in the mapper
            sortedMapWritable.put(new IntWritable(num), ONE);
            context.write(new Text(list[0]), sortedMapWritable);
        }
    }

    public static class MedStdCombiner extends Reducer<Text, SortedMapWritable, Text, SortedMapWritable> {
        @Override
        protected void reduce(Text key, Iterable<SortedMapWritable> values, Context context) throws IOException, InterruptedException {
            System.out.println("combiner:key:" + key);
            //Running as the combiner on the map side, pre-merge the data of each group:
            //accumulate the counts of identical order-quantity values.
            //In the SortedMapWritable the key is the order quantity and the value is its count.
            SortedMapWritable valMap = new SortedMapWritable();
            for (SortedMapWritable item : values) {
                for (Map.Entry<WritableComparable, Writable> entry : item.entrySet()) {
                    System.out.println("combiner:value:" + entry.getKey() + ";" + entry.getValue());

                    LongWritable count = (LongWritable) valMap.get(entry.getKey());

                    if (count == null) {
                        // Not yet in valMap: add it
                        valMap.put(entry.getKey(), new LongWritable(((LongWritable) entry.getValue()).get()));
                    } else {
                        // Already in valMap: accumulate the count
                        long v = ((LongWritable) entry.getValue()).get() + count.get();
                        valMap.put(entry.getKey(), new LongWritable(v));
                    }
                }
                System.out.println("-----");
            }
            context.write(key, valMap);
        }
    }

    public static class MedStdReducer extends Reducer<Text, SortedMapWritable, Text, MedStdValueWritable> {
        @Override
        protected void reduce(Text key, Iterable<SortedMapWritable> values, Context context) throws IOException, InterruptedException {
            // (value, count) pairs kept sorted by value
            TreeMap<Integer, Long> valList = new TreeMap<Integer, Long>();
            long count = 0;
            int sum = 0;
            double med = 0;
            for (SortedMapWritable item : values) {
                for (Map.Entry<WritableComparable, Writable> entry : item.entrySet()) {
                    int val = ((IntWritable) entry.getKey()).get();
                    long num = ((LongWritable) entry.getValue()).get();
                    System.out.println("reduce输入:key:" + key + ";value" + val + ";" + num);
                    // Accumulate the number of order records and the total quantity ordered in this group
                    count += num;//number of records
                    sum += val * num;//total quantity ordered

                    // Put into the map sorted by value
                    if (valList.get(val) == null) {
                        valList.put(val, num);
                    } else {
                        valList.put(val, valList.get(val) + num);
                    }
                }
            }
            // Walk the (value, count) pairs in ascending order to locate the median
            long medCount = count / 2;//0-based index of the upper middle element
            long prevVal = 0;//value of the previous entry
            long prevCount = 0;//number of elements covered so far
            for (Map.Entry<Integer, Long> entry : valList.entrySet()) {
                if (prevCount <= medCount && prevCount + entry.getValue() > medCount) {//the current entry covers the middle position
                    //Compute the median
                    if (count % 2 == 1 || prevCount < medCount) {
                        //Odd number of elements, or both middle elements fall inside this entry
                        med = entry.getKey();
                    } else {
                        //Even number of elements and the lower middle element is the previous value
                        med = (prevVal + entry.getKey()) / (double) 2;
                    }
                    break;//stop once the median is found
                }
                prevVal = entry.getKey();
                prevCount += entry.getValue();
            }

            // Mean
            double avg = (double) sum / (double) count;
            double sumOfList = 0;
            for (Map.Entry<Integer, Long> entry : valList.entrySet()) {
                // Sum of squared differences between each value and the mean, weighted by the value's count
                sumOfList += (entry.getKey() - avg) * (entry.getKey() - avg) * entry.getValue();
            }
            // Standard deviation
            double std = Math.sqrt((double) sumOfList / (double) count);

            MedStdValueWritable result = new MedStdValueWritable();
            result.setMed(med);
            result.setStd(std);
            System.out.println("reduce输出:key:" + key + "平均值:" + avg + ";value:" + result);
            context.write(key, result);
        }
    }
}

Reposted from blog.csdn.net/chybin500/article/details/79202450