Hadoop之MR简单例子(分组统计手机号通讯总数)

一、测试数据

 目标:按手机号分组,统计每个手机号的打出、接收及通讯总数(下面代码以手机号为 key 进行分组,而非归属地)

手机号   归属地   打出电话  接收电话

15367483854 长沙 38 65
15367483855 长沙 87 47
15367483855 长沙 158 75
15367483852 长沙 2 15
15367483854 长沙 2 15
15367483850 长沙 48 22
15367483851 长沙 100 78
15367483853 长沙 24 63
18205002216 徐州 87 45
18205002216 徐州 187 45
18205002215 徐州 66 12
18205002214 徐州 7 23
18205002213 徐州 18 45
18205002212 徐州 28 96
18205002212 徐州 38 56
18205002212 徐州 48 66
18205002212 徐州 58 76
18205002211 徐州 45 54
13429103229 杭州 66 88
13429103228 杭州 71 38
13429103227 杭州 35 46
13429103227 杭州 78 63
13429103226 杭州 2 3
13429103225 杭州 12 25
13429103224 杭州 17 69
13429103223 杭州 78 105

二、序列化后的实体 

/**
 * Custom value object for one phone record, made serializable for Hadoop
 * by implementing {@link Writable}.
 *
 * <p>Serialization contract: {@link #write(DataOutput)} and
 * {@link #readFields(DataInput)} MUST emit/consume the fields in the exact
 * same order (phoneNo, callIn, callOut, sumCalls).
 */
public class CallBean implements Writable {

    private String phoneNo; // phone number
    private Long callOut;   // outgoing calls
    private Long callIn;    // incoming calls
    private Long sumCalls;  // total calls (callOut + callIn)

    /** No-arg constructor required by Hadoop so it can instantiate before readFields. */
    public CallBean() {
    }

    /**
     * Builds a record and derives the total from the two counters.
     *
     * @param phoneNo phone number
     * @param callOut number of outgoing calls
     * @param callIn  number of incoming calls
     */
    public CallBean(String phoneNo, Long callOut, Long callIn) {
        this.phoneNo = phoneNo;
        this.callOut = callOut;
        this.callIn = callIn;
        this.sumCalls = callIn + callOut;
    }

    public String getPhoneNo() {
        return phoneNo;
    }

    public void setPhoneNo(String phoneNo) {
        this.phoneNo = phoneNo;
    }

    public Long getCallOut() {
        return callOut;
    }

    public void setCallOut(Long callOut) {
        this.callOut = callOut;
    }

    public Long getCallIn() {
        return callIn;
    }

    public void setCallIn(Long callIn) {
        this.callIn = callIn;
    }

    public Long getSumCalls() {
        return sumCalls;
    }

    public void setSumCalls(Long sumCalls) {
        this.sumCalls = sumCalls;
    }

    /** Tab-separated "out in total" — this is what ends up in the reduce output file. */
    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder();
        sb.append(callOut).append('\t').append(callIn).append('\t').append(sumCalls);
        return sb.toString();
    }

    /** Serializes this object into the data stream. */
    @Override
    public void write(DataOutput dataOutput) throws IOException {
        dataOutput.writeUTF(phoneNo);
        dataOutput.writeLong(callIn);
        dataOutput.writeLong(callOut);
        dataOutput.writeLong(sumCalls);
    }

    /**
     * Deserializes this object from the data stream.
     * Field order must match {@link #write(DataOutput)} exactly.
     */
    @Override
    public void readFields(DataInput dataInput) throws IOException {
        phoneNo = dataInput.readUTF();
        callIn = dataInput.readLong();
        callOut = dataInput.readLong();
        sumCalls = dataInput.readLong();
    }


}

三、Map和Reduce

1.Map

// Input: key = byte offset of the line, value = one raw log line.
// Output: key = phone number (Text), value = CallBean (must implement Hadoop's Writable).
public class CallSumMapper extends Mapper<LongWritable, Text, Text, CallBean> {

    // Reused across map() calls to avoid allocating one Text per input record.
    private final Text outKey = new Text();

    /**
     * Takes one log line, splits it into fields, extracts the fields we need,
     * wraps them in a KV pair and emits it.
     *
     * <p>Fix over the original: blank lines, short lines and lines whose
     * counter columns are not numeric (e.g. the header row in the sample data)
     * are skipped instead of throwing and failing the whole map task.
     */
    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        String lineData = value.toString().trim();
        if (lineData.isEmpty()) {
            return; // skip blank lines
        }
        String[] datas = lineData.split("\\s+");
        if (datas.length < 4) {
            return; // skip malformed records instead of ArrayIndexOutOfBoundsException
        }
        String phone = datas[0];
        long callOut;
        long callIn;
        try {
            callOut = Long.parseLong(datas[2]);
            callIn = Long.parseLong(datas[3]);
        } catch (NumberFormatException ignored) {
            return; // skip non-numeric counters (e.g. the "手机号 归属地 ..." header row)
        }
        // Wrap the fields in the serializable bean.
        CallBean callBean = new CallBean(phone, callOut, callIn);

        // Emit <phone, callBean>.
        outKey.set(phone);
        context.write(outKey, callBean);

    }
}

2.Reduce

public class CallSumReducer extends Reducer<Text, CallBean, Text, CallBean> {

    /**
     * Receives data from the map phase in the form
     * {@code <15243665454, {callBean1, callBean2, ...}>} and sums the
     * outgoing/incoming counters for each phone number.
     */
    @Override
    protected void reduce(Text key, Iterable<CallBean> values, Context context) throws IOException,
            InterruptedException {
        // Running totals for this phone number.
        long totalCallOut = 0;
        long totalCallIn = 0;
        for (CallBean record : values) {
            totalCallOut += record.getCallOut();
            totalCallIn += record.getCallIn();
        }
        // Emit <phone, aggregated bean>; the bean derives sumCalls itself.
        context.write(key, new CallBean(key.toString(), totalCallOut, totalCallIn));
    }
}

四、运行主方法

public class CallSumRunner extends Configured implements Tool {
    /**
     * Configures and submits the MapReduce job.
     *
     * @param args args[0] = input directory, args[1] = output directory
     * @return 0 on success, 1 on failure
     */
    @Override
    public int run(String[] args) throws Exception {
        // Remote login user name (avoids HDFS permission errors).
        System.setProperty("HADOOP_USER_NAME", "hadoop");

        // Reads the configuration files on the classpath (src directory).
        Configuration conf = new Configuration();

        Job job = Job.getInstance(conf);
        // Jar containing the driver class.
        job.setJarByClass(CallSumRunner.class);

        // Mapper and Reducer classes.
        job.setMapperClass(CallSumMapper.class);
        job.setReducerClass(CallSumReducer.class);

        // Map output types.
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(CallBean.class);
        // Reduce (final) output types.
        job.setOutputKeyClass(Text.class);
        // BUGFIX: the original called setMapOutputValueClass here a second time,
        // leaving the reduce output value class unset.
        job.setOutputValueClass(CallBean.class);


        // Delete the output folder if it already exists (Hadoop refuses to overwrite it).
        Path path = new Path(args[1]); // args[1] is the output directory (args[0] is the input directory)
        FileSystem fileSystem = path.getFileSystem(conf); // resolve the file system for this path
        if (fileSystem.exists(path)) {
            fileSystem.delete(path, true); // true = delete recursively even if non-empty
        }
        // Location of the input data.
        FileInputFormat.setInputPaths(job, new Path(args[0]));
        // Location where the results are written.
        FileOutputFormat.setOutputPath(job, new Path(args[1]));


        // Run with verbose progress output; 0 on success, 1 otherwise.
        return job.waitForCompletion(true) ? 0 : 1;
    }

    public static void main(String[] args) throws Exception {
        String[] arg = {"hdfs://192.168.2.100:9000/user/phonecount/data", "hdfs://192.168.2.100:9000/user/phonecount/out"};
        // Run the job via ToolRunner.
        int runRes = ToolRunner.run(new Configuration(), new CallSumRunner(), arg);
        // Exit with the job's status code.
        System.exit(runRes);
    }
}

五、测试结果

猜你喜欢

转载自blog.csdn.net/mmake1994/article/details/87908097
今日推荐