MapReduce: Analyzing the Mapper and Reducer Principles Through WordCount


Mapper Phase

package com.zyd.wc;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

/**
 * Input key: LongWritable, the offset of the line in the file
 * Input value: Text (a serializable String type), one line of content
 * Output key: Text, a single word
 * Output value: IntWritable, the count for that word
 */
public class WordCountMapper extends Mapper<LongWritable,Text,Text,IntWritable>{
    // Created once here to avoid allocating new objects on every map() call
    Text k = new Text();
    IntWritable v = new IntWritable(1);
    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        // 1. Convert the line to a String, since the incoming value is a Text
        String line = value.toString();
        // 2. Split the line into individual words on spaces
        String[] words = line.split(" ");
        // 3. Write each word to the next phase in the form <word, 1>
        for (String word : words){
            // Reuse the Text object created outside the method instead of allocating a new one per word, saving memory
            k.set(word);
            context.write(k,v);
        }
    }
}
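
To see what this Mapper emits, here is a minimal plain-Java sketch (no Hadoop runtime involved) that mimics the map() logic on a made-up sample line; the class name and sample text are hypothetical, for illustration only:

// Hypothetical illustration: mimics the map() logic without the Hadoop framework.
public class MapLogicDemo {
    public static void main(String[] args) {
        String line = "hello world hello";          // stands in for one incoming Text value
        for (String word : line.split(" ")) {
            // The real Mapper calls context.write(k, v) with k set to the word and v fixed at 1
            System.out.println("<" + word + ", 1>");
        }
        // Prints: <hello, 1>  <world, 1>  <hello, 1>
    }
}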

Reducer Phase

package com.zyd.wc;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

/**
 * The Reducer's input is the Mapper's output, so the serialization types must match
 */
public class WordCountReducer extends Reducer<Text,IntWritable,Text,IntWritable> {
    /**
     * All values for the same key are aggregated here.
     * A key such as <word, 1> can occur many times, so the values arrive as an Iterable.
     * context: used to write the output
     */
    @Override
    protected void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException {
        // Sum the counts for this word
        int sum = 0;

        for (IntWritable count : values) {
            sum += count.get();
        }


        // Write out the total count for this word
        context.write(key,new IntWritable(sum));
    }
}
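
Between map and reduce, the framework shuffles and groups all <word, 1> pairs by key, so each reduce() call receives one word together with an iterable of its 1s. The following plain-Java sketch (a HashMap standing in for the real shuffle; class name and data are hypothetical) shows that grouping-and-summing idea:

// Hypothetical illustration: simulates the shuffle grouping plus the reduce() summation.
import java.util.HashMap;
import java.util.Map;

public class ReduceLogicDemo {
    public static void main(String[] args) {
        String[] mapOutputKeys = {"hello", "world", "hello"};   // words emitted by the Mapper, each with value 1
        Map<String, Integer> grouped = new HashMap<>();
        for (String word : mapOutputKeys) {
            grouped.merge(word, 1, Integer::sum);               // same accumulation as sum += count.get()
        }
        // Prints hello=2 and world=1, matching what WordCountReducer would write out
        grouped.forEach((word, sum) -> System.out.println(word + "\t" + sum));
    }
}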

Driver Class

package com.zyd.wc;


import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.IOException;

public class WordCountRunner {
    public static void main (String[] args) throws IOException, ClassNotFoundException, InterruptedException {
        // 1. Get the configuration and create a Job instance
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf);
        // 6. Specify the local path of this program's jar
        // job.setJar("/home/wc.jar"); hard-coding the path is fragile because the location can change
        // Let the framework locate the jar automatically from the given class
        job.setJarByClass(WordCountRunner.class);

        // 2. Specify the Mapper and Reducer classes used by this job
        job.setMapperClass(WordCountMapper.class);
        job.setReducerClass(WordCountReducer.class);

        // 3. Specify the key/value types of the Mapper output
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);

        // 4. Specify the key/value types of the final output
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);

        // 5. Specify the job's input directory and output path
        FileInputFormat.setInputPaths(job,new Path(args[0]));
        FileOutputFormat.setOutputPath(job,new Path(args[1]));

        // 7. Submit the job configuration and the jar containing the job's classes
        // to YARN and wait for it to finish
        boolean result = job.waitForCompletion(true);
        System.out.println(result ? 0 : 1);
    }
}
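
To try the job, package the three classes into a jar and submit it with the standard hadoop jar command; the jar name and the HDFS input/output paths below are placeholders (note that the output directory must not already exist, otherwise the job fails at startup):

hadoop jar wc.jar com.zyd.wc.WordCountRunner /input/words /output/wordcount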
