The WordCount program consists of three parts. Strictly speaking, the computation model only has mapper tasks and reducer tasks; here we add a third piece, a driver program, the Runner class.
(1) WorkCountMapper code
package com.zaiou.hadoop.MrWordCount;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

/**
 * @Description: MapReduce WordCount example.
 * KEYIN: by default, the starting byte offset of the line of text read by the MR framework.
 *        Logically a Long, but Hadoop has its own more compact serialization interface,
 *        so LongWritable is used instead of Long.
 * VALUEIN: by default, the content of the line of text read by the MR framework (String).
 * KEYOUT: the key of the output data after the user-defined logic has run (String).
 * VALUEOUT: the value of the output data after the user-defined logic has run (Integer).
 * @author: LB 2019/3/8 09:01
 * @modify: LB 2019/3/8 09:01
 */
public class WorkCountMapper extends Mapper<LongWritable, Text, Text, IntWritable> {

    /**
     * The custom logic of the mapper phase goes in this overridden map method.
     * The map task calls our map method once for every line of input.
     * @param key
     * @param value
     * @param context
     * @throws IOException
     * @throws InterruptedException
     */
    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        // Convert the text content the map task hands us into a String
        String line = value.toString();
        // Split the line into words on spaces
        String[] words = line.split(" ");
        // Emit each word as <word, 1>
        for (String word : words) {
            System.out.println("word: " + word);
            // Use the word as the key and 1 as the value, so the framework can
            // partition by word and the same word always reaches the same reduce task
            context.write(new Text(word), new IntWritable(1));
        }
    }
}
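To make the mapper's behavior concrete, here is a minimal local sketch (plain Java, outside Hadoop, with an assumed sample line) of what one map() call emits:

public class MapSketch {
    public static void main(String[] args) {
        // Assumed sample input line, for illustration only
        String line = "hello world hello";
        // Same splitting logic as the mapper above
        for (String word : line.split(" ")) {
            System.out.println("<" + word + ", 1>");
        }
        // Prints: <hello, 1>  <world, 1>  <hello, 1>
    }
}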
(2) WorkCountReducer code
package com.zaiou.hadoop.MrWordCount;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

/**
 * @Description:
 * KEYIN, VALUEIN match the mapper's KEYOUT, VALUEOUT types.
 * KEYOUT is the word.
 * VALUEOUT is the total count.
 * @author: LB 2019/3/8 11:04
 * @modify: LB 2019/3/8 11:04
 */
public class WorkCountReducer extends Reducer<Text, IntWritable, Text, IntWritable> {

    /**
     * The key parameter is the key shared by one group of kv pairs for the same word.
     * @param key
     * @param values
     * @param context
     * @throws IOException
     * @throws InterruptedException
     */
    @Override
    protected void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException {
        int count = 0;
        for (IntWritable value : values) {
            count += value.get();
        }
        context.write(key, new IntWritable(count));
    }
}
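Between map and reduce, the framework shuffles the pairs and groups them by key. A minimal local sketch (plain Java, with assumed sample data) of the grouping and summing the reducer performs:

import java.util.Arrays;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class ReduceSketch {
    public static void main(String[] args) {
        // Assumed grouped input, as the shuffle would deliver it:
        // <hello, [1, 1]>, <world, [1]>
        Map<String, List<Integer>> grouped = new LinkedHashMap<>();
        grouped.put("hello", Arrays.asList(1, 1));
        grouped.put("world", Arrays.asList(1));
        // Sum each group, exactly like the reduce method above
        for (Map.Entry<String, List<Integer>> e : grouped.entrySet()) {
            int count = 0;
            for (int one : e.getValue()) {
                count += one;
            }
            System.out.println(e.getKey() + "\t" + count);
        }
        // Prints: hello 2, then world 1
    }
}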
(3) WorkCountRunner code
package com.zaiou.hadoop.MrWordCount;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

/**
 * @Description: Acts as a client of the YARN cluster.
 * It packages the runtime parameters of our MR program and points at the jar.
 * @author: LB 2019/3/8 11:35
 * @modify: LB 2019/3/8 11:35
 */
public class WorkCountRunner {
    public static void main(String[] args) throws Exception {
        // Build a configuration object; it reads the config files and accepts overrides
        Configuration conf = new Configuration();
        // Create the job object that describes this job
        Job job = Job.getInstance(conf);
        // Locate the jar that contains this program
        job.setJarByClass(WorkCountRunner.class);
        // Specify the Mapper/Reducer classes this job uses
        job.setMapperClass(WorkCountMapper.class);
        job.setReducerClass(WorkCountReducer.class);
        // Specify the kv types the mapper outputs
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);
        // Specify the kv types the reducer outputs
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        // Directory of the job's input files
        FileInputFormat.setInputPaths(job, new Path(args[0]));
        // Directory of the job's output
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        // Submit the job configuration and the jar containing its classes to YARN and wait
        boolean res = job.waitForCompletion(true);
        System.exit(res ? 0 : 1);
    }
}
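One practical note: FileOutputFormat refuses to run if the output directory already exists. A common safeguard, not part of the original code but a sketch you could place before the setOutputPath call, deletes it first:

// Assumes an extra import: org.apache.hadoop.fs.FileSystem
FileSystem fs = FileSystem.get(conf);
Path outputPath = new Path(args[1]);
if (fs.exists(outputPath)) {
    // Recursively delete the old output so the job can be rerun
    fs.delete(outputPath, true);
}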
(4) Mine is a Maven project, so pom.xml must be changed to package the code as a jar:
<packaging>jar</packaging>
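For the project to compile, the pom also needs the Hadoop client dependency. A sketch, assuming a Hadoop 2.x cluster; the version shown is an assumption, so match it to the version your cluster actually runs:

<dependency>
    <groupId>org.apache.hadoop</groupId>
    <artifactId>hadoop-client</artifactId>
    <!-- Assumed version; use the one your cluster runs -->
    <version>2.7.3</version>
</dependency>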
(5) Running the WordCount program
a> Upload the jar to a folder on the Linux machine.
b> Create the directory /wordcount/input on HDFS and upload the input file (see the commands sketched after step c).
c> Run the program:
hadoop jar hadoop-study-1.0-SNAPSHOT.jar com.zaiou.hadoop.MrWordCount.WorkCountRunner /wordcount/input /wordcount/output
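The HDFS steps above, sketched as shell commands (words.txt is an assumed input file name; part-r-00000 is the standard output file of a single-reducer job):

# Create the HDFS input directory
hdfs dfs -mkdir -p /wordcount/input
# Upload an input file into it
hdfs dfs -put words.txt /wordcount/input
# After the job finishes, inspect the result
hdfs dfs -cat /wordcount/output/part-r-00000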