WordCountMapper
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import java.io.IOException;
public class WordCountMapper extends Mapper<LongWritable, Text, Text, LongWritable> {
    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        // Get the content of the incoming line
        String line = value.toString();
        // Split the line into words on whitespace ("\\s+" also absorbs repeated spaces)
        String[] words = line.split("\\s+");
        // Emit each word with a count of 1
        for (String word : words) {
            context.write(new Text(word), new LongWritable(1));
        }
    }
}
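As an aside, allocating a fresh Text and LongWritable for every word creates avoidable garbage at scale. A common Hadoop idiom is to reuse the Writable instances across map() calls; a minimal sketch of the same mapper with that change (behavior is otherwise identical):

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import java.io.IOException;

public class WordCountMapper extends Mapper<LongWritable, Text, Text, LongWritable> {
    // Reused across map() calls to avoid a per-word allocation
    private final Text outKey = new Text();
    private static final LongWritable ONE = new LongWritable(1);

    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        for (String word : value.toString().split("\\s+")) {
            if (word.isEmpty()) continue; // leading whitespace can yield an empty token
            outKey.set(word);
            context.write(outKey, ONE);
        }
    }
}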
WordCountReducer
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;
import java.io.IOException;
public class WordCountReducer extends Reducer<Text, LongWritable, Text, LongWritable> {
    @Override
    protected void reduce(Text key, Iterable<LongWritable> values, Context context)
            throws IOException, InterruptedException {
        // Accumulate the total count for this word; use long to match LongWritable
        long count = 0;
        for (LongWritable value : values) {
            count += value.get();
        }
        context.write(key, new LongWritable(count));
    }
}
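Because the reduce step is just a commutative, associative sum whose input and output types match, the same class can also be registered as a combiner, pre-aggregating counts on the map side to shrink shuffle traffic. This is an optional, one-line addition in the driver (setCombinerClass is standard Hadoop API):

// In WordCountRunner, after job.setReducerClass(...):
job.setCombinerClass(WordCountReducer.class); // pre-sum counts map-side before the shuffle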
WordCountRunner
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
public class WordCountRunner {
    public static void main(String[] args) throws Exception {
        // Create the job instance for this MapReduce program
        Configuration conf = new Configuration();
        // conf.set("mapreduce.framework.name", "local"); // uncomment to use the local job runner
        Job job = Job.getInstance(conf);
        // Specify the main class of this job (used to locate the jar)
        job.setJarByClass(WordCountRunner.class);
        // Specify the concrete mapper and reducer implementations for this job
        job.setMapperClass(WordCountMapper.class);
        job.setReducerClass(WordCountReducer.class);
        // Specify the output key/value types of the map phase
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(LongWritable.class);
        // Specify the output key/value types of the reduce phase,
        // i.e. the final output types of the whole MR job
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(LongWritable.class);
        long startTime = System.currentTimeMillis(); // record the start time
        // FileInputFormat.setMaxInputSplitSize(job, 128);
        // FileInputFormat.setMinInputSplitSize(job, 128);
        // Wire up the first argument (input path) and the second (output path)
        FileInputFormat.setInputPaths(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        // Submit the job and wait for it to complete
        boolean b = job.waitForCompletion(true);
        long endTime = System.currentTimeMillis(); // record the end time
        System.out.println("Job run time: " + (endTime - startTime) + "ms");
        System.exit(b ? 0 : 1);
    }
}
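One common gotcha: the job fails at submission if the output directory already exists. A defensive sketch using the standard FileSystem API, to be placed before FileOutputFormat.setOutputPath(...) in main(); deleting stale output automatically is an assumption about your workflow, so adjust to taste:

import org.apache.hadoop.fs.FileSystem;

// ... inside main(), before FileOutputFormat.setOutputPath(...):
Path outPath = new Path(args[1]);
FileSystem fs = FileSystem.get(conf);
if (fs.exists(outPath)) {
    fs.delete(outPath, true); // recursively remove the stale output directory
}
FileOutputFormat.setOutputPath(job, outPath);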
Next, package the project into a jar.
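Assuming a Maven project (the -SNAPSHOT jar name below suggests one), packaging is typically:

mvn clean package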
Upload the resulting jar to the Linux system.
Start the cluster.
Upload the text file you want to process to HDFS:
# Put word.txt into the HDFS root directory (/)
hadoop fs -put word.txt /
# Arguments: the uploaded jar (hdfs_HelloWorld-1.0-SNAPSHOT.jar),
# the fully qualified runner class (service.WordCountRunner),
# the input file to process (/word.txt), and the output directory (/ddd)
hadoop jar hdfs_HelloWorld-1.0-SNAPSHOT.jar service.WordCountRunner /word.txt /ddd
Finally, just check the output directory.
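For example, with a single reducer the results land in part-r-00000 inside the output directory, which you can print with:

hadoop fs -cat /ddd/part-r-00000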