Copyright: 数据丁, https://blog.csdn.net/reasery/article/details/82839598
package mrpro924;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class WordCountMP {

    // An inner class that extends Mapper.
    // Type parameters: <offset type, line-content type, output key type, output value type>
    public static class MyMapper extends Mapper<LongWritable, Text, Text, IntWritable> {

        // Called once per input line.
        @Override
        protected void map(LongWritable key, Text value,
                Mapper<LongWritable, Text, Text, IntWritable>.Context context)
                throws IOException, InterruptedException {
            // The framework reads the input line by line; the line content arrives in value.
            // Text -> String: toString(); String -> Text: new Text(...).
            String line = value.toString();
            String[] split = line.split("\t");
            for (String s : split) {
                Text t = new Text(s);
                // int -> IntWritable: new IntWritable(...); IntWritable -> int: get().
                // In general, Hadoop writables convert back to Java primitives with get().
                IntWritable i = new IntWritable(1);
                // The map output is a (Text, IntWritable) pair.
                context.write(t, i);
            }
        }
    }

    // An inner class that extends Reducer (both classes could also be written
    // as top-level classes).
    // The input k,v types are the map output k,v types.
    // The framework groups all values with the same key into one reduce call.
    public static class MyReducer extends Reducer<Text, IntWritable, Text, IntWritable> {

        @Override
        protected void reduce(Text key, Iterable<IntWritable> values,
                Reducer<Text, IntWritable, Text, IntWritable>.Context context)
                throws IOException, InterruptedException {
            // num accumulates the count.
            int num = 0;
            // values holds the counts recorded for one word, e.g. <1, 1, ...>;
            // summing its elements gives the word's total frequency.
            for (IntWritable i : values) {
                num += i.get();
            }
            context.write(key, new IntWritable(num));
        }
    }

    // The main method configures the job; this could also live in a separate
    // driver class (see the sketch after this listing).
    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
        // Load the configuration.
        Configuration conf = new Configuration();
        // When running from Eclipse, set the Linux user name to use on the cluster.
        System.setProperty("HADOOP_USER_NAME", "mading");
        // Create a job.
        Job job = Job.getInstance(conf);
        // Set the main class of this job.
        job.setJarByClass(WordCountMP.class);
        // Set the mapper and reducer classes.
        job.setMapperClass(MyMapper.class);
        job.setReducerClass(MyReducer.class);
        // Set the map output key/value types; these can be omitted when they
        // match the reduce output types.
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);
        // Set the reduce output key/value types.
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        // Set the HDFS input path.
        FileInputFormat.addInputPath(job, new Path("hdfs://master:9000/in"));
        // Set the output path (it must not exist yet, or the job will fail).
        FileOutputFormat.setOutputPath(job, new Path("hdfs://master:9000/out02"));
        // Submit the job and wait for it to finish.
        job.waitForCompletion(true);
    }
}
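As the comment in main notes, the job setup can also live in a separate driver class. Below is a minimal sketch of that variant, not from the original post: the ToolRunner wiring, the optional combiner, and the pre-run deletion of the output directory are additions of mine, assuming the same mrpro924 package and cluster paths as above.

package mrpro924;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

public class WordCountDriver extends Configured implements Tool {

    @Override
    public int run(String[] args) throws Exception {
        Configuration conf = getConf();
        Job job = Job.getInstance(conf);
        job.setJarByClass(WordCountDriver.class);
        job.setMapperClass(WordCountMP.MyMapper.class);
        // Because the reducer just sums IntWritables, it can double as a
        // combiner to shrink the map output before the shuffle.
        job.setCombinerClass(WordCountMP.MyReducer.class);
        job.setReducerClass(WordCountMP.MyReducer.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);

        Path in = new Path("hdfs://master:9000/in");
        Path out = new Path("hdfs://master:9000/out02");
        // The job fails if the output directory already exists, so delete it
        // first (recursively). Convenient for testing; risky in production.
        FileSystem fs = FileSystem.get(out.toUri(), conf);
        if (fs.exists(out)) {
            fs.delete(out, true);
        }
        FileInputFormat.addInputPath(job, in);
        FileOutputFormat.setOutputPath(job, out);
        return job.waitForCompletion(true) ? 0 : 1;
    }

    public static void main(String[] args) throws Exception {
        // Same Eclipse user-name trick as above.
        System.setProperty("HADOOP_USER_NAME", "mading");
        System.exit(ToolRunner.run(new Configuration(), new WordCountDriver(), args));
    }
}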
The console output at the end of the run:
...
        GC time elapsed (ms)=3
        Total committed heap usage (bytes)=1505230848
    Shuffle Errors
        BAD_ID=0
        CONNECTION=0
        IO_ERROR=0
        WRONG_LENGTH=0
        WRONG_MAP=0
        WRONG_REDUCE=0
    File Input Format Counters
        Bytes Read=161
    File Output Format Counters
        Bytes Written=50
I first uploaded the files to the /in directory on HDFS, and the wordcount program reads those three small files directly.
Then look under the HDFS root for the output path /out02.
The content of the part-r-00000 file there is the result we want:
asdfasdfasdfsadf 1
hello 6
mading 9
tom 3
world 6
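Besides browsing to the file in the web UI, the result can also be read programmatically through the FileSystem API. A minimal sketch, assuming the same hdfs://master:9000 address and user name as above (the class name ReadResult is just for illustration):

package mrpro924;

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ReadResult {
    public static void main(String[] args) throws Exception {
        System.setProperty("HADOOP_USER_NAME", "mading");
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(URI.create("hdfs://master:9000"), conf);
        // Each reducer writes one part file; with a single reducer the
        // result lands in part-r-00000.
        Path result = new Path("/out02/part-r-00000");
        try (BufferedReader reader = new BufferedReader(
                new InputStreamReader(fs.open(result), "UTF-8"))) {
            String line;
            while ((line = reader.readLine()) != null) {
                System.out.println(line);
            }
        }
    }
}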
Ways to run it:
1. Package it into a jar, upload it to the server, and submit it to the cluster with hadoop jar, e.g. hadoop jar wordcount.jar mrpro924.WordCountMP (the jar name here is just an example).
2. Run it locally from Eclipse for testing (see the sketch below).
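For option 2, the job as written above still points at hdfs://master:9000, so an Eclipse run will submit against the cluster. A common alternative for a pure local test, sketched below under the assumption of Hadoop 2.x configuration keys, is to force local mode so everything runs in one JVM against the local file system (class and path names here are illustrative):

package mrpro924;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class WordCountLocal {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Run the whole job in a single local JVM instead of submitting to the cluster.
        conf.set("mapreduce.framework.name", "local");
        // Use the local file system instead of HDFS.
        conf.set("fs.defaultFS", "file:///");
        Job job = Job.getInstance(conf);
        job.setJarByClass(WordCountLocal.class);
        job.setMapperClass(WordCountMP.MyMapper.class);
        job.setReducerClass(WordCountMP.MyReducer.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        // Local directories; adjust to wherever your test files live.
        FileInputFormat.addInputPath(job, new Path("input"));
        FileOutputFormat.setOutputPath(job, new Path("output"));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}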