Copyright notice: 数据丁 https://blog.csdn.net/reasery/article/details/82876589
Writing the code: a MapReduce WordCount whose two jobs are submitted through JobControl.
package mrpro924;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.jobcontrol.ControlledJob;
import org.apache.hadoop.mapreduce.lib.jobcontrol.JobControl;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
public class WordCountMP {
    //Write an inner class that extends the Mapper class
    public static class MyMapper extends Mapper<LongWritable, Text, Text, IntWritable>{
        @Override
        protected void map(LongWritable key, Text value,
                Mapper<LongWritable, Text, Text, IntWritable>.Context context)
                throws IOException, InterruptedException {
            //The input is read line by line; each line's content is in value
            //Convert a Text object to String with toString()
            //Convert a String to Text with new Text(...)
            String line = value.toString();
            String[] split = line.split("\t");
            for(String s : split){
                Text t = new Text(s);
                //Convert an int to IntWritable with new IntWritable(...)
                //Convert an IntWritable back to int with .get(); Hadoop writables generally convert to Java primitives with .get()
                IntWritable i = new IntWritable(1);
                //The key written by map is a Text, the value is an IntWritable
                context.write(t, i);
            }
        }
    }
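    /*
     * Illustrative example (input invented for illustration, not from the original post):
     * for the line "hello\tworld\thello" this mapper emits ("hello",1), ("world",1), ("hello",1);
     * the framework then groups these pairs by key before they reach the reducer.
     */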
    //Write an inner class that extends the Reducer class (these inner classes could of course also be separate top-level classes)
    //The input k,v types are the k,v types output by map
    //Records with the same key are grouped together by the framework
    public static class MyReducer extends Reducer<Text, IntWritable, Text, IntWritable>{
        @Override
        protected void reduce(Text key, Iterable<IntWritable> values,
                Reducer<Text, IntWritable, Text, IntWritable>.Context context)
                throws IOException, InterruptedException {
            //num is used as the counter
            int num = 0;
            //values is an iterable holding the counts for one word, e.g. <1,1,...>;
            //summing its elements gives the word's total count
            for(IntWritable i : values){
                num += i.get();
            }
            context.write(key, new IntWritable(num));
        }
    }
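    /*
     * Continuing the illustrative example above: the reducer receives
     * ("hello", <1,1>) and ("world", <1>) and writes ("hello", 2) and ("world", 1).
     */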
    //The main method sets up the job configuration (a separate driver class could also be written)
    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
        //Load the configuration files
        Configuration conf = new Configuration();
        //When running from Eclipse, set the Linux user name
        System.setProperty("HADOOP_USER_NAME", "mading");
        //Create the first job, job1
        Job job1 = Job.getInstance(conf);
        //Specify the main class of this job
        job1.setJarByClass(WordCountMP.class);
        //Specify the mapper and reducer classes
        job1.setMapperClass(MyMapper.class);
        job1.setReducerClass(MyReducer.class);
        //Specify the map output key/value types; they can be omitted when they match the reduce output types
        job1.setMapOutputKeyClass(Text.class);
        job1.setMapOutputValueClass(IntWritable.class);
        //Specify the reduce output key/value types
        job1.setOutputKeyClass(Text.class);
        job1.setOutputValueClass(IntWritable.class);
        //Specify the input path, here given as a full HDFS URI on the cluster
        FileInputFormat.addInputPath(job1, new Path("hdfs://master:9000/in"));
        //Specify the output path (it must not already exist)
        FileOutputFormat.setOutputPath(job1, new Path("hdfs://master:9000/out02"));
        //Set up the second job, job2
        Job job2 = Job.getInstance(conf);
        job2.setJarByClass(WordCountMP.class);
        //Specify the mapper and reducer classes
        job2.setMapperClass(MyMapper.class);
        job2.setReducerClass(MyReducer.class);
        //Specify the map output key/value types; they can be omitted when they match the reduce output types
        job2.setMapOutputKeyClass(Text.class);
        job2.setMapOutputValueClass(IntWritable.class);
        //Specify the reduce output key/value types
        job2.setOutputKeyClass(Text.class);
        job2.setOutputValueClass(IntWritable.class);
        //Specify the input path
        FileInputFormat.addInputPath(job2, new Path("hdfs://master:9000/in"));
        //Specify the output path; each job needs its own fresh output directory, so job2 must not reuse job1's /out02
        FileOutputFormat.setOutputPath(job2, new Path("hdfs://master:9000/out03"));
        //The JobControl class runs the jobs on a thread under the hood
        //Be careful to import the new-API class: org.apache.hadoop.mapreduce.lib.jobcontrol.JobControl
        //The constructor argument is a group name; all jobs form one group and the name can be anything
        JobControl jc = new JobControl("groupname");
        //ControlledJob needs the job configuration (job.xml), which is obtained from each Job object
        ControlledJob ajob = new ControlledJob(job1.getConfiguration());
        ControlledJob bjob = new ControlledJob(job2.getConfiguration());
        /*
         * To declare dependencies between several jobs:
         * bjob.addDependingJob(ajob);
         * bjob.addDependingJob(cjob);
         * bjob will then run only after ajob and cjob have finished
         * (a standalone sketch of such a dependency chain follows after the class)
         */
        //Similar to a thread pool: all controlled jobs are placed in jc
        jc.addJob(ajob);
        jc.addJob(bjob);
        //Submit the jobs by running the JobControl on a thread
        new Thread(jc).start();
        //Poll until all jobs in jc have finished; once they have, the thread can be stopped
        while(!jc.allFinished()){
            Thread.sleep(500);
        }
        jc.stop();
    }
}
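To make the dependency mechanism mentioned in the comments concrete, here is a minimal sketch (not from the original post) of how the end of the main method could be changed so that job2 depends on job1 and reads job1's output. It assumes the same imports and the job1/job2 setup shown above; the paths and the group name are illustrative assumptions.

        //Sketch only: assumes the job1/job2 setup above; paths and names are illustrative
        FileInputFormat.addInputPath(job2, new Path("hdfs://master:9000/out02"));        //job2 reads job1's output
        FileOutputFormat.setOutputPath(job2, new Path("hdfs://master:9000/out_final"));  //job2 writes to a fresh directory
        ControlledJob ajob = new ControlledJob(job1.getConfiguration());
        ControlledJob bjob = new ControlledJob(job2.getConfiguration());
        //bjob (job2) will only be submitted after ajob (job1) has completed successfully
        bjob.addDependingJob(ajob);
        JobControl jc = new JobControl("wordcount-chain");
        jc.addJob(ajob);
        jc.addJob(bjob);
        new Thread(jc).start();
        while(!jc.allFinished()){
            Thread.sleep(500);
        }
        jc.stop();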