Case requirement:
For each phone number, compute the total upstream traffic, the total downstream traffic, and the total traffic (total upstream + total downstream).
Input:
Output:
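(The original sample input and output are not reproduced here. Judging from how MyMapper below parses each record, the input is assumed to be tab-separated lines of the form phone number, upstream traffic, downstream traffic, and each output line is phone number, total upstream, total downstream, total traffic. The values below are made up purely for illustration; two input records such as
13800000001	1116	954
13800000001	2481	2940
would produce the output line
13800000001	3597	3894	7491)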
Maven dependencies:
<dependencies>
    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-common</artifactId>
        <version>2.10.0</version>
    </dependency>
    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-hdfs</artifactId>
        <version>2.10.0</version>
    </dependency>
    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-mapreduce-client-core</artifactId>
        <version>2.10.0</version>
    </dependency>
    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-mapreduce-client-jobclient</artifactId>
        <version>2.10.0</version>
    </dependency>
    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-mapreduce-client-common</artifactId>
        <version>2.10.0</version>
    </dependency>
</dependencies>
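If keeping the five separate artifacts in sync feels error-prone (see pitfall 1 at the end), a single aggregator dependency is generally enough for this example, since it pulls in the common, HDFS, and MapReduce client modules transitively. This is an alternative sketch, not what the original setup used:
<dependency>
    <groupId>org.apache.hadoop</groupId>
    <artifactId>hadoop-client</artifactId>
    <version>2.10.0</version>
</dependency>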
Custom data class:
Points to note:
1. Implement the Writable interface and its serialization methods; the serialization order and the deserialization order must be identical.
2. Provide a no-argument constructor (the framework instantiates the class via reflection when deserializing).
import org.apache.hadoop.io.Writable;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

public class FlowBean implements Writable {
    private long upFlow;    // upstream traffic
    private long downFlow;  // downstream traffic
    private long totalFlow; // total traffic

    public FlowBean() {
        super();
    }

    @Override
    public void write(DataOutput dataOutput) throws IOException {
        // Serialization: the field order written here must match readFields()
        dataOutput.writeLong(upFlow);
        dataOutput.writeLong(downFlow);
        dataOutput.writeLong(totalFlow);
    }

    @Override
    public void readFields(DataInput dataInput) throws IOException {
        // Deserialization: read the fields in exactly the order they were written
        upFlow = dataInput.readLong();
        downFlow = dataInput.readLong();
        totalFlow = dataInput.readLong();
    }

    @Override
    public String toString() {
        return upFlow + "\t" + downFlow + "\t" + totalFlow;
    }

    public long getUpFlow() {
        return upFlow;
    }

    public void setUpFlow(long upFlow) {
        this.upFlow = upFlow;
    }

    public long getDownFlow() {
        return downFlow;
    }

    public void setDownFlow(long downFlow) {
        this.downFlow = downFlow;
    }

    public long getTotalFlow() {
        return totalFlow;
    }

    public void setTotalFlow(long totalFlow) {
        this.totalFlow = totalFlow;
    }
}
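To see why the write/readFields order matters, the bean can be round-tripped locally outside of a job. This is a minimal verification sketch, not part of the original post; the class name FlowBeanRoundTrip and the sample values are made up:
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;

public class FlowBeanRoundTrip {
    public static void main(String[] args) throws Exception {
        FlowBean original = new FlowBean();
        original.setUpFlow(1116);
        original.setDownFlow(954);
        original.setTotalFlow(1116 + 954);

        // Serialize: write() emits upFlow, downFlow, totalFlow in that order
        DataOutputBuffer out = new DataOutputBuffer();
        original.write(out);

        // Deserialize: readFields() must consume the longs in the same order,
        // otherwise the fields come back swapped or corrupted
        DataInputBuffer in = new DataInputBuffer();
        in.reset(out.getData(), out.getLength());
        FlowBean copy = new FlowBean();
        copy.readFields(in);

        System.out.println(copy); // prints 1116, 954, 2070 separated by tabs
    }
}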
MyMapper class:
The phone number is used as the key.
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

public class MyMapper extends Mapper<LongWritable, Text, LongWritable, FlowBean> {
    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        // Each input line is tab-separated: phone number, upstream traffic, downstream traffic
        String line = value.toString();
        String[] s = line.split("\\t");
        FlowBean v = new FlowBean();
        LongWritable k = new LongWritable();
        k.set(Long.parseLong(s[0]));
        v.setUpFlow(Long.parseLong(s[1]));
        v.setDownFlow(Long.parseLong(s[2]));
        v.setTotalFlow(v.getDownFlow() + v.getUpFlow());
        // Emit (phone number, flow bean); the shuffle groups records by phone number
        context.write(k, v);
    }
}
MyReducer class:
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

public class MyReducer extends Reducer<LongWritable, FlowBean, LongWritable, FlowBean> {
    FlowBean v = new FlowBean();

    @Override
    protected void reduce(LongWritable key, Iterable<FlowBean> values, Context context) throws IOException, InterruptedException {
        long totalUpFlow = 0L;
        long totalDownFlow = 0L;
        // Sum the upstream and downstream traffic of all records for this phone number
        for (FlowBean value : values) {
            totalUpFlow += value.getUpFlow();
            totalDownFlow += value.getDownFlow();
        }
        v.setUpFlow(totalUpFlow);
        v.setDownFlow(totalDownFlow);
        v.setTotalFlow(v.getDownFlow() + v.getUpFlow());
        context.write(key, v);
    }
}
MyDriver class:
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.IOException;

public class MyDriver {
    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf);
        job.setJarByClass(MyDriver.class);
        job.setMapperClass(MyMapper.class);
        job.setReducerClass(MyReducer.class);
        // The map output types match the final output types, so declaring them
        // explicitly is optional here:
        // job.setMapOutputKeyClass(LongWritable.class);
        // job.setMapOutputValueClass(FlowBean.class);
        job.setOutputKeyClass(LongWritable.class);
        job.setOutputValueClass(FlowBean.class);
        FileInputFormat.addInputPath(job, new Path("/home/hadoop/temp/phone_info.txt"));
        FileOutputFormat.setOutputPath(job, new Path("/home/hadoop/temp/phone_info_RES"));
        // Delete any leftover output directory so the job does not fail with "output path already exists"
        FileSystem.get(conf).delete(new Path("/home/hadoop/temp/phone_info_RES"), true);
        boolean b = job.waitForCompletion(true);
        System.exit(b ? 0 : 1);
    }
}
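To run the job, package FlowBean, MyMapper, MyReducer, and MyDriver into a jar and submit it with the standard hadoop jar command, for example hadoop jar flow-count.jar MyDriver (the jar name here is made up; since the input and output paths are hard-coded in MyDriver, no extra arguments are needed).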
Pitfalls encountered:
1. Import the right packages; Hadoop ships many similarly named classes (for example, both the old mapred and the new mapreduce packages contain Mapper and Reducer).
2. When the reducer appears to do no work, the causes I ran into were: a) map() never calls context.write(); b) some of the records written by context.write() do not match the key/value types the reducer expects (see the snippet below); c) communication problems between nodes prevent map output from being copied to the reducer node.
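For cause 2b in particular, a common fix is to declare the intermediate (map output) types explicitly in the driver whenever they differ from the job's final output types. A minimal sketch, assuming the same job object as in MyDriver above:
// Intermediate types produced by the mapper and consumed by the reducer;
// without these, Hadoop assumes the types set via setOutputKeyClass/ValueClass,
// and the shuffle fails with a type-mismatch error when they differ.
job.setMapOutputKeyClass(LongWritable.class);
job.setMapOutputValueClass(FlowBean.class);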