Section 2, MapReduce in Depth: 9. Sorting Mobile Phones by Upstream Traffic

Continuing with the example from last time. Requirement 2: sort by upstream traffic in reverse (descending) order.

Analysis: use the output of Requirement 1 as the input of this sort job. Define a custom FlowBean and make it the map output key, with the phone number as the map output value, because the MapReduce framework sorts records by the map output key.
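To make the key/value swap concrete, here is one record at each stage, using the sample line quoted in the mapper below (columns: phone number, upFlow, downFlow, upCountFlow, downCountFlow):

Input line (output of Requirement 1):  13726230503	24	27	2481	24681
Map output:                            key = FlowBean(24, 27, 2481, 24681), value = "13726230503"
Shuffle/sort:                          FlowBean keys ordered by upFlow, descending
Reducer output line:                   13726230503	24	27	2481	24681  (now in sorted position)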

Code:

FlowMain:
package cn.itcast.demo3.flowSort;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

public class FlowMain extends Configured implements Tool {
    @Override
    public int run(String[] args) throws Exception {
        Job job = Job.getInstance(this.getConf(), FlowMain.class.getSimpleName());
        // job.setJarByClass(FlowMain.class); // needed when running on a cluster

        // Input: the output of Requirement 1 (the flow-sum job)
        job.setInputFormatClass(TextInputFormat.class);
        TextInputFormat.addInputPath(job, new Path("file:///D:\\Study\\BigData\\heima\\stage2\\4、大数据离线第四天\\流量统计\\1sum\\part-r-00000"));

        // Map emits FlowBean as the key so the framework sorts on it
        job.setMapperClass(FlowMapper.class);
        job.setMapOutputKeyClass(FlowBean.class);
        job.setMapOutputValueClass(Text.class);

        // Reduce swaps key and value back: phone number first, then the bean
        job.setReducerClass(FlowReducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(FlowBean.class);

        job.setOutputFormatClass(TextOutputFormat.class);
        TextOutputFormat.setOutputPath(job, new Path("file:///D:\\Study\\BigData\\heima\\stage2\\4、大数据离线第四天\\流量统计\\2sort"));

        boolean b = job.waitForCompletion(true);
        return b ? 0 : 1;
    }

    public static void main(String[] args) throws Exception {
        int run = ToolRunner.run(new Configuration(), new FlowMain(), args);
        System.exit(run);
    }
}
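The input and output paths above are hard-coded local Windows paths. Since the driver already runs through ToolRunner, a minimal variant of run() could take the paths from the command line instead (a sketch; the argument order is my assumption, not from the original post):

// inside run(), replacing the hard-coded paths:
TextInputFormat.addInputPath(job, new Path(args[0]));   // args[0]: input file or directory (assumed)
TextOutputFormat.setOutputPath(job, new Path(args[1])); // args[1]: output directory, must not exist yet (assumed)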

FlowMapper:
package cn.itcast.demo3.flowSort;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

public class FlowMapper extends Mapper<LongWritable, Text, FlowBean, Text> {
    // Sample input line: 13726230503	24	27	2481	24681
    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        FlowBean flowBean = new FlowBean();

        String[] split = value.toString().split("\t");

        flowBean.setUpFlow(Integer.parseInt(split[1]));        // upstream traffic
        flowBean.setDownFlow(Integer.parseInt(split[2]));      // downstream traffic
        flowBean.setUpCountFlow(Integer.parseInt(split[3]));   // total upstream traffic
        flowBean.setDownCountFlow(Integer.parseInt(split[4])); // total downstream traffic

        // split[0] is the phone number; emit the bean as key, phone as value
        context.write(flowBean, new Text(split[0]));
    }
}
FlowReducer:
package cn.itcast.demo3.flowSort;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

public class FlowReducer extends Reducer<FlowBean, Text, Text, FlowBean> {

    @Override
    protected void reduce(FlowBean key, Iterable<Text> values, Context context) throws IOException, InterruptedException {
        // ties in upFlow are grouped into one call; see the note below
        for (Text text : values) {
            context.write(text, key);
        }
    }
}
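Why the loop over values: the grouping comparator defaults to the sort comparator, and FlowBean.compareTo below compares only upFlow, so all phone numbers with the same upFlow arrive in a single reduce call. Writing inside the loop emits one output line per phone instead of dropping the ties.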
FlowBean:
package cn.itcast.demo3.flowSort;

import org.apache.hadoop.io.WritableComparable;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

public class FlowBean implements WritableComparable<FlowBean> {
    // upstream traffic
    private Integer upFlow;
    // downstream traffic
    private Integer downFlow;
    // total upstream traffic
    private Integer upCountFlow;
    // total downstream traffic
    private Integer downCountFlow;

    /**
     * Serialization method
     * @param out
     * @throws IOException
     */
    @Override
    public void write(DataOutput out) throws IOException {
        out.writeInt(this.upFlow);
        out.writeInt(this.downFlow);
        out.writeInt(this.upCountFlow);
        out.writeInt(this.downCountFlow);
    }

    /**
     * Deserialization method; fields are read in the same order they were written
     * @param in
     * @throws IOException
     */
    @Override
    public void readFields(DataInput in) throws IOException {
        this.upFlow = in.readInt();
        this.downFlow = in.readInt();
        this.upCountFlow = in.readInt();
        this.downCountFlow = in.readInt();
    }

    public void setUpFlow(Integer upFlow) {
        this.upFlow = upFlow;
    }

    public void setDownFlow(Integer downFlow) {
        this.downFlow = downFlow;
    }

    public void setUpCountFlow(Integer upCountFlow) {
        this.upCountFlow = upCountFlow;
    }

    public void setDownCountFlow(Integer downCountFlow) {
        this.downCountFlow = downCountFlow;
    }

    public Integer getUpFlow() {
        return upFlow;
    }

    public Integer getDownFlow() {
        return downFlow;
    }

    public Integer getUpCountFlow() {
        return upCountFlow;
    }

    public Integer getDownCountFlow() {
        return downCountFlow;
    }

    @Override
    public String toString() {
        /*return "upstream traffic=" + upFlow +
                ", downstream traffic=" + downFlow +
                ", total upstream traffic=" + upCountFlow +
                ", total downstream traffic=" + downCountFlow;*/
        return upFlow + "\t" + downFlow + "\t" + upCountFlow + "\t" + downCountFlow;
    }

    /**
     * Implements the comparator: order by upFlow, descending
     * @param o
     * @return
     */
    @Override
    public int compareTo(FlowBean o) {
        // negate the ascending result to get a descending sort
        // (safe here: Integer.compareTo only returns -1, 0, or 1)
        int i = this.upFlow.compareTo(o.upFlow);
        return -i;
    }
}
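
As a quick local sanity check of the descending order, FlowBean can be sorted with Collections.sort, which uses the compareTo above. This is a standalone sketch; the class name and sample values are made up for illustration:

package cn.itcast.demo3.flowSort;

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

public class FlowBeanSortCheck {
    public static void main(String[] args) {
        List<FlowBean> beans = new ArrayList<>();
        // three beans that differ only in upFlow
        for (int up : new int[]{24, 100, 7}) {
            FlowBean b = new FlowBean();
            b.setUpFlow(up);
            b.setDownFlow(0);
            b.setUpCountFlow(0);
            b.setDownCountFlow(0);
            beans.add(b);
        }
        Collections.sort(beans); // descending by upFlow via FlowBean.compareTo
        for (FlowBean b : beans) {
            System.out.println(b); // first column prints 100, 24, 7
        }
    }
}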

Note: if FlowBean is used as the key output by the map phase, it must implement the WritableComparable interface; otherwise the job fails with:

java.lang.Exception: java.lang.ClassCastException: class cn.itcast.demo3.flowSort.FlowBean
at org.apache.hadoop.mapred.LocalJobRunner$Job.run(LocalJobRunner.java:406)
Caused by: java.lang.ClassCastException: class cn.itcast.demo3.flowSort.FlowBean
at java.lang.Class.asSubclass(Class.java:3404)
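
The cause: when no sort comparator is registered explicitly, the framework looks one up for the map output key class via WritableComparator, and that lookup casts the key class to WritableComparable (the Class.asSubclass call in the stack trace). A FlowBean that implements only Writable fails that cast. Implementing WritableComparable&lt;FlowBean&gt;, as above, fixes it; alternatively a custom RawComparator can be registered with job.setSortComparatorClass(...), though that is not shown here.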


Reposted from www.cnblogs.com/mediocreWorld/p/11025864.html