需求:1.实现用户手机流量统计(ReduceTask并行度控制)
数据如下:保存为.dat文件(字段之间必须以制表符\t分隔,与map中的split("\t")保持一致,否则切分字段会失败)
13726230503 00-FD-07-A4-72-B8:CMCC 120.196.100.82 i02.c.aliimg.com 24 27 2481 24681 200
13826544101 5C-0E-8B-C7-F1-E0:CMCC 120.197.40.4 4 0 264 0 200
13926435656 20-10-7A-28-CC-0A:CMCC 120.196.100.99 2 4 132 1512 200
13926251106 5C-0E-8B-8B-B1-50:CMCC 120.197.40.4 4 0 240 0 200
18211575961 94-71-AC-CD-E6-18:CMCC-EASY 120.196.100.99 iface.qiyi.com 视频网站 15 12 1527 2106 200
84138413 5C-0E-8B-8C-E8-20:7DaysInn 120.197.40.4 122.72.52.12 20 16 4116 1432 200
13560439658 C4-17-FE-BA-DE-D9:CMCC 120.196.100.99 18 15 1116 954 200
15920133257 5C-0E-8B-C7-BA-20:CMCC 120.197.40.4 sug.so.360.cn 信息安全 20 20 3156 2936 200
13719199419 68-A1-B7-03-07-B1:CMCC-EASY 120.196.100.82 4 0 240 0 200
13660577991 5C-0E-8B-92-5C-20:CMCC-EASY 120.197.40.4 s19.cnzz.com 站点统计 24 9 6960 690 200
15013685858 5C-0E-8B-C7-F7-90:CMCC 120.197.40.4 rank.ie.sogou.com 搜索引擎 28 27 3659 3538 200
15989002119 E8-99-C4-4E-93-E0:CMCC-EASY 120.196.100.99 www.umeng.com 站点统计 3 3 1938 180 200
13560439658 C4-17-FE-BA-DE-D9:CMCC 120.196.100.99 15 9 918 4938 200
13480253104 5C-0E-8B-C7-FC-80:CMCC-EASY 120.197.40.4 3 3 180 180 200
13602846565 5C-0E-8B-8B-B6-00:CMCC 120.197.40.4 2052.flash2-http.qq.com 综合门户 15 12 1938 2910 200
13922314466 00-FD-07-A2-EC-BA:CMCC 120.196.100.82 img.qfc.cn 12 12 3008 3720 200
13502468823 5C-0A-5B-6A-0B-D4:CMCC-EASY 120.196.100.99 y0.ifengimg.com 综合门户 57 102 7335 110349 200
18320173382 84-25-DB-4F-10-1A:CMCC-EASY 120.196.100.99 input.shouji.sogou.com 搜索引擎 21 18 9531 2412 200
13925057413 00-1F-64-E1-E6-9A:CMCC 120.196.100.55 t3.baidu.com 搜索引擎 69 63 11058 48243 200
13760778710 00-FD-07-A4-7B-08:CMCC 120.196.100.82 2 2 120 120 200
13726238888 00-FD-07-A4-72-B8:CMCC 120.196.100.82 i02.c.aliimg.com 24 27 2481 24681 200
13560436666 C4-17-FE-BA-DE-D9:CMCC 120.196.100.99 18 15 1116 954 200
技术实现过程:
1.首先将Map输入中的手机号,上行流量,下行流量数据抽取出来(每一行输入数据调用一次自定义map方法处理数据),
然后根据相同的key进行数据分发,以便于相同key会到同一个ReduceTask
2.Map输出为<手机号,bean>,自定义javaBean来封装流量信息,并将javaBean充当Map输出的Value来传输,javaBean
要实现Writable序列化接口,实现两个方法
3.Reduce在获得<手机号,list>后进行累积,然后输出结果即可(框架每传递进来一个kv组,reduce方法被调用一次)
代码:FlowCount.java
package com.ghq.mr;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import java.io.IOException;
public class FlowCount {

    /**
     * Mapper for the traffic-summing job.
     *
     * <p>Called once per input line; extracts the phone number plus the
     * up-flow / down-flow fields and emits {@code <phone, FlowBean>} so that
     * all records of one phone reach the same ReduceTask.
     */
    static class FlowCountMap extends Mapper<LongWritable, Text, Text, FlowBean> {

        // Reused output key: context.write serializes the content immediately,
        // so a single Text instance per task avoids per-record allocation.
        private final Text outKey = new Text();

        @Override
        protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
            // Convert the raw line to a String.
            String line = value.toString();
            // Fields are tab-separated (the input file must use \t).
            String[] fields = line.split("\t");
            // Skip blank or truncated lines instead of failing the whole task
            // with an ArrayIndexOutOfBoundsException.
            if (fields.length < 5) {
                return;
            }
            // NOTE(review): fields[1] implies the real input has a leading
            // id/timestamp column before the phone number; the sample rows
            // shown above start with the phone itself (fields[0]) — confirm
            // against the actual .dat layout.
            String phoneNbr = fields[1];
            // Up/down flow are the 3rd- and 2nd-to-last fields; counting from
            // the end tolerates the variable number of middle columns.
            long upFlow = Long.parseLong(fields[fields.length - 3]);
            long dFlow = Long.parseLong(fields[fields.length - 2]);
            outKey.set(phoneNbr);
            context.write(outKey, new FlowBean(upFlow, dFlow));
        }
    }

    /**
     * Reducer: receives one group per phone number, e.g.
     * {@code <183323,bean1><183323,bean2>...}, and sums the up/down flow.
     *
     * <p>The framework calls {@link #reduce} once per key group.
     */
    static class FlowCountReducer extends Reducer<Text, FlowBean, Text, FlowBean> {

        /**
         * Accumulates up-flow and down-flow over all beans of one phone and
         * writes a single summary bean.
         *
         * @param key     the phone number
         * @param values  all FlowBeans emitted for that phone
         * @param context output collector
         * @throws IOException          on write failure
         * @throws InterruptedException if the task is interrupted
         */
        @Override
        protected void reduce(Text key, Iterable<FlowBean> values, Context context) throws IOException, InterruptedException {
            long sumUpFlow = 0;
            long sumDownFlow = 0;
            // Sum the up-flow and down-flow of every bean in the group.
            for (FlowBean bean : values) {
                sumUpFlow += bean.getUpFlow();
                sumDownFlow += bean.getDownFlow();
            }
            context.write(key, new FlowBean(sumUpFlow, sumDownFlow));
        }
    }

    /**
     * Job driver: wires mapper/reducer and key/value types, sets the
     * input/output paths from the command line, then submits the job to the
     * cluster and waits for completion.
     */
    public static void main(String[] args) throws Exception {
        // Fail fast with a usage hint instead of an ArrayIndexOutOfBounds.
        if (args.length < 2) {
            System.err.println("Usage: FlowCount <input path> <output path>");
            System.exit(2);
        }
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf);
        // Locate the jar containing this class so the cluster can ship it.
        job.setJarByClass(FlowCount.class);
        // Mapper / Reducer implementations for this job.
        job.setMapperClass(FlowCountMap.class);
        job.setReducerClass(FlowCountReducer.class);
        // Map output kv types (differ from nothing here, but set explicitly).
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(FlowBean.class);
        // Final (reducer) output kv types.
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(FlowBean.class);
        // Input directory of the raw traffic records.
        FileInputFormat.setInputPaths(job, new Path(args[0]));
        // Output directory (must not already exist).
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        // Submit the job configuration and jar to YARN; waitForCompletion(true)
        // prints progress, unlike the fire-and-forget job.submit().
        boolean res = job.waitForCompletion(true);
        System.exit(res ? 0 : 1);
    }
}
FlowBean.java
如果想在Reducer的输出结果中使用自定义的数据类型,重写FlowBean的toString()方法即可。
package com.ghq.mr;
import org.apache.hadoop.io.Writable;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
/**
* @author snow
*/
/**
 * Value bean carrying up-flow, down-flow and their sum for one phone number.
 *
 * <p>Transferred as the map/reduce value, so it implements Hadoop's
 * {@link Writable} contract: {@code write} and {@code readFields} must
 * serialize the three fields in the same order.
 *
 * @author snow
 */
public class FlowBean implements Writable {

    // Primitive longs instead of boxed Long: avoids autoboxing overhead and
    // the NullPointerException write() would throw when the bean was created
    // via the no-arg constructor and never populated.
    private long upFlow;
    private long downFlow;
    private long sumFlow;

    /** No-arg constructor required by Hadoop for reflective deserialization. */
    public FlowBean() {
    }

    public FlowBean(long upFlow, long downFlow) {
        set(upFlow, downFlow);
    }

    /**
     * Re-initializes all three fields, keeping sumFlow consistent; lets
     * callers reuse a single bean instance per record (see FlowCountSort)
     * instead of allocating a new one.
     */
    public void set(long upFlow, long downFlow) {
        this.upFlow = upFlow;
        this.downFlow = downFlow;
        this.sumFlow = upFlow + downFlow;
    }

    public long getUpFlow() {
        return upFlow;
    }

    public void setUpFlow(long upFlow) {
        this.upFlow = upFlow;
    }

    public long getDownFlow() {
        return downFlow;
    }

    public void setDownFlow(long downFlow) {
        this.downFlow = downFlow;
    }

    public long getSumFlow() {
        return sumFlow;
    }

    public void setSumFlow(long sumFlow) {
        this.sumFlow = sumFlow;
    }

    /** Serialization order must match {@link #readFields}. */
    @Override
    public void write(DataOutput dataOutput) throws IOException {
        dataOutput.writeLong(upFlow);
        dataOutput.writeLong(downFlow);
        dataOutput.writeLong(sumFlow);
    }

    /** Deserialization order must match {@link #write}. */
    @Override
    public void readFields(DataInput dataInput) throws IOException {
        upFlow = dataInput.readLong();
        downFlow = dataInput.readLong();
        sumFlow = dataInput.readLong();
    }

    /**
     * Determines the reducer's text output format; keep the field order
     * stable because downstream jobs parse this line.
     */
    @Override
    public String toString() {
        return "FlowBean{" +
                "upFlow=" + upFlow +
                ", downFlow=" + downFlow +
                ", sumFlow=" + sumFlow +
                '}';
    }
}
4.执行程序:
4.1.创建HDFS文件存放目录:hadoop fs -mkdir -p /wordcount/phoneFlum
4.2.运行MapReduce程序jar包:
hadoop jar flowsum.jar com.ghq.mr.FlowCount /wordcount/phoneFlum /wordcount/phoneFlumOut
5.查看执行结果:
[hadoop@master ~]$ hadoop jar a.jar com.ghq.mr.FlowCount /flow/input /flow/output2
18/09/02 21:01:14 INFO client.RMProxy: Connecting to ResourceManager at master/192.168.248.143:8032
18/09/02 21:01:15 WARN mapreduce.JobResourceUploader: Hadoop command-line option parsing not performed. Implement the Tool interface and execute your application with ToolRunner to remedy this.
18/09/02 21:01:15 INFO input.FileInputFormat: Total input files to process : 1
18/09/02 21:01:15 INFO mapreduce.JobSubmitter: number of splits:1
18/09/02 21:01:16 INFO mapreduce.JobSubmitter: Submitting tokens for job: job_1535864964114_0008
18/09/02 21:01:16 INFO impl.YarnClientImpl: Submitted application application_1535864964114_0008
18/09/02 21:01:16 INFO mapreduce.Job: The url to track the job: http://master:8088/proxy/application_1535864964114_0008/
18/09/02 21:01:16 INFO mapreduce.Job: Running job: job_1535864964114_0008
18/09/02 21:01:27 INFO mapreduce.Job: Job job_1535864964114_0008 running in uber mode : false
18/09/02 21:01:27 INFO mapreduce.Job: map 0% reduce 0%
18/09/02 21:01:35 INFO mapreduce.Job: map 100% reduce 0%
18/09/02 21:01:44 INFO mapreduce.Job: map 100% reduce 100%
18/09/02 21:01:44 INFO mapreduce.Job: Job job_1535864964114_0008 completed successfully
18/09/02 21:01:44 INFO mapreduce.Job: Counters: 49
File System Counters
FILE: Number of bytes read=839
FILE: Number of bytes written=316469
FILE: Number of read operations=0
FILE: Number of large read operations=0
FILE: Number of write operations=0
HDFS: Number of bytes read=2042
HDFS: Number of bytes written=1307
HDFS: Number of read operations=6
HDFS: Number of large read operations=0
HDFS: Number of write operations=2
Job Counters
Launched map tasks=1
Launched reduce tasks=1
Data-local map tasks=1
Total time spent by all maps in occupied slots (ms)=5063
Total time spent by all reduces in occupied slots (ms)=6247
Total time spent by all map tasks (ms)=5063
Total time spent by all reduce tasks (ms)=6247
Total vcore-milliseconds taken by all map tasks=5063
Total vcore-milliseconds taken by all reduce tasks=6247
Total megabyte-milliseconds taken by all map tasks=5184512
Total megabyte-milliseconds taken by all reduce tasks=6396928
Map-Reduce Framework
Map input records=22
Map output records=22
Map output bytes=789
Map output materialized bytes=839
Input split bytes=100
Combine input records=0
Combine output records=0
Reduce input groups=21
Reduce shuffle bytes=839
Reduce input records=22
Reduce output records=21
Spilled Records=44
Shuffled Maps =1
Failed Shuffles=0
Merged Map outputs=1
GC time elapsed (ms)=232
CPU time spent (ms)=1650
Physical memory (bytes) snapshot=299077632
Virtual memory (bytes) snapshot=4161282048
Total committed heap usage (bytes)=140873728
Shuffle Errors
BAD_ID=0
CONNECTION=0
IO_ERROR=0
WRONG_LENGTH=0
WRONG_MAP=0
WRONG_REDUCE=0
File Input Format Counters
Bytes Read=1942
File Output Format Counters
Bytes Written=1307
[hadoop@master ~]$ hadoop fs -cat /flow/output2/part-r-00000
13480253104 FlowBean{upFlow=180, downFlow=180, sumFlow=360}
13502468823 FlowBean{upFlow=7335, downFlow=110349, sumFlow=117684}
13560436666 FlowBean{upFlow=1116, downFlow=954, sumFlow=2070}
13560439658 FlowBean{upFlow=2034, downFlow=5892, sumFlow=7926}
13602846565 FlowBean{upFlow=1938, downFlow=2910, sumFlow=4848}
13660577991 FlowBean{upFlow=6960, downFlow=690, sumFlow=7650}
13719199419 FlowBean{upFlow=240, downFlow=0, sumFlow=240}
13726230503 FlowBean{upFlow=2481, downFlow=24681, sumFlow=27162}
13726238888 FlowBean{upFlow=2481, downFlow=24681, sumFlow=27162}
13760778710 FlowBean{upFlow=120, downFlow=120, sumFlow=240}
13826544101 FlowBean{upFlow=264, downFlow=0, sumFlow=264}
13922314466 FlowBean{upFlow=3008, downFlow=3720, sumFlow=6728}
13925057413 FlowBean{upFlow=11058, downFlow=48243, sumFlow=59301}
13926251106 FlowBean{upFlow=240, downFlow=0, sumFlow=240}
13926435656 FlowBean{upFlow=132, downFlow=1512, sumFlow=1644}
15013685858 FlowBean{upFlow=3659, downFlow=3538, sumFlow=7197}
15920133257 FlowBean{upFlow=3156, downFlow=2936, sumFlow=6092}
15989002119 FlowBean{upFlow=1938, downFlow=180, sumFlow=2118}
18211575961 FlowBean{upFlow=1527, downFlow=2106, sumFlow=3633}
18320173382 FlowBean{upFlow=9531, downFlow=2412, sumFlow=11943}
84138413 FlowBean{upFlow=4116, downFlow=1432, sumFlow=5548}
需求:2.将流量统计结果按照手机归属地省份不同输出到不同文件中(ReduceTask并行度控制,自定义Partitioner)
1.MapReduce会对map输出的每个kv对调用getPartition计算分区号,按分区号把数据分发给对应的ReduceTask(相同key必然得到相同分区号,因此会进入同一个ReduceTask)
2.Map输出结果的时候调用了Partitioner组件(返回分区号),由它决定将数据放到哪个分区中,默认的分区规则为:根据key的hashcode%reducetask数来分发,源代码如下:
package org.apache.hadoop.mapreduce.lib.partition;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.mapreduce.Partitioner;
/** Partition keys by their {@link Object#hashCode()}. */
@InterfaceAudience.Public
@InterfaceStability.Stable
public class HashPartitioner<K, V> extends Partitioner<K, V> {
/** Use {@link Object#hashCode()} to partition. */
public int getPartition(K key, V value,
int numReduceTasks) {
// Masking with Integer.MAX_VALUE clears the sign bit, so the modulo result
// is always a valid non-negative partition index even for negative hashCodes.
return (key.hashCode() & Integer.MAX_VALUE) % numReduceTasks;
}
}
3.所以:如果要按照我们自己的需求进行分区,则需要改写数据分发(分区)组件Partitioner,自定义一个ProvincePartitioner继承抽象类:Partitioner,返回一个分区编号
4.然后在job对象中,设置自定义partitioner: job.setPartitionerClass(ProvincePartitioner.class)
5.自定义partition后,要根据自定义partitioner的逻辑设置相应数量的ReduceTask
代码实现自定义partitioner数据分区规则:
package com.ghq.mr;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Partitioner;
import java.util.HashMap;
import java.util.Map;
/**
* KEY, VALUE 对应的是map输出的KEY, VALUE类型
*
* @author snow
*/
/**
 * Routes each map output record to a reduce partition by the 3-digit phone
 * prefix, so numbers from the same "province" end up in the same output file.
 *
 * <p>The KEY/VALUE generic parameters match the map output types.
 *
 * @author snow
 */
public class ProvincePartitioner extends Partitioner<Text, FlowBean> {

    // Partition index for any prefix not found in the dictionary.
    private static final int DEFAULT_PARTITION = 4;

    /** Phone prefix -> partition id; unknown prefixes fall into partition 4. */
    public static final Map<String, Integer> provinceDict = new HashMap<>();

    static {
        provinceDict.put("136", 0);
        provinceDict.put("137", 1);
        provinceDict.put("138", 2);
        provinceDict.put("139", 3);
    }

    @Override
    public int getPartition(Text key, FlowBean value, int numPartitions) {
        String phone = key.toString();
        // Guard keys shorter than 3 chars (e.g. the 8-digit "84138413" is fine,
        // but a malformed short key would make substring(0, 3) throw a
        // StringIndexOutOfBoundsException and fail the whole task).
        if (phone.length() < 3) {
            return DEFAULT_PARTITION;
        }
        Integer pId = provinceDict.get(phone.substring(0, 3));
        return pId == null ? DEFAULT_PARTITION : pId;
    }
}
指定Partitioner和ReduceTask的数量
//指定我们自定义的数据分区器
job.setPartitionerClass(ProvincePartitioner.class);
//同时指定相应“分区”数量的reducetask
job.setNumReduceTasks(5);
运行程序:
hadoop jar a.jar com.ghq.mr.FlowCount /flow/input /flow/output3
运行过程:
[hadoop@master ~]$ hadoop jar a.jar com.ghq.mr.FlowCount /flow/input /flow/output3
18/09/02 21:11:10 INFO client.RMProxy: Connecting to ResourceManager at master/192.168.248.143:8032
18/09/02 21:11:11 WARN mapreduce.JobResourceUploader: Hadoop command-line option parsing not performed. Implement the Tool interface and execute your application with ToolRunner to remedy this.
18/09/02 21:11:12 INFO input.FileInputFormat: Total input files to process : 1
18/09/02 21:11:12 INFO mapreduce.JobSubmitter: number of splits:1
18/09/02 21:11:13 INFO mapreduce.JobSubmitter: Submitting tokens for job: job_1535864964114_0009
18/09/02 21:11:14 INFO impl.YarnClientImpl: Submitted application application_1535864964114_0009
18/09/02 21:11:14 INFO mapreduce.Job: The url to track the job: http://master:8088/proxy/application_1535864964114_0009/
18/09/02 21:11:14 INFO mapreduce.Job: Running job: job_1535864964114_0009
18/09/02 21:11:25 INFO mapreduce.Job: Job job_1535864964114_0009 running in uber mode : false
18/09/02 21:11:25 INFO mapreduce.Job: map 0% reduce 0%
18/09/02 21:11:33 INFO mapreduce.Job: map 100% reduce 0%
18/09/02 21:11:49 INFO mapreduce.Job: map 100% reduce 20%
18/09/02 21:11:51 INFO mapreduce.Job: map 100% reduce 40%
18/09/02 21:11:53 INFO mapreduce.Job: map 100% reduce 60%
18/09/02 21:11:57 INFO mapreduce.Job: map 100% reduce 100%
18/09/02 21:11:58 INFO mapreduce.Job: Job job_1535864964114_0009 completed successfully
18/09/02 21:11:59 INFO mapreduce.Job: Counters: 50
File System Counters
FILE: Number of bytes read=863
FILE: Number of bytes written=947201
FILE: Number of read operations=0
FILE: Number of large read operations=0
FILE: Number of write operations=0
HDFS: Number of bytes read=2042
HDFS: Number of bytes written=1307
HDFS: Number of read operations=18
HDFS: Number of large read operations=0
HDFS: Number of write operations=10
Job Counters
Killed reduce tasks=1
Launched map tasks=1
Launched reduce tasks=5
Data-local map tasks=1
Total time spent by all maps in occupied slots (ms)=6323
Total time spent by all reduces in occupied slots (ms)=80616
Total time spent by all map tasks (ms)=6323
Total time spent by all reduce tasks (ms)=80616
Total vcore-milliseconds taken by all map tasks=6323
Total vcore-milliseconds taken by all reduce tasks=80616
Total megabyte-milliseconds taken by all map tasks=6474752
Total megabyte-milliseconds taken by all reduce tasks=82550784
Map-Reduce Framework
Map input records=22
Map output records=22
Map output bytes=789
Map output materialized bytes=863
Input split bytes=100
Combine input records=0
Combine output records=0
Reduce input groups=21
Reduce shuffle bytes=863
Reduce input records=22
Reduce output records=21
Spilled Records=44
Shuffled Maps =5
Failed Shuffles=0
Merged Map outputs=5
GC time elapsed (ms)=1497
CPU time spent (ms)=5630
Physical memory (bytes) snapshot=701714432
Virtual memory (bytes) snapshot=12501250048
Total committed heap usage (bytes)=213114880
Shuffle Errors
BAD_ID=0
CONNECTION=0
IO_ERROR=0
WRONG_LENGTH=0
WRONG_MAP=0
WRONG_REDUCE=0
File Input Format Counters
Bytes Read=1942
File Output Format Counters
Bytes Written=1307
查看结果:
[hadoop@master ~]$ hadoop fs -ls /flow/output3
Found 6 items
-rw-r--r-- 2 hadoop supergroup 0 2018-09-02 21:11 /flow/output3/_SUCCESS
-rw-r--r-- 2 hadoop supergroup 125 2018-09-02 21:11 /flow/output3/part-r-00000
-rw-r--r-- 2 hadoop supergroup 248 2018-09-02 21:11 /flow/output3/part-r-00001
-rw-r--r-- 2 hadoop supergroup 58 2018-09-02 21:11 /flow/output3/part-r-00002
-rw-r--r-- 2 hadoop supergroup 249 2018-09-02 21:11 /flow/output3/part-r-00003
-rw-r--r-- 2 hadoop supergroup 627 2018-09-02 21:11 /flow/output3/part-r-00004
查看单独一个文件内容:
[hadoop@master ~]$ hadoop fs -cat /flow/output3/part-r-00000
13602846565 FlowBean{upFlow=1938, downFlow=2910, sumFlow=4848}
13660577991 FlowBean{upFlow=6960, downFlow=690, sumFlow=7650}
结果符合预期。
需求:3.将统计结果按照总流量倒序排序
思路:对第一次统计结果使用mapreduce处理
实现代码如下:
package com.ghq.mr;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import java.io.IOException;
public class FlowCountSort {

    /**
     * Mapper: reads the first job's per-phone totals and emits
     * {@code <FlowBean, phone>}, so the shuffle sorts records by
     * FlowBean.compareTo (descending total flow).
     *
     * <p>NOTE(review): this expects each input line to be
     * {@code phone \t upFlow \t downFlow [\t sumFlow]} — the first job's
     * FlowBean.toString() must therefore emit tab-separated numbers, not the
     * "FlowBean{...}" format; confirm before chaining the jobs.
     */
    static class FlowCountSortMapper extends Mapper<LongWritable, Text, FlowBean, Text> {

        // Reused output objects: context.write serializes their content right
        // away, so one instance per task avoids per-record allocation.
        FlowBean bean = new FlowBean();
        Text v = new Text();

        @Override
        protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
            // One line of the previous job's output: already the per-phone totals.
            String line = value.toString();
            // TextOutputFormat separates key and value with a tab.
            String[] fields = line.split("\t");
            String phone = fields[0];
            long up = Long.parseLong(fields[1]);
            long dFlow = Long.parseLong(fields[2]);
            bean.set(up, dFlow);
            v.set(phone);
            context.write(bean, v);
        }
    }

    /**
     * Reducer: keys arrive sorted by total flow (descending). Phones with an
     * identical total are grouped under one key, so every value in the group
     * must be written — emitting only the first value would silently drop the
     * other phones of the group.
     */
    static class FlowCountSortReducer extends Reducer<FlowBean, Text, Text, FlowBean> {
        @Override
        protected void reduce(FlowBean key, Iterable<Text> values, Context context) throws IOException, InterruptedException {
            for (Text phone : values) {
                context.write(phone, key);
            }
        }
    }

    /**
     * Job driver for the sort pass: single reducer so the output is globally
     * ordered; deletes a pre-existing output directory for easy re-runs.
     */
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf);
        // Locate the jar containing this class so the cluster can ship it.
        job.setJarByClass(FlowCountSort.class);
        // Mapper / Reducer implementations for this job.
        job.setMapperClass(FlowCountSortMapper.class);
        job.setReducerClass(FlowCountSortReducer.class);
        // Map output kv types: the bean is the key so the framework sorts on it.
        job.setMapOutputKeyClass(FlowBean.class);
        job.setMapOutputValueClass(Text.class);
        // Final (reducer) output kv types.
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(FlowBean.class);
        // A custom partitioner would break the global order; left disabled.
        //job.setPartitionerClass(ProvincePartitioner.class);
        // One reducer => one globally sorted output file.
        job.setNumReduceTasks(1);
        // Input: the output directory of the first FlowCount job.
        FileInputFormat.setInputPaths(job, new Path(args[0]));
        // Remove a stale output directory so re-running doesn't fail.
        Path outPath = new Path(args[1]);
        FileSystem fs = FileSystem.get(conf);
        if (fs.exists(outPath)) {
            fs.delete(outPath, true);
        }
        FileOutputFormat.setOutputPath(job, outPath);
        // Submit to YARN and block, printing progress.
        boolean res = job.waitForCompletion(true);
        System.exit(res ? 0 : 1);
    }
}
说明:FlowBean作为Map输出的key参与框架排序,需要实现Hadoop的WritableComparable接口(它同时继承了Writable和Comparable,框架默认的排序比较器要求key类型是WritableComparable):
public class FlowBean implements Writable,Comparable<FlowBean> {
@Override
public int compareTo(FlowBean o) {
return this.sumFlow>=o.getSumFlow()?-1:1;
}
}
执行和结果略去。