Hadoop Serialization, with a Worked Example

1. What is serialization?
Serialization converts in-memory objects into a byte sequence (or another data transfer format) so that they can be persisted to disk and transmitted over the network.

2. What is deserialization?
Deserialization is the reverse: it converts a received byte sequence (or other protocol data, or data persisted on disk) back into in-memory objects.

3. Why serialize?
Serialization makes it possible to store "live" objects and to send them to remote machines.

Why not use Java's built-in serialization?
Java serialization is a heavyweight framework: a serialized object carries a lot of extra information along with it, which lowers network transfer efficiency. The small sketch after the feature list below illustrates the size difference.

Characteristics of Hadoop serialization:
1. Compact
2. Fast
3. Extensible
4. Interoperable
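To illustrate the "compact" point, here is a small standalone sketch of my own (not from the original post; the class name SerializationSizeDemo and the sample values are made up). It serializes a bean with three long fields once with Java native serialization and once as raw longs, the way a Writable does, and prints both sizes:

package MaperReduce04;

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.ObjectOutputStream;
import java.io.Serializable;

// Compare Java native serialization with writing the same three long fields directly,
// the way a Writable does. Values are made up for illustration.
public class SerializationSizeDemo {

    // A plain Serializable bean with the same three fields as the FlowBean defined later
    static class JavaFlowBean implements Serializable {
        private static final long serialVersionUID = 1L;
        long upFlow = 1116, downFlow = 954, sumFlow = 2070;
    }

    public static void main(String[] args) throws IOException {
        // Java native serialization: the stream also carries the class descriptor,
        // field names, serialVersionUID and so on
        ByteArrayOutputStream javaBytes = new ByteArrayOutputStream();
        ObjectOutputStream oos = new ObjectOutputStream(javaBytes);
        oos.writeObject(new JavaFlowBean());
        oos.flush();

        // Writable-style serialization: just the raw values, 3 longs = 24 bytes
        ByteArrayOutputStream rawBytes = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(rawBytes);
        out.writeLong(1116);
        out.writeLong(954);
        out.writeLong(2070);

        System.out.println("Java native serialization: " + javaBytes.size() + " bytes");
        System.out.println("Writable-style output:     " + rawBytes.size() + " bytes");
    }
}

The Java-serialized form comes out noticeably larger than the 24 bytes of raw field data, which is the overhead the point above refers to.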

Example (example and data source: 尚硅谷 / atguigu)
Input data:

1   13736230513  192.196.100.1   www.atguigu.com  2481   24681   200
2   13846544121  192.196.100.2                    264    0       200
3   13956435636  192.196.100.3                    132    1512    200
4   13966251146  192.168.100.1                    240    0       404
5   18271575951  192.168.100.2   www.atguigu.com  1527   2106    200
6   84188413     192.168.100.3   www.atguigu.com  4116   1432    200
7   13590439668  192.168.100.4                    1116   954     200
8   15910133277  192.168.100.5   www.hao123.com   3156   2936    200
9   13729199489  192.168.100.6                    240    0       200
10  13630577991  192.168.100.7   www.shouhu.com   6960   690     200
11  15043685818  192.168.100.8   www.baidu.com    3659   3538    200
12  15959002129  192.168.100.9   www.atguigu.com  1938   180     500
13  13560439638  192.168.100.10                   918    4938    200
14  13470253144  192.168.100.11                   180    180     200
15  13682846555  192.168.100.12  www.qq.com       1938   2910    200
16  13992314666  192.168.100.13  www.gaga.com     3008   3720    200
17  13509468723  192.168.100.14  www.qinghua.com  7335   110349  404
18  18390173782  192.168.100.15  www.sogou.com    9531   2412    200
19  13975057813  192.168.100.16  www.baidu.com    11058  48243   200
20  13768778790  192.168.100.17                   120    120     200
21  13568436656  192.168.100.18  www.alibaba.com  2481   24681   200
22  13568436656  192.168.100.19                   1116   954     200

(In the actual input file the fields are tab-separated: id, phone number, network IP, an optional URL, upstream traffic, downstream traffic, and HTTP status code. The URL column is missing for some records.)

Requirement: for each phone number, compute the total upstream traffic, the total downstream traffic, and the overall total traffic consumed. With the toString() defined below, each output line contains the phone number followed by those three values, separated by tabs.
The bean class used to hold the traffic statistics:

package MaperReduce04;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import org.apache.hadoop.io.Writable;

// Implement the Writable interface
public class FlowBean implements Writable {

	private long upFlow;   // upstream traffic
	private long downFlow; // downstream traffic
	private long sumFlow;  // total traffic

	// No-arg constructor, needed so the framework can create instances by reflection
	public FlowBean() {
		super();
	}

	// Constructor with arguments
	public FlowBean(long upFlow, long downFlow) {
		super();
		this.upFlow = upFlow;
		this.downFlow = downFlow;
		sumFlow = upFlow + downFlow;
	}

	// Serialization
	@Override
	public void write(DataOutput out) throws IOException {
		out.writeLong(upFlow);
		out.writeLong(downFlow);
		out.writeLong(sumFlow);
	}

	// Deserialization: fields must be read back in the same order they were written
	@Override
	public void readFields(DataInput in) throws IOException {
		upFlow = in.readLong();
		downFlow = in.readLong();
		sumFlow = in.readLong();
	}

	// Override toString so the output file is tab-separated
	@Override
	public String toString() {
		return upFlow + "\t" + downFlow + "\t" + sumFlow;
	}

	public long getUpFlow() {
		return upFlow;
	}

	public void setUpFlow(long upFlow) {
		this.upFlow = upFlow;
	}

	public long getDownFlow() {
		return downFlow;
	}

	public void setDownFlow(long downFlow) {
		this.downFlow = downFlow;
	}

	public long getSumFlow() {
		return sumFlow;
	}

	public void setSumFlow(long sumFlow) {
		this.sumFlow = sumFlow;
	}

	// Set both traffic fields and recompute the total in one call
	public void set(long upFlow, long downFlow) {
		this.upFlow = upFlow;
		this.downFlow = downFlow;
		this.sumFlow = upFlow + downFlow;
	}
}
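To see write() and readFields() working as a pair, here is a minimal round-trip sketch of my own (not part of the original post; the class name FlowBeanRoundTrip is made up). It serializes a FlowBean into an in-memory byte buffer and reads it back, using only the FlowBean above and standard java.io streams:

package MaperReduce04;

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

// Round-trip check for FlowBean: serialize with write(), deserialize with readFields()
public class FlowBeanRoundTrip {
    public static void main(String[] args) throws IOException {
        FlowBean original = new FlowBean(1116, 954);

        // Serialize: the three long fields are written in declaration order
        ByteArrayOutputStream buffer = new ByteArrayOutputStream();
        original.write(new DataOutputStream(buffer));

        // Deserialize: readFields() reads them back in exactly the same order
        FlowBean copy = new FlowBean();
        copy.readFields(new DataInputStream(new ByteArrayInputStream(buffer.toByteArray())));

        System.out.println(original); // 1116  954  2070
        System.out.println(copy);     // same values after the round trip
    }
}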

Mapper class:

package MaperReduce04;

import java.io.IOException;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

public class FlowCountmapper extends Mapper<LongWritable, Text, Text, FlowBean> {

	Text k = new Text();
	FlowBean v = new FlowBean();

	@Override
	protected void map(LongWritable key, Text value, Context context)
			throws IOException, InterruptedException {

		// sample line:
		// 7	13560436666	120.196.100.99		1116	954	200
		// id	phone number	network IP		up	down	HTTP status code

		// 1. Read one line
		String line = value.toString();

		// 2. Split on tabs
		String[] fields = line.split("\t");

		// 3. Wrap the fields into the output key and value
		k.set(fields[1]); // the phone number

		// index from the end of the array, because the URL field may be missing
		long upFlow = Long.parseLong(fields[fields.length - 3]);
		long downFlow = Long.parseLong(fields[fields.length - 2]);

		v.set(upFlow, downFlow); // fills upFlow, downFlow and sumFlow

		// 4. Write out
		context.write(k, v);
	}
}
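Because some records have no URL column, the mapper indexes the traffic fields from the end of the split array. The following small standalone sketch (my own addition; the class name FieldExtractionCheck is made up, the sample lines are taken from the input data above) checks that logic against one line with a URL and one without:

package MaperReduce04;

// Sanity-check the field-extraction logic used in the mapper
public class FieldExtractionCheck {
    public static void main(String[] args) {
        String withUrl    = "1\t13736230513\t192.196.100.1\twww.atguigu.com\t2481\t24681\t200";
        String withoutUrl = "2\t13846544121\t192.196.100.2\t264\t0\t200";

        for (String line : new String[] { withUrl, withoutUrl }) {
            String[] fields = line.split("\t");
            String phone = fields[1];
            long upFlow = Long.parseLong(fields[fields.length - 3]);
            long downFlow = Long.parseLong(fields[fields.length - 2]);
            System.out.println(phone + "\t" + upFlow + "\t" + downFlow);
        }
    }
}

Both lines yield the correct phone number and traffic values, regardless of whether the URL column is present.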

Reducer class:

package MaperReduce04;

import java.io.IOException;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

public class FlowCountReducer extends Reducer<Text, FlowBean, Text, FlowBean> {

	FlowBean v = new FlowBean();

	@Override
	protected void reduce(Text key, Iterable<FlowBean> values, Context context)
			throws IOException, InterruptedException {

		// sample intermediate data, e.g.:
		// 13525624824	2546	25662
		// 12523212564	15524	2552

		long sum_upFlow = 0;
		long sum_downFlow = 0;

		// accumulate the upstream and downstream traffic for this phone number
		for (FlowBean flowBean : values) {
			sum_upFlow += flowBean.getUpFlow();
			sum_downFlow += flowBean.getDownFlow();
		}

		v.set(sum_upFlow, sum_downFlow);

		// write out
		context.write(key, v);
	}
}
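One detail worth noting (my addition, not from the original post): Hadoop reuses the same FlowBean instance for every element of the values iterator, overwriting its fields on each iteration. That is why the reducer copies the values into plain long variables instead of holding on to the FlowBean objects. The runnable sketch below (class name ObjectReuseDemo and the sample records are mine) simulates that reuse to show the difference:

package MaperReduce04;

import java.util.ArrayList;
import java.util.List;

// Simulate Hadoop's reuse of a single value object in the reducer's iterator
public class ObjectReuseDemo {
	public static void main(String[] args) {
		FlowBean reused = new FlowBean();      // stands in for the one instance Hadoop reuses
		long[][] incoming = { { 2481, 24681 }, { 1116, 954 } };

		List<FlowBean> cached = new ArrayList<>();
		long sumUp = 0, sumDown = 0;

		for (long[] record : incoming) {
			reused.set(record[0], record[1]);  // the framework overwrites the same object like this
			cached.add(reused);                // anti-pattern: every list entry is the same object
			sumUp += reused.getUpFlow();       // correct: copy the primitives out immediately
			sumDown += reused.getDownFlow();
		}

		System.out.println("cached (wrong): " + cached);                   // both entries show the last record
		System.out.println("summed (right): " + sumUp + "\t" + sumDown);   // 3597	25635
	}
}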

Driver class:

package MaperReduce04;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class FlowsumDriver {

	public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {

		// hard-coded local paths for testing; they override any command-line arguments
		args = new String[] { "e:/input", "e:/output03" };

		Configuration conf = new Configuration();
		// 1. Get the job object
		Job job = Job.getInstance(conf);

		// 2. Set the jar path
		job.setJarByClass(FlowsumDriver.class);

		// 3. Associate the mapper and reducer
		job.setMapperClass(FlowCountmapper.class);
		job.setReducerClass(FlowCountReducer.class);

		// 4. Set the mapper output key and value types
		job.setMapOutputKeyClass(Text.class);
		job.setMapOutputValueClass(FlowBean.class);

		// 5. Set the final output key and value types
		job.setOutputKeyClass(Text.class);
		job.setOutputValueClass(FlowBean.class);

		// 6. Set the input and output paths
		FileInputFormat.setInputPaths(job, new Path(args[0]));
		FileOutputFormat.setOutputPath(job, new Path(args[1]));

		// 7. Submit the job
		boolean result = job.waitForCompletion(true);
		System.exit(result ? 0 : 1);
	}
}
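With the paths hard-coded as above, the job runs locally against e:/input and writes to e:/output03; the output directory must not exist yet, otherwise the job fails when the output specification is checked. To submit the job to a cluster instead, remove the hard-coded args line, package the classes into a jar, and launch it with something like the following (the jar name and HDFS paths here are placeholders of mine, not from the original post):

hadoop jar flowsum.jar MaperReduce04.FlowsumDriver /flowsum/input /flowsum/output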


Reprinted from blog.csdn.net/weixin_46457946/article/details/113995053