(7) MapReduce Custom Types and Partitioning

Requirements

The input data has the following columns: phone | region | name | traffic used

Partition the records by region across three reducers so that three output files are produced, each containing the total traffic used by each person in that region.

13877779999 bj zs 2145
13766668888 sh ls 1028
13766668888 sh ls 9987
13877779999 bj zs 5678
13544445555 sz ww 10577
13877779999 sh zs 2145
13766668888 sh ls 9987

Key points covered

  • Writing the Mapper and Reducer components
  • Writing a custom class to use as an input/output type
  • Writing a custom Partitioner class
  • Setting the number of ReduceTasks
  • MapReduce sorts the Mapper output keys by default

Code implementation

package hadoop;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import org.apache.hadoop.io.Writable;

//A user-defined JavaBean must implement Hadoop's Writable serialization mechanism
public class JavaBeanDemo implements Writable {
	private String phone;
	private String address;
	private String name;
	private long flow;
	
	//Serialization method
	@Override
	public void write(DataOutput out) throws IOException {
		out.writeUTF(phone);
		out.writeUTF(address);
		out.writeUTF(name);
		out.writeLong(flow);
	}
		
	//Deserialization method
	//Fields must be read back in exactly the same order they were written
	@Override
	public void readFields(DataInput in) throws IOException {
		this.phone = in.readUTF();
		this.address = in.readUTF();
		this.name = in.readUTF();
		this.flow = in.readLong();
	}

	public String getPhone() {
		return phone;
	}

	public void setPhone(String phone) {
		this.phone = phone;
	}

	public String getAddress() {
		return address;
	}

	public void setAddress(String address) {
		this.address = address;
	}

	public String getName() {
		return name;
	}

	public void setName(String name) {
		this.name = name;
	}

	public long getFlow() {
		return flow;
	}

	public void setFlow(long flow) {
		this.flow = flow;
	}

	@Override
	public String toString() {
		return "JavaBeanDemo [phone=" + phone + ", address=" + address + ", name=" + name + ", flow=" + flow + "]";
	}
	
}
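
Since write() and readFields() must agree on the field order, the bean can be sanity-checked without a cluster by round-tripping it through plain Java streams. A minimal sketch, where the WritableRoundTrip class and the sample values are illustrative additions rather than part of the original job:

package hadoop;

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;

//Illustrative helper (not part of the job): serialize a JavaBeanDemo and read it back
public class WritableRoundTrip {
	public static void main(String[] args) throws Exception {
		JavaBeanDemo in = new JavaBeanDemo();
		in.setPhone("13877779999");
		in.setAddress("bj");
		in.setName("zs");
		in.setFlow(2145);

		//write() produces the bytes; readFields() must consume them in the same order
		ByteArrayOutputStream bytes = new ByteArrayOutputStream();
		in.write(new DataOutputStream(bytes));

		JavaBeanDemo out = new JavaBeanDemo();
		out.readFields(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));

		//prints JavaBeanDemo [phone=13877779999, address=bj, name=zs, flow=2145]
		System.out.println(out);
	}
}

Next comes the custom Partitioner:
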
package hadoop;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Partitioner;

public class PartitionDemo extends Partitioner<Text, JavaBeanDemo>{

	//Partition the records by region so that each region goes to its own reducer
	@Override
	public int getPartition(Text key, JavaBeanDemo value, int numPartitions) {
		if(value.getAddress().equals("bj")) {
			return 0;
		}else if(value.getAddress().equals("sh")) {
			return 1;
		}else if(value.getAddress().equals("sz")) {
			return 2;
		}else{
			//the return value must be less than the number of reduce tasks (3 here),
			//so any unexpected region falls back to partition 0
			return 0;
		}
	}

}
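
For comparison, if no Partitioner is configured, Hadoop falls back to HashPartitioner, which spreads keys across reducers by hash rather than by a business field such as the region. Its logic is essentially the following (shown here only for reference):

	//Default org.apache.hadoop.mapreduce.lib.partition.HashPartitioner behavior:
	//records with the same key (here, the same name) still meet at one reducer,
	//but which reducer that is has nothing to do with the region
	public int getPartition(Text key, JavaBeanDemo value, int numPartitions) {
		return (key.hashCode() & Integer.MAX_VALUE) % numPartitions;
	}

Whichever implementation is used, getPartition must return a value in [0, numPartitions); that is why the fallback branch above sends unexpected regions to partition 0 instead of a fourth partition that does not exist when only three reduce tasks are configured. The Mapper component comes next:
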
package hadoop;

import java.io.IOException;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

//Develop the Mapper component by extending Mapper
/*The four generic types: the first two are fixed, the last two depend on the job
  Mapper input key type (the key is the byte offset of each line)
  Mapper input value type (the value is the content of the line)
  Mapper output key type
  Mapper output value type
*/
public class MapperDemo extends Mapper<LongWritable, Text, Text, JavaBeanDemo>{
	
	/*
	 * The Mapper component passes each input key and value to the map method;
	 * context.write(key, value) emits an output key and value
	 */
	@Override
	protected void map(LongWritable key, Text value, Mapper<LongWritable, Text, Text, JavaBeanDemo>.Context context)
			throws IOException, InterruptedException {
		 
		String line=value.toString();
		String[] data=line.split(" ");
		
		JavaBeanDemo jbd=new JavaBeanDemo();
		jbd.setPhone(data[0]);
		jbd.setAddress(data[1]);
		jbd.setName(data[2]);
		jbd.setFlow(Long.parseLong(data[3]));
		//emit the pair {name, JavaBeanDemo object}
		context.write(new Text(jbd.getName()), jbd);
	}
}
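
The map logic is plain string parsing, so it can be checked outside Hadoop. A small sketch (the MapperParseCheck class is an illustrative addition) that runs the same split on the first sample line; note that split(" ") assumes fields separated by a single space, so tab-separated input would need split("\t") or split("\\s+") instead:

package hadoop;

//Illustrative check (not part of the job): apply the mapper's parsing to one sample line
public class MapperParseCheck {
	public static void main(String[] args) {
		String line = "13877779999 bj zs 2145";
		String[] data = line.split(" ");

		JavaBeanDemo jbd = new JavaBeanDemo();
		jbd.setPhone(data[0]);
		jbd.setAddress(data[1]);
		jbd.setName(data[2]);
		jbd.setFlow(Long.parseLong(data[3]));

		//the mapper would emit the pair {zs, JavaBeanDemo [...]}
		System.out.println(jbd.getName() + "\t" + jbd);
	}
}

The Reducer component comes next:
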
package hadoop;

import java.io.IOException;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

/*
 * The four generic types: the first two match the Mapper output key and value, the last two depend on the job
 * 			Reducer input key type (the Mapper output key)
 * 			Reducer input value type (the Mapper output value)
 * 			Reducer output key type
 * 			Reducer output value type
 */
public class ReducerDemo extends Reducer<Text, JavaBeanDemo, Text, JavaBeanDemo>{
	
	@Override
	protected void reduce(Text key, Iterable<JavaBeanDemo> values,
			Reducer<Text, JavaBeanDemo, Text, JavaBeanDemo>.Context context) throws IOException, InterruptedException {
		JavaBeanDemo jbd = new JavaBeanDemo();
		
		//sum the traffic of all records for the same person, then package the totals
		for(JavaBeanDemo jbdTmp : values){
			jbd.setFlow(jbd.getFlow()+jbdTmp.getFlow());
			jbd.setPhone(jbdTmp.getPhone());
			jbd.setName(jbdTmp.getName());
			jbd.setAddress(jbdTmp.getAddress());
		}
		
		context.write(new Text(key), jbd);
		
	}
}
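
The summing loop can also be replayed locally. A minimal sketch, where the ReducerSumCheck class and its bean() helper are illustrative additions, feeding the three "ls" records from the sample data through the same accumulation:

package hadoop;

import java.util.Arrays;
import java.util.List;

//Illustrative check (not part of the job): replay the reducer's summing loop for the "ls" group
public class ReducerSumCheck {
	public static void main(String[] args) {
		List<JavaBeanDemo> values = Arrays.asList(
				bean("13766668888", "sh", "ls", 1028),
				bean("13766668888", "sh", "ls", 9987),
				bean("13766668888", "sh", "ls", 9987));

		JavaBeanDemo jbd = new JavaBeanDemo();
		for (JavaBeanDemo jbdTmp : values) {
			jbd.setFlow(jbd.getFlow() + jbdTmp.getFlow());
			jbd.setPhone(jbdTmp.getPhone());
			jbd.setName(jbdTmp.getName());
			jbd.setAddress(jbdTmp.getAddress());
		}

		//flow should sum to 1028 + 9987 + 9987 = 21002
		System.out.println(jbd);
	}

	//small factory to keep the sample data readable
	private static JavaBeanDemo bean(String phone, String address, String name, long flow) {
		JavaBeanDemo b = new JavaBeanDemo();
		b.setPhone(phone);
		b.setAddress(address);
		b.setName(name);
		b.setFlow(flow);
		return b;
	}
}

In the real job the framework reuses the object behind the values iterator, but because the loop only copies field values into a fresh bean, that reuse does not change the result. Finally, the Driver that wires everything together:
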
package hadoop;


import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;


public class Driver {
	public static void main(String[] args) throws Exception {
		Configuration conf = new Configuration();
		
		//Create the MR job object
		Job job = Job.getInstance(conf);
		
		//Set the job's main entry class
		job.setJarByClass(Driver.class);
		
		//Set the Mapper class
		job.setMapperClass(MapperDemo.class);
		
		//Set the Mapper output key and value types
		job.setMapOutputKeyClass(Text.class);
		job.setMapOutputValueClass(JavaBeanDemo.class);
		
		//Set the Reducer class
		job.setReducerClass(ReducerDemo.class);
		
		//Set the Reducer output key and value types
		job.setOutputKeyClass(Text.class);
		job.setOutputValueClass(JavaBeanDemo.class);
		
		//Set the custom Partitioner class
		job.setPartitionerClass(PartitionDemo.class);
		
		//Set the number of reduce tasks (defaults to 1 if not set)
		job.setNumReduceTasks(3);
		
		//Set the HDFS path of the input (a file or a directory)
		FileInputFormat.setInputPaths(job, new Path("hdfs://192.168.80.100:9000/mrTestDir"));
		
		//Set the output path (the directory must not already exist, otherwise the job fails)
		FileOutputFormat.setOutputPath(job, new Path("hdfs://192.168.80.100:9000/mrTestDir/result"));
		
		//Submit the job and wait for it to finish
		job.waitForCompletion(true);
	}
}
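
Packaged into a jar and submitted to the cluster (or run from the IDE against the HDFS address above), the job should write three files under /mrTestDir/result. Based on the sample data, the partitioner, and the bean's toString(), the output should look roughly like this; keys are sorted within each file because MapReduce sorts the Mapper output keys by default:

part-r-00000 (bj)
zs	JavaBeanDemo [phone=13877779999, address=bj, name=zs, flow=7823]

part-r-00001 (sh)
ls	JavaBeanDemo [phone=13766668888, address=sh, name=ls, flow=21002]
zs	JavaBeanDemo [phone=13877779999, address=sh, name=zs, flow=2145]

part-r-00002 (sz)
ww	JavaBeanDemo [phone=13544445555, address=sz, name=ww, flow=10577]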
