A simple example of data deduplication with MapReduce

Copyright: 数据丁, https://blog.csdn.net/reasery/article/details/82872341
package mrpro927;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;



/*
 * Requirement: deduplicate data. The shuffle's sorting and grouping on the map
 * output key does the actual deduplication; the reducer carries no aggregation
 * logic and simply emits each distinct key once.
 */
public class phoneDataQuChong {
	// Mapper: emit the entire input line as the key; the value is NullWritable
	public static class MyMapper extends Mapper<LongWritable, Text, Text, NullWritable>{
		@Override
		protected void map(LongWritable key, Text value,
				Mapper<LongWritable, Text, Text, NullWritable>.Context context)
				throws IOException, InterruptedException {
			// The whole line becomes the key, so identical lines collapse in the shuffle
			context.write(value, NullWritable.get());
		}
	}
	
	// Reducer: keys arrive grouped and sorted, so writing each key once yields the deduplicated output
	public static class MyReducer extends Reducer<Text, NullWritable, Text, NullWritable>{
		@Override
		protected void reduce(Text key, Iterable<NullWritable> values,
				Reducer<Text, NullWritable, Text, NullWritable>.Context context) 
						throws IOException, InterruptedException {
			context.write(key, NullWritable.get());
		}
	}
	
	
	
	public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
		// Load the configuration
		Configuration conf = new Configuration();
		// When running from Eclipse, act as this Linux user for HDFS access
		System.setProperty("HADOOP_USER_NAME", "mading");
		// Create a job
		Job job = Job.getInstance(conf);
		// Set the main class of the job
		job.setJarByClass(phoneDataQuChong.class);
		// Set the mapper and reducer classes
		job.setMapperClass(MyMapper.class);
		job.setReducerClass(MyReducer.class);
		// Set the map output key/value types (may be omitted when they match the reduce output types)
		job.setMapOutputKeyClass(Text.class);
		job.setMapOutputValueClass(NullWritable.class);
		// Set the reduce output key/value types
		job.setOutputKeyClass(Text.class);
		job.setOutputValueClass(NullWritable.class);
		// Set a custom partitioner (optional)
		//job.setPartitionerClass(MyPartitioner.class);
		// Set the number of reduce tasks (optional)
		//job.setNumReduceTasks(1);
		// Input path on HDFS (a direct NameNode URI; an HA cluster would use its nameservice ID instead)
		FileInputFormat.addInputPath(job, new Path("hdfs://hdp03:9000/phonedatain"));
		// Output path (must not already exist)
		FileOutputFormat.setOutputPath(job, new Path("hdfs://hdp03:9000/pout01"));
		// Submit the job, wait for completion, and exit nonzero on failure
		System.exit(job.waitForCompletion(true) ? 0 : 1);
	}
}
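To make the behavior concrete, suppose the input file holds these hypothetical lines (values invented for illustration):

	13800000000
	13800000000
	13900000001
	13800000000

Each whole line becomes a map output key, the shuffle groups identical keys together, and the reducer writes each one exactly once, so the output is:

	13800000000
	13900000001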

Emit the field you want to deduplicate as the map output key. The shuffle sorts and groups identical keys before they reach the reducer, so the reducer sees each distinct value exactly once and deduplication happens essentially for free.
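Because the reducer only re-emits its key and the values are NullWritable, the same class can also be registered as a combiner, dropping duplicates map-side before the shuffle and cutting network traffic. This one-line addition to the driver is an optional optimization, not part of the original code:

	job.setCombinerClass(MyReducer.class); // safe here: combine and reduce are identical and idempotent

One more practical note: FileOutputFormat fails the job with a FileAlreadyExistsException if the output path already exists, so rerunning against hdfs://hdp03:9000/pout01 requires removing the old directory first. A minimal guard, sketched here as an assumption rather than something the original post does:

	Path out = new Path("hdfs://hdp03:9000/pout01");
	FileSystem fs = out.getFileSystem(conf); // needs import org.apache.hadoop.fs.FileSystem
	if (fs.exists(out)) {
		fs.delete(out, true); // recursively delete the stale output directory
	}
	FileOutputFormat.setOutputPath(job, out);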
