MapReduce: removing duplicate rows (keyed on one column)

Straight to the code. The input is a comma-separated file, one record per line; duplicates are removed by keying on the first field.

(Figure: screenshot of the input data, omitted.)
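
Since the screenshot is not available, here is a hypothetical sketch of what the input and output could look like. Assume /user/zx/data.csv contains lines whose first field repeats:

hello,1
hello,2
word,3
word,3

After the job runs, each distinct first field appears exactly once in the output, paired with the default IntWritable value of 0:

hello	0
word	0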

package com.sheng.test;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;




/*
 *  KEYIN:    type of the input key
 *  VALUEIN:  type of the input value
 *  KEYOUT:   type of the output key
 *  VALUEOUT: type of the output value
 *  Context:  the Mapper's context
 *
 *  Removes duplicates.
 */
class WcMapper4 extends Mapper<LongWritable, Text, Text, IntWritable> {
		/*
		 * KeyIn: LongWritable, the byte offset of the line within the file.
		 * ValueIn: Text, the contents of the line (as produced by TextInputFormat).
		 */

		@Override
		protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {

			// Get this line's contents, deserialized into a String
			String line = value.toString();
			// Split the line on commas
			String[] s = line.split(",");
			// Emit the first field as the key; everything written to the context
			// must be a Hadoop Writable type. The IntWritable value is a throwaway
			// placeholder (default 0); the shuffle groups duplicate keys together.
			context.write(new Text(s[0]), new IntWritable());
		}
	}
class WcReduce4 extends Reducer<Text, IntWritable, Text, IntWritable> {

		// reduce(key, the list of values the mappers emitted for that key, context)
		@Override
		protected void reduce(Text key, Iterable<IntWritable> values, Context context)
				throws IOException, InterruptedException {

			// Each distinct key reaches reduce() exactly once, so writing it
			// out once removes the duplicates.
			context.write(key, new IntWritable());

	}
}
public class Demo3 {

   	public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
		// Load the default Hadoop configuration
		Configuration conf = new Configuration();
		// conf.set("HADOOP_USER_NAME", "ambow");
		// Create the Job object
		Job job = Job.getInstance(conf);
		// Register the jar by its driver class
		job.setJarByClass(Demo3.class);
		// Register the Mapper class
		job.setMapperClass(WcMapper4.class);
		// Register the Reducer class
		job.setReducerClass(WcReduce4.class);

		// Set the Mapper's output key/value types
   		job.setMapOutputKeyClass(Text.class);
   		job.setMapOutputValueClass(IntWritable.class);

		// Set the job's final output key/value types
   		job.setOutputKeyClass(Text.class);
   		job.setOutputValueClass(IntWritable.class);
   		FileInputFormat.setInputPaths(job, new Path("/user/zx/data.csv"));
   		FileOutputFormat.setOutputPath(job, new Path("/user/sheng/data52.csv"));
		// Setting the number of reduce tasks to 0 would skip the reduce phase
		// entirely (and with it the deduplication), so this stays commented out:
		// job.setNumReduceTasks(0);

		// Submit the job and wait for it to finish
   		boolean result = job.waitForCompletion(true);

   		System.exit(result ? 0 : 1);
   	}
   }
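
The IntWritable value this job carries around is never actually used. A slightly more idiomatic variant of the same pattern (a sketch, not from the original post) replaces it with NullWritable, which serializes to zero bytes:

import java.io.IOException;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;

class DedupMapper extends Mapper<LongWritable, Text, Text, NullWritable> {
	@Override
	protected void map(LongWritable key, Text value, Context context)
			throws IOException, InterruptedException {
		// Key on the first comma-separated field; the value carries no data
		context.write(new Text(value.toString().split(",")[0]), NullWritable.get());
	}
}

class DedupReducer extends Reducer<Text, NullWritable, Text, NullWritable> {
	@Override
	protected void reduce(Text key, Iterable<NullWritable> values, Context context)
			throws IOException, InterruptedException {
		// Each distinct key arrives here exactly once after the shuffle
		context.write(key, NullWritable.get());
	}
}

In the driver, the value classes change accordingly: job.setMapOutputValueClass(NullWritable.class) and job.setOutputValueClass(NullWritable.class).

To run either version, package the classes into a jar and submit it with the hadoop command (the jar name demo3.jar here is hypothetical):

hadoop jar demo3.jar com.sheng.test.Demo3

Note that FileOutputFormat requires the output path not to exist yet; delete /user/sheng/data52.csv before re-running, or the job will fail.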
   
 	