Hadoop: KeyValueTextInputFormat

InputFormat has a number of derived subclasses; this article focuses on one of FileInputFormat's concrete implementations: KeyValueTextInputFormat.

Here is the source code of KeyValueTextInputFormat:

public class KeyValueTextInputFormat extends FileInputFormat<Text, Text> {
    public KeyValueTextInputFormat() {
    }

    // A file is splittable only when it is not compressed
    // (no compression codec matches its path).
    protected boolean isSplitable(JobContext context, Path file) {
        CompressionCodec codec = (new CompressionCodecFactory(context.getConfiguration())).getCodec(file);
        return codec == null;
    }

    // Each split is read by a KeyValueLineRecordReader, which parses
    // every line into a (Text key, Text value) pair.
    public RecordReader<Text, Text> createRecordReader(InputSplit genericSplit, TaskAttemptContext context) throws IOException {
        context.setStatus(genericSplit.toString());
        return new KeyValueLineRecordReader(context.getConfiguration());
    }
}
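Under the hood, KeyValueLineRecordReader splits each line at the first occurrence of the separator; if a line contains no separator, the whole line becomes the key and the value is empty. Below is a simplified sketch of that per-line logic (illustrative only, not the actual Hadoop source; the class name SeparatorDemo is made up for this example):

public class SeparatorDemo {
    // Simplified illustration of KeyValueLineRecordReader's per-line split;
    // the real reader works on raw bytes and reuses Text objects.
    public static String[] splitLine(String line, char separator) {
        int pos = line.indexOf(separator);
        if (pos == -1) {
            // No separator: the whole line becomes the key, the value is empty.
            return new String[] { line, "" };
        }
        // Key = text before the first separator; value = everything after it.
        return new String[] { line.substring(0, pos), line.substring(pos + 1) };
    }

    public static void main(String[] args) {
        String[] kv = splitLine("banzhang ni hao", ' ');
        System.out.println(kv[0] + " -> " + kv[1]);   // banzhang -> ni hao
    }
}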

Note that KeyValueTextInputFormat does not override getSplits() either, so its input-split mechanism is identical to FileInputFormat's.

Each line of the input is one record, split by a separator into a key and a value. The separator can be configured in the driver class with conf.set(KeyValueLineRecordReader.KEY_VALUE_SEPERATOR, "\t"); (the misspelled SEPERATOR is the actual name of the Hadoop constant). The default separator is a tab (\t).
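The constant resolves to a plain configuration property, so the separator can equivalently be set by its raw property name (shown here with the default tab):

// Equivalent to using the KeyValueLineRecordReader constant; note the
// property name itself spells "separator" correctly.
conf.set("mapreduce.input.keyvaluelinerecordreader.key.value.separator", "\t");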

Example:

Requirement: count, for each distinct first word, how many lines in the input file start with that word.
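For instance, with a space as the separator (as configured in the driver below), a hypothetical input file such as

banzhang ni hao
xihuan hadoop banzhang
banzhang ni hao
xihuan hadoop banzhang

would be parsed into pairs like (banzhang, "ni hao"), and the job's expected output would be:

banzhang	2
xihuan	2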

Code implementation:

(1) Write the Mapper class

package com.c21.mapreduce.KeyValueTextInputFormat;
import java.io.IOException;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

public class KVTextMapper extends Mapper<Text, Text, Text, LongWritable> {

    // 1 The count emitted for every record
    private final LongWritable v = new LongWritable(1);

    @Override
    protected void map(Text key, Text value, Context context)
            throws IOException, InterruptedException {

        // For an input line such as "banzhang ni hao" (space separator),
        // the framework has already split it into key = "banzhang" and
        // value = "ni hao", so there is nothing left to parse here.

        // 2 Emit (first word, 1)
        context.write(key, v);
    }
}

(2) Write the Reducer class

package com.c21.mapreduce.KeyValueTextInputFormat;
import java.io.IOException;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

public class KVTextReducer extends Reducer<Text, LongWritable, Text, LongWritable> {

    private final LongWritable v = new LongWritable();

    @Override
    protected void reduce(Text key, Iterable<LongWritable> values, Context context)
            throws IOException, InterruptedException {

        long sum = 0L;

        // 1 Sum the counts for this key
        for (LongWritable value : values) {
            sum += value.get();
        }

        v.set(sum);

        // 2 Emit (first word, total line count)
        context.write(key, v);
    }
}

(3) Write the Driver class

package com.c21.mapreduce.KeyValueTextInputFormat;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.KeyValueLineRecordReader;
import org.apache.hadoop.mapreduce.lib.input.KeyValueTextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class KVTextDriver {

    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {

        Configuration conf = new Configuration();
        // Set the key/value separator (a space here instead of the default tab)
        conf.set(KeyValueLineRecordReader.KEY_VALUE_SEPERATOR, " ");

        // 1 Get the job object
        Job job = Job.getInstance(conf);

        // 2 Set the jar location and wire up the mapper and reducer
        job.setJarByClass(KVTextDriver.class);
        job.setMapperClass(KVTextMapper.class);
        job.setReducerClass(KVTextReducer.class);

        // 3 Set the map output key/value types
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(LongWritable.class);

        // 4 Set the final output key/value types
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(LongWritable.class);

        // 5 Set the input path
        FileInputFormat.setInputPaths(job, new Path(args[0]));

        // Use KeyValueTextInputFormat instead of the default TextInputFormat
        job.setInputFormatClass(KeyValueTextInputFormat.class);

        // 6 Set the output path
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        // 7 Submit the job and exit with its status
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
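With the three classes packaged into a jar, the job can be submitted in the usual way (the jar name and HDFS paths below are placeholders):

hadoop jar kvtext.jar com.c21.mapreduce.KeyValueTextInputFormat.KVTextDriver /user/input /user/output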

Reposted from blog.csdn.net/Hao_JunJie/article/details/115209998