MapReduce: Custom InputFormat (10)

Large numbers of small files hurt efficiency in both HDFS and MapReduce, yet in practice such workloads are hard to avoid, so a remedy is needed. One common approach is to merge the small files into a single SequenceFile: each original file becomes one record, with the file path plus name as the key and the file contents as the value.

In essence this means writing a custom InputFormat that changes how input is read, so that every small file is handed to the job whole and all of them end up merged into one output file.
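To make the target layout concrete before diving into the MapReduce code, here is a minimal non-MapReduce sketch of the same merge written directly with the SequenceFile.Writer API (assuming a Hadoop 2.x client). The class name LocalMergeSketch and the /tmp paths are placeholders invented for illustration; only the record layout, path as a Text key and file contents as a BytesWritable value, matches the description above.

package com.buba.mapreduce.wholefile;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;

public class LocalMergeSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);

        Path inputDir = new Path("/tmp/smallfiles");   //hypothetical directory of small files
        Path merged = new Path("/tmp/merged.seq");     //hypothetical merged SequenceFile

        try (SequenceFile.Writer writer = SequenceFile.createWriter(conf,
                SequenceFile.Writer.file(merged),
                SequenceFile.Writer.keyClass(Text.class),
                SequenceFile.Writer.valueClass(BytesWritable.class))) {

            for (FileStatus status : fs.listStatus(inputDir)) {
                //Read each small file completely into memory
                byte[] buf = new byte[(int) status.getLen()];
                try (FSDataInputStream in = fs.open(status.getPath())) {
                    IOUtils.readFully(in, buf, 0, buf.length);
                }
                //One record per file: key = path + name, value = file contents
                writer.append(new Text(status.getPath().toString()), new BytesWritable(buf));
            }
        }
    }
}

The MapReduce version below produces the same layout, but lets the cluster read the files through a custom InputFormat instead of a single local loop.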

inputformat

package com.buba.mapreduce.wholefile;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;

import java.io.IOException;

public class WholeFileInputformat extends FileInputFormat<NullWritable, BytesWritable> {

    //Whether the file can be split; return false so each file is read as a whole
    @Override
    protected boolean isSplitable(JobContext context, Path filename) {
        return false;
    }

    @Override
    public RecordReader<NullWritable, BytesWritable> createRecordReader(InputSplit inputSplit, TaskAttemptContext taskAttemptContext) throws IOException, InterruptedException {

        WholeRecordReader reader = new WholeRecordReader();

        //Initialize the reader explicitly (the framework will call initialize again, which is harmless)
        reader.initialize(inputSplit,taskAttemptContext);

        return reader;
    }

}

recordreader

package com.buba.mapreduce.wholefile;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;

import java.io.IOException;

public class WholeRecordReader extends RecordReader<NullWritable, BytesWritable> {

    private BytesWritable value = new BytesWritable();

    private FileSplit split;

    private Configuration configuration;

    //Whether the single record for this file has already been produced
    private boolean isProcess = false;

    //Initialization: cache the split and the job configuration
    @Override
    public void initialize(InputSplit inputSplit, TaskAttemptContext taskAttemptContext) throws IOException, InterruptedException {
        //Keep a reference to the split (one whole file)
        this.split = (FileSplit) inputSplit;

        //Keep a reference to the job configuration
        configuration = taskAttemptContext.getConfiguration();
    }

    //Core logic: read the whole file as a single key-value pair
    @Override
    public boolean nextKeyValue() throws IOException, InterruptedException {
        FSDataInputStream fis = null;
        if(!isProcess){
            try {
                //Process the file as a whole
                FileSystem fs = FileSystem.get(configuration);

                //Path of the file backing this split
                Path path = split.getPath();

                //Open an input stream for the file
                fis= fs.open(path);

                byte[] buf = new byte[(int)split.getLength()];

                //Read the entire file into the buffer
                IOUtils.readFully(fis,buf,0,buf.length);

                value.set(buf,0,buf.length);
            }finally {
                IOUtils.closeStream(fis);
            }

            isProcess = true;

            return true;
        }
        //The single record has already been emitted, so there is nothing left to read
        return false;
    }


    @Override
    public NullWritable getCurrentKey() throws IOException, InterruptedException {
        return NullWritable.get();
    }

    @Override
    public BytesWritable getCurrentValue() throws IOException, InterruptedException {
        return value;
    }

    //Read progress; a constant is good enough for a single-record reader
    @Override
    public float getProgress() throws IOException, InterruptedException {
        return 0;
    }

    @Override
    public void close() throws IOException {

    }
}

mapper

package com.buba.mapreduce.wholefile;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;

import java.io.IOException;

public class WholeMapper extends Mapper<NullWritable, BytesWritable, Text,BytesWritable> {

    Text k = new Text();

    @Override
    protected void setup(Context context) throws IOException, InterruptedException {
        //Get the input split to recover the file path
        FileSplit fileSplit = (FileSplit)context.getInputSplit();

        Path path = fileSplit.getPath();

        //Use the full file path as the output key
        k.set(path.toString());
    }

    @Override
    protected void map(NullWritable key, BytesWritable value, Context context) throws IOException, InterruptedException {
        //Emit the file path together with its whole content
        context.write(k,value);
    }
}

reducer 

package com.buba.mapreduce.wholefile;

import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;
import java.util.Iterator;

public class WholeReducer extends Reducer<Text, BytesWritable,Text,Text> {

    @Override
    protected void reduce(Text key, Iterable<BytesWritable> values, Context context) throws IOException, InterruptedException {
        Iterator<BytesWritable> iterator = values.iterator();

        while (iterator.hasNext()){
            BytesWritable next = iterator.next();
            byte[] bytes = next.getBytes();
            //getBytes() may return a padded buffer, so only convert the first getLength() bytes
            String s = new String(bytes, 0, next.getLength());
            context.write(key,new Text(s));
        }

    }
}

driver 

package com.buba.mapreduce.wholefile;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat;

public class WholeDriver {
    public static void main(String[] args)throws Exception {
        //1. Get the job instance
        Configuration configuration = new Configuration();

        Job job = Job.getInstance(configuration);

        //2. Locate the jar via the driver class
        job.setJarByClass(WholeDriver.class);

        //Use the custom InputFormat so each small file is read whole
        job.setInputFormatClass(WholeFileInputformat.class);

        //Set the output format to SequenceFile
        job.setOutputFormatClass(SequenceFileOutputFormat.class);

        //3. Set the mapper class (the reducer is left as the default identity reducer here;
        //   enabling WholeReducer would also require changing the output value class to Text)
        job.setMapperClass(WholeMapper.class);

        //job.setReducerClass(WholeReducer.class);

        //4. Set the map output key and value types
        job.setMapOutputKeyClass(Text.class);

        job.setMapOutputValueClass(BytesWritable.class);

        //5. Set the final output key and value types
        job.setOutputKeyClass(Text.class);

        job.setOutputValueClass(BytesWritable.class);

        //6. Set the input and output paths
        FileInputFormat.setInputPaths(job,new Path(args[0]));

        FileOutputFormat.setOutputPath(job,new Path(args[1]));

        //7. Submit the job and wait for completion
        boolean b = job.waitForCompletion(true);

        System.exit(b?0:1);
    }
}
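Once the job finishes, the part file in the output directory is a SequenceFile. As a quick check, a small reader along the lines of the sketch below can list every stored path and its size. The class name WholeOutputCheck is invented, and /output/part-r-00000 merely assumes the default part-file naming under the job's output directory, so adjust both to the actual paths; the Option-style Reader constructor assumes a Hadoop 2.x client.

package com.buba.mapreduce.wholefile;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;

public class WholeOutputCheck {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();

        //Placeholder: output directory of the job plus the default part file name
        Path output = new Path("/output/part-r-00000");

        try (SequenceFile.Reader reader = new SequenceFile.Reader(conf,
                SequenceFile.Reader.file(output))) {
            Text key = new Text();
            BytesWritable value = new BytesWritable();

            //Each record is one original small file: path as key, contents as value
            while (reader.next(key, value)) {
                System.out.println(key + " -> " + value.getLength() + " bytes");
            }
        }
    }
}

Running hadoop fs -text on the part file should print the same records, since -text understands SequenceFiles.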

Reposted from blog.csdn.net/kxj19980524/article/details/89342026