Hadoop-Mapreduce(フィルターログとカスタムログ出力(カスタムOutputFormat))

フィルタログとカスタムログ出力(カスタムOutputFormat)

  • 要件

    入力ログの各行に「it」が含まれているかどうかでフィルタリングします

    • それを含むウェブサイトはe:/it.logに出力されます
    • それを含まないウェブサイトはe:/other.logに出力されます
  • 入力データ

    • 期待される出力
  • 特定のプロセス

    • 出力フォーマットをカスタマイズする
import java.io.IOException;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class FilterOutputFormat extends FileOutputFormat<Text, NullWritable> {

	/**
	 * Supplies the custom RecordWriter that routes each record to either
	 * e:/it.log or e:/other.log based on its content.
	 *
	 * @param job the task context, used to reach the job configuration
	 * @return a FilterRecordWriter bound to this task attempt
	 */
	@Override
	public RecordWriter<Text, NullWritable> getRecordWriter(TaskAttemptContext job)
			throws IOException, InterruptedException {
		// All routing/writing logic lives in FilterRecordWriter.
		return new FilterRecordWriter(job);
	}
}

実際にデータを書き込むRecordWriterを作成する

import java.io.IOException;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;

public class FilterRecordWriter extends RecordWriter<Text, NullWritable> {

	// Stream for lines containing "it" (goes to e:/it.log).
	FSDataOutputStream itstarOut = null;
	// Stream for every other line (goes to e:/other.log).
	FSDataOutputStream otherOut = null;

	/**
	 * Opens the two destination files on the job's file system.
	 *
	 * @param job task context providing the Hadoop configuration
	 * @throws IllegalStateException if the output streams cannot be created
	 */
	public FilterRecordWriter(TaskAttemptContext job) {
		// 1. Obtain the file system from the job configuration.
		FileSystem fs;

		try {
			fs = FileSystem.get(job.getConfiguration());

			// 2. Build the two output file paths.
			Path itstarPath = new Path("e:/it.log");
			Path otherPath = new Path("e:/other.log");

			// 3. Open the output streams.
			// FIX: the original referenced an undeclared variable "itPath";
			// the path declared above is itstarPath.
			itstarOut = fs.create(itstarPath);
			otherOut = fs.create(otherPath);
		} catch (IOException e) {
			// FIX: fail fast instead of swallowing the exception — leaving the
			// streams null would only defer the failure to an NPE in write().
			throw new IllegalStateException("Failed to create filter output streams", e);
		}
	}

	/**
	 * Routes a record: lines containing "it" go to it.log, the rest to other.log.
	 * No separator is added here; the reducer appends "\r\n" to each line.
	 */
	@Override
	public void write(Text key, NullWritable value) throws IOException, InterruptedException {
		if (key.toString().contains("it")) {
			itstarOut.write(key.toString().getBytes());
		} else {
			otherOut.write(key.toString().getBytes());
		}
	}

	/**
	 * Releases both streams. Each is null-checked in case construction
	 * failed partway through.
	 */
	@Override
	public void close(TaskAttemptContext context) throws IOException, InterruptedException {
		if (itstarOut != null) {
			itstarOut.close();
		}

		if (otherOut != null) {
			otherOut.close();
		}
	}
}

FilterMapperを書く

import java.io.IOException;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

public class FilterMapper extends Mapper<LongWritable, Text, Text, NullWritable> {

	// Reused output key — avoids allocating a new Text per record.
	private final Text outKey = new Text();

	/**
	 * Passes every input line through unchanged as the output key; the
	 * actual filtering/routing happens later in the custom OutputFormat.
	 */
	@Override
	protected void map(LongWritable key, Text value, Context context)
			throws IOException, InterruptedException {
		// Copy the raw line into the reusable key and emit it.
		outKey.set(value.toString());
		context.write(outKey, NullWritable.get());
	}
}

FilterReducerを書く

import java.io.IOException;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

public class FilterReducer extends Reducer<Text, NullWritable, Text, NullWritable> {

	/**
	 * Emits each input line terminated with CRLF (the custom RecordWriter
	 * writes raw bytes and adds no separator of its own).
	 */
	@Override
	protected void reduce(Text key, Iterable<NullWritable> values, Context context)
			throws IOException, InterruptedException {

		Text line = new Text(key.toString() + "\r\n");

		// FIX: the original wrote the key only once per group, silently
		// collapsing duplicate input lines. Emit one record per occurrence
		// so the output preserves the input's line counts.
		for (NullWritable ignored : values) {
			context.write(line, NullWritable.get());
		}
	}
}

FilterDriverを書く

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class FilterDriver {

	/**
	 * Configures and submits the filter job.
	 *
	 * @param args [0] input directory, [1] output directory; local defaults
	 *             are used when none are supplied
	 */
	public static void main(String[] args) throws Exception {

		// FIX: only fall back to the hardcoded local paths when the caller
		// supplied none — the original unconditionally clobbered args.
		if (args.length < 2) {
			args = new String[] { "e:/input/inputoutputformat", "e:/output2" };
		}

		Configuration conf = new Configuration();

		Job job = Job.getInstance(conf);

		job.setJarByClass(FilterDriver.class);
		job.setMapperClass(FilterMapper.class);
		job.setReducerClass(FilterReducer.class);

		// Map output types.
		job.setMapOutputKeyClass(Text.class);
		job.setMapOutputValueClass(NullWritable.class);

		// Final (reduce) output types.
		job.setOutputKeyClass(Text.class);
		job.setOutputValueClass(NullWritable.class);

		// Plug the custom output format component into the job.
		job.setOutputFormatClass(FilterOutputFormat.class);

		FileInputFormat.setInputPaths(job, new Path(args[0]));

		// Although FilterOutputFormat writes its own files, it extends
		// FileOutputFormat, which still emits a _SUCCESS marker file —
		// so an output directory must be configured here.
		FileOutputFormat.setOutputPath(job, new Path(args[1]));

		boolean result = job.waitForCompletion(true);
		System.exit(result ? 0 : 1);
	}
}

おすすめ

転載: blog.csdn.net/qq_45092505/article/details/105535893