Multi-Job Chaining: Building an Inverted Index

Copyright notice: when reposting, please credit the source: https://blog.csdn.net/qq_31807385/article/details/84547449

Contents

Requirement

Requirement Analysis

Code Implementation


Requirement

We have a large volume of text (documents, web pages) over which we need to build a search index. The input consists of the following files:

a.txt

isea pingping
isea ss
isea ss


b.txt

isea pingping
isea pingping
pingping ss


c.txt

isea ss
isea pingping

The required final output is:

isea	c.txt-->2 b.txt-->2 a.txt-->3
pingping	c.txt-->1 b.txt-->3 a.txt-->1
ss	c.txt-->1 b.txt-->1 a.txt-->2

Requirement Analysis:

A single MapReduce job cannot produce this result directly: the per-file word counts have to exist before they can be regrouped by word, so we chain two jobs. The first job uses the composite key word--filename (the filename is obtained from the input split in setup()) and sums 1s, yielding how many times each word occurs in each file. The second job splits that composite key back apart, making the word the key and the "filename<TAB>count" pair the value, so that its reduce sees every file for a given word and can concatenate them into one index line.
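
To make the two stages concrete, here is the shape of the records at each step for the word isea (a sketch; <TAB> stands for a literal tab, and the exact records appear in the outputs below):

Job 1 map output:     (isea--a.txt, 1), (isea--a.txt, 1), (isea--a.txt, 1), ...
Job 1 reduce output:  (isea--a.txt, 3), (isea--b.txt, 2), (isea--c.txt, 2)
Job 2 map output:     (isea, a.txt<TAB>3), (isea, b.txt<TAB>2), (isea, c.txt<TAB>2)
Job 2 reduce output:  (isea, c.txt-->2 b.txt-->2 a.txt-->3)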

Code Implementation:

OneIndexMapper.java:

package com.isea.index;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;

import java.io.IOException;

public class OneIndexMapper extends Mapper<LongWritable, Text,Text, IntWritable> {
    String name;
    Text k = new Text();
    IntWritable v = new IntWritable(1);

    @Override
    protected void setup(Context context) throws IOException, InterruptedException {
        // Get the name of the input file; the cast to FileSplit assumes a
        // file-based input format such as the default TextInputFormat
        FileSplit split = (FileSplit) context.getInputSplit();
        name = split.getPath().getName();
    }

    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {

        // Example input line (from a.txt): isea pingping

        // 1. Get one line
        String line = value.toString();

        // 2. Split the line into words
        String[] fields = line.split(" ");

        // 3. Emit (word--filename, 1) for every word
        for (String field : fields) {
            k.set(field + "--" + name);
            context.write(k, v);
        }
    }
}
OneIndexReducer.java:

package com.isea.index;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

public class OneIndexReducer extends Reducer<Text, IntWritable,Text,IntWritable> {
    IntWritable v = new IntWritable();

    @Override
    protected void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException {
        // Sum the 1s emitted for one word--filename key
        int sum = 0;
        for (IntWritable value : values) {
            sum += value.get();
        }

        v.set(sum);
        context.write(key, v);
    }
}
OneIndexDriver.java:

package com.isea.index;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.IOException;

public class OneIndexDriver {
    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
        // Local test paths; these override any command-line arguments
        args = new String[]{"g:/input/reverse_sort", "g:/output1"};

        Configuration configuration = new Configuration();
        Job job = Job.getInstance(configuration);

        job.setJarByClass(OneIndexDriver.class);

        job.setMapperClass(OneIndexMapper.class);
        job.setReducerClass(OneIndexReducer.class);

        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);

        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);

        FileInputFormat.setInputPaths(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        boolean result = job.waitForCompletion(true);
        System.exit(result ? 0 : 1);

    }
}
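
One optional tweak that is not in the original driver: because the first job's reduce only sums integers, an associative and commutative operation, the reducer class can safely double as a combiner to pre-aggregate counts on the map side. A one-line sketch, to be placed after setReducerClass in the driver's main:

        // Optional: pre-aggregate (word--filename, 1) pairs on the map side;
        // safe because integer summation is associative and commutative
        job.setCombinerClass(OneIndexReducer.class);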

Output of the first job (written to g:/output1):

isea--a.txt	3
isea--b.txt	2
isea--c.txt	2
pingping--a.txt	1
pingping--b.txt	3
pingping--c.txt	1
ss--a.txt	2
ss--b.txt	1
ss--c.txt	1
TwoIndexMapper.java:

package com.isea.index;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

public class TwoIndexMapper extends Mapper<LongWritable, Text,Text,Text> {
    Text k = new Text();
    Text v = new Text();

    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {

        // Example input line (from the first job): isea--a.txt<TAB>3

        String line = value.toString();

        // Split the composite key: fields[0] is the word,
        // fields[1] is "filename<TAB>count"
        String[] fields = line.split("--");

        k.set(fields[0]);
        v.set(fields[1]);

        context.write(k, v);
    }
}
TwoIndexReducer.java:

package com.isea.index;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

public class TwoIndexReducer extends Reducer<Text,Text,Text,Text> {
    Text v = new Text();

    @Override
    protected void reduce(Text key, Iterable<Text> values, Context context) throws IOException, InterruptedException {
        StringBuilder sb = new StringBuilder();

        // Concatenate every "filename<TAB>count" value for this word, turning
        // the tab into "-->" and separating entries with a space
        for (Text value : values) {
            if (sb.length() > 0) {
                sb.append(" ");
            }
            sb.append(value.toString().replace("\t", "-->"));
        }
        v.set(sb.toString());
        context.write(key, v);
    }
}
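
For the key isea, for example, this reduce receives the values c.txt<TAB>2, b.txt<TAB>2, and a.txt<TAB>3 (one per source file) and writes the single index line:

isea	c.txt-->2 b.txt-->2 a.txt-->3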
TwoIndexDriver.java:

package com.isea.index;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.IOException;

public class TwoIndexDriver {
    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
        // The second job reads the first job's output directory
        args = new String[]{"g:/output1", "g:/output2"};

        Configuration configuration = new Configuration();
        Job job = Job.getInstance(configuration);

        job.setJarByClass(TwoIndexDriver.class);

        job.setMapperClass(TwoIndexMapper.class);
        job.setReducerClass(TwoIndexReducer.class);

        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(Text.class);

        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);

        FileInputFormat.setInputPaths(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        boolean result = job.waitForCompletion(true);
        System.exit(result ? 0 : 1);
    }
}

Final output of the second job (written to g:/output2):

isea	c.txt-->2 b.txt-->2 a.txt-->3
pingping	c.txt-->1 b.txt-->3 a.txt-->1
ss	c.txt-->1 b.txt-->1 a.txt-->2
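
The two drivers above have to be run by hand, one after the other, with the second job reading the first job's output directory. Since the point of this post is chaining multiple jobs, here is a minimal sketch of submitting both jobs from a single driver using Hadoop's JobControl and ControlledJob classes. The class name ChainedIndexDriver is hypothetical, and the job setup simply repeats what the two drivers above already do:

package com.isea.index;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.jobcontrol.ControlledJob;
import org.apache.hadoop.mapreduce.lib.jobcontrol.JobControl;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class ChainedIndexDriver {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();

        // First job: count each word per source file (word--filename -> count)
        Job job1 = Job.getInstance(conf, "one-index");
        job1.setJarByClass(ChainedIndexDriver.class);
        job1.setMapperClass(OneIndexMapper.class);
        job1.setReducerClass(OneIndexReducer.class);
        job1.setOutputKeyClass(Text.class);
        job1.setOutputValueClass(IntWritable.class);
        FileInputFormat.setInputPaths(job1, new Path("g:/input/reverse_sort"));
        FileOutputFormat.setOutputPath(job1, new Path("g:/output1"));

        // Second job: regroup by word (word -> "file-->count" list)
        Job job2 = Job.getInstance(conf, "two-index");
        job2.setJarByClass(ChainedIndexDriver.class);
        job2.setMapperClass(TwoIndexMapper.class);
        job2.setReducerClass(TwoIndexReducer.class);
        job2.setOutputKeyClass(Text.class);
        job2.setOutputValueClass(Text.class);
        FileInputFormat.setInputPaths(job2, new Path("g:/output1"));
        FileOutputFormat.setOutputPath(job2, new Path("g:/output2"));

        // Wrap both jobs and declare the dependency: job2 runs only
        // after job1 has completed successfully
        ControlledJob first = new ControlledJob(job1.getConfiguration());
        first.setJob(job1);
        ControlledJob second = new ControlledJob(job2.getConfiguration());
        second.setJob(job2);
        second.addDependingJob(first);

        JobControl control = new JobControl("inverted-index-chain");
        control.addJob(first);
        control.addJob(second);

        // JobControl is a Runnable: drive it on a background thread
        // and poll until both jobs have finished
        Thread runner = new Thread(control);
        runner.setDaemon(true);
        runner.start();
        while (!control.allFinished()) {
            Thread.sleep(500);
        }
        control.stop();
        System.exit(control.getFailedJobList().isEmpty() ? 0 : 1);
    }
}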
