Using the API to Run MR Jobs on HBase

Contents

1. Configuration

Method 1:

Method 2:

2. Official MR-on-HBase examples

Example 1: Count the rows in a given table

Example 2: Load a data file from HDFS into HBase

3. Writing MR jobs against HBase with the API


1. Configuration

To run MR jobs against HBase, Hadoop first needs HBase's jars on its classpath so that it can connect to HBase.

You can list the required jars by running bin/hbase mapredcp in the HBase directory.

There are two ways to set this up.

Method 1:

This approach is temporary: the value produced by the command above is added to the environment variable only for the current session.

export HADOOP_CLASSPATH=`${HBASE_HOME}/bin/hbase mapredcp`

If HBASE_HOME is not set, it can also be exported temporarily.

Method 2:

This approach writes the configuration into hadoop-env.sh, so it takes effect every time Hadoop starts:

Note: put it after the for loop that builds HADOOP_CLASSPATH.

export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:/opt/module/hbase-1.3.1/lib/*

2. Official MR-on-HBase examples

Example 1: Count the rows in a given table

/opt/module/hadoop-2.7.2/bin/yarn jar lib/hbase-server-1.3.1.jar rowcounter stu2

Note: hbase-server-1.3.1.jar is the jar that ships with the official example jobs.

ROWS=4 in the job counters means the table contains 4 rows.

Example 2: Load a data file from HDFS into HBase

First upload the data file to HDFS:

hadoop fs -put fruit.tsv /input/

Then create the corresponding table in HBase (for example, create 'fruit','info' in the HBase shell).

Run the official importtsv MR job:

/opt/module/hadoop-2.7.2/bin/yarn jar lib/hbase-server-1.3.1.jar importtsv \
-Dimporttsv.columns=HBASE_ROW_KEY,info:name,info:color fruit \
hdfs://hadoop102:9000/input/fruit.tsv

In -Dimporttsv.columns, HBASE_ROW_KEY marks the first column as the row key,

info:name maps the second column to the name column of the info column family,

and info:color does the same for the third column.

The order of these mappings can be rearranged, as long as it matches how the columns are laid out in your data file.

fruit is the target table name.

The result can then be checked by scanning the fruit table in the HBase shell.

3. Writing MR jobs against HBase with the API

The first MR job loads a data file from HDFS into HBase.

The second MR job extracts the data that meets a condition from an HBase table and saves it into another table.

If the job reads files from HDFS, the map class extends Mapper; if it reads from an HBase table, it extends TableMapper.

TableMapper only takes the two output type parameters; the two input types are fixed:

input key: ImmutableBytesWritable, which holds the row key

input value: Result, which holds the Cells; it is a collection of all the Cells belonging to one row key
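
For example, a TableMapper can read a single column straight out of the Result with getValue(). This is only a minimal sketch of my own (the class name NameMapper and the Text output type are made up; the info:name column mirrors the tables used below), not part of the jobs in this article:

import java.io.IOException;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableMapper;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.Text;

// The two generics are only the OUTPUT types; the input key/value are always
// ImmutableBytesWritable and Result.
public class NameMapper extends TableMapper<ImmutableBytesWritable, Text> {

    @Override
    protected void map(ImmutableBytesWritable key, Result value, Context context)
            throws IOException, InterruptedException {
        // Read one column directly from the Result (null if the row has no info:name cell)
        byte[] nameBytes = value.getValue(Bytes.toBytes("info"), Bytes.toBytes("name"));
        if (nameBytes != null) {
            context.write(key, new Text(Bytes.toString(nameBytes)));
        }
    }
}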

To write into HBase, the reduce class extends TableReducer, whose output value type is fixed to Mutation, covering inserts/updates (Put) and deletes (Delete).
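
Because the output value is a Mutation, a TableReducer can emit a Delete just as easily as a Put. A minimal sketch (not part of the jobs below; the class name CleanupReducer is made up):

import java.io.IOException;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableReducer;
import org.apache.hadoop.io.NullWritable;

public class CleanupReducer extends TableReducer<ImmutableBytesWritable, Put, NullWritable> {

    @Override
    protected void reduce(ImmutableBytesWritable key, Iterable<Put> values, Context context)
            throws IOException, InterruptedException {
        // Delete is also a Mutation, so it can be written to the output table as well
        context.write(NullWritable.get(), new Delete(key.get()));
    }
}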

With TableMapper there is no input path to set; the mapper is wired up in the Driver with the TableMapReduceUtil utility class.

With TableReducer there is no output path to set; the reducer is wired up in the Driver with TableMapReduceUtil as well.

To run this against HBase from Windows, two things need attention:

① The hbase-site.xml file from the HBase configuration directory must be added to the project (on the classpath).

② The Configuration object must be created with HBaseConfiguration.create().
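
One more note before the code: both jobs write into a fruit2 table with an info column family, and that table must exist before the job runs. It can be created in the HBase shell, or with a short client-side sketch like the one below (a minimal example of my own, assuming hbase-site.xml is on the classpath; the class name CreateFruit2 is made up):

import java.io.IOException;

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CreateFruit2 {

    public static void main(String[] args) throws IOException {
        // Connection settings come from hbase-site.xml on the classpath
        try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = connection.getAdmin()) {
            TableName tableName = TableName.valueOf("fruit2");
            if (!admin.tableExists(tableName)) {
                // One column family named info, matching the columns written by the jobs
                HTableDescriptor descriptor = new HTableDescriptor(tableName);
                descriptor.addFamily(new HColumnDescriptor("info"));
                admin.createTable(descriptor);
            }
        }
    }
}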


Mapper (job 1: HDFS to HBase)

package tyh.MR1;

import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

public class fruitMapper extends Mapper<LongWritable, Text, NullWritable, Put> {

    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        // Do not call super.map() here: the default implementation forwards the raw
        // (LongWritable, Text) pair to the context, which does not match the declared
        // output types (NullWritable, Put).

        // One line of data read from HDFS
        String lineValue = value.toString();

        // Split the tab-separated fields
        String[] split = lineValue.split("\t");

        // Meaning of each field
        String rowKey = split[0];
        String name = split[1];
        String color = split[2];

        // Initialize the Put object for this row key
        Put put = new Put(Bytes.toBytes(rowKey));

        // Add the column values
        put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("name"), Bytes.toBytes(name));
        put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("color"), Bytes.toBytes(color));

        // Emit the data
        context.write(NullWritable.get(), put);
    }
}

Reducer

package tyh.MR1;

import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.mapreduce.TableReducer;
import org.apache.hadoop.io.NullWritable;

import java.io.IOException;

public class fruitReducer extends TableReducer<NullWritable, Put, NullWritable> {

    @Override
    protected void reduce(NullWritable key, Iterable<Put> values, Context context) throws IOException, InterruptedException {
        // The mapper already built complete Put objects, so just write them through.
        // Do not call super.reduce() here: the default implementation would emit the
        // values once more on its own.
        for (Put put : values) {
            context.write(NullWritable.get(), put);
        }
    }
}

Driver

package tyh.MR1;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

public class fruitRunner implements Tool {

    // Configuration object, injected by ToolRunner through setConf()
    private Configuration configuration = null;

    @Override
    public int run(String[] args) throws Exception {
        Configuration conf = this.getConf();

        // 1. Create the Job object
        Job job = Job.getInstance(conf, this.getClass().getSimpleName());

        // 2. Set the driver class for the job jar
        job.setJarByClass(fruitRunner.class);

        // 3. Set the Mapper and its output types
        job.setMapperClass(fruitMapper.class);
        job.setMapOutputKeyClass(NullWritable.class);
        job.setMapOutputValueClass(Put.class);

        // 4. Set the reducer: write into the fruit2 table
        TableMapReduceUtil.initTableReducerJob("fruit2", fruitReducer.class, job);

        // 5. Set the input path
        FileInputFormat.setInputPaths(job, new Path("hdfs://hadoop100:8020/input/fruit.tsv"));

        // 6. Submit the job and wait for it to finish
        boolean result = job.waitForCompletion(true);

        return result ? 0 : 1;
    }

    @Override
    public void setConf(Configuration conf) { configuration = conf; }

    @Override
    public Configuration getConf() {
        return configuration;
    }

    public static void main(String[] args) {
        try {
            Configuration configuration = HBaseConfiguration.create();
            int status = ToolRunner.run(configuration, new fruitRunner(), args);
            System.exit(status);
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}

Mapper (job 2: HBase table to table)

package tyh.MR3;

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableMapper;
import org.apache.hadoop.hbase.util.Bytes;

import java.io.IOException;

public class TMapper extends TableMapper<ImmutableBytesWritable, Put> {

    @Override
    protected void map(ImmutableBytesWritable key, Result value, Context context) throws IOException, InterruptedException {
        // Do not call super.map() here: the default implementation would try to write
        // the raw (key, Result) pair, which does not match the output value type (Put).

        // 1. Build a Put for the current row key
        Put put = new Put(key.get());

        // 2. Walk every Cell of this row
        for (Cell cell : value.rawCells()) {
            // 3. Keep only cells from the info column family; only info data goes into the target table
            if ("info".equals(Bytes.toString(CellUtil.cloneFamily(cell)))) {
                // 4. Keep only the name column
                if ("name".equals(Bytes.toString(CellUtil.cloneQualifier(cell)))) {
                    put.add(cell);
                }
            }
        }

        // 5. Skip rows with no matching cells: an empty Put cannot be written to the table
        if (!put.isEmpty()) {
            context.write(key, put);
        }
    }
}

Reducer

package tyh.MR3;

import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableReducer;
import org.apache.hadoop.io.NullWritable;

import java.io.IOException;

public class TReducer extends TableReducer<ImmutableBytesWritable, Put, NullWritable> {

    @Override
    protected void reduce(ImmutableBytesWritable key, Iterable<Put> values, Context context) throws IOException, InterruptedException {
        // Pass each Put straight through to the output table.
        // Do not call super.reduce() here: the default implementation would re-emit
        // the values with the wrong key type.
        for (Put put : values) {
            context.write(NullWritable.get(), put);
        }
    }
}

Driver

package tyh.MR3;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

public class TRunner implements Tool {

    private Configuration configuration = null;

    @Override
    public int run(String[] args) throws Exception {

        // Create the Job and set the driver class
        Job job = Job.getInstance(configuration);
        job.setJarByClass(TRunner.class);

        // Read the fruit table with a full-table Scan and feed each row to TMapper
        TableMapReduceUtil.initTableMapperJob("fruit",
                new Scan(),
                TMapper.class,
                ImmutableBytesWritable.class,
                Put.class,
                job
        );

        // Write the reducer output into the fruit2 table
        TableMapReduceUtil.initTableReducerJob("fruit2", TReducer.class, job);

        // Submit the job and wait for it to finish
        boolean result = job.waitForCompletion(true);
        return result ? 0 : 1;
    }

    @Override
    public void setConf(Configuration conf) {
        configuration = conf;
    }

    @Override
    public Configuration getConf() {
        return configuration;
    }

    public static void main(String[] args) {
        try {
            Configuration configuration = HBaseConfiguration.create();
            int run = ToolRunner.run(configuration, new TRunner(), args);
            System.exit(run);
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}


Reposted from blog.csdn.net/tyh1579152915/article/details/109247527