HBase学习之八:自定义hbase协处理器endpoint和遇到的问题

hbase rpc采用protobuf作为数据交换格式,自定义协处理器需要先创建一个protobuf作为rpc的client端和server端的数据请求和响应载体,在windows环境下需下载protobuf工具,如:
protoc-2.5.0-win32.zip地址:http://download.csdn.net/detail/javajxz008/9616971
解压至文件夹protoc-2.5.0-win32,在其中可以看到protoc.exe编译工具,在同级目录下定义自己的protobuf格式:
如pageresult.proto:
option java_package = "com.huateng.ivr.page";       // generated Java package
option java_outer_classname = "SplitPage";          // generated outer class name
option java_generic_services = true;                // generate abstract service stubs
option java_generate_equals_and_hash = true;
option optimize_for = SPEED;

// Request: the last rowkey of the previous page ("00" requests the first page).
// NOTE: proto comments use "//", not "#" — "#" does not compile under protoc.
message SplitPageRequest {
    required string rowkey = 1;
}


// Response: the rows of one page. A nested message works like an inner class.
message SplitPageResponse {
   message result {
     required string rowkey = 1;
     required string cf = 2;
     required string col1 = 3;
     required string col2 = 4;
     required string col3 = 5;
   }
   // "repeated" means zero or more occurrences, like a java.util.List.
   repeated result rs = 6;
}



// RPC service exposed by the endpoint coprocessor.
service SplitPageService {
  rpc getSplitPageResult(SplitPageRequest)
    returns (SplitPageResponse);
}
进入刚刚解压的目录下,执行protoc --java_out=. ./pageresult.proto,在当前目录下会产生一个包含SplitPage.java类的文件夹,把该类拷贝到同包名的java工程下,紧接着开始
编写协处理器代码,我的hbase版本0.98,服务器端endpoint需实现Coprocessor, CoprocessorService类继承SplitPageService类(在产生的SplitPage类中),完整代码如下:
package com.huateng.ivr.page;

import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.Coprocessor;
import org.apache.hadoop.hbase.CoprocessorEnvironment;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.coprocessor.CoprocessorException;
import org.apache.hadoop.hbase.coprocessor.CoprocessorService;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.filter.BinaryComparator;
import org.apache.hadoop.hbase.filter.CompareFilter;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.filter.FilterList;
import org.apache.hadoop.hbase.filter.PageFilter;
import org.apache.hadoop.hbase.filter.RowFilter;
import org.apache.hadoop.hbase.filter.SubstringComparator;
import org.apache.hadoop.hbase.protobuf.ResponseConverter;
import org.apache.hadoop.hbase.regionserver.InternalScanner;
import org.apache.hadoop.hbase.util.Bytes;

import com.google.protobuf.RpcCallback;
import com.google.protobuf.RpcController;
import com.google.protobuf.Service;
import com.huateng.ivr.page.SplitPage.SplitPageRequest;
import com.huateng.ivr.page.SplitPage.SplitPageResponse;
import com.huateng.ivr.page.SplitPage.SplitPageResponse.Builder;
import com.huateng.ivr.page.SplitPage.SplitPageResponse.result;
import com.huateng.ivr.page.SplitPage.SplitPageService;

public class PageServerCoprocessor extends SplitPageService implements
  Coprocessor, CoprocessorService {

 public static final String ROWKEY_FIRST = "00";
 public static final int PAGE_SIZE = 10000;
 public static final String DATA_DATE = "20160820";

 private RegionCoprocessorEnvironment env;

 @Override
 public Service getService() {
  // TODO Auto-generated method stub
  return this;
 }

 @Override
 public void start(CoprocessorEnvironment env) throws IOException {
  // TODO Auto-generated method stub
  if (env instanceof RegionCoprocessorEnvironment) {
   this.env = (RegionCoprocessorEnvironment) env;
  } else {
   throw new CoprocessorException("Must be loaded on a table region!");
  }

 }

 @Override
 public void stop(CoprocessorEnvironment env) throws IOException {
  // TODO Auto-generated method stub

 }

 @Override
 public void getSplitPageResult(RpcController controller,
   SplitPageRequest request, RpcCallback<SplitPageResponse> done) {
  // TODO Auto-generated method stub
  Scan scan = new Scan();
  FilterList filterList = new FilterList(
    FilterList.Operator.MUST_PASS_ALL);
  filterList.addFilter(new PageFilter(PAGE_SIZE));
  Filter rowFilter1 = new RowFilter(CompareFilter.CompareOp.EQUAL,
    new SubstringComparator(DATA_DATE));
  filterList.addFilter(rowFilter1);
  String lastrowKey = request.getRowkey();
  if (!ROWKEY_FIRST.equals(lastrowKey)) {
   Filter rowFilter2 = new RowFilter(CompareFilter.CompareOp.GREATER,
     new BinaryComparator(Bytes.toBytes(lastrowKey)));
   filterList.addFilter(rowFilter2);
  }
  scan.setFilter(filterList);
  InternalScanner scanner = null;
  SplitPageResponse response = null;
  Builder builder = SplitPageResponse.newBuilder();
  try {
   scanner = env.getRegion().getScanner(scan);
   List<Cell> results = new ArrayList<Cell>();
   boolean hasMore = false;
   do {
    hasMore = scanner.next(results);
    Map<String, String> map = getRowByCellList(results);
    String rk = map.get("rk");
    String cf = map.get("cf");
    String val1 = map.get("col1");
    String val2 = map.get("col2");
    String val3 = map.get("col3");
    results.clear();
    SplitPageResponse.result rs = result.newBuilder().setRowkey(rk).setCf(cf).setCol1(val1).setCol2(val2).setCol3(val3).build();
    builder.addRs(rs);
   } while (hasMore);
   response = builder.build();
  } catch (IOException e) {
   // TODO Auto-generated catch block
   ResponseConverter.setControllerException(controller, e);
  }finally {
            if (scanner != null) {
                try {
                    scanner.close();
                } catch (IOException ignored) {}
            }
        }
  done.run(response);
 }

 private Map<String, String> getRowByCellList(List<Cell> results) {
  if (results == null) {
   return null;
  }
  Map<String, String> cellMap = new HashMap<String, String>();
  for (Cell cell : results) {
   String rowkey = Bytes.toString(cell.getRowArray(),
     cell.getRowOffset(), cell.getRowLength());
   String cf = Bytes.toString(cell.getFamilyArray(),
     cell.getFamilyOffset(), cell.getFamilyLength());
   String qf = Bytes.toString(cell.getQualifierArray(),
     cell.getQualifierOffset(), cell.getQualifierLength());
   String value = Bytes.toString(cell.getValueArray(),
     cell.getValueOffset(), cell.getValueLength());
   cellMap.put("rk", rowkey);
   cellMap.put("cf", cf);
   cellMap.put(qf, value);
  }
  return cellMap;
 }
}
将PageServerCoprocessor类和SplitPage类打成jar包上传到hdfs路径下,并将协处理器加到表上,代码如下:
private static void addPageCoprocessor() throws MasterNotRunningException, ZooKeeperConnectionException, IOException{
      HBaseAdmin admin = new HBaseAdmin(conf); 
        admin.disableTable(tableName);
        HTableDescriptor htd = admin.getTableDescriptor(Bytes.toBytes(tableName));
        HColumnDescriptor columnFamily1 = new HColumnDescriptor("info");
        columnFamily1.setMaxVersions(3);
        columnFamily1.setMinVersions(1);
        htd.addFamily(columnFamily1);
        htd.addCoprocessor(PageServerCoprocessor.class.getCanonicalName(), new Path("hdfs://172.30.115.58:8020/apps/hive/warehouse/coprocessor/pagecoprocessor.jar"),
          Coprocessor.PRIORITY_USER, null);
        admin.modifyTable(tableName, htd); 
        admin.enableTable(tableName); 
        admin.close(); 
}
可以在hbase shell下describe表看是否成功,这里有一个问题,如果协处理器加不成功会造成regionserver挂掉,从而影响hbase的使用,所以要确保加载成功,如果想协处理器加载失败不影响
hbase的正常使用,则在hbase-site.xml中可加入参数hbase.coprocessor.abortonerror=false。接下来编写客户端代码:
核心代码如下:
public static String getPageByConditions(String tableName,String rowkey) throws Exception{
  HConnection conn = HConnectionManager.createConnection(conf);
  HTable hTable = (HTable) conn.getTable(Bytes.toBytes(tableName));
  final SplitPageRequest request = SplitPageRequest.newBuilder().setRowkey(rowkey).build();
  try {
    Map<byte[], List<result>> res = hTable.coprocessorService(SplitPageService.class, null, null, new Batch.Call<SplitPageService, List<SplitPageResponse.result>>() {

    @Override
    public List<result> call(SplitPageService service)
      throws IOException {
     // TODO Auto-generated method stub
     BlockingRpcCallback rpcCallback = new BlockingRpcCallback();
     service.getSplitPageResult(null, request, rpcCallback);
     SplitPageResponse reponse = (SplitPageResponse) rpcCallback.get();
     return reponse.getRsList();
    }
   });
    Set<Entry<byte[], List<result>>> set = res.entrySet();
    Iterator<Entry<byte[], List<result>>> it = set.iterator();
    Map<String,String> map = new HashMap<String, String>();
    while(it.hasNext()){
     Entry<byte[], List<result>> entry = it.next();
     List<result> list = entry.getValue();
     for(result r:list){
     System.out.println("rowkey:"+r.getRowkey()+",cf:"+r.getCf()+",col1:"+r.getCol1()+",col2:"+r.getCol2()+",col3:"+r.getCol3());
     }
    }
    return map.get("rk");
  } catch (Throwable e) {
   // TODO Auto-generated catch block
   e.printStackTrace();
  }
  return "";
 }
如果遇到诸如no registered service on table(记不清了)...,其实是在前一步加载协处理器没有成功,仔细检查重新load,hbase官网关于协处理器给出了很详细的阐述和例子:
http://hbase.apache.org/book.html#cp_loading,侧边栏目录:Apache HBase Coprocessors
hbase rpc采用protobuf作为数据交换格式,自定义协处理器需要先创建一个protobuf作为rpc的client端和server端的数据请求和响应载体,在windows环境下需下载protobuf工具,如:
protoc-2.5.0-win32.zip地址:http://download.csdn.net/detail/javajxz008/9616971
解压至文件夹protoc-2.5.0-win32,在其中可以看到protoc.exe编译工具,在同级目录下定义自己的protobuf格式:
如pageresult.proto:
option java_package = "com.huateng.ivr.page";       // generated Java package
option java_outer_classname = "SplitPage";          // generated outer class name
option java_generic_services = true;                // generate abstract service stubs
option java_generate_equals_and_hash = true;
option optimize_for = SPEED;

// Request: the last rowkey of the previous page ("00" requests the first page).
// NOTE: proto comments use "//", not "#" — "#" does not compile under protoc.
message SplitPageRequest {
    required string rowkey = 1;
}


// Response: the rows of one page. A nested message works like an inner class.
message SplitPageResponse {
   message result {
     required string rowkey = 1;
     required string cf = 2;
     required string col1 = 3;
     required string col2 = 4;
     required string col3 = 5;
   }
   // "repeated" means zero or more occurrences, like a java.util.List.
   repeated result rs = 6;
}



// RPC service exposed by the endpoint coprocessor.
service SplitPageService {
  rpc getSplitPageResult(SplitPageRequest)
    returns (SplitPageResponse);
}
进入刚刚解压的目录下,执行protoc --java_out=. ./pageresult.proto,在当前目录下会产生一个包含SplitPage.java类的文件夹,把该类拷贝到同包名的java工程下,紧接着开始
编写协处理器代码,我的hbase版本0.98,服务器端endpoint需实现Coprocessor, CoprocessorService类继承SplitPageService类(在产生的SplitPage类中),完整代码如下:
package com.huateng.ivr.page;

import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.Coprocessor;
import org.apache.hadoop.hbase.CoprocessorEnvironment;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.coprocessor.CoprocessorException;
import org.apache.hadoop.hbase.coprocessor.CoprocessorService;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.filter.BinaryComparator;
import org.apache.hadoop.hbase.filter.CompareFilter;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.filter.FilterList;
import org.apache.hadoop.hbase.filter.PageFilter;
import org.apache.hadoop.hbase.filter.RowFilter;
import org.apache.hadoop.hbase.filter.SubstringComparator;
import org.apache.hadoop.hbase.protobuf.ResponseConverter;
import org.apache.hadoop.hbase.regionserver.InternalScanner;
import org.apache.hadoop.hbase.util.Bytes;

import com.google.protobuf.RpcCallback;
import com.google.protobuf.RpcController;
import com.google.protobuf.Service;
import com.huateng.ivr.page.SplitPage.SplitPageRequest;
import com.huateng.ivr.page.SplitPage.SplitPageResponse;
import com.huateng.ivr.page.SplitPage.SplitPageResponse.Builder;
import com.huateng.ivr.page.SplitPage.SplitPageResponse.result;
import com.huateng.ivr.page.SplitPage.SplitPageService;

public class PageServerCoprocessor extends SplitPageService implements
  Coprocessor, CoprocessorService {

 public static final String ROWKEY_FIRST = "00";
 public static final int PAGE_SIZE = 10000;
 public static final String DATA_DATE = "20160820";

 private RegionCoprocessorEnvironment env;

 @Override
 public Service getService() {
  // TODO Auto-generated method stub
  return this;
 }

 @Override
 public void start(CoprocessorEnvironment env) throws IOException {
  // TODO Auto-generated method stub
  if (env instanceof RegionCoprocessorEnvironment) {
   this.env = (RegionCoprocessorEnvironment) env;
  } else {
   throw new CoprocessorException("Must be loaded on a table region!");
  }

 }

 @Override
 public void stop(CoprocessorEnvironment env) throws IOException {
  // TODO Auto-generated method stub

 }

 @Override
 public void getSplitPageResult(RpcController controller,
   SplitPageRequest request, RpcCallback<SplitPageResponse> done) {
  // TODO Auto-generated method stub
  Scan scan = new Scan();
  FilterList filterList = new FilterList(
    FilterList.Operator.MUST_PASS_ALL);
  filterList.addFilter(new PageFilter(PAGE_SIZE));
  Filter rowFilter1 = new RowFilter(CompareFilter.CompareOp.EQUAL,
    new SubstringComparator(DATA_DATE));
  filterList.addFilter(rowFilter1);
  String lastrowKey = request.getRowkey();
  if (!ROWKEY_FIRST.equals(lastrowKey)) {
   Filter rowFilter2 = new RowFilter(CompareFilter.CompareOp.GREATER,
     new BinaryComparator(Bytes.toBytes(lastrowKey)));
   filterList.addFilter(rowFilter2);
  }
  scan.setFilter(filterList);
  InternalScanner scanner = null;
  SplitPageResponse response = null;
  Builder builder = SplitPageResponse.newBuilder();
  try {
   scanner = env.getRegion().getScanner(scan);
   List<Cell> results = new ArrayList<Cell>();
   boolean hasMore = false;
   do {
    hasMore = scanner.next(results);
    Map<String, String> map = getRowByCellList(results);
    String rk = map.get("rk");
    String cf = map.get("cf");
    String val1 = map.get("col1");
    String val2 = map.get("col2");
    String val3 = map.get("col3");
    results.clear();
    SplitPageResponse.result rs = result.newBuilder().setRowkey(rk).setCf(cf).setCol1(val1).setCol2(val2).setCol3(val3).build();
    builder.addRs(rs);
   } while (hasMore);
   response = builder.build();
  } catch (IOException e) {
   // TODO Auto-generated catch block
   ResponseConverter.setControllerException(controller, e);
  }finally {
            if (scanner != null) {
                try {
                    scanner.close();
                } catch (IOException ignored) {}
            }
        }
  done.run(response);
 }

 private Map<String, String> getRowByCellList(List<Cell> results) {
  if (results == null) {
   return null;
  }
  Map<String, String> cellMap = new HashMap<String, String>();
  for (Cell cell : results) {
   String rowkey = Bytes.toString(cell.getRowArray(),
     cell.getRowOffset(), cell.getRowLength());
   String cf = Bytes.toString(cell.getFamilyArray(),
     cell.getFamilyOffset(), cell.getFamilyLength());
   String qf = Bytes.toString(cell.getQualifierArray(),
     cell.getQualifierOffset(), cell.getQualifierLength());
   String value = Bytes.toString(cell.getValueArray(),
     cell.getValueOffset(), cell.getValueLength());
   cellMap.put("rk", rowkey);
   cellMap.put("cf", cf);
   cellMap.put(qf, value);
  }
  return cellMap;
 }
}
将PageServerCoprocessor类和SplitPage类打成jar包上传到hdfs路径下,并将协处理器加到表上,代码如下:
private static void addPageCoprocessor() throws MasterNotRunningException, ZooKeeperConnectionException, IOException{
      HBaseAdmin admin = new HBaseAdmin(conf); 
        admin.disableTable(tableName);
        HTableDescriptor htd = admin.getTableDescriptor(Bytes.toBytes(tableName));
        HColumnDescriptor columnFamily1 = new HColumnDescriptor("info");
        columnFamily1.setMaxVersions(3);
        columnFamily1.setMinVersions(1);
        htd.addFamily(columnFamily1);
        htd.addCoprocessor(PageServerCoprocessor.class.getCanonicalName(), new Path("hdfs://172.30.115.58:8020/apps/hive/warehouse/coprocessor/pagecoprocessor.jar"),
          Coprocessor.PRIORITY_USER, null);
        admin.modifyTable(tableName, htd); 
        admin.enableTable(tableName); 
        admin.close(); 
}
可以在hbase shell下describe表看是否成功,这里有一个问题,如果协处理器加不成功会造成regionserver挂掉,从而影响hbase的使用,所以要确保加载成功,如果想协处理器加载失败不影响
hbase的正常使用,则在hbase-site.xml中可加入参数hbase.coprocessor.abortonerror=false。接下来编写客户端代码:
核心代码如下:
public static String getPageByConditions(String tableName,String rowkey) throws Exception{
  HConnection conn = HConnectionManager.createConnection(conf);
  HTable hTable = (HTable) conn.getTable(Bytes.toBytes(tableName));
  final SplitPageRequest request = SplitPageRequest.newBuilder().setRowkey(rowkey).build();
  try {
    Map<byte[], List<result>> res = hTable.coprocessorService(SplitPageService.class, null, null, new Batch.Call<SplitPageService, List<SplitPageResponse.result>>() {

    @Override
    public List<result> call(SplitPageService service)
      throws IOException {
     // TODO Auto-generated method stub
     BlockingRpcCallback rpcCallback = new BlockingRpcCallback();
     service.getSplitPageResult(null, request, rpcCallback);
     SplitPageResponse reponse = (SplitPageResponse) rpcCallback.get();
     return reponse.getRsList();
    }
   });
    Set<Entry<byte[], List<result>>> set = res.entrySet();
    Iterator<Entry<byte[], List<result>>> it = set.iterator();
    Map<String,String> map = new HashMap<String, String>();
    while(it.hasNext()){
     Entry<byte[], List<result>> entry = it.next();
     List<result> list = entry.getValue();
     for(result r:list){
     System.out.println("rowkey:"+r.getRowkey()+",cf:"+r.getCf()+",col1:"+r.getCol1()+",col2:"+r.getCol2()+",col3:"+r.getCol3());
     }
    }
    return map.get("rk");
  } catch (Throwable e) {
   // TODO Auto-generated catch block
   e.printStackTrace();
  }
  return "";
 }
如果遇到诸如no registered service on table(记不清了)...,其实是在前一步加载协处理器没有成功,仔细检查重新load,hbase官网关于协处理器给出了很详细的阐述和例子:
http://hbase.apache.org/book.html#cp_loading,侧边栏目录:Apache HBase Coprocessors

猜你喜欢

转载自blog.csdn.net/javajxz008/article/details/52372999