HBase的java客户端

工作中用到的HBase版本是 1.2.0-cdh5.12.0,使用的开发工具是IDEA,简单的写一个Demo。

开发环境依赖:

<dependencies>
    <!--dependency>
      <groupId>spark</groupId>
      <artifactId>[the artifact id of the block to be mounted]</artifactId>
      <version>1.0-SNAPSHOT</version>
    </dependency-->
    <!-- https://mvnrepository.com/artifact/org.apache.hbase/hbase -->

    <dependency>
      <groupId>junit</groupId>
      <artifactId>junit</artifactId>
      <version>3.8.1</version>
      <scope>test</scope>
    </dependency>
    <dependency>
      <groupId>org.junit.jupiter</groupId>
      <artifactId>junit-jupiter-api</artifactId>
      <version>5.8.2</version> <!-- pin an explicit version: the RELEASE meta-version is not supported for dependencies in Maven 3 -->
    </dependency>
    <dependency>
      <groupId>log4j</groupId>
      <artifactId>log4j</artifactId>
      <version>1.2.17</version>
    </dependency>
    <dependency>
      <groupId>org.apache.hadoop</groupId>
      <artifactId>hadoop-common</artifactId>
      <version>2.6.0-cdh5.12.0</version>
    </dependency>

    <dependency>
      <groupId>org.apache.hadoop</groupId>
      <artifactId>hadoop-hdfs</artifactId>
      <version>2.6.0-cdh5.12.0</version>
    </dependency>
<!--hbase的简单的增删改查只需要引入HBase的client即可-->
    <dependency>
      <groupId>org.apache.hbase</groupId>
      <artifactId>hbase-client</artifactId>
      <version>1.2.0-cdh5.12.0</version>
    </dependency>
<!--hbase的高级应用,比如自定义协处理器则需要引入hbase服务端-->
    <dependency>
      <groupId>org.apache.hbase</groupId>
      <artifactId>hbase-server</artifactId>
      <version>1.2.0-cdh5.12.0</version>
    </dependency>
  
  </dependencies>


<!--添加CDH版本的HBase需要添加cloudera仓库,否则无法使用-->

  <repositories>
    <repository>
      <id>cloudera</id>
      <url>https://repository.cloudera.com/artifactory/cloudera-repos/</url>
    </repository>

  </repositories>
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.util.Bytes;

import java.io.IOException;
/**
 * Minimal HBase client demo: connects to a cluster through ZooKeeper, scans
 * every cell of table "test2", and prints each cell's rowkey, column family,
 * qualifier and value.
 */
public class HBaseTest {

    /**
     * Entry point: builds the HBase configuration, opens a connection, scans
     * table "test2" and prints every cell found.
     *
     * @param args unused
     * @throws IOException if connecting to the cluster or scanning the table fails
     */
    public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "10.1.34.124");
        // FIX: the correct property name is "hbase.zookeeper.property.clientPort".
        // The original "hbase.zookeeper.quorum.clientPort" is not a real HBase
        // property and was silently ignored (it only worked because 2181 is the
        // default port anyway).
        conf.set("hbase.zookeeper.property.clientPort", "2181");

        // try-with-resources: Connection, Table and ResultScanner are all
        // Closeable and were leaked in the original code.
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("test2"))) {

            /* Example of an insert (Put):
            Put put = new Put(Bytes.toBytes("001"));
            put.addColumn(Bytes.toBytes("f"), Bytes.toBytes("name"), Bytes.toBytes("zhangsan"));
            table.put(put);
            */

            Scan scan = new Scan();
            try (ResultScanner scanner = table.getScanner(scan)) {
                for (Result result : scanner) {
                    // Cell is the current API (KeyValue is deprecated in 1.x).
                    for (Cell cell : result.rawCells()) {
                        String rowKey = Bytes.toString(CellUtil.cloneRow(cell));
                        String family = Bytes.toString(CellUtil.cloneFamily(cell));
                        String qualifier = Bytes.toString(CellUtil.cloneQualifier(cell));
                        String value = Bytes.toString(CellUtil.cloneValue(cell));
                        // The original computed family/qualifier/value but never
                        // printed them; print the full cell coordinates.
                        System.out.println("rowKey=" + rowKey
                                + ", family=" + family
                                + ", qualifier=" + qualifier
                                + ", value=" + value);
                    }
                }
            }
        }
        System.out.println("success------------------------");
    }
}

猜你喜欢

转载自blog.csdn.net/chenyu_wtt/article/details/82986903