一、安装前提
找到官网下载 hbase 安装包 hbase-1.2.0-bin.tar.gz, 下载地址:http://mirrors.hust.edu.cn/apache/hbase/
上传安装包到指定目录,解压
tar -zxvf [文件]
二、修改配置文件
==>进入conf目录下
1.修改hbase-env.sh
export JAVA_HOME=/opt/soft/jdk180 具体的jdk安装路径
export HBASE_MANAGES_ZK=false 使用外部zookeeper
2.修改hbase-site.xml文件
<!--hbase.rootdir 将数据写入哪个目录 如果是单机版只要配置此属性就可以,value中file:/绝对路径,如果是分布式则配置与hadoop的core-site.xml中fs.defaultFS的服务器、端口一致,并指向zookeeper中事先创建的目录-->
<property>
<name>hbase.rootdir</name>
<value>hdfs://192.168.56.137:9000/hbase</value>
</property>
<!--单机模式不需要配置,分布式配置此项为true-->
<property>
<name>hbase.cluster.distributed</name>
<value>true</value>
</property>
<!--单机模式不需要配置 分布式时此项配置为zookeeper的数据目录(dataDir)的物理路径;zookeeper服务器地址则通过 hbase.zookeeper.quorum 属性指定,多个地址用","分割-->
<property>
<name>hbase.zookeeper.property.dataDir</name>
<value>/opt/soft/zookeeper345/data</value>
</property>
3.修改regionservers
如果搭建的是完全分布式的集群,就还需要修改regionservers ,可参考如下方式,伪分布式保持原本的localhost
[hadoop@hadoop1 conf]$ vi regionservers
hadoop1
hadoop2
hadoop3
hadoop4
配置环境变量
zookeeper 和hbase的环境变量
export ZOOKEEPER_HOME=/opt/soft/zookeeper345 具体的zookeeper安装路径
export PATH=$PATH:$ZOOKEEPER_HOME/bin
export HBASE_HOME=/opt/soft/hbase120 具体的hbase安装路径
export PATH=$PATH:$HBASE_HOME/bin
三、启动Hbase
按顺序启动
1、启动zookeeper
zkServer.sh start (stop/停止)
2、启动hbase
start-hbase.sh
使用zookeeper客户端
zkCli.sh
使用hbase shell
hbase shell
四、Hbase API
1、创建Maven工程引入架包
pom.xml
<!-- https://mvnrepository.com/artifact/org.apache.hbase/hbase-client -->
<dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-client</artifactId>
<version>1.2.0</version>
</dependency>
<!-- https://mvnrepository.com/artifact/org.apache.hbase/hbase-common -->
<dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-common</artifactId>
<version>1.2.0</version>
</dependency>
<!-- https://mvnrepository.com/artifact/org.apache.hbase/hbase-server -->
<dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-server</artifactId>
<version>1.2.0</version>
</dependency>
编写Java类
package com.services;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.util.Bytes;
/**
* @Date: 2019/12/23
* @Description:
*/
/**
 * Full-table scan demo: prints the base:username column of every row
 * in the mydemo:users table.
 */
public class MyDemo {
    public static void main(String[] args) throws Exception {
        // Client-side configuration: only the ZooKeeper quorum is required,
        // the client discovers the HBase masters/regionservers through it.
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "192.168.56.171:2181");
        // try-with-resources closes connection, table and scanner in reverse
        // order even when iteration throws (the original leaked the Table).
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table customer = connection.getTable(TableName.valueOf("mydemo:users"));
             ResultScanner res = customer.getScanner(new Scan())) {
            for (Result rs : res) {
                // Bytes.toBytes always encodes UTF-8; String.getBytes() would
                // use the platform default charset and can differ per machine.
                System.out.println(Bytes.toString(
                        rs.getValue(Bytes.toBytes("base"), Bytes.toBytes("username"))));
            }
        }
    }
}
模糊查询
package com.services;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.filter.CompareFilter;
import org.apache.hadoop.hbase.filter.RegexStringComparator;
import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
import org.apache.hadoop.hbase.util.Bytes;
/**
* @Date: 2019/12/23
* @Description:
*/
/**
 * Fuzzy-query demo: scans the customer table and prints cf:fname and
 * cf:lname for every row whose cf:fname matches the regex "J.*".
 */
public class MyScan {
    public static void main(String[] args) throws Exception {
        // Only the ZooKeeper quorum (host:port) is needed on the client side.
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "192.168.56.171:2181");
        // Server-side regex filter on a single column value; rows missing the
        // column are still returned unless setFilterIfMissing(true) is used.
        SingleColumnValueFilter filter = new SingleColumnValueFilter(
                Bytes.toBytes("cf"),
                Bytes.toBytes("fname"),
                CompareFilter.CompareOp.EQUAL,
                new RegexStringComparator("J.*"));
        Scan scan = new Scan();
        scan.setFilter(filter);
        // try-with-resources closes connection, table and scanner even if
        // iteration throws (the original leaked the Table on every run).
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table customer = connection.getTable(TableName.valueOf("customer"));
             ResultScanner res = customer.getScanner(scan)) {
            for (Result rs : res) {
                // Bytes.toBytes/toString are UTF-8; String.getBytes() depends
                // on the platform default charset.
                String fname = Bytes.toString(rs.getValue(Bytes.toBytes("cf"), Bytes.toBytes("fname")));
                String lname = Bytes.toString(rs.getValue(Bytes.toBytes("cf"), Bytes.toBytes("lname")));
                System.out.println(fname + ":" + lname);
            }
        }
    }
}
插入操作
package com.njbdqn.services;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
/**
* @Date: 2019/12/23
* @Description:
*/
/**
 * Insert demo: writes one row (row key "5") into mydemo:users with two
 * columns in the base family and one in the details family.
 */
public class MyPut {
    public static void main(String[] args) throws Exception {
        // Only the ZooKeeper quorum (host:port) is needed on the client side.
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "192.168.56.171:2181");
        // try-with-resources: the original never closed the connection or the
        // table, leaking the ZooKeeper session and client threads.
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table users = connection.getTable(TableName.valueOf("mydemo:users"))) {
            // One Put per row key; each addColumn adds a (family, qualifier, value) cell.
            Put pt = new Put("5".getBytes());
            pt.addColumn("base".getBytes(), "username".getBytes(), "sq".getBytes());
            pt.addColumn("base".getBytes(), "age".getBytes(), "20".getBytes());
            pt.addColumn("details".getBytes(), "like".getBytes(), "sleep,eat".getBytes());
            users.put(pt);
        }
    }
}