HBase 2.1.0 High-Availability Installation and Configuration for a Big Data Cluster

I. Installation Preparation

Download: http://archive.apache.org/dist/hbase/

Reference documentation: http://hbase.apache.org/book.html

Passwordless SSH setup: https://blog.csdn.net/qq262593421/article/details/105325593

ZooKeeper installation: https://blog.csdn.net/qq262593421/article/details/106955485

Hadoop cluster installation: https://blog.csdn.net/qq262593421/article/details/106956480
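
Before going further, it is worth confirming that the prerequisites above are actually in place. A minimal sanity check, assuming the hostnames used throughout this guide (hadoop001-hadoop005) and that the ZooKeeper and Hadoop scripts are on the PATH:

# run on any cluster node
jps                                  # ZooKeeper nodes should list QuorumPeerMain
zkServer.sh status                   # reports this node's ZooKeeper role (leader/follower)
hdfs dfsadmin -report | head -n 20   # HDFS is up and the DataNodes are registered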

II. Extract and Install

1. Extract the archive

cd /usr/local/hadoop
tar zxpf hbase-2.1.0-bin.tar.gz
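
If the tarball is not already on the machine, it can be fetched from the download address in section I; the 2.1.0 path below follows the archive's usual layout:

cd /usr/local/hadoop
wget http://archive.apache.org/dist/hbase/2.1.0/hbase-2.1.0-bin.tar.gz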

2. Create a symlink

ln -s hbase-2.1.0 hbase
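
The symlink keeps every other path in this guide version-independent, so a later upgrade only needs to repoint it. Verify that it resolves:

ls -ld /usr/local/hadoop/hbase   # should point to hbase-2.1.0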

III. Edit the Configuration Files

regionservers

The regionservers file plays the same role as Hadoop's workers file: list every node that runs a Hadoop DataNode, and exactly those nodes become RegionServers.

hadoop003
hadoop004
hadoop005

hbase-site.xml

<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
	<!--
	<property>
	  <name>zookeeper.znode.parent</name>
	  <value>/hbase/master</value>
	</property>
	-->
	<property>
	  <name>hbase.master</name>
	  <value>60000</value>
	  <!-- In HBase HA mode, only the HMaster port needs to be specified here -->
	</property>
  	<property>
      <name>hbase.tmp.dir</name>
      <value>/home/data/cluster/hbase/tmp</value>
  	</property>
  	<property>
	  <name>hbase.rootdir</name>
	  <value>hdfs://ns1/hbase</value>
	</property>
	<property>
	  <name>hbase.cluster.distributed</name>
	  <value>true</value>
	</property>
	<property>
	  <name>hbase.zookeeper.property.clientPort</name>
	  <value>2181</value>
	</property>
	<property>
	  <name>hbase.zookeeper.quorum</name>
	  <value>hadoop001,hadoop002,hadoop003</value>
	  <!-- <value>hadoop001:2181,hadoop002:2181,hadoop003:2181</value> -->
	</property>
	<property>
	  <name>hbase.zookeeper.property.dataDir</name>
	  <value>/usr/local/zookeeper/data</value>
	</property>
	<property>
	  <name>dfs.datanode.max.transfer.threads</name>
	  <value>4096</value>
	</property>
	<!-- 
	<property>
	  <name>hbase.master</name>
	  <value>hadoop1</value>
	</property>
	-->
	<!-- 
	<property>
      <name>hbase.masters</name>
      <value>hadoop1,hadoop2</value>
      <description>List of master rpc end points for the hbase cluster.</description>
    </property>
    -->
    <property>
	  <name>hbase.unsafe.stream.capability.enforce</name>
	  <value>false</value>
	</property>
	<!-- 
	<property>
	  <name>hbase.lease.recovery.dfs.timeout</name>
	  <value>23000</value>
	  <description>How much time we allow elapse between calls to recover lease.
	  Should be larger than the dfs timeout.</description>
	</property>
	<property>
	  <name>dfs.client.socket-timeout</name>
	  <value>10000</value>
	  <description>Down the DFS timeout from 60 to 10 seconds.</description>
	</property>
	<property>
	  <name>dfs.datanode.socket.write.timeout</name>
	  <value>10000</value>
	  <description>Down the DFS timeout from 8 * 60 to 10 seconds.</description>
	</property>
	<property>
	  <name>ipc.client.connect.timeout</name>
	  <value>3000</value>
	  <description>Down from 60 seconds to 3.</description>
	</property>
	-->

	<!-- 
	<property>
	  <name>ipc.client.connect.max.retries.on.timeouts</name>
	  <value>2</value>
	  <description>Down from 45 seconds to 3 (2 == 3 retries).</description>
	</property>
	<property>
	  <name>dfs.namenode.avoid.read.stale.datanode</name>
	  <value>true</value>
	  <description>Enable stale state in hdfs</description>
	</property>
	<property>
	  <name>dfs.namenode.stale.datanode.interval</name>
	  <value>20000</value>
	  <description>Down from default 30 seconds</description>
	</property>
	<property>
	  <name>dfs.namenode.avoid.write.stale.datanode</name>
	  <value>true</value>
	  <description>Enable stale state in hdfs</description>
	</property>
	-->

	<!-- 
	<property>
	  <name>hbase.security.authentication</name>
	  <value>simple</value>
	</property>
	<property>
	  <name>hbase.security.authorization</name>
	  <value>true</value>
	</property>
	<property>
	  <name>hbase.coprocessor.master.classes</name>
	  <value>org.apache.hadoop.hbase.security.access.AccessController</value>
	</property>
	<property>
	  <name>hbase.coprocessor.region.classes</name>
	  <value>org.apache.hadoop.hbase.security.access.AccessController</value>
	</property>
	<property>
	  <name>hbase.coprocessor.regionserver.classes</name>
	  <value>org.apache.hadoop.hbase.security.access.AccessController</value>
	</property>
	<property>
	  <name>hbase.rpc.engine</name>
	  <value>org.apache.hadoop.hbase.ipc.SecureRpcEngine</value>
	</property>
	-->

	<!-- HFile v3 Support -->
	<property>
	  <name>hfile.format.version</name>
	  <value>3</value>
	</property>
	<!-- HBase Superuser -->
	<property>
	  <name>hbase.superuser</name>
	  <value>hbase,admin,root,hdfs,zookeeper,hive,hadoop,hue,impala,spark,kylin</value>
	</property>
	
	<!-- geomesa-hbase -->
	<property>
    	<name>hbase.coprocessor.user.region.classes</name>
    	<value>org.locationtech.geomesa.hbase.coprocessor.GeoMesaCoprocessor</value>
  	</property>
	<property>  
	   <name>hbase.table.sanity.checks</name>  
	   <value>false</value>  
	</property>
	<property>  
	   <name>hbase.coprocessor.abortonerror</name>  
	   <value>false</value>  
	</property>

	<!-- adjust and optimize -->
	<property>
	  <name>hfile.block.cache.size</name>
	  <value/>
	<!--    <value>0.2</value>   -->
	  <description>Fraction of the heap given to the StoreFile read cache (block cache). It directly affects read performance, so larger is generally better for read-heavy workloads: if writes are much rarer than reads, 0.4-0.5 is fine; for a balanced read/write mix, around 0.3; if writes dominate, keep the default.</description>
	</property>


</configuration>
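
Note that hbase.rootdir refers to the HDFS nameservice ns1 rather than a host:port pair, so HBase must be able to resolve the nameservice. The HBASE_CLASSPATH setting in hbase-env.sh below takes care of that; an alternative, equally common approach is to link Hadoop's client configs into HBase's conf directory (paths assumed from this guide's layout):

ln -s /usr/local/hadoop/hadoop/etc/hadoop/core-site.xml /usr/local/hadoop/hbase/conf/core-site.xml
ln -s /usr/local/hadoop/hadoop/etc/hadoop/hdfs-site.xml /usr/local/hadoop/hbase/conf/hdfs-site.xml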

hbase-env.sh

HBase environment variable configuration

#!/usr/bin/env bash

export HBASE_OPTS="-XX:+UseConcMarkSweepGC -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -Xloggc:/usr/local/hadoop/hbase/logs/jvm-gc-hbase.log"

export JAVA_HOME=/usr/java/jdk1.8
export HBASE_HEAPSIZE=4G

export HADOOP_HOME=/usr/local/hadoop/hadoop
export HBASE_HOME=/usr/local/hadoop/hbase
export HBASE_CLASSPATH=/usr/local/hadoop/hadoop/etc/hadoop
export HBASE_MANAGES_ZK=false
export HBASE_PID_DIR=/var/hadoop/pids
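
HBase creates most directories on demand, but only when the daemon user can write to the parents; creating the ones referenced above up front, on every node, avoids permission surprises:

mkdir -p /var/hadoop/pids                 # HBASE_PID_DIR
mkdir -p /home/data/cluster/hbase/tmp     # hbase.tmp.dir
mkdir -p /usr/local/hadoop/hbase/logs     # GC log path from HBASE_OPTS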

backup-masters

When HBase is started, the nodes listed in backup-masters are brought up as standby HMasters.

hadoop001
hadoop002
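
The installation and all of the configuration above must be identical on every node. A minimal distribution loop, assuming passwordless SSH from hadoop001 and the hostnames used in this guide:

for host in hadoop002 hadoop003 hadoop004 hadoop005; do
  scp -r /usr/local/hadoop/hbase-2.1.0 $host:/usr/local/hadoop/
  ssh $host "ln -s /usr/local/hadoop/hbase-2.1.0 /usr/local/hadoop/hbase"
done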

IV. Environment Variables

Edit the /etc/profile file

vim /etc/profile

Add the following lines:

export HBASE_HOME=/usr/local/hadoop/hbase
export PATH=$PATH:$HBASE_HOME/bin
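
Then reload the profile so the variables take effect in the current shell:

source /etc/profile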

V. Start HBase

start-hbase.sh
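
Run start-hbase.sh on the node that should become the active HMaster (hadoop001 here); it starts the RegionServers and backup masters over SSH. A quick way to confirm that every daemon came up, assuming passwordless SSH to all nodes:

for host in hadoop001 hadoop002 hadoop003 hadoop004 hadoop005; do
  echo "== $host =="
  ssh $host jps | grep -E 'HMaster|HRegionServer'
done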

VI. Verify the Installation

1. Run HBase shell commands

hbase shell
create 'tb1','cmf1','cmf2','cmf3'
list
list_namespace
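
Beyond create and list, a quick write/read round trip exercises the RegionServers as well (cmf1 is one of the column families created above):

put 'tb1','row1','cmf1:col1','value1'
get 'tb1','row1'
scan 'tb1'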


2. Visit the web UI

http://hadoop001:16010/master-status

http://hadoop002:16010/master-status
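
One of the two pages should show its node as the active master and list the other under "Backup Masters". To exercise the failover itself, kill the active HMaster and watch the standby take over; the pid file name below assumes the daemons run as root (it is hbase-<user>-master.pid in general):

kill -9 $(cat /var/hadoop/pids/hbase-root-master.pid)
# refresh http://hadoop002:16010/master-status; the standby should become active shortly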
