大数据集群部署

1、zookeeper部署

wget http://apache.fayea.com/zookeeper/zookeeper-3.4.10/zookeeper-3.4.10.tar.gz

tar xf zookeeper-3.4.10.tar.gz 

cd zookeeper-3.4.10

cp -a conf/zoo_sample.cfg conf/zoo.cfg

[root@node1 opt]# cat /opt/zookeeper-3.4.10/conf/zoo.cfg
# The number of milliseconds of each tick
tickTime=2000
# The number of ticks that the initial
# synchronization phase can take
initLimit=10
# The number of ticks that can pass between
# sending a request and getting an acknowledgement
syncLimit=5
# the directory where the snapshot is stored.
# do not use /tmp for storage, /tmp here is just
# example sakes.
dataDir=/opt/zookeeper-3.4.10/data
dataLogDir=/opt/zookeeper-3.4.10/logs
# the port at which the clients will connect
clientPort=2181
# the maximum number of client connections.
# increase this if you need to handle more clients
#maxClientCnxns=60
#
# Be sure to read the maintenance section of the
# administrator guide before turning on autopurge.
#
# http://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_maintenance
#
# The number of snapshots to retain in dataDir
#autopurge.snapRetainCount=3
# Purge task interval in hours
# Set to "0" to disable auto purge feature
#autopurge.purgeInterval=1
server.1=10.61.98.71:2888:3888
server.2=10.61.98.72:2888:3888
server.3=10.61.98.73:2888:3888

# 创建配置中指定的数据/日志目录,并写入本机的myid(server.1对应node1)
mkdir -p /opt/zookeeper-3.4.10/data /opt/zookeeper-3.4.10/logs
echo 1 > /opt/zookeeper-3.4.10/data/myid

scp -rp zookeeper-3.4.10 node2:/opt/

scp -rp zookeeper-3.4.10 node3:/opt/

# 注意:拷贝完成后,需在node2、node3上分别把/opt/zookeeper-3.4.10/data/myid的内容改为2、3

2、hadoop部署

wget http://apache.01link.hk/hadoop/common/hadoop-2.6.5/hadoop-2.6.5.tar.gz

groupadd hadoop

useradd -m -g hadoop hadoop

tar xf hadoop-2.6.5.tar.gz 

cd hadoop-2.6.5

cd /opt/hadoop-2.6.5/etc/hadoop

[root@node1 hadoop]# egrep -v "^#|^$" core-site.xml
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->
<!-- Put site-specific property overrides in this file. -->
<configuration>
<!-- hdfs地址,ha模式中是连接到nameservice -->
<property>
<name>fs.defaultFS</name>
<value>hdfs://ns1</value>
</property>
<!-- 这里的路径默认是NameNode、DataNode、JournalNode等存放数据的公共目录,也可以单独指定 -->
<property>
<name>hadoop.tmp.dir</name>
<value>/opt/hadoop-2.6.5/tmp</value>
</property>
<!-- 指定ZooKeeper集群的地址和端口。注意,数量一定是奇数,且不少于三个节点-->
<property>
<name>ha.zookeeper.quorum</name>
<value>node1:2181,node2:2181,node3:2181</value>
</property>
</configuration>

[root@node1 hadoop]# egrep -v "^#|^$" hdfs-site.xml
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->
<!-- Put site-specific property overrides in this file. -->
<configuration>
<!-- 指定副本数,不能超过机器节点数 -->
<property>
<name>dfs.replication</name>
<value>3</value>
</property>
<!-- 为namenode集群定义一个services name -->
<property>
<name>dfs.nameservices</name>
<value>ns1</value>
</property>
<!-- nameservice 包含哪些namenode,为各个namenode起名 -->
<property>
<name>dfs.ha.namenodes.ns1</name>
<value>node1,node2</value>
</property>
<!-- 名为master188的namenode的rpc地址和端口号,rpc用来和datanode通讯 -->
<property>
<name>dfs.namenode.rpc-address.ns1.node1</name>
<value>node1:9000</value>
</property>
<!-- 名为master189的namenode的rpc地址和端口号,rpc用来和datanode通讯 -->
<property>
<name>dfs.namenode.rpc-address.ns1.node2</name>
<value>node2:9000</value>
</property>
<!--名为master188的namenode的http地址和端口号,用来和web客户端通讯 -->
<property>
<name>dfs.namenode.http-address.ns1.node1</name>
<value>node1:50070</value>
</property>
<!-- 名为master189的namenode的http地址和端口号,用来和web客户端通讯 -->
<property>
<name>dfs.namenode.http-address.ns1.node2</name>
<value>node2:50070</value>
</property>

<!-- namenode间用于共享编辑日志的journal节点列表 -->
<property>
<name>dfs.namenode.shared.edits.dir</name>
<value>qjournal://node1:8485;node2:8485;node3:8485/ns1</value>
</property>
<!-- 指定该集群出现故障时,是否自动切换到另一台namenode -->
<property>
<name>dfs.ha.automatic-failover.enabled.ns1</name>
<value>true</value>
</property>
<!-- journalnode 上用于存放edits日志的目录 -->
<property>
<name>dfs.journalnode.edits.dir</name>
<value>/opt/hadoop-2.6.5/tmp/data/dfs/journalnode</value>
</property>
<!-- 客户端连接可用状态的NameNode所用的代理类 -->
<property>
<name>dfs.client.failover.proxy.provider.ns1</name>
<value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
</property>
<!-- 一旦需要NameNode切换,使用ssh方式进行操作 -->
<property>
<name>dfs.ha.fencing.methods</name>
<value>sshfence</value>
</property>
<!-- 如果使用ssh进行故障切换,使用ssh通信时用的密钥存储的位置 -->
<property>
<name>dfs.ha.fencing.ssh.private-key-files</name>
<value>/home/hadoop/.ssh/id_rsa</value>
</property>
<!-- connect-timeout超时时间 -->
<property>
<name>dfs.ha.fencing.ssh.connect-timeout</name>
<value>30000</value>
</property>
</configuration>

[root@node1 hadoop]# egrep -v "^#|^$" mapred-site.xml
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->
<!-- Put site-specific property overrides in this file. -->
<!-- 采用yarn作为mapreduce的资源调度框架 -->
<configuration>
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
</configuration>

[root@node1 hadoop]# egrep -v "^#|^$" yarn-site.xml
<?xml version="1.0"?>
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->
<configuration>
<!-- 启用HA高可用性 -->
<property>
<name>yarn.resourcemanager.ha.enabled</name>
<value>true</value>
</property>
<!-- 指定resourcemanager的名字 -->
<property>
<name>yarn.resourcemanager.cluster-id</name>
<value>yrc</value>
</property>
<!-- 使用了2个resourcemanager,分别指定Resourcemanager的地址 -->
<property>
<name>yarn.resourcemanager.ha.rm-ids</name>
<value>rm1,rm2</value>
</property>

<!-- 指定rm1的地址 -->
<property>
<name>yarn.resourcemanager.hostname.rm1</name>
<value>node1</value>
</property>

<!-- 指定rm2的地址 -->
<property>
<name>yarn.resourcemanager.hostname.rm2</name>
<value>node2</value>
</property>

<!-- 指定当前机器master188作为rm1 -->
<property>
<name>yarn.resourcemanager.ha.id</name>
<value>rm1</value>
</property>

<!-- 指定zookeeper集群机器 -->
<property>
<name>yarn.resourcemanager.zk-address</name>
<value>node1:2181,node2:2181,node3:2181</value>
</property>

<!-- NodeManager上运行的附属服务,默认是mapreduce_shuffle -->
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>
</configuration>

[root@node1 hadoop]# egrep -v "^#|^$" slaves
node1
node2
node3

scp -r hadoop-2.6.5 node2:/opt/
scp -r hadoop-2.6.5 node3:/opt/

修改yarn-site.xml

在node2机器,即ResourceManager备用主节点上修改如下属性,表示当前机器作为rm2:

 
  <property>
    <name>yarn.resourcemanager.ha.id</name>
    <value>rm2</value>
  </property>

启动Hadoop

cd hadoop-2.6.5/sbin/

./hadoop-daemons.sh start journalnode
启动前需要先初始化JournalNode.
2.0 登录journalnode_1,journalnode_2,journalnode_3机器, 执行 sbin/hadoop-daemon.sh start journalnode
2.1 格式化nn1, 执行 bin/hadoop namenode -format
    格式化ZKFC在ZooKeeper中的状态节点, 执行 bin/hdfs zkfc -formatZK

2.2 在nn1上,执行 bin/hdfs namenode -initializeSharedEdits
2.3 启动nn1, 执行 sbin/hadoop-daemon.sh start namenode
2.4 登录nn2,拉取nn1 的元数据(注意,nn2无需进行格式化), bin/hdfs namenode -bootstrapStandby
2.5 启动nn2, sbin/hadoop-daemon.sh start namenode

5)启动HDFS、YARN、ZookeeperFailoverController
./start-dfs.sh

./start-yarn.sh

./hadoop-daemon.sh start zkfc

 在node2机器上,启动ResourceManager,备用主节点的ResourceManager需要手动启动:

yarn-daemon.sh start resourcemanager

6)查看Namenode、ResourceManager状态

hdfs haadmin -getServiceState node1
yarn rmadmin -getServiceState rm1

hdfs haadmin -getServiceState node2
yarn rmadmin -getServiceState rm2

也可以通过Web界面来查看,浏览器中输入 ip:50070 查看HDFS,输入 ip:8088/cluster/cluster 查看YARN。

7)测试高可用
a.主节点--->备用主节点

kill掉主节点的namenode,查看备用主节点的namenode状态是否切换为active;

kill掉主节点的ResourceManager,查看备用主节点的ResourceManager是否切换为active;

b.备用主节点--->主节点

若上述操作执行成功,那么再测试反向故障自动转移

先启动被杀死的原主节点的namenode和ResourceManager

hadoop-daemon.sh start namenode 

yarn-daemon.sh start resourcemanager

再kill备用主节点的namenode和ResourceManager,查看主节点的状态,若能切换为active,那么Hadoop HA高可用集群搭建完成。

五、安装配置HBase

1、下载及安装

下载地址:https://archive.apache.org/dist/hbase/1.3.1/hbase-1.3.1-bin.tar.gz

在master188机器上,解压到/home/hadoop/目录下:

tar -zxvf hbase-1.3.1-bin.tar.gz

2、配置

进入hbase-1.3.1/conf/目录,修改配置文件:

cd hbase-1.3.1/conf/
1)vi hbase-env.sh
//配置JDK
export JAVA_HOME=/opt/jdk

//保存pid文件
export HBASE_PID_DIR=/home/hadoop/data/hbase/pids

//修改HBASE_MANAGES_ZK,禁用HBase自带的Zookeeper,因为我们是使用独立的Zookeeper
export HBASE_MANAGES_ZK=false
2)vi hbase-site.xml
<configuration>
  <!-- 设置HRegionServers共享目录,请加上端口号 -->
  <property>
    <name>hbase.rootdir</name>
    <value>hdfs://master188:9000/hbase</value>
  </property>
  <!-- 指定HMaster主机 -->
  <property>
    <name>hbase.master</name>
    <value>hdfs://master188:60000</value>
  </property>
  <!-- 启用分布式模式 -->
  <property>
    <name>hbase.cluster.distributed</name>
    <value>true</value>
  </property>
  <!-- 指定Zookeeper集群位置 -->
  <property>
    <name>hbase.zookeeper.quorum</name>
    <value>master188:2181,master189:2181,slave190:2181</value>
  </property>
  <!-- 指定独立Zookeeper安装路径 -->
  <property>
    <name>hbase.zookeeper.property.dataDir</name>
    <value>/home/hadoop/zookeeper-3.4.11</value>
  </property>
  <!-- 指定ZooKeeper集群端口 -->
  <property>
    <name>hbase.zookeeper.property.clientPort</name>
    <value>2181</value>
  </property>
</configuration>
3)vi regionservers

修改regionservers文件,因为当前是使用独立的Zookeeper集群,所以要指定RegionServers所在机器:


master188
master189
slave190
4)创建pid文件保存目录

在/home/hadoop/目录下:

mkdir data/hbase/pids -p

3、拷贝HBase到其他机器

cd /home/hadoop/

scp -r hbase-1.3.1 hadoop@master189:/home/hadoop/

scp -r hbase-1.3.1 hadoop@slave190:/home/hadoop/ 

4、启动HBase

在主节点上启动HBase(这里的主节点是指NameNode状态为active的节点,而非指文中对本实验的机器声明):

cd hbase-1.3.1/bin

./start-hbase.sh

//查看HMaster、Regionserver进程是否启动
jps
注意:此时Hadoop集群应处于启动状态,并且是在主节点执行start-hbase.sh启动HBase集群,否则HMaster进程将在启动几秒后消失,而备用的HMaster进程需要在备用主节点单独启动,命令是: ./hbase-daemon.sh start master

在备用主节点启动HMaster进程,作为备用HMaster:

cd hbase-1.3.1/bin

./hbase-daemon.sh start master

5、HA高可用测试

在浏览器中输入 ip:16010 ,查看主节点和备用主节点上的HMaster的状态,在备用主节点的web界面中,可以看到“Current Active Master: master188”,表示当前HBase主节点是master188机器;

主节点--->备用主节点
这里的主节点指使用start-hbase.sh命令启动HBase集群的机器

kill掉主节点的HMaster进程,在浏览器中查看备用主节点的HBase是否切换为active;

若上述操作成功,则在主节点启动被杀死的HMaster进程:

cd hbase-1.3.1/bin/

./hbase-daemon.sh start master

然后,kill掉备用主节点的HMaster进程,在浏览器中查看主节点的HBase是否切换为active,若操作成功,则HBase高可用集群搭建完成;

6、HBase基本操作

//启动HBase
[root@vnet ~] start-hbase.sh

//进入HBase Shell
[root@vnet ~] hbase shell

//查看当前HBase有哪些表
hbase(main):> list

//创建表t_user,cf1和cf2是列族,列族一般不超过3个
hbase(main):> create 't_user','cf1','cf2'

//获得表t_user的描述信息
hbase(main):> describe 't_user'

//禁用表
hbase(main):> disable 't_user'

//删除表,删除表之前要先把表禁用掉
hbase(main):> drop 't_user'

//查询表是否存在
hbase(main):> exists 't_user'

//查看全表数据
hbase(main):> scan 't_user'

//插入数据,分别是表名、key、列(列族:具体列)、值。HBase是面向列的数据库,列可无限扩充
hbase(main):> put 't_user','001','cf1:name','chenxj'
hbase(main):> put 't_user','001','cf1:age','18'
hbase(main):> put 't_user','001','cf2:sex','man'
hbase(main):> put 't_user','002','cf1:name','chenxj'
hbase(main):> put 't_user','002','cf1:address','fuzhou'
hbase(main):> put 't_user','002','cf2:sex','man'

//获取数据,可根据key、key和列族等进行查询
hbase(main):> get 't_user','001'
hbase(main):> get 't_user','002','cf1'
hbase(main):> get 't_user','001','cf1:age'

六、集群启动结果

Hadoop + Zookeeper + HBase 高可用集群启动后,进程状态如下:

描述 node1   node2 node3
HDFS主 NameNode NameNode
HDFS从 DataNode DataNode DataNode
YARN主 ResourceManager ResourceManager
YARN从 NodeManager NodeManager NodeManager
HBase主 HMaster HMaster
HBase从 HRegionServer HRegionServer HRegionServer
Zookeeper独立进程 QuorumPeerMain QuorumPeerMain QuorumPeerMain
NameNodes数据同步 JournalNode JournalNode JournalNode
主备故障切换 DFSZKFailoverController DFSZKFailoverController

七、总结

需要注意的地方:

1)备用节点上的NameNode、ResourceManager、HMaster均需单独启动;
hadoop-daemon.sh start namenode

yarn-daemon.sh start resourcemanager

hbase-daemon.sh start master 
2)可以使用-forcemanual参数强制切换主节点与备用主节点,但强制切换后集群的自动故障转移将会失效,需要重新格式化zkfc: hdfs zkfc -formatZK;
hdfs haadmin -transitionToActive/transitionToStandby  -forcemanual  master node2
yarn rmadmin -transitionToActive/transitionToStandby  -forcemanual  rm2
3)在备用主节点同步主节点的元数据时,主节点的HDFS必须已经启动;

   4)无法查看standby状态的节点上的hdfs;

   5)格式化namenode时要先启动各个JournalNode机器上的journalnode进程:hadoop-daemon.sh start journalnode

   6)若遇到问题,可以先考虑是哪个组件出现问题,然后查看该组件或与该组件相关的组件的日志信息;若各组件web页面无法访问,或存在其他连接问题,可以从「防火墙是否关闭」、「端口是否被占用」、「SSH」、「集群机器是否处于同一网段」内等角度考虑;

3、spark集群安装
spark on yarn 模式


猜你喜欢

转载自www.cnblogs.com/mylovelulu/p/10334600.html