Hadoop2.X搭建

为了方便该环境是在1.X的环境上修改的,你可先看下1.X搭建,接下来再看这个

1 安装Zookeeper
[root@node001 software]# ll
total 320464
-rw-r--r-- 1 root root 183594876 Apr 29 2018 hadoop-2.6.5.tar.gz
-rw-r--r-- 1 root root 126857158 Sep 7 2016 jdk-7u67-linux-x64.rpm
-rw-r--r-- 1 root root 17699306 Apr 29 2018 zookeeper-3.4.6.tar.gz
[root@node001 software]# tar xf zookeeper-3.4.6.tar.gz -C /opt/sxt/
[root@node001 software]# cd /opt/sxt/
[root@node001 sxt]# ll
total 8
drwxr-xr-x 10 root root 4096 May 13 23:49 hadoop-2.6.5
drwxr-xr-x 10 1000 1000 4096 Feb 20 2014 zookeeper-3.4.6
[root@node001 sxt]# mv zookeeper-3.4.6/ zk
[root@node001 sxt]# ll
total 8
drwxr-xr-x 10 root root 4096 May 13 23:49 hadoop-2.6.5
drwxr-xr-x 10 1000 1000 4096 Feb 20 2014 zk
[root@node001 sxt]#
2配置Zookeeper的环境变量
[root@node001 sxt]# vi + /etc/profile
export JAVA_HOME=/usr/java/jdk1.7.0_67/
export HADOOP_HOME=/opt/sxt/hadoop-2.6.5
export ZOOKEEPER_HOME=/opt/sxt/zk
export PATH=$PATH:$JAVA_HOME/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$ZOOKEEPER_HOME/bin

给node002 node003分发配置文件
    [root@node001 sxt]# scp /etc/profile node002:/etc/
    profile                                                                                                                                                                                                        100% 1993     2.0KB/s   00:00    
    [root@node001 sxt]# scp /etc/profile node003:/etc/
    profile 

3 修改zookeeper的配置文件 zoo_sample.cfg
[root@node001 zk]# ll
total 1552
drwxr-xr-x 2 1000 1000 4096 Feb 20 2014 bin
-rw-rw-r-- 1 1000 1000 82446 Feb 20 2014 build.xml
-rw-rw-r-- 1 1000 1000 80776 Feb 20 2014 CHANGES.txt
drwxr-xr-x 2 1000 1000 4096 Feb 20 2014 conf
drwxr-xr-x 10 1000 1000 4096 Feb 20 2014 contrib
drwxr-xr-x 2 1000 1000 4096 Feb 20 2014 dist-maven
drwxr-xr-x 6 1000 1000 4096 Feb 20 2014 docs
-rw-rw-r-- 1 1000 1000 1953 Feb 20 2014 ivysettings.xml
-rw-rw-r-- 1 1000 1000 3375 Feb 20 2014 ivy.xml
drwxr-xr-x 4 1000 1000 4096 Feb 20 2014 lib
-rw-rw-r-- 1 1000 1000 11358 Feb 20 2014 LICENSE.txt
-rw-rw-r-- 1 1000 1000 170 Feb 20 2014 NOTICE.txt
-rw-rw-r-- 1 1000 1000 1770 Feb 20 2014 README_packaging.txt
-rw-rw-r-- 1 1000 1000 1585 Feb 20 2014 README.txt
drwxr-xr-x 5 1000 1000 4096 Feb 20 2014 recipes
drwxr-xr-x 8 1000 1000 4096 Feb 20 2014 src
-rw-rw-r-- 1 1000 1000 1340305 Feb 20 2014 zookeeper-3.4.6.jar
-rw-rw-r-- 1 1000 1000 836 Feb 20 2014 zookeeper-3.4.6.jar.asc
-rw-rw-r-- 1 1000 1000 33 Feb 20 2014 zookeeper-3.4.6.jar.md5
-rw-rw-r-- 1 1000 1000 41 Feb 20 2014 zookeeper-3.4.6.jar.sha1
[root@node001 zk]# cd conf
total 12
-rw-rw-r-- 1 1000 1000 535 Feb 20 2014 configuration.xsl
-rw-rw-r-- 1 1000 1000 2161 Feb 20 2014 log4j.properties
-rw-rw-r-- 1 1000 1000 922 Feb 20 2014 zoo_sample.cfg
[root@node001 conf]#
3.1修改配置文件zoo_sample.cfg 名称为zoo.cfg
mv zoo_sample.cfg zoo.cfg
3.1.1 修改配置文件内容
dataDir=/var/sxt/zk

    在最后追加
    server.1=node001:2888:3888
    server.2=node002:2888:3888
    server.3=node003:2888:3888

    保存退出
3.2分发zk 给node002和node003
    [root@node001 conf]# cd ..
    [root@node001 zk]# cd ..
    [root@node001 sxt]# scp -r zk/ node002:`pwd`
    [root@node001 sxt]# scp -r zk/ node003:`pwd`

    执行配置文件 . /etc/profi*

    在3个节点都创建/var/sxt/zk 目录 在对应的节点创建myid 并把对应数字写入到myid
    mkdir -p /var/sxt/zk
    echo 1 > /var/sxt/zk/myid
    echo 2 > /var/sxt/zk/myid
    echo 3 > /var/sxt/zk/myid

    zkServer.sh start 每个节点执行启动 
    zkServer.sh status查看状态

4配置高可用
进入hadoop 安装目录 
cd /opt/sxt/hadoop-2.6.5/etc/hadoop/
vi hdfs-site.xml
删除下边的配置
 <property>
    <name>dfs.namenode.secondary.http-address</name>
    <value>node002:50090</value>
</property>
加上
<property>
    <name>dfs.nameservices</name>
    <value>mycluster</value>
</property>
<property>
    <name>dfs.ha.namenodes.mycluster</name>
    <value>nn1,nn2</value>
</property>
<property>
<name>dfs.namenode.rpc-address.mycluster.nn1</name>
<value>node001:8020</value>
</property>
<property>
<name>dfs.namenode.rpc-address.mycluster.nn2</name>
<value>node002:8020</value>
</property>
<property>
<name>dfs.namenode.http-address.mycluster.nn1</name>
<value>node001:50070</value>
</property>
<property>
<name>dfs.namenode.http-address.mycluster.nn2</name>
<value>node002:50070</value>
</property>
<property>
<name>dfs.namenode.shared.edits.dir</name>
<value>qjournal://node001:8485;node002:8485;node003:8485/mycluster</value>
</property>
<property>
<name>dfs.client.failover.proxy.provider.mycluster</name>
<value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
</property>
<property>
<name>dfs.ha.fencing.methods</name>
<value>sshfence</value>
</property>

<property>
<name>dfs.ha.fencing.ssh.private-key-files</name>
<value>/root/.ssh/id_rsa</value>
</property>
<property>
<name>dfs.journalnode.edits.dir</name>
<value>/var/sxt/hadoop/ha/journalnode</value>
</property>
<property>
<name>dfs.ha.automatic-failover.enabled</name>
<value>true</value>
</property>
修改core-site.xml
[root@node001 hadoop]# vi core-site.xml 
    <property>
    <name>fs.defaultFS</name>
    <value>hdfs://mycluster</value>
    </property>
    <property>
        <name>hadoop.tmp.dir</name>
        <value>/var/sxt/hadoop/ha</value>
    </property>
    <property>
    <name>ha.zookeeper.quorum</name>
    <value>node001:2181,node002:2181,node003:2181</value>
    </property>
分发3个配置文件给node002 node003 node004
 scp core-site.xml hdfs-site.xml node002:`pwd`
 scp core-site.xml hdfs-site.xml node003:`pwd`
 scp core-site.xml hdfs-site.xml node004:`pwd`

 执行 source /etc/profile 每个节点都执行

 执行 hadoop-daemon.sh start journalnode  每个节点都执行

 在node001 执行  hdfs namenode -format 变为高可用

 先在node001 执行hadoop-daemon.sh start namenode 启动namenode
 然后在node002 执行 hdfs namenode -bootstrapStandby

 hdfs zkfc -formatZK

猜你喜欢

转载自blog.csdn.net/weixin_40210830/article/details/90229944