[All About Hadoop] Installing a Highly Available, Fully Distributed Cluster

Why not calm the nerves by installing a highly available, fully distributed cluster?

Parts of the installation are skipped in this post; here is a link where anything unclear can be looked up:
Hadoop tutorial index

I. Planning

In short, the roles are laid out as follows (DataNode/NodeManager placement on all four machines is assumed; the rest can be read off the configuration below):

master: NameNode (nn1), ResourceManager (rm1), ZKFC, JournalNode, ZooKeeper, DataNode, NodeManager, JobHistory Server
slave1: NameNode (nn2), ResourceManager (rm2), ZKFC, DataNode, NodeManager
slave2: JournalNode, ZooKeeper, DataNode, NodeManager
slave3: JournalNode, ZooKeeper, DataNode, NodeManager

II. Prepare Four Machines

  • Install the JDK
  • Install ZooKeeper and start it (on master, slave2 and slave3)
  • Set up passwordless SSH between the machines
  • Set up the NTP service
  • Set the hostnames
  • Disable the firewall
  • Fill in the hosts file
  • Install the ntp and net-tools packages (a quick health-check sketch follows this list)
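
Before going further, it is worth confirming these prerequisites from one place. A minimal check sketch, assuming root SSH access, the four hostnames above, and zkServer.sh on the PATH:

# Hostname, JDK and firewall state on all four machines
for h in master slave1 slave2 slave3; do
  ssh -o BatchMode=yes root@$h 'hostname; java -version 2>&1 | head -1; systemctl is-active firewalld'
done

# ZooKeeper should already be running on the quorum nodes
for h in master slave2 slave3; do
  ssh root@$h 'zkServer.sh status'
done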

III. Configure HA Hadoop

hadoop-env.sh:

export JAVA_HOME=/usr/java/jdk1.8.0_171

export HDFS_NAMENODE_USER="root"
export HDFS_DATANODE_USER="root"
export YARN_RESOURCEMANAGER_USER="root"
export YARN_NODEMANAGER_USER="root"
export HDFS_ZKFC_USER="root"
export HDFS_JOURNALNODE_USER="root"
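
A note on the *_USER exports: when the daemons are run as root, Hadoop 3.x refuses to start without them, failing with an error along the lines of:

ERROR: Attempting to operate on hdfs namenode as root
ERROR: but there is no HDFS_NAMENODE_USER defined. Aborting operation.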

core-site.xml

<configuration>
    <!-- Point the default FS at the HDFS nameservice (hacluster, defined in hdfs-site.xml) -->
    <property>
        <name>fs.defaultFS</name>
        <value>hdfs://hacluster</value>
    </property>

    <!-- Hadoop temporary directory -->
    <property>
        <name>hadoop.tmp.dir</name>
        <value>/usr/hadoop/hadoop-3.1.2/hdfs/tmp</value>
    </property>

    <!-- ZooKeeper quorum address -->
	<property>
        <name>ha.zookeeper.quorum</name>
        <value>master:2181,slave2:2181,slave3:2181</value>
    </property>

    <!-- Session timeout for Hadoop's ZooKeeper connection. 10000 ms is the
         Hadoop default; values as low as 1000 ms risk spurious failovers. -->
    <property>
        <name>ha.zookeeper.session-timeout.ms</name>
        <value>10000</value>
        <description>ms</description>
    </property>
	
    <!-- Read/write buffer size: 131072 bytes = 128 KB -->
    <property>
        <name>io.file.buffer.size</name>
        <value>131072</value>
    </property>
	
    <!-- Checkpoint triggers (legacy fs.checkpoint.* names): every 60 s,
         or once 64 MB of edits have accumulated -->
    <property>
        <name>fs.checkpoint.period</name>
        <value>60</value>
    </property>

    <property>
        <name>fs.checkpoint.size</name>
        <value>67108864</value>
    </property>
	
</configuration>
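
With core-site.xml in place, the effective values can be sanity-checked before anything is started, using the stock getconf tool:

hdfs getconf -confKey fs.defaultFS          # expect hdfs://hacluster
hdfs getconf -confKey ha.zookeeper.quorum   # expect master:2181,slave2:2181,slave3:2181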

hdfs-site.xml

<configuration>

    <!-- Replication factor -->
    <property>
        <name>dfs.replication</name>
        <value>3</value>
    </property>

    <!-- Local storage directories for NameNode metadata and DataNode blocks -->
    <property>
        <name>dfs.namenode.name.dir</name>
        <value>file:/usr/hadoop/hadoop-3.1.2/hdfs/name</value>
    </property>
    <property>
        <name>dfs.datanode.data.dir</name>
        <value>file:/usr/hadoop/hadoop-3.1.2/hdfs/data</value>
    </property>

    <property>
        <name>dfs.webhdfs.enabled</name>
        <value>true</value>
    </property>

    <property>
        <name>dfs.nameservices</name>
        <value>hacluster</value>
    </property>

    <!-- hacluster has two NameNodes: nn1 and nn2 -->
    <property>
        <name>dfs.ha.namenodes.hacluster</name>
        <value>nn1,nn2</value>
    </property> 

    <!-- RPC address of nn1 -->
    <property>
        <name>dfs.namenode.rpc-address.hacluster.nn1</name>
        <value>master:9000</value>
    </property>

    <!-- HTTP address of nn1 -->
    <property>
        <name>dfs.namenode.http-address.hacluster.nn1</name>
        <value>master:9870</value>
    </property>
	
    <!-- RPC address of nn2 -->
    <property>
        <name>dfs.namenode.rpc-address.hacluster.nn2</name>
        <value>slave1:9000</value>
    </property>
	
    <!-- HTTP address of nn2 -->
    <property>
        <name>dfs.namenode.http-address.hacluster.nn2</name>
        <value>slave1:9870</value>
    </property>

    <!-- Shared storage for the NameNodes' edits, i.e. the JournalNode list.
         URL format: qjournal://host1:port1;host2:port2;host3:port3/journalId
         The journalId is conventionally the nameservice name; the default port is 8485. -->
    <property>
        <name>dfs.namenode.shared.edits.dir</name>
        <value>qjournal://master:8485;slave2:8485;slave3:8485/hacluster</value>
    </property>

    <!-- Where each JournalNode keeps its data on local disk -->
    <property>
        <name>dfs.journalnode.edits.dir</name>
        <value>/usr/hadoop/hadoop-3.1.2/hdfs/journaldata</value>
    </property>
	
    <!-- Skip the NameNode's reverse-DNS check when DataNodes register -->
    <property>
        <name>dfs.namenode.datanode.registration.ip-hostname-check</name>
        <value>false</value>
    </property>
	

    <!-- Where the NameNode stores its edit log -->
    <property>
        <name>dfs.namenode.edits.dir</name>
        <value>/usr/hadoop/hadoop-3.1.2/hdfs/edits</value>
    </property>


    <!-- Enable automatic NameNode failover -->
    <property>
        <name>dfs.ha.automatic-failover.enabled</name>
        <value>true</value>
    </property>

    <!-- How clients locate the active NameNode during failover -->
    <property>
        <name>dfs.client.failover.proxy.provider.hacluster</name>
        <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
    </property>

    <!-- Fencing methods, one per line; shell(/bin/true) acts as a fallback -->
    <property>
        <name>dfs.ha.fencing.methods</name>
        <value>sshfence
        shell(/bin/true)</value>
    </property>

    <!-- sshfence requires passwordless SSH; this is the key it uses -->
    <property>
        <name>dfs.ha.fencing.ssh.private-key-files</name>
        <value>/root/.ssh/id_rsa</value>
    </property>


    <!-- Disable HDFS permission checking (dfs.permissions is the deprecated
         pre-2.x name; 3.x uses dfs.permissions.enabled) -->
    <property>
        <name>dfs.permissions.enabled</name>
        <value>false</value>
    </property>
	
    <!-- Timeout for sshfence SSH connections -->
    <property>
        <name>dfs.ha.fencing.ssh.connect-timeout</name>
        <value>30000</value>
    </property>

    <property>
        <name>ha.failover-controller.cli-check.rpc-timeout.ms</name>
        <value>60000</value>
    </property>
	
</configuration>
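
Since sshfence depends on the private key configured above, it pays to confirm each NameNode host can reach the other with exactly that key before relying on failover. A quick check, run from master (and mirrored on slave1):

ssh -i /root/.ssh/id_rsa -o BatchMode=yes root@slave1 hostname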

yarn-site.xml

<configuration>
    <!-- Enable ResourceManager HA -->
    <property>
        <name>yarn.resourcemanager.ha.enabled</name>
        <value>true</value>
    </property>

    <!-- ID under which this YARN cluster registers in ZooKeeper -->
    <property>
        <name>yarn.resourcemanager.cluster-id</name>
        <value>fzy_rm</value>
    </property>

    <!-- Logical IDs of the two ResourceManagers -->
    <property>
        <name>yarn.resourcemanager.ha.rm-ids</name>
        <value>rm1,rm2</value>
    </property>

    <!-- Host of each ResourceManager -->
    <property>
        <name>yarn.resourcemanager.hostname.rm1</name>
        <value>master</value>
    </property>

    <property>
        <name>yarn.resourcemanager.hostname.rm2</name>
        <value>slave1</value>
    </property>

    <!-- ZooKeeper quorum address -->
    <property>
        <name>yarn.resourcemanager.zk-address</name>
        <value>master:2181,slave2:2181,slave3:2181</value>
    </property>


    <!-- Enable ResourceManager state recovery -->
    <property>
        <name>yarn.resourcemanager.recovery.enabled</name>
        <value>true</value>
    </property>
	
    <!-- Auxiliary service NodeManagers run to serve MapReduce shuffle data -->
    <property>
        <name>yarn.nodemanager.aux-services</name>
        <value>mapreduce_shuffle</value>
    </property>
	
    <!-- Enable log aggregation -->
    <property>
        <name>yarn.log-aggregation-enable</name>
        <value>true</value>
    </property>
	
    <!-- Retain aggregated logs for one day (86400 seconds) -->
    <property>
        <name>yarn.log-aggregation.retain-seconds</name>
        <value>86400</value>
    </property>

    

    <!-- Store ResourceManager state in the ZooKeeper cluster -->
    <property>
        <name>yarn.resourcemanager.store.class</name>
        <value>org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore</value>
    </property>
	
    <!-- Web UI address of each ResourceManager -->
    <property>
        <name>yarn.resourcemanager.webapp.address.rm1</name>
        <value>master:8088</value>
    </property>
	
	<property>
        <name>yarn.resourcemanager.webapp.address.rm2</name>
        <value>slave1:8088</value>
    </property>
	
	
    <!-- Where the RM web UI links to for aggregated job logs -->
    <property>
        <name>yarn.log.server.url</name>
        <value>http://master:19888/jobhistory/logs</value>
    </property>
	
	
    <!-- Classpath handed to YARN applications; on this layout it matches what
         `hadoop classpath` prints (see the note after this file) -->
    <property>
        <name>yarn.application.classpath</name>
        <value>/usr/hadoop/hadoop-3.1.2/etc/hadoop:/usr/hadoop/hadoop-3.1.2/share/hadoop/common/lib/*:/usr/hadoop/hadoop-3.1.2/share/hadoop/common/*:/usr/hadoop/hadoop-3.1.2/share/hadoop/hdfs:/usr/hadoop/hadoop-3.1.2/share/hadoop/hdfs/lib/*:/usr/hadoop/hadoop-3.1.2/share/hadoop/hdfs/*:/usr/hadoop/hadoop-3.1.2/share/hadoop/mapreduce/lib/*:/usr/hadoop/hadoop-3.1.2/share/hadoop/mapreduce/*:/usr/hadoop/hadoop-3.1.2/share/hadoop/yarn/lib/*:/usr/hadoop/hadoop-3.1.2/share/hadoop/yarn/*</value>
    </property>
	
</configuration>
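
Rather than hand-maintaining that long yarn.application.classpath value, it can be regenerated on any node with the stock client and pasted into the property:

hadoop classpath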

mapred-site.xml

<configuration>
    <!-- Run MapReduce on YARN -->
    <property>
        <name>mapreduce.framework.name</name>
        <value>yarn</value>
    </property>


    <!-- Application classpath, mirroring the yarn-site.xml value -->
    <property>
        <name>yarn.application.classpath</name>
        <value>/usr/hadoop/hadoop-3.1.2/etc/hadoop:/usr/hadoop/hadoop-3.1.2/share/hadoop/common/lib/*:/usr/hadoop/hadoop-3.1.2/share/hadoop/common/*:/usr/hadoop/hadoop-3.1.2/share/hadoop/hdfs:/usr/hadoop/hadoop-3.1.2/share/hadoop/hdfs/lib/*:/usr/hadoop/hadoop-3.1.2/share/hadoop/hdfs/*:/usr/hadoop/hadoop-3.1.2/share/hadoop/mapreduce/lib/*:/usr/hadoop/hadoop-3.1.2/share/hadoop/mapreduce/*:/usr/hadoop/hadoop-3.1.2/share/hadoop/yarn/lib/*:/usr/hadoop/hadoop-3.1.2/share/hadoop/yarn/*</value>
    </property>


    <!-- JobHistory server RPC and web UI addresses -->
    <property>
        <name>mapreduce.jobhistory.address</name>
        <value>master:10020</value>
    </property>
    <property>
        <name>mapreduce.jobhistory.webapp.address</name>
        <value>master:19888</value>
    </property>

    <!-- Staging directory in HDFS for MapReduce application masters -->
    <property>
        <name>yarn.app.mapreduce.am.staging-dir</name>
        <value>/user</value>
    </property>

</configuration>
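
All five files must be identical across the cluster. One way to push them out from master (a sketch; the paths follow this guide's layout):

for h in slave1 slave2 slave3; do
  scp /usr/hadoop/hadoop-3.1.2/etc/hadoop/*-site.xml \
      /usr/hadoop/hadoop-3.1.2/etc/hadoop/hadoop-env.sh \
      root@$h:/usr/hadoop/hadoop-3.1.2/etc/hadoop/
done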

IV. Startup Order

1. Make sure ZooKeeper is up

2. Start the JournalNodes (on master, slave2 and slave3)

hadoop-daemon.sh start journalnode
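
A quick way to confirm each JournalNode actually came up (run on master, slave2 and slave3):

jps | grep JournalNode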

3. Format the NameNode (first time only; run on master)

hdfs namenode -format

4. Start the HDFS daemons

start-dfs.sh

5. Start the YARN daemons

start-yarn.sh

6. Sync the NameNode metadata from master to slave1 (run on slave1)

hdfs namenode -bootstrapStandby

7. Format the ZKFC state in ZooKeeper (on either NameNode host)

hdfs zkfc -formatZK
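
If the format succeeded, a /hadoop-ha znode for the nameservice now exists in ZooKeeper, which can be checked from any quorum node:

zkCli.sh -server master:2181
# inside the zk shell:
ls /hadoop-ha          # expect [hacluster]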

8. Start the ZKFCs (on master and slave1)

sbin/hadoop-daemon.sh start zkfc
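
hadoop-daemon.sh still works in 3.1.2 but prints a deprecation warning; the 3.x-native form is:

hdfs --daemon start zkfc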

9. Start the JobHistory server (on master)

mr-jobhistory-daemon.sh start historyserver
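
When everything is up, jps on each host should show roughly the following (assuming DataNodes and NodeManagers on all four machines, as in the plan above):

master: NameNode, DFSZKFailoverController, ResourceManager, JournalNode, QuorumPeerMain, DataNode, NodeManager, JobHistoryServer
slave1: NameNode, DFSZKFailoverController, ResourceManager, DataNode, NodeManager
slave2: JournalNode, QuorumPeerMain, DataNode, NodeManager
slave3: JournalNode, QuorumPeerMain, DataNode, NodeManager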

V. Verification

Open http://master:9870 and http://slave1:9870: one NameNode should report itself as active and the other as standby. The ResourceManager UI is at http://master:8088 (the standby RM redirects to the active one).
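
The same check works from the command line, using the stock admin tools and the IDs defined earlier:

hdfs haadmin -getServiceState nn1    # active or standby
hdfs haadmin -getServiceState nn2
yarn rmadmin -getServiceState rm1
yarn rmadmin -getServiceState rm2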

Absolutely perfect~

VI. One Last Word

If something that should be running isn't, try stopping the affected service and starting it again~
Then round it off by running an example job: https://blog.csdn.net/qq_44172732/article/details/108321966

Reposted from blog.csdn.net/qq_44172732/article/details/108323246