hadoop3.1.2部署


参考:

部署
https://www.cnblogs.com/yinzhengjie/articles/10693555.html
官网:
http://hadoop.apache.org/docs/r3.1.2/hadoop-project-dist/hadoop-common/ClusterSetup.html

高可用:
https://www.cnblogs.com/yinzhengjie/p/10698719.html
2.0和3.0区别:
https://www.bilibili.com/video/av34395604/


centos7基础安装:
https://www.cnblogs.com/hongfeng2019/p/11353249.html

安装JDK:
https://www.cnblogs.com/hongfeng2019/p/11331688.html

下载hadoop-3.1.2
wget http://mirror.bit.edu.cn/apache/hadoop/common/hadoop-3.1.2/hadoop-3.1.2.tar.gz

安装ansible:
yum repolist
yum info ansible
yum -y install ansible

vim /etc/ansible/hosts

[all]
Fengfeng-dr-algo1
Fengfeng-dr-algo2
Fengfeng-dr-algo3
Fengfeng-dr-algo4

[namenode]
Fengfeng-dr-algo1

[datanode]
Fengfeng-dr-algo2
Fengfeng-dr-algo3
Fengfeng-dr-algo4

测试:
ansible all -m ping

#jps的软连接
ansible all -m shell -a 'ln -s /usr/lib/jvm/java-1.8.0-openjdk-1.8.0.222.b10-0.el7_6.x86_64/bin/jps /usr/local/bin/'

解压配置文件到指定目录并配置环境变量
tar -zxf hadoop-3.1.2.tar.gz -C /hongfeng/software/

mkdir -pv /data/hadoop/{hdfs,yarn,mapreduce}

vim /etc/profile
export HADOOP_HOME=/hongfeng/software/hadoop-3.1.2
export PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin

vi /hongfeng/software/hadoop-3.1.2/etc/hadoop/hadoop-env.sh
export JAVA_HOME=/usr/lib/jvm/java-1.8.0-openjdk-1.8.0.222.b10-0.el7_6.x86_64
export HDFS_NAMENODE_USER=root
export HDFS_DATANODE_USER=root
export HDFS_SECONDARYNAMENODE_USER=root
export HDFS_NAMENODE_OPTS="-XX:+UseParallelGC -Xmx4g"

编辑hadoop相关配置文件:

vim /hongfeng/software/hadoop-3.1.2/etc/hadoop/core-site.xml

<configuration>
<property>
<name>fs.defaultFS</name>
<value>hdfs://Fengfeng-dr-algo1:9820</value>
</property>
<property>
<name>hadoop.tmp.dir</name>
<value>/data/hadoop/hdfs</value>
</property>
<property>
<name>fs.trash.interval</name>
<value>1440</value>
</property>
</configuration>

vim /hongfeng/software/hadoop-3.1.2/etc/hadoop/hdfs-site.xml
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>

<configuration>
<property>
<name>dfs.replication</name>
<value>3</value>
</property>
<property>
<name>dfs.namenode.http-address</name>
<value>Fengfeng-dr-algo1:50070</value>
</property>
<property>
<name>dfs.namenode.secondary.http-address</name>
<value>Fengfeng-dr-algo4:9868</value>
</property>
</configuration>

vim /hongfeng/software/hadoop-3.1.2/etc/hadoop/workers
Fengfeng-dr-algo1
Fengfeng-dr-algo2
Fengfeng-dr-algo3
Fengfeng-dr-algo4

vim /hongfeng/software/hadoop-3.1.2/etc/hadoop/yarn-env.sh
export JAVA_HOME=/usr/lib/jvm/java-1.8.0-openjdk-1.8.0.222.b10-0.el7_6.x86_64
export YARN_RESOURCEMANAGER_USER=root
# 注意: Hadoop 3.x 中 HADOOP_SECURE_DN_USER 已废弃, 由 HDFS_DATANODE_SECURE_USER 取代,
# 且仅在以安全模式(特权端口)启动DataNode时才需要设置, 普通部署可省略:
# export HDFS_DATANODE_SECURE_USER=hdfs
export YARN_NODEMANAGER_USER=root


vim /hongfeng/software/hadoop-3.1.2/etc/hadoop/yarn-site.xml
<configuration>
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>
<property>
<name>yarn.resourcemanager.hostname</name>
<value>Fengfeng-dr-algo2</value>
</property>
<property>
<name>yarn.resourcemanager.address</name>
<value>Fengfeng-dr-algo2:8032</value>
</property>
<property>
<name>yarn.resourcemanager.scheduler.address</name>
<value>Fengfeng-dr-algo2:8030</value>
</property>
<property>
<name>yarn.resourcemanager.resource-tracker.address</name>
<value>Fengfeng-dr-algo2:8031</value>
</property>
<property>
<name>yarn.resourcemanager.admin.address</name>
<value>Fengfeng-dr-algo2:8033</value>
</property>
<property>
<name>yarn.resourcemanager.webapp.address</name>
<value>Fengfeng-dr-algo2:8088</value>
</property>
<property>
<name>yarn.log-aggregation-enable</name>
<value>true</value>
</property>
</configuration>

# Hadoop 3.x 已自带 mapred-site.xml, 无需再从 mapred-site.xml.template 复制
vim /hongfeng/software/hadoop-3.1.2/etc/hadoop/mapred-site.xml
<configuration>
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
<property>
<name>mapreduce.jobhistory.address</name>
<value>Fengfeng-dr-algo2:10020</value>
</property>
<property>
<name>mapreduce.jobhistory.webapp.address</name>
<value>Fengfeng-dr-algo2:19888</value>
</property>
<property>
<name>yarn.app.mapreduce.am.staging-dir</name>
<value>/history</value>
</property>
<property>
<name>mapreduce.jobhistory.done-dir</name>
<value>${yarn.app.mapreduce.am.staging-dir}/history/done</value>
</property>
<property>
<name>mapreduce.jobhistory.intermediate-done-dir</name>
<value>${yarn.app.mapreduce.am.staging-dir}/history/done_intermediate</value>
</property>
<property>
<name>mapreduce.map.log.level</name>
<value>ERROR</value>
</property>
<property>
<name>mapreduce.reduce.log.level</name>
<value>ERROR</value>
</property>
</configuration>


scp /etc/profile Fengfeng-dr-algo2:/etc/profile
scp /etc/profile Fengfeng-dr-algo3:/etc/profile
scp /etc/profile Fengfeng-dr-algo4:/etc/profile

scp -r /hongfeng/software/hadoop-3.1.2/ Fengfeng-dr-algo2:/hongfeng/software/
scp -r /hongfeng/software/hadoop-3.1.2/ Fengfeng-dr-algo3:/hongfeng/software/
scp -r /hongfeng/software/hadoop-3.1.2/ Fengfeng-dr-algo4:/hongfeng/software/

#格式化
hdfs namenode -format
[root@Fengfeng-dr-algo1 hadoop]# ll /data/hadoop/hdfs/
total 0
drwxr-xr-x. 3 root root 18 Aug 14 07:55 dfs
[root@Fengfeng-dr-algo1 hadoop]# ll /data/hadoop/hdfs/dfs/name/current/
total 16
-rw-r--r--. 1 root root 391 Aug 14 07:55 fsimage_0000000000000000000
-rw-r--r--. 1 root root 62 Aug 14 07:55 fsimage_0000000000000000000.md5
-rw-r--r--. 1 root root 2 Aug 14 07:55 seen_txid
-rw-r--r--. 1 root root 216 Aug 14 07:55 VERSION

启动集群:
start-dfs.sh

ansible all -m shell -a 'jps'

在Fengfeng-dr-algo2上启动yarn:
start-yarn.sh

ansible all -m shell -a 'jps'
Fengfeng-dr-algo3 | SUCCESS | rc=0 >>
20978 DataNode
21444 Jps
21295 NodeManager

Fengfeng-dr-algo2 | SUCCESS | rc=0 >>
24867 ResourceManager
24356 DataNode
25480 NodeManager
25676 Jps

Fengfeng-dr-algo4 | SUCCESS | rc=0 >>
24625 NodeManager
24180 DataNode
24292 SecondaryNameNode
24775 Jps

Fengfeng-dr-algo1 | SUCCESS | rc=0 >>
24641 NameNode
25700 Jps
24789 DataNode
25500 NodeManager

hdfs web:
http://Fengfeng-dr-algo1:50070
yarn web:
http://Fengfeng-dr-algo2:8088/cluster

猜你喜欢

转载自www.cnblogs.com/hongfeng2019/p/11346352.html