1. Notes before installing Hadoop
- Make sure the JDK is installed on every node:
[root@node01 ~]# java -version
- Set the hostname. On each of the three machines, edit the file below and fill in node01, node02, and node03 respectively:
[root@node01 ~]# vim /etc/hostname
- Add internal hostname mappings (on all nodes):
[root@node01 ~]# vi /etc/hosts
192.168.100.101 node01
192.168.100.102 node02
192.168.100.103 node03
- Disable the firewall (these are CentOS 6 commands; a CentOS 7 sketch follows this list)
Check firewall status: service iptables status
Stop the firewall: service iptables stop
Disable firewall start at boot: chkconfig iptables off
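The hostname and firewall commands above assume CentOS 6. A minimal sketch of the CentOS 7 / systemd equivalents, to be run on every node:
hostnamectl set-hostname node01    # persistent hostname change, no reboot required
systemctl status firewalld         # check firewall status
systemctl stop firewalld           # stop the firewall
systemctl disable firewalld        # keep it from starting at boot
After editing /etc/hosts, you can also confirm that each name resolves from every node:
for h in node01 node02 node03; do ping -c 1 $h; done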
2. Hadoop cluster installation
Download hadoop-2.6.0-cdh5.14.0:
Link: https://pan.baidu.com/s/1ONUiyBP0Fhu-9zc0wZIVtw
Extraction code: kfpf
Create the directories:
[root@node01 /]# mkdir -p /export/servers/
[root@node01 /]# mkdir -p /export/install/
[root@node01 /]# mkdir -p /export/soft/
Upload the downloaded hadoop-2.6.0-cdh5.14.0.tar.gz to /export/soft/ (rz below comes from the lrzsz package; any upload method works):
[root@node01 /]# cd /export/soft/
[root@node01 soft]# rz
Extract hadoop-2.6.0-cdh5.14.0 to /export/servers/:
[root@node01 /]# tar -zxvf /export/soft/hadoop-2.6.0-cdh5.14.0.tar.gz -C /export/servers/
Run the following on the first machine:
[root@node01 /]# cd /export/servers/hadoop-2.6.0-cdh5.14.0/bin/
[root@node01 bin]# ./hadoop checknative
If openssl shows false, install openssl-devel on all machines (a loop sketch follows):
[root@node01 bin]# yum -y install openssl-devel
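To avoid repeating the install on every machine by hand, a minimal sketch using a loop over ssh, assuming passwordless SSH from node01 to all three nodes is already configured:
for h in node01 node02 node03; do ssh $h "yum -y install openssl-devel"; done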
3. Edit core-site.xml
[root@node01 bin]# vim /export/servers/hadoop-2.6.0-cdh5.14.0/etc/hadoop/core-site.xml
# Insert the following inside the <configuration></configuration> tags
<!-- Default filesystem URI (the NameNode RPC address) -->
<property>
  <name>fs.defaultFS</name>
  <value>hdfs://node01:8020</value>
</property>
<!-- Base directory for Hadoop's temporary files -->
<property>
  <name>hadoop.tmp.dir</name>
  <value>/export/servers/hadoop-2.6.0-cdh5.14.0/hadoopDatas/tempDatas</value>
</property>
<!-- I/O buffer size; tune to your servers' performance in production -->
<property>
  <name>io.file.buffer.size</name>
  <value>4096</value>
</property>
<!-- Enable the HDFS trash so deleted data can be recovered; unit is minutes (10080 = 7 days) -->
<property>
  <name>fs.trash.interval</name>
  <value>10080</value>
</property>
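A quick sanity check that the edited file is still well-formed XML, assuming the libxml2 xmllint tool is installed (no output means it parses cleanly):
xmllint --noout /export/servers/hadoop-2.6.0-cdh5.14.0/etc/hadoop/core-site.xml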
4. Edit hdfs-site.xml
[root@node01 bin]# vim /export/servers/hadoop-2.6.0-cdh5.14.0/etc/hadoop/hdfs-site.xml
# Insert the following inside the <configuration></configuration> tags
<!-- Uncomment to control dynamic commissioning/decommissioning of cluster nodes -->
<!--
<property>
  <name>dfs.hosts</name>
  <value>/export/servers/hadoop-2.6.0-cdh5.14.0/etc/hadoop/accept_host</value>
</property>
<property>
  <name>dfs.hosts.exclude</name>
  <value>/export/servers/hadoop-2.6.0-cdh5.14.0/etc/hadoop/deny_host</value>
</property>
-->
<!-- SecondaryNameNode and NameNode web addresses -->
<property>
  <name>dfs.namenode.secondary.http-address</name>
  <value>node01:50090</value>
</property>
<property>
  <name>dfs.namenode.http-address</name>
  <value>node01:50070</value>
</property>
<!-- Where the NameNode stores metadata. In production, determine the disk mount points first, then separate multiple directories with commas -->
<property>
  <name>dfs.namenode.name.dir</name>
  <value>file:///export/servers/hadoop-2.6.0-cdh5.14.0/hadoopDatas/namenodeDatas</value>
</property>
<!-- Where DataNodes store block data. Same advice: one directory per disk mount, comma-separated -->
<property>
  <name>dfs.datanode.data.dir</name>
  <value>file:///export/servers/hadoop-2.6.0-cdh5.14.0/hadoopDatas/datanodeDatas</value>
</property>
<!-- NameNode edit log and SecondaryNameNode checkpoint directories -->
<property>
  <name>dfs.namenode.edits.dir</name>
  <value>file:///export/servers/hadoop-2.6.0-cdh5.14.0/hadoopDatas/dfs/nn/edits</value>
</property>
<property>
  <name>dfs.namenode.checkpoint.dir</name>
  <value>file:///export/servers/hadoop-2.6.0-cdh5.14.0/hadoopDatas/dfs/snn/name</value>
</property>
<property>
  <name>dfs.namenode.checkpoint.edits.dir</name>
  <value>file:///export/servers/hadoop-2.6.0-cdh5.14.0/hadoopDatas/dfs/nn/snn/edits</value>
</property>
<!-- Number of block replicas -->
<property>
  <name>dfs.replication</name>
  <value>2</value>
</property>
<!-- Disable HDFS permission checking (fine for a test cluster, not for production) -->
<property>
  <name>dfs.permissions</name>
  <value>false</value>
</property>
<!-- Block size in bytes: 134217728 = 128 MB -->
<property>
  <name>dfs.blocksize</name>
  <value>134217728</value>
</property>
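You can confirm that Hadoop picks these values up with hdfs getconf; for example, the following should print 2:
[root@node01 bin]# /export/servers/hadoop-2.6.0-cdh5.14.0/bin/hdfs getconf -confKey dfs.replication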
5. Edit mapred-site.xml
[root@node01 bin]# vim /export/servers/hadoop-2.6.0-cdh5.14.0/etc/hadoop/mapred-site.xml
# This file starts out empty; once it is open, paste in the following
<configuration>
  <!-- Run MapReduce on YARN -->
  <property>
    <name>mapreduce.framework.name</name>
    <value>yarn</value>
  </property>
  <!-- Enable uber mode: small jobs run inside the ApplicationMaster's JVM -->
  <property>
    <name>mapreduce.job.ubertask.enable</name>
    <value>true</value>
  </property>
  <!-- JobHistory server RPC and web UI addresses -->
  <property>
    <name>mapreduce.jobhistory.address</name>
    <value>node01:10020</value>
  </property>
  <property>
    <name>mapreduce.jobhistory.webapp.address</name>
    <value>node01:19888</value>
  </property>
</configuration>
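If your tarball ships only a template (stock Apache Hadoop provides mapred-site.xml.template rather than mapred-site.xml), copy it into place before editing:
cp /export/servers/hadoop-2.6.0-cdh5.14.0/etc/hadoop/mapred-site.xml.template /export/servers/hadoop-2.6.0-cdh5.14.0/etc/hadoop/mapred-site.xml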
6. Edit yarn-site.xml
[root@node01 bin]# vim /export/servers/hadoop-2.6.0-cdh5.14.0/etc/hadoop/yarn-site.xml
# Insert the following inside the <configuration></configuration> tags
<!-- Site specific YARN configuration properties -->
<!-- Host that runs the ResourceManager -->
<property>
  <name>yarn.resourcemanager.hostname</name>
  <value>node01</value>
</property>
<!-- Auxiliary service required by the MapReduce shuffle -->
<property>
  <name>yarn.nodemanager.aux-services</name>
  <value>mapreduce_shuffle</value>
</property>
7. Edit the slaves file
[root@node01 bin]# vim /export/servers/hadoop-2.6.0-cdh5.14.0/etc/hadoop/slaves
# List the hostnames of your cluster, one per line; here they are node01, node02, node03
node01
node02
node03
8. Create the data directories (these must match the paths configured in core-site.xml and hdfs-site.xml)
mkdir -p /export/servers/hadoop-2.6.0-cdh5.14.0/hadoopDatas/tempDatas
mkdir -p /export/servers/hadoop-2.6.0-cdh5.14.0/hadoopDatas/namenodeDatas
mkdir -p /export/servers/hadoop-2.6.0-cdh5.14.0/hadoopDatas/datanodeDatas
mkdir -p /export/servers/hadoop-2.6.0-cdh5.14.0/hadoopDatas/dfs/nn/edits
mkdir -p /export/servers/hadoop-2.6.0-cdh5.14.0/hadoopDatas/dfs/snn/name
mkdir -p /export/servers/hadoop-2.6.0-cdh5.14.0/hadoopDatas/dfs/nn/snn/edits
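Equivalently, the six mkdir calls can be collapsed into one with shell brace expansion:
mkdir -p /export/servers/hadoop-2.6.0-cdh5.14.0/hadoopDatas/{tempDatas,namenodeDatas,datanodeDatas,dfs/nn/edits,dfs/snn/name,dfs/nn/snn/edits}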
9. Distribute the installation to the other machines
[root@node01 bin]# cd /export/servers/
[root@node01 servers]# scp -r hadoop-2.6.0-cdh5.14.0/ node02:$PWD
[root@node01 servers]# scp -r hadoop-2.6.0-cdh5.14.0/ node03:$PWD
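A quick check that the copies landed, assuming passwordless SSH:
[root@node01 servers]# ssh node02 "ls /export/servers/"
[root@node01 servers]# ssh node03 "ls /export/servers/"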
10. Configure Hadoop environment variables
[root@node01 servers]# vim /etc/profile.d/hadoop.sh
export HADOOP_HOME=/export/servers/hadoop-2.6.0-cdh5.14.0
export PATH=$PATH:$HADOOP_HOME/bin
export PATH=$PATH:$HADOOP_HOME/sbin
[root@node01 servers]# source /etc/profile
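These variables now exist only on node01. A sketch for making the hadoop/hdfs commands available on the other nodes as well, then verifying the PATH works:
[root@node01 servers]# scp /etc/profile.d/hadoop.sh node02:/etc/profile.d/
[root@node01 servers]# scp /etc/profile.d/hadoop.sh node03:/etc/profile.d/
[root@node01 servers]# hadoop version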
11. Format the NameNode
[root@node01 servers]# cd /export/servers/hadoop-2.6.0-cdh5.14.0/
[root@node01 hadoop-2.6.0-cdh5.14.0]# bin/hdfs namenode -format
Format only once. Each format generates a new clusterID on the NameNode, and if it no longer matches the ID stored by the DataNodes, they will fail to start.
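If a reformat is ever unavoidable (for example after a botched first attempt), a sketch of a clean re-format, assuming passwordless SSH and the environment variables from step 10; note that this permanently deletes all HDFS data:
stop-all.sh
for h in node01 node02 node03; do ssh $h "rm -rf /export/servers/hadoop-2.6.0-cdh5.14.0/hadoopDatas"; done
for h in node01 node02 node03; do ssh $h "mkdir -p /export/servers/hadoop-2.6.0-cdh5.14.0/hadoopDatas/{tempDatas,namenodeDatas,datanodeDatas,dfs/nn/edits,dfs/snn/name,dfs/nn/snn/edits}"; done
hdfs namenode -format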
12. Start the cluster
Start everything with one command: start-all.sh
Stop everything: stop-all.sh
(start-all.sh and stop-all.sh are deprecated in Hadoop 2.x in favor of start-dfs.sh plus start-yarn.sh, but they still work.)
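Note that start-all.sh does not start the MapReduce JobHistory server configured in step 5; start it separately on node01:
[root@node01 ~]# mr-jobhistory-daemon.sh start historyserver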
Open a browser and visit:
- http://192.168.100.101:50070/dfshealth.html#tab-overview
You can also verify with the jps command. The master node (node01) should show NameNode, SecondaryNameNode, and ResourceManager, plus DataNode and NodeManager since node01 is also listed in slaves (and JobHistoryServer if you started it); the other nodes should show DataNode and NodeManager.
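Finally, a small smoke test against HDFS to confirm the cluster accepts reads and writes:
[root@node01 ~]# hdfs dfs -mkdir /test
[root@node01 ~]# hdfs dfs -put /etc/hosts /test
[root@node01 ~]# hdfs dfs -ls /test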