1.rz命令将hadoop压缩包上传至Linux服务器中
2.tar -zxvf hadoop-2.7.7.tar.gz(解压即可用)
3.将解压出来的hadoop移到想要放的位置 mv hadoop-2.7.7 ../software
4.配置六个文件
1.hadoop-env.sh
cd /opt/software/hadoop-2.7.7
vim etc/hadoop/hadoop-env.sh
export JAVA_HOME=/opt/software/jdk1.8 #修改25行
export HADOOP_CONF_DIR=/opt/software/hadoop-2.7.7/etc/hadoop #修改33行
2.core-site.xml
<configuration>
<!--指定HDFS中NameNode的地址-->
<property>
<name>fs.defaultFS</name>
<value>hdfs://主机名:9000</value>
</property>
<!--指定hadoop运行时产生文件的存储目录-->
<property>
<name>hadoop.tmp.dir</name>
<value>/opt/software/hadoop-2.7.7/tmp</value>
</property>
</configuration>
3.hdfs-site.xml
<configuration>
<!--指定HDFS副本的数量-->
<property>
<name>dfs.replication</name>
<value>1</value>
</property>
<property>
<name>dfs.permissions</name>
<value>false</value>
</property>
</configuration>
4.mapred-site.xml
<configuration>
<!--指定mr运行在yarn上-->
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
</configuration>
5.yarn-site.xml
<configuration>
<!--指定yarn的ResourceManager的地址-->
<property>
<name>yarn.resourcemanager.hostname</name>
<value>主机名</value>
</property>
<!--指定reduce获取数据的方式-->
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>
</configuration>
6.slaves
主机名
5.给hadoop配置环境变量
vim /etc/profile
export JAVA_HOME=/opt/software/jdk1.8
export HADOOP_HOME=/opt/software/hadoop-2.7.7
export PATH=$PATH:$JAVA_HOME/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin
source /etc/profile
6.修改主机映射
vim /etc/hosts
192.168.2.121 主机名
7.格式化文件系统
hadoop namenode -format
8.启动hadoop
start-all.sh
stop-all.sh(关闭hadoop)