Setting up a Hadoop cluster on CentOS 7 in Docker

Source: https://blog.csdn.net/u013719012/article/details/82759192
		$ docker pull centos
		
		## List the downloaded images
		$ docker image ls -a
		
		## Start a container from the image and enter it
		$ docker run -it --name centos-1 centos /bin/bash
		
		## Set the time zone
		$ ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
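		## A quick check that the link took effect; date should now report China Standard Time
		$ date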
		## Install net-tools (provides ifconfig)
		$ yum install net-tools.x86_64
		
		## Check the network interface info
		$ ifconfig
		
		## Change the root password
		$ passwd

		## Install the OpenSSH server
		$ yum install openssh-server -y
		
		## Generate the SSH host keys
		$ ssh-keygen -t rsa -f /etc/ssh/ssh_host_rsa_key
		$ ssh-keygen -t ecdsa -f /etc/ssh/ssh_host_ecdsa_key
		$ ssh-keygen -t ed25519 -f /etc/ssh/ssh_host_ed25519_key
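		Optionally, sshd's test mode can confirm that the config file and the newly generated host keys are valid:
		$ /usr/sbin/sshd -t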
		
		## Write a startup script
		$ vi run.sh
		Contents:
			#!/bin/bash
			## Run sshd in the foreground so the container keeps running
			/usr/sbin/sshd -D
			
		## Make the script executable
		$ chmod +x ./run.sh	
			
		## Exit, then stop the container and commit it as an image
		$ exit
		$ docker container stop centos-1
		$ docker commit centos-1 centos_me:v1.0
		
		## Start a new container (the second command below is the better option)
		$ docker run --name hadoop0 -d -p 5001:22 -p 50070:50070 -p 9000:9000 centos_me:v1.0 /usr/sbin/sshd -D
		## Using this command instead saves you from configuring hosts later:
		docker run --name hadoop0 -d -p 5001:22 -p 50070:50070 -p 9000:9000 --add-host hadoop2:172.17.0.2 --add-host hadoop3:172.17.0.3 --add-host hadoop4:172.17.0.4 centos_me:v1.0 /usr/sbin/sshd -D
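		If you used the --add-host variant, you can confirm the entries were written into the container's /etc/hosts (run from the host):
		$ docker exec hadoop0 cat /etc/hosts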
		$ docker container ls
		$ docker exec -it hadoop0 bash

		## Install and configure Java and Hadoop: extract the JDK to /usr/local/jdk and Hadoop to /usr/local/hadoop to match the paths below, then edit /etc/profile
		$ vim /etc/profile
			
		export JAVA_HOME=/usr/local/jdk/jdk1.8.0_181
		export HADOOP_HOME=/usr/local/hadoop/hadoop-2.9.1
		export PATH=$JAVA_HOME/bin:$JAVA_HOME/jre/bin:$HADOOP_HOME/sbin:$HADOOP_HOME/lib/native:$HADOOP_HOME/bin:$PATH
		export CLASSPATH=.:$JAVA_HOME/lib/tools.jar:$JAVA_HOME/lib/dt.jar
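		The new variables only apply to new login shells; to use them in the current shell and confirm the installs (assuming the JDK and Hadoop were extracted to the paths above):
		$ source /etc/profile
		$ java -version
		$ hadoop version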
		
		## Set up passwordless SSH login
		$ ssh-keygen -t rsa -P '' -f ~/.ssh/id_rsa
		$ cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
		$ chmod 0600 ~/.ssh/authorized_keys
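		Since sshd is already running as the container's main process, a passwordless login to localhost should now succeed (the first connection will still ask to confirm the host key until ssh_config is adjusted further below):
		$ ssh localhost hostname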
		
		## In the Hadoop config directory /usr/local/hadoop/hadoop-2.9.1/etc/hadoop, edit the slaves file and write:
			hadoop3
			hadoop4
		
		## In hadoop-env.sh under etc/hadoop, insert a line before the existing export JAVA_HOME
			JAVA_HOME=/usr/local/jdk/jdk1.8.0_181/
	## Edit the configuration files
		1. core-site.xml
		<configuration>
			<property>
				<name>fs.default.name</name>
				<value>hdfs://hadoop2:9000</value>
			</property>
			<property>
				<name>io.file.buffer.size</name>
				<value>131072</value>
			</property>
			<property>
				<name>hadoop.tmp.dir</name>
				<value>/home/hadoop/tmp</value>
				<description>A base for other temporary directories.</description>
			</property>
		</configuration>
		
		2. hdfs-site.xml
		<configuration>
			<property>
				<name>dfs.namenode.secondary.http-address</name>
				<value>hadoop2:9001</value>
				<description>Secondary NameNode web address for checking HDFS status</description>
			</property>
			<property>
				<name>dfs.namenode.name.dir</name>
				<value>/home/hadoop/dfs/name</value>
			</property>
			<property>
				<name>dfs.datanode.data.dir</name>
				<value>/home/hadoop/dfs/data</value>
			</property>
			<property>
				<name>dfs.replication</name>
				<value>2</value>
				<description>Each block is stored as 2 replicas</description>
			</property>
			<property>
				<name>dfs.webhdfs.enabled</name>
				<value>true</value>
			</property>
		</configuration>
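		Hadoop creates these directories itself when the NameNode is formatted and the DataNodes first start, but pre-creating them makes the layout explicit:
		$ mkdir -p /home/hadoop/dfs/name /home/hadoop/dfs/data /home/hadoop/tmp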
		
		3. yarn-site.xml
		<configuration>
			<!-- Site specific YARN configuration properties -->
			<property>
				<name>yarn.nodemanager.aux-services</name>
				<value>mapreduce_shuffle</value>
			</property>
			<property>
				<name>yarn.nodemanager.aux-services.mapreduce.shuffle.class</name>
				<value>org.apache.hadoop.mapred.ShuffleHandler</value>
			</property>
			<property>
				<name>yarn.resourcemanager.address</name>
				<value>hadoop2:8032</value>
			</property>
			<property>
				<name>yarn.resourcemanager.scheduler.address</name>
				<value>hadoop2:8030</value>
			</property>
			<property>
				<name>yarn.resourcemanager.resource-tracker.address</name>
				<value>hadoop2:8031</value>
			</property>
			<property>
				<name>yarn.resourcemanager.admin.address</name>
				<value>hadoop2:8033</value>
			</property>
			<property>
				<name>yarn.resourcemanager.webapp.address</name>
				<value>hadoop2:8088</value>
			</property>
			<property>
				<name>yarn.nodemanager.resource.memory-mb</name>
				<value>1024</value>
			</property>
			<property>
				<name>yarn.nodemanager.resource.cpu-vcores</name>
				<value>1</value>
			</property>
		</configuration>
		
		4. mapred-site.xml
		<configuration>
			<property>
				<name>mapreduce.framework.name</name>
				<value>yarn</value>
			</property>
			<property>
				<name>mapreduce.jobhistory.address</name>
				<value>hadoop2:10020</value>
			</property>
			<property>
				<name>mapreduce.jobhistory.webapp.address</name>
				<value>hadoop2:19888</value>
			</property>
		</configuration>
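		The JobHistory addresses above only take effect if the history server is running; in Hadoop 2.x it is started separately, e.g. (with $HADOOP_HOME/sbin on the PATH as configured earlier):
		mr-jobhistory-daemon.sh start historyserver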

## Configure the SSH client
vi /etc/ssh/ssh_config and add:

		StrictHostKeyChecking no
		UserKnownHostsFile /dev/null
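These two options stop ssh from prompting for host-key confirmation, which is what lets the Hadoop start scripts reach the slave nodes unattended. A quick check should now log straight in with no yes/no prompt:

		ssh 127.0.0.1 hostname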

## Stop the container
docker stop hadoop0
## Commit the image
docker commit hadoop0 centos_me:v1.0


### Startup
Launch three containers from the new image:
docker run --name hadoop2 --add-host hadoop2:172.17.0.2 --add-host hadoop3:172.17.0.3 --add-host hadoop4:172.17.0.4 -d -p 5002:22 -p 9870:9870 -p 8088:8088 -p 19888:19888 -p 50070:50070 -p 9000:9000 centos_me:v1.0 /usr/sbin/sshd -D

docker run --name hadoop3 --add-host hadoop2:172.17.0.2 --add-host hadoop3:172.17.0.3 --add-host hadoop4:172.17.0.4 -d -p 5003:22 centos_me:v1.0 /usr/sbin/sshd -D

docker run --name hadoop4 --add-host hadoop2:172.17.0.2 --add-host hadoop3:172.17.0.3 --add-host hadoop4:172.17.0.4 -d -p 5004:22 centos_me:v1.0 /usr/sbin/sshd -D
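All three containers should now be running; check the names and port mappings from the host:

docker ps --format '{{.Names}}: {{.Ports}}'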


### Install on each machine:
yum -y install openssh-clients
yum -y install which
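
With the nodes up, the usual final steps (a sketch; hostnames and paths as configured above) are to format HDFS once on the master and start the daemons:

docker exec -it hadoop2 bash
hdfs namenode -format
start-dfs.sh
start-yarn.sh
jps   ## expect NameNode, SecondaryNameNode, and ResourceManager on hadoop2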