Hadoop + HBase 集群安装笔记(尚未整理,先记录于此)
ssh 免登陆的配置
# Generate an RSA keypair non-interactively with an empty passphrase.
# The original bare `ssh-keygen -t rsa` prompts for a file path and a
# passphrase; an empty passphrase is what passwordless cluster login
# requires anyway, so answer both up front.
ssh-keygen -t rsa -N '' -f ~/.ssh/id_rsa
cd ~/.ssh
[root@master .ssh]# ll
总用量 8
-rw------- 1 root root 1679 1月 2 21:04 id_rsa
-rw-r--r-- 1 root root 393 1月 2 21:04 id_rsa.pub
# Run on slave1: send its public key to master under a distinct name.
# (The original notes had bare "slave1"/"slave2" marker lines, which are
# not valid commands if the notes are replayed as a script.)
scp ~/.ssh/id_rsa.pub root@master:~/.ssh/id_rsa.pub_slave1
# Run on slave2: likewise.
scp ~/.ssh/id_rsa.pub root@master:~/.ssh/id_rsa.pub_slave2
[root@master .ssh]# ll
总用量 20
-rw------- 1 root root 1679 1月 2 21:04 id_rsa
-rw-r--r-- 1 root root 393 1月 2 21:04 id_rsa.pub
-rw-r--r-- 1 root root 393 1月 2 21:24 id_rsa.pub_slave1
-rw-r--r-- 1 root root 393 1月 2 21:24 id_rsa.pub_slave2
-rw-r--r-- 1 root root 808 1月 2 21:11 known_hosts
# On master: merge all three public keys into a single authorized_keys.
cd ~/.ssh
touch authorized_keys
# sshd with StrictModes (the default) ignores an authorized_keys file that
# is group/world-writable — the original notes never set the mode, which is
# a classic cause of "passwordless login still asks for a password".
chmod 600 authorized_keys
cat ~/.ssh/id_rsa.pub >> authorized_keys
cat ~/.ssh/id_rsa.pub_slave1 >> authorized_keys
cat ~/.ssh/id_rsa.pub_slave2 >> authorized_keys
# Show the merged result for a quick sanity check (expect three ssh-rsa lines).
cat authorized_keys
[root@master .ssh]# cat authorized_keys
ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA29JCTGZjQBQUubsyMeL0rCRsRLlDWi7FqQTyp5/u+3WtS2Sg8psi/k3B+okQ6rVx6fYaUvF7TsBzjZv51ru+3Utlh56XuxL+gtrVr4KV1St/1DLuOpAvHImEbRIQxzAedaxY8PLSScVnQCoU5T9XYXdjb/z3AcvXC8Kr5GtTwGEgxndmbdmKKw0+VBboJNjNZ1chCfCpdJdZv5DK7a6uUhElXC/60+/OkM0C6lKi1/UVYdwN+A3Lch8OjfcX3iABWLjf8g2Z+tuHyDJ74XGSkkTRmgUBG1zFobnYgfnBk3LmRRtMK6yY6gsI9sYAEJs9LvP72PsXv08EcHG9qGmUNQ== root@master
ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAkV6o6cLTfUclAQpC1oJRJi+0xtX6Bx3rLlEi/nZtw6CkpGkh3JIq5MJ9A7lSfVIcLTjv6i1MovrPc9wEbVao89E5l1gUBf8ZXcoDIg6y1UknRpdkfft348/NmIg858QAENXT3XVCDN4HviIM2kpdli5r+PLbis0XFdv5Coetx/bSHkak8v0rkQSIjMrQI6ClVI3sjOfZSYwT9C3TfNK0DI7hCyqHZnAuRS52v7ntti39KOnDmCPoBf+Aat4uv5zm3KmWG43qY+fFqsBiwCnT+7omKrdQSdhhosbZsPrc7oPJ0f2snyJDWalnZx9TFYOMoXlktiZZadFo41JEGJufiQ== root@slave1
ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAxQswKavM4rPd5GLwKNpOzNhjKiw8JOd0x2EPJJ0SayV5C+/99htVQ7e6tWNaMoHhO4JiWQXnkMK7PHd8J0EyI+IzfPTcjJOMlp2oduivPknp8THMjjYdVq+NhkybNeaF9/ZCZc7/S6x/hU+Q5nPxev9glBNeZtEFiOrSV0wPeKwykBUDPJItNZNwSFJedfxtNz+LC7TPkK+EwuEnLxZz03EEWYGv8105LROdHzuPnwSXooyXHEfNGmrGkeAeUC8ghKxOiUBXvAngjXZMtQIJlmiho3b5YD1Nfawl4/RIvFz+3R+KRFbVm/AULN5z375ekLk/w6SGjgmDsY71QrE2RQ== root@slave2
# The copied key files are plain files; `rm -f` suffices (`-r` was unnecessary).
rm -f ~/.ssh/id_rsa.pub_slave1
rm -f ~/.ssh/id_rsa.pub_slave2
# Push the merged authorized_keys to both slaves so every host trusts
# every other host (overwrites any existing authorized_keys on the slaves).
scp ~/.ssh/authorized_keys root@slave1:~/.ssh/authorized_keys
scp ~/.ssh/authorized_keys root@slave2:~/.ssh/authorized_keys
zookeeper 的安装
# Unpack ZooKeeper and create its config from the shipped sample.
cd /usr/local/software
tar -zxvf zookeeper-3.4.7.tar.gz -C /usr/local
cd /usr/local/zookeeper-3.4.7/conf
cp zoo_sample.cfg zoo.cfg
# A config file only needs to be readable; 777 (world-writable) is insecure.
chmod 644 zoo.cfg
# -p makes the directory creation safe to re-run.
mkdir -p /usr/local/zookeeper-3.4.7/dataDir
cd /usr/local/zookeeper-3.4.7/dataDir
# Use `>` (overwrite), not `>>` (append): re-running the original
# `touch myid; echo 1 >> myid` would leave multiple "1" lines in myid.
# This id is 1 on master; it must be 2 on slave1 and 3 on slave2.
echo 1 > myid
# 编辑 zoo.cfg: 将 dataDir 修改为下面的路径, 并在文件末尾追加三行 server 配置
dataDir=/usr/local/zookeeper-3.4.7/dataDir
server.1=master:2888:3888
server.2=slave1:2888:3888
server.3=slave2:2888:3888
# Copy the whole configured ZooKeeper tree to both slaves.
scp -r /usr/local/zookeeper-3.4.7 root@slave1:/usr/local/
scp -r /usr/local/zookeeper-3.4.7 root@slave2:/usr/local/
# NOTE(review): after the copy, dataDir/myid still contains master's "1" on
# both slaves; it must be edited to 2 on slave1 and 3 on slave2 or the
# ensemble will not form — confirm this step was done on each slave.
# Start the local ZooKeeper server (run on every node).
/usr/local/zookeeper-3.4.7/bin/zkServer.sh start
# Pasted usage output from running zkServer.sh without a valid argument:
Usage: /usr/local/zookeeper-3.4.7/bin/zkServer.sh {start|start-foreground|stop|restart|status|upgrade|print-cmd}
# `status` reports whether this node is currently leader or follower.
/usr/local/zookeeper-3.4.7/bin/zkServer.sh status
配置文件如下
# Pasted zoo.cfg as used on this cluster.
# The number of milliseconds of each tick
tickTime=2000
# The number of ticks that the initial
# synchronization phase can take
initLimit=10
# The number of ticks that can pass between
# sending a request and getting an acknowledgement
syncLimit=5
# the directory where the snapshot is stored.
# do not use /tmp for storage, /tmp here is just
# example sakes.
# Must match the dataDir created earlier (it holds the myid file).
dataDir=/usr/local/zookeeper-3.4.7/dataDir
# the port at which the clients will connect
clientPort=2181
# the maximum number of client connections.
# increase this if you need to handle more clients
#maxClientCnxns=60
#
# Be sure to read the maintenance section of the
# administrator guide before turning on autopurge.
#
# http://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_maintenance
#
# The number of snapshots to retain in dataDir
#autopurge.snapRetainCount=3
# Purge task interval in hours
# Set to "0" to disable auto purge feature
#autopurge.purgeInterval=1
# server.N=host:peerPort:leaderElectionPort — one entry per ensemble member.
# NOTE(review): server.1 is 0.0.0.0 here but was "master" earlier in these
# notes; binding the local member's own entry to 0.0.0.0 is a known
# workaround when ZooKeeper cannot bind its own hostname — confirm each
# node uses 0.0.0.0 only for its own server id.
server.1=0.0.0.0:2888:3888
server.2=slave1:2888:3888
server.3=slave2:2888:3888
hadoop 集群的安装
# Unpack Hadoop and register HADOOP_HOME system-wide via /etc/profile.
tar -zxvf /usr/local/software/hadoop-2.6.0.tar.gz -C /usr/local
vi /etc/profile
HADOOP_HOME=/usr/local/hadoop-2.6.0
export HADOOP_HOME
# Reload the profile so HADOOP_HOME is visible in the current shell.
source /etc/profile
# mapred-site.xml ships only as a template; activate it by copying.
cp /usr/local/hadoop-2.6.0/etc/hadoop/mapred-site.xml.template /usr/local/hadoop-2.6.0/etc/hadoop/mapred-site.xml
# Set JAVA_HOME explicitly at the top of hadoop-env.sh (remote daemons do
# not inherit the login-shell environment):
/usr/local/hadoop-2.6.0/etc/hadoop/hadoop-env.sh 这个文件的第一行
export JAVA_HOME=/usr/local/jdk1.8.0_65
# Same for yarn-env.sh:
/usr/local/hadoop-2.6.0/etc/hadoop/yarn-env.sh 这个文件的第一行
export JAVA_HOME=/usr/local/jdk1.8.0_65
# Format the NameNode — run ONCE, on master only; re-formatting wipes
# the HDFS namespace metadata.
/usr/local/hadoop-2.6.0/bin/hdfs namenode -format
# Reports live DataNodes; only meaningful after the cluster is started.
/usr/local/hadoop-2.6.0/bin/hdfs dfsadmin -report
# Distribute the configured Hadoop tree to both slaves.
scp -r /usr/local/hadoop-2.6.0 root@slave1:/usr/local
scp -r /usr/local/hadoop-2.6.0 root@slave2:/usr/local
# The slaves file lists the worker hosts start-all.sh will start daemons on.
cat /usr/local/hadoop-2.6.0/etc/hadoop/slaves
[root@master ~]# cat /usr/local/hadoop-2.6.0/etc/hadoop/slaves
slave1
slave2
# The slaves need the same JDK at the same path, plus the updated profile.
scp -r /usr/local/jdk1.8.0_65 root@slave1:/usr/local
scp -r /usr/local/jdk1.8.0_65 root@slave2:/usr/local
scp /etc/profile root@slave1:/etc/profile
scp /etc/profile root@slave2:/etc/profile
# start-all.sh/stop-all.sh are deprecated in Hadoop 2.x in favour of
# start-dfs.sh + start-yarn.sh, but still work.
/usr/local/hadoop-2.6.0/sbin/start-all.sh
/usr/local/hadoop-2.6.0/sbin/stop-all.sh
# NameNode web UI (default port 50070 in Hadoop 2.x):
http://192.168.140.128:50070
# Create the target directory FIRST — the original notes ran copyFromLocal
# into /test before creating /test, which fails when the directory does
# not yet exist. `-p` makes the mkdir safe to re-run.
/usr/local/hadoop-2.6.0/bin/hadoop fs -mkdir -p hdfs://master:9000/test
# Upload a sample file into HDFS and list it back as a smoke test.
/usr/local/hadoop-2.6.0/bin/hadoop fs -copyFromLocal /usr/local/jdk1.8.0_65/src.zip hdfs://master:9000/test/src.zip
/usr/local/hadoop-2.6.0/bin/hadoop fs -ls /
/usr/local/hadoop-2.6.0/bin/hadoop fs -ls /test
# 报错的解决(临时处理: 清空 slaves 文件并同步到各节点)
# Workaround: replace the slaves file with an empty one and push it out.
# slaves is a plain file, so `rm -f` suffices, and 644 is the right mode
# for a config file (the original 777 was unnecessary and world-writable).
# NOTE(review): an empty slaves file means start-all.sh launches no remote
# DataNodes/NodeManagers at all — confirm this is really the intended fix
# for the error rather than masking it.
rm -f /usr/local/hadoop-2.6.0/etc/hadoop/slaves
touch /usr/local/hadoop-2.6.0/etc/hadoop/slaves
chmod 644 /usr/local/hadoop-2.6.0/etc/hadoop/slaves
scp /usr/local/hadoop-2.6.0/etc/hadoop/slaves root@slave1:/usr/local/hadoop-2.6.0/etc/hadoop/slaves
scp /usr/local/hadoop-2.6.0/etc/hadoop/slaves root@slave2:/usr/local/hadoop-2.6.0/etc/hadoop/slaves
[root@master hadoop]# cat /etc/hosts
192.168.140.128 master
192.168.140.129 slave1
192.168.140.130 slave2
关闭ipv6
# Files edited to disable IPv6 (CentOS 6 style). NOTE(review): the exact
# edits are not recorded in these notes — presumably IPV6INIT=no in the
# interface config, NETWORKING_IPV6=no in network, and
# "options ipv6 disable=1" in dist.conf; confirm what was actually changed.
vi /etc/sysconfig/network-scripts/ifcfg-eth0
vi /etc/sysconfig/network
vi /etc/modprobe.d/dist.conf
hbase 的安装
# Unpack HBase, then inspect which Hadoop jars it bundles — HBase 1.1.2
# ships hadoop-2.5.1 jars that will be swapped for the cluster's 2.6.0 ones.
cd /usr/local/software
tar -zxvf hbase-1.1.2-bin.tar.gz -C /usr/local
cd /usr/local/hbase-1.1.2/lib
ls -l | grep hadoop
[root@master lib]# ls -l | grep hadoop
-rw-r--r-- 1 root root 17041 8月 27 10:57 hadoop-annotations-2.5.1.jar
-rw-r--r-- 1 root root 52449 8月 27 10:57 hadoop-auth-2.5.1.jar
-rw-r--r-- 1 root root 2557 8月 27 10:59 hadoop-client-2.5.1.jar
-rw-r--r-- 1 root root 2962685 8月 27 10:57 hadoop-common-2.5.1.jar
-rw-r--r-- 1 root root 7095230 8月 27 10:59 hadoop-hdfs-2.5.1.jar
-rw-r--r-- 1 root root 491409 8月 27 10:59 hadoop-mapreduce-client-app-2.5.1.jar
-rw-r--r-- 1 root root 662892 8月 27 10:59 hadoop-mapreduce-client-common-2.5.1.jar
-rw-r--r-- 1 root root 1498368 8月 27 10:57 hadoop-mapreduce-client-core-2.5.1.jar
-rw-r--r-- 1 root root 35733 8月 27 10:59 hadoop-mapreduce-client-jobclient-2.5.1.jar
-rw-r--r-- 1 root root 43642 8月 27 10:59 hadoop-mapreduce-client-shuffle-2.5.1.jar
-rw-r--r-- 1 root root 1649852 8月 27 10:57 hadoop-yarn-api-2.5.1.jar
-rw-r--r-- 1 root root 117982 8月 27 10:59 hadoop-yarn-client-2.5.1.jar
-rw-r--r-- 1 root root 1416427 8月 27 10:57 hadoop-yarn-common-2.5.1.jar
-rw-r--r-- 1 root root 242381 8月 27 10:59 hadoop-yarn-server-common-2.5.1.jar
-rw-r--r-- 1 root root 87662 8月 27 11:11 hbase-hadoop2-compat-1.1.2.jar
-rw-r--r-- 1 root root 35944 8月 27 11:11 hbase-hadoop-compat-1.1.2.jar
# Example: locate the 2.6.0 counterpart of a bundled 2.5.1 jar inside the
# Hadoop installation. The original note was a stray jar name plus a
# `find -name ...` with no search path (relying on GNU find's implicit "."),
# which fails on non-GNU find — give the path explicitly.
find /usr/local/hadoop-2.6.0/share/hadoop -name 'hadoop-annotations*.jar'
find /usr/local/hadoop-2.6.0/share/hadoop -name 'hadoop-yarn-server-common*.jar'
# Swap HBase's bundled hadoop-2.5.1 jars for the 2.6.0 jars shipped with
# the installed Hadoop so client and cluster library versions match.
# Replaces the original 13 hand-written cp/rm pairs with one loop; jars
# that have no 2.6.0 counterpart in the Hadoop tree (e.g. hadoop-client)
# are left in place with a warning, matching the original's effect.
hbase_lib=/usr/local/hbase-1.1.2/lib
hadoop_share=/usr/local/hadoop-2.6.0/share/hadoop
for old_jar in "$hbase_lib"/hadoop-*-2.5.1.jar; do
  [ -e "$old_jar" ] || continue          # glob matched nothing
  base=${old_jar##*/}                    # e.g. hadoop-hdfs-2.5.1.jar
  name=${base%-2.5.1.jar}                # e.g. hadoop-hdfs
  new_jar=$(find "$hadoop_share" -name "$name-2.6.0.jar" | head -n 1)
  if [ -n "$new_jar" ]; then
    cp "$new_jar" "$hbase_lib"/ && rm -f "$old_jar"
  else
    echo "WARN: no 2.6.0 replacement found for $base, keeping it" >&2
  fi
done
hbase 安装
# Local pid directory for the HBase daemons.
# NOTE(review): presumably referenced by HBASE_PID_DIR in hbase-env.sh —
# that setting is not shown in these notes; confirm.
mkdir /usr/local/hbase-1.1.2/pids
# Pre-create the HBase root directory in HDFS (matches hbase.rootdir below).
/usr/local/hadoop-2.6.0/bin/hadoop fs -mkdir hdfs://master:9000/hbase
<!-- hbase-site.xml for the fully-distributed cluster. -->
<configuration>
<!-- HBase data lives in HDFS under this path; the URI must match the
     NameNode address (hdfs://master:9000) used elsewhere in these notes. -->
<property>
<name>hbase.rootdir</name>
<value>hdfs://master:9000/hbase</value>
</property>
<!-- Local scratch directory on each node. -->
<property>
<name>hbase.tmp.dir</name>
<value>/usr/local/hbase-1.1.2/tmp</value>
</property>
<!-- true = fully distributed mode (HMaster and RegionServers in
     separate JVMs across hosts) rather than standalone. -->
<property>
<name>hbase.cluster.distributed</name>
<value>true</value>
</property>
<!-- The external ZooKeeper ensemble installed earlier in these notes. -->
<property>
<name>hbase.zookeeper.quorum</name>
<value>master,slave1,slave2</value>
</property>
</configuration>
# Distribute the patched HBase tree to both slaves.
scp -r /usr/local/hbase-1.1.2 root@slave1:/usr/local
scp -r /usr/local/hbase-1.1.2 root@slave2:/usr/local
# Lines added to /etc/profile so the hbase command is on PATH everywhere.
export HBASE_HOME=/usr/local/hbase-1.1.2
export PATH=$PATH:$HBASE_HOME/bin
scp /etc/profile root@slave1:/etc/profile
scp /etc/profile root@slave2:/etc/profile
# Keep the HBase config identical on every node.
scp /usr/local/hbase-1.1.2/conf/hbase-site.xml root@slave1:/usr/local/hbase-1.1.2/conf/hbase-site.xml
scp /usr/local/hbase-1.1.2/conf/hbase-site.xml root@slave2:/usr/local/hbase-1.1.2/conf/hbase-site.xml
# start-hbase.sh launches the local HMaster plus the remote RegionServers;
# stop-hbase.sh shuts the whole HBase cluster down.
/usr/local/hbase-1.1.2/bin/start-hbase.sh
/usr/local/hbase-1.1.2/bin/stop-hbase.sh
# HBase requires cluster clocks to be closely synchronized (RegionServers
# are rejected on large clock skew), so check the date and sync via NTP.
date -R
yum install -y ntpdate
ntpdate time.nist.gov
# Set the local timezone to Asia/Shanghai.
cp /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
ntpdate us.pool.ntp.org
# NOTE(review): presumably fixes a missing-htrace-class error when starting
# HBase against the swapped-in Hadoop 2.6 jars — confirm against the actual
# startup log before relying on this.
cp /usr/local/hadoop-2.6.0/share/hadoop/common/lib/htrace-core-3.0.4.jar /usr/local/hbase-1.1.2/lib
# Smoke-test via the HBase shell: create a table with two column families,
# insert one row, then read it back. (The lines below are typed inside the
# interactive shell, not in bash.)
/usr/local/hbase-1.1.2/bin/hbase shell
create 'emp','emp_no','emp_name'
list
describe 'emp'
put 'emp', 'row_7369', 'emp_no', '7369'
put 'emp', 'row_7369', 'emp_name','Jay'
get 'emp', 'row_7369'
启动
# Start order matters: ZooKeeper first (run this on EVERY node), then
# Hadoop (HDFS/YARN), then HBase — HBase depends on both being up.
/usr/local/zookeeper-3.4.7/bin/zkServer.sh start
/usr/local/hadoop-2.6.0/sbin/start-all.sh
/usr/local/hbase-1.1.2/bin/start-hbase.sh
停止
# Stop order is the reverse of the start order: HBase first, then Hadoop,
# then ZooKeeper (zkServer.sh stop on every node).
/usr/local/hbase-1.1.2/bin/stop-hbase.sh
/usr/local/hadoop-2.6.0/sbin/stop-all.sh
/usr/local/zookeeper-3.4.7/bin/zkServer.sh stop
猜你喜欢
转载自zhangyu84849467.iteye.com/blog/2268286
今日推荐
周排行