CDH 5.16.1 Deployment

1. Change the hostname (hadoop003 shown as the example):
[root@localhost ~]# echo "hadoop003" > /etc/hostname
[root@localhost ~]# vim /etc/hostname    # double-check the entry
[root@localhost ~]# echo "192.168.244.111 hadoop001" >> /etc/hosts
[root@localhost ~]# echo "192.168.244.112 hadoop002" >> /etc/hosts
[root@localhost ~]# echo "192.168.244.113 hadoop003" >> /etc/hosts
Then reboot so the new hostname takes effect.
[root@localhost ~]# scp /etc/hosts hadoop002:/etc/
[root@localhost ~]# scp /etc/hosts hadoop003:/etc/
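A quick sanity check after distributing /etc/hosts (a minimal sketch; hostnamectl is the CentOS 7 alternative to editing /etc/hostname and needs no reboot):
hostnamectl set-hostname hadoop003   # per-node alternative to editing /etc/hostname
hostname                             # should print the node's own name
ping -c 1 hadoop001                  # every name should resolve via /etc/hosts
ping -c 1 hadoop002
ping -c 1 hadoop003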


3. Node initialization
   3.1 hosts file
    192.168.244.111 hadoop001
    192.168.244.112 hadoop002
    192.168.244.113 hadoop003
   3.2 Firewall
     Disable the firewall before deployment; once the cluster is up, turn it back on and open the ports listed in the CDH web UI (see the sketch after 3.3).
    systemctl stop firewalld
    systemctl disable firewalld
    iptables -F    # flush any existing iptables rules
   3.3 SELinux
    vi /etc/selinux/config and set: SELINUX=disabled
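    A minimal sketch of the two follow-up actions above (7180 is only an example port; the full list comes from the CM UI after deployment):
    setenforce 0     # turn SELinux off for the running session; the config change above needs a reboot
    getenforce       # should report Permissive or Disabled
    # after the cluster is deployed, re-enable firewalld and allow the required ports, e.g.:
    systemctl start firewalld
    firewall-cmd --permanent --add-port=7180/tcp
    firewall-cmd --reload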

4. Time
   Set the system time:
  [root@hadoop001 ~]# date -s 10/16/2019
  [root@hadoop001 ~]# date -s 15:55:30
  [root@hadoop001 ~]# clock -w    # write the system time back to the BIOS/CMOS clock; hwclock -r reads the BIOS time
    1. Run date on every node and confirm the times match.
    2. timedatectl list-timezones    # list the available time zones
    3. timedatectl set-timezone Asia/Shanghai    # set this zone if the node is not already on Asia/Shanghai
-------- Or install the ntp service
yum install -y ntp    # mount the install media first if needed (mount /dev/sr0 /mnt); install on every node
Design: hadoop001 is the time-sync master; hadoop002 and hadoop003 sync from it.
Edit the ntp configuration on hadoop001: vim /etc/ntp.conf
server 127.127.1.0 iburst    # local clock
restrict 192.168.244.0 mask 255.255.255.0 nomodify notrap
The other clients need no changes.
Sync against hadoop001:
[root@hadoop003 ~]# which ntpdate
/usr/sbin/ntpdate
[root@hadoop003 ~]# /usr/sbin/ntpdate hadoop001
If this fails with "the NTP socket is in use, exiting":
Make sure lsof is installed, then run ps -ef | grep ntpd to look for a running ntpd and note its PID.
If that shows nothing, use lsof -i:123 to find the process holding port 123, kill -9 it, then run /usr/sbin/ntpdate hadoop001 again.
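To keep hadoop002/003 in sync after the first ntpdate, a common follow-up is to run ntpd on the master and a periodic ntpdate on the slaves (a sketch; the 30-minute cron schedule is just an example):
# on hadoop001: serve time to the LAN
systemctl enable ntpd && systemctl start ntpd
# on hadoop002/003: cron a periodic ntpdate against hadoop001 (note: crontab - replaces the existing crontab)
echo "*/30 * * * * /usr/sbin/ntpdate hadoop001 > /dev/null 2>&1" | crontab -
# on hadoop001: verify peers and offsets
ntpq -p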

5. JDK (must be installed on all 3 nodes)
Note:
   1. Install under /usr/java.   2. Fix the owner and group after extracting.
     mkdir /usr/java
     tar -xzvf jdk-8u45-linux-x64.gz -C /usr/java    # after extracting, the directory's owner/group are wrong; reset them with the next command
     chown -R root:root /usr/java/jdk1.8.0_45
Append to /etc/profile:
#env
export JAVA_HOME=/usr/java/jdk1.8.0_45
export PATH=$JAVA_HOME/bin:$PATH
[root@hadoop001 cdh5.16.1]#scp -r /usr/java hadoop002:/usr/
[root@hadoop001 cdh5.16.1]#scp -r /usr/java hadoop003:/usr/
[root@hadoop001 cdh5.16.1]# scp /etc/profile hadoop002:/etc/
[root@hadoop001 cdh5.16.1]# scp /etc/profile hadoop003:/etc/
Then run source /etc/profile on every node.
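A quick check on each node after sourcing the profile (a minimal sketch):
echo $JAVA_HOME    # should print /usr/java/jdk1.8.0_45
which java         # should point under $JAVA_HOME/bin
java -version      # should report 1.8.0_45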

6. Install MySQL
[root@hadoop001 local]# rpm -qa | grep -i mysql      # remove any packages this finds
[root@hadoop001 local]# rpm -qa | grep -i mariadb    # remove any packages this finds
[root@hadoop001 cdh5.16.1]# tar xzvf /root/cdh5.16.1/mysql-5.7.11-linux-glibc2.5-x86_64.tar.gz -C /usr/local/
[root@hadoop001 local]# cd /usr/local
[root@hadoop001 local]# mv mysql-5.7.11-linux-glibc2.5-x86_64 mysql
[root@hadoop001 local]# mkdir mysql/arch mysql/data mysql/tmp mysql/relay_log    # relay_log is referenced by the replication settings in my.cnf below
Edit my.cnf:
vim /etc/my.cnf
[client]
port            = 3306
socket          = /usr/local/mysql/data/mysql.sock
default-character-set=utf8mb4
[mysqld]
port            = 3306
socket          = /usr/local/mysql/data/mysql.sock
skip-slave-start
skip-external-locking
key_buffer_size = 256M
sort_buffer_size = 2M
read_buffer_size = 2M
read_rnd_buffer_size = 4M
query_cache_size= 32M
max_allowed_packet = 16M
myisam_sort_buffer_size=128M
tmp_table_size=32M
table_open_cache = 512
thread_cache_size = 8
wait_timeout = 86400
interactive_timeout = 86400
max_connections = 600
# Try number of CPU's*2 for thread_concurrency
#thread_concurrency = 32 
#isolation level and default engine 
default-storage-engine = INNODB
transaction-isolation = READ-COMMITTED
server-id  = 1739
basedir     = /usr/local/mysql
datadir     = /usr/local/mysql/data
pid-file     = /usr/local/mysql/data/hostname.pid
#open performance schema
log-warnings
sysdate-is-now
binlog_format = ROW
log_bin_trust_function_creators=1
log-error  = /usr/local/mysql/data/hostname.err
log-bin = /usr/local/mysql/arch/mysql-bin
expire_logs_days = 7
innodb_write_io_threads=16
relay-log  = /usr/local/mysql/relay_log/relay-log
relay-log-index = /usr/local/mysql/relay_log/relay-log.index
relay_log_info_file= /usr/local/mysql/relay_log/relay-log.info
log_slave_updates=1
gtid_mode=OFF
enforce_gtid_consistency=OFF
# slave
slave-parallel-type=LOGICAL_CLOCK
slave-parallel-workers=4
master_info_repository=TABLE
relay_log_info_repository=TABLE
relay_log_recovery=ON
#other logs
#general_log =1
#general_log_file  = /usr/local/mysql/data/general_log.err
#slow_query_log=1
#slow_query_log_file=/usr/local/mysql/data/slow_log.err
#for replication slave
sync_binlog = 500
#for innodb options 
innodb_data_home_dir = /usr/local/mysql/data/
innodb_data_file_path = ibdata1:1G;ibdata2:1G:autoextend
innodb_log_group_home_dir = /usr/local/mysql/arch
innodb_log_files_in_group = 4
innodb_log_file_size = 1G
innodb_log_buffer_size = 200M
#adjust innodb_buffer_pool_size to fit production needs
innodb_buffer_pool_size = 2G
#innodb_additional_mem_pool_size = 50M #deprecated in 5.6
tmpdir = /usr/local/mysql/tmp
innodb_lock_wait_timeout = 1000
#innodb_thread_concurrency = 0
innodb_flush_log_at_trx_commit = 2
innodb_locks_unsafe_for_binlog=1
#innodb io features: add for mysql5.5.8
performance_schema
innodb_read_io_threads=4
innodb-write-io-threads=4
innodb-io-capacity=200
#purge threads change default(0) to 1 for purge
innodb_purge_threads=1
innodb_use_native_aio=on
#case-sensitive file names and separate tablespace
innodb_file_per_table = 1
lower_case_table_names=1
[mysqldump]
quick
max_allowed_packet = 128M
[mysql]
no-auto-rehash
default-character-set=utf8mb4
[mysqlhotcopy]
interactive-timeout
[myisamchk]
key_buffer_size = 256M
sort_buffer_size = 256M
read_buffer = 2M
write_buffer = 2M

[root@hadoop001 local]# groupadd -g 101 dba
[root@hadoop001 local]# useradd -u 514 -g dba -G root -d /usr/local/mysql mysqladmin
[root@hadoop001 local]# cp /etc/skel/.* /usr/local/mysql
[root@hadoop001 local]# vi mysql/.bashrc
Append:
export MYSQL_BASE=/usr/local/mysql
export PATH=${MYSQL_BASE}/bin:$PATH
unset USERNAME
#stty erase ^H
# set umask to 022
umask 022
PS1=`uname -n`":"'$USER'":"'$PWD'":>"; export PS1
[root@hadoop001 local]# chown  mysqladmin:dba /etc/my.cnf 
[root@hadoop001 local]# chmod  640 /etc/my.cnf  
[root@hadoop001 local]# chown -R mysqladmin:dba /usr/local/mysql
[root@hadoop001 local]# chmod -R 755 /usr/local/mysql 
[root@hadoop001 local]# cd /usr/local/mysql
#copy the service script into init.d and rename it to mysql
[root@hadoop001 mysql]# cp support-files/mysql.server /etc/rc.d/init.d/mysql 
#make it executable
[root@hadoop001 mysql]# chmod +x /etc/rc.d/init.d/mysql
#remove any existing service registration
[root@hadoop001 mysql]# chkconfig --del mysql
#register the service
[root@hadoop001 mysql]# chkconfig --add mysql
[root@hadoop001 mysql]# chkconfig --level 345 mysql on
#install libaio and initialize the MySQL data directory
[root@hadoop001 mysql]# yum -y install libaio
[root@hadoop001 mysql]# sudo su - mysqladmin
hadoop001:mysqladmin:/usr/local/mysql:>bin/mysqld --defaults-file=/etc/my.cnf  --user=mysqladmin  --basedir=/usr/local/mysql/  --datadir=/usr/local/mysql/data/ --initialize
#read the temporary root password
hadoop001:mysqladmin:/usr/local/mysql/data:>cat hostname.err | grep password
Example output: 2019-10-16T11:07:21.328047Z 1 [Note] A temporary password is generated for root@localhost: d5d&5Mj_%2ze
#start mysqld
/usr/local/mysql/bin/mysqld_safe --defaults-file=/etc/my.cnf &
#log in and change the root password
mysql -uroot -p
alter user root@localhost identified by 'root123';
GRANT ALL PRIVILEGES ON *.* TO 'root'@'%' IDENTIFIED BY 'root123' ;
flush privileges;
#restart MySQL
hadoop001:mysqladmin:/usr/local/mysql/data:>service mysql restart
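A quick way to confirm the new password and the remote grant took effect (a sketch):
mysql -uroot -proot123 -e "select version();"
netstat -nltp | grep 3306    # mysqld should be listening on 3306 (ss -nltp works on a minimal install)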

7. Pre-create the databases and users
create database cmf default character set utf8;
GRANT ALL PRIVILEGES ON cmf.* TO 'cmf'@'%' IDENTIFIED BY 'cmf123';
create database amon default character set utf8;
GRANT ALL PRIVILEGES ON amon.* TO 'amon'@'%' IDENTIFIED BY 'amon123';
create database hive default character set utf8;
GRANT ALL PRIVILEGES ON hive.* TO 'hive'@'%' IDENTIFIED BY 'hive123';
flush privileges;
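To confirm the databases and accounts exist and can log in (a sketch):
mysql -uroot -proot123 -e "show databases; select user,host from mysql.user;"
mysql -ucmf -pcmf123 cmf -e "select 1;"    # repeat for amon and hive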

8. Deploy the MySQL JDBC jar
The cmf and amon services both run on hadoop001, so that node needs the driver jar:
[root@hadoop001 cdh5.16.1]# mkdir -p /usr/share/java
[root@hadoop001 cdh5.16.1]# cp mysql-connector-java-5.1.47.jar /usr/share/java/mysql-connector-java.jar    # the copy MUST be renamed to drop the version number
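A quick check that the driver sits under the exact name CM looks for (a sketch):
ls -lh /usr/share/java/mysql-connector-java.jar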

9. Offline deployment of CM (all 3 nodes): the agent is configured on hadoop001-003, the server on hadoop001 only.
 [root@hadoop001 cm-5.16.1]# scp cloudera-manager-centos7-cm5.16.1_x86_64.tar.gz  hadoop002:/root/cdh5.16.1/
 [root@hadoop001 cm-5.16.1]# scp cloudera-manager-centos7-cm5.16.1_x86_64.tar.gz hadoop003:/root/cdh5.16.1/
 [root@hadoop001 cm-5.16.1]# cd /root/cdh5.16.1
 [root@hadoop001 cm-5.16.1]# mkdir /opt/cloudera-manager
 [root@hadoop002 cm-5.16.1]# mkdir /opt/cloudera-manager
 [root@hadoop003 cm-5.16.1]# mkdir /opt/cloudera-manager
 [root@hadoop001 cm-5.16.1]#tar -zxvf cloudera-manager-centos7-cm5.16.1_x86_64.tar.gz -C /opt/cloudera-manager
 [root@hadoop002 cm-5.16.1]#tar -zxvf cloudera-manager-centos7-cm5.16.1_x86_64.tar.gz -C /opt/cloudera-manager
 [root@hadoop003 cm-5.16.1]#tar -zxvf cloudera-manager-centos7-cm5.16.1_x86_64.tar.gz -C /opt/cloudera-manager
    9.1 Agent configuration
[root@hadoop001 cm-5.16.1]# cd /opt/cloudera-manager/cm-5.16.1/etc/cloudera-scm-agent/
[root@hadoop001 cloudera-scm-agent]# vim config.ini    # set server_host=hadoop001
[root@hadoop002 cdh5.16.1]# cd /opt/cloudera-manager/cm-5.16.1/etc/cloudera-scm-agent/
[root@hadoop002 cloudera-scm-agent]# vim config.ini    # set server_host=hadoop001
[root@hadoop003 cdh5.16.1]# cd /opt/cloudera-manager/cm-5.16.1/etc/cloudera-scm-agent/
[root@hadoop003 cloudera-scm-agent]# vim config.ini    # set server_host=hadoop001
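The same edit can be scripted on each node instead of using vim (a sketch; config.ini and the server_host key are exactly those referenced above):
sed -i 's/^server_host=.*/server_host=hadoop001/' /opt/cloudera-manager/cm-5.16.1/etc/cloudera-scm-agent/config.ini
grep ^server_host /opt/cloudera-manager/cm-5.16.1/etc/cloudera-scm-agent/config.ini    # verify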
    9.2 Server configuration (hadoop001 only)
[root@hadoop001 cloudera-scm-server]# vim /opt/cloudera-manager/cm-5.16.1/etc/cloudera-scm-server/db.properties
Set the following to match the cmf database created in step 7: com.cloudera.cmf.db.type=mysql, com.cloudera.cmf.db.host=hadoop001, com.cloudera.cmf.db.name=cmf, com.cloudera.cmf.db.user=cmf, com.cloudera.cmf.db.password=cmf123, com.cloudera.cmf.db.setupType=EXTERNAL (skip setupType if the key is not present in this version).
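The resulting db.properties would look roughly like this (a sketch, matching the database and account created in step 7):
com.cloudera.cmf.db.type=mysql
com.cloudera.cmf.db.host=hadoop001
com.cloudera.cmf.db.name=cmf
com.cloudera.cmf.db.user=cmf
com.cloudera.cmf.db.password=cmf123
com.cloudera.cmf.db.setupType=EXTERNAL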
   9.3 Create the default user (cloudera-scm)
[root@hadoop001 cloudera-scm-server]# pwd
/opt/cloudera-manager/cm-5.16.1/etc/cloudera-scm-server
[root@hadoop001 cloudera-scm-server]# useradd --system --home=/opt/cloudera-manager/cm-5.16.1/run/cloudera-scm-server/ --no-create-home --shell=/bin/false cloudera-scm
[root@hadoop002 cloudera-scm-server]# useradd --system --home=/opt/cloudera-manager/cm-5.16.1/run/cloudera-scm-server/ --no-create-home --shell=/bin/false cloudera-scm
[root@hadoop003 cloudera-scm-server]# useradd --system --home=/opt/cloudera-manager/cm-5.16.1/run/cloudera-scm-server/ --no-create-home --shell=/bin/false cloudera-scm
[root@hadoop001 cloudera-scm-server]# chown -R cloudera-scm:cloudera-scm /opt/cloudera-manager    # fix the owner/group of the install directory
[root@hadoop002 cloudera-scm-server]# chown -R cloudera-scm:cloudera-scm /opt/cloudera-manager
[root@hadoop003 cloudera-scm-server]# chown -R cloudera-scm:cloudera-scm /opt/cloudera-manager

10. Offline parcel repository, hosted on hadoop001 (configure hadoop001 only)

mkdir -p /opt/cloudera/parcel-repo
[root@hadoop001 cdh5.16.1]# mv /root/cdh5.16.1/CDH-5.16.1-1.cdh5.16.1.p0.3-el7.parcel.sha1 /opt/cloudera/parcel-repo/CDH-5.16.1-1.cdh5.16.1.p0.3-el7.parcel.sha    # the file MUST be renamed from .sha1 to .sha
[root@hadoop001 cdh5.16.1]# mv manifest.json /opt/cloudera/parcel-repo/
[root@hadoop001 cdh5.16.1]# mv /root/cdh5.16.1/CDH-5.16.1-1.cdh5.16.1.p0.3-el7.parcel /opt/cloudera/parcel-repo/    # the parcel itself keeps its name
[root@hadoop001 parcel-repo]# chown -R cloudera-scm:cloudera-scm /opt/cloudera    # run this last, after all three files are in place
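Before the agents start distributing the parcel, it is worth confirming that the hash file matches the parcel (a sketch; the .sha file contains the parcel's SHA-1 checksum):
cd /opt/cloudera/parcel-repo
sha1sum CDH-5.16.1-1.cdh5.16.1.p0.3-el7.parcel     # compute the parcel's SHA-1
cat CDH-5.16.1-1.cdh5.16.1.p0.3-el7.parcel.sha     # the two values should match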

11. On every node, create the install directory for the big-data software and fix its owner/group
[root@hadoop001 cdh5.16.1]# mkdir -p /opt/cloudera/parcels
[root@hadoop002 cdh5.16.1]# mkdir -p /opt/cloudera/parcels
[root@hadoop003 cdh5.16.1]# mkdir -p /opt/cloudera/parcels
[root@hadoop001 cdh5.16.1]#  chown -R cloudera-scm:cloudera-scm /opt/cloudera
[root@hadoop002 cdh5.16.1]#  chown -R cloudera-scm:cloudera-scm /opt/cloudera
[root@hadoop003 cdh5.16.1]#  chown -R cloudera-scm:cloudera-scm /opt/cloudera

If the OS was a minimal install, these packages must also be installed: yum install -y psmisc and yum install -y perl.

12. Start the server and the agents
hadoop001:
/opt/cloudera-manager/cm-5.16.1/etc/init.d/cloudera-scm-server start

hadoop001-003:
/opt/cloudera-manager/cm-5.16.1/etc/init.d/cloudera-scm-agent start

Wait about a minute and watch the server log until port 7180 comes up:
/opt/cloudera-manager/cm-5.16.1/log/cloudera-scm-server/cloudera-scm-server.log

2019-05-11 12:47:50,705 INFO WebServerImpl:org.mortbay.log: Started [email protected]:7180
2019-05-11 12:47:50,705 INFO WebServerImpl:com.cloudera.server.cmf.WebServerImpl: Started Jetty server.
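A quick way to confirm that the server and agents are actually up (a sketch):
netstat -nltp | grep 7180             # the CM server should be listening (ss -nltp works on a minimal install)
ps -ef | grep cloudera-scm-agent      # run on each node; the agent process should be present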

Web UI: if the firewall is enabled, open port 7180, then browse to http://hadoop001:7180.


# CM's host inspector warns when vm.swappiness is high; lower it on every node:
echo 10 > /proc/sys/vm/swappiness
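The echo above only lasts until the next reboot; to make the setting persistent (a sketch):
echo "vm.swappiness = 10" >> /etc/sysctl.conf
sysctl -p                          # reload kernel parameters
cat /proc/sys/vm/swappiness        # should print 10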
  

   


Reprinted from: blog.csdn.net/Albert__Einstein/article/details/121325845