Installing a Big Data Lab Platform on CentOS 7

1 Network configuration

  1. Configure a static IP

    [root@hadoop1 network-scripts]# cat ifcfg-ens160
    TYPE=Ethernet
    PROXY_METHOD=none
    BROWSER_ONLY=no
    BOOTPROTO=none
    DEFROUTE=yes
    IPV4_FAILURE_FATAL=no
    IPV6INIT=yes
    IPV6_AUTOCONF=yes
    IPV6_DEFROUTE=yes
    IPV6_FAILURE_FATAL=no
    IPV6_ADDR_GEN_MODE=stable-privacy
    NAME=ens160
    UUID=b23db62d-b406-4bf6-b91a-93145cd8488a
    DEVICE=ens160
    ONBOOT=yes
    IPADDR=192.168.42.96
    PREFIX=24
    GATEWAY=192.168.42.1
    DNS1=192.168.35.2
    IPV6_PRIVACY=no
    
  2. Set the hostname and edit the hosts file (see the sketch after the table)

    hostnamectl
    
    hostnamectl set-hostname hadoop4
    
    cat /etc/hosts
    127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
    ::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
    
    192.168.42.96 hadoop1
    192.168.42.97 hadoop2
    192.168.42.98 hadoop3
    192.168.42.99 hadoop4
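
With the table above in place, apply the static IP and push the same hosts file to every node. A sketch, run from hadoop1 (host names taken from the table above):

    # Restart networking so the static IP in ifcfg-ens160 takes effect
    systemctl restart network
    ip addr show ens160    # confirm the 192.168.42.x address is assigned

    # Copy the finished /etc/hosts to the other nodes (password prompts
    # until the SSH keys from the next section are in place)
    for h in hadoop2 hadoop3 hadoop4; do
        scp /etc/hosts $h:/etc/hosts
    done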
    

2 Passwordless SSH: run these on every machine so that all nodes can log in to each other

ssh-keygen -t rsa           # generate the key pair
ssh-copy-id -i hadoop4      # distribute the public key
ssh hadoop4                 # log in without a password
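
Since every node must reach every other one, a short loop saves repetition; a sketch to run on each node (host list from the table above):

    ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa     # non-interactive key generation
    for h in hadoop1 hadoop2 hadoop3 hadoop4; do
        ssh-copy-id -i ~/.ssh/id_rsa.pub $h      # asks for the password once per host
    done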

3 Disable the firewall and SELinux

systemctl | grep firewalld
systemctl status firewalld
systemctl stop firewalld
systemctl disable firewalld

vim /etc/selinux/config
[root@hadoop4 ~]# cat /etc/selinux/config

# This file controls the state of SELinux on the system.
# SELINUX= can take one of these three values:
#     enforcing - SELinux security policy is enforced.
#     permissive - SELinux prints warnings instead of enforcing.
#     disabled - No SELinux policy is loaded.
SELINUX=disabled
# SELINUXTYPE= can take one of three values:
#     targeted - Targeted processes are protected,
#     minimum - Modification of targeted policy. Only selected processes are protected.
#     mls - Multi Level Security protection.
SELINUXTYPE=targeted
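
SELINUX=disabled only takes effect after a reboot; to stop enforcement immediately without rebooting, switch to permissive mode:

    setenforce 0
    getenforce    # should now print "Permissive"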

4 Install JDK 1.8

rpm -qa | grep jdk
java-1.7.0-openjdk-headless-1.7.0.171-2.6.13.2.el7.x86_64
java-1.7.0-openjdk-1.7.0.171-2.6.13.2.el7.x86_64
java-1.8.0-openjdk-headless-1.8.0.161-2.b14.el7.x86_64
java-1.8.0-openjdk-1.8.0.161-2.b14.el7.x86_64
copy-jdk-configs-3.3-2.el7.noarch

rpm -e --nodeps java-1.7.0-openjdk-headless-1.7.0.171-2.6.13.2.el7.x86_64 java-1.7.0-openjdk-1.7.0.171-2.6.13.2.el7.x86_64 java-1.8.0-openjdk-headless-1.8.0.161-2.b14.el7.x86_64 java-1.8.0-openjdk-1.8.0.161-2.b14.el7.x86_64 copy-jdk-configs-3.3-2.el7.noarch jdk1.8-1.8.0_271-fcs.x86_64

mv jdk-8u271-linux-x64.rpm /usr/local/bin/
rpm -ivh --prefix=/usr/local/jdk1.8 jdk-8u271-linux-x64.rpm

Note: if the RPM ignores --prefix (Oracle's JDK RPMs install under /usr/java by default), JAVA_HOME must point at the actual install location; the profile below assumes the default /usr/java path.

vim /etc/profile
export JAVA_HOME=/usr/java/jdk1.8.0_271-amd64
export PATH=$PATH:$JAVA_HOME/bin
export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar


[root@hadoop1 alternatives]# source /etc/profile
[root@hadoop1 alternatives]# java -version
java version "1.8.0_271"
Java(TM) SE Runtime Environment (build 1.8.0_271-b09)
Java HotSpot(TM) 64-Bit Server VM (build 25.271-b09, mixed mode)
[root@hadoop1 alternatives]#



5 NTP time synchronization

Point ntpd at the Alibaba Cloud server (server ntp.aliyun.com iburst) and comment out the other server entries:

[root@hadoop1 alternatives]# cat /etc/ntp.conf
# For more information about this file, see the man pages
# ntp.conf(5), ntp_acc(5), ntp_auth(5), ntp_clock(5), ntp_misc(5), ntp_mon(5).
driftfile /var/lib/ntp/drift
# Permit time synchronization with our time source, but do not
# permit the source to query or modify the service on this system.
restrict default nomodify notrap nopeer noquery
# Permit all access over the loopback interface.  This could
# be tightened as well, but to do so would effect some of
# the administrative functions.
restrict 127.0.0.1
restrict ::1
# Hosts on local network are less restricted.
#restrict 192.168.1.0 mask 255.255.255.0 nomodify notrap
# Use public servers from the pool.ntp.org project.
# Please consider joining the pool (http://www.pool.ntp.org/join.html).
server ntp.aliyun.com  iburst
#server 1.centos.pool.ntp.org iburst
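
To apply the configuration (assuming the ntp package is already installed on each node):

    systemctl start ntpd
    systemctl enable ntpd
    ntpq -p    # ntp.aliyun.com should appear with a nonzero reach count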

6 Install and configure MySQL (required by Hive)

# Remove mariadb
[root@hadoop2 bin]# rpm -qa|grep mariadb
mariadb-libs-5.5.56-2.el7.x86_64
[root@hadoop2 bin]# rpm -e --nodeps mariadb-libs-5.5.56-2.el7.x86_64
[root@hadoop2 bin]# rpm -e --nodeps mariadb-libs-5.5.56-2.el7.x86_64
error: package mariadb-libs-5.5.56-2.el7.x86_64 is not installed
[root@hadoop2 bin]#

[root@hadoop1 bin]# rpm -ivh mysql-community-release-el7-5.noarch.rpm
Preparing...                          ################################# [100%]
Updating / installing...
   1:mysql-community-release-el7-5    ################################# [100%]


[root@hadoop1 bin]# yum -y install mysql57-community-release-el7-10.noarch.rpm
Loaded plugins: fastestmirror, langpacks
Loading mirror speeds from cached hostfile
 * base: mirror.bit.edu.cn
 * extras: mirrors.aliyun.com
 * updates: mirrors.aliyun.com
base                                                                                                                                                               | 3.6 kB  00:00:00
extras                                                                                                                                                             | 2.9 kB  00:00:00
mysql-connectors-community                                                                                                                                         | 2.6 kB  00:00:00
mysql-tools-community                                                                                                                                              | 2.6 kB  00:00:00
mysql56-community                                                                                                                                                  | 2.6 kB  00:00:00
updates                                                                                                                                                            | 2.9 kB  00:00:00
(1/3): mysql-connectors-community/x86_64/primary_db                                                                                                                |  68 kB  00:00:01
(2/3): mysql-tools-community/x86_64/primary_db                                                                                                                     |  83 kB  00:00:01
(3/3): mysql56-community/x86_64/primary_db                                                                                                                         | 288 kB  00:00:01
No package mysql57-community-release-el7-10.noarch.rpm available.
Error: Nothing to do

(The mysql57 release RPM was not present locally, hence the "Nothing to do"; the el7-5 release package installed above has already registered the MySQL yum repositories, so the server can be installed directly.)

[root@hadoop1 bin]#  yum -y install mysql-community-server
Loaded plugins: fastestmirror, langpacks
Loading mirror speeds from cached hostfile
 * base: mirror.bit.edu.cn

[root@hadoop1 bin]# systemctl start  mysqld.service
[root@hadoop1 bin]# systemctl status mysql
● mysqld.service - MySQL Community Server
   Loaded: loaded (/usr/lib/systemd/system/mysqld.service; enabled; vendor preset: disabled)
   Active: active (running) since Thu 2021-01-14 16:55:01 CST; 36s ago

# Start the service and enable it at boot
[root@hadoop1 bin]# systemctl start mysqld
[root@hadoop1 bin]# systemctl enable mysqld
[root@hadoop1 bin]# systemctl daemon-reload
[root@hadoop1 bin]# systemctl status mysqld
● mysqld.service - MySQL Community Server
   Loaded: loaded (/usr/lib/systemd/system/mysqld.service; enabled; vendor preset: disabled)
   Active: active (running) since Mon 2021-01-25 23:03:15 CST; 29s ago
 Main PID: 55924 (mysqld_safe)
   CGroup: /system.slice/mysqld.service
           ├─55924 /bin/sh /usr/bin/mysqld_safe --basedir=/usr
           └─56090 /usr/sbin/mysqld --basedir=/usr --datadir=/var/lib/mysql --plugin-dir=/usr/lib64/mysql/plugin --log-error=/var/log/mysqld.log --pid-file=/var...

Jan 25 23:03:14 hadoop1 mysql-systemd-start[55857]: Alternatively you can run:
Jan 25 23:03:14 hadoop1 mysql-systemd-start[55857]: /usr/bin/mysql_secure_installation
Jan 25 23:03:14 hadoop1 mysql-systemd-start[55857]: which will also give you the option of removing the test
Jan 25 23:03:14 hadoop1 mysql-systemd-start[55857]: databases and anonymous user created by default.  This is
Jan 25 23:03:14 hadoop1 mysql-systemd-start[55857]: strongly recommended for production servers.
Jan 25 23:03:14 hadoop1 mysql-systemd-start[55857]: See the manual for more instructions.
Jan 25 23:03:14 hadoop1 mysql-systemd-start[55857]: Please report any problems at http://bugs.mysql.com/
Jan 25 23:03:14 hadoop1 mysqld_safe[55924]: 210125 23:03:14 mysqld_safe Logging to '/var/log/mysqld.log'.
Jan 25 23:03:14 hadoop1 mysqld_safe[55924]: 210125 23:03:14 mysqld_safe Starting mysqld daemon with databases from /var/lib/mysql
Jan 25 23:03:15 hadoop1 systemd[1]: Started MySQL Community Server.
[root@hadoop1 bin]#
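
As the systemd log above suggests, root has no password on a fresh install; before the Hive setup it is worth running the interactive hardening script:

    mysql_secure_installation    # set the root password, drop the test DB, etc.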

# The following commands could be used to open individual ports in the firewall; they are not used here, since the firewall is disabled outright
[root@localhost ~]# firewall-cmd --zone=public --add-port=3306/tcp --permanent
[root@localhost ~]# firewall-cmd --zone=public --add-port=8080/tcp --permanent
[root@localhost ~]# firewall-cmd --reload

Common firewall commands (quoted from another blog):

1. Enabling, disabling, and checking the firewall

(1) Enable the firewall at boot: systemctl enable firewalld.service

(2) Disable the firewall at boot: systemctl disable firewalld.service

(3) Start the firewall: systemctl start firewalld

(4) Stop the firewall: systemctl stop firewalld

(5) Check the firewall status: systemctl status firewalld

2. Configuring ports with firewall-cmd

(1) Show the firewall state: firewall-cmd --state

(2) Reload the configuration: firewall-cmd --reload

(3) List open ports: firewall-cmd --list-ports

(4) Open a port: firewall-cmd --zone=public --add-port=9200/tcp --permanent

Option meanings:

--zone  # the zone the rule applies to

--add-port=9200/tcp  # the port to add, in port/protocol form

--permanent  # make the rule permanent; without it the rule is lost after a restart

Note: after adding a port, you must run firewall-cmd --reload before the change takes effect.

(5) Close a port: firewall-cmd --zone=public --remove-port=9200/tcp --permanent

7 Install Hadoop

1 Download the Hadoop 3.0.0 tarball, extract it, and rename the directory to /usr/local/hadoop

2 Set the environment variables in /etc/profile

export JAVA_HOME=/usr/java/jdk1.8.0_271-amd64
export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar

export HADOOP_HOME=/usr/local/hadoop
export HBASE_HOME=/usr/local/hbase
export HIVE_HOME=/usr/local/hive
export PATH=$PATH:$JAVA_HOME/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$HBASE_HOME/bin:$HIVE_HOME/bin

export HDFS_NAMENODE_USER=root
export HDFS_DATANODE_USER=root
export HDFS_SECONDARYNAMENODE_USER=root
export YARN_RESOURCEMANAGER_USER=root
export YARN_NODEMANAGER_USER=root
[root@hadoop1 conf]# source /etc/profile
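
A quick sanity check that the new PATH resolves (assumes the tarball is already unpacked at /usr/local/hadoop):

    which hadoop     # should print /usr/local/hadoop/bin/hadoop
    hadoop version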

3 Edit the configuration files

Edit hadoop-env.sh


[root@hadoop1 hadoop]#
[root@hadoop1 hadoop]# cd /usr/local/hadoop/etc/hadoop/
[root@hadoop1 hadoop]# cat hadoop-env.sh
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Set Hadoop-specific environment variables here.
##
## THIS FILE ACTS AS THE MASTER FILE FOR ALL HADOOP PROJECTS.
## SETTINGS HERE WILL BE READ BY ALL HADOOP COMMANDS.  THEREFORE,
## ONE CAN USE THIS FILE TO SET YARN, HDFS, AND MAPREDUCE
## CONFIGURATION OPTIONS INSTEAD OF xxx-env.sh.
##
## Precedence rules:
##
## {yarn-env.sh|hdfs-env.sh} > hadoop-env.sh > hard-coded defaults
##
## {YARN_xyz|HDFS_xyz} > HADOOP_xyz > hard-coded defaults
#
# Many of the options here are built from the perspective that users
# may want to provide OVERWRITING values on the command line.
# For example:
export JAVA_HOME=/usr/java/jdk1.8.0_271-amd64

Edit core-site.xml

[root@hadoop1 hadoop]# cat core-site.xml
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
  Licensed under the Apache License, Version 2.0 (the "License");
  you may not use this file except in compliance with the License.
  You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License. See accompanying LICENSE file.
-->

<!-- Put site-specific property overrides in this file. -->

<configuration>

<property>
<name>fs.defaultFS</name>
<value>hdfs://hadoop1:9000</value>
<description>hdfs uri</description>
</property>

<property>
<name>hadoop.tmp.dir</name>
<value>/usr/local/hadoop/tmp</value>
<description>temp path</description>
</property>

</configuration>
[root@hadoop1 hadoop]#

Edit yarn-site.xml

[root@hadoop1 hadoop]# cat yarn-site.xml
<?xml version="1.0"?>
<!--
  Licensed under the Apache License, Version 2.0 (the "License");
  you may not use this file except in compliance with the License.
  You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License. See accompanying LICENSE file.
-->
<configuration>

<!-- Site specific YARN configuration properties -->
    <property>
        <name>yarn.resourcemanager.hostname</name>
        <value>hadoop1</value>
    </property>

    <property>
        <name>yarn.nodemanager.aux-services</name>
        <value>mapreduce_shuffle</value>
    </property>
    
        <property>
        <name>yarn.nodemanager.vmem-check-enabled</name>
        <value>false</value>
    </property>
    
        <property>
        <name>yarn.nodemanager.env-whitelist</name>
        <value>JAVA_HOME,HADOOP_COMMON_HOME,HADOOP_HDFS_HOME,HADOOP_CONF_DIR,CLASSPATH_PREPEND_DISTCACHE,HADOOP_YARN_HOME,HADOOP_MAPRED_HOME</value>
    </property>

</configuration>
[root@hadoop1 hadoop]#

Edit mapred-site.xml

[root@hadoop1 hadoop]# cat mapred-site.xml
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
  Licensed under the Apache License, Version 2.0 (the "License");
  you may not use this file except in compliance with the License.
  You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License. See accompanying LICENSE file.
-->

<!-- Put site-specific property overrides in this file. -->

<configuration>
        <property>
                <name>mapreduce.framework.name</name>
                <value>yarn</value>
        </property>

        <property>
                <name>mapreduce.admin.user.env</name>
                <value>HADOOP_MAPRED_HOME=/usr/local/hadoop</value>
        </property>
    
        <property>
                <name>yarn.app.mapreduce.am.env</name>
                <value>HADOOP_MAPRED_HOME=/usr/local/hadoop</value>
        </property>

</configuration>
[root@hadoop1 hadoop]#

Edit hdfs-site.xml

[root@hadoop1 hadoop]# cat hdfs-site.xml
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
  Licensed under the Apache License, Version 2.0 (the "License");
  you may not use this file except in compliance with the License.
  You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License. See accompanying LICENSE file.
-->

<!-- Put site-specific property overrides in this file. -->

<configuration>
        <property>
                <name>dfs.namenode.http-address</name>
                <value>hadoop1:50070</value>
        </property>

        <property>
                <name>dfs.replication</name>
                <value>3</value>
        </property>
    
        <property>
                <name>dfs.permissions.enabled</name>
                <value>false</value>
        </property>
    
        <property>
                <name>dfs.block.size</name>
                <value>134217728</value>
        </property>

</configuration>
[root@hadoop1 hadoop]#

Edit the workers file

[root@hadoop1 hadoop]# cat workers
hadoop1
hadoop2
hadoop3

Copy the installation to the hadoop2 and hadoop3 hosts (a loop covering both is sketched below)

[root@hadoop1 hadoop]#
[root@hadoop1 hadoop]# scp -r /usr/local/hadoop hadoop2:/usr/local/
[root@hadoop1 hadoop]# scp /etc/profile hadoop2:/etc/profile
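
The commands above only cover hadoop2; hadoop3 needs the same copies, and a small loop keeps all the workers in sync:

    for h in hadoop2 hadoop3; do
        scp -r /usr/local/hadoop $h:/usr/local/
        scp /etc/profile $h:/etc/profile
    done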

Format HDFS and start the cluster

[root@hadoop1 hadoop]# hadoop namenode -format
WARNING: Use of this script to execute namenode is deprecated.
WARNING: Attempting to execute replacement "hdfs namenode" instead.

WARNING: /usr/local/hadoop/logs does not exist. Creating.
2021-01-25 21:55:18,745 INFO namenode.NameNode: STARTUP_MSG:
/************************************************************
STARTUP_MSG: Starting NameNode
STARTUP_MSG:   host = hadoop1/192.168.11.105

2021-01-25 22:00:38,756 INFO namenode.NNStorageRetentionManager: Going to retain 1 images with txid >= 0
2021-01-25 22:00:38,779 INFO namenode.NameNode: SHUTDOWN_MSG:
/************************************************************
SHUTDOWN_MSG: Shutting down NameNode at hadoop1/192.168.11.105
************************************************************/



[root@hadoop1 current]# start-all.sh
Starting namenodes on [hadoop1]
Last login: Mon Jan 25 22:13:35 CST 2021 on pts/1
Starting datanodes
Last login: Mon Jan 25 22:14:10 CST 2021 on pts/1
Starting secondary namenodes [hadoop1]
Last login: Mon Jan 25 22:14:13 CST 2021 on pts/1
Starting resourcemanager
Last login: Mon Jan 25 22:14:23 CST 2021 on pts/1
Starting nodemanagers
Last login: Mon Jan 25 22:14:34 CST 2021 on pts/1


[root@hadoop1 hadoop]# jps
51792 NameNode
51936 DataNode
52945 Jps
52423 ResourceManager
52568 NodeManager
11211 QuorumPeerMain
52171 SecondaryNameNode
[root@hadoop1 hadoop]#


[root@hadoop2 hadoop]# jps
11040 QuorumPeerMain
40403 NodeManager
40285 DataNode
40557 Jps
[root@hadoop2 hadoop]#


Open http://hadoop1:50070/dfshealth.html#tab-datanode to check the DataNodes:

If no DataNodes show up, the format step went wrong, usually because the DataNode and NameNode no longer agree on the clusterID recorded in their VERSION files (e.g. after reformatting). Check the DataNode logs:


In that case, simply delete the stale file and the old log records on the affected DataNodes and restart.
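
The two clusterIDs can be compared directly. With hadoop.tmp.dir=/usr/local/hadoop/tmp as configured above, HDFS keeps its metadata under that directory by default (the paths below follow the HDFS defaults):

    # NameNode side
    grep clusterID /usr/local/hadoop/tmp/dfs/name/current/VERSION
    # DataNode side (run on each worker); if the IDs differ, stop HDFS,
    # remove the stale data directory, and restart
    grep clusterID /usr/local/hadoop/tmp/dfs/data/current/VERSION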

8 Install HBase

HBase 1.4.0 is used (hbase-1.4.0-bin.tar.gz), extracted to /usr/local/hbase.

Set JAVA_HOME and HBASE_MANAGES_ZK in hbase-env.sh:

[root@hadoop1 conf]# pwd
/usr/local/hbase/conf
[root@hadoop1 conf]# cat hbase-env.sh | grep JAVA
export JAVA_HOME=/usr/local/jdk1.8
[root@hadoop1 conf]# cat hbase-env.sh | grep ZOO
# export HBASE_ZOOKEEPER_OPTS="$HBASE_ZOOKEEPER_OPTS $HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10104"
# export HBASE_ZOOKEEPER_OPTS="$HBASE_ZOOKEEPER_OPTS -Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=8073"
[root@hadoop1 conf]# cat hbase-env.sh | grep ZK
export HBASE_MANAGES_ZK=true
[root@hadoop1 conf]#

Edit hbase-site.xml

[root@hadoop1 conf]# cat hbase-site.xml

<?xml version="1.0"?> <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
    <property>
            <name>hbase.rootdir</name>
            <value>hdfs://hadoop1:9000/hbase</value>
    </property>
<property>
            <name>hbase.cluster.distributed</name>
            <value>true</value>
    </property>

    <property>
            <name>hbase.zookeeper.quorum</name>
            <value>hadoop1,hadoop2,hadoop3</value>
    </property>
    
    <property>
            <name>hbase.zookeeper.property.dataDir</name>
            <value>/usr/local/hbase/zookeeper</value>
    </property>
    
    <property>
            <name>hbase.master.info.port</name>
            <value>16010</value>
    </property>

</configuration>
[root@hadoop1 conf]#

# Edit regionservers

[root@hadoop1 conf]# cat regionservers
hadoop1
hadoop2
hadoop3
[root@hadoop1 conf]#


# Distribute to the other two hosts

[root@hadoop1 conf]# scp -r /usr/local/hbase hadoop2:/usr/local/
[root@hadoop1 conf]# scp -r /usr/local/hbase hadoop3:/usr/local/


# Verify

 [root@hadoop1 bin]# start-hbase.sh
 hadoop3: running zookeeper, logging to /usr/local/hbase/bin/../logs/hbase-root-zookeeper-hadoop3.out
 hadoop2: running zookeeper, logging to /usr/local/hbase/bin/../logs/hbase-root-zookeeper-hadoop2.out
 hadoop1: running zookeeper, logging to /usr/local/hbase/bin/../logs/hbase-root-zookeeper-hadoop1.out
 running master, logging to /usr/local/hbase/logs/hbase-root-master-hadoop1.out
 Java HotSpot(TM) 64-Bit Server VM warning: ignoring option PermSize=128m; support was removed in 8.0
 Java HotSpot(TM) 64-Bit Server VM warning: ignoring option MaxPermSize=128m; support was removed in 8.0
 hadoop2: running regionserver, logging to /usr/local/hbase/bin/../logs/hbase-root-regionserver-hadoop2.out
 hadoop3: running regionserver, logging to /usr/local/hbase/bin/../logs/hbase-root-regionserver-hadoop3.out
 hadoop1: running regionserver, logging to /usr/local/hbase/bin/../logs/hbase-root-regionserver-hadoop1.out
 hadoop3: Java HotSpot(TM) 64-Bit Server VM warning: ignoring option PermSize=128m; support was removed in 8.0
 hadoop3: Java HotSpot(TM) 64-Bit Server VM warning: ignoring option MaxPermSize=128m; support was removed in 8.0
 hadoop2: Java HotSpot(TM) 64-Bit Server VM warning: ignoring option PermSize=128m; support was removed in 8.0
 hadoop2: Java HotSpot(TM) 64-Bit Server VM warning: ignoring option MaxPermSize=128m; support was removed in 8.0
 hadoop1: Java HotSpot(TM) 64-Bit Server VM warning: ignoring option PermSize=128m; support was removed in 8.0
 hadoop1: Java HotSpot(TM) 64-Bit Server VM warning: ignoring option MaxPermSize=128m; support was removed in 8.0
 [root@hadoop1 bin]# hbase shell
 SLF4J: Class path contains multiple SLF4J bindings.
 SLF4J: Found binding in [jar:file:/usr/local/hbase/lib/slf4j-log4j12-1.7.10.jar!/org/slf4j/impl/StaticLoggerBinder.class]
 SLF4J: Found binding in [jar:file:/usr/local/hadoop/share/hadoop/common/lib/slf4j-log4j12-1.7.25.jar!/org/slf4j/impl/StaticLoggerBinder.class]
 SLF4J: See http://www.slf4j.org/codes.html#multiple_bindings for an explanation.
 SLF4J: Actual binding is of type [org.slf4j.impl.Log4jLoggerFactory]
 HBase Shell
 Use "help" to get list of supported commands.
 Use "exit" to quit this interactive shell.
 Version 1.4.0, r10b9b9fae6b557157644fb9a0dc641bb8cb26e39, Fri Dec  8 16:09:13 PST 2017
 
 hbase(main):001:0> list
 TABLE
 0 row(s) in 0.7750 seconds
 
 => []
 hbase(main):002:0>
 hbase(main):003:0*
 hbase(main):004:0* exit
 
 
 [root@hadoop1 bin]# hadoop fs -ls /
 Found 1 items
 drwxr-xr-x   - root supergroup          0 2021-01-25 22:52 /hbase
 [root@hadoop1 bin]#
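
A quick smoke test inside hbase shell (the table and column-family names are arbitrary examples):

    create 'test', 'cf'                 # table with one column family
    put 'test', 'row1', 'cf:a', 'v1'    # write a cell
    scan 'test'                         # should list row1
    disable 'test'
    drop 'test'                         # clean up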
 
 Master node:
 [root@hadoop1 bin]# jps
 51792 NameNode
 51936 DataNode
 54881 HMaster
 52423 ResourceManager
 55703 Jps
 52568 NodeManager
 55049 HRegionServer
 52171 SecondaryNameNode
 54814 HQuorumPeer
 [root@hadoop1 bin]#
 
 Slave node:
 [root@hadoop2 bin]# jps
 40403 NodeManager
 41347 HRegionServer
 41270 HQuorumPeer
 40285 DataNode
 41597 Jps


9 Install Hive

Configure the database; set the root account passwords:

shell> mysql -u root
mysql> SET PASSWORD FOR 'root'@'localhost' = PASSWORD('XXXXXXXX');
mysql> SET PASSWORD FOR 'root'@'127.0.0.1' = PASSWORD('XXXXXXXX');
mysql> SET PASSWORD FOR 'root'@'::1' = PASSWORD('XXXXXXXX');
mysql> SET PASSWORD FOR 'root'@'hadoop1' = PASSWORD('XXXXXXXX');

Edit the MySQL configuration /etc/my.cnf:

[mysqld]
character_set_server=utf8
init_connect='SET NAMES utf8'

Create the hiveuser account and grant privileges:

mysql> CREATE USER 'hiveuser'@'%' IDENTIFIED BY '123456';
Query OK, 0 rows affected (0.00 sec)

mysql> grant all privileges on *.* to 'hiveuser'@'%' IDENTIFIED BY '123456';
Query OK, 0 rows affected (0.00 sec)

mysql> grant all privileges on *.* to 'hiveuser'@'hadoop1' IDENTIFIED BY '123456';
Query OK, 0 rows affected (0.00 sec)

mysql> grant all privileges on *.* to 'hiveuser'@'localhost' IDENTIFIED BY '123456';
Query OK, 0 rows affected (0.00 sec)

mysql> grant all privileges on *.* to 'hiveuser'@'127.0.0.1' IDENTIFIED BY '123456';
Query OK, 0 rows affected (0.00 sec)

mysql> FLUSH PRIVILEGES;
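
To confirm the grants landed, still in the same session:

    mysql> SELECT user, host FROM mysql.user WHERE user = 'hiveuser';
    mysql> SHOW GRANTS FOR 'hiveuser'@'%';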

Configure Hive

Download apache-hive-2.3.2-bin.tar.gz and extract it to /usr/local/hive.

# Copy the template configuration files
[root@hadoop1 hive]# cd /usr/local/hive/conf/
[root@hadoop1 conf]# cp hive-env.sh.template hive-env.sh
[root@hadoop1 conf]# cp hive-default.xml.template hive-site.xml

# Edit bin/hive-config.sh, appending the last three export lines below
[root@hadoop1 hive]# tail -10  bin/hive-config.sh
HIVE_CONF_DIR="${HIVE_CONF_DIR:-$HIVE_HOME/conf}"

export HIVE_CONF_DIR=$HIVE_CONF_DIR
export HIVE_AUX_JARS_PATH=$HIVE_AUX_JARS_PATH

# Default to use 256MB
export HADOOP_HEAPSIZE=${HADOOP_HEAPSIZE:-256}
export JAVA_HOME=/usr/local/jdk1.8
export HADOOP_HOME=/usr/local/hadoop
export HIVE_HOME=/usr/local/hive
[root@hadoop1 hive]#

# Install the MySQL JDBC driver
[root@hadoop1 conf]# cp /home/hero/mysql-connector-java-5.1.46.jar  /usr/local/hive/lib/
cp: overwrite ‘/usr/local/hive/lib/mysql-connector-java-5.1.46.jar’? y
[root@hadoop1 conf]#

# Set the metastore connection properties in hive-site.xml: javax.jdo.option.ConnectionURL, javax.jdo.option.ConnectionDriverName, javax.jdo.option.ConnectionUserName, and javax.jdo.option.ConnectionPassword

[root@hadoop1 conf]# cat hive-site.xml | grep option.Connection -A 5
    <name>javax.jdo.option.ConnectionPassword</name>
    <value>123456</value>
    <description>password to use against metastore database</description>
  </property>
  <property>
    <name>hive.metastore.ds.connection.url.hook</name>
--
    <description>Name of the hook to use for retrieving the JDO connection URL. If empty, the value in javax.jdo.option.ConnectionURL is used</description>
  </property>
  <property>
    <name>javax.jdo.option.Multithreaded</name>
    <value>true</value>
    <description>Set this to true if multiple threads access metastore through JDO concurrently.</description>
--
    <name>javax.jdo.option.ConnectionURL</name>
    <value>jdbc:mysql://hadoop1:3306/hive?createDatabaseIfNotExist=true</value>
    <description>
      JDBC connect string for a JDBC metastore.
      To use SSL to encrypt/authenticate the connection, provide database-specific SSL flag in the connection URL.
      For example, jdbc:postgresql://myhost/db?ssl=true for postgres database.
--
    <name>javax.jdo.option.ConnectionDriverName</name>
    <value>com.mysql.jdbc.Driver</value>
    <description>Driver class name for a JDBC metastore</description>
  </property>
  <property>
    <name>javax.jdo.PersistenceManagerFactoryClass</name>
--
    <name>javax.jdo.option.ConnectionUserName</name>
    <value>hiveuser</value>
    <description>Username to use against metastore database</description>
  </property>
  <property>
    <name>hive.metastore.end.function.listeners</name>
--
    <value>javax.jdo.option.ConnectionPassword,hive.server2.keystore.password,fs.s3.awsAccessKeyId,fs.s3.awsSecretAccessKey,fs.s3n.awsAccessKeyId,fs.s3n.awsSecretAccessKey,fs.s3a.access.key,fs.s3a.secret.key,fs.s3a.proxy.password</value>
    <description>Comma separated list of configuration options which should not be read by normal user like passwords</description>
  </property>
  <property>
    <name>hive.conf.internal.variable.list</name>
    <value>hive.added.files.path,hive.added.jars.path,hive.added.archives.path</value>
[root@hadoop1 conf]#

# Create a scratch directory
mkdir /usr/local/hive/tmp

# In hive-site.xml, replace ${system:java.io.tmpdir} with /usr/local/hive/tmp (4 occurrences)
[root@hadoop1 conf]# cat hive-site.xml | grep /usr/local/hive/tmp -B 2
  <property>
    <name>hive.exec.local.scratchdir</name>
    <value>/usr/local/hive/tmp/root</value>
--
  <property>
    <name>hive.downloaded.resources.dir</name>
    <value>/usr/local/hive/tmp/${hive.session.id}_resources</value>
--
  <property>
    <name>hive.querylog.location</name>
    <value>/usr/local/hive/tmp/root</value>
--
  <property>
    <name>hive.server2.logging.operation.log.location</name>
    <value>/usr/local/hive/tmp/root/operation_logs</value>
[root@hadoop1 conf]#

# Replace ${system:user.name} with root (3 occurrences)
 <property>
    <name>hive.exec.local.scratchdir</name>
    <value>/usr/local/hive/tmp/root</value>
--
    <value>/hivedelegation</value>
    <description>
      The root path for token store data. Note that this is used by both HiveServer2 and
--
  <property>
    <name>hive.querylog.location</name>
    <value>/usr/local/hive/tmp/root</value>
--
  <property>
    <name>hive.server2.logging.operation.log.location</name>
    <value>/usr/local/hive/tmp/root/operation_logs</value>
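
Both batches of substitutions can be done in one pass with sed rather than by hand (a sketch; keep a backup first):

    cp hive-site.xml hive-site.xml.bak
    sed -i 's|${system:java.io.tmpdir}|/usr/local/hive/tmp|g' hive-site.xml
    sed -i 's|${system:user.name}|root|g' hive-site.xml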

# Distribute to the other hosts
[root@hadoop1 conf]# scp -r  /usr/local/hive hadoop2:/usr/local/
[root@hadoop1 conf]# scp -r  /usr/local/hive hadoop3:/usr/local/

# Initialize the metastore schema and test
[root@hadoop1 conf]# schematool -dbType mysql -initSchema
SLF4J: Class path contains multiple SLF4J bindings.
SLF4J: Found binding in [jar:file:/usr/local/hive/lib/log4j-slf4j-impl-2.6.2.jar!/org/slf4j/impl/StaticLoggerBinder.class]
SLF4J: Found binding in [jar:file:/usr/local/hadoop/share/hadoop/common/lib/slf4j-log4j12-1.7.25.jar!/org/slf4j/impl/StaticLoggerBinder.class]
SLF4J: See http://www.slf4j.org/codes.html#multiple_bindings for an explanation.
SLF4J: Actual binding is of type [org.apache.logging.slf4j.Log4jLoggerFactory]
Metastore connection URL:        jdbc:mysql://hadoop1:3306/hive?createDatabaseIfNotExist=true
Metastore Connection Driver :    com.mysql.jdbc.Driver
Metastore connection User:       hiveuser
Starting metastore schema initialization to 2.3.0
Initialization script hive-schema-2.3.0.mysql.sql
Initialization script completed
schemaTool completed
[root@hadoop1 conf]# hive
SLF4J: Class path contains multiple SLF4J bindings.
SLF4J: Found binding in [jar:file:/usr/local/hive/lib/log4j-slf4j-impl-2.6.2.jar!/org/slf4j/impl/StaticLoggerBinder.class]
SLF4J: Found binding in [jar:file:/usr/local/hadoop/share/hadoop/common/lib/slf4j-log4j12-1.7.25.jar!/org/slf4j/impl/StaticLoggerBinder.class]
SLF4J: See http://www.slf4j.org/codes.html#multiple_bindings for an explanation.
SLF4J: Actual binding is of type [org.apache.logging.slf4j.Log4jLoggerFactory]

Logging initialized using configuration in jar:file:/usr/local/hive/lib/hive-common-2.3.2.jar!/hive-log4j2.properties Async: true
Hive-on-MR is deprecated in Hive 2 and may not be available in the future versions. Consider using a different execution engine (i.e. spark, tez) or using Hive 1.X releases.
hive> show tables;
OK
Time taken: 10.113 seconds
hive>
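
A minimal end-to-end check (the table name is arbitrary; the INSERT launches a MapReduce job):

    hive> CREATE TABLE t1 (id INT, name STRING);
    hive> INSERT INTO t1 VALUES (1, 'a');
    hive> SELECT * FROM t1;
    hive> DROP TABLE t1;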


10 Install Spark (to follow)

Source: blog.csdn.net/zhaoyaxiong_ctu/article/details/113151938