Setting up Linux: install JDK, MySQL 5.7, Nacos, Zookeeper, and the big-data ecosystem (Kafka, Flume, Hadoop, Hive, HBase, Zeppelin, Sqoop, Elasticsearch) — the everything-included plus edition

Step 1: Install the virtual machine


Run

ip a

to check your IP address.

Step 2: Connect over the configured port with FinalShell (Xshell, etc.)
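If you are not using FinalShell or Xshell, the same connection works from any terminal over plain SSH (the IP below is the example address from the static-IP script; use your own):

ssh root@192.168.64.130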

# Go to the /opt directory
cd /opt

Step 3: Install the software

## Run the following shell.sh script to install the base environment

# Set the hostname
hostnamectl set-hostname $1

# Switch the network config to a static IP
addr=$2 #192.168.64.130
sed -i 's/dhcp/static/' /etc/sysconfig/network-scripts/ifcfg-ens33
echo "IPADDR=$addr" >> /etc/sysconfig/network-scripts/ifcfg-ens33
echo "NETMASK=255.255.255.0" >> /etc/sysconfig/network-scripts/ifcfg-ens33 # subnet mask
gw=`awk 'BEGIN{split("'"$addr"'",ips,".");print ips[1] "." ips[2] "." ips[3] "." 2 }'`
echo "GATEWAY=$gw" >> /etc/sysconfig/network-scripts/ifcfg-ens33 # gateway
echo "DNS1=114.114.114.114" >> /etc/sysconfig/network-scripts/ifcfg-ens33
echo "DNS2=8.8.8.8" >> /etc/sysconfig/network-scripts/ifcfg-ens33
systemctl restart network # restart networking

# Map the IP address to the hostname
echo "$addr $1" >> /etc/hosts

# Disable the firewall
systemctl stop firewalld
systemctl disable firewalld

# Install vim and wget
yum install -y vim wget

# Switch the yum repository
mv /etc/yum.repos.d/CentOS-Base.repo /etc/yum.repos.d/CentOS-Base.repo.bak3
# Download the Aliyun repo file
wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
yum clean all
yum makecache

# Create the software install directory
mkdir -p /opt/soft

# Set up the JDK
mkdir -p /opt/soft/jdk180
jdkPath=`find /opt/ -name 'jdk*.tar.gz*'`
tar -zxf $jdkPath -C /opt/soft/jdk180 --strip-components 1 # extract into jdk180, stripping the top-level folder

if [ -z "$JAVA_HOME" ]
then
    echo 'export JAVA_HOME=/opt/soft/jdk180' >> /etc/profile
    echo 'export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar' >> /etc/profile
    echo 'export PATH=$PATH:$JAVA_HOME/bin' >> /etc/profile
    source /etc/profile
fi
 

#1 Run the script to install the JDK

#myseata is the custom hostname
#192.168.64.135 is your own IP address
source shell02.sh myseata 192.168.64.135


#2 Install MySQL 5.7

Remove the preinstalled **mariadb**

# Find the package name
rpm -qa | grep mariadb
# xxx is the package name found above; remove it
rpm -e --nodeps xxx
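If you would rather not copy the package name by hand, the two commands can be combined into one pipeline; check what grep matches before removing:

rpm -qa | grep mariadb | xargs rpm -e --nodeps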


Download and install MySQL 5.7

wget -c http://dev.mysql.com/get/mysql57-community-release-el7-10.noarch.rpm

yum -y install mysql57-community-release-el7-10.noarch.rpm

rpm --import https://repo.mysql.com/RPM-GPG-KEY-mysql-2022

yum install mysql-server -y


Grant file ownership

chown -R root:root /var/lib/mysql

chown root /var/lib/mysql/


Fix garbled Chinese characters in MySQL

# Edit /etc/my.cnf
vim /etc/my.cnf

[mysqld] 
character-set-server=utf8

[client] 
default-character-set=utf8 
[mysql] 
default-character-set=utf8

# save and exit
:wq
# restart MySQL
service mysqld restart


Change the **mysql** root password and enable remote login

Log in to MySQL 5.7

# Look up the temporary password in the MySQL log
grep "password" /var/log/mysqld.log

# Copy that temporary password; you will log in with it next



# Log in to the database; nU5TydlD__a below stands for your own temporary password
mysql -uroot -p

nU5TydlD__a

use mysql

#3090_Cmok is your new password
ALTER USER 'root'@'localhost' IDENTIFIED BY '3090_Cmok';

# Enable remote login
GRANT ALL PRIVILEGES ON *.* TO root@"%" IDENTIFIED BY "3090_Cmok";

flush privileges;

exit;

#(If the login fails with a password error, follow the method below)
----------------------------------------------------------------------------------- 

The basic approach is as follows:

Open the config file with vi /etc/my.cnf;

Under [mysqld], add a line skip-grant-tables to skip authentication;

Restart MySQL with systemctl restart mysqld;

Log in with mysql -uroot -p, entering any password;

Run use mysql; to switch to the mysql database;

Run UPDATE user SET password=PASSWORD("your new password") WHERE user='root'; to change the password;

Run quit; to exit MySQL;

Repeat step 1;

Remove the skip-grant-tables line added under [mysqld], quit vim, and restart MySQL;

Log in with mysql -uroot -p using the new password you just set.

However, after trying this several times, step 6 always ended with the error

ERROR 1054 (42S22): Unknown column 'password' in 'field list'

It nearly drove me crazy!!!!

By chance I found a CSDN article with very few upvotes which pointed out

that in MySQL 5.7 the mysql.user table no longer has a password column; it has been renamed to

authentication_string

I could have shouted out loud!!!!

Following that article, I changed the command in step 6 to:

update mysql.user set authentication_string=password('HJZ@bb1314') where user='root';

Ran through the steps again, and it worked right away!

PS!!!!!!!!!!!!!!!!!!!!!!!!

Later, logging in with the newly set password worked, but some operations produced this warning:

ERROR 1820 (HY000): You must reset your password using ALTER USER statement before executing this statement.

It means the password you just set is only treated as an initial password; you still have to change it again before you can continue.

This picks up exactly at "Step 6: Configure MySQL" of manually deploying an LNMP environment (Alibaba Cloud Linux 2).



mysql -uroot -p

the password you just set

use mysql

#3090_Cmok stands for your own password
ALTER USER 'root'@'localhost' IDENTIFIED BY 'your_password';

# Enable remote login
GRANT ALL PRIVILEGES ON *.* TO root@"%" IDENTIFIED BY "your_password";

flush privileges;

exit;

#3 Set up standalone Nacos

jps
cd /opt/
ls
tar -zxf nacos-server-1.4.2.tar.gz 
mv nacos soft/nacos8848
cd soft/nacos8848/conf/
vim application.properties
#Uncomment the spring/db lines and change the IP to your own (192.168.64.135 is mine)
#user: root, password: 3090_Cmok (use your own MySQL password)
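For reference, after uncommenting, the database block of application.properties looks roughly like this on Nacos 1.4.x (URL, user, and password below are placeholders — use your own IP and MySQL password; depending on the exact build, the keys may be db.user/db.password instead of db.user.0/db.password.0):

spring.datasource.platform=mysql
db.num=1
db.url.0=jdbc:mysql://192.168.64.135:3306/nacos?characterEncoding=utf8&connectTimeout=1000&socketTimeout=3000&autoReconnect=true&useUnicode=true&useSSL=false&serverTimezone=UTC
db.user.0=root
db.password.0=3090_Cmok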


cd /opt/soft/nacos8848/bin/
#Switch to standalone (non-cluster) mode
vim startup.sh
#=============================#
#change the line below
export MODE="standalone"
#=============================#

#Add environment variables in /etc/profile
vim /etc/profile
#append at the end
=============================
#nacos env
export NACOS_HOME=/opt/soft/nacos8848
export PATH=$PATH:$NACOS_HOME/bin
=============================
#:wq!
source /etc/profile

-----------------------------------------
#Log back in to MySQL 5.7 and run the Nacos schema SQL
mysql -uroot -p

3090_Cmok

show databases;

#Create database 1
create database mydemo;

use mydemo;

#Create a table
create table stocks(id int not null auto_increment primary key, shopid int not null, storenum int);

#Create database 2
create database nacos;

use nacos;

#Run the Nacos schema script
source /opt/soft/nacos8848/conf/nacos-mysql.sql

exit
-----------------------------------------
#Start Nacos
sh startup.sh
#Open in a browser
192.168.64.135:8848/nacos/#/login

If the page does not open in the browser:

Check the logs directory.

Nacos fails to start with this error: Constructor threw exception; nested exception is ErrCode:500, ErrMsg: ... (jmenv.tbsite.net)

The fix is to manually rename cluster.conf.example in the conf directory to cluster.conf, as shown below.
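In commands, that is roughly (paths follow the install directory used above):

cd /opt/soft/nacos8848/conf
mv cluster.conf.example cluster.conf
# then restart Nacos
sh /opt/soft/nacos8848/bin/startup.sh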

 

Login succeeds!!

4 Install Zookeeper

 cd /opt/
 ls
 tar -zxf zookeeper-3.4.5-cdh5.14.2.tar.gz
 mv zookeeper-3.4.5-cdh5.14.2 /opt/soft/zk345
 cd /opt/soft/zk345/conf/
 ls
cp zoo_sample.cfg zoo.cfg
 vim zoo.cfg
 ======================
 #change this line
 dataDir=/opt/soft/zk345/data
 #add the following line, using your own IP address
 server.1=192.168.64.128:2888:3888
  ======================
  
  
 vim /etc/profile
Configure environment variables

#zookeeper env
export ZOOKEEPER_HOME=/opt/soft/zk345
export PATH=$PATH:$ZOOKEEPER_HOME/bin
:wq
#apply the configuration
source /etc/profile
#start Zookeeper
zkServer.sh start
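Optionally confirm it is running:

zkServer.sh status
jps   # should list QuorumPeerMain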

5 Install Kafka

##########Install Kafka##########
cd /opt/
ls
tar -zxf kafka_2.11-2.0.0.tgz
ls
mv kafka_2.11-2.0.0 soft/kafka200
cd soft/kafka200/config/
ls
vim server.properties
#change the following
#!!! use your own IP address
=========uncomment> listeners=PLAINTEXT://192.168.64.138:9092
=========> log.dirs=/opt/soft/kafka200/kafka-logs
=========> zookeeper.connect=192.168.64.138:2181
#save and exit
:wq

Configure environment variables

vim /etc/profile

  ======================

#kafka env
export KAFKA_HOME=/opt/soft/kafka200
export PATH=$PATH:$KAFKA_HOME/bin

 =======================

source /etc/profile

#Start the Kafka broker

cd /opt/soft/kafka200/bin
./kafka-server-start.sh /opt/soft/kafka200/config/server.properties
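To verify the broker is up, a sketch using the scripts shipped with kafka_2.11-2.0.0 and the IP configured above (the topic name test is just an example):

# create a test topic
kafka-topics.sh --create --zookeeper 192.168.64.138:2181 --replication-factor 1 --partitions 1 --topic test
# produce a few messages (type lines, Ctrl+C to quit)
kafka-console-producer.sh --broker-list 192.168.64.138:9092 --topic test
# consume them in another window
kafka-console-consumer.sh --bootstrap-server 192.168.64.138:9092 --topic test --from-beginning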
 

6 Install Flume

#1 Extract the archive and move it into place
cd /opt
tar -zxf flume-ng-1.6.0-cdh5.14.2.tar.gz
mv apache-flume-1.6.0-cdh5.14.2-bin/ /opt/soft/flume160

cd /opt/soft/flume160/conf
Configure the Flume env file
cp flume-env.sh.template flume-env.sh
vim flume-env.sh
<=======================================>
export JAVA_HOME=/opt/soft/jdk180
<=======================================>

#4 Configure environment variables
vim /etc/profile
=======================================

#flume env
export FLUME_HOME=/opt/soft/flume160
export PATH=$PATH:$FLUME_HOME/bin
=======================================
#apply the configuration
source /etc/profile

#Start Flume with the agent defined in the target conf file

./flume-ng agent -n a1 -c /opt/soft/flume160/conf -f /opt/flumeconf/third.conf
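The contents of /opt/flumeconf/third.conf are not shown in this post; as a purely hypothetical minimal example, an agent named a1 with a netcat source and a logger sink would look roughly like this (source type, port, and sink are assumptions for illustration only):

# third.conf (hypothetical example): agent a1 reads lines from a netcat source and logs the events
a1.sources = s1
a1.channels = c1
a1.sinks = k1

a1.sources.s1.type = netcat
a1.sources.s1.bind = localhost
a1.sources.s1.port = 44444

a1.channels.c1.type = memory
a1.channels.c1.capacity = 1000

a1.sinks.k1.type = logger

a1.sources.s1.channels = c1
a1.sinks.k1.channel = c1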

7 Install Hadoop

Copy (drag in) the Hadoop tarball to /opt
cd /opt
tar -zxf hadoop-2.6.0-cdh5.14.2.tar.gz
mv hadoop-2.6.0-cdh5.14.2 soft/hadoop260
cd soft/hadoop260
cd etc/hadoop
pwd
vim hadoop-env.sh
1=============================
export JAVA_HOME=/opt/soft/jdk180
:wq
1=============================

vim core-site.xml
2============================
<configuration>
    <property>
        <name>fs.defaultFS</name>
        <value>hdfs://192.168.64.128:9000</value>
    </property>
    <property>
        <name>hadoop.tmp.dir</name>
        <value>/opt/soft/hadoop260/tmp</value>
    </property>
</configuration>
:wq
2============================

vim hdfs-site.xml
3============================
<configuration>
    <property>
        <name>dfs.replication</name>
        <value>1</value>
    </property>
</configuration>
:wq
3============================

cp mapred-site.xml.template mapred-site.xml
vim mapred-site.xml
4============================
<configuration>
    <property>
        <name>mapreduce.framework.name</name>
        <value>yarn</value>
    </property>
</configuration>
:wq
4============================

vim yarn-site.xml
5============================
<configuration>
    <property>
        <name>yarn.resourcemanager.hostname</name>
        <value>localhost</value>
    </property>
    <property>
        <name>yarn.nodemanager.aux-services</name>
        <value>mapreduce_shuffle</value>
    </property>
</configuration>
:wq
5============================

#Configure the Hadoop environment variables; note: use your own hadoop260 path
vim /etc/profile
6============================
# Hadoop ENV
export HADOOP_HOME=/opt/soft/hadoop260
export HADOOP_MAPRED_HOME=$HADOOP_HOME
export HADOOP_COMMON_HOME=$HADOOP_HOME
export HADOOP_HDFS_HOME=$HADOOP_HOME
export YARN_HOME=$HADOOP_HOME
export HADOOP_COMMON_LIB_NATIVE_DIR=$HADOOP_HOME/lib/native
export PATH=$PATH:$HADOOP_HOME/sbin:$HADOOP_HOME/bin
export HADOOP_INSTALL=$HADOOP_HOME

:wq
6============================
#apply the configuration above
source /etc/profile
#passwordless SSH login
ssh-keygen -t rsa -P ''
cd /root/.ssh/
ls
ssh-copy-id -i ~/.ssh/id_rsa.pub root@192.168.64.128
yes
ok
ls
ll
ssh 192.168.64.128
exit
#Remote login by hostname; hadoop01 here is your own hostname (mapped in /etc/hosts, or set with hostnamectl set-hostname)
ssh hadoop01
yes
exit
#now logs in directly, no password
ssh hadoop01
exit
#Format the NameNode
hdfs namenode -format
#Start Hadoop

start-all.sh

yes

yes

 

8 Install Hive

 cd /opt

tar -zxf hive-1.1.0-cdh5.14.2.tar.gz 

mv hive-1.1.0-cdh5.14.2 /opt/soft/hive110

cd /opt/soft/hive110/conf

vim hive-site.xml  #add the following
====================================
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>

<configuration>
<property>
<name>hive.metastore.warehouse.dir</name>
<value>/hive/warehouse</value>
</property>
<property>
<name>hive.metastore.local</name>
<value>false</value>
</property>
<property>
<name>javax.jdo.option.ConnectionURL</name>
<value>jdbc:mysql://192.168.64.210:3306/hive?createDatabaseIfNotExist=true</value>
</property>
<property>
<name>javax.jdo.option.ConnectionDriverName</name>
<value>com.mysql.jdbc.Driver</value>
</property>
<property>
<name>javax.jdo.option.ConnectionUserName</name>
<value>root</value>
</property>
<property>
<name>javax.jdo.option.ConnectionPassword</name>
<value>3090_Cmok</value>
</property>

<property>
    <name>hive.server2.authentication</name>
    <value>NONE</value>
  </property>
  <property>
    <name>hive.server2.thrift.client.user</name>
    <value>root</value>
  </property>
  <property>
    <name>hive.server2.thrift.client.password</name>
    <value>root</value>
  </property>
</configuration>

<!-- MySQL database password: <value>3090_Cmok</value>; replace 3090_Cmok with your own -->
<!-- password=root above effectively allows a simple login, convenient for client connections -->
<!-- If MySQL runs on a remote host, put that host's IP or hostname in the JDBC URL -->
====================================

2 Hadoop core-site.xml configuration (note: exactly 5 property blocks, no more, no fewer!!!)

    <property>
        <name>fs.defaultFS</name>
        <value>hdfs://192.168.64.128:9000</value>
    </property>
    <property>
        <name>hadoop.tmp.dir</name>
        <value>file:/home/hadoop/temp</value>
    </property>
    <property>
        <name>hadoop.proxyuser.root.groups</name>
        <value>*</value>
    </property>
    <property>
        <name>hadoop.proxyuser.root.hosts</name>
        <value>*</value>
    </property>
    <property>
        <name>hadoop.proxyuser.root.users</name>
        <value>*</value>
    </property>

3. Copy in the MySQL JDBC driver jar (under /opt/soft/hive110/lib)

4. Configure environment variables

vim /etc/profile
​
#Hive
export HIVE_HOME=/opt/soft/hive110
export PATH=$PATH:$HIVE_HOME/bin
​
:wq
​
source /etc/profile

5. Initialize the metastore database

schematool -dbType mysql -initSchema

6. Start Hive

zkServer.sh start
start-all.sh
​
hive --service metastore
​
hive --service hiveserver2
​
hive

7. HQL

show databases;
create database mydemo;
use mydemo;
create table userinfos(userid int,username string,birthday string);
insert into userinfos values(1,'zs',30);
select * from userinfos;
 

 

9 Install HBase

Prerequisite: drag the HBase package into /opt first!!!

cd /opt/
tar -zxf hbase-1.2.0-cdh5.14.2.tar.gz
mv hbase-1.2.0-cdh5.14.2 /opt/soft/hbase120
cd /opt/soft/hbase120/conf/
ls

 vim hbase-env.sh

export JAVA_HOME=/opt/soft/jdk180
export HBASE_MANAGES_ZK=false
:wq
​

 vim hbase-site.xml

<property>
     <name>hbase.rootdir</name>
     <value>hdfs://192.168.64.210:9000/hbase</value>
</property>
<!-- not needed in standalone mode; set to true for distributed mode -->
<property>
     <name>hbase.cluster.distributed</name>
     <value>true</value>
</property>
<!-- not needed in standalone mode; in distributed mode, set to the Zookeeper data path -->
<property>
     <name>hbase.zookeeper.property.dataDir</name>
     <value>/home/cm/hbase</value>
</property>
<!-- 1.2.4 -->


#Same configuration without comments
<property>
     <name>hbase.rootdir</name>
     <value>hdfs://192.168.64.128:9000/hbase</value>
</property>
<property>
     <name>hbase.cluster.distributed</name>
     <value>true</value>
</property>
<property>
     <name>hbase.zookeeper.property.dataDir</name>
     <value>/opt/soft/hbase120/data</value>
</property>
<property>
     <name>hbase.zookeeper.property.clientPort</name>
     <value>2181</value>
</property>

Configure environment variables

vim /etc/profile
==========================
#hbase env
export HBASE_HOME=/opt/soft/hbase120
export PATH=$PATH:$HBASE_HOME/bin
==========================
:wq
source /etc/profile
 

Start Hadoop first, then HBase

start-all.sh
#jps should show 7 processes
jps
#Start HBase (adds HMaster and HRegionServer)
start-hbase.sh
hbase shell
list

Create a namespace and tables

create_namespace 'mydemo'
create 'mydemo:userinfos','base'

create 'events','base'

create 'eventsattends','base'
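A quick sanity check against the table just created (the row key and values below are made-up examples):

put 'mydemo:userinfos','1','base:username','zs'
get 'mydemo:userinfos','1'
scan 'mydemo:userinfos'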

#Drop a table
disable 'mydemo:userinfos'
drop 'mydemo:userinfos'
exit
​

10 Install Zeppelin

tar -zxf zeppelin-0.8.1-bin-all.tgz -C /opt/soft/

hdfs dfs -cat /hive/warehouse/mydemo.db/userinfos/000000_0


cd /opt/soft/

ls

mv zeppelin-0.8.1-bin-all/ zeppelin081

ls

cd /opt/soft/zeppelin081/conf/

ls

cp zeppelin-site.xml.template zeppelin-site.xml

vim zeppelin-site.xml
==============================
<property>
  <name>zeppelin.helium.registry</name>
  <value>helium</value>
</property>
==============================

cp zeppelin-env.sh.template zeppelin-env.sh

vim zeppelin-env.sh
==============================
export JAVA_HOME=/opt/soft/jdk180
export HADOOP_CONF_DIR=/opt/soft/hadoop260/etc/hadoop
==============================

cp /opt/soft/hive110/conf/hive-site.xml /opt/soft/zeppelin081/conf/

cp /opt/soft/hadoop260/share/hadoop/common/hadoop-common-2.6.0-cdh5.14.2.jar /opt/soft/zeppelin081/interpreter/jdbc/

cp /opt/soft/hive110/lib/hive-jdbc-1.1.0-cdh5.14.2-standalone.jar /opt/soft/zeppelin081/interpreter/jdbc/

2. Configure environment variables

vim /etc/profile
#Zeppelin
export ZEPPELIN_HOME=/opt/soft/zeppelin081
export PATH=$PATH:$ZEPPELIN_HOME/bin
​
:wq
​
source /etc/profile

3. Start Zeppelin

cd /opt/soft/zeppelin081/bin/

./zeppelin-daemon.sh start

http://192.168.64.128:8080/ #open this address in a browser to reach Zeppelin
#http://192.168.64.128:50070/ #Hadoop web UI

 

(2) Set the interpreter properties

default.driver   org.apache.hive.jdbc.HiveDriver

default.url     jdbc:hive2://192.168.64.128:10000

default.user    hive
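Once those properties are saved on the jdbc interpreter, a notebook paragraph can query Hive directly; for example (assuming the mydemo.userinfos table created earlier and hiveserver2 running on port 10000):

%jdbc
select * from mydemo.userinfos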

11 Install Sqoop

Prerequisites

1. File configuration

cd /opt/

tar -zxf sqoop-1.4.6-cdh5.14.2.tar.gz

ls

mv sqoop-1.4.6-cdh5.14.2 /opt/soft/sqoop146

cd /opt/soft/sqoop146/conf/

cp sqoop-env-template.sh sqoop-env.sh

vim sqoop-env.sh

=====================

export HADOOP_COMMON_HOME=/opt/soft/hadoop260

export HADOOP_MAPRED_HOME=/opt/soft/hadoop260

export HBASE_HOME=/opt/soft/hbase120

export HIVE_HOME=/opt/soft/hive110

export ZOOKEEPER_HOME=/opt/soft/zk345
export ZOOCFGDIR=/opt/soft/zk345

=====================

:wq

2. Configure environment variables

vim /etc/profile

=====================

#Sqoop
export SQOOP_HOME=/opt/soft/sqoop146
export PATH=$PATH:$SQOOP_HOME/bin

=====================

:wq

source /etc/profile

3. Copy in the jar files

 

 

#1

cp /opt/soft/hadoop260/share/hadoop/common/hadoop-common-2.6.0-cdh5.14.2.jar  /opt/soft/sqoop146/lib/
#2

cp /opt/soft/hive110/lib/hive-jdbc-1.1.0-cdh5.14.2-standalone.jar /opt/soft/sqoop146/lib/

4 In Zeppelin, import a MySQL table into HDFS with Sqoop, then map it in Hive

%sh
sqoop import --connect jdbc:mysql://192.168.64.128:3306/party --table users --username root --password ok --delete-target-dir --target-dir /party/users --split-by user_id -m 1
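To finish the "map it in Hive" half, one hedged sketch is an external table over the target directory (the column names and types below are placeholders — match them to the real users table; a default Sqoop text import separates fields with ','):

create external table if not exists mydemo.users_ext(
  user_id string,
  locale string,
  birthyear string
)
row format delimited fields terminated by ','
location '/party/users';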

12 Install Elasticsearch

Prerequisites

Elasticsearch distributed installation, step 1

#elasticsearch-head will be installed as a web UI, so first install Node.js (mainly for npm)

​
wget https://npm.taobao.org/mirrors/node/v11.0.0/node-v11.0.0.tar.gz
​
tar -zxvf node-v11.0.0.tar.gz
​
mv node-v11.0.0 /opt/soft/
​
cd /opt/soft/node-v11.0.0
​
yum install gcc gcc-c++
​
./configure
​
make
​
make install
​
node -v
​

Step 2

cd /opt/
tar -zxf elasticsearch-6.7.1.tar.gz
mv elasticsearch-6.7.1 /opt/soft/es671
cd /opt/soft/es671/config/
vim elasticsearch.yml
==============
#change these lines
cluster.name: es-app
node.name: es-1
network.host: 192.168.64.128
http.port: 9200
#add these lines
http.cors.enabled: true 
http.cors.allow-origin: "*"
==============
:wq
​
#Create a regular user (Elasticsearch cannot run as root)
useradd cm
passwd cm  
ok
ok
su cm
su
vim /etc/security/limits.conf 
#Append at the end of the file; issue 1: max open file descriptors too low
​
cm soft nofile 65536
cm hard nofile 131072
cm soft nproc 4096
cm hard nproc 4096
​
vim /etc/sysctl.conf
#Append at the end of the file; issue 2: max virtual memory areas too low
vm.max_map_count=655360
​
#apply
sysctl -p
​
#give the new user ownership
chown cm:cm -R /opt/soft/es671/
​
su cm
cd ..
cd /opt/soft/es671/bin/
ls
./elasticsearch
#check in a browser
192.168.64.128:9200
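The same check from the shell:

curl http://192.168.64.128:9200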

Step 3 (open a new terminal window)

cd /opt/
#install zip/unzip
yum install -y unzip zip
#unzip the archive
unzip elasticsearch-head-master.zip
​
mv elasticsearch-head-master /opt/soft/eshead
​
cd /opt/soft/eshead/
#install the npm dependencies; a file-not-found error at the end can be ignored (it does not affect use)
npm install
​
#open another new window
cd /opt/soft/eshead
npm run start
#open in a browser (web UI for Elasticsearch)
http://192.168.64.128:9100


Reposted from blog.csdn.net/just_learing/article/details/126333328