Hadoop + Hive + Presto Environment Setup

Environment

master: vertica1 172.16.1.234

slave1: vertica2 172.16.1.235

slave2: vertica3 172.16.1.236

postgresql: 172.16.0.80

Versions:

jdk: 1.8

Hadoop: 2.8.3 (download link)

Hive: 2.3.2 (download link)

Presto: 0.197 (download link)

sqoop: 1.4.7 (download link)


Preparation

  1. Create the user
  2. Install the JDK
  3. Set up passwordless SSH login
  4. Create directories
  5. Configure /etc/hosts

Create the user

useradd hadoop
echo  'hadoop'|passwd --stdin hadoop

Install the JDK
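
The original leaves this step empty; a minimal sketch, assuming the Oracle JDK 8u112 rpm (the package name is an assumption, chosen to match the /usr/java/jdk1.8.0_112 JAVA_HOME used in the environment-variable section below):

# install the JDK rpm on every node (package name is an assumption)
rpm -ivh jdk-8u112-linux-x64.rpm
# verify
java -version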

Passwordless SSH login

# Generate a key pair
ssh-keygen -t rsa

# Configure trusted hosts
cd .ssh
touch authorized_keys
cat id_rsa.pub >> authorized_keys
# The public keys of all hosts must be added to authorized_keys

# Set file permissions
chmod 700 ../.ssh/
chmod 600 ./authorized_keys 

# Test passwordless login from another node
ssh hadoop@vertica2
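
To avoid copying public keys between hosts by hand, each host's key can be pushed to the others with ssh-copy-id; a minimal sketch, run on every node as the hadoop user (hostnames from the Environment section above):

# run on each of vertica1, vertica2 and vertica3 as the hadoop user
for host in vertica1 vertica2 vertica3; do
  ssh-copy-id -i ~/.ssh/id_rsa.pub hadoop@${host}
done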

Create directories

# /data/hadoop is used as the Hadoop installation path
mkdir -p /data/hadoop
chown -R hadoop:hadoop /data/hadoop

Configure the hosts file

cat /etc/hosts

127.0.0.1   localhost
::1         localhost
172.16.1.234   vertica1
172.16.1.235   vertica2
172.16.1.236   vertica3

All of the configuration steps below are performed as the hadoop user.

Hadoop installation and configuration

core-site.xml

# core-site.xml holds the Hadoop core (common) configuration
<configuration>
  <property>
    <name>fs.defaultFS</name>
    <value>hdfs://vertica1:8020</value>
    <description>Hostname and port of the NameNode</description>
  </property>
       
  <property>
    <name>io.file.buffer.size</name>
    <value>131072</value>
    <description>Read/write buffer size</description>
  </property>
  
  <property>
    <name>hadoop.tmp.dir</name>
    <value>file:/data/hadoop/tmp</value>
    <description>Base directory for temporary files</description>
  </property>
</configuration>

hdfs-site.xml

<configuration>
  <property>
    <name>dfs.namenode.name.dir</name>
    <value>file:/data/hadoop/hdfs/name</value>
    <description>Directory where the NameNode stores the namespace image and edit logs</description>
  </property>
  
  <property>
    <name>dfs.datanode.data.dir</name>
    <value>file:/data/hadoop/hdfs/data</value>
    <description>Local directory where the DataNode stores its blocks</description>
  </property>
  
  <property>
    <name>dfs.replication</name>
    <value>3</value>
    <description>Number of replicas HDFS keeps for each block (default: 3)</description>
  </property>
  
  <property>
    <name>dfs.webhdfs.enabled</name>
    <value>true</value>
  </property>
</configuration>
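
The local directories referenced in core-site.xml and hdfs-site.xml are not created in the original write-up; creating them up front as the hadoop user on every node avoids permission surprises when the daemons start:

# directories for hadoop.tmp.dir, dfs.namenode.name.dir and dfs.datanode.data.dir
mkdir -p /data/hadoop/tmp /data/hadoop/hdfs/name /data/hadoop/hdfs/data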

yarn-site.xml

<configuration>
  <property>
    <name>yarn.nodemanager.aux-services</name>
    <value>mapreduce_shuffle</value>
  </property>
  
  <property>
    <name>yarn.nodemanager.aux-services.mapreduce.shuffle.class</name>
    <value>org.apache.hadoop.mapred.ShuffleHandler</value>
  </property>
  
  <property>
    <name>yarn.resourcemanager.address</name>
    <value>vertica1:8032</value>
  </property>
  
  <property>
    <name>yarn.resourcemanager.scheduler.address</name>
    <value>vertica1:8030</value>
  </property>
       
  <property>
    <name>yarn.resourcemanager.resource-tracker.address</name>
    <value>vertica1:8031</value>
  </property>
  
  <property>
    <name>yarn.resourcemanager.admin.address</name>
    <value>vertica1:8033</value>
  </property>
  
  <property>
    <name>yarn.resourcemanager.webapp.address</name>
    <value>vertica1:8088</value>
  </property>
</configuration>

mapred-site.xml

<configuration>
  <property>
    <name>mapreduce.framework.name</name>
    <value>yarn</value>
    <final>true</final>
  </property>
  
  <property>
    <name>mapreduce.jobtracker.http.address</name>
    <value>vertica1:50030</value>
  </property>
  
  <property>
    <name>mapreduce.jobhistory.address</name>
    <value>vertica1:10020</value>
  </property>
  
  <property>
    <name>mapreduce.jobhistory.webapp.address</name>
    <value>vertica1:19888</value>
  </property>
  
  <property>
    <name>mapred.job.tracker</name>
    <value>http://vertica1:9001</value>
  </property>
</configuration>

slaves

cat ./slaves 
# slaves lists the DataNode/NodeManager (worker) nodes
vertica1
vertica2
vertica3
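
The configuration edited so far has to be identical on all three nodes; a minimal sketch for distributing the whole Hadoop directory from the master, assuming the same /data/hadoop/hadoop layout on every host and the passwordless SSH set up earlier (rsync can be replaced by scp -r):

# run on vertica1 after finishing the configuration files
for host in vertica2 vertica3; do
  rsync -a /data/hadoop/hadoop/ hadoop@${host}:/data/hadoop/hadoop/
done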

Environment variables

# Append the following to /etc/profile,
# then run: source /etc/profile

export JAVA_HOME=/usr/java/jdk1.8.0_112
export JRE_HOME=$JAVA_HOME/jre
export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar:$JRE_HOME/lib

export HADOOP_INSTALL=/data/hadoop/hadoop
export HADOOP_HOME=$HADOOP_INSTALL
export HADOOP_CONF_DIR=$HADOOP_HOME/etc/hadoop
export PATH=$PATH:$JAVA_HOME/bin:$HADOOP_INSTALL/bin:$HADOOP_INSTALL/sbin

Start Hadoop

# Format HDFS (first run only)
$HADOOP_HOME/bin/hdfs namenode -format
# Start all Hadoop daemons
$HADOOP_HOME/sbin/start-all.sh
# Check which daemons are running
jps

ResourceManager
NodeManager
NameNode
DataNode
SecondaryNameNode

# The master should show the 5 processes listed above.
# The slaves should show the DataNode and NodeManager processes.
# If a daemon did not start, check its log under $HADOOP_HOME/logs.
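
Beyond jps, a couple of quick sanity checks using standard Hadoop commands and the default Hadoop 2.8 web UI ports:

# cluster report: should list all three DataNodes
hdfs dfsadmin -report
# try writing to HDFS
hdfs dfs -mkdir -p /tmp/test && hdfs dfs -ls /
# web UIs: NameNode at http://vertica1:50070, YARN at http://vertica1:8088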

Hive installation and configuration

hive-site.xml

<configuration>
  <property>
    <name>javax.jdo.option.ConnectionURL</name>
    <value>jdbc:postgresql://172.16.1.80:5432/hive?create=true&amp;useUnicode=true&amp;characterEncoding=utf8&amp;allowEncodingChanges=true&amp;loglevel=1&amp;useSSL=false</value>
    <description>JDBC connection string for the metastore database</description>
  </property> 
  
  <property>
    <name>javax.jdo.option.ConnectionDriverName</name>
    <value>org.postgresql.Driver</value>
    <description>Driver class name for a JDBC metastore</description>
  </property> 
  
  <property>
    <name>javax.jdo.option.ConnectionUserName</name>
    <value>hadoop</value>
    <description>Database user for the metastore connection</description>
  </property>
  
  <property>
    <name>javax.jdo.option.ConnectionPassword</name>
    <value>hadoop</value>
    <description>Password of the metastore database user</description>
  </property>
  
  <property>
    <name>hive.server2.thrift.port</name>
    <value>10000</value>
  </property>
  
  <property>
    <name>hive.server2.thrift.bind.host</name>
    <value>vertica1</value>
    <description>Host to bind the HiveServer2 Thrift service to</description>
  </property>

  <property>
    <name>hive.server2.authentication</name>
    <value>NONE</value>
    <description></description>
  </property>

  <property>
    <name>hive.server2.enable.doAs</name>
    <value>true</value>
  </property>

  <property>
    <name>datanucleus.schema.autoCreateAll</name>
    <value>false</value>
    <description>Automatically create the metastore schema (left false; schematool is used instead)</description>
  </property>
  
  <property>
    <name>hive.server2.logging.operation.log.location</name>
    <value>/data/hadoop/hive/tmp/${user.name}/operation_logs</value>
    <description>Directory for HiveServer2 operation logs</description>
  </property>

  <property>
    <name>hive.metastore.warehouse.dir</name>
    <value>/warehouse</value>
  </property>
  
  <property>
    <name>hive.exec.scratchdir</name>
    <value>/tmp/hive</value>
  </property>
  <property>
    <name>hive.querylog.location</name>
    <value>/log</value>
    <description></description>
  </property>

  <property>
    <name>hive.metastore.uris</name>
    <value>thrift://172.16.1.234:9083</value>
    <description>Thrift URI for the remote metastore. Used by metastore client to connect to remote metastore.</description>
  </property>

  <property>
    <name>hive.server2.transport.mode</name>
    <value>binary</value>
    <description>
      When set to binary, HiveServer2 listens on port 10000; when set to http, it listens on port 10001 instead.
    </description>
  </property>
</configuration>

Using PostgreSQL as the metastore database requires some additional setup; see

https://my.oschina.net/sucre/blog/365351
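
Before running schematool in the next step, the metastore database and the hadoop user configured in hive-site.xml have to exist on the PostgreSQL side; a minimal sketch, assuming psql access as a superuser (database and role names match the configuration above):

-- run in psql on the PostgreSQL server
CREATE ROLE hadoop LOGIN PASSWORD 'hadoop';
CREATE DATABASE hive OWNER hadoop ENCODING 'UTF8';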

Start Hive

# Create the metastore database in PostgreSQL before initializing.
# After a successful initialization, the database will contain a number of new tables.
# Initialize the metastore schema
schematool -dbType postgres -initSchema

# Start the Hive metastore service
nohup hive --service metastore &
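
The hive-site.xml above also configures HiveServer2 (Thrift port 10000), but only the metastore is started here; if JDBC access is wanted as well, a sketch using the standard Hive commands:

# start HiveServer2 in the background
nohup hive --service hiveserver2 &
# test the connection with beeline (hive.server2.authentication is NONE)
beeline -u jdbc:hive2://vertica1:10000 -n hadoop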

Presto installation and configuration

# After unpacking Presto, the configuration directory and files have to be created by hand
cd /data/hadoop/presto
mkdir -p ./etc/catalog
touch ./etc/config.properties
touch ./etc/catalog/jmx.properties
touch ./etc/jvm.config
touch ./etc/log.properties
touch ./etc/node.properties

node.properties

# cluster name (must be the same on every node)
node.environment=presto
# node identifier (must be unique per node)
node.id=node1
# data directory
node.data-dir=/data/hadoop/presto/data

jvm.config

-server
-Xmx4G
-XX:+UseG1GC
-XX:G1HeapRegionSize=32M
-XX:+UseGCOverheadLimit
-XX:+ExplicitGCInvokesConcurrent
-XX:+HeapDumpOnOutOfMemoryError
-XX:+ExitOnOutOfMemoryError

config.properties

# master (coordinator) node
# this node acts as the coordinator
coordinator=true
# allow scheduling query work on the coordinator node as well
node-scheduler.include-coordinator=true
http-server.http.port=8080
query.max-memory=4GB
query.max-memory-per-node=1GB
discovery-server.enabled=true
discovery.uri=http://172.16.1.234:8080

# slave
coordinator=false
http-server.http.port=8080
query.max-memory=4GB
query.max-memory-per-node=1GB
discovery.uri=http://172.16.1.234:8080

log.properties

# log level for Presto's own classes
com.facebook.presto=INFO

catalog/jmx.properties

connector.name=jmx
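
To actually query Hive from Presto, a Hive catalog file is needed as well (the original stops at the jmx catalog); a minimal sketch of etc/catalog/hive.properties, assuming the metastore Thrift address configured in hive-site.xml and the HADOOP_CONF_DIR path used earlier:

connector.name=hive-hadoop2
hive.metastore.uri=thrift://172.16.1.234:9083
hive.config.resources=/data/hadoop/hadoop/etc/hadoop/core-site.xml,/data/hadoop/hadoop/etc/hadoop/hdfs-site.xml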

Once the configuration is done, start Presto and test

/data/hadoop/presto/bin/launcher run   # run in the foreground
/data/hadoop/presto/bin/launcher start # run as a daemon in the background
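
A quick way to verify the cluster and the Hive catalog is the Presto CLI; a sketch assuming presto-cli-0.197-executable.jar has been downloaded separately and renamed to presto:

# list the nodes registered with the coordinator
./presto --server 172.16.1.234:8080 --execute "SELECT * FROM system.runtime.nodes;"
# open an interactive session against the Hive catalog
./presto --server 172.16.1.234:8080 --catalog hive --schema default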

Appendix: importing data with Sqoop

# Import data from PostgreSQL into Hive
# Create the target database in Hive first
hive> create database testdata;
# Import: the data is first loaded from PostgreSQL into HDFS, then moved from HDFS into Hive
sqoop import-all-tables  --connect "jdbc:postgresql://172.16.0.80/testdata" --username username --password password --hive-database testdata -m 3 --direct  --hive-import   --create-hive-table
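
A quick check, after the import finishes, that the tables actually landed in Hive (and are therefore visible to Presto through the hive catalog):

hive> use testdata;
hive> show tables;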

Reposted from www.cnblogs.com/dears/p/8944358.html