spark部署(集群)

前提部署好zookeeper环境

下载 spark 压缩包(要求 JDK 1.7 及以上版本)

● 修改配置

[root@localhost conf]# pwd
/usr/local/apps/spark-2.1.3-bin-hadoop2.7/conf
[root@localhost conf]# cp slaves.template ./slaves
[root@localhost conf]# vi slaves
#添加worker节点(hostname),默认有一个localhost,我就一台虚拟机,将就着用,所以就不需要再填节点,
#多个节点需要填写各自的ip或者hostname
localhost
[root@localhost conf]# cp spark-env.sh.template spark-env.sh
[root@localhost conf]# vi spark-env.sh
#添加如下(注意:export 必须小写,写成大写 EXPORT 会报 "EXPORT: command not found" 错误)
export JAVA_HOME=/usr/java/jdk1.7.0_79
export SPARK_DAEMON_JAVA_OPTS="-Dspark.deploy.recoveryMode=ZOOKEEPER -Dspark.deploy.zookeeper.url=localhost:2181 -Dspark.deploy.zookeeper.dir=/spark"

● 启动master

[root@localhost spark-2.1.3-bin-hadoop2.7]# ./sbin/start-master.sh 
/usr/local/apps/spark-2.1.3-bin-hadoop2.7/conf/spark-env.sh: line 67: EXPORT: command not found
#上面这行报错是因为 spark-env.sh 中把 export 误写成了大写 EXPORT,改成小写 export 即可消除
starting org.apache.spark.deploy.master.Master, logging to /usr/local/apps/spark-2.1.3-bin-hadoop2.7/logs/spark-root-org.apache.spark.deploy.master.Master-1-localhost.localdomain.out
[root@localhost spark-2.1.3-bin-hadoop2.7]# jps
14263 Launcher
20167 ZooKeeperMain
20661 Master
19231 QuorumPeerMain
20745 Jps

● 启动master和work

#需要输入密码,最好配置 SSH 免密登录
[root@localhost sbin]# ./start-all.sh 
/usr/local/apps/spark-2.1.3-bin-hadoop2.7/conf/spark-env.sh: line 67: EXPORT: command not found
org.apache.spark.deploy.master.Master running as process 21210.  Stop it first.
/usr/local/apps/spark-2.1.3-bin-hadoop2.7/conf/spark-env.sh: line 67: EXPORT: command not found
root@localhost's password: 
localhost: /usr/local/apps/spark-2.1.3-bin-hadoop2.7/conf/spark-env.sh: line 67: EXPORT: command not found
localhost: starting org.apache.spark.deploy.worker.Worker, logging to /usr/local/apps/spark-2.1.3-bin-hadoop2.7/logs/spark-root-org.apache.spark.deploy.worker.Worker-1-localhost.localdomain.out
[root@localhost sbin]# jps
14263 Launcher
20167 ZooKeeperMain
21210 Master
21374 Worker
19231 QuorumPeerMain
21441 Jps 

● 浏览器查看

http://192.168.x.xx:8080/

猜你喜欢

转载自blog.csdn.net/bb23417274/article/details/83039449
今日推荐