原文地址: https://blog.csdn.net/dream_flying_bj/article/details/73188952
######### Install Storm #########
# storm_install.sh (listed below) is distributed and executed via ansible.
# 1) Push the Storm tarball to every host in the 'elk' group.
sudo ansible -i ansible_hosts elk -m copy -a "src=/letv/apache-storm-1.1.0.tar.gz dest=/letv/apache-storm-1.1.0.tar.gz"
# 2) Run the install script on every host.
ansible -i ansible_hosts elk -m script -a '/root/script/storm_install.sh'
# 3) Start the supervisor daemon on every host.
#    (In the original paste this line was fused with the 'ss -lnpt' check
#    below; they are two separate ansible invocations.)
ansible -i ansible_hosts elk -m shell -a 'source /etc/profile && storm supervisor &'
# 4) Verify listening ports and running processes.
ansible -i ansible_hosts elk -m shell -a 'ss -lnpt'
ansible -i ansible_hosts elk -m shell -a 'ps -ef | grep storm'
#!/bin/bash
# storm_install.sh -- unpack Storm, write storm.yaml, create its work/log
# directories, and start nimbus.
# (Reconstructed from a mangled paste: tar/ln were fused on one line, the
# heredoc below had lost its terminating EOF, and several config lines were
# duplicated outside the heredoc.)
set -e
cd /letv/
tar zvxf apache-storm-1.1.0.tar.gz
ln -s /letv/apache-storm-1.1.0 /usr/local/storm
# Write the cluster configuration. The delimiter is quoted so the content is
# written literally, with no shell expansion.
cat >> /letv/apache-storm-1.1.0/conf/storm.yaml << 'EOF'
#Storm集群对应的ZooKeeper集群的主机列表
storm.zookeeper.servers:
- "bops-10-183-93-129"
- "bops-10-183-93-131"
- "bops-10-183-93-132"
#Storm集群对应的ZooKeeper集群的服务端口,ZooKeeper默认端口为21818
storm.zookeeper.port: 21818
#Storm使用的本地文件系统目录(必须存在并且Storm进程可读写)
storm.local.dir: /data/hadoop/data5/storm-workdir
#Storm运行模式,集群模式需设置为distributed(分布式的)
storm.cluster.mode: distributed
#storm.local.mode.zmq true
#Storm的元数据在ZooKeeper中存储的根目录
storm.zookeeper.root: /storm
storm.zookeeper.session.timeout: 60000
#整个Storm集群的Nimbus节点
nimbus.host: bops-10-183-93-129
storm.log.dir: /data/hadoop/data4/storm-logs
#Storm的Slot,最好设置成OS核数的整数倍;同时由于Storm是基于内存的实时计算,Slot数不要大于每台物理机可运行Slot个数:(物理内存-虚拟内存)/单个Java进程最大可占用内存数
supervisor.slots.ports:
- 6700
- 6701
- 6702
- 6703
EOF
# Create the work/log directories referenced by storm.yaml above
# (must exist and be writable by the Storm processes).
mkdir -p /data/hadoop/data5/storm-workdir
mkdir -p /data/hadoop/data4/storm-logs
# Start nimbus (only meaningful on the nimbus host, bops-10-183-93-129).
storm nimbus &
###########################start-nimbus.sh
#!/bin/bash
# Rotate the nimbus log and (re)start nimbus if its JVM is not running.
source /etc/profile
cd "$(dirname "$0")" || exit 1
pwd
DAY=$(date +%F)
LOG_DIR=/data/hadoop/data4/storm-logs
# Move the current log aside, tagged with today's date, before restarting.
if [ -f "$LOG_DIR/nimbus.log" ]; then
    mv "$LOG_DIR/nimbus.log" "$LOG_DIR/nimbus.log_${DAY}"
fi
# Only start nimbus when jps shows no nimbus process.
COUNT=$(jps | grep -c nimbus)
if [ "$COUNT" -eq 0 ]; then
    nohup /usr/local/storm/bin/storm nimbus 1>"$LOG_DIR/nimbus.log" 2>&1 &
    sleep 5
fi
################# Start supervisor / nimbus monitor
storm supervisor &
##############start-storm.sh
#! /bin/bash
# Monitor script: via monitor-java-process.sh, restart the supervisor on each
# slave and nimbus on the master when their JVM processes are missing.
# (Reconstructed from a mangled paste: the 'pwd'/'DAY=', 'TIME='/'LOG=' and
# 'done'/'NIMBUS_HOST=' pairs were each fused onto single lines.)
source /etc/profile
cd "$(dirname "$0")" || exit 1
pwd
DAY=$(date +%F)
OLD=$(date -d "2 days ago" +%F)
TIME=$(date +"%F %H:%M:%S")   # kept from the original; not used below
LOG="log_monitor_storm_supervisor_$DAY"
# Keep only two days of monitor logs.
rm -f "log_monitor_storm_supervisor_$OLD.log"
echo "starting supervisors,time: $(date)" 1>> "$LOG.log" 2>&1
# ../conf/slaves holds one hostname per line; hostnames contain no spaces,
# so word-splitting the file content here is safe.
for stormserver in $(cat ../conf/slaves)
do
    echo "stormserver: $stormserver"
    echo sh monitor-java-process.sh "${stormserver}" 15801342789 supervisor "sh /usr/local/storm/bin/start-supervisor.sh"
    sh monitor-java-process.sh "${stormserver}" 15801342789 supervisor "sh /usr/local/storm/bin/start-supervisor.sh"
done 1>> "$LOG.log" 2>&1
# The nimbus host is the last line of ../conf/nimbus.
NIMBUS_HOST=$(tail -n 1 ../conf/nimbus)
sh monitor-java-process.sh "${NIMBUS_HOST}" 15801342789 nimbus "sh /usr/local/storm/bin/start-nimbus.sh" 1>> "$LOG.log" 2>&1
echo "ending time: $(date)" 1>> "$LOG.log" 2>&1
############ Start the Storm UI
storm ui &
####################ui############
#!/bin/bash
# Rotate the UI log and (re)start the Storm UI if it is not running.
source /etc/profile
cd "$(dirname "$0")" || exit 1
pwd
DAY=$(date +%F)
LOG_DIR=/data/hadoop/data4/storm-logs
# Move the current log aside, tagged with today's date, before restarting.
if [ -f "$LOG_DIR/storm-ui.log" ]; then
    mv "$LOG_DIR/storm-ui.log" "$LOG_DIR/storm-ui.log_${DAY}"
fi
# The UI shows up in jps as 'core'; only start it when absent.
COUNT=$(jps | grep -c core)
if [ "$COUNT" -eq 0 ]; then
    nohup /usr/local/storm/bin/storm ui > "$LOG_DIR/storm-ui.log" 2>&1 &
    sleep 5
fi
#################start-supervisor.sh
#!/bin/bash
# Rotate the supervisor log and (re)start the supervisor if it is not running.
source /etc/profile
cd "$(dirname "$0")" || exit 1
pwd
DAY=$(date +%F)
LOG_DIR=/data/hadoop/data4/storm-logs
# Move the current log aside, tagged with today's date, before restarting.
if [ -f "$LOG_DIR/supervisor.log" ]; then
    mv "$LOG_DIR/supervisor.log" "$LOG_DIR/supervisor.log_${DAY}"
fi
# Only start the supervisor when jps shows no supervisor process.
COUNT=$(jps | grep -c supervisor)
if [ "$COUNT" -eq 0 ]; then
    echo "starting supervisor"
    echo "$(date "+%Y%m%d %H:%M:%S") COUNT=0, starting supervisor" >> "$LOG_DIR/supervisor-restart.log"
    nohup /usr/local/storm/bin/storm supervisor 1> "$LOG_DIR/supervisor.log" 2>&1 &
    sleep 5
    #bin/storm all-supervisor >/dev/null 2>&1 &
fi
1. Nimbus:在 master 机器上,在监控下执行 bin/storm nimbus 命令。
2. Supervisor:在每个工作节点上,在监控下执行 bin/storm supervisor 命令。Supervisor 的后台进程主要负责启动/停止该机器上的 worker 进程。
3. UI:在 master 机器上,在监控下执行 bin/storm ui 命令启动 Storm UI(Storm UI 是一个可以在浏览器中方便地监控集群与拓扑运行状况的站点)后台进程。可以通过 http://{nimbus.host}:8080 来访问 UI 站点。
最后,加环境变量:
#!/bin/bash
# Append Storm/Kafka/Scala/Java environment variables to /etc/profile.
# The heredoc delimiter is quoted ('EOF') so that $JAVA_HOME, $PATH, etc. are
# written LITERALLY and only expanded when /etc/profile is sourced. With the
# original unquoted delimiter they would expand at write time -- $JAVA_HOME,
# $SCALA_HOME, $KAFKA_HOME and $STORM_HOME are unset in this script, which
# would bake a broken PATH into /etc/profile.
cat >> /etc/profile << 'EOF'
export JAVA_HOME=/usr/local/java
export SCALA_HOME=/usr/local/scala
export KAFKA_HOME=/usr/local/kafka
export PATH=$JAVA_HOME/bin:$SCALA_HOME/bin:$KAFKA_HOME/bin:$PATH
export STORM_HOME=/usr/local/storm
export PATH=${PATH}:${STORM_HOME}/bin
export ZK_CONNECT=10.183.93.129:21818,10.183.93.131:21818,10.183.93.132:21818/kafka
EOF
windows环境安装:
1、解压 apache-storm-1.2.2.zip
2、设置 STORM_HOME 环境变量:
3、path 中加入 %STORM_HOME%\bin
4、启动主控服务:
F:\apache-storm-1.2.2\bin>storm nimbus
5、启动工作服务:
F:\apache-storm-1.2.2\bin>storm supervisor
6、启动管理服务:
F:\apache-storm-1.2.2\bin>storm ui
7、启动日志服务:
F:\apache-storm-1.2.2\bin>storm logviewer
storm配置:
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
########### These MUST be filled in for a storm configuration
# Host list of the ZooKeeper ensemble this Storm cluster uses.
storm.zookeeper.servers:
- "192.168.0.109"
# ZooKeeper client port; 2181 is the ZooKeeper default.
storm.zookeeper.port: 2181
# Root znode under which Storm stores its metadata in ZooKeeper.
storm.zookeeper.root: /storm
storm.zookeeper.session.timeout: 60000
# Local filesystem directory used by Storm (must exist and be read/writable
# by the Storm processes).
storm.local.dir: F:/storm_data/word_dir
# Run mode; a cluster deployment sets this to 'distributed'. Commented out
# here, so the default applies for this local Windows setup.
#storm.cluster.mode: distributed
# Nimbus node of this Storm cluster.
# NOTE(review): nimbus.host is the legacy single-master setting, while
# nimbus.seeds at the bottom is its replacement -- and the two list different
# hosts here. Confirm which one this deployment actually intends.
nimbus.host: "192.168.0.115"
ui.port: 9090
storm.log.dir: F:/storm_data/storm-logs
# Worker slots; ideally a multiple of the OS core count. Since Storm computes
# in memory, keep the slot count no larger than
# (physical memory - reserved memory) / max memory per worker JVM.
supervisor.slots.ports:
- 6700
- 6701
- 6702
- 6703
nimbus.seeds: ["host1", "host2", "host3"]
#
#
# ##### These may optionally be filled in:
#
## List of custom serializations
# topology.kryo.register:
# - org.mycompany.MyType
# - org.mycompany.MyType2: org.mycompany.MyType2Serializer
#
## List of custom kryo decorators
# topology.kryo.decorators:
# - org.mycompany.MyDecorator
#
## Locations of the drpc servers
# drpc.servers:
# - "server1"
# - "server2"
## Metrics Consumers
## max.retain.metric.tuples
## - task queue will be unbounded when max.retain.metric.tuples is equal or less than 0.
## whitelist / blacklist
## - when none of configuration for metric filter are specified, it'll be treated as 'pass all'.
## - you need to specify either whitelist or blacklist, or none of them. You can't specify both of them.
## - you can specify multiple whitelist / blacklist with regular expression
## expandMapType: expand metric with map type as value to multiple metrics
## - set to true when you would like to apply filter to expanded metrics
## - default value is false which is backward compatible value
## metricNameSeparator: separator between origin metric name and key of entry from map
## - only effective when expandMapType is set to true
# topology.metrics.consumer.register:
# - class: "org.apache.storm.metric.LoggingMetricsConsumer"
# max.retain.metric.tuples: 100
# parallelism.hint: 1
# - class: "org.mycompany.MyMetricsConsumer"
# max.retain.metric.tuples: 100
# whitelist:
# - "execute.*"
# - "^__complete-latency$"
# parallelism.hint: 1
# argument:
# - endpoint: "metrics-collector.mycompany.org"
# expandMapType: true
# metricNameSeparator: "."
## Cluster Metrics Consumers
# storm.cluster.metrics.consumer.register:
# - class: "org.apache.storm.metric.LoggingClusterMetricsConsumer"
# - class: "org.mycompany.MyMetricsConsumer"
# argument:
# - endpoint: "metrics-collector.mycompany.org"
#
# storm.cluster.metrics.consumer.publish.interval.secs: 60
# Event Logger
# topology.event.logger.register:
# - class: "org.apache.storm.metric.FileBasedEventLogger"
# - class: "org.mycompany.MyEventLogger"
# arguments:
# endpoint: "event-logger.mycompany.org"
# Metrics v2 configuration (optional)
#storm.metrics.reporters:
# # Graphite Reporter
# - class: "org.apache.storm.metrics2.reporters.GraphiteStormReporter"
# daemons:
# - "supervisor"
# - "nimbus"
# - "worker"
# report.period: 60
# report.period.units: "SECONDS"
# graphite.host: "localhost"
# graphite.port: 2003
#
# # Console Reporter
# - class: "org.apache.storm.metrics2.reporters.ConsoleStormReporter"
# daemons:
# - "worker"
# report.period: 10
# report.period.units: "SECONDS"
# filter:
# class: "org.apache.storm.metrics2.filters.RegexFilter"
# expression: ".*my_component.*emitted.*"
jar文件在storm上运行:
/usr/storm/apache-storm-0.9.6/bin/storm jar kafkastorm-0.0.1-SNAPSHOT-jar-with-dependencies.jar net.zengzhiying.StormKafkaTopo kafkagostorm
前面是 storm 命令的绝对路径;jar 参数表示执行 jar 包;其后依次是该 jar 包的路径、要提交的 topology 主类;最后的 kafkagostorm 是我们提交的拓扑名称。