Configuring Storm

Copyright notice: this is the author's original article; do not reproduce without the author's permission. https://blog.csdn.net/zzw0221/article/details/82428485

1. Make sure ZooKeeper is already configured on the cluster.
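
Before moving on, it is worth confirming that the ZooKeeper quorum is actually up on every node. A minimal check, assuming the standard ZooKeeper scripts are on the PATH of each host (adjust paths for your install):

# Run on each ZooKeeper node; expect "Mode: leader" or "Mode: follower"
zkServer.sh status

# Or probe a node with the four-letter-word command (assumes nc is installed)
echo ruok | nc master.cdh.com 2181    # prints "imok" when the server is healthy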

2. Download Storm and unpack it.
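
The later steps assume Storm 1.2.2 unpacked under /opt. A minimal sketch of this step (the download URL points at the Apache archive and is an assumption; any mirror works):

cd /opt
wget https://archive.apache.org/dist/storm/apache-storm-1.2.2/apache-storm-1.2.2.tar.gz
tar -zxf apache-storm-1.2.2.tar.gz
mv apache-storm-1.2.2 storm-1.2.2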

3. Configure storm_env.ini

#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#


# Environment variables in the following section will be used
# in storm python script. They override the environment variables
# set in the shell.
[environment]

# The java implementation to use. If JAVA_HOME is not found we expect java to be in path
JAVA_HOME:/opt/java/jdk1.7.0_80

# JVM options to be used in "storm jar" commad
#STORM_JAR_JVM_OPTS:

4. Configure storm.yaml

# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

########### These MUST be filled in for a storm configuration
 storm.zookeeper.servers:
     - "master.cdh.com"
     - "slave1.cdh.com"
     - "slave2.cdh.com"
# 
 nimbus.seeds: ["master.cdh.com"]
 storm.local.dir: "/opt/storm-1.2.2/data"
# 
# 
# ##### These may optionally be filled in:
#    
## List of custom serializations
# topology.kryo.register:
#     - org.mycompany.MyType
#     - org.mycompany.MyType2: org.mycompany.MyType2Serializer
#
## List of custom kryo decorators
# topology.kryo.decorators:
#     - org.mycompany.MyDecorator
#
## Locations of the drpc servers
 drpc.servers:
     - "master.cdh.com"
     - "slave1.cdh.com"
     - "slave2.cdh.com"

## Metrics Consumers
## max.retain.metric.tuples
## - task queue will be unbounded when max.retain.metric.tuples is equal or less than 0.
## whitelist / blacklist
## - when none of configuration for metric filter are specified, it'll be treated as 'pass all'.
## - you need to specify either whitelist or blacklist, or none of them. You can't specify both of them.
## - you can specify multiple whitelist / blacklist with regular expression
## expandMapType: expand metric with map type as value to multiple metrics
## - set to true when you would like to apply filter to expanded metrics
## - default value is false which is backward compatible value
## metricNameSeparator: separator between origin metric name and key of entry from map
## - only effective when expandMapType is set to true
# topology.metrics.consumer.register:
#   - class: "org.apache.storm.metric.LoggingMetricsConsumer"
#     max.retain.metric.tuples: 100
#     parallelism.hint: 1
#   - class: "org.mycompany.MyMetricsConsumer"
#     max.retain.metric.tuples: 100
#     whitelist:
#       - "execute.*"
#       - "^__complete-latency$"
#     parallelism.hint: 1
#     argument:
#       - endpoint: "metrics-collector.mycompany.org"
#     expandMapType: true
#     metricNameSeparator: "."

## Cluster Metrics Consumers
# storm.cluster.metrics.consumer.register:
#   - class: "org.apache.storm.metric.LoggingClusterMetricsConsumer"
#   - class: "org.mycompany.MyMetricsConsumer"
#     argument:
#       - endpoint: "metrics-collector.mycompany.org"
#
# storm.cluster.metrics.consumer.publish.interval.secs: 60

# Event Logger
# topology.event.logger.register:
#   - class: "org.apache.storm.metric.FileBasedEventLogger"
#   - class: "org.mycompany.MyEventLogger"
#     arguments:
#       endpoint: "event-logger.mycompany.org"

# Metrics v2 configuration (optional)
#storm.metrics.reporters:
#  # Graphite Reporter
#  - class: "org.apache.storm.metrics2.reporters.GraphiteStormReporter"
#    daemons:
#        - "supervisor"
#        - "nimbus"
#        - "worker"
#    report.period: 60
#    report.period.units: "SECONDS"
#    graphite.host: "localhost"
#    graphite.port: 2003
#
#  # Console Reporter
#  - class: "org.apache.storm.metrics2.reporters.ConsoleStormReporter"
#    daemons:
#        - "worker"
#    report.period: 10
#    report.period.units: "SECONDS"
#    filter:
#        class: "org.apache.storm.metrics2.filters.RegexFilter"
#        expression: ".*my_component.*emitted.*"
 supervisor.slots.ports:
  - 6700
  - 6701
  - 6702
  - 6703
  - 6704
  - 6705
  - 6706
  - 6707
  - 6708
  - 6709
  - 6710
  - 6711
  - 6712
  - 6713
  - 6714
  - 6715
  - 6716
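
Two notes on the values above. storm.local.dir points at /opt/storm-1.2.2/data; creating that directory up front on every node avoids permission surprises (a sketch, assuming the Storm user owns /opt/storm-1.2.2):

mkdir -p /opt/storm-1.2.2/data

Also, drpc.servers is uncommented here; that only matters if you actually use DRPC, in which case each listed host additionally needs the DRPC daemon started (bin/storm drpc &). If DRPC is not needed, the block can stay commented out.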

5. Copy the Storm directory to the other machines:

scp -r /opt/storm-1.2.2/ slave1.cdh.com:/opt/
scp -r /opt/storm-1.2.2/ slave2.cdh.com:/opt/

6. Start Nimbus and the UI on the master machine:

bin/storm nimbus > /dev/null &
bin/storm ui > /dev/null &
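
To verify that both daemons came up, jps can be used (assuming the JDK's jps is on the PATH). On Storm 1.2.x the UI typically appears as a "core" process:

jps
# Expected output (PIDs will differ):
# 12345 nimbus
# 12346 core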

7. Start the supervisor on all three machines:

bin/storm supervisor &

8. Check the UI
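
The Storm UI listens on port 8080 by default (ui.port), so with the configuration above it should be reachable in a browser at:

http://master.cdh.com:8080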

 
