ssh_flume_zookeeper_kafka installation

1. Passwordless SSH login
ssh-keygen -t rsa
#Note: scp replaces the remote authorized_keys outright, so any keys already authorized there are lost.
scp -p ~/.ssh/id_rsa.pub root@master:/root/.ssh/authorized_keys
scp -p ~/.ssh/id_rsa.pub root@slave1:/root/.ssh/authorized_keys
scp -p ~/.ssh/id_rsa.pub root@slave2:/root/.ssh/authorized_keys
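If appending is preferred, or several nodes need to trust each other, ssh-copy-id is the safer idiom since it appends rather than overwrites; a sketch, assuming OpenSSH's ssh-copy-id is installed:

ssh-copy-id root@master
ssh-copy-id root@slave1
ssh-copy-id root@slave2
#verify: should log in without a password prompt
ssh root@slave1 hostname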


#jdk-8u111-linux-x64
export JAVA_HOME=/usr/local/jdk1.8.0_111
export JRE_HOME=${JAVA_HOME}/jre
export CLASSPATH=.:${JAVA_HOME}/lib:${JRE_HOME}/lib
export PATH=${JAVA_HOME}/bin:$PATH
#scala-2.11.8
export SCALA_HOME=/usr/local/scala-2.11.8
export PATH=${SCALA_HOME}/bin:$PATH

#zookeeper-3.4.6
export ZOOKEEPER_HOME=/usr/local/zookeeper-3.4.6
export PATH=${ZOOKEEPER_HOME}/bin:$PATH
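These exports typically go into /etc/profile (or ~/.bashrc) on every node; a quick way to load and verify them, assuming the paths above:

source /etc/profile
java -version     #expect java version "1.8.0_111"
scala -version    #expect Scala 2.11.8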

2. Flume installation
#apache-flume-1.7.0-bin (FLUME_HOME must be set before the copies below)
export FLUME_HOME=/root/flume
export PATH=${FLUME_HOME}/bin:$PATH

cp ${FLUME_HOME}/conf/flume-conf.properties.template ${FLUME_HOME}/conf/exec.conf
cp ${FLUME_HOME}/conf/flume-env.sh.template ${FLUME_HOME}/conf/flume-env.sh

vi ${FLUME_HOME}/conf/flume-env.sh

export JAVA_HOME=/usr/local/jdk1.8.0_111

vi ${FLUME_HOME}/conf/exec.conf

a2.sources = r2
a2.sinks = k2
a2.channels = c2
# Describe/configure the source
a2.sources.r2.type = exec
a2.sources.r2.command = tail -n +0 -F /root/tmp/flume/test.log
# Describe the sink
a2.sinks.k2.type = logger
# Use a channel which buffers events in memory
a2.channels.c2.type = memory
a2.channels.c2.capacity = 1000
a2.channels.c2.transactionCapacity = 100
# Bind the source and sink to the channel
a2.sources.r2.channels = c2
a2.sinks.k2.channel = c2
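The exec source tails /root/tmp/flume/test.log, so the file should exist before the agent starts:

mkdir -p /root/tmp/flume
touch /root/tmp/flume/test.log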


flume-ng agent --conf /root/flume/conf/ -f /root/flume/conf/exec.conf -Dflume.root.logger=DEBUG,console -n a2
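To smoke-test the agent, append to the tailed file from another shell; each line should appear as a logger event in the agent's console:

echo "hello flume $(date)" >> /root/tmp/flume/test.log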

flume-ng agent -n producer --conf /root/flume/conf -f /root/flume/conf/flume-kafka-sink.properties -Dflume.root.logger=ERROR,console

bin/flume-ng agent --conf conf --conf-file conf/file_to_kafa_conf.properties --name agent -Dflume.root.logger=INFO,console
nohup flume-ng agent --conf conf --conf-file /data/apache-flume-1.6.0-cdh5.10.0-bin/conf/dirfile_to_kafa_conf.properties --name agent -Dflume.root.logger=INFO,console >/dev/null 2>&1 &
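flume-kafka-sink.properties itself is not shown in these notes; a minimal sketch of what it might contain, assuming Flume 1.7's Kafka sink, a broker on master:9092, and the testTopic used later (the property prefix must match the agent name passed via --name/-n):

agent.sources = r1
agent.channels = c1
agent.sinks = k1
agent.sources.r1.type = exec
agent.sources.r1.command = tail -F /root/tmp/flume/test.log
agent.channels.c1.type = memory
agent.channels.c1.capacity = 1000
agent.channels.c1.transactionCapacity = 100
#Flume 1.7 Kafka sink keys; Flume 1.6 uses brokerList/topic instead of the kafka.* keys
agent.sinks.k1.type = org.apache.flume.sink.kafka.KafkaSink
agent.sinks.k1.kafka.bootstrap.servers = master:9092
agent.sinks.k1.kafka.topic = testTopic
agent.sources.r1.channels = c1
agent.sinks.k1.channel = c1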
3. ZooKeeper installation
cd $ZOOKEEPER_HOME/conf
cp zoo_sample.cfg zoo.cfg
vi zoo.cfg
# The number of milliseconds of each tick
tickTime=2000
# The number of ticks that the initial 
# synchronization phase can take
initLimit=10
# The number of ticks that can pass between 
# sending a request and getting an acknowledgement
syncLimit=5
# the directory where the snapshot is stored.
# do not use /tmp for storage, /tmp here is just 
# example sakes.
dataDir=/usr/local/zookeeper-3.4.6/data/zookeeper
# the port at which the clients will connect
clientPort=2181
server.1=master:2888:3888
server.2=slave1:2888:3888
server.3=slave2:2888:3888
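The same zoo.cfg must be present on all three nodes; assuming the identical install path on each host, it can be pushed with scp:

scp zoo.cfg root@slave1:/usr/local/zookeeper-3.4.6/conf/
scp zoo.cfg root@slave2:/usr/local/zookeeper-3.4.6/conf/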

#myid must live inside the dataDir configured in zoo.cfg above
mkdir -p /usr/local/zookeeper-3.4.6/data/zookeeper
cd /usr/local/zookeeper-3.4.6/data/zookeeper
echo "1" > myid

#Logging setup
#conf/log4j.properties
#
#Change
#
# Define some default values that can be overridden by system properties
#zookeeper.root.logger=INFO, CONSOLE
#
#to
#
#zookeeper.root.logger=INFO, ROLLINGFILE
#
#and, to roll to a new log file each day instead of appending to one ever-growing file, change
#
# Add ROLLINGFILE to rootLogger to get log file output
#    Log DEBUG level and above messages to a log file
#log4j.appender.ROLLINGFILE=org.apache.log4j.RollingFileAppender
#
#to
#
#log4j.appender.ROLLINGFILE=org.apache.log4j.DailyRollingFileAppender
#
#bin/zkEnv.sh
#
#Change
#
#if [ "x${ZOO_LOG_DIR}" = "x" ]
#then
#    ZOO_LOG_DIR="."
#fi
#
#if [ "x${ZOO_LOG4J_PROP}" = "x" ]
#then
#    ZOO_LOG4J_PROP="INFO,CONSOLE"
#fi
#
#to
#
#if [ "x${ZOO_LOG_DIR}" = "x" ]
#then
#    ZOO_LOG_DIR="$ZOOBINDIR/../logs"
#fi
#
#if [ "x${ZOO_LOG4J_PROP}" = "x" ]
#then
#    ZOO_LOG4J_PROP="INFO,ROLLINGFILE"
#fi

bin/zkServer.sh start
#Run the start command on all three nodes, then check each node's status:

bin/zkServer.sh status
#One node should report "Mode: leader" and the other two "Mode: follower".
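The ensemble can also be probed over the client port with ZooKeeper's four-letter commands, assuming nc is available:

echo stat | nc master 2181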

4. Kafka installation
#kafka_2.11-0.9.0.1
export KAFKA_HOME=/root/kafka
export PATH=${KAFKA_HOME}/bin:$PATH


#(Optional) Kafka also ships a single-node ZooKeeper; skip this when using the standalone ensemble installed above
${KAFKA_HOME}/bin/zookeeper-server-start.sh ${KAFKA_HOME}/config/zookeeper.properties &
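Before starting the broker below, config/server.properties needs a few per-node edits; a minimal sketch for master, assuming the standalone ZooKeeper ensemble above (broker.id must be unique per broker, e.g. 1 and 2 on the slaves):

broker.id=0
listeners=PLAINTEXT://:9092
log.dirs=/tmp/kafka-logs
zookeeper.connect=master:2181,slave1:2181,slave2:2181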

${KAFKA_HOME}/bin/kafka-server-start.sh ${KAFKA_HOME}/config/server.properties


#KafkaOffsetMonitor (consumer-offset web UI)
java -cp KafkaOffsetMonitor-assembly-0.2.0.jar \
 com.quantifind.kafka.offsetapp.OffsetGetterWeb \
 --zk slave1:2181 \
 --port 8089 \
 --refresh 10.seconds \
 --retain 1.days
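If it starts cleanly, the monitor's web UI is served on the host running the jar at port 8089, i.e. the --port value above.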


#Start Flume
flume-ng agent --conf conf --conf-file conf/flume-kafka-sink.properties --name agent -Dflume.root.logger=INFO,console
#Start Flume with HTTP monitoring on a port (flume.monitoring.type must also be set, or the port is ignored)
flume-ng agent --conf conf --conf-file conf/flume-kafka-sink.properties --name agent -Dflume.monitoring.type=http -Dflume.monitoring.port=5653 -Dflume.root.logger=INFO,console
#Kafka console consumer
kafka-console-consumer --zookeeper slave1:2181 --from-beginning --topic testTopic
#Create a topic
kafka-topics --create --zookeeper slave1:2181 --replication-factor 2 --partitions 3 --topic channeltopic

kafka-topics.sh --create --zookeeper 127.0.0.1:2181 --replication-factor 2 --partitions 4 --topic test

bin/kafka-console-consumer.sh --zookeeper 127.0.0.1:2181 --topic test --from-beginning

kafka-topics --zookeeper 127.0.0.1:2181 --describe --topic test
#kafka-list-topic.sh was merged into kafka-topics.sh after Kafka 0.8; list topics with:
bin/kafka-topics.sh --list --zookeeper 127.0.0.1:2181
-- Production-cluster examples
-- Create a topic
kafka-topics.sh --create --zookeeper 10.0.186.10:2181,10.0.186.11:2181,10.0.186.12:2181 --replication-factor 3 --partitions 8 --topic testTopic
-- Describe
kafka-topics.sh --zookeeper 10.0.186.10:2181,10.0.186.11:2181,10.0.186.12:2181 --describe --topic testTopic
-- Produce
kafka-console-producer.sh --broker-list 10.0.186.17:9092,10.0.186.18:9092,10.0.186.19:9092 --topic testTopic
-- Consume
kafka-console-consumer.sh --zookeeper 10.0.186.10:2181,10.0.186.11:2181,10.0.186.12:2181 --from-beginning --topic testTopic
 
Delete a topic
kafka-topics.sh --delete --zookeeper slave1:2181 --topic test
#Note: unless delete.topic.enable=true is set in server.properties, this only marks the topic for deletion.
zkCli.sh -server slave1:2181
In zkCli, go to /admin/delete_topics, find the topic marked for deletion, and delete the corresponding node.
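A sketch of that cleanup inside zkCli, assuming the test topic above (rmr is ZooKeeper 3.4's recursive delete):

rmr /admin/delete_topics/test
rmr /brokers/topics/test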

Reposted from my.oschina.net/u/2510243/blog/1796755