1、问题描述
在server.properties中配置了log.dirs值,表示kafka数据的存放目录,而非Kafka的日志目录。
[root@node1 kafka_2.11-1.0.1]# vi config/server.properties
log.dirs=/data/kafka
Kafka运行时日志默认输出到$KAFKA_HOME/logs目录下,容易撑爆分区,造成操作系统崩溃。需要将日志输出到指定分区,比如/var/log目录下。
2、解决办法
(1)首先停止Kafka
[root@node1 kafka_2.11-1.0.1]# bin/kafka-server-stop.sh
(2)修改$KAFKA_HOME/bin/kafka-run-class.sh
[root@node1 kafka_2.11-1.0.1]# vi bin/kafka-run-class.sh
定位到LOG_DIR
# Log directory to use
if [ "x$LOG_DIR" = "x" ]; then
LOG_DIR="$base_dir/logs"
fi
在上述 if 判断之前增加一行 LOG_DIR 赋值(这样 if 中的默认值不再生效),修改为
LOG_DIR=/var/log/kafka
# Log directory to use
if [ "x$LOG_DIR" = "x" ]; then
LOG_DIR="$base_dir/logs"
fi
(3)修改log4j.properties
[root@node1 kafka_2.11-1.0.1]# sed -i 's/log4j.rootLogger=INFO/log4j.rootLogger=WARN/' config/log4j.properties
查看发现log4j.rootLogger的值已经修改为WARN级别
# Unspecified loggers and loggers with additivity=true output to server.log and stdout
# Note that INFO only applies to unspecified loggers, the log level of the child logger is used otherwise
log4j.rootLogger=WARN, stdout, kafkaAppender
[root@node1 kafka_2.11-1.0.1]# vi config/log4j.properties
将最后几行的TRACE修改为INFO
修改前
# Uncomment the lines below and change log4j.logger.kafka.network.RequestChannel$ to TRACE for additional output
# related to the handling of requests
#log4j.logger.kafka.network.Processor=TRACE, requestAppender
#log4j.logger.kafka.server.KafkaApis=TRACE, requestAppender
#log4j.additivity.kafka.server.KafkaApis=false
log4j.logger.kafka.network.RequestChannel$=WARN, requestAppender
log4j.additivity.kafka.network.RequestChannel$=false
log4j.logger.kafka.controller=TRACE, controllerAppender
log4j.additivity.kafka.controller=false
log4j.logger.kafka.log.LogCleaner=INFO, cleanerAppender
log4j.additivity.kafka.log.LogCleaner=false
log4j.logger.state.change.logger=TRACE, stateChangeAppender
log4j.additivity.state.change.logger=false
# Access denials are logged at INFO level, change to DEBUG to also log allowed accesses
log4j.logger.kafka.authorizer.logger=INFO, authorizerAppender
log4j.additivity.kafka.authorizer.logger=false
修改后
# Uncomment the lines below and change log4j.logger.kafka.network.RequestChannel$ to TRACE for additional output
# related to the handling of requests
#log4j.logger.kafka.network.Processor=TRACE, requestAppender
#log4j.logger.kafka.server.KafkaApis=TRACE, requestAppender
#log4j.additivity.kafka.server.KafkaApis=false
log4j.logger.kafka.network.RequestChannel$=WARN, requestAppender
log4j.additivity.kafka.network.RequestChannel$=false
log4j.logger.kafka.controller=INFO, controllerAppender
log4j.additivity.kafka.controller=false
log4j.logger.kafka.log.LogCleaner=INFO, cleanerAppender
log4j.additivity.kafka.log.LogCleaner=false
log4j.logger.state.change.logger=INFO, stateChangeAppender
log4j.additivity.state.change.logger=false
# Access denials are logged at INFO level, change to DEBUG to also log allowed accesses
log4j.logger.kafka.authorizer.logger=INFO, authorizerAppender
log4j.additivity.kafka.authorizer.logger=false
(4)重启Kafka(建议使用 -daemon 参数以守护进程方式启动;仅用 & 后台运行时,关闭终端会使进程收到 SIGHUP 而退出)
[root@node1 kafka_2.11-1.0.1]# bin/kafka-server-start.sh -daemon config/server.properties
[root@node1 kafka_2.11-1.0.1]# cd /var/log/kafka/
[root@node1 kafka]# ll
total 76
-rw-r--r-- 1 root root 11111 Apr 24 13:49 controller.log
-rw-r--r-- 1 root root 0 Apr 24 13:16 kafka-authorizer.log
-rw-r--r-- 1 root root 0 Apr 24 13:16 kafka-request.log
-rw-r--r-- 1 root root 4472 Apr 24 13:49 kafkaServer-gc.log.0.current
-rw-r--r-- 1 root root 722 Apr 24 13:49 log-cleaner.log
-rw-r--r-- 1 root root 42951 Apr 24 13:49 server.log
-rw-r--r-- 1 root root 2715 Apr 24 13:20 state-change.log
[root@node1 kafka]#