# tail-hdfs.conf
#### Solution for a dynamically growing log file: acquire data on the edge
# with the tail command and sink it to HDFS.
# Start command:
# bin/flume-ng agent -c conf -f tail-hdfs.conf -n ag1 -Dflume.root.logger=INFO,console
########
# Define the names of the three components
ag1.sources = source1
ag1.sinks = sink1
ag1.channels = channel1
# Configure the source component
ag1.sources.source1.type = exec
ag1.sources.source1.command = tail -F /usr/local/nginx/logs/log.frame.access.log
# Configure the sink component
ag1.sinks.sink1.type = hdfs
ag1.sinks.sink1.hdfs.path = hdfs://hdp-1:9000/nginx_log/%y-%m-%d/%H-%M
ag1.sinks.sink1.hdfs.filePrefix = app_log
ag1.sinks.sink1.hdfs.fileSuffix = .log
ag1.sinks.sink1.hdfs.batchSize = 100
ag1.sinks.sink1.hdfs.fileType = DataStream
ag1.sinks.sink1.hdfs.writeFormat = Text
## Roll: rollover rules controlling when the file being written is rotated
## roll by file size (bytes)
ag1.sinks.sink1.hdfs.rollSize = 5120
## roll by number of events written
ag1.sinks.sink1.hdfs.rollCount = 1000000
## roll by time interval (seconds)
ag1.sinks.sink1.hdfs.rollInterval = 60
## rules controlling directory generation (time-based rounding)
ag1.sinks.sink1.hdfs.round = true
ag1.sinks.sink1.hdfs.roundValue = 10
ag1.sinks.sink1.hdfs.roundUnit = minute
ag1.sinks.sink1.hdfs.useLocalTimeStamp = true
# Configure the channel component
ag1.channels.channel1.type = memory
## maximum number of events stored in the channel
ag1.channels.channel1.capacity = 500000
## buffer capacity (in events) required by Flume transaction control
ag1.channels.channel1.transactionCapacity = 600
# Bind the source and sink to the channel
ag1.sources.source1.channels = channel1
ag1.sinks.sink1.channel = channel1