Conquering Flume, Part 3: Using log4j to send logs to Flume

In the next few articles, we will work through several ways of collecting logs.

This article describes how to use log4j to send logs directly to Flume.
Let's start with the practical setup, then talk about the theory.

1. Flume configuration file
agent.sources = so1
agent.channels = c1
agent.sinks = s1

# For each one of the sources, the type is defined
agent.sources.so1.type = avro
agent.sources.so1.bind = 0.0.0.0
agent.sources.so1.port = 44444
# Timeout (seconds) when putting or taking events on the channel
agent.channels.c1.keep-alive = 30

# The channel can be defined as follows.
# agent.sources.seqGenSrc.channels = memoryChannel

# Each sink's type must be defined

agent.sinks.s1.type = logger

#Specify the channel the sink should use
# agent.sinks.loggerSink.channel = memoryChannel

# Each channel's type is defined.
agent.channels.c1.type = memory
agent.channels.c1.capacity = 1000
agent.channels.c1.transactionCapacity = 100

# Bind the source and sink to the channel
agent.sources.so1.channels = c1
agent.sinks.s1.channel = c1
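
With this file in place, the agent can be started with the flume-ng script (for example: bin/flume-ng agent -c conf -f <path to this file> -n agent). Before wiring up log4j, a quick way to confirm that the avro source is actually listening is to push a single event at it with the flume-ng-sdk RPC client. Below is a minimal sketch of such a check; the class name is mine, the host is the 192.168.113.181 address used in log4j.xml later in this article, and flume-ng-sdk is assumed to be on the classpath.

import java.nio.charset.StandardCharsets;

import org.apache.flume.Event;
import org.apache.flume.api.RpcClient;
import org.apache.flume.api.RpcClientFactory;
import org.apache.flume.event.EventBuilder;

public class AvroSourceSmokeTest {
	public static void main(String[] args) throws Exception {
		// Connect to the avro source defined above (so1 on port 44444).
		RpcClient client = RpcClientFactory.getDefaultInstance("192.168.113.181", 44444);
		try {
			Event event = EventBuilder.withBody("hello flume", StandardCharsets.UTF_8);
			// With the logger sink (s1), the event shows up in the agent's own log.
			client.append(event);
		} finally {
			client.close();
		}
	}
}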


2. Test code
import org.apache.log4j.xml.DOMConfigurator;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class FlumeLogTest {
	private Logger logger = LoggerFactory.getLogger(getClass());

	public static void main(String[] args) throws Exception {
		// Load the log4j configuration and watch it for changes.
		DOMConfigurator.configureAndWatch("config/log4j.xml");
		new FlumeLogTest().start();
	}

	public void start() {
		// Emit one DEBUG message per second; the log4j appender below forwards each one to Flume.
		while (true) {
			logger.debug("flume log test:{}", System.currentTimeMillis());
			try {
				Thread.sleep(1000);
			} catch (InterruptedException e) {
				e.printStackTrace();
			}
		}
	}
}


3. log4j.xml
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE log4j:configuration SYSTEM "log4j.dtd" >
<log4j:configuration xmlns:log4j="http://jakarta.apache.org/log4j/">
	<appender name="flume"
		class="org.apache.flume.clients.log4jappender.Log4jAppender">
		<param name="hostname" value="192.168.113.181" />
		<param name="port" value="44444" />
		<layout class="org.apache.log4j.PatternLayout">
			<param name="ConversionPattern" value="[%p] %d{dd MMM hh:mm:ss aa} %t [%l] %m%n" />
		</layout>
	</appender>
	<appender name="async" class="org.apache.log4j.AsyncAppender">
		<param name="Blocking" value="false" />
		<param name="BufferSize" value="500" />
		<appender-ref ref="flume" />
	</appender>

	<appender name="CONSOLE.OUT" class="org.apache.log4j.ConsoleAppender">
		<param name="target" value="System.out" />
		<layout class="org.apache.log4j.PatternLayout">
			<param name="ConversionPattern" value="[%d][%p, (%F:%L).%M] %m%n" />
		</layout>
		<filter class="org.apache.log4j.varia.LevelRangeFilter">
			<param name="LevelMin" value="debug" />
			<param name="LevelMax" value="info" />
			<param name="AcceptOnMatch" value="false" />
		</filter>
	</appender>

	<logger name="org.springframework">
		<level value="ERROR" />
	</logger>
	<logger name="com.cp.flume">
		<level value="debug" />
	</logger>
	<root>
		<priority value="info"></priority>
		<appender-ref ref="async" />
		<appender-ref ref="CONSOLE.OUT" />
	</root>
</log4j:configuration>

4. pom.xml
		<dependency>
			<groupId>org.apache.flume.flume-ng-clients</groupId>
			<artifactId>flume-ng-log4jappender</artifactId>
			<version>1.7.0-SNAPSHOT</version>
		</dependency>


A few points worth explaining here:
1. Flume's Log4jAppender must sit behind Log4j's AsyncAppender; otherwise, once the Flume server goes down, logging calls block and the application server goes down with it. The AsyncAppender's internal queue must be set to non-blocking mode (Blocking=false) with an appropriate BufferSize; without that, the application server can still hang when the Flume server is down.
2. When the Flume server is down, the append method of the Log4jAppender class throws a FlumeException, which needs to be caught, otherwise the application server will also go down. I handled this by extending Log4jAppender and overriding the append method; see the code below.
3. Under normal conditions, Log4jAppender itself has an automatic reconnection mechanism (verified by testing).

package org.apache;


import java.util.Properties;

import org.apache.commons.lang.StringUtils;
import org.apache.flume.FlumeException;
import org.apache.flume.api.RpcClientConfigurationConstants;
import org.apache.flume.api.RpcClientFactory;
import org.apache.flume.api.RpcClientFactory.ClientType;
import org.apache.flume.clients.log4jappender.Log4jAppender;
import org.apache.log4j.helpers.LogLog;
import org.apache.log4j.spi.LoggingEvent;

/**
 * @project:     flume-log4j-test
 * @Title:       FailoverLog4jAppender.java
 * @Package:     org.apache
 * @author:      chenpeng
 * @email:       [email protected]
 * @date:        Feb 24, 2016 2:12:16 PM
 * @description: Log4jAppender that fails over across multiple Flume hosts and
 *               catches FlumeException so a Flume outage cannot crash the application
 * @version:
 */
public class FailoverLog4jAppender extends Log4jAppender {

	private String hosts;
	private String maxAttempts;
	private boolean configured = false;

	public void setHosts(String hostNames) {
		this.hosts = hostNames;
	}

	public void setMaxAttempts(String maxAttempts) {
		this.maxAttempts = maxAttempts;
	}

	@Override
	public synchronized void append(LoggingEvent event) {
		if (!configured) {
			String errorMsg = "Flume Log4jAppender not configured correctly! Cannot"
					+ " send events to Flume.";
			LogLog.error(errorMsg);
			if (getUnsafeMode()) {
				return;
			}
			throw new FlumeException(errorMsg);
		}
		try {
			super.append(event);
		} catch (FlumeException e) {
			// Swallow the exception so a Flume outage cannot take down the application.
			e.printStackTrace();
		}
	}

	/**
	 *
	 * @throws FlumeException
	 *             if the FailoverRpcClient cannot be instantiated.
	 */
	@Override
	public void activateOptions() throws FlumeException {
		try {
			final Properties properties = getProperties(hosts, maxAttempts,
					getTimeout());
			rpcClient = RpcClientFactory.getInstance(properties);
			if (layout != null) {
				layout.activateOptions();
			}
			configured = true;
		} catch (Exception e) {
			String errormsg = "RPC client creation failed! " + e.getMessage();
			LogLog.error(errormsg);
			if (getUnsafeMode()) {
				return;
			}
			throw new FlumeException(e);
		}

	}

	/**
	 * Builds the failover RpcClient properties from the space-separated
	 * host:port list configured on the appender.
	 */
	private Properties getProperties(String hosts, String maxAttempts,
			long timeout) throws FlumeException {

		if (StringUtils.isEmpty(hosts)) {
			throw new FlumeException("hosts must not be null");
		}

		Properties props = new Properties();
		String[] hostsAndPorts = hosts.split("\\s+");
		StringBuilder names = new StringBuilder();
		for (int i = 0; i < hostsAndPorts.length; i++) {
			String hostAndPort = hostsAndPorts[i];
			String name = "h" + i;
			props.setProperty(
					RpcClientConfigurationConstants.CONFIG_HOSTS_PREFIX + name,
					hostAndPort);
			names.append(name).append(" ");
		}
		props.put(RpcClientConfigurationConstants.CONFIG_HOSTS,
				names.toString());
		props.put(RpcClientConfigurationConstants.CONFIG_CLIENT_TYPE,
				ClientType.DEFAULT_FAILOVER.toString());

		if (StringUtils.isEmpty(maxAttempts)) {
			throw new FlumeException("maxAttempts must not be null");
		}

		props.put(RpcClientConfigurationConstants.CONFIG_MAX_ATTEMPTS,
				maxAttempts);

		props.setProperty(
				RpcClientConfigurationConstants.CONFIG_CONNECT_TIMEOUT,
				String.valueOf(timeout));
		props.setProperty(
				RpcClientConfigurationConstants.CONFIG_REQUEST_TIMEOUT,
				String.valueOf(timeout));
		return props;
	}

}
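
For completeness, a minimal sketch of wiring this appender up programmatically instead of through log4j.xml is shown below; the second host:port pair and the maxAttempts value are placeholders for your own collector tier, and the same values can also be supplied as <param> entries on the appender in log4j.xml.

import org.apache.FailoverLog4jAppender;
import org.apache.log4j.Logger;
import org.apache.log4j.PatternLayout;

public class FailoverAppenderSetup {
	public static void main(String[] args) {
		FailoverLog4jAppender appender = new FailoverLog4jAppender();
		// Space-separated host:port pairs; the failover client tries them in order.
		appender.setHosts("192.168.113.181:44444 192.168.113.182:44444");
		appender.setMaxAttempts("2");
		appender.setUnsafeMode(true);   // log the error and continue instead of throwing
		appender.setLayout(new PatternLayout("[%p] %d %m%n"));
		appender.activateOptions();     // builds the failover RpcClient

		Logger.getRootLogger().addAppender(appender);
		Logger.getRootLogger().info("failover appender is up");
	}
}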
