Java tutorial
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.apache.flume.agent;

import com.alibaba.fastjson.JSONObject;
import com.google.common.base.Preconditions;
import com.google.common.base.Throwables;
import com.google.common.util.concurrent.ThreadFactoryBuilder;

import java.io.FileInputStream;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.security.KeyStore;
import java.security.Security;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import javax.net.ssl.KeyManagerFactory;
import javax.net.ssl.SSLContext;
import javax.net.ssl.SSLEngine;

import org.apache.avro.ipc.NettyServer;
import org.apache.avro.ipc.NettyTransceiver;
import org.apache.avro.ipc.Responder;
import org.apache.avro.ipc.Server;
import org.apache.avro.ipc.specific.SpecificResponder;
import org.apache.flume.Channel;
import org.apache.flume.ChannelException;
import org.apache.flume.Context;
import org.apache.flume.Event;
import org.apache.flume.EventDrivenSource;
import org.apache.flume.FlumeException;
import org.apache.flume.Source;
import org.apache.flume.conf.Configurable;
import org.apache.flume.conf.Configurables;
import org.apache.flume.event.EventBuilder;
import org.apache.flume.instrumentation.SourceCounter;
import org.apache.flume.source.AbstractSource;
import org.apache.flume.source.avro.AvroFlumeEvent;
import org.apache.flume.source.avro.AvroSourceProtocol;
import org.apache.flume.source.avro.Status;
import org.apache.flume.util.FileUtils;
import org.apache.flume.util.ZookeeperUtils;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.Watcher.Event.EventType;
import org.apache.zookeeper.ZooKeeper;
import org.jboss.netty.channel.ChannelPipeline;
import org.jboss.netty.channel.ChannelPipelineFactory;
import org.jboss.netty.channel.Channels;
import org.jboss.netty.channel.socket.nio.NioServerSocketChannelFactory;
import org.jboss.netty.handler.codec.compression.ZlibDecoder;
import org.jboss.netty.handler.codec.compression.ZlibEncoder;
import org.jboss.netty.handler.ipfilter.IpFilterRule;
import org.jboss.netty.handler.ipfilter.IpFilterRuleHandler;
import org.jboss.netty.handler.ipfilter.PatternRule;
import org.jboss.netty.handler.ssl.SslHandler;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * <p>
 * A {@link Source} implementation that receives Avro events from clients that
 * implement {@link AvroSourceProtocol}.
 * </p>
 * <p>
 * This source forms one half of Flume's tiered collection support.
 * Internally, this source uses Avro's <tt>NettyTransceiver</tt> to listen for
 * and handle events. It can be paired with the built-in <tt>AvroSink</tt> to
 * create tiered collection topologies. Of course, nothing prevents one from
 * using this source to receive data from other custom-built infrastructure
 * that uses the same Avro protocol (specifically {@link AvroSourceProtocol}).
 * </p>
 * <p>
 * Events may be received from the client either singly or in batches.
 * Generally, larger batches are far more efficient, but introduce a slight
 * delay (measured in millis) in delivery. A batch is submitted to the
 * configured {@link Channel} atomically (i.e. either all events make it into
 * the channel or none do).
 * </p>
 * <p>
 * <b>Configuration options</b>
 * </p>
 * <table>
 * <tr>
 * <th>Parameter</th>
 * <th>Description</th>
 * <th>Unit / Type</th>
 * <th>Default</th>
 * </tr>
 * <tr>
 * <td><tt>bind</tt></td>
 * <td>The hostname or IP to which the source will bind.</td>
 * <td>Hostname or IP / String</td>
 * <td>none (required)</td>
 * </tr>
 * <tr>
 * <td><tt>port</tt></td>
 * <td>The port to which the source will bind and listen for events.</td>
 * <td>TCP port / int</td>
 * <td>none (required)</td>
 * </tr>
 * <tr>
 * <td><tt>threads</tt></td>
 * <td>Max number of threads assigned to the thread pool, 0 being unlimited.</td>
 * <td>Count / int</td>
 * <td>0 (optional)</td>
 * </tr>
 * </table>
 * <p>
 * <b>Metrics</b>
 * </p>
 * <p>
 * TODO
 * </p>
 */
public class SmartAvroSource extends AbstractSource
    implements EventDrivenSource, Configurable, AvroSourceProtocol {

  private static final String THREADS = "threads";
  private static final Logger logger = LoggerFactory.getLogger(SmartAvroSource.class);

  private static final String PORT_KEY = "port";
  private static final String BIND_KEY = "bind";
  private static final String COMPRESSION_TYPE = "compression-type";
  private static final String SSL_KEY = "ssl";
  private static final String IP_FILTER_KEY = "ipFilter";
  private static final String IP_FILTER_RULES_KEY = "ipFilterRules";
  private static final String KEYSTORE_KEY = "keystore";
  private static final String KEYSTORE_PASSWORD_KEY = "keystore-password";
  private static final String KEYSTORE_TYPE_KEY = "keystore-type";
  private static final String EXCLUDE_PROTOCOLS = "exclude-protocols";

  private int port;
  private String bindAddress;
  private String compressionType;
  private String keystore;
  private String keystorePassword;
  private String keystoreType;
  private final List<String> excludeProtocols = new LinkedList<String>();
  private boolean enableSsl = false;
  private boolean enableIpFilter;
  private String patternRuleConfigDefinition;

  private Server server;
  private SourceCounter sourceCounter;

  private int maxThreads;
  private ScheduledExecutorService connectionCountUpdater;

  private List<IpFilterRule> rules;

  private ZookeeperUtils congZkUtils = null;
  private ZookeeperUtils zhuZkUtils = null;
  private ZooKeeper zhuZk = null;
  private ZooKeeper congZk = null;
  private boolean isMonitor = false;
  private ZooKeeper zk = null;
  private SystemProperties systemProperties = null;
  private HeartInfo heartInfo = HeartInfo.getInstance();
  private String logPath = null;
  private ScheduledFuture<?> heart_sender_future_ = null;
  private ScheduledExecutorService executor_service_ = null;
  private Runnable heart_sender_runnable_ = null;
  private String ipAndPort = null;
  private String configFilePath = null;
  private String senderIpAndPort = null;
  private String hostname = null;

  @Override
  public void configure(Context context) {
    Configurables.ensureRequiredNonNull(context, PORT_KEY, BIND_KEY);

    port = context.getInteger(PORT_KEY);
    bindAddress = context.getString(BIND_KEY);
    compressionType = context.getString(COMPRESSION_TYPE, "none");

    try {
      maxThreads = context.getInteger(THREADS, 0);
    } catch (NumberFormatException e) {
      logger.warn("AVRO source's \"threads\" property must specify an integer value: {}",
          context.getString(THREADS));
    }

    enableSsl = context.getBoolean(SSL_KEY, false);
    keystore = context.getString(KEYSTORE_KEY);
    keystorePassword = context.getString(KEYSTORE_PASSWORD_KEY);
    keystoreType = context.getString(KEYSTORE_TYPE_KEY, "JKS");
    String excludeProtocolsStr = context.getString(EXCLUDE_PROTOCOLS);
    if (excludeProtocolsStr == null) {
      excludeProtocols.add("SSLv3");
    } else {
      excludeProtocols.addAll(Arrays.asList(excludeProtocolsStr.split(" ")));
      if (!excludeProtocols.contains("SSLv3")) {
        excludeProtocols.add("SSLv3");
      }
    }

    if (enableSsl) {
      Preconditions.checkNotNull(keystore,
          KEYSTORE_KEY + " must be specified when SSL is enabled");
      Preconditions.checkNotNull(keystorePassword,
          KEYSTORE_PASSWORD_KEY + " must be specified when SSL is enabled");
      try {
        KeyStore ks = KeyStore.getInstance(keystoreType);
        ks.load(new FileInputStream(keystore), keystorePassword.toCharArray());
      } catch (Exception ex) {
        throw new FlumeException("Avro source configured with invalid keystore: " + keystore, ex);
      }
    }

    enableIpFilter = context.getBoolean(IP_FILTER_KEY, false);
    if (enableIpFilter) {
      patternRuleConfigDefinition = context.getString(IP_FILTER_RULES_KEY);
      if (patternRuleConfigDefinition == null || patternRuleConfigDefinition.trim().isEmpty()) {
        throw new FlumeException(
            "ipFilter is configured with true but ipFilterRules is not defined");
      }
      String[] patternRuleDefinitions = patternRuleConfigDefinition.split(",");
      rules = new ArrayList<IpFilterRule>(patternRuleDefinitions.length);
      for (String patternRuleDefinition : patternRuleDefinitions) {
        rules.add(generateRule(patternRuleDefinition));
      }
    }

    if (sourceCounter == null) {
      sourceCounter = new SourceCounter(getName());
    }

    // Default to false so the source still works when "isMonitor" is unset.
    isMonitor = context.getBoolean("isMonitor", false);
    if (isMonitor) {
      ipAndPort = context.getString("ipAndPort");
      senderIpAndPort = context.getString("senderIpAndPort");
      congZkUtils = new ZookeeperUtils();
      congZkUtils.createConnection(ipAndPort, 3000);
      zhuZkUtils = new ZookeeperUtils();
      zhuZkUtils.createConnection(senderIpAndPort, 3000);
      systemProperties = new SystemProperties();
      systemProperties.Configure(context);
      heartInfo.Configure(context);
      heartInfo.setLevel(2);
      configFilePath = context.getString("configFilePath");
      heart_sender_runnable_ = new HeartSenderRunnable(context);
      executor_service_ = Executors.newScheduledThreadPool(1);
      hostname = context.getString("hostname");
      logPath = context.getString("logPath");
    }
  }

  public void createConfigNodes(String configInfo, ZookeeperUtils zkUtil)
      throws KeeperException, InterruptedException {
    String nodePath = FlumeConstants.AGENT_CONFIG_ROOT + "/" + systemProperties.getIp_();
    heartInfo.setIp(systemProperties.getIp_());
    if (!zkUtil.pathExis(FlumeConstants.AGENT_CONFIG_ROOT)) {
      // Create the config root node if it does not exist yet.
      zkUtil.create(FlumeConstants.AGENT_CONFIG_ROOT, "0".getBytes());
    }
    if (!zkUtil.pathExis(nodePath)) {
      zkUtil.create(nodePath, configInfo.getBytes());
    } else {
      zkUtil.writeData(nodePath, configInfo);
    }
  }

  public void createConfigNodes(String configInfo, ZookeeperUtils zkUtil, String ip)
      throws KeeperException, InterruptedException {
    String nodePath = FlumeConstants.AGENT_CONFIG_ROOT + "/" + ip + "/"
        + systemProperties.getIp_();
    heartInfo.setIp(systemProperties.getIp_());
    if (!zkUtil.pathExis(FlumeConstants.AGENT_CONFIG_ROOT)) {
      // Create the config root node if it does not exist yet.
      zkUtil.create(FlumeConstants.AGENT_CONFIG_ROOT, "0".getBytes());
    }
    if (!zkUtil.pathExis(nodePath)) {
      zkUtil.create(nodePath, configInfo.getBytes());
    } else {
      zkUtil.writeData(nodePath, configInfo);
    }
  }

  public void createLogNodes(HeartInfo heartInfo, ZookeeperUtils zkUtil)
      throws KeeperException, InterruptedException {
    if (!zkUtil.pathExis(FlumeConstants.AGENT_LOG_ROOT)) {
      // Create the log root node if it does not exist yet.
      zkUtil.create(FlumeConstants.AGENT_LOG_ROOT, "0".getBytes());
    }
    String logInfoPath = FlumeConstants.AGENT_LOG_ROOT + "/" + systemProperties.getIp_();
    if (!zkUtil.pathExis(logInfoPath)) {
      zkUtil.create(logInfoPath, heartInfo.toString().getBytes());
    } else {
      zkUtil.writeData(logInfoPath, heartInfo.toString());
    }
  }

  public void createLogNodes(HeartInfo heartInfo, ZookeeperUtils zkUtil, String ip)
      throws KeeperException, InterruptedException {
    if (!zkUtil.pathExis(FlumeConstants.AGENT_LOG_ROOT)) {
      // Create the log root node if it does not exist yet.
      zkUtil.create(FlumeConstants.AGENT_LOG_ROOT, "0".getBytes());
    }
    String logInfoPath = FlumeConstants.AGENT_LOG_ROOT + "/" + ip + "/"
        + systemProperties.getIp_();
    if (!zkUtil.pathExis(logInfoPath)) {
      zkUtil.create(logInfoPath, heartInfo.toString().getBytes());
    } else {
      zkUtil.writeData(logInfoPath, heartInfo.toString());
    }
  }

  @Override
  public void start() {
    logger.info("Starting {}...", this);

    Responder responder = new SpecificResponder(AvroSourceProtocol.class, this);

    NioServerSocketChannelFactory socketChannelFactory = initSocketChannelFactory();

    ChannelPipelineFactory pipelineFactory = initChannelPipelineFactory();

    server = new NettyServer(responder, new InetSocketAddress(bindAddress, port),
        socketChannelFactory, pipelineFactory, null);

    connectionCountUpdater = Executors.newSingleThreadScheduledExecutor();

    server.start();
    sourceCounter.start();
    super.start();

    final NettyServer srv = (NettyServer) server;
    connectionCountUpdater.scheduleWithFixedDelay(new Runnable() {
      @Override
      public void run() {
        sourceCounter.setOpenConnectionCount(Long.valueOf(srv.getNumActiveConnections()));
      }
    }, 0, 60, TimeUnit.SECONDS);

    logger.info("Avro source {} started.", getName());

    if (isMonitor) {
      // Publish this agent's configuration file to both ZooKeeper ensembles.
      try {
        String configInfo = FileUtils.readFileByLines(configFilePath, -1);
        createConfigNodes(configInfo, congZkUtils);
        createConfigNodes(configInfo, zhuZkUtils, hostname);
      } catch (KeeperException e1) {
        e1.printStackTrace();
      } catch (InterruptedException e1) {
        e1.printStackTrace();
      }
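      // Comment added for orientation (inferred from the code below, not the
      // original author's wording): the rest of the monitor bootstrap publishes
      // the heartbeat/log nodes, starts the periodic heartbeat sender, opens raw
      // ZooKeeper handles for watch registration, and registers the initial
      // watches. Local changes are pushed to the sender-side ensemble, and
      // sender-side config changes are pulled back to this agent.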
      try {
        createLogNodes(heartInfo, congZkUtils);
        createLogNodes(heartInfo, zhuZkUtils, hostname);
      } catch (KeeperException e) {
        e.printStackTrace();
      } catch (InterruptedException e) {
        e.printStackTrace();
      }

      heart_sender_future_ = executor_service_.scheduleWithFixedDelay(heart_sender_runnable_,
          0L, 4, TimeUnit.SECONDS);

      try {
        zhuZk = new ZooKeeper(senderIpAndPort, 3000, new Watcher() {
          @Override
          public void process(WatchedEvent event) {
            logger.info("--> sender zk event received");
          }
        });
        congZk = new ZooKeeper(ipAndPort, 3000, new Watcher() {
          @Override
          public void process(WatchedEvent event) {
            logger.info("--> local zk event received");
          }
        });
      } catch (IOException e) {
        e.printStackTrace();
      }

      // Watch the local log node's children so changes are pushed to the sender zk.
      monitorLocalNodeData(FlumeConstants.AGENT_LOG_ROOT + "/" + systemProperties.getIp_(), 0);
      // Watch each existing child of the local log node for data changes.
      monitorLocalChildNodeDataChange(
          FlumeConstants.AGENT_LOG_ROOT + "/" + systemProperties.getIp_());
      // Watch the local config node's children so changes are pushed to the sender zk.
      monitorLocalNodeData(FlumeConstants.AGENT_CONFIG_ROOT + "/" + systemProperties.getIp_(), 0);
      // Watch the sender-side config node so remote configuration changes are
      // pulled back to this agent.
      monitorSenderNodeData(
          FlumeConstants.AGENT_CONFIG_ROOT + "/" + hostname + "/" + systemProperties.getIp_());
    }
  }

  public void monitorLocalChildNodeDataChange(String nodePath) {
    List<String> nodes = congZkUtils.getChildren(nodePath);
    for (int i = 0; i < nodes.size(); i++) {
      try {
        congZk.exists(nodePath + "/" + nodes.get(i), new WatcherClass(zk));
      } catch (KeeperException | InterruptedException e) {
        e.printStackTrace();
      }
    }
  }

  public void monitorSenderNodeData(String nodePath) {
    try {
      if (nodePath.indexOf(FlumeConstants.AGENT_CONFIG_ROOT) == 0) {
        zhuZk.exists(nodePath, new SenderWatcherClass(zk));
        zhuZk.getChildren(nodePath, new SenderWatcherClass(zk));
        List<String> nodes = zhuZkUtils.getChildren(nodePath);
        for (int i = 0; i < nodes.size(); i++) {
          zhuZk.exists(nodePath + "/" + nodes.get(i), new SenderWatcherClass(zk));
        }
      }
    } catch (KeeperException e) {
      e.printStackTrace();
    } catch (InterruptedException e) {
      e.printStackTrace();
    }
  }

  public void monitorLocalNodeData(String nodePath, int type) {
    try {
      if (type == 0) {
        // Watch the node's children on the local ZooKeeper.
        congZk.getChildren(nodePath, new WatcherClass(zk));
      } else if (type == 1) {
        // Watch the node's own data on the local ZooKeeper.
        congZk.exists(nodePath, new WatcherClass(zk));
      }
    } catch (KeeperException e) {
      e.printStackTrace();
    } catch (InterruptedException e) {
      e.printStackTrace();
    }
  }

  class SenderWatcherClass implements Watcher {

    ZooKeeper zk;

    public SenderWatcherClass(ZooKeeper zk) {
      this.zk = zk;
    }

    @Override
    public void process(WatchedEvent event) {
      if (event.getType() == EventType.NodeDataChanged) {
        String zhuPath = event.getPath();
        String nodeData = zhuZkUtils.readData(zhuPath);
        try {
          if (getSubCount(zhuPath, "/") == 3) {
            // The changed node holds this agent's configuration;
            // persist it to the local config file.
            FileUtils.writeDataToConfigFile(configFilePath, nodeData);
          } else if (getSubCount(zhuPath, "/") == 4) {
            // Mirror the change back to the local ZooKeeper and re-register the watch.
            String congPath = zhuPath.replace("/" + hostname, "");
            congZkUtils.writeData(congPath, nodeData);
            zhuZk.exists(zhuPath, new SenderWatcherClass(zk));
          }
        } catch (IOException e1) {
          e1.printStackTrace();
        } catch (KeeperException e) {
          e.printStackTrace();
        } catch (InterruptedException e) {
          e.printStackTrace();
        }
      } else if (event.getType() == EventType.NodeChildrenChanged) {
        String ip = zhuZkUtils.readData(FlumeConstants.AGENT_CONFIG_ROOT);
        logger.info("changed agent ip: " + ip);
        String path = event.getPath() + "/" + ip;
        logger.info("watching sender zk path: " + path);
        try {
          // Watch the new child's data...
          zhuZk.exists(path, new SenderWatcherClass(zk));
          // ...and re-register the children watch on the parent.
          zhuZk.getChildren(event.getPath(), new SenderWatcherClass(zk));
        } catch (KeeperException e) {
          e.printStackTrace();
        } catch (InterruptedException e) {
          e.printStackTrace();
        }
      }
    }
  }

  public int getSubCount(String str, String key) {
    int count = 0;
    Pattern p = Pattern.compile(key);
    Matcher m = p.matcher(str);
    while (m.find()) {
      count++;
    }
    return count;
  }

  class WatcherClass implements Watcher {

    ZooKeeper zk;

    public WatcherClass(ZooKeeper zk) {
      this.zk = zk;
    }

    @Override
    public void process(WatchedEvent event) {
      if (event.getType() == EventType.NodeChildrenChanged) {
        if (event.getPath().indexOf(FlumeConstants.AGENT_LOG_ROOT) == 0) {
          etlAndMonitorData(event, FlumeConstants.AGENT_LOG_ROOT);
        } else if (event.getPath().indexOf(FlumeConstants.AGENT_CONFIG_ROOT) == 0) {
          etlAndMonitorData(event, FlumeConstants.AGENT_CONFIG_ROOT);
        }
      } else if (event.getType() == EventType.NodeDataChanged) {
        if (event.getPath().indexOf(FlumeConstants.AGENT_LOG_ROOT) == 0) {
          // forwardChangedData(event, FlumeConstants.AGENT_LOG_ROOT);
        } else if (event.getPath().indexOf(FlumeConstants.AGENT_CONFIG_ROOT) == 0) {
        }
      }
    }
  }

  /**
   * Pushes a changed local log node's data to the sender-side ZooKeeper and
   * re-registers the local data watch.
   */
  public void forwardChangedData(WatchedEvent event, String path) {
    String changedDataPath = event.getPath();
    String changeLogDataInfo = congZkUtils.readData(changedDataPath);
    String zhuPath = getTargetPath(changedDataPath, hostname);
    zhuZkUtils.writeData(zhuPath, changeLogDataInfo);
    monitorLocalNodeData(changedDataPath, 1);
  }

  public String getTargetPath(String tempPath, String host) {
    return tempPath.replace(FlumeConstants.AGENT_LOG_ROOT,
        FlumeConstants.AGENT_LOG_ROOT + "/" + host);
  }

  public void etlAndMonitorData(WatchedEvent event, String path) {
    // The root node's data holds the IP of the agent whose node changed.
    String ip = congZkUtils.readData(path);
    String tempPath = event.getPath() + "/" + ip;
    if ("".equals(ip)) {
      return;
    }
    String nodeData = congZkUtils.readData(tempPath);
    String zhuPath = path + "/" + hostname + "/" + systemProperties.getIp_() + "/" + ip;
    // Mirror the change to the sender-side ZooKeeper.
    zhuZkUtils.writeData(path, ip);
    zhuZkUtils.writeData(zhuPath, nodeData);
    logger.info("mirrored node data to sender zk path: " + zhuPath);
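    // ZooKeeper watches are one-shot: a watch is consumed when it fires, so it
    // must be re-registered after handling each event or subsequent changes
    // would go unnoticed.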
    monitorLocalNodeData(path + "/" + systemProperties.getIp_(), 0);
    if (path.equals(FlumeConstants.AGENT_LOG_ROOT)) {
      monitorLocalNodeData(tempPath, 1);
    }
    // monitorLocalNodeData(path + "/" + systemProperties.getIp_(), congZk, 1);
    // monitorSenderNodeData(zhuPath, zhuZk);
  }

  private NioServerSocketChannelFactory initSocketChannelFactory() {
    NioServerSocketChannelFactory socketChannelFactory;
    if (maxThreads <= 0) {
      socketChannelFactory = new NioServerSocketChannelFactory(
          Executors.newCachedThreadPool(new ThreadFactoryBuilder()
              .setNameFormat("Avro " + NettyTransceiver.class.getSimpleName() + " Boss-%d")
              .build()),
          Executors.newCachedThreadPool(new ThreadFactoryBuilder()
              .setNameFormat("Avro " + NettyTransceiver.class.getSimpleName() + " I/O Worker-%d")
              .build()));
    } else {
      socketChannelFactory = new NioServerSocketChannelFactory(
          Executors.newCachedThreadPool(new ThreadFactoryBuilder()
              .setNameFormat("Avro " + NettyTransceiver.class.getSimpleName() + " Boss-%d")
              .build()),
          Executors.newFixedThreadPool(maxThreads, new ThreadFactoryBuilder()
              .setNameFormat("Avro " + NettyTransceiver.class.getSimpleName() + " I/O Worker-%d")
              .build()));
    }
    return socketChannelFactory;
  }

  private ChannelPipelineFactory initChannelPipelineFactory() {
    ChannelPipelineFactory pipelineFactory;
    boolean enableCompression = compressionType.equalsIgnoreCase("deflate");
    if (enableCompression || enableSsl || enableIpFilter) {
      pipelineFactory = new AdvancedChannelPipelineFactory(enableCompression, enableSsl,
          keystore, keystorePassword, keystoreType, enableIpFilter,
          patternRuleConfigDefinition);
    } else {
      pipelineFactory = new ChannelPipelineFactory() {
        @Override
        public ChannelPipeline getPipeline() throws Exception {
          return Channels.pipeline();
        }
      };
    }
    return pipelineFactory;
  }

  @Override
  public void stop() {
    logger.info("Avro source {} stopping: {}", getName(), this);

    if (isMonitor) {
      // Record shutdown (status 2) on the sender-side ZooKeeper. Guarded by
      // isMonitor because zhuZkUtils is only initialized when monitoring is on.
      String nodePath = FlumeConstants.AGENT_LOG_ROOT + "/" + hostname + "/"
          + systemProperties.getIp_();
      String nodeData = zhuZkUtils.readData(nodePath);
      JSONObject dataObj = JSONObject.parseObject(nodeData);
      dataObj.put("status", 2);
      zhuZkUtils.writeData(nodePath, dataObj.toString());
    }

    server.close();
    try {
      server.join();
    } catch (InterruptedException e) {
      logger.info("Avro source " + getName() + ": Interrupted while waiting "
          + "for Avro server to stop. Exiting. Exception follows.", e);
    }
    sourceCounter.stop();
    connectionCountUpdater.shutdown();
    while (!connectionCountUpdater.isTerminated()) {
      try {
        Thread.sleep(100);
      } catch (InterruptedException ex) {
        logger.error("Interrupted while waiting for connection count executor to terminate", ex);
        Throwables.propagate(ex);
      }
    }
    super.stop();
    logger.info("Avro source {} stopped. Metrics: {}", getName(), sourceCounter);
  }

  @Override
  public String toString() {
    return "Avro source " + getName() + ": { bindAddress: " + bindAddress
        + ", port: " + port + " }";
  }

  /**
   * Helper function to convert a map of CharSequence to a map of String.
   */
  private static Map<String, String> toStringMap(Map<CharSequence, CharSequence> charSeqMap) {
    Map<String, String> stringMap = new HashMap<String, String>();
    for (Map.Entry<CharSequence, CharSequence> entry : charSeqMap.entrySet()) {
      stringMap.put(entry.getKey().toString(), entry.getValue().toString());
    }
    return stringMap;
  }

  @Override
  public Status append(AvroFlumeEvent avroEvent) {
    logger.debug("Avro source {}: Received avro event: {}", getName(), avroEvent);
    sourceCounter.incrementAppendReceivedCount();
    sourceCounter.incrementEventReceivedCount();
    Event event = EventBuilder.withBody(avroEvent.getBody().array(),
        toStringMap(avroEvent.getHeaders()));
    try {
      getChannelProcessor().processEvent(event);
    } catch (ChannelException ex) {
      logger.warn("Avro source " + getName() + ": Unable to process event. "
          + "Exception follows.", ex);
      return Status.FAILED;
    }
    sourceCounter.incrementAppendAcceptedCount();
    sourceCounter.incrementEventAcceptedCount();
    return Status.OK;
  }

  @Override
  public Status appendBatch(List<AvroFlumeEvent> events) {
    logger.debug("Avro source {}: Received avro event batch of {} events.", getName(),
        events.size());
    sourceCounter.incrementAppendBatchReceivedCount();
    sourceCounter.addToEventReceivedCount(events.size());

    List<Event> batch = new ArrayList<Event>();

    for (AvroFlumeEvent avroEvent : events) {
      Event event = EventBuilder.withBody(avroEvent.getBody().array(),
          toStringMap(avroEvent.getHeaders()));
      batch.add(event);
    }

    try {
      getChannelProcessor().processEventBatch(batch);
    } catch (Throwable t) {
      logger.error("Avro source " + getName() + ": Unable to process event "
          + "batch. Exception follows.", t);
      if (t instanceof Error) {
        throw (Error) t;
      }
      return Status.FAILED;
    }

    sourceCounter.incrementAppendBatchAcceptedCount();
    sourceCounter.addToEventAcceptedCount(events.size());

    return Status.OK;
  }

  private PatternRule generateRule(String patternRuleDefinition) throws FlumeException {
    patternRuleDefinition = patternRuleDefinition.trim();
    // First validate the format.
    int firstColonIndex = patternRuleDefinition.indexOf(":");
    if (firstColonIndex == -1) {
      throw new FlumeException("Invalid ipFilter patternRule '" + patternRuleDefinition
          + "' should look like <'allow' or 'deny'>:<'ip' or 'name'>:<pattern>");
    } else {
      String ruleAccessFlag = patternRuleDefinition.substring(0, firstColonIndex);
      int secondColonIndex = patternRuleDefinition.indexOf(":", firstColonIndex + 1);
      if ((!ruleAccessFlag.equals("allow") && !ruleAccessFlag.equals("deny"))
          || secondColonIndex == -1) {
        throw new FlumeException("Invalid ipFilter patternRule '" + patternRuleDefinition
            + "' should look like <'allow' or 'deny'>:<'ip' or 'name'>:<pattern>");
      }
      String patternTypeFlag = patternRuleDefinition.substring(firstColonIndex + 1,
          secondColonIndex);
      if ((!patternTypeFlag.equals("ip") && !patternTypeFlag.equals("name"))) {
        throw new FlumeException("Invalid ipFilter patternRule '" + patternRuleDefinition
            + "' should look like <'allow' or 'deny'>:<'ip' or 'name'>:<pattern>");
      }
      boolean isAllow = ruleAccessFlag.equals("allow");
      String patternRuleString = (patternTypeFlag.equals("ip") ? "i" : "n") + ":"
          + patternRuleDefinition.substring(secondColonIndex + 1);
      logger.info("Adding ipFilter PatternRule: " + (isAllow ? "Allow" : "Deny") + " "
          + patternRuleString);
      return new PatternRule(isAllow, patternRuleString);
    }
  }

  /**
   * Factory of SSL-enabled server worker channel pipelines. Copied from Avro's
   * org.apache.avro.ipc.TestNettyServerWithSSL test.
   */
  private class AdvancedChannelPipelineFactory implements ChannelPipelineFactory {

    private boolean enableCompression;
    private boolean enableSsl;
    private String keystore;
    private String keystorePassword;
    private String keystoreType;

    private boolean enableIpFilter;
    private String patternRuleConfigDefinition;

    public AdvancedChannelPipelineFactory(boolean enableCompression, boolean enableSsl,
        String keystore, String keystorePassword, String keystoreType, boolean enableIpFilter,
        String patternRuleConfigDefinition) {
      this.enableCompression = enableCompression;
      this.enableSsl = enableSsl;
      this.keystore = keystore;
      this.keystorePassword = keystorePassword;
      this.keystoreType = keystoreType;
      this.enableIpFilter = enableIpFilter;
      this.patternRuleConfigDefinition = patternRuleConfigDefinition;
    }

    private SSLContext createServerSSLContext() {
      try {
        KeyStore ks = KeyStore.getInstance(keystoreType);
        ks.load(new FileInputStream(keystore), keystorePassword.toCharArray());

        // Set up key manager factory to use our key store
        KeyManagerFactory kmf = KeyManagerFactory.getInstance(getAlgorithm());
        kmf.init(ks, keystorePassword.toCharArray());

        SSLContext serverContext = SSLContext.getInstance("TLS");
        serverContext.init(kmf.getKeyManagers(), null, null);
        return serverContext;
      } catch (Exception e) {
        throw new Error("Failed to initialize the server-side SSLContext", e);
      }
    }

    private String getAlgorithm() {
      String algorithm = Security.getProperty("ssl.KeyManagerFactory.algorithm");
      if (algorithm == null) {
        algorithm = "SunX509";
      }
      return algorithm;
    }

    @Override
    public ChannelPipeline getPipeline() throws Exception {
      ChannelPipeline pipeline = Channels.pipeline();
      if (enableCompression) {
        ZlibEncoder encoder = new ZlibEncoder(6);
        pipeline.addFirst("deflater", encoder);
        pipeline.addFirst("inflater", new ZlibDecoder());
      }
      if (enableSsl) {
        SSLEngine sslEngine = createServerSSLContext().createSSLEngine();
        sslEngine.setUseClientMode(false);
        List<String> enabledProtocols = new ArrayList<String>();
        for (String protocol : sslEngine.getEnabledProtocols()) {
          if (!excludeProtocols.contains(protocol)) {
            enabledProtocols.add(protocol);
          }
        }
        sslEngine.setEnabledProtocols(enabledProtocols.toArray(new String[0]));
        logger.info("SSLEngine protocols enabled: "
            + Arrays.asList(sslEngine.getEnabledProtocols()));
        // addFirst() will make SSL handling the first stage of decoding
        // and the last stage of encoding; this must be added after
        // adding compression handling above.
        pipeline.addFirst("ssl", new SslHandler(sslEngine));
      }

      if (enableIpFilter) {
        logger.info("Setting up ipFilter with the following rule definition: "
            + patternRuleConfigDefinition);
        IpFilterRuleHandler ipFilterHandler = new IpFilterRuleHandler();
        ipFilterHandler.addAll(rules);
        logger.info("Adding ipFilter with " + ipFilterHandler.size() + " rules");
        pipeline.addFirst("ipFilter", ipFilterHandler);
      }

      return pipeline;
    }
  }
}
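To see the source in action, here is a minimal standalone client that sends a single event to it over Avro RPC. This is a sketch rather than part of the file above: the class name, host, and port are illustrative and must match the source's "bind"/"port" configuration. It uses only the Avro IPC and Flume SDK classes the source itself depends on.

import java.net.InetSocketAddress;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.HashMap;

import org.apache.avro.ipc.NettyTransceiver;
import org.apache.avro.ipc.specific.SpecificRequestor;
import org.apache.flume.source.avro.AvroFlumeEvent;
import org.apache.flume.source.avro.AvroSourceProtocol;
import org.apache.flume.source.avro.Status;

public class AvroSourceClientExample {
  public static void main(String[] args) throws Exception {
    // Connect to the running source; host and port are illustrative.
    NettyTransceiver transceiver =
        new NettyTransceiver(new InetSocketAddress("localhost", 4141));
    try {
      // Obtain a dynamic proxy speaking the same AvroSourceProtocol the source implements.
      AvroSourceProtocol client =
          SpecificRequestor.getClient(AvroSourceProtocol.class, transceiver);

      // Build a single event: a header map plus an opaque byte-buffer body.
      AvroFlumeEvent event = new AvroFlumeEvent();
      event.setHeaders(new HashMap<CharSequence, CharSequence>());
      event.setBody(ByteBuffer.wrap("hello flume".getBytes(StandardCharsets.UTF_8)));

      // append() returns OK once the event has been committed to the channel,
      // or FAILED if the source could not hand it to its channel processor.
      Status status = client.append(event);
      System.out.println("append status: " + status);
    } finally {
      transceiver.close();
    }
  }
}

For higher throughput, the same proxy exposes appendBatch(List<AvroFlumeEvent>), which the source commits to its channel as a single transaction, matching the batching behavior described in the class Javadoc.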