/**
 * Resolves the final agent configuration and materializes its components.
 * Exactly one source, one channel, and one sink group are required; any
 * other count is a configuration error.
 *
 * @param properties raw user-supplied configuration for this agent
 * @throws FlumeException if the configuration does not yield exactly one
 *     source, one channel, and one sink group
 */
private void doConfigure(Map<String, String> properties) {
  properties = EmbeddedAgentConfiguration.configure(name, properties);
  // Only dump configuration values when privacy settings explicitly allow it.
  if (LOGGER.isDebugEnabled() && LogPrivacyUtil.allowLogPrintConfig()) {
    LOGGER.debug("Agent configuration values");
    // TreeSet gives a deterministic, sorted key order in the log output.
    for (String key : new TreeSet<>(properties.keySet())) {
      // Parameterized logging, consistent with SLF4J usage elsewhere in the file.
      LOGGER.debug("{} = {}", key, properties.get(key));
    }
  }
  MaterializedConfiguration conf = configurationProvider.get(name, properties);
  Map<String, SourceRunner> sources = conf.getSourceRunners();
  if (sources.size() != 1) {
    throw new FlumeException("Expected one source and got " + sources.size());
  }
  Map<String, Channel> channels = conf.getChannels();
  if (channels.size() != 1) {
    throw new FlumeException("Expected one channel and got " + channels.size());
  }
  Map<String, SinkRunner> sinks = conf.getSinkRunners();
  if (sinks.size() != 1) {
    throw new FlumeException("Expected one sink group and got " + sinks.size());
  }
  this.sourceRunner = sources.values().iterator().next();
  this.channel = channels.values().iterator().next();
  this.sinkRunner = sinks.values().iterator().next();
}
/**
 * Builds the Flume event header map from an HTTP request: the request's
 * content type (when present) plus every request parameter as a key/value
 * pair. HTTP request headers are only logged (never returned), and only when
 * raw-data logging is permitted by the privacy policy.
 *
 * @param request incoming HTTP request
 * @return mutable map of Flume event headers derived from the request
 */
private Map<String, String> getHeaders(HttpServletRequest request) {
  if (LOGGER.isDebugEnabled() && LogPrivacyUtil.allowLogRawData()) {
    Map<String, String> requestHeaders = new HashMap<>();
    // Wildcard + cast keeps compatibility with servlet APIs where
    // getHeaderNames() returns a raw Enumeration.
    Enumeration<?> headerNames = request.getHeaderNames();
    while (headerNames.hasMoreElements()) {
      String name = (String) headerNames.nextElement();
      requestHeaders.put(name, request.getHeader(name));
    }
    LOGGER.debug("requestHeaders: {}", requestHeaders);
  }
  Map<String, String> headers = new HashMap<>();
  if (request.getContentType() != null) {
    headers.put(Metadata.CONTENT_TYPE, request.getContentType());
  }
  Enumeration<?> parameterNames = request.getParameterNames();
  while (parameterNames.hasMoreElements()) {
    String name = (String) parameterNames.nextElement();
    headers.put(name, request.getParameter(name));
  }
  return headers;
}
}
if (!removedHeaders.isEmpty() && LogPrivacyUtil.allowLogRawData()) { LOG.trace("Removed headers \"{}\" for event: {}", removedHeaders, event);
if (LOGGER.isDebugEnabled() && LogPrivacyUtil.allowLogPrintConfig()) { LOGGER.debug("Initial configuration: {}", getPrevalidationConfig()); sinkgroups = getSpaceDelimitedList(sinkgroupSet); if (LOGGER.isDebugEnabled() && LogPrivacyUtil.allowLogPrintConfig()) { LOGGER.debug("Post validation configuration for {}", agentName); LOGGER.debug(getPostvalidationConfig());
if (LOGGER.isDebugEnabled() && LogPrivacyUtil.allowLogRawData()) { LOGGER.debug("blobEvent: {}", event);
if (logger.isDebugEnabled() && LogPrivacyUtil.allowLogPrintConfig()) { logger.debug("Kafka producer properties: {}", kafkaProps);
for (String parameter : parameters.keySet()) { String value = parameters.get(parameter)[0]; if (LOG.isDebugEnabled() && LogPrivacyUtil.allowLogRawData()) { LOG.debug("Setting Header [Key, Value] as [{},{}] ", parameter, value);
zookeeperConnect = ctx.getString(ZOOKEEPER_CONNECT_FLUME_KEY); if (logger.isDebugEnabled() && LogPrivacyUtil.allowLogPrintConfig()) { logger.debug("Kafka properties: {}", ctx);
if (LogPrivacyUtil.allowLogRawData()) { log.trace("Topic: {} Partition: {} Message: {}", new String[]{ message.topic(),
if (log.isDebugEnabled() && LogPrivacyUtil.allowLogPrintConfig()) { log.debug("Kafka consumer properties: {}", kafkaProps);
/**
 * Receives a single Avro event, converts it to a Flume event, and hands it
 * to the channel processor. Counters track received/accepted totals and
 * channel write failures.
 *
 * @param avroEvent the incoming Avro-encoded event
 * @return {@code Status.OK} on success, {@code Status.FAILED} if the
 *     channel rejects the event
 */
@Override
public Status append(AvroFlumeEvent avroEvent) {
  // Include the event payload in the log only when privacy rules allow it.
  if (logger.isDebugEnabled()) {
    if (LogPrivacyUtil.allowLogRawData()) {
      logger.debug("Avro source {}: Received avro event: {}", getName(), avroEvent);
    } else {
      logger.debug("Avro source {}: Received avro event", getName());
    }
  }

  sourceCounter.incrementAppendReceivedCount();
  sourceCounter.incrementEventReceivedCount();

  // Translate the Avro representation into a Flume event.
  Event flumeEvent = EventBuilder.withBody(
      avroEvent.getBody().array(), toStringMap(avroEvent.getHeaders()));

  try {
    getChannelProcessor().processEvent(flumeEvent);
  } catch (ChannelException ex) {
    logger.warn("Avro source " + getName() + ": Unable to process event. "
        + "Exception follows.", ex);
    sourceCounter.incrementChannelWriteFail();
    return Status.FAILED;
  }

  sourceCounter.incrementAppendAcceptedCount();
  sourceCounter.incrementEventAcceptedCount();
  return Status.OK;
}
if (LOGGER.isDebugEnabled() && LogPrivacyUtil.allowLogPrintConfig()) { LOGGER.debug("Initial configuration: {}", getPrevalidationConfig()); sinkgroups = getSpaceDelimitedList(sinkgroupSet); if (LOGGER.isDebugEnabled() && LogPrivacyUtil.allowLogPrintConfig()) { LOGGER.debug("Post validation configuration for {}", agentName); LOGGER.debug(getPostvalidationConfig());
if (LogPrivacyUtil.allowLogRawData()) { logger.trace("{Event} " + eventTopic + " : " + eventKey + " : " + new String(eventBody, "UTF-8"));
if (logger.isDebugEnabled() && LogPrivacyUtil.allowLogPrintConfig()) { logger.debug("Kafka producer properties: {}", kafkaProps);
if (LogPrivacyUtil.allowLogRawData()) { logger.trace("Seen raw event: {}", msg); } else {
zookeeperConnect = ctx.getString(ZOOKEEPER_CONNECT_FLUME_KEY); if (logger.isDebugEnabled() && LogPrivacyUtil.allowLogPrintConfig()) { logger.debug("Kafka properties: {}", ctx);
if (LOGGER.isTraceEnabled() && LogPrivacyUtil.allowLogRawData()) { LOGGER.trace("Flume event arrived {}", event);
if (log.isDebugEnabled() && LogPrivacyUtil.allowLogPrintConfig()) { log.debug("Kafka consumer properties: {}", kafkaProps);
/**
 * Builds the Flume event header map from an HTTP request: the request's
 * content type (when present) plus every request parameter as a key/value
 * pair. HTTP request headers are only logged (never returned), and only when
 * raw-data logging is permitted by the privacy policy.
 *
 * @param request incoming HTTP request
 * @return mutable map of Flume event headers derived from the request
 */
private Map<String, String> getHeaders(HttpServletRequest request) {
  if (LOGGER.isDebugEnabled() && LogPrivacyUtil.allowLogRawData()) {
    Map<String, String> requestHeaders = new HashMap<>();
    // Wildcard + cast keeps compatibility with servlet APIs where
    // getHeaderNames() returns a raw Enumeration.
    Enumeration<?> headerNames = request.getHeaderNames();
    while (headerNames.hasMoreElements()) {
      String name = (String) headerNames.nextElement();
      requestHeaders.put(name, request.getHeader(name));
    }
    LOGGER.debug("requestHeaders: {}", requestHeaders);
  }
  Map<String, String> headers = new HashMap<>();
  if (request.getContentType() != null) {
    headers.put(Metadata.CONTENT_TYPE, request.getContentType());
  }
  Enumeration<?> parameterNames = request.getParameterNames();
  while (parameterNames.hasMoreElements()) {
    String name = (String) parameterNames.nextElement();
    headers.put(name, request.getParameter(name));
  }
  return headers;
}
}
if (!removedHeaders.isEmpty() && LogPrivacyUtil.allowLogRawData()) { LOG.trace("Removed headers \"{}\" for event: {}", removedHeaders, event);