@Override
public void configure(Context context) {
  this.context = context;
  maxBatchSize = context.getInteger(BATCH_SIZE, maxBatchSize);
  maxBatchDurationMillis = context.getLong(BATCH_DURATION_MILLIS, maxBatchDurationMillis);
  handlerClass = context.getString(HANDLER_CLASS, MorphlineHandlerImpl.class.getName());
  if (sinkCounter == null) {
    sinkCounter = new SinkCounter(getName());
  }
}
@Override
public synchronized void start() {
  producer = new DefaultMQProducer(producerGroup);
  producer.setNamesrvAddr(nameServer);
  try {
    producer.start();
  } catch (MQClientException e) {
    sinkCounter.incrementConnectionFailedCount();
    log.error("RocketMQ producer start failed", e);
    throw new FlumeException("Failed to start RocketMQ producer", e);
  }
  sinkCounter.incrementConnectionCreatedCount();
  sinkCounter.start();
  super.start();
}
Channel channel = getChannel();
Transaction transaction = channel.getTransaction();
try {
  transaction.begin();
  List<Event> batch = new ArrayList<>();
  for (int i = 0; i < maxBatchSize; i++) {
    Event event = channel.take();
    if (event == null) {
      break;
    }
    batch.add(event);
  }
  if (batch.isEmpty()) {
    sinkCounter.incrementBatchEmptyCount();
  } else if (batch.size() < maxBatchSize) {
    sinkCounter.incrementBatchUnderflowCount();
  } else {
    sinkCounter.incrementBatchCompleteCount();
  }
  sinkCounter.addToEventDrainAttemptCount(batch.size());
  // ... deliver the batch downstream ...
  transaction.commit();
  sinkCounter.addToEventDrainSuccessCount(batch.size());
} catch (Exception e) {
  transaction.rollback();
  throw new EventDeliveryException(e);
} finally {
  transaction.close();
}
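To make the counter semantics in that fragment concrete, here is a minimal standalone sketch (not one of the search results) that exercises a SinkCounter directly; the sink name "example-sink" and the counts are arbitrary placeholders:

import org.apache.flume.instrumentation.SinkCounter;

public class SinkCounterDemo {
  public static void main(String[] args) {
    SinkCounter counter = new SinkCounter("example-sink"); // hypothetical sink name
    counter.start(); // registers the counter group so it is visible via JMX

    counter.addToEventDrainAttemptCount(10);  // events taken from the channel
    counter.incrementBatchUnderflowCount();   // batch was smaller than batchSize
    counter.addToEventDrainSuccessCount(10);  // events confirmed delivered

    System.out.println(counter.getEventDrainAttemptCount()); // 10
    System.out.println(counter.getEventDrainSuccessCount()); // 10

    counter.stop();
  }
}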
/**
 * Stops the grid.
 */
@Override
public synchronized void stop() {
  if (ignite != null)
    ignite.close();

  sinkCounter.incrementConnectionClosedCount();
  sinkCounter.stop();
  super.stop();
}
@Override
public void start() {
  Preconditions.checkArgument(client == null, "Please call stop "
      + "before calling start on an old instance.");
  sinkCounter.start();
  sinkCounter.incrementConnectionCreatedCount();
  client = initHBaseClient();
  super.start();
}
private void destroyConnection() {
  if (client != null) {
    logger.debug("Rpc sink {} closing Rpc client: {}", getName(), client);
    try {
      client.close();
      sinkCounter.incrementConnectionClosedCount();
    } catch (FlumeException e) {
      sinkCounter.incrementConnectionFailedCount();
      logger.error("Rpc sink " + getName() + ": Attempt to close Rpc "
          + "client failed. Exception follows.", e);
    }
  }
  client = null;
}
tableName = context.getString(HBase2SinkConfigurationConstants.CONFIG_TABLE);
String cf = context.getString(
    HBase2SinkConfigurationConstants.CONFIG_COLUMN_FAMILY);
batchSize = context.getLong(
    HBase2SinkConfigurationConstants.CONFIG_BATCHSIZE, 100L);
Context serializerContext = new Context();
// ...
this.config.set(HConstants.ZOOKEEPER_ZNODE_PARENT, hbaseZnode);
sinkCounter = new SinkCounter(this.getName());
if (shouldRotate) {
  try {
    serializer.beforeClose();
    outputStream.close();
    sinkCounter.incrementConnectionClosedCount();
    shouldRotate = false;
  } catch (IOException e) {
    sinkCounter.incrementConnectionFailedCount();
    throw new EventDeliveryException("Unable to rotate file "
        + pathController.getCurrentFile() + " while delivering event", e);
  } finally {
    serializer = null;
    outputStream = null;
  }
  pathController.rotate();
}

if (outputStream == null) {
  try {
    outputStream = new BufferedOutputStream(
        new FileOutputStream(pathController.getCurrentFile()));
    serializer = EventSerializerFactory.getInstance(
        serializerType, serializerContext, outputStream);
    serializer.afterCreate();
    sinkCounter.incrementConnectionCreatedCount();
  } catch (IOException e) {
    sinkCounter.incrementConnectionFailedCount();
    throw new EventDeliveryException("Failed to open file "
        + pathController.getCurrentFile() + " while delivering event", e);
  }
}

Channel channel = getChannel();
Transaction transaction = channel.getTransaction();
Event event = null;
try {
  transaction.begin();
  int eventAttemptCounter = 0;
  for (int i = 0; i < batchSize; i++) {
    event = channel.take();
    if (event != null) {
      sinkCounter.incrementEventDrainAttemptCount();
      eventAttemptCounter++;
      serializer.write(event);
    } else {
      break;
    }
  }
  serializer.flush();
  outputStream.flush();
  transaction.commit();
  sinkCounter.addToEventDrainSuccessCount(eventAttemptCounter);
} catch (Exception e) {
  transaction.rollback();
  throw new EventDeliveryException("Failed to process transaction", e);
} finally {
  transaction.close();
}
/**
 * Sink configurations with Ignite-specific settings.
 *
 * @param context Context for sink.
 */
@Override
public void configure(Context context) {
  springCfgPath = context.getString(IgniteSinkConstants.CFG_PATH);
  cacheName = context.getString(IgniteSinkConstants.CFG_CACHE_NAME);
  eventTransformerCls = context.getString(IgniteSinkConstants.CFG_EVENT_TRANSFORMER);
  batchSize = context.getInteger(IgniteSinkConstants.CFG_BATCH_SIZE, DFLT_BATCH_SIZE);

  if (sinkCounter == null)
    sinkCounter = new SinkCounter(getName());
}
@Override
public void configure(Context context) {
  String pathManagerType = context.getString("sink.pathManager", "DEFAULT");
  String directory = context.getString("sink.directory");
  String rollInterval = context.getString("sink.rollInterval");

  serializerType = context.getString("sink.serializer", "TEXT");
  serializerContext = new Context(
      context.getSubProperties("sink." + EventSerializer.CTX_PREFIX));

  Context pathManagerContext = new Context(
      context.getSubProperties("sink." + PathManager.CTX_PREFIX));
  pathController = PathManagerFactory.getInstance(pathManagerType, pathManagerContext);

  Preconditions.checkArgument(directory != null, "Directory may not be null");
  Preconditions.checkNotNull(serializerType, "Serializer type is undefined");

  if (rollInterval == null) {
    this.rollInterval = defaultRollInterval;
  } else {
    this.rollInterval = Long.parseLong(rollInterval);
  }

  batchSize = context.getInteger("sink.batchSize", defaultBatchSize);
  this.directory = new File(directory);

  if (sinkCounter == null) {
    sinkCounter = new SinkCounter(getName());
  }
}
@Override
public void configure(Context context) {
  if (!isLocal) {
    if (StringUtils.isNotBlank(context.getString(HOSTNAMES))) {
      serverAddresses = StringUtils.deleteWhitespace(
          context.getString(HOSTNAMES)).split(",");
    }
  }
  if (StringUtils.isNotBlank(context.getString(INDEX_NAME))) {
    this.indexName = context.getString(INDEX_NAME);
  }
  // ...
  sinkCounter = new SinkCounter(getName());
}
@Override
public void configure(Context context) {
  clientProps = new Properties();

  hostname = context.getString("hostname");
  port = context.getInteger("port");

  Preconditions.checkState(hostname != null, "No hostname specified");
  Preconditions.checkState(port != null, "No port specified");

  clientProps.setProperty(RpcClientConfigurationConstants.CONFIG_HOSTS, "h1");
  clientProps.setProperty(RpcClientConfigurationConstants.CONFIG_HOSTS_PREFIX + "h1",
      hostname + ":" + port);

  for (Entry<String, String> entry : context.getParameters().entrySet()) {
    clientProps.setProperty(entry.getKey(), entry.getValue());
  }

  batchSize = AbstractRpcClient.parseBatchSize(clientProps);

  if (sinkCounter == null) {
    sinkCounter = new SinkCounter(getName());
  }
  cxnResetInterval = context.getInteger("reset-connection-interval",
      DEFAULT_CXN_RESET_INTERVAL);
  if (cxnResetInterval == DEFAULT_CXN_RESET_INTERVAL) {
    logger.info("Connection reset is set to "
        + String.valueOf(DEFAULT_CXN_RESET_INTERVAL)
        + ". Will not reset connection to next hop");
  }
}
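For context, a minimal sketch (not one of the search results) of how client properties assembled like those above can be handed to RpcClientFactory; the localhost:4141 address is a placeholder, not taken from the snippet:

import java.util.Properties;

import org.apache.flume.api.RpcClient;
import org.apache.flume.api.RpcClientConfigurationConstants;
import org.apache.flume.api.RpcClientFactory;

public class RpcClientSketch {
  public static void main(String[] args) throws Exception {
    Properties props = new Properties();
    props.setProperty(RpcClientConfigurationConstants.CONFIG_HOSTS, "h1");
    props.setProperty(RpcClientConfigurationConstants.CONFIG_HOSTS_PREFIX + "h1",
        "localhost:4141"); // placeholder host:port
    RpcClient client = RpcClientFactory.getInstance(props);
    try {
      // client.append(event) or client.appendBatch(events) would go here
    } finally {
      client.close();
    }
  }
}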
try {
  for (; txnEventCount < batchSize; ++txnEventCount) {
    Event event = channel.take();
    if (event == null) {
      break;
    }
    // ... map the event to its HiveEndPoint and write it ...
  }

  if (txnEventCount == 0) {
    sinkCounter.incrementBatchEmptyCount();
  } else if (txnEventCount == batchSize) {
    sinkCounter.incrementBatchCompleteCount();
  } else {
    sinkCounter.incrementBatchUnderflowCount();
  }
  sinkCounter.addToEventDrainAttemptCount(txnEventCount);

  // ... flush and commit the active writers ...
  sinkCounter.addToEventDrainSuccessCount(txnEventCount);
  return txnEventCount;
} catch (HiveWriter.Failure e) {
  // ...
}
Event event = channel.take();
// ...
} catch (Throwable th) {
  // ... roll back the transaction and attempt to close open writers ...
  try {
    // close writer
  } catch (EventDeliveryException ex) {
    LOG.warn("Error closing writer there may be temp files that need to"
        + " be manually recovered: " + ex.getLocalizedMessage());
    LOG.debug("Exception follows.", ex);
  }
  throw new EventDeliveryException(th);
}

if (processedEvents == 0) {
  counter.incrementBatchEmptyCount();
  return Status.BACKOFF;
} else if (processedEvents < batchSize) {
  counter.incrementBatchUnderflowCount();
} else {
  counter.incrementBatchCompleteCount();
}
counter.addToEventDrainSuccessCount(processedEvents);
txn.commit();
sinkCounter.addToEventDrainSuccessCount(actions.size());
@Override
public void start() {
  ElasticSearchClientFactory clientFactory = new ElasticSearchClientFactory();

  logger.info("ElasticSearch sink {} started", getName());
  sinkCounter.start();
  try {
    if (isLocal) {
      client = clientFactory.getLocalClient(
          clientType, eventSerializer, indexRequestFactory);
    } else {
      client = clientFactory.getClient(clientType, serverAddresses,
          clusterName, eventSerializer, indexRequestFactory);
      client.configure(elasticSearchClientContext);
    }
    sinkCounter.incrementConnectionCreatedCount();
  } catch (Exception ex) {
    logger.error("Unable to create ElasticSearch client. Exception follows.", ex);
    sinkCounter.incrementConnectionFailedCount();
    if (client != null) {
      client.close();
      sinkCounter.incrementConnectionClosedCount();
    }
  }

  super.start();
}
@Override public final void start() { LOG.info("Starting HttpSink"); sinkCounter.start(); }
@Override public void stop() { logger.info("RollingFile sink {} stopping...", getName()); sinkCounter.stop(); super.stop(); serializer.beforeClose(); outputStream.close(); sinkCounter.incrementConnectionClosedCount(); } catch (IOException e) { sinkCounter.incrementConnectionFailedCount(); logger.error("Unable to close output stream. Exception follows.", e); } finally {
@Override public final void stop() { LOG.info("Stopping HttpSink"); sinkCounter.stop(); }
private HiveWriter getOrCreateWriter(Map<HiveEndPoint, HiveWriter> activeWriters,
    HiveEndPoint endPoint) throws HiveWriter.ConnectException, InterruptedException {
  try {
    HiveWriter writer = allWriters.get(endPoint);
    if (writer == null) {
      LOG.info(getName() + ": Creating Writer to Hive end point : " + endPoint);
      writer = new HiveWriter(endPoint, txnsPerBatchAsk, autoCreatePartitions,
          callTimeout, callTimeoutPool, proxyUser, serializer, sinkCounter);
      sinkCounter.incrementConnectionCreatedCount();
      if (allWriters.size() > maxOpenConnections) {
        int retired = closeIdleWriters();
        if (retired == 0) {
          closeEldestWriter();
        }
      }
      allWriters.put(endPoint, writer);
      activeWriters.put(endPoint, writer);
    } else {
      if (activeWriters.get(endPoint) == null) {
        activeWriters.put(endPoint, writer);
      }
    }
    return writer;
  } catch (HiveWriter.ConnectException e) {
    sinkCounter.incrementConnectionFailedCount();
    throw e;
  }
}
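Taken together, the results above share one pattern: create the SinkCounter in configure(), start it and count a connection in start(), account for each batch in process(), and shut it down in stop(). Below is a minimal self-contained sketch of that pattern; the class name NoopSink and its no-op delivery step are hypothetical, not from any result above:

import java.util.ArrayList;
import java.util.List;

import org.apache.flume.Channel;
import org.apache.flume.Context;
import org.apache.flume.Event;
import org.apache.flume.EventDeliveryException;
import org.apache.flume.Transaction;
import org.apache.flume.conf.Configurable;
import org.apache.flume.instrumentation.SinkCounter;
import org.apache.flume.sink.AbstractSink;

public class NoopSink extends AbstractSink implements Configurable {
  private SinkCounter sinkCounter;
  private int batchSize;

  @Override
  public void configure(Context context) {
    batchSize = context.getInteger("batchSize", 100);
    if (sinkCounter == null) {
      sinkCounter = new SinkCounter(getName()); // one counter group per sink instance
    }
  }

  @Override
  public synchronized void start() {
    sinkCounter.start();
    sinkCounter.incrementConnectionCreatedCount(); // stands in for opening a real connection
    super.start();
  }

  @Override
  public Status process() throws EventDeliveryException {
    Status status = Status.READY;
    Channel channel = getChannel();
    Transaction transaction = channel.getTransaction();
    try {
      transaction.begin();
      List<Event> batch = new ArrayList<>();
      for (int i = 0; i < batchSize; i++) {
        Event event = channel.take();
        if (event == null) {
          break;
        }
        batch.add(event);
      }
      if (batch.isEmpty()) {
        sinkCounter.incrementBatchEmptyCount();
        status = Status.BACKOFF;
      } else if (batch.size() < batchSize) {
        sinkCounter.incrementBatchUnderflowCount();
      } else {
        sinkCounter.incrementBatchCompleteCount();
      }
      sinkCounter.addToEventDrainAttemptCount(batch.size());
      // A real sink would deliver the batch downstream here; this one drops it.
      transaction.commit();
      sinkCounter.addToEventDrainSuccessCount(batch.size());
    } catch (Exception e) {
      transaction.rollback();
      throw new EventDeliveryException("Failed to deliver batch", e);
    } finally {
      transaction.close();
    }
    return status;
  }

  @Override
  public synchronized void stop() {
    sinkCounter.incrementConnectionClosedCount();
    sinkCounter.stop();
    super.stop();
  }
}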