/** Describes this sink by name and remote endpoint, e.g. for log messages. */
@Override
public String toString() {
  StringBuilder description = new StringBuilder("RpcSink ");
  description.append(getName())
      .append(" { host: ").append(hostname)
      .append(", port: ").append(port)
      .append(" }");
  return description.toString();
}
/**
 * Stops the grid: closes the Ignite handle (if one was ever created),
 * records the closed connection in the sink counter, and stops both the
 * counter and the superclass.
 */
@Override
public synchronized void stop() {
  if (ignite != null) {
    ignite.close();
  }
  sinkCounter.incrementConnectionClosedCount();
  sinkCounter.stop();
  super.stop();
}
/**
 * The start() of RpcSink is more of an optimization that allows connection
 * to be created before the process() loop is started. In case it so happens
 * that the start failed, the process() loop will itself attempt to reconnect
 * as necessary. This is the expected behavior since it is possible that the
 * downstream source becomes unavailable in the middle of the process loop
 * and the sink will have to retry the connection again.
 */
@Override
public void start() {
  logger.info("Starting {}...", this);
  sinkCounter.start();

  try {
    createConnection();
  } catch (FlumeException e) {
    // Failure here is non-fatal by design: process() retries the connection.
    logger.warn("Unable to create Rpc client using hostname: "
        + hostname + ", port: " + port, e);

    /* Try to prevent leaking resources. */
    destroyConnection();
  }

  super.start();
  logger.info("Rpc sink {} started.", getName());
}
/** Tears down the IRC connection and stops the sink, logging counter metrics. */
@Override
public void stop() {
  logger.info("IRC sink {} stopping", this.getName());
  destroyConnection();
  super.stop();
  logger.debug("IRC sink {} stopped. Metrics:{}", this.getName(), counterGroup);
}
/**
 * Stops the sink: destroys the RPC connection, shuts down the connection-reset
 * executor (force-stopping it if it fails to terminate within 5 seconds), and
 * stops the counter and superclass.
 *
 * Bug fix: the original condition was inverted — it called shutdownNow() only
 * when awaitTermination() returned true (i.e. the executor had ALREADY
 * terminated cleanly) and never on timeout, which is exactly backwards.
 */
@Override
public void stop() {
  logger.info("Rpc sink {} stopping...", getName());

  destroyConnection();
  cxnResetExecutor.shutdown();
  try {
    // Force-stop outstanding tasks only if the executor did NOT finish
    // within the grace period.
    if (!cxnResetExecutor.awaitTermination(5, TimeUnit.SECONDS)) {
      cxnResetExecutor.shutdownNow();
    }
  } catch (InterruptedException ex) {
    logger.error("Interrupted while waiting for connection reset executor to shut down");
    cxnResetExecutor.shutdownNow();
    Thread.currentThread().interrupt(); // restore interrupt status for callers
  }
  sinkCounter.stop();
  super.stop();

  logger.info("Rpc sink {} stopped. Metrics: {}", getName(), sinkCounter);
}
@Override public synchronized void start() { // instantiate the producer producer = new KafkaProducer<String,byte[]>(kafkaProps); counter.start(); super.start(); }
private void resetConnection() { try { destroyConnection(); createConnection(); } catch (Throwable throwable) { // Don't rethrow, else this runnable won't get scheduled again. logger.error("Error while trying to expire connection", throwable); } }
/**
 * Configures the selector. A maxTimeOut of 0 leaves the selector's own
 * default in place; any other value overrides it.
 */
@Override
public void configure(Context context) {
  super.configure(context);
  if (maxTimeOut != 0) {
    selector.setMaxTimeOut(maxTimeOut);
  }
}
/**
 * Tracks a failed sink together with its priority and how many consecutive
 * failures it has accumulated; schedules its next retry via adjustRefresh().
 */
public FailedSink(Integer priority, Sink sink, int seqFailures) {
  this.priority = priority;
  this.sink = sink;
  this.sequentialFailures = seqFailures;
  adjustRefresh();
}
/** Describes this sink by name and configured batch size. */
@Override
public String toString() {
  String description =
      "NullSink " + getName() + " { batchSize: " + batchSize + " }";
  return description;
}
/**
 * Applies a sink-group configuration: stores it and builds the sink
 * processor from the processor context it carries.
 */
@Override
public void configure(ComponentConfiguration conf) {
  SinkGroupConfiguration groupConf = (SinkGroupConfiguration) conf;
  this.conf = groupConf;
  processor =
      SinkProcessorFactory.getProcessor(groupConf.getProcessorContext(), sinks);
}
}
/**
 * Demotes the currently active sink into the failed set (with one recorded
 * failure) and returns the next-highest-priority live sink, or null when no
 * live sinks remain.
 *
 * Cleanup: the original guarded on {@code liveSinks.lastKey() != null}, but
 * SortedMap.lastKey() never returns null — it throws NoSuchElementException
 * on an empty map, a case already excluded by the isEmpty() check — so that
 * branch was unreachable and has been removed.
 */
private Sink moveActiveToDeadAndGetNext() {
  Integer key = liveSinks.lastKey();
  failedSinks.add(new FailedSink(key, activeSink, 1));
  liveSinks.remove(key);
  if (liveSinks.isEmpty()) {
    return null;
  }
  return liveSinks.get(liveSinks.lastKey());
}
/**
 * Builds a SinkGroupConfiguration from the raw context and delegates to the
 * typed configure(); a configuration error is surfaced as a FlumeException.
 */
@Override
public void configure(Context context) {
  conf = new SinkGroupConfiguration("sinkgrp");
  try {
    conf.configure(context);
  } catch (ConfigurationException e) {
    throw new FlumeException("Invalid Configuration!", e);
  }
  configure(conf);
}
/**
 * Shuts down the producer (if it was ever created), records the closed
 * connection, and stops the counter and superclass.
 *
 * Robustness fix: added a null guard on {@code producer} so that stop() does
 * not NPE when start() never ran or failed before the producer was built.
 */
@Override
public synchronized void stop() {
  if (producer != null) {
    producer.shutdown();
  }
  sinkCounter.incrementConnectionClosedCount();
  sinkCounter.stop();
  super.stop();
}
/**
 * Ensure the connection exists and is active.
 * If the connection is not active, destroy it and recreate it.
 *
 * @throws org.apache.flume.FlumeException If there are errors closing or opening the RPC
 * connection.
 */
private void verifyConnection() throws FlumeException {
  if (client != null && client.isActive()) {
    return; // connection is healthy, nothing to do
  }
  if (client != null) {
    // stale client: tear it down before reconnecting
    destroyConnection();
  }
  createConnection();
}
/**
 * Configures the selector, overriding its timeout only when one was
 * explicitly set (a value of 0 is treated as "not configured").
 */
@Override
public void configure(Context context) {
  super.configure(context);
  boolean timeoutConfigured = maxTimeOut != 0;
  if (timeoutConfigured) {
    selector.setMaxTimeOut(maxTimeOut);
  }
}