/**
 * Wires a fresh KafkaSink to a fresh MemoryChannel, pushes a single event
 * carrying {@code msg} as its body through a channel transaction, and asks
 * the sink to process it.
 *
 * @param context configuration applied to both the sink and the channel
 * @param msg     payload to place on the channel as the event body
 * @return the status returned by {@code kafkaSink.process()}
 * @throws EventDeliveryException if the sink fails to deliver the event
 */
private Sink.Status prepareAndSend(Context context, String msg) throws EventDeliveryException {
  Sink kafkaSink = new KafkaSink();
  Configurables.configure(kafkaSink, context);
  Channel memoryChannel = new MemoryChannel();
  Configurables.configure(memoryChannel, context);
  kafkaSink.setChannel(memoryChannel);
  kafkaSink.start();
  Transaction tx = memoryChannel.getTransaction();
  tx.begin();
  // Fix: the no-arg getBytes() encoded with the platform-default charset,
  // making the event body environment-dependent. Encode explicitly as UTF-8.
  Event event = EventBuilder.withBody(msg.getBytes(java.nio.charset.StandardCharsets.UTF_8));
  memoryChannel.put(event);
  tx.commit();
  tx.close();
  return kafkaSink.process();
}
// NOTE(review): fragment — this excerpt is not syntactically complete on its
// own (the `else` below has no visible `if`, and commit/rollback appear
// sequentially); the enclosing method lies outside this view.
Channel channel = getChannel();
Transaction transaction = channel.getTransaction();
transaction.begin();
Event event = channel.take();
// Batch-size accounting: underflow / complete / empty counters — the
// branching that selects between them is not visible in this excerpt.
sinkCounter.incrementBatchUnderflowCount();
else sinkCounter.incrementBatchCompleteCount();
sinkCounter.incrementBatchEmptyCount();
sinkCounter.addToEventDrainAttemptCount(batch.size());
transaction.commit();
sinkCounter.addToEventDrainSuccessCount(batch.size());
// Presumably the rollback/throw below sit in a catch block — TODO confirm
// against the full source.
transaction.rollback();
throw new EventDeliveryException(e);
/** Human-readable identity: implementation class name plus sink name and channel name. */
@Override
public String toString() {
  StringBuilder rendered = new StringBuilder(getClass().getName());
  rendered.append("{name:").append(name);
  rendered.append(", channel:").append(channel.getName());
  rendered.append("}");
  return rendered.toString();
}
}
@Override public Status process() throws EventDeliveryException { Status result = Status.READY; Channel channel = getChannel(); Transaction transaction = channel.getTransaction(); Event event = null; try { transaction.begin(); event = channel.take(); if (event != null) { if (logger.isInfoEnabled()) { logger.info("Event: " + EventHelper.dumpEvent(event, maxBytesToLog)); } } else { // No event found, request back-off semantics from the sink runner result = Status.BACKOFF; } transaction.commit(); } catch (Exception ex) { transaction.rollback(); throw new EventDeliveryException("Failed to log event: " + event, ex); } finally { transaction.close(); } return result; } }
// NOTE(review): fragment — braces do not balance in this excerpt; the `if`
// matching the `} else {` below, and the enclosing method, are outside view.
Transaction txn = ch.getTransaction();
txn.begin();
Event event = ch.take();
sinkCounter.incrementEventDrainAttemptCount();
LOG.debug("Sending request : " + new String(event.getBody()));
// Failure path presumably: rollback without success counter — TODO confirm.
txn.rollback();
} else {
// Success path: commit and count the drained event.
txn.commit();
sinkCounter.incrementEventDrainSuccessCount();
// Write-failure counter; the branch selecting it is not visible here.
sinkCounter.incrementEventWriteFail();
// NOTE(review): fragment — trailing arguments of an unseen call, serializer
// setup, and pieces of a batch-take loop; control flow is incomplete here.
serializerType, serializerContext, outputStream);
serializer.afterCreate();
sinkCounter.incrementConnectionCreatedCount();
// Presumably inside a catch block for the file-open above — TODO confirm.
throw new EventDeliveryException("Failed to open file " + pathController.getCurrentFile()
    + " while delivering event", e);
Transaction transaction = channel.getTransaction();
Event event = null;
Status result = Status.READY;
transaction.begin();
int eventAttemptCounter = 0;
// Drains up to batchSize events; the loop body's closing brace is not
// visible in this excerpt.
for (int i = 0; i < batchSize; i++) {
event = channel.take();
transaction.commit();
// Rollback/throw presumably live in a catch block — TODO confirm.
transaction.rollback();
throw new EventDeliveryException("Failed to process transaction", ex);
// NOTE(review): fragment — commit/rollback skeleton; the work between
// begin() and commit(), and the closing braces of the finally block and
// method, are outside this excerpt.
Transaction transaction = channel.getTransaction();
transaction.begin();
// `success` gates the rollback in the finally block: only roll back when
// the commit did not complete.
boolean success = false;
try {
  transaction.commit();
  success = true;
  return Status.BACKOFF;
} catch (Exception e) {
  sinkCounter.incrementEventWriteOrChannelFail(e);
  throw new EventDeliveryException(e);
} finally {
  if (!success) {
    transaction.rollback();
    transaction.close();
// NOTE(review): fragment — a take, a writer-close warning handler, and
// batch-size accounting spliced together; the enclosing try/catch structure
// is outside this excerpt.
Event event = channel.take();
} catch (EventDeliveryException ex) {
// Best-effort close failed: warn that temp files may need manual recovery.
LOG.warn("Error closing writer there may be temp files that need to"
    + " be manually recovered: " + ex.getLocalizedMessage());
LOG.debug("Exception follows.", ex);
throw new EventDeliveryException(th);
// Batch accounting: empty (back off), underflow, or complete — the `if`
// matching the first counter is not visible here.
counter.incrementBatchEmptyCount();
return Status.BACKOFF;
} else if (processedEvents < batchSize) {
counter.incrementBatchUnderflowCount();
} else {
counter.incrementBatchCompleteCount();
// NOTE(review): fragment — batch-take loop with its closing brace missing,
// followed by batch-size accounting whose leading `if` is not visible.
for (; txnEventCount < batchSize; ++txnEventCount) {
  Event event = channel.take();
  if (event == null) {
    // Channel drained before the batch filled.
    break;
// Accounting: empty when nothing was taken, complete when the batch filled,
// underflow otherwise — the condition for the first counter is outside view.
sinkCounter.incrementBatchEmptyCount();
} else if (txnEventCount == batchSize) {
sinkCounter.incrementBatchCompleteCount();
} else {
sinkCounter.incrementBatchUnderflowCount();
sinkCounter.addToEventDrainAttemptCount(txnEventCount);
/**
 * Enter the transaction boundary: begins a new channel transaction if one
 * does not already exist. If we are already inside a transaction boundary,
 * this method does nothing.
 *
 * <p>A fresh failure policy is created alongside each new transaction.
 *
 * @param channel The Sink's channel
 * @throws EventDeliveryException There was an error starting a new batch
 *     with the failure policy.
 */
private void enterTransaction(Channel channel) throws EventDeliveryException {
  // There's no synchronization around the transaction instance because the
  // Sink API states "the Sink#process() call is guaranteed to only
  // be accessed by a single thread". Technically other methods could be
  // called concurrently, but the implementation of SinkRunner waits
  // for the Thread running process() to end before calling stop()
  if (transaction == null) {
    this.transaction = channel.getTransaction();
    transaction.begin();
    failurePolicy = FAILURE_POLICY_FACTORY.newPolicy(context);
  }
}
/**
 * Drains up to {@code batchsize} events from the channel, counting every
 * take (including null takes) as a drain attempt, and returns only the
 * non-null events actually taken.
 */
private List<Event> takeEventsFromChannel(Channel channel) {
  final List<Event> taken = new ArrayList<Event>();
  int attempts = 0;
  while (attempts < this.batchsize) {
    this.sinkCounter.incrementEventDrainAttemptCount();
    taken.add(channel.take());
    attempts++;
  }
  // Empty takes come back as null; strip them before returning.
  taken.removeAll(Collections.singleton(null));
  return taken;
}
/**
 * Takes one event from the channel and, when present, rebuilds it with the
 * original headers around a JSON object body (an empty object here).
 *
 * @return the rebuilt event, or {@code null} when the channel was empty
 */
private Event buildEvent(Channel channel) {
  final Event source = channel.take();
  final ObjectNode objectNode = new ObjectNode(JsonNodeFactory.instance);
  if (source == null) {
    return null;
  }
  final byte[] body = objectNode.toString().getBytes(Charsets.UTF_8);
  return EventBuilder.withBody(body, source.getHeaders());
}
// Callable body: takes a single event from the captured channel. The `});`
// closes an anonymous class whose instantiation starts outside this excerpt.
@Override
public Event call() {
  return channel.take();
}
});
// NOTE(review): fragment — poll loop waiting for the channel component to
// reach the START lifecycle state; the try block and loop are not closed
// within this excerpt (the sleep's InterruptedException handler is unseen).
while (ch.getLifecycleState() != LifecycleState.START
    && !supervisor.isComponentInErrorState(ch)) {
  try {
    logger.info("Waiting for channel: " + ch.getName()
        + " to start. Sleeping for 500 ms");
    Thread.sleep(500);
// Runnable body: puts the captured event onto the captured channel. The
// `});` closes an anonymous class begun outside this excerpt.
@Override
public void run() {
  channel.put(event);
}
});
// NOTE(review): fragment — a required-channel put with rollback on failure,
// followed by the start of an optional-channel put; braces do not balance
// and the two `tx` declarations belong to different (unseen) scopes.
Transaction tx = reqChannel.getTransaction();
Preconditions.checkNotNull(tx, "Transaction object must not be null");
try {
  tx.begin();
  reqChannel.put(event);
  tx.commit();
} catch (Throwable t) {
  tx.rollback();
  if (t instanceof Error) {
    LOG.error("Error while writing to required channel: " + reqChannel, t);
// Optional-channel path: same put pattern; its catch/finally is outside view.
Transaction tx = null;
try {
  tx = optChannel.getTransaction();
  tx.begin();
  optChannel.put(event);
  tx.commit();
// NOTE(review): fragment (duplicate of an earlier excerpt) — commit/rollback
// skeleton; the work between begin() and commit(), and the closing braces,
// are outside this excerpt.
Transaction transaction = channel.getTransaction();
transaction.begin();
// `success` gates the rollback in the finally block: only roll back when
// the commit did not complete.
boolean success = false;
try {
  transaction.commit();
  success = true;
  return Status.BACKOFF;
} catch (Exception e) {
  sinkCounter.incrementEventWriteOrChannelFail(e);
  throw new EventDeliveryException(e);
} finally {
  if (!success) {
    transaction.rollback();
    transaction.close();
/**
 * Drains up to {@code eventsToTake} events from the channel, counting every
 * take (including null takes) as a drain attempt, and returns only the
 * non-null events actually taken.
 */
private List<Event> takeEventsFromChannel(Channel channel, int eventsToTake) {
  final List<Event> collected = new ArrayList<Event>();
  int remaining = eventsToTake;
  while (remaining > 0) {
    this.sinkCounter.incrementEventDrainAttemptCount();
    collected.add(channel.take());
    remaining--;
  }
  // Empty takes come back as null; strip them before returning.
  collected.removeAll(Collections.singleton(null));
  return collected;
}
// Callable body: drains up to `max` events from the captured channel,
// stopping early when the channel runs empty. The `});` closes an anonymous
// class whose instantiation starts outside this excerpt.
@Override
public List<Event> call() {
  List<Event> events = new ArrayList<Event>(max);
  while (events.size() < max) {
    Event event = channel.take();
    if (event == null) {
      // Channel empty — return what we have so far.
      break;
    }
    events.add(event);
  }
  return events;
}
});