private void commitPutsToOverflow_core(Transaction overflowPutTx) throws InterruptedException { // reattempt only once if overflow is full first time around for (int i = 0; i < 2; ++i) { try { synchronized (queueLock) { overflowPutTx.commit(); drainOrder.putOverflow(putList.size()); channelCounter.setChannelSize(memQueue.size() + drainOrder.overflowCounter); break; } } catch (ChannelFullException e) { // drop lock & reattempt if (i == 0) { Thread.sleep(overflowTimeout * 1000); } else { throw e; } } } }
/**
 * Commit and close the transaction.
 *
 * If this method throws an Exception the caller *must* ensure that the
 * transaction is rolled back. Callers can roll back the transaction by
 * calling {@link #rollbackTransaction()}.
 *
 * @return True if there was an open transaction and it was committed, false
 *         otherwise.
 * @throws EventDeliveryException There was an error ending the batch with
 *         the failure policy.
 */
@VisibleForTesting
boolean commitTransaction() throws EventDeliveryException {
  // Nothing to do when no transaction is open.
  if (transaction == null) {
    return false;
  }
  // Flush the failure policy before sealing the batch.
  failurePolicy.sync();
  transaction.commit();
  transaction.close();
  this.transaction = null;
  return true;
}
// Commit the batch, then record the number of drained events on the sink
// counter. NOTE(review): fragment of a larger method not visible here.
txn.commit(); sinkCounter.addToEventDrainSuccessCount(actions.size());
@Override
public Status process() throws EventDeliveryException {
  // Takes at most one event from the channel and, when INFO logging is
  // enabled, logs a dump of it. Returns BACKOFF when the channel is empty so
  // the sink runner throttles polling.
  Status result = Status.READY;
  Channel channel = getChannel();
  Transaction transaction = channel.getTransaction();
  Event event = null;
  try {
    transaction.begin();
    event = channel.take();
    if (event != null) {
      if (logger.isInfoEnabled()) {
        // dumpEvent limits the logged body to maxBytesToLog bytes.
        logger.info("Event: " + EventHelper.dumpEvent(event, maxBytesToLog));
      }
    } else {
      // No event found, request back-off semantics from the sink runner
      result = Status.BACKOFF;
    }
    transaction.commit();
  } catch (Exception ex) {
    // Roll back before surfacing the failure; cause is preserved.
    transaction.rollback();
    throw new EventDeliveryException("Failed to log event: " + event, ex);
  } finally {
    // Always close the transaction, whether committed or rolled back.
    transaction.close();
  }
  return result;
}
// Closing brace of the enclosing class (class header is outside this chunk).
}
// Commit the channel transaction. NOTE(review): fragment of a larger method
// not visible here.
transaction.commit();
// End of an anonymous callback/listener, then commit the batch and record the
// drained-event count. NOTE(review): fragment of a larger method not visible
// here.
}); txn.commit(); sinkCounter.addToEventDrainSuccessCount(actions.size());
// Commit, then bump success counters; Math.min caps the event count when the
// loop exited early (i < batchSize). NOTE(review): fragment of a larger
// method not visible here.
transaction.commit(); counterGroup.addAndGet("events.success", (long) Math.min(batchSize, i)); counterGroup.incrementAndGet("transaction.success");
// Commit and flag success — presumably checked in a finally block to decide
// on rollback. TODO confirm against the enclosing method (not visible here).
transaction.commit(); success = true;
// Commit the channel transaction. NOTE(review): fragment of a larger method
// not visible here.
transaction.commit();
// Commit, then record the batch size as successfully drained events.
// NOTE(review): fragment of a larger method not visible here.
transaction.commit(); sinkCounter.addToEventDrainSuccessCount(size);
// Flush the client (send the batch downstream) before committing the channel
// transaction, then update success counters. NOTE(review): fragment of a
// larger method not visible here.
client.execute(); txn.commit(); sinkCounter.addToEventDrainSuccessCount(count); counterGroup.incrementAndGet("transaction.success");
// Commit and close the transaction. NOTE(review): fragment of a larger
// method not visible here.
tx.commit(); tx.close();
// Commit and close the transaction. NOTE(review): fragment of a larger
// method not visible here.
tx.commit(); tx.close();
// Build an event (msg.getBytes() uses the platform default charset — prefer
// an explicit charset; flagged for review), put it on the channel, then
// commit and close. NOTE(review): fragment of a larger method not visible
// here.
Event event = EventBuilder.withBody(msg.getBytes(), headers); memoryChannel.put(event); tx.commit(); tx.close();
@Test public void testTopicAndKeyFromHeader() { Sink kafkaSink = new KafkaSink(); Context context = prepareDefaultContext(); Configurables.configure(kafkaSink, context); Channel memoryChannel = new MemoryChannel(); Configurables.configure(memoryChannel, context); kafkaSink.setChannel(memoryChannel); kafkaSink.start(); String msg = "test-topic-and-key-from-header"; Map<String, String> headers = new HashMap<String, String>(); headers.put("topic", TestConstants.CUSTOM_TOPIC); headers.put("key", TestConstants.CUSTOM_KEY); Transaction tx = memoryChannel.getTransaction(); tx.begin(); Event event = EventBuilder.withBody(msg.getBytes(), headers); memoryChannel.put(event); tx.commit(); tx.close(); try { Sink.Status status = kafkaSink.process(); if (status == Sink.Status.BACKOFF) { fail("Error Occurred"); } } catch (EventDeliveryException ex) { // ignore } checkMessageArrived(msg, TestConstants.CUSTOM_TOPIC); }
/**
 * Builds a KafkaSink backed by a MemoryChannel, stages a single event with
 * the given body, and returns the status of one {@code process()} call.
 *
 * @param context sink/channel configuration
 * @param msg     event body to send
 * @return the status reported by the sink's process() invocation
 * @throws EventDeliveryException if the sink fails to deliver the event
 */
private Sink.Status prepareAndSend(Context context, String msg)
    throws EventDeliveryException {
  Sink sink = new KafkaSink();
  Configurables.configure(sink, context);

  Channel channel = new MemoryChannel();
  Configurables.configure(channel, context);
  sink.setChannel(channel);
  sink.start();

  // Stage exactly one event inside an explicit transaction.
  Transaction transaction = channel.getTransaction();
  transaction.begin();
  channel.put(EventBuilder.withBody(msg.getBytes()));
  transaction.commit();
  transaction.close();

  return sink.process();
}
// Build an event with headers, put it on the channel, then commit and close.
// NOTE(review): fragment of a larger method not visible here.
Event event = EventBuilder.withBody(msg.getBytes(), headers); memoryChannel.put(event); tx.commit(); tx.close();
@Test public void testReplaceSubStringOfTopicWithHeaders() { String topic = TestConstants.HEADER_1_VALUE + "-topic"; Sink kafkaSink = new KafkaSink(); Context context = prepareDefaultContext(); context.put(TOPIC_CONFIG, TestConstants.HEADER_TOPIC); Configurables.configure(kafkaSink, context); Channel memoryChannel = new MemoryChannel(); Configurables.configure(memoryChannel, context); kafkaSink.setChannel(memoryChannel); kafkaSink.start(); String msg = "test-replace-substring-of-topic-with-headers"; Map<String, String> headers = new HashMap<>(); headers.put(TestConstants.HEADER_1_KEY, TestConstants.HEADER_1_VALUE); Transaction tx = memoryChannel.getTransaction(); tx.begin(); Event event = EventBuilder.withBody(msg.getBytes(), headers); memoryChannel.put(event); tx.commit(); tx.close(); try { Sink.Status status = kafkaSink.process(); if (status == Sink.Status.BACKOFF) { fail("Error Occurred"); } } catch (EventDeliveryException ex) { // ignore } checkMessageArrived(msg, topic); }
// Build an event with headers, put it on the channel, then commit and close.
// NOTE(review): fragment of a larger method not visible here.
Event event = EventBuilder.withBody(msg.getBytes(), headers); memoryChannel.put(event); tx.commit(); tx.close();
@Test public void testDefaultTopic() { Sink kafkaSink = new KafkaSink(); Context context = prepareDefaultContext(); Configurables.configure(kafkaSink, context); Channel memoryChannel = new MemoryChannel(); Configurables.configure(memoryChannel, context); kafkaSink.setChannel(memoryChannel); kafkaSink.start(); String msg = "default-topic-test"; Transaction tx = memoryChannel.getTransaction(); tx.begin(); Event event = EventBuilder.withBody(msg.getBytes()); memoryChannel.put(event); tx.commit(); tx.close(); try { Sink.Status status = kafkaSink.process(); if (status == Sink.Status.BACKOFF) { fail("Error Occurred"); } } catch (EventDeliveryException ex) { // ignore } checkMessageArrived(msg, DEFAULT_TOPIC); }