/**
 * Aborts all incomplete producer batches when a fatal, unrecoverable error occurs.
 *
 * @param exception the fatal error used to fail every outstanding batch
 */
private void maybeAbortBatches(RuntimeException exception) {
    // Nothing to do when no batches are awaiting completion.
    if (!accumulator.hasIncomplete()) {
        return;
    }
    log.error("Aborting producer batches due to fatal error", exception);
    accumulator.abortBatches(exception);
}
@Test public void testFlush() throws Exception { long lingerMs = Integer.MAX_VALUE; final RecordAccumulator accum = createTestRecordAccumulator( 4 * 1024 + DefaultRecordBatch.RECORD_BATCH_OVERHEAD, 64 * 1024, CompressionType.NONE, lingerMs); for (int i = 0; i < 100; i++) { accum.append(new TopicPartition(topic, i % 3), 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs); assertTrue(accum.hasIncomplete()); } RecordAccumulator.ReadyCheckResult result = accum.ready(cluster, time.milliseconds()); assertEquals("No nodes should be ready.", 0, result.readyNodes.size()); accum.beginFlush(); result = accum.ready(cluster, time.milliseconds()); // drain and deallocate all batches Map<Integer, List<ProducerBatch>> results = accum.drain(cluster, result.readyNodes, Integer.MAX_VALUE, time.milliseconds()); assertTrue(accum.hasIncomplete()); for (List<ProducerBatch> batches: results.values()) for (ProducerBatch batch: batches) accum.deallocate(batch); // should be complete with no unsent records. accum.awaitFlushCompletion(); assertFalse(accum.hasUndrained()); assertFalse(accum.hasIncomplete()); }
// NOTE(review): fragment — the enclosing test method's header is not visible here, and the
// hasUndrained()/hasIncomplete() assertions flip from true to false with no visible cause,
// so code between the assertions has almost certainly been elided from this view.
Map<Integer, List<ProducerBatch>> drained =
    accum.drain(cluster, result.readyNodes, Integer.MAX_VALUE, time.milliseconds());
// Immediately after draining, batches are in flight, so both flags still hold.
assertTrue(accum.hasUndrained());
assertTrue(accum.hasIncomplete());
// Presumably every record's callback received an exception by this point — TODO confirm
// against the elided code above.
assertEquals(numRecords, numExceptionReceivedInCallback.get());
assertFalse(accum.hasUndrained());
assertFalse(accum.hasIncomplete());
// NOTE(review): fragment — this line begins mid-call (the drain(...) invocation it closes is
// not visible), and intervening statements between the true/false assertions are elided.
time.milliseconds());
assertTrue(accum.hasUndrained());
assertTrue(accum.hasIncomplete());
// Every record either got an exception callback or was drained — the two counts partition
// numRecords. Assumes numDrainedRecords was tallied in code not shown here — TODO confirm.
assertEquals(numRecords, numExceptionReceivedInCallback.get() + numDrainedRecords);
// Drained-but-unacknowledged batches: nothing left undrained, yet still incomplete.
assertFalse(accum.hasUndrained());
assertTrue(accum.hasIncomplete());
// NOTE(review): this method body is truncated in the current view — the code after the
// null-handler early return is not shown, so only the visible portion is documented.
private boolean maybeSendTransactionalRequest(long now) {
    // Only act while a commit/abort is completing and batches are still outstanding.
    if (transactionManager.isCompleting() && accumulator.hasIncomplete()) {
        // On abort, proactively fail batches that were never drained to a broker.
        if (transactionManager.isAborting())
            accumulator.abortUndrainedBatches(new KafkaException("Failing batch since transaction was aborted"));
    // Ask the transaction manager for the next request, telling it whether
    // incomplete batches remain; null means there is nothing to send now.
    TransactionManager.TxnRequestHandler nextRequestHandler =
        transactionManager.nextRequestHandler(accumulator.hasIncomplete());
    if (nextRequestHandler == null)
        return false;
// NOTE(review): fragment — tail-end assertions of a transaction test; the enclosing
// method and the steps that produced this state are not visible here.
// Final state: transaction manager idle/ready, no partitions pending addition,
// and the accumulator fully drained and acknowledged.
assertTrue(transactionManager.isReady());
assertFalse(transactionManager.hasPartitionsToAdd());
assertFalse(accumulator.hasIncomplete());
// NOTE(review): fragment — assertions change truth value across this span (e.g.
// responseFuture.isDone() is asserted false then true), so sender.run(...) calls and
// other statements between them have been elided from this view.
sender.run(time.milliseconds());
// After this run: batches drained (not undrained) but still awaiting broker response.
assertFalse(accumulator.hasUndrained());
assertTrue(accumulator.hasIncomplete());
assertFalse(transactionManager.hasInFlightTransactionalRequest());
assertFalse(responseFuture.isDone());
assertTrue(accumulator.hasIncomplete());
assertFalse(transactionManager.hasInFlightTransactionalRequest());
assertFalse(responseFuture.isDone());
// Presumably a produce response arrives in elided code before this — TODO confirm.
assertTrue(responseFuture.isDone());
assertFalse(accumulator.hasUndrained());
assertFalse(accumulator.hasIncomplete());
assertFalse(transactionManager.hasInFlightTransactionalRequest());
// NOTE(review): fragment — like the similar span above, assertion values flip mid-span
// (responseFuture false then true), indicating statements elided between them.
sender.run(time.milliseconds());
// First run: drained but unacknowledged; no transactional request in flight.
assertFalse(accumulator.hasUndrained());
assertTrue(accumulator.hasIncomplete());
assertFalse(transactionManager.hasInFlightTransactionalRequest());
// Second run: state unchanged — presumably verifying idempotence of the poll loop
// while waiting on the broker — TODO confirm against the enclosing test.
sender.run(time.milliseconds());
assertFalse(accumulator.hasUndrained());
assertTrue(accumulator.hasIncomplete());
assertFalse(transactionManager.hasInFlightTransactionalRequest());
assertFalse(responseFuture.isDone());
assertTrue(accumulator.hasIncomplete());
assertFalse(transactionManager.hasInFlightTransactionalRequest());
assertFalse(responseFuture.isDone());
assertTrue(responseFuture.isDone());
assertFalse(accumulator.hasUndrained());
assertFalse(accumulator.hasIncomplete());
assertFalse(transactionManager.hasInFlightTransactionalRequest());
// NOTE(review): fragment — hasIncomplete() is asserted true then immediately false,
// so the statements that complete/abort the batches are elided from this view.
sender.run(time.milliseconds());
// The produce future for the authorized topic has not completed yet.
assertFalse(authorizedTopicProduceFuture.isDone());
assertTrue(accumulator.hasIncomplete());
assertFalse(accumulator.hasIncomplete());
// NOTE(review): fragment — identical tail-end assertions to an earlier span; the
// enclosing test method is not visible here.
// Final state: transaction manager ready, no pending partitions, accumulator empty.
assertTrue(transactionManager.isReady());
assertFalse(transactionManager.hasPartitionsToAdd());
assertFalse(accumulator.hasIncomplete());