private void completeBatch(ProducerBatch batch, ProduceResponse.PartitionResponse response) {
    if (transactionManager != null) {
        if (transactionManager.hasProducerIdAndEpoch(batch.producerId(), batch.producerEpoch())) {
            transactionManager.maybeUpdateLastAckedSequence(batch.topicPartition,
                    batch.baseSequence() + batch.recordCount - 1);
            log.debug("ProducerId: {}; Set last ack'd sequence number for topic-partition {} to {}",
                    batch.producerId(),
                    batch.topicPartition,
                    transactionManager.lastAckedSequence(batch.topicPartition));
        }
        transactionManager.updateLastAckedOffset(response, batch);
        transactionManager.removeInFlightBatch(batch);
    }

    if (batch.done(response.baseOffset, response.logAppendTime, null)) {
        maybeRemoveFromInflightBatches(batch);
        this.accumulator.deallocate(batch);
    }
}
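The sequence bookkeeping is simple arithmetic: a batch carries recordCount consecutive sequence numbers starting at baseSequence(). For example, a batch with baseSequence() of 5 and a recordCount of 3 covers sequences 5, 6, and 7, so once the broker acknowledges it, the last acked sequence for that partition advances to 5 + 3 - 1 = 7.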
@Test
public void testBatchCannotCompleteTwice() throws Exception {
    ProducerBatch batch = new ProducerBatch(new TopicPartition("topic", 1), memoryRecordsBuilder, now);
    MockCallback callback = new MockCallback();
    FutureRecordMetadata future = batch.tryAppend(now, null, new byte[10], Record.EMPTY_HEADERS, callback, now);

    // The first completion succeeds and fires the callback exactly once.
    batch.done(500L, 10L, null);
    assertEquals(1, callback.invocations);
    assertNull(callback.exception);
    assertNotNull(callback.metadata);

    // A second successful completion is a programming error and must throw.
    try {
        batch.done(1000L, 20L, null);
        fail("Expected exception from done");
    } catch (IllegalStateException e) {
        // expected
    }

    // The future reflects the first completion only.
    RecordMetadata recordMetadata = future.get();
    assertEquals(500L, recordMetadata.offset());
    assertEquals(10L, recordMetadata.timestamp());
}
if (batch.done(baseOffset, logAppendTime, exception)) {
    maybeRemoveFromInflightBatches(batch);
    this.accumulator.deallocate(batch);
}
assertFalse(drained.isEmpty());
assertFalse(drained.get(node1.id()).isEmpty());
drained.get(node1.id()).get(0).done(acked.get(), 100L, null);
assertEquals("The first message should have been acked.", 1, acked.get());
assertTrue(future1.isDone());

// Drain again to pick up the next batch: done() may only complete a batch once,
// so completing the batch from the first drain a second time would throw.
drained = accum.drain(cluster, accum.ready(cluster, time.milliseconds()).readyNodes,
        Integer.MAX_VALUE, time.milliseconds());
assertFalse(drained.isEmpty());
assertFalse(drained.get(node1.id()).isEmpty());
drained.get(node1.id()).get(0).done(acked.get(), 100L, null);
assertEquals("Both messages should have been acked.", 2, acked.get());
assertTrue(future2.isDone());
private BatchDrainedResult completeOrSplitBatches(RecordAccumulator accum, int batchSize) {
    int numSplit = 0;
    int numBatches = 0;
    boolean batchDrained;
    do {
        batchDrained = false;
        RecordAccumulator.ReadyCheckResult result = accum.ready(cluster, time.milliseconds());
        Map<Integer, List<ProducerBatch>> batches =
                accum.drain(cluster, result.readyNodes, Integer.MAX_VALUE, time.milliseconds());
        for (List<ProducerBatch> batchList : batches.values()) {
            for (ProducerBatch batch : batchList) {
                batchDrained = true;
                numBatches++;
                if (batch.estimatedSizeInBytes() > batchSize + DefaultRecordBatch.RECORD_BATCH_OVERHEAD) {
                    // Oversized batches are split and re-enqueued rather than completed.
                    accum.splitAndReenqueue(batch);
                    numSplit++;
                } else {
                    batch.done(0L, 0L, null);
                }
                // Release the resources of the original batch in either case.
                accum.deallocate(batch);
            }
        }
    } while (batchDrained);
    return new BatchDrainedResult(numSplit, numBatches);
}
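For reference, BatchDrainedResult only needs to carry the two counters the loop accumulates. A minimal holder consistent with how completeOrSplitBatches uses it (assuming no other fields, which this excerpt does not show) would be:

private static class BatchDrainedResult {
    final int numSplit;   // batches that were split and re-enqueued
    final int numBatches; // total batches drained across all iterations

    BatchDrainedResult(int numSplit, int numBatches) {
        this.numSplit = numSplit;
        this.numBatches = numBatches;
    }
}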
@Test
public void testBatchAbort() throws Exception {
    ProducerBatch batch = new ProducerBatch(new TopicPartition("topic", 1), memoryRecordsBuilder, now);
    MockCallback callback = new MockCallback();
    FutureRecordMetadata future = batch.tryAppend(now, null, new byte[10], Record.EMPTY_HEADERS, callback, now);

    KafkaException exception = new KafkaException();
    batch.abort(exception);
    assertTrue(future.isDone());
    assertEquals(1, callback.invocations);
    assertEquals(exception, callback.exception);
    assertNull(callback.metadata);

    // Subsequent completions (success or failure) should be ignored.
    assertFalse(batch.done(500L, 2342342341L, null));
    assertFalse(batch.done(-1, -1, new KafkaException()));
    assertEquals(1, callback.invocations);

    assertTrue(future.isDone());
    try {
        future.get();
        fail("Future should have thrown");
    } catch (ExecutionException e) {
        assertEquals(exception, e.getCause());
    }
}
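Taken together, the two tests pin down a small state machine: the first call to done() or abort() wins, a broker response that races with an abort is silently dropped, and a genuine double completion is treated as a bug. A minimal, self-contained sketch of that guard, assuming a compare-and-set over the final state (names like FinalState and tryComplete are illustrative, not the actual ProducerBatch internals):

import java.util.concurrent.atomic.AtomicReference;

// Hypothetical sketch of the once-only completion guard the tests above exercise.
final class CompletionGuard {
    enum FinalState { SUCCEEDED, FAILED, ABORTED }

    private final AtomicReference<FinalState> finalState = new AtomicReference<>(null);

    /**
     * Attempts to move the batch into a final state. Returns true only for the
     * first transition. A completion arriving after an abort is ignored
     * (returns false), matching the assertFalse checks in testBatchAbort; any
     * other double completion throws, matching testBatchCannotCompleteTwice.
     */
    boolean tryComplete(FinalState target) {
        if (finalState.compareAndSet(null, target))
            return true;  // first transition wins; fire callbacks exactly once here
        if (finalState.get() == FinalState.ABORTED)
            return false; // late broker response after an abort: drop it
        throw new IllegalStateException("Batch already completed as " + finalState.get());
    }
}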