private int prepareSplitBatches(RecordAccumulator accum, long seed, int recordSize, int numRecords) throws InterruptedException { Random random = new Random(); random.setSeed(seed); // First set the compression ratio estimation to be good. CompressionRatioEstimator.setEstimation(tp1.topic(), CompressionType.GZIP, 0.1f); // Append 20 records of 100 bytes size with poor compression ratio should make the batch too big. for (int i = 0; i < numRecords; i++) { accum.append(tp1, 0L, null, bytesWithPoorCompression(random, recordSize), Record.EMPTY_HEADERS, null, 0); } RecordAccumulator.ReadyCheckResult result = accum.ready(cluster, time.milliseconds()); assertFalse(result.readyNodes.isEmpty()); Map<Integer, List<ProducerBatch>> batches = accum.drain(cluster, result.readyNodes, Integer.MAX_VALUE, time.milliseconds()); assertEquals(1, batches.size()); assertEquals(1, batches.values().iterator().next().size()); ProducerBatch batch = batches.values().iterator().next().get(0); int numSplitBatches = accum.splitAndReenqueue(batch); accum.deallocate(batch); return numSplitBatches; }
// NOTE(review): fragment of a larger test method — the enclosing definition is not visible
// in this chunk. Asserts exactly one batch was drained for node1, splits that batch back
// into the accumulator, then advances time by 101 ms — presumably a mock clock, and 101 ms
// presumably exceeds a linger/backoff window so the split batches become ready; TODO confirm
// against the enclosing test.
assertEquals("Only one batch should be drained", 1, drained.get(node1.id()).size()); accum.splitAndReenqueue(drained.get(node1.id()).get(0)); time.sleep(101L);
private BatchDrainedResult completeOrSplitBatches(RecordAccumulator accum, int batchSize) { int numSplit = 0; int numBatches = 0; boolean batchDrained; do { batchDrained = false; RecordAccumulator.ReadyCheckResult result = accum.ready(cluster, time.milliseconds()); Map<Integer, List<ProducerBatch>> batches = accum.drain(cluster, result.readyNodes, Integer.MAX_VALUE, time.milliseconds()); for (List<ProducerBatch> batchList : batches.values()) { for (ProducerBatch batch : batchList) { batchDrained = true; numBatches++; if (batch.estimatedSizeInBytes() > batchSize + DefaultRecordBatch.RECORD_BATCH_OVERHEAD) { accum.splitAndReenqueue(batch); // release the resource of the original big batch. numSplit++; } else { batch.done(0L, 0L, null); } accum.deallocate(batch); } } } while (batchDrained); return new BatchDrainedResult(numSplit, numBatches); }
// NOTE(review): fragment — the enclosing method is not visible in this chunk. Handles a
// batch that must be split: if a transaction manager is present, forget the original batch
// as in-flight (the split pieces will be tracked separately — presumably re-registered on
// re-send; TODO confirm), split and re-enqueue it in the accumulator, free the original
// batch's buffer, and record the split in the producer's batch-split metric sensor.
if (transactionManager != null) transactionManager.removeInFlightBatch(batch); this.accumulator.splitAndReenqueue(batch); this.accumulator.deallocate(batch); this.sensors.recordBatchSplit();