private long sendProducerData(long now) { Cluster cluster = metadata.fetch(); RecordAccumulator.ReadyCheckResult result = this.accumulator.ready(cluster, now);
/**
 * Verifies that a size-bounded drain pulls batches from only one of two ready
 * partitions when the byte limit can accommodate just a single batch.
 */
@Test
public void testPartialDrain() throws Exception {
    RecordAccumulator accum = createTestRecordAccumulator(
            1024 + DefaultRecordBatch.RECORD_BATCH_OVERHEAD, 10 * 1024, CompressionType.NONE, 10L);
    // Enough appends to fill one batch and spill into a second one per partition.
    int appendsPerPartition = 1024 / msgSize + 1;
    for (TopicPartition partition : asList(tp1, tp2)) {
        for (int appendIndex = 0; appendIndex < appendsPerPartition; appendIndex++) {
            accum.append(partition, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs);
        }
    }
    assertEquals("Partition's leader should be ready", Collections.singleton(node1),
            accum.ready(cluster, time.milliseconds()).readyNodes);

    // Drain with a byte cap sized for a single batch only.
    List<ProducerBatch> drainedBatches =
            accum.drain(cluster, Collections.singleton(node1), 1024, 0).get(node1.id());
    assertEquals("But due to size bound only one partition should have been retrieved",
            1, drainedBatches.size());
}
private int prepareSplitBatches(RecordAccumulator accum, long seed, int recordSize, int numRecords) throws InterruptedException { Random random = new Random(); random.setSeed(seed); // First set the compression ratio estimation to be good. CompressionRatioEstimator.setEstimation(tp1.topic(), CompressionType.GZIP, 0.1f); // Append 20 records of 100 bytes size with poor compression ratio should make the batch too big. for (int i = 0; i < numRecords; i++) { accum.append(tp1, 0L, null, bytesWithPoorCompression(random, recordSize), Record.EMPTY_HEADERS, null, 0); } RecordAccumulator.ReadyCheckResult result = accum.ready(cluster, time.milliseconds()); assertFalse(result.readyNodes.isEmpty()); Map<Integer, List<ProducerBatch>> batches = accum.drain(cluster, result.readyNodes, Integer.MAX_VALUE, time.milliseconds()); assertEquals(1, batches.size()); assertEquals(1, batches.values().iterator().next().size()); ProducerBatch batch = batches.values().iterator().next().get(0); int numSplitBatches = accum.splitAndReenqueue(batch); accum.deallocate(batch); return numSplitBatches; }
/**
 * A record appended with a non-zero linger must not become sendable until the linger
 * time elapses; afterwards exactly that single record drains, with key and value intact.
 */
@Test
public void testLinger() throws Exception {
    long lingerMs = 10L;
    RecordAccumulator accum = createTestRecordAccumulator(
            1024 + DefaultRecordBatch.RECORD_BATCH_OVERHEAD, 10 * 1024, CompressionType.NONE, lingerMs);
    accum.append(tp1, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs);
    assertEquals("No partitions should be ready", 0,
            accum.ready(cluster, time.milliseconds()).readyNodes.size());

    // Advance the clock past the linger time; the batch should now be ready.
    time.sleep(10);
    assertEquals("Our partition's leader should be ready", Collections.singleton(node1),
            accum.ready(cluster, time.milliseconds()).readyNodes);

    List<ProducerBatch> drainedBatches =
            accum.drain(cluster, Collections.singleton(node1), Integer.MAX_VALUE, 0).get(node1.id());
    assertEquals(1, drainedBatches.size());

    Iterator<Record> recordIterator = drainedBatches.get(0).records().records().iterator();
    Record onlyRecord = recordIterator.next();
    assertEquals("Keys should match", ByteBuffer.wrap(key), onlyRecord.key());
    assertEquals("Values should match", ByteBuffer.wrap(value), onlyRecord.value());
    assertFalse("No more records", recordIterator.hasNext());
}
RecordAccumulator.ReadyCheckResult result = accum.ready(cluster, now + lingerMs + 1); assertEquals("Node1 should be ready", Collections.singleton(node1), result.readyNodes); Map<Integer, List<ProducerBatch>> batches = accum.drain(cluster, result.readyNodes, Integer.MAX_VALUE, now + lingerMs + 1); result = accum.ready(cluster, now + lingerMs + 1); assertEquals("Node1 should be ready", Collections.singleton(node1), result.readyNodes); result = accum.ready(cluster, now + retryBackoffMs + 1); assertEquals("Node1 should be ready", Collections.singleton(node1), result.readyNodes); batches = accum.drain(cluster, result.readyNodes, Integer.MAX_VALUE, now + retryBackoffMs + 1);
time.setCurrentTimeMs(System.currentTimeMillis()); accum.append(tp1, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs); assertEquals("No partition should be ready.", 0, accum.ready(cluster, time.milliseconds()).readyNodes.size()); readyNodes = accum.ready(cluster, time.milliseconds()).readyNodes; assertEquals("Our partition's leader should be ready", Collections.singleton(node1), readyNodes); expiredBatches = accum.expiredBatches(time.milliseconds()); assertEquals("The batch may expire when the partition is muted", 1, expiredBatches.size()); assertEquals("No partitions should be ready.", 0, accum.ready(cluster, time.milliseconds()).readyNodes.size());
long now = time.milliseconds(); while (read < numThreads * msgs) { Set<Node> nodes = accum.ready(cluster, now).readyNodes; List<ProducerBatch> batches = accum.drain(cluster, nodes, 5 * 1024, 0).get(node1.id()); if (batches != null) {
assertEquals("No partitions should be ready.", 0, accum.ready(cluster, now).readyNodes.size()); Iterator<ProducerBatch> partitionBatchesIterator = partitionBatches.iterator(); assertTrue(partitionBatchesIterator.next().isWritable()); assertEquals("Our partition's leader should be ready", Collections.singleton(node1), accum.ready(cluster, time.milliseconds()).readyNodes);
@Test public void testSplitBatchOffAccumulator() throws InterruptedException { long seed = System.currentTimeMillis(); final int batchSize = 1024; final int bufferCapacity = 3 * 1024; // First set the compression ratio estimation to be good. CompressionRatioEstimator.setEstimation(tp1.topic(), CompressionType.GZIP, 0.1f); RecordAccumulator accum = createTestRecordAccumulator(batchSize, bufferCapacity, CompressionType.GZIP, 0L); int numSplitBatches = prepareSplitBatches(accum, seed, 100, 20); assertTrue("There should be some split batches", numSplitBatches > 0); // Drain all the split batches. RecordAccumulator.ReadyCheckResult result = accum.ready(cluster, time.milliseconds()); for (int i = 0; i < numSplitBatches; i++) { Map<Integer, List<ProducerBatch>> drained = accum.drain(cluster, result.readyNodes, Integer.MAX_VALUE, time.milliseconds()); assertFalse(drained.isEmpty()); assertFalse(drained.get(node1.id()).isEmpty()); } assertTrue("All the batches should have been drained.", accum.ready(cluster, time.milliseconds()).readyNodes.isEmpty()); assertEquals("The split batches should be allocated off the accumulator", bufferCapacity, accum.bufferPoolAvailableMemory()); }
/**
 * Appending a value larger than the configured batch size must still succeed:
 * the accumulator produces a single batch containing exactly that one record,
 * with offset, key, value and timestamp preserved.
 */
private void testAppendLarge(CompressionType compressionType) throws Exception {
    int batchSize = 512;
    byte[] value = new byte[2 * batchSize]; // deliberately twice the configured batch size
    RecordAccumulator accum = createTestRecordAccumulator(
            batchSize + DefaultRecordBatch.RECORD_BATCH_OVERHEAD, 10 * 1024, compressionType, 0L);

    accum.append(tp1, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs);
    assertEquals("Our partition's leader should be ready", Collections.singleton(node1),
            accum.ready(cluster, time.milliseconds()).readyNodes);

    Deque<ProducerBatch> partitionBatches = accum.batches().get(tp1);
    assertEquals(1, partitionBatches.size());

    List<MutableRecordBatch> recordBatches = TestUtils.toList(partitionBatches.peek().records().batches());
    assertEquals(1, recordBatches.size());
    MutableRecordBatch onlyBatch = recordBatches.get(0);
    assertEquals(0L, onlyBatch.baseOffset());

    List<Record> records = TestUtils.toList(onlyBatch);
    assertEquals(1, records.size());
    Record onlyRecord = records.get(0);
    assertEquals(0L, onlyRecord.offset());
    assertEquals(ByteBuffer.wrap(key), onlyRecord.key());
    assertEquals(ByteBuffer.wrap(value), onlyRecord.value());
    assertEquals(0L, onlyRecord.timestamp());
}
RecordAccumulator.ReadyCheckResult result = accum.ready(cluster, time.milliseconds()); assertEquals("No nodes should be ready.", 0, result.readyNodes.size()); assertEquals("Next check time should be the linger time", lingerMs, result.nextReadyCheckDelayMs); result = accum.ready(cluster, time.milliseconds()); assertEquals("No nodes should be ready.", 0, result.readyNodes.size()); assertEquals("Next check time should be defined by node1, half remaining linger time", lingerMs / 2, result.nextReadyCheckDelayMs); result = accum.ready(cluster, time.milliseconds()); assertEquals("Node1 should be ready", Collections.singleton(node1), result.readyNodes);
/**
 * Variant of testAppendLarge that sets up node1 to advertise only produce versions
 * 0-2 (pre-current message format); the oversized value must still land in a single
 * batch as a single record.
 */
private void testAppendLargeOldMessageFormat(CompressionType compressionType) throws Exception {
    int batchSize = 512;
    byte[] value = new byte[2 * batchSize]; // deliberately twice the configured batch size

    // NOTE(review): this ApiVersions instance is built and updated but never handed to
    // the accumulator created below — confirm whether it should be wired through
    // (otherwise the old-message-format setup may have no effect here).
    ApiVersions apiVersions = new ApiVersions();
    apiVersions.update(node1.idString(), NodeApiVersions.create(Collections.singleton(
            new ApiVersionsResponse.ApiVersion(ApiKeys.PRODUCE.id, (short) 0, (short) 2))));

    RecordAccumulator accum = createTestRecordAccumulator(
            batchSize + DefaultRecordBatch.RECORD_BATCH_OVERHEAD, 10 * 1024, compressionType, 0L);
    accum.append(tp1, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs);
    assertEquals("Our partition's leader should be ready", Collections.singleton(node1),
            accum.ready(cluster, time.milliseconds()).readyNodes);

    Deque<ProducerBatch> partitionBatches = accum.batches().get(tp1);
    assertEquals(1, partitionBatches.size());

    List<MutableRecordBatch> recordBatches = TestUtils.toList(partitionBatches.peek().records().batches());
    assertEquals(1, recordBatches.size());
    MutableRecordBatch onlyBatch = recordBatches.get(0);
    assertEquals(0L, onlyBatch.baseOffset());

    List<Record> records = TestUtils.toList(onlyBatch);
    assertEquals(1, records.size());
    Record onlyRecord = records.get(0);
    assertEquals(0L, onlyRecord.offset());
    assertEquals(ByteBuffer.wrap(key), onlyRecord.key());
    assertEquals(ByteBuffer.wrap(value), onlyRecord.value());
    assertEquals(0L, onlyRecord.timestamp());
}
@Test public void testMutedPartitions() throws InterruptedException { long now = time.milliseconds(); // test case assumes that the records do not fill the batch completely int batchSize = 1025; RecordAccumulator accum = createTestRecordAccumulator( batchSize + DefaultRecordBatch.RECORD_BATCH_OVERHEAD, 10 * batchSize, CompressionType.NONE, 10); int appends = expectedNumAppends(batchSize); for (int i = 0; i < appends; i++) { accum.append(tp1, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs); assertEquals("No partitions should be ready.", 0, accum.ready(cluster, now).readyNodes.size()); } time.sleep(2000); // Test ready with muted partition accum.mutePartition(tp1); RecordAccumulator.ReadyCheckResult result = accum.ready(cluster, time.milliseconds()); assertEquals("No node should be ready", 0, result.readyNodes.size()); // Test ready without muted partition accum.unmutePartition(tp1, 0L); result = accum.ready(cluster, time.milliseconds()); assertTrue("The batch should be ready", result.readyNodes.size() > 0); // Test drain with muted partition accum.mutePartition(tp1); Map<Integer, List<ProducerBatch>> drained = accum.drain(cluster, result.readyNodes, Integer.MAX_VALUE, time.milliseconds()); assertEquals("No batch should have been drained", 0, drained.get(node1.id()).size()); // Test drain without muted partition. accum.unmutePartition(tp1, 0L); drained = accum.drain(cluster, result.readyNodes, Integer.MAX_VALUE, time.milliseconds()); assertTrue("The batch should have been drained.", drained.get(node1.id()).size() > 0); }
@Test public void testSoonToExpireBatchesArePickedUpForExpiry() throws InterruptedException { long lingerMs = 500L; int batchSize = 1025; RecordAccumulator accum = createTestRecordAccumulator( batchSize + DefaultRecordBatch.RECORD_BATCH_OVERHEAD, 10 * batchSize, CompressionType.NONE, lingerMs); accum.append(tp1, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs); Set<Node> readyNodes = accum.ready(cluster, time.milliseconds()).readyNodes; Map<Integer, List<ProducerBatch>> drained = accum.drain(cluster, readyNodes, Integer.MAX_VALUE, time.milliseconds()); assertTrue(drained.isEmpty()); //assertTrue(accum.soonToExpireInFlightBatches().isEmpty()); // advanced clock and send one batch out but it should not be included in soon to expire inflight // batches because batch's expiry is quite far. time.sleep(lingerMs + 1); readyNodes = accum.ready(cluster, time.milliseconds()).readyNodes; drained = accum.drain(cluster, readyNodes, Integer.MAX_VALUE, time.milliseconds()); assertEquals("A batch did not drain after linger", 1, drained.size()); //assertTrue(accum.soonToExpireInFlightBatches().isEmpty()); // Queue another batch and advance clock such that batch expiry time is earlier than request timeout. accum.append(tp2, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs); time.sleep(lingerMs * 4); // Now drain and check that accumulator picked up the drained batch because its expiry is soon. readyNodes = accum.ready(cluster, time.milliseconds()).readyNodes; drained = accum.drain(cluster, readyNodes, Integer.MAX_VALUE, time.milliseconds()); assertEquals("A batch did not drain after linger", 1, drained.size()); }
@Test public void testFlush() throws Exception { long lingerMs = Integer.MAX_VALUE; final RecordAccumulator accum = createTestRecordAccumulator( 4 * 1024 + DefaultRecordBatch.RECORD_BATCH_OVERHEAD, 64 * 1024, CompressionType.NONE, lingerMs); for (int i = 0; i < 100; i++) { accum.append(new TopicPartition(topic, i % 3), 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs); assertTrue(accum.hasIncomplete()); } RecordAccumulator.ReadyCheckResult result = accum.ready(cluster, time.milliseconds()); assertEquals("No nodes should be ready.", 0, result.readyNodes.size()); accum.beginFlush(); result = accum.ready(cluster, time.milliseconds()); // drain and deallocate all batches Map<Integer, List<ProducerBatch>> results = accum.drain(cluster, result.readyNodes, Integer.MAX_VALUE, time.milliseconds()); assertTrue(accum.hasIncomplete()); for (List<ProducerBatch> batches: results.values()) for (ProducerBatch batch: batches) accum.deallocate(batch); // should be complete with no unsent records. accum.awaitFlushCompletion(); assertFalse(accum.hasUndrained()); assertFalse(accum.hasIncomplete()); }
time.sleep(101L); RecordAccumulator.ReadyCheckResult result = accum.ready(cluster, time.milliseconds()); assertTrue("The batch should be ready", result.readyNodes.size() > 0); Map<Integer, List<ProducerBatch>> drained = accum.drain(cluster, result.readyNodes, Integer.MAX_VALUE, time.milliseconds());
private BatchDrainedResult completeOrSplitBatches(RecordAccumulator accum, int batchSize) { int numSplit = 0; int numBatches = 0; boolean batchDrained; do { batchDrained = false; RecordAccumulator.ReadyCheckResult result = accum.ready(cluster, time.milliseconds()); Map<Integer, List<ProducerBatch>> batches = accum.drain(cluster, result.readyNodes, Integer.MAX_VALUE, time.milliseconds()); for (List<ProducerBatch> batchList : batches.values()) { for (ProducerBatch batch : batchList) { batchDrained = true; numBatches++; if (batch.estimatedSizeInBytes() > batchSize + DefaultRecordBatch.RECORD_BATCH_OVERHEAD) { accum.splitAndReenqueue(batch); // release the resource of the original big batch. numSplit++; } else { batch.done(0L, 0L, null); } accum.deallocate(batch); } } } while (batchDrained); return new BatchDrainedResult(numSplit, numBatches); }
accum.append(tp1, 0L, key, value, Record.EMPTY_HEADERS, null, 0); time.sleep(lingerMs); readyNodes = accum.ready(cluster, time.milliseconds()).readyNodes; assertEquals("Our partition's leader should be ready", Collections.singleton(node1), readyNodes); Map<Integer, List<ProducerBatch>> drained = accum.drain(cluster, readyNodes, Integer.MAX_VALUE, time.milliseconds());
RecordAccumulator.ReadyCheckResult result = accum.ready(cluster, time.milliseconds()); assertFalse(result.readyNodes.isEmpty()); Map<Integer, List<ProducerBatch>> drained = accum.drain(cluster, result.readyNodes, Integer.MAX_VALUE, time.milliseconds());
RecordAccumulator.ReadyCheckResult result = accum.ready(cluster, time.milliseconds()); assertFalse(result.readyNodes.isEmpty()); Map<Integer, List<ProducerBatch>> drained = accum.drain(cluster, result.readyNodes, Integer.MAX_VALUE,