size += batch.records().sizeInBytes();
ready.add(batch);
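// Verifies that an appended record is held back until linger.ms has elapsed, and that
// exactly one record with the expected key and value is drained once the partition is ready.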
@Test
public void testLinger() throws Exception {
    long lingerMs = 10L;
    RecordAccumulator accum = createTestRecordAccumulator(
            1024 + DefaultRecordBatch.RECORD_BATCH_OVERHEAD, 10 * 1024, CompressionType.NONE, lingerMs);
    accum.append(tp1, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs);
    assertEquals("No partitions should be ready", 0, accum.ready(cluster, time.milliseconds()).readyNodes.size());
    time.sleep(10);
    assertEquals("Our partition's leader should be ready", Collections.singleton(node1), accum.ready(cluster, time.milliseconds()).readyNodes);
    List<ProducerBatch> batches = accum.drain(cluster, Collections.singleton(node1), Integer.MAX_VALUE, 0).get(node1.id());
    assertEquals(1, batches.size());
    ProducerBatch batch = batches.get(0);
    Iterator<Record> iter = batch.records().records().iterator();
    Record record = iter.next();
    assertEquals("Keys should match", ByteBuffer.wrap(key), record.key());
    assertEquals("Values should match", ByteBuffer.wrap(value), record.value());
    assertFalse("No more records", iter.hasNext());
}
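// Drain loop: count every record in the drained batches and release each batch
// back to the accumulator's buffer pool via deallocate().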
if (batches != null) {
    for (ProducerBatch batch : batches) {
        for (Record record : batch.records().records())
            read++;
        accum.deallocate(batch);
ProducerBatch batch = batches.get(0);
Iterator<Record> iter = batch.records().records().iterator();
for (int i = 0; i < appends; i++) {
    Record record = iter.next();
for (RecordBatch splitBatch : splitProducerBatch.records().batches()) {
    for (Record record : splitBatch) {
        assertTrue("Header size should be 1.", record.headers().length == 1);
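// Appends a value larger than the configured batch size; the accumulator should
// allocate an oversized batch containing exactly one record at base offset 0.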
private void testAppendLarge(CompressionType compressionType) throws Exception {
    int batchSize = 512;
    byte[] value = new byte[2 * batchSize];
    RecordAccumulator accum = createTestRecordAccumulator(
            batchSize + DefaultRecordBatch.RECORD_BATCH_OVERHEAD, 10 * 1024, compressionType, 0L);
    accum.append(tp1, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs);
    assertEquals("Our partition's leader should be ready", Collections.singleton(node1), accum.ready(cluster, time.milliseconds()).readyNodes);

    Deque<ProducerBatch> batches = accum.batches().get(tp1);
    assertEquals(1, batches.size());
    ProducerBatch producerBatch = batches.peek();
    List<MutableRecordBatch> recordBatches = TestUtils.toList(producerBatch.records().batches());
    assertEquals(1, recordBatches.size());
    MutableRecordBatch recordBatch = recordBatches.get(0);
    assertEquals(0L, recordBatch.baseOffset());
    List<Record> records = TestUtils.toList(recordBatch);
    assertEquals(1, records.size());
    Record record = records.get(0);
    assertEquals(0L, record.offset());
    assertEquals(ByteBuffer.wrap(key), record.key());
    assertEquals(ByteBuffer.wrap(value), record.value());
    assertEquals(0L, record.timestamp());
}
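// Same oversized-append check, but with node1 advertising only an old PRODUCE version
// (max version 2), so the accumulator falls back to the older message format.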
private void testAppendLargeOldMessageFormat(CompressionType compressionType) throws Exception {
    int batchSize = 512;
    byte[] value = new byte[2 * batchSize];

    ApiVersions apiVersions = new ApiVersions();
    apiVersions.update(node1.idString(), NodeApiVersions.create(Collections.singleton(
            new ApiVersionsResponse.ApiVersion(ApiKeys.PRODUCE.id, (short) 0, (short) 2))));

    RecordAccumulator accum = createTestRecordAccumulator(
            batchSize + DefaultRecordBatch.RECORD_BATCH_OVERHEAD, 10 * 1024, compressionType, 0L);
    accum.append(tp1, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs);
    assertEquals("Our partition's leader should be ready", Collections.singleton(node1), accum.ready(cluster, time.milliseconds()).readyNodes);

    Deque<ProducerBatch> batches = accum.batches().get(tp1);
    assertEquals(1, batches.size());
    ProducerBatch producerBatch = batches.peek();
    List<MutableRecordBatch> recordBatches = TestUtils.toList(producerBatch.records().batches());
    assertEquals(1, recordBatches.size());
    MutableRecordBatch recordBatch = recordBatches.get(0);
    assertEquals(0L, recordBatch.baseOffset());
    List<Record> records = TestUtils.toList(recordBatch);
    assertEquals(1, records.size());
    Record record = records.get(0);
    assertEquals(0L, record.offset());
    assertEquals(ByteBuffer.wrap(key), record.key());
    assertEquals(ByteBuffer.wrap(value), record.value());
    assertEquals(0L, record.timestamp());
}
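// After a batch split, the parent batch is flagged as split and every resulting
// batch keeps the original magic value with a base offset of 0.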
assertTrue(splitProducerBatch.isSplitBatch());
for (RecordBatch splitBatch : splitProducerBatch.records().batches()) {
    assertEquals(magic, splitBatch.magic());
    assertEquals(0L, splitBatch.baseOffset());