/**
 * Verifies that a record whose value exceeds the configured batch size is still appended:
 * it must land alone in a single producer batch containing exactly one record batch with
 * exactly one record, and the partition leader must become ready to drain.
 */
private void testAppendLarge(CompressionType compressionType) throws Exception {
    int batchSize = 512;
    // Deliberately twice the batch size so the record cannot share a batch.
    byte[] value = new byte[2 * batchSize];
    RecordAccumulator accum = createTestRecordAccumulator(
        batchSize + DefaultRecordBatch.RECORD_BATCH_OVERHEAD, 10 * 1024, compressionType, 0L);
    accum.append(tp1, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs);
    assertEquals("Our partition's leader should be ready",
        Collections.singleton(node1), accum.ready(cluster, time.milliseconds()).readyNodes);

    // The oversized record should occupy exactly one queued producer batch.
    Deque<ProducerBatch> partitionBatches = accum.batches().get(tp1);
    assertEquals(1, partitionBatches.size());
    ProducerBatch onlyBatch = partitionBatches.peek();

    // That batch should contain a single record batch starting at offset 0.
    List<MutableRecordBatch> containedBatches = TestUtils.toList(onlyBatch.records().batches());
    assertEquals(1, containedBatches.size());
    MutableRecordBatch firstRecordBatch = containedBatches.get(0);
    assertEquals(0L, firstRecordBatch.baseOffset());

    // ...holding exactly one record whose key, value and timestamp round-trip intact.
    List<Record> containedRecords = TestUtils.toList(firstRecordBatch);
    assertEquals(1, containedRecords.size());
    Record onlyRecord = containedRecords.get(0);
    assertEquals(0L, onlyRecord.offset());
    assertEquals(ByteBuffer.wrap(key), onlyRecord.key());
    assertEquals(ByteBuffer.wrap(value), onlyRecord.value());
    assertEquals(0L, onlyRecord.timestamp());
}
/**
 * Verifies that an oversized record (value larger than the batch size) is appended
 * correctly when node1 advertises only the old produce versions (PRODUCE v0-v2):
 * exactly one queued batch with one record batch and one record, contents intact.
 *
 * NOTE(review): the local {@code apiVersions} is built and updated here but is never
 * passed to {@code createTestRecordAccumulator}, so the accumulator under test may not
 * actually observe the downgraded PRODUCE version — confirm this is wired through
 * (e.g. via an accumulator overload taking ApiVersions) as intended.
 */
private void testAppendLargeOldMessageFormat(CompressionType compressionType) throws Exception {
    int batchSize = 512;
    byte[] value = new byte[2 * batchSize];
    // Advertise PRODUCE versions 0-2 for node1, i.e. the old message format range.
    ApiVersions apiVersions = new ApiVersions();
    apiVersions.update(node1.idString(), NodeApiVersions.create(Collections.singleton(
        new ApiVersionsResponse.ApiVersion(ApiKeys.PRODUCE.id, (short) 0, (short) 2))));
    RecordAccumulator accum = createTestRecordAccumulator(
        batchSize + DefaultRecordBatch.RECORD_BATCH_OVERHEAD, 10 * 1024, compressionType, 0L);
    accum.append(tp1, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs);
    assertEquals("Our partition's leader should be ready",
        Collections.singleton(node1), accum.ready(cluster, time.milliseconds()).readyNodes);
    // The oversized record should occupy exactly one queued producer batch...
    Deque<ProducerBatch> batches = accum.batches().get(tp1);
    assertEquals(1, batches.size());
    ProducerBatch producerBatch = batches.peek();
    List<MutableRecordBatch> recordBatches = TestUtils.toList(producerBatch.records().batches());
    assertEquals(1, recordBatches.size());
    MutableRecordBatch recordBatch = recordBatches.get(0);
    assertEquals(0L, recordBatch.baseOffset());
    // ...containing exactly one record whose key/value/timestamp round-trip unchanged.
    List<Record> records = TestUtils.toList(recordBatch);
    assertEquals(1, records.size());
    Record record = records.get(0);
    assertEquals(0L, record.offset());
    assertEquals(ByteBuffer.wrap(key), record.key());
    assertEquals(ByteBuffer.wrap(value), record.value());
    assertEquals(0L, record.timestamp());
}
/**
 * Verifies that when a sent-but-failed batch expires before it can be retried, the
 * transaction manager marks the partition's sequence as unresolved, and that the
 * producer can then obtain a fresh producer id via InitProducerId.
 */
@Test
public void testExpiryOfAllSentBatchesShouldCauseUnresolvedSequences() throws Exception {
    final long producerId = 343434L;
    TransactionManager transactionManager = new TransactionManager();
    setupWithTransactionState(transactionManager);
    prepareAndReceiveInitProducerId(producerId, Errors.NONE);
    assertTrue(transactionManager.hasProducerId());
    assertEquals(0, transactionManager.sequenceNumber(tp0).longValue());
    // Send first ProduceRequest
    Future<RecordMetadata> request1 = accumulator.append(tp0, 0L, "key".getBytes(), "value".getBytes(), null, null, MAX_BLOCK_TIMEOUT).future;
    sender.run(time.milliseconds()); // send request
    sendIdempotentProducerResponse(0, tp0, Errors.NOT_LEADER_FOR_PARTITION, -1);
    sender.run(time.milliseconds()); // receive response
    // The retriable NOT_LEADER_FOR_PARTITION error leaves the sequence at 1 (not rolled back).
    assertEquals(1L, transactionManager.sequenceNumber(tp0).longValue());
    // Make the broker unreachable and advance the clock so the re-queued batch cannot be
    // re-sent and must expire instead.
    Node node = metadata.fetch().nodes().get(0);
    time.sleep(15000L);
    client.disconnect(node.idString());
    client.blackout(node, 10);
    sender.run(time.milliseconds()); // now expire the batch.
    assertFutureFailure(request1, TimeoutException.class);
    // An expired batch that was previously sent leaves tp0's sequence unresolved.
    assertTrue(transactionManager.hasUnresolvedSequence(tp0));
    assertFalse(client.hasInFlightRequests());
    // The accumulator should be drained of batches for tp0, and the old producer id retained so far.
    Deque<ProducerBatch> batches = accumulator.batches().get(tp0);
    assertEquals(0, batches.size());
    assertTrue(transactionManager.hasProducerId(producerId));
    // We should now clear the old producerId and get a new one in a single run loop.
    prepareAndReceiveInitProducerId(producerId + 1, Errors.NONE);
    assertTrue(transactionManager.hasProducerId(producerId + 1));
}
// Fragment (enclosing test not visible in this chunk): asserts there are no in-flight
// batches for tp0 and that the single batch re-queued in the accumulator has had its
// sequence cleared — presumably after a failure/reenqueue step; confirm against the full test.
assertEquals(0, sender.inFlightBatches(tp0).size());
Deque<ProducerBatch> batches = accumulator.batches().get(tp0);
assertEquals(1, batches.size());
assertFalse(batches.peekFirst().hasSequence());
// Fragment (enclosing test not visible): snapshot of the queued producer batches for tp0.
Deque<ProducerBatch> batches = accumulator.batches().get(tp0);
// Fragment (enclosing test not visible): the second request completed at offset 1 while
// the first remains outstanding — presumably an out-of-order completion scenario; the
// queued batches for tp0 are then inspected. Confirm against the full test.
assertEquals(1, request2.get().offset());
assertFalse(request1.isDone());
Deque<ProducerBatch> queuedBatches = accumulator.batches().get(tp0);
// Fragment (enclosing test not visible): after a MESSAGE_TOO_LARGE-style batch split is
// processed — presumably — the last acked sequence is 1, the second message landed at
// offset 1, the accumulator is drained, and the batch-split-rate metric recorded a split.
assertEquals("The last ack'd sequence number should be 1", 1, txnManager.lastAckedSequence(tp));
assertEquals("Offset of the first message should be 1", 1L, f2.get().offset());
assertTrue("There should be no batch in the accumulator", accumulator.batches().get(tp).isEmpty());
assertTrue("There should be a split",
    (Double) (m.metrics().get(senderMetrics.batchSplitRate).metricValue()) > 0);
// Fragment (enclosing test not visible): snapshot of the queued producer batches for tp0.
Deque<ProducerBatch> queuedBatches = accumulator.batches().get(tp0);