private ProducerBatch createBatchOffAccumulatorForRecord(Record record, int batchSize) { int initialSize = Math.max(AbstractRecords.estimateSizeInBytesUpperBound(magic(), recordsBuilder.compressionType(), record.key(), record.value(), record.headers()), batchSize); ByteBuffer buffer = ByteBuffer.allocate(initialSize); // Note that we intentionally do not set producer state (producerId, epoch, sequence, and isTransactional) // for the newly created batch. This will be set when the batch is dequeued for sending (which is consistent // with how normal batches are handled). MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, magic(), recordsBuilder.compressionType(), TimestampType.CREATE_TIME, 0L); return new ProducerBatch(topicPartition, builder, this.createdMs, true); }
/**
 * A {@link ProducerBatch} configured using a timestamp preceding its create time is interpreted
 * correctly as not expired by {@link ProducerBatch#hasReachedDeliveryTimeout(long, long)},
 * even after the batch has been re-enqueued for retry.
 */
@Test
public void testBatchExpirationAfterReenqueue() {
    ProducerBatch batch = new ProducerBatch(new TopicPartition("topic", 1), memoryRecordsBuilder, now);
    // Set batch.retry = true
    batch.reenqueued(now);
    // Set `now` to 2ms before the create time; the batch must not be reported as expired.
    assertFalse(batch.hasReachedDeliveryTimeout(10240, now - 2L));
}
/**
 * For legacy message formats (magic v0 and v1), a successful append exposes the legacy
 * per-record checksum through the returned {@link FutureRecordMetadata}.
 */
@Test
public void testAppendedChecksumMagicV0AndV1() {
    byte[] key = "hi".getBytes();
    byte[] value = "there".getBytes();

    for (byte magic : Arrays.asList(MAGIC_VALUE_V0, MAGIC_VALUE_V1)) {
        MemoryRecordsBuilder legacyBuilder = MemoryRecords.builder(ByteBuffer.allocate(128), magic,
            CompressionType.NONE, TimestampType.CREATE_TIME, 0L);
        ProducerBatch batch = new ProducerBatch(new TopicPartition("topic", 1), legacyBuilder, now);

        FutureRecordMetadata future = batch.tryAppend(now, key, value, Record.EMPTY_HEADERS, null, now);
        assertNotNull(future);

        // The exposed checksum must match the one computed directly over the legacy record fields.
        byte attributes = LegacyRecord.computeAttributes(magic, CompressionType.NONE, TimestampType.CREATE_TIME);
        long expectedChecksum = LegacyRecord.computeChecksum(magic, attributes, now, key, value);
        assertEquals(expectedChecksum, future.checksumOrNull().longValue());
    }
}
/**
 * With magic v2 (the class-level {@code memoryRecordsBuilder}), no per-record checksum is
 * exposed: {@link FutureRecordMetadata#checksumOrNull()} returns null.
 */
@Test
public void testChecksumNullForMagicV2() {
    ProducerBatch batch = new ProducerBatch(new TopicPartition("topic", 1), memoryRecordsBuilder, now);
    FutureRecordMetadata appendFuture = batch.tryAppend(now, null, new byte[10], Record.EMPTY_HEADERS, null, now);
    assertNotNull(appendFuture);
    assertNull(appendFuture.checksumOrNull());
}
/** * A {@link ProducerBatch} configured using a timestamp preceding its create time is interpreted correctly * as not expired by {@link ProducerBatch#hasReachedDeliveryTimeout(long, long)}. */ @Test public void testBatchExpiration() { long deliveryTimeoutMs = 10240; ProducerBatch batch = new ProducerBatch(new TopicPartition("topic", 1), memoryRecordsBuilder, now); // Set `now` to 2ms before the create time. assertFalse(batch.hasReachedDeliveryTimeout(deliveryTimeoutMs, now - 2)); // Set `now` to deliveryTimeoutMs. assertTrue(batch.hasReachedDeliveryTimeout(deliveryTimeoutMs, now + deliveryTimeoutMs)); }
// NOTE(review): fragment — the enclosing method's header is not visible in this view.
ProducerBatch batch = new ProducerBatch(tp, recordsBuilder, time.milliseconds());
// Utils.notNull throws if tryAppend returned null (i.e. the batch had no room for the record).
FutureRecordMetadata future = Utils.notNull(batch.tryAppend(timestamp, key, value, headers, callback, time.milliseconds()));
/**
 * Once the underlying {@link MemoryRecordsBuilder} is closed for appends,
 * {@link ProducerBatch#tryAppend} must return null instead of appending.
 */
@Test
public void testShouldNotAttemptAppendOnceRecordsBuilderIsClosedForAppends() {
    ProducerBatch batch = new ProducerBatch(new TopicPartition("topic", 1), memoryRecordsBuilder, now);
    FutureRecordMetadata result0 = batch.tryAppend(now, null, new byte[10], Record.EMPTY_HEADERS, null, now);
    assertNotNull(result0);
    assertTrue(memoryRecordsBuilder.hasRoomFor(now, null, new byte[10], Record.EMPTY_HEADERS));
    memoryRecordsBuilder.closeForRecordAppends();
    assertFalse(memoryRecordsBuilder.hasRoomFor(now, null, new byte[10], Record.EMPTY_HEADERS));
    // assertNull (rather than assertEquals(null, ...)) states the intent directly and
    // produces a clearer failure message.
    assertNull(batch.tryAppend(now + 1, null, new byte[10], Record.EMPTY_HEADERS, null, now + 1));
}
    TimestampType.CREATE_TIME, 0L);
// NOTE(review): fragment — the builder construction completed above starts outside this view.
ProducerBatch batch = new ProducerBatch(new TopicPartition("topic", 1), builder, now);
Header header = new RecordHeader("header-key", "header-value".getBytes());
    compressionType, TimestampType.CREATE_TIME, 0L);
// NOTE(review): fragment — the builder construction above and the loop body below continue
// outside this view. The loop appends records until tryAppend presumably signals a full batch.
ProducerBatch batch = new ProducerBatch(new TopicPartition("topic", 1), builder, now);
while (true) {
    FutureRecordMetadata future = batch.tryAppend(now, "hi".getBytes(), "there".getBytes(),
// NOTE(review): fragment. The trailing `true` appears to be the split-batch flag (the 4-arg
// ctor is also used with `true` by createBatchOffAccumulatorForRecord) — confirm against the
// ProducerBatch constructor.
ProducerBatch batch = new ProducerBatch(tp1, builder, now, true);
@Test public void testBatchCannotCompleteTwice() throws Exception { ProducerBatch batch = new ProducerBatch(new TopicPartition("topic", 1), memoryRecordsBuilder, now); MockCallback callback = new MockCallback(); FutureRecordMetadata future = batch.tryAppend(now, null, new byte[10], Record.EMPTY_HEADERS, callback, now); batch.done(500L, 10L, null); assertEquals(1, callback.invocations); assertNull(callback.exception); assertNotNull(callback.metadata); try { batch.done(1000L, 20L, null); fail("Expected exception from done"); } catch (IllegalStateException e) { // expected } RecordMetadata recordMetadata = future.get(); assertEquals(500L, recordMetadata.offset()); assertEquals(10L, recordMetadata.timestamp()); }
@Test public void testBatchCannotAbortTwice() throws Exception { ProducerBatch batch = new ProducerBatch(new TopicPartition("topic", 1), memoryRecordsBuilder, now); MockCallback callback = new MockCallback(); FutureRecordMetadata future = batch.tryAppend(now, null, new byte[10], Record.EMPTY_HEADERS, callback, now); KafkaException exception = new KafkaException(); batch.abort(exception); assertEquals(1, callback.invocations); assertEquals(exception, callback.exception); assertNull(callback.metadata); try { batch.abort(new KafkaException()); fail("Expected exception from abort"); } catch (IllegalStateException e) { // expected } assertEquals(1, callback.invocations); assertTrue(future.isDone()); try { future.get(); fail("Future should have thrown"); } catch (ExecutionException e) { assertEquals(exception, e.getCause()); } }
@Test public void testBatchAbort() throws Exception { ProducerBatch batch = new ProducerBatch(new TopicPartition("topic", 1), memoryRecordsBuilder, now); MockCallback callback = new MockCallback(); FutureRecordMetadata future = batch.tryAppend(now, null, new byte[10], Record.EMPTY_HEADERS, callback, now); KafkaException exception = new KafkaException(); batch.abort(exception); assertTrue(future.isDone()); assertEquals(1, callback.invocations); assertEquals(exception, callback.exception); assertNull(callback.metadata); // subsequent completion should be ignored assertFalse(batch.done(500L, 2342342341L, null)); assertFalse(batch.done(-1, -1, new KafkaException())); assertEquals(1, callback.invocations); assertTrue(future.isDone()); try { future.get(); fail("Future should have thrown"); } catch (ExecutionException e) { assertEquals(exception, e.getCause()); } }