/**
 * This method is only used by {@link #split(int)} when splitting a large batch into smaller ones.
 * @return true if the record has been successfully appended, false otherwise.
 */
private boolean tryAppendForSplit(long timestamp, ByteBuffer key, ByteBuffer value, Header[] headers, Thunk thunk) {
    if (!recordsBuilder.hasRoomFor(timestamp, key, value, headers)) {
        return false;
    } else {
        // No need to get the CRC.
        this.recordsBuilder.append(timestamp, key, value, headers);
        this.maxRecordSize = Math.max(this.maxRecordSize,
                AbstractRecords.estimateSizeInBytesUpperBound(magic(), recordsBuilder.compressionType(), key, value, headers));
        FutureRecordMetadata future = new FutureRecordMetadata(this.produceFuture, this.recordCount,
                                                               timestamp, thunk.future.checksumOrNull(),
                                                               key == null ? -1 : key.remaining(),
                                                               value == null ? -1 : value.remaining(),
                                                               Time.SYSTEM);
        // Chain the future to the original thunk.
        thunk.future.chain(future);
        this.thunks.add(thunk);
        this.recordCount++;
        return true;
    }
}
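// The sketch below shows one way a split(int) implementation can drive
// tryAppendForSplit(...): walk the records of the oversized batch together with
// their thunks, roll over to a fresh batch whenever the current one is full, and
// let tryAppendForSplit(...) chain each old future to its new batch. The
// createSplitBatch(...) helper is hypothetical; this is a hedged sketch of the
// control flow, not the actual Kafka implementation.
public Deque<ProducerBatch> split(int splitBatchSize) {
    Deque<ProducerBatch> batches = new ArrayDeque<>();
    ProducerBatch batch = null;
    Iterator<Thunk> thunkIter = thunks.iterator();
    for (Record record : recordsBuilder.build().records()) {
        Thunk thunk = thunkIter.next();
        if (batch == null)
            batch = createSplitBatch(splitBatchSize); // hypothetical helper
        // Roll over to a new split batch when the current one has no room left.
        if (!batch.tryAppendForSplit(record.timestamp(), record.key(), record.value(), record.headers(), thunk)) {
            batches.add(batch);
            batch = createSplitBatch(splitBatchSize);
            batch.tryAppendForSplit(record.timestamp(), record.key(), record.value(), record.headers(), thunk);
        }
    }
    if (batch != null)
        batches.add(batch);
    return batches;
}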
@Override
public RecordMetadata get() throws InterruptedException, ExecutionException {
    this.result.await();
    if (nextRecordMetadata != null)
        return nextRecordMetadata.get();
    return valueOrError();
}
@Override
public boolean isDone() {
    if (nextRecordMetadata != null)
        return nextRecordMetadata.isDone();
    return this.result.completed();
}
@Test
public void testBatchAbort() throws Exception {
    ProducerBatch batch = new ProducerBatch(new TopicPartition("topic", 1), memoryRecordsBuilder, now);
    MockCallback callback = new MockCallback();
    FutureRecordMetadata future = batch.tryAppend(now, null, new byte[10], Record.EMPTY_HEADERS, callback, now);

    KafkaException exception = new KafkaException();
    batch.abort(exception);
    assertTrue(future.isDone());
    assertEquals(1, callback.invocations);
    assertEquals(exception, callback.exception);
    assertNull(callback.metadata);

    // Subsequent completion should be ignored.
    assertFalse(batch.done(500L, 2342342341L, null));
    assertFalse(batch.done(-1, -1, new KafkaException()));
    assertEquals(1, callback.invocations);

    assertTrue(future.isDone());
    try {
        future.get();
        fail("Future should have thrown");
    } catch (ExecutionException e) {
        assertEquals(exception, e.getCause());
    }
}
/**
 * Test that waiting on a request that never completes times out
 */
@Test
public void testTimeout() throws Exception {
    ProduceRequestResult request = new ProduceRequestResult(topicPartition);
    FutureRecordMetadata future = new FutureRecordMetadata(request, relOffset,
            RecordBatch.NO_TIMESTAMP, 0L, 0, 0, Time.SYSTEM);
    assertFalse("Request is not completed", future.isDone());
    try {
        future.get(5, TimeUnit.MILLISECONDS);
        fail("Should have thrown exception.");
    } catch (TimeoutException e) {
        // this is good
    }

    request.set(baseOffset, RecordBatch.NO_TIMESTAMP, null);
    request.done();
    assertTrue(future.isDone());
    assertEquals(baseOffset + relOffset, future.get().offset());
}
/**
 * Test that an asynchronous request will eventually throw the right exception
 */
@Test(expected = ExecutionException.class)
public void testError() throws Exception {
    FutureRecordMetadata future = new FutureRecordMetadata(
            asyncRequest(baseOffset, new CorruptRecordException(), 50L),
            relOffset, RecordBatch.NO_TIMESTAMP, 0L, 0, 0, Time.SYSTEM);
    future.get();
}
@Test
public void testFutureGetWithSeconds() throws ExecutionException, InterruptedException, TimeoutException {
    ProduceRequestResult produceRequestResult = mockProduceRequestResult();
    FutureRecordMetadata future = futureRecordMetadata(produceRequestResult);
    ProduceRequestResult chainedProduceRequestResult = mockProduceRequestResult();
    future.chain(futureRecordMetadata(chainedProduceRequestResult));

    future.get(1L, TimeUnit.SECONDS);

    verify(produceRequestResult).await(1L, TimeUnit.SECONDS);
    verify(chainedProduceRequestResult).await(1000L, TimeUnit.MILLISECONDS);
}
private FutureRecordMetadata futureRecordMetadata(ProduceRequestResult produceRequestResult) {
    return new FutureRecordMetadata(
        produceRequestResult,
        0,
        RecordBatch.NO_TIMESTAMP,
        0L,
        0,
        0,
        time
    );
}
@Test
public void testBatchCannotCompleteTwice() throws Exception {
    ProducerBatch batch = new ProducerBatch(new TopicPartition("topic", 1), memoryRecordsBuilder, now);
    MockCallback callback = new MockCallback();
    FutureRecordMetadata future = batch.tryAppend(now, null, new byte[10], Record.EMPTY_HEADERS, callback, now);
    batch.done(500L, 10L, null);
    assertEquals(1, callback.invocations);
    assertNull(callback.exception);
    assertNotNull(callback.metadata);
    try {
        batch.done(1000L, 20L, null);
        fail("Expected exception from done");
    } catch (IllegalStateException e) {
        // expected
    }
    RecordMetadata recordMetadata = future.get();
    assertEquals(500L, recordMetadata.offset());
    assertEquals(10L, recordMetadata.timestamp());
}
@Test
public void testAppendedChecksumMagicV0AndV1() {
    for (byte magic : Arrays.asList(MAGIC_VALUE_V0, MAGIC_VALUE_V1)) {
        MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(128), magic,
                CompressionType.NONE, TimestampType.CREATE_TIME, 0L);
        ProducerBatch batch = new ProducerBatch(new TopicPartition("topic", 1), builder, now);
        byte[] key = "hi".getBytes();
        byte[] value = "there".getBytes();

        FutureRecordMetadata future = batch.tryAppend(now, key, value, Record.EMPTY_HEADERS, null, now);
        assertNotNull(future);
        byte attributes = LegacyRecord.computeAttributes(magic, CompressionType.NONE, TimestampType.CREATE_TIME);
        long expectedChecksum = LegacyRecord.computeChecksum(magic, attributes, now, key, value);
        assertEquals(expectedChecksum, future.checksumOrNull().longValue());
    }
}
@Override
public RecordMetadata get(long timeout, TimeUnit unit) throws InterruptedException, ExecutionException, TimeoutException {
    // Handle overflow.
    long now = time.milliseconds();
    long timeoutMillis = unit.toMillis(timeout);
    long deadline = Long.MAX_VALUE - timeoutMillis < now ? Long.MAX_VALUE : now + timeoutMillis;
    boolean occurred = this.result.await(timeout, unit);
    if (!occurred)
        throw new TimeoutException("Timeout after waiting for " + timeoutMillis + " ms.");
    // Delegate to the chained future (if any), so a record that was moved to a
    // split batch is waited on for the remaining portion of the timeout.
    if (nextRecordMetadata != null)
        return nextRecordMetadata.get(deadline - time.milliseconds(), TimeUnit.MILLISECONDS);
    return valueOrError();
}
/**
 * This method is used when we have to split a large batch into smaller ones. A chained metadata will allow the
 * future that has already been returned to the users to wait on the newly created split batches even after the
 * old big batch has been deemed as done.
 */
void chain(FutureRecordMetadata futureRecordMetadata) {
    if (nextRecordMetadata == null)
        nextRecordMetadata = futureRecordMetadata;
    else
        nextRecordMetadata.chain(futureRecordMetadata);
}
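// A self-contained toy model (not Kafka's classes) of the chaining above: once a
// handle is chained, reads delegate to the tail of the chain, so the future the
// user already holds ends up reflecting whichever split batch the record landed in.
final class ChainedResult {
    private ChainedResult next;  // set when the record is moved to a split batch
    private Long offset;         // null until completed

    void chain(ChainedResult nextResult) {
        if (next == null)
            next = nextResult;
        else
            next.chain(nextResult);
    }

    void complete(long assignedOffset) {
        offset = assignedOffset;
    }

    Long get() {
        // Mirrors FutureRecordMetadata.get(): delegate if a newer link exists.
        return next != null ? next.get() : offset;
    }
}
// Usage: chain the original handle to the new one and complete only the new
// link; the original handle then observes the new result.
//   original.chain(moved); moved.complete(42L); original.get() == 42L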
RecordMetadata valueOrError() throws ExecutionException {
    if (this.result.error() != null)
        throw new ExecutionException(this.result.error());
    else
        return value();
}
@Test
public void testBatchCannotAbortTwice() throws Exception {
    ProducerBatch batch = new ProducerBatch(new TopicPartition("topic", 1), memoryRecordsBuilder, now);
    MockCallback callback = new MockCallback();
    FutureRecordMetadata future = batch.tryAppend(now, null, new byte[10], Record.EMPTY_HEADERS, callback, now);
    KafkaException exception = new KafkaException();
    batch.abort(exception);
    assertEquals(1, callback.invocations);
    assertEquals(exception, callback.exception);
    assertNull(callback.metadata);
    try {
        batch.abort(new KafkaException());
        fail("Expected exception from abort");
    } catch (IllegalStateException e) {
        // expected
    }
    assertEquals(1, callback.invocations);
    assertTrue(future.isDone());
    try {
        future.get();
        fail("Future should have thrown");
    } catch (ExecutionException e) {
        assertEquals(exception, e.getCause());
    }
}
/**
 * Test that an asynchronous request will eventually return the right offset
 */
@Test
public void testBlocking() throws Exception {
    FutureRecordMetadata future = new FutureRecordMetadata(asyncRequest(baseOffset, null, 50L),
            relOffset, RecordBatch.NO_TIMESTAMP, 0L, 0, 0, Time.SYSTEM);
    assertEquals(baseOffset + relOffset, future.get().offset());
}
@Test
public void testFutureGetWithMilliSeconds() throws ExecutionException, InterruptedException, TimeoutException {
    ProduceRequestResult produceRequestResult = mockProduceRequestResult();
    FutureRecordMetadata future = futureRecordMetadata(produceRequestResult);
    ProduceRequestResult chainedProduceRequestResult = mockProduceRequestResult();
    future.chain(futureRecordMetadata(chainedProduceRequestResult));

    future.get(1000L, TimeUnit.MILLISECONDS);

    verify(produceRequestResult).await(1000L, TimeUnit.MILLISECONDS);
    verify(chainedProduceRequestResult).await(1000L, TimeUnit.MILLISECONDS);
}
/**
 * Append the record to the current record set and return the relative offset within that record set
 *
 * @return The FutureRecordMetadata corresponding to this record or null if there isn't sufficient room.
 */
public FutureRecordMetadata tryAppend(long timestamp, byte[] key, byte[] value, Header[] headers,
                                      Callback callback, long now) {
    if (!recordsBuilder.hasRoomFor(timestamp, key, value, headers)) {
        return null;
    } else {
        Long checksum = this.recordsBuilder.append(timestamp, key, value, headers);
        this.maxRecordSize = Math.max(this.maxRecordSize,
                AbstractRecords.estimateSizeInBytesUpperBound(magic(), recordsBuilder.compressionType(), key, value, headers));
        this.lastAppendTime = now;
        FutureRecordMetadata future = new FutureRecordMetadata(this.produceFuture, this.recordCount,
                                                               timestamp, checksum,
                                                               key == null ? -1 : key.length,
                                                               value == null ? -1 : value.length,
                                                               Time.SYSTEM);
        // We have to keep every future returned to the users in case the batch needs to be
        // split into several new batches and resent.
        thunks.add(new Thunk(callback, future));
        this.recordCount++;
        return future;
    }
}
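// A hedged caller-side sketch of the contract documented above: a null return
// from tryAppend(...) means "batch full", so the caller seals the current batch
// and retries on a fresh one. The deque field and newBatch(...) helper are
// hypothetical stand-ins for the accumulator's bookkeeping, not actual Kafka code.
private FutureRecordMetadata appendWithRollover(TopicPartition tp, long timestamp, byte[] key, byte[] value,
                                                Header[] headers, Callback callback, long now) {
    ProducerBatch batch = deque.peekLast();
    FutureRecordMetadata future = batch == null ? null : batch.tryAppend(timestamp, key, value, headers, callback, now);
    if (future == null) {
        // No room left (or no batch yet): allocate a fresh batch and append there.
        ProducerBatch newBatch = newBatch(tp, now); // hypothetical helper
        future = newBatch.tryAppend(timestamp, key, value, headers, callback, now);
        deque.addLast(newBatch);
    }
    return future;
}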
/**
 * Complete the request.
 *
 * @param baseOffset The base offset of the messages assigned by the server
 * @param exception  The exception that occurred (or null if the request was successful)
 */
public void done(long baseOffset, RuntimeException exception) {
    this.produceFuture.done(topicPartition, baseOffset, exception);
    log.trace("Produced messages to topic-partition {} with base offset {} and error: {}.",
              topicPartition, baseOffset, exception);
    // Execute the user callbacks in the order the records were appended.
    for (int i = 0; i < this.thunks.size(); i++) {
        try {
            Thunk thunk = this.thunks.get(i);
            if (thunk.callback != null) {
                if (exception == null)
                    thunk.callback.onCompletion(thunk.future.get(), null);
                else
                    thunk.callback.onCompletion(null, exception);
            }
        } catch (Exception e) {
            log.error("Error executing user-provided callback on message for topic-partition {}:", topicPartition, e);
        }
    }
}
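// A small self-contained illustration (not Kafka code) of why each callback above
// runs inside its own try/catch: one misbehaving user callback must not stop the
// remaining callbacks from being invoked.
List<Runnable> callbacks = Arrays.asList(
        () -> { throw new RuntimeException("user bug"); },
        () -> System.out.println("still invoked"));
for (Runnable callback : callbacks) {
    try {
        callback.run();
    } catch (Exception e) {
        System.err.println("callback failed, continuing: " + e.getMessage());
    }
}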
@Test
public void testChecksumNullForMagicV2() {
    ProducerBatch batch = new ProducerBatch(new TopicPartition("topic", 1), memoryRecordsBuilder, now);
    FutureRecordMetadata future = batch.tryAppend(now, null, new byte[10], Record.EMPTY_HEADERS, null, now);
    assertNotNull(future);
    assertNull(future.checksumOrNull());
}