/**
 * Registers any pending ("in-flight") partitions with the transaction before the commit phase.
 *
 * <p>{@link org.apache.kafka.clients.producer.KafkaProducer#commitTransaction} normally adds new
 * partitions to the transaction as part of committing. Performing that step here, during
 * pre-commit/flush, keeps resumeTransaction simple: it no longer needs to reconstruct the state
 * of partitions that were queued but not yet added.
 */
private void flushNewPartitions() {
    LOG.info("Flushing new partitions");
    // Enqueue the add-partitions request, then wake the producer's sender thread (accessed via
    // reflection) so the request is dispatched immediately instead of on the next poll cycle.
    TransactionalRequestResult addPartitionsResult = enqueueNewPartitions();
    Object senderThread = getValue(kafkaProducer, "sender");
    invoke(senderThread, "wakeup");
    // Block until the broker has acknowledged the newly added partitions.
    addPartitionsResult.await();
}
/**
 * Waits up to the given timeout for this transactional request to complete.
 *
 * @param timeout the maximum time to wait
 * @param unit    the time unit of {@code timeout}
 * @return {@code true} if the request completed within the timeout, {@code false} otherwise
 * @throws InterruptedException if the current thread is interrupted while waiting
 */
public boolean await(long timeout, TimeUnit unit) throws InterruptedException {
    boolean success = latch.await(timeout, unit);
    // Only inspect the failure once the request has actually completed. On a timeout the result
    // is still incomplete: isSuccessful() is false but error() has not been set yet, so the
    // original unconditional "if (!isSuccessful()) throw error();" could throw null (NPE)
    // instead of reporting the timeout via the return value.
    if (success && !isSuccessful())
        throw error();
    return success;
}
if (result.isCompleted()) { pendingTxnOffsetCommits.clear(); } else if (pendingTxnOffsetCommits.isEmpty()) { result.done(); } else {
@Test
public void testTransactionalIdAuthorizationFailureInAddOffsetsToTxn() {
    // A TRANSACTIONAL_ID_AUTHORIZATION_FAILED response to AddOffsetsToTxn must complete the
    // sendOffsetsToTransaction future exceptionally and put the manager in a fatal error state.
    final String consumerGroupId = "consumer";
    final long pid = 13131L;
    final short epoch = 1;
    final TopicPartition tp = new TopicPartition("foo", 0);
    doInitTransactions(pid, epoch);
    transactionManager.beginTransaction();
    TransactionalRequestResult sendOffsetsResult = transactionManager.sendOffsetsToTransaction(
            singletonMap(tp, new OffsetAndMetadata(39L)), consumerGroupId);
    prepareAddOffsetsToTxnResponse(Errors.TRANSACTIONAL_ID_AUTHORIZATION_FAILED, consumerGroupId, pid, epoch);
    sender.run(time.milliseconds()); // AddOffsetsToTxn Handled
    assertTrue(transactionManager.hasError());
    assertTrue(transactionManager.lastError() instanceof TransactionalIdAuthorizationException);
    // The offsets future completes unsuccessfully, carrying the authorization exception...
    assertTrue(sendOffsetsResult.isCompleted());
    assertFalse(sendOffsetsResult.isSuccessful());
    assertTrue(sendOffsetsResult.error() instanceof TransactionalIdAuthorizationException);
    // ...and the error is fatal (not abortable).
    assertFatalError(TransactionalIdAuthorizationException.class);
}
@Test
public void shouldNotSendAbortTxnRequestWhenOnlyAddPartitionsRequestFailed() {
    // If the only failed request was AddPartitionsToTxn, no partition was ever added to the
    // transaction, so the abort should finish locally without an EndTxn(ABORT) round trip
    // (note: no EndTxn response is prepared below, yet the abort still succeeds).
    final long pid = 13131L;
    final short epoch = 1;
    doInitTransactions(pid, epoch);
    transactionManager.beginTransaction();
    transactionManager.maybeAddPartitionToTransaction(tp0);
    prepareAddPartitionsToTxnResponse(Errors.TOPIC_AUTHORIZATION_FAILED, tp0, epoch, pid);
    sender.run(time.milliseconds()); // Send AddPartitionsRequest
    TransactionalRequestResult abortResult = transactionManager.beginAbort();
    // The abort future is only completed by the next sender loop iteration.
    assertFalse(abortResult.isCompleted());
    sender.run(time.milliseconds());
    assertTrue(abortResult.isCompleted());
    assertTrue(abortResult.isSuccessful());
}
assertFalse(commitResult.isCompleted()); assertTrue(commitResult.isCompleted()); // commit should be cancelled with exception without being sent. commitResult.await(); assertTrue(abortResult.isCompleted()); assertTrue(abortResult.isSuccessful());
assertFalse(commitResult.isCompleted()); assertFalse(commitResult.isCompleted()); assertTrue(transactionManager.hasOngoingTransaction()); assertTrue(transactionManager.isCompleting()); assertTrue(commitResult.isCompleted()); assertFalse(transactionManager.hasOngoingTransaction());
// Default construction: each handler owns a fresh result future that callers can block on
// until the corresponding transactional request completes.
TxnRequestHandler() {
    this(new TransactionalRequestResult());
}
@Test
public void testTransactionalIdAuthorizationFailureInFindCoordinator() {
    // TRANSACTIONAL_ID_AUTHORIZATION_FAILED during coordinator lookup must fail the pending
    // InitProducerId future and leave the transaction manager in a fatal error state.
    TransactionalRequestResult initPidResult = transactionManager.initializeTransactions();
    prepareFindCoordinatorResponse(Errors.TRANSACTIONAL_ID_AUTHORIZATION_FAILED, false,
            CoordinatorType.TRANSACTION, transactionalId);
    sender.run(time.milliseconds()); // find coordinator
    sender.run(time.milliseconds());
    assertTrue(transactionManager.hasError());
    assertTrue(transactionManager.lastError() instanceof TransactionalIdAuthorizationException);
    sender.run(time.milliseconds()); // one more run to fail the InitProducerId future
    assertTrue(initPidResult.isCompleted());
    assertFalse(initPidResult.isSuccessful());
    assertTrue(initPidResult.error() instanceof TransactionalIdAuthorizationException);
    assertFatalError(TransactionalIdAuthorizationException.class);
}
@Test
public void shouldFailAbortIfAddOffsetsFailsWithFatalError() {
    // UNKNOWN_SERVER_ERROR on AddOffsetsToTxn is fatal: the in-flight abort must complete
    // unsuccessfully and the manager must transition to the fatal-error state.
    final long pid = 13131L;
    final short epoch = 1;
    doInitTransactions(pid, epoch);
    transactionManager.beginTransaction();
    Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();
    offsets.put(tp1, new OffsetAndMetadata(1));
    final String consumerGroupId = "myconsumergroup";
    transactionManager.sendOffsetsToTransaction(offsets, consumerGroupId);
    TransactionalRequestResult abortResult = transactionManager.beginAbort();
    prepareAddOffsetsToTxnResponse(Errors.UNKNOWN_SERVER_ERROR, consumerGroupId, pid, epoch);
    sender.run(time.milliseconds()); // Send AddOffsetsToTxnRequest
    assertFalse(abortResult.isCompleted());
    sender.run(time.milliseconds());
    assertTrue(abortResult.isCompleted());
    // Unlike abortable errors, a fatal error fails even the abort itself.
    assertFalse(abortResult.isSuccessful());
    assertTrue(transactionManager.hasFatalError());
}
@Test
public void testLookupCoordinatorOnNotCoordinatorError() {
    // This is called from the initTransactions method in the producer as the first order of business.
    // It finds the coordinator and then gets a PID. A NOT_COORDINATOR error on InitProducerId must
    // trigger a fresh coordinator lookup followed by an InitProducerId retry.
    final long pid = 13131L;
    final short epoch = 1;
    TransactionalRequestResult initPidResult = transactionManager.initializeTransactions();
    prepareFindCoordinatorResponse(Errors.NONE, false, CoordinatorType.TRANSACTION, transactionalId);
    sender.run(time.milliseconds()); // find coordinator
    sender.run(time.milliseconds());
    assertEquals(brokerNode, transactionManager.coordinator(CoordinatorType.TRANSACTION));
    prepareInitPidResponse(Errors.NOT_COORDINATOR, false, pid, epoch);
    sender.run(time.milliseconds()); // send pid, get not coordinator. Should resend the FindCoordinator and InitPid requests
    // The stale coordinator is forgotten and the init future remains pending.
    assertNull(transactionManager.coordinator(CoordinatorType.TRANSACTION));
    assertFalse(initPidResult.isCompleted());
    assertFalse(transactionManager.hasProducerId());
    prepareFindCoordinatorResponse(Errors.NONE, false, CoordinatorType.TRANSACTION, transactionalId);
    sender.run(time.milliseconds());
    assertEquals(brokerNode, transactionManager.coordinator(CoordinatorType.TRANSACTION));
    assertFalse(initPidResult.isCompleted());
    prepareInitPidResponse(Errors.NONE, false, pid, epoch);
    sender.run(time.milliseconds()); // get pid and epoch
    assertTrue(initPidResult.isCompleted()); // The future should only return after the second round of retries succeed.
    assertTrue(transactionManager.hasProducerId());
    assertEquals(pid, transactionManager.producerIdAndEpoch().producerId);
    assertEquals(epoch, transactionManager.producerIdAndEpoch().epoch);
}
/**
 * Flushes the not-yet-added ("in-flight") partitions to the transaction before committing.
 *
 * <p>Besides committing, {@link org.apache.kafka.clients.producer.KafkaProducer#commitTransaction}
 * is also adding new partitions to the transaction. This method moves that logic to
 * pre-commit/flush, to make resumeTransaction simpler: otherwise resumeTransaction would have to
 * restore the state of the not yet added/"in-flight" partitions.
 */
private void flushNewPartitions() {
    LOG.info("Flushing new partitions");
    // Queue the add-partitions request, then wake the producer's sender thread (reached via
    // reflection) so the request is sent immediately rather than on the next poll.
    TransactionalRequestResult result = enqueueNewPartitions();
    Object sender = getValue(kafkaProducer, "sender");
    invoke(sender, "wakeup");
    // Block until the broker acknowledges the new partitions.
    result.await();
}
@Test
public void testTransactionalIdAuthorizationFailureInInitProducerId() {
    // TRANSACTIONAL_ID_AUTHORIZATION_FAILED on InitProducerId must fail the init future and
    // put the transaction manager in a fatal error state.
    final long pid = 13131L;
    TransactionalRequestResult initPidResult = transactionManager.initializeTransactions();
    prepareFindCoordinatorResponse(Errors.NONE, false, CoordinatorType.TRANSACTION, transactionalId);
    sender.run(time.milliseconds()); // find coordinator
    sender.run(time.milliseconds());
    assertEquals(brokerNode, transactionManager.coordinator(CoordinatorType.TRANSACTION));
    prepareInitPidResponse(Errors.TRANSACTIONAL_ID_AUTHORIZATION_FAILED, false, pid, RecordBatch.NO_PRODUCER_EPOCH);
    sender.run(time.milliseconds());
    assertTrue(transactionManager.hasError());
    assertTrue(initPidResult.isCompleted());
    assertFalse(initPidResult.isSuccessful());
    assertTrue(initPidResult.error() instanceof TransactionalIdAuthorizationException);
    assertFatalError(TransactionalIdAuthorizationException.class);
}
@Test
public void shouldNotSendAbortTxnRequestWhenOnlyAddOffsetsRequestFailed() {
    // GROUP_AUTHORIZATION_FAILED on AddOffsetsToTxn is abortable, and since no partitions were
    // added to the transaction the abort should finish locally (manager back to READY) without
    // an EndTxn(ABORT) round trip — note no EndTxn response is prepared below.
    final long pid = 13131L;
    final short epoch = 1;
    doInitTransactions(pid, epoch);
    transactionManager.beginTransaction();
    Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();
    offsets.put(tp1, new OffsetAndMetadata(1));
    final String consumerGroupId = "myconsumergroup";
    transactionManager.sendOffsetsToTransaction(offsets, consumerGroupId);
    TransactionalRequestResult abortResult = transactionManager.beginAbort();
    prepareAddOffsetsToTxnResponse(Errors.GROUP_AUTHORIZATION_FAILED, consumerGroupId, pid, epoch);
    sender.run(time.milliseconds()); // Send AddOffsetsToTxnRequest
    assertFalse(abortResult.isCompleted());
    sender.run(time.milliseconds());
    assertTrue(transactionManager.isReady());
    assertTrue(abortResult.isCompleted());
    assertTrue(abortResult.isSuccessful());
}
public void await() { boolean completed = false; while (!completed) { try { latch.await(); completed = true; } catch (InterruptedException e) { // Keep waiting until done, we have no other option for these transactional requests. } } if (!isSuccessful()) throw error(); }
@Test
public void testLookupCoordinatorOnDisconnectAfterSend() {
    // This is called from the initTransactions method in the producer as the first order of business.
    // It finds the coordinator and then gets a PID. A disconnect after the InitProducerId request
    // was sent must trigger a coordinator re-lookup and an InitProducerId retry.
    final long pid = 13131L;
    final short epoch = 1;
    TransactionalRequestResult initPidResult = transactionManager.initializeTransactions();
    prepareFindCoordinatorResponse(Errors.NONE, false, CoordinatorType.TRANSACTION, transactionalId);
    sender.run(time.milliseconds()); // find coordinator
    sender.run(time.milliseconds());
    assertEquals(brokerNode, transactionManager.coordinator(CoordinatorType.TRANSACTION));
    prepareInitPidResponse(Errors.NONE, true, pid, epoch);
    // send pid to coordinator, should get disconnected before receiving the response, and resend the
    // FindCoordinator and InitPid requests.
    sender.run(time.milliseconds());
    // The coordinator is forgotten on disconnect and the init future remains pending.
    assertNull(transactionManager.coordinator(CoordinatorType.TRANSACTION));
    assertFalse(initPidResult.isCompleted());
    assertFalse(transactionManager.hasProducerId());
    prepareFindCoordinatorResponse(Errors.NONE, false, CoordinatorType.TRANSACTION, transactionalId);
    sender.run(time.milliseconds());
    assertEquals(brokerNode, transactionManager.coordinator(CoordinatorType.TRANSACTION));
    assertFalse(initPidResult.isCompleted());
    prepareInitPidResponse(Errors.NONE, false, pid, epoch);
    sender.run(time.milliseconds()); // get pid and epoch
    assertTrue(initPidResult.isCompleted()); // The future should only return after the second round of retries succeed.
    assertTrue(transactionManager.hasProducerId());
    assertEquals(pid, transactionManager.producerIdAndEpoch().producerId);
    assertEquals(epoch, transactionManager.producerIdAndEpoch().epoch);
}
/**
 * Besides committing, {@link org.apache.kafka.clients.producer.KafkaProducer#commitTransaction}
 * is also adding new partitions to the transaction. flushNewPartitions method is moving this
 * logic to pre-commit/flush, to make resumeTransaction simpler.
 * Otherwise resumeTransaction would require to restore state of the not yet added/"in-flight"
 * partitions.
 */
private void flushNewPartitions() {
    LOG.info("Flushing new partitions");
    // Enqueue the add-partitions request and wake the producer's sender thread (obtained via
    // reflection) so the request is dispatched immediately.
    TransactionalRequestResult result = enqueueNewPartitions();
    Object sender = getValue(kafkaProducer, "sender");
    invoke(sender, "wakeup");
    // Block until the broker acknowledges before the commit proceeds.
    result.await();
}
@Test
public void testGroupAuthorizationFailureInFindCoordinator() {
    // GROUP_AUTHORIZATION_FAILED while locating the *group* coordinator must abort the pending
    // TxnOffsetCommit, surface the group id on the exception, and leave an abortable error.
    final String consumerGroupId = "consumer";
    final long pid = 13131L;
    final short epoch = 1;
    doInitTransactions(pid, epoch);
    transactionManager.beginTransaction();
    TransactionalRequestResult sendOffsetsResult = transactionManager.sendOffsetsToTransaction(
            singletonMap(new TopicPartition("foo", 0), new OffsetAndMetadata(39L)), consumerGroupId);
    prepareAddOffsetsToTxnResponse(Errors.NONE, consumerGroupId, pid, epoch);
    sender.run(time.milliseconds()); // AddOffsetsToTxn Handled, TxnOffsetCommit Enqueued
    sender.run(time.milliseconds()); // FindCoordinator Enqueued
    prepareFindCoordinatorResponse(Errors.GROUP_AUTHORIZATION_FAILED, false, CoordinatorType.GROUP, consumerGroupId);
    sender.run(time.milliseconds()); // FindCoordinator Failed
    sender.run(time.milliseconds()); // TxnOffsetCommit Aborted
    assertTrue(transactionManager.hasError());
    assertTrue(transactionManager.lastError() instanceof GroupAuthorizationException);
    assertTrue(sendOffsetsResult.isCompleted());
    assertFalse(sendOffsetsResult.isSuccessful());
    assertTrue(sendOffsetsResult.error() instanceof GroupAuthorizationException);
    // The exception carries the offending consumer group id.
    GroupAuthorizationException exception = (GroupAuthorizationException) sendOffsetsResult.error();
    assertEquals(consumerGroupId, exception.groupId());
    // Unlike transactional-id authorization failures, this error is abortable.
    assertAbortableError(GroupAuthorizationException.class);
}
@Test
public void testCancelUnsentAddPartitionsAndProduceOnAbort() throws InterruptedException {
    // Aborting before the AddPartitions/Produce requests ever go out must cancel them locally:
    // the abort completes without an EndTxn round trip and the produce future fails.
    final long pid = 13131L;
    final short epoch = 1;
    doInitTransactions(pid, epoch);
    transactionManager.beginTransaction();
    transactionManager.maybeAddPartitionToTransaction(tp0);
    Future<RecordMetadata> responseFuture = accumulator.append(tp0, time.milliseconds(), "key".getBytes(),
            "value".getBytes(), Record.EMPTY_HEADERS, null, MAX_BLOCK_TIMEOUT).future;
    assertFalse(responseFuture.isDone());
    TransactionalRequestResult abortResult = transactionManager.beginAbort();
    // note since no partitions were added to the transaction, no EndTxn will be sent
    sender.run(time.milliseconds()); // try to abort
    assertTrue(abortResult.isCompleted());
    assertTrue(abortResult.isSuccessful());
    assertTrue(transactionManager.isReady()); // make sure we are ready for a transaction now.
    // The cancelled produce future must surface a KafkaException rather than hang.
    try {
        responseFuture.get();
        fail("Expected produce future to raise an exception");
    } catch (ExecutionException e) {
        assertTrue(e.getCause() instanceof KafkaException);
    }
}
assertFalse(addOffsetsResult.isCompleted()); // the result doesn't complete until TxnOffsetCommit returns assertTrue(addOffsetsResult.isCompleted()); // We should only be done after both RPCs complete.