@Test
public void testFailIfNotReadyForSendIdempotentProducer() {
    // An idempotent (non-transactional) manager should be immediately ready to
    // send: failIfNotReadyForSend() must not throw on a fresh instance.
    TransactionManager manager = new TransactionManager();
    manager.failIfNotReadyForSend();
}
/**
 * Builds the {@link TransactionManager} implied by the producer configuration,
 * or returns {@code null} when neither idempotence nor transactions are enabled.
 *
 * @param config     the producer configuration to inspect
 * @param logContext log context used when constructing the manager
 * @param log        logger for reporting which flavor of producer was created
 * @return a configured TransactionManager, or null for a plain producer
 * @throws ConfigException if a transactional.id is set while idempotence is
 *                         explicitly disabled
 */
private static TransactionManager configureTransactionState(ProducerConfig config, LogContext logContext, Logger log) {
    // Whether the user explicitly supplied these configs (vs. relying on defaults).
    boolean userConfiguredIdempotence = config.originals().containsKey(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG);
    boolean userConfiguredTransactions = config.originals().containsKey(ProducerConfig.TRANSACTIONAL_ID_CONFIG);

    boolean idempotenceEnabled = config.getBoolean(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG);

    // Transactions require idempotence: explicitly turning idempotence off while
    // also setting a transactional.id is contradictory.
    if (!idempotenceEnabled && userConfiguredIdempotence && userConfiguredTransactions)
        throw new ConfigException("Cannot set a " + ProducerConfig.TRANSACTIONAL_ID_CONFIG + " without also enabling idempotence.");

    // Setting a transactional.id implicitly enables idempotence.
    if (userConfiguredTransactions)
        idempotenceEnabled = true;

    if (!idempotenceEnabled)
        return null; // plain, non-idempotent producer: no TransactionManager needed

    String transactionalId = config.getString(ProducerConfig.TRANSACTIONAL_ID_CONFIG);
    int transactionTimeoutMs = config.getInt(ProducerConfig.TRANSACTION_TIMEOUT_CONFIG);
    long retryBackoffMs = config.getLong(ProducerConfig.RETRY_BACKOFF_MS_CONFIG);
    TransactionManager transactionManager =
            new TransactionManager(logContext, transactionalId, transactionTimeoutMs, retryBackoffMs);

    if (transactionManager.isTransactional())
        log.info("Instantiated a transactional producer.");
    else
        log.info("Instantiated an idempotent producer.");

    return transactionManager;
}
@Test(expected = IllegalStateException.class)
public void testInvalidSequenceIncrement() {
    // Incrementing the sequence on a fresh manager with no producer state for
    // the partition is invalid and must raise IllegalStateException.
    TransactionManager manager = new TransactionManager();
    manager.incrementSequenceNumber(tp0, 3333);
}
@Test
public void testIdempotentSplitBatchAndSend() throws Exception {
    // Exercise the shared split-batch-and-send scenario with an idempotent
    // (non-transactional) manager carrying a fixed producer id/epoch.
    ProducerIdAndEpoch idAndEpoch = new ProducerIdAndEpoch(123456L, (short) 0);
    TopicPartition partition = new TopicPartition("testSplitBatchAndSend", 1);
    TransactionManager manager = new TransactionManager();
    manager.setProducerIdAndEpoch(idAndEpoch);
    testSplitBatchAndSend(manager, idAndEpoch, partition);
}
@Test(expected = KafkaException.class)
public void testFailIfNotReadyForSendIdempotentProducerFatalError() {
    // Once the manager has transitioned to a fatal error, any readiness check
    // must surface that error as a KafkaException.
    TransactionManager manager = new TransactionManager();
    manager.transitionToFatalError(new KafkaException());
    manager.failIfNotReadyForSend();
}
@Test(expected = UnsupportedVersionException.class) public void testIdempotenceWithOldMagic() throws InterruptedException { // Simulate talking to an older broker, ie. one which supports a lower magic. ApiVersions apiVersions = new ApiVersions(); int batchSize = 1025; int requestTimeoutMs = 1600; long deliveryTimeoutMs = 3200L; long lingerMs = 10L; long retryBackoffMs = 100L; long totalSize = 10 * batchSize; String metricGrpName = "producer-metrics"; apiVersions.update("foobar", NodeApiVersions.create(Arrays.asList(new ApiVersionsResponse.ApiVersion(ApiKeys.PRODUCE.id, (short) 0, (short) 2)))); RecordAccumulator accum = new RecordAccumulator(logContext, batchSize + DefaultRecordBatch.RECORD_BATCH_OVERHEAD, CompressionType.NONE, lingerMs, retryBackoffMs, deliveryTimeoutMs, metrics, metricGrpName, time, apiVersions, new TransactionManager(), new BufferPool(totalSize, batchSize, metrics, time, metricGrpName)); accum.append(tp1, 0L, key, value, Record.EMPTY_HEADERS, null, 0); }
@Test
public void testDefaultSequenceNumber() {
    // A fresh TransactionManager starts every partition at sequence 0 and
    // advances by exactly the requested increment.
    TransactionManager transactionManager = new TransactionManager();
    // Fix: JUnit's assertEquals takes (expected, actual); the original had the
    // arguments reversed, which yields misleading failure messages.
    assertEquals(0, (int) transactionManager.sequenceNumber(tp0));
    transactionManager.incrementSequenceNumber(tp0, 3);
    assertEquals(3, (int) transactionManager.sequenceNumber(tp0));
}
@Test
public void testTransactionalSplitBatchAndSend() throws Exception {
    // Exercise the shared split-batch-and-send scenario under a transactional
    // manager: init transactions, begin one, and register the partition first.
    ProducerIdAndEpoch idAndEpoch = new ProducerIdAndEpoch(123456L, (short) 0);
    TopicPartition partition = new TopicPartition("testSplitBatchAndSend", 1);
    TransactionManager manager =
            new TransactionManager(logContext, "testSplitBatchAndSend", 60000, 100);

    setupWithTransactionState(manager);
    doInitTransactions(manager, idAndEpoch);

    manager.beginTransaction();
    manager.maybeAddPartitionToTransaction(partition);
    // Let the AddPartitionsToTxn round trip complete successfully.
    client.prepareResponse(
            new AddPartitionsToTxnResponse(0, Collections.singletonMap(partition, Errors.NONE)));
    sender.run(time.milliseconds());

    testSplitBatchAndSend(manager, idAndEpoch, partition);
}
@Before
public void setup() {
    // Wire up the full sender stack (metrics, accumulator, transaction manager,
    // sender) against the mock client before each test.
    Map<String, String> tags = new LinkedHashMap<>();
    tags.put("client-id", CLIENT_ID);
    MetricConfig metricConfig = new MetricConfig().tags(tags);

    int batchSize = 16 * 1024;
    long totalSize = 1024 * 1024;
    long deliveryTimeoutMs = 3000L;
    String metricGrpName = "producer-metrics";

    this.brokerNode = new Node(0, "localhost", 2211);
    this.transactionManager = new TransactionManager(logContext, transactionalId,
            transactionTimeoutMs, DEFAULT_RETRY_BACKOFF_MS);

    Metrics metrics = new Metrics(metricConfig, time);
    SenderMetricsRegistry senderMetrics = new SenderMetricsRegistry(metrics);

    this.accumulator = new RecordAccumulator(logContext, batchSize, CompressionType.NONE,
            0L, 0L, deliveryTimeoutMs, metrics, metricGrpName, time, apiVersions,
            transactionManager, new BufferPool(totalSize, batchSize, metrics, time, metricGrpName));
    this.sender = new Sender(logContext, this.client, this.metadata, this.accumulator, true,
            MAX_REQUEST_SIZE, ACKS_ALL, MAX_RETRIES, senderMetrics, this.time,
            REQUEST_TIMEOUT, 50, transactionManager, apiVersions);

    // Single broker, two partitions for topic "test".
    this.client.updateMetadata(TestUtils.metadataUpdateWith(1, singletonMap("test", 2)));
}
@Test public void testExpiryOfUnsentBatchesShouldNotCauseUnresolvedSequences() throws Exception { final long producerId = 343434L; TransactionManager transactionManager = new TransactionManager(); setupWithTransactionState(transactionManager); prepareAndReceiveInitProducerId(producerId, Errors.NONE); assertTrue(transactionManager.hasProducerId()); assertEquals(0, transactionManager.sequenceNumber(tp0).longValue()); // Send first ProduceRequest Future<RecordMetadata> request1 = accumulator.append(tp0, 0L, "key".getBytes(), "value".getBytes(), null, null, MAX_BLOCK_TIMEOUT).future; Node node = metadata.fetch().nodes().get(0); time.sleep(10000L); client.disconnect(node.idString()); client.blackout(node, 10); sender.run(time.milliseconds()); assertFutureFailure(request1, TimeoutException.class); assertFalse(transactionManager.hasUnresolvedSequence(tp0)); }
@Test
public void testInitProducerIdRequest() throws Exception {
    // A successful InitProducerId round trip installs the assigned producer id
    // with epoch 0 on the manager.
    final long producerId = 343434L;
    TransactionManager manager = new TransactionManager();
    setupWithTransactionState(manager);
    prepareAndReceiveInitProducerId(producerId, Errors.NONE);

    assertTrue(manager.hasProducerId());
    assertEquals(producerId, manager.producerIdAndEpoch().producerId);
    assertEquals((short) 0, manager.producerIdAndEpoch().epoch);
}
@Test
public void testProducerIdReset() {
    // Resetting the producer id must clear per-partition sequence numbers back to 0.
    TransactionManager transactionManager = new TransactionManager();
    // Fix: JUnit's assertEquals takes (expected, actual); the original had the
    // arguments reversed, which yields misleading failure messages.
    assertEquals(0, (int) transactionManager.sequenceNumber(tp0));
    transactionManager.incrementSequenceNumber(tp0, 3);
    assertEquals(3, (int) transactionManager.sequenceNumber(tp0));
    transactionManager.resetProducerId();
    assertEquals(0, (int) transactionManager.sequenceNumber(tp0));
}
@Test public void testUnsupportedForMessageFormatInProduceRequest() throws Exception { final long producerId = 343434L; TransactionManager transactionManager = new TransactionManager(); setupWithTransactionState(transactionManager); prepareAndReceiveInitProducerId(producerId, Errors.NONE); assertTrue(transactionManager.hasProducerId()); Future<RecordMetadata> future = accumulator.append(tp0, time.milliseconds(), "key".getBytes(), "value".getBytes(), null, null, MAX_BLOCK_TIMEOUT).future; client.prepareResponse(new MockClient.RequestMatcher() { @Override public boolean matches(AbstractRequest body) { return body instanceof ProduceRequest && ((ProduceRequest) body).hasIdempotentRecords(); } }, produceResponse(tp0, -1, Errors.UNSUPPORTED_FOR_MESSAGE_FORMAT, 0)); sender.run(time.milliseconds()); assertFutureFailure(future, UnsupportedForMessageFormatException.class); // unsupported for message format is not a fatal error assertFalse(transactionManager.hasError()); }
@Test public void testUnsupportedVersionInProduceRequest() throws Exception { final long producerId = 343434L; TransactionManager transactionManager = new TransactionManager(); setupWithTransactionState(transactionManager); prepareAndReceiveInitProducerId(producerId, Errors.NONE); assertTrue(transactionManager.hasProducerId()); Future<RecordMetadata> future = accumulator.append(tp0, time.milliseconds(), "key".getBytes(), "value".getBytes(), null, null, MAX_BLOCK_TIMEOUT).future; client.prepareUnsupportedVersionResponse(new MockClient.RequestMatcher() { @Override public boolean matches(AbstractRequest body) { return body instanceof ProduceRequest && ((ProduceRequest) body).hasIdempotentRecords(); } }); sender.run(time.milliseconds()); assertFutureFailure(future, UnsupportedVersionException.class); // unsupported version errors are fatal, so we should continue seeing it on future sends assertTrue(transactionManager.hasFatalError()); assertSendFailure(UnsupportedVersionException.class); }
@Test public void testClusterAuthorizationExceptionInInitProducerIdRequest() throws Exception { final long producerId = 343434L; TransactionManager transactionManager = new TransactionManager(); setupWithTransactionState(transactionManager); prepareAndReceiveInitProducerId(producerId, Errors.CLUSTER_AUTHORIZATION_FAILED); assertFalse(transactionManager.hasProducerId()); assertTrue(transactionManager.hasError()); assertTrue(transactionManager.lastError() instanceof ClusterAuthorizationException); // cluster authorization is a fatal error for the producer assertSendFailure(ClusterAuthorizationException.class); }
@Test public void testClusterAuthorizationExceptionInProduceRequest() throws Exception { final long producerId = 343434L; TransactionManager transactionManager = new TransactionManager(); setupWithTransactionState(transactionManager); prepareAndReceiveInitProducerId(producerId, Errors.NONE); assertTrue(transactionManager.hasProducerId()); // cluster authorization is a fatal error for the producer Future<RecordMetadata> future = accumulator.append(tp0, time.milliseconds(), "key".getBytes(), "value".getBytes(), null, null, MAX_BLOCK_TIMEOUT).future; client.prepareResponse(new MockClient.RequestMatcher() { @Override public boolean matches(AbstractRequest body) { return body instanceof ProduceRequest && ((ProduceRequest) body).hasIdempotentRecords(); } }, produceResponse(tp0, -1, Errors.CLUSTER_AUTHORIZATION_FAILED, 0)); sender.run(time.milliseconds()); assertFutureFailure(future, ClusterAuthorizationException.class); // cluster authorization errors are fatal, so we should continue seeing it on future sends assertTrue(transactionManager.hasFatalError()); assertSendFailure(ClusterAuthorizationException.class); }
@Test public void testResetWhenOutOfOrderSequenceReceived() throws InterruptedException { final long producerId = 343434L; TransactionManager transactionManager = new TransactionManager(); transactionManager.setProducerIdAndEpoch(new ProducerIdAndEpoch(producerId, (short) 0)); setupWithTransactionState(transactionManager); int maxRetries = 10; Metrics m = new Metrics(); SenderMetricsRegistry senderMetrics = new SenderMetricsRegistry(m); Sender sender = new Sender(logContext, client, metadata, this.accumulator, true, MAX_REQUEST_SIZE, ACKS_ALL, maxRetries, senderMetrics, time, REQUEST_TIMEOUT, 50, transactionManager, apiVersions); Future<RecordMetadata> responseFuture = accumulator.append(tp0, time.milliseconds(), "key".getBytes(), "value".getBytes(), null, null, MAX_BLOCK_TIMEOUT).future; sender.run(time.milliseconds()); // connect. sender.run(time.milliseconds()); // send. assertEquals(1, client.inFlightRequestCount()); assertEquals(1, sender.inFlightBatches(tp0).size()); client.respond(produceResponse(tp0, 0, Errors.OUT_OF_ORDER_SEQUENCE_NUMBER, 0)); sender.run(time.milliseconds()); assertTrue(responseFuture.isDone()); assertEquals(0, sender.inFlightBatches(tp0).size()); assertFalse("Expected transaction state to be reset upon receiving an OutOfOrderSequenceException", transactionManager.hasProducerId()); }
@Test public void testShouldResetProducerStateAfterResolvingSequences() { // Create a TransactionManager without a transactionalId to test // shouldResetProducerStateAfterResolvingSequences. TransactionManager manager = new TransactionManager(logContext, null, transactionTimeoutMs, DEFAULT_RETRY_BACKOFF_MS); assertFalse(manager.shouldResetProducerStateAfterResolvingSequences()); TopicPartition tp0 = new TopicPartition("foo", 0); TopicPartition tp1 = new TopicPartition("foo", 1); assertEquals(Integer.valueOf(0), manager.sequenceNumber(tp0)); assertEquals(Integer.valueOf(0), manager.sequenceNumber(tp1)); manager.incrementSequenceNumber(tp0, 1); manager.incrementSequenceNumber(tp1, 1); manager.maybeUpdateLastAckedSequence(tp0, 0); manager.maybeUpdateLastAckedSequence(tp1, 0); manager.markSequenceUnresolved(tp0); manager.markSequenceUnresolved(tp1); assertFalse(manager.shouldResetProducerStateAfterResolvingSequences()); manager.maybeUpdateLastAckedSequence(tp0, 5); manager.incrementSequenceNumber(tp0, 1); manager.markSequenceUnresolved(tp0); manager.markSequenceUnresolved(tp1); assertTrue(manager.shouldResetProducerStateAfterResolvingSequences()); }
@Test
public void testSequenceNumberOverflow() {
    // Sequence numbers wrap around rather than overflow: incrementing past
    // Integer.MAX_VALUE lands back in the non-negative range (MAX_VALUE + 100
    // wraps to 99, then 99 + MAX_VALUE wraps to 98).
    TransactionManager transactionManager = new TransactionManager();
    // Fix: JUnit's assertEquals takes (expected, actual); the original had the
    // arguments reversed, which yields misleading failure messages.
    assertEquals(0, (int) transactionManager.sequenceNumber(tp0));
    transactionManager.incrementSequenceNumber(tp0, Integer.MAX_VALUE);
    assertEquals(Integer.MAX_VALUE, (int) transactionManager.sequenceNumber(tp0));
    transactionManager.incrementSequenceNumber(tp0, 100);
    assertEquals(99, (int) transactionManager.sequenceNumber(tp0));
    transactionManager.incrementSequenceNumber(tp0, Integer.MAX_VALUE);
    assertEquals(98, (int) transactionManager.sequenceNumber(tp0));
}
// Verifies that when every in-flight batch for a partition expires after having
// been sent (so the broker may or may not have recorded its sequence), the
// partition's sequence is marked unresolved, and that the producer can then
// obtain a fresh producer id in a single run loop.
@Test
public void testExpiryOfAllSentBatchesShouldCauseUnresolvedSequences() throws Exception {
    final long producerId = 343434L;
    TransactionManager transactionManager = new TransactionManager();
    setupWithTransactionState(transactionManager);
    prepareAndReceiveInitProducerId(producerId, Errors.NONE);
    assertTrue(transactionManager.hasProducerId());
    assertEquals(0, transactionManager.sequenceNumber(tp0).longValue());

    // Send first ProduceRequest
    Future<RecordMetadata> request1 = accumulator.append(tp0, 0L, "key".getBytes(),
            "value".getBytes(), null, null, MAX_BLOCK_TIMEOUT).future;
    sender.run(time.milliseconds());  // send request
    // Fail the attempt with a retriable error so the batch stays pending.
    sendIdempotentProducerResponse(0, tp0, Errors.NOT_LEADER_FOR_PARTITION, -1);
    sender.run(time.milliseconds()); // receive response
    // The sequence was consumed by the (failed but sent) batch.
    assertEquals(1L, transactionManager.sequenceNumber(tp0).longValue());

    // Disconnect the broker and advance time past the delivery timeout so the
    // retry can never be delivered.
    Node node = metadata.fetch().nodes().get(0);
    time.sleep(15000L);
    client.disconnect(node.idString());
    client.blackout(node, 10);

    sender.run(time.milliseconds()); // now expire the batch.
    assertFutureFailure(request1, TimeoutException.class);

    // The batch was sent once, so its fate on the broker is unknown: the
    // sequence must be flagged unresolved.
    assertTrue(transactionManager.hasUnresolvedSequence(tp0));
    assertFalse(client.hasInFlightRequests());
    Deque<ProducerBatch> batches = accumulator.batches().get(tp0);
    assertEquals(0, batches.size());
    assertTrue(transactionManager.hasProducerId(producerId));

    // We should now clear the old producerId and get a new one in a single run loop.
    prepareAndReceiveInitProducerId(producerId + 1, Errors.NONE);
    assertTrue(transactionManager.hasProducerId(producerId + 1));
}