@Test(expected = IllegalStateException.class)
public void ifNotSimulatingAdvanceTimeThrowsTest() {
    Time.advanceTime(1000);
}
public static void waitForReader(int port) {
    AtomicBoolean ab = getHasReader(port);
    long start = Time.currentTimeMillis();
    while (!ab.get()) {
        if (Time.isSimulating()) {
            Time.advanceTime(10);
        }
        try {
            Thread.sleep(10);
        } catch (InterruptedException e) {
            //Ignored
        }
        if (Time.currentTimeMillis() - start > 20000) {
            LOG.error("DONE WAITING FOR READER AFTER {} ms", Time.currentTimeMillis() - start);
            break;
        }
    }
}
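//Hedged usage sketch, not part of the original suite: under SimulatedTime the clock
//returned by Time.currentTimeMillis() only moves when Time.advanceTime() is called,
//which is why waitForReader() advances it on every loop iteration. Without that call
//the timeout check could never trigger during simulation. The port below is a
//hypothetical placeholder with no reader registered, so the call exits via the
//20-second timeout branch.
@Test
public void waitForReaderTimesOutUnderSimulatedTimeSketch() {
    try (Time.SimulatedTime t = new Time.SimulatedTime()) {
        waitForReader(12345);
    }
}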
private void failAllExceptTheFirstMessageThenCommit(ArgumentCaptor<KafkaSpoutMessageId> messageIds) {
    //Fail all emitted messages except the first. Commit the first.
    List<KafkaSpoutMessageId> messageIdList = messageIds.getAllValues();
    for (int i = 1; i < messageIdList.size(); i++) {
        spout.fail(messageIdList.get(i));
    }
    spout.ack(messageIdList.get(0));
    Time.advanceTime(commitOffsetPeriodMs + KafkaSpout.TIMER_DELAY_MS);
    spout.nextTuple();
}
void commitAndVerifyAllMessagesCommitted(long msgCount) {
    //Reset the commit timer so that the commit happens on the next call to nextTuple()
    Time.advanceTime(commitOffsetPeriodMs + KafkaSpout.TIMER_DELAY_MS);
    //Commit offsets
    spout.nextTuple();
    verifyAllMessagesCommitted(msgCount);
}
@Test(expected = IllegalArgumentException.class)
public void shouldThrowIfAttemptToAdvanceBackwardsTest() {
    try (SimulatedTime t = new SimulatedTime()) {
        Time.advanceTime(-1500);
    }
}
@Test
public void uploadedBlobPersistsMinimumTime() {
    Set<String> idleTopologies = new HashSet<>();
    idleTopologies.add("topology1");
    Map<String, Object> conf = new HashMap<>();
    conf.put(DaemonConfig.NIMBUS_TOPOLOGY_BLOBSTORE_DELETION_DELAY_MS, 300000);
    try (Time.SimulatedTime t = new Time.SimulatedTime(null)) {
        Set<String> toDelete = Nimbus.getExpiredTopologyIds(idleTopologies, conf);
        Assert.assertTrue(toDelete.isEmpty());
        //Advance 10 minutes, well past the configured 5-minute deletion delay
        Time.advanceTime(10 * 60 * 1000L);
        toDelete = Nimbus.getExpiredTopologyIds(idleTopologies, conf);
        Assert.assertTrue(toDelete.contains("topology1"));
        Assert.assertEquals(1, toDelete.size());
    }
}
@Test
public void testSpoutMustRefreshPartitionsEvenIfNotPolling() throws Exception {
    SingleTopicKafkaUnitSetupHelper.initializeSpout(spout, conf, topologyContext, collectorMock);

    //Nothing is assigned yet, should emit nothing
    spout.nextTuple();
    verify(collectorMock, never()).emit(anyString(), anyList(), any(KafkaSpoutMessageId.class));

    SingleTopicKafkaUnitSetupHelper.populateTopicData(kafkaUnitExtension.getKafkaUnit(),
        SingleTopicKafkaSpoutConfiguration.TOPIC, 1);
    Time.advanceTime(KafkaSpoutConfig.DEFAULT_PARTITION_REFRESH_PERIOD_MS + KafkaSpout.TIMER_DELAY_MS);

    //The new partition should be discovered and the message should be emitted
    spout.nextTuple();
    verify(collectorMock).emit(anyString(), anyList(), any(KafkaSpoutMessageId.class));
}
@Test
public void testShouldEmitAllMessages() throws Exception {
    final int messageCount = 10;
    prepareSpout(messageCount);

    //Emit all messages and check that they are emitted. Ack the messages too.
    for (int i = 0; i < messageCount; i++) {
        spout.nextTuple();
        ArgumentCaptor<Object> messageId = ArgumentCaptor.forClass(Object.class);
        verify(collectorMock).emit(
            eq(SingleTopicKafkaSpoutConfiguration.STREAM),
            eq(new Values(SingleTopicKafkaSpoutConfiguration.TOPIC,
                Integer.toString(i),
                Integer.toString(i))),
            messageId.capture());
        spout.ack(messageId.getValue());
        reset(collectorMock);
    }

    //Commit offsets
    Time.advanceTime(commitOffsetPeriodMs + KafkaSpout.TIMER_DELAY_MS);
    spout.nextTuple();

    verifyAllMessagesCommitted(messageCount);
}
@Test
public void testNextTupleWillRespectMaxUncommittedOffsetsWhenThereAreAckedUncommittedTuples() throws Exception {
    //The spout must respect maxUncommittedOffsets even if some tuples have been acked but not committed
    try (Time.SimulatedTime simulatedTime = new Time.SimulatedTime()) {
        //First check that maxUncommittedOffsets is respected when emitting from scratch
        ArgumentCaptor<KafkaSpoutMessageId> messageIds = emitMaxUncommittedOffsetsMessagesAndCheckNoMoreAreEmitted(numMessages);
        reset(collector);

        //Fail all emitted messages except the last one. Try to commit.
        List<KafkaSpoutMessageId> messageIdList = messageIds.getAllValues();
        for (int i = 0; i < messageIdList.size() - 1; i++) {
            spout.fail(messageIdList.get(i));
        }
        spout.ack(messageIdList.get(messageIdList.size() - 1));
        Time.advanceTime(commitOffsetPeriodMs + KafkaSpout.TIMER_DELAY_MS);
        spout.nextTuple();

        //Now check that the spout will not emit anything else since nothing has been committed
        for (int i = 0; i < numMessages; i++) {
            spout.nextTuple();
        }

        verify(collector, times(0)).emit(any(), any(), any());
    }
}
@Test
public void testShouldCommitAllMessagesIfNotSetToEmitNullTuples() throws Exception {
    final int messageCount = 10;
    prepareSpout(messageCount);

    //All null tuples should be committed, meaning they were considered to be emitted and acked
    for (int i = 0; i < messageCount; i++) {
        spout.nextTuple();
    }
    verify(collectorMock, never()).emit(anyString(), anyList(), any());

    //Commit offsets
    Time.advanceTime(commitOffsetPeriodMs + KafkaSpout.TIMER_DELAY_MS);
    spout.nextTuple();

    verifyAllMessagesCommitted(messageCount);
}
@Test
public void testNextTupleCanEmitMoreMessagesWhenDroppingBelowMaxUncommittedOffsetsDueToCommit() throws Exception {
    //The spout must respect maxUncommittedOffsets after committing a set of records
    try (Time.SimulatedTime simulatedTime = new Time.SimulatedTime()) {
        //First check that maxUncommittedOffsets is respected when emitting from scratch
        ArgumentCaptor<KafkaSpoutMessageId> messageIds = emitMaxUncommittedOffsetsMessagesAndCheckNoMoreAreEmitted(numMessages);
        reset(collector);

        //Ack all emitted messages and commit them
        for (KafkaSpoutMessageId messageId : messageIds.getAllValues()) {
            spout.ack(messageId);
        }
        Time.advanceTime(commitOffsetPeriodMs + KafkaSpout.TIMER_DELAY_MS);
        spout.nextTuple();

        //Now check that the spout will emit another maxUncommittedOffsets messages
        for (int i = 0; i < numMessages; i++) {
            spout.nextTuple();
        }

        verify(collector, times(maxUncommittedOffsets)).emit(any(), any(), any());
    }
}
@Test
public void shouldAdvanceForwardTest() {
    try (SimulatedTime t = new SimulatedTime()) {
        long current = Time.currentTimeMillis();
        Time.advanceTime(1000);
        Assert.assertEquals(1000, Time.deltaMs(current));
        Time.advanceTime(500);
        Assert.assertEquals(1500, Time.deltaMs(current));
    }
}
@Test
public void testNoGuaranteeModeCommitsPolledTuples() throws Exception {
    //When using the no guarantee mode, the spout must commit tuples periodically, regardless of whether they've been acked
    KafkaSpoutConfig<String, String> spoutConfig = createKafkaSpoutConfigBuilder(mock(TopicFilter.class), mock(ManualPartitioner.class), -1)
        .setProcessingGuarantee(KafkaSpoutConfig.ProcessingGuarantee.NO_GUARANTEE)
        .setTupleTrackingEnforced(true)
        .build();
    try (SimulatedTime time = new SimulatedTime()) {
        KafkaSpout<String, String> spout = SpoutWithMockedConsumerSetupHelper.setupSpout(
            spoutConfig, conf, contextMock, collectorMock, consumerMock, partition);

        when(consumerMock.poll(anyLong())).thenReturn(new ConsumerRecords<>(Collections.singletonMap(partition,
            SpoutWithMockedConsumerSetupHelper.createRecords(partition, 0, 1))));

        spout.nextTuple();

        when(consumerMock.position(partition)).thenReturn(1L);

        ArgumentCaptor<KafkaSpoutMessageId> msgIdCaptor = ArgumentCaptor.forClass(KafkaSpoutMessageId.class);
        verify(collectorMock).emit(eq(SingleTopicKafkaSpoutConfiguration.STREAM), anyList(), msgIdCaptor.capture());
        assertThat("Should have captured a message id", msgIdCaptor.getValue(), not(nullValue()));

        Time.advanceTime(KafkaSpout.TIMER_DELAY_MS + spoutConfig.getOffsetsCommitPeriodMs());

        spout.nextTuple();

        verify(consumerMock).commitAsync(commitCapture.capture(), isNull());

        CommitMetadataManager metadataManager = new CommitMetadataManager(contextMock,
            KafkaSpoutConfig.ProcessingGuarantee.NO_GUARANTEE);
        Map<TopicPartition, OffsetAndMetadata> committedOffsets = commitCapture.getValue();
        assertThat(committedOffsets.get(partition).offset(), is(1L));
        assertThat(committedOffsets.get(partition).metadata(), is(metadataManager.getCommitMetadata()));
    }
}
@Test
public void testAtMostOnceModeDoesNotCommitAckedTuples() throws Exception {
    //When tuple tracking is enabled, the spout must not commit acked tuples in at-most-once mode because they were committed before being emitted
    KafkaSpoutConfig<String, String> spoutConfig = createKafkaSpoutConfigBuilder(mock(TopicFilter.class), mock(ManualPartitioner.class), -1)
        .setProcessingGuarantee(KafkaSpoutConfig.ProcessingGuarantee.AT_MOST_ONCE)
        .setTupleTrackingEnforced(true)
        .build();
    try (SimulatedTime time = new SimulatedTime()) {
        KafkaSpout<String, String> spout = SpoutWithMockedConsumerSetupHelper.setupSpout(
            spoutConfig, conf, contextMock, collectorMock, consumerMock, partition);

        when(consumerMock.poll(anyLong())).thenReturn(new ConsumerRecords<>(Collections.singletonMap(partition,
            SpoutWithMockedConsumerSetupHelper.createRecords(partition, 0, 1))));

        spout.nextTuple();
        clearInvocations(consumerMock);

        ArgumentCaptor<KafkaSpoutMessageId> msgIdCaptor = ArgumentCaptor.forClass(KafkaSpoutMessageId.class);
        verify(collectorMock).emit(eq(SingleTopicKafkaSpoutConfiguration.STREAM), anyList(), msgIdCaptor.capture());
        assertThat("Should have captured a message id", msgIdCaptor.getValue(), not(nullValue()));

        spout.ack(msgIdCaptor.getValue());

        Time.advanceTime(KafkaSpout.TIMER_DELAY_MS + spoutConfig.getOffsetsCommitPeriodMs());

        when(consumerMock.poll(anyLong())).thenReturn(new ConsumerRecords<>(Collections.emptyMap()));

        spout.nextTuple();

        //The acked tuple's offset was already committed before emit, so no further commit for its partition may happen
        verify(consumerMock, never()).commitSync(argThat((Map<TopicPartition, OffsetAndMetadata> arg) -> {
            return arg.containsKey(partition);
        }));
    }
}
@Test
public void spoutMustIgnoreAcksForTuplesItIsNotAssignedAfterRebalance() throws Exception {
    //Acking tuples for partitions that are no longer assigned is useless since the spout will not be allowed to commit them
    try (SimulatedTime simulatedTime = new SimulatedTime()) {
        TopicAssigner assignerMock = mock(TopicAssigner.class);
        KafkaSpout<String, String> spout = new KafkaSpout<>(createKafkaSpoutConfigBuilder(topicFilterMock, partitionerMock, -1)
            .setOffsetCommitPeriodMs(offsetCommitPeriodMs)
            .build(), consumerFactory, assignerMock);
        String topic = SingleTopicKafkaSpoutConfiguration.TOPIC;
        TopicPartition partitionThatWillBeRevoked = new TopicPartition(topic, 1);
        TopicPartition assignedPartition = new TopicPartition(topic, 2);

        //Emit a message on each partition and revoke the first partition
        List<KafkaSpoutMessageId> emittedMessageIds = emitOneMessagePerPartitionThenRevokeOnePartition(
            spout, partitionThatWillBeRevoked, assignedPartition, assignerMock);

        //Ack both emitted tuples
        spout.ack(emittedMessageIds.get(0));
        spout.ack(emittedMessageIds.get(1));

        //Ensure the commit timer has expired
        Time.advanceTime(offsetCommitPeriodMs + KafkaSpout.TIMER_DELAY_MS);

        //Make the spout commit any acked tuples
        spout.nextTuple();

        //Verify that it only committed the message on the assigned partition
        verify(consumerMock, times(1)).commitSync(commitCapture.capture());

        Map<TopicPartition, OffsetAndMetadata> commitCaptureMap = commitCapture.getValue();
        assertThat(commitCaptureMap, hasKey(assignedPartition));
        assertThat(commitCaptureMap, not(hasKey(partitionThatWillBeRevoked)));
    }
}
@Test
public void shouldNotAdvanceTimeTest() {
    try (SimulatedTime t = new SimulatedTime()) {
        long current = Time.currentTimeMillis();
        Time.advanceTime(0);
        Assert.assertEquals(0, Time.deltaMs(current));
    }
}
@Test
public void deltaSecsConvertsToSecondsTest() {
    try (SimulatedTime t = new SimulatedTime()) {
        int current = Time.currentTimeSecs();
        Time.advanceTime(1000);
        Assert.assertEquals(1, Time.deltaSecs(current));
    }
}
@Test
public void testCannotContainMultipleSchedulesForId() {
    try (SimulatedTime time = new SimulatedTime()) {
        KafkaSpoutRetryExponentialBackoff retryService = createOneSecondWaitRetryService();
        long offset = 0;
        KafkaSpoutMessageId msgId = retryService.getMessageId(testTopic, offset);
        msgId.incrementNumFails();
        retryService.schedule(msgId);
        Time.advanceTime(500);
        //Schedule the same id again; this should replace the existing schedule, not add a second one
        retryService.schedule(msgId);

        retryService.remove(msgId);
        assertThat("The message should no longer be scheduled", retryService.isScheduled(msgId), is(false));
        Time.advanceTime(500);
        assertThat("The message should not be ready for retry because it isn't scheduled", retryService.isReady(msgId), is(false));
    }
}
@Test
public void deltaSecsTruncatesFractionalSecondsTest() {
    try (SimulatedTime t = new SimulatedTime()) {
        int current = Time.currentTimeSecs();
        Time.advanceTime(1500);
        Assert.assertEquals(1, Time.deltaSecs(current));
    }
}
@Test
public void testCanRescheduleRetry() {
    try (SimulatedTime time = new SimulatedTime()) {
        KafkaSpoutRetryExponentialBackoff retryService = createOneSecondWaitRetryService();
        long offset = 0;
        KafkaSpoutMessageId msgId = retryService.getMessageId(testTopic, offset);
        msgId.incrementNumFails();
        retryService.schedule(msgId);
        Time.advanceTime(500);
        boolean scheduled = retryService.schedule(msgId);

        assertThat("The service must be able to reschedule an already scheduled id", scheduled, is(true));
        Time.advanceTime(500);
        assertThat("The message should not be ready for retry yet since it was rescheduled", retryService.isReady(msgId), is(false));
        assertThat(retryService.isScheduled(msgId), is(true));
        assertThat(retryService.earliestRetriableOffsets(), is(Collections.emptyMap()));
        assertThat(retryService.readyMessageCount(), is(0));
        Time.advanceTime(500);
        assertThat("The message should be ready for retry once the full delay has passed", retryService.isReady(msgId), is(true));
        assertThat(retryService.isScheduled(msgId), is(true));
        assertThat(retryService.earliestRetriableOffsets(), is(Collections.singletonMap(testTopic, msgId.offset())));
        assertThat(retryService.readyMessageCount(), is(1));
    }
}
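//A plausible sketch of the createOneSecondWaitRetryService() helper referenced above.
//The real helper is not shown in this excerpt, so the exact arguments are an assumption;
//the KafkaSpoutRetryExponentialBackoff(initialDelay, delayPeriod, maxRetries, maxDelay)
//constructor and TimeInterval factories are real storm-kafka-client API. A zero delay
//period disables exponential growth, yielding a flat one-second wait per retry.
private KafkaSpoutRetryExponentialBackoff createOneSecondWaitRetryService() {
    return new KafkaSpoutRetryExponentialBackoff(
        KafkaSpoutRetryExponentialBackoff.TimeInterval.seconds(1), //initial delay (assumed)
        KafkaSpoutRetryExponentialBackoff.TimeInterval.seconds(0), //delay period: no exponential growth (assumed)
        1, //max retries (assumed)
        KafkaSpoutRetryExponentialBackoff.TimeInterval.seconds(1)); //max delay (assumed)
}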