private Filter newFirstSnapshotInListFilter() {
    Filter filter1 = mock(Filter.class);
    when(filter1.filter(anyList())).thenAnswer(invocation ->
        Collections.singletonList(((List) invocation.getArguments()[0]).iterator().next()));
    return filter1;
}
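// An alternative sketch of the same stub (an assumption, not the original code; the
// method name is hypothetical): Mockito 2's typed InvocationOnMock.getArgument(int)
// avoids the raw List cast used above.
private Filter newFirstSnapshotInListFilterTyped() {
    Filter filter = mock(Filter.class);
    when(filter.filter(anyList())).thenAnswer(invocation -> {
        // Typed access to the first argument; no raw-type cast needed
        List<?> snapshots = invocation.getArgument(0);
        return Collections.singletonList(snapshots.iterator().next());
    });
    return filter;
}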
private KafkaSpoutMessageId emitOne() {
    ArgumentCaptor<KafkaSpoutMessageId> messageId = ArgumentCaptor.forClass(KafkaSpoutMessageId.class);
    spout.nextTuple();
    verify(collector).emit(anyString(), anyList(), messageId.capture());
    clearInvocations(collector);
    return messageId.getValue();
}
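// A minimal usage sketch (hypothetical test; assumes the spout and collector mocks
// that emitOne() relies on are already set up): emit a single tuple, then ack it by
// its captured message id.
@Test
public void exampleAcksTheSingleEmittedTuple() {
    KafkaSpoutMessageId messageId = emitOne();
    spout.ack(messageId);
}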
@Test(expected = NotFoundException.class)
public void readAclByIdMissingAcl() {
    Map<ObjectIdentity, Acl> result = new HashMap<>();
    when(lookupStrategy.readAclsById(anyList(), anyList())).thenReturn(result);
    ObjectIdentity objectIdentity = new ObjectIdentityImpl(Object.class, 1);
    List<Sid> sids = Arrays.<Sid>asList(new PrincipalSid("user"));

    aclService.readAclById(objectIdentity, sids);
}
@Test
public void testNextTupleEmitsAtMostOneTuple() {
    // The spout should emit at most one message per call to nextTuple.
    // This is necessary for Storm to be able to throttle the spout according to maxSpoutPending.
    KafkaSpout<String, String> spout = SpoutWithMockedConsumerSetupHelper.setupSpout(spoutConfig, conf, contextMock, collectorMock, consumerMock, partition);
    Map<TopicPartition, List<ConsumerRecord<String, String>>> records = new HashMap<>();
    records.put(partition, SpoutWithMockedConsumerSetupHelper.createRecords(partition, 0, 10));
    when(consumerMock.poll(anyLong()))
        .thenReturn(new ConsumerRecords<>(records));

    spout.nextTuple();

    verify(collectorMock, times(1)).emit(anyString(), anyList(), any(KafkaSpoutMessageId.class));
}
@Test
public void shouldNotifyPluginSettingsChange() throws Exception {
    String supportedVersion = "4.0";
    Map<String, String> settings = Collections.singletonMap("foo", "bar");
    ArgumentCaptor<GoPluginApiRequest> requestArgumentCaptor = ArgumentCaptor.forClass(GoPluginApiRequest.class);

    when(pluginManager.resolveExtensionVersion(eq("pluginId"), eq(NOTIFICATION_EXTENSION), anyList())).thenReturn(supportedVersion);
    when(pluginManager.isPluginOfType(NOTIFICATION_EXTENSION, "pluginId")).thenReturn(true);
    when(pluginManager.submitTo(eq("pluginId"), eq(NOTIFICATION_EXTENSION), requestArgumentCaptor.capture())).thenReturn(new DefaultGoPluginApiResponse(SUCCESS_RESPONSE_CODE, ""));

    NotificationExtension extension = new NotificationExtension(pluginManager);
    extension.notifyPluginSettingsChange("pluginId", settings);

    assertRequest(requestArgumentCaptor.getValue(), NOTIFICATION_EXTENSION, supportedVersion, REQUEST_NOTIFY_PLUGIN_SETTINGS_CHANGE, "{\"foo\":\"bar\"}");
}
@Test
public void shouldNotifyPluginSettingsChange() throws Exception {
    String supportedVersion = "3.0";
    Map<String, String> settings = Collections.singletonMap("foo", "bar");
    ArgumentCaptor<GoPluginApiRequest> requestArgumentCaptor = ArgumentCaptor.forClass(GoPluginApiRequest.class);

    when(pluginManager.resolveExtensionVersion(eq("pluginId"), eq(NOTIFICATION_EXTENSION), anyList())).thenReturn(supportedVersion);
    when(pluginManager.isPluginOfType(NOTIFICATION_EXTENSION, "pluginId")).thenReturn(true);
    when(pluginManager.submitTo(eq("pluginId"), eq(NOTIFICATION_EXTENSION), requestArgumentCaptor.capture())).thenReturn(new DefaultGoPluginApiResponse(SUCCESS_RESPONSE_CODE, ""));

    NotificationExtension extension = new NotificationExtension(pluginManager);
    extension.notifyPluginSettingsChange("pluginId", settings);

    assertRequest(requestArgumentCaptor.getValue(), NOTIFICATION_EXTENSION, supportedVersion, REQUEST_NOTIFY_PLUGIN_SETTINGS_CHANGE, "{\"foo\":\"bar\"}");
}
@Test
public void testSpoutMustRefreshPartitionsEvenIfNotPolling() throws Exception {
    SingleTopicKafkaUnitSetupHelper.initializeSpout(spout, conf, topologyContext, collectorMock);

    // Nothing is assigned yet, should emit nothing
    spout.nextTuple();
    verify(collectorMock, never()).emit(anyString(), anyList(), any(KafkaSpoutMessageId.class));

    SingleTopicKafkaUnitSetupHelper.populateTopicData(kafkaUnitExtension.getKafkaUnit(), SingleTopicKafkaSpoutConfiguration.TOPIC, 1);
    Time.advanceTime(KafkaSpoutConfig.DEFAULT_PARTITION_REFRESH_PERIOD_MS + KafkaSpout.TIMER_DELAY_MS);

    // The new partition should be discovered and the message should be emitted
    spout.nextTuple();
    verify(collectorMock).emit(anyString(), anyList(), any(KafkaSpoutMessageId.class));
}
@Test
public void shouldNotCreateStatement() {
    TokenQueue tokenQueue = spy(new TokenQueue(Arrays.asList(new Token("a", 1, 1))));
    TokenMatcher matcher = spy(new AnyTokenMatcher());
    StatementChannel channel = StatementChannel.create(matcher);
    List<Statement> output = mock(List.class);

    assertThat(channel.consume(tokenQueue, output), is(true));

    verify(matcher).matchToken(eq(tokenQueue), anyList());
    verifyNoMoreInteractions(matcher);
    verify(output).add(any());
    verifyNoMoreInteractions(output);
}
private void doTestModeCannotReplayTuples(KafkaSpoutConfig<String, String> spoutConfig) {
    KafkaSpout<String, String> spout = SpoutWithMockedConsumerSetupHelper.setupSpout(spoutConfig, conf, contextMock, collectorMock, consumerMock, partition);
    when(consumerMock.poll(anyLong())).thenReturn(new ConsumerRecords<>(Collections.singletonMap(partition,
        SpoutWithMockedConsumerSetupHelper.createRecords(partition, 0, 1))));

    spout.nextTuple();

    ArgumentCaptor<KafkaSpoutMessageId> msgIdCaptor = ArgumentCaptor.forClass(KafkaSpoutMessageId.class);
    verify(collectorMock).emit(eq(SingleTopicKafkaSpoutConfiguration.STREAM), anyList(), msgIdCaptor.capture());
    assertThat("Should have captured a message id", msgIdCaptor.getValue(), not(nullValue()));

    spout.fail(msgIdCaptor.getValue());

    reset(consumerMock);
    when(consumerMock.poll(anyLong())).thenReturn(new ConsumerRecords<>(Collections.singletonMap(partition,
        SpoutWithMockedConsumerSetupHelper.createRecords(partition, 1, 1))));

    spout.nextTuple();

    // The consumer should not seek to retry the failed tuple; it should just continue from the current position
    verify(consumerMock, never()).seek(eq(partition), anyLong());
}
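// Hypothetical callers (the test names and the createSpoutConfig helper are
// assumptions, not the original test class's code): the helper above is shared by
// the processing guarantees that skip tuple tracking, differing only in the
// KafkaSpoutConfig.ProcessingGuarantee they configure.
@Test
public void testAtMostOnceModeCannotReplayTuples() {
    doTestModeCannotReplayTuples(createSpoutConfig(ProcessingGuarantee.AT_MOST_ONCE));
}

@Test
public void testNoGuaranteeModeCannotReplayTuples() {
    doTestModeCannotReplayTuples(createSpoutConfig(ProcessingGuarantee.NO_GUARANTEE));
}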
@SuppressWarnings( "unchecked" ) @Test public void shouldRegisterAtRootByDefault() throws Exception { WebServer webServer = mock( WebServer.class ); Config config = mock( Config.class ); CommunityNeoServer neoServer = mock( CommunityNeoServer.class ); when( neoServer.baseUri() ).thenReturn( new URI( "http://localhost:7575" ) ); when( neoServer.getWebServer() ).thenReturn( webServer ); when( config.get( GraphDatabaseSettings.auth_enabled ) ).thenReturn( true ); DBMSModule module = new DBMSModule( webServer, config, () -> new DiscoverableURIs.Builder().build() ); module.start(); verify( webServer ).addJAXRSClasses( anyList(), anyString(), isNull() ); }
@Test
public void shouldRunThePriorCommandsWithTerminateCorrectly() {
    // Given:
    when(command3.getStatement()).thenReturn(TerminateCluster.TERMINATE_CLUSTER_STATEMENT_TEXT);

    // When:
    commandRunner.processPriorCommands();

    // Then:
    verify(ksqlEngine).stopAcceptingStatements();
    verify(commandStore).close();
    verify(clusterTerminator).terminateCluster(anyList());
    verify(statementExecutor, never()).handleRestore(any());
}
@Test
public void should_register_filters_if_master_filter_is_up() throws ServletException {
    MasterServletFilter.INSTANCE = mock(MasterServletFilter.class);

    new RegisterServletFilters(new ServletFilter[2]).start();

    verify(MasterServletFilter.INSTANCE).initFilters(anyList());
}
@Test
public void filters_should_be_optional() throws ServletException {
    MasterServletFilter.INSTANCE = mock(MasterServletFilter.class);

    new RegisterServletFilters().start(); // do not fail

    verify(MasterServletFilter.INSTANCE).initFilters(anyList());
}
private void doTestModeDisregardsMaxUncommittedOffsets(KafkaSpoutConfig<String, String> spoutConfig) {
    KafkaSpout<String, String> spout = SpoutWithMockedConsumerSetupHelper.setupSpout(spoutConfig, conf, contextMock, collectorMock, consumerMock, partition);
    when(consumerMock.poll(anyLong()))
        .thenReturn(new ConsumerRecords<>(Collections.singletonMap(partition,
            SpoutWithMockedConsumerSetupHelper.createRecords(partition, 0, spoutConfig.getMaxUncommittedOffsets()))))
        .thenReturn(new ConsumerRecords<>(Collections.singletonMap(partition,
            SpoutWithMockedConsumerSetupHelper.createRecords(partition, spoutConfig.getMaxUncommittedOffsets() - 1, spoutConfig.getMaxUncommittedOffsets()))));

    for (int i = 0; i < spoutConfig.getMaxUncommittedOffsets() * 2; i++) {
        spout.nextTuple();
    }

    verify(consumerMock, times(2)).poll(anyLong());
    verify(collectorMock, times(spoutConfig.getMaxUncommittedOffsets() * 2)).emit(eq(SingleTopicKafkaSpoutConfiguration.STREAM), anyList());
}
@Test
public void testEmitNewBatchWithNullMetaUncommittedLatest() {
    // Check that null meta makes the spout seek to LATEST, and that the returned meta is correct
    Map<String, Object> batchMeta = doEmitNewBatchTest(FirstPollOffsetStrategy.UNCOMMITTED_LATEST, collectorMock, partition, null);

    verify(collectorMock, never()).emit(anyList());
    KafkaTridentSpoutBatchMetadata deserializedMeta = KafkaTridentSpoutBatchMetadata.fromMap(batchMeta);
    assertThat("The batch should start at the first offset of the polled records", deserializedMeta.getFirstOffset(), is(lastOffsetInKafka));
    assertThat("The batch should end at the last offset of the polled messages", deserializedMeta.getLastOffset(), is(lastOffsetInKafka));
}
@Test
public void shouldNotOnPermissionsDenied_whenNegativeButtonWithoutPermissionCallbacks() {
    RationaleDialogClickListener listener = new RationaleDialogClickListener(dialogFragment, dialogConfig, null, rationaleCallbacks);

    listener.onClick(dialogInterface, Dialog.BUTTON_NEGATIVE);

    verify(permissionCallbacks, never()).onPermissionsDenied(anyInt(), ArgumentMatchers.<String>anyList());
}
@Test
public void testShouldCommitAllMessagesIfNotSetToEmitNullTuples() throws Exception {
    final int messageCount = 10;
    prepareSpout(messageCount);

    // All null tuples should be committed, meaning they were considered to be emitted and acked
    for (int i = 0; i < messageCount; i++) {
        spout.nextTuple();
    }
    verify(collectorMock, never()).emit(anyString(), anyList(), any());

    Time.advanceTime(commitOffsetPeriodMs + KafkaSpout.TIMER_DELAY_MS);
    // Commit offsets
    spout.nextTuple();

    verifyAllMessagesCommitted(messageCount);
}
@Test
public void testOnePortAndSearchArchivedIsTrueAndFileOffsetIs1AndByteOffsetIs100() throws IOException {
    LogviewerLogSearchHandler handler = getStubbedSearchHandler();

    handler.deepSearchLogsForTopology("", null, "search", "20", "6700", "1", "100", true, null, null);

    verify(handler, times(1)).findNMatches(anyList(), anyInt(), anyInt(), anyInt(), anyString());
    verify(handler, times(2)).logsForPort(isNull(), any(File.class));
}
@Test
public void testReEmitBatchForOldTopologyWhenIgnoringCommittedOffsets() {
    // In some cases users will want to drop retrying old batches, e.g. if the topology should start over from scratch.
    // If the FirstPollOffsetStrategy ignores committed offsets, we should not retry batches for old topologies.
    // The batch retry should be skipped entirely.
    KafkaTridentSpoutBatchMetadata batchMeta = new KafkaTridentSpoutBatchMetadata(firstOffsetInKafka, lastOffsetInKafka, "a new storm id");
    KafkaTridentSpoutEmitter<String, String> emitter = createEmitter(FirstPollOffsetStrategy.EARLIEST);
    TransactionAttempt txid = new TransactionAttempt(10L, 0);
    KafkaTridentSpoutTopicPartition kttp = new KafkaTridentSpoutTopicPartition(partition);

    emitter.reEmitPartitionBatch(txid, collectorMock, kttp, batchMeta.toMap());

    verify(collectorMock, never()).emit(anyList());
}
@Test
public void testLatestStrategyWhenTopologyIsRedeployed() {
    // LATEST should be applied if the emitter is new and the topology has been redeployed (storm id has changed)
    long preRestartEmittedOffset = 20;
    int preRestartEmittedRecords = 10;
    KafkaTridentSpoutBatchMetadata preExecutorRestartLastMeta = new KafkaTridentSpoutBatchMetadata(preRestartEmittedOffset, preRestartEmittedOffset + preRestartEmittedRecords - 1, "Some older topology");
    KafkaTridentSpoutEmitter<String, String> emitter = createEmitter(FirstPollOffsetStrategy.LATEST);
    TransactionAttempt txid = new TransactionAttempt(0L, 0);
    KafkaTridentSpoutTopicPartition kttp = new KafkaTridentSpoutTopicPartition(partition);

    Map<String, Object> meta = emitter.emitPartitionBatchNew(txid, collectorMock, kttp, preExecutorRestartLastMeta.toMap());

    verify(collectorMock, never()).emit(anyList());
}