/**
 * Asserts that {@code activeRuleIndexer.commitDeletionOfProfiles} was never invoked,
 * for any session or profile collection.
 */
private void verifyNoCallsActiveRuleIndexerDelete() {
  verify(activeRuleIndexer, never()).commitDeletionOfProfiles(any(DbSession.class), anyCollection());
}
/**
 * Builds a {@link KernelTransactions} backed by mocked locking and storage.
 *
 * @param testKernelTransactions whether to build the test subclass (delegated to the overload)
 * @param commitProcess the commit process handed to the built instance
 * @param firstReader returned by the first {@code storageEngine.newReader()} call
 * @param otherReaders returned, in order, by subsequent {@code newReader()} calls
 */
private static KernelTransactions newKernelTransactions( boolean testKernelTransactions, TransactionCommitProcess commitProcess,
        StorageReader firstReader, StorageReader... otherReaders ) throws Throwable
{
    // Locking is fully mocked; every newClient() call hands out the same mock client.
    Locks locks = mock( Locks.class );
    Locks.Client client = mock( Locks.Client.class );
    when( locks.newClient() ).thenReturn( client );

    StorageEngine storageEngine = mock( StorageEngine.class );
    when( storageEngine.newReader() ).thenReturn( firstReader, otherReaders );

    // createCommands() is stubbed to append one mocked command to the supplied target
    // collection, so committing transactions always have at least one command to apply.
    doAnswer( invocation ->
    {
        Collection<StorageCommand> argument = invocation.getArgument( 0 );
        argument.add( mock( StorageCommand.class ) );
        return null;
    } ).when( storageEngine ).createCommands(
            anyCollection(),
            any( ReadableTransactionState.class ),
            any( StorageReader.class ),
            any( ResourceLocker.class ),
            anyLong(),
            any( TxStateVisitor.Decorator.class ) );

    // Delegate to the overload that assembles the actual KernelTransactions instance.
    return newKernelTransactions( locks, storageEngine, commitProcess, testKernelTransactions );
}
/**
 * Shared fixture setup: spies the collections factory and stubs the mocked
 * header/store/storage-engine collaborators used by every test in this class.
 */
@Before
public void before() throws Exception
{
    collectionsFactory = Mockito.spy( new TestCollectionsFactory() );
    when( headerInformation.getAdditionalHeader() ).thenReturn( new byte[0] );
    when( headerInformationFactory.create() ).thenReturn( headerInformation );
    when( neoStores.getMetaDataStore() ).thenReturn( metaDataStore );
    when( storageEngine.newReader() ).thenReturn( readLayer );
    // createCommands() is stubbed to add one relationship-counts command to the target
    // collection, so each transaction produces something to commit.
    doAnswer( invocation -> ((Collection<StorageCommand>) invocation.getArgument(0) )
            .add( new Command.RelationshipCountsCommand( 1, 2,3, 4L ) ) )
            .when( storageEngine ).createCommands(
                    anyCollection(),
                    any( ReadableTransactionState.class ),
                    any( StorageReader.class ),
                    any( ResourceLocker.class ),
                    anyLong(),
                    any( TxStateVisitor.Decorator.class ) );
}
/**
 * Draining a single queued job must notify the queue monitor, handing it the
 * jobs drained from this connection.
 */
@Test
public void processNextBatchShouldNotifyQueueMonitorAboutDrain()
{
    BoltConnection boltConnection = newConnection();
    Job queuedJob = Jobs.noop();
    // Capture whatever jobs the monitor is told were drained.
    List<Job> observedJobs = new ArrayList<>();
    doAnswer( invocation -> observedJobs.addAll( invocation.getArgument( 1 ) ) )
            .when( queueMonitor ).drained( same( boltConnection ), anyCollection() );

    boltConnection.enqueue( queuedJob );
    boltConnection.processNextBatch();

    verify( queueMonitor ).drained( same( boltConnection ), anyCollection() );
    assertTrue( observedJobs.contains( queuedJob ) );
}
/**
 * Verifies that third-party JAX-RS packages configured under
 * {@code ServerSettings.third_party_packages} are registered with the web server
 * when the module starts.
 */
@Test
public void shouldReportThirdPartyPackagesAtSpecifiedMount() throws Exception
{
    // Given
    WebServer webServer = mock( WebServer.class );
    CommunityNeoServer neoServer = mock( CommunityNeoServer.class );
    when( neoServer.baseUri() ).thenReturn( new URI( "http://localhost:7575" ) );
    when( neoServer.getWebServer() ).thenReturn( webServer );
    Database database = mock( Database.class );
    when( neoServer.getDatabase() ).thenReturn( database );

    Config config = mock( Config.class );
    List<ThirdPartyJaxRsPackage> jaxRsPackages = new ArrayList<>();
    String path = "/third/party/package";
    jaxRsPackages.add( new ThirdPartyJaxRsPackage( "org.example.neo4j", path ) );
    when( config.get( ServerSettings.third_party_packages ) ).thenReturn( jaxRsPackages );

    // When
    ThirdPartyJAXRSModule module =
            new ThirdPartyJAXRSModule( webServer, config, NullLogProvider.getInstance(), neoServer );
    module.start();

    // Then
    // NOTE(review): the mount point is matched with anyString(), so the "specified mount"
    // in the test name is not actually asserted — consider eq( path ). Also, the raw
    // List.class matcher generates an unchecked warning.
    verify( webServer ).addJAXRSPackages( any( List.class ), anyString(), anyCollection() );
}
}
@Test void shouldCreateThreadLocalParts() throws ExecutionException, InterruptedException, IndexEntryConflictException { // given Thread mainThread = Thread.currentThread(); ConcurrentMap<Thread,NativeIndexPopulator> partPopulators = new ConcurrentHashMap<>(); ParallelNativeIndexPopulator<GenericKey,NativeIndexValue> populator = new ParallelNativeIndexPopulator<>( baseIndexFile, layout, mockPartSupplier( partPopulators, this::mockNativeIndexPopulator ) ); // when int batchCountPerThread = 10; applyBatchesInParallel( populator, batchCountPerThread ); // then assertEquals( THREADS, partPopulators.size() ); for ( Thread thread : partPopulators.keySet() ) { if ( thread != mainThread ) { NativeIndexPopulator partPopulator = partPopulators.get( thread ); verify( partPopulator, times( batchCountPerThread ) ).add( anyCollection() ); } } }
/**
 * With nothing queued and the connection still open, processing a batch is a
 * no-op: the queue monitor must not receive a drain notification.
 */
@Test
public void processNextBatchShouldDoNothingIfQueueIsEmptyAndConnectionNotClosed()
{
    BoltConnection boltConnection = newConnection();

    boltConnection.processNextBatch();

    verify( queueMonitor, never() ).drained( same( boltConnection ), anyCollection() );
}
/**
 * A connection created with max batch size 10 must drain at most 10 jobs per
 * {@code processNextBatch()} call, in FIFO order, notifying the monitor on each call.
 */
@Test
public void processNextBatchShouldDrainMaxBatchSizeItemsOnEachCall()
{
    List<Job> observedJobs = new ArrayList<>();
    List<Job> enqueuedJobs = new ArrayList<>();
    BoltConnection boltConnection = newConnection( 10 );
    doAnswer( invocation -> observedJobs.addAll( invocation.getArgument( 1 ) ) )
            .when( queueMonitor ).drained( same( boltConnection ), anyCollection() );

    // Queue 15 jobs: one full batch plus a 5-job remainder.
    for ( int i = 0; i < 15; i++ )
    {
        Job queuedJob = Jobs.noop();
        enqueuedJobs.add( queuedJob );
        boltConnection.enqueue( queuedJob );
    }

    // First call drains exactly the first 10 jobs.
    boltConnection.processNextBatch();
    verify( queueMonitor ).drained( same( boltConnection ), anyCollection() );
    assertEquals( 10, observedJobs.size() );
    assertTrue( observedJobs.containsAll( enqueuedJobs.subList( 0, 10 ) ) );

    // Second call drains the remaining 5.
    observedJobs.clear();
    boltConnection.processNextBatch();
    verify( queueMonitor, times( 2 ) ).drained( same( boltConnection ), anyCollection() );
    assertEquals( 5, observedJobs.size() );
    assertTrue( observedJobs.containsAll( enqueuedJobs.subList( 10, 15 ) ) );
}
/**
 * When a {@link GrantedAuthoritiesMapper} is configured on the provider, the
 * authorities on the resulting authentication must be the mapped ones, not the
 * OIDC user's raw authorities.
 */
@Test
public void authenticateWhenAuthoritiesMapperSetThenReturnMappedAuthorities() {
    // Minimal ID token claims for the stubbed token set up via setUpIdToken().
    Map<String, Object> claims = new HashMap<>();
    claims.put(IdTokenClaimNames.ISS, "https://provider.com");
    claims.put(IdTokenClaimNames.SUB, "subject1");
    claims.put(IdTokenClaimNames.AUD, Arrays.asList("client1", "client2"));
    claims.put(IdTokenClaimNames.AZP, "client1");
    this.setUpIdToken(claims);

    OidcUser principal = mock(OidcUser.class);
    List<GrantedAuthority> authorities = AuthorityUtils.createAuthorityList("ROLE_USER");
    // thenAnswer with an explicit Answer type — presumably to satisfy the generic
    // return of getAuthorities(); confirm before simplifying to thenReturn.
    when(principal.getAuthorities()).thenAnswer(
            (Answer<List<GrantedAuthority>>) invocation -> authorities);
    when(this.userService.loadUser(any())).thenReturn(principal);

    // Mapper swaps ROLE_USER for ROLE_OIDC_USER.
    List<GrantedAuthority> mappedAuthorities = AuthorityUtils.createAuthorityList("ROLE_OIDC_USER");
    GrantedAuthoritiesMapper authoritiesMapper = mock(GrantedAuthoritiesMapper.class);
    when(authoritiesMapper.mapAuthorities(anyCollection())).thenAnswer(
            (Answer<List<GrantedAuthority>>) invocation -> mappedAuthorities);
    this.authenticationProvider.setAuthoritiesMapper(authoritiesMapper);

    OAuth2LoginAuthenticationToken authentication =
            (OAuth2LoginAuthenticationToken) this.authenticationProvider.authenticate(
                    new OAuth2LoginAuthenticationToken(this.clientRegistration, this.authorizationExchange));

    assertThat(authentication.getAuthorities()).isEqualTo(mappedAuthorities);
}
/**
 * A configured {@link GrantedAuthoritiesMapper} must determine the authorities
 * on the authentication result, replacing the OAuth2 user's raw authorities.
 */
@Test
public void authenticateWhenAuthoritiesMapperSetThenReturnMappedAuthorities() {
    OAuth2AccessTokenResponse tokenResponse = this.accessTokenSuccessResponse();
    when(this.accessTokenResponseClient.getTokenResponse(any())).thenReturn(tokenResponse);

    List<GrantedAuthority> rawAuthorities = AuthorityUtils.createAuthorityList("ROLE_USER");
    OAuth2User loadedUser = mock(OAuth2User.class);
    when(loadedUser.getAuthorities()).thenAnswer(
            (Answer<List<GrantedAuthority>>) invocation -> rawAuthorities);
    when(this.userService.loadUser(any())).thenReturn(loadedUser);

    // Mapper swaps ROLE_USER for ROLE_OAUTH2_USER.
    List<GrantedAuthority> mappedAuthorities = AuthorityUtils.createAuthorityList("ROLE_OAUTH2_USER");
    GrantedAuthoritiesMapper authoritiesMapper = mock(GrantedAuthoritiesMapper.class);
    when(authoritiesMapper.mapAuthorities(anyCollection())).thenAnswer(
            (Answer<List<GrantedAuthority>>) invocation -> mappedAuthorities);
    this.authenticationProvider.setAuthoritiesMapper(authoritiesMapper);

    OAuth2LoginAuthenticationToken authentication =
            (OAuth2LoginAuthenticationToken) this.authenticationProvider.authenticate(
                    new OAuth2LoginAuthenticationToken(this.clientRegistration, this.authorizationExchange));

    assertThat(authentication.getAuthorities()).isEqualTo(mappedAuthorities);
}
} ), anyString(), anyCollection() );
/**
 * start() must install the remote plugins and load them, exposing info and
 * instances by key; stop() must unload whatever was loaded.
 */
@Test
public void install_and_load_plugins() {
    PluginInfo squidInfo = new PluginInfo("squid");
    ImmutableMap<String, ScannerPlugin> remotePlugins =
        ImmutableMap.of("squid", new ScannerPlugin("squid", 1L, squidInfo));
    Plugin squidInstance = mock(Plugin.class);
    when(installer.installRemotes()).thenReturn(remotePlugins);
    when(loader.load(anyMap())).thenReturn(ImmutableMap.of("squid", squidInstance));

    underTest.start();

    assertThat(underTest.getPluginInfos()).containsOnly(squidInfo);
    assertThat(underTest.getPluginsByKey()).isEqualTo(remotePlugins);
    assertThat(underTest.getPluginInfo("squid")).isSameAs(squidInfo);
    assertThat(underTest.getPluginInstance("squid")).isSameAs(squidInstance);

    underTest.stop();
    verify(loader).unload(anyCollection());
}
/**
 * Executing a statement through a transaction handle must run the query on the
 * execution engine and drive the serializer in exactly this order: commit URI,
 * statement result, notifications, transaction status, (empty) errors, finish.
 */
@Test
public void shouldExecuteStatements() throws Exception
{
    // given
    TransitionalPeriodTransactionMessContainer kernel = mockKernel();
    QueryExecutionEngine executionEngine = mock( QueryExecutionEngine.class );
    Result executionResult = mock( Result.class );
    TransactionalContext transactionalContext = prepareKernelWithQuerySession( kernel );
    when( executionEngine.executeQuery( "query", NO_PARAMS, transactionalContext ) ).thenReturn( executionResult );
    TransactionRegistry registry = mock( TransactionRegistry.class );
    when( registry.begin( any( TransactionHandle.class ) ) ).thenReturn( 1337L );
    TransactionHandle handle = getTransactionHandle( kernel, executionEngine, registry );
    ExecutionResultSerializer output = mock( ExecutionResultSerializer.class );

    // when
    handle.execute( statements( new Statement( "query", map(), false, (ResultDataContent[]) null ) ), output,
            mock( HttpServletRequest.class ) );

    // then
    verify( executionEngine ).executeQuery( "query", NO_PARAMS, transactionalContext );

    // The serializer must be driven in this exact order, with no extra interactions.
    InOrder outputOrder = inOrder( output );
    outputOrder.verify( output ).transactionCommitUri( uriScheme.txCommitUri( 1337 ) );
    outputOrder.verify( output ).statementResult( executionResult, false, (ResultDataContent[])null );
    outputOrder.verify( output ).notifications( anyCollection() );
    outputOrder.verify( output ).transactionStatus( anyLong() );
    outputOrder.verify( output ).errors( argThat( hasNoErrors() ) );
    outputOrder.verify( output ).finish();
    verifyNoMoreInteractions( output );
}
/**
 * After statements execute, the still-thread-bound transaction must first be
 * suspended and only then released back to the registry (so other requests can
 * pick it up), and the serializer must be driven in the standard order.
 */
@Test
public void shouldSuspendTransactionAndReleaseForOtherRequestsAfterExecutingStatements() throws Exception
{
    // given
    TransitionalPeriodTransactionMessContainer kernel = mockKernel();
    TransitionalTxManagementKernelTransaction transactionContext = kernel.newTransaction( explicit, AUTH_DISABLED, -1 );
    TransactionRegistry registry = mock( TransactionRegistry.class );
    QueryExecutionEngine executionEngine = mock( QueryExecutionEngine.class );
    TransactionalContext transactionalContext = prepareKernelWithQuerySession( kernel );
    Result executionResult = mock( Result.class );
    when( executionEngine.executeQuery( "query", NO_PARAMS, transactionalContext ) ).thenReturn( executionResult );
    when( registry.begin( any( TransactionHandle.class ) ) ).thenReturn( 1337L );
    TransactionHandle handle = getTransactionHandle( kernel, executionEngine, registry );
    ExecutionResultSerializer output = mock( ExecutionResultSerializer.class );

    // when
    handle.execute( statements( new Statement( "query", map(), false, (ResultDataContent[]) null ) ), output,
            mock( HttpServletRequest.class ) );

    // then: suspend strictly before release, so the registry never hands out a
    // transaction that is still bound to this thread.
    InOrder transactionOrder = inOrder( transactionContext, registry );
    transactionOrder.verify( transactionContext ).suspendSinceTransactionsAreStillThreadBound();
    transactionOrder.verify( registry ).release( 1337L, handle );

    InOrder outputOrder = inOrder( output );
    outputOrder.verify( output ).transactionCommitUri( uriScheme.txCommitUri( 1337 ) );
    outputOrder.verify( output ).statementResult( executionResult, false, (ResultDataContent[])null );
    outputOrder.verify( output ).notifications( anyCollection() );
    outputOrder.verify( output ).transactionStatus( anyLong() );
    outputOrder.verify( output ).errors( argThat( hasNoErrors() ) );
    outputOrder.verify( output ).finish();
    verifyNoMoreInteractions( output );
}
/**
 * Interleaves parallel batch insertion with single-threaded external updates:
 * each worker thread's part populator must receive all three rounds of its
 * batches, and each part's updater must have been fed every external update.
 */
@Test
void shouldApplyUpdatesOnEachPart() throws ExecutionException, InterruptedException, IndexEntryConflictException
{
    // given
    Thread mainThread = Thread.currentThread();
    ConcurrentMap<Thread,NativeIndexPopulator> partPopulators = new ConcurrentHashMap<>();
    ParallelNativeIndexPopulator<GenericKey,NativeIndexValue> populator = new ParallelNativeIndexPopulator<>(
            baseIndexFile, layout, mockPartSupplier( partPopulators, this::mockNativeIndexPopulator ) );
    int batchCountPerThread = 10;

    // when: updates are interleaved between batch rounds so they hit already-created parts
    applyBatchesInParallel( populator, batchCountPerThread );
    applyUpdates( populator, next );
    applyBatchesInParallel( populator, batchCountPerThread );
    applyUpdates( populator, next );
    applyBatchesInParallel( populator, batchCountPerThread );

    // then
    assertEquals( THREADS, partPopulators.size() );
    for ( Thread thread : partPopulators.keySet() )
    {
        // The main thread only applied updates, never batches.
        if ( thread != mainThread )
        {
            NativeIndexPopulator partPopulator = partPopulators.get( thread );
            // Three rounds of batchCountPerThread batches per worker thread.
            verify( partPopulator, times( batchCountPerThread * 3 ) ).add( anyCollection() );
            CountingIndexUpdater updater = (CountingIndexUpdater) partPopulator.newPopulatingUpdater();
            // NOTE(review): 10 presumably equals the total updates produced by the two
            // applyUpdates() calls — confirm against applyUpdates/next.
            assertEquals( 10, updater.count );
        }
    }
}
/**
 * Populating a node index over schema (label 0, property 0) from a four-node
 * store: only the two nodes matching both the FIRST label and the "name"
 * property are sampled and added, then the populator is sampled and closed.
 */
@Test
public void shouldPopulateIndexWithASmallDataset() throws Exception
{
    // GIVEN: two matching nodes, one with the wrong label, one missing the property
    String value = "Mattias";
    long node1 = createNode( map( name, value ), FIRST );
    createNode( map( name, value ), SECOND );
    createNode( map( age, 31 ), FIRST );
    long node4 = createNode( map( age, 35, name, value ), FIRST );
    IndexPopulator populator = spy( indexPopulator( false ) );
    LabelSchemaDescriptor descriptor = SchemaDescriptorFactory.forLabel( 0, 0 );
    IndexPopulationJob job = newIndexPopulationJob( populator, new FlippableIndexProxy(), EntityType.NODE,
            IndexDescriptorFactory.forSchema( descriptor ) );

    // WHEN
    job.run();

    // THEN: exactly the two matching nodes were sampled, delivered in two add() batches
    IndexEntryUpdate<?> update1 = add( node1, descriptor, Values.of( value ) );
    IndexEntryUpdate<?> update2 = add( node4, descriptor, Values.of( value ) );

    verify( populator ).create();
    verify( populator ).includeSample( update1 );
    verify( populator ).includeSample( update2 );
    verify( populator, times( 2 ) ).add( anyCollection() );
    verify( populator ).sampleResult();
    // close( true ) == successful population
    verify( populator ).close( true );
}
/**
 * Relationship-index flavour of the small-dataset population: of four created
 * relationships, only the two matching the indexed relationship type and
 * carrying the "name" property are sampled and added, then the populator is
 * sampled and closed successfully.
 */
@Test
public void shouldPopulateRelatonshipIndexWithASmallDataset() throws Exception
{
    // GIVEN: rel1 and rel4 are expected to match; the other two are not (see THEN)
    String value = "Philip J.Fry";
    long node1 = createNode( map( name, value ), FIRST );
    long node2 = createNode( map( name, value ), SECOND );
    long node3 = createNode( map( age, 31 ), FIRST );
    long node4 = createNode( map( age, 35, name, value ), FIRST );
    long rel1 = createRelationship( map( name, value ), likes, node1, node3 );
    createRelationship( map( name, value ), knows, node3, node1 );
    createRelationship( map( age, 31 ), likes, node2, node1 );
    long rel4 = createRelationship( map( age, 35, name, value ), likes, node4, node4 );

    IndexDescriptor descriptor = IndexDescriptorFactory.forSchema( SchemaDescriptorFactory.forRelType( 0, 0 ) );
    IndexPopulator populator = spy( indexPopulator( descriptor ) );
    IndexPopulationJob job = newIndexPopulationJob( populator, new FlippableIndexProxy(), EntityType.RELATIONSHIP,
            descriptor );

    // WHEN
    job.run();

    // THEN: exactly rel1 and rel4 were sampled, delivered in two add() batches
    IndexEntryUpdate<?> update1 = add( rel1, descriptor, Values.of( value ) );
    IndexEntryUpdate<?> update2 = add( rel4, descriptor, Values.of( value ) );

    verify( populator ).create();
    verify( populator ).includeSample( update1 );
    verify( populator ).includeSample( update2 );
    verify( populator, times( 2 ) ).add( anyCollection() );
    verify( populator ).sampleResult();
    // close( true ) == successful population
    verify( populator ).close( true );
}
/**
 * A user granted edit permission on a quality profile may deactivate a rule on
 * it; the request must reach QProfileRules.deactivateAndCommit.
 */
@Test
public void as_qprofile_editor() {
    UserDto editor = db.users().insertUser();
    QProfileDto profile = db.qualityProfiles().insert(organization);
    db.qualityProfiles().addUserPermission(profile, editor);
    userSession.logIn(editor);
    RuleKey rule = RuleTesting.randomRuleKey();
    db.rules().insert(rule);

    ws.newRequest()
      .setMethod("POST")
      .setParam(PARAM_ORGANIZATION, organization.getKey())
      .setParam(PARAM_RULE, rule.toString())
      .setParam(PARAM_KEY, profile.getKee())
      .execute();

    verify(qProfileRules).deactivateAndCommit(any(DbSession.class), any(QProfileDto.class), anyCollection());
}
/**
 * A user granted edit permission on a quality profile may activate a rule on it
 * with severity and parameter overrides; the call must reach
 * QProfileRules.activateAndCommit.
 */
@Test
public void as_qprofile_editor() {
    UserDto editor = db.users().insertUser();
    QProfileDto profile = db.qualityProfiles().insert(organization);
    db.qualityProfiles().addUserPermission(profile, editor);
    userSession.logIn(editor);
    RuleKey rule = RuleTesting.randomRuleKey();
    db.rules().insert(rule);

    ws.newRequest()
      .setMethod("POST")
      .setParam(PARAM_RULE, rule.toString())
      .setParam(PARAM_KEY, profile.getKee())
      .setParam("severity", "BLOCKER")
      .setParam("params", "key1=v1;key2=v2")
      .setParam("reset", "false")
      .execute();

    verify(qProfileRules).activateAndCommit(any(DbSession.class), any(QProfileDto.class), anyCollection());
}
}