// Verifies that KernelDiagnostics.StoreFiles dumps disk-usage numbers for the store directory.
@Test
public void shouldPrintDiskUsage()
{
    // Not sure how to get around this w/o spying. The method that we're unit testing will construct
    // other File instances with this guy as parent and internally the File constructor uses the field 'path'
    // which, if purely mocked, won't be assigned. At the same time we want to control the total/free space methods
    // and what they return... a tough one.
    File storeDir = Mockito.spy( new File( "storeDir" ) );
    DatabaseLayout layout = mock( DatabaseLayout.class );
    when( layout.databaseDirectory() ).thenReturn( storeDir );
    when( storeDir.getTotalSpace() ).thenReturn( 100L );
    when( storeDir.getFreeSpace() ).thenReturn( 40L );

    AssertableLogProvider logProvider = new AssertableLogProvider();
    KernelDiagnostics.StoreFiles storeFiles = new KernelDiagnostics.StoreFiles( layout );
    storeFiles.dump( logProvider.getLog( getClass() ).debugLogger() );

    // NOTE(review): "100 / 40 / 40" presumably renders total / free / usable bytes — confirm against
    // KernelDiagnostics.StoreFiles.dump.
    logProvider.assertContainsMessageContaining( "100 / 40 / 40" );
}
// Ensures a request with a well-formed but invalid basic-auth header is rejected: the filter chain is
// never invoked, a failed-attempt warning is logged, and a 401 JSON error body is written.
@Test
public void shouldNotAuthorizeInvalidCredentials() throws Exception
{
    // Given
    final AuthorizationEnabledFilter filter = new AuthorizationEnabledFilter( () -> authManager, logProvider );
    String credentials = Base64.encodeBase64String( "foo:bar".getBytes( StandardCharsets.UTF_8 ) );
    BasicLoginContext loginContext = mock( BasicLoginContext.class );
    AuthSubject authSubject = mock( AuthSubject.class );
    when( servletRequest.getMethod() ).thenReturn( "GET" );
    when( servletRequest.getContextPath() ).thenReturn( "/db/data" );
    when( servletRequest.getHeader( HttpHeaders.AUTHORIZATION ) ).thenReturn( "BASIC " + credentials );
    when( servletRequest.getRemoteAddr() ).thenReturn( "remote_ip_address" );
    when( authManager.login( authTokenArgumentMatcher( authToken( "foo", "bar" ) ) ) ).thenReturn( loginContext );
    when( loginContext.subject() ).thenReturn( authSubject );
    // The login itself succeeds structurally, but authentication is reported as FAILURE.
    when( authSubject.getAuthenticationResult() ).thenReturn( AuthenticationResult.FAILURE );

    // When
    filter.doFilter( servletRequest, servletResponse, filterChain );

    // Then
    verifyNoMoreInteractions( filterChain );
    logProvider.assertExactly( inLog( AuthorizationEnabledFilter.class )
            .warn( "Failed authentication attempt for '%s' from %s", "foo", "remote_ip_address" ) );
    verify( servletResponse ).setStatus( 401 );
    verify( servletResponse ).addHeader( HttpHeaders.CONTENT_TYPE, "application/json; charset=UTF-8" );
    assertThat( outputStream.toString( StandardCharsets.UTF_8.name() ),
            containsString( "\"code\" : \"Neo.ClientError.Security.Unauthorized\"" ) );
    assertThat( outputStream.toString( StandardCharsets.UTF_8.name() ),
            containsString( "\"message\" : \"Invalid username or password.\"" ) );
}
// Verifies that panicking a healthy DatabaseHealth logs an error message with the triggering
// exception instance attached as the cause.
@Test
public void shouldLogDatabasePanicEvent()
{
    // GIVEN
    AssertableLogProvider logProvider = new AssertableLogProvider();
    DatabaseHealth databaseHealth = new DatabaseHealth( mock( DatabasePanicEventGenerator.class ),
            logProvider.getLog( DatabaseHealth.class ) );
    // Start from a healthy state so the panic transition is actually exercised.
    databaseHealth.healed();

    // WHEN
    String message = "Listen everybody... panic!";
    Exception exception = new Exception( message );
    databaseHealth.panic( exception );

    // THEN
    logProvider.assertAtLeastOnce( inLog( DatabaseHealth.class ).error(
            is( "Database panic: The database has encountered a critical error, " +
                    "and needs to be restarted. Please see database logs for more details." ),
            sameInstance( exception ) ) );
}
// Asserts that a migration section logged its start message, every 10% progress step from 10% to 100%,
// never overshot 100%, and logged its completion message.
private void verifySectionReportedCorrectly( AssertableLogProvider logProvider )
{
    logProvider.assertContainsMessageContaining( VisibleMigrationProgressMonitor.MESSAGE_STARTED );
    for ( int i = 10; i <= 100; i += 10 )
    {
        logProvider.assertContainsMessageContaining( String.valueOf( i ) + "%" );
    }
    // Progress must never be reported beyond 100%.
    logProvider.assertNone(
            AssertableLogProvider.inLog( VisibleMigrationProgressMonitor.class ).info( containsString( "110%" ) ) );
    logProvider.assertContainsMessageContaining( VisibleMigrationProgressMonitor.MESSAGE_COMPLETED );
}
// Asserts that index-sampling recovery was logged at debug level for the given index description.
private void assertLogExistsForRecoveryOn( String labelAndProperty )
{
    logProvider.assertAtLeastOnce(
            inLog( IndexSamplingController.class ).debug( "Recovering index sampling for index %s", labelAndProperty ) );
}
// Asserts exactly one warning was logged about falling back to default tracer implementations
// instead of the requested one.
private void assertWarning( String implementationName )
{
    logProvider.assertExactly( AssertableLogProvider.inLog( getClass() )
            .warn( "Using default tracer implementations instead of '%s'", implementationName ) );
}
}
Config config = Config.defaults( default_schema_provider, PROVIDER_DESCRIPTOR.name() ); IndexProviderMap providerMap = life.add( new DefaultIndexProviderMap( buildIndexDependencies( provider ), config ) ); TokenNameLookup mockLookup = mock( TokenNameLookup.class ); when( provider.getInitialState( populatingIndex ) ).thenReturn( POPULATING ); indexes.add( populatingIndex ); StoreIndexDescriptor failedIndex = storeIndex( nextIndexId, nextIndexId++, 1, PROVIDER_DESCRIPTOR ); when( provider.getInitialState( failedIndex ) ).thenReturn( FAILED ); indexes.add( failedIndex ); for ( int i = 0; i < 10; i++ ) when( provider.getInitialState( indexRule ) ).thenReturn( ONLINE ); indexes.add( indexRule ); internalLogProvider.assertAtLeastOnce( logMatch.info( "IndexingService.init: index 1 on :Label1(prop) is POPULATING" ), logMatch.info( "IndexingService.init: index 2 on :Label2(prop) is FAILED" ), logMatch.info( "IndexingService.init: indexes not specifically mentioned above are ONLINE" ) ); internalLogProvider.assertNone( logMatch.info( "IndexingService.init: index 3 on :Label3(prop) is ONLINE" ) );
// Recovers an index in POPULATING state, makes creation of the online accessor throw, and verifies
// the index ends up FAILED with the failure stored and logged — and that no "online" message appears.
@Test
public void shouldStoreIndexFailureWhenFailingToCreateOnlineAccessorAfterRecoveringPopulatingIndex() throws Exception
{
    // given
    long indexId = 1;
    StoreIndexDescriptor indexRule = index.withId( indexId );
    IndexingService indexing = newIndexingServiceWithMockedDependencies( populator, accessor, withData(), indexRule );
    IOException exception = new IOException( "Expected failure" );
    when( nameLookup.labelGetName( labelId ) ).thenReturn( "TheLabel" );
    when( nameLookup.propertyKeyGetName( propertyKeyId ) ).thenReturn( "propertyKey" );

    when( indexProvider.getInitialState( indexRule ) ).thenReturn( POPULATING );
    // The failure under test: flipping to an online accessor throws after population.
    when( indexProvider.getOnlineAccessor( any( StoreIndexDescriptor.class ), any( IndexSamplingConfig.class ) ) )
            .thenThrow( exception );

    life.start();
    ArgumentCaptor<Boolean> closeArgs = ArgumentCaptor.forClass( Boolean.class );

    // when
    waitForIndexesToGetIntoState( indexing, InternalIndexState.FAILED, indexId );
    verify( populator, timeout( 10000 ).times( 2 ) ).close( closeArgs.capture() );

    // then
    assertEquals( FAILED, indexing.getIndexProxy( 1 ).getState() );
    // NOTE(review): expects close( true ) then close( false ) — presumably successful population
    // followed by the failed flip; confirm against the population job's close semantics.
    assertEquals( asList( true, false ), closeArgs.getAllValues() );
    assertThat( storedFailure(), containsString( format( "java.io.IOException: Expected failure%n\tat " ) ) );
    internalLogProvider.assertAtLeastOnce( inLog( IndexPopulationJob.class ).error(
            equalTo( "Failed to populate index: [:TheLabel(propertyKey) [provider: {key=quantum-dex, version=25.0}]]" ),
            causedBy( exception ) ) );
    internalLogProvider.assertNone( inLog( IndexPopulationJob.class ).info(
            "Index population completed. Index is now online: [%s]",
            ":TheLabel(propertyKey) [provider: {key=quantum-dex, version=25.0}]" ) );
}
// Checks that running a constraint-index population job logs exactly the population-start message
// followed by the transition-to-data-checks message.
@Test
public void logConstraintJobProgress() throws Exception
{
    // Given
    createNode( map( name, "irrelephant" ), FIRST );
    AssertableLogProvider logProvider = new AssertableLogProvider();
    FlippableIndexProxy index = mock( FlippableIndexProxy.class );
    when( index.getState() ).thenReturn( InternalIndexState.POPULATING );
    IndexPopulator populator = spy( indexPopulator( false ) );
    try
    {
        IndexPopulationJob job = newIndexPopulationJob( populator, index, indexStoreView, logProvider,
                EntityType.NODE, indexDescriptor( FIRST, name, true ) );

        // When
        job.run();

        // Then
        LogMatcherBuilder match = inLog( IndexPopulationJob.class );
        logProvider.assertExactly( match.info( "Index population started: [%s]", ":FIRST(name)" ),
                match.info( "Index created. Starting data checks. Index [%s] is %s.", ":FIRST(name)", "POPULATING" ) );
    }
    finally
    {
        // Always release populator resources even if assertions fail.
        populator.close( true );
    }
}
DefaultIndexProviderMap providerMap = new DefaultIndexProviderMap( buildIndexDependencies( provider ), config ); providerMap.init(); TokenNameLookup mockLookup = mock( TokenNameLookup.class ); mock( JobScheduler.class ), providerMap, storeView, mockLookup, asList( onlineIndex, populatingIndex, failedIndex ), internalLogProvider, userLogProvider, IndexingService.NO_MONITOR, schemaState ); when( provider.getInitialState( onlineIndex ) ) .thenReturn( ONLINE ); when( provider.getInitialState( populatingIndex ) ) .thenReturn( InternalIndexState.POPULATING ); when( provider.getInitialState( failedIndex ) ) .thenReturn( InternalIndexState.FAILED ); when( storeView.indexSample( anyLong(), any( DoubleLongRegister.class ) ) ).thenReturn( newDoubleLongRegister( 32L, 32L ) ); internalLogProvider.clear(); verify( provider ).getPopulationFailure( failedIndex ); internalLogProvider.assertAtLeastOnce( logMatch.debug( "IndexingService.start: index 1 on :LabelOne(propertyOne) is ONLINE" ), logMatch.debug( "IndexingService.start: index 2 on :LabelOne(propertyTwo) is POPULATING" ), logMatch.debug( "IndexingService.start: index 3 on :LabelTwo(propertyTwo) is FAILED" ) );
// A numeric max-execution-time header must be forwarded as an explicit millisecond timeout
// when the transaction is begun, and nothing should be logged.
@Test
public void startTransactionWithCustomTimeout()
{
    when( request.getHeader( HttpHeaderUtils.MAX_EXECUTION_TIME_HEADER ) )
            .thenReturn( String.valueOf( CUSTOM_TRANSACTION_TIMEOUT ) );
    CypherExecutor executor = new CypherExecutor( database, logProvider );
    executor.start();

    executor.createTransactionContext( QUERY, VirtualValues.emptyMap(), request );

    verify( databaseQueryService ).beginTransaction( KernelTransaction.Type.implicit, AUTH_DISABLED,
            CUSTOM_TRANSACTION_TIMEOUT, TimeUnit.MILLISECONDS );
    logProvider.assertNoLoggingOccurred();
}
// NOTE(review): despite the name, this test asserts a WARN entry IS logged — the intent appears to be
// that a connection reset is downgraded from a fatal error log to a single warning; confirm against
// HouseKeeper.exceptionCaught.
@Test
public void shouldNotLogConnectionResetErrors() throws Exception
{
    // Given
    AssertableLogProvider logProvider = new AssertableLogProvider();
    HouseKeeper keeper = new HouseKeeper( null, logProvider.getLog( HouseKeeper.class ) );
    Channel channel = mock( Channel.class );
    when( channel.toString() ).thenReturn( "[some channel info]" );
    ChannelHandlerContext ctx = mock( ChannelHandlerContext.class );
    when( ctx.channel() ).thenReturn( channel );
    when( ctx.executor() ).thenReturn( mock( EventExecutor.class ) );
    IOException connResetError = new IOException( "Connection reset by peer" );

    // When
    keeper.exceptionCaught( ctx, connResetError );

    // Then
    logProvider.assertExactly( AssertableLogProvider.inLog( HouseKeeper.class )
            .warn( "Fatal error occurred when handling a client connection, " +
                    "remote peer unexpectedly closed connection: %s", channel ) );
}
// Drives the transaction monitor through three fake-clock phases: before either transaction's timeout
// elapses (no terminations, no log), after the shorter timeout elapses (only tx1 terminated), and after
// the longer timeout elapses (tx2 terminated too).
@Test
void terminateExpiredTransactions()
{
    HashSet<KernelTransactionHandle> transactions = new HashSet<>();
    // NOTE(review): prepareTxMock arguments presumably encode start time and timeout (3 ms for tx1,
    // 8 ms for tx2) — confirm against the prepareTxMock helper.
    KernelTransactionImplementation tx1 = prepareTxMock( 3, 1, 3 );
    KernelTransactionImplementation tx2 = prepareTxMock( 4, 1, 8 );
    KernelTransactionImplementationHandle handle1 = new KernelTransactionImplementationHandle( tx1, fakeClock );
    KernelTransactionImplementationHandle handle2 = new KernelTransactionImplementationHandle( tx2, fakeClock );
    transactions.add( handle1 );
    transactions.add( handle2 );
    when( kernelTransactions.activeTransactions() ).thenReturn( transactions );
    KernelTransactionMonitor transactionMonitor = buildTransactionMonitor();

    // Phase 1: not enough time has passed for either transaction to expire.
    fakeClock.forward( 3, TimeUnit.MILLISECONDS );
    transactionMonitor.run();
    verify( tx1, never() ).markForTermination( Status.Transaction.TransactionTimedOut );
    verify( tx2, never() ).markForTermination( Status.Transaction.TransactionTimedOut );
    logProvider.assertNoMessagesContaining( "timeout" );

    // Phase 2: tx1's timeout has elapsed; tx2 is still within its longer timeout.
    fakeClock.forward( 2, TimeUnit.MILLISECONDS );
    transactionMonitor.run();
    verify( tx1 ).markForTermination( EXPECTED_REUSE_COUNT, Status.Transaction.TransactionTimedOut );
    verify( tx2, never() ).markForTermination( Status.Transaction.TransactionTimedOut );
    logProvider.assertContainsLogCallContaining( "timeout" );
    logProvider.clear();

    // Phase 3: tx2's timeout has now elapsed as well.
    fakeClock.forward( 10, TimeUnit.MILLISECONDS );
    transactionMonitor.run();
    verify( tx2 ).markForTermination( EXPECTED_REUSE_COUNT, Status.Transaction.TransactionTimedOut );
    logProvider.assertContainsLogCallContaining( "timeout" );
}
// Verifies that ConstraintIndexCreator logs each phase of uniqueness-constraint index creation:
// start, populated/verification start, and verified.
@Test
public void logMessagesAboutConstraintCreation()
        throws SchemaKernelException, UniquePropertyValueValidationException, TransactionFailureException,
        IndexNotFoundKernelException
{
    StubKernel kernel = new StubKernel();
    IndexProxy indexProxy = mock( IndexProxy.class );
    IndexingService indexingService = mock( IndexingService.class );
    when( indexingService.getIndexProxy( INDEX_ID ) ).thenReturn( indexProxy );
    when( indexingService.getIndexProxy( descriptor ) ).thenReturn( indexProxy );
    when( indexProxy.getDescriptor() ).thenReturn( index.withId( INDEX_ID ).withoutCapabilities() );
    NodePropertyAccessor propertyAccessor = mock( NodePropertyAccessor.class );
    ConstraintIndexCreator creator =
            new ConstraintIndexCreator( () -> kernel, indexingService, propertyAccessor, logProvider );
    KernelTransactionImplementation transaction = createTransaction();

    creator.createUniquenessConstraintIndex( transaction, descriptor, "indexProviderByName-1.0" );

    // One log entry per creation phase.
    logProvider.assertContainsLogCallContaining( "Starting constraint creation: %s." );
    logProvider.assertContainsLogCallContaining( "Constraint %s populated, starting verification." );
    logProvider.assertContainsLogCallContaining( "Constraint %s verified." );
}
// A malformed max-execution-time header must fall back to the default (no explicit timeout)
// transaction and emit a parse-failure log message.
@Test
public void startDefaultTransactionWhenHeaderHasIncorrectValue()
{
    when( request.getHeader( HttpHeaderUtils.MAX_EXECUTION_TIME_HEADER ) ).thenReturn( "not a number" );
    CypherExecutor executor = new CypherExecutor( database, logProvider );
    executor.start();

    executor.createTransactionContext( QUERY, VirtualValues.emptyMap(), request );

    // Default overload — no timeout arguments.
    verify( databaseQueryService ).beginTransaction( KernelTransaction.Type.implicit, AUTH_DISABLED );
    logProvider.assertContainsMessageContaining( "Fail to parse `max-execution-time` header with value: 'not a " +
            "number'. Should be a positive number." );
}
// An exception fired through the pipeline must stop the connection and be logged as a fatal error
// with the exception instance attached.
@Test
public void shouldLogExceptionOnExceptionCaught()
{
    AssertableLogProvider logging = new AssertableLogProvider();
    BoltConnection boltConnection = mock( BoltConnection.class );
    channel = new EmbeddedChannel( new HouseKeeper( boltConnection, logging.getLog( HouseKeeper.class ) ) );
    RuntimeException cause = new RuntimeException( "some exception" );

    channel.pipeline().fireExceptionCaught( cause );

    verify( boltConnection ).stop();
    logging.assertExactly( inLog( HouseKeeper.class )
            .error( startsWith( "Fatal error occurred when handling a client connection" ), equalTo( cause ) ) );
}
// Happy path: a numeric max-execution-time header is parsed into the timeout value
// and nothing is logged.
@Test
public void retrieveCustomTransactionTimeout()
{
    when( request.getHeader( MAX_EXECUTION_TIME_HEADER ) ).thenReturn( "100" );
    Log requestLog = logProvider.getLog( HttpServletRequest.class );

    long parsedTimeout = getTransactionTimeout( request, requestLog );

    assertEquals( "Transaction timeout should be retrieved.", 100, parsedTimeout );
    logProvider.assertNoLoggingOccurred();
}
// A non-numeric max-execution-time header yields 0 (no custom timeout) and logs a parse failure.
@Test
public void defaultValueWhenCustomTransactionTimeoutNotANumber()
{
    when( request.getHeader( MAX_EXECUTION_TIME_HEADER ) ).thenReturn( "aa" );
    Log requestLog = logProvider.getLog( HttpServletRequest.class );

    long parsedTimeout = getTransactionTimeout( request, requestLog );

    assertEquals( "Transaction timeout not specified.", 0, parsedTimeout );
    logProvider.assertContainsMessageContaining( "Fail to parse `max-execution-time` " +
            "header with value: 'aa'. Should be a positive number." );
}
@Test public void processNextBatchShouldThrowAssertionErrorIfStatementOpen() throws Exception { BoltConnection connection = newConnection( 1 ); connection.enqueue( Jobs.noop() ); connection.enqueue( Jobs.noop() ); // force to a message waiting loop when( stateMachine.hasOpenStatement() ).thenReturn( true ); connection.processNextBatch(); logProvider.assertExactly( AssertableLogProvider.inLog( DefaultBoltConnection.class.getName() ).error( startsWith( "Unexpected error" ), isA( AssertionError.class ) ) ); }
@Test public void processNextBatchShouldNotThrowAssertionErrorIfStatementOpenButStopping() throws Exception { BoltConnection connection = newConnection( 1 ); connection.enqueue( Jobs.noop() ); connection.enqueue( Jobs.noop() ); // force to a message waiting loop when( stateMachine.hasOpenStatement() ).thenReturn( true ); connection.stop(); connection.processNextBatch(); logProvider.assertNone( AssertableLogProvider.inLog( DefaultBoltConnection.class.getName() ).error( startsWith( "Unexpected error" ), isA( AssertionError.class ) ) ); }