/**
 * Asserts that the migration progress monitor logged the expected message sequence:
 * the start message, every 10% progress step from 10% up to 100%, no overshoot
 * past 100%, and finally the completion message.
 *
 * @param logProvider log provider that captured the monitor's output
 */
private void verifySectionReportedCorrectly( AssertableLogProvider logProvider )
{
    logProvider.assertContainsMessageContaining( VisibleMigrationProgressMonitor.MESSAGE_STARTED );
    // Progress is reported in 10% increments; each step must appear in the log.
    // (String concatenation with an int converts it directly; String.valueOf is redundant.)
    for ( int i = 10; i <= 100; i += 10 )
    {
        logProvider.assertContainsMessageContaining( i + "%" );
    }
    // Progress must never be reported beyond 100%.
    logProvider.assertNone(
            AssertableLogProvider.inLog( VisibleMigrationProgressMonitor.class ).info( containsString( "110%" ) ) );
    logProvider.assertContainsMessageContaining( VisibleMigrationProgressMonitor.MESSAGE_COMPLETED );
}
/**
 * A fatal authentication failure raised while processing a batch must close the
 * underlying state machine, and nothing in the Bolt server package may log a warning.
 */
@Test
public void processNextBatchShouldCloseConnectionOnFatalAuthenticationError()
{
    BoltConnection connection = newConnection();
    // The enqueued job blows up with a fatal auth error when executed.
    connection.enqueue( fsm ->
    {
        throw new BoltConnectionAuthFatality( "auth failure", new RuntimeException( "inner error" ) );
    } );

    connection.processNextBatch();

    verify( stateMachine ).close();
    // Auth fatalities are expected conditions: no warning may be emitted by
    // any logger in the Bolt server package.
    AssertableLogProvider.LogMatcher noWarnings =
            AssertableLogProvider.inLog( containsString( BoltServer.class.getPackage().getName() ) )
                    .warn( any( String.class ) );
    logProvider.assertNone( noWarnings );
}
/**
 * With a count limit of N, exactly the first N messages are logged and
 * the (N+1)th message is suppressed.
 */
@Test
public void mustLimitByConfiguredCount()
{
    int limit = 10;
    logger.setCountLimit( limit );
    String[] lines = logLines( limit + 1 );

    // The first `limit` lines made it through...
    assertLoggedLines( lines, limit );

    // ...and the one line past the limit was dropped.
    String suppressedLine = lines[limit];
    logProvider.assertNone( currentLog( inLog( CappedLogger.class ), containsString( suppressedLine ) ) );
}
/**
 * Resetting the logger must restart the count limit: a message logged after
 * {@code reset()} goes through even though the limit was previously exhausted.
 */
@Test
public void mustLogAfterResetWithCountLimit()
{
    int limit = 10;
    logger.setCountLimit( limit );
    String[] lines = logLines( limit + 1 );
    logger.reset();
    String[] moreLines = logLines( 1, limit + 1 );

    // Expected output: the first `limit` pre-reset lines followed by the single post-reset line.
    String[] expected = ArrayUtils.addAll( ArrayUtils.subarray( lines, 0, limit ), moreLines );
    assertLoggedLines( expected, 1 + limit );

    // The line that hit the limit before the reset stays suppressed...
    logProvider.assertNone( currentLog( inLog( CappedLogger.class ), containsString( lines[limit] ) ) );
    // ...while the post-reset line appears.
    logProvider.assertContainsMessageMatching( containsString( moreLines[0] ) );
}
@Test public void shouldStoreIndexFailureWhenFailingToCreateOnlineAccessorAfterRecoveringPopulatingIndex() throws Exception { // given long indexId = 1; StoreIndexDescriptor indexRule = index.withId( indexId ); IndexingService indexing = newIndexingServiceWithMockedDependencies( populator, accessor, withData(), indexRule ); IOException exception = new IOException( "Expected failure" ); when( nameLookup.labelGetName( labelId ) ).thenReturn( "TheLabel" ); when( nameLookup.propertyKeyGetName( propertyKeyId ) ).thenReturn( "propertyKey" ); when( indexProvider.getInitialState( indexRule ) ).thenReturn( POPULATING ); when( indexProvider.getOnlineAccessor( any( StoreIndexDescriptor.class ), any( IndexSamplingConfig.class ) ) ) .thenThrow( exception ); life.start(); ArgumentCaptor<Boolean> closeArgs = ArgumentCaptor.forClass( Boolean.class ); // when waitForIndexesToGetIntoState( indexing, InternalIndexState.FAILED, indexId ); verify( populator, timeout( 10000 ).times( 2 ) ).close( closeArgs.capture() ); // then assertEquals( FAILED, indexing.getIndexProxy( 1 ).getState() ); assertEquals( asList( true, false ), closeArgs.getAllValues() ); assertThat( storedFailure(), containsString( format( "java.io.IOException: Expected failure%n\tat " ) ) ); internalLogProvider.assertAtLeastOnce( inLog( IndexPopulationJob.class ).error( equalTo( "Failed to populate index: [:TheLabel(propertyKey) [provider: {key=quantum-dex, version=25.0}]]" ), causedBy( exception ) ) ); internalLogProvider.assertNone( inLog( IndexPopulationJob.class ).info( "Index population completed. Index is now online: [%s]", ":TheLabel(propertyKey) [provider: {key=quantum-dex, version=25.0}]" ) ); }
/**
 * With a 1ms time limit, a second message arriving within the same millisecond
 * is dropped, while a message after the clock advances goes through.
 */
@Test
public void mustNotLogMessagesWithinConfiguredTimeLimit()
{
    FakeClock clock = getDefaultFakeClock();
    logger.setTimeLimit( 1, TimeUnit.MILLISECONDS, clock );

    logMethod.log( logger, "### AAA ###" );
    logMethod.log( logger, "### BBB ###" ); // same millisecond as AAA -> suppressed
    clock.forward( 1, TimeUnit.MILLISECONDS );
    logMethod.log( logger, "### CCC ###" ); // time limit elapsed -> logged

    // The two messages outside the time window made it through.
    logProvider.assertContainsMessageMatching( containsString( "### AAA ###" ) );
    logProvider.assertContainsMessageMatching( containsString( "### CCC ###" ) );
    // The message inside the window was dropped.
    logProvider.assertNone( currentLog( inLog( CappedLogger.class ), containsString( "### BBB ###" ) ) );
}
/**
 * Resetting the logger must restart the time limit: a message logged right after
 * {@code reset()} goes through even though the clock has not advanced.
 */
@Test
public void mustLogAfterResetWithTimeLimit()
{
    FakeClock clock = getDefaultFakeClock();
    logger.setTimeLimit( 1, TimeUnit.MILLISECONDS, clock );

    logMethod.log( logger, "### AAA ###" );
    logMethod.log( logger, "### BBB ###" ); // within the time window -> suppressed
    logger.reset();                         // note: clock has NOT been advanced
    logMethod.log( logger, "### CCC ###" ); // reset restarted the window -> logged

    // Messages before the window and after the reset made it through.
    logProvider.assertContainsMessageMatching( containsString( "### AAA ###" ) );
    logProvider.assertContainsMessageMatching( containsString( "### CCC ###" ) );
    // The message inside the pre-reset window was dropped.
    logProvider.assertNone( currentLog( inLog( CappedLogger.class ), containsString( "### BBB ###" ) ) );
}
@Test public void shouldStoreIndexFailureWhenFailingToCreateOnlineAccessorAfterPopulating() throws Exception { // given long indexId = 1; IndexingService indexing = newIndexingServiceWithMockedDependencies( populator, accessor, withData() ); IOException exception = new IOException( "Expected failure" ); when( nameLookup.labelGetName( labelId ) ).thenReturn( "TheLabel" ); when( nameLookup.propertyKeyGetName( propertyKeyId ) ).thenReturn( "propertyKey" ); when( indexProvider.getOnlineAccessor( any( StoreIndexDescriptor.class ), any( IndexSamplingConfig.class ) ) ) .thenThrow( exception ); life.start(); ArgumentCaptor<Boolean> closeArgs = ArgumentCaptor.forClass( Boolean.class ); // when indexing.createIndexes( index.withId( indexId ) ); waitForIndexesToGetIntoState( indexing, InternalIndexState.FAILED, indexId ); verify( populator, timeout( 10000 ).times( 2 ) ).close( closeArgs.capture() ); // then assertEquals( FAILED, indexing.getIndexProxy( 1 ).getState() ); assertEquals( asList( true, false ), closeArgs.getAllValues() ); assertThat( storedFailure(), containsString( format( "java.io.IOException: Expected failure%n\tat " ) ) ); internalLogProvider.assertAtLeastOnce( inLog( IndexPopulationJob.class ).error( equalTo( "Failed to populate index: [:TheLabel(propertyKey) [provider: {key=quantum-dex, version=25.0}]]" ), causedBy( exception ) ) ); internalLogProvider.assertNone( inLog( IndexPopulationJob.class ).info( "Index population completed. Index is now online: [%s]", ":TheLabel(propertyKey) [provider: {key=quantum-dex, version=25.0}]" ) ); }
@Test public void mustOnlyLogMessagesThatPassBothLimits() { FakeClock clock = getDefaultFakeClock(); logger.setCountLimit( 2 ); logger.setTimeLimit( 1, TimeUnit.MILLISECONDS, clock ); logMethod.log( logger, "### AAA ###" ); logMethod.log( logger, "### BBB ###" ); // Filtered by the time limit clock.forward( 1, TimeUnit.MILLISECONDS ); logMethod.log( logger, "### CCC ###" ); // Filtered by the count limit logger.reset(); logMethod.log( logger, "### DDD ###" ); logProvider.assertContainsMessageMatching( containsString( "### AAA ###" ) ); logProvider.assertNone( currentLog( inLog( CappedLogger.class ), containsString( "### BBB ###" ) ) ); logProvider.assertNone( currentLog( inLog( CappedLogger.class ), containsString( "### CCC ###" ) ) ); logProvider.assertContainsMessageMatching( containsString( "### DDD ###" ) ); }
@Test public void unsettingTimeLimitMustLetMessagesThrough() { FakeClock clock = getDefaultFakeClock(); logger.setTimeLimit( 1, TimeUnit.MILLISECONDS, clock ); logMethod.log( logger, "### AAA ###" ); logMethod.log( logger, "### BBB ###" ); clock.forward( 1, TimeUnit.MILLISECONDS ); logMethod.log( logger, "### CCC ###" ); logMethod.log( logger, "### DDD ###" ); logger.unsetTimeLimit(); // Note that we are not advancing the clock! logMethod.log( logger, "### EEE ###" ); logProvider.assertContainsMessageMatching( containsString( "### AAA ###" ) ); logProvider.assertNone( currentLog( inLog( CappedLogger.class ), containsString( "### BBB ###" ) ) ); logProvider.assertContainsMessageMatching( containsString( "### CCC ###" ) ); logProvider.assertNone( currentLog( inLog( CappedLogger.class ), containsString( "### DDD ###" ) ) ); logProvider.assertContainsMessageMatching( containsString( "### EEE ###" ) ); }
logMatch.info( "IndexingService.init: indexes not specifically mentioned above are ONLINE" ) ); internalLogProvider.assertNone( logMatch.info( "IndexingService.init: index 3 on :Label3(prop) is ONLINE" ) );
/**
 * Removing the count limit must let all subsequent messages through, even though
 * the limit had already been exhausted.
 */
@Test
public void unsettingCountLimitMustLetMessagesThrough()
{
    int limit = 10;
    logger.setCountLimit( limit );
    String[] lines = logLines( limit + 1 );
    logger.unsetCountLimit();

    // Log a large batch after lifting the limit; every one must appear.
    int moreLineCount = 1000;
    String[] moreLines = logLines( moreLineCount, limit + 1 );

    // Expected output: the first `limit` pre-unset lines followed by the full post-unset batch.
    String[] expected = ArrayUtils.addAll( ArrayUtils.subarray( lines, 0, limit ), moreLines );
    assertLoggedLines( expected, moreLineCount + limit );

    // The line that hit the limit before the unset stays suppressed...
    logProvider.assertNone( currentLog( inLog( CappedLogger.class ), containsString( lines[limit] ) ) );
    // ...while the entire post-unset batch is present, offset past the pre-unset lines.
    assertLoggedLines( moreLines, moreLineCount, limit );
}
@Test public void processNextBatchShouldNotThrowAssertionErrorIfStatementOpenButStopping() throws Exception { BoltConnection connection = newConnection( 1 ); connection.enqueue( Jobs.noop() ); connection.enqueue( Jobs.noop() ); // force to a message waiting loop when( stateMachine.hasOpenStatement() ).thenReturn( true ); connection.stop(); connection.processNextBatch(); logProvider.assertNone( AssertableLogProvider.inLog( DefaultBoltConnection.class.getName() ).error( startsWith( "Unexpected error" ), isA( AssertionError.class ) ) ); }
logMatch.info( "IndexingService.start: indexes not specifically mentioned above are ONLINE" ) ); internalLogProvider.assertNone( logMatch.info( "IndexingService.start: index 3 on :Label3(prop) is ONLINE" ) );
/**
 * Deleting a transaction log file must not be reported by the default file-deletion
 * event listener — transaction logs are excluded from file watching.
 */
@Test( timeout = TEST_TIMEOUT )
public void doNotMonitorTransactionLogFiles() throws InterruptedException, IOException
{
    // File watching is not reliable on Windows in this test setup.
    assumeFalse( SystemUtils.IS_OS_WINDOWS );

    FileWatcher fileWatcher = getFileWatcher( database );
    CheckPointer checkPointer = getCheckpointer( database );
    String metadataStore = testDirectory.databaseLayout().metadataStore().getName();
    ModificationEventListener modificationListener = new ModificationEventListener( metadataStore );
    fileWatcher.addFileWatchEventListener( modificationListener );

    // Keep producing transactions and checkpoints until the watcher is confirmed
    // to be observing store modifications.
    do
    {
        createNode( database );
        forceCheckpoint( checkPointer );
    }
    while ( !modificationListener.awaitModificationNotification() );

    // Delete the first transaction log file and wait until the deletion is observed.
    String fileName = TransactionLogFiles.DEFAULT_NAME + ".0";
    DeletionLatchEventListener deletionListener = new DeletionLatchEventListener( fileName );
    fileWatcher.addFileWatchEventListener( deletionListener );
    deleteFile( testDirectory.storeDir(), fileName );
    deletionListener.awaitDeletionNotification();

    // The deletion listener must not have logged anything about the transaction log file.
    AssertableLogProvider.LogMatcher logMatcher =
            AssertableLogProvider.inLog( DefaultFileDeletionEventListener.class ).info( containsString( fileName ) );
    logProvider.assertNone( logMatcher );
}
/**
 * Shutting the database down while an index population is in flight must not
 * cause any error to be logged by the population job.
 */
@Test
public void shutdownDatabaseDuringIndexPopulations()
{
    AssertableLogProvider logCapture = new AssertableLogProvider( true );
    File storeDir = directory.directory( "shutdownDbTest" );
    Label testLabel = Label.label( "testLabel" );
    String propertyName = "testProperty";

    GraphDatabaseService db = new TestGraphDatabaseFactory()
            .setInternalLogProvider( logCapture )
            .newEmbeddedDatabase( storeDir );
    prePopulateDatabase( db, testLabel, propertyName );

    // Kick off an index population, then shut down while it may still be running.
    try ( Transaction transaction = db.beginTx() )
    {
        db.schema().indexFor( testLabel ).on( propertyName ).create();
        transaction.success();
    }
    db.shutdown();

    logCapture.assertNone( AssertableLogProvider.inLog( IndexPopulationJob.class ).anyError() );
}