// Verifies the generic deprecation banner was written to the internal log.
private void assertContainsWarningMessage()
{
    logProvider.assertContainsMessageContaining(
            "WARNING! Deprecated configuration options used. See manual for details" );
}
// Verifies the generic deprecation banner plus, when supplied, the specific
// per-setting deprecation message.
private void assertContainsWarningMessage( String deprecationMessage )
{
    assertContainsWarningMessage();
    if ( StringUtils.isEmpty( deprecationMessage ) )
    {
        return;
    }
    logProvider.assertContainsMessageContaining( deprecationMessage );
}
}
/**
 * Asserts that one migration section was reported completely: a start message,
 * progress messages for every 10% step from 10% to 100%, no overshoot past
 * 100%, and a completion message.
 */
private void verifySectionReportedCorrectly( AssertableLogProvider logProvider )
{
    logProvider.assertContainsMessageContaining( VisibleMigrationProgressMonitor.MESSAGE_STARTED );
    for ( int percent = 10; percent <= 100; percent += 10 )
    {
        // i + "%" is the idiomatic form; String.valueOf is redundant here.
        logProvider.assertContainsMessageContaining( percent + "%" );
    }
    // Progress must never be reported beyond 100%.
    logProvider.assertNone(
            AssertableLogProvider.inLog( VisibleMigrationProgressMonitor.class ).info( containsString( "110%" ) ) );
    logProvider.assertContainsMessageContaining( VisibleMigrationProgressMonitor.MESSAGE_COMPLETED );
}
// Stopping the server must emit the "Stopped." message to the log.
@Test
public void shouldLogShutdown()
{
    server.stop();

    logProvider.assertContainsMessageContaining( "Stopped." );
}
}
// A non-numeric max-execution-time header falls back to the default timeout (0)
// and logs a parse-failure warning.
@Test
public void defaultValueWhenCustomTransactionTimeoutNotANumber()
{
    when( request.getHeader( MAX_EXECUTION_TIME_HEADER ) ).thenReturn( "aa" );
    Log log = logProvider.getLog( HttpServletRequest.class );

    long timeout = getTransactionTimeout( request, log );

    assertEquals( "Transaction timeout not specified.", 0, timeout );
    logProvider.assertContainsMessageContaining(
            "Fail to parse `max-execution-time` header with value: 'aa'. Should be a positive number." );
}
// Each deleted store path must produce its own log notification.
@Test
public void notificationInLogAboutFileDeletion()
{
    AssertableLogProvider internalLogProvider = new AssertableLogProvider( false );
    DefaultFileDeletionEventListener listener = buildListener( internalLogProvider );

    String[] deletedPaths = { "testFile.db", "anotherDirectory" };
    for ( String path : deletedPaths )
    {
        listener.fileDeleted( path );
    }
    for ( String path : deletedPaths )
    {
        internalLogProvider.assertContainsMessageContaining(
                "'" + path + "' which belongs to the store was deleted while database was running." );
    }
}
@Test public void shouldPrintDiskUsage() { // Not sure how to get around this w/o spying. The method that we're unit testing will construct // other File instances with this guy as parent and internally the File constructor uses the field 'path' // which, if purely mocked, won't be assigned. At the same time we want to control the total/free space methods // and what they return... a tough one. File storeDir = Mockito.spy( new File( "storeDir" ) ); DatabaseLayout layout = mock( DatabaseLayout.class ); when( layout.databaseDirectory() ).thenReturn( storeDir ); when( storeDir.getTotalSpace() ).thenReturn( 100L ); when( storeDir.getFreeSpace() ).thenReturn( 40L ); AssertableLogProvider logProvider = new AssertableLogProvider(); KernelDiagnostics.StoreFiles storeFiles = new KernelDiagnostics.StoreFiles( layout ); storeFiles.dump( logProvider.getLog( getClass() ).debugLogger() ); logProvider.assertContainsMessageContaining( "100 / 40 / 40" ); }
// An unparsable max-execution-time header must not prevent the transaction from
// starting with default settings, but must be logged.
@Test
public void startDefaultTransactionWhenHeaderHasIncorrectValue()
{
    when( request.getHeader( HttpHeaderUtils.MAX_EXECUTION_TIME_HEADER ) ).thenReturn( "not a number" );

    CypherExecutor executor = new CypherExecutor( database, logProvider );
    executor.start();
    executor.createTransactionContext( QUERY, VirtualValues.emptyMap(), request );

    verify( databaseQueryService ).beginTransaction( KernelTransaction.Type.implicit, AUTH_DISABLED );
    logProvider.assertContainsMessageContaining(
            "Fail to parse `max-execution-time` header with value: 'not a number'. Should be a positive number." );
}
// When the very first transaction of the only log file is corrupted, recovery
// should truncate everything and leave a single log containing only a checkpoint.
@Test
public void recoverFirstCorruptedTransactionSingleFileNoCheckpoint() throws IOException
{
    addCorruptedCommandsToLastLogFile();

    GraphDatabaseService recoveredDatabase = startDbNoRecoveryOfCorruptedLogs();
    recoveredDatabase.shutdown();

    logProvider.assertContainsMessageContaining( "Fail to read transaction log version 0." );
    logProvider.assertContainsMessageContaining( "Fail to read first transaction of log version 0." );
    logProvider.assertContainsMessageContaining(
            "Recovery required from position LogPosition{logVersion=0, byteOffset=16}" );
    logProvider.assertContainsMessageContaining(
            "Fail to recover all transactions. Any later transactions after position " +
                    "LogPosition{logVersion=0, byteOffset=16} are unreadable and will be truncated." );

    assertEquals( 0, logFiles.getHighestLogVersion() );
    // Only a single checkpoint entry should remain after truncation.
    ObjectLongMap<Class> entryDistribution = getLogEntriesDistribution( logFiles );
    assertEquals( 1, entryDistribution.size() );
    assertEquals( 1, entryDistribution.get( CheckPoint.class ) );
}
@Test public void shouldLogCorrectTransactionLogDiagnosticsForTransactionsInSecondOldestLog() throws Exception { // GIVEN long logVersion = 2; long prevLogLastTxId = 45; NeoStoreDataSource dataSource = neoStoreDataSourceWithLogFilesContainingLowestTxId( logWithTransactionsInNextToOldestLog( logVersion, prevLogLastTxId ) ); AssertableLogProvider logProvider = new AssertableLogProvider(); Logger logger = logProvider.getLog( getClass() ).infoLogger(); // WHEN DataSourceDiagnostics.TRANSACTION_RANGE.dump( dataSource, logger ); // THEN logProvider.assertContainsMessageContaining( "transaction " + (prevLogLastTxId + 1) ); logProvider.assertContainsMessageContaining( "version " + (logVersion + 1) ); }
@Test public void shouldLogCorrectTransactionLogDiagnosticsForTransactionsInOldestLog() throws Exception { // GIVEN long logVersion = 2; long prevLogLastTxId = 45; NeoStoreDataSource dataSource = neoStoreDataSourceWithLogFilesContainingLowestTxId( logWithTransactions( logVersion, prevLogLastTxId ) ); AssertableLogProvider logProvider = new AssertableLogProvider(); Logger logger = logProvider.getLog( getClass() ).infoLogger(); // WHEN DataSourceDiagnostics.TRANSACTION_RANGE.dump( dataSource, logger ); // THEN logProvider.assertContainsMessageContaining( "transaction " + (prevLogLastTxId + 1) ); logProvider.assertContainsMessageContaining( "version " + logVersion ); }
@Test public void shouldLogCorrectTransactionLogDiagnosticsForNoTransactionLogs() { // GIVEN NeoStoreDataSource dataSource = neoStoreDataSourceWithLogFilesContainingLowestTxId( noLogs() ); AssertableLogProvider logProvider = new AssertableLogProvider(); Logger logger = logProvider.getLog( getClass() ).infoLogger(); // WHEN DataSourceDiagnostics.TRANSACTION_RANGE.dump( dataSource, logger ); // THEN logProvider.assertContainsMessageContaining( "No transactions" ); }
@Test public void shouldCountFileSizeRecursively() throws IOException { // file structure: // storeDir/indexDir/indexFile (1 kB) // storeDir/neostore (3 kB) File storeDir = directory.directory( "storeDir" ); DatabaseLayout layout = DatabaseLayout.of( storeDir ); File indexDir = directory( storeDir, "indexDir" ); file( indexDir, "indexFile", (int) kibiBytes( 1 ) ); file( storeDir, layout.metadataStore().getName(), (int) kibiBytes( 3 ) ); AssertableLogProvider logProvider = new AssertableLogProvider(); KernelDiagnostics.StoreFiles storeFiles = new KernelDiagnostics.StoreFiles( layout ); storeFiles.dump( logProvider.getLog( getClass() ).debugLogger() ); logProvider.assertContainsMessageContaining( "Total size of store: 4.00 kB" ); logProvider.assertContainsMessageContaining( "Total size of mapped files: 3.00 kB" ); }
@Test public void mustUseAndLogConfiguredPageSwapper() { // Given Config config = Config.defaults( stringMap( pagecache_memory.name(), "8m", pagecache_swapper.name(), TEST_PAGESWAPPER_NAME ) ); AssertableLogProvider logProvider = new AssertableLogProvider(); Log log = logProvider.getLog( PageCache.class ); // When ConfiguringPageCacheFactory cacheFactory = new ConfiguringPageCacheFactory( fsRule.get(), config, PageCacheTracer.NULL, PageCursorTracerSupplier.NULL, log, EmptyVersionContextSupplier.EMPTY, jobScheduler ); cacheFactory.getOrCreatePageCache().close(); // Then assertThat( PageSwapperFactoryForTesting.countCreatedPageSwapperFactories(), is( 1 ) ); assertThat( PageSwapperFactoryForTesting.countConfiguredPageSwapperFactories(), is( 1 ) ); logProvider.assertContainsMessageContaining( TEST_PAGESWAPPER_NAME ); }
// Appending garbage past the last checkpoint-less transaction should be logged
// and truncated, while all previously closed transactions are still recovered.
@Test
public void truncateNewerTransactionLogFileWhenForced() throws IOException
{
    GraphDatabaseAPI db = (GraphDatabaseAPI) databaseFactory.newEmbeddedDatabase( storeDir );
    for ( int tx = 0; tx < 10; tx++ )
    {
        generateTransaction( db );
    }
    TransactionIdStore transactionIdStore = getTransactionIdStore( db );
    long closedTransactions = transactionIdStore.getLastClosedTransactionId() - 1;
    db.shutdown();

    removeLastCheckpointRecordFromLastLogFile();
    addRandomBytesToLastLogFile( this::randomBytes );

    db = startDbNoRecoveryOfCorruptedLogs();
    db.shutdown();

    logProvider.assertContainsMessageContaining( "Fail to read transaction log version 0." );
    logProvider.assertContainsMessageContaining(
            "Fail to read transaction log version 0. Last valid transaction start offset is: 5668." );
    assertEquals( closedTransactions, recoveryMonitor.getNumberOfRecoveredTransactions() );
}
// Recovery over copied transaction logs should report incremental progress
// (from 10% up to 100%) and restore all committed nodes.
@Test
public void reportProgressOnRecovery() throws IOException
{
    GraphDatabaseService database = startDatabase( directory.databaseDir() );
    for ( int tx = 0; tx < 10; tx++ )
    {
        try ( Transaction transaction = database.beginTx() )
        {
            database.createNode();
            transaction.success();
        }
    }

    File restoreDbStoreDir = copyTransactionLogs();
    GraphDatabaseService recoveredDatabase = startDatabase( restoreDbStoreDir );
    try ( Transaction transaction = recoveredDatabase.beginTx() )
    {
        assertEquals( 10, count( recoveredDatabase.getAllNodes() ) );
    }
    logProvider.assertContainsMessageContaining( "10% completed" );
    logProvider.assertContainsMessageContaining( "100% completed" );

    database.shutdown();
    recoveredDatabase.shutdown();
}
// If the file system cannot provide a watcher, startup should log that file
// monitoring is disabled rather than fail.
@Test( timeout = TEST_TIMEOUT )
public void notifyWhenFileWatchingFailToStart()
{
    AssertableLogProvider logProvider = new AssertableLogProvider( true );
    GraphDatabaseService db = null;
    try
    {
        db = new TestGraphDatabaseFactory()
                .setInternalLogProvider( logProvider )
                .setFileSystem( new NonWatchableFileSystemAbstraction() )
                .newEmbeddedDatabase( testDirectory.storeDir( "failed-start-db" ) );

        logProvider.assertContainsMessageContaining( "Can not create file watcher for current file system. " +
                "File monitoring capabilities for store files will be disabled." );
    }
    finally
    {
        shutdownDatabaseSilently( db );
    }
}
// Deleting a store file while the database runs must be noticed by the file
// watcher and logged. Skipped on Windows (file-watching semantics differ there).
@Test( timeout = TEST_TIMEOUT )
public void notifyAboutStoreFileDeletion() throws Exception
{
    assumeFalse( SystemUtils.IS_OS_WINDOWS );

    String fileName = testDirectory.databaseLayout().metadataStore().getName();
    FileWatcher fileWatcher = getFileWatcher( database );
    CheckPointer checkpointer = getCheckpointer( database );
    DeletionLatchEventListener deletionListener = new DeletionLatchEventListener( fileName );
    fileWatcher.addFileWatchEventListener( deletionListener );

    // Keep generating checkpoints until the watcher observes a modification,
    // which proves watching is active before we delete the file.
    do
    {
        createNode( database );
        forceCheckpoint( checkpointer );
    }
    while ( !deletionListener.awaitModificationNotification() );

    deleteFile( testDirectory.storeDir(), fileName );
    deletionListener.awaitDeletionNotification();

    logProvider.assertContainsMessageContaining(
            "'" + fileName + "' which belongs to the store was deleted while database was running." );
}
// Explicitly disabling file watching via configuration must be announced in the log.
@Test( timeout = TEST_TIMEOUT )
public void shouldLogWhenDisabled()
{
    AssertableLogProvider logProvider = new AssertableLogProvider( true );
    GraphDatabaseService db = null;
    try
    {
        db = new TestGraphDatabaseFactory()
                .setInternalLogProvider( logProvider )
                .setFileSystem( new NonWatchableFileSystemAbstraction() )
                .newEmbeddedDatabaseBuilder( testDirectory.directory( "failed-start-db" ) )
                .setConfig( GraphDatabaseSettings.filewatcher_enabled, Settings.FALSE )
                .newGraphDatabase();

        logProvider.assertContainsMessageContaining( "File watcher disabled by configuration." );
    }
    finally
    {
        shutdownDatabaseSilently( db );
    }
}
// Removing the explicit-index directory while the database runs must be detected
// and logged as a store deletion.
@Test( timeout = TEST_TIMEOUT )
public void notifyAboutExplicitIndexFolderRemoval() throws InterruptedException, IOException
{
    String monitoredDirectory = getExplicitIndexDirectory( testDirectory.databaseLayout() );

    FileWatcher fileWatcher = getFileWatcher( database );
    CheckPointer checkPointer = getCheckpointer( database );
    DeletionLatchEventListener deletionListener = new DeletionLatchEventListener( monitoredDirectory );
    String metadataStore = testDirectory.databaseLayout().metadataStore().getName();
    ModificationEventListener modificationListener = new ModificationEventListener( metadataStore );
    fileWatcher.addFileWatchEventListener( deletionListener );
    fileWatcher.addFileWatchEventListener( modificationListener );

    // Wait for the watcher to observe metadata-store modifications before deleting,
    // so we know watching is live.
    do
    {
        createNode( database );
        forceCheckpoint( checkPointer );
    }
    while ( !modificationListener.awaitModificationNotification() );

    deleteStoreDirectory( storeDir, monitoredDirectory );
    deletionListener.awaitDeletionNotification();

    logProvider.assertContainsMessageContaining(
            "'" + monitoredDirectory + "' which belongs to the store was deleted while database was running." );
}