// Initializes the jobScheduler field with a thread-pool-backed scheduler.
// Protected so subclasses can override and supply a different implementation.
protected void initializeJobScheduler()
{
    jobScheduler = new ThreadPoolJobScheduler();
}
/**
 * Closes the owned resources in order: page cache, then scheduler, then file system.
 * <p>
 * Uses nested try/finally so that an exception thrown while closing an earlier
 * resource does not leak the later ones — the original sequential calls would
 * skip {@code scheduler.close()} and {@code fileSystem.close()} if
 * {@code pageCache.close()} threw. The first exception (if any) propagates.
 *
 * @throws Exception if closing any of the resources fails
 */
@Override
public void close() throws Exception
{
    try
    {
        pageCache.close();
    }
    finally
    {
        try
        {
            scheduler.close();
        }
        finally
        {
            fileSystem.close();
        }
    }
}
}
// Closing this object simply delegates to shutdown().
@Override
public void close()
{
    shutdown();
}
// Creates a fresh scheduler per test and resets the static counters on the
// test swapper factory, so each test observes counts produced by itself only.
@Before
public void setUp()
{
    jobScheduler = new ThreadPoolJobScheduler();
    PageSwapperFactoryForTesting.createdCounter.set( 0 );
    PageSwapperFactoryForTesting.configuredCounter.set( 0 );
}
// Per-test fixture: each test gets its own thread-pool job scheduler.
@Before
public void setUp() throws Exception
{
    jobScheduler = new ThreadPoolJobScheduler();
}
// Per-test fixture: fresh scheduler plus a page cache obtained from the
// page cache rule, backed by the file system rule.
@Before
public void setUp()
{
    jobScheduler = new ThreadPoolJobScheduler();
    pageCache = pageCacheRule.getPageCache( fileSystemRule );
}
// Per-test fixture for label scan store tests: fresh scheduler, the working
// database layout from the test directory, and a separate "prepare" directory.
@Before
public void setUpLabelScanStore()
{
    jobScheduler = new ThreadPoolJobScheduler();
    workingDatabaseLayout = directory.databaseLayout();
    prepareDirectory = directory.directory( "prepare" );
}
/**
 * Builds the {@link NeoStores}, filling in rule-provided defaults for every
 * collaborator the caller did not set explicitly, then opening the stores.
 *
 * @return the opened {@code NeoStores}
 * @throws IOException if opening the stores fails
 */
public NeoStores build() throws IOException
{
    // Resolve the file system and raw config first — the effective Config
    // derived from it is needed by the remaining defaults.
    if ( fs == null )
    {
        fs = ruleFs();
    }
    if ( config == null )
    {
        config = new String[0];
    }
    Config effectiveConfig = configOf( config );

    // The remaining defaults are independent of each other.
    if ( format == null )
    {
        format = RecordFormatSelector.selectForConfig( effectiveConfig, NullLogProvider.getInstance() );
    }
    if ( idGeneratorFactory == null )
    {
        idGeneratorFactory = DefaultIdGeneratorFactory::new;
    }
    if ( pageCache == null )
    {
        // A defaulted page cache needs its own scheduler to run on.
        jobScheduler = new ThreadPoolJobScheduler();
        pageCache = rulePageCache( effectiveConfig, fs, jobScheduler );
    }

    return open( fs, pageCache, format, idGeneratorFactory, config );
}
}
// Per-test fixture: builds the fixture, clears any interrupt flag left over
// from a previous test, and makes sure the "a" file exists before the test runs.
@BeforeEach
public void setUp() throws IOException
{
    fixture = createFixture();
    Thread.interrupted(); // Clear stray interrupts
    fs = createFileSystemAbstraction();
    jobScheduler = new ThreadPoolJobScheduler();
    ensureExists( file( "a" ) );
}
/**
 * Creates a test {@link Fixture}: a real file system, a prepared test
 * directory for {@code testClass}, and a standalone page cache running on a
 * fresh thread-pool scheduler.
 *
 * @param testClass the test class the directory is prepared for
 * @return a fixture bundling the page cache, file system, directory and scheduler
 * @throws IOException if preparing the test directory fails
 */
static Fixture prepareDirectoryAndPageCache( Class<?> testClass ) throws IOException
{
    // The scheduler has no dependencies, so it can be created up front.
    ThreadPoolJobScheduler jobScheduler = new ThreadPoolJobScheduler();
    DefaultFileSystemAbstraction fs = new DefaultFileSystemAbstraction();
    TestDirectory directory = TestDirectory.testDirectory( testClass, fs );
    File testDir = directory.prepareDirectoryForTest( "test" );
    PageCache cache = StandalonePageCacheFactory.createPageCache( fs, jobScheduler );
    return new Fixture( cache, fs, testDir, jobScheduler );
}
// Prepares a sample database for the record format under test. Directory
// names embed the store version so different format runs do not collide.
@Before
public void prepareDb() throws IOException
{
    jobScheduler = new ThreadPoolJobScheduler();
    String version = formats.storeVersion();
    databaseLayout = directory.databaseLayout( "db_" + version );
    File prepareDirectory = directory.directory( "prepare_" + version );
    prepareSampleDatabase( version, fileSystem, databaseLayout, prepareDirectory );
}
// Per-test fixture: opens a fresh BatchingNeoStores on the test directory
// and creates a brand-new (empty) store for the test to populate.
@Before
public void start() throws IOException
{
    jobScheduler = new ThreadPoolJobScheduler();
    stores = BatchingNeoStores.batchingNeoStores( fileSystemRule.get(), directory.absolutePath(), format, CONFIG, NullLogService.getInstance(), AdditionalInitialIds.EMPTY, Config.defaults(), jobScheduler );
    stores.createNew();
}
// Runs a page cache stress round: builds a single-file swapper factory on a
// real file system, opens a Muninn page cache over it, and hammers it with
// the configured number of pages/threads until the stop condition is met.
// The nested try-with-resources guarantees the page cache is closed before
// the file system and scheduler it depends on.
public void run() throws Exception
{
    try ( FileSystemAbstraction fs = new DefaultFileSystemAbstraction();
          JobScheduler jobScheduler = new ThreadPoolJobScheduler() )
    {
        PageSwapperFactory swapperFactory = new SingleFilePageSwapperFactory();
        swapperFactory.open( fs, Configuration.EMPTY );
        try ( PageCache pageCacheUnderTest = new MuninnPageCache( swapperFactory, numberOfCachePages, tracer, pageCursorTracerSupplier, EmptyVersionContextSupplier.EMPTY, jobScheduler ) )
        {
            PageCacheStresser pageCacheStresser = new PageCacheStresser( numberOfPages, numberOfThreads, workingDirectory );
            pageCacheStresser.stress( pageCacheUnderTest, condition );
        }
    }
}
// Stand up a Muninn page cache backed by a fresh thread-pool scheduler.
// NOTE(review): the enclosing method is outside this view; swapperFactory,
// cachePageCount, tracer and cursorTracerSupplier are assumed to be
// initialized by the surrounding code — confirm against the full file.
JobScheduler jobScheduler = new ThreadPoolJobScheduler();
MuninnPageCache cache = new MuninnPageCache( swapperFactory, cachePageCount, tracer, cursorTracerSupplier, EmptyVersionContextSupplier.EMPTY, jobScheduler );
// Subprocess entry point: deletes the file given as args[0], builds a GBPTree
// on it and closes it immediately. The parent process is expected to kill
// this subprocess somewhere in the middle of that sequence.
public static void main( String[] args ) throws Exception
{
    // Just start and immediately close. The process spawning this subprocess will kill it in the middle of all this
    File file = new File( args[0] );
    try ( FileSystemAbstraction fs = new DefaultFileSystemAbstraction();
          JobScheduler jobScheduler = new ThreadPoolJobScheduler() )
    {
        SingleFilePageSwapperFactory swapper = new SingleFilePageSwapperFactory();
        swapper.open( fs, EMPTY );
        // 10 cache pages is enough for this throwaway tree.
        try ( PageCache pageCache = new MuninnPageCache( swapper, 10, PageCacheTracer.NULL, PageCursorTracerSupplier.NULL, EmptyVersionContextSupplier.EMPTY, jobScheduler ) )
        {
            fs.deleteFile( file );
            new GBPTreeBuilder<>( pageCache, file, longLayout().build() ).build().close();
        }
    }
}
}
// Verifies that opening a store whose metadata carries an unknown store
// version fails with a descriptive IllegalArgumentException (wrapped in a
// LifecycleException) when upgrade is not allowed.
@Test
void startTheDatabaseWithWrongVersionShouldFailWithUpgradeNotAllowed() throws Throwable
{
    // given
    // create a store
    File databaseDir = testDirectory.databaseDir();
    GraphDatabaseService db = new TestGraphDatabaseFactory().newEmbeddedDatabase( databaseDir );
    try ( Transaction tx = db.beginTx() )
    {
        db.createNode();
        tx.success();
    }
    db.shutdown();

    // mess up the version in the metadatastore
    // The page cache must be closed again before reopening the database below.
    try ( FileSystemAbstraction fileSystem = new DefaultFileSystemAbstraction();
          ThreadPoolJobScheduler scheduler = new ThreadPoolJobScheduler();
          PageCache pageCache = createPageCache( fileSystem, scheduler ) )
    {
        MetaDataStore.setRecord( pageCache, testDirectory.databaseLayout().metadataStore(), MetaDataStore.Position.STORE_VERSION, MetaDataStore.versionStringToLong( "bad" ));
    }

    RuntimeException exception = assertThrows( RuntimeException.class, () -> new TestGraphDatabaseFactory().newEmbeddedDatabase( databaseDir ) );
    assertTrue( exception.getCause() instanceof LifecycleException );
    assertTrue( exception.getCause().getCause() instanceof IllegalArgumentException );
    assertEquals( "Unknown store version 'bad'", exception.getCause().getCause().getMessage() );
}
// Verifies that BatchingNeoStores.createNew() refuses to run against a
// database directory that already contains data, failing with an
// IllegalStateException whose message mentions "already contains".
@Test
public void shouldNotOpenStoreWithNodesOrRelationshipsInIt() throws Exception
{
    // GIVEN
    someDataInTheDatabase();

    // WHEN
    // Note: the catch below belongs to the outer try-with-resources, so the
    // scheduler is closed before the exception is inspected.
    try ( JobScheduler jobScheduler = new ThreadPoolJobScheduler() )
    {
        RecordFormats recordFormats = RecordFormatSelector.selectForConfig( Config.defaults(), NullLogProvider.getInstance() );
        try ( BatchingNeoStores store = BatchingNeoStores.batchingNeoStores( storage.fileSystem(), storage.directory().databaseDir(), recordFormats, DEFAULT, NullLogService.getInstance(), EMPTY, Config.defaults(), jobScheduler ) )
        {
            store.createNew();
            fail( "Should fail on existing data" );
        }
    }
    catch ( IllegalStateException e )
    {
        // THEN
        assertThat( e.getMessage(), containsString( "already contains" ) );
    }
}
// Imports `count` randomly generated entities into the test database layout
// using the parallel batch importer with default configuration.
private void createRandomData( int count ) throws Exception
{
    Config config = Config.defaults();
    RecordFormats recordFormats = RecordFormatSelector.selectForConfig( config, NullLogProvider.getInstance() );
    try ( RandomDataInput input = new RandomDataInput( count );
          JobScheduler jobScheduler = new ThreadPoolJobScheduler() )
    {
        BatchImporter importer = new ParallelBatchImporter( directory.databaseLayout(), fileSystemRule.get(), null, DEFAULT, NullLogService.getInstance(), ExecutionMonitors.invisible(), EMPTY, config, recordFormats, NO_MONITOR, jobScheduler );
        importer.doImport( input );
    }
}
// Verifies that even with allow_upgrade=true, an unrecognized store version
// in the metadata store makes startup fail with
// UnexpectedUpgradingStoreVersionException (wrapped in a LifecycleException).
@Test
void startTheDatabaseWithWrongVersionShouldFailAlsoWhenUpgradeIsAllowed() throws Throwable
{
    // given
    // create a store
    File databaseDirectory = testDirectory.databaseDir();
    GraphDatabaseService db = new TestGraphDatabaseFactory().newEmbeddedDatabase( databaseDirectory );
    try ( Transaction tx = db.beginTx() )
    {
        db.createNode();
        tx.success();
    }
    db.shutdown();

    // mess up the version in the metadatastore
    String badStoreVersion = "bad";
    // The page cache must be closed again before reopening the database below.
    try ( FileSystemAbstraction fileSystem = new DefaultFileSystemAbstraction();
          ThreadPoolJobScheduler scheduler = new ThreadPoolJobScheduler();
          PageCache pageCache = createPageCache( fileSystem, scheduler ) )
    {
        MetaDataStore.setRecord( pageCache, testDirectory.databaseLayout().metadataStore(), MetaDataStore.Position.STORE_VERSION, MetaDataStore.versionStringToLong( badStoreVersion ) );
    }

    RuntimeException exception = assertThrows( RuntimeException.class, () -> new TestGraphDatabaseFactory().newEmbeddedDatabaseBuilder( databaseDirectory ).setConfig( GraphDatabaseSettings.allow_upgrade, "true" ).newGraphDatabase() );
    assertTrue( exception.getCause() instanceof LifecycleException );
    assertTrue( exception.getCause().getCause() instanceof StoreUpgrader.UnexpectedUpgradingStoreVersionException );
}
// Draw independent seeds for node and relationship generation so a failing
// run can be reproduced, then build a parallel batch importer on a fresh
// scheduler. NOTE(review): this fragment is cut off mid-constructor-call;
// the remaining ParallelBatchImporter arguments are outside this view.
long nodeRandomSeed = random.nextLong();
long relationshipRandomSeed = random.nextLong();
JobScheduler jobScheduler = new ThreadPoolJobScheduler();
final BatchImporter inserter = new ParallelBatchImporter( databaseLayout, fileSystemRule.get(), null, config, NullLogService.getInstance(),