protected void initializeJobScheduler()
{
    jobScheduler = new ThreadPoolJobScheduler();
}
@Before
public void setUp()
{
    jobScheduler = new ThreadPoolJobScheduler();
    PageSwapperFactoryForTesting.createdCounter.set( 0 );
    PageSwapperFactoryForTesting.configuredCounter.set( 0 );
}
@Before
public void setUp() throws Exception
{
    jobScheduler = new ThreadPoolJobScheduler();
}
@Before
public void setUp()
{
    jobScheduler = new ThreadPoolJobScheduler();
    pageCache = pageCacheRule.getPageCache( fileSystemRule );
}
@Before
public void setUpLabelScanStore()
{
    jobScheduler = new ThreadPoolJobScheduler();
    workingDatabaseLayout = directory.databaseLayout();
    prepareDirectory = directory.directory( "prepare" );
}
public NeoStores build() throws IOException
{
    // Fall back to test-rule defaults for any dependency that wasn't explicitly provided
    if ( fs == null )
    {
        fs = ruleFs();
    }
    if ( config == null )
    {
        config = new String[0];
    }
    Config dbConfig = configOf( config );
    if ( pageCache == null )
    {
        jobScheduler = new ThreadPoolJobScheduler();
        pageCache = rulePageCache( dbConfig, fs, jobScheduler );
    }
    if ( format == null )
    {
        format = RecordFormatSelector.selectForConfig( dbConfig, NullLogProvider.getInstance() );
    }
    if ( idGeneratorFactory == null )
    {
        idGeneratorFactory = DefaultIdGeneratorFactory::new;
    }
    return open( fs, pageCache, format, idGeneratorFactory, config );
}
@BeforeEach
public void setUp() throws IOException
{
    fixture = createFixture();
    Thread.interrupted(); // Clear stray interrupts
    fs = createFileSystemAbstraction();
    jobScheduler = new ThreadPoolJobScheduler();
    ensureExists( file( "a" ) );
}
static Fixture prepareDirectoryAndPageCache( Class<?> testClass ) throws IOException
{
    DefaultFileSystemAbstraction fileSystem = new DefaultFileSystemAbstraction();
    TestDirectory testDirectory = TestDirectory.testDirectory( testClass, fileSystem );
    File dir = testDirectory.prepareDirectoryForTest( "test" );
    ThreadPoolJobScheduler scheduler = new ThreadPoolJobScheduler();
    PageCache pageCache = StandalonePageCacheFactory.createPageCache( fileSystem, scheduler );
    return new Fixture( pageCache, fileSystem, dir, scheduler );
}
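The factory above hands ownership of the page cache, file system, and scheduler to the returned Fixture, so the caller is responsible for closing them when the test class finishes. A minimal teardown sketch, assuming the Fixture simply exposes its constructor arguments as fields (those field names are hypothetical, not confirmed by the snippet; under JUnit 4 the annotation would be @AfterClass):

@AfterAll
static void tearDown() throws Exception
{
    // Close in reverse dependency order: the page cache uses the scheduler and file system.
    fixture.pageCache.close();
    fixture.scheduler.close();
    fixture.fileSystem.close();
}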
@Before
public void prepareDb() throws IOException
{
    jobScheduler = new ThreadPoolJobScheduler();
    String version = formats.storeVersion();
    databaseLayout = directory.databaseLayout( "db_" + version );
    File prepareDirectory = directory.directory( "prepare_" + version );
    prepareSampleDatabase( version, fileSystem, databaseLayout, prepareDirectory );
}
@Before
public void start() throws IOException
{
    jobScheduler = new ThreadPoolJobScheduler();
    stores = BatchingNeoStores.batchingNeoStores( fileSystemRule.get(), directory.absolutePath(), format, CONFIG,
            NullLogService.getInstance(), AdditionalInitialIds.EMPTY, Config.defaults(), jobScheduler );
    stores.createNew();
}
public void run() throws Exception
{
    try ( FileSystemAbstraction fs = new DefaultFileSystemAbstraction();
            JobScheduler jobScheduler = new ThreadPoolJobScheduler() )
    {
        PageSwapperFactory swapperFactory = new SingleFilePageSwapperFactory();
        swapperFactory.open( fs, Configuration.EMPTY );
        try ( PageCache pageCacheUnderTest = new MuninnPageCache( swapperFactory, numberOfCachePages, tracer,
                pageCursorTracerSupplier, EmptyVersionContextSupplier.EMPTY, jobScheduler ) )
        {
            // Hammer the page cache from the configured number of threads until the stop condition is met
            PageCacheStresser pageCacheStresser = new PageCacheStresser( numberOfPages, numberOfThreads, workingDirectory );
            pageCacheStresser.stress( pageCacheUnderTest, condition );
        }
    }
}
public static void main( String[] args ) throws Exception
{
    // Just start and immediately close. The process spawning this subprocess will kill it in the middle of all this
    File file = new File( args[0] );
    try ( FileSystemAbstraction fs = new DefaultFileSystemAbstraction();
            JobScheduler jobScheduler = new ThreadPoolJobScheduler() )
    {
        SingleFilePageSwapperFactory swapper = new SingleFilePageSwapperFactory();
        swapper.open( fs, EMPTY );
        try ( PageCache pageCache = new MuninnPageCache( swapper, 10, PageCacheTracer.NULL,
                PageCursorTracerSupplier.NULL, EmptyVersionContextSupplier.EMPTY, jobScheduler ) )
        {
            fs.deleteFile( file );
            new GBPTreeBuilder<>( pageCache, file, longLayout().build() ).build().close();
        }
    }
}
@Test
void startTheDatabaseWithWrongVersionShouldFailWithUpgradeNotAllowed() throws Throwable
{
    // given
    // create a store
    File databaseDir = testDirectory.databaseDir();
    GraphDatabaseService db = new TestGraphDatabaseFactory().newEmbeddedDatabase( databaseDir );
    try ( Transaction tx = db.beginTx() )
    {
        db.createNode();
        tx.success();
    }
    db.shutdown();

    // mess up the version in the metadata store
    try ( FileSystemAbstraction fileSystem = new DefaultFileSystemAbstraction();
            ThreadPoolJobScheduler scheduler = new ThreadPoolJobScheduler();
            PageCache pageCache = createPageCache( fileSystem, scheduler ) )
    {
        MetaDataStore.setRecord( pageCache, testDirectory.databaseLayout().metadataStore(),
                MetaDataStore.Position.STORE_VERSION, MetaDataStore.versionStringToLong( "bad" ) );
    }

    RuntimeException exception = assertThrows( RuntimeException.class,
            () -> new TestGraphDatabaseFactory().newEmbeddedDatabase( databaseDir ) );
    assertTrue( exception.getCause() instanceof LifecycleException );
    assertTrue( exception.getCause().getCause() instanceof IllegalArgumentException );
    assertEquals( "Unknown store version 'bad'", exception.getCause().getCause().getMessage() );
}
@Test
public void shouldNotOpenStoreWithNodesOrRelationshipsInIt() throws Exception
{
    // GIVEN
    someDataInTheDatabase();

    // WHEN
    try ( JobScheduler jobScheduler = new ThreadPoolJobScheduler() )
    {
        RecordFormats recordFormats = RecordFormatSelector.selectForConfig( Config.defaults(), NullLogProvider.getInstance() );
        try ( BatchingNeoStores store = BatchingNeoStores.batchingNeoStores( storage.fileSystem(), storage.directory().databaseDir(),
                recordFormats, DEFAULT, NullLogService.getInstance(), EMPTY, Config.defaults(), jobScheduler ) )
        {
            store.createNew();
            fail( "Should fail on existing data" );
        }
    }
    catch ( IllegalStateException e )
    {
        // THEN
        assertThat( e.getMessage(), containsString( "already contains" ) );
    }
}
private void createRandomData( int count ) throws Exception
{
    Config config = Config.defaults();
    RecordFormats recordFormats = RecordFormatSelector.selectForConfig( config, NullLogProvider.getInstance() );
    try ( RandomDataInput input = new RandomDataInput( count );
            JobScheduler jobScheduler = new ThreadPoolJobScheduler() )
    {
        BatchImporter importer = new ParallelBatchImporter( directory.databaseLayout(), fileSystemRule.get(), null, DEFAULT,
                NullLogService.getInstance(), ExecutionMonitors.invisible(), EMPTY, config, recordFormats, NO_MONITOR, jobScheduler );
        importer.doImport( input );
    }
}
@Test
void startTheDatabaseWithWrongVersionShouldFailAlsoWhenUpgradeIsAllowed() throws Throwable
{
    // given
    // create a store
    File databaseDirectory = testDirectory.databaseDir();
    GraphDatabaseService db = new TestGraphDatabaseFactory().newEmbeddedDatabase( databaseDirectory );
    try ( Transaction tx = db.beginTx() )
    {
        db.createNode();
        tx.success();
    }
    db.shutdown();

    // mess up the version in the metadata store
    String badStoreVersion = "bad";
    try ( FileSystemAbstraction fileSystem = new DefaultFileSystemAbstraction();
            ThreadPoolJobScheduler scheduler = new ThreadPoolJobScheduler();
            PageCache pageCache = createPageCache( fileSystem, scheduler ) )
    {
        MetaDataStore.setRecord( pageCache, testDirectory.databaseLayout().metadataStore(),
                MetaDataStore.Position.STORE_VERSION, MetaDataStore.versionStringToLong( badStoreVersion ) );
    }

    RuntimeException exception = assertThrows( RuntimeException.class,
            () -> new TestGraphDatabaseFactory().newEmbeddedDatabaseBuilder( databaseDirectory )
                    .setConfig( GraphDatabaseSettings.allow_upgrade, "true" ).newGraphDatabase() );
    assertTrue( exception.getCause() instanceof LifecycleException );
    assertTrue( exception.getCause().getCause() instanceof StoreUpgrader.UnexpectedUpgradingStoreVersionException );
}
@Test
public void shouldRespectDbConfig() throws Exception
{
    // GIVEN
    int size = 10;
    Config config = Config.defaults( stringMap(
            GraphDatabaseSettings.array_block_size.name(), String.valueOf( size ),
            GraphDatabaseSettings.string_block_size.name(), String.valueOf( size ) ) );

    // WHEN
    RecordFormats recordFormats = LATEST_RECORD_FORMATS;
    int headerSize = recordFormats.dynamic().getRecordHeaderSize();
    try ( JobScheduler jobScheduler = new ThreadPoolJobScheduler();
            BatchingNeoStores store = BatchingNeoStores.batchingNeoStores( storage.fileSystem(), storage.directory().absolutePath(),
                    recordFormats, DEFAULT, NullLogService.getInstance(), EMPTY, config, jobScheduler ) )
    {
        store.createNew();

        // THEN
        assertEquals( size + headerSize, store.getPropertyStore().getArrayStore().getRecordSize() );
        assertEquals( size + headerSize, store.getPropertyStore().getStringStore().getRecordSize() );
    }
}
@Test
public void shouldReportProgressOfNodeImport() throws Exception
{
    // given
    CapturingMonitor progress = new CapturingMonitor();
    HumanUnderstandableExecutionMonitor monitor = new HumanUnderstandableExecutionMonitor( progress, NO_EXTERNAL_MONITOR );
    IdType idType = INTEGER;
    Input input = new DataGeneratorInput( NODE_COUNT, RELATIONSHIP_COUNT, idType, Collector.EMPTY, random.seed(), 0,
            bareboneNodeHeader( idType, new Extractors( ';' ) ), bareboneRelationshipHeader( idType, new Extractors( ';' ) ),
            1, 1, 0, 0 );

    // when
    try ( JobScheduler jobScheduler = new ThreadPoolJobScheduler() )
    {
        new ParallelBatchImporter( storage.directory().databaseLayout(), storage.fileSystem(), storage.pageCache(), DEFAULT,
                NullLogService.getInstance(), monitor, EMPTY, defaults(), LATEST_RECORD_FORMATS, NO_MONITOR, jobScheduler )
                .doImport( input );

        // then
        progress.assertAllProgressReachedEnd();
    }
}
@Test
public void shouldImportDataComingFromCsvFiles() throws Exception
{
    // GIVEN
    Config dbConfig = Config.builder().withSetting( db_timezone, LogTimeZone.SYSTEM.name() ).build();
    try ( JobScheduler scheduler = new ThreadPoolJobScheduler() )
    {
        BatchImporter importer = new ParallelBatchImporter( directory.databaseLayout(), fileSystemRule.get(), null,
                smallBatchSizeConfig(), NullLogService.getInstance(), invisible(), AdditionalInitialIds.EMPTY, dbConfig,
                RecordFormatSelector.defaultFormat(), NO_MONITOR, scheduler );
        List<InputEntity> nodeData = randomNodeData();
        List<InputEntity> relationshipData = randomRelationshipData( nodeData );

        // WHEN
        importer.doImport( csv( nodeDataAsFile( nodeData ), relationshipDataAsFile( relationshipData ), IdType.STRING,
                lowBufferSize( COMMAS ), silentBadCollector( 0 ) ) );

        // THEN
        verifyImportedData( nodeData, relationshipData );
    }
}
/**
 * There was a problem where steps that panicked, in particular parallel CSV input parsing,
 * would hang the import entirely.
 */
@Test
public void shouldExitAndThrowExceptionOnPanic() throws Exception
{
    try ( JobScheduler jobScheduler = new ThreadPoolJobScheduler() )
    {
        BatchImporter importer = new ParallelBatchImporter( directory.databaseLayout(), fs, null, Configuration.DEFAULT,
                NullLogService.getInstance(), ExecutionMonitors.invisible(), AdditionalInitialIds.EMPTY, Config.defaults(),
                StandardV3_0.RECORD_FORMATS, NO_MONITOR, jobScheduler );
        Iterable<DataFactory> nodeData = datas( data( NO_DECORATOR, fileAsCharReadable( nodeCsvFileWithBrokenEntries() ) ) );
        Input brokenCsvInput = new CsvInput( nodeData, defaultFormatNodeFileHeader(),
                datas(), defaultFormatRelationshipFileHeader(), IdType.ACTUAL,
                csvConfigurationWithLowBufferSize(), new BadCollector( NullOutputStream.NULL_OUTPUT_STREAM, 0, 0 ) );
        importer.doImport( brokenCsvInput );
        fail( "Should have failed properly" );
    }
    catch ( InputException e )
    {
        // THEN
        assertTrue( e.getCause() instanceof DataAfterQuoteException );
        // and we managed to shut down properly
    }
}
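Taken together, these snippets share one lifecycle pattern: a ThreadPoolJobScheduler is created before the component that needs background work (page cache, batch importer, batching stores) and closed after it, most conveniently via try-with-resources so resources unwind in reverse declaration order. A minimal sketch of that pattern in isolation, using only the StandalonePageCacheFactory call seen earlier; the body comment marks where real work would go:

try ( FileSystemAbstraction fs = new DefaultFileSystemAbstraction();
        JobScheduler scheduler = new ThreadPoolJobScheduler();
        PageCache pageCache = StandalonePageCacheFactory.createPageCache( fs, scheduler ) )
{
    // Use pageCache here. On exit the page cache closes before the scheduler
    // backing it, and the file system closes last.
}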