/**
 * @return the absolute base directory managed by this test's rule.
 */
private File baseDir()
{
    return rules.directory().absolutePath();
}
}
/**
 * Resolves the rule-managed index file and the layout under test before each test case.
 */
@Before
public void setUp()
{
    indexFile = deps.directory().file( "index" );
    layout = getLayout();
}
/**
 * Parameterization: one factory-producing function per {@link NumberArrayFactory} flavour,
 * the last one backed by the rule's page cache and test directory.
 */
@Parameters
public static Collection<Function<PageCacheAndDependenciesRule,NumberArrayFactory>> data()
{
    return Arrays.asList(
            rule -> HEAP,
            rule -> OFF_HEAP,
            rule -> AUTO_WITHOUT_PAGECACHE,
            rule -> CHUNKED_FIXED_SIZE,
            rule -> new PageCachedNumberArrayFactory( rule.pageCache(), rule.directory().directory() ) );
}
/**
 * @return a directory-structure factory rooted at this test's database directory,
 * scoped to {@code PROVIDER}'s sub-provider layout.
 */
private IndexDirectoryStructure.Factory directory()
{
    return directoriesBySubProvider(
            directoriesByProvider( rules.directory().databaseDir() ).forProvider( PROVIDER ) );
}
/**
 * Pulls the shared fixtures out of the combined rule and creates the index provider under test.
 */
@Before
public void setup()
{
    fs = pageCacheAndDependenciesRule.fileSystem();
    graphDbDir = pageCacheAndDependenciesRule.directory().databaseDir();
    indexProvider = testSuite.createIndexProvider( pageCacheAndDependenciesRule.pageCache(), fs, graphDbDir );
}
@Test public void closeImporterWithoutDiagnosticState() throws IOException { ExecutionMonitor monitor = mock( ExecutionMonitor.class ); try ( BatchingNeoStores stores = batchingNeoStoresWithExternalPageCache( storage.fileSystem(), storage.pageCache(), NULL, storage.directory().directory(), defaultFormat(), DEFAULT, getInstance(), EMPTY, defaults() ) ) { //noinspection EmptyTryBlock try ( ImportLogic logic = new ImportLogic( storage.directory().directory(), storage.fileSystem(), stores, DEFAULT, getInstance(), monitor, defaultFormat(), NO_MONITOR ) ) { // nothing to run in this import logic.success(); } } verify( monitor ).done( eq( true ), anyLong(), contains( "Data statistics is not available." ) ); }
private GBPTree<RawBytes,RawBytes> createIndex( Layout<RawBytes,RawBytes> layout ) throws IOException { // some random padding PageCache pageCache = storage.pageCacheRule().getPageCache( storage.fileSystem(), config().withAccessChecks( true ) ); return new GBPTreeBuilder<>( pageCache, storage.directory().file( "index" ), layout ).build(); } }
/**
 * @return a {@link StoreFactory} wired to this test's rule-managed layout,
 * file system and page cache, with default configuration.
 */
private StoreFactory getStoreFactory()
{
    return new StoreFactory(
            storage.directory().databaseLayout(),
            Config.defaults(),
            new DefaultIdGeneratorFactory( storage.fileSystem() ),
            storage.pageCache(),
            storage.fileSystem(),
            NullLogProvider.getInstance(),
            EmptyVersionContextSupplier.EMPTY );
}
/**
 * Opens all neo stores before each test, capping page-cache memory at 8m to keep the footprint small.
 */
@Before
public void setupStores()
{
    FileSystemAbstraction fs = storage.fileSystem();
    StoreFactory storeFactory = new StoreFactory(
            storage.directory().databaseLayout(),
            Config.defaults( pagecache_memory, "8m" ),
            new DefaultIdGeneratorFactory( fs ),
            storage.pageCache(),
            fs,
            NullLogProvider.getInstance(),
            EMPTY );
    neoStores = storeFactory.openAllNeoStores( true );
}
@Test public void dropShouldDeleteEntireIndexFolder() { // given File root = storage.directory().directory( "root" ); IndexDirectoryStructure directoryStructure = IndexDirectoryStructure.directoriesByProvider( root ).forProvider( GenericNativeIndexProvider.DESCRIPTOR ); long indexId = 8; File indexDirectory = directoryStructure.directoryForIndex( indexId ); File indexFile = new File( indexDirectory, "my-index" ); StoreIndexDescriptor descriptor = IndexDescriptorFactory.forSchema( SchemaDescriptorFactory.forLabel( 1, 1 ) ).withId( indexId ); IndexSpecificSpaceFillingCurveSettingsCache spatialSettings = mock( IndexSpecificSpaceFillingCurveSettingsCache.class ); FileSystemAbstraction fs = storage.fileSystem(); GenericNativeIndexAccessor accessor = new GenericNativeIndexAccessor( storage.pageCache(), fs, indexFile, new GenericLayout( 1, spatialSettings ), immediate(), EMPTY, descriptor, spatialSettings, directoryStructure, mock( SpaceFillingCurveConfiguration.class ) ); // when accessor.drop(); // then assertFalse( fs.fileExists( indexDirectory ) ); } }
@Test public void shouldNotOpenStoreWithNodesOrRelationshipsInIt() throws Exception { // GIVEN someDataInTheDatabase(); // WHEN try ( JobScheduler jobScheduler = new ThreadPoolJobScheduler() ) { RecordFormats recordFormats = RecordFormatSelector.selectForConfig( Config.defaults(), NullLogProvider.getInstance() ); try ( BatchingNeoStores store = BatchingNeoStores.batchingNeoStores( storage.fileSystem(), storage.directory().databaseDir(), recordFormats, DEFAULT, NullLogService.getInstance(), EMPTY, Config.defaults(), jobScheduler ) ) { store.createNew(); fail( "Should fail on existing data" ); } } catch ( IllegalStateException e ) { // THEN assertThat( e.getMessage(), containsString( "already contains" ) ); } }
/**
 * Opens only the property-related stores (property, array, string) before each test.
 */
@Before
public void setupStore()
{
    StoreFactory storeFactory = new StoreFactory(
            storage.directory().databaseLayout(),
            Config.defaults(),
            new DefaultIdGeneratorFactory( storage.fileSystem() ),
            storage.pageCache(),
            storage.fileSystem(),
            NullLogProvider.getInstance(),
            EmptyVersionContextSupplier.EMPTY );
    neoStores = storeFactory.openNeoStores( true, StoreType.PROPERTY, StoreType.PROPERTY_ARRAY, StoreType.PROPERTY_STRING );
    propertyStore = neoStores.getPropertyStore();
}
@Test public void shouldNotDecideToAllocateDoubleRelationshipRecordUnitsonLargeAmountOfRelationshipsOnUnsupportedFormat() throws Exception { // given RecordFormats formats = LATEST_RECORD_FORMATS; try ( BatchingNeoStores stores = BatchingNeoStores.batchingNeoStoresWithExternalPageCache( storage.fileSystem(), storage.pageCache(), PageCacheTracer.NULL, storage.directory().absolutePath(), formats, DEFAULT, NullLogService.getInstance(), EMPTY, Config.defaults() ) ) { stores.createNew(); Estimates estimates = Inputs.knownEstimates( 0, DOUBLE_RELATIONSHIP_RECORD_UNIT_THRESHOLD << 1, 0, 0, 0, 0, 0 ); // when boolean doubleUnits = stores.determineDoubleRelationshipRecordUnits( estimates ); // then assertFalse( doubleUnits ); } }
/**
 * Seeds the test directory with real data: spins up an impermanent database on the
 * rule's directory, commits one relationship between two new nodes, and shuts down.
 */
private void someDataInTheDatabase()
{
    GraphDatabaseService database = new TestGraphDatabaseFactory()
            .setFileSystem( new UncloseableDelegatingFileSystemAbstraction( storage.fileSystem() ) )
            .newImpermanentDatabase( storage.directory().databaseDir() );
    try ( Transaction tx = database.beginTx() )
    {
        database.createNode().createRelationshipTo( database.createNode(), MyRelTypes.TEST );
        tx.success();
    }
    finally
    {
        // always release the database so its files are flushed and closed
        database.shutdown();
    }
}
}
@Test public void shouldNotDecideToAllocateDoubleRelationshipRecordUnitsonLowAmountOfRelationshipsOnSupportedFormat() throws Exception { // given RecordFormats formats = new ForcedSecondaryUnitRecordFormats( LATEST_RECORD_FORMATS ); try ( BatchingNeoStores stores = BatchingNeoStores.batchingNeoStoresWithExternalPageCache( storage.fileSystem(), storage.pageCache(), PageCacheTracer.NULL, storage.directory().absolutePath(), formats, DEFAULT, NullLogService.getInstance(), EMPTY, Config.defaults() ) ) { stores.createNew(); Estimates estimates = Inputs.knownEstimates( 0, DOUBLE_RELATIONSHIP_RECORD_UNIT_THRESHOLD >> 1, 0, 0, 0, 0, 0 ); // when boolean doubleUnits = stores.determineDoubleRelationshipRecordUnits( estimates ); // then assertFalse( doubleUnits ); } }
@Test public void shouldRespectDbConfig() throws Exception { // GIVEN int size = 10; Config config = Config.defaults( stringMap( GraphDatabaseSettings.array_block_size.name(), String.valueOf( size ), GraphDatabaseSettings.string_block_size.name(), String.valueOf( size ) ) ); // WHEN RecordFormats recordFormats = LATEST_RECORD_FORMATS; int headerSize = recordFormats.dynamic().getRecordHeaderSize(); try ( JobScheduler jobScheduler = new ThreadPoolJobScheduler(); BatchingNeoStores store = BatchingNeoStores.batchingNeoStores( storage.fileSystem(), storage.directory().absolutePath(), recordFormats, DEFAULT, NullLogService.getInstance(), EMPTY, config, jobScheduler ) ) { store.createNew(); // THEN assertEquals( size + headerSize, store.getPropertyStore().getArrayStore().getRecordSize() ); assertEquals( size + headerSize, store.getPropertyStore().getStringStore().getRecordSize() ); } }
@Test public void shouldDecideToAllocateDoubleRelationshipRecordUnitsOnLargeAmountOfRelationshipsOnSupportedFormat() throws Exception { // given RecordFormats formats = new ForcedSecondaryUnitRecordFormats( LATEST_RECORD_FORMATS ); try ( PageCache pageCache = storage.pageCache(); BatchingNeoStores stores = BatchingNeoStores.batchingNeoStoresWithExternalPageCache( storage.fileSystem(), pageCache, PageCacheTracer.NULL, storage.directory().absolutePath(), formats, DEFAULT, NullLogService.getInstance(), EMPTY, Config.defaults() ) ) { stores.createNew(); Estimates estimates = Inputs.knownEstimates( 0, DOUBLE_RELATIONSHIP_RECORD_UNIT_THRESHOLD << 1, 0, 0, 0, 0, 0 ); // when boolean doubleUnits = stores.determineDoubleRelationshipRecordUnits( estimates ); // then assertTrue( doubleUnits ); } }
/**
 * Opens the property stores and prepares record access plus a {@link PropertyCreator}
 * for each test.
 */
@Before
public void startStore()
{
    StoreFactory storeFactory = new StoreFactory(
            storage.directory().databaseLayout(),
            Config.defaults(),
            new DefaultIdGeneratorFactory( storage.fileSystem() ),
            storage.pageCache(),
            storage.fileSystem(),
            NullLogProvider.getInstance(),
            EmptyVersionContextSupplier.EMPTY );
    neoStores = storeFactory.openNeoStores( true, StoreType.PROPERTY, StoreType.PROPERTY_STRING, StoreType.PROPERTY_ARRAY );
    propertyStore = neoStores.getPropertyStore();
    records = new DirectRecordAccess<>( propertyStore, Loaders.propertyLoader( propertyStore ) );
    creator = new PropertyCreator( propertyStore, new PropertyTraverser() );
}
/**
 * Opens all neo stores and prepares a {@link PropertyCreator} plus a fresh node record
 * to act as property owner for each test.
 */
@Before
public void setup()
{
    StoreFactory storeFactory = new StoreFactory(
            storage.directory().databaseLayout(),
            Config.defaults(),
            new DefaultIdGeneratorFactory( storage.fileSystem() ),
            storage.pageCache(),
            storage.fileSystem(),
            NullLogProvider.getInstance(),
            EmptyVersionContextSupplier.EMPTY );
    neoStores = storeFactory.openAllNeoStores( true );
    creator = new PropertyCreator( neoStores.getPropertyStore(), new PropertyTraverser() );
    owner = neoStores.getNodeStore().newRecord();
}
@Test public void shouldReportProgressOfNodeImport() throws Exception { // given CapturingMonitor progress = new CapturingMonitor(); HumanUnderstandableExecutionMonitor monitor = new HumanUnderstandableExecutionMonitor( progress, NO_EXTERNAL_MONITOR ); IdType idType = INTEGER; Input input = new DataGeneratorInput( NODE_COUNT, RELATIONSHIP_COUNT, idType, Collector.EMPTY, random.seed(), 0, bareboneNodeHeader( idType, new Extractors( ';' ) ), bareboneRelationshipHeader( idType, new Extractors( ';' ) ), 1, 1, 0, 0 ); // when try ( JobScheduler jobScheduler = new ThreadPoolJobScheduler() ) { new ParallelBatchImporter( storage.directory().databaseLayout(), storage.fileSystem(), storage.pageCache(), DEFAULT, NullLogService.getInstance(), monitor, EMPTY, defaults(), LATEST_RECORD_FORMATS, NO_MONITOR, jobScheduler ).doImport( input ); // then progress.assertAllProgressReachedEnd(); } }