// Shorthand accessor for the file system abstraction supplied by the test rules.
private FileSystemAbstraction fs()
{
    return rules.fileSystem();
}
/**
 * Creates a {@link StoreFactory} operating on the storage rule's database layout, using
 * default configuration, the rule's page cache and file system, and no-op logging.
 */
private StoreFactory getStoreFactory()
{
    DefaultIdGeneratorFactory idGeneratorFactory = new DefaultIdGeneratorFactory( storage.fileSystem() );
    return new StoreFactory( storage.directory().databaseLayout(), Config.defaults(), idGeneratorFactory,
            storage.pageCache(), storage.fileSystem(), NullLogProvider.getInstance(),
            EmptyVersionContextSupplier.EMPTY );
}
/**
 * Creates the index provider under test and a populator for it.
 * <p>
 * The property accessor mock is stubbed to throw on any value lookup, so any test that
 * accidentally reads node property values through it fails loudly.
 */
@Before
public void setup() throws IOException, EntityNotFoundException
{
    indexProvider = providerCreator.apply( this );
    // ensure the provider's root directory exists before the populator writes into it
    rules.fileSystem().mkdirs( indexProvider.directoryStructure().rootDirectory() );
    populator = indexProvider.getPopulator( descriptor, samplingConfig );
    // property reads through this accessor are not expected by these tests
    when( nodePropertyAccessor.getNodePropertyValue( anyLong(), anyInt() ) ).thenThrow( UnsupportedOperationException.class );
    // disable native access checks for the duration of the test; previous state is kept
    // so it can be restored afterwards — presumably in a matching teardown, TODO confirm
    prevAccessCheck = UnsafeUtil.exchangeNativeAccessCheckEnabled( false );
}
/**
 * Initializes the file system, database directory and the index provider under test
 * from the combined page-cache-and-dependencies rule.
 */
@Before
public void setup()
{
    fs = pageCacheAndDependenciesRule.fileSystem();
    graphDbDir = pageCacheAndDependenciesRule.directory().databaseDir();
    PageCache cache = pageCacheAndDependenciesRule.pageCache();
    indexProvider = testSuite.createIndexProvider( cache, fs, graphDbDir );
}
/**
 * Opens the property store together with its dynamic array and string stores.
 */
@Before
public void setupStore()
{
    StoreFactory factory = new StoreFactory( storage.directory().databaseLayout(), Config.defaults(),
            new DefaultIdGeneratorFactory( storage.fileSystem() ), storage.pageCache(), storage.fileSystem(),
            NullLogProvider.getInstance(), EmptyVersionContextSupplier.EMPTY );
    neoStores = factory.openNeoStores( true, StoreType.PROPERTY, StoreType.PROPERTY_ARRAY, StoreType.PROPERTY_STRING );
    propertyStore = neoStores.getPropertyStore();
}
/**
 * Makes sure the native schema index root directory exists before each test runs.
 */
@Before
public void setup() throws IOException
{
    File indexRoot = newProvider().directoryStructure().rootDirectory();
    rules.fileSystem().mkdirs( indexRoot );
}
/**
 * Parameterizes the test with each native index provider implementation.
 * <p>
 * Each entry bundles: a display name, two boolean capability flags (their exact meaning is
 * defined by the consuming test's parameter fields — TODO confirm against the field order),
 * a generator producing random values valid for the provider, and a factory creating the
 * provider instance from the test's rules.
 */
@Parameterized.Parameters( name = "{0}" )
public static Collection<Object[]> providers()
{
    Collection<Object[]> parameters = new ArrayList<>();
    // GenericNativeIndexProvider
    parameters.add( of( "generic", true, GenericNativeIndexProvider.parallelPopulation, RandomValues::nextValue,
            test -> new GenericNativeIndexProvider( test.directory(), test.rules.pageCache(), test.rules.fileSystem(), EMPTY, immediate(),false, defaults() ) ) );
    // NumberIndexProvider
    parameters.add( of( "number", true, false, RandomValues::nextNumberValue,
            test -> new NumberIndexProvider( test.rules.pageCache(), test.rules.fileSystem(), test.directory(), EMPTY, immediate(), false ) ) );
    // StringIndexProvider
    parameters.add( of( "string", true, false, RandomValues::nextAlphaNumericTextValue,
            test -> new StringIndexProvider( test.rules.pageCache(), test.rules.fileSystem(), test.directory(), EMPTY, immediate(), false ) ) );
    // SpatialIndexProvider
    parameters.add( of( "spatial", false, false, RandomValues::nextPointValue,
            test -> new SpatialIndexProvider( test.rules.pageCache(), test.rules.fileSystem(), test.directory(), EMPTY, immediate(), false, defaults() ) ) );
    // TemporalIndexProvider
    parameters.add( of( "temporal", true, false, RandomValues::nextTemporalValue,
            test -> new TemporalIndexProvider( test.rules.pageCache(), test.rules.fileSystem(), test.directory(), EMPTY, immediate(), false ) ) );
    return parameters;
}
/**
 * Closing the import logic before any diagnostic state has been gathered must still
 * report completion to the monitor, with a message noting that data statistics are
 * not available.
 */
@Test
public void closeImporterWithoutDiagnosticState() throws IOException
{
    ExecutionMonitor monitor = mock( ExecutionMonitor.class );
    try ( BatchingNeoStores stores = batchingNeoStoresWithExternalPageCache( storage.fileSystem(), storage.pageCache(), NULL, storage.directory().directory(), defaultFormat(), DEFAULT, getInstance(), EMPTY, defaults() ) )
    {
        //noinspection EmptyTryBlock
        try ( ImportLogic logic = new ImportLogic( storage.directory().directory(), storage.fileSystem(), stores, DEFAULT, getInstance(), monitor, defaultFormat(), NO_MONITOR ) )
        {
            // nothing to run in this import
            logic.success();
        }
    }
    // closing the (empty) import must still call done() with the no-statistics message
    verify( monitor ).done( eq( true ), anyLong(), contains( "Data statistics is not available." ) );
}
/**
 * Opens the property stores and wires up the record access, property creator and
 * traverser used by the tests.
 */
@Before
public void startStore()
{
    StoreFactory factory = new StoreFactory( storage.directory().databaseLayout(), Config.defaults(),
            new DefaultIdGeneratorFactory( storage.fileSystem() ), storage.pageCache(), storage.fileSystem(),
            NullLogProvider.getInstance(), EmptyVersionContextSupplier.EMPTY );
    neoStores = factory.openNeoStores( true, StoreType.PROPERTY, StoreType.PROPERTY_STRING, StoreType.PROPERTY_ARRAY );
    propertyStore = neoStores.getPropertyStore();
    records = new DirectRecordAccess<>( propertyStore, Loaders.propertyLoader( propertyStore ) );
    creator = new PropertyCreator( propertyStore, new PropertyTraverser() );
}
/**
 * Opens all neo stores and prepares a property creator plus a fresh node record
 * acting as the property owner in the tests.
 */
@Before
public void setup()
{
    StoreFactory factory = new StoreFactory( storage.directory().databaseLayout(), Config.defaults(),
            new DefaultIdGeneratorFactory( storage.fileSystem() ), storage.pageCache(), storage.fileSystem(),
            NullLogProvider.getInstance(), EmptyVersionContextSupplier.EMPTY );
    neoStores = factory.openAllNeoStores( true );
    creator = new PropertyCreator( neoStores.getPropertyStore(), new PropertyTraverser() );
    owner = neoStores.getNodeStore().newRecord();
}
private GBPTree<RawBytes,RawBytes> createIndex( Layout<RawBytes,RawBytes> layout ) throws IOException { // some random padding PageCache pageCache = storage.pageCacheRule().getPageCache( storage.fileSystem(), config().withAccessChecks( true ) ); return new GBPTreeBuilder<>( pageCache, storage.directory().file( "index" ), layout ).build(); } }
/**
 * Opens all neo stores with the page cache memory limited to 8m.
 */
@Before
public void setupStores()
{
    DefaultIdGeneratorFactory idGeneratorFactory = new DefaultIdGeneratorFactory( storage.fileSystem() );
    StoreFactory storeFactory = new StoreFactory( storage.directory().databaseLayout(),
            Config.defaults( pagecache_memory, "8m" ), idGeneratorFactory, storage.pageCache(),
            storage.fileSystem(), NullLogProvider.getInstance(), EMPTY );
    neoStores = storeFactory.openAllNeoStores( true );
}
@Test public void dropShouldDeleteEntireIndexFolder() { // given File root = storage.directory().directory( "root" ); IndexDirectoryStructure directoryStructure = IndexDirectoryStructure.directoriesByProvider( root ).forProvider( GenericNativeIndexProvider.DESCRIPTOR ); long indexId = 8; File indexDirectory = directoryStructure.directoryForIndex( indexId ); File indexFile = new File( indexDirectory, "my-index" ); StoreIndexDescriptor descriptor = IndexDescriptorFactory.forSchema( SchemaDescriptorFactory.forLabel( 1, 1 ) ).withId( indexId ); IndexSpecificSpaceFillingCurveSettingsCache spatialSettings = mock( IndexSpecificSpaceFillingCurveSettingsCache.class ); FileSystemAbstraction fs = storage.fileSystem(); GenericNativeIndexAccessor accessor = new GenericNativeIndexAccessor( storage.pageCache(), fs, indexFile, new GenericLayout( 1, spatialSettings ), immediate(), EMPTY, descriptor, spatialSettings, directoryStructure, mock( SpaceFillingCurveConfiguration.class ) ); // when accessor.drop(); // then assertFalse( fs.fileExists( indexDirectory ) ); } }
/**
 * Creating new batching neo stores on top of a database directory that already
 * contains nodes or relationships must fail rather than silently overwrite data.
 */
@Test
public void shouldNotOpenStoreWithNodesOrRelationshipsInIt() throws Exception
{
    // GIVEN a database directory that already holds some data
    someDataInTheDatabase();

    // WHEN creating new stores on top of it
    try ( JobScheduler jobScheduler = new ThreadPoolJobScheduler() )
    {
        RecordFormats recordFormats = RecordFormatSelector.selectForConfig( Config.defaults(), NullLogProvider.getInstance() );
        try ( BatchingNeoStores store = BatchingNeoStores.batchingNeoStores( storage.fileSystem(), storage.directory().databaseDir(), recordFormats, DEFAULT, NullLogService.getInstance(), EMPTY, Config.defaults(), jobScheduler ) )
        {
            store.createNew();
            // reaching this point means the existing data was not detected
            fail( "Should fail on existing data" );
        }
    }
    catch ( IllegalStateException e )
    {
        // THEN the failure message points at the pre-existing data
        assertThat( e.getMessage(), containsString( "already contains" ) );
    }
}
/**
 * Populates the database directory with two nodes joined by one relationship, so that
 * later store creation encounters pre-existing data.
 */
private void someDataInTheDatabase()
{
    GraphDatabaseService database = new TestGraphDatabaseFactory()
            .setFileSystem( new UncloseableDelegatingFileSystemAbstraction( storage.fileSystem() ) )
            .newImpermanentDatabase( storage.directory().databaseDir() );
    try ( Transaction transaction = database.beginTx() )
    {
        database.createNode().createRelationshipTo( database.createNode(), MyRelTypes.TEST );
        transaction.success();
    }
    finally
    {
        database.shutdown();
    }
}
}
@Test public void shouldNotDecideToAllocateDoubleRelationshipRecordUnitsonLargeAmountOfRelationshipsOnUnsupportedFormat() throws Exception { // given RecordFormats formats = LATEST_RECORD_FORMATS; try ( BatchingNeoStores stores = BatchingNeoStores.batchingNeoStoresWithExternalPageCache( storage.fileSystem(), storage.pageCache(), PageCacheTracer.NULL, storage.directory().absolutePath(), formats, DEFAULT, NullLogService.getInstance(), EMPTY, Config.defaults() ) ) { stores.createNew(); Estimates estimates = Inputs.knownEstimates( 0, DOUBLE_RELATIONSHIP_RECORD_UNIT_THRESHOLD << 1, 0, 0, 0, 0, 0 ); // when boolean doubleUnits = stores.determineDoubleRelationshipRecordUnits( estimates ); // then assertFalse( doubleUnits ); } }
@Test public void shouldNotDecideToAllocateDoubleRelationshipRecordUnitsonLowAmountOfRelationshipsOnSupportedFormat() throws Exception { // given RecordFormats formats = new ForcedSecondaryUnitRecordFormats( LATEST_RECORD_FORMATS ); try ( BatchingNeoStores stores = BatchingNeoStores.batchingNeoStoresWithExternalPageCache( storage.fileSystem(), storage.pageCache(), PageCacheTracer.NULL, storage.directory().absolutePath(), formats, DEFAULT, NullLogService.getInstance(), EMPTY, Config.defaults() ) ) { stores.createNew(); Estimates estimates = Inputs.knownEstimates( 0, DOUBLE_RELATIONSHIP_RECORD_UNIT_THRESHOLD >> 1, 0, 0, 0, 0, 0 ); // when boolean doubleUnits = stores.determineDoubleRelationshipRecordUnits( estimates ); // then assertFalse( doubleUnits ); } }
@Test public void shouldRespectDbConfig() throws Exception { // GIVEN int size = 10; Config config = Config.defaults( stringMap( GraphDatabaseSettings.array_block_size.name(), String.valueOf( size ), GraphDatabaseSettings.string_block_size.name(), String.valueOf( size ) ) ); // WHEN RecordFormats recordFormats = LATEST_RECORD_FORMATS; int headerSize = recordFormats.dynamic().getRecordHeaderSize(); try ( JobScheduler jobScheduler = new ThreadPoolJobScheduler(); BatchingNeoStores store = BatchingNeoStores.batchingNeoStores( storage.fileSystem(), storage.directory().absolutePath(), recordFormats, DEFAULT, NullLogService.getInstance(), EMPTY, config, jobScheduler ) ) { store.createNew(); // THEN assertEquals( size + headerSize, store.getPropertyStore().getArrayStore().getRecordSize() ); assertEquals( size + headerSize, store.getPropertyStore().getStringStore().getRecordSize() ); } }
@Test public void shouldDecideToAllocateDoubleRelationshipRecordUnitsOnLargeAmountOfRelationshipsOnSupportedFormat() throws Exception { // given RecordFormats formats = new ForcedSecondaryUnitRecordFormats( LATEST_RECORD_FORMATS ); try ( PageCache pageCache = storage.pageCache(); BatchingNeoStores stores = BatchingNeoStores.batchingNeoStoresWithExternalPageCache( storage.fileSystem(), pageCache, PageCacheTracer.NULL, storage.directory().absolutePath(), formats, DEFAULT, NullLogService.getInstance(), EMPTY, Config.defaults() ) ) { stores.createNew(); Estimates estimates = Inputs.knownEstimates( 0, DOUBLE_RELATIONSHIP_RECORD_UNIT_THRESHOLD << 1, 0, 0, 0, 0, 0 ); // when boolean doubleUnits = stores.determineDoubleRelationshipRecordUnits( estimates ); // then assertTrue( doubleUnits ); } }
/**
 * Running a full parallel batch import over generated input must drive every
 * progress stage reported by the human-understandable execution monitor to its end.
 */
@Test
public void shouldReportProgressOfNodeImport() throws Exception
{
    // given a monitor that captures progress and a deterministic generated input
    CapturingMonitor progress = new CapturingMonitor();
    HumanUnderstandableExecutionMonitor monitor = new HumanUnderstandableExecutionMonitor( progress, NO_EXTERNAL_MONITOR );
    IdType idType = INTEGER;
    // input of NODE_COUNT nodes / RELATIONSHIP_COUNT relationships, seeded from the test's random rule
    Input input = new DataGeneratorInput( NODE_COUNT, RELATIONSHIP_COUNT, idType, Collector.EMPTY, random.seed(), 0, bareboneNodeHeader( idType, new Extractors( ';' ) ), bareboneRelationshipHeader( idType, new Extractors( ';' ) ), 1, 1, 0, 0 );

    // when running the import to completion
    try ( JobScheduler jobScheduler = new ThreadPoolJobScheduler() )
    {
        new ParallelBatchImporter( storage.directory().databaseLayout(), storage.fileSystem(), storage.pageCache(), DEFAULT, NullLogService.getInstance(), monitor, EMPTY, defaults(), LATEST_RECORD_FORMATS, NO_MONITOR, jobScheduler ).doImport( input );

        // then every reported progress stage must have reached 100%
        progress.assertAllProgressReachedEnd();
    }
}