// Collected usages and definitions of BatchInserters.inserter(...) — factory overloads and their call sites/tests.
@Test public void givenBatchInserterWhenArrayPropertyUpdated4TimesThenShouldNotFail() throws IOException { BatchInserter batchInserter = BatchInserters.inserter( testDirectory.databaseDir(), fileSystemRule.get() ); long nodeId = batchInserter.createNode( Collections.emptyMap() ); for ( int i = 0; i < 4; i++ ) { batchInserter.setNodeProperty( nodeId, "array", new byte[]{2, 3, 98, 1, 43, 50, 3, 33, 51, 55, 116, 16, 23, 56, 9, -10, 1, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1} ); } batchInserter.getNodeProperties( nodeId ); //fails here batchInserter.shutdown(); } }
public static BatchInserter inserter( File databaseDirectory, Map<String,String> config ) throws IOException
{
    // This overload creates (and therefore owns) the file system; the returned
    // wrapper closes it when the inserter is shut down.
    DefaultFileSystemAbstraction ownedFs = createFileSystem();
    BatchInserter delegate = inserter( databaseDirectory, ownedFs, config, loadKernelExtension() );
    return new FileSystemClosingBatchInserter( delegate, (IndexConfigStoreProvider) delegate, ownedFs );
}
public static BatchInserter inserter( File databaseDirectory, FileSystemAbstraction fs, Map<String,String> config ) throws IOException
{
    // Caller supplies the file system; delegate with the default kernel extensions.
    return inserter( databaseDirectory, fs, config, loadKernelExtension() );
}
/**
 * Get a {@link BatchInserter} given a store directory.
 *
 * @param databaseDirectory directory where particular neo4j database is located
 * @return a new {@link BatchInserter}
 * @throws IOException if there is an IO error
 */
public static BatchInserter inserter( File databaseDirectory ) throws IOException
{
    // Default config, internally created file system — closed together with the inserter.
    DefaultFileSystemAbstraction createdFileSystem = createFileSystem();
    BatchInserter wrapped = inserter( databaseDirectory, createdFileSystem, stringMap() );
    return new FileSystemClosingBatchInserter( wrapped, (IndexConfigStoreProvider) wrapped, createdFileSystem );
}
@Test
void explicitIndexPopulationWithBunchOfFields()
{
    assertTimeout( ofMillis( TEST_TIMEOUT ), () ->
    {
        BatchInserter inserter = BatchInserters.inserter( directory.databaseDir() );
        LuceneBatchInserterIndexProvider indexProvider = new LuceneBatchInserterIndexProvider( inserter );
        try
        {
            BatchInserterIndex index = indexProvider.nodeIndex( "node_auto_index",
                    stringMap( IndexManager.PROVIDER, "lucene", "type", "fulltext" ) );
            // 2000 properties of ~200 random characters each — enough to stress the fulltext writer.
            Map<String,Object> manyProperties = IntStream.range( 0, 2000 ).boxed()
                    .collect( toMap( i -> Integer.toString( i ), i -> randomAlphabetic( 200 ) ) );
            long nodeId = inserter.createNode( manyProperties, Label.label( "NODE" ) );
            index.add( nodeId, manyProperties );
        }
        finally
        {
            indexProvider.shutdown();
            inserter.shutdown();
        }
    } );
}
}
@Test
void createBatchIndexFromAnyIndexStoreProvider() throws Exception
{
    // Every factory overload must yield an inserter that works as an index store provider.
    createEndCloseIndexProvider( BatchInserters.inserter( getStoreDir() ) );
    createEndCloseIndexProvider( BatchInserters.inserter( getStoreDir(), fileSystem ) );
    createEndCloseIndexProvider( BatchInserters.inserter( getStoreDir(), getConfig() ) );
    createEndCloseIndexProvider( BatchInserters.inserter( getStoreDir(), getConfigWithProvider(), getExtensions() ) );
    createEndCloseIndexProvider( BatchInserters.inserter( getStoreDir(), fileSystem, getConfig() ) );
    createEndCloseIndexProvider( BatchInserters.inserter( getStoreDir(), fileSystem, getConfigWithProvider(), getExtensions() ) );
}
private static void copyStore(String sourceDir, String targetDir, Set<String> ignoreRelTypes, Set<String> ignoreProperties, Set<String> ignoreLabels, Set<String> deleteNodesWithLabels, boolean stableNodeIds) throws Exception { final File target = new File(targetDir); final File source = new File(sourceDir); if (target.exists()) { // FileUtils.deleteRecursively(target); throw new IllegalArgumentException("Target Directory already exists "+target); } if (!source.exists()) throw new IllegalArgumentException("Source Database does not exist " + source); Pair<Long, Long> highestIds = getHighestNodeId(source); String pageCacheSize = System.getProperty("dbms.pagecache.memory","2G"); BatchInserter targetDb = BatchInserters.inserter(target, MapUtil.stringMap("dbms.pagecache.memory", pageCacheSize)); BatchInserter sourceDb = BatchInserters.inserter(source, MapUtil.stringMap("dbms.pagecache.memory", System.getProperty("dbms.pagecache.memory.source",pageCacheSize))); Flusher flusher = getFlusher(sourceDb); logs = new PrintWriter(new FileWriter(new File(target, "store-copy.log"))); PrimitiveLongLongMap copiedNodeIds = copyNodes(sourceDb, targetDb, ignoreProperties, ignoreLabels, deleteNodesWithLabels, highestIds.first(),flusher, stableNodeIds); copyRelationships(sourceDb, targetDb, ignoreRelTypes, ignoreProperties, copiedNodeIds, highestIds.other(), flusher); targetDb.shutdown(); try { sourceDb.shutdown(); } catch (Exception e) { logs.append(String.format("Noncritical error closing the source database:%n%s", Exceptions.stringify(e))); } logs.close(); if (stableNodeIds) copyIndex(source, target); }
public static BatchInserter inserter( File databaseDirectory, Map<String, String> config,
        Iterable<KernelExtensionFactory<?>> kernelExtensions ) throws IOException
{
    // The file system is created here, so the closing wrapper owns its lifecycle.
    DefaultFileSystemAbstraction createdFs = createFileSystem();
    BatchInserterImpl impl = new BatchInserterImpl( databaseDirectory, createdFs, config, kernelExtensions );
    return new FileSystemClosingBatchInserter( impl, impl, createdFs );
}
@Test public void lazyLoadWithinWriteTransaction() throws Exception { // Given FileSystemAbstraction fileSystem = fs.get(); BatchInserter inserter = BatchInserters.inserter( testDirectory.databaseDir(), fileSystem ); int count = 3000; long nodeId = inserter.createNode( mapWithManyProperties( count /* larger than initial property index load threshold */ ) ); inserter.shutdown(); GraphDatabaseService db = new TestGraphDatabaseFactory().setFileSystem( fileSystem ).newImpermanentDatabase( testDirectory.databaseDir() ); // When try ( Transaction tx = db.beginTx() ) { db.createNode(); Node node = db.getNodeById( nodeId ); // Then assertEquals( count, Iterables.count( node.getPropertyKeys() ) ); tx.success(); } finally { db.shutdown(); } }
@Test public void testCreatesStoreLockFile() throws Exception { // Given DatabaseLayout databaseLayout = testDirectory.databaseLayout(); // When BatchInserter inserter = BatchInserters.inserter( databaseLayout.databaseDirectory(), fileSystemRule.get() ); // Then assertThat( databaseLayout.getStoreLayout().storeLockFile().exists(), equalTo( true ) ); inserter.shutdown(); }
@Test
public void automaticallyCloseCreatedFileSystemOnShutdown() throws Exception
{
    // Each overload that creates its own file system must also close it on shutdown.
    verifyInserterFileSystemClose( inserter( getStoreDir() ) );
    verifyInserterFileSystemClose( inserter( getStoreDir(), getConfig() ) );
    verifyInserterFileSystemClose( inserter( getStoreDir(), getConfig(), getKernelExtensions() ) );
}
public static BatchInserter inserter( File databaseDirectory, FileSystemAbstraction fs ) throws IOException
{
    // Caller-supplied file system, empty config, default kernel extensions.
    return inserter( databaseDirectory, fs, stringMap(), loadKernelExtension() );
}
/**
 * Get a {@link BatchInserter} given a store directory.
 *
 * @param databaseDirectory directory where particular neo4j database is located
 * @return a new {@link BatchInserter}
 * @throws IOException if there is an IO error
 */
public static BatchInserter inserter( File databaseDirectory ) throws IOException
{
    // No file system or config given: build a default file system and an empty config,
    // and hand file-system ownership to the closing wrapper.
    DefaultFileSystemAbstraction defaultFs = createFileSystem();
    BatchInserter inner = inserter( databaseDirectory, defaultFs, stringMap() );
    return new FileSystemClosingBatchInserter( inner, (IndexConfigStoreProvider) inner, defaultFs );
}
public static BatchInserter inserter( File databaseDirectory, Map<String, String> config,
        Iterable<KernelExtensionFactory<?>> kernelExtensions ) throws IOException
{
    // Build the concrete inserter over a freshly created file system and wrap it so
    // that shutting the inserter down also closes that file system.
    DefaultFileSystemAbstraction fsAbstraction = createFileSystem();
    BatchInserterImpl batchInserterImpl = new BatchInserterImpl( databaseDirectory, fsAbstraction, config, kernelExtensions );
    return new FileSystemClosingBatchInserter( batchInserterImpl, batchInserterImpl, fsAbstraction );
}
@Test( expected = ReservedIdException.class ) public void makeSureCantCreateNodeWithMagicNumber() throws IOException { // given File path = dbRule.databaseLayout().databaseDirectory(); BatchInserter inserter = BatchInserters.inserter( path, fileSystemRule.get() ); try { // when long id = IdGeneratorImpl.INTEGER_MINUS_ONE; inserter.createNode( id, null ); // then throws } finally { inserter.shutdown(); } } }
@Test
public void testBatchIndexToAutoIndex() throws IOException
{
    BatchInserter inserter = BatchInserters.inserter(new File(path));
    BatchInserterIndexProvider indexProvider = new LuceneBatchInserterIndexProvider(inserter);
    BatchInserterIndex batchIndex = indexProvider.nodeIndex("node_auto_index", MapUtil.stringMap("type", "exact"));

    // Index one node through the batch API and make sure it is queryable before shutdown.
    long nodeId = inserter.createNode(MapUtil.map("foo", "bar"));
    batchIndex.add(nodeId, MapUtil.map("foo", "bar"));
    batchIndex.flush();
    assertThat("Batch indexed node can be retrieved", batchIndex.get("foo", "bar").next(), is(nodeId));

    indexProvider.shutdown();
    inserter.shutdown();

    // Reopen as a regular database: batch-written index entries survive, but auto-indexing stays off.
    graphDb = getGraphDb();
    try (Transaction tx = graphDb.beginTx()) {
        assertThat("AutoIndex is not enabled after reopening the graph",
                graphDb.index().getNodeAutoIndexer().isEnabled(), is(false));
        assertThat("AutoIndexed properties are not maintained after closing the graph",
                graphDb.index().getNodeAutoIndexer().getAutoIndexedProperties(), is(empty()));
        assertThat("Batch index properties are in the index",
                graphDb.index().getNodeAutoIndexer().getAutoIndex().query("foo", "bar").size(), is(1));
        tx.success();
    }
}
@Test
public void testHonorsPassedInParams() throws Exception
{
    BatchInserter inserter = BatchInserters.inserter( testDirectory.databaseDir(), fileSystemRule.get(),
            stringMap( GraphDatabaseSettings.pagecache_memory.name(), "280K" ) );
    // Reach into the internals to see how large a page cache was actually built.
    NeoStores stores = ReflectionUtil.getPrivateField( inserter, "neoStores", NeoStores.class );
    PageCache cache = ReflectionUtil.getPrivateField( stores, "pageCache", PageCache.class );
    inserter.shutdown();
    long mappedMemoryTotalSize = MuninnPageCache.memoryRequiredForPages( cache.maxCachedPages() );
    // The configured 280K should land between 270K and 290K of mapped memory.
    assertThat( "memory mapped config is active", mappedMemoryTotalSize,
            is( allOf( greaterThan( kibiBytes( 270 ) ), lessThan( kibiBytes( 290 ) ) ) ) );
}