/**
 * Convenience accessor: resolves a single component of the given type from the
 * running database's dependency resolver.
 */
private <T> T getDependency( Class<T> clazz )
{
    T resolved = db.getDependencyResolver().resolveDependency( clazz );
    return resolved;
}
}
/**
 * Looks up an instance of {@code clazz} from the database's dependency resolver.
 */
private <T> T getInstanceFromDb( Class<T> clazz )
{
    T instance = db.getDependencyResolver().resolveDependency( clazz );
    return instance;
}
/**
 * Resolves the {@link GroupingRecoveryCleanupWorkCollector} from the test database rule.
 */
private GroupingRecoveryCleanupWorkCollector getGroupingRecoveryCleanupWorkCollector()
{
    GroupingRecoveryCleanupWorkCollector collector =
            dbRule.getDependencyResolver().resolveDependency( GroupingRecoveryCleanupWorkCollector.class );
    return collector;
}
/**
 * Resolves a component of the given type from the test database rule's dependency resolver.
 */
private <T> T getDependency( Class<T> clazz )
{
    T dependency = dbRule.getDependencyResolver().resolveDependency( clazz );
    return dependency;
}
/**
 * Creates a fresh {@link GraphProperties} proxy via the embedded proxy SPI
 * resolved from the database.
 */
private GraphProperties graphProperties()
{
    EmbeddedProxySPI proxySpi = db.getDependencyResolver().resolveDependency( EmbeddedProxySPI.class );
    return proxySpi.newGraphPropertiesProxy();
}
}
/**
 * Begins a new implicit kernel transaction with authentication disabled.
 *
 * @throws TransactionFailureException if the transaction cannot be started
 */
private Transaction transaction() throws TransactionFailureException
{
    return db.getDependencyResolver()
            .resolveDependency( Kernel.class )
            .beginTransaction( implicit, LoginContext.AUTH_DISABLED );
}
@Test public void shouldBeAbleToUseResultsOfPointProcedureAsInputToDistanceFunction() throws Exception { // given procedure that produces a point Procedures procedures = graphDb.getDependencyResolver().resolveDependency( Procedures.class ); procedures.registerProcedure( PointProcs.class ); // when calling procedure that produces a point Result result = graphDb.execute( "CALL spatial.point(144.317718, -37.031738) YIELD point " + "RETURN distance(point({longitude: 144.317718, latitude: -37.031738}), point) AS dist" ); // then Double dist = (Double) result.next().get( "dist" ); assertThat( dist, equalTo( 0.0 ) ); }
/**
 * Captures the database layout and the file system abstraction of the running
 * database before each test.
 */
@Before
public void setup()
{
    fs = db.getDependencyResolver().resolveDependency( FileSystemAbstraction.class );
    databaseLayout = db.databaseLayout();
}
@Test public void shouldBeAbleToUseResultsOfPointGeometryProcedureAsInputToDistanceFunction() throws Exception { // given procedure that produces a point Procedures procedures = graphDb.getDependencyResolver().resolveDependency( Procedures.class ); procedures.registerProcedure( PointProcs.class ); // when calling procedure that produces a point Result result = graphDb.execute( "CALL spatial.pointGeometry(144.317718, -37.031738) YIELD geometry " + "RETURN distance(point({longitude: 144.317718, latitude: -37.031738}), geometry) AS dist" ); // then Object dist1 = result.next().get( "dist" ); Double dist = (Double) dist1; assertThat( dist, equalTo( 0.0 ) ); }
/**
 * Runs a detach-delete of a node through the kernel API concurrently with an
 * index drop, delegating the race orchestration to the shared helper.
 */
@Test
public void shouldHandleNodeDetachDeleteConcurrentlyWithIndexDrop() throws Throwable
{
    shouldHandleIndexDropConcurrentlyWithOperation( nodeId ->
            db.getDependencyResolver()
                    .resolveDependency( ThreadToStatementContextBridge.class )
                    .getKernelTransactionBoundToThisThread( true )
                    .dataWrite()
                    .nodeDetachDelete( nodeId ) );
}
/**
 * Opens an explicit transaction as the given security context, attaches the
 * supplied meta data to the current statement, performs a small write, and commits.
 */
private void runTransaction( LoginContext loginContext, Map<String,Object> metaData )
{
    try ( Transaction tx = db.beginTransaction( KernelTransaction.Type.explicit, loginContext );
          Statement stmt = db.getDependencyResolver()
                  .resolveDependency( ThreadToStatementContextBridge.class ).get() )
    {
        stmt.queryRegistration().setMetaData( metaData );
        db.createNode();
        tx.success();
    }
}
// Read the query re-planning knobs from the live database configuration:
// how far statistics may diverge before a re-plan, and the minimum interval between re-plans.
Config config = db.getDependencyResolver().resolveDependency( Config.class );
double divergenceThreshold = config.get( GraphDatabaseSettings.query_statistics_divergence_threshold );
long replanInterval = config.get( GraphDatabaseSettings.cypher_min_replan_interval ).toMillis();
/**
 * Creates a node carrying {@code LABEL}, stores the label's token id in the
 * {@code labelId} field for later assertions, and returns the committed node.
 */
private Node createTestNode()
{
    Node created;
    try ( Transaction tx = dbRule.beginTx() )
    {
        created = dbRule.createNode( LABEL );
        KernelTransaction kernelTx = dbRule.getDependencyResolver()
                .resolveDependency( ThreadToStatementContextBridge.class )
                .getKernelTransactionBoundToThisThread( true );
        labelId = kernelTx.tokenRead().nodeLabel( LABEL.name() );
        tx.success();
    }
    return created;
}
// Verifies that stopping the data source while a transaction with constraint-index
// updates is still open closes that transaction cleanly instead of throwing.
@Test
public void shutdownWhileFinishingTransactionWithIndexUpdates()
{
    // Make sure the uniqueness constraint and its backing index are fully online first.
    createConstraint( database );
    waitIndexesOnline( database );
    try ( Transaction transaction = database.beginTx() )
    {
        // Touch the constrained property so the open transaction carries index updates.
        Node node = database.createNode( constraintIndexLabel );
        node.setProperty( UNIQUE_PROPERTY_NAME, indexProvider.getAndIncrement() );
        DependencyResolver dependencyResolver = database.getDependencyResolver();
        NeoStoreDataSource dataSource = dependencyResolver.resolveDependency( NeoStoreDataSource.class );
        LifeSupport dataSourceLife = dataSource.getLife();
        // The listener observes whether our transaction gets closed during shutdown.
        TransactionCloseListener closeListener = new TransactionCloseListener( transaction );
        dataSourceLife.addLifecycleListener( closeListener );
        // Stop the data source while the transaction is still open.
        dataSource.stop();
        assertTrue( "Transaction should be closed and no exception should be thrown.",
                closeListener.isTransactionClosed() );
    }
}
// Verifies that a uniqueness constraint can still be dropped after its constraint
// record has been corrupted (marked not-in-use) directly in the schema store.
@Test
public void shouldDropUniquenessConstraintWhereConstraintRecordIsMissing() throws Exception
{
    // given
    try ( Transaction tx = db.beginTx() )
    {
        db.schema().constraintFor( label ).assertPropertyIsUnique( key ).create();
        tx.success();
    }
    // when intentionally breaking the schema by setting the backing index rule to unused
    RecordStorageEngine storageEngine = db.getDependencyResolver().resolveDependency( RecordStorageEngine.class );
    SchemaStore schemaStore = storageEngine.testAccessNeoStores().getSchemaStore();
    SchemaRule indexRule = single( filter( rule -> rule instanceof ConstraintRule, schemaStore.loadAllSchemaRules() ) );
    setSchemaRecordNotInUse( schemaStore, indexRule.getId() );
    // At this point the SchemaCache doesn't know about this change so we have to reload it
    storageEngine.loadSchemaCache();
    try ( Transaction tx = db.beginTx() )
    {
        // We don't use single() here, because it is okay for the schema cache reload to clean up after us.
        db.schema().getConstraints( label ).forEach( ConstraintDefinition::drop );
        db.schema().getIndexes( label ).forEach( IndexDefinition::drop );
        tx.success();
    }
    // then: neither the constraint nor the backing index remains
    try ( Transaction ignore = db.beginTx() )
    {
        assertFalse( db.schema().getConstraints().iterator().hasNext() );
        assertFalse( db.schema().getIndexes().iterator().hasNext() );
    }
}
// Verifies that a uniqueness constraint can still be dropped after its backing
// index rule has had its owning-constraint reference nulled out in the schema store.
@Test
public void shouldDropUniquenessConstraintWithBackingIndexHavingNoOwner() throws Exception
{
    // given
    try ( Transaction tx = db.beginTx() )
    {
        db.schema().constraintFor( label ).assertPropertyIsUnique( key ).create();
        tx.success();
    }
    // when intentionally breaking the schema by setting the backing index rule to unused
    RecordStorageEngine storageEngine = db.getDependencyResolver().resolveDependency( RecordStorageEngine.class );
    SchemaStore schemaStore = storageEngine.testAccessNeoStores().getSchemaStore();
    SchemaRule indexRule = single( filter( rule -> rule instanceof StoreIndexDescriptor, schemaStore.loadAllSchemaRules() ) );
    setOwnerNull( schemaStore, (StoreIndexDescriptor) indexRule );
    // At this point the SchemaCache doesn't know about this change so we have to reload it
    storageEngine.loadSchemaCache();
    try ( Transaction tx = db.beginTx() )
    {
        single( db.schema().getConstraints( label ).iterator() ).drop();
        tx.success();
    }
    // then: neither the constraint nor the orphaned backing index remains
    try ( Transaction ignore = db.beginTx() )
    {
        assertFalse( db.schema().getConstraints().iterator().hasNext() );
        assertFalse( db.schema().getIndexes().iterator().hasNext() );
    }
}
// Verifies that a uniqueness constraint can still be dropped after its backing
// index record has been corrupted (marked not-in-use) directly in the schema store.
@Test
public void shouldDropUniquenessConstraintWithBackingIndexNotInUse()
{
    // given
    try ( Transaction tx = db.beginTx() )
    {
        db.schema().constraintFor( label ).assertPropertyIsUnique( key ).create();
        tx.success();
    }
    // when intentionally breaking the schema by setting the backing index rule to unused
    RecordStorageEngine storageEngine = db.getDependencyResolver().resolveDependency( RecordStorageEngine.class );
    SchemaStore schemaStore = storageEngine.testAccessNeoStores().getSchemaStore();
    SchemaRule indexRule = single( filter( rule -> rule instanceof StoreIndexDescriptor, schemaStore.loadAllSchemaRules() ) );
    setSchemaRecordNotInUse( schemaStore, indexRule.getId() );
    // At this point the SchemaCache doesn't know about this change so we have to reload it
    storageEngine.loadSchemaCache();
    try ( Transaction tx = db.beginTx() )
    {
        single( db.schema().getConstraints( label ).iterator() ).drop();
        tx.success();
    }
    // then: neither the constraint nor the backing index remains
    try ( Transaction ignore = db.beginTx() )
    {
        assertFalse( db.schema().getConstraints().iterator().hasNext() );
        assertFalse( db.schema().getIndexes().iterator().hasNext() );
    }
}
// Reach into the record storage engine's raw schema store and pick out the
// single persisted constraint rule.
RecordStorageEngine storageEngine = db.getDependencyResolver().resolveDependency( RecordStorageEngine.class );
SchemaStore schemaStore = storageEngine.testAccessNeoStores().getSchemaStore();
SchemaRule constraintRule = single( filter( rule -> rule instanceof ConstraintRule, schemaStore.loadAllSchemaRules() ) );
/**
 * Commits a few small write transactions interleaved with two explicit log-file
 * rotations, leaving the database with multiple transaction log files.
 *
 * @throws IOException if a log rotation fails
 */
private void writeTransactionsAndRotateTwice() throws IOException
{
    LogRotation logRotation = db.getDependencyResolver().resolveDependency( LogRotation.class );
    // Apparently we always keep an extra log file around even when the rotation
    // threshold is reached, so produce two rotations.
    try ( Transaction tx = db.beginTx() )
    {
        db.createNode();
        tx.success();
    }
    logRotation.rotateLogFile();
    try ( Transaction tx = db.beginTx() )
    {
        db.createNode();
        tx.success();
    }
    logRotation.rotateLogFile();
    // A couple of extra committed transactions after the last rotation.
    try ( Transaction tx = db.beginTx() )
    {
        db.createNode();
        tx.success();
    }
    try ( Transaction tx = db.beginTx() )
    {
        db.createNode();
        tx.success();
    }
}
/**
 * Asserts that for iteration {@code i} every node in {@code data} is indexed
 * exactly once in its corresponding index (first array against index A, the
 * other array against index B), using a read-only kernel transaction.
 */
private void verifyThatThereAreExactlyOneIndexEntryPerNodeInTheIndexes( int i, Pair<long[],long[]> data ) throws Exception
{
    Kernel kernel = db.getDependencyResolver().resolveDependency( Kernel.class );
    try ( org.neo4j.internal.kernel.api.Transaction tx = kernel.beginTransaction( implicit, AnonymousContext.read() ) )
    {
        // Resolve the token ids for both label/key pairs of this iteration.
        int labelAId = tx.tokenRead().nodeLabel( labelA( i ).name() );
        int keyAId = tx.tokenRead().propertyKey( keyA( i ) );
        int labelBId = tx.tokenRead().nodeLabel( labelB( i ).name() );
        int keyBId = tx.tokenRead().propertyKey( keyB( i ) );
        IndexReference indexA = TestIndexDescriptorFactory.forLabel( labelAId, keyAId );
        IndexReference indexB = TestIndexDescriptorFactory.forLabel( labelBId, keyBId );
        for ( int j = 0; j < NODES_PER_INDEX; j++ )
        {
            // Each node's property value equals its own id, so count entries for that value.
            long nodeAId = data.first()[j];
            assertEquals( 1, tx.schemaRead().nodesCountIndexed( indexA, nodeAId, keyAId, Values.of( nodeAId ) ) );
            long nodeBId = data.other()[j];
            assertEquals( 1, tx.schemaRead().nodesCountIndexed( indexB, nodeBId, keyBId, Values.of( nodeBId ) ) );
        }
    }
}
}