/**
 * Return the namespace of this view.
 * <p>
 * Delegates to the backing database ({@code db}).
 */
@Override
public String getNamespace() {

    return db.getNamespace();

}
.getIndexManager() .getResourceLocator() .locate(store.getNamespace(), store.getTimestamp()); store.getNamespace());
/**
 * Return the namespace of this view.
 * <p>
 * Delegates to the backing database ({@code db}).
 */
@Override
public String getNamespace() {

    return db.getNamespace();

}
.getIndexManager() .getResourceLocator() .locate(store.getNamespace(), store.getTimestamp()); store.getNamespace());
/**
 * Core ctor. You must use this variant for a scale-out triple store.
 * <p>
 * To create a {@link BigdataSail} backed by an {@link IBigdataFederation}
 * use the {@link ScaleOutTripleStore} ctor and then
 * {@link AbstractTripleStore#create()} the triple store if it does not
 * exist.
 * <p>
 * Note: Since BLZG-2041, this delegates through to the core constructor
 * which accepts (namespace, IIndexManager).
 *
 * @param database
 *            An existing {@link AbstractTripleStore}.
 * @param mainIndexManager
 *            When <i>database</i> is a {@link TempTripleStore}, this is
 *            the {@link IIndexManager} used to resolve the
 *            {@link QueryEngine}. Otherwise it must be the same object as
 *            the <i>database</i>.
 */
public BigdataSail(final AbstractTripleStore database,
        final IIndexManager mainIndexManager) {

    // Delegate to the core (namespace, IIndexManager, IIndexManager) ctor.
    this(database.getNamespace(), database.getIndexManager(),
            mainIndexManager);

}
.getIndexManager() .getResourceLocator() .locate(store.getNamespace(), store.getTimestamp()); store.getNamespace());
/**
 * Core ctor. You must use this variant for a scale-out triple store.
 * <p>
 * To create a {@link BigdataSail} backed by an {@link IBigdataFederation}
 * use the {@link ScaleOutTripleStore} ctor and then
 * {@link AbstractTripleStore#create()} the triple store if it does not
 * exist.
 * <p>
 * Note: Since BLZG-2041, this delegates through to the core constructor
 * which accepts (namespace, IIndexManager).
 *
 * @param database
 *            An existing {@link AbstractTripleStore}.
 * @param mainIndexManager
 *            When <i>database</i> is a {@link TempTripleStore}, this is
 *            the {@link IIndexManager} used to resolve the
 *            {@link QueryEngine}. Otherwise it must be the same object as
 *            the <i>database</i>.
 */
public BigdataSail(final AbstractTripleStore database,
        final IIndexManager mainIndexManager) {

    // Delegate to the core (namespace, IIndexManager, IIndexManager) ctor.
    this(database.getNamespace(), database.getIndexManager(),
            mainIndexManager);

}
/**
 * Evaluate the stored query for the given source solutions.
 * <p>
 * A {@link StoredQueryTask} is submitted via
 * {@link AbstractApiTask#submitApiTask} against the triple store's
 * namespace and timestamp, and the resulting {@link TupleQueryResult} is
 * wrapped as an {@link ICloseableIterator}.
 *
 * @param bindingSets
 *            the source solutions fed into the stored query.
 * @return an iterator visiting the solutions produced by the query.
 * @throws Exception
 *             if the query task fails.
 */
@Override
public ICloseableIterator<BindingSet> call(
        final BindingSet[] bindingSets) throws Exception {

    if (log.isInfoEnabled()) {
        log.info(bindingSets.length);
        log.info(Arrays.toString(bindingSets));
        log.info(serviceParams);
    }

    final AbstractTripleStore tripleStore = createParams
            .getTripleStore();

    // Submit the stored query for asynchronous evaluation.
    final Future<TupleQueryResult> ft = AbstractApiTask.submitApiTask(
            tripleStore.getIndexManager(),
            new StoredQueryTask(tripleStore.getNamespace(), tripleStore
                    .getTimestamp(), bindingSets));

    try {

        // Block until the query task completes (or fails).
        final TupleQueryResult tupleQueryResult = ft.get();

        return new Sesame2BigdataIterator<BindingSet, QueryEvaluationException>(
                tupleQueryResult);

    } finally {

        // No-op once the future is complete; interrupts the task if
        // get() threw (ensures the task does not run on after an error).
        ft.cancel(true/* mayInterruptIfRunning */);

    }

}
/**
 * The {@link SPORelation} (triples and their access paths).
 *
 * @return the lazily resolved {@link SPORelation} for this store's
 *         namespace and timestamp.
 */
final public SPORelation getSPORelation() {

    if (spoRelationRef.get() == null) {

        /*
         * Note: double-checked locking pattern (mostly non-blocking). Only
         * synchronized if not yet resolved. The AtomicReference is reused
         * as the monitor to serialize the resolution of the SPORelation in
         * order to have that operation not contend with any other part of
         * the API.
         */
        synchronized (this) {

            if (spoRelationRef.get() == null) {

                // Locate "<namespace>.<NAME_SPO_RELATION>" at this
                // store's timestamp via the resource locator.
                spoRelationRef.set((SPORelation) getIndexManager()
                        .getResourceLocator().locate(
                                getNamespace() + "."
                                        + SPORelation.NAME_SPO_RELATION,
                                getTimestamp()));

            }

        }

    }

    return spoRelationRef.get();

}
/**
 * The {@link SPORelation} (triples and their access paths).
 *
 * @return the lazily resolved {@link SPORelation} for this store's
 *         namespace and timestamp.
 */
final public SPORelation getSPORelation() {

    if (spoRelationRef.get() == null) {

        /*
         * Note: double-checked locking pattern (mostly non-blocking). Only
         * synchronized if not yet resolved. The AtomicReference is reused
         * as the monitor to serialize the resolution of the SPORelation in
         * order to have that operation not contend with any other part of
         * the API.
         */
        synchronized (this) {

            if (spoRelationRef.get() == null) {

                // Locate "<namespace>.<NAME_SPO_RELATION>" at this
                // store's timestamp via the resource locator.
                spoRelationRef.set((SPORelation) getIndexManager()
                        .getResourceLocator().locate(
                                getNamespace() + "."
                                        + SPORelation.NAME_SPO_RELATION,
                                getTimestamp()));

            }

        }

    }

    return spoRelationRef.get();

}
/**
 * Evaluate the stored query for the given source solutions.
 * <p>
 * A {@link StoredQueryTask} is submitted via
 * {@link AbstractApiTask#submitApiTask} against the triple store's
 * namespace and timestamp, and the resulting {@link TupleQueryResult} is
 * wrapped as an {@link ICloseableIterator}.
 *
 * @param bindingSets
 *            the source solutions fed into the stored query.
 * @return an iterator visiting the solutions produced by the query.
 * @throws Exception
 *             if the query task fails.
 */
@Override
public ICloseableIterator<BindingSet> call(
        final BindingSet[] bindingSets) throws Exception {

    if (log.isInfoEnabled()) {
        log.info(bindingSets.length);
        log.info(Arrays.toString(bindingSets));
        log.info(serviceParams);
    }

    final AbstractTripleStore tripleStore = createParams
            .getTripleStore();

    // Submit the stored query for asynchronous evaluation.
    final Future<TupleQueryResult> ft = AbstractApiTask.submitApiTask(
            tripleStore.getIndexManager(),
            new StoredQueryTask(tripleStore.getNamespace(), tripleStore
                    .getTimestamp(), bindingSets));

    try {

        // Block until the query task completes (or fails).
        final TupleQueryResult tupleQueryResult = ft.get();

        return new Sesame2BigdataIterator<BindingSet, QueryEvaluationException>(
                tupleQueryResult);

    } finally {

        // No-op once the future is complete; interrupts the task if
        // get() threw (ensures the task does not run on after an error).
        ft.cancel(true/* mayInterruptIfRunning */);

    }

}
this.namespace = kb.getNamespace();
/**
 * Return the object used to access the as-configured graph.
 *
 * @param kb
 *            the triple store whose graph will be accessed.
 * @return a {@link BigdataGraphAccessor} reading against the last commit
 *         time of the backing index manager.
 */
private IGraphAccessor newGraphAccessor(final AbstractTripleStore kb) {

    /*
     * Use a read-only view (sampling depends on access to the BTree rather
     * than the ReadCommittedIndex).
     */
    final BigdataGraphAccessor graphAccessor = new BigdataGraphAccessor(
            kb.getIndexManager(), kb.getNamespace(), kb
                    .getIndexManager().getLastCommitTime());

    return graphAccessor;

}
/**
 * Return the object used to access the as-configured graph.
 *
 * @param kb
 *            the triple store whose graph will be accessed.
 * @return a {@link BigdataGraphAccessor} reading against the last commit
 *         time of the backing index manager.
 */
private IGraphAccessor newGraphAccessor(final AbstractTripleStore kb) {

    /*
     * Use a read-only view (sampling depends on access to the BTree rather
     * than the ReadCommittedIndex).
     */
    final BigdataGraphAccessor graphAccessor = new BigdataGraphAccessor(
            kb.getIndexManager(), kb.getNamespace(), kb
                    .getIndexManager().getLastCommitTime());

    return graphAccessor;

}
final String namespace = store.getNamespace();
/**
 * The {@link LexiconRelation} handles all things related to the indices
 * mapping RDF {@link Value}s onto internal 64-bit term identifiers.
 *
 * @return the lazily resolved {@link LexiconRelation}; remains
 *         <code>null</code> when the lexicon is disabled
 *         (<code>lexicon == false</code>).
 */
final synchronized public LexiconRelation getLexiconRelation() {

    if (lexiconRelation == null && lexicon) {

        long t = getTimestamp();

        if (TimestampUtility.isReadWriteTx(t)) {

            /*
             * A read-write tx must use the unisolated view of the lexicon.
             */
            t = ITx.UNISOLATED;

        }

        // Locate "<namespace>.<NAME_LEXICON_RELATION>" at the effective
        // timestamp via the resource locator.
        lexiconRelation = (LexiconRelation) getIndexManager()
                .getResourceLocator().locate(
                        getNamespace() + "."
                                + LexiconRelation.NAME_LEXICON_RELATION, t);

    }

    return lexiconRelation;

}

// Lazily resolved lexicon relation (access guarded by the synchronized
// accessor above).
private LexiconRelation lexiconRelation;
/**
 * The {@link LexiconRelation} handles all things related to the indices
 * mapping RDF {@link Value}s onto internal 64-bit term identifiers.
 *
 * @return the lazily resolved {@link LexiconRelation}; remains
 *         <code>null</code> when the lexicon is disabled
 *         (<code>lexicon == false</code>).
 */
final synchronized public LexiconRelation getLexiconRelation() {

    if (lexiconRelation == null && lexicon) {

        long t = getTimestamp();

        if (TimestampUtility.isReadWriteTx(t)) {

            /*
             * A read-write tx must use the unisolated view of the lexicon.
             */
            t = ITx.UNISOLATED;

        }

        // Locate "<namespace>.<NAME_LEXICON_RELATION>" at the effective
        // timestamp via the resource locator.
        lexiconRelation = (LexiconRelation) getIndexManager()
                .getResourceLocator().locate(
                        getNamespace() + "."
                                + LexiconRelation.NAME_LEXICON_RELATION, t);

    }

    return lexiconRelation;

}

// Lazily resolved lexicon relation (access guarded by the synchronized
// accessor above).
private LexiconRelation lexiconRelation;
/**
 * Register an {@link IChangeLog} listener that will manage the maintenance
 * of the describe cache.
 * <p>
 * If either the cache connection or the DESCRIBE cache for this triple
 * store's namespace and timestamp is unavailable, this method is a no-op.
 */
@Override
public void startConnection(final BigdataSailConnection conn) {

    /**
     * TODO This really should not be using getCacheConnection() but rather
     * getExistingCacheConnection(). I need to figure out the pattern that
     * brings the cache connection into existence and who is responsible for
     * invoking it. The problem is that there are multiple entry points,
     * including AST evaluation, the DescribeServlet, and the test suite.
     * AST2BOpContext does this, but it is not always created before we need
     * the cache connection.
     */
    final ICacheConnection cacheConn = CacheConnectionFactory
            .getCacheConnection(conn.getBigdataSail().getQueryEngine());

    if (cacheConn == null) {

        // Cache is not enabled.
        return;

    }

    final AbstractTripleStore tripleStore = conn.getTripleStore();

    final IDescribeCache describeCache = cacheConn.getDescribeCache(
            tripleStore.getNamespace(), tripleStore.getTimestamp());

    if (describeCache == null) {

        // DESCRIBE cache is not enabled.
        return;

    }

    // Wire the cache-maintenance listener into the connection.
    conn.addChangeLog(new DescribeCacheChangeLogListener(describeCache));

}
/**
 * Register an {@link IChangeLog} listener that will manage the maintenance
 * of the describe cache.
 * <p>
 * If either the cache connection or the DESCRIBE cache for this triple
 * store's namespace and timestamp is unavailable, this method is a no-op.
 */
@Override
public void startConnection(final BigdataSailConnection conn) {

    /**
     * TODO This really should not be using getCacheConnection() but rather
     * getExistingCacheConnection(). I need to figure out the pattern that
     * brings the cache connection into existence and who is responsible for
     * invoking it. The problem is that there are multiple entry points,
     * including AST evaluation, the DescribeServlet, and the test suite.
     * AST2BOpContext does this, but it is not always created before we need
     * the cache connection.
     */
    final ICacheConnection cacheConn = CacheConnectionFactory
            .getCacheConnection(conn.getBigdataSail().getQueryEngine());

    if (cacheConn == null) {

        // Cache is not enabled.
        return;

    }

    final AbstractTripleStore tripleStore = conn.getTripleStore();

    final IDescribeCache describeCache = cacheConn.getDescribeCache(
            tripleStore.getNamespace(), tripleStore.getTimestamp());

    if (describeCache == null) {

        // DESCRIBE cache is not enabled.
        return;

    }

    // Wire the cache-maintenance listener into the connection.
    conn.addChangeLog(new DescribeCacheChangeLogListener(describeCache));

}
final String namespace = store.getNamespace();