/**
 * Sets a {@link DataSource} to use for the RDB document and blob
 * stores, including {@link RDBOptions}.
 *
 * @return this
 */
public RDBDocumentNodeStoreBuilder setRDBConnection(DataSource ds, RDBOptions options) {
    RDBDocumentStore documentStore = new RDBDocumentStore(ds, this, options);
    this.documentStoreSupplier = ofInstance(documentStore);
    if (blobStore == null) {
        // no blob store configured yet: default to an RDB-backed blob store
        // on the same DataSource, managed by the garbage collector
        GarbageCollectableBlobStore rdbBlobStore = new RDBBlobStore(ds, options);
        setGCBlobStore(rdbBlobStore);
    }
    return thisBuilder();
}
/**
 * Reads a document for the given id directly from the database, bypassing
 * the in-memory nodes cache. When {@code cachedDoc} is supplied, its
 * modcount/modified stamps are captured so the cached copy can be reused.
 *
 * NOTE(review): this snippet appears truncated by extraction — {@code row}
 * is used without a visible definition and the method body is incomplete;
 * code kept verbatim.
 */
@Nullable
private <T extends Document> T readDocumentUncached(Collection<T> collection, String id, NodeDocument cachedDoc) {
    Connection connection = null;
    RDBTableMetaData tmd = getTable(collection);
    final Stopwatch watch = startWatch();
    boolean docFound = true;
    try {
        // stamps of the caller-supplied cached copy, -1 when absent
        long lastmodcount = -1, lastmodified = -1;
        if (cachedDoc != null) {
            lastmodcount = modcountOf(cachedDoc);
            lastmodified = modifiedOf(cachedDoc);
            return castAsT(cachedDoc);
        } else {
            return convertFromDBObject(collection, row);
/**
 * Finds a document by id with no freshness constraint: any cached copy,
 * however old, is acceptable.
 *
 * @see #find(Collection, String, int)
 */
@Override
public <T extends Document> T find(Collection<T> collection, String id) {
    // Integer.MAX_VALUE == "unbounded cache age"
    return find(collection, id, Integer.MAX_VALUE);
}
/**
 * Removes a single document from the store.
 * Rolls back the connection on failure and always records removal stats.
 *
 * @param collection the collection the document belongs to
 * @param id the id of the document to remove
 * @throws DocumentStoreException (via handleException) when the delete fails
 */
private <T extends Document> void delete(Collection<T> collection, String id) {
    RDBTableMetaData tmd = getTable(collection);
    Stopwatch watch = startWatch();
    Connection connection = null;
    try {
        connection = this.ch.getRWConnection();
        db.delete(connection, tmd, Collections.singletonList(id));
        connection.commit();
    } catch (Exception ex) {
        // undo any partial work before surfacing the failure
        this.ch.rollbackConnection(connection);
        throw handleException("removing " + id, ex, collection, id);
    } finally {
        this.ch.closeConnection(connection);
        // one document removed, whether or not the delete succeeded
        stats.doneRemove(watch.elapsed(TimeUnit.NANOSECONDS), collection, 1);
    }
}
/**
 * Reads a document, serving NODES lookups from the nodes cache when the
 * cached entry is younger than {@code maxCacheAge} (milliseconds);
 * non-NODES collections always go straight to the database.
 *
 * NOTE(review): this snippet appears truncated/garbled by extraction —
 * {@code cachedDoc} is used without a visible definition, braces do not
 * balance, and cache-hit handling is partially missing; code kept verbatim.
 */
private <T extends Document> T readDocumentCached(final Collection<T> collection, final String id, int maxCacheAge) {
    if (collection != Collection.NODES) {
        // only NODES documents are cached
        return readDocumentUncached(collection, id, null);
    } else {
        NodeDocument doc = null;
        if (maxCacheAge == Integer.MAX_VALUE || System.currentTimeMillis() - lastCheckTime < maxCacheAge) {
            stats.doneFindCached(Collection.NODES, id);
            return castAsT(unwrap(doc));
            try (CacheLock lock = acquireLockFor(id)) {
                // drop the stale cached entry before re-reading
                invalidateNodesCache(id, true);
                doc = null;
                NodeDocument ndoc = (NodeDocument) readDocumentUncached(collection, id, cachedDoc);
                if (ndoc != null) {
                    // seal before caching so the cached instance is immutable
                    ndoc.seal();
                    doc = wrap(ndoc);
                    nodesCache.put(doc);
                    return castAsT(unwrap(doc));
                } catch (ExecutionException e) {
                    throw new IllegalStateException("Failed to load document with " + id, e);
/**
 * Reads multiple documents directly from the database, bypassing the cache.
 *
 * @param collection the collection to read from
 * @param keys the ids to fetch; ids with no matching row are simply absent
 *        from the result
 * @return a map from document id to document for every row found
 * @throws DocumentStoreException when the read fails
 */
private <T extends Document> Map<String, T> readDocumentsUncached(Collection<T> collection, Set<String> keys) {
    Map<String, T> result = new HashMap<String, T>();
    RDBTableMetaData tmd = getTable(collection);
    Connection connection = null;
    try {
        connection = this.ch.getROConnection();
        List<RDBRow> rows = db.read(connection, tmd, keys);
        for (int i = 0, n = rows.size(); i < n; i++) {
            // null out the slot so each row becomes collectible while the
            // remaining rows are still being converted
            RDBRow row = rows.set(i, null);
            T doc = convertFromDBObject(collection, row);
            result.put(doc.getId(), doc);
        }
        connection.commit();
    } catch (Exception ex) {
        throw asDocumentStoreException(ex, "trying to read: " + keys);
    } finally {
        this.ch.closeConnection(connection);
    }
    return result;
}
/**
 * Creates the table with legacy schema 0, then reopens the store upgrading
 * to schema 1 and verifies the VERSION column appears and writes still work.
 */
@Test
public void init0then1() {
    RDBOptions options = new RDBOptions().tablePrefix("T0T1").initialSchema(0).upgradeToSchema(0).dropTablesOnClose(true);
    RDBDocumentStore storeV0 = null;
    RDBDocumentStore storeV1 = null;
    try {
        // first store: schema 0 only, no VERSION column expected
        storeV0 = new RDBDocumentStore(this.ds, new DocumentMK.Builder(), options);
        assertFalse(storeV0.getTable(Collection.NODES).hasVersion());

        // second store on the same table: upgrade in place to schema 1
        storeV1 = new RDBDocumentStore(this.ds, new DocumentMK.Builder(),
                new RDBOptions().tablePrefix("T0T1").initialSchema(0).upgradeToSchema(1));
        assertTrue(storeV1.getTable(Collection.NODES).hasVersion());

        // the upgraded table must still accept inserts
        UpdateOp insert = new UpdateOp(Utils.getIdFromPath("/foo"), true);
        assertTrue(storeV1.create(Collection.NODES, Collections.singletonList(insert)));
    } finally {
        if (storeV1 != null) {
            storeV1.dispose();
        }
        if (storeV0 != null) {
            storeV0.dispose();
        }
    }
}
@Test public void autoFixOAK7855() { RDBOptions op = new RDBOptions().tablePrefix("OAK7855").dropTablesOnClose(true); RDBDocumentStore rdb = null; try { rdb = new RDBDocumentStore(this.ds, new DocumentMK.Builder(), op); RDBTableMetaData meta = rdb.getTable(Collection.NODES); assertEquals(op.getTablePrefix() + "_NODES", meta.getName()); assertTrue(meta.hasVersion()); String id = Utils.getIdFromPath("/foo"); UpdateOp testInsert = new UpdateOp(id, true); assertTrue(rdb.create(Collection.NODES, Collections.singletonList(testInsert))); UpdateOp testUpdate = new UpdateOp(id, false); // set the invalid split doc type introduced by OAK-7855 testUpdate.set(NodeDocument.SD_TYPE, 0); assertNotNull(rdb.findAndUpdate(Collection.NODES, testUpdate)); rdb.getNodeDocumentCache().invalidate(id); NodeDocument doc = rdb.find(Collection.NODES, id); assertNotNull(doc); assertEquals(SplitDocType.NONE, doc.getSplitDocType()); } finally { if (rdb != null) { rdb.dispose(); } } }
/**
 * Queries documents in the key range (fromKey, toKey), applying the given
 * conditions and exclusion patterns, feeding NODES results into the cache
 * via a {@link CacheChangesTracker}.
 *
 * NOTE(review): this snippet appears truncated by extraction — {@code row}
 * and {@code result} are used without visible definitions and several
 * enclosing loops are missing; code kept verbatim.
 */
private <T extends Document> List<T> internalQuery(Collection<T> collection, String fromKey, String toKey,
        List<String> excludeKeyPatterns, List<QueryCondition> conditions, int limit) {
    Connection connection = null;
    RDBTableMetaData tmd = getTable(collection);
    // reject conditions on properties we have no index for
    for (QueryCondition cond : conditions) {
        if (!INDEXEDPROPERTIES.contains(cond.getPropertyName())) {
            final Stopwatch watch = startWatch();
            int resultSize = 0;
            try (CacheChangesTracker tracker = obtainTracker(collection, fromKey, toKey)) {
                long now = System.currentTimeMillis();
                connection = this.ch.getROConnection();
                T doc = getIfCached(collection, row.getId(), row.getModcount());
                if (doc == null) {
                    // not cached: materialize from the row
                    doc = convertFromDBObject(collection, row);
                } else {
                    long lastmodified = modifiedOf(doc);
                    if (lastmodified == row.getModified() && lastmodified >= 1) {
                        try (CacheLock lock = acquireLockFor(row.getId())) {
                            if (!tracker.mightBeenAffected(row.getId())) {
                                doc = convertFromDBObject(collection, row);
                                nodesCache.putNonConflictingDocs(tracker, castAsNodeDocumentList(result));
                            } else {
                                Map<String, ModificationStamp> invMap = Maps.newHashMap();
// NOTE(review): interior fragment of an update method whose signature is not
// visible in this chunk; code kept verbatim. It applies an UpdateOp to a
// previously-read document and retries on modcount conflicts.
return null;
} else {
    maintainUpdateStats(collection, update.getId());
    addUpdateCounters(update);
    T doc = createNewDocument(collection, oldDoc, update);
    final Stopwatch watch = startWatch();
    boolean success = false;
    int retries = maxRetries;
    try (CacheLock lock = acquireLockFor(update.getId())) {
        while (!success && retries > 0) {
            // optimistic concurrency: the update only succeeds if the row's
            // modcount still matches the copy we based the change on
            long lastmodcount = modcountOf(oldDoc);
            success = updateDocument(collection, doc, update, lastmodcount);
            if (!success) {
                retries -= 1;
                oldDoc = readDocumentCached(collection, update.getId(), Integer.MAX_VALUE);
                if (oldDoc != null) {
                    long newmodcount = modcountOf(oldDoc);
                    if (lastmodcount == newmodcount) {
                        // cache returned the same stale copy; bypass it
                        oldDoc = readDocumentUncached(collection, update.getId(), null);
                        if (oldDoc == null) {
                            LOG.debug("after refetch: {} is gone", update.getId());
                        } else {
                            LOG.debug("after refetch: modcount for {} is {}", update.getId(), modcountOf(oldDoc));
                            addUpdateCounters(update);
                            doc = createNewDocument(collection, oldDoc, update);
/**
 * Verifies that a store created with initial/upgrade schema 0 names its
 * NODES table with the configured prefix and has no VERSION column.
 */
@Test
public void initDefault() {
    RDBOptions options = new RDBOptions().tablePrefix("T00").initialSchema(0).upgradeToSchema(0).dropTablesOnClose(true);
    RDBDocumentStore store = null;
    try {
        store = new RDBDocumentStore(this.ds, new DocumentMK.Builder(), options);
        RDBTableMetaData meta = store.getTable(Collection.NODES);
        assertEquals(options.getTablePrefix() + "_NODES", meta.getName());
        // schema 0 tables do not carry the VERSION column
        assertFalse(meta.hasVersion());
    } finally {
        if (store != null) {
            store.dispose();
        }
    }
}
/**
 * Attempts to persist an UpdateOp, preferring a cheap appending update when
 * the op is appendable and the serialized delta fits the DATA column limit.
 *
 * NOTE(review): this snippet appears truncated by extraction — {@code modcount},
 * {@code success}, {@code hasBinary} and {@code message} lack visible
 * definitions and the {@code db.appendingUpdate(...)} call is cut mid-argument
 * list; code kept verbatim.
 */
private <T extends Document> boolean updateDocument(@NotNull Collection<T> collection, @NotNull T document,
        @NotNull UpdateOp update, Long oldmodcount) {
    Connection connection = null;
    RDBTableMetaData tmd = getTable(collection);
    String data = null;
    try {
        // every 16th change falls through to a full overwrite, presumably to
        // bound the growth of the appended-delta chain — TODO confirm
        if (isAppendableUpdate(update) && modcount % 16 != 0) {
            String appendData = ser.asString(update, tmd.getColumnOnlyProperties());
            if (appendData.length() < tmd.getDataLimitInOctets() / CHAR2OCTETRATIO) {
                try {
                    Operation modOperation = update.getChanges().get(MODIFIEDKEY);
                    long modified = getModifiedFromOperation(modOperation);
                    boolean modifiedIsConditional = modOperation == null || modOperation.type != UpdateOp.Operation.Type.SET;
                    success = db.appendingUpdate(connection, tmd, document.getId(), modified, modifiedIsConditional, hasBinary,
                    connection.commit();
                } catch (SQLException ex) {
                    continueIfStringOverflow(ex);
                    this.ch.rollbackConnection(connection);
                    success = false;
                    String addDiags = "";
                    // SQLSTATE class 22 (data exception) / 72: include size
                    // diagnostics for DATA-column overflow analysis
                    if (data != null && RDBJDBCTools.matchesSQLState(ex, "22", "72")) {
                        byte[] bytes = asBytes(data);
                        addDiags = String.format(
                                " (DATA size in Java characters: %d, in octets: %d, computed character limit: %d)",
                                data.length(), bytes.length, tmd.getDataLimitInOctets() / CHAR2OCTETRATIO);
                        throw handleException(message, ex, collection, document.getId());
                    } finally {
/**
 * Creates the document if absent (when allowed) or updates it, retrying the
 * whole operation when a concurrent change makes the update lose the race.
 *
 * NOTE(review): this snippet appears truncated/garbled by extraction —
 * {@code doc} and {@code ex} lack visible definitions, control flow is
 * incomplete, and an unconditional {@code return null;} precedes live code;
 * code kept verbatim.
 */
@Nullable
private <T extends Document> T internalCreateOrUpdate(Collection<T> collection, UpdateOp update, boolean allowCreate,
        boolean checkConditions, int retries) {
    T oldDoc = readDocumentCached(collection, update.getId(), Integer.MAX_VALUE);
    return null;
    addUpdateCounters(update);
    UpdateUtils.applyChanges(doc, update);
    try {
        Stopwatch watch = startWatch();
        if (!insertDocuments(collection, Collections.singletonList(doc))) {
            throw new DocumentStoreException("Can't insert the document: " + doc.getId());
            // insert lost the race: somebody else created it; re-read and update
            oldDoc = readDocumentUncached(collection, update.getId(), null);
            if (oldDoc == null) {
                throw (ex);
                return internalUpdate(collection, update, oldDoc, checkConditions, retries);
                T result = internalUpdate(collection, update, oldDoc, checkConditions, retries);
                if (allowCreate && result == null) {
                    // document vanished between read and update: retry create-or-update
                    if (retries > 0) {
                        result = internalCreateOrUpdate(collection, update, allowCreate, checkConditions, retries - 1);
/**
 * Returns a lazy Iterable over the documents in the key range (fromKey,
 * toKey) matching the given conditions, optionally sorted by {@code sortBy}.
 *
 * NOTE(review): this snippet appears truncated by extraction — the body ends
 * at the opening of the condition-validation loop; code kept verbatim.
 */
protected <T extends Document> Iterable<T> queryAsIterable(final Collection<T> collection, String fromKey, String toKey,
        final List<String> excludeKeyPatterns, final List<QueryCondition> conditions, final int limit,
        final String sortBy) {
    final RDBTableMetaData tmd = getTable(collection);
    // only properties that are both indexed and stored as columns are queryable
    Set<String> allowedProps = Sets.intersection(INDEXEDPROPERTIES, tmd.getColumnProperties());
    for (QueryCondition cond : conditions) {
// NOTE(review): interior fragment of a bulk-update method whose signature is
// not visible in this chunk; {@code oldDocs}, {@code missingDocs},
// {@code updates} and {@code connection} lack visible definitions and the
// try body is incomplete; code kept verbatim.
// Fetches the current state of documents missing from the cache, then
// performs the batched update under a cache-changes tracker.
oldDocs.putAll(readDocumentsUncached(collection, missingDocs));
try (CacheChangesTracker tracker = obtainTracker(collection, Sets.union(oldDocs.keySet(), missingDocs) )) {
    List<T> docsToUpdate = new ArrayList<T>(updates.size());
    Set<String> keysToUpdate = new HashSet<String>();
    RDBTableMetaData tmd = getTable(collection);
    try {
        connection = this.ch.getRWConnection();
    } catch (SQLException ex) {
        this.ch.rollbackConnection(connection);
        throw handleException("update failed for: " + keysToUpdate, ex, collection, keysToUpdate);
    } finally {
        this.ch.closeConnection(connection);
// NOTE(review): interior fragment of the batched createOrUpdate path; the
// enclosing method signature is not visible, `results` is declared twice
// with different types, and several branches are missing — presumably two
// alternative code paths were collapsed together by extraction; code kept
// verbatim.
// Simple path: apply each op sequentially.
List<T> results = new ArrayList<T>(updateOps.size());
for (UpdateOp update : updateOps) {
    results.add(createOrUpdate(collection, update));
    // Bulk path: clone ops, attempt a bulk update, then retry conflicts
    // and duplicates individually.
    final Stopwatch watch = startWatch();
    Map<UpdateOp, T> results = new LinkedHashMap<UpdateOp, T>();
    Map<String, UpdateOp> operationsToCover = new LinkedHashMap<String, UpdateOp>();
} else {
    // work on a copy so the caller's op is not mutated by counter updates
    UpdateOp clone = updateOp.copy();
    addUpdateCounters(clone);
    operationsToCover.put(clone.getId(), clone);
    results.put(clone, null);
    oldDocs.putAll(readDocumentCached(collection, operationsToCover.keySet()));
    Map<UpdateOp, T> successfulUpdates = bulkUpdate(collection, partition, oldDocs, upsert);
    results.putAll(successfulUpdates);
    // whatever the bulk update did not cover must be retried one-by-one
    operationsToCover.values().removeAll(successfulUpdates.keySet());
    LOG.debug("update conflict on {}, retrying...", updateOp.getId());
    results.put(conflictedOp, createOrUpdate(collection, updateOp));
} else if (duplicates.contains(updateOp)) {
    results.put(updateOp, createOrUpdate(collection, updateOp));
// NOTE(review): interior fragment of table-initialization/upgrade logic; the
// enclosing method signature is not visible and `met`, `tableInfo`,
// `tableName`, `dbWasChanged`, `con` and `col` lack visible definitions —
// presumably two call sites (with and without dbWasChanged tracking) were
// collapsed together by extraction; code kept verbatim.
obtainFlagsFromResultSetMeta(met, tmd);
tmd.setSchemaInfo(tableInfo);
Set<String> indexOn = new HashSet<String>();
String indexInfo = dumpIndexData(con.getMetaData(), met, tableName, indexOn);
tmd.setIndexInfo(indexInfo);
// run the incremental schema upgrades; |= records whether any step changed the DB
dbWasChanged |= upgradeTable(con, tableName, 1);
dbWasChanged |= upgradeTable(con, tableName, 2);
dbWasChanged |= addModifiedIndex(con, tableName);
getTableMetaData(con, col, tmd);
upgradeTable(con, tableName, 1);
upgradeTable(con, tableName, 2);
getTableMetaData(con, col, tmd);
/**
 * Creates the given documents, applying each UpdateOp to a fresh document
 * and inserting the batch.
 *
 * NOTE(review): {@code @Nullable} on a primitive {@code boolean} return is
 * meaningless and looks like a copy/paste slip — verify against upstream.
 * This snippet also appears truncated by extraction — {@code chunks} and
 * {@code docs} lack visible definitions and the body is incomplete; code
 * kept verbatim.
 */
@Nullable
private <T extends Document> boolean internalCreate(Collection<T> collection, List<UpdateOp> updates) {
    final Stopwatch watch = startWatch();
    List<String> ids = new ArrayList<String>(updates.size());
    boolean success = true;
    for (UpdateOp update : chunks) {
        ids.add(update.getId());
        maintainUpdateStats(collection, update.getId());
        // create must not carry conditions — fail fast if it does
        UpdateUtils.assertUnconditional(update);
        T doc = collection.newDocument(this);
        addUpdateCounters(update);
        UpdateUtils.applyChanges(doc, update);
        if (!update.getId().equals(doc.getId())) {
            boolean done = insertDocuments(collection, docs);
            if (done) {
                if (collection == Collection.NODES) {
/**
 * Inserts a batch of documents on a read-write connection.
 *
 * NOTE(review): this snippet appears truncated by extraction — {@code data},
 * {@code longest}, {@code message}, {@code ids} and {@code ex} lack visible
 * definitions and the try/catch structure is incomplete; code kept verbatim.
 */
private <T extends Document> boolean insertDocuments(Collection<T> collection, List<T> documents) {
    Connection connection = null;
    RDBTableMetaData tmd = getTable(collection);
    try {
        connection = this.ch.getRWConnection();
        byte bytes[] = asBytes(data);
        // track the largest serialized document, presumably for diagnostics
        if (bytes.length > longest) {
            longest = bytes.length;
            throw handleException(message, ex, collection, ids);
        } finally {
            this.ch.closeConnection(connection);
/**
 * Returns an iterator over all NODES documents that carry a binary,
 * with no key bounds and no result limit.
 */
@Override
public Iterator<NodeDocument> getIteratorOverDocsWithBinaries() {
    Iterable<NodeDocument> docs = this.documentStore.queryAsIterable(Collection.NODES, null, null,
            Collections.emptyList(), WITH_BINARIES, Integer.MAX_VALUE, null);
    return docs.iterator();
}
}