public void putErrorTransaction(TransactionXid transactionXid, Transaction transaction) {
    try {
        TransactionArchive archive = (TransactionArchive) transaction.getTransactionArchive();
        byte[] global = transactionXid.getGlobalTransactionId();
        String identifier = ByteUtils.byteArrayToString(global);

        int status = archive.getCompensableStatus();

        String databaseName = CommonUtils.getApplication(this.endpoint).replaceAll("\\W", "_");
        MongoDatabase mdb = this.mongoClient.getDatabase(databaseName);
        MongoCollection<Document> collection = mdb.getCollection(CONSTANTS_TB_TRANSACTIONS);

        Document target = new Document();
        target.append("modified", this.endpoint);
        target.append("status", status);
        target.append("error", true);
        target.append("recovered_at", archive.getRecoveredAt() == 0 ? null : new Date(archive.getRecoveredAt()));
        target.append("recovered_times", archive.getRecoveredTimes());

        Document document = new Document();
        document.append("$set", target);
        // document.append("$inc", new BasicDBObject("modified_time", 1));

        UpdateResult result = collection.updateOne(Filters.eq(CONSTANTS_FD_GLOBAL, identifier), document);
        if (result.getMatchedCount() != 1) {
            throw new IllegalStateException(
                    String.format("Error occurred while updating transaction(matched= %s, modified= %s).",
                            result.getMatchedCount(), result.getModifiedCount()));
        }
    } catch (RuntimeException error) {
        logger.error("Error occurred while setting the error flag.", error);
    }
}
private void markTransactionRollback(TransactionXid transactionXid) {
    try {
        byte[] global = transactionXid.getGlobalTransactionId();
        String identifier = ByteUtils.byteArrayToString(global);

        String application = CommonUtils.getApplication(this.endpoint);
        String databaseName = application.replaceAll("\\W", "_");
        MongoDatabase mdb = this.mongoClient.getDatabase(databaseName);
        MongoCollection<Document> collection = mdb.getCollection(CONSTANTS_TB_TRANSACTIONS);

        Document document = new Document();
        document.append("$set", new Document("status", Status.STATUS_MARKED_ROLLBACK));

        Bson globalFilter = Filters.eq(CONSTANTS_FD_GLOBAL, identifier);
        Bson statusFilter = Filters.eq("status", Status.STATUS_ACTIVE);
        collection.updateOne(Filters.and(globalFilter, statusFilter), document);
    } catch (RuntimeException error) {
        logger.error("Error occurred while marking the transaction for rollback.", error);
    }
}
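Because the filter above only matches documents still in STATUS_ACTIVE, the update behaves like a compare-and-set. A minimal sketch, assuming the same collection, filters, and logger as in markTransactionRollback, of how a caller could detect that nothing was updated; the debug message is illustrative, not part of the original code.

// Sketch only: capture the UpdateResult to detect that the compare-and-set did not match.
UpdateResult result = collection.updateOne(Filters.and(globalFilter, statusFilter), document);
if (result.getMatchedCount() == 0) {
    // No ACTIVE transaction with this global id was found; it may already be marked
    // for rollback or completed, so the caller can treat this as a no-op.
    logger.debug("Transaction {} was not in ACTIVE state; rollback mark skipped.", identifier);
}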
@Override
public void upgrade() {
    final FindIterable<Document> documentsWithMissingFields = collection.find(
            or(not(exists(ContentPack.FIELD_META_ID)), not(exists(ContentPack.FIELD_META_REVISION))));

    for (Document document : documentsWithMissingFields) {
        final ObjectId objectId = document.getObjectId("_id");
        LOG.debug("Found document with missing \"id\" or \"rev\" field with ID <{}>", objectId);

        final String id = document.get("id", objectId.toHexString());
        final int rev = document.get("rev", 0);
        document.put("id", id);
        document.put("rev", rev);

        final UpdateResult updateResult = collection.replaceOne(eq("_id", objectId), document);
        if (updateResult.wasAcknowledged()) {
            LOG.debug("Successfully updated document with ID <{}>", objectId);
        } else {
            LOG.error("Failed to update document with ID <{}>", objectId);
        }
    }
}
private void removeConfigPath() {
    final FindIterable<Document> documentsWithConfigPath = collection.find(exists("configuration_path"));
    for (Document document : documentsWithConfigPath) {
        final ObjectId objectId = document.getObjectId("_id");
        document.remove("configuration_path");

        final UpdateResult updateResult = collection.replaceOne(eq("_id", objectId), document);
        if (updateResult.wasAcknowledged()) {
            LOG.debug("Successfully updated document with ID <{}>", objectId);
        } else {
            LOG.error("Failed to update document with ID <{}>", objectId);
        }
    }
}
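Since only one field is being dropped, the same cleanup could be expressed as a single server-side update rather than a per-document read and replaceOne. A minimal sketch, assuming the same collection and LOG as in removeConfigPath, using the driver's standard Filters and Updates helpers from com.mongodb.client.model; this is an alternative formulation, not the migration's actual code.

// Sketch only: drop configuration_path with one $unset update instead of a
// read-modify-replaceOne round trip per document.
final UpdateResult result = collection.updateMany(
        Filters.exists("configuration_path"),
        Updates.unset("configuration_path"));
LOG.debug("Removed configuration_path from {} documents", result.getModifiedCount());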
MongoCollection<Document> movies = collection;
InsertOneOptions insertOptions = new InsertOneOptions().bypassDocumentValidation(true);
movies.insertOne(Document.parse("{ \"name\": \"Starter Wars\" }"), insertOptions);

assertThat(collection.countDocuments()).isEqualTo(1);

Bson filter = Filters.eq("name", "Starter Wars");
FindIterable<Document> movieResults = collection.find(filter);
try (MongoCursor<Document> cursor = movieResults.iterator()) {
    assertThat(cursor.tryNext().getString("name")).isEqualTo("Starter Wars");
    assertThat(cursor.tryNext()).isNull();
}

// Separate fragment: build a tailable query over the replica set oplog
// (oplogStart and mongo come from the surrounding, elided context).
Bson oplogFilter = Filters.and(Filters.gt("ts", oplogStart), // start just after our last position
        Filters.exists("fromMigrate", false)); // skip internal movements across shards
FindIterable<Document> results = mongo.getDatabase("local")
        .getCollection("oplog.rs")
        .find(oplogFilter)
        .sort(new Document("$natural", 1))
        .oplogReplay(true) // tells Mongo to not rely on indexes
        .noCursorTimeout(true) // don't time out waiting for events
        .cursorType(CursorType.TailableAwait);
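A tailable, awaiting cursor built this way blocks until new oplog entries arrive rather than returning when it reaches the end of the collection. A minimal consumption sketch, assuming the FindIterable built above and an illustrative AtomicBoolean running flag (not part of the original fragment):

// Sketch only: drain a tailable-await oplog cursor until asked to stop.
// 'results' is the FindIterable built above; 'running' is an assumed AtomicBoolean.
try (MongoCursor<Document> oplogCursor = results.iterator()) {
    while (running.get()) {
        Document event = oplogCursor.tryNext(); // returns null if nothing is available yet
        if (event != null) {
            BsonTimestamp ts = event.get("ts", BsonTimestamp.class);
            String op = event.getString("op"); // i = insert, u = update, d = delete, c = command, n = no-op
            // hand the event off to whatever processes oplog entries
        }
    }
}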
// Assumed declaration: the original fragment uses modifiedStreams without showing it.
final ImmutableSet.Builder<String> modifiedStreams = ImmutableSet.builder();
final ImmutableSet.Builder<String> modifiedAlertConditions = ImmutableSet.builder();
for (Document document : collection.find().sort(ascending(FIELD_CREATED_AT))) {
    final String streamId = document.getObjectId(FIELD_ID).toHexString();
    if (!document.containsKey(FIELD_ALERT_CONDITIONS)) {
        continue;
    }

    @SuppressWarnings("unchecked")
    final List<Document> alertConditions = (List<Document>) document.get(FIELD_ALERT_CONDITIONS);

    // The original fragment is truncated here: alertConditionId, field, and intValue are
    // presumably derived from each entry in alertConditions and its parameters.
    final UpdateResult result = collection.updateOne(
            eq(FIELD_ALERT_CONDITIONS_ID, alertConditionId),
            set(ALERT_CONDITIONS_PARAMETERS_PREFIX + field, intValue));
    if (result.getMatchedCount() > 0) {
        modifiedStreams.add(streamId);
        modifiedAlertConditions.add(alertConditionId);
    }
}
MongoCollection<Document> contacts = db.getCollection("contacts");
InsertOneOptions insertOptions = new InsertOneOptions().bypassDocumentValidation(true);
contacts.insertOne(Document.parse("{ \"name\": \"Jon Snow\" }"), insertOptions);

assertThat(db.getCollection("contacts").countDocuments()).isEqualTo(1);

Bson filter = Filters.eq("name", "Jon Snow");
FindIterable<Document> contactResults = db.getCollection("contacts").find(filter);
try (MongoCursor<Document> cursor = contactResults.iterator()) {
    assertThat(cursor.tryNext().getString("name")).isEqualTo("Jon Snow");
    assertThat(cursor.tryNext()).isNull();
}

contacts.insertOne(Document.parse("{ \"name\": \"Sally Hamm\" }"), insertOptions);

assertThat(db.getCollection("contacts").countDocuments()).isEqualTo(2);

filter = Filters.eq("name", "Jon Snow");
contactResults = db.getCollection("contacts").find(filter);
try (MongoCursor<Document> cursor = contactResults.iterator()) {
    Document doc = cursor.tryNext();
    assertThat(doc.getString("name")).isEqualTo("Jon Snow");
}

contacts.deleteOne(Filters.eq("name", "Jon Snow"));
Testing.debug("Removed the Jon Snow document from 'dbA.contacts' collection");
@Nonnull
@Override
public Optional<SingleFeatureBean> containerOf(Id id) {
    checkNotNull(id, "id");

    final String ownerId = idConverter.convert(id);

    final Bson filter = and(eq(ModelDocument.F_ID, ownerId), exists(ModelDocument.F_CONTAINER));
    final Bson projection = include(ModelDocument.F_CONTAINER);

    final ModelDocument instance = documents.find(filter).projection(projection).first();

    return Optional.ofNullable(instance)
            .map(ModelDocument::getContainer)
            .map(ContainerDocument::toBean);
}
/**
 * Use the given primary to read the oplog.
 *
 * @param primary the connection to the replica set's primary node; may not be null
 */
protected void readOplog(MongoClient primary) {
    BsonTimestamp oplogStart = source.lastOffsetTimestamp(replicaSet.replicaSetName());
    logger.info("Reading oplog for '{}' primary {} starting at {}", replicaSet, primary.getAddress(), oplogStart);

    // Include none of the cluster-internal operations and only those events since the previous timestamp ...
    MongoCollection<Document> oplog = primary.getDatabase("local").getCollection("oplog.rs");
    Bson filter = Filters.and(Filters.gt("ts", oplogStart), // start just after our last position
            Filters.exists("fromMigrate", false)); // skip internal movements across shards
    FindIterable<Document> results = oplog.find(filter)
            .sort(new Document("$natural", 1)) // force forwards collection scan
            .oplogReplay(true) // tells Mongo to not rely on indexes
            .cursorType(CursorType.TailableAwait); // tail and await new data

    // Read as much of the oplog as we can ...
    ServerAddress primaryAddress = primary.getAddress();
    try (MongoCursor<Document> cursor = results.iterator()) {
        while (running.get() && cursor.hasNext()) {
            if (!handleOplogEvent(primaryAddress, cursor.next())) {
                // Something happened, and we're supposed to stop reading
                return;
            }
        }
    }
}
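The per-event handler called from readOplog is not part of this fragment. Purely as an illustration of the oplog document shape and of the "return false to stop reading" contract referenced above, a hedged sketch follows; the field handling and logging here are assumptions, not the handler's real implementation.

// Sketch only: an illustrative per-event handler. The real handleOplogEvent is not shown
// in this fragment; this only demonstrates common oplog entry fields.
protected boolean handleOplogEvent(ServerAddress primaryAddress, Document event) {
    String op = event.getString("op"); // i = insert, u = update, d = delete, c = command, n = no-op
    if ("n".equals(op)) {
        return true; // ignore periodic no-op entries and keep reading
    }
    BsonTimestamp ts = event.get("ts", BsonTimestamp.class);
    String ns = event.getString("ns"); // "database.collection" the event applies to
    logger.debug("Oplog event {} on {} at {} from {}", op, ns, ts, primaryAddress);
    // record the offset / dispatch the event here; return false to stop reading
    return true;
}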
MongoCursor<Document> cursor = null;
try {
    cursor = collection.find().limit(batchSize).iterator();
    for (; cursor.hasNext(); length++) {
        Document document = cursor.next();

        String globalValue = document.getString(CONSTANTS_FD_GLOBAL);
        String branchValue = document.getString(CONSTANTS_FD_BRANCH);
        byte[] global = ByteUtils.stringToByteArray(globalValue);
        byte[] branch = ByteUtils.stringToByteArray(branchValue);
        TransactionXid branchXid = xidFactory.createBranchXid(globalXid, branch);

        String resourceId = document.getString("resource_id");
        if (StringUtils.isBlank(resourceId)) {
            // The original fragment is truncated here; presumably a branch record without a
            // resource id is removed before moving on to the next document.
            Bson globalFilter = Filters.eq(CONSTANTS_FD_GLOBAL, ByteUtils.byteArrayToString(global));
            Bson branchFilter = Filters.eq(CONSTANTS_FD_BRANCH, ByteUtils.byteArrayToString(branch));
            collection.deleteOne(Filters.and(globalFilter, branchFilter));
            continue;
        }
    }
} finally {
    IOUtils.closeQuietly(cursor);
}
@Override
public void upgrade() {
    if (clusterConfigService.get(MigrationCompleted.class) != null) {
        LOG.debug("Migration already done.");
        return;
    }

    // Do not overwrite an existing default index config
    boolean defaultDone = clusterConfigService.get(DefaultIndexSetConfig.class) != null;

    final ImmutableSet.Builder<String> builder = ImmutableSet.builder();
    final FindIterable<Document> documents = collection.find(exists(FIELD_DEFAULT)).sort(ascending(FIELD_CREATION_DATE));
    for (final Document document : documents) {
        final ObjectId id = document.getObjectId(FIELD_ID);
        final String idString = id.toHexString();
        final boolean isDefault = firstNonNull(document.getBoolean(FIELD_DEFAULT), false);

        if (!defaultDone && isDefault) {
            defaultDone = true;
            clusterConfigService.write(DefaultIndexSetConfig.create(idString));
        }

        final long matchedCount = collection.updateOne(eq(FIELD_ID, id), unset(FIELD_DEFAULT)).getMatchedCount();
        if (matchedCount > 0) {
            LOG.info("Removed <default> field from index set <{}> ({})", document.getString(FIELD_TITLE), idString);
            builder.add(idString);
        } else {
            LOG.error("Couldn't remove <default> field from index set <{}> ({})", document.getString(FIELD_TITLE), idString);
        }
    }

    clusterConfigService.write(MigrationCompleted.create(builder.build()));
}
@Override
public void upgrade() {
    boolean indexExists = false;
    for (Document document : collection.listIndexes()) {
        if (MongoDbGrokPatternService.INDEX_NAME.equals(document.getString("name"))
                && document.getBoolean("unique")) {
            indexExists = true;
            break;
        }
    }

    // The original fragment is truncated here; presumably the collection is only rewritten
    // when the unique index does not exist yet.
    if (indexExists) {
        return;
    }

    for (Document document : collection.find()) {
        final ObjectId id = document.getObjectId("_id");
        final String name = document.getString("name");
        final String pattern = document.getString("pattern");
        // The rest of the loop body is missing from the fragment; the document is removed
        // here, with name and pattern presumably used to re-insert or deduplicate it.
        collection.deleteOne(eq("_id", id));
    }
}
public void recover(TransactionRecoveryCallback callback) {
    MongoCursor<Document> transactionCursor = null;
    try {
        String application = CommonUtils.getApplication(this.endpoint);
        String databaseName = application.replaceAll("\\W", "_");
        MongoDatabase mdb = this.mongoClient.getDatabase(databaseName);
        MongoCollection<Document> transactions = mdb.getCollection(CONSTANTS_TB_TRANSACTIONS);

        FindIterable<Document> transactionItr = transactions.find(Filters.eq("coordinator", true));
        for (transactionCursor = transactionItr.iterator(); transactionCursor.hasNext();) {
            Document document = transactionCursor.next();
            boolean error = document.getBoolean("error");

            String targetApplication = document.getString("system");
            long expectVersion = document.getLong("version");
            long actualVersion = this.versionManager.getInstanceVersion(targetApplication);

            if (!error && actualVersion > 0 && actualVersion <= expectVersion) {
                continue; // ignore
            }

            callback.recover(this.reconstructTransactionArchive(document));
        }
    } catch (RuntimeException error) {
        logger.error("Error occurred while recovering transaction.", error);
    } catch (Exception error) {
        logger.error("Error occurred while recovering transaction.", error);
    } finally {
        IOUtils.closeQuietly(transactionCursor);
    }
}
@Test
public void testUpsertWithoutId() {
    UpdateResult result = collection.updateOne(eq("a", 1), set("a", 2), new UpdateOptions().upsert(true));
    assertThat(result.getModifiedCount()).isEqualTo(0);
    assertThat(result.getUpsertedId()).isNotNull();
    assertThat(collection.find().first().get("_id")).isInstanceOf(ObjectId.class);
    assertThat(collection.find().first().get("a")).isEqualTo(2);
}
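Because no document matches the filter and no _id is supplied, the server generates an ObjectId for the upserted document, which is why getUpsertedId() is non-null. A small sketch, assuming the same collection, of how $setOnInsert can be combined with $set so that some fields are written only when the upsert actually inserts; the "createdBy" field and its value are illustrative.

// Sketch only: $set always applies, $setOnInsert applies only when a new document is created.
UpdateResult upsertResult = collection.updateOne(
        Filters.eq("a", 1),
        Updates.combine(
                Updates.set("a", 2),
                Updates.setOnInsert("createdBy", "migration")), // written only on insert
        new UpdateOptions().upsert(true));
// getUpsertedId() is non-null only when this call inserted a new document
BsonValue upsertedId = upsertResult.getUpsertedId();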
@Override
public Suite getSuite(DBKey dbKey, String name, String version) throws StorageException {
    MongoCollection<Document> metadata = getMetadataCollection(dbKey);
    LOGGER.debug("Fetching suite with name: {}, version: {}", name, version);

    final FindIterable<Document> found = metadata.find(Filters.and(
            Filters.eq(SUITE_PARAM_NAME, name),
            Filters.eq(SUITE_VERSION_PARAM_NAME, Integer.parseInt(version))));
    final Document result = found.first();
    return new DocumentConverter(result).toSuite();
}
public Object doInCollection(MongoCollection<Document> collection) throws MongoException, DataAccessException {
    if (dbId != null) {
        collection.replaceOne(Filters.eq("_id", dbId.get("_id")), dbDoc);
    } else {
        if (dbDoc.containsKey("_id") && dbDoc.get("_id") == null) {
            dbDoc.remove("_id");
        }
        collection.insertOne(dbDoc);
    }
    return null;
}
});
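The method above is the body of a Spring Data CollectionCallback passed to an execute call whose opening is not shown. A minimal sketch of how such a save-or-insert callback is typically handed to MongoTemplate.execute; the template instance, collection name, and document are illustrative, and this variant keys the replace-vs-insert decision off the document's own _id rather than a separate dbId.

// Sketch only: invoking a save-or-insert style CollectionCallback through Spring Data's MongoTemplate.
Document dbDoc = new Document("name", "example");
mongoTemplate.execute("myCollection", collection -> {
    if (dbDoc.containsKey("_id") && dbDoc.get("_id") != null) {
        collection.replaceOne(Filters.eq("_id", dbDoc.get("_id")), dbDoc); // update existing document
    } else {
        dbDoc.remove("_id"); // let the server assign a fresh ObjectId
        collection.insertOne(dbDoc);
    }
    return null;
});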
/**
 * {@inheritDoc}
 */
@Override
public void releaseClaim(String processorName, int segment) {
    UpdateResult updateResult = mongoTemplate.trackingTokensCollection()
            .updateOne(and(
                    eq("processorName", processorName),
                    eq("segment", segment),
                    eq("owner", nodeId)
            ), set("owner", null));

    if (updateResult.getMatchedCount() == 0) {
        logger.warn("Releasing claim of token {}/{} failed. It was owned by another node.", processorName, segment);
    }
}
@Override
public boolean isRecoveryNeeded() {
    Bson query = Filters.and(
            Filters.eq(ClusterNodeInfo.STATE, ClusterNodeInfo.ClusterNodeState.ACTIVE.name()),
            Filters.or(
                    Filters.lt(ClusterNodeInfo.LEASE_END_KEY, clock.getTime()),
                    Filters.eq(ClusterNodeInfo.REV_RECOVERY_LOCK, ClusterNodeInfo.RecoverLockState.ACQUIRED.name())
            )
    );

    return getClusterNodeCollection().find(query).iterator().hasNext();
}
@Override
public List<? extends DomainEventData<?>> findDomainEvents(MongoCollection<Document> collection,
                                                           String aggregateIdentifier,
                                                           long firstSequenceNumber,
                                                           int batchSize) {
    FindIterable<Document> cursor = collection
            .find(and(eq(eventConfiguration.aggregateIdentifierProperty(), aggregateIdentifier),
                      gte(eventConfiguration.sequenceNumberProperty(), firstSequenceNumber)))
            .sort(new BasicDBObject(eventConfiguration().sequenceNumberProperty(), ORDER_ASC));
    cursor = cursor.batchSize(batchSize);
    return stream(cursor.spliterator(), false)
            .flatMap(this::extractEvents)
            .filter(event -> event.getSequenceNumber() >= firstSequenceNumber)
            .collect(Collectors.toList());
}