/**
 * Creates a filter that performs a logical AND of the provided list of filters. Note that this
 * only generates a "$and" operator when absolutely necessary, as the query language implicitly
 * ANDs together all the keys. In other words, a query expression like:
 *
 * <blockquote><pre>
 * and(eq("x", 1), lt("y", 3))
 * </pre></blockquote>
 *
 * will generate a MongoDB query like:
 *
 * <blockquote><pre>
 * {x : 1, y : {$lt : 3}}
 * </pre></blockquote>
 *
 * @param filters the list of filters to and together
 * @return the filter
 * @mongodb.driver.manual reference/operator/query/and $and
 */
public static Bson and(final Bson... filters) {
    // Delegate to the List-based overload, which owns the $and-flattening logic.
    return and(asList(filters));
}
/**
 * Re-assigns the lock record of the given global transaction from {@code source} to
 * {@code target} by conditionally updating the "identifier" field of the lock document.
 *
 * @param transactionXid the xid whose global transaction id identifies the lock row
 * @param source the instance currently expected to hold the lock
 * @param target the instance that should take the lock over
 * @return {@code true} when exactly one lock document matched (and was re-assigned)
 */
private boolean takeOverTransactionInMongoDB(TransactionXid transactionXid, String source, String target) {
    String instanceId = ByteUtils.byteArrayToString(transactionXid.getGlobalTransactionId());
    try {
        // The database name is the application name with non-word characters normalized to '_'.
        String databaseName = CommonUtils.getApplication(this.endpoint).replaceAll("\\W", "_");
        MongoCollection<Document> locks =
                this.mongoClient.getDatabase(databaseName).getCollection(CONSTANTS_TB_LOCKS);

        Bson matchesGlobalXid = Filters.eq(CONSTANTS_FD_GLOBAL, instanceId);
        Bson heldBySource = Filters.eq("identifier", source);
        Document reassignOwner = new Document("$set", new Document("identifier", target));

        UpdateResult result = locks.updateOne(Filters.and(matchesGlobalXid, heldBySource), reassignOwner);
        // The take-over succeeded only if the (global, source) pair matched exactly one document.
        return result.getMatchedCount() == 1;
    } catch (RuntimeException rex) {
        logger.error("Error occurred while locking transaction(gxid= {}).", instanceId, rex);
        return false;
    }
}
/**
 * Marks the given global transaction as rollback-only by flipping its status from
 * ACTIVE to MARKED_ROLLBACK. Errors are logged and swallowed (best-effort flagging).
 *
 * @param transactionXid the xid whose global transaction id identifies the transaction document
 */
private void markTransactionRollback(TransactionXid transactionXid) {
    try {
        String identifier = ByteUtils.byteArrayToString(transactionXid.getGlobalTransactionId());
        // The database name is the application name with non-word characters normalized to '_'.
        String databaseName = CommonUtils.getApplication(this.endpoint).replaceAll("\\W", "_");
        MongoCollection<Document> transactions =
                this.mongoClient.getDatabase(databaseName).getCollection(CONSTANTS_TB_TRANSACTIONS);

        Document markRollback =
                new Document().append("$set", new Document("status", Status.STATUS_MARKED_ROLLBACK));
        // Only an ACTIVE transaction may be marked for rollback; any other status is left untouched.
        Bson matchesActiveTransaction = Filters.and(
                Filters.eq(CONSTANTS_FD_GLOBAL, identifier),
                Filters.eq("status", Status.STATUS_ACTIVE));

        transactions.updateOne(matchesActiveTransaction, markRollback);
    } catch (RuntimeException error) {
        logger.error("Error occurred while setting the error flag.", error);
    }
}
/**
 * Releases the lock held on the given global transaction by deleting the lock document
 * that matches both the global transaction id and the owning identifier. A no-op delete
 * (nothing matched) is logged as a warning; runtime failures are logged as errors.
 *
 * @param transactionXid the xid whose global transaction id identifies the lock row
 * @param identifier the instance expected to own the lock
 */
public void unlockTransactionInMongoDB(TransactionXid transactionXid, String identifier) {
    String instanceId = ByteUtils.byteArrayToString(transactionXid.getGlobalTransactionId());
    try {
        // The database name is the application name with non-word characters normalized to '_'.
        String databaseName = CommonUtils.getApplication(this.endpoint).replaceAll("\\W", "_");
        MongoCollection<Document> locks =
                this.mongoClient.getDatabase(databaseName).getCollection(CONSTANTS_TB_LOCKS);

        Bson matchesLock = Filters.and(
                Filters.eq(CONSTANTS_FD_GLOBAL, instanceId),
                Filters.eq("identifier", identifier));
        DeleteResult result = locks.deleteOne(matchesLock);
        if (result.getDeletedCount() == 0) {
            // The lock was already gone, or was owned by someone else.
            logger.warn("Error occurred while unlocking transaction(gxid= {}).", instanceId);
        }
    } catch (RuntimeException rex) {
        logger.error("Error occurred while unlocking transaction(gxid= {})!", instanceId, rex);
    }
}
// NOTE(review): fragment — `globalFilter`, `collection`, and `branch` are defined earlier in the
// enclosing method (not visible here). Deletes the single document matching both the global xid
// filter and this branch id.
Bson branchFilter = Filters.eq(CONSTANTS_FD_BRANCH, ByteUtils.byteArrayToString(branch)); collection.deleteOne(Filters.and(globalFilter, branchFilter));
/** * Use the given primary to read the oplog. * * @param primary the connection to the replica set's primary node; may not be null */ protected void readOplog(MongoClient primary) { BsonTimestamp oplogStart = source.lastOffsetTimestamp(replicaSet.replicaSetName()); logger.info("Reading oplog for '{}' primary {} starting at {}", replicaSet, primary.getAddress(), oplogStart); // Include none of the cluster-internal operations and only those events since the previous timestamp ... MongoCollection<Document> oplog = primary.getDatabase("local").getCollection("oplog.rs"); Bson filter = Filters.and(Filters.gt("ts", oplogStart), // start just after our last position Filters.exists("fromMigrate", false)); // skip internal movements across shards FindIterable<Document> results = oplog.find(filter) .sort(new Document("$natural", 1)) // force forwards collection scan .oplogReplay(true) // tells Mongo to not rely on indexes .cursorType(CursorType.TailableAwait); // tail and await new data // Read as much of the oplog as we can ... ServerAddress primaryAddress = primary.getAddress(); try (MongoCursor<Document> cursor = results.iterator()) { while (running.get() && cursor.hasNext()) { if (!handleOplogEvent(primaryAddress, cursor.next())) { // Something happened, and we're supposed to stop reading return; } } } }
// NOTE(review): fragment — `globalFilter`, `transactions`, and `transactionCursor` belong to the
// enclosing method and the trailing `if` block is not closed here. Looks up transaction documents
// flagged with error=true for the current global xid; the branch below handles the "none found" case.
Bson errorFilter = Filters.eq("error", true); FindIterable<Document> transactionItr = transactions.find(Filters.and(globalFilter, errorFilter)); transactionCursor = transactionItr.iterator(); if (transactionCursor.hasNext() == false) {
// NOTE(review): fragment — `oplogStart` and `mongo` come from the enclosing method, and the final
// expression continues past the end of this excerpt. Builds the oplog tailing filter: resume just
// after the last recorded position and skip internal cross-shard migration events.
Bson filter = Filters.and(Filters.gt("ts", oplogStart), // start just after our last position Filters.exists("fromMigrate", false)); // skip internal movements across shards FindIterable<Document> results = mongo.getDatabase("local")
/**
 * Narrows the given query so that every supplied commit property must be present: each
 * (key, value) pair is matched via $elemMatch against the commit-properties array.
 *
 * NOTE(review): assumes {@code commitProperties} is non-empty — an empty map would produce
 * an empty inner {@code $and}, which MongoDB rejects; confirm against callers.
 *
 * @param query the base query to extend
 * @param commitProperties required commit property key/value pairs
 * @return the combined filter
 */
private Bson addCommitPropertiesFilter(Bson query, Map<String, String> commitProperties) {
    Bson[] propertyFilters = commitProperties.entrySet().stream()
            .map(property -> new BasicDBObject(COMMIT_PROPERTIES,
                    new BasicDBObject("$elemMatch",
                            new BasicDBObject("key", property.getKey())
                                    .append("value", property.getValue()))))
            .toArray(Bson[]::new);
    return Filters.and(query, Filters.and(propertyFilters));
}
// Builds the filter selecting index entries to remove: the thing-id filter plus a regex that
// anchors on "<thingId>:<pointer>" within the _id field.
// NOTE(review): thingId and pointer are concatenated into the regex unescaped — any regex
// metacharacter in them (e.g. '.' in a namespaced id) is interpreted as a pattern, not a literal.
// Confirm whether inputs are pre-sanitized; otherwise consider Pattern.quote on both parts.
private static Bson createRemovalFilter(final CharSequence thingId, final String pointer) { final Bson pointerRegex = Filters.regex(PersistenceConstants.FIELD_ID, PersistenceConstants.REGEX_START_THING_ID + thingId + ":" + pointer + PersistenceConstants.REGEX_FIELD_END); return and(createThingIdFilter(thingId), pointerRegex); }
/**
 * Counts the jobs that completed since the start of the current day.
 *
 * NOTE(review): assumes {@code now()} is {@code Instant.now()}; {@code truncatedTo(DAYS)} on an
 * Instant yields UTC midnight, so "today" is the UTC day, not the server's local day — confirm
 * this is intended.
 *
 * @return the number of COMPLETED jobs whose end time is on or after the start of today
 * @throws ArithmeticException if the count exceeds {@link Integer#MAX_VALUE}
 */
@Override
public int countCompletedJobsToday() {
    Date today = Date.from(now().truncatedTo(DAYS));
    // toIntExact: fail loudly instead of silently truncating the driver's long count
    // (the original plain (int) cast would wrap around for counts above Integer.MAX_VALUE).
    return Math.toIntExact(
            collection.countDocuments(
                    and(
                            eq(DSL.STATUS, JobStatus.COMPLETED),
                            gte(DSL.END_TIME, today))));
}
/**
 * Builds the query selecting documents of the given managed type by global-id prefix.
 * For non-aggregate types the entity reference must additionally be present.
 *
 * @param aggregate whether the managed type is an aggregate
 * @param managedType the type whose name forms the global-id prefix
 * @return the entity-type filter
 */
private Bson createEntityTypeQuery(boolean aggregate, ManagedType managedType) {
    Bson byTypeNamePrefix = prefixQuery(GLOBAL_ID_KEY, managedType.getName() + "/");
    if (aggregate) {
        return byTypeNamePrefix;
    }
    // Non-aggregates also require the entity reference field to exist.
    return Filters.and(byTypeNamePrefix, Filters.exists(GLOBAL_ID_ENTITY));
}
/**
 * Builds a query matching any of the given snapshot identifiers: one
 * (globalId AND version) clause per identifier, OR-ed together.
 *
 * NOTE(review): assumes {@code snapshotIdentifiers} is non-empty — {@code Filters.or} of zero
 * clauses is rejected by MongoDB; confirm against callers.
 *
 * @param snapshotIdentifiers the identifiers to match
 * @return the combined OR filter
 */
private Bson createSnapshotIdentifiersQuery(Collection<SnapshotIdentifier> snapshotIdentifiers) {
    List<Bson> perIdentifier = snapshotIdentifiers.stream()
            .map(identifier -> Filters.and(
                    createIdQuery(identifier.getGlobalId()),
                    createVersionQuery(identifier.getVersion())))
            .collect(toImmutableList());
    return Filters.or(perIdentifier);
}
@Override public BsonFilterExpression visit(NotNullFilter filter, MongoResolutionContext<?> context) { // exists and it is not null return resolveFieldName(filter.getLeftOperand(), context) .map(fn -> Filters.and(Filters.exists(fn), Filters.not(Filters.type(fn, BsonType.NULL)))) .map(bson -> BsonFilterExpression.create(bson)).orElse(null); }
/**
 * Records an access of the given project by the given user: increments the per-(project, user)
 * access counter and stamps the latest access time. The document is created on first access
 * via upsert.
 *
 * @param projectId the accessed project
 * @param userId the accessing user
 * @param timestamp the access time, in epoch milliseconds
 */
@Override
public void logProjectAccess(ProjectId projectId, UserId userId, long timestamp) {
    // $inc bumps the running counter; $set records the most recent access time.
    Document update = new Document()
            .append("$inc", new Document("count", 1))
            .append("$set", new Document(ACCESSED, new Date(timestamp)));
    collection.updateOne(
            and(eq(PROJECT_ID, projectId.getId()), eq(USER_ID, userId.getUserName())),
            update,
            new UpdateOptions().upsert(true));
}
}
/**
 * Persists the given task execution into its parent job document: the job is located by id,
 * the matching element of the "execution" array is located by the execution's id, and the
 * positional "$" operator rewrites just that array element.
 *
 * @param aTaskExecution the execution to merge into its job document
 * @return the same execution instance, for chaining
 */
@Override
public TaskExecution merge(TaskExecution aTaskExecution) {
    Bson matchesJobAndExecution = and(
            eq("_id", aTaskExecution.getJobId()),
            eq("execution._id", aTaskExecution.getId()));
    // "{0}.$" expands to e.g. "execution.$" — the positional update of the matched element.
    collection.updateOne(matchesJobAndExecution, set(format("{0}.$", DSL.EXECUTION), aTaskExecution));
    return aTaskExecution;
}
/**
 * Builds the initial aggregation $match stage: a document must be authorized (granted by
 * either the global policy or the ACL), not deleted, and match the user-supplied filter.
 *
 * @param filterCriteria the user-supplied search criteria
 * @param aclCriteria the ACL-based authorization criteria
 * @param globalPolicyGrantsCriteria the global-policy-based authorization criteria
 * @return the $match stage
 */
private static Bson createInitialMatchStageWithNonDeleted(final Criteria filterCriteria,
        final Criteria aclCriteria, final Criteria globalPolicyGrantsCriteria) {
    // Either authorization path is sufficient.
    final Bson authorization = or(
            CreateBsonVisitor.apply(globalPolicyGrantsCriteria),
            CreateBsonVisitor.apply(aclCriteria));
    final Bson selection = and(authorization, filterNotDeleted(), CreateBsonVisitor.apply(filterCriteria));
    return match(selection);
}
/**
 * Deletes every GridFS file previously stored under the computed filename for the given
 * field of the given document.
 *
 * @param fieldName the name of the field whose content is being replaced
 * @param documentId the id of the owning document
 * @param gridFSFilesBucket the bucket holding the files
 */
private void deleteExistingContent(String fieldName, Object documentId, GridFSBucket gridFSFilesBucket) {
	// A single equality predicate needs no Filters.and(...) wrapper (the original wrapped
	// one filter in $and for no effect).
	GridFSFindIterable results = gridFSFilesBucket.find(
			Filters.eq( "filename", fileName( fieldName, documentId ) ) );
	// Close the cursor deterministically even if a delete fails.
	try ( MongoCursor<GridFSFile> iterator = results.iterator() ) {
		while ( iterator.hasNext() ) {
			GridFSFile next = iterator.next();
			gridFSFilesBucket.delete( next.getId() );
		}
	}
}
/**
 * Checks whether any cluster node needs recovery: a node is recovery-eligible when it is
 * still marked ACTIVE but either its lease has expired or a recovery lock has already been
 * acquired for it.
 *
 * @return {@code true} if at least one cluster node document matches the recovery condition
 */
@Override
public boolean isRecoveryNeeded() {
    Bson query = Filters.and(
            Filters.eq(ClusterNodeInfo.STATE, ClusterNodeInfo.ClusterNodeState.ACTIVE.name()),
            Filters.or(
                    Filters.lt(ClusterNodeInfo.LEASE_END_KEY, clock.getTime()),
                    Filters.eq(ClusterNodeInfo.REV_RECOVERY_LOCK,
                            ClusterNodeInfo.RecoverLockState.ACQUIRED.name())
            )
    );
    // first() consumes and releases the server-side cursor; the original
    // find(...).iterator().hasNext() obtained a MongoCursor that was never closed (cursor leak).
    return getClusterNodeCollection().find(query).first() != null;
}