/**
 * Runs the configured aggregation pipeline and copies at most
 * {@code mongoQueryOptions.getResultLimit()} documents into the result.
 *
 * @param mongoQueryOptions     pipeline operations and the result limit
 * @param mongoCollectionResult accumulator the documents are added to
 * @param collection            collection the aggregation runs against
 * @return the populated {@code mongoCollectionResult}
 */
private MongoCollectionResult aggregate(MongoQueryOptions mongoQueryOptions,
        MongoCollectionResult mongoCollectionResult,
        com.mongodb.client.MongoCollection<Document> collection) {
    AggregateIterable<Document> aggregate = collection.aggregate(mongoQueryOptions.getOperations());
    int index = 0;
    Iterator<Document> iterator = aggregate.iterator();
    // BUG FIX: the counter was never incremented, so the loop condition
    // "index < getResultLimit()" never changed and the limit was ignored —
    // every document from the cursor was added.
    while (iterator.hasNext() && index < mongoQueryOptions.getResultLimit()) {
        mongoCollectionResult.add(iterator.next());
        index++;
    }
    return mongoCollectionResult;
}
// Fragment (enclosing method not visible): appears to configure collation
// (null when absent), cursor batch size, and a per-document mapping on an
// AggregateIterable. NOTE(review): the trailing "//" marker suggests this was
// originally a multi-line fluent chain; collapsed onto one line, everything
// after the first "//" is commented out — verify the original line breaks.
.collation(options.getCollation().map(Collation::toMongoCollation).orElse(null)) // .allowDiskUse(options.isAllowDiskUse()); aggregateIterable = aggregateIterable.batchSize(options.getCursorBatchSize()); MongoIterable<O> iterable = aggregateIterable.map(val -> {
/**
 * Streams the given aggregation over {@code collectionName}, converting each
 * result document to {@code outputType} lazily through a closeable cursor
 * (nothing is materialized up front).
 *
 * @param aggregation the pipeline to run; must not be null and must not use
 *        the explain option (explain is incompatible with streaming)
 * @param collectionName the collection to aggregate; must not be empty
 * @param outputType type each result document is converted to; must not be null
 * @param context optional context used when preparing the pipeline; may be null
 * @return a cursor-backed iterator; the caller is responsible for closing it
 */
protected <O> CloseableIterator<O> aggregateStream(Aggregation aggregation, String collectionName, Class<O> outputType,
        @Nullable AggregationOperationContext context) {

    Assert.hasText(collectionName, "Collection name must not be null or empty!");
    Assert.notNull(aggregation, "Aggregation pipeline must not be null!");
    Assert.notNull(outputType, "Output type must not be null!");
    // Explain produces a server-side report, not a stream of result documents.
    Assert.isTrue(!aggregation.getOptions().isExplain(), "Can't use explain option with streaming!");

    AggregationUtil aggregationUtil = new AggregationUtil(queryMapper, mappingContext);
    AggregationOperationContext rootContext = aggregationUtil.prepareAggregationContext(aggregation, context);

    AggregationOptions options = aggregation.getOptions();
    List<Document> pipeline = aggregationUtil.createPipeline(aggregation, rootContext);

    if (LOGGER.isDebugEnabled()) {
        LOGGER.debug("Streaming aggregation: {} in collection {}", serializeToJsonSafely(pipeline), collectionName);
    }

    ReadDocumentCallback<O> readCallback = new ReadDocumentCallback<>(mongoConverter, outputType, collectionName);

    return execute(collectionName, (CollectionCallback<CloseableIterator<O>>) collection -> {

        AggregateIterable<Document> cursor = collection.aggregate(pipeline, Document.class) //
                .allowDiskUse(options.isAllowDiskUse()) //
                .useCursor(true);

        // Batch size and collation are optional; apply each only when configured.
        if (options.getCursorBatchSize() != null) {
            cursor = cursor.batchSize(options.getCursorBatchSize());
        }

        if (options.getCollation().isPresent()) {
            cursor = cursor.collation(options.getCollation().map(Collation::toMongoCollation).get());
        }

        // Adapter translates driver exceptions and maps raw Documents to O on the fly.
        return new CloseableIterableCursorAdapter<>(cursor, exceptionTranslator, readCallback);
    });
}
// Fragment (enclosing method not visible): drains the aggregation cursor into a
// list and records the page size on the outgoing exchange header.
// NOTE(review): aggregationResult.iterator() is called twice — the finally
// block closes a NEW cursor, not the one that was iterated; hold a single
// MongoCursor in a local (ideally try-with-resources) instead.
aggregationResult.batchSize(batchSize); aggregationResult.allowDiskUse(allowDiskUse); try { result = new ArrayList<>(); aggregationResult.iterator().forEachRemaining(((List<BasicDBObject>) result)::add); exchange.getOut().setHeader(MongoDbConstants.RESULT_PAGE_SIZE, ((List<BasicDBObject>) result).size()); } finally { aggregationResult.iterator().close();
/**
 * Get the MongoDB cursor.
 *
 * Lazily opens an aggregation cursor (match + skip, plus an optional
 * projection) on first use and caches it in the {@code cursor} field; the
 * server cursor id is captured for a (not yet implemented) restart path.
 *
 * @param skip number of documents to skip when the cursor is first opened
 * @return the cached cursor, or null when a resume by cursor id is pending
 */
private MongoCursor<Document> getCursor(int skip) {
    if (cursor == null && cursorId == 0) {
        // First run: build the pipeline from the configured query string.
        Document query = Document.parse(config.getMongo().getQuery());
        List<Bson> pipes = new ArrayList<>(3);
        pipes.add(match(query));
        pipes.add(skip(skip));
        // Projection is optional; only added when configured.
        Optional.ofNullable(config.getMongo().getProject()).ifPresent(p -> pipes.add(project(Document.parse(p))));
        AggregateIterable<Document> aggregate = collection.aggregate(pipes)
                .allowDiskUse(true)
                .useCursor(true);
        cursor = aggregate.iterator();
        // TODO: Persist cursor ID somewhere to allow restarts.
        // getServerCursor() may be null (e.g. results fit in one batch).
        Optional.ofNullable(cursor.getServerCursor()).ifPresent(serverCursor -> cursorId = serverCursor.getId());
    } else if (cursor == null && cursorId != 0) {
        // TODO: Lookup cursor ID for resume.
        // Open existing cursor in case of restart??
    }
    return cursor;
}
}
/**
 * Runs an aggregation pipeline and maps every result document to {@code clazz}.
 *
 * @param collectionName name of the collection to aggregate over
 * @param query          aggregation pipeline stages
 * @param allowDiskUse   whether the server may spill pipeline stages to disk
 * @param clazz          target type each document is parsed into
 * @param <T>            result element type
 * @return all mapped results, in cursor order (empty list when no matches)
 */
public <T> List<T> aggregate(String collectionName, List<Bson> query, boolean allowDiskUse, final Class<T> clazz) {
    // Typed list instead of the original raw List (raw types defeat generics
    // and forced an unchecked add).
    final List<T> list = new ArrayList<>();
    // Lambda replaces the anonymous Block class; cast keeps the Block overload.
    getCollection(collectionName).aggregate(query).allowDiskUse(allowDiskUse).forEach((Block<Document>) document -> {
        // iding() appears to normalize the _id field before mapping — TODO confirm.
        list.add(parseObject(iding(document), clazz));
    });
    return list;
}
// Fragment (chain start and callback body not visible): enables disk spill,
// sets the cursor batch size, and iterates results with a legacy Block callback.
.allowDiskUse(true) .batchSize(PIPELINE_BATCH_SIZE) .forEach(new Block<Document>() { @Override public void apply(Document doc) {
aggregate.allowDiskUse(Boolean.valueOf(System.getProperty(D_AGGREGATION_ALLOW_DISK_USE))); aggregate.batchSize(Integer.valueOf(System.getProperty(D_AGGREGATION_BATCH_SIZE)));
/**
 * Finds a single task execution by unwinding the embedded executions array,
 * matching on the nested execution id, and promoting that execution to the
 * document root.
 *
 * @param aId the execution id to look up
 * @return the matching execution, or {@code null} when none exists
 */
@Override
public TaskExecution findOne(String aId) {
    return collection
        .aggregate(ImmutableList.of(
            unwind('$' + DSL.EXECUTION),
            match(eq(format("{0}._id", DSL.EXECUTION), aId)),
            replaceRoot('$' + DSL.EXECUTION)
        ))
        .first();
}
// Fragment (surrounding call and try/catch not visible): applies the globally
// configured aggregation time limit and the pipeline's allowDiskUse flag,
// ahead of a MongoCommandException handler. NOTE(review): "getAggreationVars"
// looks like a typo for "getAggregationVars" — defined elsewhere, cannot fix here.
context .getAggreationVars())) .maxTime(Bootstrapper.getConfiguration() .getAggregationTimeLimit(), TimeUnit.MILLISECONDS) .allowDiskUse(pipeline .getAllowDiskUse().getValue()); } catch (MongoCommandException
/**
 * Returns the latest job id per job type: sorts newest-first by "started",
 * groups by "$type", and takes the first (newest) "_id" of each group.
 *
 * @return latest job ids, one per type, nulls filtered out
 */
public List<String> findAllJobIdsDistinct() {
    return collection()
        .aggregate(Arrays.asList(
            // Newest jobs first so $first picks the latest id in each group.
            new Document("$sort", new Document("started", -1)),
            // Chained Document replaces the original double-brace HashMap
            // initialization (anonymous-class anti-pattern: hidden outer-class
            // reference, one extra class per use).
            new Document("$group", new Document("_id", "$type")
                .append("latestJobId", new Document("$first", "$_id")))))
        .maxTime(mongoProperties.getDefaultReadTimeout(), TimeUnit.MILLISECONDS)
        .map(doc -> doc.getString("latestJobId"))
        .into(new ArrayList<>()).stream()
        .filter(Objects::nonNull)
        .collect(toList());
}
/**
 * Sets the number of documents the server returns per cursor batch.
 *
 * @param batchSize batch size applied to the underlying iterable
 * @return this configurator, for fluent chaining
 */
@Override
public AggregateOperationConfigurator batchSize(int batchSize) {
    iterable.batchSize(batchSize);
    return this;
}
/**
 * Sets the maximum server-side execution time for the aggregation.
 *
 * @param maxTime  maximum execution time in the given unit
 * @param timeUnit unit of {@code maxTime}
 * @return this configurator, for fluent chaining
 */
@Override
public AggregateOperationConfigurator maxTime(long maxTime, TimeUnit timeUnit) {
    iterable.maxTime(maxTime, timeUnit);
    return this;
}
"count", new Document("$sum", 1) ))) )).forEach((Consumer<Document>) d -> results.put( new UserBrowser() .withUser(d.getString("user"))
/**
 * Creates an iteration over an aggregation pipeline's results.
 *
 * @param aggIter Iterator of documents in AggregationPipelineQueryNode's
 *        intermediate solution representation; must not be null.
 * @param varToOriginalName Maps field names in the pipeline result documents
 *        to the equivalent variable names in the original query; a field with
 *        no entry keeps its own name. Must not be null.
 * @param bindings A partial solution; may be empty but must not be null.
 */
public PipelineResultIteration(AggregateIterable<Document> aggIter,
        Map<String, String> varToOriginalName,
        BindingSet bindings) {
    this.varToOriginalName = Preconditions.checkNotNull(varToOriginalName);
    this.bindings = Preconditions.checkNotNull(bindings);
    Preconditions.checkNotNull(aggIter);
    // Fix the fetch size before opening the cursor.
    aggIter.batchSize(BATCH_SIZE);
    this.cursor = aggIter.iterator();
}
protected <O> CloseableIterator<O> aggregateStream(Aggregation aggregation, String collectionName, Class<O> outputType, @Nullable AggregationOperationContext context) { Assert.hasText(collectionName, "Collection name must not be null or empty!"); Assert.notNull(aggregation, "Aggregation pipeline must not be null!"); Assert.notNull(outputType, "Output type must not be null!"); Assert.isTrue(!aggregation.getOptions().isExplain(), "Can't use explain option with streaming!"); AggregationUtil aggregationUtil = new AggregationUtil(queryMapper, mappingContext); AggregationOperationContext rootContext = aggregationUtil.prepareAggregationContext(aggregation, context); AggregationOptions options = aggregation.getOptions(); List<Document> pipeline = aggregationUtil.createPipeline(aggregation, rootContext); if (LOGGER.isDebugEnabled()) { LOGGER.debug("Streaming aggregation: {} in collection {}", serializeToJsonSafely(pipeline), collectionName); } ReadDocumentCallback<O> readCallback = new ReadDocumentCallback<>(mongoConverter, outputType, collectionName); return execute(collectionName, (CollectionCallback<CloseableIterator<O>>) collection -> { AggregateIterable<Document> cursor = collection.aggregate(pipeline, Document.class) // .allowDiskUse(options.isAllowDiskUse()) // .useCursor(true); if (options.getCursorBatchSize() != null) { cursor = cursor.batchSize(options.getCursorBatchSize()); } if (options.getCollation().isPresent()) { cursor = cursor.collation(options.getCollation().map(Collation::toMongoCollation).get()); } return new CloseableIterableCursorAdapter<>(cursor, exceptionTranslator, readCallback); }); }
/**
 * Runs an aggregation pipeline and returns every result document as JSON.
 *
 * @param collectionName name of the collection to aggregate over
 * @param query          aggregation pipeline stages
 * @param allowDiskUse   whether the server may spill pipeline stages to disk
 * @return all results as JSON objects, in cursor order
 */
public List<JSONObject> aggregate(String collectionName, List<Bson> query, boolean allowDiskUse) {
    final List<JSONObject> results = new ArrayList<JSONObject>();
    // Lambda form of the driver's Block callback.
    Block<Document> collector = document -> {
        Document identified = iding(document);
        results.add(parseObject(identified.toJson()));
    };
    getCollection(collectionName).aggregate(query).allowDiskUse(allowDiskUse).forEach(collector);
    return results;
}
// Verifies the server rejects an unknown accumulator ($foo) in $group with
// error code 15952 and the expected message.
@Test
public void testAggregateWithUnknownGroupOperator() throws Exception {
    List<Document> pipeline = Collections.singletonList(
            new Document("$group", json("_id: null, n: {$foo: 1}")));

    assertThatExceptionOfType(MongoCommandException.class)
        .isThrownBy(() -> collection.aggregate(pipeline).first())
        .withMessageContaining("Command failed with error 15952 (Location15952): 'unknown group operator '$foo''");
}
/**
 * Returns the latest job id per job type: sorts newest-first by "started",
 * groups by "$type", and takes the first (newest) "_id" of each group.
 *
 * @return latest job ids, one per type, nulls filtered out
 */
public List<String> findAllJobIdsDistinct() {
    return collection()
        .aggregate(Arrays.asList(
            // Newest jobs first so $first picks the latest id in each group.
            new Document("$sort", new Document("started", -1)),
            // Chained Document replaces the original double-brace HashMap
            // initialization (anonymous-class anti-pattern: hidden outer-class
            // reference, one extra class per use).
            new Document("$group", new Document("_id", "$type")
                .append("latestJobId", new Document("$first", "$_id")))))
        .maxTime(mongoProperties.getDefaultReadTimeout(), TimeUnit.MILLISECONDS)
        .map(doc -> doc.getString("latestJobId"))
        .into(new ArrayList<>()).stream()
        .filter(Objects::nonNull)
        .collect(toList());
}