/**
 * Reduces the given buckets, all sharing this bucket's composite key, into a single
 * bucket by summing doc counts and reducing their sub-aggregations.
 */
InternalBucket reduce(List<InternalBucket> buckets, ReduceContext reduceContext) {
    long totalDocCount = 0;
    final List<InternalAggregations> subAggs = new ArrayList<>(buckets.size());
    for (InternalBucket toReduce : buckets) {
        totalDocCount += toReduce.docCount;
        subAggs.add(toReduce.aggregations);
    }
    final InternalAggregations reducedAggs = InternalAggregations.reduce(subAggs, reduceContext);
    return new InternalBucket(sourceNames, formats, key, reverseMuls, totalDocCount, reducedAggs);
}
/**
 * Reduces buckets for the same significant term: sums the subset and superset
 * document frequencies and reduces the sub-aggregations into one bucket.
 */
public B reduce(List<B> buckets, ReduceContext context) {
    long reducedSubsetDf = 0;
    long reducedSupersetDf = 0;
    final List<InternalAggregations> subAggs = new ArrayList<>(buckets.size());
    for (B toReduce : buckets) {
        reducedSubsetDf += toReduce.subsetDf;
        reducedSupersetDf += toReduce.supersetDf;
        subAggs.add(toReduce.aggregations);
    }
    final InternalAggregations reducedAggs = InternalAggregations.reduce(subAggs, context);
    return newBucket(reducedSubsetDf, subsetSize, reducedSupersetDf, supersetSize, reducedAggs);
}
/**
 * Reduces buckets for the same geohash cell: sums doc counts and reduces
 * their sub-aggregations into a single bucket.
 */
public Bucket reduce(List<? extends Bucket> buckets, ReduceContext context) {
    long totalDocCount = 0;
    final List<InternalAggregations> subAggs = new ArrayList<>(buckets.size());
    for (Bucket toReduce : buckets) {
        totalDocCount += toReduce.docCount;
        subAggs.add(toReduce.aggregations);
    }
    return new Bucket(geohashAsLong, totalDocCount, InternalAggregations.reduce(subAggs, context));
}
/**
 * Reduces buckets that share this bucket's key into a single bucket by summing
 * doc counts and reducing sub-aggregations.
 *
 * Rewritten to accumulate and construct the result once, consistent with the
 * other reduce implementations in this file; the original mutated a
 * partially-initialized bucket (its aggregations were overwritten after
 * construction). Behavior is identical for any non-empty input.
 */
InternalBucket reduce(List<InternalBucket> buckets, ReduceContext context) {
    assert buckets.isEmpty() == false : "expected at least one bucket to reduce";
    long docCount = 0;
    List<InternalAggregations> aggregationsList = new ArrayList<>(buckets.size());
    for (InternalBucket bucket : buckets) {
        docCount += bucket.docCount;
        aggregationsList.add(bucket.aggregations);
    }
    // Key and keyed-flag are identical across the buckets being merged; take them from the first.
    InternalBucket first = buckets.get(0);
    InternalAggregations aggs = InternalAggregations.reduce(aggregationsList, context);
    return new InternalBucket(first.key, docCount, aggs, first.keyed);
}
/**
 * Reduces buckets that share this bucket's key into a single bucket by summing
 * doc counts and reducing sub-aggregations.
 *
 * Rewritten to accumulate and construct the result once, consistent with the
 * other reduce implementations in this file; the original mutated a
 * partially-initialized bucket (its aggregations were overwritten after
 * construction). Behavior is identical for any non-empty input.
 */
InternalBucket reduce(List<InternalBucket> buckets, ReduceContext context) {
    assert buckets.isEmpty() == false : "expected at least one bucket to reduce";
    long docCount = 0;
    List<InternalAggregations> aggregationsList = new ArrayList<>(buckets.size());
    for (InternalBucket bucket : buckets) {
        docCount += bucket.docCount;
        aggregationsList.add(bucket.aggregations);
    }
    // All merged buckets carry the same key; take it from the first.
    InternalBucket first = buckets.get(0);
    InternalAggregations aggs = InternalAggregations.reduce(aggregationsList, context);
    return new InternalBucket(first.key, docCount, aggs);
}
/**
 * Reduces histogram buckets with the same key: sums doc counts and reduces
 * their sub-aggregations into a single bucket.
 */
Bucket reduce(List<Bucket> buckets, ReduceContext context) {
    long totalDocCount = 0;
    final List<InternalAggregations> subAggs = new ArrayList<>(buckets.size());
    for (Bucket toReduce : buckets) {
        totalDocCount += toReduce.docCount;
        subAggs.add((InternalAggregations) toReduce.getAggregations());
    }
    final InternalAggregations reducedAggs = InternalAggregations.reduce(subAggs, context);
    return new InternalHistogram.Bucket(key, totalDocCount, keyed, format, reducedAggs);
}
/**
 * Reduces range buckets covering the same [from, to) range: sums doc counts and
 * reduces their sub-aggregations into a single bucket built by this range's factory.
 */
Bucket reduce(List<Bucket> ranges, ReduceContext context) {
    long totalDocCount = 0;
    final List<InternalAggregations> subAggs = new ArrayList<>(ranges.size());
    for (Bucket toReduce : ranges) {
        totalDocCount += toReduce.docCount;
        subAggs.add(toReduce.aggregations);
    }
    final InternalAggregations reducedAggs = InternalAggregations.reduce(subAggs, context);
    return getFactory().createBucket(key, from, to, totalDocCount, reducedAggs, keyed, format);
}
/**
 * Reduces date-histogram buckets with the same key: sums doc counts and reduces
 * their sub-aggregations into a single bucket.
 */
Bucket reduce(List<Bucket> buckets, ReduceContext context) {
    long totalDocCount = 0;
    final List<InternalAggregations> subAggs = new ArrayList<>(buckets.size());
    for (Bucket toReduce : buckets) {
        totalDocCount += toReduce.docCount;
        subAggs.add((InternalAggregations) toReduce.getAggregations());
    }
    final InternalAggregations reducedAggs = InternalAggregations.reduce(subAggs, context);
    return new InternalDateHistogram.Bucket(key, totalDocCount, keyed, format, reducedAggs);
}
public B reduce(List<B> buckets, ReduceContext context) { long docCount = 0; // For the per term doc count error we add up the errors from the // shards that did not respond with the term. To do this we add up // the errors from the shards that did respond with the terms and // subtract that from the sum of the error from all shards long docCountError = 0; List<InternalAggregations> aggregationsList = new ArrayList<>(buckets.size()); for (B bucket : buckets) { docCount += bucket.docCount; if (docCountError != -1) { if (bucket.docCountError == -1) { docCountError = -1; } else { docCountError += bucket.docCountError; } } aggregationsList.add(bucket.aggregations); } InternalAggregations aggs = InternalAggregations.reduce(aggregationsList, context); return newBucket(docCount, aggs, docCountError); }
/**
 * Reduces auto-date-histogram buckets that fall into the same rounded interval:
 * sums doc counts, reduces sub-aggregations, and keys the result by the rounded key.
 */
Bucket reduce(List<Bucket> buckets, Rounding rounding, ReduceContext context) {
    long totalDocCount = 0;
    final List<InternalAggregations> subAggs = new ArrayList<>(buckets.size());
    for (Bucket toReduce : buckets) {
        totalDocCount += toReduce.docCount;
        subAggs.add((InternalAggregations) toReduce.getAggregations());
    }
    final InternalAggregations reducedAggs = InternalAggregations.reduce(subAggs, context);
    return new InternalAutoDateHistogram.Bucket(rounding.round(key), totalDocCount, format, reducedAggs);
}
/**
 * Reduces the single-bucket aggregations produced by each shard into one
 * aggregation: sums doc counts and reduces the sub-aggregations.
 */
@Override
public InternalAggregation doReduce(List<InternalAggregation> aggregations, ReduceContext reduceContext) {
    long totalDocCount = 0L;
    final List<InternalAggregations> subAggs = new ArrayList<>(aggregations.size());
    for (InternalAggregation aggregation : aggregations) {
        assert aggregation.getName().equals(getName());
        // Cast once and reuse, instead of casting per field access.
        InternalSingleBucketAggregation singleBucket = (InternalSingleBucketAggregation) aggregation;
        totalDocCount += singleBucket.docCount;
        subAggs.add(singleBucket.aggregations);
    }
    return newAggregation(getName(), totalDocCount, InternalAggregations.reduce(subAggs, reduceContext));
}
/**
 * Reduces the per-shard binary-range results into a single aggregation. Every
 * shard is expected to report the exact same fixed set of range buckets, in the
 * same order; doc counts are summed per range and sub-aggregations are reduced
 * per range across all shard results.
 */
@Override
public InternalAggregation doReduce(List<InternalAggregation> aggregations, ReduceContext reduceContext) {
    // Account for the buckets this reduction will produce up front so the
    // circuit breaker can trip before any work is done.
    reduceContext.consumeBucketsAndMaybeBreak(buckets.size());
    long[] docCounts = new long[buckets.size()];
    // aggs[rangeIndex][shardResultIndex]: sub-aggregations gathered per range
    // across the shard results, reduced per range below.
    InternalAggregations[][] aggs = new InternalAggregations[buckets.size()][];
    for (int i = 0; i < aggs.length; ++i) {
        aggs[i] = new InternalAggregations[aggregations.size()];
    }
    for (int i = 0; i < aggregations.size(); ++i) {
        InternalBinaryRange range = (InternalBinaryRange) aggregations.get(i);
        // Defensive check: shard results must agree on the number of ranges.
        if (range.buckets.size() != buckets.size()) {
            throw new IllegalStateException("Expected [" + buckets.size() + "] buckets, but got [" + range.buckets.size() + "]");
        }
        for (int j = 0; j < buckets.size(); ++j) {
            Bucket bucket = range.buckets.get(j);
            docCounts[j] += bucket.docCount;
            aggs[j][i] = bucket.aggregations;
        }
    }
    // NOTE: this local deliberately shadows the `buckets` field referenced
    // above; from here on the field must be accessed as `this.buckets`.
    List<Bucket> buckets = new ArrayList<>(this.buckets.size());
    for (int i = 0; i < this.buckets.size(); ++i) {
        Bucket b = this.buckets.get(i);
        buckets.add(new Bucket(format, keyed, b.key, b.from, b.to, docCounts[i],
            InternalAggregations.reduce(Arrays.asList(aggs[i]), reduceContext)));
    }
    return new InternalBinaryRange(name, format, keyed, buckets, pipelineAggregators(), metaData);
}
/**
 * Reduces the per-shard aggregation trees into one, then (when sibling pipeline
 * aggregations are present) runs each pipeline on the reduced result, appending
 * its output. Each pipeline sees the results appended by earlier pipelines.
 */
private static InternalAggregations reduceAggs(List<InternalAggregations> aggregationsList,
        List<SiblingPipelineAggregator> pipelineAggregators, ReduceContext reduceContext) {
    InternalAggregations aggregations = InternalAggregations.reduce(aggregationsList, reduceContext);
    if (pipelineAggregators == null) {
        return aggregations;
    }
    List<InternalAggregation> newAggs = new ArrayList<>();
    for (Object agg : aggregations) {
        newAggs.add((InternalAggregation) agg);
    }
    for (SiblingPipelineAggregator pipelineAggregator : pipelineAggregators) {
        newAggs.add(pipelineAggregator.doReduce(new InternalAggregations(newAggs), reduceContext));
    }
    return new InternalAggregations(newAggs);
}
InternalAggregations reducedEmptySubAggs = InternalAggregations.reduce(Collections.singletonList(bucketInfo.emptySubAggregations), reduceContext);
InternalAggregations reducedEmptySubAggs = InternalAggregations.reduce( Collections.singletonList(emptyBucketInfo.subAggregations), reduceContext);
InternalAggregations reducedEmptySubAggs = InternalAggregations.reduce(Collections.singletonList(emptyBucketInfo.subAggregations), reduceContext); if (bounds != null) {
/**
 * Reduces buckets representing the same significant term by summing their
 * subset/superset document frequencies and reducing their sub-aggregations.
 */
public B reduce(List<B> buckets, ReduceContext context) {
    long mergedSubsetDf = 0;
    long mergedSupersetDf = 0;
    final List<InternalAggregations> subAggregations = new ArrayList<>(buckets.size());
    for (B current : buckets) {
        mergedSubsetDf += current.subsetDf;
        mergedSupersetDf += current.supersetDf;
        subAggregations.add(current.aggregations);
    }
    return newBucket(mergedSubsetDf, subsetSize, mergedSupersetDf, supersetSize,
        InternalAggregations.reduce(subAggregations, context));
}
/**
 * Reduces composite buckets sharing this bucket's key by summing their doc
 * counts and reducing their sub-aggregations into a single bucket.
 */
InternalBucket reduce(List<InternalBucket> buckets, ReduceContext reduceContext) {
    long mergedDocCount = 0;
    final List<InternalAggregations> subAggregations = new ArrayList<>(buckets.size());
    for (InternalBucket current : buckets) {
        mergedDocCount += current.docCount;
        subAggregations.add(current.aggregations);
    }
    return new InternalBucket(sourceNames, formats, key, reverseMuls, mergedDocCount,
        InternalAggregations.reduce(subAggregations, reduceContext));
}
/**
 * Reduces histogram buckets sharing this bucket's key by summing their doc
 * counts and reducing their sub-aggregations into a single bucket.
 */
Bucket reduce(List<Bucket> buckets, ReduceContext context) {
    long mergedDocCount = 0;
    final List<InternalAggregations> subAggregations = new ArrayList<>(buckets.size());
    for (Bucket current : buckets) {
        mergedDocCount += current.docCount;
        subAggregations.add((InternalAggregations) current.getAggregations());
    }
    return new InternalHistogram.Bucket(key, mergedDocCount, keyed, format,
        InternalAggregations.reduce(subAggregations, context));
}
/**
 * Reduces range buckets covering this bucket's [from, to) range by summing
 * their doc counts and reducing their sub-aggregations, building the merged
 * bucket via this range's factory.
 */
Bucket reduce(List<Bucket> ranges, ReduceContext context) {
    long mergedDocCount = 0;
    final List<InternalAggregations> subAggregations = new ArrayList<>(ranges.size());
    for (Bucket current : ranges) {
        mergedDocCount += current.docCount;
        subAggregations.add(current.aggregations);
    }
    return getFactory().createBucket(key, from, to, mergedDocCount,
        InternalAggregations.reduce(subAggregations, context), keyed, format);
}