CompositeAggregator(String name, AggregatorFactories factories, SearchContext context, Aggregator parent,
                    List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData,
                    int size, CompositeValuesSourceConfig[] sourceConfigs, CompositeKey rawAfterKey) throws IOException {
    super(name, factories, context, parent, pipelineAggregators, metaData);
    this.size = size;
    this.sourceNames = Arrays.stream(sourceConfigs).map(CompositeValuesSourceConfig::name).collect(Collectors.toList());
    this.reverseMuls = Arrays.stream(sourceConfigs).mapToInt(CompositeValuesSourceConfig::reverseMul).toArray();
    this.formats = Arrays.stream(sourceConfigs).map(CompositeValuesSourceConfig::format).collect(Collectors.toList());
    this.sources = new SingleDimensionValuesSource[sourceConfigs.length];
    for (int i = 0; i < sourceConfigs.length; i++) {
        this.sources[i] = createValuesSource(context.bigArrays(), context.searcher().getIndexReader(),
            context.query(), sourceConfigs[i], size, i);
    }
    this.queue = new CompositeValuesCollectorQueue(context.bigArrays(), sources, size, rawAfterKey);
    // If the leading source can produce documents in the order of its values,
    // the aggregator can visit them that way instead of scanning every doc.
    this.sortedDocsProducer = sources[0].createSortedDocsProducerOrNull(context.searcher().getIndexReader(), context.query());
}
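The reverseMuls captured above follow a common idiom for supporting both sort directions with a single ascending comparator. A minimal, self-contained sketch of that idea; the values and class name are hypothetical, not the aggregator's actual compare path:

// Sketch of the reverseMul idiom: compare ascending, then multiply by
// 1 (asc) or -1 (desc) so one comparator serves both directions.
class ReverseMulSketch {
    static int compare(long left, long right, int reverseMul) {
        return Long.compare(left, right) * reverseMul;
    }

    public static void main(String[] args) {
        System.out.println(compare(3, 7, 1));  // negative: 3 sorts before 7 ascending
        System.out.println(compare(3, 7, -1)); // positive: 3 sorts after 7 descending
    }
}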
private FilterableTermsEnum getTermsEnum(String field) throws IOException {
    if (termsEnum != null) {
        return termsEnum;
    }
    IndexReader reader = context.searcher().getIndexReader();
    if (numberOfAggregatorsCreated > 1) {
        // Several aggregators will look up the same background frequencies, so cache them.
        termsEnum = new FreqTermsEnum(reader, field, true, false, filter, context.bigArrays());
    } else {
        termsEnum = new FilterableTermsEnum(reader, indexedFieldName, PostingsEnum.NONE, filter);
    }
    return termsEnum;
}
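The cached enum is what significance heuristics later consult for a term's background ("superset") frequency. A hedged sketch of that lookup, assuming FilterableTermsEnum exposes seekExact(BytesRef) and docFreq(); the wiring around it is hypothetical:

import java.io.IOException;

import org.apache.lucene.util.BytesRef;
import org.elasticsearch.common.lucene.index.FilterableTermsEnum;

// Hedged sketch: read a term's background doc frequency from the cached enum.
class BackgroundFrequencySketch {
    static long supersetDocFreq(FilterableTermsEnum termsEnum, String term) throws IOException {
        if (termsEnum.seekExact(new BytesRef(term))) {
            return termsEnum.docFreq(); // docs containing the term in the background set
        }
        return 0; // term absent from the background set
    }
}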
@Override
public SignificantStringTerms buildEmptyAggregation() {
    // We need to account for the significance of a miss in our global stats - provide corpus size as context
    ContextIndexSearcher searcher = context.searcher();
    IndexReader topReader = searcher.getIndexReader();
    int supersetSize = topReader.numDocs();
    return new SignificantStringTerms(name, bucketCountThresholds.getRequiredSize(),
        bucketCountThresholds.getMinDocCount(), pipelineAggregators(), metaData(), format, numCollectedDocs,
        supersetSize, significanceHeuristic, emptyList());
}
@Override
public SignificantStringTerms buildEmptyAggregation() {
    // We need to account for the significance of a miss in our global stats - provide corpus size as context
    ContextIndexSearcher searcher = context.searcher();
    IndexReader topReader = searcher.getIndexReader();
    int supersetSize = topReader.numDocs();
    return new SignificantStringTerms(name, bucketCountThresholds.getRequiredSize(),
        bucketCountThresholds.getMinDocCount(), pipelineAggregators(), metaData(), format, 0,
        supersetSize, significanceHeuristic, emptyList());
}
@Override
public SignificantLongTerms buildEmptyAggregation() {
    // We need to account for the significance of a miss in our global stats - provide corpus size as context
    ContextIndexSearcher searcher = context.searcher();
    IndexReader topReader = searcher.getIndexReader();
    int supersetSize = topReader.numDocs();
    return new SignificantLongTerms(name, bucketCountThresholds.getRequiredSize(),
        bucketCountThresholds.getMinDocCount(), pipelineAggregators(), metaData(), format, 0,
        supersetSize, significanceHeuristic, emptyList());
}
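All three empty-aggregation builders size the superset with numDocs() rather than maxDoc(); the two differ as soon as segments contain deletions. A short, plain-Lucene illustration of the distinction, with a hypothetical class name:

import java.io.IOException;

import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.store.Directory;

// numDocs() counts only live documents; maxDoc() is an upper bound that still
// includes deleted ones, so the live count is the sensible background-corpus size.
class SupersetSizeSketch {
    static int supersetSize(Directory dir) throws IOException {
        try (IndexReader reader = DirectoryReader.open(dir)) {
            assert reader.numDocs() <= reader.maxDoc(); // equal only if nothing was deleted
            return reader.numDocs();
        }
    }
}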
public GlobalOrdinalsStringTermsAggregator(String name, AggregatorFactories factories,
        ValuesSource.Bytes.WithOrdinals valuesSource, BucketOrder order, DocValueFormat format,
        BucketCountThresholds bucketCountThresholds, IncludeExclude.OrdinalsFilter includeExclude,
        SearchContext context, Aggregator parent, boolean remapGlobalOrds, SubAggCollectionMode collectionMode,
        boolean showTermDocCountError, List<PipelineAggregator> pipelineAggregators,
        Map<String, Object> metaData) throws IOException {
    super(name, factories, context, parent, order, format, bucketCountThresholds, collectionMode,
        showTermDocCountError, pipelineAggregators, metaData);
    this.valuesSource = valuesSource;
    this.includeExclude = includeExclude;
    final IndexReader reader = context.searcher().getIndexReader();
    final SortedSetDocValues values = reader.leaves().size() > 0
        ? valuesSource.globalOrdinalsValues(reader.leaves().get(0))
        : DocValues.emptySortedSet();
    this.valueCount = values.getValueCount();
    this.lookupGlobalOrd = values::lookupOrd;
    this.acceptedGlobalOrdinals = includeExclude != null ? includeExclude.acceptedGlobalOrdinals(values) : null;
    this.bucketOrds = remapGlobalOrds ? new LongHash(1, context.bigArrays()) : null;
}
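The lookupGlobalOrd function stored above is how buckets keyed by global ordinal are resolved back to terms when results are built. A hedged sketch of that resolution; the class and method names are hypothetical:

import java.io.IOException;

import org.apache.lucene.index.SortedSetDocValues;
import org.apache.lucene.util.BytesRef;

// Hedged sketch: a global ordinal is an index into the sorted, deduplicated term
// dictionary, so resolving a bucket to its term is a single lookup.
class OrdLookupSketch {
    static String termForBucket(SortedSetDocValues globalValues, long globalOrd) throws IOException {
        BytesRef term = globalValues.lookupOrd(globalOrd); // ord -> term bytes
        return term.utf8ToString();
    }
}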
SearchScript[] leafScripts = null;
List<ScriptFieldsContext.ScriptField> scriptFields = context.scriptFields().fields();
final IndexReader reader = context.searcher().getIndexReader();
for (SearchHit hit : hits) {
    int readerId = ReaderUtil.subIndex(hit.docId(), reader.leaves());
@Override
public void hitsExecute(SearchContext context, SearchHit[] hits) throws IOException {
    if (context.version() == false ||
            (context.storedFieldsContext() != null && context.storedFieldsContext().fetchFields() == false)) {
        return;
    }
    hits = hits.clone(); // don't modify the incoming hits
    Arrays.sort(hits, Comparator.comparingInt(SearchHit::docId));
    int lastReaderId = -1;
    NumericDocValues versions = null;
    for (SearchHit hit : hits) {
        int readerId = ReaderUtil.subIndex(hit.docId(), context.searcher().getIndexReader().leaves());
        LeafReaderContext subReaderContext = context.searcher().getIndexReader().leaves().get(readerId);
        if (lastReaderId != readerId) {
            versions = subReaderContext.reader().getNumericDocValues(VersionFieldMapper.NAME);
            lastReaderId = readerId;
        }
        int docId = hit.docId() - subReaderContext.docBase;
        long version = Versions.NOT_FOUND;
        if (versions != null && versions.advanceExact(docId)) {
            version = versions.longValue();
        }
        hit.version(version < 0 ? -1 : version);
    }
}
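The readerId/docBase dance in hitsExecute is the standard Lucene way to translate a top-level document id into a segment-local one, and it recurs in several fragments below. A self-contained sketch of the pattern, with a hypothetical class name:

import java.util.List;

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.ReaderUtil;

// A top-level doc id is the segment-local id plus the segment's docBase, so the
// reverse mapping is: binary-search for the owning leaf, then subtract its docBase.
class LeafResolutionSketch {
    static int toSegmentDocId(IndexReader topReader, int topLevelDocId) {
        List<LeafReaderContext> leaves = topReader.leaves();
        int readerId = ReaderUtil.subIndex(topLevelDocId, leaves); // which segment holds it
        LeafReaderContext leaf = leaves.get(readerId);
        return topLevelDocId - leaf.docBase; // id local to that segment's reader
    }
}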
    continue;
}
int readerId = ReaderUtil.subIndex(hit.docId(), context.searcher().getIndexReader().leaves());
LeafReaderContext subReaderContext = context.searcher().getIndexReader().leaves().get(readerId);
if (lastReaderId != readerId) {
    docValuesMap.clear();
Weight weight = context.searcher().createNormalizedWeight(query, false);
Bits matchingDocs = null;
final IndexReader indexReader = context.searcher().getIndexReader();
for (int i = 0; i < hits.length; ++i) {
    SearchHit hit = hits[i];
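With scoring disabled (the false argument), the normalized weight above is only used to ask whether individual hits match the query. A hedged sketch of one way to make that check with plain Lucene, not necessarily the exact fetch-phase code:

import java.io.IOException;

import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Weight;

// Hedged sketch: test whether one segment-local doc matches the query backing
// `weight`. advance() must be fed increasing targets, which is why callers sort
// hits by doc id first, as the version fetch phase above does.
class MatchCheckSketch {
    static boolean matches(Weight weight, LeafReaderContext leaf, int segmentDocId) throws IOException {
        Scorer scorer = weight.scorer(leaf); // null when no doc in this segment can match
        if (scorer == null) {
            return false;
        }
        DocIdSetIterator it = scorer.iterator();
        return it.advance(segmentDocId) == segmentDocId;
    }
}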
final double ratio = maxOrd / ((double) context.searcher().getIndexReader().numDocs());
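This ratio of distinct ordinals to live documents presumably feeds the choice between addressing buckets directly by global ordinal (dense) and remapping them through a hash. A hedged illustration of that kind of heuristic; the 0.5 and 1024 thresholds are assumptions for illustration, not the factory's confirmed constants:

// Hedged sketch: when distinct terms are few relative to the doc count, a dense
// array indexed by global ordinal is cheap; otherwise remap ordinals into a hash.
class OrdinalStrategySketch {
    static boolean useDenseOrdinals(long maxOrd, int numDocs) {
        double ratio = maxOrd / (double) numDocs;
        return ratio <= 0.5 && maxOrd <= 1024; // illustrative thresholds only
    }
}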
int readerIndex = ReaderUtil.subIndex(hit.docId(), context.searcher().getIndexReader().leaves());
subReaderContext = context.searcher().getIndexReader().leaves().get(readerIndex);
data = indexFieldData.load(subReaderContext);
if (format == null) {
topN = Math.min(topN, subSearchContext.searcher().getIndexReader().maxDoc());
if (sort == null) {
    topDocsCollector = TopScoreDocCollector.create(topN);
final int readerIndex = ReaderUtil.subIndex(parentDocId, searcher().getIndexReader().leaves());
LeafReaderContext ctx = searcher().getIndexReader().leaves().get(readerIndex);
result[i] = new TopDocs(totalHitCountCollector.getTotalHits(), Lucene.EMPTY_SCORE_DOCS, 0);
} else {
    int topN = Math.min(from() + size(), context.searcher().getIndexReader().maxDoc());
    TopDocsCollector<?> topDocsCollector;
    if (sort() != null) {
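Both this fragment and the previous one clamp topN to maxDoc() before creating a collector: there is no point sizing a priority queue beyond the number of documents the reader can ever return. A small plain-Lucene sketch of the clamped setup; the class and method names are hypothetical:

import java.io.IOException;

import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.TopScoreDocCollector;

// Sketch: clamp the requested size so the collector's heap never exceeds what
// the index can produce; from/size paging is folded into a single topN.
// Assumes a non-empty index, since TopScoreDocCollector requires topN >= 1.
class TopNSketch {
    static TopDocs topHits(IndexSearcher searcher, Query query, int from, int size) throws IOException {
        int topN = Math.min(from + size, searcher.getIndexReader().maxDoc());
        TopScoreDocCollector collector = TopScoreDocCollector.create(topN);
        searcher.search(query, collector);
        return collector.topDocs(from, size);
    }
}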
int readerIndex = ReaderUtil.subIndex(docId, context.searcher().getIndexReader().leaves());
LeafReaderContext subReaderContext = context.searcher().getIndexReader().leaves().get(readerIndex);
int subDocId = docId - subReaderContext.docBase;
assert context.searcher().getIndexReader().getReaderCacheHelper() != null;
DefaultSearchContext(long id, ShardSearchRequest request, SearchShardTarget shardTarget,
        Engine.Searcher engineSearcher, ClusterService clusterService, IndexService indexService,
        IndexShard indexShard, BigArrays bigArrays, Counter timeEstimateCounter, TimeValue timeout,
        FetchPhase fetchPhase, String clusterAlias, Version minNodeVersion) {
    this.id = id;
    this.request = request;
    this.fetchPhase = fetchPhase;
    this.searchType = request.searchType();
    this.shardTarget = shardTarget;
    this.engineSearcher = engineSearcher;
    // SearchContexts use a BigArrays that can circuit break
    this.bigArrays = bigArrays.withCircuitBreaking();
    this.dfsResult = new DfsSearchResult(id, shardTarget);
    this.queryResult = new QuerySearchResult(id, shardTarget);
    this.fetchResult = new FetchSearchResult(id, shardTarget);
    this.indexShard = indexShard;
    this.indexService = indexService;
    this.clusterService = clusterService;
    this.searcher = new ContextIndexSearcher(engineSearcher, indexService.cache().query(),
        indexShard.getQueryCachingPolicy());
    this.timeEstimateCounter = timeEstimateCounter;
    this.timeout = timeout;
    this.minNodeVersion = minNodeVersion;
    queryShardContext = indexService.newQueryShardContext(request.shardId().id(), searcher.getIndexReader(),
        request::nowInMillis, clusterAlias);
    queryShardContext.setTypes(request.types());
    queryBoost = request.indexBoost();
}
        .maxDoc(context.searcher().getIndexReader().maxDoc());
} catch (Exception e) {
    throw new DfsPhaseExecutionException(context, "Exception during dfs phase", e);