@Override
public ContextIndexSearcher searcher() {
    return in.searcher();
}
CompositeAggregator(String name, AggregatorFactories factories, SearchContext context, Aggregator parent,
                    List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData, int size,
                    CompositeValuesSourceConfig[] sourceConfigs, CompositeKey rawAfterKey) throws IOException {
    super(name, factories, context, parent, pipelineAggregators, metaData);
    this.size = size;
    this.sourceNames = Arrays.stream(sourceConfigs).map(CompositeValuesSourceConfig::name).collect(Collectors.toList());
    this.reverseMuls = Arrays.stream(sourceConfigs).mapToInt(CompositeValuesSourceConfig::reverseMul).toArray();
    this.formats = Arrays.stream(sourceConfigs).map(CompositeValuesSourceConfig::format).collect(Collectors.toList());
    this.sources = new SingleDimensionValuesSource[sourceConfigs.length];
    for (int i = 0; i < sourceConfigs.length; i++) {
        this.sources[i] = createValuesSource(context.bigArrays(), context.searcher().getIndexReader(),
            context.query(), sourceConfigs[i], size, i);
    }
    this.queue = new CompositeValuesCollectorQueue(context.bigArrays(), sources, size, rawAfterKey);
    this.sortedDocsProducer = sources[0].createSortedDocsProducerOrNull(context.searcher().getIndexReader(), context.query());
}
public AdjacencyMatrixAggregatorFactory(String name, List<KeyedFilter> filters, String separator,
                                        SearchContext context, AggregatorFactory<?> parent,
                                        AggregatorFactories.Builder subFactories,
                                        Map<String, Object> metaData) throws IOException {
    super(name, context, parent, subFactories, metaData);
    IndexSearcher contextSearcher = context.searcher();
    this.separator = separator;
    weights = new Weight[filters.size()];
    keys = new String[filters.size()];
    for (int i = 0; i < filters.size(); ++i) {
        KeyedFilter keyedFilter = filters.get(i);
        this.keys[i] = keyedFilter.key();
        Query filter = keyedFilter.filter().toFilter(context.getQueryShardContext());
        this.weights[i] = contextSearcher.createNormalizedWeight(filter, false);
    }
}
/**
 * Returns the {@link Weight} for this filter aggregation, creating it if
 * necessary. This is done lazily so that the {@link Weight} is only created
 * if the aggregation collects documents, reducing the overhead of the
 * aggregation in the case where no documents are collected.
 *
 * Note that as aggregations are initialised and executed in a serial manner,
 * no concurrency considerations are necessary here.
 */
public Weight getWeight() {
    if (weight == null) {
        IndexSearcher contextSearcher = context.searcher();
        try {
            weight = contextSearcher.createNormalizedWeight(filter, false);
        } catch (IOException e) {
            throw new AggregationInitializationException("Failed to initialise filter", e);
        }
    }
    return weight;
}
private FilterableTermsEnum getTermsEnum(String field) throws IOException {
    if (termsEnum != null) {
        return termsEnum;
    }
    IndexReader reader = context.searcher().getIndexReader();
    if (numberOfAggregatorsCreated > 1) {
        termsEnum = new FreqTermsEnum(reader, field, true, false, filter, context.bigArrays());
    } else {
        termsEnum = new FilterableTermsEnum(reader, indexedFieldName, PostingsEnum.NONE, filter);
    }
    return termsEnum;
}
@Override
public void execute(SearchContext context) {
    final SuggestionSearchContext suggest = context.suggest();
    if (suggest == null) {
        return;
    }
    try {
        CharsRefBuilder spare = new CharsRefBuilder();
        final List<Suggestion<? extends Entry<? extends Option>>> suggestions =
            new ArrayList<>(suggest.suggestions().size());
        for (Map.Entry<String, SuggestionSearchContext.SuggestionContext> entry : suggest.suggestions().entrySet()) {
            SuggestionSearchContext.SuggestionContext suggestion = entry.getValue();
            Suggester<SuggestionContext> suggester = suggestion.getSuggester();
            Suggestion<? extends Entry<? extends Option>> result =
                suggester.execute(entry.getKey(), suggestion, context.searcher(), spare);
            if (result != null) {
                assert entry.getKey().equals(result.name);
                suggestions.add(result);
            }
        }
        context.queryResult().suggest(new Suggest(suggestions));
    } catch (IOException e) {
        throw new ElasticsearchException("I/O exception during suggest phase", e);
    }
}
/**
 * Returns the {@link Weight}s for this filter aggregation, creating them if
 * necessary. This is done lazily so that the {@link Weight}s are only
 * created if the aggregation collects documents, reducing the overhead of
 * the aggregation in the case where no documents are collected.
 *
 * Note that as aggregations are initialised and executed in a serial manner,
 * no concurrency considerations are necessary here.
 */
public Weight[] getWeights() {
    if (weights == null) {
        try {
            IndexSearcher contextSearcher = context.searcher();
            weights = new Weight[filters.length];
            for (int i = 0; i < filters.length; ++i) {
                this.weights[i] = contextSearcher.createNormalizedWeight(filters[i], false);
            }
        } catch (IOException e) {
            throw new AggregationInitializationException("Failed to initialise filters for aggregation [" + name() + "]", e);
        }
    }
    return weights;
}
@Override
public SignificantStringTerms buildEmptyAggregation() {
    // We need to account for the significance of a miss in our global stats - provide corpus size as context
    ContextIndexSearcher searcher = context.searcher();
    IndexReader topReader = searcher.getIndexReader();
    int supersetSize = topReader.numDocs();
    return new SignificantStringTerms(name, bucketCountThresholds.getRequiredSize(),
        bucketCountThresholds.getMinDocCount(), pipelineAggregators(), metaData(), format, 0, supersetSize,
        significanceHeuristic, emptyList());
}
@Override
public SignificantLongTerms buildEmptyAggregation() {
    // We need to account for the significance of a miss in our global stats - provide corpus size as context
    ContextIndexSearcher searcher = context.searcher();
    IndexReader topReader = searcher.getIndexReader();
    int supersetSize = topReader.numDocs();
    return new SignificantLongTerms(name, bucketCountThresholds.getRequiredSize(),
        bucketCountThresholds.getMinDocCount(), pipelineAggregators(), metaData(), format, 0, supersetSize,
        significanceHeuristic, emptyList());
}
@Override
public SignificantStringTerms buildEmptyAggregation() {
    // We need to account for the significance of a miss in our global stats - provide corpus size as context
    ContextIndexSearcher searcher = context.searcher();
    IndexReader topReader = searcher.getIndexReader();
    int supersetSize = topReader.numDocs();
    return new SignificantStringTerms(name, bucketCountThresholds.getRequiredSize(),
        bucketCountThresholds.getMinDocCount(), pipelineAggregators(), metaData(), format, numCollectedDocs,
        supersetSize, significanceHeuristic, emptyList());
}
public GlobalOrdinalsStringTermsAggregator(String name, AggregatorFactories factories,
                                           ValuesSource.Bytes.WithOrdinals valuesSource, BucketOrder order,
                                           DocValueFormat format, BucketCountThresholds bucketCountThresholds,
                                           IncludeExclude.OrdinalsFilter includeExclude, SearchContext context,
                                           Aggregator parent, boolean remapGlobalOrds, SubAggCollectionMode collectionMode,
                                           boolean showTermDocCountError, List<PipelineAggregator> pipelineAggregators,
                                           Map<String, Object> metaData) throws IOException {
    super(name, factories, context, parent, order, format, bucketCountThresholds, collectionMode,
        showTermDocCountError, pipelineAggregators, metaData);
    this.valuesSource = valuesSource;
    this.includeExclude = includeExclude;
    final IndexReader reader = context.searcher().getIndexReader();
    final SortedSetDocValues values = reader.leaves().size() > 0
        ? valuesSource.globalOrdinalsValues(reader.leaves().get(0))
        : DocValues.emptySortedSet();
    this.valueCount = values.getValueCount();
    this.lookupGlobalOrd = values::lookupOrd;
    this.acceptedGlobalOrdinals = includeExclude != null ? includeExclude.acceptedGlobalOrdinals(values) : null;
    this.bucketOrds = remapGlobalOrds ? new LongHash(1, context.bigArrays()) : null;
}
protected Weight createInnerHitQueryWeight() throws IOException {
    final boolean needsScores = size() != 0 && (sort() == null || sort().sort.needsScores());
    return context.searcher().createNormalizedWeight(query(), needsScores);
}
@Override
public void hitsExecute(SearchContext context, SearchHit[] hits) throws IOException {
    if (context.version() == false ||
            (context.storedFieldsContext() != null && context.storedFieldsContext().fetchFields() == false)) {
        return;
    }
    hits = hits.clone(); // don't modify the incoming hits
    Arrays.sort(hits, Comparator.comparingInt(SearchHit::docId));
    int lastReaderId = -1;
    NumericDocValues versions = null;
    for (SearchHit hit : hits) {
        int readerId = ReaderUtil.subIndex(hit.docId(), context.searcher().getIndexReader().leaves());
        LeafReaderContext subReaderContext = context.searcher().getIndexReader().leaves().get(readerId);
        if (lastReaderId != readerId) {
            versions = subReaderContext.reader().getNumericDocValues(VersionFieldMapper.NAME);
            lastReaderId = readerId;
        }
        int docId = hit.docId() - subReaderContext.docBase;
        long version = Versions.NOT_FOUND;
        if (versions != null && versions.advanceExact(docId)) {
            version = versions.longValue();
        }
        hit.version(version < 0 ? -1 : version);
    }
}
@Override
public void hitExecute(SearchContext context, HitContext hitContext) {
    if (context.explain() == false) {
        return;
    }
    try {
        final int topLevelDocId = hitContext.hit().docId();
        Explanation explanation = context.searcher().explain(context.query(), topLevelDocId);
        for (RescoreContext rescore : context.rescore()) {
            explanation = rescore.rescorer().explain(topLevelDocId, context.searcher(), rescore, explanation);
        }
        // we use the top level doc id, since we work with the top level searcher
        hitContext.hit().explanation(explanation);
    } catch (IOException e) {
        throw new FetchPhaseExecutionException(context,
            "Failed to explain doc [" + hitContext.hit().getType() + "#" + hitContext.hit().getId() + "]", e);
    } finally {
        context.clearReleasables(SearchContext.Lifetime.COLLECTION);
    }
}
public SignificantTextAggregatorFactory(String name, IncludeExclude includeExclude, QueryBuilder filterBuilder,
                                        TermsAggregator.BucketCountThresholds bucketCountThresholds,
                                        SignificanceHeuristic significanceHeuristic, SearchContext context,
                                        AggregatorFactory<?> parent, AggregatorFactories.Builder subFactoriesBuilder,
                                        String fieldName, String[] sourceFieldNames, boolean filterDuplicateText,
                                        Map<String, Object> metaData) throws IOException {
    super(name, context, parent, subFactoriesBuilder, metaData);

    // Note that if the field is unmapped (its field type is null), we don't fail,
    // and just use the given field name as a placeholder.
    this.fieldType = context.getQueryShardContext().fieldMapper(fieldName);
    this.indexedFieldName = fieldType != null ? fieldType.name() : fieldName;
    this.sourceFieldNames = sourceFieldNames == null ? new String[] { indexedFieldName } : sourceFieldNames;

    this.includeExclude = includeExclude;
    this.filter = filterBuilder == null ? null : filterBuilder.toQuery(context.getQueryShardContext());
    this.filterDuplicateText = filterDuplicateText;
    IndexSearcher searcher = context.searcher();
    // Important - need to use the doc count that includes deleted docs
    // or we have this issue: https://github.com/elastic/elasticsearch/issues/7951
    this.supersetNumDocs = filter == null ? searcher.getIndexReader().maxDoc() : searcher.count(filter);
    this.bucketCountThresholds = bucketCountThresholds;
    this.significanceHeuristic = significanceHeuristic;
}
@Override
public void execute(SearchContext context) {
    try {
        TopDocs topDocs = context.queryResult().topDocs();
        for (RescoreContext ctx : context.rescore()) {
            topDocs = ctx.rescorer().rescore(topDocs, context.searcher(), ctx);
            // It is the responsibility of the rescorer to sort the resulting top docs;
            // here we only assert that this condition is met.
            assert context.sort() == null && topDocsSortedByScore(topDocs) : "topdocs should be sorted after rescore";
        }
        context.queryResult().topDocs(topDocs, context.queryResult().sortValueFormats());
    } catch (IOException e) {
        throw new ElasticsearchException("Rescore Phase Failed", e);
    }
}
private long getBackgroundFrequency(String value) throws IOException {
    Query query = fieldType.termQuery(value, context.getQueryShardContext());
    if (query instanceof TermQuery) {
        // for types that use the inverted index, we prefer using a caching terms
        // enum that will do a better job at reusing index inputs
        Term term = ((TermQuery) query).getTerm();
        FilterableTermsEnum termsEnum = getTermsEnum(term.field());
        if (termsEnum.seekExact(term.bytes())) {
            return termsEnum.docFreq();
        } else {
            return 0;
        }
    }
    // otherwise do it the naive way
    if (filter != null) {
        query = new BooleanQuery.Builder()
            .add(query, Occur.FILTER)
            .add(filter, Occur.FILTER)
            .build();
    }
    return context.searcher().count(query);
}