@Override
public SignificantStringTerms buildEmptyAggregation() {
    // We need to account for the significance of a miss in our global stats - provide corpus size as context
    ContextIndexSearcher searcher = context.searcher();
    IndexReader topReader = searcher.getIndexReader();
    int supersetSize = topReader.numDocs();
    return new SignificantStringTerms(name, bucketCountThresholds.getRequiredSize(),
            bucketCountThresholds.getMinDocCount(), pipelineAggregators(), metaData(), format, 0, supersetSize,
            significanceHeuristic, emptyList());
}
@Override
public void execute(SearchContext context) {
    final ObjectHashSet<Term> termsSet = new ObjectHashSet<>();
    try {
        context.searcher().createNormalizedWeight(context.query(), true).extractTerms(new DelegateSet(termsSet));
        for (RescoreContext rescoreContext : context.rescore()) {
            try {
                rescoreContext.rescorer().extractTerms(context.searcher(), rescoreContext, new DelegateSet(termsSet));
            } catch (IOException e) {
                throw new IllegalStateException("Failed to extract terms", e);
            }
        }

        Term[] terms = termsSet.toArray(Term.class);
        TermStatistics[] termStatistics = new TermStatistics[terms.length];
        IndexReaderContext indexReaderContext = context.searcher().getTopReaderContext();
        for (int i = 0; i < terms.length; i++) {
            if (context.isCancelled()) {
                throw new TaskCancelledException("cancelled");
            }
            TermContext termContext = TermContext.build(indexReaderContext, terms[i]);
            termStatistics[i] = context.searcher().termStatistics(terms[i], termContext);
        }

        ObjectObjectHashMap<String, CollectionStatistics> fieldStatistics = HppcMaps.newNoNullKeysMap();
        for (Term term : terms) {
            assert term.field() != null : "field is null";
            if (!fieldStatistics.containsKey(term.field())) {
                final CollectionStatistics collectionStatistics = context.searcher().collectionStatistics(term.field());
                fieldStatistics.put(term.field(), collectionStatistics);
                if (context.isCancelled()) {
                    throw new TaskCancelledException("cancelled");
                }
            }
        }

        context.dfsResult().termsStatistics(terms, termStatistics)
            .fieldStatistics(fieldStatistics)
            .maxDoc(context.searcher().getIndexReader().maxDoc());
    } catch (Exception e) {
        throw new DfsPhaseExecutionException(context, "Exception during dfs phase", e);
    } finally {
        termsSet.clear(); // don't hold on to terms
    }
}
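For orientation, here is a rough standalone sketch of the same statistics gathering against the plain Lucene 7.x API (createNormalizedWeight and TermContext were renamed or removed in Lucene 8; DfsSketch and printStats are illustrative names, not part of the snippet above):

import java.io.IOException;
import java.util.HashSet;
import java.util.Set;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermContext;
import org.apache.lucene.search.CollectionStatistics;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermStatistics;

final class DfsSketch {
    static void printStats(IndexSearcher searcher, Query query) throws IOException {
        Set<Term> terms = new HashSet<>();
        // the Weight knows which terms the (rewritten) query touches
        searcher.createNormalizedWeight(query, true).extractTerms(terms);
        for (Term term : terms) {
            TermContext termContext = TermContext.build(searcher.getTopReaderContext(), term);
            TermStatistics termStats = searcher.termStatistics(term, termContext);
            CollectionStatistics fieldStats = searcher.collectionStatistics(term.field());
            System.out.println(term + ": docFreq=" + termStats.docFreq()
                    + ", field docCount=" + fieldStats.docCount());
        }
    }
}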
@Override
public void execute(SearchContext context) {
    if (context.aggregations() == null) {
        context.queryResult().aggregations(null);
        return;
    }
    if (context.queryResult().hasAggs()) {
        // no need to compute the aggs twice, they are computed on a per context basis
        return;
    }

    // run the global aggregators in a separate match-all pass over the whole index
    List<Aggregator> globals = new ArrayList<>();
    for (Aggregator aggregator : context.aggregations().aggregators()) {
        if (aggregator instanceof GlobalAggregator) {
            globals.add(aggregator);
        }
    }
    if (globals.isEmpty() == false) {
        BucketCollector collector = BucketCollector.wrap(globals);
        Query query = context.buildFilteredQuery(Queries.newMatchAllQuery());
        try {
            collector.preCollection();
            context.searcher().search(query, collector);
        } catch (Exception e) {
            throw new QueryPhaseExecutionException(context, "Failed to execute global aggregators", e);
        }
    }
}
@Override
public void hitsExecute(SearchContext context, SearchHit[] hits) {
    if (hits.length == 0 || context.parsedQuery() == null) {
        return;
    }

    Map<String, Query> namedQueries = new HashMap<>(context.parsedQuery().namedFilters());
    if (context.parsedPostFilter() != null) {
        namedQueries.putAll(context.parsedPostFilter().namedFilters());
    }

    for (Map.Entry<String, Query> entry : namedQueries.entrySet()) {
        String name = entry.getKey();
        Query query = entry.getValue();
        int readerIndex = -1;
        int docBase = -1;
        Weight weight = context.searcher().createNormalizedWeight(query, false);
        Bits matchingDocs = null;
        final IndexReader indexReader = context.searcher().getIndexReader();
        for (int i = 0; i < hits.length; ++i) {
            SearchHit hit = hits[i];
            int hitReaderIndex = ReaderUtil.subIndex(hit.docId(), indexReader.leaves());
            if (readerIndex != hitReaderIndex) {
                readerIndex = hitReaderIndex;
                LeafReaderContext ctx = indexReader.leaves().get(readerIndex);
                docBase = ctx.docBase;
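The loop above amortizes the Weight and per-segment state across hits sorted by docId. As a minimal sketch of the underlying per-segment matching test (plain Lucene 7.x; MatchTest and matches are hypothetical names, and a production version would reuse the Weight across hits the way the snippet above does):

import java.io.IOException;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Weight;

final class MatchTest {
    /** True if the segment-local doc matches the query. */
    static boolean matches(IndexSearcher searcher, Query query, LeafReaderContext leaf, int segmentDocId)
            throws IOException {
        Weight weight = searcher.createNormalizedWeight(query, false); // scores not needed
        Scorer scorer = weight.scorer(leaf);
        if (scorer == null) {
            return false; // the query matches nothing in this segment
        }
        // iterator() already folds in any two-phase confirmation step
        DocIdSetIterator it = scorer.iterator();
        return it.advance(segmentDocId) == segmentDocId;
    }
}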
Query rawParentFilter;
if (parentObjectMapper == null) {
    rawParentFilter = Queries.newNonNestedFilter(context.indexShard().indexSettings().getIndexVersionCreated());
} else {
    rawParentFilter = parentObjectMapper.nestedTypeFilter();
}

int parentDocId = hit.docId();
final int readerIndex = ReaderUtil.subIndex(parentDocId, searcher().getIndexReader().leaves());
// with nested inner hits the nested docs are always in the same segment as the parent
LeafReaderContext ctx = searcher().getIndexReader().leaves().get(readerIndex);

Query childFilter = childObjectMapper.nestedTypeFilter();
BitSetProducer parentFilter = context.bitsetFilterCache().getBitSetProducer(rawParentFilter);
Query q = new ParentChildrenBlockJoinQuery(parentFilter, childFilter, parentDocId);
Weight weight = context.searcher().createNormalizedWeight(q, false);
if (size() == 0) {
    TotalHitCountCollector totalHitCountCollector = new TotalHitCountCollector();
    intersect(weight, innerHitQueryWeight, totalHitCountCollector, ctx);
    result[i] = new TopDocs(totalHitCountCollector.getTotalHits(), Lucene.EMPTY_SCORE_DOCS, 0);
} else {
    int topN = Math.min(from() + size(), context.searcher().getIndexReader().maxDoc());
    TopDocsCollector<?> topDocsCollector;
    if (sort() != null) {
Query rawParentFilter;
if (parentObjectMapper == null) {
    rawParentFilter = Queries.newNonNestedFilter();
} else {
    rawParentFilter = parentObjectMapper.nestedTypeFilter();
}
BitSetProducer parentFilter = context.bitsetFilterCache().getBitSetProducer(rawParentFilter);
Query childFilter = childObjectMapper.nestedTypeFilter();
Query q = Queries.filtered(query.query(), new NestedChildrenQuery(parentFilter, childFilter, hitContext));

if (size() == 0) {
    return new TopDocs(context.searcher().count(q), Lucene.EMPTY_SCORE_DOCS, 0);
} else {
    int topN = Math.min(from() + size(), context.searcher().getIndexReader().maxDoc());
    TopDocsCollector topDocsCollector;
    if (sort() != null) {
        topDocsCollector = TopFieldCollector.create(sort(), topN, true, trackScores(), trackScores());
    } else {
        topDocsCollector = TopScoreDocCollector.create(topN);
    }
    try {
        context.searcher().search(q, topDocsCollector);
    } finally {
        clearReleasables(Lifetime.COLLECTION);
    }
    return topDocsCollector.topDocs(from(), size());
}
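Both inner-hits variants above short-circuit size() == 0 to a pure count. The underlying Lucene idiom, as a sketch (CountOnly is an illustrative name; IndexSearcher.count(Query), used in the newer snippet, does effectively this internally):

import java.io.IOException;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TotalHitCountCollector;

final class CountOnly {
    static int count(IndexSearcher searcher, Query query) throws IOException {
        // counts matches without scoring or materializing top docs
        TotalHitCountCollector collector = new TotalHitCountCollector();
        searcher.search(query, collector);
        return collector.getTotalHits();
    }
}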
        scriptService, cacheRecycler, pageCacheRecycler, bigArrays, threadPool.estimatedTimeInMillisCounter()
);
SearchContext.setCurrent(searchContext);
try {
    UpdateByQueryContext ubqContext = parseRequestSource(indexService, request, searchContext);
    searchContext.preProcess();
    TopLevelFixedBitSetCollector bitSetCollector = new TopLevelFixedBitSetCollector(searchContext.searcher().getIndexReader().maxDoc());
    searchContext.searcher().search(searchContext.query(), searchContext.aliasFilter(), bitSetCollector);
    FixedBitSet docsToUpdate = bitSetCollector.getBitSet();
StoredFieldsContext storedFieldsContext = context.storedFieldsContext();
if (!context.hasScriptFields() && !context.hasFetchSourceContext()) {
    context.fetchSourceContext(new FetchSourceContext(true));
}

int docId = context.docIdsToLoad()[context.docIdsToLoadFrom() + index];
int readerIndex = ReaderUtil.subIndex(docId, context.searcher().getIndexReader().leaves());
LeafReaderContext subReaderContext = context.searcher().getIndexReader().leaves().get(readerIndex);
int subDocId = docId - subReaderContext.docBase;
Engine.GetResult result = null;
try {
    Term uidTerm = context.mapperService().createUidTerm(request.type(), request.id());
    if (uidTerm == null) {
        return new ExplainResponse(shardId.getIndexName(), request.type(), request.id(), false);
    }
    result = context.indexShard().get(new Engine.Get(false, false, request.type(), request.id(), uidTerm));
    if (!result.exists()) {
        return new ExplainResponse(shardId.getIndexName(), request.type(), request.id(), false);
    }
    context.parsedQuery(context.getQueryShardContext().toQuery(request.query()));
    context.preProcess(true);
    int topLevelDocId = result.docIdAndVersion().docId + result.docIdAndVersion().docBase;
    Explanation explanation = context.searcher().explain(context.query(), topLevelDocId);
    for (RescoreContext ctx : context.rescore()) {
        Rescorer rescorer = ctx.rescorer();
        explanation = rescorer.explain(topLevelDocId, context.searcher(), ctx, explanation);
    }
int topN = subSearchContext.from() + subSearchContext.size();
if (sort == null) {
    for (RescoreContext rescoreContext : context.rescore()) {
        topN = Math.max(rescoreContext.getWindowSize(), topN);
    }
}
// collectors are created by hand here, so the maxDoc cap the IndexSearcher
// would normally apply has to be applied manually
topN = Math.min(topN, subSearchContext.searcher().getIndexReader().maxDoc());
TopDocsCollector<?> topDocsCollector;
if (sort == null) {
    topDocsCollector = TopScoreDocCollector.create(topN);
@Override
public void hitsExecute(SearchContext context, SearchHit[] hits) throws IOException {
    if (context.collapse() != null) {
        // retrieve the `doc_value` associated with the collapse field
        String name = context.collapse().getFieldName();
        if (context.docValueFieldsContext() == null) {
            context.docValueFieldsContext(new DocValueFieldsContext(
                    Collections.singletonList(new FieldAndFormat(name, DocValueFieldsContext.USE_DEFAULT_FORMAT))));
        }
    }

    // later, per hit: resolve the segment, load the field data for it, and pick a doc value format
    int readerIndex = ReaderUtil.subIndex(hit.docId(), context.searcher().getIndexReader().leaves());
    subReaderContext = context.searcher().getIndexReader().leaves().get(readerIndex);
    data = indexFieldData.load(subReaderContext);
    if (format == null) {
public GlobalOrdinalsStringTermsAggregator(String name, AggregatorFactories factories,
        ValuesSource.Bytes.WithOrdinals valuesSource, BucketOrder order, DocValueFormat format,
        BucketCountThresholds bucketCountThresholds, IncludeExclude.OrdinalsFilter includeExclude,
        SearchContext context, Aggregator parent, boolean remapGlobalOrds, SubAggCollectionMode collectionMode,
        boolean showTermDocCountError, List<PipelineAggregator> pipelineAggregators,
        Map<String, Object> metaData) throws IOException {
    super(name, factories, context, parent, order, format, bucketCountThresholds, collectionMode,
            showTermDocCountError, pipelineAggregators, metaData);
    this.valuesSource = valuesSource;
    this.includeExclude = includeExclude;
    final IndexReader reader = context.searcher().getIndexReader();
    final SortedSetDocValues values = reader.leaves().size() > 0 ?
        valuesSource.globalOrdinalsValues(context.searcher().getIndexReader().leaves().get(0)) : DocValues.emptySortedSet();
    this.valueCount = values.getValueCount();
    this.lookupGlobalOrd = values::lookupOrd;
    this.acceptedGlobalOrdinals = includeExclude != null ? includeExclude.acceptedGlobalOrdinals(values) : null;
    this.bucketOrds = remapGlobalOrds ? new LongHash(1, context.bigArrays()) : null;
}
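The constructor resolves an ordinal space once and keeps lookupOrd as the ordinal-to-term mapping. A minimal standalone sketch of that doc-values contract (plain Lucene 7.x; OrdinalDump, dumpTerms, and the field name are illustrative):

import java.io.IOException;
import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.SortedSetDocValues;
import org.apache.lucene.util.BytesRef;

final class OrdinalDump {
    static void dumpTerms(LeafReader reader, String field) throws IOException {
        SortedSetDocValues values = reader.getSortedSetDocValues(field);
        if (values == null) {
            values = DocValues.emptySortedSet(); // same fallback as the aggregator
        }
        for (long ord = 0; ord < values.getValueCount(); ord++) {
            BytesRef term = values.lookupOrd(ord); // ordinal -> term bytes
            System.out.println(ord + " -> " + term.utf8ToString());
        }
    }
}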
final DirectoryReader directoryReader = context.searcher().getDirectoryReader();
BytesReference bytesReference = cacheShardLevelResult(context.indexShard(), directoryReader, request.cacheKey(), out -> {
    queryPhase.execute(context);
    try {
        context.queryResult().writeToNoId(out);
    } catch (IOException e) {
        throw new AssertionError("Could not serialize response", e);
    }
});
@Override
public void hitsExecute(SearchContext context, SearchHit[] hits) throws IOException {
    if (context.storedFieldsContext() != null && context.storedFieldsContext().fetchFields() == false) {
        return;
    }

    MapperService mapperService = context.mapperService();
    Set<String> parentFields = new HashSet<>();
    // (the active parent field of each hit's type is collected into parentFields first)

    int lastReaderId = -1;
    Map<String, SortedDocValues> docValuesMap = new HashMap<>();
    for (SearchHit hit : hits) {
        ParentFieldMapper parentFieldMapper = mapperService.documentMapper(hit.getType()).parentFieldMapper();
        if (parentFieldMapper.active() == false) {
            continue;
        }
        int readerId = ReaderUtil.subIndex(hit.docId(), context.searcher().getIndexReader().leaves());
        LeafReaderContext subReaderContext = context.searcher().getIndexReader().leaves().get(readerId);
        if (lastReaderId != readerId) {
            docValuesMap.clear();
@Override
public void hitsExecute(SearchContext context, SearchHit[] hits) throws IOException {
    if (context.version() == false ||
            (context.storedFieldsContext() != null && context.storedFieldsContext().fetchFields() == false)) {
        return;
    }

    hits = hits.clone(); // don't modify the incoming hits
    Arrays.sort(hits, Comparator.comparingInt(SearchHit::docId));

    int lastReaderId = -1;
    NumericDocValues versions = null;
    for (SearchHit hit : hits) {
        int readerId = ReaderUtil.subIndex(hit.docId(), context.searcher().getIndexReader().leaves());
        LeafReaderContext subReaderContext = context.searcher().getIndexReader().leaves().get(readerId);
        if (lastReaderId != readerId) {
            versions = subReaderContext.reader().getNumericDocValues(VersionFieldMapper.NAME);
            lastReaderId = readerId;
        }
        int docId = hit.docId() - subReaderContext.docBase;
        long version = Versions.NOT_FOUND;
        if (versions != null && versions.advanceExact(docId)) {
            version = versions.longValue();
        }
        hit.version(version < 0 ? -1 : version);
    }
}
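This subphase shows the docId-to-segment pattern that recurs throughout these snippets: sort hits by docId, resolve the leaf with ReaderUtil.subIndex, rebase by docBase, and reuse the per-segment doc-values instance. A standalone sketch of one such lookup (plain Lucene 7.x; LeafValueReader and readValue are illustrative names):

import java.io.IOException;
import java.util.List;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.NumericDocValues;
import org.apache.lucene.index.ReaderUtil;

final class LeafValueReader {
    /** Numeric doc value of the field for a top-level docId, or -1 if missing. */
    static long readValue(IndexReader topReader, String field, int topLevelDocId) throws IOException {
        List<LeafReaderContext> leaves = topReader.leaves();
        int readerIndex = ReaderUtil.subIndex(topLevelDocId, leaves); // binary search over docBase
        LeafReaderContext leaf = leaves.get(readerIndex);
        NumericDocValues values = leaf.reader().getNumericDocValues(field);
        int segmentDocId = topLevelDocId - leaf.docBase; // rebase into the segment
        if (values != null && values.advanceExact(segmentDocId)) {
            return values.longValue();
        }
        return -1;
    }
}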
@Override
public void hitsExecute(SearchContext context, SearchHit[] hits) throws IOException {
    if (context.hasScriptFields() == false) {
        return;
    }

    int lastReaderId = -1;
    SearchScript[] leafScripts = null;
    List<ScriptFieldsContext.ScriptField> scriptFields = context.scriptFields().fields();
    final IndexReader reader = context.searcher().getIndexReader();
    for (SearchHit hit : hits) {
        int readerId = ReaderUtil.subIndex(hit.docId(), reader.leaves());
        LeafReaderContext leafReaderContext = reader.leaves().get(readerId);
        if (readerId != lastReaderId) {
            leafScripts = createLeafScripts(leafReaderContext, scriptFields);
            lastReaderId = readerId;
        }
@Override
public void hitExecute(SearchContext context, HitContext hitContext) {
    if (context.explain() == false) {
        return;
    }
    try {
        final int topLevelDocId = hitContext.hit().docId();
        Explanation explanation = context.searcher().explain(context.query(), topLevelDocId);

        for (RescoreContext rescore : context.rescore()) {
            explanation = rescore.rescorer().explain(topLevelDocId, context.searcher(), rescore, explanation);
        }
        // we use the top level doc id, since we work with the top level searcher
        hitContext.hit().explanation(explanation);
    } catch (IOException e) {
        throw new FetchPhaseExecutionException(context,
                "Failed to explain doc [" + hitContext.hit().getType() + "#" + hitContext.hit().getId() + "]", e);
    } finally {
        context.clearReleasables(SearchContext.Lifetime.COLLECTION);
    }
}
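The explain call itself is plain Lucene; note that it takes the top-level docId, which is why the snippet above never rebases it by docBase. A minimal sketch (ExplainDemo is an illustrative name):

import java.io.IOException;
import org.apache.lucene.search.Explanation;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;

final class ExplainDemo {
    static void print(IndexSearcher searcher, Query query, int topLevelDocId) throws IOException {
        // recomputes the document's score and returns a tree of contributions
        Explanation explanation = searcher.explain(query, topLevelDocId);
        System.out.println(explanation);
    }
}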
private long getBackgroundFrequency(String value) throws IOException {
    Query query = fieldType.termQuery(value, context.getQueryShardContext());
    if (query instanceof TermQuery) {
        // for types that use the inverted index, we prefer using a caching terms
        // enum that will do a better job at reusing index inputs
        Term term = ((TermQuery) query).getTerm();
        FilterableTermsEnum termsEnum = getTermsEnum(term.field());
        if (termsEnum.seekExact(term.bytes())) {
            return termsEnum.docFreq();
        } else {
            return 0;
        }
    }
    // otherwise do it the naive way
    if (filter != null) {
        query = new BooleanQuery.Builder()
                .add(query, Occur.FILTER)
                .add(filter, Occur.FILTER)
                .build();
    }
    return context.searcher().count(query);
}
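The fast path works because the inverted index already stores per-term document frequencies; only non-term queries need an actual count. A rough standalone analogue (BackgroundFrequency and frequency are illustrative names; the filter conjunction from the snippet above is omitted):

import java.io.IOException;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;

final class BackgroundFrequency {
    static long frequency(IndexSearcher searcher, Query query) throws IOException {
        if (query instanceof TermQuery) {
            // fast path: read the stored document frequency for the term
            Term term = ((TermQuery) query).getTerm();
            return searcher.getIndexReader().docFreq(term); // 0 if the term is absent
        }
        // slow path: execute the query and count matching documents
        return searcher.count(query);
    }
}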
if (SearchType.QUERY_THEN_FETCH != context.searchType()) {
    return false;
}
IndexSettings settings = context.indexShard().indexSettings();
// if not explicitly set in the request, fall back to the index-level setting
if (request.requestCache() == null) {
    if (settings.getValue(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING) == false) {
        return false;
    } else if (context.size() != 0) {
        // requests that ask for hits are not cached by default
        return false;
    }
} else if (request.requestCache() == false) {
    return false;
}
// the reader's cache key is part of the request cache key
assert context.searcher().getIndexReader().getReaderCacheHelper() != null;
CompositeAggregator(String name, AggregatorFactories factories, SearchContext context, Aggregator parent,
        List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData,
        int size, CompositeValuesSourceConfig[] sourceConfigs, CompositeKey rawAfterKey) throws IOException {
    super(name, factories, context, parent, pipelineAggregators, metaData);
    this.size = size;
    this.sourceNames = Arrays.stream(sourceConfigs).map(CompositeValuesSourceConfig::name).collect(Collectors.toList());
    this.reverseMuls = Arrays.stream(sourceConfigs).mapToInt(CompositeValuesSourceConfig::reverseMul).toArray();
    this.formats = Arrays.stream(sourceConfigs).map(CompositeValuesSourceConfig::format).collect(Collectors.toList());
    this.sources = new SingleDimensionValuesSource[sourceConfigs.length];
    for (int i = 0; i < sourceConfigs.length; i++) {
        this.sources[i] = createValuesSource(context.bigArrays(), context.searcher().getIndexReader(),
                context.query(), sourceConfigs[i], size, i);
    }
    this.queue = new CompositeValuesCollectorQueue(context.bigArrays(), sources, size, rawAfterKey);
    this.sortedDocsProducer = sources[0].createSortedDocsProducerOrNull(context.searcher().getIndexReader(), context.query());
}