@Override
public SignificantStringTerms buildEmptyAggregation() {
    // We need to account for the significance of a miss in our global stats - provide corpus size as context
    ContextIndexSearcher searcher = context.searcher();
    IndexReader topReader = searcher.getIndexReader();
    int supersetSize = topReader.numDocs();
    return new SignificantStringTerms(name, bucketCountThresholds.getRequiredSize(),
            bucketCountThresholds.getMinDocCount(), pipelineAggregators(), metaData(), format,
            numCollectedDocs, supersetSize, significanceHeuristic, emptyList());
}
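A minimal sketch of how a significance heuristic consumes the superset/subset sizes supplied above; the getScore signature matches SignificanceHeuristic, but the ratio-based body is purely illustrative, not the heuristic the source uses.

double getScore(long subsetFreq, long subsetSize, long supersetFreq, long supersetSize) {
    // frequency of the term inside the bucket vs. inside the whole corpus (supersetSize docs)
    double subsetRate = subsetSize == 0 ? 0 : (double) subsetFreq / subsetSize;
    double supersetRate = supersetSize == 0 ? 0 : (double) supersetFreq / supersetSize;
    // a term scores highly when it is disproportionately frequent in the subset
    return supersetRate == 0 ? 0 : subsetRate / supersetRate;
}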
public GlobalOrdinalsStringTermsAggregator(String name, AggregatorFactories factories,
        ValuesSource.Bytes.WithOrdinals valuesSource, BucketOrder order, DocValueFormat format,
        BucketCountThresholds bucketCountThresholds, IncludeExclude.OrdinalsFilter includeExclude,
        SearchContext context, Aggregator parent, boolean remapGlobalOrds, SubAggCollectionMode collectionMode,
        boolean showTermDocCountError, List<PipelineAggregator> pipelineAggregators,
        Map<String, Object> metaData) throws IOException {
    super(name, factories, context, parent, order, format, bucketCountThresholds, collectionMode,
            showTermDocCountError, pipelineAggregators, metaData);
    this.valuesSource = valuesSource;
    this.includeExclude = includeExclude;
    final IndexReader reader = context.searcher().getIndexReader();
    final SortedSetDocValues values = reader.leaves().size() > 0
            ? valuesSource.globalOrdinalsValues(reader.leaves().get(0))
            : DocValues.emptySortedSet();
    this.valueCount = values.getValueCount();
    this.lookupGlobalOrd = values::lookupOrd;
    this.acceptedGlobalOrdinals = includeExclude != null ? includeExclude.acceptedGlobalOrdinals(values) : null;
    this.bucketOrds = remapGlobalOrds ? new LongHash(1, context.bigArrays()) : null;
}
public SignificantTextAggregatorFactory(String name, IncludeExclude includeExclude, QueryBuilder filterBuilder,
        TermsAggregator.BucketCountThresholds bucketCountThresholds, SignificanceHeuristic significanceHeuristic,
        SearchContext context, AggregatorFactory<?> parent, AggregatorFactories.Builder subFactoriesBuilder,
        String fieldName, String[] sourceFieldNames, boolean filterDuplicateText,
        Map<String, Object> metaData) throws IOException {
    super(name, context, parent, subFactoriesBuilder, metaData);

    // Note that if the field is unmapped (its field type is null), we don't fail,
    // and just use the given field name as a placeholder.
    this.fieldType = context.getQueryShardContext().fieldMapper(fieldName);
    this.indexedFieldName = fieldType != null ? fieldType.name() : fieldName;
    this.sourceFieldNames = sourceFieldNames == null ? new String[] { indexedFieldName } : sourceFieldNames;

    this.includeExclude = includeExclude;
    this.filter = filterBuilder == null ? null : filterBuilder.toQuery(context.getQueryShardContext());
    this.filterDuplicateText = filterDuplicateText;
    IndexSearcher searcher = context.searcher();
    // Important - need to use the doc count that includes deleted docs
    // or we have this issue: https://github.com/elastic/elasticsearch/issues/7951
    this.supersetNumDocs = filter == null ? searcher.getIndexReader().maxDoc() : searcher.count(filter);
    this.bucketCountThresholds = bucketCountThresholds;
    this.significanceHeuristic = significanceHeuristic;
}
public BucketsAggregator(String name, AggregatorFactories factories, SearchContext context, Aggregator parent,
        List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData) throws IOException {
    super(name, factories, context, parent, pipelineAggregators, metaData);
    bigArrays = context.bigArrays();
    docCounts = bigArrays.newIntArray(1, true);
    if (context.aggregations() != null) {
        multiBucketConsumer = context.aggregations().multiBucketConsumer();
    } else {
        multiBucketConsumer = (count) -> {};
    }
}
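A hedged sketch of how a BucketsAggregator subclass typically drives the docCounts array allocated above: collectBucket(...) both bumps the count for a bucket ordinal and forwards the doc to sub-aggregators. The subclass body and its value-to-ordinal mapping are hypothetical.

@Override
public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, LeafBucketCollector sub) throws IOException {
    return new LeafBucketCollectorBase(sub, null) {
        @Override
        public void collect(int doc, long owningBucketOrd) throws IOException {
            long bucketOrd = 0; // hypothetical: map the doc's value to a bucket ordinal
            collectBucket(sub, doc, bucketOrd); // grows docCounts as needed and increments it
        }
    };
}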
@Override
public String toString() {
    StringBuilder result = new StringBuilder().append(shardTarget());
    if (searchType() != SearchType.DEFAULT) {
        result.append("searchType=[").append(searchType()).append("]");
    }
    if (scrollContext() != null) {
        if (scrollContext().scroll != null) {
            result.append("scroll=[").append(scrollContext().scroll.keepAlive()).append("]");
        } else {
            result.append("scroll=[null]");
        }
    }
    result.append(" query=[").append(query()).append("]");
    return result.toString();
}
CompositeAggregator(String name, AggregatorFactories factories, SearchContext context, Aggregator parent,
        List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData,
        int size, CompositeValuesSourceConfig[] sourceConfigs, CompositeKey rawAfterKey) throws IOException {
    super(name, factories, context, parent, pipelineAggregators, metaData);
    this.size = size;
    this.sourceNames = Arrays.stream(sourceConfigs).map(CompositeValuesSourceConfig::name).collect(Collectors.toList());
    this.reverseMuls = Arrays.stream(sourceConfigs).mapToInt(CompositeValuesSourceConfig::reverseMul).toArray();
    this.formats = Arrays.stream(sourceConfigs).map(CompositeValuesSourceConfig::format).collect(Collectors.toList());
    this.sources = new SingleDimensionValuesSource[sourceConfigs.length];
    for (int i = 0; i < sourceConfigs.length; i++) {
        this.sources[i] = createValuesSource(context.bigArrays(), context.searcher().getIndexReader(),
                context.query(), sourceConfigs[i], size, i);
    }
    this.queue = new CompositeValuesCollectorQueue(context.bigArrays(), sources, size, rawAfterKey);
    this.sortedDocsProducer = sources[0].createSortedDocsProducerOrNull(context.searcher().getIndexReader(), context.query());
}
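The reverseMuls array built above encodes the sort direction of each source (1 for ascending, -1 for descending), so one comparator can serve both orders. A simplified sketch, assuming a per-source compare over two buffered key slots as in SingleDimensionValuesSource; the surrounding comparator itself is illustrative.

int compare(int slot1, int slot2) { // illustrative comparator over two buffered composite keys
    for (int i = 0; i < sources.length; i++) {
        int cmp = sources[i].compare(slot1, slot2) * reverseMuls[i];
        if (cmp != 0) {
            return cmp; // the first source with a difference decides the order
        }
    }
    return 0;
}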
if (entry == null) {
    QueryScorer queryScorer = new CustomQueryScorer(highlighterContext.query,
            field.fieldOptions().requireFieldMatch() ? fieldType.name() : null);
    queryScorer.setExpandMultiTermQuery(true);
    Fragmenter fragmenter;
    // ... fragmenter/formatter setup elided ...
}

ArrayList<TextFragment> fragsList = new ArrayList<>();
List<Object> textsToHighlight;
Analyzer analyzer = HighlightUtils.getAnalyzer(context.mapperService().documentMapper(hitContext.hit().getType()), fieldType);
final int maxAnalyzedOffset = context.indexShard().indexSettings().getHighlightMaxAnalyzedOffset();

textsToHighlight = HighlightUtils.loadFieldValues(field, fieldType, context, hitContext);
for (Object textToHighlight : textsToHighlight) {
    String text = convertFieldValue(fieldType, textToHighlight);
    // maxAnalyzedOffset7 is the limit the next major version will enforce: exceeding it only
    // triggers a deprecation warning here, while exceeding maxAnalyzedOffset is a hard error.
    if (text.length() > maxAnalyzedOffset7) {
        deprecationLogger.deprecated(
            "The length [" + text.length() + "] of [" + highlighterContext.fieldName + "] field of ["
                + hitContext.hit().getId() + "] doc of [" + context.indexShard().shardId().getIndexName() + "] index has "
                + "exceeded the allowed maximum of [" + maxAnalyzedOffset7 + "] set for the next major Elastic version. "
                + "This maximum can be set by changing the [" + IndexSettings.MAX_ANALYZED_OFFSET_SETTING.getKey()
                + "] index level setting. For large texts, indexing with offsets or term vectors is recommended!");
    }
    if (text.length() > maxAnalyzedOffset) {
        throw new IllegalArgumentException(
            "The length [" + text.length() + "] of [" + highlighterContext.fieldName + "] field of ["
                + hitContext.hit().getId() + "] doc of [" + context.indexShard().shardId().getIndexName() + "] index "
                + "has exceeded [" + maxAnalyzedOffset + "] - maximum allowed to be analyzed for highlighting. "
                + "This maximum can be set by changing the [" + IndexSettings.MAX_ANALYZED_OFFSET_SETTING.getKey()
                + "] index level setting. For large texts, indexing with offsets or term vectors is recommended!");
    }

    try (TokenStream tokenStream = analyzer.tokenStream(fieldType.name(), text)) {
        if (!tokenStream.hasAttribute(CharTermAttribute.class) || !tokenStream.hasAttribute(OffsetAttribute.class)) {
            // can't perform highlighting if the stream has no terms (binary token stream) or no offsets
            continue;
        }
@Override
public void hitExecute(SearchContext context, HitContext hitContext) {
    if (context.explain() == false) {
        return;
    }
    try {
        final int topLevelDocId = hitContext.hit().docId();
        Explanation explanation = context.searcher().explain(context.query(), topLevelDocId);

        for (RescoreContext rescore : context.rescore()) {
            explanation = rescore.rescorer().explain(topLevelDocId, context.searcher(), rescore, explanation);
        }
        // we use the top level doc id, since we work with the top level searcher
        hitContext.hit().explanation(explanation);
    } catch (IOException e) {
        throw new FetchPhaseExecutionException(context,
                "Failed to explain doc [" + hitContext.hit().getType() + "#" + hitContext.hit().getId() + "]", e);
    } finally {
        context.clearReleasables(SearchContext.Lifetime.COLLECTION);
    }
}
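The rescore loop above nests each explanation inside the next one, so the final output shows both the base query score and every rescore adjustment. Purely for illustration, a chained Lucene Explanation (values and descriptions are made up) could look like:

Explanation query = Explanation.match(1.2f, "weight(title:foo)");   // from searcher.explain(...)
Explanation rescored = Explanation.match(3.4f, "sum of:",           // wrapped by a rescorer
        query, Explanation.match(2.2f, "rescore query contribution"));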
final ObjectHashSet<Term> termsSet = new ObjectHashSet<>();
try {
    context.searcher().createNormalizedWeight(context.query(), true).extractTerms(new DelegateSet(termsSet));
    for (RescoreContext rescoreContext : context.rescore()) {
        try {
            rescoreContext.rescorer().extractTerms(context.searcher(), rescoreContext, new DelegateSet(termsSet));
        } catch (IOException e) {
            throw new IllegalStateException("Failed to extract terms", e);
        }
    }

    Term[] terms = termsSet.toArray(Term.class);
    TermStatistics[] termStatistics = new TermStatistics[terms.length];
    IndexReaderContext indexReaderContext = context.searcher().getTopReaderContext();
    for (int i = 0; i < terms.length; i++) {
        if (context.isCancelled()) {
            throw new TaskCancelledException("cancelled");
        }
        TermContext termContext = TermContext.build(indexReaderContext, terms[i]);
        termStatistics[i] = context.searcher().termStatistics(terms[i], termContext);
    }

    ObjectObjectHashMap<String, CollectionStatistics> fieldStatistics = HppcMaps.newNoNullKeysMap();
    for (Term term : terms) {
        assert term.field() != null : "field is null";
        if (!fieldStatistics.containsKey(term.field())) {
            final CollectionStatistics collectionStatistics = context.searcher().collectionStatistics(term.field());
            fieldStatistics.put(term.field(), collectionStatistics);
            if (context.isCancelled()) {
                throw new TaskCancelledException("cancelled");
            }
        }
    }

    context.dfsResult().termsStatistics(terms, termStatistics)
        .fieldStatistics(fieldStatistics)
        .maxDoc(context.searcher().getIndexReader().maxDoc());
} catch (Exception e) {
    throw new DfsPhaseExecutionException(context, "Exception during dfs phase", e);
} finally {
    termsSet.clear(); // don't hold on to terms
}
@Override
public void hitsExecute(SearchContext context, SearchHit[] hits) throws IOException {
    if (context.storedFieldsContext() != null && context.storedFieldsContext().fetchFields() == false) {
        return;
    }
    MapperService mapperService = context.mapperService();
    Set<String> parentFields = new HashSet<>();
    for (SearchHit hit : hits) {
        ParentFieldMapper parentFieldMapper = mapperService.documentMapper(hit.getType()).parentFieldMapper();
        if (parentFieldMapper.active()) {
            parentFields.add(parentFieldMapper.name());
        }
    }

    int lastReaderId = -1;
    Map<String, SortedDocValues> docValuesMap = new HashMap<>();
    for (SearchHit hit : hits) {
        ParentFieldMapper parentFieldMapper = mapperService.documentMapper(hit.getType()).parentFieldMapper();
        if (parentFieldMapper.active() == false) {
            continue;
        }
        int readerId = ReaderUtil.subIndex(hit.docId(), context.searcher().getIndexReader().leaves());
        LeafReaderContext subReaderContext = context.searcher().getIndexReader().leaves().get(readerId);
        if (lastReaderId != readerId) {
            // moved to a new segment: re-resolve the doc values for all parent fields
            docValuesMap.clear();
            for (String field : parentFields) {
                docValuesMap.put(field, subReaderContext.reader().getSortedDocValues(field));
            }
            lastReaderId = readerId;
        }
        // ... per-hit parent id lookup elided ...
    }
}
StoredFieldsContext storedFieldsContext = context.storedFieldsContext();
if (storedFieldsContext == null) {
    // no fields specified, default to returning _source unless explicitly told otherwise
    if (!context.hasScriptFields() && !context.hasFetchSourceContext()) {
        context.fetchSourceContext(new FetchSourceContext(true));
    }
    fieldsVisitor = new FieldsVisitor(context.sourceRequested());
} else if (storedFieldsContext.fetchFields() == false) {
    // stored-field loading has been disabled entirely
    fieldsVisitor = null;
} else {
    for (String fieldNameOrPattern : context.storedFieldsContext().fieldNames()) {
        if (fieldNameOrPattern.equals(SourceFieldMapper.NAME)) {
            FetchSourceContext fetchSourceContext = context.hasFetchSourceContext() ? context.fetchSourceContext()
                    : FetchSourceContext.FETCH_SOURCE;
            context.fetchSourceContext(new FetchSourceContext(true, fetchSourceContext.includes(), fetchSourceContext.excludes()));
            continue;
        }
        Collection<String> fieldNames = context.mapperService().simpleMatchToFullName(fieldNameOrPattern);
        for (String fieldName : fieldNames) {
            MappedFieldType fieldType = context.smartNameFieldType(fieldName);
            if (fieldType == null) {
                // only fail if we know it is an object field; missing paths shouldn't fail
                if (context.getObjectMapper(fieldName) != null) {
                    throw new IllegalArgumentException("field [" + fieldName + "] isn't a leaf field");
                }
            }
            // ... mapping of stored fields to requested fields elided ...
        }
    }
    boolean loadSource = context.sourceRequested();
    if (storedToRequestedFields.isEmpty()) {
        fieldsVisitor = new FieldsVisitor(loadSource);
    }
}

SearchHit[] hits = new SearchHit[context.docIdsToLoadSize()];
FetchSubPhase.HitContext hitContext = new FetchSubPhase.HitContext();
for (int index = 0; index < context.docIdsToLoadSize(); index++) {
        scriptService, cacheRecycler, pageCacheRecycler, bigArrays, threadPool.estimatedTimeInMillisCounter());
SearchContext.setCurrent(searchContext);
try {
    UpdateByQueryContext ubqContext = parseRequestSource(indexService, request, searchContext);
    searchContext.preProcess();
    TopLevelFixedBitSetCollector bitSetCollector =
            new TopLevelFixedBitSetCollector(searchContext.searcher().getIndexReader().maxDoc());
    searchContext.searcher().search(searchContext.query(), searchContext.aliasFilter(), bitSetCollector);
    FixedBitSet docsToUpdate = bitSetCollector.getBitSet();
    ShardUpdateByQueryResponse response = new ShardUpdateByQueryResponse(request.shardId());
    listener.onResponse(response);
    searchContext.close();
    return;
} catch (Throwable t) {
    searchContext.close();
    listener.onFailure(t);
} finally {
    SearchContext.removeCurrent();
}
if (context.collapse() != null) {
    // field collapsing needs the doc values of the collapse field, so request them here
    String name = context.collapse().getFieldName();
    if (context.docValueFieldsContext() == null) {
        context.docValueFieldsContext(new DocValueFieldsContext(
                Collections.singletonList(new FieldAndFormat(name, DocValueFieldsContext.USE_DEFAULT_FORMAT))));
    } else if (context.docValueFieldsContext().fields().stream().map(ff -> ff.field).anyMatch(name::equals) == false) {
        context.docValueFieldsContext().fields().add(new FieldAndFormat(name, DocValueFieldsContext.USE_DEFAULT_FORMAT));
    }
}

if (context.docValueFieldsContext() == null) {
    return;
}

List<String> noFormatFields = context.docValueFieldsContext().fields().stream()
        .filter(f -> f.format == null).map(f -> f.field).collect(Collectors.toList());
if (noFormatFields.isEmpty() == false) {
    // ... deprecation warning for doc value fields without an explicit format elided ...
}

for (FieldAndFormat fieldAndFormat : context.docValueFieldsContext().fields()) {
    String field = fieldAndFormat.field;
    MappedFieldType fieldType = context.mapperService().fullName(field);
    if (fieldType != null) {
        final IndexFieldData<?> indexFieldData = context.getForField(fieldType);
        final DocValueFormat format;
        if (fieldAndFormat.format == null) {
            format = null;
        } else {
            // ... format resolution from the field type elided ...
        }
        LeafReaderContext subReaderContext = null;
        AtomicFieldData data = null;
        for (SearchHit hit : hits) {
            // if the reader index has changed we need to get a new doc values reader instance
            if (subReaderContext == null || hit.docId() >= subReaderContext.docBase + subReaderContext.reader().maxDoc()) {
                int readerIndex = ReaderUtil.subIndex(hit.docId(), context.searcher().getIndexReader().leaves());
                subReaderContext = context.searcher().getIndexReader().leaves().get(readerIndex);
                data = indexFieldData.load(subReaderContext);
                if (format == null) {
@Override
public void hitExecute(SearchContext context, HitContext hitContext) {
    if (context.highlight() == null) {
        return;
    }
    Map<String, HighlightField> highlightFields = new HashMap<>();
    for (SearchContextHighlight.Field field : context.highlight().fields()) {
        Collection<String> fieldNamesToHighlight;
        if (Regex.isSimpleMatchPattern(field.field())) {
            fieldNamesToHighlight = context.mapperService().simpleMatchToFullName(field.field());
        } else {
            fieldNamesToHighlight = Collections.singletonList(field.field());
        }

        if (context.highlight().forceSource(field)) {
            SourceFieldMapper sourceFieldMapper = context.mapperService().documentMapper(hitContext.hit().getType()).sourceMapper();
            if (!sourceFieldMapper.enabled()) {
                throw new IllegalArgumentException("source is forced for fields " + fieldNamesToHighlight
                        + " but type [" + hitContext.hit().getType() + "] has disabled _source");
            }
        }

        for (String fieldName : fieldNamesToHighlight) {
            MappedFieldType fieldType = context.mapperService().fullName(fieldName);
            if (fieldType == null) {
                continue;
            }
            Query highlightQuery = field.fieldOptions().highlightQuery();
            if (highlightQuery == null) {
                highlightQuery = context.parsedQuery().query();
            }
            HighlighterContext highlighterContext = new HighlighterContext(fieldType.name(), field, fieldType,
                    context, hitContext, highlightQuery);
            // ... highlighter selection and per-field highlighting elided ...
        }
    }
    hitContext.hit().highlightFields(highlightFields);
}
@Override
public void hitsExecute(SearchContext context, SearchHit[] hits) throws IOException {
    if (context.version() == false ||
            (context.storedFieldsContext() != null && context.storedFieldsContext().fetchFields() == false)) {
        return;
    }

    hits = hits.clone(); // don't modify the incoming hits
    Arrays.sort(hits, Comparator.comparingInt(SearchHit::docId));

    int lastReaderId = -1;
    NumericDocValues versions = null;
    for (SearchHit hit : hits) {
        int readerId = ReaderUtil.subIndex(hit.docId(), context.searcher().getIndexReader().leaves());
        LeafReaderContext subReaderContext = context.searcher().getIndexReader().leaves().get(readerId);
        if (lastReaderId != readerId) {
            versions = subReaderContext.reader().getNumericDocValues(VersionFieldMapper.NAME);
            lastReaderId = readerId;
        }
        int docId = hit.docId() - subReaderContext.docBase;
        long version = Versions.NOT_FOUND;
        if (versions != null && versions.advanceExact(docId)) {
            version = versions.longValue();
        }
        hit.version(version < 0 ? -1 : version);
    }
}
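Sorting the hits by docId above matters because Lucene doc-values iterators are forward-only: advanceExact may only be fed increasing doc ids per segment. A sketch of that contract (the field name and the ascendingDocIds list are hypothetical):

NumericDocValues dv = subReaderContext.reader().getNumericDocValues("my_field"); // hypothetical field
for (int docId : ascendingDocIds) { // must be ascending within the segment
    if (dv != null && dv.advanceExact(docId)) {
        long value = dv.longValue(); // value for this doc, only valid after advanceExact returns true
    }
}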
final boolean needSource = context.sourceRequested() || context.highlight() != null;
if (needSource || (context instanceof InnerHitsContext.InnerHitSubContext == false)) {
    FieldsVisitor rootFieldsVisitor = new FieldsVisitor(needSource);
    loadStoredFields(context, subReaderContext, rootFieldsVisitor, rootSubDocId);
    rootFieldsVisitor.postProcess(context.mapperService());
    uid = rootFieldsVisitor.uid();
    source = rootFieldsVisitor.source();
}

if (context.hasStoredFields() && !context.storedFieldsContext().fieldNames().isEmpty()) {
    FieldsVisitor nestedFieldsVisitor = new CustomFieldsVisitor(storedToRequestedFields.keySet(), false);
    searchFields = getSearchFields(context, nestedFieldsVisitor, nestedSubDocId,
            storedToRequestedFields, subReaderContext);
}

assert context.mapperService().types().size() == 1;
typeText = context.mapperService().types().iterator().next();
DocumentMapper documentMapper = context.mapperService().documentMapper(typeText);

SourceLookup sourceLookup = context.lookup().source();
sourceLookup.setSegmentAndDocument(subReaderContext, nestedSubDocId);

ObjectMapper nestedObjectMapper = documentMapper.findNestedObjectMapper(nestedSubDocId, context, subReaderContext);
assert nestedObjectMapper != null;
SearchHit.NestedIdentity nestedIdentity = getInternalNestedIdentity(context, nestedSubDocId, subReaderContext,
        context.mapperService(), nestedObjectMapper);

// ... extraction of the nested _source elided ...
if (/* ... first operand elided ... */
        nestedObjectMapper.parentObjectMapperAreNested(context.mapperService()) == false) {
    // ... handling of non-nested parent object mappers elided ...
}
context.lookup().source().setSource(nestedSourceAsMap);
XContentType contentType = tuple.v1();
context.lookup().source().setSourceContentType(contentType);
Engine.GetResult result = null;
try {
    Term uidTerm = context.mapperService().createUidTerm(request.type(), request.id());
    if (uidTerm == null) {
        return new ExplainResponse(shardId.getIndexName(), request.type(), request.id(), false);
    }
    result = context.indexShard().get(new Engine.Get(false, false, request.type(), request.id(), uidTerm));
    if (!result.exists()) {
        return new ExplainResponse(shardId.getIndexName(), request.type(), request.id(), false);
    }
    context.parsedQuery(context.getQueryShardContext().toQuery(request.query()));
    context.preProcess(true);
    int topLevelDocId = result.docIdAndVersion().docId + result.docIdAndVersion().docBase;
    Explanation explanation = context.searcher().explain(context.query(), topLevelDocId);
    for (RescoreContext ctx : context.rescore()) {
        Rescorer rescorer = ctx.rescorer();
        explanation = rescorer.explain(topLevelDocId, context.searcher(), ctx, explanation);
    }
    GetResult getResult = context.indexShard().getService().get(result, request.id(), request.type(),
            request.storedFields(), request.fetchSourceContext());
    return new ExplainResponse(shardId.getIndexName(), request.type(), request.id(), true, explanation, getResult);
private void parentFieldResolveConfig(SearchContext context, ValuesSourceConfig<WithOrdinals> config) {
    DocumentMapper childDocMapper = context.mapperService().documentMapper(childType);
    if (childDocMapper != null) {
        ParentFieldMapper parentFieldMapper = childDocMapper.parentFieldMapper();
        if (!parentFieldMapper.active()) {
            throw new IllegalArgumentException("[children] no [_parent] field configured that points to a parent type");
        }
        String parentType = parentFieldMapper.type();
        DocumentMapper parentDocMapper = context.mapperService().documentMapper(parentType);
        if (parentDocMapper != null) {
            parentFilter = parentDocMapper.typeFilter(context.getQueryShardContext());
            childFilter = childDocMapper.typeFilter(context.getQueryShardContext());
            MappedFieldType parentFieldType = parentDocMapper.parentFieldMapper().getParentJoinFieldType();
            final SortedSetDVOrdinalsIndexFieldData fieldData = context.getForField(parentFieldType);
            config.fieldContext(new FieldContext(parentFieldType.name(), fieldData, parentFieldType));
        } else {
            config.unmapped(true);
        }
    } else {
        config.unmapped(true);
    }
}
private void joinFieldResolveConfig(SearchContext context, ValuesSourceConfig<WithOrdinals> config) {
    ParentJoinFieldMapper parentJoinFieldMapper = ParentJoinFieldMapper.getMapper(context.mapperService());
    ParentIdFieldMapper parentIdFieldMapper = parentJoinFieldMapper.getParentIdFieldMapper(childType, false);
    if (parentIdFieldMapper != null) {
        parentFilter = parentIdFieldMapper.getParentFilter();
        childFilter = parentIdFieldMapper.getChildFilter(childType);
        MappedFieldType fieldType = parentIdFieldMapper.fieldType();
        final SortedSetDVOrdinalsIndexFieldData fieldData = context.getForField(fieldType);
        config.fieldContext(new FieldContext(fieldType.name(), fieldData, fieldType));
    } else {
        config.unmapped(true);
    }
}
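The two resolve methods above correspond to the two parent/child representations: a legacy per-type _parent field versus a single join field. The dispatch between them is likely along these lines (a sketch, assuming a single-type check like the one used by 6.x indices; not taken verbatim from the source):

protected ValuesSourceConfig<WithOrdinals> resolveConfig(SearchContext context) {
    ValuesSourceConfig<WithOrdinals> config = new ValuesSourceConfig<>(ValuesSourceType.BYTES);
    if (context.mapperService().getIndexSettings().isSingleType()) {
        joinFieldResolveConfig(context, config);   // join field, single-type indices
    } else {
        parentFieldResolveConfig(context, config); // legacy _parent field
    }
    return config;
}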
if (SearchType.QUERY_THEN_FETCH != context.searchType()) {
    return false;
}
IndexSettings settings = context.indexShard().indexSettings();
// if the request cache isn't explicitly addressed in the request, fall back to the index setting
if (request.requestCache() == null) {
    if (settings.getValue(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING) == false) {
        return false;
    } else if (context.size() != 0) {
        // without an explicit request cache setting, requests with a size are not cached
        return false;
    }
}
assert context.searcher().getIndexReader().getReaderCacheHelper() != null;
if (context.getQueryShardContext().isCachable() == false) {
    return false;
}