@Override
public String getType() {
    // Expose this sub-phase's type by delegating to the type() accessor.
    return type();
}
/**
 * Returns {@code true} when the hit's type equals the parent type
 * configured on this mapper's _parent field.
 */
private boolean isParentHit(InternalSearchHit hit) {
    final String parentType = documentMapper.parentFieldMapper().type();
    return hit.type().equals(parentType);
}
/**
 * Returns {@code true} when the _parent type declared by the hit's own
 * mapper points back at this document mapper's type.
 */
private boolean isChildHit(InternalSearchHit hit) {
    DocumentMapper hitMapper = mapperService.documentMapper(hit.type());
    final String declaredParentType = hitMapper.parentFieldMapper().type();
    return documentMapper.type().equals(declaredParentType);
}
}
private FieldMapper getMapperForField(String fieldName, SearchContext searchContext, HitContext hitContext) { DocumentMapper documentMapper = searchContext.mapperService().documentMapper(hitContext.hit().type()); // TODO: no need to lookup the doc mapper with unambiguous field names? just look at the mapper service return documentMapper.mappers().smartNameFieldMapper(fieldName); } }
@Override public void hitExecute(SearchContext context, HitContext hitContext) { // it might make sense to cache the TermDocs on a shared fetch context and just skip here) // it is going to mean we work on the high level multi reader and not the lower level reader as is // the case below... long version; try { BytesRef uid = Uid.createUidAsBytes(hitContext.hit().type(), hitContext.hit().id()); version = Versions.loadVersion( hitContext.readerContext().reader(), new Term(UidFieldMapper.NAME, uid) ); } catch (IOException e) { throw new ElasticsearchException("Could not query index for _version", e); } if (version < 0) { version = -1; } hitContext.hit().version(version); } }
// Resolve the concrete field names this highlight request targets.
Collection<String> fieldNamesToHighlight;
if (Regex.isSimpleMatchPattern(field.field())) {
    // Wildcard pattern: expand it against the mapping of the hit's type.
    DocumentMapper documentMapper = context.mapperService().documentMapper(hitContext.hit().type());
    fieldNamesToHighlight = documentMapper.mappers().simpleMatchToFullName(field.field());
} else {
    // NOTE(review): this fragment looks truncated/garbled — no assignment of
    // fieldNamesToHighlight is visible for the non-wildcard branch, yet the
    // error message below reads it. Confirm against the full file.
    SourceFieldMapper sourceFieldMapper = context.mapperService().documentMapper(hitContext.hit().type()).sourceMapper();
    if (!sourceFieldMapper.enabled()) {
        // _source is required here but disabled for this type — fail fast.
        throw new IllegalArgumentException("source is forced for fields " + fieldNamesToHighlight + " but type [" + hitContext.hit().type() + "] has disabled _source");
// NOTE(review): truncated fragment — the try opened here is not closed in this view.
int numberOfFragments;
try {
    // Index-time analyzer for the hit's type, used by the postings highlighter.
    Analyzer analyzer = context.mapperService().documentMapper(hitContext.hit().type()).mappers().indexAnalyzer();
    // Values of the target field for this hit.
    List<Object> fieldValues = HighlightUtils.loadFieldValues(field, fieldMapper, context, hitContext);
    CustomPostingsHighlighter highlighter;
// Accumulates the highlighted fragments produced below (fragment truncated in this view).
ArrayList<TextFragment> fragsList = new ArrayList<>();
// Raw texts to run the highlighter over; assigned later — TODO confirm in full file.
List<Object> textsToHighlight;
// Index-time analyzer for the hit's type.
Analyzer analyzer = context.mapperService().documentMapper(hitContext.hit().type()).mappers().indexAnalyzer();
if (isParentHit(hitContext.hit())) {
    // Parent hit: query the _parent field with this hit's uid
    // (presumably to match its children — confirm with surrounding code).
    field = ParentFieldMapper.NAME;
    term = Uid.createUid(hitContext.hit().type(), hitContext.hit().id());
} else if (isChildHit(hitContext.hit())) {
    // Child hit: resolve the parent type declared by the hit's own mapper,
    // then query the _uid field.
    DocumentMapper hitDocumentMapper = mapperService.documentMapper(hitContext.hit().type());
    final String parentType = hitDocumentMapper.parentFieldMapper().type();
    field = UidFieldMapper.NAME;
    // NOTE(review): fragment truncated — the term for this branch is assigned past this view.
@Override public void hitExecute(SearchContext context, HitContext hitContext) { try { final int topLevelDocId = hitContext.hit().docId(); Explanation explanation = context.searcher().explain(context.query(), topLevelDocId); for (RescoreSearchContext rescore : context.rescore()) { explanation = rescore.rescorer().explain(topLevelDocId, context, rescore, explanation); } // we use the top level doc id, since we work with the top level searcher hitContext.hit().explanation(explanation); } catch (IOException e) { throw new FetchPhaseExecutionException(context, "Failed to explain doc [" + hitContext.hit().type() + "#" + hitContext.hit().id() + "]", e); } finally { context.clearReleasables(SearchContext.Lifetime.COLLECTION); } } }