/**
 * Resolves the {@link Analyzer} to use for the given field type.
 * Keyword fields may carry a normalizer, which takes precedence; otherwise
 * the document mapper's field-name-aware index analyzer is used.
 */
static Analyzer getAnalyzer(DocumentMapper docMapper, MappedFieldType type) {
    if (type instanceof KeywordFieldMapper.KeywordFieldType) {
        // A configured normalizer overrides the regular index analyzer.
        Analyzer normalizer = ((KeywordFieldMapper.KeywordFieldType) type).normalizer();
        if (normalizer != null) {
            return normalizer;
        }
    }
    // Default: the mapping-wide index analyzer (dispatches per field name).
    return docMapper.mappers().indexAnalyzer();
}
}
/**
 * Resolves the {@link Analyzer} for {@code type}: a keyword field's
 * normalizer when present, otherwise the mapper's index analyzer.
 */
static Analyzer getAnalyzer(DocumentMapper docMapper, MappedFieldType type) {
    if (type instanceof KeywordFieldMapper.KeywordFieldType) {
        KeywordFieldMapper.KeywordFieldType keywordType = (KeywordFieldMapper.KeywordFieldType) type;
        Analyzer normalizer = keywordType.normalizer();
        // Normalizer, when configured, takes precedence over the index analyzer.
        if (normalizer != null) {
            return normalizer;
        }
    }
    return docMapper.mappers().indexAnalyzer();
}
}
private Analyzer findAnalyzer(ParseContext context) { Analyzer analyzer = fieldType().indexAnalyzer(); if (analyzer == null) { analyzer = context.docMapper().mappers().indexAnalyzer(); if (analyzer == null) { // This should not happen, should we log warn it? analyzer = Lucene.STANDARD_ANALYZER; } } return analyzer; }
FieldNameAnalyzer fieldNameAnalyzer = (FieldNameAnalyzer) docMapper.mappers().indexAnalyzer();
memoryIndex = new MemoryIndex(true); Analyzer analyzer = context.mapperService().documentMapper(parsedDocument.type()).mappers().indexAnalyzer(); memoryIndices[i] = indexDoc(d, analyzer, memoryIndex).createSearcher().getIndexReader();
FieldNameAnalyzer fieldNameAnalyzer = (FieldNameAnalyzer) docMapper.mappers().indexAnalyzer();
try { Analyzer analyzer = context.mapperService().documentMapper(hitContext.hit().type()).mappers().indexAnalyzer(); List<Object> fieldValues = HighlightUtils.loadFieldValues(field, fieldMapper, context, hitContext); fieldValues = fieldValues.stream().map(obj -> {
ArrayList<TextFragment> fragsList = new ArrayList<>(); List<Object> textsToHighlight; Analyzer analyzer = context.mapperService().documentMapper(hitContext.hit().type()).mappers().indexAnalyzer();
int numberOfFragments; try { Analyzer analyzer = context.mapperService().documentMapper(hitContext.hit().type()).mappers().indexAnalyzer(); List<Object> fieldValues = HighlightUtils.loadFieldValues(field, fieldMapper, context, hitContext); CustomPostingsHighlighter highlighter;
int numberOfFragments; try { Analyzer analyzer = context.mapperService().documentMapper(hitContext.hit().type()).mappers().indexAnalyzer(); List<Object> fieldValues = HighlightUtils.loadFieldValues(field, fieldMapper, context, hitContext); CustomPostingsHighlighter highlighter;
ArrayList<TextFragment> fragsList = new ArrayList<>(); List<Object> textsToHighlight; Analyzer analyzer = context.mapperService().documentMapper(hitContext.hit().type()).mappers().indexAnalyzer();
@Override public void prepare(PercolateContext context, ParsedDocument parsedDocument) { MemoryIndex memoryIndex = cache.get(); for (IndexableField field : parsedDocument.rootDoc().getFields()) { if (field.fieldType().indexOptions() == IndexOptions.NONE && field.name().equals(UidFieldMapper.NAME)) { continue; } try { Analyzer analyzer = context.mapperService().documentMapper(parsedDocument.type()).mappers().indexAnalyzer(); // TODO: instead of passing null here, we can have a CTL<Map<String,TokenStream>> and pass previous, // like the indexer does try (TokenStream tokenStream = field.tokenStream(analyzer, null)) { if (tokenStream != null) { memoryIndex.addField(field.name(), tokenStream, field.boost()); } } } catch (Exception e) { throw new ElasticsearchException("Failed to create token stream for [" + field.name() + "]", e); } } context.initialize(new DocEngineSearcher(memoryIndex), parsedDocument); }