/**
 * @throws IllegalArgumentException if fielddata is not supported on this field type.
 *         An {@code IllegalArgumentException} is used so that the error is returned as an
 *         HTTP 400 when it occurs in a request; see {@link org.elasticsearch.ExceptionsHelper#status}.
 */
protected final void failIfNoDocValues() {
    if (hasDocValues() == false) {
        throw new IllegalArgumentException("Can't load fielddata on [" + name()
            + "] because fielddata is unsupported on fields of type ["
            + typeName() + "]. Use doc values instead.");
    }
}
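// The exception type above is load-bearing. A minimal sketch of the status translation
// it relies on (the message string is an illustrative placeholder):
// ExceptionsHelper.status maps IllegalArgumentException to 400 (Bad Request),
// whereas an IllegalStateException would surface as a 500.
RestStatus status = ExceptionsHelper.status(new IllegalArgumentException("no fielddata"));
assert status == RestStatus.BAD_REQUEST;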
protected MappedFieldType(MappedFieldType ref) {
    super(ref);
    this.name = ref.name();
    this.boost = ref.boost();
    this.docValues = ref.hasDocValues();
    this.indexAnalyzer = ref.indexAnalyzer();
    this.searchAnalyzer = ref.searchAnalyzer();
    this.searchQuoteAnalyzer = ref.searchQuoteAnalyzer();
    this.similarity = ref.similarity();
    this.nullValue = ref.nullValue();
    this.nullValueAsString = ref.nullValueAsString();
    this.eagerGlobalOrdinals = ref.eagerGlobalOrdinals;
}
protected final void failIfNotIndexed() {
    if (indexOptions() == IndexOptions.NONE && pointDataDimensionCount() == 0) {
        // we throw an IAE rather than an ISE so that it translates to a 4xx code rather than a 5xx code on the http layer
        throw new IllegalArgumentException("Cannot search on field [" + name() + "] since it is not indexed.");
    }
}
Query percolateQuery(String name, PercolateQuery.QueryStore queryStore, List<BytesReference> documents,
                     IndexSearcher searcher, Version indexVersion) throws IOException {
    IndexReader indexReader = searcher.getIndexReader();
    Tuple<BooleanQuery, Boolean> t = createCandidateQuery(indexReader, indexVersion);
    Query candidateQuery = t.v1();
    boolean canUseMinimumShouldMatchField = t.v2();

    Query verifiedMatchesQuery;
    // We can only skip the MemoryIndex verification when percolating a single non-nested document. We cannot
    // skip MemoryIndex verification when percolating multiple documents, because when terms and
    // ranges are extracted from an IndexReader backed by a RAMDirectory holding multiple documents we do
    // not know which document the terms belong to, and for certain queries we would incorrectly emit
    // candidate matches as actual matches.
    if (canUseMinimumShouldMatchField && indexReader.maxDoc() == 1) {
        verifiedMatchesQuery = new TermQuery(new Term(extractionResultField.name(), EXTRACTION_COMPLETE));
    } else {
        verifiedMatchesQuery = new MatchNoDocsQuery("multiple or nested docs or CoveringQuery could not be used");
    }
    return new PercolateQuery(name, queryStore, documents, candidateQuery, searcher, verifiedMatchesQuery);
}
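// The verification phase that the comment above describes, sketched in miniature with
// Lucene's MemoryIndex (org.apache.lucene.index.memory). The field, value, and
// `storedQuery` are illustrative placeholders, not names from this class: the candidate
// query only proves the document contains the stored query's extracted terms; running
// the stored query against a single-document in-memory index confirms a real match.
MemoryIndex memoryIndex = new MemoryIndex();
memoryIndex.addField("body", "the quick brown fox", new StandardAnalyzer());
IndexSearcher verifier = memoryIndex.createSearcher();
boolean realMatch = verifier.count(storedQuery) > 0;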
@Override
protected Query doToQuery(QueryShardContext context) throws IOException {
    Query query = null;
    MappedFieldType mapper = context.fieldMapper(this.fieldName);
    if (mapper != null) {
        query = mapper.termQuery(this.value, context);
    }
    if (query == null) {
        query = new TermQuery(new Term(this.fieldName, BytesRefs.toBytesRef(this.value)));
    }
    return query;
}
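// Typical builder-level usage that ends up in the doToQuery above; field name and
// value are illustrative:
QueryBuilder qb = QueryBuilders.termQuery("user", "kimchy");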
/**
 * Expand a {@link PhraseQuery} to multiple fields that share the same analyzer.
 * Returns a {@link DisjunctionMaxQuery} with a disjunction for each expanded field.
 */
static Query blendPhrase(PhraseQuery query, float tiebreaker, FieldAndFieldType... fields) {
    List<Query> disjunctions = new ArrayList<>();
    Term[] terms = query.getTerms();
    int[] positions = query.getPositions();
    for (FieldAndFieldType field : fields) {
        PhraseQuery.Builder builder = new PhraseQuery.Builder();
        for (int i = 0; i < terms.length; i++) {
            builder.add(new Term(field.fieldType.name(), terms[i].bytes()), positions[i]);
        }
        Query q = builder.build();
        if (field.boost != AbstractQueryBuilder.DEFAULT_BOOST) {
            q = new BoostQuery(q, field.boost);
        }
        disjunctions.add(q);
    }
    return new DisjunctionMaxQuery(disjunctions, tiebreaker);
}
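// A hand-built sketch of what blendPhrase produces for the phrase "quick fox" expanded
// to two fields; the field names, boost, and tiebreaker are made-up examples:
PhraseQuery.Builder title = new PhraseQuery.Builder();
title.add(new Term("title", "quick"), 0);
title.add(new Term("title", "fox"), 1);
PhraseQuery.Builder body = new PhraseQuery.Builder();
body.add(new Term("body", "quick"), 0);
body.add(new Term("body", "fox"), 1);
Query blended = new DisjunctionMaxQuery(
    Arrays.asList(new BoostQuery(title.build(), 2.0f), body.build()), 0.1f);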
@Override
protected SpanQuery doToQuery(QueryShardContext context) throws IOException {
    MappedFieldType mapper = context.fieldMapper(fieldName);
    Term term;
    if (mapper == null) {
        term = new Term(fieldName, BytesRefs.toBytesRef(value));
    } else {
        Query termQuery = mapper.termQuery(value, context);
        term = MappedFieldType.extractTerm(termQuery);
    }
    return new SpanTermQuery(term);
}
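// Span term queries exist to compose into positional queries; a small sketch with an
// illustrative field and terms: "quick ... fox" within two positions, in order.
SpanQuery quick = new SpanTermQuery(new Term("body", "quick"));
SpanQuery fox = new SpanTermQuery(new Term("body", "fox"));
Query near = new SpanNearQuery(new SpanQuery[] { quick, fox }, 2, true);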
String fieldName = fieldType != null ? fieldType.name() : prefixBuilder.fieldName();
if (context.getIndexSettings().getIndexVersionCreated().before(Version.V_6_4_0)) {
    PrefixQuery prefixQuery = new PrefixQuery(new Term(fieldName, prefixBuilder.value()));
    if (prefixBuilder.rewrite() != null) {
        MultiTermQuery.RewriteMethod rewriteMethod = // ... (the right-hand side is truncated in the original snippet)
public static Engine.Index prepareIndex(DocumentMapperForType docMapper, Version indexCreatedVersion, SourceToParse source,
                                        long seqNo, long primaryTerm, long version, VersionType versionType,
                                        Engine.Operation.Origin origin, long autoGeneratedIdTimestamp, boolean isRetry,
                                        long ifSeqNo, long ifPrimaryTerm) {
    long startTime = System.nanoTime();
    ParsedDocument doc = docMapper.getDocumentMapper().parse(source);
    if (docMapper.getMapping() != null) {
        doc.addDynamicMappingsUpdate(docMapper.getMapping());
    }
    Term uid;
    if (indexCreatedVersion.onOrAfter(Version.V_6_0_0_beta1)) {
        uid = new Term(IdFieldMapper.NAME, Uid.encodeId(doc.id()));
    } else if (docMapper.getDocumentMapper().idFieldMapper().fieldType().indexOptions() != IndexOptions.NONE) {
        uid = new Term(IdFieldMapper.NAME, doc.id());
    } else {
        uid = new Term(UidFieldMapper.NAME, Uid.createUidAsBytes(doc.type(), doc.id()));
    }
    return new Engine.Index(uid, doc, seqNo, primaryTerm, version, versionType, origin, startTime,
        autoGeneratedIdTimestamp, isRetry, ifSeqNo, ifPrimaryTerm);
}
private long getBackgroundFrequency(String value) throws IOException {
    Query query = fieldType.termQuery(value, context.getQueryShardContext());
    if (query instanceof TermQuery) {
        // for types that use the inverted index, we prefer using a caching terms
        // enum that will do a better job at reusing index inputs
        Term term = ((TermQuery) query).getTerm();
        FilterableTermsEnum termsEnum = getTermsEnum(term.field());
        if (termsEnum.seekExact(term.bytes())) {
            return termsEnum.docFreq();
        } else {
            return 0;
        }
    }
    // otherwise do it the naive way
    if (filter != null) {
        query = new BooleanQuery.Builder()
            .add(query, Occur.FILTER)
            .add(filter, Occur.FILTER)
            .build();
    }
    return context.searcher().count(query);
}
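// The two paths in miniature, assuming an open IndexReader `reader`, a Lucene 7.x API,
// and that the field exists; field and value are illustrative. The cheap path reads the
// document frequency straight from the terms dictionary instead of running a search:
TermsEnum termsEnum = MultiFields.getTerms(reader, "user").iterator();
long backgroundFreq = termsEnum.seekExact(new BytesRef("kimchy")) ? termsEnum.docFreq() : 0;
// the general path, used when the field's termQuery is not a simple TermQuery:
long count = new IndexSearcher(reader).count(new TermQuery(new Term("user", "kimchy")));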
protected Query blendTermQuery(Term term, MappedFieldType fieldType) {
    if (fuzziness != null) {
        try {
            Query query = fieldType.fuzzyQuery(term.text(), fuzziness, fuzzyPrefixLength, maxExpansions, transpositions);
            if (query instanceof FuzzyQuery) {
                QueryParsers.setRewriteMethod((FuzzyQuery) query, fuzzyRewriteMethod);
            }
            return query;
        } catch (RuntimeException e) {
            if (lenient) {
                return newLenientFieldQuery(fieldType.name(), e);
            } else {
                throw e;
            }
        }
    }
    return termQuery(fieldType, term.bytes(), lenient);
}
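// For a plain text field, fieldType.fuzzyQuery typically resolves to a Lucene FuzzyQuery;
// a sketch with illustrative parameters: max 2 edits, no required common prefix,
// up to 50 expansions, transpositions counted as single edits.
Query fuzzy = new FuzzyQuery(new Term("title", "quikc"), 2, 0, 50, true);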
checkTypeName(other);

boolean indexed = indexOptions() != IndexOptions.NONE;
boolean mergeWithIndexed = other.indexOptions() != IndexOptions.NONE;
if (indexed != mergeWithIndexed) {
    conflicts.add("mapper [" + name() + "] has different [index] values");
}
if (stored() != other.stored()) {
    conflicts.add("mapper [" + name() + "] has different [store] values");
}
if (hasDocValues() != other.hasDocValues()) {
    conflicts.add("mapper [" + name() + "] has different [doc_values] values");
}
if (omitNorms() && !other.omitNorms()) {
    conflicts.add("mapper [" + name() + "] has different [norms] values, cannot change from disabled to enabled");
}
if (storeTermVectors() != other.storeTermVectors()) {
    conflicts.add("mapper [" + name() + "] has different [store_term_vector] values");
}
if (storeTermVectorOffsets() != other.storeTermVectorOffsets()) {
    conflicts.add("mapper [" + name() + "] has different [store_term_vector_offsets] values");
}
if (storeTermVectorPositions() != other.storeTermVectorPositions()) {
    conflicts.add("mapper [" + name() + "] has different [store_term_vector_positions] values");
}
if (storeTermVectorPayloads() != other.storeTermVectorPayloads()) {
    conflicts.add("mapper [" + name() + "] has different [store_term_vector_payloads] values");
}
if (indexAnalyzer() == null || "default".equals(indexAnalyzer().name())) {
    // ... (the analyzer comparison is truncated in the original snippet)
protected void setupFieldType(BuilderContext context) {
    fieldType.setName(buildFullName(context));
    if (context.indexCreatedVersion().before(Version.V_5_0_0_alpha1)) {
        fieldType.setOmitNorms(fieldType.omitNorms() && fieldType.boost() == 1.0f);
    }
    if (fieldType.indexAnalyzer() == null && fieldType.tokenized() == false && fieldType.indexOptions() != IndexOptions.NONE) {
        fieldType.setIndexAnalyzer(Lucene.KEYWORD_ANALYZER);
        fieldType.setSearchAnalyzer(Lucene.KEYWORD_ANALYZER);
    }
    boolean defaultDocValues = defaultDocValues(context.indexCreatedVersion());
    defaultFieldType.setHasDocValues(defaultDocValues);
    if (docValuesSet == false) {
        fieldType.setHasDocValues(defaultDocValues);
    }
}
@Override
public void hitExecute(SearchContext context, HitContext hitContext) {
    if (context.highlight() == null) {
        return;
    }
    for (SearchContextHighlight.Field field : context.highlight().fields()) {
        Collection<String> fieldNamesToHighlight;
        if (Regex.isSimpleMatchPattern(field.field())) {
            fieldNamesToHighlight = context.mapperService().simpleMatchToFullName(field.field());
        } else {
            fieldNamesToHighlight = Collections.singletonList(field.field());
        }
        if (context.highlight().forceSource(field)) {
            SourceFieldMapper sourceFieldMapper = context.mapperService().documentMapper(hitContext.hit().getType()).sourceMapper();
            if (!sourceFieldMapper.enabled()) {
                throw new IllegalArgumentException("source is forced for fields " + fieldNamesToHighlight
                    /* ... (the rest of the message is truncated in the original snippet) */);
            }
        }
        for (String fieldName : fieldNamesToHighlight) {
            MappedFieldType fieldType = context.mapperService().fullName(fieldName);
            if (fieldType == null) {
                continue;
            }
            // only text and keyword fields are highlighted
            if (fieldType.typeName().equals(TextFieldMapper.CONTENT_TYPE) == false
                    && fieldType.typeName().equals(KeywordFieldMapper.CONTENT_TYPE) == false) {
                continue;
            }
            // the per-field highlight-query override is elided in the original snippet
            Query highlightQuery = context.parsedQuery().query();
            HighlighterContext highlighterContext = new HighlighterContext(fieldType.name(),
                field, fieldType, context, hitContext, highlightQuery);
            // ... (highlighter lookup and execution elided in the original snippet)
        }
    }
}
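// The forceSource check above guards the request-side option sketched here; a
// hypothetical builder usage with an illustrative field name, asking the highlighter
// to read the field's content from _source rather than stored fields:
HighlightBuilder highlight = new HighlightBuilder().field("content").forceSource(true);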
@Override
protected void parseCreateField(ParseContext context, List<IndexableField> fields) throws IOException {
    if (fieldType().indexOptions() == IndexOptions.NONE && !fieldType().stored()) {
        return;
    }
    fields.add(new Field(fieldType().name(), context.sourceToParse().type(), fieldType()));
    if (fieldType().hasDocValues()) {
        fields.add(new SortedSetDocValuesField(fieldType().name(), new BytesRef(context.sourceToParse().type())));
    }
}
if (fieldType != null) {
    Analyzer actualAnalyzer = getAnalyzer(fieldType, type == MultiMatchQueryBuilder.Type.PHRASE);
    name = fieldType.name();
    if (!groups.containsKey(actualAnalyzer)) {
        groups.put(actualAnalyzer, new ArrayList<>());
    }
    groups.get(actualAnalyzer).add(new FieldAndFieldType(fieldType, boost));
} else {
    queries.add(new MatchNoDocsQuery("unknown field " + name));
}
// ... (the iteration over the analyzer groups is elided in the original snippet; for each group:)
String representativeField = group.get(0).fieldType.name();
Query q = parseGroup(type.matchQueryType(), representativeField, 1f, value, minimumShouldMatch);
if (q != null) {
    // ... (truncated in the original snippet)
@Override
protected Query doToQuery(QueryShardContext shardContext) throws IOException {
    MappedFieldType fieldType = shardContext.fieldMapper(fieldName);
    if (fieldType == null) {
        if (ignoreUnmapped) {
            return new MatchNoDocsQuery();
        } else {
            throw new QueryShardException(shardContext, "failed to find geo_point field [" + fieldName + "]");
        }
    }
    if (!(fieldType instanceof GeoPointFieldType)) {
        throw new QueryShardException(shardContext, "field [" + fieldName + "] is not a geo_point field");
    }
    QueryValidationException exception = checkLatLon();
    if (exception != null) {
        throw new QueryShardException(shardContext, "couldn't validate latitude/longitude values", exception);
    }
    if (GeoValidationMethod.isCoerce(validationMethod)) {
        GeoUtils.normalizePoint(center, true, true);
    }
    Query query = LatLonPoint.newDistanceQuery(fieldType.name(), center.lat(), center.lon(), this.distance);
    if (fieldType.hasDocValues()) {
        Query dvQuery = LatLonDocValuesField.newSlowDistanceQuery(fieldType.name(), center.lat(), center.lon(), this.distance);
        query = new IndexOrDocValuesQuery(query, dvQuery);
    }
    return query;
}
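// A hypothetical usage of this builder; field name and coordinates are illustrative:
// documents whose pin.location lies within 200 km of lower Manhattan.
GeoDistanceQueryBuilder geoQuery = QueryBuilders.geoDistanceQuery("pin.location")
    .point(40.7128, -74.0060)
    .distance(200, DistanceUnit.KILOMETERS);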
protected MappedFieldType(MappedFieldType ref) {
    super(ref);
    this.names = ref.names();
    this.boost = ref.boost();
    this.docValues = ref.hasDocValues();
    this.indexAnalyzer = ref.indexAnalyzer();
    this.searchAnalyzer = ref.searchAnalyzer();
    this.searchQuoteAnalyzer = ref.searchQuoteAnalyzer();
    this.similarity = ref.similarity();
    this.normsLoading = ref.normsLoading();
    this.fieldDataType = ref.fieldDataType();
    this.nullValue = ref.nullValue();
    this.nullValueAsString = ref.nullValueAsString();
}