private String getFieldFromExtractTerms(Query query) {
    Set<Term> terms = new HashSet<>();
    try {
        searcher.createNormalizedWeight(query, false).extractTerms(terms);
    } catch (IOException ioe) {
        throw new UnsupportedOperationException(ioe);
    } catch (UnsupportedOperationException ue) {
        // TODO This is for "*" queries and such. Lucene doesn't seem
        // to be able/willing to rewrite such queries.
        // Just ignore the orphans then... OK?
    }
    return terms.isEmpty() ? null : terms.iterator().next().field();
}
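A minimal usage sketch (the call sites below are hypothetical, and assume a Lucene version, 6.x/7.x, that still exposes createNormalizedWeight): a TermQuery yields exactly one extractable term, while a MatchAllDocsQuery extracts no terms, so the method falls back to null.

// Hypothetical usage of the helper above.
String field = getFieldFromExtractTerms(new TermQuery(new Term("title", "lucene"))); // "title"
String none = getFieldFromExtractTerms(new MatchAllDocsQuery());                     // null: no terms extracted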
@Override
public Weight createNormalizedWeight(Query query, boolean needsScores) throws IOException {
    // During tests we prefer to use the wrapped IndexSearcher, because then we use the AssertingIndexSearcher.
    // It is hacky, because if we perform a dfs search, we don't use the wrapped IndexSearcher...
    if (aggregatedDfs != null && needsScores) {
        // if scores are needed and we have dfs data then use it
        return super.createNormalizedWeight(query, needsScores);
    } else if (profiler != null) {
        // we need to use the createWeight method to insert the wrappers
        return super.createNormalizedWeight(query, needsScores);
    } else {
        return in.createNormalizedWeight(query, needsScores);
    }
}
@Override
Collector create(Collector in) throws IOException {
    final Weight filterWeight = searcher.createNormalizedWeight(query, false);
    return new FilteredCollector(in, filterWeight);
}
};
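A sketch of how such a collector factory might be exercised; `mainQuery` and the surrounding anonymous class are assumptions, while TotalHitCountCollector is standard Lucene.

// Hypothetical usage: count only hits that also pass the filter weight.
TotalHitCountCollector counter = new TotalHitCountCollector();
searcher.search(mainQuery, create(counter)); // FilteredCollector forwards matching docs only
System.out.println(counter.getTotalHits());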
@Override
public void extractTerms(IndexSearcher searcher, RescoreContext rescoreContext, Set<Term> termsSet) throws IOException {
    searcher.createNormalizedWeight(((QueryRescoreContext) rescoreContext).query(), false).extractTerms(termsSet);
}
/**
 * Get a {@link DocIdSetIterator} over the inner documents that match.
 */
public DocIdSetIterator innerDocs(LeafReaderContext ctx) throws IOException {
    final IndexReaderContext topLevelCtx = ReaderUtil.getTopLevelContext(ctx);
    IndexSearcher indexSearcher = new IndexSearcher(topLevelCtx);
    Weight weight = indexSearcher.createNormalizedWeight(innerQuery, false);
    Scorer s = weight.scorer(ctx);
    return s == null ? null : s.iterator();
}
}
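A minimal consumption sketch, assuming a `leafContext` in scope; a null return means no inner documents match this segment.

// Hypothetical usage: iterate all matching inner (nested) documents of one segment.
DocIdSetIterator docs = innerDocs(leafContext);
if (docs != null) {
    for (int doc = docs.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = docs.nextDoc()) {
        // `doc` is the segment-local id of an inner document
    }
}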
public AdjacencyMatrixAggregatorFactory(String name, List<KeyedFilter> filters, String separator,
        SearchContext context, AggregatorFactory<?> parent, AggregatorFactories.Builder subFactories,
        Map<String, Object> metaData) throws IOException {
    super(name, context, parent, subFactories, metaData);
    IndexSearcher contextSearcher = context.searcher();
    this.separator = separator;
    weights = new Weight[filters.size()];
    keys = new String[filters.size()];
    for (int i = 0; i < filters.size(); ++i) {
        KeyedFilter keyedFilter = filters.get(i);
        this.keys[i] = keyedFilter.key();
        Query filter = keyedFilter.filter().toFilter(context.getQueryShardContext());
        this.weights[i] = contextSearcher.createNormalizedWeight(filter, false);
    }
}
/**
 * Returns the {@link Weight} for this filter aggregation, creating it if
 * necessary. This is done lazily so that the {@link Weight} is only created
 * if the aggregation collects documents, reducing the overhead of the
 * aggregation in the case where no documents are collected.
 *
 * Note that as aggregations are initialised and executed in a serial manner,
 * no concurrency considerations are necessary here.
 */
public Weight getWeight() {
    if (weight == null) {
        IndexSearcher contextSearcher = context.searcher();
        try {
            weight = contextSearcher.createNormalizedWeight(filter, false);
        } catch (IOException e) {
            throw new AggregationInitializationException("Failed to initialise filter", e);
        }
    }
    return weight;
}
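A sketch of the intended call site (the per-segment `ctx` is an assumption): the cost of createNormalizedWeight is only paid the first time a scorer is actually requested.

// Hypothetical per-segment usage of the lazily created Weight.
Scorer filterScorer = getWeight().scorer(ctx); // first call triggers createNormalizedWeight
if (filterScorer != null) {
    DocIdSetIterator matches = filterScorer.iterator();
    // collect only the documents returned by `matches`
}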
/**
 * Returns the {@link Weight}s for this filters aggregation, creating them if
 * necessary. This is done lazily so that the {@link Weight}s are only
 * created if the aggregation collects documents, reducing the overhead of
 * the aggregation in the case where no documents are collected.
 *
 * Note that as aggregations are initialised and executed in a serial manner,
 * no concurrency considerations are necessary here.
 */
public Weight[] getWeights() {
    if (weights == null) {
        try {
            IndexSearcher contextSearcher = context.searcher();
            weights = new Weight[filters.length];
            for (int i = 0; i < filters.length; ++i) {
                this.weights[i] = contextSearcher.createNormalizedWeight(filters[i], false);
            }
        } catch (IOException e) {
            throw new AggregationInitializationException("Failed to initialise filters for aggregation [" + name() + "]", e);
        }
    }
    return weights;
}
@Override
public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException {
    if (needsScores == false && minScore == null) {
        return subQuery.createWeight(searcher, needsScores, boost);
    }
    boolean subQueryNeedsScores = combineFunction != CombineFunction.REPLACE;
    Weight[] filterWeights = new Weight[functions.length];
    for (int i = 0; i < functions.length; ++i) {
        subQueryNeedsScores |= functions[i].needsScores();
        if (functions[i] instanceof FilterScoreFunction) {
            Query filter = ((FilterScoreFunction) functions[i]).filter;
            filterWeights[i] = searcher.createNormalizedWeight(filter, false);
        }
    }
    Weight subQueryWeight = subQuery.createWeight(searcher, subQueryNeedsScores, boost);
    return new CustomBoostFactorWeight(this, subQueryWeight, filterWeights, subQueryNeedsScores);
}
private static BitSetProducer newParentDocBitSetProducer(Version indexVersionCreated) {
    return context -> {
        Query query = Queries.newNonNestedFilter(indexVersionCreated);
        final IndexReaderContext topLevelContext = ReaderUtil.getTopLevelContext(context);
        final IndexSearcher searcher = new IndexSearcher(topLevelContext);
        searcher.setQueryCache(null);
        final Weight weight = searcher.createNormalizedWeight(query, false);
        Scorer s = weight.scorer(context);
        return s == null ? null : BitSet.of(s.iterator(), context.reader().maxDoc());
    };
}
}
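Usage sketch; BitSetProducer#getBitSet(LeafReaderContext) is the standard Lucene join-module entry point and may return null when a segment contains no parent documents. `leafContext` and `indexVersionCreated` are assumed to be in scope.

// Hypothetical usage: resolve the parent-document bits for one segment.
BitSetProducer parentsProducer = newParentDocBitSetProducer(indexVersionCreated);
BitSet parentDocs = parentsProducer.getBitSet(leafContext); // null if nothing matches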
IndexSearcher searcher = new IndexSearcher(topLevelContext);
searcher.setQueryCache(null);
Weight weight = searcher.createNormalizedWeight(childFilter, false);
Scorer childDocsScorer = weight.scorer(ctx);
final IndexSearcher searcher = new IndexSearcher(reader);
searcher.setQueryCache(null);
weight = searcher.createNormalizedWeight(filter, false);
/**
 * Check whether at least one document matches the provided query.
 */
public static boolean exists(IndexSearcher searcher, Query query) throws IOException {
    final Weight weight = searcher.createNormalizedWeight(query, false);
    // the scorer API should be more efficient at stopping after the first
    // match than the bulk scorer API
    for (LeafReaderContext context : searcher.getIndexReader().leaves()) {
        final Scorer scorer = weight.scorer(context);
        if (scorer == null) {
            continue;
        }
        final Bits liveDocs = context.reader().getLiveDocs();
        final DocIdSetIterator iterator = scorer.iterator();
        for (int doc = iterator.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = iterator.nextDoc()) {
            if (liveDocs == null || liveDocs.get(doc)) {
                return true;
            }
        }
    }
    return false;
}
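To see the helper end to end, here is a self-contained sketch against plain Lucene 6.x/7.x (where createNormalizedWeight still exists; it was removed in 8.0). The ExistsDemo class name is made up, and exists(...) is assumed to be the method above, in scope.

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;

public class ExistsDemo { // hypothetical demo class
    public static void main(String[] args) throws Exception {
        try (Directory dir = new RAMDirectory();
             IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer()))) {
            Document doc = new Document();
            doc.add(new TextField("body", "hello world", Field.Store.NO));
            writer.addDocument(doc);
            try (DirectoryReader reader = DirectoryReader.open(writer)) {
                IndexSearcher searcher = new IndexSearcher(reader);
                // exists(...) is the helper defined above, assumed in scope
                System.out.println(exists(searcher, new TermQuery(new Term("body", "hello"))));   // true
                System.out.println(exists(searcher, new TermQuery(new Term("body", "missing")))); // false
            }
        }
    }
}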
@Override
public void createWeight(Map context, IndexSearcher searcher) throws IOException {
    Weight w = searcher.createNormalizedWeight(q, true);
    context.put(this, w);
}
}
final IndexSearcher searcher = new IndexSearcher(topLevelContext);
searcher.setQueryCache(null);
final Weight weight = searcher.createNormalizedWeight(query, false);
Scorer s = weight.scorer(context);
final BitSet bitSet;
/** Returns an Explanation that describes how <code>doc</code> scored against
 * <code>query</code>.
 *
 * <p>This is intended to be used in developing Similarity implementations,
 * and, for good performance, should not be displayed with every hit.
 * Computing an explanation is as expensive as executing the query over the
 * entire index.
 */
public Explanation explain(Query query, int doc) throws IOException {
    return explain(createNormalizedWeight(query, true), doc);
}
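A short usage sketch; everything below is standard IndexSearcher API, with `query` standing in for whatever query was just executed.

// Hypothetical usage: explain the score of the best hit.
TopDocs top = searcher.search(query, 1);
if (top.scoreDocs.length > 0) {
    Explanation explanation = searcher.explain(query, top.scoreDocs[0].doc);
    System.out.println(explanation); // nested, human-readable score breakdown
}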
public FilterAggregatorFactory(String name, QueryBuilder filterBuilder, SearchContext context,
        AggregatorFactory<?> parent, AggregatorFactories.Builder subFactoriesBuilder,
        Map<String, Object> metaData) throws IOException {
    super(name, context, parent, subFactoriesBuilder, metaData);
    IndexSearcher contextSearcher = context.searcher();
    Query filter = filterBuilder.toFilter(context.getQueryShardContext());
    weight = contextSearcher.createNormalizedWeight(filter, false);
}
@Override
public Weight createNormalizedWeight(Query query, boolean needsScores) throws IOException {
    BooleanQuery.Builder bq = new BooleanQuery.Builder();
    bq.add(query, BooleanClause.Occur.MUST);
    bq.add(Queries.newNestedFilter(), BooleanClause.Occur.MUST_NOT);
    return super.createNormalizedWeight(bq.build(), needsScores);
}
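In effect every incoming query is rewritten before weighting; a sketch of the resulting shape, with `userQuery` as a stand-in:

// Equivalent shape of the rewritten query: the user query must match, and
// documents matching the nested filter are excluded, so only top-level
// (non-nested) Lucene documents can be returned.
Query userQuery = new TermQuery(new Term("field", "value")); // stand-in
BooleanQuery rewritten = new BooleanQuery.Builder()
        .add(userQuery, BooleanClause.Occur.MUST)
        .add(Queries.newNestedFilter(), BooleanClause.Occur.MUST_NOT)
        .build();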