@Override
public final DocIdSet getDocIdSet(LeafReaderContext context, Bits acceptDocs) throws IOException {
  // Delegate to the acceptDocs-unaware variant, then filter out disallowed docs.
  final DocIdSet unfiltered = getDocIdSet(context);
  return BitsFilteredDocIdSet.wrap(unfiltered, acceptDocs);
}
/**
 * Convenience wrapper: restricts {@code set} to {@code acceptDocs} unless no filtering is needed.
 *
 * @param set underlying DocIdSet; if {@code null}, this method returns {@code null}
 * @param acceptDocs allowed docs; doc ids absent from this set are not returned by the
 *     resulting DocIdSet. If {@code null}, the original set is returned without wrapping.
 */
public static DocIdSet wrap(DocIdSet set, Bits acceptDocs) {
  if (set == null) {
    return null; // nothing to filter
  }
  if (acceptDocs == null) {
    return set; // no restriction: skip the wrapper allocation
  }
  return new BitsFilteredDocIdSet(set, acceptDocs);
}
@Override public DocIdSet getDocIdSet(final LeafReaderContext context, final Bits acceptDocs) throws IOException { // get a private context that is used to rewrite, createWeight and score eventually final LeafReaderContext privateContext = context.reader().getContext(); final Weight weight = new IndexSearcher(privateContext).createNormalizedWeight(query, false); DocIdSet set = new DocIdSet() { @Override public DocIdSetIterator iterator() throws IOException { Scorer s = weight.scorer(privateContext); return s == null ? null : s.iterator(); } @Override public long ramBytesUsed() { return 0L; } }; return BitsFilteredDocIdSet.wrap(set, acceptDocs); }
/**
 * Convenience wrapper: restricts {@code set} to {@code acceptDocs} unless no filtering is needed.
 *
 * @param set underlying DocIdSet; if {@code null}, this method returns {@code null}
 * @param acceptDocs allowed docs; doc ids absent from this set are not returned by the
 *     resulting DocIdSet. If {@code null}, the original set is returned without wrapping.
 */
public static DocIdSet wrap(DocIdSet set, Bits acceptDocs) {
  if (set == null) {
    return null; // nothing to filter
  }
  if (acceptDocs == null) {
    return set; // no restriction: skip the wrapper allocation
  }
  return new BitsFilteredDocIdSet(set, acceptDocs);
}
@Override public DocIdSet getDocIdSet(final LeafReaderContext context, final Bits acceptDocs) throws IOException { // get a private context that is used to rewrite, createWeight and score eventually final LeafReaderContext privateContext = context.reader().getContext(); final Weight weight = new IndexSearcher(privateContext).createNormalizedWeight(query, false); DocIdSet set = new DocIdSet() { @Override public DocIdSetIterator iterator() throws IOException { Scorer s = weight.scorer(privateContext); return s == null ? null : s.iterator(); } @Override public long ramBytesUsed() { return 0L; } }; return BitsFilteredDocIdSet.wrap(set, acceptDocs); }
@Override public DocIdSet getDocIdSet(LeafReaderContext context, final Bits acceptDocs) throws IOException { final LeafReader reader = context.reader(); final Object key = reader.getCoreCacheKey(); DocIdSet docIdSet = cache.get(key); if (docIdSet != null) { hitCount++; } else { docIdSet = filter.getDocIdSet(context, null); if (policy.shouldCache(filter, context, docIdSet)) { missCount++; docIdSet = docIdSetToCache(docIdSet, reader); if (docIdSet == null) { // We use EMPTY as a sentinel for the empty set, which is cacheable docIdSet = EMPTY; } assert docIdSet.isCacheable(); cache.put(key, docIdSet); } } return docIdSet == EMPTY ? null : BitsFilteredDocIdSet.wrap(docIdSet, acceptDocs); }
@Override public DocIdSet getDocIdSet(LeafReaderContext context, final Bits acceptDocs) throws IOException { final LeafReader reader = context.reader(); final Object key = reader.getCoreCacheKey(); DocIdSet docIdSet = cache.get(key); if (docIdSet != null) { hitCount++; } else { docIdSet = filter.getDocIdSet(context, null); if (policy.shouldCache(filter, context, docIdSet)) { missCount++; docIdSet = docIdSetToCache(docIdSet, reader); if (docIdSet == null) { // We use EMPTY as a sentinel for the empty set, which is cacheable docIdSet = EMPTY; } assert docIdSet.isCacheable(); cache.put(key, docIdSet); } } return docIdSet == EMPTY ? null : BitsFilteredDocIdSet.wrap(docIdSet, acceptDocs); }
@Override public DocIdSet getDocIdSet(LeafReaderContext context, Bits acceptDocs) throws IOException { if (context.ord == 0) { policy.onUse(in); } DocIdSet set = get(in, context); if (set == null) { // do not apply acceptDocs yet, we want the cached filter to not take them into account set = in.getDocIdSet(context, null); if (policy.shouldCache(in, context, set)) { set = docIdSetToCache(set, context.reader()); if (set == null) { // null values are not supported set = DocIdSet.EMPTY; } // it might happen that another thread computed the same set in parallel // although this might incur some CPU overhead, it is probably better // this way than trying to lock and preventing other filters to be // computed at the same time? putIfAbsent(in, context, set); } } return set == DocIdSet.EMPTY ? null : BitsFilteredDocIdSet.wrap(set, acceptDocs); }
@Override public Scorer scorer(LeafReaderContext context) throws IOException { DocIdSet childrenDocSet = childrenFilter.getDocIdSet(context, null); // we forcefully apply live docs here so that deleted children don't give matching parents childrenDocSet = BitsFilteredDocIdSet.wrap(childrenDocSet, context.reader().getLiveDocs()); if (Lucene.isEmpty(childrenDocSet)) { return null; } final DocIdSetIterator childIterator = childrenDocSet.iterator(); if (childIterator == null) { return null; } SortedDocValues bytesValues = globalIfd.load(context).getOrdinalsValues(parentType); if (bytesValues == null) { return null; } return new ChildScorer(this, parentIdxs, scores, childIterator, bytesValues); }
@Override public DocIdSet getDocIdSet(LeafReaderContext context, Bits acceptDocs) throws IOException { if (context.ord == 0) { policy.onUse(in); } DocIdSet set = get(in, context); if (set == null) { // do not apply acceptDocs yet, we want the cached filter to not take them into account set = in.getDocIdSet(context, null); if (policy.shouldCache(in, context, set)) { set = docIdSetToCache(set, context.reader()); if (set == null) { // null values are not supported set = DocIdSet.EMPTY; } // it might happen that another thread computed the same set in parallel // although this might incur some CPU overhead, it is probably better // this way than trying to lock and preventing other filters to be // computed at the same time? putIfAbsent(in, context, set); } } return set == DocIdSet.EMPTY ? null : BitsFilteredDocIdSet.wrap(set, acceptDocs); }
@Override
public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
  final AtomicReader reader = context.reader();
  final Terms terms = reader.fields().terms(VISIBILITY_FIELD_NAME);
  if (terms == null) {
    // No visibility field in this segment.
    return null;
  }
  final OpenBitSet visibleDocs = new OpenBitSet(reader.maxDoc());
  final TermsEnum termsEnum = terms.iterator(null);
  for (BytesRef term = termsEnum.next(); term != null; term = termsEnum.next()) {
    // Mark the docs of each visibility term according to the evaluator's verdict.
    makeVisible(termsEnum, visibleDocs, acceptDocs, isVisible(visibilityEvaluator, term));
  }
  return BitsFilteredDocIdSet.wrap(visibleDocs, acceptDocs);
}
@Override
public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
  final AtomicReader reader = context.reader();
  final Terms visibilityTerms = reader.fields().terms(VISIBILITY_FIELD_NAME);
  if (visibilityTerms == null) {
    return null; // segment has no visibility field at all
  }
  final OpenBitSet visibleDocs = new OpenBitSet(reader.maxDoc());
  final TermsEnum termsEnum = visibilityTerms.iterator(null);
  for (BytesRef term = termsEnum.next(); term != null; term = termsEnum.next()) {
    // Per visibility term: mark its docs based on the evaluator's decision.
    makeVisible(termsEnum, visibleDocs, acceptDocs, isVisible(visibilityEvaluator, term));
  }
  return BitsFilteredDocIdSet.wrap(visibleDocs, acceptDocs);
}
@Override
public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
  final AtomicReader reader = context.reader();
  final Terms terms = reader.fields().terms(VISIBILITY_FIELD_NAME);
  if (terms == null) {
    return null; // no visibility data in this segment
  }
  final OpenBitSet result = new OpenBitSet(reader.maxDoc());
  final TermsEnum termsEnum = terms.iterator(null);
  for (BytesRef term = termsEnum.next(); term != null; term = termsEnum.next()) {
    // Flip on the docs of every term the visibility evaluator accepts.
    makeVisible(termsEnum, result, acceptDocs, isVisible(visibilityEvaluator, term));
  }
  return BitsFilteredDocIdSet.wrap(result, acceptDocs);
}
return BitsFilteredDocIdSet.wrap(new BitDocIdSet((BitSet) docsWithField), acceptDocs);
@Override
public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
  final AtomicReader reader = context.reader();
  final Terms terms = reader.fields().terms(VISIBILITY_FIELD_NAME);
  if (terms == null) {
    return null; // segment carries no visibility field
  }
  final OpenBitSet bits = new OpenBitSet(reader.maxDoc());
  final TermsEnum termsEnum = terms.iterator(null);
  for (BytesRef term = termsEnum.next(); term != null; term = termsEnum.next()) {
    // Each visibility term's docs are marked per the evaluator's decision.
    makeVisible(termsEnum, bits, acceptDocs, isVisible(visibilityEvaluator, term));
  }
  return BitsFilteredDocIdSet.wrap(bits, acceptDocs);
}
return BitsFilteredDocIdSet.wrap(new BitDocIdSet((BitSet) docsWithField), acceptDocs);
@Override public Scorer scorer(LeafReaderContext context) throws IOException { DocIdSet childrenDocIdSet = childrenFilter.getDocIdSet(context, null); if (Lucene.isEmpty(childrenDocIdSet)) { return null; } SortedDocValues globalValues = globalIfd.load(context).getOrdinalsValues(parentType); if (globalValues != null) { // we forcefully apply live docs here so that deleted children don't give matching parents childrenDocIdSet = BitsFilteredDocIdSet.wrap(childrenDocIdSet, context.reader().getLiveDocs()); DocIdSetIterator innerIterator = childrenDocIdSet.iterator(); if (innerIterator != null) { ChildrenDocIdIterator childrenDocIdIterator = new ChildrenDocIdIterator( innerIterator, parentOrds, globalValues ); return ConstantScorer.create(childrenDocIdIterator, this, queryWeight); } } return null; }
/** * Returns a DocIdSet with documents that should be permitted in search * results. */ @Override public DocIdSet getDocIdSet(LeafReaderContext context, Bits acceptDocs) throws IOException { final Terms terms = context.reader().terms(query.field); if (terms == null) { // field does not exist return null; } final TermsEnum termsEnum = query.getTermsEnum(terms); assert termsEnum != null; BitDocIdSet.Builder builder = new BitDocIdSet.Builder(context.reader().maxDoc()); PostingsEnum docs = null; while (termsEnum.next() != null) { docs = termsEnum.postings(docs, PostingsEnum.NONE); builder.or(docs); } return BitsFilteredDocIdSet.wrap(builder.build(), acceptDocs); } }
/** * Returns a DocIdSet with documents that should be permitted in search * results. */ @Override public DocIdSet getDocIdSet(LeafReaderContext context, Bits acceptDocs) throws IOException { final Terms terms = context.reader().terms(query.field); if (terms == null) { // field does not exist return null; } final TermsEnum termsEnum = query.getTermsEnum(terms); assert termsEnum != null; BitDocIdSet.Builder builder = new BitDocIdSet.Builder(context.reader().maxDoc()); PostingsEnum docs = null; while (termsEnum.next() != null) { docs = termsEnum.postings(docs, PostingsEnum.NONE); builder.or(docs); } return BitsFilteredDocIdSet.wrap(builder.build(), acceptDocs); } }
@Override public DocIdSet getDocIdSet(LeafReaderContext context, Bits acceptDocs) throws IOException { final LeafReader reader = context.reader(); BitDocIdSet.Builder builder = new BitDocIdSet.Builder(reader.maxDoc()); final Fields fields = reader.fields(); final BytesRef spare = new BytesRef(this.termsBytes); Terms terms = null; TermsEnum termsEnum = null; PostingsEnum docs = null; for (TermsAndField termsAndField : this.termsAndFields) { if ((terms = fields.terms(termsAndField.field)) != null) { termsEnum = terms.iterator(); // this won't return null for (int i = termsAndField.start; i < termsAndField.end; i++) { spare.offset = offsets[i]; spare.length = offsets[i+1] - offsets[i]; if (termsEnum.seekExact(spare)) { docs = termsEnum.postings(docs, PostingsEnum.NONE); // no freq since we don't need them builder.or(docs); } } } } return BitsFilteredDocIdSet.wrap(builder.build(), acceptDocs); }