Refine search
private void advanceScorer () throws IOException { while (nextReader < leaves.size()) { leaf = leaves.get(nextReader++); scorer = weight.scorer(leaf, ordered, false, leaf.reader().getLiveDocs()); // NB: arg 3 (topScorer) was 'true' prior to 4.1 upgrade but incorrectly I think?? if (scorer != null) { return; } } scorer = null; }
/**
 * Caches the incoming per-segment context and its reader so later
 * per-document callbacks can access segment-local data.
 */
@Override
public void setNextReader(AtomicReaderContext context) throws IOException {
    this.currentReader = context.reader();
    this.arc = context;
}
private TermsEnum getTermsEnum(AtomicReaderContext context) throws IOException { final TermState state = termStates.get(context.ord); if (state == null) { // term is not present in that reader assert termNotInReader(context.reader(), term) : "no termstate found but term exists in reader term=" + term; return null; } final TermsEnum termsEnum = context.reader().terms(term.field()).iterator(null); termsEnum.seekExact(term.bytes(), state); return termsEnum; }
/**
 * Builds a DocIdSet of the documents whose visibility term is accepted by the
 * visibility evaluator; returns null when this segment has no visibility field.
 */
@Override
public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
    final AtomicReader reader = context.reader();
    final Terms terms = reader.fields().terms(VISIBILITY_FIELD_NAME);
    if (terms == null) {
        // Visibility field not indexed in this segment: nothing can match.
        return null;
    }
    final OpenBitSet bitSet = new OpenBitSet(reader.maxDoc());
    final TermsEnum termsEnum = terms.iterator(null);
    // Walk every distinct visibility term; makeVisible() marks the documents
    // carrying terms the evaluator accepts.
    for (BytesRef termBytes = termsEnum.next(); termBytes != null; termBytes = termsEnum.next()) {
        makeVisible(termsEnum, bitSet, acceptDocs, isVisible(visibilityEvaluator, termBytes));
    }
    return BitsFilteredDocIdSet.wrap(bitSet, acceptDocs);
}
/**
 * Scorer restricted to the candidate documents marked in {@code bitSet},
 * scoped to a single leaf reader.
 */
ImageHashScorer(Weight weight, BitSet bitSet, AtomicReaderContext context, Bits liveDocs) {
    super(weight, luceneFieldName, lireFeature, context.reader(), ImageHashLimitQuery.this.getBoost());
    this.liveDocs = liveDocs;
    this.bitSet = bitSet;
    // Segment-local bounds: docBase offsets leaf doc ids into the composite space.
    this.docBase = context.docBase;
    this.maxDoc = context.reader().maxDoc();
}
// NOTE(review): truncated fragment of a per-segment id-lookup routine. As
// shown it cannot compile: the inner `for` redeclares `subIDX` while the
// outer `for (int subIDX ...)` is still open, and `idx`, `termsEnums`, and
// `base` come from an enclosing scope outside this chunk — this span appears
// mis-pasted/cut; confirm against the full source file.
final DocsEnum[] docsEnums = new DocsEnum[ subReaders.size() ];
for ( int subIDX = 0; subIDX < subReaders.size(); subIDX++ ) {
    // Per-segment TermsEnum over the "id" field.
    termsEnums[ subIDX ] = subReaders.get( subIDX ).reader().fields().terms( "id" ).iterator( null );
    final BytesRef id = new BytesRef( ids[ idx ] );
    for ( int subIDX = 0; subIDX < subReaders.size(); subIDX++ ) {
        final AtomicReader sub = subReaders.get( subIDX ).reader();
        final TermsEnum termsEnum = termsEnums[ subIDX ];
        if ( termsEnum.seekExact( id, false ) ) {
            // docs(): iterate matching docs, honoring this segment's live docs.
            final DocsEnum docs = docsEnums[ subIDX ] = termsEnum.docs( sub.getLiveDocs(), docsEnums[ subIDX ], 0 );
            if ( docs != null ) {
                final int docID = docs.nextDoc();
                base += sub.maxDoc();
/**
 * Scans every live document of the reader, recording the UID stored in the
 * document-id numeric doc-values field into {@code uidMap} and tracking the
 * minimum/maximum UID encountered.
 */
ReaderData(IndexReader reader) throws IOException {
    this.reader = reader;
    uidMap = new Long2IntRBTreeMap();
    uidMap.defaultReturnValue(-1); // -1 signals "uid not present"
    if (reader.maxDoc() == 0) {
        // Empty index: leave the UID range marked as unset.
        _minUID = Long.MIN_VALUE;
        _maxUID = Long.MIN_VALUE;
        return;
    }
    long lo = Long.MAX_VALUE;
    long hi = Long.MIN_VALUE;
    for (AtomicReaderContext context : reader.getContext().leaves()) {
        final AtomicReader atomicReader = context.reader();
        // NOTE(review): assumes every segment carries the UID doc-values field;
        // a missing field would return null here — TODO confirm.
        final NumericDocValues uidValues = atomicReader
                .getNumericDocValues(AbstractZoieIndexable.DOCUMENT_ID_PAYLOAD_FIELD);
        final Bits liveDocs = atomicReader.getLiveDocs();
        final int leafMaxDoc = atomicReader.maxDoc();
        for (int i = 0; i < leafMaxDoc; ++i) {
            if (liveDocs != null && !liveDocs.get(i)) {
                continue; // skip deleted documents
            }
            final long uid = uidValues.get(i);
            lo = Math.min(lo, uid);
            hi = Math.max(hi, uid);
            uidMap.put(uid, i);
        }
    }
    _minUID = lo;
    _maxUID = hi;
}
// Builds a DocIdSet by comparing each accepted document's sorted doc-value
// for `fieldName` against `bytes`.
// NOTE(review): this chunk is truncated — the loop body that uses the value
// read into `bytes`, plus the method's closing braces, lie outside this view.
@Override
public DocIdSet getDocIdSet(final AtomicReaderContext context, final Bits acceptDocs) throws IOException {
    AtomicReader areader = context.reader();
    SortedDocValues sortedDocValues = areader.getSortedDocValues(fieldName);
    // Field has no sorted doc values in this segment: nothing can match.
    if (sortedDocValues == null) return null;
    OpenBitSet bits = new OpenBitSet(areader.maxDoc());
    for( int docID=0; docID<areader.maxDoc(); docID++ ) {
        if( acceptDocs == null || acceptDocs.get(docID)) {
            sortedDocValues.get(docID, bytes);
/**
 * Returns the Lucene doc ids in this leaf whose local-wp-id stored field value
 * is contained in {@code wpIds}, caching the result per reader.
 *
 * <p>Fix: iterate doc ids over [0, {@code maxDoc()}) and skip deleted docs via
 * the reader's live-docs bits. The previous bound, {@code reader.numDocs()},
 * is the COUNT of live documents, not a doc-id bound — whenever the segment
 * had deletions, the upper doc ids were never examined and already-deleted
 * doc ids could be visited.
 *
 * @param context per-segment reader context to build/look up the filter for
 * @return the matching Lucene doc ids for this reader (cached after first call)
 * @throws IOException if reading stored fields fails
 */
private synchronized int[] getAllowedLuceneIds(AtomicReaderContext context) throws IOException {
    AtomicReader reader = context.reader();
    if (allowedLuceneIds.containsKey(reader)) {
        return allowedLuceneIds.get(reader);
    }
    LOG.debug("building WpId filter for " + wpIds.length + " ids with hash " + Arrays.hashCode(wpIds));
    TIntSet wpIdSet = new TIntHashSet(wpIds);
    TIntSet luceneIdSet = new TIntHashSet();
    // Restrict stored-field loading to the single field we need.
    Set<String> fields = new HashSet<String>(Arrays.asList(LuceneOptions.LOCAL_ID_FIELD_NAME));
    // Fully-qualified to avoid requiring a new import in this file.
    org.apache.lucene.util.Bits liveDocs = reader.getLiveDocs();
    int maxDoc = reader.maxDoc();
    for (int i = 0; i < maxDoc; i++) {
        if (liveDocs != null && !liveDocs.get(i)) {
            continue; // deleted document — its stored fields may be stale
        }
        Document d = reader.document(i, fields);
        // parseInt avoids the Integer boxing of Integer.valueOf.
        int wpId = Integer.parseInt(d.get(LuceneOptions.LOCAL_ID_FIELD_NAME));
        if (wpIdSet.contains(wpId)) {
            luceneIdSet.add(i);
        }
    }
    int luceneIds[] = luceneIdSet.toArray();
    LOG.debug("WpId filter matched " + luceneIds.length + " ids.");
    allowedLuceneIds.put(reader, luceneIds);
    return luceneIds;
}
}
// Maps the collected doc to its OSM object: reads the stored class
// discriminator from the "class_value" binary doc-values, then the matching
// identity numeric doc-values for that class, and records the object with the
// current scorer's score.
//
// NOTE(review): this re-fetches the BinaryDocValues/NumericDocValues instances
// and allocates a fresh BytesRef on every collected hit; doc-values accessors
// are normally cached once per segment (e.g. in setNextReader) — worth
// confirming whether hoisting them is safe here.
@Override
public void collect(int doc) throws IOException {
    OsmObject object;
    BytesRef bytesRef = new BytesRef(10);
    context.reader().getBinaryDocValues("class_value").get(doc, bytesRef);
    if (class_nodeByteRef.bytesEquals(bytesRef)) {
        object = getNode(context.reader().getNumericDocValues("node.identity_value").get(doc));
    } else if (class_wayByteRef.bytesEquals(bytesRef)) {
        object = getWay(context.reader().getNumericDocValues("way.identity_value").get(doc));
    } else if (class_relationByteRef.bytesEquals(bytesRef)) {
        object = getRelation(context.reader().getNumericDocValues("relation.identity_value").get(doc));
    } else {
        // Unknown class discriminator: index and collector are out of sync.
        throw new RuntimeException();
    }
    searchResults.put(object, scorer.score());
}
// Collects the names of all fields present in this leaf reader's FieldInfos
// into the `fields` collection.
// NOTE(review): truncated fragment — the enclosing method and the loop's
// closing brace are outside this chunk.
AtomicReader ar = rc.reader();
FieldInfos fis = ar.getFieldInfos();
for (FieldInfo fi : fis) {
    fields.add(fi.name);
// Caches the per-segment context and its AtomicReader so subsequent
// per-document callbacks can read segment-local data (doc values / stored
// fields) for the segment about to be scored.
@Override
public void setNextReader(AtomicReaderContext context) throws IOException {
    this.arc = context;
    this.currentReader = context.reader();
}
/**
 * Returns a {@link TermsEnum} positioned at this weight's term, or
 * {@code null} if the term does not exist in the given context.
 *
 * @param context the per-segment reader context to look the term up in
 * @return a positioned enum, or {@code null} when the term is absent
 * @throws IOException if reading the term dictionary fails
 */
TermsEnum getTermsEnum(final AtomicReaderContext context) throws IOException {
    final TermState state = termStates.get(context.ord);
    if (state == null) {
        // term is not present in that reader
        assert this.termNotInReader(context.reader(), term) : "no termstate found but term exists in reader term=" + term;
        return null;
    }
    // Seek using the pre-computed TermState; avoids a second dictionary lookup.
    final TermsEnum termsEnum = context.reader().terms(term.field()).iterator(null);
    termsEnum.seekExact(term.bytes(), state);
    return termsEnum;
}
/**
 * Builds the set of documents whose visibility term is accepted by the
 * visibility evaluator; returns {@code null} when this segment carries no
 * visibility field at all.
 *
 * @param context    per-segment reader context
 * @param acceptDocs docs pre-accepted by deletions/other filters, may be null
 * @return matching doc ids wrapped to honor {@code acceptDocs}, or null
 * @throws IOException if reading the terms dictionary fails
 */
@Override
public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
    AtomicReader reader = context.reader();
    Fields fields = reader.fields();
    Terms terms = fields.terms(VISIBILITY_FIELD_NAME);
    if (terms == null) {
        // Visibility field not indexed in this segment: nothing can match.
        return null;
    } else {
        OpenBitSet bitSet = new OpenBitSet(reader.maxDoc());
        TermsEnum iterator = terms.iterator(null);
        BytesRef bytesRef;
        // Walk every distinct visibility term; makeVisible() sets the bits of
        // documents carrying terms the evaluator accepts.
        while ((bytesRef = iterator.next()) != null) {
            makeVisible(iterator, bitSet, acceptDocs, isVisible(visibilityEvaluator, bytesRef));
        }
        return BitsFilteredDocIdSet.wrap(bitSet, acceptDocs);
    }
}
/**
 * Builds the UID-to-docid map for the reader: scans every live document in
 * each leaf, reads its UID from the document-id numeric doc-values field, and
 * tracks the minimum/maximum UID encountered.
 */
ReaderData(IndexReader reader) throws IOException {
    this.reader = reader;
    long minUID = Long.MAX_VALUE;
    long maxUID = Long.MIN_VALUE;
    uidMap = new Long2IntRBTreeMap();
    uidMap.defaultReturnValue(-1); // -1 signals "uid not present"
    int maxDoc = reader.maxDoc();
    if (maxDoc == 0) {
        // Empty index: leave the UID range marked as unset.
        _minUID = Long.MIN_VALUE;
        _maxUID = Long.MIN_VALUE;
        return;
    }
    List<AtomicReaderContext> leaves = reader.getContext().leaves();
    for (AtomicReaderContext context : leaves) {
        AtomicReader atomicReader = context.reader();
        // NOTE(review): assumes every segment carries the UID doc-values field;
        // getNumericDocValues would return null otherwise — TODO confirm.
        NumericDocValues uidValues = atomicReader
            .getNumericDocValues(AbstractZoieIndexable.DOCUMENT_ID_PAYLOAD_FIELD);
        Bits liveDocs = atomicReader.getLiveDocs();
        for (int i = 0; i < atomicReader.maxDoc(); ++i) {
            // Only live (non-deleted) documents contribute.
            if (liveDocs == null || liveDocs.get(i)) {
                long uid = uidValues.get(i);
                if (uid < minUID) minUID = uid;
                if (uid > maxUID) maxUID = uid;
                uidMap.put(uid, i);
            }
        }
    }
    _minUID = minUID;
    _maxUID = maxUID;
}
/**
 * Explains the score for {@code doc}: delegates to the wrapped weight when a
 * scorer can be positioned exactly on the requested document; otherwise
 * reports a non-match explanation.
 */
@Override
public Explanation explain(final AtomicReaderContext context, final int doc) throws IOException {
    final NodeScorer scorer = (NodeScorer) this.scorer(context, context.reader().getLiveDocs());
    // Short-circuit order matters: only probe the scorer when it exists, and
    // only read doc() after a successful skipToCandidate().
    if (scorer == null || !scorer.skipToCandidate(doc) || scorer.doc() != doc) {
        return new ComplexExplanation(false, 0.0f, "no matching term");
    }
    return weight.explain(context, doc);
}