/**
 * @param reader reader of the index for which to provide suggestions
 * @param resultSize size of the results
 */
SuggesterSearcher(final IndexReader reader, final int resultSize) {
    super(reader);
    numDocs = reader.numDocs();
    this.resultSize = resultSize;
}
/**
 * Returns the number of CPE entries stored in the index.
 *
 * @return the number of CPE entries stored in the index, or -1 if the index reader is not open
 */
public synchronized int numDocs() {
    if (indexReader == null) {
        return -1;
    }
    return indexReader.numDocs();
}
public int getDocQty() {
    return reader.numDocs();
}
private static double computeNormalizedDocumentFrequency(final IndexReader indexReader, final Term term)
        throws IOException {
    int documentFrequency = indexReader.docFreq(term);
    return ((double) documentFrequency) / indexReader.numDocs();
}
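The docFreq/numDocs ratio above is the building block of inverse document frequency. For context, here is a minimal sketch of the complementary IDF weight under one classic TF-IDF formulation (the log-plus-one form used by Lucene's older DefaultSimilarity); the helper name idf is hypothetical:

// Sketch: inverse document frequency from the same reader statistics.
// The formula follows one classic TF-IDF variant; the helper itself is illustrative.
static double idf(final IndexReader indexReader, final Term term) throws IOException {
    final int documentFrequency = indexReader.docFreq(term);
    final int numDocs = indexReader.numDocs();
    return Math.log((double) numDocs / (documentFrequency + 1)) + 1.0;
}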
/**
 * Get the number of documents in this index database.
 *
 * @return number of documents
 * @throws IOException if an I/O error occurred
 */
public int getNumFiles() throws IOException {
    IndexReader ireader = null;
    int numDocs = 0;
    try {
        ireader = DirectoryReader.open(indexDirectory); // open existing index
        numDocs = ireader.numDocs();
    } finally {
        if (ireader != null) {
            try {
                ireader.close();
            } catch (IOException e) {
                LOGGER.log(Level.WARNING, "An error occurred while closing index reader", e);
            }
        }
    }
    return numDocs;
}
/** Returns the number of deleted documents. */
public final int numDeletedDocs() {
    return maxDoc() - numDocs();
}
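numDeletedDocs() is only nonzero once maxDoc() and numDocs() diverge, i.e. once a segment carries deletions that have not been merged away. A self-contained sketch that provokes that divergence, assuming a recent Lucene version (ByteBuffersDirectory, Lucene 8+); the class name is illustrative:

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.StringField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.Term;
import org.apache.lucene.store.ByteBuffersDirectory;
import org.apache.lucene.store.Directory;

public class NumDocsVsMaxDoc {
    public static void main(String[] args) throws Exception {
        try (Directory dir = new ByteBuffersDirectory();
             IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer()))) {
            for (int i = 0; i < 3; i++) {
                Document doc = new Document();
                doc.add(new StringField("id", Integer.toString(i), Field.Store.YES));
                writer.addDocument(doc);
            }
            writer.commit();                              // flush the three documents into a segment
            writer.deleteDocuments(new Term("id", "1"));  // mark one document deleted; its slot remains
            writer.commit();
            try (DirectoryReader reader = DirectoryReader.open(dir)) {
                System.out.println(reader.numDocs());        // 2 -- excludes deletions
                System.out.println(reader.maxDoc());         // 3 -- includes deleted slots
                System.out.println(reader.numDeletedDocs()); // 1 -- maxDoc() - numDocs()
            }
        }
    }
}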
@Override
int size() {
    setIndexReaderSearcher();
    return searcher.getIndexReader().numDocs();
}
@Override
protected IndexSample performSampling() throws IndexNotFoundKernelException {
    UniqueIndexSampler sampler = new UniqueIndexSampler();
    sampler.increment( indexSearcher.getIndexReader().numDocs() );
    checkCancellation();
    return sampler.result();
}
final IndexReader r = subReaders[i];
numDocs += r.numDocs(); // accumulate the live-document count across sub-readers
r.registerParentReader(this);
int numDocs = ireader.numDocs();
if (numDocs > 0) {
public void listTokens(int freq) throws IOException {
    IndexReader ireader = null;
    TermsEnum iter = null;
    Terms terms;
    try {
        ireader = DirectoryReader.open(indexDirectory);
        int numDocs = ireader.numDocs();
        if (numDocs > 0) {
            Fields uFields = MultiFields.getFields(ireader);
            terms = uFields.terms(QueryBuilder.DEFS);
            iter = terms.iterator(); // init uid iterator
        }
        while (iter != null && iter.term() != null) {
            // note: the freq parameter is used as a minimum term-length threshold here,
            // not as a document-frequency cutoff (that is the hardcoded 16)
            if (iter.docFreq() > 16 && iter.term().utf8ToString().length() > freq) {
                LOGGER.warning(iter.term().utf8ToString());
            }
            BytesRef next = iter.next();
            if (next == null) {
                iter = null;
            }
        }
    } finally {
        if (ireader != null) {
            try {
                ireader.close();
            } catch (IOException e) {
                LOGGER.log(Level.WARNING, "An error occurred while closing index reader", e);
            }
        }
    }
}
@Test
void uniqueSamplingUseDocumentsNumber() throws IndexNotFoundKernelException {
    when( indexSearcher.getIndexReader().numDocs() ).thenReturn( 17 );

    UniqueLuceneIndexSampler sampler = new UniqueLuceneIndexSampler( indexSearcher, taskControl.newInstance() );
    IndexSample sample = sampler.sampleIndex();
    assertEquals( 17, sample.indexSize() );
}
@Test
void uniqueSamplingCancel() {
    when( indexSearcher.getIndexReader().numDocs() ).thenAnswer( invocation -> {
        taskControl.cancel();
        return 17;
    } );

    UniqueLuceneIndexSampler sampler = new UniqueLuceneIndexSampler( indexSearcher, taskControl.newInstance() );
    IndexNotFoundKernelException notFoundKernelException =
            assertThrows( IndexNotFoundKernelException.class, sampler::sampleIndex );
    assertEquals( "Index dropped while sampling.", notFoundKernelException.getMessage() );
}
    return reader.numDocs();
} else if (query instanceof TermQuery && reader.hasDeletions() == false) {
    Term term = ((TermQuery) query).getTerm();
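These branches resemble the short-circuit paths in Lucene's IndexSearcher#count: a MatchAllDocsQuery is answered straight from numDocs(), and a TermQuery from docFreq() when the index has no deletions. A hedged usage sketch, assuming an already-opened reader:

// Sketch: counting matches without collecting hits; `reader` is assumed to be open.
IndexSearcher searcher = new IndexSearcher(reader);
int total = searcher.count(new MatchAllDocsQuery());   // served directly from reader.numDocs()
int matching = searcher.count(new TermQuery(new Term("field", "value"))); // docFreq() if no deletions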
/**
 * Set the maximum percentage in which words may still appear. Words that appear
 * in more than this percentage of all documents will be ignored.
 *
 * @param maxPercentage the maximum percentage of documents (0-100) that a term may appear
 * in to be still considered relevant
 */
public void setMaxDocFreqPct(int maxPercentage) {
    this.maxDocFreq = maxPercentage * ir.numDocs() / 100;
}
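A hedged usage sketch of this percentage cutoff with Lucene's MoreLikeThis (the reader and field name are assumptions). Note that maxPercentage * ir.numDocs() is int arithmetic, so for indexes approaching Integer.MAX_VALUE documents the multiplication can overflow; later Lucene versions widen it to long before converting back:

// Sketch: ignore terms that occur in more than a quarter of all documents.
// MoreLikeThis is org.apache.lucene.queries.mlt.MoreLikeThis; the "body" field is an assumption.
MoreLikeThis mlt = new MoreLikeThis(reader);
mlt.setFieldNames(new String[] { "body" });
mlt.setMaxDocFreqPct(25); // maxDocFreq becomes 25 * reader.numDocs() / 100
Query like = mlt.like("body", new StringReader("text resembling the documents we want"));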
int numDocs = reader.numDocs();
if (numDocs > 0) {
@Test
void createWritablePartition() throws Exception {
    try ( AbstractIndexPartition indexPartition = new WritableIndexPartitionFactory( IndexWriterConfigs::standard )
            .createPartition( testDirectory.directory(), directory ) ) {
        try ( IndexWriter indexWriter = indexPartition.getIndexWriter() ) {
            indexWriter.addDocument( new Document() );
            indexWriter.commit();
            indexPartition.maybeRefreshBlocking();
            try ( PartitionSearcher searcher = indexPartition.acquireSearcher() ) {
                assertEquals( 1, searcher.getIndexSearcher().getIndexReader().numDocs(),
                        "We should be able to see the newly added document" );
            }
        }
    }
}
return sampler.result( indexReader.numDocs() );
@Override
public SignificantStringTerms buildEmptyAggregation() {
    // We need to account for the significance of a miss in our global stats - provide corpus size as context
    ContextIndexSearcher searcher = context.searcher();
    IndexReader topReader = searcher.getIndexReader();
    int supersetSize = topReader.numDocs();
    return new SignificantStringTerms(name, bucketCountThresholds.getRequiredSize(),
            bucketCountThresholds.getMinDocCount(), pipelineAggregators(), metaData(), format,
            numCollectedDocs, supersetSize, significanceHeuristic, emptyList());
}
@Override
public SignificantLongTerms buildEmptyAggregation() {
    // We need to account for the significance of a miss in our global stats - provide corpus size as context
    ContextIndexSearcher searcher = context.searcher();
    IndexReader topReader = searcher.getIndexReader();
    int supersetSize = topReader.numDocs();
    return new SignificantLongTerms(name, bucketCountThresholds.getRequiredSize(),
            bucketCountThresholds.getMinDocCount(), pipelineAggregators(), metaData(), format,
            0, supersetSize, significanceHeuristic, emptyList());
}