/** * Obtain a ReadersAndLiveDocs instance from the * readerPool. If create is true, you must later call * {@link #release(ReadersAndUpdates, boolean)}. */ synchronized ReadersAndUpdates get(SegmentCommitInfo info, boolean create) { assert info.info.dir == originalDirectory: "info.dir=" + info.info.dir + " vs " + originalDirectory; if (closed.get()) { assert readerMap.isEmpty() : "Reader map is not empty: " + readerMap; throw new AlreadyClosedException("ReaderPool is already closed"); } ReadersAndUpdates rld = readerMap.get(info); if (rld == null) { if (create == false) { return null; } rld = new ReadersAndUpdates(segmentInfos.getIndexCreatedVersionMajor(), info, newPendingDeletes(info)); // Steal initial reference: readerMap.put(info, rld); } else { assert rld.info == info: "rld.info=" + rld.info + " info=" + info + " isLive?=" + assertInfoIsLive(rld.info) + " vs " + assertInfoIsLive(info); } if (create) { // Return ref to caller: rld.incRef(); } assert noDups(); return rld; }
// NOTE(review): fragment — this span is internally inconsistent and appears to
// be a garbled splice of two versions of a release/write-updates path: the
// `assert rld.refCount() > 0` sits inside `if (rld.refCount() == 0)` (can never
// hold), the `rld.refCount() == 1` test is unreachable in that branch, and the
// trailing `} else {` is truncated. Verify against the full file before editing.
boolean changed = false; rld.decRef(); if (rld.refCount() == 0) { assert rld.refCount() > 0: "refCount=" + rld.refCount() + " reader=" + rld.info; if (poolReaders == false && rld.refCount() == 1 && readerMap.containsKey(rld.info)) { if (rld.writeLiveDocs(directory)) { if (rld.writeFieldUpdates(directory, fieldNumbers, completedDelGenSupplier.getAsLong(), infoStream)) { changed = true; if (rld.getNumDVUpdates() == 0) { rld.dropReaders(); readerMap.remove(rld.info); } else {
/**
 * Snapshots per-segment state for applying updates: pins a reader and records
 * the segment's deletion count and buffered-deletes generation at construction
 * time, plus a close callback.
 */
SegmentState(ReadersAndUpdates rld, IOUtils.IOConsumer<ReadersAndUpdates> onClose, SegmentCommitInfo info) throws IOException {
  this.rld = rld;
  this.onClose = onClose;
  reader = rld.getReader(IOContext.READ);
  startDelCount = rld.getDelCount();
  delGen = info.getBufferedDeletesGen();
}
// Returns the most current reader for this segment: lazily opens one if none
// exists yet, and swaps in a fresh private reader when live-docs have changed
// since the current one was opened. Must be called under this object's monitor.
private synchronized CodecReader getLatestReader() throws IOException {
  if (this.reader == null) {
    // get a reader and dec the ref right away we just make sure we have a reader
    // (opening the reader sets this.reader as a side effect; the extra ref is
    // not kept)
    getReader(IOContext.READ).decRef();
  }
  if (pendingDeletes.needsRefresh(reader)) {
    // we have a reader but its live-docs are out of sync. let's create a temporary one that we never share
    swapNewReaderWithLatestLiveDocs();
  }
  return reader;
}
// NOTE(review): fragment — this span duplicates the delete/skipDeletedDoc
// sequence, declares `mergingFields`/`dvFieldUpdates` without assignment, and
// contains a `finally` with no matching `try`; it looks like a garbled
// extraction of the commit-merged-deletes-and-updates path. Verify against
// the full file before editing.
final Bits currentLiveDocs = rld.getLiveDocs(); final Map<String,DocValuesFieldUpdates> mergingFieldUpdates = rld.getMergingFieldUpdates(); final String[] mergingFields; final DocValuesFieldUpdates[] dvFieldUpdates; holder.mergedDeletesAndUpdates.delete(holder.docMap.map(docUpto)); if (mergingFields != null) { // advance all iters beyond the deleted document skipDeletedDoc(updatesIters, j); docUpto += info.info.maxDoc() - info.getDelCount() - rld.getPendingDeleteCount(); holder.mergedDeletesAndUpdates.delete(holder.docMap.map(docUpto)); if (mergingFields != null) { // advance all iters beyond the deleted document skipDeletedDoc(updatesIters, j); holder.mergedDeletesAndUpdates.writeFieldUpdates(directory, mergedDVUpdates); success = true; } finally { if (!success) { holder.mergedDeletesAndUpdates.dropChanges(); readerPool.drop(merge.info); infoStream.message("IW", "no new deletes or field updates since merge started"); } else { String msg = holder.mergedDeletesAndUpdates.getPendingDeleteCount() + " new deletes"; if (mergedDVUpdates.any()) { msg += " and " + mergedDVUpdates.size() + " new field updates";
// NOTE(review): fragment — acquires a merge reader plus read-only live docs,
// logs deletion counts, then (in an apparently separate spliced section) warms
// a newly merged segment via mergedSegmentWarmer. The `} finally {` and
// `segUpto++` are cut off from their enclosing context; do not treat this span
// as a single coherent method.
reader = rld.getReaderForMerge(context); liveDocs = rld.getReadOnlyLiveDocs(); delCount = rld.getPendingDeleteCount() + info.getDelCount(); assert rld.verifyDocCounts(); if (rld.getPendingDeleteCount() != 0) { infoStream.message("IW", "seg=" + segString(info) + " delCount=" + info.getDelCount() + " pendingDelCount=" + rld.getPendingDeleteCount()); } else if (info.getDelCount() != 0) { infoStream.message("IW", "seg=" + segString(info) + " delCount=" + info.getDelCount()); rld.release(reader); released = true; } finally { assert delCount <= info.info.maxDoc(): "delCount=" + delCount + " info.maxDoc=" + info.info.maxDoc() + " rld.pendingDeleteCount=" + rld.getPendingDeleteCount() + " info.getDelCount()=" + info.getDelCount(); segUpto++; if (poolReaders && mergedSegmentWarmer != null) { final ReadersAndUpdates rld = readerPool.get(merge.info, true); final SegmentReader sr = rld.getReader(IOContext.READ); try { mergedSegmentWarmer.warm(sr); } finally { synchronized(this) { rld.release(sr); readerPool.release(rld);
@SuppressWarnings("try") private synchronized void closeMergeReaders(MergePolicy.OneMerge merge, boolean suppressExceptions) throws IOException { final boolean drop = suppressExceptions == false; try (Closeable finalizer = merge::mergeFinished) { IOUtils.applyToAll(merge.readers, sr -> { final ReadersAndUpdates rld = getPooledInstance(sr.getOriginalSegmentInfo(), false); // We still hold a ref so it should not have been removed: assert rld != null; if (drop) { rld.dropChanges(); } else { rld.dropMergingUpdates(); } rld.release(sr); release(rld); if (drop) { readerPool.drop(rld.info); } }); } finally { Collections.fill(merge.readers, null); } }
// NOTE(review): fragment — this release() is truncated (the closing braces and
// any use of the assertInfoLive parameter are outside this view). What is
// visible: drops one reference, and when pooling is off and this was the last
// external ref, flushes live docs and evicts the entry from readerMap.
public synchronized void release(ReadersAndUpdates rld, boolean assertInfoLive) throws IOException { rld.decRef(); assert rld.refCount() >= 1; if (!poolReaders && rld.refCount() == 1) { if (rld.writeLiveDocs(directory)) { rld.dropReaders(); readerMap.remove(rld.info);
// NOTE(review): fragment — the inner `final ReadersAndUpdates rld = ...` would
// shadow (and in Java, fail to compile against) the outer `rld`, so this span
// is almost certainly two separate code regions spliced together: (1) marking
// a segment as merging and obtaining its merge reader, and (2) warming a newly
// merged segment. Verify against the full file before editing.
rld.setIsMerging(); ReadersAndUpdates.MergeReader mr = rld.getReaderForMerge(context); SegmentReader reader = mr.reader; if (readerPool.isReaderPoolingEnabled() && mergedSegmentWarmer != null) { final ReadersAndUpdates rld = getPooledInstance(merge.info, true); final SegmentReader sr = rld.getReader(IOContext.READ); try { mergedSegmentWarmer.warm(sr); } finally { synchronized(this) { rld.release(sr); release(rld);
// NOTE(review): fragment — mid-method excerpt of a delete-application loop:
// pulls the segment's live docs, advances a postings enum, then marks a doc
// deleted after ensuring writable live docs. Control flow around these
// statements is outside this view.
final Bits acceptDocs = state.rld.getLiveDocs(); state.postingsEnum = state.termsEnum.postings(state.postingsEnum, PostingsEnum.NONE); state.rld.initWritableLiveDocs(); state.any = true; state.rld.delete(docID);
public synchronized boolean delete(int docID) throws IOException { if (reader == null && pendingDeletes.mustInitOnDelete()) { getReader(IOContext.READ).decRef(); // pass a reader to initialize the pending deletes } return pendingDeletes.delete(docID); }
// NOTE(review): fragment — `}` immediately followed by `catch` with no visible
// `try` means the enclosing try/catch is outside this view (or the span is
// garbled). Visible intent: on save, flush live docs then drop readers; on
// failure, behavior depends on doSave. Verify against the full file.
if (doSave && rld.writeLiveDocs(directory)) { rld.dropReaders(); } catch (Throwable t) { if (doSave) {
// NOTE(review): fragment — deletes a doc under the bufferedUpdatesStream
// monitor and computes the segment's full deletion count (committed + pending)
// to detect a 100%-deleted segment; the branch taken when
// fullDelCount == maxDoc is outside this view.
if (rld != null) { synchronized(bufferedUpdatesStream) { rld.initWritableLiveDocs(); if (rld.delete(docID)) { final int fullDelCount = rld.info.getDelCount() + rld.getPendingDeleteCount(); if (fullDelCount == rld.info.info.maxDoc()) {
/**
 * Removes and closes the pooled reader for the given
 * {@link SegmentCommitInfo}, if one is pooled.
 *
 * @return {@code true} if a pooled reader was dropped
 */
synchronized boolean drop(SegmentCommitInfo info) throws IOException {
  final ReadersAndUpdates pooled = readerMap.get(info);
  if (pooled == null) {
    return false;
  }
  assert info == pooled.info;
  readerMap.remove(info);
  pooled.dropReaders();
  return true;
}
/** Returns a reader for merge, with the latest doc values updates and deletions. */ synchronized MergeReader getReaderForMerge(IOContext context) throws IOException { // We must carry over any still-pending DV updates because they were not // successfully written, e.g. because there was a hole in the delGens, // or they arrived after we wrote all DVs for merge but before we set // isMerging here: for (Map.Entry<String, List<DocValuesFieldUpdates>> ent : pendingDVUpdates.entrySet()) { List<DocValuesFieldUpdates> mergingUpdates = mergingDVUpdates.get(ent.getKey()); if (mergingUpdates == null) { mergingUpdates = new ArrayList<>(); mergingDVUpdates.put(ent.getKey(), mergingUpdates); } mergingUpdates.addAll(ent.getValue()); } SegmentReader reader = getReader(context); if (pendingDeletes.needsRefresh(reader)) { // beware of zombies: assert pendingDeletes.getLiveDocs() != null; reader = createNewReaderWithLatestLiveDocs(reader); } assert pendingDeletes.verifyDocCounts(reader); return new MergeReader(reader, pendingDeletes.getHardLiveDocs()); }
// NOTE(review): fragment — wires a doc-id consumer into a binary DV update,
// then finalizes and registers the update on the segment if any docs were
// touched. The condition bodies and surrounding loop are outside this view.
docIdConsumer = doc -> update.add(doc, binaryValue); final Bits acceptDocs = segState.rld.getLiveDocs(); if (segState.rld.sortMap != null && segmentPrivateDeletes) { if (update.any()) { update.finish(); segState.rld.addDVUpdate(update);
// Acquires (and refs) the pooled ReadersAndUpdates for this segment, then
// snapshots deletion state. NOTE(review): startDelCount is read BEFORE
// getReader(), which appears to be able to initialize pending deletes as a
// side effect (see delete(int) elsewhere in this file) — the ordering looks
// deliberate; confirm before reordering.
public SegmentState(IndexWriter.ReaderPool pool, SegmentCommitInfo info) throws IOException {
  rld = pool.get(info, true);
  startDelCount = rld.getPendingDeleteCount();
  reader = rld.getReader(IOContext.READ);
  delGen = info.getBufferedDeletesGen();
}
// NOTE(review): fragment — ensures writable live docs, flags that this segment
// has changes, and counts a successful delete. The enclosing loop/conditions
// are outside this view.
segState.rld.initWritableLiveDocs(); segState.any = true; if (segState.rld.delete(doc)) { delCount++;
// NOTE(review): fragment — the leading boolean expression (segment is empty or
// fully deleted) belongs to an assert or assignment cut off above; on drop or
// failure, merged updates are discarded and the merged segment is removed from
// the pool. Verify against the full file.
merge.info.info.maxDoc() == 0 || (mergedUpdates != null && mergedUpdates.getPendingDeleteCount() == merge.info.info.maxDoc()); try { if (dropSegment) { mergedUpdates.dropChanges(); } finally { if (!success) { mergedUpdates.dropChanges(); readerPool.drop(merge.info);