/** * Writes all doc values updates to disk if there are any. * @return <code>true</code> iff any files where written */ boolean writeAllDocValuesUpdates() throws IOException { Collection<ReadersAndUpdates> copy; synchronized (this) { // this needs to be protected by the reader pool lock otherwise we hit ConcurrentModificationException copy = new HashSet<>(readerMap.values()); } boolean any = false; for (ReadersAndUpdates rld : copy) { any |= rld.writeFieldUpdates(directory, fieldNumbers, completedDelGenSupplier.getAsLong(), infoStream); } return any; }
/**
 * Flushes pending doc values field updates to disk for the given segments and
 * marks each pooled segment as merging.
 *
 * @param infos the segments that will participate in the merge
 * @return {@code true} iff at least one file was written
 * @throws IOException if a low-level I/O error occurs
 */
boolean writeDocValuesUpdatesForMerge(List<SegmentCommitInfo> infos) throws IOException {
  boolean wroteAny = false;
  for (SegmentCommitInfo segmentInfo : infos) {
    // create=false: only segments that already have a pooled ReadersAndUpdates
    // can carry buffered field updates; skip the rest.
    final ReadersAndUpdates readersAndUpdates = get(segmentInfo, false);
    if (readersAndUpdates == null) {
      continue;
    }
    wroteAny |= readersAndUpdates.writeFieldUpdates(
        directory, fieldNumbers, completedDelGenSupplier.getAsLong(), infoStream);
    readersAndUpdates.setIsMerging();
  }
  return wroteAny;
}
/**
 * Commit live docs changes for the segment readers for
 * the provided infos.
 *
 * Writes both live-docs files and pending doc values field updates; returns
 * whether any segment actually produced new files. Runs under the pool lock
 * (synchronized) so readerMap access is safe.
 *
 * @return true iff at least one segment had changes written to disk
 * @throws IOException If there is a low-level I/O error
 */
synchronized boolean commit(SegmentInfos infos) throws IOException {
  boolean atLeastOneChange = false;
  for (SegmentCommitInfo info : infos) {
    final ReadersAndUpdates rld = readerMap.get(info);
    if (rld != null) {
      // The pooled entry must belong to exactly this commit info.
      assert rld.info == info;
      boolean changed = rld.writeLiveDocs(directory);
      changed |= rld.writeFieldUpdates(directory, fieldNumbers, completedDelGenSupplier.getAsLong(), infoStream);
      if (changed) {
        // Make sure we only write del docs for a live segment:
        assert assertInfoIsLive(info);
        // Must checkpoint because we just
        // created new _X_N.del and field updates files;
        // don't call IW.checkpoint because that also
        // increments SIS.version, which we do not want to
        // do here: it was done previously (after we
        // invoked BDS.applyDeletes), whereas here all we
        // did was move the state to disk:
        atLeastOneChange = true;
      }
    }
  }
  return atLeastOneChange;
}
continue; if (rld.writeFieldUpdates(directory, globalFieldNumberMap, bufferedUpdatesStream.getCompletedDelGen(), infoStream)) { checkpointNoSIS();
if (rld.writeFieldUpdates(directory, fieldNumbers, completedDelGenSupplier.getAsLong(), infoStream)) { changed = true;
/** * Writes all doc values updates to disk if there are any. * @return <code>true</code> iff any files where written */ boolean writeAllDocValuesUpdates() throws IOException { Collection<ReadersAndUpdates> copy; synchronized (this) { // this needs to be protected by the reader pool lock otherwise we hit ConcurrentModificationException copy = new HashSet<>(readerMap.values()); } boolean any = false; for (ReadersAndUpdates rld : copy) { any |= rld.writeFieldUpdates(directory, fieldNumbers, completedDelGenSupplier.getAsLong(), infoStream); } return any; }
/**
 * Flushes pending doc values field updates to disk for the given segments and
 * marks each pooled segment as merging.
 *
 * @param infos the segments that will participate in the merge
 * @return {@code true} iff at least one file was written
 * @throws IOException if a low-level I/O error occurs
 */
boolean writeDocValuesUpdatesForMerge(List<SegmentCommitInfo> infos) throws IOException {
  boolean wroteAny = false;
  for (SegmentCommitInfo segmentInfo : infos) {
    // create=false: only segments that already have a pooled ReadersAndUpdates
    // can carry buffered field updates; skip the rest.
    final ReadersAndUpdates readersAndUpdates = get(segmentInfo, false);
    if (readersAndUpdates == null) {
      continue;
    }
    wroteAny |= readersAndUpdates.writeFieldUpdates(
        directory, fieldNumbers, completedDelGenSupplier.getAsLong(), infoStream);
    readersAndUpdates.setIsMerging();
  }
  return wroteAny;
}
/**
 * Commit live docs changes for the segment readers for
 * the provided infos.
 *
 * Writes both live-docs files and pending doc values field updates; returns
 * whether any segment actually produced new files. Runs under the pool lock
 * (synchronized) so readerMap access is safe.
 *
 * @return true iff at least one segment had changes written to disk
 * @throws IOException If there is a low-level I/O error
 */
synchronized boolean commit(SegmentInfos infos) throws IOException {
  boolean atLeastOneChange = false;
  for (SegmentCommitInfo info : infos) {
    final ReadersAndUpdates rld = readerMap.get(info);
    if (rld != null) {
      // The pooled entry must belong to exactly this commit info.
      assert rld.info == info;
      boolean changed = rld.writeLiveDocs(directory);
      changed |= rld.writeFieldUpdates(directory, fieldNumbers, completedDelGenSupplier.getAsLong(), infoStream);
      if (changed) {
        // Make sure we only write del docs for a live segment:
        assert assertInfoIsLive(info);
        // Must checkpoint because we just
        // created new _X_N.del and field updates files;
        // don't call IW.checkpoint because that also
        // increments SIS.version, which we do not want to
        // do here: it was done previously (after we
        // invoked BDS.applyDeletes), whereas here all we
        // did was move the state to disk:
        atLeastOneChange = true;
      }
    }
  }
  return atLeastOneChange;
}
continue; if (rld.writeFieldUpdates(directory, globalFieldNumberMap, bufferedUpdatesStream.getCompletedDelGen(), infoStream)) { checkpointNoSIS();
if (rld.writeFieldUpdates(directory, fieldNumbers, completedDelGenSupplier.getAsLong(), infoStream)) { changed = true;
segState.rld.writeFieldUpdates(info.info.dir, dvUpdates); applyDocValuesUpdatesList(coalescedUpdates.binaryDVUpdates, segState, dvUpdates); if (dvUpdates.any()) { segState.rld.writeFieldUpdates(info.info.dir, dvUpdates);
segState.rld.writeFieldUpdates(info.info.dir, dvUpdates); applyDocValuesUpdatesList(coalescedUpdates.binaryDVUpdates, segState, dvUpdates); if (dvUpdates.any()) { segState.rld.writeFieldUpdates(info.info.dir, dvUpdates);
holder.mergedDeletesAndUpdates.writeFieldUpdates(directory, mergedDVUpdates); success = true; } finally {
holder.mergedDeletesAndUpdates.writeFieldUpdates(directory, mergedDVUpdates); success = true; } finally {