/**
 * Global stats on segments.
 */
public SegmentsStats segmentsStats(boolean includeSegmentFileSizes) {
    ensureOpen();
    Set<String> segmentName = new HashSet<>();
    SegmentsStats stats = new SegmentsStats();
    try (Searcher searcher = acquireSearcher("segments_stats", SearcherScope.INTERNAL)) {
        for (LeafReaderContext ctx : searcher.reader().getContext().leaves()) {
            SegmentReader segmentReader = Lucene.segmentReader(ctx.reader());
            fillSegmentStats(segmentReader, includeSegmentFileSizes, stats);
            segmentName.add(segmentReader.getSegmentName());
        }
    }
    try (Searcher searcher = acquireSearcher("segments_stats", SearcherScope.EXTERNAL)) {
        for (LeafReaderContext ctx : searcher.reader().getContext().leaves()) {
            SegmentReader segmentReader = Lucene.segmentReader(ctx.reader());
            if (segmentName.contains(segmentReader.getSegmentName()) == false) {
                fillSegmentStats(segmentReader, includeSegmentFileSizes, stats);
            }
        }
    }
    writerSegmentStats(stats);
    return stats;
}
private Engine.DeleteResult delete(Engine engine, Engine.Delete delete) throws IOException {
    active.set(true);
    final Engine.DeleteResult result;
    delete = indexingOperationListeners.preDelete(shardId, delete);
    try {
        if (logger.isTraceEnabled()) {
            logger.trace("delete [{}] (seq no [{}])", delete.uid().text(), delete.seqNo());
        }
        result = engine.delete(delete);
    } catch (Exception e) {
        indexingOperationListeners.postDelete(shardId, delete, e);
        throw e;
    }
    indexingOperationListeners.postDelete(shardId, delete, result);
    return result;
}
private Engine.IndexResult index(Engine engine, Engine.Index index) throws IOException {
    active.set(true);
    final Engine.IndexResult result;
    index = indexingOperationListeners.preIndex(shardId, index);
    try {
        if (logger.isTraceEnabled()) {
            // don't use index.source().utf8ToString() here: source might not be valid UTF-8
            logger.trace("index [{}][{}] (seq# [{}])", index.type(), index.id(), index.seqNo());
        }
        result = engine.index(index);
    } catch (Exception e) {
        indexingOperationListeners.postIndex(shardId, index, e);
        throw e;
    }
    indexingOperationListeners.postIndex(shardId, index, result);
    return result;
}
/**
 * Returns the {@link DocsStats} for this engine
 */
public DocsStats docStats() {
    // we calculate the doc stats based on the internal reader, which is more up-to-date and not subject
    // to external refreshes. For instance we don't refresh an external reader if we flush, and indices with
    // index.refresh_interval=-1 won't see any doc stats updates at all. This gives more accurate statistics
    // when indexing but not refreshing in general. Yet, if a refresh happens the internal reader is refreshed
    // as well, so we are safe here.
    try (Engine.Searcher searcher = acquireSearcher("docStats", Engine.SearcherScope.INTERNAL)) {
        return docsStats(searcher.reader());
    }
}
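A minimal usage sketch of the stats returned above; the shard reference is an assumption for illustration and not part of the snippet:

DocsStats docs = shard.docStats();
long liveDocs = docs.getCount();      // documents visible to the internal reader
long deletedDocs = docs.getDeleted(); // deleted documents not yet merged away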
/**
 * Flush the engine (committing segments to disk and truncating the
 * translog) and close it.
 */
public void flushAndClose() throws IOException {
    if (isClosed.get() == false) {
        logger.trace("flushAndClose now acquiring writeLock");
        try (ReleasableLock lock = writeLock.acquire()) {
            logger.trace("flushAndClose now acquired writeLock");
            try {
                logger.debug("flushing shard on close - this might take some time to sync files to disk");
                try {
                    // TODO we might force a flush in the future since we have the write lock already,
                    // even though recoveries are running.
                    flush();
                } catch (AlreadyClosedException ex) {
                    logger.debug("engine already closed - skipping flushAndClose");
                }
            } finally {
                close(); // double close is not a problem
            }
        }
    }
    awaitPendingClose();
}
/**
 * Perform the last stages of recovery once all translog operations are done.
 * Note that you should still call {@link #postRecovery(String)}.
 */
public void finalizeRecovery() {
    recoveryState().setStage(RecoveryState.Stage.FINALIZE);
    Engine engine = getEngine();
    engine.refresh("recovery_finalization");
    engine.config().setEnableGcDeletes(true);
}
/**
 * Executes the given flush request against the engine.
 *
 * @param request the flush request
 * @return the commit ID
 */
public Engine.CommitId flush(FlushRequest request) {
    final boolean waitIfOngoing = request.waitIfOngoing();
    final boolean force = request.force();
    logger.trace("flush with {}", request);
    /*
     * We allow flushes while recovering since we allow operations to happen while recovering and we want to keep the
     * translog under control (up to deletes, which we do not GC). Yet, we do not use flush internally to clear deletes
     * and flush the index writer since we use Engine#writeIndexingBuffer for this now.
     */
    verifyNotClosed();
    final Engine engine = getEngine();
    if (engine.isRecovering()) {
        throw new IllegalIndexShardStateException(
            shardId(), state, "flush is only allowed if the engine is not recovering from translog");
    }
    final long time = System.nanoTime();
    final Engine.CommitId commitId = engine.flush(force, waitIfOngoing);
    engine.refresh("flush"); // TODO this is technically wrong, we should remove this in 7.0
    flushMetric.inc(System.nanoTime() - time);
    return commitId;
}
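For context, a hedged example of driving this API from a caller; the shard variable is an assumption:

// force a flush even when no changes are pending, and wait if another flush is already running
FlushRequest request = new FlushRequest().force(true).waitIfOngoing(true);
Engine.CommitId commitId = shard.flush(request);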
public List<Segment> segments(boolean verbose) {
    return getEngine().segments(verbose);
}
public void forceMerge(ForceMergeRequest forceMerge) throws IOException {
    verifyActive();
    if (logger.isTraceEnabled()) {
        logger.trace("force merge with {}", forceMerge);
    }
    Engine engine = getEngine();
    engine.forceMerge(forceMerge.flush(), forceMerge.maxNumSegments(),
        forceMerge.onlyExpungeDeletes(), false, false);
    if (forceMerge.flush()) {
        engine.refresh("force_merge"); // TODO this is technically wrong we should remove this in 7.0
    }
}
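A sketch of a typical call, assuming a shard reference, that merges down to a single segment and flushes afterwards:

ForceMergeRequest request = new ForceMergeRequest();
request.maxNumSegments(1);   // merge down to a single segment
request.flush(true);         // commit the merged segments when done
shard.forceMerge(request);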
/**
 * Returns a new searcher instance. The consumer of this
 * API is responsible for releasing the returned searcher in a
 * safe manner, preferably in a try/finally block.
 *
 * @param source the source API or routing that triggers this searcher acquire
 *
 * @see Searcher#close()
 */
public final Searcher acquireSearcher(String source) throws EngineException {
    return acquireSearcher(source, SearcherScope.EXTERNAL);
}
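A minimal sketch of the calling pattern the javadoc describes, assuming an engine reference; the searcher is releasable, so try-with-resources guarantees it is returned:

try (Engine.Searcher searcher = engine.acquireSearcher("my_feature")) {
    int liveDocs = searcher.reader().numDocs();
    // ... run queries against searcher.searcher() ...
} // the searcher is released here, even if an exception is thrown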
/** Check whether the engine should be failed */
protected boolean maybeFailEngine(String source, Exception e) {
    if (Lucene.isCorruptionException(e)) {
        failEngine("corrupt file (source: [" + source + "])", e);
        return true;
    }
    return false;
}
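A hedged sketch of how a concrete engine might extend this hook to also treat a tragic IndexWriter event as fatal; the indexWriter field is an assumption, not part of the snippet above:

@Override
protected boolean maybeFailEngine(String source, Exception e) {
    if (super.maybeFailEngine(source, e)) {
        return true;
    }
    // illustrative only: an IndexWriter that was closed by a tragic event is unusable
    if (indexWriter.isOpen() == false && indexWriter.getTragicException() != null) {
        failEngine("already closed by tragic event on the index writer", e);
        return true;
    }
    return false;
}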
public void flushAndCloseEngine() throws IOException {
    getEngine().flushAndClose();
}
private void applyOperation(Engine engine, Engine.Operation operation) throws IOException {
    switch (operation.operationType()) {
        case INDEX:
            Engine.Index engineIndex = (Engine.Index) operation;
            Mapping update = engineIndex.parsedDoc().dynamicMappingsUpdate();
            if (update != null) {
                recoveredTypes.compute(engineIndex.type(),
                    (k, mapping) -> mapping == null ? update : mapping.merge(update, false));
            }
            engine.index(engineIndex);
            break;
        case DELETE:
            engine.delete((Engine.Delete) operation);
            break;
        case NO_OP:
            engine.noOp((Engine.NoOp) operation);
            break;
        default:
            throw new IllegalStateException("No operation defined for [" + operation + "]");
    }
}
protected Segment[] getSegmentInfo(SegmentInfos lastCommittedSegmentInfos, boolean verbose) {
    ensureOpen();
    Map<String, Segment> segments = new HashMap<>();
    try (Searcher searcher = acquireSearcher("segments", SearcherScope.EXTERNAL)) {
        for (LeafReaderContext ctx : searcher.reader().getContext().leaves()) {
            fillSegmentInfo(Lucene.segmentReader(ctx.reader()), verbose, true, segments);
        }
    }
    try (Searcher searcher = acquireSearcher("segments", SearcherScope.INTERNAL)) {
        for (LeafReaderContext ctx : searcher.reader().getContext().leaves()) {
            SegmentReader segmentReader = Lucene.segmentReader(ctx.reader());
            if (segments.containsKey(segmentReader.getSegmentName()) == false) {
                fillSegmentInfo(segmentReader, verbose, false, segments);
            }
        }
    }
    // ... remainder of the method (building the Segment[] result from lastCommittedSegmentInfos) is truncated in this excerpt
public boolean refreshNeeded() {
    if (store.tryIncRef()) {
        /*
         * we need to inc the store here since we acquire a searcher and that might keep a file open on the store.
         * this violates the assumption that all files are closed when the store is closed, so we need to make sure
         * we increment it here
         */
        try {
            try (Searcher searcher = acquireSearcher("refresh_needed", SearcherScope.EXTERNAL)) {
                return searcher.getDirectoryReader().isCurrent() == false;
            }
        } catch (IOException e) {
            logger.error("failed to access searcher manager", e);
            failEngine("failed to access searcher manager", e);
            throw new EngineException(shardId, "failed to access searcher manager", e);
        } finally {
            store.decRef();
        }
    }
    return false;
}
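The tryIncRef/decRef guard used above follows a general reference-counting pattern; below is a self-contained, simplified illustration of that pattern (not the actual Store implementation):

import java.util.concurrent.atomic.AtomicInteger;

final class SimpleRefCounted {
    private final AtomicInteger refCount = new AtomicInteger(1); // 1 = the owning reference

    /** Takes a reference only if the resource has not been fully released yet. */
    boolean tryIncRef() {
        int current;
        do {
            current = refCount.get();
            if (current <= 0) {
                return false; // already closed; do not resurrect
            }
        } while (refCount.compareAndSet(current, current + 1) == false);
        return true;
    }

    /** Releases a reference; the last release closes the underlying resource. */
    void decRef() {
        if (refCount.decrementAndGet() == 0) {
            // close files / free resources here
        }
    }
}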
/**
 * Triggers a forced merge on this engine
 */
public abstract void forceMerge(boolean flush, int maxNumSegments, boolean onlyExpungeDeletes,
                                boolean upgrade, boolean upgradeOnlyAncientSegments) throws EngineException, IOException;
getEngine().flush(false, true);
if (getMaxSeqNoOfUpdatesOrDeletes() == SequenceNumbers.UNASSIGNED_SEQ_NO) {
    getEngine().advanceMaxSeqNoOfUpdatesOrDeletes(seqNoStats().getMaxSeqNo());
}
engine.advanceMaxSeqNoOfUpdatesOrDeletes(seqNoStats().getMaxSeqNo());
engine.restoreLocalHistoryFromTranslog((resettingEngine, snapshot) ->
    runTranslogRecovery(resettingEngine, snapshot, Engine.Operation.Origin.LOCAL_RESET, () -> {}));
if (indexSettings.getIndexVersionCreated().onOrBefore(Version.V_6_0_0_alpha1)) {
    engine.flush(false, true);
}
engine.rollTranslogGeneration();
engine.fillSeqNoGaps(newPrimaryTerm);
replicationTracker.updateLocalCheckpoint(currentRouting.allocationId().getId(), getLocalCheckpoint());
primaryReplicaSyncer.accept(this, new ActionListener<ResyncTask>() {
        logger.trace("[translog] recover [create] op of [{}][{}]", create.type(), create.id());
        engine.create(engineCreate);
        break;
    case SAVE:
        logger.trace("[translog] recover [index] op of [{}][{}]", index.type(), index.id());
        engine.index(engineIndex);
        break;
    case DELETE:
        logger.trace("[translog] recover [delete] op of [{}][{}]", uid.type(), uid.id());
        engine.delete(new Engine.Delete(uid.type(), uid.id(), delete.uid(), delete.version(),
            delete.versionType().versionTypeForReplicationAndRecovery(), Engine.Operation.Origin.RECOVERY,
            System.nanoTime(), false));
        break;
    case DELETE_BY_QUERY:
        Translog.DeleteByQuery deleteByQuery = (Translog.DeleteByQuery) operation;
        engine.delete(prepareDeleteByQuery(queryParserService, mapperService, indexAliasesService, indexCache,
            deleteByQuery.source(), deleteByQuery.filteringAliases(), Engine.Operation.Origin.RECOVERY,
            deleteByQuery.types()));
        break;
/**
 * Writes all indexing changes to disk and opens a new searcher reflecting all changes.
 * This can throw {@link AlreadyClosedException}.
 */
public void refresh(String source) {
    verifyNotClosed();
    if (logger.isTraceEnabled()) {
        logger.trace("refresh with source [{}]", source);
    }
    getEngine().refresh(source);
}