private void releaseIndexCommit(IndexCommit snapshot) throws IOException {
    // Revisit the deletion policy if we can clean up the snapshotting commit.
    if (combinedDeletionPolicy.releaseCommit(snapshot)) {
        ensureOpen();
        // We don't have to trim the translog here because snapshotting an index commit
        // does not lock the translog or prevent unreferenced files from being trimmed.
        indexWriter.deleteUnusedFiles();
    }
}
@Override
public InternalEngine recoverFromTranslog(TranslogRecoveryRunner translogRecoveryRunner, long recoverUpToSeqNo) throws IOException {
    flushLock.lock();
    try (ReleasableLock lock = readLock.acquire()) {
        ensureOpen();
        assert getMaxSeqNoOfUpdatesOrDeletes() != SequenceNumbers.UNASSIGNED_SEQ_NO
            || engineConfig.getIndexSettings().getIndexVersionCreated().before(Version.V_6_0_0)
            : "max_seq_no_of_updates is uninitialized";
        if (pendingTranslogRecovery.get() == false) {
            throw new IllegalStateException("Engine has already been recovered");
        }
        try {
            recoverFromTranslogInternal(translogRecoveryRunner, recoverUpToSeqNo);
        } catch (Exception e) {
            try {
                pendingTranslogRecovery.set(true); // just play safe and never allow commits on this; see #ensureCanFlush
                failEngine("failed to recover from translog", e);
            } catch (Exception inner) {
                e.addSuppressed(inner);
            }
            throw e;
        }
    } finally {
        flushLock.unlock();
    }
    return this;
}
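// Illustrative sketch, not upstream code: a minimal TranslogRecoveryRunner that only counts
// the operations handed to it. A real runner (such as the one the shard supplies) would
// replay each operation into the engine. COUNTING_RUNNER_SKETCH is a hypothetical name.
static final TranslogRecoveryRunner COUNTING_RUNNER_SKETCH = (engine, snapshot) -> {
    int opsRecovered = 0;
    while (snapshot.next() != null) {
        opsRecovered++;
    }
    return opsRecovered;
};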
@Override
public void trimUnreferencedTranslogFiles() throws EngineException {
    try (ReleasableLock lock = readLock.acquire()) {
        ensureOpen();
        translog.trimUnreferencedReaders();
    } catch (AlreadyClosedException e) {
        failOnTragicEvent(e);
        throw e;
    } catch (Exception e) {
        try {
            failEngine("translog trimming failed", e);
        } catch (Exception inner) {
            e.addSuppressed(inner);
        }
        throw new EngineException(shardId, "failed to trim translog", e);
    }
}
@Override
public void trimOperationsFromTranslog(long belowTerm, long aboveSeqNo) throws EngineException {
    try (ReleasableLock lock = readLock.acquire()) {
        ensureOpen();
        translog.trimOperations(belowTerm, aboveSeqNo);
    } catch (AlreadyClosedException e) {
        failOnTragicEvent(e);
        throw e;
    } catch (Exception e) {
        try {
            failEngine("translog operations trimming failed", e);
        } catch (Exception inner) {
            e.addSuppressed(inner);
        }
        throw new EngineException(shardId, "failed to trim translog operations", e);
    }
}
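// Illustrative sketch, assuming the upstream Translog#trimOperations semantics: operations
// with a primary term below belowTerm and a sequence number above aboveSeqNo are discarded.
// trimStaleOperationsSketch is a hypothetical helper mirroring how a primary/replica resync
// would drop operations left behind by stale primaries.
private void trimStaleOperationsSketch(long currentPrimaryTerm, long resyncedUpToSeqNo) {
    trimOperationsFromTranslog(currentPrimaryTerm, resyncedUpToSeqNo);
}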
@Override
public Translog.Snapshot newChangesSnapshot(String source, MapperService mapperService, long fromSeqNo, long toSeqNo,
                                            boolean requiredFullRange) throws IOException {
    if (softDeleteEnabled == false) {
        throw new IllegalStateException("accessing changes snapshot requires soft-deletes enabled");
    }
    ensureOpen();
    refreshIfNeeded(source, toSeqNo);
    Searcher searcher = acquireSearcher(source, SearcherScope.INTERNAL);
    try {
        LuceneChangesSnapshot snapshot = new LuceneChangesSnapshot(
            searcher, mapperService, LuceneChangesSnapshot.DEFAULT_BATCH_SIZE, fromSeqNo, toSeqNo, requiredFullRange);
        searcher = null; // ownership of the searcher has been transferred to the snapshot
        return snapshot;
    } catch (Exception e) {
        try {
            maybeFailEngine("acquire changes snapshot", e);
        } catch (Exception inner) {
            e.addSuppressed(inner);
        }
        throw e;
    } finally {
        IOUtils.close(searcher); // no-op on success, releases the searcher on failure
    }
}
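// Illustrative sketch, not upstream code: draining a changes snapshot. The snapshot must be
// closed to release the searcher it holds, hence try-with-resources. countChangesSketch is
// a hypothetical helper name.
private int countChangesSketch(MapperService mapperService, long fromSeqNo, long toSeqNo) throws IOException {
    int ops = 0;
    try (Translog.Snapshot snapshot = newChangesSnapshot("sketch", mapperService, fromSeqNo, toSeqNo, true)) {
        while (snapshot.next() != null) {
            ops++;
        }
    }
    return ops;
}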
@Override
public int restoreLocalHistoryFromTranslog(TranslogRecoveryRunner translogRecoveryRunner) throws IOException {
    try (ReleasableLock ignored = readLock.acquire()) {
        ensureOpen();
        final long localCheckpoint = localCheckpointTracker.getCheckpoint();
        try (Translog.Snapshot snapshot = getTranslog().newSnapshotFromMinSeqNo(localCheckpoint + 1)) {
            return translogRecoveryRunner.run(this, snapshot);
        }
    }
}
final boolean tryRenewSyncCommit() {
    boolean renewed = false;
    try (ReleasableLock lock = writeLock.acquire()) {
        ensureOpen();
        ensureCanFlush();
        String syncId = lastCommittedSegmentInfos.getUserData().get(SYNC_COMMIT_ID);
        long translogGenOfLastCommit = Long.parseLong(lastCommittedSegmentInfos.userData.get(Translog.TRANSLOG_GENERATION_KEY));
        if (syncId != null && indexWriter.hasUncommittedChanges() && translog.totalOperationsByMinGen(translogGenOfLastCommit) == 0) {
            logger.trace("start renewing sync commit [{}]", syncId);
            commitIndexWriter(indexWriter, translog, syncId);
            logger.debug("successfully sync committed. sync id [{}].", syncId);
            lastCommittedSegmentInfos = store.readLastCommittedSegmentsInfo();
            renewed = true;
        }
    } catch (IOException ex) {
        maybeFailEngine("renew sync commit", ex);
        throw new EngineException(shardId, "failed to renew sync commit", ex);
    }
    if (renewed) {
        // refresh outside of the write lock
        // we have to refresh internal searcher here to ensure we release unreferenced segments.
        refresh("renew sync commit", SearcherScope.INTERNAL);
    }
    return renewed;
}
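// Illustrative sketch, assuming the upstream calling pattern: when the sync commit cannot be
// renewed (e.g. new operations arrived since the last commit), callers fall back to a real
// flush. renewOrFlushSketch is a hypothetical helper; the flush arguments are an assumption.
private void renewOrFlushSketch() {
    if (tryRenewSyncCommit() == false) {
        flush(false, true); // force=false, waitIfOngoing=true
    }
}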
@Override
public SyncedFlushResult syncFlush(String syncId, CommitId expectedCommitId) throws EngineException {
    // best-effort check before we acquire locks
    ensureOpen();
    if (indexWriter.hasUncommittedChanges()) {
        logger.trace("can't sync commit [{}]. have pending changes", syncId);
        return SyncedFlushResult.PENDING_OPERATIONS;
    }
    try (ReleasableLock lock = writeLock.acquire()) {
        ensureOpen();
        ensureCanFlush();
@Override
public void rollTranslogGeneration() throws EngineException {
    try (ReleasableLock ignored = readLock.acquire()) {
        ensureOpen();
        translog.rollGeneration();
        translog.trimUnreferencedReaders();
    } catch (AlreadyClosedException e) {
        failOnTragicEvent(e);
        throw e;
    } catch (Exception e) {
        try {
            failEngine("translog trimming failed", e);
        } catch (Exception inner) {
            e.addSuppressed(inner);
        }
        throw new EngineException(shardId, "failed to roll translog", e);
    }
}
@Override
public CommitId flush(boolean force, boolean waitIfOngoing) throws EngineException {
    ensureOpen();
    final byte[] newCommitId;
    // the lock order matters here: acquire the read lock before the flush lock, otherwise
    // flushing at the end of recovery while holding the write lock can deadlock
    try (ReleasableLock lock = readLock.acquire()) {
        ensureOpen();
        if (flushLock.tryLock() == false) {
@Override
public int fillSeqNoGaps(long primaryTerm) throws IOException {
    try (ReleasableLock ignored = writeLock.acquire()) {
        ensureOpen();
        final long localCheckpoint = localCheckpointTracker.getCheckpoint();
        final long maxSeqNo = localCheckpointTracker.getMaxSeqNo();
        int numNoOpsAdded = 0;
        for (long seqNo = localCheckpoint + 1; seqNo <= maxSeqNo;
             seqNo = localCheckpointTracker.getCheckpoint() + 1 /* the local checkpoint might have advanced so we leap-frog */) {
            innerNoOp(new NoOp(seqNo, primaryTerm, Operation.Origin.PRIMARY, System.nanoTime(), "filling gaps"));
            numNoOpsAdded++;
            assert seqNo <= localCheckpointTracker.getCheckpoint()
                : "local checkpoint did not advance; was [" + seqNo + "], now [" + localCheckpointTracker.getCheckpoint() + "]";
        }
        return numNoOpsAdded;
    }
}
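// Illustrative sketch, assuming the promotion flow: when a replica is promoted to primary,
// gaps below max_seq_no are plugged with no-ops under the new primary term so that the local
// history is complete. onPromotionSketch is a hypothetical name.
private void onPromotionSketch(long newPrimaryTerm) throws IOException {
    final int noOpsAdded = fillSeqNoGaps(newPrimaryTerm);
    logger.trace("filled [{}] sequence number gaps after promotion", noOpsAdded);
}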
optimizeLock.lock();
try {
    ensureOpen();
    if (upgrade) {
        logger.info("starting segment upgrade upgradeOnlyAncientSegments={}", upgradeOnlyAncientSegments);
    }
    // ... merge/upgrade work elided from this excerpt ...
} catch (AlreadyClosedException ex) {
    // first check whether the engine is still open; if so this must be an uncaught exception
    ensureOpen(ex);
    failOnTragicEvent(ex);
    throw ex;
@Override
public boolean shouldPeriodicallyFlush() {
    ensureOpen();
    final long translogGenerationOfLastCommit =
        Long.parseLong(lastCommittedSegmentInfos.userData.get(Translog.TRANSLOG_GENERATION_KEY));
    final long flushThreshold = config().getIndexSettings().getFlushThresholdSize().getBytes();
    if (translog.sizeInBytesByMinGen(translogGenerationOfLastCommit) < flushThreshold) {
        return false;
    }
    /*
     * We flush to reduce the size of the uncommitted translog, but strictly speaking the uncommitted size won't always
     * be below the flush threshold after a flush. To avoid getting into an endless loop of flushing, we only enable the
     * periodic flush condition if this condition is disabled after a flush. The condition will change if the new commit
     * points to a later translog generation than the last commit's (i.e. gen-of-last-commit < gen-of-new-commit) [1].
     *
     * When the local checkpoint equals max_seqno, and the translog generation of the last commit equals the translog
     * generation of the new commit, we know that the last generation must contain operations because its size is above
     * the flush threshold and the flush threshold is guaranteed to be higher than an empty translog by the setting
     * validation. This guarantees that the new commit will point to the newly rolled generation. In fact, this scenario
     * only happens when the generation threshold is close to or above the flush threshold; otherwise we would have
     * rolled generations when the generation threshold was reached, and the first condition (i.e. [1]) would already
     * be satisfied.
     *
     * This method maintains the translog only, thus the IndexWriter#hasUncommittedChanges condition is not considered.
     */
    final long translogGenerationOfNewCommit =
        translog.getMinGenerationForSeqNo(localCheckpointTracker.getCheckpoint() + 1).translogFileGeneration;
    return translogGenerationOfLastCommit < translogGenerationOfNewCommit
        || localCheckpointTracker.getCheckpoint() == localCheckpointTracker.getMaxSeqNo();
}
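// Illustrative sketch, assuming the periodic flush driver pattern described above: a scheduler
// checks the condition and triggers a non-forced flush, after which the condition is designed
// to become false. periodicFlushSketch is a hypothetical helper.
private void periodicFlushSketch() {
    if (shouldPeriodicallyFlush()) {
        flush(false, false); // force=false, waitIfOngoing=false
    }
}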
@Override
public Translog getTranslog() {
    ensureOpen();
    return translog;
}
ensureOpen();
if (store.tryIncRef()) {
ensureOpen();
lastWriteNanos = delete.startTime();
final DeletionStrategy plan = deletionStrategyForOperation(delete);
assert Objects.equals(get.uid().field(), uidField) : get.uid().field();
try (ReleasableLock ignored = readLock.acquire()) {
    ensureOpen();
    SearcherScope scope;
    if (get.realtime()) {
final boolean doThrottle = index.origin().isRecovery() == false;
try (ReleasableLock releasableLock = readLock.acquire()) {
    ensureOpen();
    assert assertIncomingSequenceNumber(index.origin(), index.seqNo());
    assert assertVersionType(index);