@Override
public Engine newReadWriteEngine(EngineConfig config) {
    return new InternalEngine(config);
}
/** resolves the current version of the document, returning null if not found */
private VersionValue resolveDocVersion(final Operation op, boolean loadSeqNo) throws IOException {
    assert incrementVersionLookup(); // used for asserting in tests
    VersionValue versionValue = getVersionFromMap(op.uid().bytes());
    if (versionValue == null) {
        assert incrementIndexVersionLookup(); // used for asserting in tests
        final VersionsAndSeqNoResolver.DocIdAndVersion docIdAndVersion;
        try (Searcher searcher = acquireSearcher("load_version", SearcherScope.INTERNAL)) {
            docIdAndVersion = VersionsAndSeqNoResolver.loadDocIdAndVersion(searcher.reader(), op.uid(), loadSeqNo);
        }
        if (docIdAndVersion != null) {
            versionValue = new IndexVersionValue(null, docIdAndVersion.version, docIdAndVersion.seqNo, docIdAndVersion.primaryTerm);
        }
    } else if (engineConfig.isEnableGcDeletes()
        && versionValue.isDelete()
        && (engineConfig.getThreadPool().relativeTimeInMillis() - ((DeleteVersionValue) versionValue).time) > getGcDeletesInMillis()) {
        // the delete tombstone has outlived the GC window; treat the document as not found
        versionValue = null;
    }
    return versionValue;
}
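// Hedged sketch (not part of InternalEngine; `isVersionConflict` is a hypothetical helper): the
// resolved VersionValue is what write paths compare against to detect version conflicts, using
// the real VersionType#isVersionConflictForWrites contract.
private boolean isVersionConflict(Operation op) throws IOException {
    final VersionValue versionValue = resolveDocVersion(op, false);
    final long currentVersion = versionValue == null ? Versions.NOT_FOUND : versionValue.version;
    final boolean currentlyDeleted = versionValue == null || versionValue.isDelete();
    return op.versionType().isVersionConflictForWrites(currentVersion, op.version(), currentlyDeleted);
}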
@Override
public synchronized void beforeMerge(OnGoingMerge merge) {
    int maxNumMerges = mergeScheduler.getMaxMergeCount();
    if (numMergesInFlight.incrementAndGet() > maxNumMerges) {
        if (isThrottling.getAndSet(true) == false) {
            logger.info("now throttling indexing: numMergesInFlight={}, maxNumMerges={}", numMergesInFlight, maxNumMerges);
            activateThrottling();
        }
    }
}
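// Hedged sketch of the symmetric callback (assumed shape, mirroring beforeMerge rather than
// copied from this file): once in-flight merges drop back below the limit, throttling stops.
@Override
public synchronized void afterMerge(OnGoingMerge merge) {
    int maxNumMerges = mergeScheduler.getMaxMergeCount();
    if (numMergesInFlight.decrementAndGet() < maxNumMerges) {
        if (isThrottling.getAndSet(false)) {
            logger.info("stop throttling indexing: numMergesInFlight={}, maxNumMerges={}", numMergesInFlight, maxNumMerges);
            deactivateThrottling();
        }
    }
}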
private IndexWriter createWriter() throws IOException {
    try {
        final IndexWriterConfig iwc = getIndexWriterConfig();
        return createWriter(store.directory(), iwc);
    } catch (LockObtainFailedException ex) {
        logger.warn("could not lock IndexWriter", ex);
        throw ex;
    }
}
@Override
public Translog.Snapshot newChangesSnapshot(String source, MapperService mapperService,
                                            long fromSeqNo, long toSeqNo, boolean requiredFullRange) throws IOException {
    if (softDeleteEnabled == false) {
        throw new IllegalStateException("accessing changes snapshot requires soft-deletes enabled");
    }
    ensureOpen();
    refreshIfNeeded(source, toSeqNo);
    Searcher searcher = acquireSearcher(source, SearcherScope.INTERNAL);
    try {
        LuceneChangesSnapshot snapshot = new LuceneChangesSnapshot(
            searcher, mapperService, LuceneChangesSnapshot.DEFAULT_BATCH_SIZE, fromSeqNo, toSeqNo, requiredFullRange);
        searcher = null; // success - ownership of the searcher is transferred to the snapshot
        return snapshot;
    } catch (Exception e) {
        try {
            maybeFailEngine("acquire changes snapshot", e);
        } catch (Exception inner) {
            e.addSuppressed(inner);
        }
        throw e;
    } finally {
        IOUtils.close(searcher); // no-op if ownership was transferred
    }
}
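// Hedged usage sketch (assumed caller; `engine`, `mapperService`, `maxSeqNo` and `process` are
// stand-ins): a changes snapshot is drained by calling next() until it returns null, and is
// closed via try-with-resources so the internal searcher is released.
try (Translog.Snapshot snapshot = engine.newChangesSnapshot("peer-recovery", mapperService, 0, maxSeqNo, true)) {
    Translog.Operation op;
    while ((op = snapshot.next()) != null) {
        process(op); // e.g. replicate the operation or rebuild history
    }
}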
@Override
public void trimUnreferencedTranslogFiles() throws EngineException {
    try (ReleasableLock lock = readLock.acquire()) {
        ensureOpen();
        translog.trimUnreferencedReaders();
    } catch (AlreadyClosedException e) {
        failOnTragicEvent(e);
        throw e;
    } catch (Exception e) {
        try {
            failEngine("translog trimming failed", e);
        } catch (Exception inner) {
            e.addSuppressed(inner);
        }
        throw new EngineException(shardId, "failed to trim translog", e);
    }
}
final boolean tryRenewSyncCommit() {
    boolean renewed = false;
    try (ReleasableLock lock = writeLock.acquire()) {
        ensureOpen();
        ensureCanFlush();
        String syncId = lastCommittedSegmentInfos.getUserData().get(SYNC_COMMIT_ID);
        long translogGenOfLastCommit = Long.parseLong(lastCommittedSegmentInfos.userData.get(Translog.TRANSLOG_GENERATION_KEY));
        if (syncId != null && indexWriter.hasUncommittedChanges() && translog.totalOperationsByMinGen(translogGenOfLastCommit) == 0) {
            logger.trace("start renewing sync commit [{}]", syncId);
            commitIndexWriter(indexWriter, translog, syncId);
            logger.debug("successfully sync committed. sync id [{}].", syncId);
            lastCommittedSegmentInfos = store.readLastCommittedSegmentsInfo();
            renewed = true;
        }
    } catch (IOException ex) {
        maybeFailEngine("renew sync commit", ex);
        throw new EngineException(shardId, "failed to renew sync commit", ex);
    }
    if (renewed) {
        // refresh outside of the write lock
        // we have to refresh internal searcher here to ensure we release unreferenced segments.
        refresh("renew sync commit", SearcherScope.INTERNAL);
    }
    return renewed;
}
@Override
public CommitId flush(boolean force, boolean waitIfOngoing) throws EngineException {
    ensureOpen();
    final byte[] newCommitId;
    try (ReleasableLock lock = readLock.acquire()) {
        ensureOpen();
        if (flushLock.tryLock() == false) {
            // a flush is already in progress: block if the caller asked for it, otherwise return the last commit id
            if (waitIfOngoing) {
                flushLock.lock();
            } else {
                return new CommitId(lastCommittedSegmentInfos.getId());
            }
        }
        try {
            if (indexWriter.hasUncommittedChanges() || force || shouldPeriodicallyFlush()) {
                ensureCanFlush();
                try {
                    translog.rollGeneration();
                    logger.trace("starting commit for flush; commitTranslog=true");
                    commitIndexWriter(indexWriter, translog, null);
                    logger.trace("finished commit for flush");
                    // a refresh is needed here to clear older versions from the version map
                    refresh("version_table_flush", SearcherScope.INTERNAL);
                    translog.trimUnreferencedReaders();
                } catch (AlreadyClosedException e) {
                    failOnTragicEvent(e);
                    throw e;
                } catch (Exception e) {
                    throw new FlushFailedEngineException(shardId, e);
                }
                refreshLastCommittedSegmentInfos();
            }
            newCommitId = lastCommittedSegmentInfos.getId();
        } catch (FlushFailedEngineException ex) {
            maybeFailEngine("flush", ex);
            throw ex;
        } finally {
            flushLock.unlock();
        }
    }
    // prune tombstones outside the flush lock; deletes that outlived the GC window can go now
    pruneDeletedTombstones();
    return new CommitId(newCommitId);
}
// InternalEngine constructor (excerpt): wire the translog, deletion policies, writer and searcher managers
super(engineConfig);
if (engineConfig.isAutoGeneratedIDsOptimizationEnabled() == false) {
    updateAutoIdTimestamp(Long.MAX_VALUE, true);
}
throttle = new IndexThrottle();
try {
    translog = openTranslog(engineConfig, translogDeletionPolicy, engineConfig.getGlobalCheckpointSupplier());
    assert translog.getGeneration() != null;
    this.translog = translog;
    this.softDeleteEnabled = engineConfig.getIndexSettings().isSoftDeleteEnabled();
    this.softDeletesPolicy = newSoftDeletesPolicy();
    this.combinedDeletionPolicy =
        new CombinedDeletionPolicy(logger, translogDeletionPolicy, softDeletesPolicy, translog::getLastSyncedGlobalCheckpoint);
    writer = createWriter();
    bootstrapAppendOnlyInfoFromWriter(writer);
    historyUUID = loadHistoryUUID(writer);
    indexWriter = writer;
} catch (IOException | TranslogCorruptedException e) {
    throw new EngineCreationFailureException(shardId, "failed to create engine", e);
}
externalSearcherManager = createSearcherManager(new SearchFactory(logger, isClosed, engineConfig));
internalSearcherManager = externalSearcherManager.internalSearcherManager;
this.internalSearcherManager = internalSearcherManager;
this.externalSearcherManager = externalSearcherManager;
this.localCheckpointTracker = createLocalCheckpointTracker(engineConfig, lastCommittedSegmentInfos, logger,
    () -> acquireSearcher("create_local_checkpoint_tracker", SearcherScope.INTERNAL), localCheckpointTrackerSupplier);
this.lastRefreshedCheckpointListener = new LastRefreshedCheckpointListener(localCheckpointTracker.getCheckpoint());
this.internalSearcherManager.addListener(lastRefreshedCheckpointListener);
final void refresh(String source, SearcherScope scope) throws EngineException {
    final long localCheckpointBeforeRefresh = getLocalCheckpoint();
    try (ReleasableLock lock = readLock.acquire()) {
        ensureOpen();
        if (store.tryIncRef()) {
            // increment the ref just to ensure nobody closes the store while we refresh
            try {
                ReferenceManager<IndexSearcher> referenceManager = getReferenceManager(scope);
                referenceManager.maybeRefreshBlocking();
            } finally {
                store.decRef();
            }
        }
    } catch (AlreadyClosedException e) {
        failOnTragicEvent(e);
        throw e;
    } catch (Exception e) {
        try {
            failEngine("refresh failed source[" + source + "]", e);
        } catch (Exception inner) {
            e.addSuppressed(inner);
        }
        throw new RefreshFailedEngineException(shardId, e);
    }
    assert lastRefreshedCheckpoint() >= localCheckpointBeforeRefresh : "refresh checkpoint was not advanced; "
        + "local_checkpoint=" + localCheckpointBeforeRefresh + " refresh_checkpoint=" + lastRefreshedCheckpoint();
    maybePruneDeletes();
    mergeScheduler.refreshConfig();
}
@Override
public DeleteResult delete(Delete delete) throws IOException {
    versionMap.enforceSafeAccess();
    assert Objects.equals(delete.uid().field(), uidField) : delete.uid().field();
    assert assertVersionType(delete);
    assert assertIncomingSequenceNumber(delete.origin(), delete.seqNo());
    final DeleteResult deleteResult;
    // NOTE: we don't throttle this when merges fall behind because delete-by-id does not create new segments:
    try (ReleasableLock ignored = readLock.acquire();
         Releasable ignored2 = versionMap.acquireLock(delete.uid().bytes())) {
        ensureOpen();
        lastWriteNanos = delete.startTime();
        final DeletionStrategy plan = deletionStrategyForOperation(delete);
        if (plan.earlyResultOnPreflightError.isPresent()) {
            deleteResult = plan.earlyResultOnPreflightError.get();
        } else if (plan.deleteFromLucene) {
            deleteResult = deleteInLucene(delete, plan);
        } else {
            deleteResult = new DeleteResult(
                plan.versionOfDeletion, getPrimaryTerm(), plan.seqNoOfDeletion, plan.currentlyDeleted == false);
        }
        if (delete.origin().isFromTranslog() == false) {
            final Translog.Location location;
            if (deleteResult.getResultType() == Result.Type.SUCCESS) {
                location = translog.add(new Translog.Delete(delete, deleteResult));
            } else if (deleteResult.getSeqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO) {
                // a failed delete that consumed a seq_no is recorded as a no-op to keep the history dense
                final NoOp noOp = new NoOp(deleteResult.getSeqNo(), delete.primaryTerm(), delete.origin(),
                    delete.startTime(), deleteResult.getFailure().toString());
                location = innerNoOp(noOp).getTranslogLocation();
            } else {
                location = null;
            }
            deleteResult.setTranslogLocation(location);
        }
    } catch (RuntimeException | IOException e) {
        try {
            maybeFailEngine("delete", e);
        } catch (Exception inner) {
            e.addSuppressed(inner);
        }
        throw e;
    }
    maybePruneDeletes();
    return deleteResult;
}
@Override
public IndexResult index(Index index) throws IOException {
    assert Objects.equals(index.uid().field(), uidField) : index.uid().field();
    final boolean doThrottle = index.origin().isRecovery() == false;
    try (ReleasableLock releasableLock = readLock.acquire()) {
        ensureOpen();
        assert assertIncomingSequenceNumber(index.origin(), index.seqNo());
        assert assertVersionType(index);
        try (Releasable ignored = versionMap.acquireLock(index.uid().bytes());
             Releasable indexThrottle = doThrottle ? throttle.acquireThrottle() : () -> {}) {
            lastWriteNanos = index.startTime();
            final IndexingStrategy plan = indexingStrategyForOperation(index);
            final IndexResult indexResult;
            if (plan.earlyResultOnPreFlightError.isPresent()) {
                indexResult = plan.earlyResultOnPreFlightError.get();
                assert indexResult.getResultType() == Result.Type.FAILURE : indexResult.getResultType();
            } else if (plan.indexIntoLucene || plan.addStaleOpToLucene) {
                indexResult = indexIntoLucene(index, plan);
            } else {
                indexResult = new IndexResult(
                    plan.versionForIndexing, getPrimaryTerm(), plan.seqNoForIndexing, plan.currentNotFoundOrDeleted);
            }
            if (index.origin().isFromTranslog() == false) {
                final Translog.Location location;
                if (indexResult.getResultType() == Result.Type.SUCCESS) {
                    location = translog.add(new Translog.Index(index, indexResult));
                } else if (indexResult.getSeqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO) {
                    // a failed write that consumed a seq_no is recorded as a no-op to keep the history dense
                    final NoOp noOp = new NoOp(indexResult.getSeqNo(), index.primaryTerm(), index.origin(),
                        index.startTime(), indexResult.getFailure().toString());
                    location = innerNoOp(noOp).getTranslogLocation();
                } else {
                    location = null;
                }
                indexResult.setTranslogLocation(location);
            }
            return indexResult;
        }
    } catch (RuntimeException | IOException e) {
        try {
            maybeFailEngine("index", e);
        } catch (Exception inner) {
            e.addSuppressed(inner);
        }
        throw e;
    }
}
protected final IndexingStrategy planIndexingAsNonPrimary(Index index) throws IOException {
    assertNonPrimaryOrigin(index);
    final IndexingStrategy plan;
    final boolean appendOnlyRequest = canOptimizeAddDocument(index);
    if (appendOnlyRequest && mayHaveBeenIndexedBefore(index) == false && index.seqNo() > maxSeqNoOfNonAppendOnlyOperations.get()) {
        // no non-append-only op with a higher seq_no has been seen; add the doc without a version lookup
        plan = IndexingStrategy.optimizedAppendOnly(index.seqNo());
    } else {
        final OpVsLuceneDocStatus opVsLucene;
        if (index.seqNo() == SequenceNumbers.UNASSIGNED_SEQ_NO) {
            // ops without a seq_no can only come from an old primary or a translog written by an old version
            assert config().getIndexSettings().getIndexVersionCreated().before(Version.V_6_0_0_alpha1) :
                "index is newly created but op has no sequence numbers. op: " + index;
            opVsLucene = compareOpToLuceneDocBasedOnVersions(index);
        } else {
            opVsLucene = compareOpToLuceneDocBasedOnSeqNo(index);
        }
        if (opVsLucene == OpVsLuceneDocStatus.OP_STALE_OR_EQUAL) {
            plan = IndexingStrategy.processAsStaleOp(softDeleteEnabled, index.seqNo(), index.version());
        } else {
            plan = IndexingStrategy.processNormally(opVsLucene == OpVsLuceneDocStatus.LUCENE_DOC_NOT_FOUND,
                index.seqNo(), index.version());
        }
    }
    return plan;
}
@Override
public void forceMerge(final boolean flush, int maxNumSegments, boolean onlyExpungeDeletes,
                       final boolean upgrade, final boolean upgradeOnlyAncientSegments) throws EngineException, IOException {
    optimizeLock.lock();
    try {
        ensureOpen();
        if (upgrade) {
            logger.info("starting segment upgrade upgradeOnlyAncientSegments={}", upgradeOnlyAncientSegments);
        }
        store.incRef(); // ensure nobody closes the store while we merge
        try {
            if (onlyExpungeDeletes) {
                indexWriter.forceMergeDeletes(true /* blocks and waits for merges */);
            } else if (maxNumSegments <= 0) {
                indexWriter.maybeMerge();
            } else {
                indexWriter.forceMerge(maxNumSegments, true /* blocks and waits for merges */);
            }
            if (flush) {
                if (tryRenewSyncCommit() == false) {
                    flush(false, true);
                }
            }
        } finally {
            store.decRef();
        }
    } catch (AlreadyClosedException ex) {
        // if the engine is still open this exception is fine; we don't hold locks while merging,
        // so a concurrent close is allowed. Otherwise escalate the tragic event.
        ensureOpen(ex);
        failOnTragicEvent(ex);
        throw ex;
    } catch (Exception e) {
        try {
            maybeFailEngine("force merge", e);
        } catch (Exception inner) {
            e.addSuppressed(inner);
        }
        throw e;
    } finally {
        optimizeLock.unlock();
    }
}
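// Hedged usage sketch (assumed call site; `engine` is a stand-in): merge the shard down to a
// single segment and flush afterwards, with no upgrade pass; arguments follow the signature above.
engine.forceMerge(true /* flush */, 1 /* maxNumSegments */, false /* onlyExpungeDeletes */,
    false /* upgrade */, false /* upgradeOnlyAncientSegments */);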
@Override
public GetResult get(Get get, BiFunction<String, SearcherScope, Searcher> searcherFactory) throws EngineException {
    assert Objects.equals(get.uid().field(), uidField) : get.uid().field();
    try (ReleasableLock ignored = readLock.acquire()) {
        ensureOpen();
        SearcherScope scope;
        if (get.realtime()) {
            final VersionValue versionValue;
            try (Releasable ignore = versionMap.acquireLock(get.uid().bytes())) {
                // we need to lock here to access the version map to do this truly in RT
                versionValue = getVersionFromMap(get.uid().bytes());
            }
            if (versionValue != null) {
                if (versionValue.isDelete()) {
                    return GetResult.NOT_EXISTS;
                }
                if (get.isReadFromTranslog() && versionValue.getLocation() != null) {
                    try {
                        // serve the get straight from the translog operation (result construction elided)
                        translog.readOperation(versionValue.getLocation());
                    } catch (IOException e) {
                        maybeFailEngine("realtime_get", e); // lets check if the translog has failed with a tragic event
                        throw new EngineException(shardId, "failed to read operation from translog", e);
                    }
                }
                refresh("realtime_get", SearcherScope.INTERNAL);
            }
            scope = SearcherScope.INTERNAL;
        } else {
            // non-realtime gets only see what an explicit refresh has already exposed externally
            scope = SearcherScope.EXTERNAL;
        }
        return getFromSearcher(get, searcherFactory, scope);
    }
}
// engine startup (excerpt): open writer and translog, wire the searcher manager, then recover
try {
    final boolean create = engineConfig.isCreate();
    writer = createWriter(create);
    indexWriter = writer;
    translog = openTranslog(engineConfig, writer, create || skipInitialTranslogRecovery || engineConfig.forceNewTranslog());
    translogGeneration = translog.getGeneration();
    assert translogGeneration != null;
} catch (IOException | TranslogCorruptedException e) {
    throw new EngineCreationFailureException(shardId, "failed to create engine", e);
}
manager = createSearcherManager();
this.searcherManager = manager;
this.versionMap.setManager(searcherManager);
try {
    if (skipInitialTranslogRecovery) {
        // make sure we point at the latest translog from now on
        commitIndexWriter(writer, translog, lastCommittedSegmentInfos.getUserData().get(SYNC_COMMIT_ID));
    } else {
        recoverFromTranslog(engineConfig, translogGeneration);
    }
} catch (IOException | EngineException ex) {
    throw new EngineCreationFailureException(shardId, "failed to recover from translog", ex);
}
@Override
public InternalEngine recoverFromTranslog(TranslogRecoveryRunner translogRecoveryRunner, long recoverUpToSeqNo) throws IOException {
    flushLock.lock();
    try (ReleasableLock lock = readLock.acquire()) {
        ensureOpen();
        assert getMaxSeqNoOfUpdatesOrDeletes() != SequenceNumbers.UNASSIGNED_SEQ_NO
            || engineConfig.getIndexSettings().getIndexVersionCreated().before(Version.V_6_0_0) :
            "max_seq_no_of_updates is uninitialized";
        if (pendingTranslogRecovery.get() == false) {
            throw new IllegalStateException("Engine has already been recovered");
        }
        try {
            recoverFromTranslogInternal(translogRecoveryRunner, recoverUpToSeqNo);
        } catch (Exception e) {
            try {
                pendingTranslogRecovery.set(true); // just play safe and never allow commits on this see #ensureCanFlush
                failEngine("failed to recover from translog", e);
            } catch (Exception inner) {
                e.addSuppressed(inner);
            }
            throw e;
        }
    } finally {
        flushLock.unlock();
    }
    return this;
}
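// Hedged usage sketch (assumed call site; `applyOperation` is a stand-in for the shard-level
// replay logic): TranslogRecoveryRunner is a functional interface, so recovery can be driven
// with a lambda that drains the snapshot and reports how many operations it replayed.
engine.recoverFromTranslog((recoveringEngine, snapshot) -> {
    int opsRecovered = 0;
    Translog.Operation operation;
    while ((operation = snapshot.next()) != null) {
        applyOperation(recoveringEngine, operation);
        opsRecovered++;
    }
    return opsRecovered;
}, Long.MAX_VALUE); // no upper bound: replay every operation in the translog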
protected final DeletionStrategy planDeletionAsNonPrimary(Delete delete) throws IOException {
    assertNonPrimaryOrigin(delete);
    final DeletionStrategy plan;
    final OpVsLuceneDocStatus opVsLucene;
    if (delete.seqNo() == SequenceNumbers.UNASSIGNED_SEQ_NO) {
        // ops without a seq_no can only come from an old primary or a translog written by an old version
        assert config().getIndexSettings().getIndexVersionCreated().before(Version.V_6_0_0_alpha1) :
            "index is newly created but op has no sequence numbers. op: " + delete;
        opVsLucene = compareOpToLuceneDocBasedOnVersions(delete);
    } else {
        opVsLucene = compareOpToLuceneDocBasedOnSeqNo(delete);
    }
    if (opVsLucene == OpVsLuceneDocStatus.OP_STALE_OR_EQUAL) {
        plan = DeletionStrategy.processButSkipLucene(false, delete.seqNo(), delete.version());
    } else {
        plan = DeletionStrategy.processNormally(opVsLucene == OpVsLuceneDocStatus.LUCENE_DOC_NOT_FOUND,
            delete.seqNo(), delete.version());
    }
    return plan;
}
@Override
public void refresh(String source) throws EngineException {
    // we obtain a read lock here, since we don't want a flush to happen while we are refreshing
    // since it flushes the index as well (though, in terms of concurrency, we are allowed to do it)
    try (ReleasableLock lock = readLock.acquire()) {
        ensureOpen();
        searcherManager.maybeRefreshBlocking();
    } catch (AlreadyClosedException e) {
        ensureOpen();
        maybeFailEngine("refresh", e);
    } catch (EngineClosedException e) {
        throw e;
    } catch (Throwable t) {
        failEngine("refresh failed", t);
        throw new RefreshFailedEngineException(shardId, t);
    }
    // TODO: maybe we should just put a scheduled job in threadPool?
    // We check for pruning in each delete request, but we also prune here e.g. in case a delete burst comes in and then no more deletes
    // for a long time:
    maybePruneDeletedTombstones();
    versionMapRefreshPending.set(false);
    mergeScheduler.refreshConfig();
}
private void recoverFromTranslogInternal(TranslogRecoveryRunner translogRecoveryRunner, long recoverUpToSeqNo) throws IOException {
    Translog.TranslogGeneration translogGeneration = translog.getGeneration();
    final int opsRecovered;
    final long translogFileGen = Long.parseLong(lastCommittedSegmentInfos.getUserData().get(Translog.TRANSLOG_GENERATION_KEY));
    try (Translog.Snapshot snapshot = translog.newSnapshotFromGen(
            new Translog.TranslogGeneration(translog.getTranslogUUID(), translogFileGen), recoverUpToSeqNo)) {
        opsRecovered = translogRecoveryRunner.run(this, snapshot);
    } catch (Exception e) {
        throw new EngineException(shardId, "failed to recover from translog", e);
    }
    // flush if we recovered something or if we have references to older translogs
    // note: if opsRecovered == 0 and we have older translogs it means they are corrupted or 0 length.
    assert pendingTranslogRecovery.get() : "translogRecovery is not pending but should be";
    pendingTranslogRecovery.set(false); // we are good - now we can commit
    if (opsRecovered > 0) {
        logger.trace("flushing post recovery from translog. ops recovered [{}]. committed translog id [{}]. current id [{}]",
            opsRecovered, translogGeneration == null ? null : translogGeneration.translogFileGeneration, translog.currentFileGeneration());
        commitIndexWriter(indexWriter, translog, null);
        refreshLastCommittedSegmentInfos();
        refresh("translog_recovery");
    }
    translog.trimUnreferencedReaders();
}