// Invoked by the merge scheduler when a background merge throws; fails the engine off-thread.
@Override protected void handleMergeException(final Directory dir, final Throwable exc) {
    engineConfig.getThreadPool().generic().execute(new AbstractRunnable() {
        @Override public void onFailure(Exception e) {
            // The generic pool rejected the task (e.g. during shutdown); best effort only, so just log.
            logger.debug("merge failure action rejected", e);
        }

        @Override protected void doRun() throws Exception {
            /*
             * We do this on another thread rather than the merge thread that we are initially called on so that we have complete
             * confidence that the call stack does not contain catch statements that would cause the error that might be thrown
             * here from being caught and never reaching the uncaught exception handler.
             */
            failEngine("merge failed", new MergePolicy.MergeException(exc, dir));
        }
    });
}
} // NOTE(review): closes the enclosing (anonymous) scheduler class — its declaration is outside this chunk
@Override public void maybePruneDeletes() { // It's expensive to prune because we walk the deletes map acquiring dirtyLock for each uid so we only do it // every 1/4 of gcDeletesInMillis: if (engineConfig.isEnableGcDeletes() && engineConfig.getThreadPool().relativeTimeInMillis() - lastDeleteVersionPruneTimeMSec > getGcDeletesInMillis() * 0.25) { pruneDeletedTombstones(); } }
// NOTE(review): fragment — the enclosing method and the rest of this AbstractRunnable are cut off
// in this chunk; this submits work to the dedicated FLUSH thread pool.
engineConfig.getThreadPool().executor(ThreadPool.Names.FLUSH).execute(new AbstractRunnable() { @Override public void onFailure(Exception e) {
/*
 * Prunes delete tombstones from the version map. A single trimming strategy is used on
 * both primaries and replicas: a tombstone is eligible for removal only if it is older
 * than the GC deletes interval AND its seq# is at or below the local checkpoint.
 *
 * The checkpoint constraint protects replicas against out-of-order delivery. Suppose o1
 * and o2 operate on the same doc with seq#(o1) < seq#(o2) and o2 arrives first; when o1
 * then arrives:
 *  - seq#(o1) <= LCP: o1 is not added to Lucene, as it was already accounted for.
 *  - seq#(o1) >  LCP: o2's seq# is still discoverable — in the version map if o2 was a
 *    delete (seq#(o2) > seq#(o1) > LCP, so it has not been pruned), or in Lucene / the
 *    version map if o2 was an indexing — so a lookup detects o1 as stale.
 *
 * One strategy everywhere keeps the code independent of primary/replica mode and, on
 * promotion, preserves the "deletes remembered for the last GC cycle" user experience,
 * at the cost of potentially more version-map memory than mode-specific trimming.
 */
private void pruneDeletedTombstones() {
    final long nowMillis = engineConfig.getThreadPool().relativeTimeInMillis();
    final long maxTimestampToPrune = nowMillis - engineConfig.getIndexSettings().getGcDeletesInMillis();
    versionMap.pruneTombstones(maxTimestampToPrune, localCheckpointTracker.getCheckpoint());
    lastDeleteVersionPruneTimeMSec = nowMillis;
}
/** resolves the current version of the document, returning null if not found */ private VersionValue resolveDocVersion(final Operation op, boolean loadSeqNo) throws IOException { assert incrementVersionLookup(); // used for asserting in tests VersionValue versionValue = getVersionFromMap(op.uid().bytes()); if (versionValue == null) { assert incrementIndexVersionLookup(); // used for asserting in tests final VersionsAndSeqNoResolver.DocIdAndVersion docIdAndVersion; try (Searcher searcher = acquireSearcher("load_version", SearcherScope.INTERNAL)) { docIdAndVersion = VersionsAndSeqNoResolver.loadDocIdAndVersion(searcher.reader(), op.uid(), loadSeqNo); } if (docIdAndVersion != null) { versionValue = new IndexVersionValue(null, docIdAndVersion.version, docIdAndVersion.seqNo, docIdAndVersion.primaryTerm); } } else if (engineConfig.isEnableGcDeletes() && versionValue.isDelete() && (engineConfig.getThreadPool().relativeTimeInMillis() - ((DeleteVersionValue)versionValue).time) > getGcDeletesInMillis()) { versionValue = null; } return versionValue; }
// Record the delete as a timestamped tombstone in the live version map so later lookups
// (and out-of-order operations) can observe it; the timestamp lets the GC prune it after
// gcDeletesInMillis. NOTE(review): method name suggests this must run under the uid's
// dirty lock — confirm at the (not visible) call site.
versionMap.putDeleteUnderLock(delete.uid().bytes(),
    new DeleteVersionValue(plan.versionOfDeletion, plan.seqNoOfDeletion, delete.primaryTerm(),
        engineConfig.getThreadPool().relativeTimeInMillis()));
// Invoked by the merge scheduler when a background merge throws: log immediately, then
// fail the engine from the generic pool.
@Override protected void handleMergeException(final Directory dir, final Throwable exc) {
    logger.error("failed to merge", exc);
    engineConfig.getThreadPool().generic().execute(new AbstractRunnable() {
        @Override public void onFailure(Exception e) {
            // The generic pool rejected the task (e.g. during shutdown); just log.
            logger.debug("merge failure action rejected", e);
        }

        @Override protected void doRun() throws Exception {
            // Fail the engine off the merge thread so no catch frame on the merge call stack
            // can swallow whatever failEngine throws before it reaches the uncaught handler.
            MergePolicy.MergeException e = new MergePolicy.MergeException(exc, dir);
            failEngine("merge failed", e);
        }
    });
}
} // NOTE(review): closes the enclosing (anonymous) scheduler class — its declaration is outside this chunk
// Invoked by the merge scheduler when a background merge throws: log immediately, then
// fail the engine from the generic pool. (Duplicate of an identical override elsewhere
// in this file — presumably a different engine/scheduler variant.)
@Override protected void handleMergeException(final Directory dir, final Throwable exc) {
    logger.error("failed to merge", exc);
    engineConfig.getThreadPool().generic().execute(new AbstractRunnable() {
        @Override public void onFailure(Exception e) {
            // The generic pool rejected the task (e.g. during shutdown); just log.
            logger.debug("merge failure action rejected", e);
        }

        @Override protected void doRun() throws Exception {
            // Fail the engine off the merge thread so no catch frame on the merge call stack
            // can swallow whatever failEngine throws before it reaches the uncaught handler.
            MergePolicy.MergeException e = new MergePolicy.MergeException(exc, dir);
            failEngine("merge failed", e);
        }
    });
}
} // NOTE(review): closes the enclosing (anonymous) scheduler class — its declaration is outside this chunk
// Invoked by the merge scheduler when a background merge throws; fails the engine off-thread.
@Override protected void handleMergeException(final Directory dir, final Throwable exc) {
    engineConfig.getThreadPool().generic().execute(new AbstractRunnable() {
        @Override public void onFailure(Exception e) {
            // The generic pool rejected the task (e.g. during shutdown); best effort only, so just log.
            logger.debug("merge failure action rejected", e);
        }

        @Override protected void doRun() throws Exception {
            /*
             * We do this on another thread rather than the merge thread that we are initially called on so that we have complete
             * confidence that the call stack does not contain catch statements that would cause the error that might be thrown
             * here from being caught and never reaching the uncaught exception handler.
             */
            failEngine("merge failed", new MergePolicy.MergeException(exc, dir));
        }
    });
}
} // NOTE(review): closes the enclosing (anonymous) scheduler class — its declaration is outside this chunk
// NOTE(review): fragment — the enclosing constructor/initializer and the matching
// catch/finally are cut off in this chunk. Seeds the prune clock with "now" so the
// first maybePruneDeletes() waits a full rate-limit window.
boolean success = false;
try {
    this.lastDeleteVersionPruneTimeMSec = engineConfig.getThreadPool().relativeTimeInMillis();
private void maybePruneDeletedTombstones() { // It's expensive to prune because we walk the deletes map acquiring dirtyLock for each uid so we only do it // every 1/4 of gcDeletesInMillis: if (engineConfig.isEnableGcDeletes() && engineConfig.getThreadPool().relativeTimeInMillis() - lastDeleteVersionPruneTimeMSec > getGcDeletesInMillis() * 0.25) { pruneDeletedTombstones(); } }
@Override public void maybePruneDeletes() { // It's expensive to prune because we walk the deletes map acquiring dirtyLock for each uid so we only do it // every 1/4 of gcDeletesInMillis: if (engineConfig.isEnableGcDeletes() && engineConfig.getThreadPool().relativeTimeInMillis() - lastDeleteVersionPruneTimeMSec > getGcDeletesInMillis() * 0.25) { pruneDeletedTombstones(); } }
private void maybePruneDeletedTombstones() { // It's expensive to prune because we walk the deletes map acquiring dirtyLock for each uid so we only do it // every 1/4 of gcDeletesInMillis: if (engineConfig.isEnableGcDeletes() && engineConfig.getThreadPool().estimatedTimeInMillis() - lastDeleteVersionPruneTimeMSec > engineConfig.getGcDeletesInMillis() * 0.25) { pruneDeletedTombstones(); } }
private void maybePruneDeletedTombstones() { // It's expensive to prune because we walk the deletes map acquiring dirtyLock for each uid so we only do it // every 1/4 of gcDeletesInMillis: if (engineConfig.isEnableGcDeletes() && engineConfig.getThreadPool().relativeTimeInMillis() - lastDeleteVersionPruneTimeMSec > getGcDeletesInMillis() * 0.25) { pruneDeletedTombstones(); } }
// Legacy prune path: walks every tombstone in the live version map and removes entries
// older than the GC deletes interval, re-reading each entry under its per-uid lock so a
// concurrent index/delete on the same uid is not clobbered.
private void pruneDeletedTombstones() {
    long timeMSec = engineConfig.getThreadPool().relativeTimeInMillis();
    // TODO: not good that we reach into LiveVersionMap here; can we move this inside VersionMap instead? problem is the dirtyLock...
    // we only need to prune the deletes map; the current/old version maps are cleared on refresh:
    for (Map.Entry<BytesRef, VersionValue> entry : versionMap.getAllTombstones()) {
        BytesRef uid = entry.getKey();
        try (Releasable ignored = acquireLock(uid)) { // can we do it without this lock on each value? maybe batch to a set and get the lock once per set?
            // Must re-get it here, vs using entry.getValue(), in case the uid was indexed/deleted since we pulled the iterator:
            VersionValue versionValue = versionMap.getTombstoneUnderLock(uid);
            if (versionValue != null) {
                if (timeMSec - versionValue.getTime() > getGcDeletesInMillis()) {
                    versionMap.removeTombstoneUnderLock(uid);
                }
            }
        }
    }
    lastDeleteVersionPruneTimeMSec = timeMSec;
}
/** resolves the current version of the document, returning null if not found */ private VersionValue resolveDocVersion(final Operation op) throws IOException { assert incrementVersionLookup(); // used for asserting in tests //VersionValue versionValue = versionMap.getUnderLock(op.uid()); VersionValue versionValue = null; if (versionValue == null) { assert incrementIndexVersionLookup(); // used for asserting in tests final long currentVersion = loadCurrentVersionFromIndex(op.uid()); if (currentVersion != Versions.NOT_FOUND) { versionValue = new VersionValue(currentVersion); } } else if (engineConfig.isEnableGcDeletes() && versionValue.isDelete() && (engineConfig.getThreadPool().relativeTimeInMillis() - versionValue.getTime()) > getGcDeletesInMillis()) { versionValue = null; } return versionValue; }
/** resolves the current version of the document, returning null if not found */ private VersionValue resolveDocVersion(final Operation op) throws IOException { assert incrementVersionLookup(); // used for asserting in tests VersionValue versionValue = getVersionFromMap(op.uid().bytes()); if (versionValue == null) { assert incrementIndexVersionLookup(); // used for asserting in tests final long currentVersion = loadCurrentVersionFromIndex(op.uid()); if (currentVersion != Versions.NOT_FOUND) { versionValue = new IndexVersionValue(null, currentVersion, SequenceNumbers.UNASSIGNED_SEQ_NO, 0L); } } else if (engineConfig.isEnableGcDeletes() && versionValue.isDelete() && (engineConfig.getThreadPool().relativeTimeInMillis() - ((DeleteVersionValue)versionValue).time) > getGcDeletesInMillis()) { versionValue = null; } return versionValue; }
/** resolves the current version of the document, returning null if not found */ private VersionValue resolveDocVersion(final Operation op) throws IOException { assert incrementVersionLookup(); // used for asserting in tests VersionValue versionValue = versionMap.getUnderLock(op.uid()); if (versionValue == null) { assert incrementIndexVersionLookup(); // used for asserting in tests final long currentVersion = loadCurrentVersionFromIndex(op.uid()); if (currentVersion != Versions.NOT_FOUND) { versionValue = new VersionValue(currentVersion); } } else if (engineConfig.isEnableGcDeletes() && versionValue.isDelete() && (engineConfig.getThreadPool().relativeTimeInMillis() - versionValue.getTime()) > getGcDeletesInMillis()) { versionValue = null; } return versionValue; }
/**
 * Returns a copy of {@code config} with the given open mode and global checkpoint
 * supplier substituted. The codec service is rebuilt and an empty list is passed for
 * one of the listener parameters (presumably internal refresh listeners — confirm
 * against EngineConfig's constructor); every other setting is carried over unchanged.
 */
public EngineConfig copy(EngineConfig config, EngineConfig.OpenMode openMode, LongSupplier globalCheckpointSupplier) {
    final EngineConfig copied = new EngineConfig(openMode, config.getShardId(), config.getAllocationId(),
        config.getThreadPool(), config.getIndexSettings(), config.getWarmer(), config.getStore(),
        config.getMergePolicy(), config.getAnalyzer(), config.getSimilarity(), new CodecService(null, logger),
        config.getEventListener(), config.getQueryCache(), config.getQueryCachingPolicy(),
        config.getForceNewHistoryUUID(), config.getTranslogConfig(), config.getFlushMergesAfter(),
        config.getExternalRefreshListener(), Collections.emptyList(), config.getIndexSort(),
        config.getTranslogRecoveryRunner(), config.getCircuitBreakerService(), globalCheckpointSupplier);
    return copied;
}
/**
 * Returns a copy of {@code config} with the given analyzer substituted. The codec
 * service is rebuilt and an empty list is passed for one of the listener parameters
 * (presumably internal refresh listeners — confirm against EngineConfig's constructor);
 * every other setting, including the global checkpoint supplier, is carried over.
 */
public EngineConfig copy(EngineConfig config, EngineConfig.OpenMode openMode, Analyzer analyzer) {
    final EngineConfig copied = new EngineConfig(openMode, config.getShardId(), config.getAllocationId(),
        config.getThreadPool(), config.getIndexSettings(), config.getWarmer(), config.getStore(),
        config.getMergePolicy(), analyzer, config.getSimilarity(), new CodecService(null, logger),
        config.getEventListener(), config.getQueryCache(), config.getQueryCachingPolicy(),
        config.getForceNewHistoryUUID(), config.getTranslogConfig(), config.getFlushMergesAfter(),
        config.getExternalRefreshListener(), Collections.emptyList(), config.getIndexSort(),
        config.getTranslogRecoveryRunner(), config.getCircuitBreakerService(), config.getGlobalCheckpointSupplier());
    return copied;
}