/**
 * Creates a snapshot of operation history backed by the Lucene index.
 * Requires soft-deletes to be enabled on the index; otherwise the Lucene index does not
 * retain the deleted/updated documents needed to replay history.
 *
 * @throws IllegalStateException if soft-deletes are disabled for this index
 */
@Override
public Translog.Snapshot newChangesSnapshot(String source, MapperService mapperService, long fromSeqNo, long toSeqNo, boolean requiredFullRange) throws IOException {
    if (engineConfig.getIndexSettings().isSoftDeleteEnabled() == false) {
        throw new IllegalStateException("accessing changes snapshot requires soft-deletes enabled");
    }
    // NOTE(review): toSeqNo and requiredFullRange are silently discarded by this delegation.
    // Also, readHistoryOperations in this codebase delegates back to newChangesSnapshot when
    // soft-deletes are enabled — if both methods live in the same class this is unbounded
    // mutual recursion. Confirm this delegation target; a Lucene-based snapshot implementation
    // honoring [fromSeqNo, toSeqNo] and requiredFullRange was likely intended here.
    return readHistoryOperations(source, mapperService, fromSeqNo);
}
/**
 * Creates a new history snapshot for reading operations whose sequence number is at least
 * {@code startingSeqNo}. The operations are read from the Lucene index when soft-deletes are
 * enabled, and from the translog files otherwise.
 */
@Override
public Translog.Snapshot readHistoryOperations(String source, MapperService mapperService, long startingSeqNo) throws IOException {
    if (engineConfig.getIndexSettings().isSoftDeleteEnabled() == false) {
        return getTranslog().newSnapshotFromMinSeqNo(startingSeqNo);
    }
    // A negative starting seq# (e.g. an unassigned marker) is clamped to 0 for the Lucene snapshot.
    return newChangesSnapshot(source, mapperService, Math.max(0, startingSeqNo), Long.MAX_VALUE, false);
}
/**
 * Wraps a raw {@link DirectoryReader}: first with the shard-aware Elasticsearch wrapper,
 * then (when soft-deletes are enabled) with a wrapper that hides soft-deleted documents,
 * and finally with the caller-supplied wrapper function.
 */
protected final DirectoryReader wrapReader(DirectoryReader reader, Function<DirectoryReader, DirectoryReader> readerWrapperFunction) throws IOException {
    DirectoryReader wrapped = ElasticsearchDirectoryReader.wrap(reader, engineConfig.getShardId());
    if (engineConfig.getIndexSettings().isSoftDeleteEnabled()) {
        wrapped = new SoftDeletesDirectoryReaderWrapper(wrapped, Lucene.SOFT_DELETES_FIELD);
    }
    return readerWrapperFunction.apply(wrapped);
}
private static LocalCheckpointTracker createLocalCheckpointTracker(EngineConfig engineConfig, SegmentInfos lastCommittedSegmentInfos, Logger logger, Supplier<Searcher> searcherSupplier, BiFunction<Long, Long, LocalCheckpointTracker> localCheckpointTrackerSupplier) { try { final SequenceNumbers.CommitInfo seqNoStats = SequenceNumbers.loadSeqNoInfoFromLuceneCommit(lastCommittedSegmentInfos.userData.entrySet()); final long maxSeqNo = seqNoStats.maxSeqNo; final long localCheckpoint = seqNoStats.localCheckpoint; logger.trace("recovered maximum sequence number [{}] and local checkpoint [{}]", maxSeqNo, localCheckpoint); final LocalCheckpointTracker tracker = localCheckpointTrackerSupplier.apply(maxSeqNo, localCheckpoint); // Operations that are optimized using max_seq_no_of_updates optimization must not be processed twice; otherwise, they will // create duplicates in Lucene. To avoid this we check the LocalCheckpointTracker to see if an operation was already processed. // Thus, we need to restore the LocalCheckpointTracker bit by bit to ensure the consistency between LocalCheckpointTracker and // Lucene index. This is not the only solution since we can bootstrap max_seq_no_of_updates with max_seq_no of the commit to // disable the MSU optimization during recovery. Here we prefer to maintain the consistency of LocalCheckpointTracker. if (localCheckpoint < maxSeqNo && engineConfig.getIndexSettings().isSoftDeleteEnabled()) { try (Searcher searcher = searcherSupplier.get()) { Lucene.scanSeqNosInReader(searcher.getDirectoryReader(), localCheckpoint + 1, maxSeqNo, tracker::markSeqNoAsCompleted); } } return tracker; } catch (IOException ex) { throw new EngineCreationFailureException(engineConfig.getShardId(), "failed to create local checkpoint tracker", ex); } }
/**
 * Returns the estimated number of history operations whose seq# is at least the provided
 * {@code startingSeqNo}. The estimate comes from a Lucene changes snapshot when soft-deletes
 * are enabled, and from the translog otherwise.
 */
@Override
public int estimateNumberOfHistoryOperations(String source, MapperService mapperService, long startingSeqNo) throws IOException {
    if (engineConfig.getIndexSettings().isSoftDeleteEnabled() == false) {
        return getTranslog().estimateTotalOperationsFromMinSeq(startingSeqNo);
    }
    // The snapshot is opened only to read its total count; try-with-resources releases it promptly.
    try (Translog.Snapshot snapshot = newChangesSnapshot(source, mapperService, Math.max(0, startingSeqNo), Long.MAX_VALUE, false)) {
        return snapshot.totalOperations();
    }
}
@Override protected void parseCreateField(ParseContext context, List<IndexableField> fields) throws IOException { BytesReference originalSource = context.sourceToParse().source(); BytesReference source = originalSource; if (enabled && fieldType().stored() && source != null) { // Percolate and tv APIs may not set the source and that is ok, because these APIs will not index any data if (filter != null) { // we don't update the context source if we filter, we want to keep it as is... Tuple<XContentType, Map<String, Object>> mapTuple = XContentHelper.convertToMap(source, true, context.sourceToParse().getXContentType()); Map<String, Object> filteredSource = filter.apply(mapTuple.v2()); BytesStreamOutput bStream = new BytesStreamOutput(); XContentType contentType = mapTuple.v1(); XContentBuilder builder = XContentFactory.contentBuilder(contentType, bStream).map(filteredSource); builder.close(); source = bStream.bytes(); } BytesRef ref = source.toBytesRef(); fields.add(new StoredField(fieldType().name(), ref.bytes, ref.offset, ref.length)); } else { source = null; } if (originalSource != null && source != originalSource && context.indexSettings().isSoftDeleteEnabled()) { // if we omitted source or modified it we add the _recovery_source to ensure we have it for ops based recovery BytesRef ref = originalSource.toBytesRef(); fields.add(new StoredField(RECOVERY_SOURCE_NAME, ref.bytes, ref.offset, ref.length)); fields.add(new NumericDocValuesField(RECOVERY_SOURCE_NAME, 1)); } }
/**
 * Checks whether this engine still retains every operation with seq# at or above
 * {@code startingSeqNo}. With soft-deletes the answer comes from the retained-seq# floor;
 * otherwise the translog is scanned to verify there is no gap below the current local checkpoint.
 */
@Override
public boolean hasCompleteOperationHistory(String source, MapperService mapperService, long startingSeqNo) throws IOException {
    if (engineConfig.getIndexSettings().isSoftDeleteEnabled()) {
        return getMinRetainedSeqNo() <= startingSeqNo;
    }
    final long localCheckpointNow = getLocalCheckpointTracker().getCheckpoint();
    // Seed the scratch tracker so its checkpoint starts just below startingSeqNo and advances
    // only through contiguous seq#s found in the translog.
    final LocalCheckpointTracker scratch = new LocalCheckpointTracker(startingSeqNo, startingSeqNo - 1);
    try (Translog.Snapshot snapshot = getTranslog().newSnapshotFromMinSeqNo(startingSeqNo)) {
        for (Translog.Operation op = snapshot.next(); op != null; op = snapshot.next()) {
            if (op.seqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO) {
                scratch.markSeqNoAsCompleted(op.seqNo());
            }
        }
    }
    // Complete history iff the translog filled every seq# up to the engine's local checkpoint.
    return scratch.getCheckpoint() >= localCheckpointNow;
}
startingSeqNo = shard.indexSettings().isSoftDeleteEnabled() ? requiredSeqNoRangeStart : 0; try { final int estimateNumOps = shard.estimateNumberOfHistoryOperations("peer-recovery", startingSeqNo);
assert translog.getGeneration() != null; this.translog = translog; this.softDeleteEnabled = engineConfig.getIndexSettings().isSoftDeleteEnabled(); this.softDeletesPolicy = newSoftDeletesPolicy(); this.combinedDeletionPolicy =
/**
 * Creates a new history snapshot for reading operations whose sequence number is at least
 * {@code startingSeqNo}. The operations are read from the Lucene index when soft-deletes are
 * enabled, and from the translog files otherwise.
 */
@Override
public Translog.Snapshot readHistoryOperations(String source, MapperService mapperService, long startingSeqNo) throws IOException {
    if (engineConfig.getIndexSettings().isSoftDeleteEnabled() == false) {
        return getTranslog().newSnapshotFromMinSeqNo(startingSeqNo);
    }
    // A negative starting seq# (e.g. an unassigned marker) is clamped to 0 for the Lucene snapshot.
    return newChangesSnapshot(source, mapperService, Math.max(0, startingSeqNo), Long.MAX_VALUE, false);
}
/**
 * Returns the estimated number of history operations whose seq# is at least the provided
 * {@code startingSeqNo}. The estimate comes from a Lucene changes snapshot when soft-deletes
 * are enabled, and from the translog otherwise.
 */
@Override
public int estimateNumberOfHistoryOperations(String source, MapperService mapperService, long startingSeqNo) throws IOException {
    if (engineConfig.getIndexSettings().isSoftDeleteEnabled() == false) {
        return getTranslog().estimateTotalOperationsFromMinSeq(startingSeqNo);
    }
    // The snapshot is opened only to read its total count; try-with-resources releases it promptly.
    try (Translog.Snapshot snapshot = newChangesSnapshot(source, mapperService, Math.max(0, startingSeqNo), Long.MAX_VALUE, false)) {
        return snapshot.totalOperations();
    }
}
private static LocalCheckpointTracker createLocalCheckpointTracker(EngineConfig engineConfig, SegmentInfos lastCommittedSegmentInfos, Logger logger, Supplier<Searcher> searcherSupplier, BiFunction<Long, Long, LocalCheckpointTracker> localCheckpointTrackerSupplier) { try { final SequenceNumbers.CommitInfo seqNoStats = SequenceNumbers.loadSeqNoInfoFromLuceneCommit(lastCommittedSegmentInfos.userData.entrySet()); final long maxSeqNo = seqNoStats.maxSeqNo; final long localCheckpoint = seqNoStats.localCheckpoint; logger.trace("recovered maximum sequence number [{}] and local checkpoint [{}]", maxSeqNo, localCheckpoint); final LocalCheckpointTracker tracker = localCheckpointTrackerSupplier.apply(maxSeqNo, localCheckpoint); // Operations that are optimized using max_seq_no_of_updates optimization must not be processed twice; otherwise, they will // create duplicates in Lucene. To avoid this we check the LocalCheckpointTracker to see if an operation was already processed. // Thus, we need to restore the LocalCheckpointTracker bit by bit to ensure the consistency between LocalCheckpointTracker and // Lucene index. This is not the only solution since we can bootstrap max_seq_no_of_updates with max_seq_no of the commit to // disable the MSU optimization during recovery. Here we prefer to maintain the consistency of LocalCheckpointTracker. if (localCheckpoint < maxSeqNo && engineConfig.getIndexSettings().isSoftDeleteEnabled()) { try (Searcher searcher = searcherSupplier.get()) { Lucene.scanSeqNosInReader(searcher.getDirectoryReader(), localCheckpoint + 1, maxSeqNo, tracker::markSeqNoAsCompleted); } } return tracker; } catch (IOException ex) { throw new EngineCreationFailureException(engineConfig.getShardId(), "failed to create local checkpoint tracker", ex); } }
@Override protected void parseCreateField(ParseContext context, List<IndexableField> fields) throws IOException { BytesReference originalSource = context.sourceToParse().source(); BytesReference source = originalSource; if (enabled && fieldType().stored() && source != null) { // Percolate and tv APIs may not set the source and that is ok, because these APIs will not index any data if (filter != null) { // we don't update the context source if we filter, we want to keep it as is... Tuple<XContentType, Map<String, Object>> mapTuple = XContentHelper.convertToMap(source, true, context.sourceToParse().getXContentType()); Map<String, Object> filteredSource = filter.apply(mapTuple.v2()); BytesStreamOutput bStream = new BytesStreamOutput(); XContentType contentType = mapTuple.v1(); XContentBuilder builder = XContentFactory.contentBuilder(contentType, bStream).map(filteredSource); builder.close(); source = bStream.bytes(); } BytesRef ref = source.toBytesRef(); fields.add(new StoredField(fieldType().name(), ref.bytes, ref.offset, ref.length)); } else { source = null; } if (originalSource != null && source != originalSource && context.indexSettings().isSoftDeleteEnabled()) { // if we omitted source or modified it we add the _recovery_source to ensure we have it for ops based recovery BytesRef ref = originalSource.toBytesRef(); fields.add(new StoredField(RECOVERY_SOURCE_NAME, ref.bytes, ref.offset, ref.length)); fields.add(new NumericDocValuesField(RECOVERY_SOURCE_NAME, 1)); } }
/**
 * Checks whether this engine still retains every operation with seq# at or above
 * {@code startingSeqNo}. With soft-deletes the answer comes from the retained-seq# floor;
 * otherwise the translog is scanned to verify there is no gap below the current local checkpoint.
 */
@Override
public boolean hasCompleteOperationHistory(String source, MapperService mapperService, long startingSeqNo) throws IOException {
    if (engineConfig.getIndexSettings().isSoftDeleteEnabled()) {
        return getMinRetainedSeqNo() <= startingSeqNo;
    }
    final long localCheckpointNow = getLocalCheckpointTracker().getCheckpoint();
    // Seed the scratch tracker so its checkpoint starts just below startingSeqNo and advances
    // only through contiguous seq#s found in the translog.
    final LocalCheckpointTracker scratch = new LocalCheckpointTracker(startingSeqNo, startingSeqNo - 1);
    try (Translog.Snapshot snapshot = getTranslog().newSnapshotFromMinSeqNo(startingSeqNo)) {
        for (Translog.Operation op = snapshot.next(); op != null; op = snapshot.next()) {
            if (op.seqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO) {
                scratch.markSeqNoAsCompleted(op.seqNo());
            }
        }
    }
    // Complete history iff the translog filled every seq# up to the engine's local checkpoint.
    return scratch.getCheckpoint() >= localCheckpointNow;
}
this.seqNoStats = seqNoStats == null ? buildSeqNoStats(lastCommittedSegmentInfos) : seqNoStats; reader = ElasticsearchDirectoryReader.wrap(open(directory), config.getShardId()); if (config.getIndexSettings().isSoftDeleteEnabled()) { reader = new SoftDeletesDirectoryReaderWrapper(reader, Lucene.SOFT_DELETES_FIELD);
this.seqNoStats = seqNoStats == null ? buildSeqNoStats(lastCommittedSegmentInfos) : seqNoStats; reader = ElasticsearchDirectoryReader.wrap(open(directory), config.getShardId()); if (config.getIndexSettings().isSoftDeleteEnabled()) { reader = new SoftDeletesDirectoryReaderWrapper(reader, Lucene.SOFT_DELETES_FIELD);
startingSeqNo = shard.indexSettings().isSoftDeleteEnabled() ? requiredSeqNoRangeStart : 0; try { final int estimateNumOps = shard.estimateNumberOfHistoryOperations("peer-recovery", startingSeqNo);
assert translog.getGeneration() != null; this.translog = translog; this.softDeleteEnabled = engineConfig.getIndexSettings().isSoftDeleteEnabled(); this.softDeletesPolicy = newSoftDeletesPolicy(); this.combinedDeletionPolicy =