/**
 * Writes all indexing changes to disk and opens a new searcher reflecting all changes. This can throw {@link AlreadyClosedException}.
 */
public void refresh(String source) {
    verifyNotClosed();
    if (logger.isTraceEnabled()) {
        logger.trace("refresh with source [{}]", source);
    }
    getEngine().refresh(source);
}
/**
 * Performs the last stages of recovery once all translog operations are done.
 * Note that you should still call {@link #postRecovery(String)}.
 */
public void finalizeRecovery() {
    recoveryState().setStage(RecoveryState.Stage.FINALIZE);
    Engine engine = getEngine();
    engine.refresh("recovery_finalization");
    engine.config().setEnableGcDeletes(true);
}
public void forceMerge(ForceMergeRequest forceMerge) throws IOException {
    verifyActive();
    if (logger.isTraceEnabled()) {
        logger.trace("force merge with {}", forceMerge);
    }
    Engine engine = getEngine();
    engine.forceMerge(forceMerge.flush(), forceMerge.maxNumSegments(), forceMerge.onlyExpungeDeletes(), false, false);
    if (forceMerge.flush()) {
        engine.refresh("force_merge"); // TODO this is technically wrong we should remove this in 7.0
    }
}
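For context, a minimal caller-side sketch of driving this method (the `shard` variable is an assumption for illustration; `ForceMergeRequest` exposes the `maxNumSegments`, `onlyExpungeDeletes`, and `flush` setters read above):

// Hypothetical usage: merge the shard down to a single segment and flush afterwards.
// `shard` is an IndexShard obtained elsewhere; the name is illustrative only.
ForceMergeRequest mergeRequest = new ForceMergeRequest()
    .maxNumSegments(1)
    .onlyExpungeDeletes(false)
    .flush(true);
shard.forceMerge(mergeRequest);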
public IndexShard postRecovery(String reason)
        throws IndexShardStartedException, IndexShardRelocatedException, IndexShardClosedException {
    synchronized (mutex) {
        if (state == IndexShardState.CLOSED) {
            throw new IndexShardClosedException(shardId);
        }
        if (state == IndexShardState.STARTED) {
            throw new IndexShardStartedException(shardId);
        }
        // we need to refresh again to expose all operations that were indexed until now. Otherwise
        // we may not expose operations that were indexed with a refresh listener that was immediately
        // responded to in addRefreshListener.
        getEngine().refresh("post_recovery");
        recoveryState.setStage(RecoveryState.Stage.DONE);
        changeState(IndexShardState.POST_RECOVERY, reason);
    }
    return this;
}
/**
 * Upgrades the shard to the current version of Lucene and returns the minimum segment version.
 */
public org.apache.lucene.util.Version upgrade(UpgradeRequest upgrade) throws IOException {
    verifyActive();
    if (logger.isTraceEnabled()) {
        logger.trace("upgrade with {}", upgrade);
    }
    org.apache.lucene.util.Version previousVersion = minimumCompatibleVersion();
    // we just want to upgrade the segments, not actually force merge to a single segment
    final Engine engine = getEngine();
    engine.forceMerge(true, // we need to flush at the end to make sure the upgrade is durable
        Integer.MAX_VALUE, // we just want to upgrade the segments, not actually optimize to a single segment
        false, true, upgrade.upgradeOnlyAncientSegments());
    engine.refresh("upgrade"); // TODO this is technically wrong we should remove this in 7.0
    org.apache.lucene.util.Version version = minimumCompatibleVersion();
    if (logger.isTraceEnabled()) {
        logger.trace("upgraded segments for {} from version {} to version {}", shardId, previousVersion, version);
    }
    return version;
}
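A small caller-side sketch, again assuming a `shard` variable purely for illustration; `UpgradeRequest#upgradeOnlyAncientSegments(boolean)` is the setter counterpart of the getter used above:

// Hypothetical usage: rewrite only segments written by ancient Lucene versions,
// then inspect the resulting minimum compatible segment version.
UpgradeRequest upgradeRequest = new UpgradeRequest().upgradeOnlyAncientSegments(true);
org.apache.lucene.util.Version minVersion = shard.upgrade(upgradeRequest);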
/**
 * Executes the given flush request against the engine.
 *
 * @param request the flush request
 * @return the commit ID
 */
public Engine.CommitId flush(FlushRequest request) {
    final boolean waitIfOngoing = request.waitIfOngoing();
    final boolean force = request.force();
    logger.trace("flush with {}", request);
    /*
     * We allow flushes while recovering since we allow operations to happen while recovering and we want to keep the translog under
     * control (up to deletes, which we do not GC). Yet, we do not use flush internally to clear deletes and flush the index writer
     * since we use Engine#writeIndexingBuffer for this now.
     */
    verifyNotClosed();
    final Engine engine = getEngine();
    if (engine.isRecovering()) {
        throw new IllegalIndexShardStateException(
            shardId(),
            state,
            "flush is only allowed if the engine is not recovering from the translog");
    }
    final long time = System.nanoTime();
    final Engine.CommitId commitId = engine.flush(force, waitIfOngoing);
    engine.refresh("flush"); // TODO this is technically wrong we should remove this in 7.0
    flushMetric.inc(System.nanoTime() - time);
    return commitId;
}
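A caller-side sketch of how this flush path is typically driven; the `shard` variable and the surrounding wiring are assumptions for illustration, while the `force` and `waitIfOngoing` setters come from `FlushRequest` itself:

// Hypothetical caller: issue a synchronous flush against a single shard.
// `shard` is an IndexShard obtained elsewhere; names here are illustrative only.
FlushRequest flushRequest = new FlushRequest()
    .force(false)          // do not force a new commit when nothing has changed
    .waitIfOngoing(true);  // block instead of failing if another flush is already running
Engine.CommitId commitId = shard.flush(flushRequest);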
public void doRefresh(final String source) throws EngineException {
    if (position < filters.length) {
        final EngineFilter filter = filters[position];
        position++;
        filter.doRefresh(source, this);
    } else {
        engine.refresh(source);
    }
}
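This snippet walks a filter chain: each registered EngineFilter sees the refresh before it finally reaches the engine. A minimal sketch of a filter that observes the call and delegates onward; the FilterChain name and the exact EngineFilter signature are assumptions inferred from the call above, not a confirmed API:

// Hypothetical filter, assuming EngineFilter#doRefresh(String, FilterChain) matches the delegation above.
public class LoggingRefreshFilter implements EngineFilter {
    @Override
    public void doRefresh(String source, FilterChain chain) throws EngineException {
        // observe the refresh, then hand control back to the chain
        logger.trace("refresh requested by [{}]", source);
        chain.doRefresh(source);
    }
}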
public void refresh(String source) {
    verifyNotClosed();
    if (logger.isTraceEnabled()) {
        logger.trace("refresh with source: {}", source);
    }
    long time = System.nanoTime();
    engine().refresh(source);
    refreshMetric.inc(System.nanoTime() - time);
}
/**
 * Performs the last stages of recovery once all translog operations are done.
 * Note that you should still call {@link #postRecovery(String)}.
 */
public void finalizeRecovery() {
    recoveryState().setStage(RecoveryState.Stage.FINALIZE);
    engine().refresh("recovery_finalization");
    startScheduledTasksIfNeeded();
    engineConfig.setEnableGcDeletes(true);
}
public synchronized void moveToStart() {
    synchronized (mutex) {
        if (state == IndexShardState.CREATED || state == IndexShardState.POST_RECOVERY) {
            // we want to refresh *before* we move to internal STARTED state
            try {
                getEngine().refresh("cluster_state_started");
            } catch (Throwable t) {
                logger.warn("failed to refresh due to move to cluster wide started", t);
            }
            /* ShardRouting(ShardId shardId, String currentNodeId, String relocatingNodeId, boolean primary,
                            ShardRoutingState state, RecoverySource recoverySource, UnassignedInfo unassignedInfo,
                            AllocationId allocationId, long expectedShardSize, Collection<Range<Token>> tokenRanges) */
            this.shardRouting = new ShardRouting(this.shardId,
                this.clusterService.localNode().getId(),
                null,
                true,
                ShardRoutingState.STARTED,
                null,
                IndexRoutingTable.UNASSIGNED_INFO_INDEX_CREATED,
                ShardRouting.DUMMY_ALLOCATION_ID,
                ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE,
                AbstractSearchStrategy.EMPTY_RANGE_TOKEN_LIST);
            changeState(IndexShardState.STARTED, "start shard [" + state + "]");
            indexEventListener.afterIndexShardStarted(this);
        }
    }
}