private void maybeFSyncTranslogs() { if (indexSettings.getTranslogDurability() == Translog.Durability.ASYNC) { for (IndexShard shard : this.shards.values()) { try { if (shard.isSyncNeeded()) { shard.sync(); } } catch (AlreadyClosedException ex) { // fine - continue; } catch (IOException e) { logger.warn("failed to sync translog", e); } } } }
private void maybeSyncTranslog(final IndexShard indexShard) throws IOException {
    // Fsync the translog only for shards with request-level durability whose
    // persisted global checkpoint lags behind the in-memory one.
    final boolean requestDurability =
        indexShard.getTranslogDurability() == Translog.Durability.REQUEST;
    if (requestDurability
        && indexShard.getLastSyncedGlobalCheckpoint() < indexShard.getGlobalCheckpoint()) {
        indexShard.sync();
    }
}
@Override public void finalizeRecovery(final long globalCheckpoint) throws IOException { final IndexShard indexShard = indexShard(); indexShard.updateGlobalCheckpointOnReplica(globalCheckpoint, "finalizing recovery"); // Persist the global checkpoint. indexShard.sync(); indexShard.finalizeRecovery(); }
void run() { /* * We either respond immediately (i.e., if we do not fsync per request or wait for * refresh), or we there are past async operations and we wait for them to return to * respond. */ indexShard.afterWriteOperation(); // decrement pending by one, if there is nothing else to do we just respond with success maybeFinish(); if (waitUntilRefresh) { assert pendingOps.get() > 0; indexShard.addRefreshListener(location, forcedRefresh -> { if (forcedRefresh) { logger.warn( "block until refresh ran out of slots and forced a refresh: [{}]", request); } refreshed.set(forcedRefresh); maybeFinish(); }); } if (sync) { assert pendingOps.get() > 0; indexShard.sync(location, (ex) -> { syncFailure.set(ex); maybeFinish(); }); } } }
// Fsync so the current global checkpoint is durable on disk before the
// stats below are captured.
sync(); // persist the global checkpoint to disk
final SeqNoStats seqNoStats = seqNoStats();
final TranslogStats translogStats = translogStats();
private void maybeFSyncTranslogs() { if (indexSettings.getTranslogDurability() == Translog.Durability.ASYNC) { for (IndexShard shard : this.shards.values()) { try { if (shard.isSyncNeeded()) { shard.sync(); } } catch (AlreadyClosedException ex) { // fine - continue; } catch (IOException e) { logger.warn("failed to sync translog", e); } } } }
private void maybeFSyncTranslogs() { if (indexSettings.getTranslogDurability() == Translog.Durability.ASYNC) { for (IndexShard shard : this.shards.values()) { try { if (shard.isSyncNeeded()) { shard.sync(); } } catch (AlreadyClosedException ex) { // fine - continue; } catch (IOException e) { logger.warn("failed to sync translog", e); } } } }
// Fsync the shard's translog so everything written so far is durable on disk.
indexShard().sync();
protected final void processAfterWrite(boolean refresh, IndexShard indexShard, Translog.Location location) { if (refresh) { try { indexShard.refresh("refresh_flag_index"); } catch (Throwable e) { // ignore } } if (indexShard.getTranslogDurability() == Translog.Durabilty.REQUEST && location != null) { indexShard.sync(location); } }
@Override public void finalizeRecovery(final long globalCheckpoint) throws IOException { final IndexShard indexShard = indexShard(); indexShard.updateGlobalCheckpointOnReplica(globalCheckpoint, "finalizing recovery"); // Persist the global checkpoint. indexShard.sync(); indexShard.finalizeRecovery(); }
@Override public void finalizeRecovery(final long globalCheckpoint) throws IOException { final IndexShard indexShard = indexShard(); indexShard.updateGlobalCheckpointOnReplica(globalCheckpoint, "finalizing recovery"); // Persist the global checkpoint. indexShard.sync(); indexShard.finalizeRecovery(); }
private void maybeSyncTranslog(final IndexShard indexShard) throws IOException {
    // Fsync the translog only for shards with request-level durability whose
    // persisted global checkpoint lags behind the in-memory one.
    final boolean requestDurability =
        indexShard.getTranslogDurability() == Translog.Durability.REQUEST;
    if (requestDurability
        && indexShard.getLastSyncedGlobalCheckpoint() < indexShard.getGlobalCheckpoint()) {
        indexShard.sync();
    }
}
private void maybeSyncTranslog(final IndexShard indexShard) throws IOException {
    // Fsync the translog only for shards with request-level durability whose
    // persisted global checkpoint lags behind the in-memory one.
    final boolean requestDurability =
        indexShard.getTranslogDurability() == Translog.Durability.REQUEST;
    if (requestDurability
        && indexShard.getLastSyncedGlobalCheckpoint() < indexShard.getGlobalCheckpoint()) {
        indexShard.sync();
    }
}
void run() { // we either respond immediately ie. if we we don't fsync per request or wait for refresh // OR we got an pass async operations on and wait for them to return to respond. indexShard.maybeFlush(); maybeFinish(); // decrement the pendingOpts by one, if there is nothing else to do we just respond with success. if (waitUntilRefresh) { assert pendingOps.get() > 0; indexShard.addRefreshListener(location, forcedRefresh -> { if (forcedRefresh) { logger.warn("block_until_refresh request ran out of slots and forced a refresh: [{}]", request); } refreshed.set(forcedRefresh); maybeFinish(); }); } if (sync) { assert pendingOps.get() > 0; indexShard.sync(location, (ex) -> { syncFailure.set(ex); maybeFinish(); }); } } }
void run() { /* * We either respond immediately (i.e., if we do not fsync per request or wait for * refresh), or we there are past async operations and we wait for them to return to * respond. */ indexShard.afterWriteOperation(); // decrement pending by one, if there is nothing else to do we just respond with success maybeFinish(); if (waitUntilRefresh) { assert pendingOps.get() > 0; indexShard.addRefreshListener(location, forcedRefresh -> { if (forcedRefresh) { logger.warn( "block until refresh ran out of slots and forced a refresh: [{}]", request); } refreshed.set(forcedRefresh); maybeFinish(); }); } if (sync) { assert pendingOps.get() > 0; indexShard.sync(location, (ex) -> { syncFailure.set(ex); maybeFinish(); }); } } }
void run() { /* * We either respond immediately (i.e., if we do not fsync per request or wait for * refresh), or we there are past async operations and we wait for them to return to * respond. */ indexShard.afterWriteOperation(); // decrement pending by one, if there is nothing else to do we just respond with success maybeFinish(); if (waitUntilRefresh) { assert pendingOps.get() > 0; indexShard.addRefreshListener(location, forcedRefresh -> { if (forcedRefresh) { logger.warn( "block until refresh ran out of slots and forced a refresh: [{}]", request); } refreshed.set(forcedRefresh); maybeFinish(); }); } if (sync) { assert pendingOps.get() > 0; indexShard.sync(location, (ex) -> { syncFailure.set(ex); maybeFinish(); }); } } }
/**
 * Rollback the current engine to the safe commit, then replay local translog up to the global checkpoint.
 *
 * @throws IOException if closing the old engine, trimming commits, or replaying the translog fails
 */
void resetEngineToGlobalCheckpoint() throws IOException {
    // Resetting the engine is only safe when no write operations are in flight.
    assert getActiveOperationsCount() == 0 : "Ongoing writes [" + getActiveOperations() + "]";
    sync(); // persist the global checkpoint to disk
    // Capture the checkpoint before swapping engines; replay stops here.
    final long globalCheckpoint = getGlobalCheckpoint();
    final Engine newEngine;
    synchronized (mutex) {
        verifyNotClosed();
        // Swap out and close the old engine atomically before trimming
        // commits and creating the replacement engine.
        IOUtils.close(currentEngineReference.getAndSet(null));
        trimUnsafeCommits();
        newEngine = createNewEngine(newEngineConfig());
        active.set(true);
    }
    newEngine.advanceMaxSeqNoOfUpdatesOrDeletes(globalCheckpoint);
    // Replay the local translog into the new engine up to the global checkpoint.
    final Engine.TranslogRecoveryRunner translogRunner = (engine, snapshot) -> runTranslogRecovery(
        engine, snapshot, Engine.Operation.Origin.LOCAL_RESET, () -> {
            // TODO: add a dedicated recovery stats for the reset translog
        });
    newEngine.recoverFromTranslog(translogRunner, globalCheckpoint);
}