/**
 * Flushes the engine state, including the transaction log, freeing memory and persisting
 * documents in the Lucene index to disk, which may involve a heavy, durable fsync.
 * Does not block when another flush is already running, and skips writing a Lucene
 * commit when there is nothing to commit.
 *
 * @return the commit Id for the resulting commit
 */
public final CommitId flush() throws EngineException {
    // Neither force a commit nor wait for a concurrent flush to finish.
    final boolean force = false;
    final boolean waitIfOngoing = false;
    return flush(force, waitIfOngoing);
}
/** * Flush the engine (committing segments to disk and truncating the * translog) and close it. */ public void flushAndClose() throws IOException { if (isClosed.get() == false) { logger.trace("flushAndClose now acquire writeLock"); try (ReleasableLock lock = writeLock.acquire()) { logger.trace("flushAndClose now acquired writeLock"); try { logger.debug("flushing shard on close - this might take some time to sync files to disk"); try { // TODO we might force a flush in the future since we have the write lock already even though recoveries // are running. flush(); } catch (AlreadyClosedException ex) { logger.debug("engine already closed - skipping flushAndClose"); } } finally { close(); // double close is not a problem } } } awaitPendingClose(); }
/** * Executes the given flush request against the engine. * * @param request the flush request * @return the commit ID */ public Engine.CommitId flush(FlushRequest request) { final boolean waitIfOngoing = request.waitIfOngoing(); final boolean force = request.force(); logger.trace("flush with {}", request); /* * We allow flushes while recovery since we allow operations to happen while recovering and we want to keep the translog under * control (up to deletes, which we do not GC). Yet, we do not use flush internally to clear deletes and flush the index writer * since we use Engine#writeIndexingBuffer for this now. */ verifyNotClosed(); final Engine engine = getEngine(); if (engine.isRecovering()) { throw new IllegalIndexShardStateException( shardId(), state, "flush is only allowed if the engine is not recovery from translog"); } final long time = System.nanoTime(); final Engine.CommitId commitId = engine.flush(force, waitIfOngoing); engine.refresh("flush"); // TODO this is technically wrong we should remove this in 7.0 flushMetric.inc(System.nanoTime() - time); return commitId; }
/**
 * Flushes the state of the engine, including the transaction log, clearing memory and
 * persisting documents in the Lucene index to disk — potentially a heavy, durable fsync.
 * Will not block if another flush is in progress, and writes no Lucene commit when
 * nothing needs committing.
 *
 * @return the commit Id for the resulting commit
 */
public final CommitId flush() throws EngineException {
    return flush(/* force */ false, /* waitIfOngoing */ false);
}
/**
 * Advances the filter chain for a flush: delegates to the next {@code EngineFilter}
 * if one remains, otherwise terminates the chain by flushing the engine directly.
 */
public CommitId doFlush(final boolean force, final boolean waitIfOngoing) throws EngineException, FlushNotAllowedEngineException {
    if (position >= filters.length) {
        // End of the chain — hand the flush to the engine itself.
        return engine.flush(force, waitIfOngoing);
    }
    // Pass control to the next filter, advancing the chain cursor.
    final EngineFilter next = filters[position++];
    return next.doFlush(force, waitIfOngoing, this);
}
/** * Flush the engine (committing segments to disk and truncating the * translog) and close it. */ public void flushAndClose() throws IOException { if (isClosed.get() == false) { logger.trace("flushAndClose now acquire writeLock"); try (ReleasableLock lock = writeLock.acquire()) { logger.trace("flushAndClose now acquired writeLock"); try { logger.debug("flushing shard on close - this might take some time to sync files to disk"); try { flush(); // TODO we might force a flush in the future since we have the write lock already even though recoveries are running. } catch (AlreadyClosedException ex) { logger.debug("engine already closed - skipping flushAndClose"); } } finally { close(); // double close is not a problem } } } }
/** * Flush the engine (committing segments to disk and truncating the * translog) and close it. */ public void flushAndClose() throws IOException { if (isClosed.get() == false) { logger.trace("flushAndClose now acquire writeLock"); try (ReleasableLock lock = writeLock.acquire()) { logger.trace("flushAndClose now acquired writeLock"); try { logger.debug("flushing shard on close - this might take some time to sync files to disk"); try { flush(); // TODO we might force a flush in the future since we have the write lock already even though recoveries are running. } catch (AlreadyClosedException ex) { logger.debug("engine already closed - skipping flushAndClose"); } } finally { close(); // double close is not a problem } } } awaitPendingClose(); }
public Engine.CommitId flush(FlushRequest request) throws ElasticsearchException { boolean waitIfOngoing = request.waitIfOngoing(); boolean force = request.force(); if (logger.isTraceEnabled()) { logger.trace("flush with {}", request); } // we allows flush while recovering, since we allow for operations to happen // while recovering, and we want to keep the translog at bay (up to deletes, which // we don't gc). verifyStartedOrRecovering(); long time = System.nanoTime(); Engine.CommitId commitId = engine().flush(force, waitIfOngoing); flushMetric.inc(System.nanoTime() - time); return commitId; }
public Engine.CommitId flush(FlushRequest request) throws ElasticsearchException { boolean waitIfOngoing = request.waitIfOngoing(); boolean force = request.force(); if (logger.isTraceEnabled()) { logger.trace("flush with {}", request); } // we allows flush while recovering, since we allow for operations to happen // while recovering, and we want to keep the translog at bay (up to deletes, which // we don't gc). Yet, we don't use flush internally to clear deletes and flush the indexwriter since // we use #writeIndexingBuffer for this now. verifyNotClosed(); Engine engine = getEngine(); if (engine.isRecovering()) { throw new IllegalIndexShardStateException(shardId(), state, "flush is only allowed if the engine is not recovery" + " from translog"); } long time = System.nanoTime(); Engine.CommitId commitId = engine.flush(force, waitIfOngoing); flushMetric.inc(System.nanoTime() - time); return commitId; }
/** * Executes the given flush request against the engine. * * @param request the flush request * @return the commit ID */ public Engine.CommitId flush(FlushRequest request) { final boolean waitIfOngoing = request.waitIfOngoing(); final boolean force = request.force(); logger.trace("flush with {}", request); /* * We allow flushes while recovery since we allow operations to happen while recovering and we want to keep the translog under * control (up to deletes, which we do not GC). Yet, we do not use flush internally to clear deletes and flush the index writer * since we use Engine#writeIndexingBuffer for this now. */ verifyNotClosed(); final Engine engine = getEngine(); if (engine.isRecovering()) { throw new IllegalIndexShardStateException( shardId(), state, "flush is only allowed if the engine is not recovery from translog"); } final long time = System.nanoTime(); final Engine.CommitId commitId = engine.flush(force, waitIfOngoing); engine.refresh("flush"); // TODO this is technically wrong we should remove this in 7.0 flushMetric.inc(System.nanoTime() - time); return commitId; }
/** * Flush the engine (committing segments to disk and truncating the * translog) and close it. */ public void flushAndClose() throws IOException { if (isClosed.get() == false) { logger.trace("flushAndClose now acquire writeLock"); try (ReleasableLock lock = writeLock.acquire()) { logger.trace("flushAndClose now acquired writeLock"); try { logger.debug("flushing shard on close - this might take some time to sync files to disk"); try { flush(); // TODO we might force a flush in the future since we have the write lock already even though recoveries are running. } catch (FlushNotAllowedEngineException ex) { logger.debug("flush not allowed during flushAndClose - skipping"); } catch (EngineClosedException ex) { logger.debug("engine already closed - skipping flushAndClose"); } } finally { close(); // double close is not a problem } } } }
/** * Executes the given flush request against the engine. * * @param request the flush request * @return the commit ID */ public Engine.CommitId flush(FlushRequest request) { final boolean waitIfOngoing = request.waitIfOngoing(); final boolean force = request.force(); logger.trace("flush with {}", request); /* * We allow flushes while recovery since we allow operations to happen while recovering and we want to keep the translog under * control (up to deletes, which we do not GC). Yet, we do not use flush internally to clear deletes and flush the index writer * since we use Engine#writeIndexingBuffer for this now. */ verifyNotClosed(); final Engine engine = getEngine(); if (engine.isRecovering()) { throw new IllegalIndexShardStateException( shardId(), state, "flush is only allowed if the engine is not recovery from translog"); } final long time = System.nanoTime(); final Engine.CommitId commitId = engine.flush(force, waitIfOngoing); engine.refresh("flush"); // TODO this is technically wrong we should remove this in 7.0 flushMetric.inc(System.nanoTime() - time); return commitId; }