/**
 * Runs the given asynchronous task once the specified delay has elapsed.
 *
 * @param task            Supplier producing the asynchronous task to run.
 * @param delay           How long to wait, in milliseconds, before invoking the task.
 * @param executorService Scheduler used to implement the delay and execute the task.
 * @param <T>             Type of the task's result.
 * @return A CompletableFuture that will be completed with the result of the given task.
 */
public static <T> CompletableFuture<T> delayedFuture(final Supplier<CompletableFuture<T>> task,
                                                     final long delay,
                                                     final ScheduledExecutorService executorService) {
    final Duration waitTime = Duration.ofMillis(delay);
    return delayedFuture(waitTime, executorService).thenCompose(ignored -> task.get());
}
/**
 * Repeatedly evaluates the given condition (with a 100ms delay between attempts) until it
 * yields {@code true}.
 *
 * @param condition Supplier of an async boolean check.
 * @return A CompletionStage that completes once the condition has been observed as true.
 */
private CompletionStage<Void> checkDone(Supplier<CompletableFuture<Boolean>> condition) {
    final AtomicBoolean finished = new AtomicBoolean(false);
    return Futures.loop(
            () -> !finished.get(),
            () -> Futures.delayedFuture(condition, 100, executor).thenAccept(finished::set),
            executor);
}
/**
 * Records the given processing delay in the metrics, logs it, and returns a future that
 * completes after that many milliseconds have elapsed.
 *
 * @param millis Delay to apply, in milliseconds.
 * @param max    Whether this delay was capped at the maximum (logged only).
 * @return A CompletableFuture that completes once the delay has elapsed.
 */
private CompletableFuture<Void> throttleOnce(int millis, boolean max) {
    this.metrics.processingDelay(millis);
    log.debug("{}: Processing delay = {}ms (max={}).", this.traceObjectId, millis, max);
    final Duration throttleDuration = Duration.ofMillis(millis);
    return Futures.delayedFuture(throttleDuration, this.executor);
}
/**
 * Runs the given loop body for as long as the condition holds, waiting {@code delay}
 * milliseconds before each execution of the body.
 *
 * @param condition Supplier that determines whether another iteration should run.
 * @param loopBody  Supplier producing the async body of one iteration.
 * @param delay     Delay in milliseconds applied before each iteration.
 * @param executor  Executor used for scheduling and running the loop.
 * @return A CompletableFuture that completes when the condition first evaluates to false.
 */
public static CompletableFuture<Void> loopWithDelay(Supplier<Boolean> condition,
                                                    Supplier<CompletableFuture<Void>> loopBody,
                                                    long delay,
                                                    ScheduledExecutorService executor) {
    final Supplier<CompletableFuture<Void>> delayedBody =
            () -> Futures.delayedFuture(loopBody, delay, executor);
    return Futures.loop(condition, delayedBody, executor);
}
private CompletableFuture<Void> getStreamRetentionFuture(StreamImpl stream) {
    // Randomly distribute retention work across the MINIMUM_RETENTION_FREQUENCY_IN_MINUTES window
    // by introducing a random initial delay. This ensures that not all streams become eligible for
    // retention processing at around the same time.
    long delay = Duration.ofMinutes(Config.MINIMUM_RETENTION_FREQUENCY_IN_MINUTES).toMillis();
    long randomInitialDelay = ThreadLocalRandom.current().nextLong(delay);
    // Run retention once after the random initial delay, then keep re-running it every `delay`
    // milliseconds for as long as this service is running.
    return Futures.delayedFuture(() -> performRetention(stream), randomInitialDelay, executor)
                  .thenCompose(x -> RetryHelper.loopWithDelay(this::isRunning,
                          () -> performRetention(stream), delay, executor));
}
/**
 * Starts polling the given supplier once per second until the termination predicate accepts a
 * result, or until this request is cancelled. May only be called once per instance.
 *
 * @param supplier    Produces the async operation polled on each iteration.
 * @param termination Predicate that decides whether a polled result is terminal.
 * @param executor    Scheduler used for the 1-second delays between polls.
 * @throws IllegalStateException if this request has already been started.
 */
public void start(Supplier<CompletableFuture<T>> supplier, Predicate<T> termination, ScheduledExecutorService executor) {
    futureRef.updateAndGet(previous -> {
        // Only one polling loop may ever be created for this request.
        if (previous != null) {
            throw new IllegalStateException("Request already started");
        }

        // NOTE(review): updateAndGet may invoke this updater more than once under contention,
        // which would kick off multiple polling loops — confirm callers never race on start().
        return Futures.loop(() -> !done.get() && !cancelled.get(),
                () -> Futures.delayedFuture(() -> supplier.get().thenAccept(r -> {
                    // Record the latest result, then check whether it is terminal.
                    result.set(r);
                    done.set(termination.test(r));
                }), 1000, executor), executor)
                .thenApply((Void v) -> {
                    if (done.get()) {
                        // completed
                        return result.get();
                    } else {
                        // cancelled
                        throw new CancellationException();
                    }
                });
    });
    // Signal that start() has run, regardless of how the loop eventually finishes.
    started.complete(null);
}
/**
 * Creates a future that completes after the configured segment-metadata expiration interval,
 * registered against the stop token so it is released on shutdown.
 *
 * @return A CompletableFuture that completes once the interval has elapsed.
 */
private CompletableFuture<Void> delay() {
    final CompletableFuture<Void> delayFuture =
            Futures.delayedFuture(this.config.getSegmentMetadataExpiration(), this.executor);
    this.stopToken.register(delayFuture);
    return delayFuture;
}
}
/**
 * Sweeps transactions orphaned by the given failed host. Waits twice the maximum transaction
 * timeout before sweeping (so that in-flight transactions can expire), then retries the sweep
 * indefinitely on retryable failures.
 *
 * @param failedHost The host whose orphaned transactions should be swept.
 * @return A CompletableFuture that completes when the sweep finishes.
 */
@Override
public CompletableFuture<Void> handleFailedProcess(String failedHost) {
    if (!transactionMetadataTasks.isReady()) {
        return Futures.failedFuture(new IllegalStateException(getClass().getName() + " not yet ready"));
    }

    log.info("Host={}, sweeping orphaned transactions", failedHost);
    final Duration graceInterval = Duration.ofMillis(2 * maxTxnTimeoutMillis);
    return Futures.delayedFuture(graceInterval, executor)
                  .thenComposeAsync(ignored -> withRetriesAsync(
                          () -> sweepOrphanedTxnsWithoutDelay(failedHost),
                          RETRYABLE_PREDICATE, Integer.MAX_VALUE, executor));
}
/**
 * Starts this service: after the initial delay, runs the main task, and requests a stop of the
 * service once that task completes (successfully or not).
 */
@Override
protected void doStart() {
    Exceptions.checkNotClosed(this.closed.get(), this);
    notifyStarted();
    this.runTask = Futures.delayedFuture(INITIAL_DELAY, this.executorService)
                          .thenCompose(ignored -> run());
    this.runTask.whenComplete((result, failure) -> stopAsync());
}
@Override
protected void doStart() {
    // Create znode for storing latest batch id. If the batch id exists, get the value from the store.
    // We will later register watch on the path and keep receiving any changes to its value.
    RetryHelper.withRetriesAsync(() -> zkStoreHelper.createZNodeIfNotExist(guardPath)
            .thenCompose(v -> fetchVersion().thenAccept(r -> currentBatch.set(latestVersion.get())))
            .thenAccept(v -> watch.compareAndSet(null, registerWatch(guardPath))),
            RetryHelper.RETRYABLE_PREDICATE, 5, gcExecutor)
            .whenComplete((r, e) -> {
                if (e == null) {
                    // Initialization succeeded: report started and kick off the periodic GC loop,
                    // which invokes process() every periodInMillis while the service is running.
                    notifyStarted();
                    gcLoop.set(Futures.loop(this::isRunning,
                            () -> Futures.delayedFuture(this::process, periodInMillis, gcExecutor),
                            gcExecutor));
                } else {
                    notifyFailed(e);
                }
                // Release anyone waiting on startup, regardless of outcome.
                latch.complete(null);
            });
}
@Override
public CompletableFuture<Void> writeEvent(ControllerEvent event) {
    // Hand the event to the request handler after a 1-second delay, but report the write as
    // complete immediately.
    // NOTE(review): the delayed future is fire-and-forget — any failure in processEvent() is
    // silently dropped. Confirm this is intentional (e.g. a test stub simulating async delivery).
    Futures.delayedFuture(() -> streamRequestHandler.processEvent(event), 1000, executor);
    return CompletableFuture.completedFuture(null);
}
@Override
protected void doStop() {
    Exceptions.checkNotClosed(this.closed.get(), this);
    log.info("{}: Stopping.", this.traceObjectId);
    if (this.runTask == null) {
        // The main task was never started; nothing to wait for.
        notifyStoppedOrFailed(null);
    } else if (this.runTask.isDone()) {
        // Our runTask is done. See if it completed normally or not.
        notifyStoppedOrFailed(Futures.getException(this.runTask));
    } else {
        // Still running. Wait (async) for the task to complete or a timeout to expire.
        CompletableFuture
                .anyOf(this.runTask, Futures.delayedFuture(getShutdownTimeout(), this.executor))
                .whenComplete((r, ex) -> {
                    if (ex != null) {
                        ex = Exceptions.unwrap(ex);
                    }

                    if (ex == null && !this.runTask.isDone()) {
                        // Still no exception, but our service did not properly shut down.
                        ex = new TimeoutException("Timeout expired while waiting for the Service to shut down.");
                    }

                    // Clear the task reference so a repeated doStop() takes the fast path.
                    this.runTask = null;
                    notifyStoppedOrFailed(ex);
                });
    }
}
/**
 * Appends the given data to this in-memory log, optionally completing the returned future only
 * after an artificial delay (as dictated by the appendDelayProvider).
 *
 * @param data    The data to append.
 * @param timeout Timeout for the operation (unused by the in-memory implementation).
 * @return A CompletableFuture containing the LogAddress of the appended entry, or failed with
 * whatever exception the add raised.
 */
@Override
public CompletableFuture<LogAddress> append(ArrayView data, Duration timeout) {
    ensurePreconditions();
    CompletableFuture<LogAddress> result;
    try {
        Entry entry = new Entry(data);
        synchronized (this.entries) {
            // The entry's sequence number is the log offset at the moment of the append.
            entry.sequenceNumber = this.offset;
            this.entries.add(entry, clientId);

            // Only update internals after a successful add.
            this.offset += entry.data.length;
        }

        result = CompletableFuture.completedFuture(new InMemoryLogAddress(entry.sequenceNumber));
    } catch (Throwable ex) {
        return Futures.failedFuture(ex);
    }

    Duration delay = this.appendDelayProvider.get();
    if (delay.compareTo(Duration.ZERO) <= 0) {
        // No delay, execute right away.
        return result;
    } else {
        // Schedule the append after the given delay.
        return result.thenComposeAsync(
                logAddress -> Futures.delayedFuture(delay, this.executorService)
                                     .thenApply(ignored -> logAddress),
                this.executorService);
    }
}
/**
 * Initiates a scale of the test stream and blocks until the controller reports the scale as
 * SUCCESS, polling its status once per second.
 *
 * @param segmentsToSeal Segments to seal as part of the scale.
 * @param keyRanges      New key ranges for the replacement segments.
 * @param start          Scale start timestamp.
 */
private void scale(long start, List<Long> segmentsToSeal, Map<Double, Double> keyRanges)
        throws InterruptedException, java.util.concurrent.ExecutionException {
    Controller.ScaleResponse scaleStatus =
            consumer.scale(SCOPE, STREAM, segmentsToSeal, keyRanges, start).get();

    AtomicBoolean finished = new AtomicBoolean(false);
    Futures.loop(
            () -> !finished.get(),
            () -> Futures.delayedFuture(
                        () -> consumer.checkScale(SCOPE, STREAM, scaleStatus.getEpoch()), 1000, executor)
                    .thenAccept(response -> finished.set(
                            response.getStatus().equals(Controller.ScaleStatusResponse.ScaleStatus.SUCCESS))),
            executor)
           .get();
}
}
@Override
protected CompletableFuture<Void> doRun() {
    // A Writer iteration is made of the following stages:
    // 1. Delay (if necessary).
    // 2. Read data.
    // 3. Load data into SegmentProcessors.
    // 4. Flush eligible SegmentProcessors.
    // 5. Acknowledge (truncate).
    return Futures.loop(
            this::canRun,
            () -> Futures
                    .delayedFuture(getIterationStartDelay(), this.executor)
                    .thenRun(this::beginIteration)
                    .thenComposeAsync(this::readData, this.executor)
                    .thenComposeAsync(this::processReadResult, this.executor)
                    .thenComposeAsync(this::flush, this.executor)
                    .thenComposeAsync(this::acknowledge, this.executor)
                    // Errors are handled per-iteration; the loop keeps going while canRun() holds.
                    .exceptionally(this::iterationErrorHandler)
                    .thenRunAsync(this::endIteration, this.executor),
            this.executor)
            // Once the loop exits, release all SegmentProcessors.
            .thenRun(this::closeProcessors);
}
/**
 * Executes a read from Storage using the current, given state of the Segment.
 */
private CompletableFuture<Void> performRead(SegmentProperties segmentInfo) {
    // Calculate the last offset we read up to.
    long lastReadOffset;
    synchronized (this.readBuffer) {
        lastReadOffset = this.readBufferOffset + this.readBuffer.getLength();
    }

    long diff = segmentInfo.getLength() - lastReadOffset;
    if (diff <= 0) {
        if (segmentInfo.isSealed()) {
            // Segment has been sealed; no point in looping anymore.
            return Futures.failedFuture(new StreamSegmentSealedException(this.segmentName));
        } else {
            // No change in the segment. Back off for waitDuration before the caller retries.
            return Futures.delayedFuture(this.waitDuration, SegmentStoreReader.this.executor);
        }
    } else {
        // New data is available: read as much as fits in one buffer (capped at Integer.MAX_VALUE).
        byte[] buffer = new byte[(int) Math.min(Integer.MAX_VALUE, diff)];
        return SegmentStoreReader.this.storage
                .openRead(segmentName)
                .thenCompose(handle -> SegmentStoreReader.this.storage.read(
                        handle, lastReadOffset, buffer, 0, buffer.length,
                        SegmentStoreReader.this.testConfig.getTimeout()))
                .thenComposeAsync(bytesRead -> {
                    // Fold what we read into the internal buffer, then truncate if possible.
                    processRead(buffer, bytesRead);
                    return truncateIfPossible(segmentInfo.getLength());
                }, SegmentStoreReader.this.executor);
    }
}
// Arrange a container handle whose startup completes only after a 3-second delay.
CompletableFuture<ContainerHandle> startupFuture = Futures.delayedFuture(
        () -> CompletableFuture.completedFuture(containerHandle), 3000, executorService());
when(containerRegistry.startContainer(eq(2), any()))
@Test
public void testEphemeralNode() {
    // Connect a second client with no retries, so that closing it expires its session right away.
    CuratorFramework cli2 = CuratorFrameworkFactory.newClient(zkServer.getConnectString(), new RetryNTimes(0, 0));
    cli2.start();
    ZKStoreHelper zkStoreHelper2 = new ZKStoreHelper(cli2, executor);
    // The ephemeral node is created (and readable) while the second session is alive.
    Assert.assertTrue(zkStoreHelper2.createEphemeralZNode("/testEphemeral", new byte[0]).join());
    Assert.assertNotNull(zkStoreHelper2.getData("/testEphemeral").join());

    zkStoreHelper2.getClient().close();
    // let session get expired.
    // now read the data again. Verify that node no longer exists
    AssertExtensions.assertFutureThrows("",
            Futures.delayedFuture(() -> zkStoreHelper.getData("/testEphemeral"), 1000, executor),
            e -> e instanceof StoreException.DataNotFoundException);
}
}
/**
 * Waits (async) until the given Segment's state in Storage catches up with the given properties:
 * sealed segments must be sealed in Storage; otherwise the lengths must match. Polls every 100ms
 * and fails with TimeoutException if TIMEOUT expires first.
 *
 * @param sp            Expected segment properties.
 * @param readOnlyStore Store to poll for the Storage-side state.
 * @return A CompletableFuture that completes when Storage has caught up.
 */
private CompletableFuture<Void> waitForSegmentInStorage(SegmentProperties sp, StreamSegmentStore readOnlyStore) {
    if (sp.getLength() == 0) {
        // Empty segments may or may not exist in Storage, so don't bother complicating ourselves with this.
        return CompletableFuture.completedFuture(null);
    }

    TimeoutTimer timer = new TimeoutTimer(TIMEOUT);
    AtomicBoolean tryAgain = new AtomicBoolean(true);
    return Futures.loop(
            tryAgain::get,
            () -> Futures
                    // A not-yet-existing segment is treated as an empty, unsealed one.
                    .exceptionallyExpecting(readOnlyStore.getStreamSegmentInfo(sp.getName(), TIMEOUT),
                            ex -> ex instanceof StreamSegmentNotExistsException,
                            StreamSegmentInformation.builder().name(sp.getName()).build())
                    .thenCompose(storageProps -> {
                        // Decide whether another poll is needed: seal state for sealed segments,
                        // length for everything else.
                        if (sp.isSealed()) {
                            tryAgain.set(!storageProps.isSealed());
                        } else {
                            tryAgain.set(sp.getLength() != storageProps.getLength());
                        }

                        if (tryAgain.get() && !timer.hasRemaining()) {
                            return Futures.<Void>failedFuture(new TimeoutException(
                                    String.format("Segment %s did not complete in Storage in the allotted time.", sp.getName())));
                        } else {
                            // Back off briefly before the next poll.
                            return Futures.delayedFuture(Duration.ofMillis(100), executorService());
                        }
                    }),
            executorService());
}
/**
 * Tests the ability to timeout tail reads. This does not actually test the functionality of tail reads - it just
 * tests that they will time out appropriately.
 */
@Test
public void testTailReadsTimeout() {
    final long segmentId = 1;
    final String segmentName = Long.toString(segmentId);

    // Setup a DurableLog and start it.
    @Cleanup
    ContainerSetup setup = new ContainerSetup(executorService());
    @Cleanup
    DurableLog durableLog = setup.createDurableLog();
    durableLog.startAsync().awaitRunning();

    // Create a segment, which will be used for testing later.
    UpdateableSegmentMetadata segmentMetadata = setup.metadata.mapStreamSegmentId(segmentName, segmentId);
    segmentMetadata.setLength(0);

    Duration shortTimeout = Duration.ofMillis(30);

    // Setup a read operation, and make sure it is blocked (since there is no data).
    CompletableFuture<Iterator<Operation>> readFuture = durableLog.read(1, 1, shortTimeout);
    Assert.assertFalse("read() returned a completed future when there is no data available.",
            Futures.isSuccessful(readFuture));

    // The control future completes well after shortTimeout, so anyOf() should be resolved by the
    // read future timing out (otherwise the assertion below fails).
    CompletableFuture<Void> controlFuture = Futures.delayedFuture(Duration.ofMillis(2000), setup.executorService);
    AssertExtensions.assertSuppliedFutureThrows(
            "Future from read() operation did not fail with a TimeoutException after the timeout expired.",
            () -> CompletableFuture.anyOf(controlFuture, readFuture),
            ex -> ex instanceof TimeoutException);
}