@Override
protected CompletableFuture<Void> run() {
    // Arm the loop gate before starting; canLoop() is consulted before every iteration
    // and runOneIteration() supplies each iteration's work.
    this.canContinue.set(true);
    return Futures.loop(this::canLoop, this::runOneIteration, this.executorService);
}
private CompletableFuture<Void> processCatchupReads(Queue<StoreReader.ReadItem> catchupReads) {
    // Drain the queue asynchronously: each iteration removes the head item and processes it,
    // stopping as soon as the queue is empty.
    return Futures.loop(
            () -> !catchupReads.isEmpty(),
            () -> processCatchupRead(catchupReads.poll()),
            this.executorService);
}
private CompletionStage<Void> checkDone(Supplier<CompletableFuture<Boolean>> condition) {
    // Re-evaluate the condition every 100ms; the loop ends once it yields true.
    AtomicBoolean satisfied = new AtomicBoolean(false);
    return Futures.loop(
            () -> !satisfied.get(),
            () -> Futures.delayedFuture(condition, 100, executor).thenAccept(satisfied::set),
            executor);
}
/**
 * Repeatedly executes the given loop body, delaying each invocation by the given amount,
 * for as long as the condition evaluates to true.
 *
 * @param condition Evaluated before each iteration; the loop stops when it returns false.
 * @param loopBody  Supplies the future representing one iteration's work.
 * @param delay     Delay, in milliseconds, applied before each iteration.
 * @param executor  Executor used for scheduling delays and running iterations.
 * @return A CompletableFuture that completes when the loop terminates.
 */
public static CompletableFuture<Void> loopWithDelay(Supplier<Boolean> condition, Supplier<CompletableFuture<Void>> loopBody, long delay, ScheduledExecutorService executor) {
    Supplier<CompletableFuture<Void>> delayedIteration = () -> Futures.delayedFuture(loopBody, delay, executor);
    return Futures.loop(condition, delayedIteration, executor);
}
/**
 * Gets the next {@link TableBucket} in the iteration. The bucket is served directly from the
 * cached batch of {@link TableBucket}s when one is available; otherwise the underlying
 * indexHashIterator is invoked (possibly multiple times) to fetch a new batch, after which the
 * cache lookup is retried.
 */
private CompletableFuture<TableBucket> getNextBucket() {
    val cached = getNextBucketFromExistingBatch();
    if (cached != null) {
        return CompletableFuture.completedFuture(cached);
    }

    // Nothing cached: keep fetching batches until fetchNextTableBuckets() reports false
    // (its result feeds the loop's continuation flag), then retry the cache.
    val keepFetching = new AtomicBoolean(true);
    return Futures.loop(keepFetching::get, this::fetchNextTableBuckets, keepFetching::set, this.executor)
                  .thenApply(v -> getNextBucketFromExistingBatch());
}
CompletableFuture<Void> resumeReading(Consumer<ReadItem> eventHandler, CancellationToken cancellationToken) {
    // Keep consuming items until canRead() says otherwise (cancellation or end of data).
    // Each item is read asynchronously on the client's executor.
    Supplier<CompletableFuture<Void>> readOneItem =
            () -> CompletableFuture.runAsync(() -> readNextItem(eventHandler), ClientReader.this.executor);
    return Futures.loop(() -> canRead(cancellationToken), readOneItem, ClientReader.this.executor);
}
public void start(Supplier<CompletableFuture<T>> supplier, Predicate<T> termination, ScheduledExecutorService executor) { futureRef.updateAndGet(previous -> { if (previous != null) { throw new IllegalStateException("Request already started"); } return Futures.loop(() -> !done.get() && !cancelled.get(), () -> Futures.delayedFuture(() -> supplier.get().thenAccept(r -> { result.set(r); done.set(termination.test(r)); }), 1000, executor), executor) .thenApply((Void v) -> { if (done.get()) { // completed return result.get(); } else { // cancelled throw new CancellationException(); } }); }); started.complete(null); }
/**
 * Fetches the existing keys for all buckets in the given collection of {@link BucketUpdate}s.
 *
 * @param bucketUpdates The BucketUpdateCollection to fetch for. Upon completion of this method, this will be updated
 *                      with the existing keys.
 * @param segment       The segment to operate on.
 * @param timer         Timer for the operation.
 * @return A CompletableFuture that, when completed, indicates the given {@link BucketUpdate}s have had their
 * existing keys populated.
 */
private CompletableFuture<Void> fetchExistingKeys(Collection<BucketUpdate> bucketUpdates, DirectSegmentAccess segment, TimeoutTimer timer) {
    Exceptions.checkNotClosed(this.closed.get(), this);
    // Process every bucket sequentially; mapping each result to `true` tells the iterable loop to continue.
    return Futures.loop(
            bucketUpdates,
            update -> fetchExistingKeys(update, segment, timer).thenApply(v -> true),
            this.executor);
}
private void processResult(Executor executor) { // Process the result, one entry at a time, until one of the stopping conditions occurs. AtomicBoolean shouldContinue = new AtomicBoolean(true); Futures .loop( () -> !this.closed.get() && shouldContinue.get(), () -> { CompletableFuture<ReadResultEntry> resultEntryFuture = fetchNextEntry(); shouldContinue.set(resultEntryFuture != null); return resultEntryFuture != null ? resultEntryFuture : CompletableFuture.completedFuture(null); }, resultEntry -> { if (resultEntry != null) { shouldContinue.set(this.entryHandler.processEntry(resultEntry)); } }, executor) .whenComplete((r, ex) -> close(ex)); // Make sure always close the result processor when done (with our without failures). }
@Override
protected CompletableFuture<Void> doRun() {
    // Repeat delay() followed by runOnce() until a stop is requested via the cancellation token.
    return Futures.loop(
            () -> !this.stopToken.isCancellationRequested(),
            () -> delay().thenCompose(v -> runOnce()),
            this.executor);
}
@Override
protected void doStart() {
    // Create znode for storing latest batch id. If the batch id exists, get the value from the store.
    // We will later register watch on the path and keep receiving any changes to its value.
    RetryHelper.withRetriesAsync(() -> zkStoreHelper.createZNodeIfNotExist(guardPath)
            // NOTE(review): the fetched value `r` is ignored here; presumably fetchVersion() updates
            // `latestVersion` as a side effect — confirm, otherwise currentBatch could be set from a stale value.
            .thenCompose(v -> fetchVersion().thenAccept(r -> currentBatch.set(latestVersion.get())))
            // Register the watch exactly once (compareAndSet from null guards against double registration).
            .thenAccept(v -> watch.compareAndSet(null, registerWatch(guardPath))),
            RetryHelper.RETRYABLE_PREDICATE, 5, gcExecutor)
            .whenComplete((r, e) -> {
                if (e == null) {
                    notifyStarted();
                    // Start the periodic GC loop: invokes process() every periodInMillis while the service is running.
                    gcLoop.set(Futures.loop(this::isRunning, () -> Futures.delayedFuture(this::process, periodInMillis, gcExecutor), gcExecutor));
                } else {
                    notifyFailed(e);
                }
                // Unblock anyone awaiting startup completion, regardless of outcome.
                latch.complete(null);
            });
}
/**
 * Periodically checks the state synchronizer to determine whether the given Checkpoint is complete.
 *
 * @param checkpointName     Checkpoint name.
 * @param backgroundExecutor Executor on which the asynchronous polling task will run.
 * @return A CompletableFuture that completes once the Checkpoint is complete.
 */
private CompletableFuture<Void> waitForCheckpointComplete(String checkpointName, ScheduledExecutorService backgroundExecutor) {
    AtomicBoolean pending = new AtomicBoolean(true);
    // Poll the synchronizer every 500ms until the checkpoint is reported complete.
    return Futures.loop(pending::get, () -> Futures.delayedTask(() -> {
        synchronizer.fetchUpdates();
        pending.set(!synchronizer.getState().isCheckpointComplete(checkpointName));
        if (pending.get()) {
            log.debug("Waiting on checkpoint: {} currentState is: {}", checkpointName, synchronizer.getState());
        }
        return null;
    }, Duration.ofMillis(500), backgroundExecutor), backgroundExecutor);
}
private CompletableFuture<Void> processCatchupReads() { if (!this.catchupReadsSupported) { return CompletableFuture.completedFuture(null); } return Futures.loop( this::canRun, () -> this.catchupQueue.take(CATCHUP_READ_COUNT) .thenComposeAsync(this::processCatchupReads, this.executorService), this.executorService) .exceptionally(ex -> { ex = Exceptions.unwrap(ex); if (ex instanceof ObjectClosedException) { // This a normal shutdown, as the catchupQueue is closed when we are done. return null; } throw new CompletionException(ex); }); }
private CompletableFuture<Void> throttle() { val delay = new AtomicReference<ThrottlerCalculator.DelayResult>(this.throttlerCalculator.getThrottlingDelay()); if (!delay.get().isMaximum()) { // We are not delaying the maximum amount. We only need to do this once. return throttleOnce(delay.get().getDurationMillis(), delay.get().isMaximum()); } else { // The initial delay calculation indicated that we need to throttle to the maximum, which means there's // significant pressure. In order to protect downstream components, we need to run in a loop and delay as much // as needed until the pressure is relieved. return Futures.loop( () -> !delay.get().isMaximum(), () -> throttleOnce(delay.get().getDurationMillis(), delay.get().isMaximum()) .thenRun(() -> delay.set(this.throttlerCalculator.getThrottlingDelay())), this.executor); } }
@Test
public void testLoopIterable() {
    // Feed a sizable list through the iterable overload of Futures.loop and verify
    // every item was visited, in order.
    val expected = IntStream.range(1, 10000).boxed().collect(Collectors.toList());
    val visited = Collections.synchronizedList(new ArrayList<Integer>());
    Futures.loop(
            expected,
            item -> {
                visited.add(item);
                return CompletableFuture.completedFuture(true);
            },
            ForkJoinPool.commonPool()).join();
    AssertExtensions.assertListEquals("Unexpected result.", expected, visited, Integer::equals);
}
/**
 * Runs in a loop as long as the CancellationToken is not cancelled. Asynchronously invokes the given callback
 * whenever there is new data available, which is interpreted as Events.
 */
CompletableFuture<Void> run() {
    return Futures.loop(
            this::canRun,
            () -> {
                // Each iteration: read available data past the current offset, process it as events,
                // then refresh the segment's info and report progress via the completion callback.
                return SegmentStoreReader.this.store
                        .read(segmentName, getReadOffset(), Integer.MAX_VALUE, SegmentStoreReader.this.testConfig.getTimeout())
                        .thenComposeAsync(this::processReadResult, SegmentStoreReader.this.executor)
                        .thenCompose(v -> SegmentStoreReader.this.store
                                .getStreamSegmentInfo(segmentName, SegmentStoreReader.this.testConfig.getTimeout()))
                        .handle(this::readCompleteCallback);
            },
            SegmentStoreReader.this.executor);
}
/**
 * Runs in a loop as long as the CancellationToken is not cancelled. Checks, on a periodic basis, if the Segment's
 * length changed in Storage. If so, it reads the outstanding data and interprets it as an ordered sequence of Events,
 * which are then passed on via the given event handler.
 */
CompletableFuture<Void> run() {
    // Each iteration refreshes the segment's info from Storage, then reads any newly appended data.
    return Futures.loop(
            () -> !this.cancellationToken.isCancellationRequested(),
            () -> SegmentStoreReader.this.storage
                    .getStreamSegmentInfo(segmentName, SegmentStoreReader.this.testConfig.getTimeout())
                    .thenComposeAsync(this::performRead, SegmentStoreReader.this.executor),
            SegmentStoreReader.this.executor);
}
/**
 * Appends continuously to a random new segment in the given container, as long as the given condition holds.
 */
CompletableFuture<Void> appendRandomly(String segmentName, boolean createSegment, Supplier<Boolean> canContinue) {
    byte[] appendData = new byte[1];
    return (createSegment ? createStreamSegment(segmentName, null, TIMEOUT) : CompletableFuture.completedFuture(null))
            // Keep appending single-byte payloads while the caller-supplied condition holds.
            .thenCompose(v -> Futures.loop(
                    canContinue,
                    () -> append(segmentName, appendData, null, TIMEOUT),
                    this.executor))
            // Clean up the segment only if we were the ones who created it.
            .thenCompose(v -> createSegment
                    ? deleteStreamSegment(segmentName, TIMEOUT)
                    : CompletableFuture.completedFuture(null));
}
}
/** * Executes all the requests asynchronously, one by one, on the given FencingTextContext. */ private CompletableFuture<Void> executeWithFencing(Iterator<StoreRequest> requests, int newInstanceFrequency, FencingTestContext context) { AtomicInteger index = new AtomicInteger(); return Futures.loop( requests::hasNext, () -> { // Create a new Segment Store instance if we need to. if (index.incrementAndGet() % newInstanceFrequency == 0) { context.createNewInstanceAsync(); } return executeWithFencing(requests.next(), index.get(), context); }, executorService()); }
private void scale(long start, List<Long> segmentsToSeal, Map<Double, Double> keyRanges) throws InterruptedException, java.util.concurrent.ExecutionException {
    // Kick off the scale operation and capture its epoch for status polling.
    Controller.ScaleResponse scaleStatus = consumer.scale(SCOPE, STREAM, segmentsToSeal, keyRanges, start).get();

    // Poll checkScale once per second until it reports SUCCESS, blocking the caller until then.
    AtomicBoolean done = new AtomicBoolean(false);
    Futures.loop(() -> !done.get(),
            () -> Futures.delayedFuture(() -> consumer.checkScale(SCOPE, STREAM, scaleStatus.getEpoch()), 1000, executor)
                    .thenAccept(x -> done.set(x.getStatus().equals(Controller.ScaleStatusResponse.ScaleStatus.SUCCESS))),
            executor).get();
}
}