FutureUtils.waitForAll(futures.values()).get();
/**
 * Copies all the files from the given stream state handles to the given path, renaming the files
 * w.r.t. their {@link StateHandleID}.
 *
 * <p>Downloads run in parallel on {@code executorService}; this method blocks until all of them
 * have finished or one of them failed.
 *
 * @param stateHandleMap the stream state handles to download, keyed by their {@link StateHandleID}
 * @param restoreInstancePath base path under which the downloaded files are created
 * @param closeableRegistry registry used by the download runnables so open streams can be closed
 *     on cancellation
 * @throws IOException if a download failed with an I/O error
 * @throws Exception if the downloads failed for any other reason
 */
private void downloadDataForAllStateHandles(
    Map<StateHandleID, StreamStateHandle> stateHandleMap,
    Path restoreInstancePath,
    CloseableRegistry closeableRegistry) throws Exception {

    try {
        List<Runnable> runnables = createDownloadRunnables(stateHandleMap, restoreInstancePath, closeableRegistry);
        List<CompletableFuture<Void>> futures = new ArrayList<>(runnables.size());
        for (Runnable runnable : runnables) {
            futures.add(CompletableFuture.runAsync(runnable, executorService));
        }
        // block until every download future has completed
        FutureUtils.waitForAll(futures).get();
    } catch (ExecutionException e) {
        // unwrap the async plumbing to surface the actual failure cause
        Throwable throwable = ExceptionUtils.stripExecutionException(e);
        throwable = ExceptionUtils.stripException(throwable, RuntimeException.class);
        if (throwable instanceof IOException) {
            throw (IOException) throwable;
        } else {
            // wrap the stripped root cause (not the outer ExecutionException) to avoid a
            // redundantly nested cause chain in the reported exception
            throw new FlinkRuntimeException("Failed to download data for state handles.", throwable);
        }
    }
}
/**
 * Triggers the asynchronous shutdown of all registered handlers that support it.
 *
 * @return conjunct future that completes once every {@link AutoCloseableAsync} handler has closed
 */
private FutureUtils.ConjunctFuture<Void> closeHandlersAsync() {
    final List<CompletableFuture<Void>> closeFutures = handlers.stream()
        .map(handlerTuple -> handlerTuple.f1)
        .filter(candidate -> candidate instanceof AutoCloseableAsync)
        .map(closeable -> ((AutoCloseableAsync) closeable).closeAsync())
        .collect(Collectors.toList());

    return FutureUtils.waitForAll(closeFutures);
}
/**
 * Asynchronously closes every registered handler that implements {@link AutoCloseableAsync}.
 *
 * @return conjunct future completing when all close operations have finished
 */
private FutureUtils.ConjunctFuture<Void> closeHandlersAsync() {
    return FutureUtils.waitForAll(
        handlers.stream()
            .map(t -> t.f1)
            .filter(AutoCloseableAsync.class::isInstance)
            .map(h -> ((AutoCloseableAsync) h).closeAsync())
            .collect(Collectors.toList()));
}
/**
 * Builds a future that completes once the results of all in-flight and completed operations
 * have been accessed.
 *
 * @return future completing when every tracked result has been accessed
 */
private CompletableFuture<Void> asyncWaitForResultsToBeAccessed() {
    return FutureUtils.waitForAll(
        Stream.concat(
                registeredOperationTriggers.values().stream(),
                completedOperations.asMap().values().stream())
            .map(tracker -> tracker.getAccessedFuture())
            .collect(Collectors.toList()));
}
/**
 * Creates a conjunct future over the "accessed" futures of both the still-registered operation
 * triggers and the already-completed operations.
 *
 * @return future that completes once all operation results have been accessed
 */
private CompletableFuture<Void> asyncWaitForResultsToBeAccessed() {
    return FutureUtils.waitForAll(
        Stream.concat(
                registeredOperationTriggers.values().stream(),
                completedOperations.asMap().values().stream())
            .map(resultTracker -> resultTracker.getAccessedFuture())
            .collect(Collectors.toList()));
}
/** * Schedules all execution vertices of this ExecutionJobVertex. * * @param slotProvider to allocate the slots from * @param queued if the allocations can be queued * @param locationPreferenceConstraint constraint for the location preferences * @return Future which is completed once all {@link Execution} could be deployed */ public CompletableFuture<Void> scheduleAll( SlotProvider slotProvider, boolean queued, LocationPreferenceConstraint locationPreferenceConstraint) { final ExecutionVertex[] vertices = this.taskVertices; final ArrayList<CompletableFuture<Void>> scheduleFutures = new ArrayList<>(vertices.length); // kick off the tasks for (ExecutionVertex ev : vertices) { scheduleFutures.add(ev.scheduleForExecution(slotProvider, queued, locationPreferenceConstraint)); } return FutureUtils.waitForAll(scheduleFutures); }
/** * Cancels all currently running vertex executions. * * @return A future that is complete once all tasks have canceled. */ public Future<Void> cancelWithFuture() { // we collect all futures from the task cancellations ArrayList<Future<ExecutionState>> futures = new ArrayList<>(parallelism); // cancel each vertex for (ExecutionVertex ev : getTaskVertices()) { futures.add(ev.cancel()); } // return a conjunct future, which is complete once all individual tasks are canceled return FutureUtils.waitForAll(futures); }
/** * Schedules all execution vertices of this ExecutionJobVertex. * * @param slotProvider to allocate the slots from * @param queued if the allocations can be queued * @param locationPreferenceConstraint constraint for the location preferences * @param allPreviousExecutionGraphAllocationIds set with all previous allocation ids in the job graph. * Can be empty if the allocation ids are not required for scheduling. * @return Future which is completed once all {@link Execution} could be deployed */ public CompletableFuture<Void> scheduleAll( SlotProvider slotProvider, boolean queued, LocationPreferenceConstraint locationPreferenceConstraint, @Nonnull Set<AllocationID> allPreviousExecutionGraphAllocationIds) { final ExecutionVertex[] vertices = this.taskVertices; final ArrayList<CompletableFuture<Void>> scheduleFutures = new ArrayList<>(vertices.length); // kick off the tasks for (ExecutionVertex ev : vertices) { scheduleFutures.add(ev.scheduleForExecution( slotProvider, queued, locationPreferenceConstraint, allPreviousExecutionGraphAllocationIds)); } return FutureUtils.waitForAll(scheduleFutures); }
/** * Schedules all execution vertices of this ExecutionJobVertex. * * @param slotProvider to allocate the slots from * @param queued if the allocations can be queued * @param locationPreferenceConstraint constraint for the location preferences * @param allPreviousExecutionGraphAllocationIds set with all previous allocation ids in the job graph. * Can be empty if the allocation ids are not required for scheduling. * @return Future which is completed once all {@link Execution} could be deployed */ public CompletableFuture<Void> scheduleAll( SlotProvider slotProvider, boolean queued, LocationPreferenceConstraint locationPreferenceConstraint, @Nonnull Set<AllocationID> allPreviousExecutionGraphAllocationIds) { final ExecutionVertex[] vertices = this.taskVertices; final ArrayList<CompletableFuture<Void>> scheduleFutures = new ArrayList<>(vertices.length); // kick off the tasks for (ExecutionVertex ev : vertices) { scheduleFutures.add(ev.scheduleForExecution( slotProvider, queued, locationPreferenceConstraint, allPreviousExecutionGraphAllocationIds)); } return FutureUtils.waitForAll(scheduleFutures); }
private CompletableFuture<Void> scheduleLazy(SlotProvider slotProvider) { final ArrayList<CompletableFuture<Void>> schedulingFutures = new ArrayList<>(numVerticesTotal); // simply take the vertices without inputs. for (ExecutionJobVertex ejv : verticesInCreationOrder) { if (ejv.getJobVertex().isInputVertex()) { final CompletableFuture<Void> schedulingJobVertexFuture = ejv.scheduleAll( slotProvider, allowQueuedScheduling, LocationPreferenceConstraint.ALL, // since it is an input vertex, the input based location preferences should be empty Collections.emptySet()); schedulingFutures.add(schedulingJobVertexFuture); } } return FutureUtils.waitForAll(schedulingFutures); }
private CompletableFuture<Void> scheduleLazy(SlotProvider slotProvider) { final ArrayList<CompletableFuture<Void>> schedulingFutures = new ArrayList<>(numVerticesTotal); // simply take the vertices without inputs. for (ExecutionJobVertex ejv : verticesInCreationOrder) { if (ejv.getJobVertex().isInputVertex()) { final CompletableFuture<Void> schedulingJobVertexFuture = ejv.scheduleAll( slotProvider, allowQueuedScheduling, LocationPreferenceConstraint.ALL, // since it is an input vertex, the input based location preferences should be empty Collections.emptySet()); schedulingFutures.add(schedulingJobVertexFuture); } } return FutureUtils.waitForAll(schedulingFutures); }
/**
 * Shuts the given rpc services down and waits for their termination.
 *
 * @param rpcServices to shut down; {@code null} entries are ignored
 * @param timeout for this operation
 * @throws InterruptedException if the operation has been interrupted
 * @throws ExecutionException if a problem occurred
 * @throws TimeoutException if a timeout occurred
 */
public static void terminateRpcServices(
        Time timeout,
        RpcService... rpcServices) throws InterruptedException, ExecutionException, TimeoutException {

    final Collection<CompletableFuture<?>> stopFutures = new ArrayList<>(rpcServices.length);

    for (RpcService rpcService : rpcServices) {
        if (rpcService == null) {
            // tolerate null entries in the varargs array
            continue;
        }
        stopFutures.add(rpcService.stopService());
    }

    // wait (bounded by the timeout) until every service has terminated
    FutureUtils.waitForAll(stopFutures).get(timeout.toMilliseconds(), TimeUnit.MILLISECONDS);
}
/**
 * Stops all given rpc services and blocks until every one of them has terminated.
 *
 * @param rpcServices services to stop; {@code null} entries are skipped
 * @param timeout maximum time to wait for termination
 * @throws InterruptedException if the wait has been interrupted
 * @throws ExecutionException if one of the services failed to terminate
 * @throws TimeoutException if termination did not finish within the timeout
 */
public static void terminateRpcServices(
        Time timeout,
        RpcService... rpcServices) throws InterruptedException, ExecutionException, TimeoutException {

    final Collection<CompletableFuture<?>> shutdownFutures = new ArrayList<>(rpcServices.length);

    for (RpcService rpcService : rpcServices) {
        if (rpcService != null) {
            shutdownFutures.add(rpcService.stopService());
        }
    }

    // bounded wait on the conjunct termination future
    FutureUtils.waitForAll(shutdownFutures).get(timeout.toMilliseconds(), TimeUnit.MILLISECONDS);
}
/**
 * Cancels all executions of this failover region and, once every vertex has reached a terminal
 * state, notifies {@code allVerticesInTerminalState} with the given failover mod version.
 *
 * @param globalModVersionOfFailover the global modification version of the failover that
 *     triggered this cancellation
 */
private void cancel(final long globalModVersionOfFailover) {
    // CAS-style retry loop: re-read the state until we either win the transition to
    // CANCELLING or observe that the region is no longer RUNNING
    while (true) {
        JobStatus curStatus = this.state;
        if (curStatus.equals(JobStatus.RUNNING)) {
            if (transitionState(curStatus, JobStatus.CANCELLING)) {
                // we build a future that is complete once all vertices have reached a terminal state
                final ArrayList<Future<?>> futures = new ArrayList<>(connectedExecutionVertexes.size());
                // cancel all tasks (that still need cancelling)
                for (ExecutionVertex vertex : connectedExecutionVertexes) {
                    futures.add(vertex.cancel());
                }
                final FutureUtils.ConjunctFuture<Void> allTerminal = FutureUtils.waitForAll(futures);
                // when every vertex is terminal, continue the failover on the given executor
                // (anonymous class because this code path predates java.util.concurrent futures)
                allTerminal.thenAcceptAsync(new AcceptFunction<Void>() {
                    @Override
                    public void accept(Void value) {
                        allVerticesInTerminalState(globalModVersionOfFailover);
                    }
                }, executor);
                break;
            }
            // transitionState lost a race with a concurrent state change; retry
        } else {
            LOG.info("FailoverRegion {} is {} when cancel.", id, state);
            break;
        }
    }
}
/**
 * Confirms leadership for the given session id and, if it is still held, installs the new
 * fencing token and re-runs the recovered jobs.
 *
 * @param newLeaderSessionID leader session id that was granted
 * @param recoveredJobs job graphs recovered from persistent storage
 * @return future with {@code true} if leadership was accepted and all jobs were started,
 *     {@code false} if leadership was lost in the meantime
 */
private CompletableFuture<Boolean> tryAcceptLeadershipAndRunJobs(UUID newLeaderSessionID, Collection<JobGraph> recoveredJobs) {
    final DispatcherId dispatcherId = DispatcherId.fromUuid(newLeaderSessionID);

    // guard: leadership may have been revoked between grant and confirmation
    if (!leaderElectionService.hasLeadership(newLeaderSessionID)) {
        log.debug("Dispatcher {} lost leadership before accepting it. Stop recovering jobs for fencing token {}.", getAddress(), dispatcherId);
        return CompletableFuture.completedFuture(false);
    }

    log.debug("Dispatcher {} accepted leadership with fencing token {}. Start recovered jobs.", getAddress(), dispatcherId);
    setNewFencingToken(dispatcherId);

    final Collection<CompletableFuture<?>> jobRunFutures = new ArrayList<>(recoveredJobs.size());
    for (JobGraph jobGraph : recoveredJobs) {
        // defer each run until any previous job manager for this job has terminated
        jobRunFutures.add(waitForTerminatingJobManager(jobGraph.getJobID(), jobGraph, this::runJob));
    }

    return FutureUtils.waitForAll(jobRunFutures).thenApply(ignored -> true);
}
private void cancel(final long globalModVersionOfFailover) { while (true) { JobStatus curStatus = this.state; if (curStatus.equals(JobStatus.RUNNING)) { if (transitionState(curStatus, JobStatus.CANCELLING)) { // we build a future that is complete once all vertices have reached a terminal state final ArrayList<CompletableFuture<?>> futures = new ArrayList<>(connectedExecutionVertexes.size()); // cancel all tasks (that still need cancelling) for (ExecutionVertex vertex : connectedExecutionVertexes) { futures.add(vertex.cancel()); } final FutureUtils.ConjunctFuture<Void> allTerminal = FutureUtils.waitForAll(futures); allTerminal.thenAcceptAsync( (Void value) -> allVerticesInTerminalState(globalModVersionOfFailover), executor); break; } } else { LOG.info("FailoverRegion {} is {} when cancel.", id, state); break; } } }
/**
 * Accepts leadership for the given session id if it is still held, sets the new fencing token,
 * and restarts all recovered jobs.
 *
 * @param newLeaderSessionID the granted leader session id
 * @param recoveredJobs jobs recovered from the submitted-job-graph store
 * @return future completing with {@code true} once all recovered jobs are running, or
 *     {@code false} when leadership was already lost
 */
private CompletableFuture<Boolean> tryAcceptLeadershipAndRunJobs(UUID newLeaderSessionID, Collection<JobGraph> recoveredJobs) {
    final DispatcherId dispatcherId = DispatcherId.fromUuid(newLeaderSessionID);

    if (leaderElectionService.hasLeadership(newLeaderSessionID)) {
        log.debug("Dispatcher {} accepted leadership with fencing token {}. Start recovered jobs.", getAddress(), dispatcherId);
        setNewFencingToken(dispatcherId);

        Collection<CompletableFuture<?>> recoveryFutures = new ArrayList<>(recoveredJobs.size());
        for (JobGraph recoveredJobGraph : recoveredJobs) {
            // each job waits for a possibly still-terminating previous job manager
            recoveryFutures.add(
                waitForTerminatingJobManager(recoveredJobGraph.getJobID(), recoveredJobGraph, this::runJob));
        }

        return FutureUtils.waitForAll(recoveryFutures).thenApply(ignored -> Boolean.TRUE);
    } else {
        log.debug("Dispatcher {} lost leadership before accepting it. Stop recovering jobs for fencing token {}.", getAddress(), dispatcherId);
        return CompletableFuture.completedFuture(false);
    }
}
/**
 * Cancels all executions of this failover region and, once every vertex has reached a terminal
 * state, invokes {@code allVerticesInTerminalState} asynchronously on the executor.
 *
 * @param globalModVersionOfFailover the global modification version of the failover that
 *     triggered this cancellation
 */
private void cancel(final long globalModVersionOfFailover) {
    // CAS-style retry loop: retry until we win the transition to CANCELLING or observe
    // that the region is no longer RUNNING
    while (true) {
        JobStatus curStatus = this.state;
        if (curStatus.equals(JobStatus.RUNNING)) {
            if (transitionState(curStatus, JobStatus.CANCELLING)) {
                // we build a future that is complete once all vertices have reached a terminal state
                final ArrayList<CompletableFuture<?>> futures = new ArrayList<>(connectedExecutionVertexes.size());
                // cancel all tasks (that still need cancelling)
                for (ExecutionVertex vertex : connectedExecutionVertexes) {
                    futures.add(vertex.cancel());
                }
                final FutureUtils.ConjunctFuture<Void> allTerminal = FutureUtils.waitForAll(futures);
                // continue the failover on the executor once everything is terminal
                allTerminal.thenAcceptAsync(
                    (Void value) -> allVerticesInTerminalState(globalModVersionOfFailover),
                    executor);
                break;
            }
            // transitionState lost a race with a concurrent state change; retry
        } else {
            LOG.info("FailoverRegion {} is {} when cancel.", id, state);
            break;
        }
    }
}
final FutureUtils.ConjunctFuture<Void> allTerminal = FutureUtils.waitForAll(futures); allTerminal.whenCompleteAsync( (Void ignored, Throwable throwable) -> {