/**
 * Returns the current status of the execution vertex identified by the given id.
 *
 * @param executionVertexID id of the execution vertex (job vertex id + subtask index); must not be null
 * @return the current {@link ExecutionVertexStatus} of the addressed subtask
 * @throws IllegalArgumentException if no job vertex exists for the id, or the subtask index
 *         is out of range for that vertex's parallelism
 */
@Override
public ExecutionVertexStatus getExecutionVertexStatus(ExecutionVertexID executionVertexID) {
    checkNotNull(executionVertexID);
    ExecutionJobVertex vertex = executionGraph.getJobVertex(executionVertexID.getJobVertexID());
    if (vertex == null) {
        throw new IllegalArgumentException(
            "Cannot find any vertex with id " + executionVertexID.getJobVertexID());
    }
    // Validate the subtask index explicitly instead of letting the array access
    // fail with a context-free ArrayIndexOutOfBoundsException.
    ExecutionVertex[] taskVertices = vertex.getTaskVertices();
    int subTaskIndex = executionVertexID.getSubTaskIndex();
    if (subTaskIndex < 0 || subTaskIndex >= taskVertices.length) {
        throw new IllegalArgumentException(
            "Subtask index " + subTaskIndex + " is out of range [0, " + taskVertices.length
                + ") for vertex " + executionVertexID.getJobVertexID());
    }
    return taskVertices[subTaskIndex].getCurrentStatus();
}
public boolean areInputsReady(ExecutionVertexID vertexID) { // It is source vertex if no input info if (!vertexInputsMap.containsKey(vertexID)) { return true; } if (config.getInputDependencyConstraint(vertexID.getJobVertexID()) == InputDependencyConstraint.ALL) { for (VertexInput input : vertexInputsMap.get(vertexID).values()) { if (!input.isConsumable(scheduler)) { return false; } } return true; } else { for (VertexInput input : vertexInputsMap.get(vertexID).values()) { if (input.isConsumable(scheduler)) { return true; } } return false; } }
// Handles a failover event: discards all in-flight scheduling state and replays
// deployment bookkeeping before restarting the scheduling loop. Statement order
// is significant — state is cleared before per-vertex resets are applied.
@Override public void onExecutionVertexFailover(ExecutionVertexFailoverEvent event) {
    // Failover invalidates all pending scheduling decisions.
    // NOTE(review): this clears scheduling state globally, not only for the
    // affected vertices — confirm that is intended.
    scheduleQueue.clear();
    currentScheduledUnit = null;
    scheduledRunningUnitSet.clear();
    // Mark each affected task as failed-over on its logical job vertex.
    event.getAffectedExecutionVertexIDs().forEach(t -> logicalJobVertices.get(t.getJobVertexID()).failoverTask());
    // Reset every running unit so deployment progress is recomputed from scratch.
    runningUnitMap.values().forEach(LogicalJobVertexRunningUnit::reset);
    // For vertices whose tasks still all count as deploying after the reset,
    // replay the all-deploying notification and re-announce one result
    // partition per subtask.
    logicalJobVertices.values().stream().filter(LogicalJobVertex::allTasksDeploying).forEach(j -> {
        allTaskDeploying(j);
        for (int i = 0; i < j.getParallelism(); i++) {
            produceResultPartition(j.getJobVertexID());
        }
    });
    // Restart scheduling from the clean state established above.
    onSchedulingStarted();
}
/**
 * Reacts to an execution vertex state change. Only transitions into DEPLOYING are
 * relevant: the owning logical job vertex is notified, an all-tasks-deploying
 * milestone is propagated if reached, and a new running unit may be scheduled.
 *
 * @param event the state change event carrying the vertex id and its new state
 */
@Override
public synchronized void onExecutionVertexStateChanged(ExecutionVertexStateChangedEvent event) {
    // Ignore every transition other than DEPLOYING.
    if (event.getNewExecutionState() != ExecutionState.DEPLOYING) {
        return;
    }
    LogicalJobVertex jobVertex =
        logicalJobVertices.get(event.getExecutionVertexID().getJobVertexID());
    jobVertex.deployingTask();
    if (jobVertex.allTasksDeploying()) {
        allTaskDeploying(jobVertex);
    }
    checkScheduleNewRunningUnit();
}
// NOTE(review): fragment — the enclosing method signature and the loop's closing
// braces lie outside this chunk.
// Collects the ExecutionVertex instances that are still in CREATED state,
// presized to the upper bound given by verticesToSchedule.
final List<ExecutionVertex> vertices = new ArrayList<>(verticesToSchedule.size()); for (ExecutionVertexID executionVertexID : verticesToSchedule) { ExecutionVertex ev = getJobVertex(executionVertexID.getJobVertexID()).getTaskVertices()[executionVertexID.getSubTaskIndex()]; if (ev.getExecutionState() == ExecutionState.CREATED) { vertices.add(ev);
private boolean isReadyToSchedule(ExecutionVertexID vertexID) { ExecutionVertexStatus vertexStatus = scheduler.getExecutionVertexStatus(vertexID); // only CREATED vertices can be scheduled if (vertexStatus.getExecutionState() != ExecutionState.CREATED) { return false; } // source vertices can be scheduled at once if (jobGraph.findVertexByID(vertexID.getJobVertexID()).isInputVertex()) { return true; } // query whether the inputs are ready overall return inputTracker.areInputsReady(vertexID); }
// NOTE(review): fragment — arguments to a call whose receiver and opening line
// are outside this chunk; passes the job vertex id, the result id, and whether
// the result's type is pipelined.
vertexID.getJobVertexID(), resultID, jobGraph.getResult(resultID).getResultType().isPipelined());
// NOTE(review): this chunk fuses fragments of several recovery routines; the
// surrounding signatures, argument lists and closing braces are outside this
// view, so the text below is not syntactically complete on its own.
// Fragment: replay an execution-vertex operation log — resolve the vertex by
// job vertex id + subtask index, then restore its recorded status.
JobVertexID jobVertexID = egOperationLog.getExecutionVertexID().getJobVertexID(); int subTaskIndex = egOperationLog.getExecutionVertexID().getSubTaskIndex(); executionGraph.getJobVertex(jobVertexID).getTaskVertices()[subTaskIndex].recoverStatus(
// Fragment: replay a result-partition operation log — same vertex resolution,
// then restore the recorded result partition status.
ResultPartitionOperationLog rpOperationLog = (ResultPartitionOperationLog) opLog; JobVertexID jobVertexID = rpOperationLog.getExecutionVertexID().getJobVertexID(); int subTaskIndex = rpOperationLog.getExecutionVertexID().getSubTaskIndex(); executionGraph.getJobVertex(jobVertexID).getTaskVertices()[subTaskIndex].recoverResultPartitionStatus(
// Fragment: materialize a list of ExecutionVertex instances from a list of ids.
List<ExecutionVertex> evs = new ArrayList<>(ids.size()); for (ExecutionVertexID id : ids) { evs.add(executionGraph.getJobVertex(id.getJobVertexID()).getTaskVertices()[id.getSubTaskIndex()]);