/**
 * Creates an archived (immutable snapshot) copy of the given execution vertex.
 *
 * @param vertex the live execution vertex to archive
 */
public ArchivedExecutionVertex(ExecutionVertex vertex) {
    // Snapshot the identifying information first.
    this.subTaskIndex = vertex.getParallelSubtaskIndex();
    this.taskNameWithSubtask = vertex.getTaskNameWithSubtaskIndex();

    // Capture the execution history: prior attempts plus the current attempt,
    // archived so it no longer references live runtime state.
    this.priorExecutions = vertex.getCopyOfPriorExecutionsList();
    this.currentExecution = vertex.getCurrentExecutionAttempt().archive();
}
assertEquals(1, mapTaskVertex.getNumberOfInputs()); assertEquals(1, mapTaskVertex.getInputEdges(0).length); ExecutionEdge inputEdge = mapTaskVertex.getInputEdges(0)[0]; assertEquals(sourceVertex.getID(), inputEdge.getSource().getProducer().getJobvertexId()); int inputPartition = inputEdge.getSource().getPartitionNumber(); if (!mapInputPartitionCounts.containsKey(inputPartition)) { assertEquals(1, sinkTaskVertex.getNumberOfInputs()); assertEquals(2, sinkTaskVertex.getInputEdges(0).length); ExecutionEdge inputEdge1 = sinkTaskVertex.getInputEdges(0)[0]; ExecutionEdge inputEdge2 = sinkTaskVertex.getInputEdges(0)[1]; assertEquals(mapVertex.getID(), inputEdge1.getSource().getProducer().getJobvertexId()); assertEquals(mapVertex.getID(), inputEdge2.getSource().getProducer().getJobvertexId());
/**
 * Cancels every execution vertex of this task, requesting each currently
 * running vertex execution to stop.
 */
public void cancel() {
    for (ExecutionVertex vertex : getTaskVertices()) {
        vertex.cancel();
    }
}
/**
 * Gets a slot suitable for the given task vertex. This method prefers slots that are local
 * (with respect to {@link ExecutionVertex#getPreferredLocationsBasedOnInputs()}), but falls
 * back to non-local slots if no local slot is available. Returns {@code null} when this
 * sharing group has no slot available for the given JobVertexID.
 *
 * @param vertex The vertex to allocate a slot for.
 *
 * @return A slot to execute the given ExecutionVertex in, or null, if none is available.
 */
public SimpleSlot getSlotForTask(ExecutionVertex vertex) {
    // Delegate to the id/locations overload; locality preference comes from the
    // vertex's input placement.
    JobVertexID vertexId = vertex.getJobvertexId();
    Iterable<TaskManagerLocation> preferredLocations = vertex.getPreferredLocationsBasedOnInputs();
    return getSlotForTask(vertexId, preferredLocations);
}
final Execution consumer = consumerVertex.getCurrentExecutionAttempt(); final ExecutionState consumerState = consumer.getState(); .getCurrentExecutionAttempt(); consumerVertex.cachePartitionInfo(PartialInputChannelDeploymentDescriptor.fromEdge( partition, partitionExecution)); () -> { try { final ExecutionGraph executionGraph = consumerVertex.getExecutionGraph(); consumerVertex.scheduleForExecution( executionGraph.getSlotProvider(), executionGraph.isQueuedSchedulingAllowed(), consumerVertex.fail(new IllegalStateException("Could not schedule consumer " + "vertex " + consumerVertex, t)); if (consumerVertex.getExecutionState() == RUNNING) { consumerVertex.sendPartitionInfos(); .getCurrentAssignedResource().getTaskManagerLocation(); final ResourceID partitionTaskManager = partitionTaskManagerLocation.getResourceID(); .getCurrentExecutionAttempt(); consumerVertex.cachePartitionInfo(PartialInputChannelDeploymentDescriptor .fromEdge(partition, partitionExecution));
/**
 * Returns the ID of the job vertex that this execution belongs to.
 *
 * @return the JobVertexID of the underlying vertex
 */
public JobVertexID getJobVertexId() {
    // Resolve through the execution's vertex rather than caching the id locally.
    ExecutionVertex vertex = this.vertexExecution.getVertex();
    return vertex.getJobvertexId();
}
/**
 * Caches the given partition deployment info on the current execution attempt,
 * to be sent to the consumer once it is ready.
 *
 * @param partitionInfo the partial input channel deployment descriptor to cache
 */
public void cachePartitionInfo(PartialInputChannelDeploymentDescriptor partitionInfo) {
    Execution currentAttempt = getCurrentExecutionAttempt();
    currentAttempt.cachePartitionInfo(partitionInfo);
}
@Override
public String toString() {
    // The task name with subtask index (e.g. "MyTask (2/4)") is the most
    // useful human-readable identifier for this vertex.
    final String displayName = getTaskNameWithSubtaskIndex();
    return displayName;
}
final ExecutionVertex consumerVertex = edge.getTarget(); final Execution consumer = consumerVertex.getCurrentExecutionAttempt(); final ExecutionState consumerState = consumer.getState(); .getCurrentExecutionAttempt(); consumerVertex.cachePartitionInfo(PartialInputChannelDeploymentDescriptor.fromEdge( partition, partitionExecution)); if(consumerVertex.getExecutionState() == RUNNING){ consumerVertex.sendPartitionInfos(); .getCurrentAssignedResource().getTaskManagerLocation(); final ResourceID partitionTaskManager = partitionTaskManagerLocation.getResourceID(); .getCurrentExecutionAttempt(); consumerVertex.cachePartitionInfo(PartialInputChannelDeploymentDescriptor .fromEdge(partition, partitionExecution)); if (consumerVertex.getExecutionState() == RUNNING) { consumerVertex.sendPartitionInfos();
public void stop() throws StoppingException { if (isStoppable) { for (ExecutionVertex ev : this.getAllExecutionVertices()) { if (ev.getNumberOfInputs() == 0) { // send signal to sources only ev.stop(); } } } else { throw new StoppingException("This job is not stoppable."); } }
@Override
public int getParallelSubtaskIndex() {
    // Delegate to the underlying vertex; this object adds no state of its own here.
    final int subtaskIndex = getVertex().getParallelSubtaskIndex();
    return subtaskIndex;
}
ExecutionAttemptID taskId = task.getCurrentExecutionAttempt().getAttemptId(); if (sampledTasks.contains(taskId)) { subtaskIndexMap.put(taskId, task.getParallelSubtaskIndex()); } else { LOG.debug("Outdated sample. A task, which is part of the " +
final ExecutionVertex consumerVertex = edge.getTarget(); final Execution consumer = consumerVertex.getCurrentExecutionAttempt(); final ExecutionState consumerState = consumer.getState(); .getCurrentExecutionAttempt(); consumerVertex.cachePartitionInfo(PartialInputChannelDeploymentDescriptor.fromEdge(partition, partitionExecution)); consumerVertex.getCurrentExecutionAttempt().sendPartitionInfoAsync(); .getCurrentExecutionAttempt(); consumerVertex.cachePartitionInfo(PartialInputChannelDeploymentDescriptor .fromEdge(partition, partitionExecution)); if (consumerVertex.getExecutionState() == RUNNING) { consumerVertex.getCurrentExecutionAttempt().sendPartitionInfoAsync();
InputSplit nextInputSplit = executionVertex.getNextInputSplitFromAssgined(operatorID); if (nextInputSplit == null) { final InputSplitAssigner splitAssigner = vertex.getSplitAssigner(operatorID); final int taskId = execution.getVertex().getParallelSubtaskIndex(); final String host = slot != null ? slot.getTaskManagerLocation().getHostname() : null; nextInputSplit = splitAssigner.getNextInputSplit(host, taskId); executionVertex.inputSplitAssigned(operatorID, nextInputSplit); IOException reason = new IOException("Could not serialize the next input split of class " + nextInputSplit.getClass() + ".", ex); executionVertex.fail(reason); return FutureUtils.completedExceptionally(reason);
int numInternalSubpartitions = 0; int numInternalResultPartitions = 0; for (IntermediateResultPartition irp : getProducedPartitions().values()) { if (!(shuffleType == BlockingShuffleType.YARN && irp.getIntermediateResult().getResultType().isBlocking())) { for (List<ExecutionEdge> consumer : irp.getConsumers()) { int numExternalBlockingChannels = 0; int numExternalBlockingGates = 0; for (int j = 0; j < getNumberOfInputs(); ++j) { ExecutionEdge[] edges = getInputEdges(j);
JobVertexID jobvertexId = executionVertex.getJobvertexId(); jobVertexIDSetMap.putIfAbsent(jobvertexId, new HashSet<>()); jobVertexIDSetMap.get(jobvertexId).add(executionVertex.getParallelSubtaskIndex());
if (inputs.get(inputNum).getResultType().isPipelined()) { for (ExecutionEdge edge : ev.getInputEdges(inputNum)) { final ExecutionVertex predecessor = edge.getSource().getProducer(); final ArrayList<ExecutionVertex> predecessorRegion = vertexToRegion.get(predecessor);
executionVertices.forEach(v -> tasks.put(v.getJobvertexId(), v.getJobVertex()));
public ArchivedExecutionJobVertex(ExecutionJobVertex jobVertex) { this.taskVertices = new ArchivedExecutionVertex[jobVertex.getTaskVertices().length]; for (int x = 0; x < taskVertices.length; x++) { taskVertices[x] = jobVertex.getTaskVertices()[x].archive(); } archivedUserAccumulators = jobVertex.getAggregatedUserAccumulatorsStringified(); this.id = jobVertex.getJobVertexId(); this.name = jobVertex.getJobVertex().getName(); this.parallelism = jobVertex.getParallelism(); this.maxParallelism = jobVertex.getMaxParallelism(); }
ExecutionVertex vertex = new ExecutionVertex( this, i,