// NOTE(review): truncated excerpt of stream-node -> JobVertex translation; the
// "new JobVertex(" call below is cut off mid-argument-list and a second "} else {"
// has no visible opening "if", so this fragment does not compile as-is.
// Visible behavior: wraps an input format in a UserCodeObjectWrapper on the task config,
// sets resources/invokable/parallelism/max-parallelism and the default input dependency
// constraint on the vertex, registers it with jobGraph, and returns its StreamConfig view.
chainedOperatorVertexIds, userDefinedChainedOperatorVertexIds); TaskConfig taskConfig = new TaskConfig(jobVertex.getConfiguration()); taskConfig.setStubWrapper(new UserCodeObjectWrapper<Object>(streamNode.getInputFormat())); } else { jobVertex = new JobVertex( chainedNames.get(streamNodeId), jobVertexId, jobVertex.setResources(chainedMinResources.get(streamNodeId), chainedPreferredResources.get(streamNodeId)); jobVertex.setInvokableClass(streamNode.getJobVertexClass()); jobVertex.setParallelism(parallelism); } else { parallelism = jobVertex.getParallelism(); jobVertex.setMaxParallelism(streamNode.getMaxParallelism()); jobVertex.setInputDependencyConstraint(streamGraph.getExecutionConfig().getDefaultInputDependencyConstraint()); jobGraph.addVertex(jobVertex); return new StreamConfig(jobVertex.getConfiguration());
// NOTE(review): truncated excerpt (the "if (containerTaskName.startsWith(...))"
// branch is never closed before unrelated statements resume). Visible behavior:
// records the solution-set input index on the iteration head's TaskConfig, adds a
// FORWARD ship strategy to the container's output, prefixes/extends the container
// name with "CHAIN ... -> ..." to reflect operator chaining, merges the chained
// node's resources into the container, and co-locates the target vertex with the head.
TaskConfig headConfig = new TaskConfig(headVertex.getConfiguration()); int inputIndex = headConfig.getDriverStrategy().getNumInputs(); headConfig.setIterationHeadSolutionSetInputIndex(inputIndex); new TaskConfig(container.getConfiguration()).addOutputShipStrategy(ShipStrategyType.FORWARD); String containerTaskName = container.getName(); if (containerTaskName.startsWith("CHAIN ")) { container.setName(containerTaskName + " -> " + chainedTask.getTaskName()); } else { container.setName("CHAIN " + containerTaskName + " -> " + chainedTask.getTaskName()); container.setResources(container.getMinResources().merge(node.getMinResources()), container.getPreferredResources().merge(node.getPreferredResources())); targetVertex.setStrictlyCoLocatedWith(head); final TaskConfig targetVertexConfig = new TaskConfig(targetVertex.getConfiguration());
/**
 * Returns a {@link JobVertexID} to vertex name mapping for the given graph.
 */
private Map<JobVertexID, String> rememberIds(JobGraph jobGraph) {
    final Map<JobVertexID, String> vertexNamesById = new HashMap<>();
    for (final JobVertex vertex : jobGraph.getVertices()) {
        vertexNamesById.put(vertex.getID(), vertex.getName());
    }
    return vertexNamesById;
}
/**
 * Builds a minimal job graph with a single no-op vertex that is expected
 * to run successfully.
 */
private static JobGraph getWorkingJobGraph() {
    final JobVertex workingVertex = new JobVertex("Working job vertex.");
    workingVertex.setInvokableClass(NoOpInvokable.class);
    final JobGraph workingJobGraph = new JobGraph("Working testing job", workingVertex);
    return workingJobGraph;
}
private void connect(Integer headOfChain, StreamEdge edge) { physicalEdgesInOrder.add(edge); Integer downStreamvertexID = edge.getTargetId(); JobVertex headVertex = jobVertices.get(headOfChain); JobVertex downStreamVertex = jobVertices.get(downStreamvertexID); StreamConfig downStreamConfig = new StreamConfig(downStreamVertex.getConfiguration()); downStreamConfig.setNumberOfInputs(downStreamConfig.getNumberOfInputs() + 1); StreamPartitioner<?> partitioner = edge.getPartitioner(); JobEdge jobEdge; if (partitioner instanceof ForwardPartitioner || partitioner instanceof RescalePartitioner) { jobEdge = downStreamVertex.connectNewDataSetAsInput( headVertex, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED_BOUNDED); } else { jobEdge = downStreamVertex.connectNewDataSetAsInput( headVertex, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED_BOUNDED); } // set strategy name so that web interface can show it. jobEdge.setShipStrategyName(partitioner.toString()); if (LOG.isDebugEnabled()) { LOG.debug("CONNECTED: {} - {} -> {}", partitioner.getClass().getSimpleName(), headOfChain, downStreamvertexID); } }
// NOTE(review): truncated excerpt of JSON plan serialization ('gen' is a JSON
// generator; 'predecessor' at the end is defined outside this excerpt, and the
// inner loop is never closed). Visible behavior: falls back to NOT_SET/EMPTY or
// the vertex name when optional metadata is null, then writes id, parallelism,
// operator, operator_strategy and description fields, and for non-source vertices
// starts emitting one entry per input edge.
String operator = vertex.getOperatorName() != null ? vertex.getOperatorName() : NOT_SET; String operatorDescr = vertex.getOperatorDescription() != null ? vertex.getOperatorDescription() : NOT_SET; String optimizerProps = vertex.getResultOptimizerProperties() != null ? vertex.getResultOptimizerProperties() : EMPTY; String description = vertex.getOperatorPrettyName() != null ? vertex.getOperatorPrettyName() : vertex.getName(); gen.writeStringField("id", vertex.getID().toString()); gen.writeNumberField("parallelism", vertex.getParallelism()); gen.writeStringField("operator", operator); gen.writeStringField("operator_strategy", operatorDescr); gen.writeStringField("description", description); if (!vertex.isInputVertex()) { List<JobEdge> inputs = vertex.getInputs(); for (int inputNum = 0; inputNum < inputs.size(); inputNum++) { JobEdge edge = inputs.get(inputNum); gen.writeStringField("id", predecessor.getID().toString());
// NOTE(review): statement run from a throughput test's topology builder (the
// enclosing method signature is outside this excerpt; 'forwarder', 'sharingGroup',
// 'numSubtasks' and the config keys are declared elsewhere). Builds a
// producer -> forwarder -> consumer pipeline: all three vertices share one slot
// sharing group and parallelism, and are wired with ALL_TO_ALL pipelined edges.
JobVertex producer = new JobVertex("Speed Test Producer"); jobGraph.addVertex(producer); producer.setSlotSharingGroup(sharingGroup); producer.setInvokableClass(SpeedTestProducer.class); producer.setParallelism(numSubtasks); producer.getConfiguration().setInteger(DATA_VOLUME_GB_CONFIG_KEY, dataVolumeGb); producer.getConfiguration().setBoolean(IS_SLOW_SENDER_CONFIG_KEY, isSlowSender); forwarder = new JobVertex("Speed Test Forwarder"); jobGraph.addVertex(forwarder); forwarder.setSlotSharingGroup(sharingGroup); forwarder.setInvokableClass(SpeedTestForwarder.class); forwarder.setParallelism(numSubtasks); JobVertex consumer = new JobVertex("Speed Test Consumer"); jobGraph.addVertex(consumer); consumer.setSlotSharingGroup(sharingGroup); consumer.setInvokableClass(SpeedTestConsumer.class); consumer.setParallelism(numSubtasks); consumer.getConfiguration().setBoolean(IS_SLOW_RECEIVER_CONFIG_KEY, isSlowReceiver); forwarder.connectNewDataSetAsInput(producer, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED); consumer.connectNewDataSetAsInput(forwarder, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);
// NOTE(review): truncated excerpt — the "if" branch paired with the visible
// "} else {" is outside this excerpt. Both paths configure an iteration head:
// the first reuses an existing vertex (toReturn = null), the else-branch creates
// a fresh "PartialSolution (...)" vertex with the iteration's resources, installs
// IterationHeadTask with a NoOpDriver, and returns the new vertex via toReturn.
headVertex.setInvokableClass(IterationHeadTask.class); headConfig = new TaskConfig(headVertex.getConfiguration()); toReturn = null; } else { headVertex = new JobVertex("PartialSolution ("+iteration.getNodeName()+")"); headVertex.setResources(iteration.getMinResources(), iteration.getPreferredResources()); headVertex.setInvokableClass(IterationHeadTask.class); headConfig = new TaskConfig(headVertex.getConfiguration()); headConfig.setDriver(NoOpDriver.class); toReturn = headVertex;
// NOTE(review): truncated excerpt mixing two concerns ('vertex', 'pd', 'descr'
// and 'sharingGroup' are defined outside this excerpt). First part resolves which
// of a dual-input task's two inputs a channel feeds (0, 1, or -1 if neither);
// second part presumably configures an iteration sync/aux vertex: parallelism
// pinned to max parallelism, slot sharing, and the iteration id on its TaskConfig.
DualInputPlanNode target = (DualInputPlanNode) c.getTarget(); JobVertex accessingVertex = this.vertices.get(target); TaskConfig conf = new TaskConfig(accessingVertex.getConfiguration()); int inputNum = c == target.getInput1() ? 0 : c == target.getInput2() ? 1 : -1; vertex.setParallelism(pd); vertex.setMaxParallelism(pd); vertex.setSlotSharingGroup(sharingGroup); new TaskConfig(vertex.getConfiguration()).setIterationId(descr.getId());
// NOTE(review): test-setup excerpt. Obtains a client for the (test) cluster and
// builds a single-subtask vertex whose invokable blocks forever, used to keep a
// job in a running state during the test.
final ClusterClient<?> client = cluster.getClusterClient(); final JobVertex vertex = new JobVertex("Blocking vertex"); vertex.setInvokableClass(BlockingNoOpInvokable.class); vertex.setParallelism(1);
/**
 * Applies the new parallelism to the given vertex.
 *
 * @throws FlinkException if the requested parallelism exceeds the vertex's
 *     configured maximum parallelism
 */
@Override
public void acceptWithException(JobVertex jobVertex, Integer newParallelism) throws FlinkException {
    final int maxParallelism = jobVertex.getMaxParallelism();
    if (maxParallelism >= newParallelism) {
        jobVertex.setParallelism(newParallelism);
        return;
    }
    throw new FlinkException("Cannot rescale vertex " + jobVertex.getName() +
        " because its maximum parallelism " + maxParallelism +
        " is smaller than the new parallelism " + newParallelism + '.');
} },
// NOTE(review): truncated test-setup excerpt — the try-with-resources block is
// never closed and 'streamConfig' comes from outside this excerpt. Reads the
// chained vertex's configuration, forces operator deserialization via the current
// context class loader, then builds a mock environment/task/operator-chain for
// the chained operator under test.
Configuration configuration = chainedVertex.getConfiguration(); streamConfig.getStreamOperator(Thread.currentThread().getContextClassLoader()); try (MockEnvironment environment = createMockEnvironment(chainedVertex.getName())) { StreamTask<Integer, StreamMap<Integer, Integer>> mockTask = createMockTask(streamConfig, environment); OperatorChain<Integer, StreamMap<Integer, Integer>> operatorChain = createOperatorChain(streamConfig, environment, mockTask);
/**
 * Attaches the given topologically sorted job vertices to this graph: creates their
 * execution-side counterparts, wires each to its predecessors' intermediate results,
 * registers produced data sets, and finally notifies the failover strategy of the
 * newly added vertices. Statement order matters: createExecutionJobVertex must run
 * before the loop looks vertices up in 'tasks'.
 *
 * @throws JobException if two intermediate data sets share the same ID
 */
public void attachJobGraph(List<JobVertex> topologicallySorted) throws JobException {
    LOG.debug("Attaching {} topologically sorted vertices to existing job graph with {} " + "vertices and {} intermediate results.", topologicallySorted.size(), tasks.size(), intermediateResults.size());
    final ArrayList<ExecutionJobVertex> newExecJobVertices = new ArrayList<>(topologicallySorted.size());
    // NOTE(review): presumably populates the 'tasks' map consulted below — confirm in full file.
    createExecutionJobVertex(topologicallySorted);
    for (JobVertex jobVertex : topologicallySorted) {
        // The job is only stoppable if every source vertex is stoppable.
        if (jobVertex.isInputVertex() && !jobVertex.isStoppable()) {
            this.isStoppable = false;
        }
        ExecutionJobVertex ejv = tasks.get(jobVertex.getID());
        ejv.connectToPredecessors(this.intermediateResults);
        for (IntermediateResult res : ejv.getProducedDataSets()) {
            // Duplicate data-set IDs indicate a corrupt job graph — fail fast.
            IntermediateResult previousDataSet = this.intermediateResults.putIfAbsent(res.getId(), res);
            if (previousDataSet != null) {
                throw new JobException(String.format("Encountered two intermediate data set with ID %s : previous=[%s] / new=[%s]", res.getId(), res, previousDataSet));
            }
        }
        this.verticesInCreationOrder.add(ejv);
        this.numVerticesTotal += ejv.getParallelism();
        newExecJobVertices.add(ejv);
    }
    terminationFuture = new CompletableFuture<>();
    failoverStrategy.notifyNewVertices(newExecJobVertices);
}
// NOTE(review): truncated test excerpt ('sourceVertex', 'mapVertex',
// 'mapTaskVertex', 'sinkTaskVertex', 'mapInputPartitionCounts' and the trailing
// unclosed "if" come from outside this excerpt). Verifies a source(2) -> map(4)
// -> sink(2) topology: parallelism per vertex, that each map subtask has exactly
// one input edge produced by the source, and that each sink subtask's two input
// edges are both produced by the map vertex.
JobVertex sinkVertex = jobVertices.get(2); assertEquals(2, sourceVertex.getParallelism()); assertEquals(4, mapVertex.getParallelism()); assertEquals(2, sinkVertex.getParallelism()); ExecutionJobVertex execSourceVertex = eg.getJobVertex(sourceVertex.getID()); ExecutionJobVertex execMapVertex = eg.getJobVertex(mapVertex.getID()); ExecutionJobVertex execSinkVertex = eg.getJobVertex(sinkVertex.getID()); assertEquals(1, mapTaskVertex.getInputEdges(0).length); ExecutionEdge inputEdge = mapTaskVertex.getInputEdges(0)[0]; assertEquals(sourceVertex.getID(), inputEdge.getSource().getProducer().getJobvertexId()); int inputPartition = inputEdge.getSource().getPartitionNumber(); if (!mapInputPartitionCounts.containsKey(inputPartition)) { ExecutionEdge inputEdge1 = sinkTaskVertex.getInputEdges(0)[0]; ExecutionEdge inputEdge2 = sinkTaskVertex.getInputEdges(0)[1]; assertEquals(mapVertex.getID(), inputEdge1.getSource().getProducer().getJobvertexId()); assertEquals(mapVertex.getID(), inputEdge2.getSource().getProducer().getJobvertexId());
/** Returns the parallelism of the wrapped job vertex. */
public int getParallelism() {
    return jobVertex.getParallelism();
}
// NOTE(review): truncated excerpt from (presumably) iteration channel accounting.
// Wraps the source vertex's configuration, bumps the dynamic-path channel counter,
// and accumulates the number of sender tasks per receiver for the given
// distribution pattern and source/target parallelisms.
sourceVertexConfig = new TaskConfig(sourceVertex.getConfiguration()); numChannelsDynamicPath++; numDynamicSenderTasksTotal += getNumberOfSendersPerReceiver(pattern, sourceVertex.getParallelism(), targetVertex.getParallelism());
// NOTE(review): truncated excerpt ('vertex', 'program', 'graph' and
// 'sharingGroup' are defined outside this excerpt). Attaches a chained task's
// config under its name to the containing vertex's TaskConfig, then registers a
// vertex with the graph using the plan's default input dependency constraint and
// the shared slot sharing group.
TaskConfig t = new TaskConfig(tic.getContainingVertex().getConfiguration()); t.addChainedTask(tic.getChainedTask(), tic.getTaskConfig(), tic.getTaskName()); vertex.setInputDependencyConstraint(program.getOriginalPlan().getExecutionConfig().getDefaultInputDependencyConstraint()); graph.addVertex(vertex); vertex.setSlotSharingGroup(sharingGroup);
/**
 * Kicks off scheduling by collecting every subtask of every source (input)
 * vertex, in topological order, and handing them to the one-by-one scheduler.
 */
@Override
public void onSchedulingStarted() {
    final List<ExecutionVertexID> sourceSubtasks = new ArrayList<>();
    for (JobVertex vertex : jobGraph.getVerticesSortedTopologicallyFromSources()) {
        // Only source vertices are scheduled eagerly; downstream vertices follow later.
        if (!vertex.isInputVertex()) {
            continue;
        }
        for (int subtaskIndex = 0; subtaskIndex < vertex.getParallelism(); subtaskIndex++) {
            sourceSubtasks.add(new ExecutionVertexID(vertex.getID(), subtaskIndex));
        }
    }
    scheduleOneByOne(sourceSubtasks);
}
// NOTE(review): truncated test excerpt ('verticesSorted', 'sourceVertex' come
// from outside this excerpt). Asserts that both the source's produced data set
// and the map/print vertex's input use PIPELINED_BOUNDED partitions, then pulls
// the chained (print) operator's StreamConfig out of the map vertex's transitive
// chained-task configs.
JobVertex mapPrintVertex = verticesSorted.get(1); assertEquals(ResultPartitionType.PIPELINED_BOUNDED, sourceVertex.getProducedDataSets().get(0).getResultType()); assertEquals(ResultPartitionType.PIPELINED_BOUNDED, mapPrintVertex.getInputs().get(0).getSource().getResultType()); StreamConfig sourceConfig = new StreamConfig(sourceVertex.getConfiguration()); StreamConfig mapConfig = new StreamConfig(mapPrintVertex.getConfiguration()); Map<Integer, StreamConfig> chainedConfigs = mapConfig.getTransitiveChainedTaskConfigs(getClass().getClassLoader()); StreamConfig printConfig = chainedConfigs.values().iterator().next();
// NOTE(review): truncated test excerpt — the final branch's closing brace is
// outside this excerpt. Dispatches on the vertex name to assert that resource
// specs survived job-graph translation: per-operator min/preferred resources,
// merged resources for chained operators (resource3.merge(resource4)), and
// DEFAULT for the iteration tail.
if (jobVertex.getName().contains("test_source")) { assertTrue(jobVertex.getMinResources().equals(resource1)); } else if (jobVertex.getName().contains("Iteration_Source")) { assertTrue(jobVertex.getPreferredResources().equals(resource2)); } else if (jobVertex.getName().contains("test_flatMap")) { assertTrue(jobVertex.getMinResources().equals(resource3.merge(resource4))); } else if (jobVertex.getName().contains("Iteration_Tail")) { assertTrue(jobVertex.getPreferredResources().equals(ResourceSpec.DEFAULT)); } else if (jobVertex.getName().contains("test_sink")) { assertTrue(jobVertex.getMinResources().equals(resource5));