private void connect(Integer headOfChain, StreamEdge edge) { physicalEdgesInOrder.add(edge); Integer downStreamvertexID = edge.getTargetId(); JobVertex headVertex = jobVertices.get(headOfChain); JobVertex downStreamVertex = jobVertices.get(downStreamvertexID); StreamConfig downStreamConfig = new StreamConfig(downStreamVertex.getConfiguration()); downStreamConfig.setNumberOfInputs(downStreamConfig.getNumberOfInputs() + 1); StreamPartitioner<?> partitioner = edge.getPartitioner(); JobEdge jobEdge; if (partitioner instanceof ForwardPartitioner || partitioner instanceof RescalePartitioner) { jobEdge = downStreamVertex.connectNewDataSetAsInput( headVertex, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED_BOUNDED); } else { jobEdge = downStreamVertex.connectNewDataSetAsInput( headVertex, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED_BOUNDED); } // set strategy name so that web interface can show it. jobEdge.setShipStrategyName(partitioner.toString()); if (LOG.isDebugEnabled()) { LOG.debug("CONNECTED: {} - {} -> {}", partitioner.getClass().getSimpleName(), headOfChain, downStreamvertexID); } }
chainedOperatorVertexIds, userDefinedChainedOperatorVertexIds); TaskConfig taskConfig = new TaskConfig(jobVertex.getConfiguration()); taskConfig.setStubWrapper(new UserCodeObjectWrapper<Object>(streamNode.getInputFormat())); } else { jobGraph.addVertex(jobVertex); return new StreamConfig(jobVertex.getConfiguration());
TaskConfig t = new TaskConfig(tic.getContainingVertex().getConfiguration()); t.addChainedTask(tic.getChainedTask(), tic.getTaskConfig(), tic.getTaskName());
sourceVertexConfig = new TaskConfig(sourceVertex.getConfiguration());
TaskConfig headConfig = new TaskConfig(headVertex.getConfiguration()); int inputIndex = headConfig.getDriverStrategy().getNumInputs(); headConfig.setIterationHeadSolutionSetInputIndex(inputIndex); new TaskConfig(container.getConfiguration()).addOutputShipStrategy(ShipStrategyType.FORWARD); final TaskConfig targetVertexConfig = new TaskConfig(targetVertex.getConfiguration());
headConfig = new TaskConfig(headVertex.getConfiguration()); toReturn = null; } else { headVertex.setResources(iteration.getMinResources(), iteration.getPreferredResources()); headVertex.setInvokableClass(IterationHeadTask.class); headConfig = new TaskConfig(headVertex.getConfiguration()); headConfig.setDriver(NoOpDriver.class); toReturn = headVertex;
DualInputPlanNode target = (DualInputPlanNode) c.getTarget(); JobVertex accessingVertex = this.vertices.get(target); TaskConfig conf = new TaskConfig(accessingVertex.getConfiguration()); int inputNum = c == target.getInput1() ? 0 : c == target.getInput2() ? 1 : -1; new TaskConfig(vertex.getConfiguration()).setIterationId(descr.getId());
headConfig = new TaskConfig(headVertex.getConfiguration()); toReturn = null; } else { headVertex.setResources(iteration.getMinResources(), iteration.getPreferredResources()); headVertex.setInvokableClass(IterationHeadTask.class); headConfig = new TaskConfig(headVertex.getConfiguration()); headConfig.setDriver(NoOpDriver.class); toReturn = headVertex;
assertEquals(ResultPartitionType.PIPELINED_BOUNDED, mapPrintVertex.getInputs().get(0).getSource().getResultType()); StreamConfig sourceConfig = new StreamConfig(sourceVertex.getConfiguration()); StreamConfig mapConfig = new StreamConfig(mapPrintVertex.getConfiguration()); Map<Integer, StreamConfig> chainedConfigs = mapConfig.getTransitiveChainedTaskConfigs(getClass().getClassLoader()); StreamConfig printConfig = chainedConfigs.values().iterator().next();
final TaskConfig headConfig = new TaskConfig(headVertex.getConfiguration()); final TaskConfig headFinalOutputConfig = descr.getHeadFinalResultConfig(); this.auxVertices.add(sync); final TaskConfig syncConfig = new TaskConfig(sync.getConfiguration()); syncConfig.setGateIterativeWithNumberOfEventsUntilInterrupt(0, headVertex.getParallelism()); tailConfig = new TaskConfig(rootOfStepFunctionVertex.getConfiguration()); tailConfigOfTerminationCriterion = new TaskConfig(rootOfTerminationCriterionVertex.getConfiguration());
@Test public void testCheckpointModeTranslation() { try { // with deactivated fault tolerance, the checkpoint mode should be at-least-once StreamExecutionEnvironment deactivated = getSimpleJob(); for (JobVertex vertex : deactivated.getStreamGraph().getJobGraph().getVertices()) { assertEquals(CheckpointingMode.AT_LEAST_ONCE, new StreamConfig(vertex.getConfiguration()).getCheckpointMode()); } // with activated fault tolerance, the checkpoint mode should be by default exactly once StreamExecutionEnvironment activated = getSimpleJob(); activated.enableCheckpointing(1000L); for (JobVertex vertex : activated.getStreamGraph().getJobGraph().getVertices()) { assertEquals(CheckpointingMode.EXACTLY_ONCE, new StreamConfig(vertex.getConfiguration()).getCheckpointMode()); } // explicitly setting the mode StreamExecutionEnvironment explicit = getSimpleJob(); explicit.enableCheckpointing(1000L, CheckpointingMode.AT_LEAST_ONCE); for (JobVertex vertex : explicit.getStreamGraph().getJobGraph().getVertices()) { assertEquals(CheckpointingMode.AT_LEAST_ONCE, new StreamConfig(vertex.getConfiguration()).getCheckpointMode()); } } catch (Exception e) { e.printStackTrace(); fail(e.getMessage()); } }
Configuration configuration = chainedVertex.getConfiguration();
testHarness.setupOutputForSingletonOperatorChain(); testHarness.taskConfig = chainedVertex.getConfiguration(); final StreamConfig operatorChainStreamConfig = new StreamConfig(chainedVertex.getConfiguration()); final AsyncWaitOperator<Integer, Integer> headOperator = operatorChainStreamConfig.getStreamOperator(AsyncWaitOperatorTest.class.getClassLoader());
Configuration configuration = chainedVertex.getConfiguration();
vertex.setInvokableClass((this.currentIteration != null && node.isOnDynamicPath()) ? IterationIntermediateTask.class : BatchTask.class); config = new TaskConfig(vertex.getConfiguration()); config.setDriver(ds.getDriverClass());
private void finalizeWorksetIteration(IterationDescriptor descr) { final WorksetIterationPlanNode iterNode = (WorksetIterationPlanNode) descr.getIterationNode(); final JobVertex headVertex = descr.getHeadTask(); final TaskConfig headConfig = new TaskConfig(headVertex.getConfiguration()); final TaskConfig headFinalOutputConfig = descr.getHeadFinalResultConfig(); this.auxVertices.add(sync); syncConfig = new TaskConfig(sync.getConfiguration()); syncConfig.setGateIterativeWithNumberOfEventsUntilInterrupt(0, headVertex.getParallelism()); worksetTailConfig = taskInChain.getTaskConfig(); } else { worksetTailConfig = new TaskConfig(nextWorksetVertex.getConfiguration()); solutionDeltaConfig = taskInChain.getTaskConfig(); } else { solutionDeltaConfig = new TaskConfig(solutionDeltaVertex.getConfiguration());
producer.getConfiguration().setInteger(DATA_VOLUME_GB_CONFIG_KEY, dataVolumeGb); producer.getConfiguration().setBoolean(IS_SLOW_SENDER_CONFIG_KEY, isSlowSender); consumer.getConfiguration().setBoolean(IS_SLOW_RECEIVER_CONFIG_KEY, isSlowReceiver);
private JobVertex createDualInputVertex(DualInputPlanNode node) throws CompilerException { final String taskName = node.getNodeName(); final DriverStrategy ds = node.getDriverStrategy(); final JobVertex vertex = new JobVertex(taskName); final TaskConfig config = new TaskConfig(vertex.getConfiguration()); vertex.setResources(node.getMinResources(), node.getPreferredResources()); vertex.setInvokableClass( (this.currentIteration != null && node.isOnDynamicPath()) ? IterationIntermediateTask.class : BatchTask.class); // set user code config.setStubWrapper(node.getProgramOperator().getUserCodeWrapper()); config.setStubParameters(node.getProgramOperator().getParameters()); // set the driver strategy config.setDriver(ds.getDriverClass()); config.setDriverStrategy(ds); if (node.getComparator1() != null) { config.setDriverComparator(node.getComparator1(), 0); } if (node.getComparator2() != null) { config.setDriverComparator(node.getComparator2(), 1); } if (node.getPairComparator() != null) { config.setDriverPairComparator(node.getPairComparator()); } // assign memory, file-handles, etc. assignDriverResources(node, config); return vertex; }
@Test public void testTempInIterationTest() throws Exception { ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment(); DataSet<Tuple2<Long, Long>> input = env.readCsvFile("file:///does/not/exist").types(Long.class, Long.class); DeltaIteration<Tuple2<Long, Long>, Tuple2<Long, Long>> iteration = input.iterateDelta(input, 1, 0); DataSet<Tuple2<Long, Long>> update = iteration.getWorkset() .join(iteration.getSolutionSet()).where(0).equalTo(0) .with(new DummyFlatJoinFunction<Tuple2<Long, Long>>()); iteration.closeWith(update, update) .output(new DiscardingOutputFormat<Tuple2<Long, Long>>()); Plan plan = env.createProgramPlan(); OptimizedPlan oPlan = (new Optimizer(new Configuration())).compile(plan); JobGraphGenerator jgg = new JobGraphGenerator(); JobGraph jg = jgg.compileJobGraph(oPlan); boolean solutionSetUpdateChecked = false; for(JobVertex v : jg.getVertices()) { if(v.getName().equals("SolutionSet Delta")) { // check if input of solution set delta is temped TaskConfig tc = new TaskConfig(v.getConfiguration()); assertTrue(tc.isInputAsynchronouslyMaterialized(0)); solutionSetUpdateChecked = true; } } assertTrue(solutionSetUpdateChecked); }
/**
 * Returns the serialized {@link TaskInformation} for this job vertex, creating and
 * caching it on first access.
 *
 * <p>The serialized value bundles the vertex id, name, parallelism, max
 * parallelism, invokable class name, and the vertex configuration.
 *
 * @return the (lazily created) serialized task information
 * @throws IOException if serializing the task information fails
 */
public SerializedValue<TaskInformation> getSerializedTaskInformation() throws IOException {
    // Already built once — reuse the cached value.
    if (serializedTaskInformation != null) {
        return serializedTaskInformation;
    }

    final int parallelism = getParallelism();
    final int maxParallelism = getMaxParallelism();

    if (LOG.isDebugEnabled()) {
        LOG.debug("Creating task information for " + generateDebugString());
    }

    serializedTaskInformation = new SerializedValue<>(
            new TaskInformation(
                    jobVertex.getID(),
                    jobVertex.getName(),
                    parallelism,
                    maxParallelism,
                    jobVertex.getInvokableClassName(),
                    jobVertex.getConfiguration()));

    return serializedTaskInformation;
}