// excerpt: stage creation and scheduler selection (non-contiguous fragments, elisions marked with "...")
        stageId,
        locationFactory.createStageLocation(stageId),
        plan.getFragment(),
        remoteTaskFactory,
        session,
        // ...

PartitioningHandle partitioningHandle = plan.getFragment().getPartitioning();
if (partitioningHandle.equals(SOURCE_DISTRIBUTION)) {
    checkArgument(!plan.getFragment().getStageExecutionDescriptor().isStageGroupedExecution());
    stageSchedulers.put(stageId, newSourcePartitionedSchedulerAsStageScheduler(stage, planNodeId, splitSource, placementPolicy, splitBatchSize));
    bucketToPartition = Optional.of(new int[1]);
}
// ...
List<PlanNodeId> schedulingOrder = plan.getFragment().getPartitionedSources();
ConnectorId connectorId = partitioningHandle.getConnectorId().orElseThrow(IllegalStateException::new);
List<ConnectorPartitionHandle> connectorPartitionHandles;
boolean groupedExecutionForStage = plan.getFragment().getStageExecutionDescriptor().isStageGroupedExecution();
if (groupedExecutionForStage) {
    connectorPartitionHandles = nodePartitioningManager.listPartitionHandles(session, partitioningHandle);
}
// ...
boolean dynamicLifespanSchedule = plan.getFragment().getStageExecutionDescriptor().isDynamicLifespanSchedule();
bucketNodeMap = nodePartitioningManager.getBucketNodeMap(session, partitioningHandle, dynamicLifespanSchedule);
// ...
verify(!plan.getFragment().getStageExecutionDescriptor().isDynamicLifespanSchedule());
NodePartitionMap nodePartitionMap = partitioningCache.apply(plan.getFragment().getPartitioning());
if (groupedExecutionForStage) {
    checkState(connectorPartitionHandles.size() == nodePartitionMap.getBucketToPartition().length);
}
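For readability, here is a hedged sketch of the source-distributed branch in isolation. It assumes, as the surrounding method does, that the fragment has exactly one partitioned source and that placementPolicy and splitBatchSize are already in scope; the Iterables.getOnlyElement lookup is an assumption about where planNodeId and splitSource come from, not shown in the excerpt above.

// Source-distributed stage: take the single (planNodeId, splitSource) pair from the plan,
// install a source-partitioned scheduler for it, and send every task to output partition 0.
Map.Entry<PlanNodeId, SplitSource> entry = Iterables.getOnlyElement(plan.getSplitSources().entrySet());
PlanNodeId planNodeId = entry.getKey();
SplitSource splitSource = entry.getValue();
stageSchedulers.put(stageId, newSourcePartitionedSchedulerAsStageScheduler(
        stage, planNodeId, splitSource, placementPolicy, splitBatchSize));
bucketToPartition = Optional.of(new int[1]);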
stateMachine.setColumns(outputStageExecutionPlan.getFieldNames(), outputStageExecutionPlan.getFragment().getTypes());
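To make the shape of those two arguments concrete, a hedged illustration follows; the query, field names, and types are invented for the example, and the usual ImmutableList and type-constant (BIGINT, DOUBLE) imports are assumed.

// Hypothetical output fragment for "SELECT orderkey, totalprice FROM orders":
// the state machine records what the final stage will emit to the client.
List<String> fieldNames = ImmutableList.of("orderkey", "totalprice");
List<Type> types = ImmutableList.of(BIGINT, DOUBLE);
stateMachine.setColumns(fieldNames, types);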
private SqlStageExecution createSqlStageExecution(StageExecutionPlan tableScanPlan, NodeTaskMap nodeTaskMap)
{
    StageId stageId = new StageId(new QueryId("query"), 0);
    SqlStageExecution stage = SqlStageExecution.createSqlStageExecution(
            stageId,
            locationFactory.createStageLocation(stageId),
            tableScanPlan.getFragment(),
            new MockRemoteTaskFactory(queryExecutor, scheduledExecutor),
            TEST_SESSION,
            true,
            nodeTaskMap,
            queryExecutor,
            new NoOpFailureDetector(),
            new SplitSchedulerStats());
    stage.setOutputBuffers(createInitialEmptyOutputBuffers(PARTITIONED)
            .withBuffer(OUT, 0)
            .withNoMoreBufferIds());
    return stage;
}
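The helper above wires the stage to a single output buffer. As a hedged sketch of the more general case, the same builder chain can declare one buffer per downstream partition; the three OutputBufferId values below are illustrative, and only the calls already used in the helper (createInitialEmptyOutputBuffers, withBuffer, withNoMoreBufferIds) plus the OutputBuffers.OutputBufferId(int) constructor are assumed.

// Hedged sketch: a stage feeding three downstream partitions declares one buffer per partition.
OutputBuffers buffers = createInitialEmptyOutputBuffers(PARTITIONED)
        .withBuffer(new OutputBufferId(0), 0)
        .withBuffer(new OutputBufferId(1), 1)
        .withBuffer(new OutputBufferId(2), 2)
        .withNoMoreBufferIds();
stage.setOutputBuffers(buffers);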
// excerpt: stage creation, partitioning lookup, and parent/child linkage (non-contiguous fragments, elisions marked with "...")
        stageId,
        locationFactory.createStageLocation(stageId),
        plan.getFragment(),
        remoteTaskFactory,
        session,
        // ...

PartitioningHandle partitioningHandle = plan.getFragment().getPartitioning();
if (partitioningHandle.equals(SOURCE_DISTRIBUTION)) {
    // ...
}
// ...
NodePartitionMap nodePartitionMap = partitioningCache.apply(plan.getFragment().getPartitioning());
// ...
stageLinkages.put(stageId, new StageLinkage(plan.getFragment().getId(), parent, childStages));
private SqlStageExecution createSqlStageExecution(StageExecutionPlan tableScanPlan, NodeTaskMap nodeTaskMap)
{
    StageId stageId = new StageId(new QueryId("query"), "stage");
    SqlStageExecution stage = new SqlStageExecution(
            stageId,
            locationFactory.createStageLocation(stageId),
            tableScanPlan.getFragment(),
            new MockRemoteTaskFactory(executor),
            TEST_SESSION,
            nodeTaskMap,
            executor);
    stage.setOutputBuffers(INITIAL_EMPTY_OUTPUT_BUFFERS
            .withBuffer(OUT, 0)
            .withNoMoreBufferIds());
    return stage;
}