@Override public PhysicalContext resolve(PhysicalContext pctx) throws SemanticException { SparkSkewJoinProcFactory.getVisitedJoinOp().clear(); Dispatcher disp = new SparkSkewJoinTaskDispatcher(pctx); // since we may split current task, use a pre-order walker GraphWalker ogw = new PreOrderWalker(disp); ArrayList<Node> topNodes = new ArrayList<Node>(); topNodes.addAll(pctx.getRootTasks()); ogw.startWalking(topNodes, null); return pctx; }
/**
 * Walk the current node and its descendants in pre-order: the node is
 * dispatched first, then each descendant is visited recursively.
 *
 * @param nd current node (task/operator) in the graph
 * @throws SemanticException propagated from the dispatched processor
 */
@Override
protected void walk(Node nd) throws SemanticException {
  opStack.push(nd); // record the ancestor path handed to the dispatcher
  dispatch(nd, opStack); // pre-order: visit the node before its children
  // Recurse into ordinary children, if any.
  if (nd.getChildren() != null) {
    for (Node n : nd.getChildren()) {
      walk(n);
    }
  } else if (nd instanceof ConditionalTask) {
    // A childless ConditionalTask keeps its alternatives in getListTasks()
    // rather than getChildren(); walk only the parentless ones, i.e. the
    // roots of sub-graphs not reachable via the normal child links.
    for (Task n : ((ConditionalTask) nd).getListTasks()) {
      if (n.getParentTasks() == null || n.getParentTasks().isEmpty()) {
        walk(n);
      }
    }
  }
  opStack.pop(); // restore the stack on the way back up
}
}
// NOTE(review): truncated snippet — the body of the anonymous Dispatcher's
// dispatch() is not visible here; presumably it inspects each expression node.
// The walker is then started on the single expression node. Confirm against
// the full file before editing.
PreOrderWalker walker = new PreOrderWalker(new Dispatcher() {
  @Override
  public Object dispatch(Node nd, Stack<Node> stack, Object... nodeOutputs)
walker.startWalking(Collections.singletonList(expression), null);
@Override public PhysicalContext resolve(PhysicalContext pctx) throws SemanticException { this.pctx = pctx; List<Node> topNodes = new ArrayList<Node>(); topNodes.addAll(pctx.getRootTasks()); // use a pre-order walker so that DPP sink works are visited (and combined) first GraphWalker taskWalker = new PreOrderWalker(new EquivalentWorkMatcher()); HashMap<Node, Object> nodeOutput = Maps.newHashMap(); taskWalker.startWalking(topNodes, nodeOutput); return pctx; }
/** * Walk the current operator and its descendants. * * @param nd * current operator in the graph * @throws SemanticException */ @Override protected void walk(Node nd) throws SemanticException { opStack.push(nd); dispatch(nd, opStack); // move all the children to the front of queue if (nd.getChildren() != null) { for (Node n : nd.getChildren()) { walk(n); } } else if (nd instanceof ConditionalTask) { for (Task n : ((ConditionalTask) nd).getListTasks()) { if (n.getParentTasks() == null || n.getParentTasks().isEmpty()) { walk(n); } } } opStack.pop(); } }
// NOTE(review): truncated snippet — the anonymous Dispatcher's dispatch()
// body is cut off in this extract; the walker is subsequently started on a
// singleton list holding the expression node. Verify in the full source.
PreOrderWalker walker = new PreOrderWalker(new Dispatcher() {
  @Override
  public Object dispatch(Node nd, Stack<Node> stack, Object... nodeOutputs)
walker.startWalking(Collections.singletonList(expression), null);
@Override public PhysicalContext resolve(PhysicalContext pctx) throws SemanticException { SparkSkewJoinProcFactory.getVisitedJoinOp().clear(); Dispatcher disp = new SparkSkewJoinTaskDispatcher(pctx); // since we may split current task, use a pre-order walker GraphWalker ogw = new PreOrderWalker(disp); ArrayList<Node> topNodes = new ArrayList<Node>(); topNodes.addAll(pctx.getRootTasks()); ogw.startWalking(topNodes, null); return pctx; }
/** * Walk the current operator and its descendants. * * @param nd * current operator in the graph * @throws SemanticException */ @Override public void walk(Node nd) throws SemanticException { opStack.push(nd); dispatch(nd, opStack); // move all the children to the front of queue if (nd.getChildren() != null) { for (Node n : nd.getChildren()) { walk(n); } } opStack.pop(); } }
// NOTE(review): incomplete extract — the dispatch() body of this anonymous
// Dispatcher is missing here; afterwards the walker is started on the single
// expression node. Do not edit without the complete definition.
PreOrderWalker walker = new PreOrderWalker(new Dispatcher() {
  @Override
  public Object dispatch(Node nd, Stack<Node> stack, Object... nodeOutputs)
walker.startWalking(Collections.singletonList(expression), null);
/**
 * Walks every TableScan operator in the plan and runs {@code Processor} on it
 * to enrich the scanned table's properties.
 *
 * @param pctx parse context holding the operator tree
 * @return the same context after enrichment
 * @throws SemanticException if a processor fails
 */
@Override
public ParseContext transform(ParseContext pctx) throws SemanticException {
  LOG.info("TablePropertyEnrichmentOptimizer::transform().");
  // Single rule: fire Processor on every TableScanOperator ("TS%").
  Map<Rule, NodeProcessor> opRules = Maps.newLinkedHashMap();
  opRules.put(new RuleRegExp("R1", TableScanOperator.getOperatorName() + "%"), new Processor());
  WalkerCtx context = new WalkerCtx(pctx.getConf());
  Dispatcher dispatcher = new DefaultRuleDispatcher(null, opRules, context);
  // Seed the walk with all top-level operators.
  List<Node> roots = Lists.newArrayList();
  roots.addAll(pctx.getTopOps().values());
  GraphWalker walker = new PreOrderWalker(dispatcher);
  walker.startWalking(roots, null);
  LOG.info("TablePropertyEnrichmentOptimizer::transform() complete!");
  return pctx;
}
}
/** * Walk the current operator and its descendants. * * @param nd * current operator in the graph * @throws SemanticException */ @Override public void walk(Node nd) throws SemanticException { opStack.push(nd); dispatch(nd, opStack); // move all the children to the front of queue if (nd.getChildren() != null) { for (Node n : nd.getChildren()) { walk(n); } } else if (nd instanceof ConditionalTask) { for (Task n : ((ConditionalTask) nd).getListTasks()) { if (n.getParentTasks() == null || n.getParentTasks().isEmpty()) { walk(n); } } } opStack.pop(); } }
private void runSetReducerParallelism(OptimizeSparkProcContext procCtx) throws SemanticException { ParseContext pCtx = procCtx.getParseContext(); Map<Rule, NodeProcessor> opRules = new LinkedHashMap<Rule, NodeProcessor>(); opRules.put(new RuleRegExp("Set parallelism - ReduceSink", ReduceSinkOperator.getOperatorName() + "%"), new SetSparkReducerParallelism(pCtx.getConf())); // The dispatcher fires the processor corresponding to the closest matching // rule and passes the context along Dispatcher disp = new DefaultRuleDispatcher(null, opRules, procCtx); GraphWalker ogw = new PreOrderWalker(disp); // Create a list of topop nodes ArrayList<Node> topNodes = new ArrayList<Node>(); topNodes.addAll(pCtx.getTopOps().values()); ogw.startWalking(topNodes, null); }
private void runSetReducerParallelism(OptimizeSparkProcContext procCtx) throws SemanticException { ParseContext pCtx = procCtx.getParseContext(); Map<Rule, NodeProcessor> opRules = new LinkedHashMap<Rule, NodeProcessor>(); opRules.put(new RuleRegExp("Set parallelism - ReduceSink", ReduceSinkOperator.getOperatorName() + "%"), new SetSparkReducerParallelism(pCtx.getConf())); // The dispatcher fires the processor corresponding to the closest matching // rule and passes the context along Dispatcher disp = new DefaultRuleDispatcher(null, opRules, procCtx); GraphWalker ogw = new PreOrderWalker(disp); // Create a list of topop nodes ArrayList<Node> topNodes = new ArrayList<Node>(); topNodes.addAll(pCtx.getTopOps().values()); ogw.startWalking(topNodes, null); }
/**
 * Vectorizes a ReduceWork in place: walks the reducer operator tree with a
 * {@code ReduceWorkVectorizationNodeProcessor}, then installs the vectorized
 * root operator and transfers the scratch-column info back into the work.
 *
 * @param reduceWork the reduce-side work to vectorize (mutated in place)
 * @param vectorTaskColumnInfo receives scratch column types for this task
 * @throws SemanticException if vectorization of an operator fails
 */
private void vectorizeReduceWork(ReduceWork reduceWork, VectorTaskColumnInfo vectorTaskColumnInfo)
    throws SemanticException {
  LOG.info("Vectorizing ReduceWork...");
  reduceWork.setVectorMode(true);

  // For some reason, the DefaultGraphWalker does not descend down from the
  // reducer Operator as expected. We need to descend down, otherwise it breaks
  // our algorithm that determines VectorizationContext... so we use
  // PreOrderWalker instead of DefaultGraphWalker.
  Map<Rule, NodeProcessor> opRules = new LinkedHashMap<Rule, NodeProcessor>();
  ReduceWorkVectorizationNodeProcessor vnp =
      new ReduceWorkVectorizationNodeProcessor(vectorTaskColumnInfo);
  addReduceWorkRules(opRules, vnp);
  Dispatcher disp = new DefaultRuleDispatcher(vnp, opRules, null);
  GraphWalker ogw = new PreOrderWalker(disp);

  // Walk the reduce operator tree starting at the single reducer root.
  ArrayList<Node> topNodes = new ArrayList<Node>();
  topNodes.add(reduceWork.getReducer());
  LOG.info("vectorizeReduceWork reducer Operator: " + reduceWork.getReducer().getName() + "...");
  HashMap<Node, Object> nodeOutput = new HashMap<Node, Object>();
  ogw.startWalking(topNodes, nodeOutput);

  // Necessary since we are vectorizing the root operator in reduce: replace
  // the original reducer with the vectorized root produced by the processor.
  reduceWork.setReducer(vnp.getRootVectorOp());

  // Hand the scratch column types discovered during the walk back to the work.
  vectorTaskColumnInfo.setScratchTypeNameArray(vnp.getVectorScratchColumnTypeNames());
  vectorTaskColumnInfo.transferToBaseWork(reduceWork);

  if (LOG.isDebugEnabled()) {
    debugDisplayAllMaps(reduceWork);
  }
}
// Pre-order walker: dispatches each node before visiting its children.
GraphWalker ogw = new PreOrderWalker(disp);
// Walk the graph in pre-order, handing each node to the dispatcher first.
GraphWalker ogw = new PreOrderWalker(disp);
/**
 * Resolves Spark skew joins by walking the root tasks with a
 * {@code SparkSkewJoinTaskDispatcher}.
 *
 * @param pctx physical plan context
 * @return the (possibly rewritten) context
 * @throws SemanticException on dispatch failure
 */
@Override
public PhysicalContext resolve(PhysicalContext pctx) throws SemanticException {
  // Clear join operators remembered from a previous resolve() run.
  SparkSkewJoinProcFactory.getVisitedJoinOp().clear();
  Dispatcher disp = new SparkSkewJoinTaskDispatcher(pctx);
  // since we may split current task, use a pre-order walker
  GraphWalker ogw = new PreOrderWalker(disp);
  ArrayList<Node> topNodes = new ArrayList<Node>();
  topNodes.addAll(pctx.getRootTasks());
  ogw.startWalking(topNodes, null);
  return pctx;
}
// Dispatcher with getNoUnion() as the default processor, fired when no
// specific union rule matches the current node.
Dispatcher disp = new DefaultRuleDispatcher(UnionProcFactory.getNoUnion(), opRules, uCtx);
// Pre-order: parents are dispatched before their children.
GraphWalker ogw = new PreOrderWalker(disp);
@Override public ParseContext transform(ParseContext pctx) throws SemanticException { if (!pctx.getConf().getVar(ConfVars.HIVE_EXECUTION_ENGINE).equals("tez") || !pctx.getConf().getBoolVar(ConfVars.TEZ_DYNAMIC_PARTITION_PRUNING)) { return pctx; } Map<Rule, NodeProcessor> opRules = new LinkedHashMap<Rule, NodeProcessor>(); opRules.put(new RuleRegExp("R1", "(" + TableScanOperator.getOperatorName() + "%" + ".*" + ReduceSinkOperator.getOperatorName() + "%" + JoinOperator.getOperatorName() + "%)"), new JoinSynthetic()); // The dispatcher fires the processor corresponding to the closest matching // rule and passes the context along SyntheticContext context = new SyntheticContext(pctx); Dispatcher disp = new DefaultRuleDispatcher(null, opRules, context); GraphWalker ogw = new PreOrderWalker(disp); // Create a list of top op nodes List<Node> topNodes = new ArrayList<Node>(); topNodes.addAll(pctx.getTopOps().values()); ogw.startWalking(topNodes, null); return pctx; }
// Pre-order traversal: each node is dispatched before its children are walked.
GraphWalker ogw = new PreOrderWalker(disp);