/**
 * Collects every operator of the given class reachable from any of the
 * supplied start operators (a start itself is included when it matches).
 *
 * @param starts root operators to search from; null entries are tolerated
 * @param clazz  operator class to look for
 * @return set of matching operators, possibly empty, never null
 */
public static <T> Set<T> findOperators(Collection<Operator<?>> starts, Class<T> clazz) {
  Set<T> matches = new HashSet<T>();
  for (Operator<?> root : starts) {
    if (root != null) {
      findOperators(root, clazz, matches);
    }
  }
  return matches;
}
// Gather all FileSinkOperators reachable from this TableScan and fold them
// into the running aggregate set (fsOpsAll presumably accumulates across all
// top operators — confirm against the enclosing loop).
Set<FileSinkOperator> fsOps = OperatorUtils.findOperators( ts, FileSinkOperator.class); fsOpsAll.addAll(fsOps);
/**
 * Searches the operator trees hanging off each non-null start operator and
 * returns all operators assignable to {@code clazz}.
 *
 * @param starts collection of root operators; null elements are skipped
 * @param clazz  type of operator to collect
 * @return the collected operators (never null)
 */
public static <T> Set<T> findOperators(Collection<Operator<?>> starts, Class<T> clazz) {
  final Set<T> result = new HashSet<T>();
  for (Operator<?> candidate : starts) {
    if (candidate == null) {
      continue; // tolerate null entries in the input collection
    }
    findOperators(candidate, clazz, result);
  }
  return result;
}
/**
 * Convenience overload: collects all operators of {@code clazz} in the tree
 * rooted at {@code start} into a fresh set.
 */
public static <T> Set<T> findOperators(Operator<?> start, Class<T> clazz) {
  Set<T> accumulator = new HashSet<T>();
  return findOperators(start, clazz, accumulator);
}
/**
 * Single-root variant: returns every operator of the requested class found
 * below (and including) {@code start}.
 */
public static <T> Set<T> findOperators(Operator<?> start, Class<T> clazz) {
  final Set<T> collected = new HashSet<T>();
  findOperators(start, clazz, collected);
  return collected;
}
/**
 * Returns the unique operator of the given class below {@code start}, or
 * null when there are zero or several matches.
 */
public static <T> T findSingleOperator(Operator<?> start, Class<T> clazz) {
  Set<T> matches = findOperators(start, clazz, new HashSet<T>());
  if (matches.size() != 1) {
    return null; // ambiguous or absent — caller expects exactly one
  }
  return matches.iterator().next();
}
/**
 * Looks for exactly one operator of {@code clazz} in the tree rooted at
 * {@code start}; any other match count yields null.
 */
public static <T> T findSingleOperator(Operator<?> start, Class<T> clazz) {
  final Set<T> hits = findOperators(start, clazz, new HashSet<T>());
  return (hits.size() == 1) ? hits.iterator().next() : null;
}
/**
 * Recursively collects all operators assignable to {@code clazz} in the
 * operator DAG rooted at {@code start} ({@code start} itself included) into
 * {@code found}, which is also returned for call chaining.
 *
 * <p>A visited set guards the walk so an operator reachable through several
 * parents (diamond shapes in the plan DAG) is expanded only once; the naive
 * recursion re-walks such subtrees once per parent, which can grow
 * exponentially with plan depth. Results are identical since {@code found}
 * is a set. NOTE(review): assumes Operator uses identity equality (no
 * equals override visible) — confirm before relying on this in a plan where
 * distinct operators could compare equal.
 */
private static <T> Set<T> findOperators(Operator<?> start, Class<T> clazz, Set<T> found) {
  return findOperators(start, clazz, found, new HashSet<Operator<?>>());
}

/** Worker carrying the visited set; see the three-argument overload. */
@SuppressWarnings("unchecked")
private static <T> Set<T> findOperators(Operator<?> start, Class<T> clazz, Set<T> found,
    Set<Operator<?>> visited) {
  if (!visited.add(start)) {
    return found; // already expanded via another parent
  }
  if (clazz.isInstance(start)) {
    found.add((T) start);
  }
  if (start.getChildOperators() != null) {
    for (Operator<?> child : start.getChildOperators()) {
      findOperators(child, clazz, found, visited);
    }
  }
  return found;
}
/**
 * Depth-first collection of operators of {@code clazz} into {@code found},
 * starting from (and including) {@code start}; returns {@code found}.
 */
private static <T> Set<T> findOperators(Operator<?> start, Class<T> clazz, Set<T> found) {
  if (clazz.isInstance(start)) {
    // Class.cast is checked, so no unchecked-cast suppression is needed;
    // the isInstance guard guarantees it cannot throw here.
    found.add(clazz.cast(start));
  }
  if (start.getChildOperators() != null) {
    for (Operator<?> next : start.getChildOperators()) {
      findOperators(next, clazz, found);
    }
  }
  return found;
}
/**
 * Pushes plan metadata into the JobConf: table descriptors of every
 * partition, output-table info of every FileSinkOperator in the mapper
 * trees, and finally gives each IConfigureJobConf operator a chance to
 * adjust the configuration itself.
 */
@Override
public void configureJobConf(JobConf job) {
  for (PartitionDesc part : aliasToPartnInfo.values()) {
    PlanUtils.configureJobConf(part.getTableDesc(), job);
  }
  Collection<Operator<?>> mapperRoots = aliasToWork.values();
  for (FileSinkOperator sink : OperatorUtils.findOperators(mapperRoots, FileSinkOperator.class)) {
    PlanUtils.configureJobConf(sink.getConf().getTableInfo(), job);
  }
  for (IConfigureJobConf configurable :
      OperatorUtils.findOperators(mapperRoots, IConfigureJobConf.class)) {
    configurable.configureJobConf(job);
  }
}
/**
 * Configures the JobConf with the output-table info of every
 * FileSinkOperator found below the reducer; no-op when there is no
 * reduce-side plan.
 */
@Override
public void configureJobConf(JobConf job) {
  if (reducer == null) {
    return; // nothing on the reduce side to configure
  }
  for (FileSinkOperator sink : OperatorUtils.findOperators(reducer, FileSinkOperator.class)) {
    PlanUtils.configureJobConf(sink.getConf().getTableInfo(), job);
  }
}
/**
 * Applies the table info of each reduce-side FileSinkOperator to the given
 * JobConf. Skipped entirely when this work has no reducer.
 */
@Override
public void configureJobConf(JobConf job) {
  if (reducer != null) {
    final Set<FileSinkOperator> sinks =
        OperatorUtils.findOperators(reducer, FileSinkOperator.class);
    for (FileSinkOperator fileSink : sinks) {
      PlanUtils.configureJobConf(fileSink.getConf().getTableInfo(), job);
    }
  }
}
/** * This method gathers the TS operators with DPP from the context and * stores them into the input optimization cache. */ private static void gatherDPPTableScanOps( ParseContext pctx, SharedWorkOptimizerCache optimizerCache) throws SemanticException { // Find TS operators with partition pruning enabled in plan // because these TS may potentially read different data for // different pipeline. // These can be: // 1) TS with DPP. // 2) TS with semijoin DPP. Map<String, TableScanOperator> topOps = pctx.getTopOps(); Collection<Operator<? extends OperatorDesc>> tableScanOps = Lists.<Operator<?>>newArrayList(topOps.values()); Set<AppMasterEventOperator> s = OperatorUtils.findOperators(tableScanOps, AppMasterEventOperator.class); for (AppMasterEventOperator a : s) { if (a.getConf() instanceof DynamicPruningEventDesc) { DynamicPruningEventDesc dped = (DynamicPruningEventDesc) a.getConf(); optimizerCache.tableScanToDPPSource.put(dped.getTableScan(), a); } } for (Entry<ReduceSinkOperator, SemiJoinBranchInfo> e : pctx.getRsToSemiJoinBranchInfo().entrySet()) { optimizerCache.tableScanToDPPSource.put(e.getValue().getTsOp(), e.getKey()); } LOG.debug("DPP information stored in the cache: {}", optimizerCache.tableScanToDPPSource); }
/**
 * Pushes metadata into the JobConf: the table descriptor of every
 * partition, then the output-table info of every FileSinkOperator found in
 * the mapper operator trees.
 */
@Override
public void configureJobConf(JobConf job) {
  for (PartitionDesc partDesc : aliasToPartnInfo.values()) {
    PlanUtils.configureJobConf(partDesc.getTableDesc(), job);
  }
  final Collection<Operator<?>> mapperRoots = aliasToWork.values();
  final Set<FileSinkOperator> sinks =
      OperatorUtils.findOperators(mapperRoots, FileSinkOperator.class);
  for (FileSinkOperator sink : sinks) {
    PlanUtils.configureJobConf(sink.getConf().getTableInfo(), job);
  }
}
// Step up to the first parent, then collect every AppMasterEventOperator
// reachable below it so each can be compared against the current event.
// NOTE(review): assumes the operator has at least one parent here — confirm
// against the enclosing method's guards.
op = op.getParentOperators().get(0); Set<AppMasterEventOperator> eventOps = OperatorUtils.findOperators( op, AppMasterEventOperator.class); for (AppMasterEventOperator otherEvent : eventOps) {
// Resolve the limit descriptor and the scanned table, then gather all
// FilterOperators below the TableScan — presumably used to decide whether
// the global-limit optimization applies; verify in the surrounding method.
LimitDesc tempGlobalLimitDesc = tempGlobalLimit.getConf(); Table tab = ts.getConf().getTableMetadata(); Set<FilterOperator> filterOps = OperatorUtils.findOperators(ts, FilterOperator.class);
// Branch taken when more than one GroupByOperator sits below this
// ReduceSink — the intent of the >1 threshold is not visible here; confirm
// against the enclosing rule.
if (OperatorUtils.findOperators(rs, GroupByOperator.class).size() > 1){
// Look up the limit descriptor and table metadata, and collect the filters
// under the TableScan for inspection by the code that follows (outside this
// view).
LimitDesc tempGlobalLimitDesc = tempGlobalLimit.getConf(); Table tab = ts.getConf().getTableMetadata(); Set<FilterOperator> filterOps = OperatorUtils.findOperators(ts, FilterOperator.class);
// Walk every mapper tree of the local work and feed each FileSinkOperator
// to collectFileSinkDescs (which, per its arguments, records sinks into
// acidSinks — exact criteria live in that helper).
Set<FileSinkOperator> fileSinkOperatorSet = OperatorUtils.findOperators(((MapredLocalWork) work).getAliasToWork().values(), FileSinkOperator.class); for(FileSinkOperator fsop : fileSinkOperatorSet) { collectFileSinkDescs(fsop, acidSinks);
// From the operator five frames up the walker stack, take its first parent
// and collect the ReduceSinkOperators beneath it. NOTE(review): assumes the
// stack is at least five deep and the parent list is non-empty when this
// rule fires — confirm against the walker's match pattern.
Set<ReduceSinkOperator> rsOps = OperatorUtils.findOperators( ((Operator<?>) stack.get(stack.size() - 5)).getParentOperators().get(0), ReduceSinkOperator.class);