/**
 * Builds the ordered collection of physical rewrite rules that are applied at all levels
 * of the plan (top level and nested plans alike).
 *
 * <p>Order matters: physical operators are assigned ({@code SetAlgebricksPhysicalOperatorsRule})
 * and the execution mode is set before structural properties are enforced, and
 * project/limit push-down runs last.
 *
 * @return a new mutable list containing the physical rewrite rules, in application order
 */
public static final List<IAlgebraicRewriteRule> buildPhysicalRewritesAllLevelsRuleCollection() {
    // Canonical modifier order (public static final); `final` kept for signature compatibility
    // although it is redundant on a static method.
    List<IAlgebraicRewriteRule> physicalPlanRewrites = new LinkedList<>();
    physicalPlanRewrites.add(new PullSelectOutOfEqJoin());
    physicalPlanRewrites.add(new PushFunctionsOntoEqJoinBranches());
    physicalPlanRewrites.add(new SetAlgebricksPhysicalOperatorsRule());
    physicalPlanRewrites.add(new SetExecutionModeRule());
    physicalPlanRewrites.add(new EnforceStructuralPropertiesRule());
    physicalPlanRewrites.add(new PushProjectDownRule());
    physicalPlanRewrites.add(new CopyLimitDownRule());
    return physicalPlanRewrites;
}
/**
 * Inserts enforcer operators (local sort/group enforcers and, for top-level plans,
 * partitioning exchanges) between {@code op} and its child at {@code childIndex} so that
 * the child's delivered properties satisfy the required ones.
 *
 * @param op                   the parent operator whose child needs enforcers
 * @param childIndex           index of the child input being fixed up
 * @param diffPropertiesVector the properties still missing (required minus delivered)
 * @param required             the full required properties at the child
 * @param deliveredByChild     the properties the child currently delivers
 * @param domain               target node domain for partitioning enforcers
 * @param nestedPlan           true when operating inside a nested plan (no partitioning
 *                             enforcers are added in that case)
 * @param context              optimization context
 * @throws AlgebricksException if enforcer construction fails
 */
private void addEnforcers(AbstractLogicalOperator op, int childIndex,
        IPhysicalPropertiesVector diffPropertiesVector, IPhysicalPropertiesVector required,
        IPhysicalPropertiesVector deliveredByChild, INodeDomain domain, boolean nestedPlan,
        IOptimizationContext context) throws AlgebricksException {
    IPartitioningProperty pp = diffPropertiesVector.getPartitioningProperty();
    if (pp == null || pp.getPartitioningType() == PartitioningType.UNPARTITIONED) {
        // Only local (sort/group) properties are missing: add local enforcers first, then
        // re-read the child's delivered properties because the plan was just mutated.
        addLocalEnforcers(op, childIndex, diffPropertiesVector.getLocalProperties(), nestedPlan, context);
        // NOTE(review): input 0 is read here while childIndex is used elsewhere in this
        // method — presumably intentional for this branch, but confirm it should not be
        // op.getInputs().get(childIndex).
        IPhysicalPropertiesVector deliveredByNewChild =
                ((AbstractLogicalOperator) op.getInputs().get(0).getValue()).getDeliveredPhysicalProperties();
        if (!nestedPlan) {
            addPartitioningEnforcers(op, childIndex, pp, required, deliveredByNewChild, domain, context);
        }
    } else {
        // A partitioning property is missing: add the partitioning enforcer first, then
        // recompute the remaining diff against the newly inserted child and add any local
        // enforcers that are still needed.
        if (!nestedPlan) {
            addPartitioningEnforcers(op, childIndex, pp, required, deliveredByChild, pp.getNodeDomain(), context);
        }
        AbstractLogicalOperator newChild = (AbstractLogicalOperator) op.getInputs().get(childIndex).getValue();
        IPhysicalPropertiesVector newDiff = newPropertiesDiff(newChild, required, true, context);
        if (AlgebricksConfig.ALGEBRICKS_LOGGER.isTraceEnabled()) {
            AlgebricksConfig.ALGEBRICKS_LOGGER.trace(">>>> New properties diff: " + newDiff + "\n");
        }
        if (newDiff != null) {
            addLocalEnforcers(op, childIndex, newDiff.getLocalProperties(), nestedPlan, context);
        }
    }
}
// --- Fragment (enclosing method signature not visible in this chunk) ---
// Builds the sampling sub-plan for a dynamic range map: the child's output is replicated;
// one replica feeds a local-sampling aggregate whose result is globally aggregated into a
// range map, which a forward operator then attaches to the other replica.
// NOTE(review): several names used below (replicateOpRef, samplingFun, samplingResultVar,
// rangeMapResultVar, localAggOp, rangeMapKey, rangeMapVar, targetDomain,
// partitioningColumns) are declared outside this visible span — do not refactor in isolation.
SourceLocation sourceLoc = parentOp.getSourceLocation();
// Replicate the child so the same tuples are both sampled and forwarded.
ReplicateOperator replicateOp = createReplicateOperator(parentOp.getInputs().get(childIndex), ctx, sourceLoc);
// One-to-one exchanges isolate the two replica branches.
ExchangeOperator exchToLocalAgg = createOneToOneExchangeOp(replicateOpRef, ctx);
ExchangeOperator exchToForward = createOneToOneExchangeOp(replicateOpRef, ctx);
MutableObject<ILogicalOperator> exchToLocalAggRef = new MutableObject<>(exchToLocalAgg);
MutableObject<ILogicalOperator> exchToForwardRef = new MutableObject<>(exchToForward);
List<Mutable<ILogicalExpression>> rangeMapFun = new ArrayList<>(1);
// Create the local sampling function and the global range-map function over it.
createAggregateFunction(ctx, samplingResultVar, samplingFun, rangeMapResultVar, rangeMapFun,
        targetDomain.cardinality(), partitioningColumns, sourceLoc);
// Local aggregate (sampling) feeds the global aggregate (range-map construction).
createAggregate(samplingResultVar, false, samplingFun, exchToLocalAggRef, ctx, sourceLoc);
MutableObject<ILogicalOperator> localAgg = new MutableObject<>(localAggOp);
AggregateOperator globalAggOp = createAggregate(rangeMapResultVar, true, rangeMapFun, localAgg, ctx, sourceLoc);
MutableObject<ILogicalOperator> globalAgg = new MutableObject<>(globalAggOp);
// Forward operator broadcasts the computed range map alongside the forwarded data branch.
ForwardOperator forward = createForward(rangeMapKey, rangeMapVar, exchToForwardRef, globalAgg, ctx, sourceLoc);
MutableObject<ILogicalOperator> forwardRef = new MutableObject<>(forward);
// --- Fragment (appears truncated: braces do not balance within this visible span) ---
// Child-by-child physical optimization loop: each child is optimized against its required
// properties, its delivered properties are recomputed, and enforcers are inserted when the
// delivered/required diff is non-empty; redundant sorts discovered along the way are flagged.
// NOTE(review): reqdProperties is assigned null in this span yet indexed as
// reqdProperties[j] below, and j/childIndex/diff/rqd/newChild/childrenDomain are declared
// outside the visible span — this chunk cannot be evaluated or reshaped in isolation.
optimizeUsingConstraintsAndEquivClasses(op);
PhysicalRequirements pr = op.getRequiredPhysicalPropertiesForChildren(required, context);
IPhysicalPropertiesVector[] reqdProperties = null;
for (Mutable<ILogicalOperator> childRef : op.getInputs()) {
    AbstractLogicalOperator child = (AbstractLogicalOperator) childRef.getValue();
    // Recurse into the child first, then read what it actually delivers.
    changed |= physOptimizeOp(childRef, reqdProperties[j], nestedPlan, context);
    child.computeDeliveredPhysicalProperties(context);
    IPhysicalPropertiesVector delivered = child.getDeliveredPhysicalProperties();
    int startChildIndex = getStartChildIndex(op, pr, nestedPlan, context);
    IPartitioningProperty firstDeliveredPartitioning = null;
    if (isRedundantSort(opRef, delivered, diff, context)) {
        opIsRedundantSort = true;
        // Insert enforcers for the missing properties, then re-check the (new) child.
        addEnforcers(op, childIndex, diff, rqd, delivered, childrenDomain, nestedPlan, context);
        delivered = newChild.getDeliveredPhysicalProperties();
        IPhysicalPropertiesVector newDiff = newPropertiesDiff(newChild, rqd, mayExpandPartitioningProperties, context);
        if (loggerTraceEnabled) {
            AlgebricksConfig.ALGEBRICKS_LOGGER.trace(">>>> New properties diff: " + newDiff + "\n");
            if (isRedundantSort(opRef, delivered, newDiff, context)) {
                opIsRedundantSort = true;
                break;
                AbstractOperatorWithNestedPlans nested = (AbstractOperatorWithNestedPlans) op;
/**
 * Picks the hash-based exchange connector for the child at {@code childIndex}.
 *
 * <p>Plain hash partitioning is used unless the {@code HASH_MERGE} hint is set to true
 * and the child's delivered local order properties already satisfy the required ones, in
 * which case a hash-partition-merge connector preserves that order across the exchange.
 *
 * @param ctx              optimization context (source of config, equivalence classes, FDs)
 * @param deliveredByChild properties the child currently delivers
 * @param domain           target node domain of the exchange
 * @param requiredAtChild  properties required at the child
 * @param rqdPartitioning  the required (unordered) partitioning property
 * @param childIndex       index of the child input
 * @param parentOp         the parent operator
 * @return a hash partition exchange, possibly merging
 */
private IPhysicalOperator createHashConnector(IOptimizationContext ctx, IPhysicalPropertiesVector deliveredByChild,
        INodeDomain domain, IPhysicalPropertiesVector requiredAtChild, IPartitioningProperty rqdPartitioning,
        int childIndex, ILogicalOperator parentOp) {
    List<LogicalVariable> partitionVars =
            new ArrayList<>(((UnorderedPartitionedProperty) rqdPartitioning).getColumnSet());
    String hint = (String) ctx.getMetadataProvider().getConfig().get(HASH_MERGE);
    // Without an affirmative hash-merge hint, always use the plain hash exchange.
    if (hint == null || !hint.equalsIgnoreCase(TRUE_CONSTANT)) {
        return new HashPartitionExchangePOperator(partitionVars, domain);
    }
    List<ILocalStructuralProperty> childLocals = deliveredByChild.getLocalProperties();
    List<ILocalStructuralProperty> requiredLocals = requiredAtChild.getLocalProperties();
    if (requiredLocals != null && childLocals != null && allAreOrderProps(childLocals)) {
        AbstractLogicalOperator childOp =
                (AbstractLogicalOperator) parentOp.getInputs().get(childIndex).getValue();
        Map<LogicalVariable, EquivalenceClass> eqClasses = ctx.getEquivalenceClassMap(childOp);
        List<FunctionalDependency> fdList = ctx.getFDList(childOp);
        // Merge only helps when the delivered order already matches the required one.
        if (PropertiesUtil.matchLocalProperties(requiredLocals, childLocals, eqClasses, fdList)) {
            List<OrderColumn> mergeColumns = getOrderColumnsFromGroupingProperties(requiredLocals, childLocals);
            return new HashPartitionMergeExchangePOperator(mergeColumns, partitionVars, domain);
        }
    }
    // Hint was set but local properties do not line up: fall back to plain hash exchange.
    return new HashPartitionExchangePOperator(partitionVars, domain);
}
/** * Creates a range-based exchange operator. * @param parentOp the operator requiring range-based partitioner to have input tuples repartitioned using a range * @param domain the target node domain of the range-based partitioner * @param requiredPartitioning {@see OrderedPartitionedProperty} * @param childIndex the index of the child at which the required partitioning is needed * @param ctx optimization context * @return a range-based exchange operator * @throws AlgebricksException */ private IPhysicalOperator createRangePartitionerConnector(AbstractLogicalOperator parentOp, INodeDomain domain, IPartitioningProperty requiredPartitioning, int childIndex, IOptimizationContext ctx) throws AlgebricksException { // options for range partitioning: 1. static range map, 2. dynamic range map computed at run time List<OrderColumn> partitioningColumns = ((OrderedPartitionedProperty) requiredPartitioning).getOrderColumns(); if (parentOp.getAnnotations().containsKey(OperatorAnnotations.USE_STATIC_RANGE)) { // TODO(ali): static range map implementation should be fixed to require ORDERED_PARTITION and come here. RangeMap rangeMap = (RangeMap) parentOp.getAnnotations().get(OperatorAnnotations.USE_STATIC_RANGE); return new RangePartitionExchangePOperator(partitioningColumns, domain, rangeMap); } else { return createDynamicRangePartitionExchangePOperator(parentOp, ctx, domain, partitioningColumns, childIndex); } }
/**
 * Picks the merging exchange connector based on the child's delivered properties.
 *
 * <p>With no delivered order columns, the merge is order-insensitive: sequential when the
 * child is ordered-partitioned, random otherwise. With order columns, a static range map
 * annotation selects a range-partition merge; otherwise a sort-merge exchange is used.
 *
 * @param parentOp         the parent operator (consulted for the static-range annotation)
 * @param domain           target node domain of the exchange
 * @param deliveredByChild properties the child currently delivers
 * @return the merging exchange connector
 */
private IPhysicalOperator createMergingConnector(ILogicalOperator parentOp, INodeDomain domain,
        IPhysicalPropertiesVector deliveredByChild) {
    List<OrderColumn> orderColumns = computeOrderColumns(deliveredByChild);
    if (orderColumns.isEmpty()) {
        // No order to preserve: choose by the child's partitioning type.
        IPartitioningProperty childPartitioning = deliveredByChild.getPartitioningProperty();
        return childPartitioning.getPartitioningType() == PartitioningType.ORDERED_PARTITIONED
                ? new SequentialMergeExchangePOperator() : new RandomMergeExchangePOperator();
    }
    if (parentOp.getAnnotations().containsKey(OperatorAnnotations.USE_STATIC_RANGE)) {
        RangeMap staticRangeMap = (RangeMap) parentOp.getAnnotations().get(OperatorAnnotations.USE_STATIC_RANGE);
        return new RangePartitionMergeExchangePOperator(orderColumns, domain, staticRangeMap);
    }
    return new SortMergeExchangePOperator(orderColumns.toArray(new OrderColumn[0]));
}
/**
 * Builds the ordered collection of physical rewrite rules applied at all levels of the plan.
 *
 * @return a new mutable list containing the physical rewrite rules, in application order
 */
public static final List<IAlgebraicRewriteRule> buildPhysicalRewritesAllLevelsRuleCollection() {
    List<IAlgebraicRewriteRule> physicalRewritesAllLevels = new LinkedList<>();
    physicalRewritesAllLevels.add(new PullSelectOutOfEqJoin());
    // NOTE(review): a rule was once "turned off here not to change OptimizerTest results",
    // but no disabled rule is visible at this point — confirm against version history.
    physicalRewritesAllLevels.add(new SetupCommitExtensionOpRule());
    physicalRewritesAllLevels.add(new SetAlgebricksPhysicalOperatorsRule());
    physicalRewritesAllLevels.add(new SetAsterixPhysicalOperatorsRule());
    physicalRewritesAllLevels.add(new AddEquivalenceClassForRecordConstructorRule());
    physicalRewritesAllLevels.add(new CheckFullParallelSortRule());
    // Structural-property enforcement uses the range-map / local-sampling functions for
    // dynamic range partitioning.
    physicalRewritesAllLevels
            .add(new EnforceStructuralPropertiesRule(BuiltinFunctions.RANGE_MAP, BuiltinFunctions.LOCAL_SAMPLING));
    physicalRewritesAllLevels.add(new RemoveSortInFeedIngestionRule());
    physicalRewritesAllLevels.add(new RemoveUnnecessarySortMergeExchange());
    physicalRewritesAllLevels.add(new PushProjectDownRule());
    physicalRewritesAllLevels.add(new IntroduceMaterializationForInsertWithSelfScanRule());
    physicalRewritesAllLevels.add(new InlineSingleReferenceVariablesRule());
    physicalRewritesAllLevels.add(new RemoveUnusedAssignAndAggregateRule());
    physicalRewritesAllLevels.add(new ConsolidateAssignsRule());
    // After adding projects, we may need to set physical operators again.
    physicalRewritesAllLevels.add(new SetAlgebricksPhysicalOperatorsRule());
    return physicalRewritesAllLevels;
}