/**
 * Assigns a {@link HybridHashJoinPOperator} to the given binary join operator,
 * sizing it from the physical optimization config (join frame budgets, records
 * per frame, fudge factor). For BROADCAST partitioning, the operator is then
 * handed to {@code hybridToInMemHashJoin} for further adjustment.
 *
 * @param op               the binary join to configure
 * @param partitioningType how the join inputs are partitioned
 * @param sideLeft         join key variables of the left input
 * @param sideRight        join key variables of the right input
 * @param context          optimization context supplying the physical config
 * @throws AlgebricksException if the in-memory conversion fails
 */
private static void setHashJoinOp(AbstractBinaryJoinOperator op, JoinPartitioningType partitioningType,
        List<LogicalVariable> sideLeft, List<LogicalVariable> sideRight, IOptimizationContext context)
        throws AlgebricksException {
    PhysicalOptimizationConfig config = context.getPhysicalOptimizationConfig();
    HybridHashJoinPOperator hashJoin = new HybridHashJoinPOperator(op.getJoinKind(), partitioningType, sideLeft,
            sideRight, config.getMaxFramesForJoin(), config.getMaxFramesForJoinLeftInput(),
            config.getMaxRecordsPerFrame(), config.getFudgeFactor());
    op.setPhysicalOperator(hashJoin);
    if (partitioningType == JoinPartitioningType.BROADCAST) {
        hybridToInMemHashJoin(op, context);
    }
}
/**
 * Assigns a {@link NestedLoopJoinPOperator} (always BROADCAST partitioning) to
 * the given binary join operator, sized by the configured join frame budget.
 *
 * @param op      the binary join to configure
 * @param context optimization context supplying the physical config
 */
private static void setNestedLoopJoinOp(AbstractBinaryJoinOperator op, IOptimizationContext context) {
    int maxFrames = context.getPhysicalOptimizationConfig().getMaxFramesForJoin();
    op.setPhysicalOperator(
            new NestedLoopJoinPOperator(op.getJoinKind(), JoinPartitioningType.BROADCAST, maxFrames));
}
/**
 * When true, the sort operator requires ORDERED_PARTITION (only applicable to dynamic version for now).
 * Conditions:
 * 1. Execution mode == partitioned
 * 2. Dynamic range map was not disabled by some checks
 * 3. User didn't disable it
 * 4. User didn't provide static range map
 * 5. Physical sort operator is not in-memory
 * 6. There are at least two partitions in the cluster
 *
 * @param sortOp the sort operator
 * @param clusterDomain the partitions specification of the cluster
 * @param ctx optimization context
 * @return true if the sort operator should be full parallel sort, false otherwise.
 */
private boolean isFullParallel(AbstractLogicalOperator sortOp, INodeDomain clusterDomain,
        IOptimizationContext ctx) {
    // Dynamic range support was explicitly disabled by an earlier check.
    if (sortOp.getAnnotations().get(OperatorAnnotations.USE_DYNAMIC_RANGE) == Boolean.FALSE) {
        return false;
    }
    // A user-provided static range map takes precedence over dynamic parallel sort.
    if (sortOp.getAnnotations().containsKey(OperatorAnnotations.USE_STATIC_RANGE)) {
        return false;
    }
    // Only the stable (external) sort qualifies; in-memory sort is excluded.
    if (sortOp.getPhysicalOperator().getOperatorTag() != PhysicalOperatorTag.STABLE_SORT) {
        return false;
    }
    // Parallel sort needs a known cluster cardinality of at least two partitions.
    Integer cardinality = clusterDomain.cardinality();
    if (cardinality == null || cardinality <= 1) {
        return false;
    }
    // Finally, the user must not have disabled parallel sort in the config.
    return ctx.getPhysicalOptimizationConfig().getSortParallel();
}
}
// NOTE(review): fragment of a larger method — the enclosing definition is not visible here.
// Registers the sampling aggregate: its result variable and function expression are
// appended to the local result/aggregate lists (presumably of a local GROUP BY being
// built by the surrounding code — confirm against the full method).
localResultVariables.add(samplingResultVar);
localAggFunctions.add(new MutableObject<>(samplingExp));
// The configured number of sort samples is attached to the sampling expression as an
// opaque parameter; consumed later by the runtime — TODO confirm the consumer.
Object[] samplingParam = { context.getPhysicalOptimizationConfig().getSortSamples() };
samplingExp.setOpaqueParameters(samplingParam);
// NOTE(review): fragment — the enclosing method and the braces opened below are closed
// outside this view.
// Cache the physical optimization config for use by the surrounding code.
physicalOptimizationConfig = context.getPhysicalOptimizationConfig();
// Trace the operator about to be optimized; the isTraceEnabled() guard avoids building
// the message string when tracing is off.
if (AlgebricksConfig.ALGEBRICKS_LOGGER.isTraceEnabled()) { AlgebricksConfig.ALGEBRICKS_LOGGER.trace(">>>> Optimizing operator " + op.getPhysicalOperator() + ".\n");
// NOTE(review): orphaned trailing arguments of a constructor/method call whose start
// lies outside this view — the group-by frame budget plus the sort columns.
context.getPhysicalOptimizationConfig().getMaxFramesForGroupBy(), sortPhysicalOperator.getSortColumns()));
// NOTE(review): fragment — the `if` opened below is closed outside this view.
// Frame size from the physical optimization config; the guard skips the computation
// when it is non-positive — TODO confirm what frameSize <= 0 signifies here.
int frameSize = context.getPhysicalOptimizationConfig().getFrameSize();
if (frameSize > 0) {
    // Presumably a memory-size estimate: full frames plus per-tuple overhead — confirm
    // the units of frames0 and overhead against the surrounding method.
    long sz = frames0 * frameSize + overhead * v.getNumberOfTuples();
/** * Generate new ORDER operator that uses TopKSort module and replaces the old ORDER operator. */ private boolean pushLimitIntoOrder(Mutable<ILogicalOperator> opRef, Mutable<ILogicalOperator> opRef2, IOptimizationContext context) throws AlgebricksException { PhysicalOptimizationConfig physicalOptimizationConfig = context.getPhysicalOptimizationConfig(); LimitOperator limitOp = (LimitOperator) opRef.getValue(); OrderOperator orderOp = (OrderOperator) opRef2.getValue(); // We don't push-down LIMIT into in-memory sort. if (orderOp.getPhysicalOperator().getOperatorTag() != PhysicalOperatorTag.STABLE_SORT) { return false; } Integer topK = getOutputLimit(limitOp); if (topK == null) { return false; } // Create the new ORDER operator, set the topK value, and replace the current one. OrderOperator newOrderOp = new OrderOperator(orderOp.getOrderExpressions(), topK); newOrderOp.setSourceLocation(orderOp.getSourceLocation()); newOrderOp.setPhysicalOperator( new StableSortPOperator(physicalOptimizationConfig.getMaxFramesExternalSort(), newOrderOp.getTopK())); newOrderOp.getInputs().addAll(orderOp.getInputs()); newOrderOp.setExecutionMode(orderOp.getExecutionMode()); newOrderOp.recomputeSchema(); newOrderOp.computeDeliveredPhysicalProperties(context); opRef2.setValue(newOrderOp); context.computeAndSetTypeEnvironmentForOperator(newOrderOp); context.addToDontApplySet(this, limitOp); return true; }
// NOTE(review): truncated fragment — the GROUP BY handling below is cut off mid-statement
// (the final `throw` is incomplete) and the two orphaned argument lines belong to
// physical-operator constructor calls whose starts are not visible in this view.
private static void computeDefaultPhysicalOp(AbstractLogicalOperator op, boolean topLevelOp,
        IOptimizationContext context) throws AlgebricksException {
    PhysicalOptimizationConfig physicalOptimizationConfig = context.getPhysicalOptimizationConfig();
    if (op.getOperatorTag().equals(LogicalOperatorTag.GROUP)) {
        GroupByOperator gby = (GroupByOperator) op;
        context.getPhysicalOptimizationConfig().getMaxFramesForGroupBy()));
        context.getPhysicalOptimizationConfig().getMaxFramesForGroupBy()));
    } else {
        // Unsupported GROUP BY shape — compile-time error at the operator's source location.
        throw new CompilationException(ErrorCode.COMPILATION_ERROR, gby.getSourceLocation(),
// NOTE(review): truncated fragment — the switch cases and most of the method body are
// missing from this view; the trailing lines are orphaned pieces of those cases.
@SuppressWarnings({ "unchecked", "rawtypes" })
private static void computeDefaultPhysicalOp(AbstractLogicalOperator op, boolean topLevelOp,
        IOptimizationContext context) throws AlgebricksException {
    PhysicalOptimizationConfig physicalOptimizationConfig = context.getPhysicalOptimizationConfig();
    // Only operators that have no physical operator assigned yet get a default one.
    if (op.getPhysicalOperator() == null) {
        switch (op.getOperatorTag()) {
            context.getPhysicalOptimizationConfig().getMaxFramesForGroupBy()));
        } else {
            // Fallback path of a truncated conditional: micro pre-clustered group-by.
            op.setPhysicalOperator(new MicroPreclusteredGroupByPOperator(columnList));