@Override
public RuleSet getRules(OptimizerRulesContext context) {
  final ImmutableList.Builder<RelOptRule> rules = ImmutableList.builder();
  rules.add(AggregateReduceFunctionsRule.NO_REDUCE_SUM);
  if (context.getPlannerSettings()
      .getOptions()
      .getOption(PlannerSettings.JDBC_PUSH_DOWN_PLUS)) {
    rules.add(
        SimpleFilterJoinRule.CALCITE_INSTANCE,
        JOIN_CONDITION_PUSH_CALCITE_RULE,
        PushFilterPastProjectRule.CALCITE_INSTANCE);
  }
  return RuleSets.ofList(rules.build());
}
},
@Override
public Set<RelOptRule> getRules(OptimizerRulesContext optimizerContext, PlannerPhase phase, SourceType pluginType) {
  final OptionManager options = optimizerContext.getPlannerSettings().getOptions();
  switch (phase) {
    case LOGICAL:
      return ImmutableSet.<RelOptRule>of(new ElasticScanRule(pluginType));
    case PHYSICAL:
      ImmutableSet.Builder<RelOptRule> builder = ImmutableSet.builder();
      builder.add(new ElasticScanPrule(optimizerContext.getFunctionRegistry()));
      if (options.getOption(ExecConstants.ELASTIC_RULES_PROJECT)) {
        builder.add(new ElasticProjectRule(optimizerContext.getFunctionRegistry()));
      }
      if (options.getOption(ExecConstants.ELASTIC_RULES_FILTER)) {
        builder.add(ElasticFilterRule.INSTANCE);
      }
      if (options.getOption(ExecConstants.ELASTIC_RULES_LIMIT)) {
        builder.add(ElasticLimitRule.INSTANCE);
      }
      if (options.getOption(ExecConstants.ELASTIC_RULES_SAMPLE)) {
        builder.add(ElasticSampleRule.INSTANCE);
      }
      return builder.build();
    default:
      return ImmutableSet.of();
  }
}
@Override
public Set<RelOptRule> getRules(OptimizerRulesContext optimizerContext, PlannerPhase phase, SourceType pluginType) {
  switch (phase) {
    case LOGICAL:
      ImmutableSet.Builder<RelOptRule> builder = ImmutableSet.builder();
      builder.add(new HiveScanDrule(pluginType));
      builder.add(EliminateEmptyScans.INSTANCE);

      final PlannerSettings plannerSettings = optimizerContext.getPlannerSettings();
      if (plannerSettings.isPartitionPruningEnabled()) {
        builder.add(new PruneScanRuleFilterOnProject<>(pluginType, HiveScanDrel.class, optimizerContext));
        builder.add(new PruneScanRuleFilterOnScan<>(pluginType, HiveScanDrel.class, optimizerContext));
      }

      final OptionManager options = plannerSettings.getOptions();
      if (options.getOption(HivePluginOptions.HIVE_ORC_READER_VECTORIZE)
          && options.getOption(HivePluginOptions.ENABLE_FILTER_PUSHDOWN_HIVE_ORC)) {
        builder.add(new ORCFilterPushDownRule(pluginType));
      }
      return builder.build();
    case PHYSICAL:
      return ImmutableSet.<RelOptRule>of(new HiveScanPrule(pluginType));
    default:
      return ImmutableSet.<RelOptRule>of();
  }
}
private RelNode determineMaterializationPlan(
    final SqlHandlerConfig sqlHandlerConfig,
    ReflectionGoal goal,
    ReflectionEntry entry,
    Materialization materialization,
    ExcludedReflectionsProvider exclusionsProvider,
    NamespaceService namespace,
    ExtendedToRelContext context,
    SabotConfig config,
    ReflectionSettings reflectionSettings,
    MaterializationStore materializationStore) {
  final ReflectionPlanGenerator planGenerator = new ReflectionPlanGenerator(
      sqlHandlerConfig, namespace, context.getPlannerSettings().getOptions(), config,
      goal, entry, materialization, reflectionSettings, materializationStore);

  final RelNode normalizedPlan = planGenerator.generateNormalizedPlan();

  // Avoid accelerating this CTAS with the materialization itself.
  // We set the exclusions before we get to the logical phase (since toRel() is triggered
  // in SqlToRelConverter, prior to planning).
  final List<String> exclusions = ImmutableList.<String>builder()
      .addAll(exclusionsProvider.getExcludedReflections(goal.getId().getId()))
      .add(goal.getId().getId())
      .build();
  context.getSession().getSubstitutionSettings().setExclusions(exclusions);

  RefreshDecision decision = planGenerator.getRefreshDecision();

  // Save the decision for later.
  context.recordExtraInfo(DECISION_NAME, SERIALIZER.serialize(decision));

  logger.trace("Refresh decision: {}", decision);
  if (logger.isTraceEnabled()) {
    logger.trace(RelOptUtil.toString(normalizedPlan));
  }

  return normalizedPlan;
}
protected void applyEdgeProjection(SearchRequestBuilder searchRequest, ElasticIntermediateScanPrel scan) {
  boolean edgeProject = PrelUtil.getPlannerSettings(scan.getCluster()).getOptions()
      .getOption(ExecConstants.ELASTIC_RULES_EDGE_PROJECT);
  if (!edgeProject) {
    return;
  }

  final String[] includesOrderedByOriginalTable;
  if (scan.getProjectedColumns().isEmpty()) {
    includesOrderedByOriginalTable = new String[0];
  } else {
    includesOrderedByOriginalTable = scan.getBatchSchema().mask(scan.getProjectedColumns(), false)
        .toCalciteRecordType(scan.getCluster().getTypeFactory()).getFieldNames().toArray(new String[0]);
  }

  // Canonicalize the order of includes so we don't get test variability.
  Arrays.sort(includesOrderedByOriginalTable);
  searchRequest.setFetchSource(includesOrderedByOriginalTable, null);
}
@Override
public RelOptCost computeSelfCost(RelOptPlanner planner, RelMetadataQuery relMetadataQuery) {
  if (PrelUtil.getSettings(getCluster()).useDefaultCosting()) {
    // We use multiplier 0.05 for the TopN operator and 0.1 for Sort, to make TopN the preferred choice.
    return super.computeSelfCost(planner).multiplyBy(.1);
  }

  RelNode child = this.getInput();
  double inputRows = relMetadataQuery.getRowCount(child);
  // int rowWidth = child.getRowType().getPrecision();
  int numSortFields = this.collation.getFieldCollations().size();
  double cpuCost = DremioCost.COMPARE_CPU_COST * numSortFields * inputRows * (Math.log(inputRows) / Math.log(2));
  double diskIOCost = 0; // assume in-memory for now until we enforce operator-level memory constraints

  // TODO: use rowWidth instead of avgFieldWidth * numFields
  // avgFieldWidth * numFields * inputRows
  double numFields = this.getRowType().getFieldCount();
  long fieldWidth = PrelUtil.getPlannerSettings(planner).getOptions()
      .getOption(ExecConstants.AVERAGE_FIELD_WIDTH_KEY).getNumVal();
  double memCost = fieldWidth * numFields * inputRows;

  Factory costFactory = (Factory) planner.getCostFactory();
  return costFactory.makeCost(inputRows, cpuCost, diskIOCost, 0, memCost);
}
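// Illustrative sketch (not from the source): plugging sample numbers into the sort CPU cost
// above. The formula is COMPARE_CPU_COST * numSortFields * inputRows * log2(inputRows);
// for 1,000,000 rows sorted on 2 fields, log2(1e6) is roughly 19.93, so
//   cpuCost ≈ DremioCost.COMPARE_CPU_COST * 2 * 1e6 * 19.93 ≈ COMPARE_CPU_COST * 3.99e7.
// The example* names below are hypothetical, chosen only for this sketch.
double exampleRows = 1_000_000d;
int exampleSortFields = 2;
double exampleCpuCost = DremioCost.COMPARE_CPU_COST * exampleSortFields * exampleRows
    * (Math.log(exampleRows) / Math.log(2));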
double factor = PrelUtil.getPlannerSettings(planner).getOptions()
    .getOption(ExecConstants.HASH_JOIN_TABLE_FACTOR_KEY).getFloatVal();
long fieldWidth = PrelUtil.getPlannerSettings(planner).getOptions()
    .getOption(ExecConstants.AVERAGE_FIELD_WIDTH_KEY).getNumVal();
@Override
public RelOptCost computeSelfCost(RelOptPlanner planner, RelMetadataQuery mq) {
  if (PrelUtil.getSettings(getCluster()).useDefaultCosting()) {
    return super.computeSelfCost(planner).multiplyBy(.1);
  }

  final RelNode child = this.getInput();
  double inputRows = mq.getRowCount(child);
  int numGroupByFields = this.getGroupCount();
  int numAggrFields = this.aggCalls.size();

  // CPU cost of hashing each grouping key.
  double cpuCost = DremioCost.HASH_CPU_COST * numGroupByFields * inputRows;
  // Add CPU cost for computing the aggregate functions.
  cpuCost += DremioCost.FUNC_CPU_COST * numAggrFields * inputRows;
  double diskIOCost = 0; // assume in-memory for now until we enforce operator-level memory constraints

  // TODO: use distinct row count
  // + hash table template stuff
  double factor = PrelUtil.getPlannerSettings(planner).getOptions()
      .getOption(ExecConstants.HASH_AGG_TABLE_FACTOR_KEY).getFloatVal();
  long fieldWidth = PrelUtil.getPlannerSettings(planner).getOptions()
      .getOption(ExecConstants.AVERAGE_FIELD_WIDTH_KEY).getNumVal();

  // table + hashValues + links
  double memCost = ((fieldWidth * numGroupByFields) + IntHolder.WIDTH + IntHolder.WIDTH) * inputRows * factor;

  Factory costFactory = (Factory) planner.getCostFactory();
  return costFactory.makeCost(inputRows, cpuCost, diskIOCost, 0 /* network cost */, memCost);
}
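// Illustrative sketch (not from the source): a worked example of the hash-agg memory estimate
// above, assuming an average field width of 8 bytes, 3 group-by keys, a table factor of 1.1,
// and 1,000,000 input rows. IntHolder.WIDTH is 4 bytes, covering the stored hash value and link:
//   memCost = ((8 * 3) + 4 + 4) * 1e6 * 1.1 = 35,200,000 bytes (about 33.6 MiB)
double exampleMemCost = ((8L * 3) + IntHolder.WIDTH + IntHolder.WIDTH) * 1_000_000d * 1.1;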
final PlannerSettings plannerSettings = PrelUtil.getPlannerSettings(call.getPlanner());
final RelTraitSet traitsChild;
if (plannerSettings.getOptions().getOption(PlannerSettings.ENABLE_UNIONALL_ROUND_ROBIN)) {
  traitsChild = call.getPlanner().emptyTraitSet().plus(Prel.PHYSICAL).plus(DistributionTrait.ROUND_ROBIN);
} else {
if (rightRowCount <= plannerSettings.getOptions().getOption(PlannerSettings.BROADCAST_MIN_THRESHOLD)) {
  // SLF4J uses {} placeholders, not printf-style %d; the original string never substituted the count.
  logger.debug("Enable broadcast plan? true (rightRowCount {} smaller than minimum broadcast threshold)", rightRowCount);
  return true;
}

final long maxWidthPerQuery = plannerSettings.getOptions().getOption(ExecConstants.MAX_WIDTH_GLOBAL);
final long sliceTarget = plannerSettings.getSliceTarget();
final double minFactor = Doubles.min(leftRowCount * 1.0 / sliceTarget, numEndPoints * maxWidthPerNode, maxWidthPerQuery);
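// Illustrative sketch (not from the source): how minFactor above comes together, with assumed
// sample values leftRowCount = 10,000,000, sliceTarget = 100,000, numEndPoints = 4,
// maxWidthPerNode = 8, and maxWidthPerQuery = 1000:
//   minFactor = min(1e7 / 1e5, 4 * 8, 1000) = min(100, 32, 1000) = 32
// i.e. the cluster's available parallelism (32) is the binding constraint in this example.
double exampleMinFactor = Doubles.min(10_000_000 * 1.0 / 100_000, 4 * 8, 1000);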
/**
 * Returns a rel root that defers materialization of scans via
 * {@link com.dremio.exec.planner.logical.ConvertibleScan}.
 *
 * Used for serialization.
 */
public RelRootPlus toConvertibleRelRoot(final SqlNode validatedNode, boolean expand) {
  final OptionManager o = settings.getOptions();
  final boolean useLegacyDecorrelator = o.getOption(PlannerSettings.USE_LEGACY_DECORRELATOR);
  final long inSubQueryThreshold = o.getOption(ExecConstants.FAST_OR_ENABLE)
      ? o.getOption(ExecConstants.FAST_OR_MAX_THRESHOLD)
      : o.getOption(ExecConstants.PLANNER_IN_SUBQUERY_THRESHOLD);
  final SqlToRelConverter.Config config = SqlToRelConverter.configBuilder()
      .withInSubQueryThreshold((int) inSubQueryThreshold)
      .withTrimUnusedFields(true)
      .withConvertTableAccess(false)
      .withExpand(expand)
      .build();
  final ReflectionAllowedMonitoringConvertletTable convertletTable =
      new ReflectionAllowedMonitoringConvertletTable(new ConvertletTable(functionContext.getContextInformation()));
  final SqlToRelConverter sqlToRelConverter = new DremioSqlToRelConverter(this, validator, convertletTable, config);

  // Previously we had "top" = !innerQuery, but Calcite only adds a project if it is not a top query.
  final RelRoot rel = sqlToRelConverter.convertQuery(validatedNode, false /* needs validate */, false /* top */);
  final RelNode rel2 = sqlToRelConverter.flattenTypes(rel.rel, true);
  final RelNode rel3 = expand ? rel2 : rel2.accept(new RelsWithRexSubQueryFlattener(sqlToRelConverter));
  final RelNode rel4 = RelDecorrelator.decorrelateQuery(rel3, useLegacyDecorrelator);

  if (logger.isDebugEnabled()) {
    logger.debug("ConvertQuery with expand = {}:\n{}", expand, RelOptUtil.toString(rel4, SqlExplainLevel.ALL_ATTRIBUTES));
  }
  return RelRootPlus.of(rel4, rel.kind, convertletTable.isReflectionDisallowed());
}
@Override
public void onMatch(RelOptRuleCall call) {
  final SortRel sort = (SortRel) call.rel(0);
  final RelNode input = sort.getInput();
  final PlannerSettings plannerSettings = PrelUtil.getPlannerSettings(call.getPlanner());

  final RelTraitSet inputTraits;
  if (plannerSettings.getOptions().getOption(PlannerSettings.ENABLE_SORT_ROUND_ROBIN)) {
    // Keep the collation of the logical sort; just make its input distribution round robin.
    inputTraits = sort.getTraitSet().plus(Prel.PHYSICAL).plus(DistributionTrait.ROUND_ROBIN);
  } else {
    // Keep the collation of the logical sort. Convert the input into a RelNode that is
    // 1) sorted on this collation, 2) physical, 3) hash distributed on the sort fields.
    DistributionTrait hashDistribution = new DistributionTrait(
        DistributionTrait.DistributionType.HASH_DISTRIBUTED,
        ImmutableList.copyOf(getDistributionField(sort)));
    inputTraits = sort.getTraitSet().plus(Prel.PHYSICAL).plus(hashDistribution);
  }

  final RelNode convertedInput = convert(input, inputTraits);

  if (isSingleMode(call)) {
    call.transformTo(convertedInput);
  } else {
    RelNode exch = new SingleMergeExchangePrel(
        sort.getCluster(),
        sort.getTraitSet().plus(Prel.PHYSICAL).plus(DistributionTrait.SINGLETON),
        convertedInput,
        sort.getCollation());
    call.transformTo(exch); // transform logical "sort" into "SingleMergeExchange".
  }
}
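// Illustrative sketch (not from the source): in the non-single, hash-distributed case, the rule
// above yields roughly the following plan shape for a sort on key "a"; the operator names are
// the usual Dremio prels, shown here only for orientation:
//
//   SingleMergeExchangePrel        -- merges the pre-sorted streams on the sort collation
//     SortPrel                     -- each fragment sorts its own partition on "a"
//       HashToRandomExchangePrel   -- rows hash-distributed on the sort field "a"
//         (input)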
final StrippingFactory factory = new StrippingFactory(parent.getSettings().getOptions(), parent.getConfig());
    break;
  case DECIMAL:
    if (!context.getPlannerSettings().getOptions()
        .getOption(PlannerSettings.ENABLE_DECIMAL_DATA_TYPE_KEY).getBoolVal()) {
      throw UserException
          // The original excerpt is truncated after "throw UserException"; the builder chain
          // and message below are an assumed completion following the usual UserException pattern.
          .unsupportedError()
          .message("Decimal data type is disabled.")
          .build(logger);
    }