/**
 * Extracts the time zone to use from the Druid query's planner context.
 *
 * @param query Druid query whose planner context may carry the connection config
 * @param arg   expression whose SQL type decides whether the session time zone applies
 * @return the connection's time zone when {@code arg} is of type
 *         {@code TIMESTAMP WITH LOCAL TIME ZONE} and a connection config is
 *         available, otherwise UTC
 */
private static TimeZone timezoneId(final DruidQuery query, final RexNode arg) {
  if (arg.getType().getSqlTypeName() == SqlTypeName.TIMESTAMP_WITH_LOCAL_TIME_ZONE) {
    final CalciteConnectionConfig config = query.getTopNode().getCluster()
        .getPlanner().getContext().unwrap(CalciteConnectionConfig.class);
    // Context.unwrap may return null; guard instead of risking an NPE,
    // consistent with the null checks on unwrap() results elsewhere in this file.
    if (config != null) {
      return TimeZone.getTimeZone(config.timeZone());
    }
  }
  return TimeZone.getTimeZone("UTC");
}
@Override public boolean matches(RelOptRuleCall call) { final Filter filter = call.rel(0); final RelNode filterChild = call.rel(1); // If the filter is already on top of a TableScan, // we can bail out if (filterChild instanceof TableScan) { return false; } HiveRulesRegistry registry = call.getPlanner().getContext().unwrap(HiveRulesRegistry.class); // If this operator has been visited already by the rule, // we do not need to apply the optimization if (registry != null && registry.getVisited(this).contains(filter)) { return false; } return true; }
@Override public final HiveJoin copy(RelTraitSet traitSet, RexNode conditionExpr, RelNode left, RelNode right, JoinRelType joinType, boolean semiJoinDone) { try { Set<String> variablesStopped = Collections.emptySet(); HiveJoin join = new HiveJoin(getCluster(), traitSet, left, right, conditionExpr, joinType, variablesStopped, joinAlgorithm); // If available, copy state to registry for optimization rules HiveRulesRegistry registry = join.getCluster().getPlanner().getContext().unwrap(HiveRulesRegistry.class); if (registry != null) { registry.copyPushedPredicates(this, join); } return join; } catch (InvalidRelException | CalciteSemanticException e) { // Semantic error not possible. Must be a bug. Convert to // internal error. throw new AssertionError(e); } }
@Override public final HiveJoin copy(RelTraitSet traitSet, RexNode conditionExpr, RelNode left, RelNode right, JoinRelType joinType, boolean semiJoinDone) { try { Set<String> variablesStopped = Collections.emptySet(); HiveJoin join = new HiveJoin(getCluster(), traitSet, left, right, conditionExpr, joinType, variablesStopped, joinAlgorithm); // If available, copy state to registry for optimization rules HiveRulesRegistry registry = join.getCluster().getPlanner().getContext().unwrap(HiveRulesRegistry.class); if (registry != null) { registry.copyPushedPredicates(this, join); } return join; } catch (InvalidRelException | CalciteSemanticException e) { // Semantic error not possible. Must be a bug. Convert to // internal error. throw new AssertionError(e); } }
@Override public boolean matches(RelOptRuleCall call) { final Filter filter = call.rel(0); final RelNode filterChild = call.rel(1); // If the filter is already on top of a TableScan, // we can bail out if (filterChild instanceof TableScan) { return false; } HiveRulesRegistry registry = call.getPlanner().getContext().unwrap(HiveRulesRegistry.class); // If this operator has been visited already by the rule, // we do not need to apply the optimization if (registry != null && registry.getVisited(this).contains(filter)) { return false; } return true; }
@Override public boolean isExecutable(HiveJoin join) { final Double maxMemory = join.getCluster().getPlanner().getContext(). unwrap(HiveAlgorithmsConf.class).getMaxMemory(); // Check streaming side RelNode smallInput = join.getStreamingInput(); if (smallInput == null) { return false; } return HiveAlgorithmsUtil.isFittingIntoMemory(maxMemory, smallInput, 1); }
public static Integer getSplitCountWithRepartition(HiveJoin join) { final Double maxSplitSize = join.getCluster().getPlanner().getContext(). unwrap(HiveAlgorithmsConf.class).getMaxSplitSize(); // We repartition: new number of splits RelMetadataQuery mq = RelMetadataQuery.instance(); final Double averageRowSize = mq.getAverageRowSize(join); final Double rowCount = mq.getRowCount(join); if (averageRowSize == null || rowCount == null) { return null; } final Double totalSize = averageRowSize * rowCount; final Double splitCount = totalSize / maxSplitSize; return splitCount.intValue(); }
public static Integer getSplitCountWithRepartition(HiveJoin join) { final Double maxSplitSize = join.getCluster().getPlanner().getContext(). unwrap(HiveAlgorithmsConf.class).getMaxSplitSize(); // We repartition: new number of splits final RelMetadataQuery mq = join.getCluster().getMetadataQuery(); final Double averageRowSize = mq.getAverageRowSize(join); final Double rowCount = mq.getRowCount(join); if (averageRowSize == null || rowCount == null) { return null; } final Double totalSize = averageRowSize * rowCount; final Double splitCount = totalSize / maxSplitSize; return splitCount.intValue(); }
@Override public SemiJoin copy(RelTraitSet traitSet, RexNode condition, RelNode left, RelNode right, JoinRelType joinType, boolean semiJoinDone) { try { final JoinInfo joinInfo = JoinInfo.of(left, right, condition); HiveSemiJoin semijoin = new HiveSemiJoin(getCluster(), traitSet, left, right, condition, joinInfo.leftKeys, joinInfo.rightKeys); // If available, copy state to registry for optimization rules HiveRulesRegistry registry = semijoin.getCluster().getPlanner().getContext().unwrap(HiveRulesRegistry.class); if (registry != null) { registry.copyPushedPredicates(this, semijoin); } return semijoin; } catch (InvalidRelException | CalciteSemanticException e) { // Semantic error not possible. Must be a bug. Convert to // internal error. throw new AssertionError(e); } }
@Override public boolean isExecutable(HiveJoin join) { final Double maxMemory = join.getCluster().getPlanner().getContext(). unwrap(HiveAlgorithmsConf.class).getMaxMemory(); // Check streaming side RelNode smallInput = join.getStreamingInput(); if (smallInput == null) { return false; } return HiveAlgorithmsUtil.isFittingIntoMemory(maxMemory, smallInput, 1); }
@Override public SemiJoin copy(RelTraitSet traitSet, RexNode condition, RelNode left, RelNode right, JoinRelType joinType, boolean semiJoinDone) { try { final JoinInfo joinInfo = JoinInfo.of(left, right, condition); HiveSemiJoin semijoin = new HiveSemiJoin(getCluster(), traitSet, left, right, condition, joinInfo.leftKeys, joinInfo.rightKeys); // If available, copy state to registry for optimization rules HiveRulesRegistry registry = semijoin.getCluster().getPlanner().getContext().unwrap(HiveRulesRegistry.class); if (registry != null) { registry.copyPushedPredicates(this, semijoin); } return semijoin; } catch (InvalidRelException | CalciteSemanticException e) { // Semantic error not possible. Must be a bug. Convert to // internal error. throw new AssertionError(e); } }
@Override public boolean matches(RelOptRuleCall call) { final RelNode node = call.rel(0); numberMatches++; HiveRulesRegistry registry = call.getPlanner(). getContext().unwrap(HiveRulesRegistry.class); // If this operator has been visited already by the rule, // we do not need to apply the optimization if (registry != null && registry.getVisited(this).contains(node)) { return false; } return true; }
/**
 * Decorrelates a query.
 *
 * <p>Main entry point to the decorrelator: builds the correlation map for the
 * plan and, when correlations exist, removes them via rules followed by the
 * general decorrelation pass.
 *
 * @param rootRel root node of the query
 * @return equivalent query with all
 *         {@link org.apache.calcite.rel.logical.LogicalCorrelate} instances removed
 */
public static RelNode decorrelateQuery(RelNode rootRel) {
  final CorelMap corelMap = new CorelMapBuilder().build(rootRel);
  // Fast path: nothing to do for plans without correlations.
  if (!corelMap.hasCorrelation()) {
    return rootRel;
  }
  final RelOptCluster cluster = rootRel.getCluster();
  final HiveRelDecorrelator decorrelator =
      new HiveRelDecorrelator(cluster, corelMap, cluster.getPlanner().getContext());
  RelNode result = decorrelator.removeCorrelationViaRule(rootRel);
  // Anything the rules could not remove goes through full decorrelation.
  if (!decorrelator.cm.mapCorToCorRel.isEmpty()) {
    result = decorrelator.decorrelate(result);
  }
  return result;
}
/**
 * Decorrelates a query.
 *
 * <p>This is the main entry point to the decorrelator. A correlation map is
 * first computed for the whole plan; if it is empty the input is returned
 * untouched. Otherwise rule-based correlation removal runs first, and the
 * remaining correlations are handled by the decorrelation pass.
 *
 * @param rootRel root node of the query
 * @return equivalent query with all
 *         {@link org.apache.calcite.rel.logical.LogicalCorrelate} instances removed
 */
public static RelNode decorrelateQuery(RelNode rootRel) {
  final CorelMap correlations = new CorelMapBuilder().build(rootRel);
  if (!correlations.hasCorrelation()) {
    return rootRel;
  }
  final RelOptCluster cluster = rootRel.getCluster();
  final HiveRelDecorrelator worker =
      new HiveRelDecorrelator(cluster, correlations, cluster.getPlanner().getContext());
  RelNode newRoot = worker.removeCorrelationViaRule(rootRel);
  return worker.cm.mapCorToCorRel.isEmpty() ? newRoot : worker.decorrelate(newRoot);
}
@Override public boolean isExecutable(HiveJoin join) { final RelMetadataQuery mq = join.getCluster().getMetadataQuery(); final Double maxMemory = join.getCluster().getPlanner().getContext(). unwrap(HiveAlgorithmsConf.class).getMaxMemory();
basePlan.getCluster().getPlanner().getContext());
HiveRulesRegistry registry = call.getPlanner().getContext().unwrap(HiveRulesRegistry.class); assert registry != null; RexBuilder rB = join.getCluster().getRexBuilder();
@Override public void onMatch(RelOptRuleCall call) { final RelNode node = call.rel(0); numberOnMatch++; // If we have fired it already once, we return and the test will fail if (numberOnMatch > 1) { return; } // Register that we have visited this operator in this rule HiveRulesRegistry registry = call.getPlanner(). getContext().unwrap(HiveRulesRegistry.class); if (registry != null) { registry.registerVisited(this, node); } // We create a new op if it is the first time we fire the rule final RelNode newNode = new DummyNode(node.getCluster(), node.getTraitSet()); // We register it so we do not fire the rule on it again if (registry != null) { registry.registerVisited(this, newNode); } call.transformTo(newNode); } }
/**
 * Rewrites a filter whose condition contains a RexSubQuery into an
 * equivalent plan with the subquery expression replaced.
 *
 * NOTE(review): the result of context.unwrap(Set.class) is dereferenced
 * without a null check, unlike other unwrap() call sites in this file —
 * confirm the planner context always registers the corr-scalar-query set.
 */
public void onMatch(RelOptRuleCall call) {
  final Filter filter = call.rel(0);
  //final RelBuilder builder = call.builder();
  //TODO: replace HiveSubQRemoveRelBuilder with calcite's once calcite 1.11.0 is released
  final HiveSubQRemoveRelBuilder builder = new HiveSubQRemoveRelBuilder(null, call.rel(0).getCluster(), null);
  // Locate the subquery expression inside the filter condition.
  final RexSubQuery e = RexUtil.SubQueryFinder.find(filter.getCondition());
  assert e != null;
  // Determine the three-valued-logic context in which the subquery appears.
  final RelOptUtil.Logic logic = LogicVisitor.find(RelOptUtil.Logic.TRUE, ImmutableList.of(filter.getCondition()), e);
  builder.push(filter.getInput());
  final int fieldCount = builder.peek().getRowType().getFieldCount();
  assert(filter instanceof HiveFilter);
  // Set of correlated scalar subqueries stashed in the planner context;
  // membership tells whether e.rel is such a query.
  Set<RelNode> corrScalarQueries = filter.getCluster().getPlanner().getContext().unwrap(Set.class);
  boolean isCorrScalarQuery = corrScalarQueries.contains(e.rel);
  // Build the replacement expression for the subquery and substitute it
  // into the original condition.
  final RexNode target = apply(e, HiveFilter.getVariablesSet(e), logic, builder, 1, fieldCount, isCorrScalarQuery);
  final RexShuttle shuttle = new ReplaceSubQueryShuttle(e, target);
  builder.filter(shuttle.apply(filter.getCondition()));
  // Project back to the original output row type.
  builder.project(fields(builder, filter.getRowType().getFieldCount()));
  call.transformTo(builder.build());
}
};
HiveRulesRegistry registry = call.getPlanner().getContext().unwrap(HiveRulesRegistry.class); assert registry != null; RexBuilder rB = join.getCluster().getRexBuilder();