private static ExprNodeDesc convertToExprNode(RexNode rn, RelNode inputRel, String tabAlias,
    Set<Integer> vcolsInCalcite) {
  return rn.accept(new ExprNodeConverter(tabAlias, inputRel.getRowType(), vcolsInCalcite,
      inputRel.getCluster().getTypeFactory(), true));
}
/** Creates a call to a scalar operator. */
public RexNode call(SqlOperator operator, Iterable<? extends RexNode> operands) {
  return cluster.getRexBuilder().makeCall(operator, ImmutableList.copyOf(operands));
}
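// Usage sketch (an assumption for illustration, not from the source): building
// the expression "x + 1" with the helper above, where "input" stands for some
// RelNode in scope and the call is made from within the enclosing class.
RexBuilder rexBuilder = cluster.getRexBuilder();
RexNode x = rexBuilder.makeInputRef(input, 0);              // first input column
RexNode one = rexBuilder.makeExactLiteral(BigDecimal.ONE);  // literal 1
RexNode plus = call(SqlStdOperatorTable.PLUS, ImmutableList.of(x, one));  // x + 1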
private RelNode copyNodeScan(RelNode scan) {
  final RelNode newScan;
  if (scan instanceof DruidQuery) {
    final DruidQuery dq = (DruidQuery) scan;
    // Ideally we should use the HiveRelNode convention. However, the Volcano planner
    // throws in that case, because DruidQuery does not implement the interface, so
    // we set it as Bindable. Currently we do not use conventions in Hive, hence this
    // should be fine.
    // TODO: If we want to make use of conventions (e.g., while directly generating the
    // operator tree instead of the AST), this should be changed.
    newScan = DruidQuery.create(optCluster, optCluster.traitSetOf(BindableConvention.INSTANCE),
        scan.getTable(), dq.getDruidTable(),
        ImmutableList.<RelNode>of(dq.getTableScan()));
  } else {
    newScan = new HiveTableScan(optCluster, optCluster.traitSetOf(HiveRelNode.CONVENTION),
        (RelOptHiveTable) scan.getTable(), ((RelOptHiveTable) scan.getTable()).getName(),
        null, false, false);
  }
  return newScan;
}
/** Creates an expression that casts an expression to a given type. */
public RexNode cast(RexNode expr, SqlTypeName typeName) {
  final RelDataType type = cluster.getTypeFactory().createSqlType(typeName);
  return cluster.getRexBuilder().makeCast(type, expr);
}
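// Usage sketch (an assumption for illustration, not from the source): casting a
// column reference to VARCHAR with the helper above; "input" stands for some
// RelNode in scope.
RexNode col = cluster.getRexBuilder().makeInputRef(input, 0);
RexNode asVarchar = cast(col, SqlTypeName.VARCHAR);  // CAST(col AS VARCHAR)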
private RelNode createFirstGB(RelNode input, boolean left, RelOptCluster cluster,
    RexBuilder rexBuilder) throws CalciteSemanticException {
  final List<RexNode> gbChildProjLst = Lists.newArrayList();
  final List<Integer> groupSetPositions = Lists.newArrayList();
  for (int cInd = 0; cInd < input.getRowType().getFieldList().size(); cInd++) {
    gbChildProjLst.add(rexBuilder.makeInputRef(input, cInd));
    groupSetPositions.add(cInd);
  }
  // Add a constant branch marker: 2 for the left input, 1 for the right one.
  if (left) {
    gbChildProjLst.add(rexBuilder.makeBigintLiteral(new BigDecimal(2)));
  } else {
    gbChildProjLst.add(rexBuilder.makeBigintLiteral(new BigDecimal(1)));
  }
  // Also group by the last virtual column (the marker).
  groupSetPositions.add(input.getRowType().getFieldList().size());

  // Create the project below the GB.
  RelNode gbInputRel = HiveProject.create(input, gbChildProjLst, null);

  // groupSetPositions includes all the positions.
  final ImmutableBitSet groupSet = ImmutableBitSet.of(groupSetPositions);
  List<AggregateCall> aggregateCalls = Lists.newArrayList();
  RelDataType aggFnRetType =
      TypeConverter.convert(TypeInfoFactory.longTypeInfo, cluster.getTypeFactory());
  AggregateCall aggregateCall = HiveCalciteUtil.createSingleArgAggCall("count", cluster,
      TypeInfoFactory.longTypeInfo, input.getRowType().getFieldList().size(), aggFnRetType);
  aggregateCalls.add(aggregateCall);
  return new HiveAggregate(cluster, cluster.traitSetOf(HiveRelNode.CONVENTION), gbInputRel,
      groupSet, null, aggregateCalls);
}
public RelNode transform(int ruleSetIndex, RelTraitSet requiredOutputTraits, RelNode rel)
    throws RelConversionException {
  ensure(State.STATE_5_CONVERTED);
  rel.getCluster().setMetadataProvider(
      new CachingRelMetadataProvider(
          rel.getCluster().getMetadataProvider(),
          rel.getCluster().getPlanner()));
  Program program = programs.get(ruleSetIndex);
  return program.run(planner, rel, requiredOutputTraits, ImmutableList.of(), ImmutableList.of());
}
private static RelNode createMaterializedViewScan(HiveConf conf, Table viewTable) {
  // Recreate the planning cluster. ("planner", "rowType", "fullyQualifiedTabName",
  // "nonPartitionColumns", "partitionColumns", "address", "dataSource", "metrics"
  // and "intervals" are built earlier in the full method; that code is not part
  // of this excerpt.)
  final RexBuilder rexBuilder = new RexBuilder(
      new JavaTypeFactoryImpl(new HiveTypeSystemImpl()));
  final RelOptCluster cluster = RelOptCluster.create(planner, rexBuilder);
  RelDataTypeFactory dtFactory = cluster.getRexBuilder().getTypeFactory();

  final RelNode tableRel;
  if (rowType.getFieldNames().contains(DruidTable.DEFAULT_TIMESTAMP_COLUMN)) {
    // Druid-backed materialized view: wrap the Hive scan in a DruidQuery.
    RelOptHiveTable optTable = new RelOptHiveTable(null, cluster.getTypeFactory(),
        fullyQualifiedTabName, rowType, viewTable, nonPartitionColumns, partitionColumns,
        new ArrayList<>(), conf, new HashMap<>(), new HashMap<>(), new AtomicInteger());
    DruidTable druidTable = new DruidTable(new DruidSchema(address, address, false),
        dataSource, RelDataTypeImpl.proto(rowType), metrics,
        DruidTable.DEFAULT_TIMESTAMP_COLUMN, intervals, null, null);
    final TableScan scan = new HiveTableScan(cluster,
        cluster.traitSetOf(HiveRelNode.CONVENTION), optTable, viewTable.getTableName(),
        null, false, false);
    tableRel = DruidQuery.create(cluster, cluster.traitSetOf(BindableConvention.INSTANCE),
        optTable, druidTable, ImmutableList.<RelNode>of(scan), ImmutableMap.of());
  } else {
    // Regular Hive materialized view: a plain table scan.
    RelOptHiveTable optTable = new RelOptHiveTable(null, cluster.getTypeFactory(),
        fullyQualifiedTabName, rowType, viewTable, nonPartitionColumns, partitionColumns,
        new ArrayList<>(), conf, new HashMap<>(), new HashMap<>(), new AtomicInteger());
    tableRel = new HiveTableScan(cluster, cluster.traitSetOf(HiveRelNode.CONVENTION),
        optTable, viewTable.getTableName(), null, false, false);
  }
  return tableRel;
}
private PlannerResult planExplanation(
    final RelNode rel,
    final SqlExplain explain,
    final Set<String> datasourceNames) {
  final String explanation =
      RelOptUtil.dumpPlan("", rel, explain.getFormat(), explain.getDetailLevel());
  final Supplier<Sequence<Object[]>> resultsSupplier = Suppliers.ofInstance(
      Sequences.simple(ImmutableList.of(new Object[]{explanation})));
  final RelDataTypeFactory typeFactory = rel.getCluster().getTypeFactory();
  return new PlannerResult(
      resultsSupplier,
      typeFactory.createStructType(
          ImmutableList.of(Calcites.createSqlType(typeFactory, SqlTypeName.VARCHAR)),
          ImmutableList.of("PLAN")),
      datasourceNames);
}
private static void replaceEmptyGroupAggr(final RelNode rel, RelNode parent) {
  // If this function is called, the parent should contain only constant expressions.
  List<RexNode> exps = parent.getChildExps();
  for (RexNode rexNode : exps) {
    if (!rexNode.accept(new HiveCalciteUtil.ConstantFinder())) {
      throw new RuntimeException("We expect " + parent.toString()
          + " to contain only constants. However, " + rexNode.toString() + " is "
          + rexNode.getKind());
    }
  }
  HiveAggregate oldAggRel = (HiveAggregate) rel;
  RelDataTypeFactory typeFactory = oldAggRel.getCluster().getTypeFactory();
  RelDataType longType = TypeConverter.convert(TypeInfoFactory.longTypeInfo, typeFactory);
  RelDataType intType = TypeConverter.convert(TypeInfoFactory.intTypeInfo, typeFactory);
  // Create the dummy aggregation.
  SqlAggFunction countFn = SqlFunctionConverter.getCalciteAggFn("count", false,
      ImmutableList.of(intType), longType);
  // TODO: Using 0 might be wrong; might need to walk down to find the
  // proper index of a dummy.
  List<Integer> argList = ImmutableList.of(0);
  AggregateCall dummyCall = new AggregateCall(countFn, false, argList, longType, null);
  Aggregate newAggRel = oldAggRel.copy(oldAggRel.getTraitSet(), oldAggRel.getInput(),
      oldAggRel.indicator, oldAggRel.getGroupSet(), oldAggRel.getGroupSets(),
      ImmutableList.of(dummyCall));
  RelNode select = introduceDerivedTable(newAggRel);
  parent.replaceInput(0, select);
}
/** Creates a call to a scalar operator. */
public RexNode call(SqlOperator operator, RexNode... operands) {
  final RexBuilder builder = cluster.getRexBuilder();
  final List<RexNode> operandList = ImmutableList.copyOf(operands);
  final RelDataType type = builder.deriveReturnType(operator, operandList);
  if (type == null) {
    throw new IllegalArgumentException("cannot derive type: " + operator
        + "; operands: " + Lists.transform(operandList, FN_TYPE));
  }
  return builder.makeCall(type, operator, operandList);
}
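// Usage sketch (an assumption for illustration, not from the source): unlike the
// Iterable overload above, this variant derives the return type before building
// the call, so an ill-typed operand list fails fast with IllegalArgumentException
// instead of failing later inside the planner.
RexNode flag = cluster.getRexBuilder().makeLiteral(true);
RexNode negated = call(SqlStdOperatorTable.NOT, flag);  // NOT(true), derived type BOOLEAN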
programBuilder.addMatchOrder(HepMatchOrder.TOP_DOWN);
programBuilder = programBuilder.addRuleCollection(
    ImmutableList.<RelOptRule>of(DummyRule.INSTANCE));

RexBuilder rexBuilder = new RexBuilder(new JavaTypeFactoryImpl());
RelOptCluster cluster = RelOptCluster.create(planner, rexBuilder);
RelMetadataProvider chainedProvider = ChainedRelMetadataProvider.of(list);
final RelNode node = new DummyNode(cluster, cluster.traitSet());
node.getCluster().setMetadataProvider(
    new CachingRelMetadataProvider(chainedProvider, planner));
if (followPlanChanges) {
  programBuilder.addMatchOrder(order);
  programBuilder = programBuilder.addRuleCollection(ImmutableList.copyOf(rules));
} else {
  for (RelOptRule r : rules) {
    programBuilder.addRuleInstance(r);
  }
}

// Create the HepPlanner, copying over the context of the existing planner.
HepPlanner planner = new HepPlanner(programBuilder.build(),
    basePlan.getCluster().getPlanner().getContext());

planner.registerMetadataProviders(list);
RelMetadataProvider chainedProvider = ChainedRelMetadataProvider.of(list);
basePlan.getCluster().setMetadataProvider(
    new CachingRelMetadataProvider(chainedProvider, planner));
basePlan.getCluster().getPlanner().setExecutor(executorProvider);
planner.setExecutor(executorProvider);
public static HiveTableFunctionScan createUDTFForSetOp(RelOptCluster cluster, RelNode input)
    throws SemanticException {
  RelTraitSet traitSet = TraitsUtil.getDefaultTraitSet(cluster);

  List<RexNode> originalInputRefs = Lists.transform(input.getRowType().getFieldList(),
      new Function<RelDataTypeField, RexNode>() {
        @Override
        public RexNode apply(RelDataTypeField input) {
          return new RexInputRef(input.getIndex(), input.getType());
        }
      });

  ImmutableList.Builder<RelDataType> argTypeBldr = ImmutableList.<RelDataType>builder();
  for (int i = 0; i < originalInputRefs.size(); i++) {
    argTypeBldr.add(originalInputRefs.get(i).getType());
  }

  RelDataType retType = input.getRowType();
  String funcName = "replicate_rows";
  FunctionInfo fi = FunctionRegistry.getFunctionInfo(funcName);
  SqlOperator calciteOp = SqlFunctionConverter.getCalciteOperator(funcName, fi.getGenericUDTF(),
      argTypeBldr.build(), retType);

  // A Hive UDTF only has a single input.
  List<RelNode> list = new ArrayList<>();
  list.add(input);

  RexNode rexNode = cluster.getRexBuilder().makeCall(calciteOp, originalInputRefs);
  return HiveTableFunctionScan.create(cluster, traitSet, list, rexNode, null, retType, null);
}
// Create filters on top of each set-op child, rewriting the filter condition
// to reference the child's fields.
RexBuilder rexBuilder = filterRel.getCluster().getRexBuilder();
final RelBuilder relBuilder = call.builder();
List<RelDataTypeField> origFields = setOp.getRowType().getFieldList();
int[] adjustments = new int[origFields.size()];
final List<RelNode> newSetOpInputs = new ArrayList<>();
for (int index = 0; index < setOp.getInputs().size(); index++) {
  RelNode input = setOp.getInput(index);
  RexNode newCondition = condition.accept(new RelOptUtil.RexInputConverter(rexBuilder,
      origFields, input.getRowType().getFieldList(), adjustments));
  if (setOp instanceof Union && setOp.all) {
    final RelMetadataQuery mq = call.getMetadataQuery();
    final RelOptPredicateList predicates = mq.getPulledUpPredicates(input);
    if (predicates != null) {
      // Try to prove the rewritten condition unsatisfiable under the branch's
      // pulled-up predicates; an always-false branch contributes no rows.
      ImmutableList.Builder<RexNode> listBuilder = ImmutableList.builder();
      listBuilder.addAll(predicates.pulledUpPredicates);
      listBuilder.add(newCondition);
      RexExecutor executor =
          Util.first(filterRel.getCluster().getPlanner().getExecutor(), RexUtil.EXECUTOR);
      final RexSimplify simplify = new RexSimplify(rexBuilder, true, executor);
      final RexNode x = simplify.simplifyAnds(listBuilder.build());
      if (x.isAlwaysFalse()) {
        // The excerpt ends here; the full rule skips such a branch.
      }
    }
  }
}
private static RexCall adjustOBSchema(RexCall obyExpr, Project obChild,
    List<FieldSchema> resultSchema) {
  List<RexNode> operands = new ArrayList<>();
  for (int k = 0; k < obyExpr.operands.size(); k++) {
    RexNode rn = obyExpr.operands.get(k);
    // Look for the operand among the child project's expressions.
    int a = -1;
    for (int j = 0; j < resultSchema.size(); j++) {
      if (obChild.getChildExps().get(j).toString().equals(rn.toString())) {
        a = j;
        break;
      }
    }
    if (a != -1) {
      // Found: replace the operand with a reference to the project column.
      operands.add(new RexInputRef(a, rn.getType()));
    } else if (rn instanceof RexCall) {
      // Not found: recurse into nested calls.
      operands.add(adjustOBSchema((RexCall) rn, obChild, resultSchema));
    } else {
      operands.add(rn);
    }
  }
  return (RexCall) obChild.getCluster().getRexBuilder().makeCall(
      obyExpr.getType(), obyExpr.getOperator(), operands);
}
/**
 * TODO: 1) isSamplingPred 2) sampleDesc 3) isSortedFilter
 */
OpAttr visit(HiveFilter filterRel) throws SemanticException {
  OpAttr inputOpAf = dispatch(filterRel.getInput());

  if (LOG.isDebugEnabled()) {
    LOG.debug("Translating operator rel#" + filterRel.getId() + ":"
        + filterRel.getRelTypeName() + " with row type: [" + filterRel.getRowType() + "]");
  }

  ExprNodeDesc filCondExpr = filterRel.getCondition().accept(
      new ExprNodeConverter(inputOpAf.tabAlias, filterRel.getInput().getRowType(),
          inputOpAf.vcolsInCalcite, filterRel.getCluster().getTypeFactory(), true));
  FilterDesc filDesc = new FilterDesc(filCondExpr, false);
  ArrayList<ColumnInfo> cinfoLst = createColInfos(inputOpAf.inputs.get(0));
  FilterOperator filOp = (FilterOperator) OperatorFactory.getAndMakeChild(filDesc,
      new RowSchema(cinfoLst), inputOpAf.inputs.get(0));

  if (LOG.isDebugEnabled()) {
    LOG.debug("Generated " + filOp + " with row schema: [" + filOp.getSchema() + "]");
  }

  return inputOpAf.clone(filOp);
}
private RelNode projectLeftOuterSide(RelNode srcRel, int numColumns) throws SemanticException {
  RowResolver iRR = relToHiveRR.get(srcRel);
  RowResolver oRR = new RowResolver();
  RowResolver.add(oRR, iRR, numColumns);

  List<RexNode> calciteColLst = new ArrayList<RexNode>();
  List<String> oFieldNames = new ArrayList<String>();
  RelDataType iType = srcRel.getRowType();
  for (int i = 0; i < iType.getFieldCount(); i++) {
    RelDataTypeField fType = iType.getFieldList().get(i);
    String fName = iType.getFieldNames().get(i);
    calciteColLst.add(cluster.getRexBuilder().makeInputRef(fType.getType(), i));
    oFieldNames.add(fName);
  }

  HiveRelNode selRel = HiveProject.create(srcRel, calciteColLst, oFieldNames);
  this.relToHiveColNameCalcitePosMap.put(selRel, buildHiveToCalciteColumnMap(oRR, selRel));
  this.relToHiveRR.put(selRel, oRR);
  return selRel;
}
public String generateSql() {
  SqlDialect dialect = getJdbcDialect();
  final HiveJdbcImplementor jdbcImplementor = new HiveJdbcImplementor(dialect,
      (JavaTypeFactory) getCluster().getTypeFactory());
  Project topProject;
  if (getInput() instanceof Project) {
    topProject = (Project) getInput();
  } else {
    // If the input is not a Project operator, we add one on top of it to force
    // generating explicit column names instead of "*" while translating to SQL.
    RelNode nodeToTranslate = getInput();
    RexBuilder builder = getCluster().getRexBuilder();
    List<RexNode> projects = new ArrayList<>(
        nodeToTranslate.getRowType().getFieldList().size());
    for (int i = 0; i < nodeToTranslate.getRowType().getFieldCount(); i++) {
      projects.add(builder.makeInputRef(nodeToTranslate, i));
    }
    topProject = new JdbcProject(nodeToTranslate.getCluster(), nodeToTranslate.getTraitSet(),
        nodeToTranslate, projects, nodeToTranslate.getRowType());
  }
  final HiveJdbcImplementor.Result result = jdbcImplementor.translate(topProject);
  return result.asStatement().toSqlString(dialect).getSql();
}
private Project swapInputs(Join join, Project topProject, RelBuilder builder) {
  RexBuilder rexBuilder = join.getCluster().getRexBuilder();

  int rightInputSize = join.getRight().getRowType().getFieldCount();
  int leftInputSize = join.getLeft().getRowType().getFieldCount();
  List<RelDataTypeField> joinFields = join.getRowType().getFieldList();

  // Adjustments to rewrite input refs after the swap: columns of the (old) left
  // input shift right by the width of the right input, and columns of the (old)
  // right input shift left by the width of the left input.
  int[] condAdjustments = new int[joinFields.size()];
  for (int i = 0; i < leftInputSize; i++) {
    condAdjustments[i] = rightInputSize;
  }
  for (int i = leftInputSize; i < joinFields.size(); i++) {
    condAdjustments[i] = -leftInputSize;
  }

  RexNode newJoinCond = join.getCondition().accept(new RelOptUtil.RexInputConverter(rexBuilder,
      joinFields, joinFields, condAdjustments));
  Join swappedJoin = (Join) builder.push(join.getRight()).push(join.getLeft())
      .join(join.getJoinType(), newJoinCond).build();

  List<RexNode> newProjects = new ArrayList<>();
  List<RelDataTypeField> swappedJoinFields = swappedJoin.getRowType().getFieldList();
  for (RexNode project : topProject.getProjects()) {
    RexNode newProject = project.accept(new RelOptUtil.RexInputConverter(rexBuilder,
        swappedJoinFields, swappedJoinFields, condAdjustments));
    newProjects.add(newProject);
  }
  return (Project) builder.push(swappedJoin).project(newProjects).build();
}
private static RelNode createTableScan(Table viewTable) {
  // Recreate the planning cluster. ("planner", "rowType", "optTable" and
  // "druidTable" are built earlier in the full method; that code is not part
  // of this excerpt.)
  final RexBuilder rexBuilder = new RexBuilder(new JavaTypeFactoryImpl());
  final RelOptCluster cluster = RelOptCluster.create(planner, rexBuilder);

  // Classify the view's columns for Druid: the timestamp column and VARCHAR
  // columns are dimensions; everything else is a metric.
  List<RelDataType> druidColTypes = new ArrayList<>();
  List<String> druidColNames = new ArrayList<>();
  Set<String> metrics = new HashSet<>();
  for (RelDataTypeField field : rowType.getFieldList()) {
    druidColTypes.add(field.getType());
    druidColNames.add(field.getName());
    if (field.getName().equals(DruidTable.DEFAULT_TIMESTAMP_COLUMN)
        || field.getType().getSqlTypeName() == SqlTypeName.VARCHAR) {
      continue;
    }
    metrics.add(field.getName());
  }

  final RelNode tableRel;
  if (rowType.getFieldNames().contains(DruidTable.DEFAULT_TIMESTAMP_COLUMN)) {
    // Druid-backed view: wrap the Hive scan in a DruidQuery.
    final TableScan scan = new HiveTableScan(cluster,
        cluster.traitSetOf(HiveRelNode.CONVENTION), optTable, viewTable.getTableName(),
        null, false, false);
    tableRel = DruidQuery.create(cluster, cluster.traitSetOf(HiveRelNode.CONVENTION),
        optTable, druidTable, ImmutableList.<RelNode>of(scan));
  } else {
    // Regular Hive table scan.
    tableRel = new HiveTableScan(cluster, cluster.traitSetOf(HiveRelNode.CONVENTION),
        optTable, viewTable.getTableName(), null, false, false);
  }
  return tableRel;
}