public static ExprNodeDesc getExprNode(Integer inputRefIndx, RelNode inputRel,
    ExprNodeConverter exprConv) {
  RexNode rexInputRef = new RexInputRef(inputRefIndx,
      inputRel.getRowType().getFieldList().get(inputRefIndx).getType());
  return rexInputRef.accept(exprConv);
}
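// Usage sketch (hypothetical, not from the original source): convert the first
// column of some existing "inputRel" into a Hive ExprNodeDesc. The "t1" alias
// and the empty virtual-column set are illustrative assumptions; the converter
// arguments mirror convertToExprNode below.
ExprNodeConverter conv = new ExprNodeConverter("t1", inputRel.getRowType(),
    new HashSet<Integer>(), inputRel.getCluster().getTypeFactory(), true);
ExprNodeDesc firstColumn = getExprNode(0, inputRel, conv);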
private static ExprNodeDesc convertToExprNode(RexNode rn, RelNode inputRel, String tabAlias,
    Set<Integer> vcolsInCalcite) {
  return rn.accept(new ExprNodeConverter(tabAlias, inputRel.getRowType(), vcolsInCalcite,
      inputRel.getCluster().getTypeFactory(), true));
}
public RelNode align(RelNode rel, List<RelFieldCollation> collations) {
  ImmutableList.Builder<RelNode> newInputs = new ImmutableList.Builder<>();
  for (RelNode input : rel.getInputs()) {
    newInputs.add(dispatchAlign(input, ImmutableList.<RelFieldCollation>of()));
  }
  return rel.copy(rel.getTraitSet(), newInputs.build());
}
public void fixSharedOlapTableScanAt(RelNode parent, int ordinalInParent) {
  OLAPTableScan copy = copyTableScanIfNeeded(parent.getInputs().get(ordinalInParent));
  if (copy != null) {
    parent.replaceInput(ordinalInParent, copy);
  }
}
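// Usage sketch (hypothetical): de-share every OLAPTableScan directly under a
// node by visiting each input ordinal; "root" is an assumed parent RelNode.
for (int i = 0; i < root.getInputs().size(); i++) {
  fixSharedOlapTableScanAt(root, i);
}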
private static void replaceEmptyGroupAggr(final RelNode rel, RelNode parent) {
  // If this function is called, the parent should only include constants.
  List<RexNode> exps = parent.getChildExps();
  for (RexNode rexNode : exps) {
    if (!rexNode.accept(new HiveCalciteUtil.ConstantFinder())) {
      throw new RuntimeException("We expect " + parent.toString()
          + " to contain only constants. However, " + rexNode.toString() + " is "
          + rexNode.getKind());
    }
  }
  HiveAggregate oldAggRel = (HiveAggregate) rel;
  RelDataTypeFactory typeFactory = oldAggRel.getCluster().getTypeFactory();
  RelDataType longType = TypeConverter.convert(TypeInfoFactory.longTypeInfo, typeFactory);
  RelDataType intType = TypeConverter.convert(TypeInfoFactory.intTypeInfo, typeFactory);
  // Create the dummy aggregation.
  SqlAggFunction countFn = SqlFunctionConverter.getCalciteAggFn("count", false,
      ImmutableList.of(intType), longType);
  // TODO: Using 0 might be wrong; might need to walk down to find the
  // proper index of a dummy.
  List<Integer> argList = ImmutableList.of(0);
  AggregateCall dummyCall = new AggregateCall(countFn, false, argList, longType, null);
  Aggregate newAggRel = oldAggRel.copy(oldAggRel.getTraitSet(), oldAggRel.getInput(),
      oldAggRel.indicator, oldAggRel.getGroupSet(), oldAggRel.getGroupSets(),
      ImmutableList.of(dummyCall));
  RelNode select = introduceDerivedTable(newAggRel);
  parent.replaceInput(0, select);
}
/**
 * Creates a LogicalAggregate that applies the SINGLE_VALUE aggregate function
 * to every field of the underlying relational expression. SINGLE_VALUE returns
 * its input when the input has exactly one row and raises an error otherwise.
 *
 * @param rel underlying rel
 * @return rel implementing SingleValueAgg
 */
public static RelNode createSingleValueAggRel(
    RelOptCluster cluster,
    RelNode rel,
    RelFactories.AggregateFactory aggregateFactory) {
  // assert (rel.getRowType().getFieldCount() == 1);
  final int aggCallCnt = rel.getRowType().getFieldCount();
  final List<AggregateCall> aggCalls = new ArrayList<>();
  for (int i = 0; i < aggCallCnt; i++) {
    aggCalls.add(
        AggregateCall.create(
            SqlStdOperatorTable.SINGLE_VALUE, false, false,
            ImmutableList.of(i), -1, 0, rel, null, null));
  }
  return aggregateFactory.createAggregate(rel, false, ImmutableBitSet.of(), null, aggCalls);
}
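// Usage sketch (hypothetical): wrap a scalar subquery's plan so that each field
// is reduced with SINGLE_VALUE, failing at runtime if the subquery produces more
// than one row. "subQueryRel" is an assumed existing RelNode; the factory is
// Calcite's default aggregate factory.
RelNode singleValued = createSingleValueAggRel(subQueryRel.getCluster(), subQueryRel,
    RelFactories.DEFAULT_AGGREGATE_FACTORY);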
private Frame(RelNode rel) {
  this(rel, ImmutableList.of(Pair.of(deriveAlias(rel), rel.getRowType())));
}
private RelNode copyNodeScan(RelNode scan) {
  final RelNode newScan;
  if (scan instanceof DruidQuery) {
    final DruidQuery dq = (DruidQuery) scan;
    // Ideally we should use the HiveRelNode convention. However, the Volcano planner
    // throws in that case because DruidQuery does not implement the interface, so we
    // set it as Bindable. Currently we do not use conventions in Hive, hence this
    // should be fine.
    // TODO: If we want to make use of conventions (e.g., while directly generating the
    // operator tree instead of the AST), this should be changed.
    newScan = DruidQuery.create(optCluster, optCluster.traitSetOf(BindableConvention.INSTANCE),
        scan.getTable(), dq.getDruidTable(),
        ImmutableList.<RelNode>of(dq.getTableScan()));
  } else {
    newScan = new HiveTableScan(optCluster, optCluster.traitSetOf(HiveRelNode.CONVENTION),
        (RelOptHiveTable) scan.getTable(), ((RelOptHiveTable) scan.getTable()).getName(),
        null, false, false);
  }
  return newScan;
}
/** Creates a group key with grouping sets, both identified by field positions
 * in the underlying relational expression.
 *
 * <p>This method of creating a group key does not allow you to group on new
 * expressions, only column projections, but is efficient, especially when you
 * are coming from an existing {@link Aggregate}. */
public GroupKey groupKey(ImmutableBitSet groupSet, boolean indicator,
    ImmutableList<ImmutableBitSet> groupSets) {
  if (groupSet.length() > peek().getRowType().getFieldCount()) {
    throw new IllegalArgumentException("out of bounds: " + groupSet);
  }
  if (groupSets == null) {
    groupSets = ImmutableList.of(groupSet);
  }
  final ImmutableList<RexNode> nodes =
      fields(ImmutableIntList.of(groupSet.toArray()));
  final List<ImmutableList<RexNode>> nodeLists =
      Lists.transform(groupSets,
          new Function<ImmutableBitSet, ImmutableList<RexNode>>() {
            public ImmutableList<RexNode> apply(ImmutableBitSet input) {
              return fields(ImmutableIntList.of(input.toArray()));
            }
          });
  return groupKey(nodes, indicator, nodeLists);
}
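// Usage sketch (hypothetical): GROUP BY deptno GROUPING SETS ((deptno), ()),
// counting rows, where deptno is field 7 of an assumed "EMP" table. "builder"
// is an assumed configured RelBuilder.
RelNode agg = builder.scan("EMP")
    .aggregate(
        builder.groupKey(ImmutableBitSet.of(7), false,
            ImmutableList.of(ImmutableBitSet.of(7), ImmutableBitSet.of())),
        builder.count(false, "C"))
    .build();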
private PlannerResult planExplanation(
    final RelNode rel,
    final SqlExplain explain,
    final Set<String> datasourceNames
) {
  final String explanation = RelOptUtil.dumpPlan("", rel, explain.getFormat(),
      explain.getDetailLevel());
  final Supplier<Sequence<Object[]>> resultsSupplier = Suppliers.ofInstance(
      Sequences.simple(ImmutableList.of(new Object[]{explanation})));
  final RelDataTypeFactory typeFactory = rel.getCluster().getTypeFactory();
  return new PlannerResult(
      resultsSupplier,
      typeFactory.createStructType(
          ImmutableList.of(Calcites.createSqlType(typeFactory, SqlTypeName.VARCHAR)),
          ImmutableList.of("PLAN")
      ),
      datasourceNames
  );
}
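// Usage sketch (hypothetical): the same kind of dump can be produced directly
// for a plain-text plan; "rootRel" is an assumed RelNode to explain.
final String plan = RelOptUtil.dumpPlan("", rootRel, SqlExplainFormat.TEXT,
    SqlExplainLevel.EXPPLAN_ATTRIBUTES);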
LOG.debug("Matched HiveSemiJoinRule"); final RelOptCluster cluster = join.getCluster(); final RexBuilder rexBuilder = cluster.getRexBuilder(); final ImmutableBitSet rightBits = ImmutableBitSet.range(left.getRowType().getFieldCount(), join.getRowType().getFieldCount()); if (topRefs.intersects(rightBits)) { return; if (!joinInfo.rightSet().equals( ImmutableBitSet.range(aggregate.getGroupCount()))) { call.transformTo(topOperator.copy(topOperator.getTraitSet(), ImmutableList.of(left))); return; Join rightJoin = (Join)(((HepRelVertex)aggregate.getInput()).getCurrentRel()); List<RexNode> projects = new ArrayList<>(); for(int i=0; i<rightJoin.getRowType().getFieldCount(); i++){ projects.add(rexBuilder.makeInputRef(rightJoin, i)); RelNode topProject = call.builder().push(rightJoin).project(projects, rightJoin.getRowType().getFieldNames(), semi = call.builder().push(left).push(aggregate.getInput()).semiJoin(newCondition).build(); call.transformTo(topOperator.copy(topOperator.getTraitSet(), ImmutableList.of(semi)));
public void onMatch(RelOptRuleCall call) {
  final HiveFilter filter = call.rel(0);
  final HiveSortLimit sort = call.rel(1);

  final RelNode newFilter = filter.copy(sort.getInput().getTraitSet(),
      ImmutableList.<RelNode>of(sort.getInput()));
  final HiveSortLimit newSort = sort.copy(sort.getTraitSet(), newFilter,
      sort.collation, sort.offset, sort.fetch);

  call.transformTo(newSort);
}
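// Usage sketch (hypothetical): rules like the one above are typically fired
// through Calcite's HepPlanner; "filterSortTransposeRule" and "rootRel" are
// assumed to exist.
HepProgram program = new HepProgramBuilder()
    .addRuleInstance(filterSortTransposeRule)
    .build();
HepPlanner planner = new HepPlanner(program);
planner.setRoot(rootRel);
RelNode optimized = planner.findBestExp();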
// Excerpt from the IN-subquery conversion; rexNodeLhs is built from the subquery's
// left-hand side expression in code elided from this excerpt.
if (subQueryDesc.getRexSubQuery().getRowType().getFieldCount() > 1) {
  throw new CalciteSubquerySemanticException(ErrorMsg.INVALID_SUBQUERY_EXPRESSION.getMsg(
      "SubQuery can contain only 1 item in Select List."));
}
// ...
RexNode rexSubQuery = RexSubQuery.in(subQueryDesc.getRexSubQuery(),
    ImmutableList.<RexNode>of(rexNodeLhs));
return rexSubQuery;
// Excerpt from the rule that pulls constant columns up above a sort. The variables
// constants, rexBuilder, parent and relBuilder come from code elided from this
// excerpt; elisions are marked with "...".
final int count = sort.getInput().getRowType().getFieldCount();
if (count == 1) {
  // There is nothing besides the constant column; bail out.
  return;
}
// ...
List<RelDataTypeField> fields = sort.getInput().getRowType().getFieldList();
List<Pair<RexNode, String>> newChildExprs = new ArrayList<>();
List<RexNode> topChildExprs = new ArrayList<>();
List<String> topChildExprsFields = new ArrayList<>();
for (int i = 0; i < count; i++) {
  RexNode expr = rexBuilder.makeInputRef(sort.getInput(), i);
  RelDataTypeField field = fields.get(i);
  if (constants.containsKey(expr)) {
    // Constant columns are emitted only above the sort.
    topChildExprs.add(constants.get(expr));
    topChildExprsFields.add(field.getName());
  } else {
    newChildExprs.add(Pair.<RexNode, String>of(expr, field.getName()));
    topChildExprs.add(expr);
    topChildExprsFields.add(field.getName());
  }
}
// Remap the sort collations onto the positions of the remaining columns.
final Mappings.TargetMapping mapping =
    RelOptUtil.permutation(Pair.left(newChildExprs), sort.getInput().getRowType()).inverse();
List<RelFieldCollation> fieldCollations = new ArrayList<>();
for (RelFieldCollation fc : sort.getCollation().getFieldCollations()) {
  // ... (collation rewrite elided in the original excerpt)
}
// ... (construction of the new Project-Sort-Project sequence elided)
List<RelNode> inputs = new ArrayList<>();
for (RelNode child : parent.getInputs()) {
  if (!((HepRelVertex) child).getCurrentRel().equals(sort)) {
    inputs.add(child);
  } else {
    inputs.add(relBuilder.build());
  }
}
call.transformTo(parent.copy(parent.getTraitSet(), inputs));
public static HiveTableFunctionScan createUDTFForSetOp(RelOptCluster cluster, RelNode input)
    throws SemanticException {
  RelTraitSet traitSet = TraitsUtil.getDefaultTraitSet(cluster);

  List<RexNode> originalInputRefs = Lists.transform(input.getRowType().getFieldList(),
      new Function<RelDataTypeField, RexNode>() {
        @Override
        public RexNode apply(RelDataTypeField input) {
          return new RexInputRef(input.getIndex(), input.getType());
        }
      });

  ImmutableList.Builder<RelDataType> argTypeBldr = ImmutableList.<RelDataType>builder();
  for (int i = 0; i < originalInputRefs.size(); i++) {
    argTypeBldr.add(originalInputRefs.get(i).getType());
  }

  RelDataType retType = input.getRowType();

  String funcName = "replicate_rows";
  FunctionInfo fi = FunctionRegistry.getFunctionInfo(funcName);
  SqlOperator calciteOp = SqlFunctionConverter.getCalciteOperator(funcName, fi.getGenericUDTF(),
      argTypeBldr.build(), retType);

  // A Hive UDTF only has a single input.
  List<RelNode> list = new ArrayList<>();
  list.add(input);

  RexNode rexNode = cluster.getRexBuilder().makeCall(calciteOp, originalInputRefs);

  return HiveTableFunctionScan.create(cluster, traitSet, list, rexNode, null, retType, null);
}
/** Returns references to the fields of a given input. */
public ImmutableList<RexNode> fields(int inputCount, int inputOrdinal) {
  final RelNode input = peek(inputCount, inputOrdinal);
  final RelDataType rowType = input.getRowType();
  final ImmutableList.Builder<RexNode> nodes = ImmutableList.builder();
  for (int fieldOrdinal : Util.range(rowType.getFieldCount())) {
    nodes.add(field(inputCount, inputOrdinal, fieldOrdinal));
  }
  return nodes.build();
}
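// Usage sketch (hypothetical): with two inputs on the builder's stack, fields(2, 0)
// references the left input's columns and fields(2, 1) the right's. Assumes a
// configured RelBuilder "builder" with "EMP" and "DEPT" tables in scope.
builder.scan("EMP").scan("DEPT");
ImmutableList<RexNode> leftFields = builder.fields(2, 0);
RelNode join = builder
    .join(JoinRelType.INNER,
        builder.equals(builder.field(2, 0, "DEPTNO"), builder.field(2, 1, "DEPTNO")))
    .project(leftFields)
    .build();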
/**
 * TODO: 1) isSamplingPred 2) sampleDesc 3) isSortedFilter
 */
OpAttr visit(HiveFilter filterRel) throws SemanticException {
  OpAttr inputOpAf = dispatch(filterRel.getInput());

  if (LOG.isDebugEnabled()) {
    LOG.debug("Translating operator rel#" + filterRel.getId() + ":"
        + filterRel.getRelTypeName() + " with row type: [" + filterRel.getRowType() + "]");
  }

  ExprNodeDesc filCondExpr = filterRel.getCondition().accept(
      new ExprNodeConverter(inputOpAf.tabAlias, filterRel.getInput().getRowType(),
          inputOpAf.vcolsInCalcite, filterRel.getCluster().getTypeFactory(), true));
  FilterDesc filDesc = new FilterDesc(filCondExpr, false);
  ArrayList<ColumnInfo> cinfoLst = createColInfos(inputOpAf.inputs.get(0));
  FilterOperator filOp = (FilterOperator) OperatorFactory.getAndMakeChild(filDesc,
      new RowSchema(cinfoLst), inputOpAf.inputs.get(0));

  if (LOG.isDebugEnabled()) {
    LOG.debug("Generated " + filOp + " with row schema: [" + filOp.getSchema() + "]");
  }

  return inputOpAf.clone(filOp);
}
private RelNode projectLeftOuterSide(RelNode srcRel, int numColumns) throws SemanticException {
  RowResolver iRR = relToHiveRR.get(srcRel);
  RowResolver oRR = new RowResolver();
  RowResolver.add(oRR, iRR, numColumns);

  List<RexNode> calciteColLst = new ArrayList<RexNode>();
  List<String> oFieldNames = new ArrayList<String>();
  RelDataType iType = srcRel.getRowType();
  for (int i = 0; i < iType.getFieldCount(); i++) {
    RelDataTypeField fType = iType.getFieldList().get(i);
    String fName = iType.getFieldNames().get(i);
    calciteColLst.add(cluster.getRexBuilder().makeInputRef(fType.getType(), i));
    oFieldNames.add(fName);
  }

  HiveRelNode selRel = HiveProject.create(srcRel, calciteColLst, oFieldNames);
  this.relToHiveColNameCalcitePosMap.put(selRel, buildHiveToCalciteColumnMap(oRR, selRel));
  this.relToHiveRR.put(selRel, oRR);
  return selRel;
}
public String generateSql() {
  SqlDialect dialect = getJdbcDialect();
  final HiveJdbcImplementor jdbcImplementor =
      new HiveJdbcImplementor(dialect, (JavaTypeFactory) getCluster().getTypeFactory());
  Project topProject;
  if (getInput() instanceof Project) {
    topProject = (Project) getInput();
  } else {
    // If the input is not a Project operator, we add one on top of it to force
    // generating explicit column names instead of "*" while translating to SQL.
    RelNode nodeToTranslate = getInput();
    RexBuilder builder = getCluster().getRexBuilder();
    List<RexNode> projects =
        new ArrayList<>(nodeToTranslate.getRowType().getFieldList().size());
    for (int i = 0; i < nodeToTranslate.getRowType().getFieldCount(); i++) {
      projects.add(builder.makeInputRef(nodeToTranslate, i));
    }
    topProject = new JdbcProject(nodeToTranslate.getCluster(),
        nodeToTranslate.getTraitSet(), nodeToTranslate, projects,
        nodeToTranslate.getRowType());
  }
  final HiveJdbcImplementor.Result result = jdbcImplementor.translate(topProject);
  return result.asStatement().toSqlString(dialect).getSql();
}
private RelNode createFirstGB(RelNode input, boolean left, RelOptCluster cluster,
    RexBuilder rexBuilder) throws CalciteSemanticException {
  final List<RexNode> gbChildProjLst = Lists.newArrayList();
  final List<Integer> groupSetPositions = Lists.newArrayList();
  for (int cInd = 0; cInd < input.getRowType().getFieldList().size(); cInd++) {
    gbChildProjLst.add(rexBuilder.makeInputRef(input, cInd));
    groupSetPositions.add(cInd);
  }
  if (left) {
    gbChildProjLst.add(rexBuilder.makeBigintLiteral(new BigDecimal(2)));
  } else {
    gbChildProjLst.add(rexBuilder.makeBigintLiteral(new BigDecimal(1)));
  }

  // Also add the last virtual column to the grouping keys.
  groupSetPositions.add(input.getRowType().getFieldList().size());

  // Create the Project below the GroupBy.
  RelNode gbInputRel = HiveProject.create(input, gbChildProjLst, null);

  // groupSetPositions includes all the positions.
  final ImmutableBitSet groupSet = ImmutableBitSet.of(groupSetPositions);
  List<AggregateCall> aggregateCalls = Lists.newArrayList();
  RelDataType aggFnRetType =
      TypeConverter.convert(TypeInfoFactory.longTypeInfo, cluster.getTypeFactory());
  AggregateCall aggregateCall = HiveCalciteUtil.createSingleArgAggCall("count", cluster,
      TypeInfoFactory.longTypeInfo, input.getRowType().getFieldList().size(), aggFnRetType);
  aggregateCalls.add(aggregateCall);

  return new HiveAggregate(cluster, cluster.traitSetOf(HiveRelNode.CONVENTION), gbInputRel,
      groupSet, null, aggregateCalls);
}