/** Hashes by the underlying table, so two scans of the same table hash alike. */
@Override public int hashCode() {
  final Object table = rel.getTable();
  return table.hashCode();
}
/**
 * Appends a readable digest of this scan, e.g. {@code Scan(table: [db, t])},
 * and returns the same builder for chaining.
 */
@Override public StringBuilder digest(StringBuilder buf) {
  buf.append("Scan(table: ");
  buf.append(rel.getTable().getQualifiedName());
  return buf.append(")");
} }
/**
 * Derives a default alias for a relational expression.
 *
 * @param rel expression to derive the alias from
 * @return the last component of the table's qualified name when {@code rel}
 *     is a {@link TableScan} with a non-empty name; otherwise {@code null}
 */
private static String deriveAlias(RelNode rel) {
  if (!(rel instanceof TableScan)) {
    return null;
  }
  final List<String> qualifiedName = rel.getTable().getQualifiedName();
  return qualifiedName.isEmpty() ? null : Util.last(qualifiedName);
}
/**
 * Derives a default alias for a relational expression: the final component of
 * the scanned table's qualified name, or {@code null} when {@code rel} is not
 * a table scan (or its name list is empty).
 */
private static String deriveAlias(RelNode rel) {
  if (rel instanceof TableScan) {
    final List<String> tableName = rel.getTable().getQualifiedName();
    if (!tableName.isEmpty()) {
      return Util.last(tableName);
    }
  }
  return null;
}
private RelNode copyNodeScan(RelNode scan) { final RelNode newScan; if (scan instanceof DruidQuery) { final DruidQuery dq = (DruidQuery) scan; // Ideally we should use HiveRelNode convention. However, since Volcano planner // throws in that case because DruidQuery does not implement the interface, // we set it as Bindable. Currently, we do not use convention in Hive, hence that // should be fine. // TODO: If we want to make use of convention (e.g., while directly generating operator // tree instead of AST), this should be changed. newScan = DruidQuery.create(optCluster, optCluster.traitSetOf(BindableConvention.INSTANCE), scan.getTable(), dq.getDruidTable(), ImmutableList.<RelNode>of(dq.getTableScan())); } else { newScan = new HiveTableScan(optCluster, optCluster.traitSetOf(HiveRelNode.CONVENTION), (RelOptHiveTable) scan.getTable(), ((RelOptHiveTable) scan.getTable()).getName(), null, false, false); } return newScan; } }
/**
 * Starts a new, empty {@code PartialDruidQuery} on top of the given scan.
 * All query stages (filter, project, aggregate, ...) begin unset.
 */
public static PartialDruidQuery create(final RelNode scanRel) {
  // Builder is created lazily so the cluster/schema are only touched on demand.
  final Supplier<RelBuilder> relBuilderSupplier =
      () -> RelFactories.LOGICAL_BUILDER.create(
          scanRel.getCluster(),
          scanRel.getTable().getRelOptSchema());
  return new PartialDruidQuery(relBuilderSupplier, scanRel,
      null, null, null, null, null, null, null, null);
}
/**
 * Rebinds a materialization's view scan to this planner's cluster, producing a
 * new {@link RelOptMaterialization} that pairs the rebuilt scan with the
 * original query expression.
 */
@Override public RelOptMaterialization apply(RelOptMaterialization materialization) {
  final RelNode viewScan = materialization.tableRel;
  final RelNode rebuiltScan;
  if (viewScan instanceof DruidQuery) {
    final DruidQuery druidQuery = (DruidQuery) viewScan;
    rebuiltScan = DruidQuery.create(optCluster,
        optCluster.traitSetOf(HiveRelNode.CONVENTION), viewScan.getTable(),
        druidQuery.getDruidTable(),
        ImmutableList.<RelNode>of(druidQuery.getTableScan()));
  } else {
    rebuiltScan = new HiveTableScan(optCluster,
        optCluster.traitSetOf(HiveRelNode.CONVENTION),
        (RelOptHiveTable) viewScan.getTable(),
        viewScan.getTable().getQualifiedName().get(0), null, false, false);
  }
  return new RelOptMaterialization(rebuiltScan, materialization.queryRel, null);
} }
// Cache the materialized view under its key (vk) for later query rewriting.
// NOTE(review): snippet is truncated here — the if-block opened below is closed
// outside this view; verify against the full source.
cq.put(vk, materialization); if (LOG.isDebugEnabled()) { LOG.debug("Cached materialized view for rewriting: " + tableRel.getTable().getQualifiedName());
// Visitor over the second input of a UNION: accepts only plans made of Project
// nodes over a single TableScan of a materialized view. Any other operator
// aborts the check by throwing ReturnedValue(false); the same happens when the
// plan contains an aggregate over a table that is not full-ACID (MERGE support
// is required for that rewrite). NOTE(review): the anonymous-class header that
// this method overrides sits outside this view.
@Override public void visit(RelNode node, int ordinal, RelNode parent) { if (node instanceof TableScan) { // We can continue // TODO: Need to check that this is the same MV that we are rebuilding RelOptHiveTable hiveTable = (RelOptHiveTable) node.getTable(); if (!hiveTable.getHiveTableMD().isMaterializedView()) { // If it is not a materialized view, we do not rewrite it throw new ReturnedValue(false); } if (containsAggregate && !AcidUtils.isFullAcidTable(hiveTable.getHiveTableMD())) { // If it contains an aggregate and it is not a full acid table, // we do not rewrite it (we need MERGE support) throw new ReturnedValue(false); } } else if (node instanceof Project) { // We can continue super.visit(node, ordinal, parent); } else { throw new ReturnedValue(false); } } }.go(union.getInput(1));
// Flags the plan as using materialized views if any scanned table is an MV.
// NOTE(review): snippet appears truncated — 'nodeTypes' is presumably the
// (cut) left-hand side assigned from mq.getNodeTypes(rel), and the loop/if
// braces are closed outside this view; confirm against the full source.
mq.getNodeTypes(rel); for (RelNode scan : nodeTypes.get(TableScan.class)) { if (((RelOptHiveTable) scan.getTable()).getHiveTableMD().isMaterializedView()) { usesMaterializedViews = true; break;
/**
 * Looks up the IO configuration for the source table referenced by a node.
 *
 * @param relNode node whose table's dotted qualified name identifies the source
 * @return the resolved source configuration
 * @throws SamzaException if the resolver knows nothing about the source
 */
private SqlIOConfig resolveSourceConfig(RelNode relNode) {
  final String sourceName = String.join(".", relNode.getTable().getQualifiedName());
  final SqlIOConfig sourceConfig = ioResolver.fetchSourceInfo(sourceName);
  if (sourceConfig != null) {
    return sourceConfig;
  }
  throw new SamzaException("Unsupported source found in join statement: " + sourceName);
}
/**
 * Returns the current alias: the explicitly recorded one if present, else the
 * last component of the top stack entry's table name when that entry is a
 * table scan, else {@code null}.
 */
String getAlias() {
  if (lastAlias != null) {
    return lastAlias;
  }
  final RelNode top = peek();
  return top instanceof TableScan
      ? Util.last(top.getTable().getQualifiedName())
      : null;
}
// Walks the plan and, for every table scan, registers the table's expanded
// relational form with the planner so matching rules can be activated for it.
@Override public void visit(RelNode node, int ordinal, RelNode parent) {
  if (node instanceof TableScan) {
    final RelOptCluster cluster = node.getCluster();
    final RelOptTable.ToRelContext toRelContext =
        ViewExpanders.simpleContext(cluster);
    planner.registerClass(node.getTable().toRel(toRelContext));
  }
  super.visit(node, ordinal, parent);
} };
// Walks the plan and registers each table scan's expanded relational form with
// the planner. NOTE(review): uses RelOptUtil.getContext, which appears to be
// the older API compared to ViewExpanders.simpleContext used elsewhere —
// confirm against the Calcite version in use before unifying.
@Override public void visit(RelNode node, int ordinal, RelNode parent) {
  if (node instanceof TableScan) {
    final RelOptCluster cluster = node.getCluster();
    final RelOptTable.ToRelContext toRelContext = RelOptUtil.getContext(cluster);
    plan.registerClass(node.getTable().toRel(toRelContext));
  }
  super.visit(node, ordinal, parent);
} };
/** Checks all-predicates metadata on a plain cross join of {@code emp} and {@code dept}. */
@Test public void testAllPredicates() {
  final Project project = (Project) convertSql("select * from emp, dept");
  final Join join = (Join) project.getInput();
  final RelOptTable empTable = join.getInput(0).getTable();
  final RelOptTable deptTable = join.getInput(1).getTable();
  Frameworks.withPlanner((cluster, relOptSchema, rootSchema) -> {
    checkAllPredicates(cluster, empTable, deptTable);
    return null;
  });
}
/** Unit test for
 * {@link org.apache.calcite.rel.metadata.RelMdPredicates#getPredicates(Join, RelMetadataQuery)}:
 * exercises predicate metadata on a cross join of {@code emp} and {@code dept}. */
@Test public void testPredicates() {
  final Project project = (Project) convertSql("select * from emp, dept");
  final Join join = (Join) project.getInput();
  final RelOptTable empTable = join.getInput(0).getTable();
  final RelOptTable deptTable = join.getInput(1).getTable();
  Frameworks.withPlanner((cluster, relOptSchema, rootSchema) -> {
    checkPredicates(cluster, empTable, deptTable);
    return null;
  });
}
/** Verifies all-predicates metadata for the two base tables of a cross join. */
@Test public void testAllPredicates() {
  final Project topProject = (Project) convertSql("select * from emp, dept");
  final Join crossJoin = (Join) topProject.getInput();
  final RelOptTable empTable = crossJoin.getInput(0).getTable();
  final RelOptTable deptTable = crossJoin.getInput(1).getTable();
  Frameworks.withPlanner((cluster, relOptSchema, rootSchema) -> {
    checkAllPredicates(cluster, empTable, deptTable);
    return null;
  });
}