@Override
public StringBuilder digest(StringBuilder buf) {
  return buf.append("Scan(table: ")
      .append(rel.getTable().getQualifiedName()).append(")");
}
}
@Override
public void visit(RelNode node, int ordinal, RelNode parent) {
  if (node instanceof DruidRel) {
    datasourceNames.addAll(((DruidRel<?>) node).getDataSourceNames());
  }
  if (node instanceof Bindables.BindableTableScan) {
    Bindables.BindableTableScan bts = (Bindables.BindableTableScan) node;
    RelOptTable table = bts.getTable();
    String tableName = table.getQualifiedName().get(0);
    datasourceNames.add(tableName);
  }
  node.childrenAccept(this);
}
}
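For context, a hedged sketch of how a visitor like the one above is usually driven: org.apache.calcite.rel.RelVisitor#go walks the tree from a root RelNode, and childrenAccept recurses into the inputs. The rootRel name below is a placeholder, not from the original code.

// Sketch only: collect qualified table names under `rootRel` (placeholder).
final Set<List<String>> scannedTables = new LinkedHashSet<>();
new RelVisitor() {
  @Override
  public void visit(RelNode node, int ordinal, RelNode parent) {
    if (node instanceof TableScan) {
      scannedTables.add(node.getTable().getQualifiedName());
    }
    node.childrenAccept(this); // recurse into inputs
  }
}.go(rootRel);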
private static String deriveAlias(RelNode rel) {
  if (rel instanceof TableScan) {
    final List<String> names = rel.getTable().getQualifiedName();
    if (!names.isEmpty()) {
      return Util.last(names);
    }
  }
  return null;
}
@Override
public void streamsPlan(StreamsPlanCreator planCreator) throws Exception {
  String sourceName = Joiner.on('.').join(getTable().getQualifiedName());
  Map<String, ISqlStreamsDataSource> sources = planCreator.getSources();
  if (!sources.containsKey(sourceName)) {
    throw new RuntimeException("Cannot find table " + sourceName);
  }
  List<String> fieldNames = getRowType().getFieldNames();
  final Stream<Values> finalStream = planCreator.getStreamBuilder()
      .newStream(sources.get(sourceName).getProducer(),
          new StreamsScanTupleValueMapper(fieldNames), parallelismHint);
  planCreator.addStream(finalStream);
}
}
public Integer splitCount(HiveTableScan scan, RelMetadataQuery mq) {
  Integer splitCount;
  RelOptHiveTable table = (RelOptHiveTable) scan.getTable();
  List<String> bucketCols = table.getHiveTableMD().getBucketCols();
  if (bucketCols != null && !bucketCols.isEmpty()) {
    splitCount = table.getHiveTableMD().getNumBuckets();
  } else {
    splitCount = splitCountRepartition(scan, mq);
    if (splitCount == null) {
      throw new RuntimeException(
          "Could not get split count for table: " + scan.getTable().getQualifiedName());
    }
  }
  return splitCount;
}
final Map<String, RelOptMaterialization> qualifiedNameToView =
    new HashMap<String, RelOptMaterialization>();
for (RelOptMaterialization materialization : cachedViews) {
  qualifiedNameToView.put(
      materialization.table.getQualifiedName().get(0), materialization);
}
@Override
public void streamsPlan(StreamsPlanCreator planCreator) throws Exception {
  // SingleRel
  RelNode input = getInput();
  StormRelUtils.getStormRelInput(input).streamsPlan(planCreator);
  Stream<Values> inputStream = planCreator.pop();

  Preconditions.checkArgument(isInsert(), "Only INSERT statement is supported.");

  // Calcite ensures that the value is structured to match the table definition,
  // hence we can use the PK index directly.
  // To elaborate: if table BAR is defined as (ID INTEGER PK, NAME VARCHAR, DEPTID INTEGER)
  // and a query like INSERT INTO BAR SELECT NAME, ID FROM FOO is executed,
  // Calcite applies the projection ($1 <- ID, $0 <- NAME, null) to the value before INSERT.
  String tableName = Joiner.on('.').join(getTable().getQualifiedName());
  IRichBolt consumer = planCreator.getSources().get(tableName).getConsumer();

  // To keep the logic simple, this assumes that every table has exactly one PK
  // (it should be extended to support composite keys), and provides a
  // PairStream (KeyedStream) to the consumer bolt.
  inputStream.mapToPair(new StreamInsertMapToPairFunction(primaryKeyIndex)).to(consumer);

  planCreator.addStream(inputStream);
}
private void fetchColStats(RelNode key, TableScan tableAccessRel,
    ImmutableBitSet fieldsUsed, Set<RelDataTypeField> extraFields) {
  final List<Integer> iRefSet = Lists.newArrayList();
  if (key instanceof Project) {
    final Project project = (Project) key;
    for (RexNode rx : project.getChildExps()) {
      iRefSet.addAll(HiveCalciteUtil.getInputRefs(rx));
    }
  } else {
    final int fieldCount = tableAccessRel.getRowType().getFieldCount();
    if (fieldsUsed.equals(ImmutableBitSet.range(fieldCount)) && extraFields.isEmpty()) {
      // Get all columns.
      iRefSet.addAll(ImmutableBitSet.range(fieldCount).asList());
    }
  }
  // Remove any virtual columns.
  if (tableAccessRel instanceof HiveTableScan) {
    iRefSet.removeAll(((HiveTableScan) tableAccessRel).getVirtualCols());
  }
  if (!iRefSet.isEmpty()) {
    final RelOptTable table = tableAccessRel.getTable();
    if (table instanceof RelOptHiveTable) {
      ((RelOptHiveTable) table).getColStat(iRefSet, true);
      LOG.debug("Got col stats for {} in {}", iRefSet,
          tableAccessRel.getTable().getQualifiedName());
    }
  }
}
    null, viewScan.getTable().getQualifiedName());
if (opType == OpType.CREATE) {
  LOG.debug("Created materialized view for rewriting: "
      + viewScan.getTable().getQualifiedName());
}
cq.put(vk, materialization);
if (LOG.isDebugEnabled()) {
  LOG.debug("Cached materialized view for rewriting: "
      + tableRel.getTable().getQualifiedName());
}
@Override
public RelOptMaterialization apply(RelOptMaterialization materialization) {
  final RelNode viewScan = materialization.tableRel;
  final RelNode newViewScan;
  if (viewScan instanceof DruidQuery) {
    final DruidQuery dq = (DruidQuery) viewScan;
    newViewScan = DruidQuery.create(optCluster,
        optCluster.traitSetOf(HiveRelNode.CONVENTION), viewScan.getTable(),
        dq.getDruidTable(), ImmutableList.<RelNode>of(dq.getTableScan()));
  } else {
    newViewScan = new HiveTableScan(optCluster,
        optCluster.traitSetOf(HiveRelNode.CONVENTION),
        (RelOptHiveTable) viewScan.getTable(),
        viewScan.getTable().getQualifiedName().get(0), null, false, false);
  }
  return new RelOptMaterialization(newViewScan, materialization.queryRel, null);
}
}
@Override
public RelNode visit(TableScan scan) {
  if (scan instanceof HiveTableScan) {
    HiveTableScan hiveScan = (HiveTableScan) scan;
    RelOptHiveTable relOptHiveTable = (RelOptHiveTable) hiveScan.getTable();
    Table tab = relOptHiveTable.getHiveTableMD();
    if (tab.isTemporary()) {
      fail(tab.getTableName() + " is a temporary table");
    }
    if (tab.getTableType() == TableType.EXTERNAL_TABLE) {
      fail(tab.getFullyQualifiedName() + " is an external table");
    }
    return scan;
  }
  // TableScan of a non-Hive table - not supported for materializations.
  fail(scan.getTable().getQualifiedName() + " is a table scan of a non-Hive table.");
  return scan;
}
@Override
public RelNode visit(TableScan scan) {
  if (tables.contains(scan.getTable().getQualifiedName())) {
    used.value = true;
  }
  return scan;
}
@Override
public RelNode visit(TableScan scan) {
  List<String> table = scan.getTable().getQualifiedName();
  tables.put(table, counter.value++);
  return scan;
}
/**
 * @param origin the column origin to resolve
 * @return the qualified name of the origin table
 */
public static List<String> getTable(RelColumnOrigin origin) {
  return origin.getOriginTable().getQualifiedName();
}
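A hedged usage sketch for a helper like getTable above: column lineage comes from Calcite's RelMetadataQuery#getColumnOrigins, which may return null (or an empty set) when the origin cannot be determined. The rel variable, the column index 0, and the LOG logger are placeholders, not from the original code.

// Sketch only: resolve the origin table(s) of column 0 of `rel` (placeholder).
final RelMetadataQuery mq = rel.getCluster().getMetadataQuery();
final Set<RelColumnOrigin> origins = mq.getColumnOrigins(rel, 0);
if (origins != null) {
  for (RelColumnOrigin origin : origins) {
    List<String> qualifiedName = getTable(origin); // helper defined above
    LOG.debug("Column 0 originates from {}", qualifiedName);
  }
}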
@Override
public void visit(final RelNode node, final int ordinal, final RelNode parent) {
  if (node instanceof TableScan) {
    usedTables.add(node.getTable().getQualifiedName());
  }
  super.visit(node, ordinal, parent);
}
};
@Override
public RelNode visit(final TableScan scan) {
  builder.add(scan.getTable().getQualifiedName());
  return super.visit(scan);
}