/** Returns the fully-qualified namespace path of the underlying dataset. */
@Override
public NamespaceKey getPath() {
  final NamespaceKey datasetName = dataset.getName();
  return datasetName;
}
@Override public TableType getJdbcTableType() { // ugly way to return correct table type for the system tables and information schema. if(dataset.getName().getRoot().equals("sys") || dataset.getName().getRoot().equals("INFORMATION_SCHEMA") ) { return TableType.SYSTEM_TABLE; } return TableType.TABLE; }
/**
 * Two RelOptNamespaceTable instances are equal when they reference the same
 * dataset name within the same planning cluster.
 */
@Override
public boolean equals(final Object other) {
  if (!(other instanceof RelOptNamespaceTable)) {
    return false;
  }
  final RelOptNamespaceTable that = (RelOptNamespaceTable) other;
  final boolean sameDataset =
      Objects.equal(table.getDataset().getName(), that.table.getDataset().getName());
  return sameDataset && Objects.equal(cluster, that.cluster);
}
// Supplier-style view of the dataset's qualified path. Returns an empty list when the
// path components are absent, otherwise a single-element list holding the path.
// NOTE(review): the trailing "});" closes an enclosing anonymous class that is not
// visible in this chunk.
@Override public List<List<String>> get() { final List<String> table = dataset.getName().getPathComponents(); if (table == null) { return ImmutableList.of(); } return ImmutableList.of(table); } });
/**
 * Equality for scans: same concrete class, same projected columns, same table,
 * same table-metadata name, and same plugin id.
 */
@Override
public boolean equals(final Object other) {
  // Require the exact same concrete subclass, not merely another ScanRelBase.
  if (!(other instanceof ScanRelBase) || !other.getClass().equals(this.getClass())) {
    return false;
  }
  final ScanRelBase that = (ScanRelBase) other;
  return Objects.equal(projectedColumns, that.projectedColumns)
      && Objects.equal(getTable(), that.getTable())
      && Objects.equal(getTableMetadata().getName(), that.getTableMetadata().getName())
      && Objects.equal(getPluginId(), that.getPluginId());
}
@Override public boolean equals(final Object other) { if (!(other instanceof ScanCrel)) { return false; } ScanCrel otherScan = (ScanCrel) other; // for acceleration purposes, no need to do deep comparison. if(otherScan.isDirectNamespaceDescendent && isDirectNamespaceDescendent){ return getTableMetadata().getName().equals(otherScan.getTableMetadata().getName()); } return super.equals(other); }
public InfoSchemaScanPrel( RelOptCluster cluster, RelTraitSet traitSet, RelOptTable table, TableMetadata dataset, SearchQuery query, List<SchemaPath> projectedColumns, double observedRowcountAdjustment ) { super(cluster, traitSet, table, dataset.getStoragePluginId(), dataset, projectedColumns, observedRowcountAdjustment); this.pluginId = dataset.getStoragePluginId(); this.table = Preconditions.checkNotNull(InfoSchemaStoragePlugin.TABLE_MAP.get(dataset.getName().getName().toLowerCase()), "Unexpected system table."); this.query = query; }
/**
 * Serializes the table as its path components.
 *
 * <p>A pruned table no longer carries enough state to round-trip, so
 * serialization is rejected up front.
 */
@Override
public void write(final Kryo kryo, final Output output, final TableMetadata table) {
  final boolean pruned;
  try {
    pruned = table.isPruned();
  } catch (NamespaceException ex) {
    // Checked exception from isPruned(); rethrow unchecked.
    throw Throwables.propagate(ex);
  }
  Preconditions.checkArgument(!pruned, "Cannot serialize a pruned table.");
  kryo.writeObject(output, table.getName().getPathComponents());
}
/** Returns the fully-qualified name of the dataset backing this table. */
@Override
public List<String> getQualifiedName() {
  final List<String> pathComponents = table.getDataset().getName().getPathComponents();
  return pathComponents;
}
/** Hashes the same components that equals() compares. */
@Override
public int hashCode() {
  final Object tableName = getTableMetadata().getName();
  return Objects.hashCode(getTable(), tableName, getPluginId());
}
@VisibleForTesting public SystemScanPrel( RelOptCluster cluster, RelTraitSet traitSet, RelOptTable table, TableMetadata dataset, List<SchemaPath> projectedColumns, double observedRowcountAdjustment, RelDataType rowType ) { super(cluster, traitSet, table, dataset.getStoragePluginId(), dataset, projectedColumns, observedRowcountAdjustment); this.systemTable = Preconditions.checkNotNull(SystemStoragePlugin.TABLE_MAP.get(dataset.getName().getName().toLowerCase()), "Unexpected system table."); this.executorCount = PrelUtil.getPlannerSettings(cluster).getExecutorCount(); this.rowType = rowType; this.pluginId = dataset.getStoragePluginId(); }
/**
 * Mirrors equals(): direct namespace descendents hash on table name only,
 * everything else uses the superclass hash.
 */
@Override
public int hashCode() {
  return isDirectNamespaceDescendent
      ? getTableMetadata().getName().hashCode()
      : super.hashCode();
}
@Override public RelWriter explainTerms(RelWriter pw) { pw.item("table", tableMetadata.getName()); if(projectedColumns != null){ pw.item("columns", FluentIterable.from(projectedColumns).transform(new Function<SchemaPath, String>(){ @Override public String apply(SchemaPath input) { return input.toString(); }}).join(Joiner.on(", "))); } pw.item("splits", getTableMetadata().getSplitCount()); if(observedRowcountAdjustment != 1.0d){ pw.item("rowAdjust", observedRowcountAdjustment); } // we need to include the table metadata digest since not all properties (specifically which splits) are included in the explain output (what base computeDigest uses). pw.itemIf("tableDigest", tableMetadata.computeDigest(), pw.getDetailLevel() == SqlExplainLevel.DIGEST_ATTRIBUTES); return pw; }
// Rewrites every ScanCrel into a GenericScan (keeping name, row type, cluster and
// traits) so the plan text is stable for hashing; all other nodes fall through to
// the default visit. NOTE(review): the "}});" closes an enclosing anonymous visitor
// not visible in this chunk; the trailing statement hashes the cleansed plan's
// string form with 128-bit murmur3, keeping the low 64 bits.
@Override public RelNode visit(RelNode other) { if( !(other instanceof ScanCrel) ) { return super.visit(other); } ScanCrel sc = (ScanCrel) other; return new GenericScan(sc.getTableMetadata().getName(), sc.getRowType(), sc.getCluster(), sc.getTraitSet()); }}); long hash = Hashing.murmur3_128().hashBytes(RelOptUtil.toString(cleansed).getBytes(StandardCharsets.UTF_8)).asLong();
/**
 * Builds the Hive sub-scan for the assigned splits.
 *
 * <p>The query user is only passed through when the source supports storage
 * impersonation; otherwise the process user is used.
 */
@Override
public SubScan getSpecificScan(List<SplitWork> work) throws ExecutionSetupException {
  final BatchSchema schema = getDataset().getSchema();
  final List<DatasetSplit> splits = new ArrayList<>(work.size());
  for (SplitWork split : work) {
    splits.add(split.getSplit());
  }
  final boolean storageImpersonationEnabled = dataset.getStoragePluginId()
      .getCapabilities()
      .getCapability(SourceCapabilities.STORAGE_IMPERSONATION);
  final String userName;
  if (storageImpersonationEnabled) {
    userName = getUserName();
  } else {
    userName = ImpersonationUtil.getProcessUserName();
  }
  final ReadDefinition readDefinition = dataset.getReadDefinition();
  return new HiveSubScan(splits, userName, schema, dataset.getName().getPathComponents(),
      filter, dataset.getStoragePluginId(), columns, readDefinition.getPartitionColumnsList());
}
/**
 * Builds a SystemScanPrel over a mocked sys.version table with the given
 * reported row count and split ratio.
 */
private Prel newScan(RelDataType rowType, double rowCount, double splitRatio) throws Exception {
  final TableMetadata metadata = Mockito.mock(TableMetadata.class);
  final StoragePluginId pluginId = new StoragePluginId(
      new SourceConfig().setConfig(new SystemPluginConf().toBytesString()),
      new SystemPluginConf(),
      SourceCapabilities.NONE);
  when(metadata.getName()).thenReturn(new NamespaceKey(ImmutableList.of("sys", "version")));
  when(metadata.getSchema()).thenReturn(SystemTable.VERSION.getSchema());
  when(metadata.getSplitRatio()).thenReturn(splitRatio);
  when(metadata.getStoragePluginId()).thenReturn(pluginId);
  final List<SchemaPath> columns = FluentIterable.from(SystemTable.VERSION.getSchema())
      .transform(field -> SchemaPath.getSimplePath(field.getName()))
      .toList();
  final RelOptTable relOptTable = Mockito.mock(RelOptTable.class);
  when(relOptTable.getRowCount()).thenReturn(rowCount);
  return new SystemScanPrel(cluster, traits, relOptTable, metadata, columns, 1.0d, rowType);
}
/**
 * Builds a SystemScanPrel over a mocked sys.memory table (a distributed
 * system table) for tests.
 */
private Prel newHardScan(RelDataType rowType) {
  TableMetadata metadata = Mockito.mock(TableMetadata.class);
  when(metadata.getName()).thenReturn(new NamespaceKey(ImmutableList.of("sys", "memory")));
  when(metadata.getSchema()).thenReturn(SystemTable.MEMORY.getSchema());
  StoragePluginId pluginId = new StoragePluginId(
      new SourceConfig().setConfig(new SystemPluginConf().toBytesString()),
      new SystemPluginConf(),
      SourceCapabilities.NONE);
  when(metadata.getStoragePluginId()).thenReturn(pluginId);
  // Lambda instead of an anonymous Function, consistent with newScan.
  List<SchemaPath> columns = FluentIterable.from(SystemTable.MEMORY.getSchema())
      .transform(input -> SchemaPath.getSimplePath(input.getName()))
      .toList();
  return new SystemScanPrel(cluster, traits, Mockito.mock(RelOptTable.class), metadata,
      columns, 1.0d, rowType);
}
/**
 * Builds a SystemScanPrel over a mocked sys.version table (a coordinator-only
 * system table) for tests.
 */
private Prel newSoftScan(RelDataType rowType) {
  TableMetadata metadata = Mockito.mock(TableMetadata.class);
  when(metadata.getName()).thenReturn(new NamespaceKey(ImmutableList.of("sys", "version")));
  when(metadata.getSchema()).thenReturn(SystemTable.VERSION.getSchema());
  StoragePluginId pluginId = new StoragePluginId(
      new SourceConfig().setConfig(new SystemPluginConf().toBytesString()),
      new SystemPluginConf(),
      SourceCapabilities.NONE);
  when(metadata.getStoragePluginId()).thenReturn(pluginId);
  // Lambda instead of an anonymous Function, consistent with newScan.
  List<SchemaPath> columns = FluentIterable.from(SystemTable.VERSION.getSchema())
      .transform(input -> SchemaPath.getSimplePath(input.getName()))
      .toList();
  return new SystemScanPrel(cluster, traits, Mockito.mock(RelOptTable.class), metadata,
      columns, 1.0d, rowType);
}
@Override public SubScan getSpecificScan(List<SplitWork> work) throws ExecutionSetupException { final BatchSchema schema = cachedRelDataType == null ? getDataset().getSchema(): BatchSchema.fromCalciteRowType(cachedRelDataType); // Create an abridged version of the splits to save network bytes. List<DatasetSplit> splits = work.stream().map( workSplit -> ProtostuffUtil.copy(workSplit.getSplit()) .setExtendedProperty(convertToScanXAttr(workSplit.getSplit().getExtendedProperty())) ).collect(Collectors.toList()); return new ParquetSubScan(dataset.getFormatSettings(), splits, getUserName(), schema, getDataset().getName().getPathComponents(), filter == null ? null : filter.getConditions(), dataset.getStoragePluginId(), columns, dataset.getReadDefinition().getPartitionColumnsList(), globalDictionaryEncodedColumns, dataset.getReadDefinition().getExtendedProperty()); }
/**
 * Lowers this scan to an HBase group scan over the table's [startRow, stopRow)
 * key range, registering it with the plan creator.
 */
@Override
public PhysicalOperator getPhysicalOperator(PhysicalPlanCreator creator) throws IOException {
  return creator.addMetadata(
      this,
      new HBaseGroupScan(
          new HBaseScanSpec(getTableMetadata().getName(), startRow, stopRow, filter),
          getTableMetadata(),
          getProjectedColumns(),
          getTableMetadata().getApproximateRecordCount()));
}