/**
 * Upper bound on how many parallel fragments this scan can use: one per split.
 *
 * @return the split count reported by the table metadata
 */
@Override
public int getMaxParallelizationWidth() {
  final int splitCount = tableMetadata.getSplitCount();
  return splitCount;
}
/**
 * Upper bound on parallelization, driven by the dataset's split count.
 * Marked {@code @JsonIgnore} so this derived value is not serialized.
 *
 * @return number of splits in the dataset
 */
@Override
@JsonIgnore
public int getMaxParallelizationWidth() {
  return dataset.getSplitCount();
}
/**
 * Computes the planner cost of this node. A table with zero splits is given
 * infinite cost so the optimizer never selects a plan that scans it;
 * otherwise the superclass cost model applies.
 */
@Override
public RelOptCost computeSelfCost(final RelOptPlanner planner, final RelMetadataQuery mq) {
  final boolean hasNoSplits = tableMetadata.getSplitCount() == 0;
  return hasNoSplits
      ? planner.getCostFactory().makeInfiniteCost()
      : super.computeSelfCost(planner, mq);
}
/**
 * Constructs an intermediate Elastic scan prel.
 *
 * @param cluster                    the planning cluster
 * @param traitSet                   incoming traits, resolved via {@code traits(...)}
 *                                   together with the table row count and split count
 * @param table                      the catalog table being scanned
 * @param dataset                    table metadata supplying the plugin id and splits
 * @param projectedColumns           columns projected out of the scan
 * @param observedRowcountAdjustment multiplier applied to the observed row count
 */
public ElasticIntermediateScanPrel(
    RelOptCluster cluster,
    RelTraitSet traitSet,
    RelOptTable table,
    TableMetadata dataset,
    List<SchemaPath> projectedColumns,
    double observedRowcountAdjustment) {
  // super(...) must be the first statement, so the trait resolution is inlined.
  super(
      cluster,
      traits(cluster, table.getRowCount(), dataset.getSplitCount(), traitSet),
      table,
      dataset.getStoragePluginId(),
      dataset,
      projectedColumns,
      observedRowcountAdjustment);
}
@Override public RelWriter explainTerms(RelWriter pw) { pw.item("table", tableMetadata.getName()); if(projectedColumns != null){ pw.item("columns", FluentIterable.from(projectedColumns).transform(new Function<SchemaPath, String>(){ @Override public String apply(SchemaPath input) { return input.toString(); }}).join(Joiner.on(", "))); } pw.item("splits", getTableMetadata().getSplitCount()); if(observedRowcountAdjustment != 1.0d){ pw.item("rowAdjust", observedRowcountAdjustment); } // we need to include the table metadata digest since not all properties (specifically which splits) are included in the explain output (what base computeDigest uses). pw.itemIf("tableDigest", tableMetadata.computeDigest(), pw.getDetailLevel() == SqlExplainLevel.DIGEST_ATTRIBUTES); return pw; }
/**
 * Returns the cost figure used to decide parallelization width: the node's
 * self-cost row count (floored at 1), optionally raised to a per-split
 * minimum when the planner settings enable that floor.
 */
@Override
public double getCostForParallelization() {
  final RelOptCost selfCost = computeSelfCost(getCluster().getPlanner(), getCluster().getMetadataQuery());
  double parallelizationCost = Math.max(selfCost.getRows(), 1);
  final PlannerSettings settings = PrelUtil.getSettings(getCluster());
  if (settings.useMinimumCostPerSplit()) {
    // Floor the cost so many tiny splits still justify parallel fragments.
    final double perSplitFloor = settings.getMinimumCostPerSplit(getPluginId().getType());
    parallelizationCost = Math.max(parallelizationCost, perSplitFloor * tableMetadata.getSplitCount());
  }
  return parallelizationCost;
}
}
/**
 * Rewrites a Hive scan over a table with zero splits into an {@code EmptyRel},
 * since such a scan can never produce any rows.
 */
@Override
public void onMatch(RelOptRuleCall call) {
  final HiveScanDrel scan = call.rel(0);
  if (scan.getTableMetadata().getSplitCount() != 0) {
    return;
  }
  call.transformTo(
      new EmptyRel(scan.getCluster(), scan.getTraitSet(), scan.getRowType(), scan.getProjectedSchema()));
}
@Override public RelNode convert(RelNode rel) { FilesystemScanDrel drel = (FilesystemScanDrel) rel; // TODO: this singleton check should be removed once DX-7175 is fixed boolean singleton = !drel.getTableMetadata().getStoragePluginId().getCapabilities().getCapability(SourceCapabilities.REQUIRES_HARD_AFFINITY) && drel.getTableMetadata().getSplitCount() == 1; return new EasyScanPrel(drel.getCluster(), drel.getTraitSet().plus(Prel.PHYSICAL).plus(singleton ? DistributionTrait.SINGLETON : DistributionTrait.ANY), drel.getTable(), drel.getPluginId(), drel.getTableMetadata(), drel.getProjectedColumns(), drel.getObservedRowcountAdjustment()); }