@Override
public PlanNode visitTableWriter(TableWriterNode node, RewriteContext<FragmentProperties> context)
{
    // A writer with an explicit partitioning scheme forces the fragment's
    // distribution before the subtree is rewritten.
    node.getPartitioningScheme().ifPresent(
            scheme -> context.get().setDistribution(scheme.getPartitioning().getHandle(), metadata, session));
    return context.defaultRewrite(node, context.get());
}
@Override
public PlanNode visitTableWriter(TableWriterNode node, RewriteContext<FragmentProperties> context)
{
    // A writer with an explicit partitioning scheme forces the fragment's
    // distribution before the subtree is rewritten.
    node.getPartitioningScheme().ifPresent(
            scheme -> context.get().setDistribution(scheme.getPartitioning().getHandle(), metadata, session));
    return context.defaultRewrite(node, context.get());
}
@Override
public PlanNode visitTableWriter(TableWriterNode node, RewriteContext<Set<Symbol>> context)
{
    // Gather every symbol the writer itself consumes: the data columns, plus
    // anything referenced by the partitioning scheme or the statistics
    // aggregation. The source is then pruned down to exactly this set.
    ImmutableSet.Builder<Symbol> requiredInputs = ImmutableSet.builder();
    requiredInputs.addAll(node.getColumns());

    node.getPartitioningScheme().ifPresent(scheme -> {
        requiredInputs.addAll(scheme.getPartitioning().getColumns());
        scheme.getHashColumn().ifPresent(requiredInputs::add);
    });

    node.getStatisticsAggregation().ifPresent(statisticsAggregation -> {
        requiredInputs.addAll(statisticsAggregation.getGroupingSymbols());
        statisticsAggregation.getAggregations().values().forEach(
                aggregation -> requiredInputs.addAll(SymbolsExtractor.extractUnique(aggregation.getCall())));
    });

    PlanNode prunedSource = context.rewrite(node.getSource(), requiredInputs.build());
    return new TableWriterNode(
            node.getId(),
            prunedSource,
            node.getTarget(),
            node.getRowCountSymbol(),
            node.getFragmentSymbol(),
            node.getColumns(),
            node.getColumnNames(),
            node.getPartitioningScheme(),
            node.getStatisticsAggregation(),
            node.getStatisticsAggregationDescriptor());
}
@Override
public PlanNode visitTableWriter(TableWriterNode node, RewriteContext<Set<Symbol>> context)
{
    // Gather every symbol the writer itself consumes: the data columns, plus
    // anything referenced by the partitioning scheme or the statistics
    // aggregation. The source is then pruned down to exactly this set.
    ImmutableSet.Builder<Symbol> requiredInputs = ImmutableSet.builder();
    requiredInputs.addAll(node.getColumns());

    node.getPartitioningScheme().ifPresent(scheme -> {
        requiredInputs.addAll(scheme.getPartitioning().getColumns());
        scheme.getHashColumn().ifPresent(requiredInputs::add);
    });

    node.getStatisticsAggregation().ifPresent(statisticsAggregation -> {
        requiredInputs.addAll(statisticsAggregation.getGroupingSymbols());
        statisticsAggregation.getAggregations().values().forEach(
                aggregation -> requiredInputs.addAll(SymbolsExtractor.extractUnique(aggregation.getCall())));
    });

    PlanNode prunedSource = context.rewrite(node.getSource(), requiredInputs.build());
    return new TableWriterNode(
            node.getId(),
            prunedSource,
            node.getTarget(),
            node.getRowCountSymbol(),
            node.getFragmentSymbol(),
            node.getColumns(),
            node.getColumnNames(),
            node.getPartitioningScheme(),
            node.getStatisticsAggregation(),
            node.getStatisticsAggregationDescriptor());
}
public TableWriterNode map(TableWriterNode node, PlanNode source, PlanNodeId newNodeId) { // Intentionally does not use canonicalizeAndDistinct as that would remove columns ImmutableList<Symbol> columns = node.getColumns().stream() .map(this::map) .collect(toImmutableList()); return new TableWriterNode( newNodeId, source, node.getTarget(), map(node.getRowCountSymbol()), map(node.getFragmentSymbol()), columns, node.getColumnNames(), node.getPartitioningScheme().map(partitioningScheme -> canonicalize(partitioningScheme, source)), node.getStatisticsAggregation().map(this::map), node.getStatisticsAggregationDescriptor().map(this::map)); }
public TableWriterNode map(TableWriterNode node, PlanNode source, PlanNodeId newNodeId) { // Intentionally does not use canonicalizeAndDistinct as that would remove columns ImmutableList<Symbol> columns = node.getColumns().stream() .map(this::map) .collect(toImmutableList()); return new TableWriterNode( newNodeId, source, node.getTarget(), map(node.getRowCountSymbol()), map(node.getFragmentSymbol()), columns, node.getColumnNames(), node.getPartitioningScheme().map(partitioningScheme -> canonicalize(partitioningScheme, source)), node.getStatisticsAggregation().map(this::map), node.getStatisticsAggregationDescriptor().map(this::map)); }
@Override
public PlanWithProperties visitTableWriter(TableWriterNode node, PreferredProperties preferredProperties)
{
    PlanWithProperties child = node.getSource().accept(this, preferredProperties);

    // An explicit scheme on the writer takes precedence; otherwise fall back to
    // scaled-writer or round-robin distribution when the corresponding session
    // feature is enabled. If neither applies, no exchange is forced here.
    Optional<PartitioningScheme> scheme = node.getPartitioningScheme();
    if (!scheme.isPresent()) {
        if (scaleWriters) {
            scheme = Optional.of(new PartitioningScheme(
                    Partitioning.create(SCALED_WRITER_DISTRIBUTION, ImmutableList.of()),
                    child.getNode().getOutputSymbols()));
        }
        else if (redistributeWrites) {
            scheme = Optional.of(new PartitioningScheme(
                    Partitioning.create(FIXED_ARBITRARY_DISTRIBUTION, ImmutableList.of()),
                    child.getNode().getOutputSymbols()));
        }
    }

    // Insert a remote repartitioning exchange only when the child's current
    // partitioning is not already compatible with the chosen scheme.
    if (scheme.isPresent()
            && !child.getProperties().isCompatibleTablePartitioningWith(scheme.get().getPartitioning(), false, metadata, session)) {
        child = withDerivedProperties(
                partitionedExchange(idAllocator.getNextId(), REMOTE, child.getNode(), scheme.get()),
                child.getProperties());
    }
    return rebaseAndDeriveProperties(node, child);
}
@Override
public PlanWithProperties visitTableWriter(TableWriterNode node, PreferredProperties preferredProperties)
{
    PlanWithProperties child = node.getSource().accept(this, preferredProperties);

    // An explicit scheme on the writer takes precedence; otherwise fall back to
    // scaled-writer or round-robin distribution when the corresponding session
    // feature is enabled. If neither applies, no exchange is forced here.
    Optional<PartitioningScheme> scheme = node.getPartitioningScheme();
    if (!scheme.isPresent()) {
        if (scaleWriters) {
            scheme = Optional.of(new PartitioningScheme(
                    Partitioning.create(SCALED_WRITER_DISTRIBUTION, ImmutableList.of()),
                    child.getNode().getOutputSymbols()));
        }
        else if (redistributeWrites) {
            scheme = Optional.of(new PartitioningScheme(
                    Partitioning.create(FIXED_ARBITRARY_DISTRIBUTION, ImmutableList.of()),
                    child.getNode().getOutputSymbols()));
        }
    }

    // Insert a remote repartitioning exchange only when the child's current
    // partitioning is not already compatible with the chosen scheme.
    if (scheme.isPresent()
            && !child.getProperties().isCompatibleTablePartitioningWith(scheme.get().getPartitioning(), false, metadata, session)) {
        child = withDerivedProperties(
                partitionedExchange(idAllocator.getNextId(), REMOTE, child.getNode(), scheme.get()),
                child.getProperties());
    }
    return rebaseAndDeriveProperties(node, child);
}
if (node.getPartitioningScheme().isPresent()) { context.setDriverInstanceCount(1);
if (node.getPartitioningScheme().isPresent()) { context.setDriverInstanceCount(1);
@Override public PlanNode visitTableWriter(TableWriterNode node, RewriteContext<Context> context) { // Part of the plan should be an Optional<StateChangeListener<QueryState>> and this // callback can create the table and abort the table creation if the query fails. TableWriterNode.WriterTarget writerTarget = context.get().getMaterializedHandle(node.getTarget()).get(); return new TableWriterNode( node.getId(), node.getSource().accept(this, context), writerTarget, node.getRowCountSymbol(), node.getFragmentSymbol(), node.getColumns(), node.getColumnNames(), node.getPartitioningScheme(), node.getStatisticsAggregation(), node.getStatisticsAggregationDescriptor()); }
@Override public PlanNode visitTableWriter(TableWriterNode node, RewriteContext<Context> context) { // Part of the plan should be an Optional<StateChangeListener<QueryState>> and this // callback can create the table and abort the table creation if the query fails. TableWriterNode.WriterTarget writerTarget = context.get().getMaterializedHandle(node.getTarget()).get(); return new TableWriterNode( node.getId(), node.getSource().accept(this, context), writerTarget, node.getRowCountSymbol(), node.getFragmentSymbol(), node.getColumns(), node.getColumnNames(), node.getPartitioningScheme(), node.getStatisticsAggregation(), node.getStatisticsAggregationDescriptor()); }