FeaturesConfig expected = new FeaturesConfig() .setCpuCostWeight(0.4) .setMemoryCostWeight(0.3) .setNetworkCostWeight(0.2) .setIterativeOptimizerEnabled(false) .setIterativeOptimizerTimeout(new Duration(10, SECONDS)) .setEnableStatsCalculator(false) .setIgnoreStatsCalculatorFailures(false) .setDistributedIndexJoinsEnabled(true) .setJoinDistributionType(BROADCAST) .setJoinMaxBroadcastTableSize(new DataSize(42, GIGABYTE)) .setGroupedExecutionForAggregationEnabled(true) .setDynamicScheduleForGroupedExecutionEnabled(true) .setConcurrentLifespansPerTask(1) .setFastInequalityJoins(false) .setColocatedJoinsEnabled(true) .setSpatialJoinsEnabled(false) .setJoinReorderingStrategy(NONE) .setMaxReorderedJoins(5) .setRedistributeWrites(false) .setScaleWriters(true) .setWriterMinSize(new DataSize(42, GIGABYTE)) .setOptimizeMetadataQueries(true) .setOptimizeHashGeneration(false) .setOptimizeMixedDistinctAggregations(true) .setPushTableWriteThroughUnion(false) .setDictionaryAggregation(true) .setPushAggregationThroughJoin(false) .setLegacyArrayAgg(true)
@Test(groups = JDBC)
public void testSessionProperties()
        throws SQLException
{
    // Property under test and its engine-side default value.
    final String propertyName = "join_distribution_type";
    final String engineDefault = new FeaturesConfig().getJoinDistributionType().name();

    // A fresh connection reports the engine default.
    assertThat(getSessionProperty(connection(), propertyName)).isEqualTo(engineDefault);

    // SET SESSION overrides the value for this connection.
    setSessionProperty(connection(), propertyName, "BROADCAST");
    assertThat(getSessionProperty(connection(), propertyName)).isEqualTo("BROADCAST");

    // RESET SESSION restores the engine default.
    resetSessionProperty(connection(), propertyName);
    assertThat(getSessionProperty(connection(), propertyName)).isEqualTo(engineDefault);
}
// Runs the inherited map-subscript test suite with the legacy map subscript
// feature toggle enabled (see FeaturesConfig.setLegacyMapSubscript).
private TestLegacyMapSubscript()
{
    super(new FeaturesConfig().setLegacyMapSubscript(true));
}
/**
 * Builds a test connector session with the given identity, locale/time zone,
 * session properties, and timestamp semantics.
 *
 * @param user user name for the session identity (must be non-null)
 * @param source optional client source string
 * @param traceToken optional trace token for request correlation
 * @param timeZoneKey session time zone
 * @param locale session locale
 * @param startTime session start time (epoch-based long; units per caller convention)
 * @param propertyMetadatas metadata for the supported session properties; names must be unique
 * @param propertyValues explicitly set property values, keyed by property name
 * @param isLegacyTimestamp whether legacy timestamp semantics are in effect
 */
public TestingConnectorSession(
        String user,
        Optional<String> source,
        Optional<String> traceToken,
        TimeZoneKey timeZoneKey,
        Locale locale,
        long startTime,
        List<PropertyMetadata<?>> propertyMetadatas,
        Map<String, Object> propertyValues,
        boolean isLegacyTimestamp)
{
    // Each session gets a fresh query id from the shared generator.
    this.queryId = queryIdGenerator.createNextQueryId().toString();
    this.identity = new Identity(requireNonNull(user, "user is null"), Optional.empty());
    this.source = requireNonNull(source, "source is null");
    this.traceToken = requireNonNull(traceToken, "traceToken is null");
    this.timeZoneKey = requireNonNull(timeZoneKey, "timeZoneKey is null");
    this.locale = requireNonNull(locale, "locale is null");
    this.startTime = startTime;
    // Index property metadata by name (throws on duplicate names) and snapshot the values.
    this.properties = Maps.uniqueIndex(propertyMetadatas, PropertyMetadata::getName);
    this.propertyValues = ImmutableMap.copyOf(propertyValues);
    this.isLegacyTimestamp = isLegacyTimestamp;
}
/**
 * Creates a test metadata manager whose histogram aggregation uses the
 * requested group-by implementation.
 */
public MetadataManager getMetadata(HistogramGroupImplementation groupMode)
{
    FeaturesConfig config = new FeaturesConfig().setHistogramGroupImplementation(groupMode);
    return MetadataManager.createTestMetadataManager(config);
}
}
// Test-only convenience constructor: creates a registry with no additional
// types and default feature settings.
@VisibleForTesting
public TypeRegistry()
{
    this(ImmutableSet.of(), new FeaturesConfig());
}
OPTIMIZE_HASH_GENERATION, "Compute hash codes for distribution, joins, and aggregations early in query plan", featuresConfig.isOptimizeHashGeneration(), false), booleanProperty( VARCHAR, JoinDistributionType.class, featuresConfig.getJoinDistributionType(), false, value -> JoinDistributionType.valueOf(((String) value).toUpperCase()), VARCHAR, DataSize.class, featuresConfig.getJoinMaxBroadcastTableSize(), true, value -> DataSize.valueOf((String) value), DISTRIBUTED_INDEX_JOIN, "Distribute index joins on join keys instead of executing inline", featuresConfig.isDistributedIndexJoinsEnabled(), false), integerProperty( GROUPED_EXECUTION_FOR_AGGREGATION, "Use grouped execution for aggregation when possible", featuresConfig.isGroupedExecutionForAggregationEnabled(), false), booleanProperty(
@Test
public void testDefaults()
{
    // Each setter call below records the expected out-of-the-box default for
    // the corresponding FeaturesConfig property; the assertion then compares
    // these against a freshly constructed instance.
    FeaturesConfig expectedDefaults = ConfigAssertions.recordDefaults(FeaturesConfig.class)
            .setExperimentalSyntaxEnabled(false)
            .setDistributedIndexJoinsEnabled(false)
            .setDistributedJoinsEnabled(true)
            .setRedistributeWrites(true)
            .setOptimizeMetadataQueries(false)
            .setOptimizeHashGeneration(true)
            .setOptimizeSingleDistinct(true)
            .setPushTableWriteThroughUnion(true)
            .setIntermediateAggregationsEnabled(false)
            .setColumnarProcessing(false)
            .setColumnarProcessingDictionary(false)
            .setDictionaryAggregation(false);
    assertRecordedDefaults(expectedDefaults);
}
OPTIMIZE_HASH_GENERATION, "Compute hash codes for distribution, joins, and aggregations early in query plan", featuresConfig.isOptimizeHashGeneration(), false), booleanSessionProperty( DISTRIBUTED_JOIN, "Use a distributed join instead of a broadcast join", featuresConfig.isDistributedJoinsEnabled(), false), booleanSessionProperty( DISTRIBUTED_INDEX_JOIN, "Distribute index joins on join keys instead of executing inline", featuresConfig.isDistributedIndexJoinsEnabled(), false), integerSessionProperty( REDISTRIBUTE_WRITES, "Force parallel distributed writes", featuresConfig.isRedistributeWrites(), false), booleanSessionProperty( PUSH_TABLE_WRITE_THROUGH_UNION, "Parallelize writes when using UNION ALL in queries that write data", featuresConfig.isPushTableWriteThroughUnion(), false), integerSessionProperty( TASK_INTERMEDIATE_AGGREGATION, "Experimental: add intermediate aggregation jobs per worker",
/**
 * Builds the full optimizer pipeline using test-oriented feature settings
 * (no distributed index joins, hash generation enabled).
 *
 * @param forceSingleNode whether plans should be restricted to single-node execution
 */
public List<PlanOptimizer> getPlanOptimizers(boolean forceSingleNode)
{
    FeaturesConfig config = new FeaturesConfig()
            .setDistributedIndexJoinsEnabled(false)
            .setOptimizeHashGeneration(true);
    PlanOptimizers planOptimizers = new PlanOptimizers(
            metadata,
            sqlParser,
            config,
            forceSingleNode,
            new MBeanExporter(new TestingMBeanServer()),
            splitManager,
            pageSourceManager,
            statsCalculator,
            costCalculator,
            estimatedExchangesCostCalculator,
            new CostComparator(config),
            taskCountEstimator);
    return planOptimizers.get();
}
FeaturesConfig featuresConfig = new FeaturesConfig() .setExperimentalSyntaxEnabled(true) .setDistributedIndexJoinsEnabled(false) .setOptimizeHashGeneration(true); PlanOptimizersFactory planOptimizersFactory = new PlanOptimizersFactory(metadata, sqlParser, featuresConfig, true); sqlParser, dataDefinitionTask, featuresConfig.isExperimentalSyntaxEnabled()); Analyzer analyzer = new Analyzer(session, metadata, sqlParser, accessControl, Optional.of(queryExplainer), featuresConfig.isExperimentalSyntaxEnabled());
/**
 * Assembles a QueryExplainer wired to this query runner's metadata, optimizer
 * pipeline, and cost infrastructure.
 */
private QueryExplainer getQueryExplainer()
{
    Metadata metadata = queryRunner.getMetadata();
    boolean forceSingleNode = queryRunner.getNodeCount() == 1;
    FeaturesConfig config = new FeaturesConfig().setOptimizeHashGeneration(true);
    // Cost model sized by the runner's node count.
    TaskCountEstimator taskCountEstimator = new TaskCountEstimator(queryRunner::getNodeCount);
    CostCalculator costCalculator = new CostCalculatorUsingExchanges(taskCountEstimator);
    CostCalculator estimatedExchangesCostCalculator =
            new CostCalculatorWithEstimatedExchanges(costCalculator, taskCountEstimator);
    List<PlanOptimizer> optimizers = new PlanOptimizers(
            metadata,
            sqlParser,
            config,
            forceSingleNode,
            new MBeanExporter(new TestingMBeanServer()),
            queryRunner.getSplitManager(),
            queryRunner.getPageSourceManager(),
            queryRunner.getStatsCalculator(),
            costCalculator,
            estimatedExchangesCostCalculator,
            new CostComparator(config),
            taskCountEstimator).get();
    PlanFragmenter fragmenter =
            new PlanFragmenter(metadata, queryRunner.getNodePartitioningManager(), new QueryManagerConfig());
    return new QueryExplainer(
            optimizers,
            fragmenter,
            metadata,
            queryRunner.getAccessControl(),
            sqlParser,
            queryRunner.getStatsCalculator(),
            costCalculator,
            ImmutableMap.of());
}
// Creates a join compiler whose GROUP BY comparison semantics (equals vs.
// IS DISTINCT FROM) follow the given flag.
private static JoinCompiler getJoinCompiler(boolean groupByUsesEqual)
{
    FeaturesConfig config = new FeaturesConfig().setGroupByUsesEqualTo(groupByUsesEqual);
    return new JoinCompiler(MetadataManager.createTestMetadataManager(), config);
}
/**
 * Creates a local query runner with spilling enabled into {@code spillPath},
 * backed by a single-split TPCH catalog.
 */
private LocalQueryRunner createLocalQueryRunner(NodeSpillConfig nodeSpillConfig)
{
    FeaturesConfig spillingConfig = new FeaturesConfig()
            .setSpillerSpillPaths(spillPath.getAbsolutePath())
            .setSpillEnabled(true);
    LocalQueryRunner queryRunner = new LocalQueryRunner(SESSION, spillingConfig, nodeSpillConfig, false, true);
    // Register the session's catalog as a TPCH connector with one split per table scan.
    queryRunner.createCatalog(SESSION.getCatalog().get(), new TpchConnectorFactory(1), ImmutableMap.of());
    return queryRunner;
}
}
@BeforeClass
public void setUp()
        throws Exception
{
    tempDirectory = createTempDirectory(getClass().getSimpleName());
    // Spill into the fresh temp directory with 8 spiller threads and no
    // used-space limit (threshold 1.0).
    FeaturesConfig config = new FeaturesConfig()
            .setSpillerSpillPaths(tempDirectory.toString())
            .setSpillerThreads(8)
            .setSpillMaxUsedSpaceThreshold(1.0);
    singleStreamSpillerFactory = new FileSingleStreamSpillerFactory(blockEncodingSerde, new SpillerStats(), config);
    factory = new GenericPartitioningSpillerFactory(singleStreamSpillerFactory);
    scheduledExecutor = newSingleThreadScheduledExecutor();
}
// Builds a local query runner with the legacy log() function behavior toggled
// by the given flag.
private static QueryRunner createQueryRunner(boolean legacyLogFunction)
{
    Session session = testSessionBuilder().build();
    FeaturesConfig config = new FeaturesConfig().setLegacyLogFunction(legacyLogFunction);
    return new LocalQueryRunner(session, config);
}
}
this.blockEncodingSerde = new BlockEncodingManager(typeRegistry); this.metadata = new MetadataManager( new FeaturesConfig().setExperimentalSyntaxEnabled(true), typeRegistry, blockEncodingSerde, .put(CreateView.class, new CreateViewTask(jsonCodec(ViewDefinition.class), sqlParser, accessControl, new FeaturesConfig())) .put(DropTable.class, new DropTableTask()) .put(DropView.class, new DropViewTask())
@BeforeMethod
public void setUp()
{
    blockEncodingSerde = new BlockEncodingManager(new TypeRegistry());
    spillerStats = new SpillerStats();
    // Spill to spillPath with no used-space limit (threshold 1.0).
    FeaturesConfig config = new FeaturesConfig()
            .setSpillerSpillPaths(spillPath.getAbsolutePath())
            .setSpillMaxUsedSpaceThreshold(1.0);
    singleStreamSpillerFactory = new FileSingleStreamSpillerFactory(blockEncodingSerde, spillerStats, config);
    factory = new GenericSpillerFactory(singleStreamSpillerFactory);
    // Page serde without compression (second argument false).
    pagesSerde = new PagesSerdeFactory(requireNonNull(blockEncodingSerde, "blockEncodingSerde is null"), false)
            .createPagesSerde();
    memoryContext = newSimpleAggregatedMemoryContext();
}
// Builds a hash strategy over a single BIGINT channel backed by the shared
// test page, hashing on channel 0, honoring the default group-by equality semantics.
private static PagesHashStrategy pagesHashStrategy()
{
    boolean groupByUsesEqualTo = new FeaturesConfig().isGroupByUsesEqualTo();
    return new SimplePagesHashStrategy(
            ImmutableList.of(BIGINT),
            ImmutableList.of(),
            ImmutableList.of(ImmutableList.of(TEST_PAGE.getBlock(0))),
            ImmutableList.of(),
            OptionalInt.empty(),
            Optional.of(0),
            MetadataManager.createTestMetadataManager().getFunctionRegistry(),
            groupByUsesEqualTo);
}
@Test
public void testTooManyGroupingElements()
{
    // Cap the allowed grouping sets at 2048 via the session-property defaults,
    // then probe queries on either side of that limit.
    Session session = testSessionBuilder(new SessionPropertyManager(new SystemSessionProperties(
            new QueryManagerConfig(),
            new TaskManagerConfig(),
            new MemoryManagerConfig(),
            new FeaturesConfig().setMaxGroupingSets(2048)))).build();
    // CUBE(6) x CUBE(5) = 2^6 * 2^5 = 2048 grouping sets: exactly at the limit, accepted.
    analyze(session, "SELECT a, b, c, d, e, f, g, h, i, j, k, SUM(l)" +
            "FROM (VALUES (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12))\n" +
            "t (a, b, c, d, e, f, g, h, i, j, k, l)\n" +
            "GROUP BY CUBE (a, b, c, d, e, f), CUBE (g, h, i, j, k)");
    // CUBE(6) x CUBE(6) = 4096 grouping sets: over the limit, rejected with the exact count.
    assertFails(session, TOO_MANY_GROUPING_SETS,
            "line 3:10: GROUP BY has 4096 grouping sets but can contain at most 2048",
            "SELECT a, b, c, d, e, f, g, h, i, j, k, l, SUM(m)" +
            "FROM (VALUES (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13))\n" +
            "t (a, b, c, d, e, f, g, h, i, j, k, l, m)\n" +
            "GROUP BY CUBE (a, b, c, d, e, f), CUBE (g, h, i, j, k, l)");
    // CUBE over 31 columns yields 2^31 grouping sets, which exceeds Integer.MAX_VALUE;
    // the error then reports "more than <Integer.MAX_VALUE>" rather than an exact count.
    assertFails(session, TOO_MANY_GROUPING_SETS,
            format("line 3:10: GROUP BY has more than %s grouping sets but can contain at most 2048", Integer.MAX_VALUE),
            "SELECT a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, " +
            "q, r, s, t, u, v, x, w, y, z, aa, ab, ac, ad, ae, SUM(af)" +
            "FROM (VALUES (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, " +
            "17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32))\n" +
            "t (a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, " +
            "q, r, s, t, u, v, x, w, y, z, aa, ab, ac, ad, ae, af)\n" +
            "GROUP BY CUBE (a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, " +
            "q, r, s, t, u, v, x, w, y, z, aa, ab, ac, ad, ae)");
}