// (tail of the preceding statement's builder chain — start of statement is outside this chunk)
.build();
// Expected configuration: every property is set to a non-default value so the
// explicit-property-mapping assertion catches any property the mapping misses.
QueryManagerConfig expected = new QueryManagerConfig()
        .setMinQueryExpireAge(new Duration(30, TimeUnit.SECONDS))
        .setMaxQueryHistory(10)
        .setMaxQueryLength(10000)
        .setMaxStageCount(12345)
        .setClientTimeout(new Duration(10, TimeUnit.SECONDS))
        .setScheduleSplitBatchSize(99)
        .setMinScheduleSplitBatchSize(9)
        .setMaxConcurrentQueries(10)
        .setMaxQueuedQueries(15)
        .setInitialHashPartitions(16)
        .setQueryManagerExecutorPoolSize(11)
        // NOTE(review): min and max error durations are both 60s here — presumably
        // intentional for the test, but confirm the property file matches.
        .setRemoteTaskMinErrorDuration(new Duration(60, TimeUnit.SECONDS))
        .setRemoteTaskMaxErrorDuration(new Duration(60, TimeUnit.SECONDS))
        .setRemoteTaskMaxCallbackThreads(10)
        .setQueryExecutionPolicy("phased")
        .setQueryMaxRunTime(new Duration(2, TimeUnit.HOURS))
        .setQueryMaxExecutionTime(new Duration(3, TimeUnit.HOURS))
        .setQueryMaxCpuTime(new Duration(2, TimeUnit.DAYS))
        .setInitializationRequiredWorkers(200)
        .setInitializationTimeout(new Duration(1, TimeUnit.MINUTES))
        .setRequiredWorkers(333)
        .setRequiredWorkersMaxWait(new Duration(33, TimeUnit.MINUTES));
/**
 * Creates a tracker whose retention and client-timeout settings come from the
 * supplied query-manager configuration.
 *
 * @throws NullPointerException if either argument is null
 */
public QueryTracker(QueryManagerConfig queryManagerConfig, ScheduledExecutorService queryManagementExecutor)
{
    requireNonNull(queryManagerConfig, "queryManagerConfig is null");
    this.queryManagementExecutor = requireNonNull(queryManagementExecutor, "queryManagementExecutor is null");

    // Snapshot the relevant knobs at construction time.
    this.minQueryExpireAge = queryManagerConfig.getMinQueryExpireAge();
    this.maxQueryHistory = queryManagerConfig.getMaxQueryHistory();
    this.clientTimeout = queryManagerConfig.getClientTimeout();
}
/**
 * Derives the single legacy resource-group limits (hard concurrency and queue
 * size) from the global query-manager configuration.
 *
 * @throws NullPointerException if {@code config} is null
 */
@Inject
public LegacyResourceGroupConfigurationManager(QueryManagerConfig config)
{
    // Fail fast with an explicit message, consistent with the other
    // config-consuming constructors in this codebase. Fully qualified because
    // a static import of requireNonNull is not visible in this chunk.
    java.util.Objects.requireNonNull(config, "config is null");
    hardConcurrencyLimit = config.getMaxConcurrentQueries();
    maxQueued = config.getMaxQueuedQueries();
}
// Query size/CPU limits copied from config at construction time.
this.maxQueryLength = queryManagerConfig.getMaxQueryLength();
this.maxQueryCpuTime = queryManagerConfig.getQueryMaxCpuTime();
// Dedicated scheduled pool for periodic query-management work; the cast to
// ThreadPoolExecutor exposes the pool's stats via JMX through the MBean wrapper.
queryManagementExecutor = Executors.newScheduledThreadPool(queryManagerConfig.getQueryManagerExecutorPoolSize(), threadsNamed("query-management-%s"));
queryManagementExecutorMBean = new ThreadPoolExecutorMBean((ThreadPoolExecutor) queryManagementExecutor);
// (tail of the preceding statement's builder chain — start of statement is outside this chunk)
.build();
// Older variant of the explicit-property-mapping test: uses setMaxQueryAge
// (later renamed) and the queue config file, with non-default values for
// every property the mapping covers.
QueryManagerConfig expected = new QueryManagerConfig()
        .setMaxQueryAge(new Duration(30, TimeUnit.SECONDS))
        .setMaxQueryHistory(10)
        .setClientTimeout(new Duration(10, TimeUnit.SECONDS))
        .setScheduleSplitBatchSize(99)
        .setMaxConcurrentQueries(10)
        .setMaxQueuedQueries(15)
        .setQueueConfigFile("/etc/presto/queues.json")
        .setInitialHashPartitions(16)
        .setQueryManagerExecutorPoolSize(11)
        .setRemoteTaskMinErrorDuration(new Duration(30, TimeUnit.SECONDS))
        .setRemoteTaskMaxCallbackThreads(10)
        .setQueryExecutionPolicy("phased")
        .setQueryMaxRunTime(new Duration(2, TimeUnit.HOURS));
// Fragment of a session-property list (surrounding declaration is outside this chunk).
EXECUTION_POLICY,
"Policy used for scheduling query tasks",
queryManagerConfig.getQueryExecutionPolicy(),
false),
// NOTE(review): booleanProperty taking an int default (getInitialHashPartitions
// returns a count) looks wrong — this is likely meant to be integerProperty;
// confirm against the full property declaration.
booleanProperty(
HASH_PARTITION_COUNT,
"Number of partitions for distributed joins and aggregations",
queryManagerConfig.getInitialHashPartitions(),
false),
booleanProperty(
// Duration-valued properties are declared as VARCHAR and parsed with
// Duration.valueOf; defaults come from the query-manager config.
VARCHAR,
Duration.class,
queryManagerConfig.getQueryMaxRunTime(),
false,
value -> Duration.valueOf((String) value),
VARCHAR,
Duration.class,
queryManagerConfig.getQueryMaxExecutionTime(),
false,
value -> Duration.valueOf((String) value),
VARCHAR,
Duration.class,
queryManagerConfig.getQueryMaxCpuTime(),
false,
value -> Duration.valueOf((String) value),
/**
 * Builds an HttpRemoteTaskFactory whose HTTP traffic is routed through an
 * in-process JAX-RS testing processor — no real network is involved.
 */
@Provides
private HttpRemoteTaskFactory createHttpRemoteTaskFactory(
        JsonMapper jsonMapper,
        JsonCodec<TaskStatus> taskStatusCodec,
        JsonCodec<TaskInfo> taskInfoCodec,
        JsonCodec<TaskUpdateRequest> taskUpdateRequestCodec)
{
    // The processor serves the testing task resource at a fake URI.
    JaxrsTestingHttpProcessor processor =
            new JaxrsTestingHttpProcessor(URI.create("http://fake.invalid/"), testingTaskResource, jsonMapper);
    TestingHttpClient client = new TestingHttpClient(processor.setTrace(TRACE_HTTP));
    // Let the resource see the client so it can drive callbacks.
    testingTaskResource.setHttpClient(client);
    return new HttpRemoteTaskFactory(
            new QueryManagerConfig(),
            TASK_MANAGER_CONFIG,
            client,
            new TestSqlTaskManager.MockLocationFactory(),
            taskStatusCodec,
            taskInfoCodec,
            taskUpdateRequestCodec,
            new RemoteTaskStats());
}
});
// Fragment of an older session-property list (surrounding declaration is outside this chunk).
EXECUTION_POLICY,
"Policy used for scheduling query tasks",
queryManagerConfig.getQueryExecutionPolicy(),
false),
// NOTE(review): booleanSessionProperty with an int default
// (getInitialHashPartitions returns a count) looks wrong — likely meant to be
// an integer property; confirm against the full declaration.
booleanSessionProperty(
HASH_PARTITION_COUNT,
"Number of partitions for distributed joins and aggregations",
queryManagerConfig.getInitialHashPartitions(),
false),
booleanSessionProperty(
// Duration-valued property declared as VARCHAR, parsed via Duration.valueOf.
VARCHAR,
Duration.class,
queryManagerConfig.getQueryMaxRunTime(),
false,
value -> Duration.valueOf((String) value)),
// Legacy variant: queries are retained by age (maxQueryAge) and history count.
this.maxQueryAge = config.getMaxQueryAge();
this.maxQueryHistory = config.getMaxQueryHistory();
this.clientTimeout = config.getClientTimeout();
// Scheduled pool for periodic query-management work; the cast to
// ThreadPoolExecutor exposes pool stats via the JMX MBean wrapper.
queryManagementExecutor = Executors.newScheduledThreadPool(config.getQueryManagerExecutorPoolSize(), threadsNamed("query-management-%s"));
queryManagementExecutorMBean = new ThreadPoolExecutorMBean((ThreadPoolExecutor) queryManagementExecutor);
// Periodic maintenance task — the anonymous Runnable body continues beyond this chunk.
queryManagementExecutor.scheduleWithFixedDelay(new Runnable()
/**
 * Loads the named queue-configuration resource from the classpath and feeds it
 * through SqlQueryQueueManager, exercising the JSON parsing path. Throws if
 * the file is malformed.
 */
private void parse(String fileName)
{
    QueryManagerConfig managerConfig = new QueryManagerConfig();
    managerConfig.setQueueConfigFile(this.getClass().getClassLoader().getResource(fileName).getPath());
    // Construction alone performs the parse; the instance is discarded.
    new SqlQueryQueueManager(
            managerConfig,
            new ObjectMapperProvider().get(),
            new MBeanExporter(ManagementFactory.getPlatformMBeanServer()));
}
@Inject public SqlQueryQueueManager(QueryManagerConfig config, ObjectMapper mapper, MBeanExporter mbeanExporter) { requireNonNull(config, "config is null"); this.mbeanExporter = requireNonNull(mbeanExporter, "mbeanExporter is null"); ImmutableList.Builder<QueryQueueRule> rules = ImmutableList.builder(); if (config.getQueueConfigFile() == null) { QueryQueueDefinition global = new QueryQueueDefinition("global", config.getMaxConcurrentQueries(), config.getMaxQueuedQueries()); rules.add(new QueryQueueRule(null, null, ImmutableMap.of(), ImmutableList.of(global))); } else { File file = new File(config.getQueueConfigFile()); ManagerSpec managerSpec; try { managerSpec = mapper.readValue(file, ManagerSpec.class); } catch (IOException e) { throw Throwables.propagate(e); } Map<String, QueryQueueDefinition> definitions = new HashMap<>(); for (Map.Entry<String, QueueSpec> queue : managerSpec.getQueues().entrySet()) { definitions.put(queue.getKey(), new QueryQueueDefinition(queue.getKey(), queue.getValue().getMaxConcurrent(), queue.getValue().getMaxQueued())); } for (RuleSpec rule : managerSpec.getRules()) { rules.add(QueryQueueRule.createRule(rule.getUserRegex(), rule.getSourceRegex(), rule.getSessionPropertyRegexes(), rule.getQueues(), definitions)); } } this.rules = rules.build(); checkIsDAG(this.rules); }
@Inject public HttpRemoteTaskFactory(QueryManagerConfig config, TaskManagerConfig taskConfig, @ForScheduler HttpClient httpClient, LocationFactory locationFactory, JsonCodec<TaskStatus> taskStatusCodec, JsonCodec<TaskInfo> taskInfoCodec, JsonCodec<TaskUpdateRequest> taskUpdateRequestCodec, RemoteTaskStats stats) { this.httpClient = httpClient; this.locationFactory = locationFactory; this.taskStatusCodec = taskStatusCodec; this.taskInfoCodec = taskInfoCodec; this.taskUpdateRequestCodec = taskUpdateRequestCodec; this.maxErrorDuration = config.getRemoteTaskMaxErrorDuration(); this.taskStatusRefreshMaxWait = taskConfig.getStatusRefreshMaxWait(); this.taskInfoUpdateInterval = taskConfig.getInfoUpdateInterval(); this.coreExecutor = newCachedThreadPool(daemonThreadsNamed("remote-task-callback-%s")); this.executor = new BoundedExecutor(coreExecutor, config.getRemoteTaskMaxCallbackThreads()); this.executorMBean = new ThreadPoolExecutorMBean((ThreadPoolExecutor) coreExecutor); this.stats = requireNonNull(stats, "stats is null"); this.updateScheduledExecutor = newSingleThreadScheduledExecutor(daemonThreadsNamed("task-info-update-scheduler-%s")); this.errorScheduledExecutor = newSingleThreadScheduledExecutor(daemonThreadsNamed("remote-task-error-delay-%s")); }
@Inject public HttpRemoteTaskFactory(QueryManagerConfig config, TaskManagerConfig taskConfig, @ForScheduler HttpClient httpClient, LocationFactory locationFactory, JsonCodec<TaskInfo> taskInfoCodec, JsonCodec<TaskUpdateRequest> taskUpdateRequestCodec) { this.httpClient = httpClient; this.locationFactory = locationFactory; this.taskInfoCodec = taskInfoCodec; this.taskUpdateRequestCodec = taskUpdateRequestCodec; this.minErrorDuration = config.getRemoteTaskMinErrorDuration(); this.taskInfoRefreshMaxWait = taskConfig.getInfoRefreshMaxWait(); this.coreExecutor = newCachedThreadPool(daemonThreadsNamed("remote-task-callback-%s")); this.executor = new BoundedExecutor(coreExecutor, config.getRemoteTaskMaxCallbackThreads()); this.executorMBean = new ThreadPoolExecutorMBean((ThreadPoolExecutor) coreExecutor); this.errorScheduledExecutor = newSingleThreadScheduledExecutor(daemonThreadsNamed("remote-task-error-delay-%s")); }
/**
 * Builds a QueryExplainer wired against this runner's metadata, with
 * hash-generation optimization enabled and exchange-aware cost calculation.
 */
private QueryExplainer getQueryExplainer()
{
    Metadata metadata = queryRunner.getMetadata();
    FeaturesConfig featuresConfig = new FeaturesConfig().setOptimizeHashGeneration(true);
    TaskCountEstimator taskCountEstimator = new TaskCountEstimator(queryRunner::getNodeCount);
    CostCalculator costCalculator = new CostCalculatorUsingExchanges(taskCountEstimator);
    // Single-node runners plan without distribution.
    List<PlanOptimizer> optimizers = new PlanOptimizers(
            metadata,
            sqlParser,
            featuresConfig,
            queryRunner.getNodeCount() == 1,
            new MBeanExporter(new TestingMBeanServer()),
            queryRunner.getSplitManager(),
            queryRunner.getPageSourceManager(),
            queryRunner.getStatsCalculator(),
            costCalculator,
            new CostCalculatorWithEstimatedExchanges(costCalculator, taskCountEstimator),
            new CostComparator(featuresConfig),
            taskCountEstimator).get();
    return new QueryExplainer(
            optimizers,
            new PlanFragmenter(metadata, queryRunner.getNodePartitioningManager(), new QueryManagerConfig()),
            metadata,
            queryRunner.getAccessControl(),
            sqlParser,
            queryRunner.getStatsCalculator(),
            costCalculator,
            ImmutableMap.of());
}
/**
 * Verifies the max-grouping-sets limit: a query within the limit analyzes
 * cleanly, while queries exceeding it fail with TOO_MANY_GROUPING_SETS —
 * including the overflow case where the grouping-set count exceeds
 * Integer.MAX_VALUE.
 */
@Test
public void testTooManyGroupingElements()
{
    // Session with the limit pinned at 2048 grouping sets.
    Session session = testSessionBuilder(new SessionPropertyManager(new SystemSessionProperties(
            new QueryManagerConfig(),
            new TaskManagerConfig(),
            new MemoryManagerConfig(),
            new FeaturesConfig().setMaxGroupingSets(2048)))).build();
    // CUBE(6) x CUBE(5) = 2^11 = 2048 grouping sets: exactly at the limit, allowed.
    analyze(session, "SELECT a, b, c, d, e, f, g, h, i, j, k, SUM(l)" +
            "FROM (VALUES (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12))\n" +
            "t (a, b, c, d, e, f, g, h, i, j, k, l)\n" +
            "GROUP BY CUBE (a, b, c, d, e, f), CUBE (g, h, i, j, k)");
    // CUBE(6) x CUBE(6) = 2^12 = 4096: over the limit.
    assertFails(session, TOO_MANY_GROUPING_SETS,
            "line 3:10: GROUP BY has 4096 grouping sets but can contain at most 2048",
            "SELECT a, b, c, d, e, f, g, h, i, j, k, l, SUM(m)" +
            "FROM (VALUES (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13))\n" +
            "t (a, b, c, d, e, f, g, h, i, j, k, l, m)\n" +
            "GROUP BY CUBE (a, b, c, d, e, f), CUBE (g, h, i, j, k, l)");
    // CUBE(31) = 2^31 grouping sets: overflows int, reported as "more than MAX_VALUE".
    assertFails(session, TOO_MANY_GROUPING_SETS,
            format("line 3:10: GROUP BY has more than %s grouping sets but can contain at most 2048", Integer.MAX_VALUE),
            "SELECT a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, " +
            "q, r, s, t, u, v, x, w, y, z, aa, ab, ac, ad, ae, SUM(af)" +
            "FROM (VALUES (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, " +
            "17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32))\n" +
            "t (a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, " +
            "q, r, s, t, u, v, x, w, y, z, aa, ab, ac, ad, ae, af)\n" +
            "GROUP BY CUBE (a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, " +
            "q, r, s, t, u, v, x, w, y, z, aa, ab, ac, ad, ae)");
}
// Fragment of a runner/constructor body (enclosing definition is outside this chunk).
this.nodePartitioningManager = new NodePartitioningManager(nodeScheduler);
// SplitManager and PlanFragmenter take default QueryManagerConfig instances here.
this.splitManager = new SplitManager(new QueryManagerConfig());
this.blockEncodingManager = new BlockEncodingManager(typeRegistry);
// Metadata manager wired with default config-backed session properties and
// the standard property managers.
this.metadata = new MetadataManager(
        typeRegistry,
        blockEncodingManager,
        new SessionPropertyManager(new SystemSessionProperties(new QueryManagerConfig(), new TaskManagerConfig(), new MemoryManagerConfig(), featuresConfig)),
        new SchemaPropertyManager(),
        new TablePropertyManager(),
        new ColumnPropertyManager(),
        transactionManager);
this.planFragmenter = new PlanFragmenter(this.metadata, this.nodePartitioningManager, new QueryManagerConfig());
this.joinCompiler = new JoinCompiler(metadata, featuresConfig);
this.pageIndexerFactory = new GroupByHashPageIndexerFactory(joinCompiler);
/**
 * One-time test fixture: cost calculators sized for a fixed node count, a
 * bogus "tpch" catalog, test metadata/transaction managers, and a plan
 * fragmenter backed by a coordinator-inclusive node scheduler.
 */
@BeforeClass
public void setUp()
{
    // Cost calculation pinned to a known cluster size.
    TaskCountEstimator estimator = new TaskCountEstimator(() -> NUMBER_OF_NODES);
    costCalculatorUsingExchanges = new CostCalculatorUsingExchanges(estimator);
    costCalculatorWithEstimatedExchanges = new CostCalculatorWithEstimatedExchanges(costCalculatorUsingExchanges, estimator);

    session = testSessionBuilder().setCatalog("tpch").build();

    // Catalog/transaction/metadata wiring against a bogus tpch catalog.
    CatalogManager catalogs = new CatalogManager();
    catalogs.registerCatalog(createBogusTestingCatalog("tpch"));
    transactionManager = createTestTransactionManager(catalogs);
    metadata = createTestMetadataManager(transactionManager, new FeaturesConfig());

    // FinalizerService must be started before the NodeTaskMap uses it.
    finalizerService = new FinalizerService();
    finalizerService.start();
    nodeScheduler = new NodeScheduler(
            new LegacyNetworkTopology(),
            new InMemoryNodeManager(),
            new NodeSchedulerConfig().setIncludeCoordinator(true),
            new NodeTaskMap(finalizerService));
    nodePartitioningManager = new NodePartitioningManager(nodeScheduler);
    planFragmenter = new PlanFragmenter(metadata, nodePartitioningManager, new QueryManagerConfig());
}
/**
 * Creates a QueryExplainer for this runner: exchange-based costing, hash
 * generation enabled, and a fragmenter built on the runner's partitioning
 * manager with a default QueryManagerConfig.
 */
private QueryExplainer getQueryExplainer()
{
    Metadata runnerMetadata = queryRunner.getMetadata();
    FeaturesConfig features = new FeaturesConfig().setOptimizeHashGeneration(true);
    boolean singleNode = queryRunner.getNodeCount() == 1;
    TaskCountEstimator estimator = new TaskCountEstimator(queryRunner::getNodeCount);
    CostCalculator exchangeCosts = new CostCalculatorUsingExchanges(estimator);
    List<PlanOptimizer> optimizers = new PlanOptimizers(
            runnerMetadata,
            sqlParser,
            features,
            singleNode,
            new MBeanExporter(new TestingMBeanServer()),
            queryRunner.getSplitManager(),
            queryRunner.getPageSourceManager(),
            queryRunner.getStatsCalculator(),
            exchangeCosts,
            new CostCalculatorWithEstimatedExchanges(exchangeCosts, estimator),
            new CostComparator(features),
            estimator).get();
    return new QueryExplainer(
            optimizers,
            new PlanFragmenter(runnerMetadata, queryRunner.getNodePartitioningManager(), new QueryManagerConfig()),
            runnerMetadata,
            queryRunner.getAccessControl(),
            sqlParser,
            queryRunner.getStatsCalculator(),
            exchangeCosts,
            ImmutableMap.of());
}