/**
 * Forwards plain-SQL execution to the wrapped {@code source} runner.
 */
@Override
public MaterializedResult execute(String sql)
{
    return source.execute(sql);
}
/**
 * Forwards session-scoped SQL execution to the wrapped {@code source} runner.
 */
@Override
public MaterializedResult execute(Session session, String sql)
{
    return source.execute(session, sql);
}
@Test
public void testMetadataIsClearedAfterQueryFinished()
{
    // Run a simple query to completion
    queryRunner.execute("SELECT * FROM nation");

    // Once the query finishes, its per-query catalog tracking must be released
    assertEquals(metadataManager.getCatalogsByQueryId().size(), 0);
}
@Test public void testMetadataIsClearedAfterQueryFailed() { @Language("SQL") String sql = "SELECT nationkey/0 FROM nation"; // will raise division by zero exception try { queryRunner.execute(sql); fail("expected exception"); } catch (Throwable t) { // query should fail } assertEquals(metadataManager.getCatalogsByQueryId().size(), 0); }
// Runs a query and waits for the global resource group to observe its memory usage.
@Test
public void testMemoryFraction()
        throws Exception
{
    try (DistributedQueryRunner queryRunner = getSimpleQueryRunner()) {
        queryRunner.execute("SELECT COUNT(*), clerk FROM orders GROUP BY clerk");
        // Blocks until the global group reports a positive memory limit
        waitForGlobalResourceGroup(queryRunner);
    }
}
}
@Test(expectedExceptions = RuntimeException.class, expectedExceptionsMessageRegExp = "Insufficient active worker nodes. Waited 1.00ns for at least 5 workers, but only 4 workers are active")
public void testInsufficientWorkerNodes()
        throws Exception
{
    // Require more workers (5) than the cluster has (4), with an effectively zero wait
    ImmutableMap<String, String> coordinatorProperties = ImmutableMap.<String, String>builder()
            .put("query-manager.required-workers", "5")
            .put("query-manager.required-workers-max-wait", "1ns")
            .build();
    try (DistributedQueryRunner queryRunner = TpchQueryRunnerBuilder.builder()
            .setCoordinatorProperties(coordinatorProperties)
            .setNodeCount(4)
            .build()) {
        queryRunner.execute("SELECT 1");
        // Defensive: expectedExceptions should have tripped before this point
        fail("Expected exception due to insufficient active worker nodes");
    }
}
@Test(expectedExceptions = RuntimeException.class, expectedExceptionsMessageRegExp = "Insufficient active worker nodes. Waited 1.00ns for at least 4 workers, but only 3 workers are active")
public void testInsufficientWorkerNodesWithCoordinatorExcluded()
        throws Exception
{
    // Excluding the coordinator leaves only 3 schedulable workers of the 4 nodes
    ImmutableMap<String, String> coordinatorProperties = ImmutableMap.<String, String>builder()
            .put("node-scheduler.include-coordinator", "false")
            .put("query-manager.required-workers", "4")
            .put("query-manager.required-workers-max-wait", "1ns")
            .build();
    try (DistributedQueryRunner queryRunner = TpchQueryRunnerBuilder.builder()
            .setCoordinatorProperties(coordinatorProperties)
            .setNodeCount(4)
            .build()) {
        queryRunner.execute("SELECT 1");
        // Defensive: expectedExceptions should have tripped before this point
        fail("Expected exception due to insufficient active worker nodes");
    }
}
@Test(timeOut = 60_000)
public void testQuerySystemTableResourceGroup()
        throws Exception
{
    // Start a long-running query attributed to the dashboard source
    QueryId firstQuery = createQuery(queryRunner, dashboardSession(), LONG_LASTING_QUERY);
    waitForQueryState(queryRunner, firstQuery, RUNNING);

    // The runtime queries table should expose the query's full resource group path
    String sql = "SELECT resource_group_id FROM system.runtime.queries WHERE source = 'dashboard'";
    MaterializedResult result = queryRunner.execute(sql);
    assertEquals(result.getOnlyValue(), ImmutableList.of("global", "user-user", "dashboard-user"));
}
/**
 * The coordinator requires 5 workers before initialization completes, but only
 * 4 nodes are started, so queries are rejected while the cluster still reports
 * itself as initializing.
 */
@Test(expectedExceptions = RuntimeException.class, expectedExceptionsMessageRegExp = "Cluster is still initializing, there are insufficient active worker nodes \\(4\\) to run query")
public void testInsufficientInitialWorkerNodes()
        throws Exception
{
    try (DistributedQueryRunner queryRunner = TpchQueryRunnerBuilder.builder()
            .setSingleCoordinatorProperty("query-manager.initialization-required-workers", "5")
            .setNodeCount(4)
            .build()) {
        queryRunner.execute("SELECT 1");
        // Defensive: expectedExceptions should have tripped before this point
        fail("Expected exception due to insufficient active worker nodes");
    }
}
@Test
public void testInitializationTimeout()
        throws Exception
{
    // Initialization wants 5 workers but the (1ns) timeout expires immediately,
    // after which queries are admitted with whatever workers are available
    ImmutableMap<String, String> coordinatorProperties = ImmutableMap.<String, String>builder()
            .put("query-manager.initialization-required-workers", "5")
            .put("query-manager.initialization-timeout", "1ns")
            .build();
    try (DistributedQueryRunner queryRunner = TpchQueryRunnerBuilder.builder()
            .setCoordinatorProperties(coordinatorProperties)
            .setNodeCount(4)
            .build()) {
        queryRunner.execute("SELECT 1");
        assertEquals(queryRunner.getCoordinator().refreshNodes().getActiveNodes().size(), 4);
    }
}
@Test(timeOut = 240_000) public void testResourceOverCommit() throws Exception { Map<String, String> properties = ImmutableMap.<String, String>builder() .put("query.max-memory-per-node", "1kB") .put("query.max-total-memory-per-node", "1kB") .put("query.max-memory", "1kB") .build(); try (DistributedQueryRunner queryRunner = createQueryRunner(TINY_SESSION, properties)) { try { queryRunner.execute("SELECT COUNT(*), clerk FROM orders GROUP BY clerk"); fail(); } catch (RuntimeException e) { // expected } Session session = testSessionBuilder() .setCatalog("tpch") .setSchema("tiny") .setSystemProperty(RESOURCE_OVERCOMMIT, "true") .build(); queryRunner.execute(session, "SELECT COUNT(*), clerk FROM orders GROUP BY clerk"); } }
// Runs the given query and blocks until the expected number of listener events arrive.
private MaterializedResult runQueryAndWaitForEvents(@Language("SQL") String sql, int numEventsExpected)
        throws Exception
{
    // Arm the event collector before running so no event is missed
    generatedEvents.initialize(numEventsExpected);
    MaterializedResult queryResult = queryRunner.execute(session, sql);
    // Wait for the events (10 is presumably a timeout in seconds — confirm against EventsBuilder)
    generatedEvents.waitForEvents(10);
    return queryResult;
}
/**
 * With the coordinator excluded from scheduling, only 3 of the 4 nodes count as
 * workers — below the 4 required for initialization — so queries are rejected
 * while the cluster is still initializing.
 */
@Test(expectedExceptions = RuntimeException.class, expectedExceptionsMessageRegExp = "Cluster is still initializing, there are insufficient active worker nodes \\(3\\) to run query")
public void testInsufficientInitialWorkerNodesWithCoordinatorExcluded()
        throws Exception
{
    try (DistributedQueryRunner queryRunner = TpchQueryRunnerBuilder.builder()
            .setSingleExtraProperty("node-scheduler.include-coordinator", "false")
            .setSingleCoordinatorProperty("query-manager.initialization-required-workers", "4")
            .setNodeCount(4)
            .build()) {
        queryRunner.execute("SELECT 1");
        // Defensive: expectedExceptions should have tripped before this point
        fail("Expected exception due to insufficient active worker nodes");
    }
}
@Test public void testInsufficientWorkerNodesAfterDrop() throws Exception { try (DistributedQueryRunner queryRunner = TpchQueryRunnerBuilder.builder() .setCoordinatorProperties(ImmutableMap.<String, String>builder() .put("query-manager.required-workers", "4") .put("query-manager.required-workers-max-wait", "1ns") .build()) .setNodeCount(4) .build()) { queryRunner.execute("SELECT 1"); assertEquals(queryRunner.getCoordinator().refreshNodes().getActiveNodes().size(), 4); try { // Query should still be allowed to run if active workers drop down below the minimum required nodes queryRunner.getServers().get(0).close(); assertEquals(queryRunner.getCoordinator().refreshNodes().getActiveNodes().size(), 3); queryRunner.execute("SELECT 1"); } catch (RuntimeException e) { assertEquals(e.getMessage(), "Insufficient active worker nodes. Waited 1.00ns for at least 4 workers, but only 3 workers are active"); } } } }
/**
 * initialization-required-workers only gates cluster startup: once initialized,
 * queries keep running even after the worker count drops below the threshold.
 */
@Test
public void testSufficientInitialWorkerNodes()
        throws Exception
{
    try (DistributedQueryRunner queryRunner = TpchQueryRunnerBuilder.builder()
            .setSingleCoordinatorProperty("query-manager.initialization-required-workers", "4")
            .setNodeCount(4)
            .build()) {
        queryRunner.execute("SELECT 1");
        assertEquals(queryRunner.getCoordinator().refreshNodes().getActiveNodes().size(), 4);
        // Query should still be allowed to run if active workers drop down below the minimum required nodes
        queryRunner.getServers().get(0).close();
        assertEquals(queryRunner.getCoordinator().refreshNodes().getActiveNodes().size(), 3);
        queryRunner.execute("SELECT 1");
    }
}
// Fragment of a larger method (loop body continues past this view):
// submit 5 identical aggregation queries concurrently on the executor
List<ListenableFuture<?>> queryFutures = new ArrayList<>();
for (int i = 0; i < 5; i++) {
    queryFutures.add(executor.submit(() -> queryRunner.execute("SELECT COUNT(*), clerk FROM orders GROUP BY clerk")));
private void testNoLeak(@Language("SQL") String query) throws Exception { Map<String, String> properties = ImmutableMap.<String, String>builder() .put("task.verbose-stats", "true") .build(); try (DistributedQueryRunner queryRunner = createQueryRunner(TINY_SESSION, properties)) { executor.submit(() -> queryRunner.execute(query)).get(); for (BasicQueryInfo info : queryRunner.getCoordinator().getQueryManager().getQueries()) { assertEquals(info.getState(), FINISHED); } // Make sure we didn't leak any memory on the workers for (TestingPrestoServer worker : queryRunner.getServers()) { Optional<MemoryPool> reserved = worker.getLocalMemoryManager().getReservedPool(); assertTrue(reserved.isPresent()); assertEquals(reserved.get().getMaxBytes(), reserved.get().getFreeBytes()); MemoryPool general = worker.getLocalMemoryManager().getGeneralPool(); assertEquals(general.getMaxBytes(), general.getFreeBytes()); } } }
@Test(timeOut = 60_000)
public void testRunningQuery()
        throws Exception
{
    queryRunner.execute("SELECT COUNT(*), clerk FROM orders GROUP BY clerk");

    // Poll until the bi-user group reports a positive soft memory limit;
    // the @Test timeOut bounds the overall wait
    ResourceGroupId biUserGroup = new ResourceGroupId(new ResourceGroupId("global"), "bi-user");
    while (queryRunner.getCoordinator().getResourceGroupManager().get().getResourceGroupInfo(biUserGroup).getSoftMemoryLimit().toBytes() <= 0) {
        TimeUnit.SECONDS.sleep(2);
    }
}
@Test
public void testMemoryFraction()
        throws Exception
{
    try (DistributedQueryRunner queryRunner = TpchQueryRunnerBuilder.builder().build()) {
        queryRunner.installPlugin(new ResourceGroupManagerPlugin());
        // Point the file-based manager at the percentage-based group config
        ImmutableMap<String, String> managerConfig = ImmutableMap.of(
                "resource-groups.config-file", getResourceFilePath("resource_groups_memory_percentage.json"));
        getResourceGroupManager(queryRunner).setConfigurationManager("file", managerConfig);

        queryRunner.execute("SELECT COUNT(*), clerk FROM orders GROUP BY clerk");
        waitForGlobalResourceGroup(queryRunner);
    }
}
@Test
public void testPathToRoot()
        throws Exception
{
    try (DistributedQueryRunner queryRunner = TpchQueryRunnerBuilder.builder().build()) {
        queryRunner.installPlugin(new ResourceGroupManagerPlugin());
        InternalResourceGroupManager<?> manager = getResourceGroupManager(queryRunner);
        manager.setConfigurationManager("file", ImmutableMap.of(
                "resource-groups.config-file", getResourceFilePath("resource_groups_config_dashboard.json")));

        // Run a query attributed to the dashboard source so its group gets created
        Session session = testSessionBuilder()
                .setCatalog("tpch")
                .setSchema("tiny")
                .setSource("dashboard-foo")
                .build();
        queryRunner.execute(session, "SELECT COUNT(*), clerk FROM orders GROUP BY clerk");

        // Path is ordered leaf-to-root: dashboard-user, user-user, global
        ResourceGroupId dashboardGroup = new ResourceGroupId(new ResourceGroupId(new ResourceGroupId("global"), "user-user"), "dashboard-user");
        List<ResourceGroupInfo> path = manager.getPathToRoot(dashboardGroup);
        assertEquals(path.size(), 3);
        assertTrue(path.get(1).getSubGroups() != null);
        assertEquals(path.get(2).getId(), new ResourceGroupId("global"));
        assertEquals(path.get(2).getHardConcurrencyLimit(), 100);
        assertEquals(path.get(2).getRunningQueries(), null);
    }
}