throws Exception queryRunner = DistributedQueryRunner.builder(session).build(); queryRunner.installPlugin(new Plugin()
@Test public void testInsufficientWorkerNodesAfterDrop() throws Exception { try (DistributedQueryRunner queryRunner = TpchQueryRunnerBuilder.builder() .setCoordinatorProperties(ImmutableMap.<String, String>builder() .put("query-manager.required-workers", "4") .put("query-manager.required-workers-max-wait", "1ns") .build()) .setNodeCount(4) .build()) { queryRunner.execute("SELECT 1"); assertEquals(queryRunner.getCoordinator().refreshNodes().getActiveNodes().size(), 4); try { // Query should still be allowed to run if active workers drop down below the minimum required nodes queryRunner.getServers().get(0).close(); assertEquals(queryRunner.getCoordinator().refreshNodes().getActiveNodes().size(), 3); queryRunner.execute("SELECT 1"); } catch (RuntimeException e) { assertEquals(e.getMessage(), "Insufficient active worker nodes. Waited 1.00ns for at least 4 workers, but only 3 workers are active"); } } } }
/**
 * Creates a distributed query runner with a "thrift" catalog pointing at the
 * given local Drift servers.
 *
 * @param servers running Drift servers to use as connector endpoints
 * @param nodeCount number of nodes for the query runner cluster
 * @param properties extra config properties applied to every node
 * @return a fully initialized query runner; the caller owns and must close it
 * @throws Exception if cluster startup or catalog creation fails
 */
private static DistributedQueryRunner createThriftQueryRunnerInternal(List<DriftServer> servers, int nodeCount, Map<String, String> properties)
        throws Exception
{
    String addresses = servers.stream()
            .map(server -> "localhost:" + driftServerPort(server))
            .collect(joining(","));

    Session defaultSession = testSessionBuilder()
            .setCatalog("thrift")
            .setSchema("tiny")
            .build();
    DistributedQueryRunner queryRunner = DistributedQueryRunner.builder(defaultSession)
            .setNodeCount(nodeCount)
            .setExtraProperties(properties)
            .build();
    try {
        queryRunner.installPlugin(new ThriftPlugin());
        Map<String, String> connectorProperties = ImmutableMap.<String, String>builder()
                .put("presto.thrift.client.addresses", addresses)
                .put("presto.thrift.client.connect-timeout", "30s")
                .put("presto-thrift.lookup-requests-concurrency", "2")
                .build();
        queryRunner.createCatalog("thrift", "presto-thrift", connectorProperties);
        return queryRunner;
    }
    catch (Exception e) {
        // BUG FIX: previously the query runner leaked if plugin installation or
        // catalog creation threw; close it before propagating, matching the
        // pattern used by the other createQueryRunner factories in this codebase.
        queryRunner.close();
        throw e;
    }
}
throws Exception queryRunner = DistributedQueryRunner.builder(session).build(); queryRunner.installPlugin(new Plugin()
/**
 * Builds a two-node TPCH query runner whose resource groups are configured
 * from the H2 database at {@code dbConfigUrl}.
 *
 * @param dbConfigUrl JDBC URL of the H2 resource-groups configuration database
 * @param dao DAO used by {@code setup} to seed the configuration tables
 * @param environment node environment name the runner (and config manager) use
 * @return a fully initialized query runner; the caller owns and must close it
 * @throws Exception if cluster startup or configuration fails (the partially
 *         built runner is closed before rethrowing)
 */
public static DistributedQueryRunner createQueryRunner(String dbConfigUrl, H2ResourceGroupsDao dao, String environment)
        throws Exception
{
    Session session = testSessionBuilder()
            .setCatalog("tpch")
            .setSchema("tiny")
            .build();
    DistributedQueryRunner queryRunner = DistributedQueryRunner.builder(session)
            .setNodeCount(2)
            .setEnvironment(environment)
            .build();
    try {
        // Wire the resource-group manager to the H2 database first, then add TPCH.
        queryRunner.installPlugin(new H2ResourceGroupManagerPlugin());
        queryRunner.getCoordinator().getResourceGroupManager().get()
                .setConfigurationManager(CONFIGURATION_MANAGER_TYPE, ImmutableMap.of(
                        "resource-groups.config-db-url", dbConfigUrl,
                        "node.environment", environment));
        queryRunner.installPlugin(new TpchPlugin());
        queryRunner.createCatalog("tpch", "tpch");
        setup(queryRunner, dao, environment);
        return queryRunner;
    }
    catch (Exception e) {
        queryRunner.close();
        throw e;
    }
}
/**
 * Builds a four-node TPCDS query runner using the {@code sf1} schema.
 *
 * @param extraProperties config properties applied to every node
 * @param coordinatorProperties config properties applied to the coordinator only
 * @return a fully initialized query runner; the caller owns and must close it
 * @throws Exception if cluster startup or catalog creation fails (the partially
 *         built runner is closed before rethrowing)
 */
public static DistributedQueryRunner createQueryRunner(Map<String, String> extraProperties, Map<String, String> coordinatorProperties)
        throws Exception
{
    Session defaultSession = testSessionBuilder()
            .setSource("test")
            .setCatalog("tpcds")
            .setSchema("sf1")
            .build();

    DistributedQueryRunner queryRunner = DistributedQueryRunner.builder(defaultSession)
            .setNodeCount(4)
            .setExtraProperties(extraProperties)
            .setCoordinatorProperties(coordinatorProperties)
            .build();
    try {
        queryRunner.installPlugin(new TpcdsPlugin());
        queryRunner.createCatalog("tpcds", "tpcds");
        return queryRunner;
    }
    catch (Exception e) {
        queryRunner.close();
        throw e;
    }
}
@Test public void testInsufficientWorkerNodesAfterDrop() throws Exception { try (DistributedQueryRunner queryRunner = TpchQueryRunnerBuilder.builder() .setCoordinatorProperties(ImmutableMap.<String, String>builder() .put("query-manager.required-workers", "4") .put("query-manager.required-workers-max-wait", "1ns") .build()) .setNodeCount(4) .build()) { queryRunner.execute("SELECT 1"); assertEquals(queryRunner.getCoordinator().refreshNodes().getActiveNodes().size(), 4); try { // Query should still be allowed to run if active workers drop down below the minimum required nodes queryRunner.getServers().get(0).close(); assertEquals(queryRunner.getCoordinator().refreshNodes().getActiveNodes().size(), 3); queryRunner.execute("SELECT 1"); } catch (RuntimeException e) { assertEquals(e.getMessage(), "Insufficient active worker nodes. Waited 1.00ns for at least 4 workers, but only 3 workers are active"); } } } }
/**
 * Builds a two-node query runner with a plain TPCH catalog and a default session.
 *
 * @return a fully initialized query runner; the caller owns and must close it
 * @throws Exception if cluster startup or catalog creation fails (the partially
 *         built runner is closed before rethrowing)
 */
public static DistributedQueryRunner createQueryRunner()
        throws Exception
{
    DistributedQueryRunner queryRunner = DistributedQueryRunner.builder(testSessionBuilder().build())
            .setNodeCount(2)
            .build();
    try {
        queryRunner.installPlugin(new TpchPlugin());
        queryRunner.createCatalog("tpch", "tpch");
    }
    catch (Exception e) {
        queryRunner.close();
        throw e;
    }
    return queryRunner;
}
}
/**
 * Builds a two-node query runner with a plain TPCH catalog using the given session.
 *
 * @param session default session for queries executed by the runner
 * @return a fully initialized query runner; the caller owns and must close it
 * @throws Exception if cluster startup or catalog creation fails (the partially
 *         built runner is closed before rethrowing)
 */
public static DistributedQueryRunner createQueryRunner(Session session)
        throws Exception
{
    DistributedQueryRunner queryRunner = DistributedQueryRunner.builder(session)
            .setNodeCount(2)
            .build();
    try {
        queryRunner.installPlugin(new TpchPlugin());
        queryRunner.createCatalog("tpch", "tpch");
    }
    catch (Exception e) {
        queryRunner.close();
        throw e;
    }
    return queryRunner;
}
}
@Test public void testSufficientInitialWorkerNodes() throws Exception { try (DistributedQueryRunner queryRunner = TpchQueryRunnerBuilder.builder() .setSingleCoordinatorProperty("query-manager.initialization-required-workers", "4") .setNodeCount(4) .build()) { queryRunner.execute("SELECT 1"); assertEquals(queryRunner.getCoordinator().refreshNodes().getActiveNodes().size(), 4); // Query should still be allowed to run if active workers drop down below the minimum required nodes queryRunner.getServers().get(0).close(); assertEquals(queryRunner.getCoordinator().refreshNodes().getActiveNodes().size(), 3); queryRunner.execute("SELECT 1"); } }
// With the coordinator excluded from scheduling, only 3 of the 4 nodes count
// as workers, so the 4-worker requirement can never be met and the query
// must fail with the expected message.
@Test(expectedExceptions = RuntimeException.class, expectedExceptionsMessageRegExp = "Insufficient active worker nodes. Waited 1.00ns for at least 4 workers, but only 3 workers are active")
public void testInsufficientWorkerNodesWithCoordinatorExcluded()
        throws Exception
{
    try (DistributedQueryRunner queryRunner = TpchQueryRunnerBuilder.builder()
            .setCoordinatorProperties(ImmutableMap.<String, String>builder()
                    .put("node-scheduler.include-coordinator", "false")
                    .put("query-manager.required-workers", "4")
                    .put("query-manager.required-workers-max-wait", "1ns")
                    .build())
            .setNodeCount(4)
            .build()) {
        queryRunner.execute("SELECT 1");
        fail("Expected exception due to insufficient active worker nodes");
    }
}
// The initialization requirement (5 workers) is unsatisfiable with 4 nodes,
// but the 1ns initialization timeout expires immediately, so the query is
// allowed to proceed anyway.
@Test
public void testInitializationTimeout()
        throws Exception
{
    try (DistributedQueryRunner queryRunner = TpchQueryRunnerBuilder.builder()
            .setCoordinatorProperties(ImmutableMap.<String, String>builder()
                    .put("query-manager.initialization-required-workers", "5")
                    .put("query-manager.initialization-timeout", "1ns")
                    .build())
            .setNodeCount(4)
            .build()) {
        queryRunner.execute("SELECT 1");
        assertEquals(queryRunner.getCoordinator().refreshNodes().getActiveNodes().size(), 4);
    }
}
// Requiring 5 workers on a 4-node cluster can never succeed; with a 1ns wait
// the query must fail fast with the expected message.
@Test(expectedExceptions = RuntimeException.class, expectedExceptionsMessageRegExp = "Insufficient active worker nodes. Waited 1.00ns for at least 5 workers, but only 4 workers are active")
public void testInsufficientWorkerNodes()
        throws Exception
{
    try (DistributedQueryRunner queryRunner = TpchQueryRunnerBuilder.builder()
            .setCoordinatorProperties(ImmutableMap.<String, String>builder()
                    .put("query-manager.required-workers", "5")
                    .put("query-manager.required-workers-max-wait", "1ns")
                    .build())
            .setNodeCount(4)
            .build()) {
        queryRunner.execute("SELECT 1");
        fail("Expected exception due to insufficient active worker nodes");
    }
}
/**
 * Builds the query runner and installs the TPCH plugin without creating any catalogs,
 * for tests that register catalogs themselves.
 *
 * @return the built query runner; the caller owns and must close it
 * @throws Exception if the build or plugin installation fails (the partially
 *         built runner is closed before rethrowing)
 */
public DistributedQueryRunner buildWithoutCatalogs()
        throws Exception
{
    DistributedQueryRunner queryRunner = super.build();
    try {
        queryRunner.installPlugin(new TpchPlugin());
    }
    catch (Exception e) {
        queryRunner.close();
        throw e;
    }
    return queryRunner;
}
}
// A 1ms CPU limit is far below what the scan needs, so the query must fail
// with EXCEEDED_CPU_LIMIT.
@Test(timeOut = 60_000L)
public void testQueryCpuLimit()
        throws Exception
{
    try (DistributedQueryRunner queryRunner = builder().setSingleExtraProperty("query.max-cpu-time", "1ms").build()) {
        QueryId queryId = createQuery(queryRunner, TEST_SESSION, "SELECT COUNT(*) FROM lineitem");
        waitForQueryState(queryRunner, queryId, FAILED);

        BasicQueryInfo info = queryRunner.getCoordinator().getQueryManager().getQueryInfo(queryId);
        assertEquals(info.getState(), FAILED);
        assertEquals(info.getErrorCode(), EXCEEDED_CPU_LIMIT.toErrorCode());
    }
}
}
/**
 * Starts a standalone TPCH test server on HTTP port 8080 and logs its base URL.
 *
 * @param args unused
 * @throws Exception if the server fails to start
 */
public static void main(String[] args)
        throws Exception
{
    Logging.initialize();
    Logger log = Logger.get(TpchQueryRunner.class);

    DistributedQueryRunner queryRunner = TpchQueryRunnerBuilder.builder()
            .setSingleExtraProperty("http-server.http.port", "8080")
            .build();
    // Brief pause so startup log output settles before the banner is printed.
    Thread.sleep(10);
    log.info("======== SERVER STARTED ========");
    log.info("\n====\n%s\n====", queryRunner.getCoordinator().getBaseUrl());
}
}
// Builds a single-node runner preloaded with TEST_WARNINGS warnings and a
// fresh event collector before each test method.
@BeforeMethod
public void setUp()
        throws Exception
{
    generatedEvents = new EventsBuilder();

    Session session = testSessionBuilder().build();
    queryRunner = DistributedQueryRunner.builder(session)
            .setExtraProperties(ImmutableMap.of("testing-warning-collector.preloaded-warnings", String.valueOf(TEST_WARNINGS)))
            .setNodeCount(1)
            .build();
    queryRunner.installPlugin(new TestingEventListenerPlugin(generatedEvents));
    generatedEvents.initialize(EXPECTED_EVENTS);
}
/**
 * Returns a new {@link Builder} that uses {@code defaultSession} as the default
 * session for the query runner being configured.
 */
public static Builder builder(Session defaultSession) { return new Builder(defaultSession); }
// Runs the distributed query suite with the iterative optimizer disabled.
public TestNonIterativeDistributedQueries()
{
    super(() -> TpchQueryRunnerBuilder.builder()
            .setSingleExtraProperty("experimental.iterative-optimizer-enabled", "false")
            .build());
}
}
// Runs the distributed query suite with hash precomputation disabled on the coordinator.
public TestDistributedQueriesNoHashGeneration()
{
    super(() -> TpchQueryRunnerBuilder.builder()
            .setSingleCoordinatorProperty("optimizer.optimize-hash-generation", "false")
            .build());
}
}