/**
 * Returns the elapsed time since {@code start} (a {@code System.nanoTime()} reading),
 * exposed as a JSON property.
 */
@JsonProperty
public Duration getAge()
{
    return Duration.nanosSince(start);
}
/**
 * Reports whether this object has been idle longer than {@code idleTimeout}.
 * Returns {@code false} when no idle start time has been recorded (i.e. the
 * object is not currently idle).
 */
public boolean isExpired(Duration idleTimeout)
{
    Long idleStart = this.idleStartTime.get();
    if (idleStart == null) {
        // not idle, so it cannot have expired
        return false;
    }
    return Duration.nanosSince(idleStart).compareTo(idleTimeout) > 0;
}
// NOTE: despite the name, this also reports "failed" while monitoring is disabled or
// while the task is still inside the warmup window — callers treat all three states
// as not-yet-healthy.
public synchronized boolean isFailed()
{
    return future == null || // are we disabled?
            successTransitionTimestamp == null || // have we ever transitioned to the success state?
            Duration.nanosSince(successTransitionTimestamp).compareTo(warmupInterval) < 0; // are we still within the warmup period?
}
/**
 * Reports whether this entry is eligible for garbage collection: it must be
 * disabled (no active future), have a recorded disable timestamp, and have been
 * disabled for longer than the GC grace interval.
 */
public synchronized boolean isExpired()
{
    if (future != null) {
        // still active
        return false;
    }
    if (disabledTimestamp == null) {
        // never recorded as disabled
        return false;
    }
    return Duration.nanosSince(disabledTimestamp).compareTo(gcGraceInterval) > 0;
}
/**
 * REST endpoint returning this server's info: version, environment, coordinator
 * flag, whether the server is still starting (catalogs not yet loaded), and uptime.
 */
@GET
@Produces(APPLICATION_JSON)
public ServerInfo getInfo()
{
    // the server counts as "starting" until all catalogs have been loaded
    boolean catalogsLoaded = catalogStore.areCatalogsLoaded();
    Duration uptime = nanosSince(startTime);
    return new ServerInfo(version, environment, coordinator, !catalogsLoaded, Optional.of(uptime));
}
/**
 * Verifies the cluster has enough active workers to start accepting queries.
 * Throws {@code SERVER_STARTING_UP} while the worker count is below the
 * initialization minimum and the maximum wait window has not yet elapsed;
 * otherwise latches {@code minimumWorkerRequirementMet} so subsequent calls
 * are no-ops.
 */
public synchronized void verifyInitialMinimumWorkersRequirement()
{
    if (minimumWorkerRequirementMet) {
        // requirement already satisfied once; never re-check
        return;
    }
    boolean belowMinimum = currentCount < initializationMinCount;
    boolean withinWaitWindow = nanosSince(createNanos).compareTo(initializationMaxWait) < 0;
    if (belowMinimum && withinWaitWindow) {
        throw new PrestoException(SERVER_STARTING_UP, format("Cluster is still initializing, there are insufficient active worker nodes (%s) to run query", currentCount));
    }
    // either enough workers showed up, or we waited long enough — proceed permanently
    minimumWorkerRequirementMet = true;
}
/**
 * Installs the given plugin on every server in the cluster and logs the total
 * installation time.
 */
@Override
public void installPlugin(Plugin plugin)
{
    long startNanos = System.nanoTime();
    for (TestingPrestoServer node : servers) {
        node.installPlugin(plugin);
    }
    log.info("Installed plugin %s in %s", plugin.getClass().getSimpleName(), nanosSince(startNanos).convertToMostSuccinctTimeUnit());
}
// Records the round-trip time of an info request, measured from the supplied
// System.nanoTime() reading, into the stats sink (in milliseconds).
private void updateStats(long currentRequestStartNanos)
{
    Duration roundTrip = nanosSince(currentRequestStartNanos);
    stats.infoRoundTripMillis(roundTrip.toMillis());
}
// Records the round-trip time of a status request, measured from the supplied
// System.nanoTime() reading, into the stats sink (in milliseconds).
private void updateStats(long currentRequestStartNanos)
{
    Duration roundTrip = nanosSince(currentRequestStartNanos);
    stats.statusRoundTripMillis(roundTrip.toMillis());
}
}
// Records the request round-trip time, measured from the supplied
// System.nanoTime() reading, into the stats sink (in milliseconds).
private void updateStats(long currentRequestStartNanos)
{
    stats.updateRoundTripMillis(Duration.nanosSince(currentRequestStartNanos).toMillis());
}
}
/**
 * Post-test hook: asserts that every queued expression-test future completed,
 * then logs the test method's elapsed time and the number of expressions verified.
 */
@AfterMethod
public void tearDown(Method method)
{
    boolean allComplete = Futures.allAsList(futures).isDone();
    assertTrue(allComplete, "Expression test futures are not complete");
    log.info("FINISHED %s in %s verified %s expressions", method.getName(), Duration.nanosSince(start), futures.size());
}
/**
 * Copies {@code table} into the target catalog/schema of {@code session} via
 * CTAS, logging the row count and elapsed time.
 */
public static void copyTable(QueryRunner queryRunner, QualifiedObjectName table, Session session)
{
    long startNanos = System.nanoTime();
    log.info("Running import for %s", table.getObjectName());
    @Language("SQL")
    String sql = format("CREATE TABLE %s AS SELECT * FROM %s", table.getObjectName(), table);
    // CTAS returns a single row whose first field is the number of rows written
    long rows = (Long) queryRunner.execute(session, sql).getMaterializedRows().get(0).getField(0);
    log.info("Imported %s rows for %s in %s", rows, table.getObjectName(), nanosSince(startNanos).convertToMostSuccinctTimeUnit());
}
}
/**
 * Polls the server's node manager until at least one active node appears,
 * asserting failure if none shows up within 10 seconds.
 *
 * @throws InterruptedException if interrupted while sleeping between polls
 */
static void waitForNodeRefresh(TestingPrestoServer server)
        throws InterruptedException
{
    long start = System.nanoTime();
    // idiom: isEmpty() instead of size() < 1
    while (server.refreshNodes().getActiveNodes().isEmpty()) {
        assertLessThan(nanosSince(start), new Duration(10, SECONDS));
        MILLISECONDS.sleep(10);
    }
}
/**
 * Polls the coordinator's node manager until it reports at least
 * {@code numberOfNodes} active nodes, asserting failure after 10 seconds.
 */
private void waitForNodes(int numberOfNodes)
        throws InterruptedException
{
    DistributedQueryRunner queryRunner = (DistributedQueryRunner) getQueryRunner();
    long startNanos = System.nanoTime();
    while (queryRunner.getCoordinator().refreshNodes().getActiveNodes().size() < numberOfNodes) {
        assertLessThan(nanosSince(startNanos), new Duration(10, SECONDS));
        MILLISECONDS.sleep(10);
    }
}
}
/**
 * Copies each of the given TPC-H tables from the source catalog/schema into the
 * session's target, logging overall progress and total elapsed time in seconds.
 */
private static void copyTpchTables(
        QueryRunner queryRunner,
        String sourceCatalog,
        String sourceSchema,
        Session session,
        Iterable<TpchTable<?>> tables)
{
    LOG.info("Loading data from %s.%s...", sourceCatalog, sourceSchema);
    long startTime = System.nanoTime();
    for (TpchTable<?> tpchTable : tables) {
        copyTable(queryRunner, sourceCatalog, session, sourceSchema, tpchTable);
    }
    LOG.info("Loading from %s.%s complete in %s", sourceCatalog, sourceSchema, nanosSince(startTime).toString(SECONDS));
}
/**
 * Copies each of the given TPC-H tables as bucketed tables from the source
 * catalog/schema, logging overall progress and total elapsed time in seconds.
 */
public static void copyTpchTablesBucketed(
        QueryRunner queryRunner,
        String sourceCatalog,
        String sourceSchema,
        Session session,
        Iterable<TpchTable<?>> tables)
{
    log.info("Loading data from %s.%s...", sourceCatalog, sourceSchema);
    long startTime = System.nanoTime();
    for (TpchTable<?> tpchTable : tables) {
        QualifiedObjectName sourceName = new QualifiedObjectName(sourceCatalog, sourceSchema, tpchTable.getTableName().toLowerCase(ENGLISH));
        copyTableBucketed(queryRunner, sourceName, session);
    }
    log.info("Loading from %s.%s complete in %s", sourceCatalog, sourceSchema, nanosSince(startTime).toString(SECONDS));
}
/**
 * Polls once per second until the "global" resource group reports a positive
 * soft memory limit (i.e. the group has been configured), asserting failure
 * after 60 seconds.
 */
public static void waitForGlobalResourceGroup(DistributedQueryRunner queryRunner)
        throws InterruptedException
{
    long startTime = System.nanoTime();
    while (true) {
        SECONDS.sleep(1);
        ResourceGroupInfo globalGroup = getResourceGroupManager(queryRunner).getResourceGroupInfo(new ResourceGroupId("global"));
        if (globalGroup.getSoftMemoryLimit().toBytes() > 0) {
            return;
        }
        assertLessThan(nanosSince(startTime).roundTo(SECONDS), 60L);
    }
}
/**
 * Loads one TPC-H table into Elasticsearch by selecting all rows from the tiny
 * TPC-H schema and writing them via {@code ElasticsearchLoader}, logging the
 * elapsed time.
 */
private static void loadTpchTopic(EmbeddedElasticsearchNode embeddedElasticsearchNode, TestingPrestoClient prestoClient, TpchTable<?> table)
{
    long startNanos = System.nanoTime();
    LOG.info("Running import for %s", table.getTableName());
    String indexName = table.getTableName().toLowerCase(ENGLISH);
    ElasticsearchLoader loader = new ElasticsearchLoader(embeddedElasticsearchNode.getClient(), indexName, prestoClient.getServer(), prestoClient.getDefaultSession());
    QualifiedObjectName sourceName = new QualifiedObjectName(TPCH_SCHEMA, TINY_SCHEMA_NAME, indexName);
    loader.execute(format("SELECT * from %s", sourceName));
    LOG.info("Imported %s in %s", table.getTableName(), nanosSince(startNanos).convertToMostSuccinctTimeUnit());
}
/**
 * Loads one TPC-H table into a Kafka topic via {@code TestUtils.loadTpchTopic},
 * logging the elapsed time.
 */
private static void loadTpchTopic(EmbeddedKafka embeddedKafka, TestingPrestoClient prestoClient, TpchTable<?> table)
{
    long start = System.nanoTime();
    log.info("Running import for %s", table.getTableName());
    TestUtils.loadTpchTopic(embeddedKafka, prestoClient, kafkaTopicName(table), new QualifiedObjectName("tpch", TINY_SCHEMA_NAME, table.getTableName().toLowerCase(ENGLISH)));
    // Fixed: a stray "0" argument shifted the format arguments, so the message
    // printed "Imported 0 in <table>" and silently dropped the elapsed time.
    log.info("Imported %s in %s", table.getTableName(), nanosSince(start).convertToMostSuccinctTimeUnit());
}
/**
 * Loads one TPC-H table into Redis using the given data format, logging the
 * elapsed time.
 */
private static void loadTpchTable(EmbeddedRedis embeddedRedis, TestingPrestoClient prestoClient, TpchTable<?> table, String dataFormat)
{
    long startNanos = System.nanoTime();
    log.info("Running import for %s", table.getTableName());
    QualifiedObjectName sourceName = new QualifiedObjectName("tpch", TINY_SCHEMA_NAME, table.getTableName().toLowerCase(ENGLISH));
    RedisTestUtils.loadTpchTable(
            embeddedRedis,
            prestoClient,
            redisTableName(table),
            sourceName,
            dataFormat);
    log.info("Imported %s in %s", table.getTableName(), nanosSince(startNanos).convertToMostSuccinctTimeUnit());
}