Refine search
/**
 * Creates a multilevel split queue with one priority level per configured
 * time threshold. Each level gets its own scheduled-time accumulator,
 * minimum-priority tracker (initialized to -1, meaning "unset"), waiting
 * queue, and selection counter.
 *
 * @param levelTimeMultiplier weight multiplier applied between adjacent levels
 */
public MultilevelSplitQueue(double levelTimeMultiplier)
{
    int levels = LEVEL_THRESHOLD_SECONDS.length;
    this.levelMinPriority = new AtomicLong[levels];
    this.levelWaitingSplits = new ArrayList<>(levels);
    ImmutableList.Builder<CounterStat> counterBuilder = ImmutableList.builder();
    for (int level = 0; level < levels; level++) {
        levelScheduledTime[level] = new AtomicLong();
        levelMinPriority[level] = new AtomicLong(-1);
        levelWaitingSplits.add(new PriorityQueue<>());
        counterBuilder.add(new CounterStat());
    }
    this.selectedLevelCounters = counterBuilder.build();
    this.levelTimeMultiplier = levelTimeMultiplier;
}
@Override
public HashCollisionsInfo get()
{
    // Snapshot the operator's total input positions at call time and bundle
    // it with the accumulated collision statistics.
    long totalInputPositions = operatorContext.getInputPositions().getTotalCount();
    return createHashCollisionsInfo(totalInputPositions, hashCollisions, expectedHashCollisions);
}
}
/**
 * Returns a fresh counter combining this context's own output data size with
 * the output data size of every driver it owns.
 */
public CounterStat getOutputDataSize()
{
    CounterStat combined = new CounterStat();
    combined.merge(outputDataSize);
    drivers.forEach(driverContext -> combined.merge(driverContext.getOutputDataSize()));
    return combined;
}
start.compareAndSet(0, startNanos); lastReady.compareAndSet(0, startNanos); processCalls.incrementAndGet(); waitNanos.getAndAdd(startNanos - lastReady.get()); unblockedQuantaWallTime.add(elapsed.getWall()); blockedQuantaWallTime.add(elapsed.getWall()); long quantaCpuNanos = elapsed.getCpu().roundTo(NANOSECONDS); cpuTimeNanos.addAndGet(quantaCpuNanos); globalCpuTimeMicros.update(quantaCpuNanos / 1000); globalScheduledTimeMicros.update(quantaScheduledNanos / 1000);
/**
 * Records one getOutput() invocation: bumps the call count, folds the
 * wall/CPU/user time elapsed since the interval start into the running
 * totals, and — when a page was actually produced — records its size and
 * position count.
 */
public void recordGetOutput(Page page)
{
    getOutputCalls.incrementAndGet();
    getOutputWallNanos.getAndAdd(nanosBetween(intervalWallStart.get(), System.nanoTime()));
    getOutputCpuNanos.getAndAdd(nanosBetween(intervalCpuStart.get(), currentThreadCpuTime()));
    getOutputUserNanos.getAndAdd(nanosBetween(intervalUserStart.get(), currentThreadUserTime()));
    if (page == null) {
        // No page produced this call; nothing to account for.
        return;
    }
    outputDataSize.update(page.getSizeInBytes());
    outputPositions.update(page.getPositionCount());
}
disableDeletedGroups(deletedSpecs); if (lastRefresh.get() > 0) { for (ResourceGroupIdTemplate deleted : deletedSpecs) { log.info("Resource group spec deleted %s", deleted); lastRefresh.set(System.nanoTime()); if (succinctNanos(System.nanoTime() - lastRefresh.get()).compareTo(maxRefreshInterval) > 0) { lastRefresh.set(0); refreshFailures.update(1); log.error(e, "Error loading configuration from db");
OperatorInfo info = Optional.ofNullable(infoSupplier).map(Supplier::get).orElse(null); long inputPositionsCount = inputPositions.getTotalCount(); succinctNanos(addInputTiming.getWallNanos()), succinctNanos(addInputTiming.getCpuNanos()), succinctBytes(rawInputDataSize.getTotalCount()), succinctBytes(inputDataSize.getTotalCount()), inputPositionsCount, (double) inputPositionsCount * inputPositionsCount, succinctNanos(getOutputTiming.getWallNanos()), succinctNanos(getOutputTiming.getCpuNanos()), succinctBytes(outputDataSize.getTotalCount()), outputPositions.getTotalCount(), succinctBytes(physicalWrittenDataSize.get()), succinctNanos(blockedWallNanos.get()), succinctBytes(operatorMemoryContext.getSystemMemory()), succinctBytes(peakUserMemoryReservation.get()), succinctBytes(peakSystemMemoryReservation.get()), succinctBytes(peakTotalMemoryReservation.get()),
Distribution elapsedTime = new Distribution(this.elapsedTime); long rawInputDataSize = this.rawInputDataSize.getTotalCount(); long rawInputPositions = this.rawInputPositions.getTotalCount(); long processedInputDataSize = this.processedInputDataSize.getTotalCount(); long processedInputPositions = this.processedInputPositions.getTotalCount(); long outputDataSize = this.outputDataSize.getTotalCount(); long outputPositions = this.outputPositions.getTotalCount(); long physicalWrittenDataSize = this.physicalWrittenDataSize.get(); drivers.add(driverStats); queuedTime.add(driverStats.getQueuedTime().roundTo(NANOSECONDS)); elapsedTime.add(driverStats.getElapsedTime().roundTo(NANOSECONDS)); rawInputDataSize += driverStats.getRawInputDataSize().toBytes(); rawInputPositions += driverStats.getRawInputPositions(); processedInputDataSize += driverStats.getProcessedInputDataSize().toBytes(); processedInputPositions += driverStats.getProcessedInputPositions(); outputDataSize += driverStats.getOutputDataSize().toBytes(); outputPositions += driverStats.getOutputPositions();
/**
 * Folds another counter into this one: the total count and each decayed
 * per-window rate (1/5/15 minute) are merged independently.
 *
 * @param counterStat the counter to absorb; must not be null
 */
public void merge(CounterStat counterStat)
{
    requireNonNull(counterStat, "counterStat is null");
    count.addAndGet(counterStat.getTotalCount());
    oneMinute.merge(counterStat.getOneMinute());
    fiveMinute.merge(counterStat.getFiveMinute());
    fifteenMinute.merge(counterStat.getFifteenMinute());
}
public void testInvalidDbGetTable()
{
    // Looking up a table in a nonexistent database yields an empty result...
    assertFalse(metastore.getTable(BAD_DATABASE, TEST_TABLE).isPresent());
    // ...and must not be counted as a failure or a thrift exception.
    assertEquals(stats.getGetTable().getTotalFailures().getTotalCount(), 0);
    assertEquals(stats.getGetTable().getThriftExceptions().getTotalCount(), 0);
}
// Blocks until a split is available and returns the highest-priority one.
// The outer loop handles splits whose level priority changed while queued:
// such splits are re-offered (so they land in the correct level) and the
// wait restarts. On success, records the taken split's priority as the
// level's minimum and bumps the level's selection counter.
// NOTE(review): correctness depends on pollSplit/offer being called only
// while holding 'lock', and on notEmpty being signalled by offer().
public PrioritizedSplitRunner take()
        throws InterruptedException
{
    while (true) {
        lock.lockInterruptibly();
        try {
            PrioritizedSplitRunner result;
            // Wait until some level has a split to hand out.
            while ((result = pollSplit()) == null) {
                notEmpty.await();
            }
            if (result.updateLevelPriority()) {
                // Priority was stale; requeue at the updated level and retry.
                offer(result);
                continue;
            }
            int selectedLevel = result.getPriority().getLevel();
            // Track the smallest priority handed out from this level.
            levelMinPriority[selectedLevel].set(result.getPriority().getLevelPriority());
            selectedLevelCounters.get(selectedLevel).update(1);
            return result;
        }
        finally {
            lock.unlock();
        }
    }
}
10, 10, new DataSize(1, MEGABYTE), new TestingHiveSplitLoader(), Executors.newFixedThreadPool(5), new CounterStat()); assertEquals(hiveSplitSource.getBufferedInternalSplitCount(), i + 1); assertEquals(getSplits(hiveSplitSource, 1).size(), 1); assertEquals(hiveSplitSource.getBufferedInternalSplitCount(), 4);
@Test
public void testEmptyBucket()
{
    HiveSplitSource splitSource = HiveSplitSource.bucketed(
            SESSION,
            "database",
            "table",
            TupleDomain.all(),
            10,
            10,
            new DataSize(1, MEGABYTE),
            new TestingHiveSplitLoader(),
            Executors.newFixedThreadPool(5),
            new CounterStat());
    // Queue a single split assigned to bucket 2, then close the source.
    splitSource.addToQueue(new TestSplit(0, OptionalInt.of(2)));
    splitSource.noMoreSplits();
    // Only bucket 2 yields the split; every other bucket is empty.
    assertEquals(getSplits(splitSource, OptionalInt.of(0), 10).size(), 0);
    assertEquals(getSplits(splitSource, OptionalInt.of(1), 10).size(), 0);
    assertEquals(getSplits(splitSource, OptionalInt.of(2), 10).size(), 1);
    assertEquals(getSplits(splitSource, OptionalInt.of(3), 10).size(), 0);
}
10, 10, new DataSize(1, MEGABYTE), new TestingHiveSplitLoader(), Executors.newFixedThreadPool(5), new CounterStat()); assertTrue(started.await(1, TimeUnit.SECONDS)); assertTrue(!splits.isDone()); assertEquals(((HiveSplit) split).getSchema().getProperty("id"), "33");
ListenableFuture<?> addToQueue(InternalHiveSplit split) { if (stateReference.get().getKind() != INITIAL) { return immediateFuture(null); } if (estimatedSplitSizeInBytes.addAndGet(split.getEstimatedSizeInBytes()) > maxOutstandingSplitsBytes) { // TODO: investigate alternative split discovery strategies when this error is hit. // This limit should never be hit given there is a limit of maxOutstandingSplits. // If it's hit, it means individual splits are huge. if (loggedHighMemoryWarning.compareAndSet(false, true)) { highMemorySplitSourceCounter.update(1); log.warn("Split buffering for %s.%s in query %s exceeded memory limit (%s). %s splits are buffered.", databaseName, tableName, queryId, succinctBytes(maxOutstandingSplitsBytes), getBufferedInternalSplitCount()); } throw new PrestoException(HIVE_EXCEEDED_SPLIT_BUFFERING_LIMIT, format( "Split buffering for %s.%s exceeded memory limit (%s). %s splits are buffered.", databaseName, tableName, succinctBytes(maxOutstandingSplitsBytes), getBufferedInternalSplitCount())); } bufferedInternalSplitCount.incrementAndGet(); OptionalInt bucketNumber = split.getBucketNumber(); return queues.offer(bucketNumber, split); }
@SuppressWarnings({"OverlyStrongTypeCast", "ConstantConditions"})
@Test
public void testGetMetadataRetryCounter()
{
    int maxRetries = 2;
    // Capture the thrown exception instead of asserting inside the catch:
    // previously, if getS3ObjectMetadata unexpectedly succeeded, the catch
    // block (and all assertions) were skipped and the test passed silently.
    Throwable thrown = null;
    try (PrestoS3FileSystem fs = new PrestoS3FileSystem()) {
        MockAmazonS3 s3 = new MockAmazonS3();
        s3.setGetObjectMetadataHttpCode(SC_INTERNAL_SERVER_ERROR);
        Configuration configuration = new Configuration();
        configuration.set(S3_MAX_BACKOFF_TIME, "1ms");
        configuration.set(S3_MAX_RETRY_TIME, "5s");
        configuration.setInt(S3_MAX_CLIENT_RETRIES, maxRetries);
        fs.initialize(new URI("s3n://test-bucket/"), configuration);
        fs.setS3Client(s3);
        fs.getS3ObjectMetadata(new Path("s3n://test-bucket/test"));
    }
    catch (Throwable expected) {
        thrown = expected;
    }
    if (thrown == null) {
        throw new AssertionError("expected getS3ObjectMetadata to throw after exhausting retries");
    }
    assertInstanceOf(thrown, AmazonS3Exception.class);
    assertEquals(((AmazonS3Exception) thrown).getStatusCode(), SC_INTERNAL_SERVER_ERROR);
    // The retry counter must reflect every retry performed before giving up.
    assertEquals(PrestoS3FileSystem.getFileSystemStats().getGetMetadataRetries().getTotalCount(), maxRetries);
}
queuedTime.add(driverStats.getQueuedTime().roundTo(NANOSECONDS)); elapsedTime.add(driverStats.getElapsedTime().roundTo(NANOSECONDS)); totalScheduledTime.getAndAdd(driverStats.getTotalScheduledTime().roundTo(NANOSECONDS)); totalCpuTime.getAndAdd(driverStats.getTotalCpuTime().roundTo(NANOSECONDS)); totalBlockedTime.getAndAdd(driverStats.getTotalBlockedTime().roundTo(NANOSECONDS)); rawInputDataSize.update(driverStats.getRawInputDataSize().toBytes()); rawInputPositions.update(driverStats.getRawInputPositions()); processedInputDataSize.update(driverStats.getProcessedInputDataSize().toBytes()); processedInputPositions.update(driverStats.getProcessedInputPositions()); outputDataSize.update(driverStats.getOutputDataSize().toBytes()); outputPositions.update(driverStats.getOutputPositions()); physicalWrittenDataSize.getAndAdd(driverStats.getPhysicalWrittenDataSize().toBytes());
public void queryFinished(QueryInfo info) completedQueries.update(1); consumedCpuTimeSecs.update((long) info.getQueryStats().getTotalCpuTime().getValue(SECONDS)); consumedInputBytes.update(info.getQueryStats().getRawInputDataSize().toBytes()); consumedInputRows.update(info.getQueryStats().getRawInputPositions()); executionTime.add(info.getQueryStats().getExecutionTime()); queuedTime.add(info.getQueryStats().getQueuedTime()); switch (info.getErrorCode().getType()) { case USER_ERROR: userErrorFailures.update(1); break; case INTERNAL_ERROR: internalFailures.update(1); break; case INSUFFICIENT_RESOURCES: insufficientResourcesFailures.update(1); break; case EXTERNAL: externalFailures.update(1); break; abandonedQueries.update(1); canceledQueries.update(1); failedQueries.update(1);