@NotNull
public DataSize getMaxQueryTotalMemory()
{
    // Explicitly configured total limit wins; otherwise default to
    // twice the max query (user) memory.
    DataSize configured = maxQueryTotalMemory;
    if (configured != null) {
        return configured;
    }
    return succinctBytes(maxQueryMemory.toBytes() * 2);
}
// Renders a byte-count estimate for display; NaN (unknown) renders as "?".
private static String formatEstimateAsDataSize(double value)
{
    if (isNaN(value)) {
        return "?";
    }
    return succinctBytes((long) value).toString();
}
/**
 * Reserves the given number of bytes to spill. If more than the maximum, throws an exception.
 *
 * @throws ExceededSpillLimitException
 */
public synchronized ListenableFuture<?> reserve(long bytes)
{
    checkArgument(bytes >= 0, "bytes is negative");
    // Use a strict comparison so that a reservation reaching exactly maxBytes
    // succeeds. The previous ">=" rejected the last usable byte, which is
    // inconsistent with the "exceeded" semantics of the thrown exception and
    // with the per-query spill check elsewhere (spillUsed + bytes > maxSpill).
    if (currentBytes + bytes > maxBytes) {
        throw exceededLocalLimit(succinctBytes(maxBytes));
    }
    currentBytes += bytes;
    return NOT_BLOCKED;
}
// Replaces the current hash aggregation builder with a fresh in-memory one,
// configured identically to the previous instance (zero memory limit,
// intermediate channel overwrite enabled at the stored offset).
private void rebuildHashAggregationBuilder()
{
    InMemoryHashAggregationBuilder replacement = new InMemoryHashAggregationBuilder(
            accumulatorFactories,
            step,
            expectedGroups,
            groupByTypes,
            groupByPartialChannels,
            hashChannel,
            operatorContext,
            Optional.of(DataSize.succinctBytes(0)),
            Optional.of(overwriteIntermediateChannelOffset),
            joinCompiler,
            false,
            false);
    this.hashAggregationBuilder = replacement;
}
}
@JsonProperty
public DataSize getSpilledDataSize()
{
    // Total bytes spilled across all operators, reported as a DataSize.
    long totalSpilledBytes = operatorSummaries.stream()
            .mapToLong(operator -> operator.getSpilledDataSize().toBytes())
            .sum();
    return succinctBytes(totalSpilledBytes);
}
}
@JsonProperty
public DataSize getLogicalWrittenDataSize()
{
    // Only input that flowed through TableWriterOperator counts as
    // logically written data.
    long writtenBytes = operatorSummaries.stream()
            .filter(operator -> operator.getOperatorType().equals(TableWriterOperator.class.getSimpleName()))
            .mapToLong(operator -> operator.getInputDataSize().toBytes())
            .sum();
    return succinctBytes(writtenBytes);
}
@GuardedBy("this") private String getAdditionalFailureInfo(long allocated, long delta) { Map<String, Long> queryAllocations = memoryPool.getTaggedMemoryAllocations().get(queryId); String additionalInfo = format("Allocated: %s, Delta: %s", succinctBytes(allocated), succinctBytes(delta)); // It's possible that a query tries allocating more than the available memory // failing immediately before any allocation of that query is tagged if (queryAllocations == null) { return additionalInfo; } String topConsumers = queryAllocations.entrySet().stream() .sorted(comparingByValue(Comparator.reverseOrder())) .limit(3) .collect(toImmutableMap(Entry::getKey, e -> succinctBytes(e.getValue()))) .toString(); return format("%s, Top Consumers: %s", additionalInfo, topConsumers); } }
// Fails the allocation if the new total memory reservation would exceed the limit.
@GuardedBy("this")
private void enforceTotalMemoryLimit(long allocated, long delta, long maxMemory)
{
    long newTotal = allocated + delta;
    if (newTotal > maxMemory) {
        throw exceededLocalTotalMemoryLimit(succinctBytes(maxMemory), getAdditionalFailureInfo(allocated, delta));
    }
}
@Override
public synchronized ListenableFuture<?> reserveSpill(long bytes)
{
    checkArgument(bytes >= 0, "bytes is negative");
    // Enforce the per-query limit before touching the node-wide tracker.
    long newSpillTotal = spillUsed + bytes;
    if (newSpillTotal > maxSpill) {
        throw exceededPerQueryLocalLimit(succinctBytes(maxSpill));
    }
    ListenableFuture<?> future = spillSpaceTracker.reserve(bytes);
    spillUsed = newSpillTotal;
    return future;
}
@Override
public synchronized ListenableFuture<?> reserveSpill(long bytes)
{
    checkArgument(bytes >= 0, "bytes is negative");
    long updatedUsage = spillUsed + bytes;
    // Per-query spill quota is checked first; the node-wide tracker is
    // consulted only for reservations within quota.
    if (updatedUsage > maxSpill) {
        throw exceededPerQueryLocalLimit(succinctBytes(maxSpill));
    }
    ListenableFuture<?> future = spillSpaceTracker.reserve(bytes);
    spillUsed = updatedUsage;
    return future;
}
// Fails the allocation if the new user memory reservation would exceed the limit.
@GuardedBy("this")
private void enforceUserMemoryLimit(long allocated, long delta, long maxMemory)
{
    long newReservation = allocated + delta;
    if (newReservation > maxMemory) {
        throw exceededLocalUserMemoryLimit(succinctBytes(maxMemory), getAdditionalFailureInfo(allocated, delta));
    }
}
@GuardedBy("this")
private void enforceUserMemoryLimit(long allocated, long delta, long maxMemory)
{
    // Reject the delta when it would push the user memory reservation past the cap.
    long projected = allocated + delta;
    if (projected > maxMemory) {
        throw exceededLocalUserMemoryLimit(succinctBytes(maxMemory), getAdditionalFailureInfo(allocated, delta));
    }
}
@GuardedBy("this") private String getAdditionalFailureInfo(long allocated, long delta) { Map<String, Long> queryAllocations = memoryPool.getTaggedMemoryAllocations().get(queryId); String additionalInfo = format("Allocated: %s, Delta: %s", succinctBytes(allocated), succinctBytes(delta)); // It's possible that a query tries allocating more than the available memory // failing immediately before any allocation of that query is tagged if (queryAllocations == null) { return additionalInfo; } String topConsumers = queryAllocations.entrySet().stream() .sorted(comparingByValue(Comparator.reverseOrder())) .limit(3) .collect(toImmutableMap(Entry::getKey, e -> succinctBytes(e.getValue()))) .toString(); return format("%s, Top Consumers: %s", additionalInfo, topConsumers); } }
@Override public DataSize getTotalMemoryReservation() { // acquire reference to scheduler before checking finalQueryInfo, because // state change listener sets finalQueryInfo and then clears scheduler when // the query finishes. SqlQueryScheduler scheduler = queryScheduler.get(); Optional<QueryInfo> finalQueryInfo = stateMachine.getFinalQueryInfo(); if (finalQueryInfo.isPresent()) { return finalQueryInfo.get().getQueryStats().getTotalMemoryReservation(); } if (scheduler == null) { return new DataSize(0, BYTE); } return succinctBytes(scheduler.getTotalMemoryReservation()); }
/**
 * Buffers a split for later scheduling, charging its estimated size against
 * the split-buffering memory budget.
 *
 * @return a future that is complete when the split was accepted (or the
 *         source is no longer in its initial state), otherwise the queue's
 *         backpressure future
 */
ListenableFuture<?> addToQueue(InternalHiveSplit split)
{
    // Splits are only buffered while the source is in its INITIAL state;
    // otherwise the split is dropped and the caller is not blocked.
    if (stateReference.get().getKind() != INITIAL) {
        return immediateFuture(null);
    }
    // NOTE(review): the estimated size is added before the limit check and is
    // not rolled back on the failure path below — presumably acceptable since
    // the exception fails the whole split source; confirm.
    if (estimatedSplitSizeInBytes.addAndGet(split.getEstimatedSizeInBytes()) > maxOutstandingSplitsBytes) {
        // TODO: investigate alternative split discovery strategies when this error is hit.
        // This limit should never be hit given there is a limit of maxOutstandingSplits.
        // If it's hit, it means individual splits are huge.
        // compareAndSet ensures the warning is logged (and the counter bumped) at most once per source.
        if (loggedHighMemoryWarning.compareAndSet(false, true)) {
            highMemorySplitSourceCounter.update(1);
            log.warn("Split buffering for %s.%s in query %s exceeded memory limit (%s). %s splits are buffered.",
                    databaseName, tableName, queryId, succinctBytes(maxOutstandingSplitsBytes), getBufferedInternalSplitCount());
        }
        throw new PrestoException(HIVE_EXCEEDED_SPLIT_BUFFERING_LIMIT, format(
                "Split buffering for %s.%s exceeded memory limit (%s). %s splits are buffered.",
                databaseName, tableName, succinctBytes(maxOutstandingSplitsBytes), getBufferedInternalSplitCount()));
    }
    bufferedInternalSplitCount.incrementAndGet();
    OptionalInt bucketNumber = split.getBucketNumber();
    // May return an incomplete future to apply backpressure on the producer.
    return queues.offer(bucketNumber, split);
}
@Override public DataSize getUserMemoryReservation() { // acquire reference to scheduler before checking finalQueryInfo, because // state change listener sets finalQueryInfo and then clears scheduler when // the query finishes. SqlQueryScheduler scheduler = queryScheduler.get(); Optional<QueryInfo> finalQueryInfo = stateMachine.getFinalQueryInfo(); if (finalQueryInfo.isPresent()) { return finalQueryInfo.get().getQueryStats().getUserMemoryReservation(); } if (scheduler == null) { return new DataSize(0, BYTE); } return succinctBytes(scheduler.getUserMemoryReservation()); }
// Mock execution with fixed memory and CPU usage; the priority is carried
// into the session via the QUERY_PRIORITY system property.
public MockQueryExecution(long memoryUsage, String queryId, int priority, Duration cpuUsage)
{
    this.queryId = new QueryId(queryId);
    this.resourceGroupId = Optional.empty();
    this.memoryUsage = succinctBytes(memoryUsage);
    this.cpuUsage = cpuUsage;
    this.session = testSessionBuilder()
            .setSystemProperty(QUERY_PRIORITY, String.valueOf(priority))
            .build();
}
@Test(timeOut = 240_000, expectedExceptions = RuntimeException.class, expectedExceptionsMessageRegExp = ".*Query exceeded local spill limit of 10B")
public void testMaxSpillPerNodeLimit()
{
    // A tiny 10-byte node-wide spill limit forces the spilling query to fail
    // with the expected "exceeded local spill limit" error.
    NodeSpillConfig spillConfig = new NodeSpillConfig().setMaxSpillPerNode(DataSize.succinctBytes(10));
    try (QueryRunner queryRunner = createLocalQueryRunner(spillConfig)) {
        queryRunner.execute(queryRunner.getDefaultSession(), "SELECT COUNT(DISTINCT clerk) as count, orderdate FROM orders GROUP BY orderdate ORDER BY count, orderdate");
    }
}
@Test(timeOut = 240_000, expectedExceptions = RuntimeException.class, expectedExceptionsMessageRegExp = ".*Query exceeded per-query local spill limit of 10B")
public void testQueryMaxSpillPerNodeLimit()
{
    // A tiny 10-byte per-query spill limit forces the spilling query to fail
    // with the expected "exceeded per-query local spill limit" error.
    NodeSpillConfig spillConfig = new NodeSpillConfig().setQueryMaxSpillPerNode(DataSize.succinctBytes(10));
    try (QueryRunner queryRunner = createLocalQueryRunner(spillConfig)) {
        queryRunner.execute(queryRunner.getDefaultSession(), "SELECT COUNT(DISTINCT clerk) as count, orderdate FROM orders GROUP BY orderdate ORDER BY count, orderdate");
    }
}
// Builds a fresh driver context (pipeline 0, first-driver lifecycle flags
// true/true/false) backed by a testing task context whose memory pool is
// capped at the given byte limit.
private DriverContext createDriverContext(long memoryLimit)
{
    return TestingTaskContext.builder(executor, scheduledExecutor, TEST_SESSION)
            .setMemoryPoolSize(succinctBytes(memoryLimit))
            .build()
            .addPipelineContext(0, true, true, false)
            .addDriverContext();
}