// Releases the built lookup source once the consumer has signalled (via the
// lookupSourceNotNeeded future) that it will no longer be used. Safe to call
// repeatedly; a no-op until that future completes.
private void disposeLookupSourceIfRequested()
{
    checkState(state == State.LOOKUP_SOURCE_BUILT);
    verify(lookupSourceNotNeeded.isPresent());
    if (!lookupSourceNotNeeded.get().isDone()) {
        // Consumer has not released the lookup source yet; retry on a later invocation.
        return;
    }

    index.clear();
    // The index was the revocable allocation; after clear() its residual footprint is
    // re-accounted as ordinary user memory and the revocable reservation is dropped.
    localRevocableMemoryContext.setBytes(0);
    localUserMemoryContext.setBytes(index.getEstimatedSize().toBytes());
    lookupSourceSupplier = null;
    close();
}
// NOTE(review): truncated excerpt of a resource-group spec constructor — the enclosing
// definition (and the end of the for-loop) is not visible here. Visible behavior:
// hardConcurrencyLimit falls back to the legacy maxRunning property when absent (at least
// one of the two must be set), is validated non-negative; softMemoryLimit is parsed as an
// absolute DataSize (the fraction form is left empty); subGroups is defensively copied,
// defaulting to an empty list, before the loop — presumably checking for duplicate
// sub-group name templates via the `names` set; confirm against the full constructor.
this.softConcurrencyLimit = softConcurrencyLimit; checkArgument(hardConcurrencyLimit.isPresent() || maxRunning.isPresent(), "Missing required property: hardConcurrencyLimit"); this.hardConcurrencyLimit = hardConcurrencyLimit.orElseGet(maxRunning::get); checkArgument(this.hardConcurrencyLimit >= 0, "hardConcurrencyLimit is negative"); absoluteSize = Optional.of(DataSize.valueOf(softMemoryLimit)); fraction = Optional.empty(); this.softMemoryLimitFraction = fraction; this.subGroups = ImmutableList.copyOf(requireNonNull(subGroups, "subGroups is null").orElse(ImmutableList.of())); Set<ResourceGroupNameTemplate> names = new HashSet<>(); for (ResourceGroupSpec subGroup : this.subGroups) {
// Total-memory limit for a single query. When not explicitly configured, it defaults
// to twice the per-query user-memory limit (maxQueryMemory).
@NotNull
public DataSize getMaxQueryTotalMemory()
{
    return (maxQueryTotalMemory != null)
            ? maxQueryTotalMemory
            : succinctBytes(maxQueryMemory.toBytes() * 2);
}
/**
 * Formats a throughput value (dataSize transferred over duration) as a human-readable rate.
 *
 * @param dataSize amount of data transferred
 * @param duration time over which it was transferred
 * @param longForm when true, appends a "B/s" unit suffix (adding the "B" only if the
 *        size formatter did not already include one)
 */
public static String formatDataRate(DataSize dataSize, Duration duration, boolean longForm)
{
    // bytes / seconds; a zero duration yields Infinity (or NaN for 0/0) — report 0 instead.
    double bytesPerSecond = dataSize.toBytes() / duration.getValue(SECONDS);
    if (!Double.isFinite(bytesPerSecond)) {
        bytesPerSecond = 0;
    }

    String formatted = formatDataSize(new DataSize(bytesPerSecond, BYTE), false);
    if (longForm) {
        if (!formatted.endsWith("B")) {
            formatted += "B";
        }
        formatted += "/s";
    }
    return formatted;
}
// NOTE(review): truncated excerpt of a test — the method body is syntactically incomplete
// here (missing braces and intervening statements). Visible behavior: configures a
// root/rootA/rootAX resource-group chain (1MB soft memory limit each, queue sizes
// 40/20/10, hard concurrency 0 so queries stay QUEUED), then asserts on a query-state
// info object: query text, id "query_root_a_x", QUEUED state, empty progress, and the
// leaf-to-root resource-group path. Confirm against the full test before editing.
@Test public void testQueryStateInfo() root.setSoftMemoryLimit(new DataSize(1, MEGABYTE)); root.setMaxQueuedQueries(40); root.setHardConcurrencyLimit(0); rootA.setSoftMemoryLimit(new DataSize(1, MEGABYTE)); rootA.setMaxQueuedQueries(20); rootA.setHardConcurrencyLimit(0); rootAX.setSoftMemoryLimit(new DataSize(1, MEGABYTE)); rootAX.setMaxQueuedQueries(10); rootAX.setHardConcurrencyLimit(0); Optional.of(rootAX.getId()), Optional.of(ImmutableList.of(rootAX.getInfo(), rootA.getInfo(), root.getInfo()))); assertEquals(query.getQuery(), "SELECT 1"); assertEquals(query.getQueryId().toString(), "query_root_a_x"); assertEquals(query.getQueryState(), QUEUED); assertEquals(query.getProgress(), Optional.empty()); List<ResourceGroupInfo> chainInfo = query.getPathToRoot().get();
// NOTE(review): truncated excerpt of a resource-estimate-based selection test — the
// enclosing method and the assertion helper calls are cut off mid-argument-list.
// Visible behavior: loads a file-based resource-group config, then submits queries with
// differing ResourceEstimates (execution time / peak memory) expecting them to be routed
// to groups "global.small", "global.other", and "global.huge_memory" based on the
// estimate thresholds. Confirm against the full test before editing.
queryRunner.getCoordinator().getResourceGroupManager().get() .setConfigurationManager("file", ImmutableMap.of("resource-groups.config-file", getResourceFilePath("resource_groups_resource_estimate_based_config.json"))); queryRunner, newSessionWithResourceEstimates(new ResourceEstimates( Optional.of(Duration.valueOf("4m")), Optional.empty(), Optional.of(DataSize.valueOf("400MB")))), LONG_LASTING_QUERY, createResourceGroupId("global", "small")); queryRunner, newSessionWithResourceEstimates(new ResourceEstimates( Optional.of(Duration.valueOf("4m")), Optional.empty(), Optional.of(DataSize.valueOf("600MB")))), LONG_LASTING_QUERY, createResourceGroupId("global", "other")); Optional.of(DataSize.valueOf("6TB")))), LONG_LASTING_QUERY, createResourceGroupId("global", "huge_memory")); Optional.of(DataSize.valueOf("4TB")))), LONG_LASTING_QUERY, createResourceGroupId("global", "other"));
// NOTE(review): truncated excerpt from a driver-context stats method — the enclosing
// definition and the constructor call these arguments belong to are not visible.
// Visible behavior: initializes raw/processed-input and output counters to zero (the
// "no input operator yet" branch, presumably), then passes memory reservations
// (user/revocable/system), blocked state, per-operator stats, and succinct-converted
// data sizes into what is likely a DriverStats constructor. Confirm against the full method.
rawInputDataSize = new DataSize(0, BYTE); rawInputPositions = 0; rawInputReadTime = new Duration(0, MILLISECONDS); processedInputDataSize = new DataSize(0, BYTE); processedInputPositions = 0; outputDataSize = new DataSize(0, BYTE); outputPositions = 0; startNanos = System.nanoTime(); succinctBytes(driverMemoryContext.getUserMemory()), succinctBytes(driverMemoryContext.getRevocableMemory()), succinctBytes(driverMemoryContext.getSystemMemory()), blockedMonitor != null, builder.build(), rawInputDataSize.convertToMostSuccinctDataSize(), rawInputPositions, rawInputReadTime, processedInputDataSize.convertToMostSuccinctDataSize(), processedInputPositions, outputDataSize.convertToMostSuccinctDataSize(), outputPositions, succinctBytes(physicalWrittenDataSize), ImmutableList.copyOf(transform(operatorContexts, OperatorContext::getOperatorStats)));
// Starts reading spilled build-side pages back into memory once unspilling has been
// requested via the spilledLookupSourceHandle. No-op until that request future completes.
private void unspillLookupSourceIfRequested()
{
    checkState(state == State.INPUT_SPILLED);
    if (!spilledLookupSourceHandle.getUnspillingRequested().isDone()) {
        // Nothing to do yet.
        return;
    }

    verify(spiller.isPresent());
    verify(!unspillInProgress.isPresent());

    // Reserve memory up-front for the pages about to be read back, on top of the
    // index's current footprint, before kicking off the asynchronous read.
    localUserMemoryContext.setBytes(getSpiller().getSpilledPagesInMemorySize() + index.getEstimatedSize().toBytes());
    unspillInProgress = Optional.of(getSpiller().getAllSpilledPages());

    state = State.INPUT_UNSPILLING;
}
private PartitionedOutputOperator createPartitionedOutputOperator() { PartitionFunction partitionFunction = new LocalPartitionGenerator(new InterpretedHashGenerator(ImmutableList.of(BIGINT), new int[] {0}), PARTITION_COUNT); PagesSerdeFactory serdeFactory = new PagesSerdeFactory(new BlockEncodingManager(new TypeRegistry()), false); OutputBuffers buffers = createInitialEmptyOutputBuffers(PARTITIONED); for (int partition = 0; partition < PARTITION_COUNT; partition++) { buffers = buffers.withBuffer(new OutputBuffers.OutputBufferId(partition), partition); } PartitionedOutputBuffer buffer = createPartitionedBuffer( buffers.withNoMoreBufferIds(), new DataSize(Long.MAX_VALUE, BYTE)); // don't let output buffer block PartitionedOutputFactory operatorFactory = new PartitionedOutputFactory( partitionFunction, ImmutableList.of(0), ImmutableList.of(Optional.empty()), false, OptionalInt.empty(), buffer, new DataSize(1, GIGABYTE)); return (PartitionedOutputOperator) operatorFactory .createOutputOperator(0, new PlanNodeId("plan-node-0"), TYPES, Function.identity(), serdeFactory) .createOperator(createDriverContext()); }
// Compiles the given filter expression (no projections, zero input columns) into a
// FilterAndProjectOperator factory. Compilation failures are rethrown as RuntimeException
// with the offending expression in the message.
private static OperatorFactory compileFilterWithNoInputColumns(RowExpression filter, ExpressionCompiler compiler)
{
    try {
        Supplier<PageProcessor> pageProcessor = compiler.compilePageProcessor(Optional.of(filter), ImmutableList.of());
        return new FilterAndProjectOperatorFactory(
                0,
                new PlanNodeId("test"),
                pageProcessor,
                ImmutableList.of(),
                new DataSize(0, BYTE),
                0);
    }
    catch (Throwable e) {
        // The compiler wraps failures in UncheckedExecutionException — unwrap to the real cause.
        Throwable cause = (e instanceof UncheckedExecutionException) ? e.getCause() : e;
        throw new RuntimeException("Error compiling " + filter + ": " + cause.getMessage(), cause);
    }
}
// NOTE(review): truncated excerpt of a constructor test — most of the argument list and
// the constructor call itself are cut off. Visible behavior: builds a stats object with
// distinct sentinel values (19, 20.0, "21GB" … "32GB", WAITING_FOR_MEMORY, GC statistics
// 101/102/…) and then asserts that user and peak-user memory reservations round-trip
// ("21GB" and "23GB" respectively). Confirm against the full test before editing.
@Test public void testConstructor() 19, 20.0, DataSize.valueOf("21GB"), DataSize.valueOf("22GB"), DataSize.valueOf("23GB"), DataSize.valueOf("24GB"), DataSize.valueOf("25GB"), true, ImmutableSet.of(BlockedReason.WAITING_FOR_MEMORY), DataSize.valueOf("27GB"), 28, DataSize.valueOf("29GB"), 30, DataSize.valueOf("31GB"), 32, DataSize.valueOf("32GB"), ImmutableList.of(new StageGcStatistics( 101, 102, assertEquals(basicInfo.getQueryStats().getUserMemoryReservation(), DataSize.valueOf("21GB")); assertEquals(basicInfo.getQueryStats().getPeakUserMemoryReservation(), DataSize.valueOf("23GB"));
@Test public void testEstimatedSize() { List<Type> types = ImmutableList.of(BIGINT, VARCHAR); PagesIndex pagesIndex = newPagesIndex(types, 30, false); long initialEstimatedSize = pagesIndex.getEstimatedSize().toBytes(); assertTrue(initialEstimatedSize > 0, format("Initial estimated size must be positive, got %s", initialEstimatedSize)); pagesIndex.addPage(somePage(types)); long estimatedSizeWithOnePage = pagesIndex.getEstimatedSize().toBytes(); assertTrue(estimatedSizeWithOnePage > initialEstimatedSize, "Estimated size should grow after adding a page"); pagesIndex.addPage(somePage(types)); long estimatedSizeWithTwoPages = pagesIndex.getEstimatedSize().toBytes(); assertEquals( estimatedSizeWithTwoPages, initialEstimatedSize + (estimatedSizeWithOnePage - initialEstimatedSize) * 2, "Estimated size should grow linearly as long as we don't pass expectedPositions"); pagesIndex.compact(); long estimatedSizeAfterCompact = pagesIndex.getEstimatedSize().toBytes(); // We can expect compact to reduce size because VARCHAR sequence pages are compactable. assertTrue(estimatedSizeAfterCompact < estimatedSizeWithTwoPages, format( "Compact should reduce (or retain) size, but changed from %s to %s", estimatedSizeWithTwoPages, estimatedSizeAfterCompact)); }
// NOTE(review): truncated excerpt of a yield test — the lambda body, operator creation,
// and loop structure are cut off mid-statement. Visible behavior: registers a
// "record_cursor" function that forces a yield on every call, compiles a cursor-based
// scan over a BIGINT sequence page, asserts the operator produces no output while the
// yield signal is set (getOutput() returns null), then, after resetting the signal,
// asserts the output values match the input. Confirm against the full test before editing.
@Test public void testRecordCursorYield() Page input = SequencePageBuilder.createSequencePage(ImmutableList.of(BIGINT), length, 0); DriverContext driverContext = newDriverContext(); metadata.getFunctionRegistry().addFunctions(ImmutableList.of(new GenericLongFunction("record_cursor", value -> { driverContext.getYieldSignal().forceYieldForTesting(); return value; ExpressionCompiler expressionCompiler = new ExpressionCompiler(metadata, new PageFunctionCompiler(metadata, 0)); ImmutableList.of(), ImmutableList.of(BIGINT), new DataSize(0, BYTE), 0); assertNull(operator.getOutput()); driverContext.getYieldSignal().reset(); Page output = operator.getOutput(); driverContext.getYieldSignal().reset(); assertNotNull(output); assertEquals(toValues(BIGINT, output.getBlock(0)), toValues(BIGINT, input.getBlock(0)));
// Builds an operator that applies an (optional) interpreted filter followed by a single
// interpreted projection, asserting the projection's resolved type matches expectedType.
private Operator interpretedFilterProject(Optional<Expression> filter, Expression projection, Type expectedType, Session session)
{
    Optional<PageFilter> interpretedFilter = filter.map(expression ->
            new InterpretedPageFilter(
                    expression,
                    SYMBOL_TYPES,
                    INPUT_MAPPING,
                    metadata,
                    SQL_PARSER,
                    session));

    PageProjection interpretedProjection = new InterpretedPageProjection(projection, SYMBOL_TYPES, INPUT_MAPPING, metadata, SQL_PARSER, session);
    assertEquals(interpretedProjection.getType(), expectedType);

    PageProcessor pageProcessor = new PageProcessor(interpretedFilter, ImmutableList.of(interpretedProjection));
    OperatorFactory factory = new FilterAndProjectOperatorFactory(
            0,
            new PlanNodeId("test"),
            () -> pageProcessor,
            ImmutableList.of(interpretedProjection.getType()),
            new DataSize(0, BYTE),
            0);
    return factory.createOperator(createDriverContext(session));
}
// Runs a query and polls (every 2s, bounded by the 60s test timeout) until the
// "global.bi-user" resource group is materialized with a positive soft memory limit.
@Test(timeOut = 60_000)
public void testRunningQuery()
        throws Exception
{
    queryRunner.execute("SELECT COUNT(*), clerk FROM orders GROUP BY clerk");
    ResourceGroupId biUserGroup = new ResourceGroupId(new ResourceGroupId("global"), "bi-user");
    while (true) {
        ResourceGroupInfo info = queryRunner.getCoordinator().getResourceGroupManager().get().getResourceGroupInfo(biUserGroup);
        if (info.getSoftMemoryLimit().toBytes() > 0) {
            return;
        }
        TimeUnit.SECONDS.sleep(2);
    }
}
// Builds a ScanFilterAndProjectOperator with identity projections over all columns
// (no filter), wires in a fresh page source, and feeds it a single local test split.
public SourceOperator newScanFilterAndProjectOperator(DriverContext driverContext)
{
    ConnectorPageSource pageSource = newPageSource();

    // One identity (field reference) projection per column.
    ImmutableList.Builder<RowExpression> identityProjections = ImmutableList.builder();
    for (int channel = 0; channel < types.size(); channel++) {
        identityProjections.add(field(channel, types.get(channel)));
    }
    Supplier<CursorProcessor> cursorProcessor = EXPRESSION_COMPILER.compileCursorProcessor(Optional.empty(), identityProjections.build(), "key");
    Supplier<PageProcessor> pageProcessor = EXPRESSION_COMPILER.compilePageProcessor(Optional.empty(), identityProjections.build());

    SourceOperatorFactory factory = new ScanFilterAndProjectOperatorFactory(
            0,
            new PlanNodeId("test"),
            new PlanNodeId("0"),
            (session, split, columnHandles) -> pageSource,
            cursorProcessor,
            pageProcessor,
            columns.stream().map(columnHandle -> (ColumnHandle) columnHandle).collect(toList()),
            types,
            new DataSize(0, BYTE),
            0);

    SourceOperator operator = factory.createOperator(driverContext);
    operator.addSplit(new Split(new ConnectorId("test"), TestingTransactionHandle.create(), TestingSplit.createLocalSplit()));
    return operator;
}
@Test public void testBufferCloseOnFinish() throws Exception { SqlTask sqlTask = createInitialTask(); OutputBuffers outputBuffers = createInitialEmptyOutputBuffers(PARTITIONED).withBuffer(OUT, 0).withNoMoreBufferIds(); updateTask(sqlTask, EMPTY_SOURCES, outputBuffers); ListenableFuture<BufferResult> bufferResult = sqlTask.getTaskResults(OUT, 0, new DataSize(1, MEGABYTE)); assertFalse(bufferResult.isDone()); // close the sources (no splits will ever be added) updateTask(sqlTask, ImmutableList.of(new TaskSource(TABLE_SCAN_NODE_ID, ImmutableSet.of(), true)), outputBuffers); // finish the task by calling abort on it sqlTask.abortTaskResults(OUT); // buffer will be closed by cancel event (wait for event to fire) bufferResult.get(1, SECONDS); // verify the buffer is closed bufferResult = sqlTask.getTaskResults(OUT, 0, new DataSize(1, MEGABYTE)); assertTrue(bufferResult.isDone()); assertTrue(bufferResult.get().isBufferComplete()); }
// Asynchronously fetches up to maxSize bytes of pages from the given buffer,
// starting at startingSequenceId.
@Override
public ListenableFuture<BufferResult> get(OutputBufferId bufferId, long startingSequenceId, DataSize maxSize)
{
    // Guard against re-entrant calls under this object's monitor — getPages may
    // complete asynchronously, so holding our lock here is disallowed.
    checkState(!Thread.holdsLock(this), "Can not get pages while holding a lock on this");
    requireNonNull(bufferId, "bufferId is null");
    checkArgument(maxSize.toBytes() > 0, "maxSize must be at least 1 byte");

    return getBuffer(bufferId).getPages(startingSequenceId, maxSize, Optional.of(masterBuffer));
}