/**
 * Sets the Kafka read buffer size from its textual form (e.g. {@code "1MB"}).
 *
 * @param kafkaBufferSize size string parsed via {@link DataSize#valueOf(String)}
 * @return this config instance, for call chaining
 */
@Config("kafka.buffer-size")
public KafkaConnectorConfig setKafkaBufferSize(String kafkaBufferSize)
{
    DataSize parsed = DataSize.valueOf(kafkaBufferSize);
    this.kafkaBufferSize = parsed;
    return this;
}
/**
 * Creates session-property metadata for a {@link DataSize} value.
 * The property is exposed as an unbounded VARCHAR; incoming values are parsed
 * with {@link DataSize#valueOf(String)} and rendered back via {@link DataSize#toString()}.
 */
public static PropertyMetadata<DataSize> dataSizeSessionProperty(String name, String description, DataSize defaultValue, boolean hidden)
{
    return new PropertyMetadata<>(
            name,
            description,
            createUnboundedVarcharType(),
            DataSize.class,
            defaultValue,
            hidden,
            object -> DataSize.valueOf((String) object),
            DataSize::toString);
}
}
absoluteSize = Optional.of(DataSize.valueOf(softMemoryLimit)); fraction = Optional.empty();
/**
 * Parses the {@code X-Presto-Resource-Estimate} header(s) into {@link ResourceEstimates}.
 * Each header entry has the form {@code name=value}; supported names are
 * {@code EXECUTION_TIME}, {@code CPU_TIME} and {@code PEAK_MEMORY}.
 *
 * @throws WebApplicationException via {@code badRequest} / {@code assertRequest} on a
 *         malformed entry, an unknown resource name, or an unparsable value
 */
private ResourceEstimates parseResourceEstimate(HttpServletRequest servletRequest)
{
    ResourceEstimateBuilder builder = new ResourceEstimateBuilder();
    for (String header : splitSessionHeader(servletRequest.getHeaders(PRESTO_RESOURCE_ESTIMATE))) {
        List<String> nameValue = Splitter.on('=').limit(2).trimResults().splitToList(header);
        assertRequest(nameValue.size() == 2, "Invalid %s header", PRESTO_RESOURCE_ESTIMATE);
        String name = nameValue.get(0);
        String value = nameValue.get(1);
        try {
            // Uppercase with a fixed locale: default-locale toUpperCase() mangles
            // names like "cpu_time" under locales such as Turkish (i -> dotted İ),
            // which would silently miss the CPU_TIME case label.
            switch (name.toUpperCase(java.util.Locale.ROOT)) {
                case ResourceEstimates.EXECUTION_TIME:
                    builder.setExecutionTime(Duration.valueOf(value));
                    break;
                case ResourceEstimates.CPU_TIME:
                    builder.setCpuTime(Duration.valueOf(value));
                    break;
                case ResourceEstimates.PEAK_MEMORY:
                    builder.setPeakMemory(DataSize.valueOf(value));
                    break;
                default:
                    throw badRequest(format("Unsupported resource name %s", name));
            }
        }
        catch (IllegalArgumentException e) {
            throw badRequest(format("Unsupported format for resource estimate '%s': %s", value, e));
        }
    }
    return builder.build();
}
/**
 * Asserts that every configurable attribute of {@code group} matches the expected
 * values. The soft memory limit is given in textual form and parsed here.
 */
private static void assertEqualsResourceGroup(
        InternalResourceGroup group,
        String softMemoryLimit,
        int maxQueued,
        int hardConcurrencyLimit,
        int softConcurrencyLimit,
        SchedulingPolicy schedulingPolicy,
        int schedulingWeight,
        boolean jmxExport,
        Duration softCpuLimit,
        Duration hardCpuLimit)
{
    DataSize expectedMemoryLimit = DataSize.valueOf(softMemoryLimit);
    assertEquals(group.getSoftMemoryLimit(), expectedMemoryLimit);
    assertEquals(group.getMaxQueuedQueries(), maxQueued);
    assertEquals(group.getHardConcurrencyLimit(), hardConcurrencyLimit);
    assertEquals(group.getSoftConcurrencyLimit(), softConcurrencyLimit);
    assertEquals(group.getSchedulingPolicy(), schedulingPolicy);
    assertEquals(group.getSchedulingWeight(), schedulingWeight);
    assertEquals(group.getJmxExport(), jmxExport);
    assertEquals(group.getSoftCpuLimit(), softCpuLimit);
    assertEquals(group.getHardCpuLimit(), hardCpuLimit);
}
}
19, 20.0, DataSize.valueOf("21GB"), DataSize.valueOf("22GB"), DataSize.valueOf("23GB"), DataSize.valueOf("24GB"), DataSize.valueOf("25GB"), true, Duration.valueOf("23m"), true, ImmutableSet.of(WAITING_FOR_MEMORY), DataSize.valueOf("27GB"), 28, DataSize.valueOf("29GB"), 30, DataSize.valueOf("31GB"), 32, DataSize.valueOf("33GB"), ImmutableList.of(), ImmutableList.of()),
@BeforeMethod
public void setUp()
{
    // Fresh executors for every test; daemon threads ensure a failing test
    // cannot keep the JVM alive.
    executor = newCachedThreadPool(daemonThreadsNamed("test-executor-%s"));
    scheduledExecutor = newScheduledThreadPool(2, daemonThreadsNamed("test-scheduledExecutor-%s"));

    // Build a task context with a 100MB query memory cap and a tiny (10B)
    // memory pool, then derive the pool and a driver context from it.
    TaskContext context = TestingTaskContext.builder(executor, scheduledExecutor, TEST_SESSION)
            .setQueryMaxMemory(DataSize.valueOf("100MB"))
            .setMemoryPoolSize(DataSize.valueOf("10B"))
            .setQueryId(QUERY_ID)
            .build();
    memoryPool = context.getQueryContext().getMemoryPool();
    driverContext = context
            .addPipelineContext(0, true, true, false)
            .addDriverContext();
}
Optional.of(new Range<>( Optional.empty(), Optional.of(DataSize.valueOf("500MB")))))), Optional.empty(), new ResourceGroupIdTemplate("global.foo")); Optional.of(Duration.valueOf("4m")), Optional.empty(), Optional.of(DataSize.valueOf("400MB"))))) .map(SelectionContext::getResourceGroupId), Optional.of(resourceGroupId)); Optional.of(Duration.valueOf("4m")), Optional.empty(), Optional.of(DataSize.valueOf("600MB"))))) .map(SelectionContext::getResourceGroupId), Optional.empty()); Optional.empty(), Optional.of(new Range<>( Optional.of(DataSize.valueOf("5TB")), Optional.empty())))), Optional.empty(), Optional.of(Duration.valueOf("100h")), Optional.empty(), Optional.of(DataSize.valueOf("4TB"))))) .map(SelectionContext::getResourceGroupId), Optional.empty());
Optional.of(Duration.valueOf("4m")), Optional.empty(), Optional.of(DataSize.valueOf("400MB")))), LONG_LASTING_QUERY, createResourceGroupId("global", "small")); Optional.of(Duration.valueOf("4m")), Optional.empty(), Optional.of(DataSize.valueOf("600MB")))), LONG_LASTING_QUERY, createResourceGroupId("global", "other")); Optional.of(Duration.valueOf("1s")), Optional.of(Duration.valueOf("1s")), Optional.of(DataSize.valueOf("6TB")))), LONG_LASTING_QUERY, createResourceGroupId("global", "huge_memory")); Optional.of(Duration.valueOf("100h")), Optional.empty(), Optional.of(DataSize.valueOf("4TB")))), LONG_LASTING_QUERY, createResourceGroupId("global", "other"));
19, 20.0, DataSize.valueOf("21GB"), DataSize.valueOf("22GB"), DataSize.valueOf("23GB"), DataSize.valueOf("24GB"), DataSize.valueOf("25GB"), true, Duration.valueOf("23m"), true, ImmutableSet.of(BlockedReason.WAITING_FOR_MEMORY), DataSize.valueOf("27GB"), 28, DataSize.valueOf("29GB"), 30, DataSize.valueOf("31GB"), 32, DataSize.valueOf("32GB"), ImmutableList.of(new StageGcStatistics( 101, assertEquals(basicInfo.getQueryStats().getUserMemoryReservation(), DataSize.valueOf("21GB")); assertEquals(basicInfo.getQueryStats().getPeakUserMemoryReservation(), DataSize.valueOf("23GB")); assertEquals(basicInfo.getQueryStats().getTotalCpuTime(), Duration.valueOf("24m"));
DataSize maxSize = DataSize.valueOf(request.getHeader(PrestoHeaders.PRESTO_MAX_SIZE)); assertEquals(maxSize, expectedMaxSize);
.setRe2JDfaRetries(42) .setSpillEnabled(true) .setAggregationOperatorUnspillMemoryLimit(DataSize.valueOf("100MB")) .setSpillerSpillPaths("/tmp/custom/spill/path1,/tmp/custom/spill/path2") .setSpillerThreads(42)
featuresConfig.getJoinMaxBroadcastTableSize(), true, value -> DataSize.valueOf((String) value), DataSize::toString), booleanProperty( featuresConfig.getWriterMinSize(), false, value -> DataSize.valueOf((String) value), DataSize::toString), booleanProperty( memoryManagerConfig.getMaxQueryMemory(), true, value -> DataSize.valueOf((String) value), DataSize::toString), new PropertyMetadata<>( memoryManagerConfig.getMaxQueryTotalMemory(), true, value -> DataSize.valueOf((String) value), DataSize::toString), booleanProperty( featuresConfig.getAggregationOperatorUnspillMemoryLimit(), false, value -> DataSize.valueOf((String) value), DataSize::toString), booleanProperty(
/**
 * Sets the Kafka read buffer size from its textual form (e.g. {@code "1MB"}).
 *
 * @param kafkaBufferSize size string parsed via {@link DataSize#valueOf(String)}
 * @return this config instance, for call chaining
 */
@Config("buffer-size")
public KafkaConfig setKafkaBufferSize(String kafkaBufferSize)
{
    DataSize parsed = DataSize.valueOf(kafkaBufferSize);
    this.kafkaBufferSize = parsed;
    return this;
}
.setRe2JDfaRetries(5) .setSpillEnabled(false) .setAggregationOperatorUnspillMemoryLimit(DataSize.valueOf("4MB")) .setSpillerSpillPaths("") .setSpillerThreads(4)
/** Captures the configured lower bound from the {@code @MinDataSize} annotation. */
@Override
public void initialize(MinDataSize dataSize)
{
    String configured = dataSize.value();
    this.min = DataSize.valueOf(configured);
}
/** Captures the configured upper bound from the {@code @MaxDataSize} annotation. */
@Override
public void initialize(MaxDataSize dataSize)
{
    String configured = dataSize.value();
    this.max = DataSize.valueOf(configured);
}
/**
 * Verifies that {@link DataSize#valueOf(String)} rejects a null argument with a
 * {@link NullPointerException} whose message names the parameter.
 */
@Test(expectedExceptions = NullPointerException.class, expectedExceptionsMessageRegExp = "size is null")
public void testValueOfRejectsNull()
{
    DataSize.valueOf(null);
}
/**
 * Verifies that {@link DataSize#valueOf(String)} rejects a malformed numeric
 * part ({@code "1.2x4 B"}) with an {@link IllegalArgumentException}.
 */
@Test(expectedExceptions = IllegalArgumentException.class, expectedExceptionsMessageRegExp = "size is not a valid.*")
public void testValueOfRejectsInvalidNumber()
{
    DataSize.valueOf("1.2x4 B");
}