public static TaskStatus initialTaskStatus(TaskId taskId, URI location, String nodeId)
{
    return new TaskStatus(
            taskId,
            "",
            MIN_VERSION,
            PLANNED,
            location,
            nodeId,
            ImmutableSet.of(),
            ImmutableList.of(),
            0,
            0,
            false,
            new DataSize(0, BYTE),
            new DataSize(0, BYTE),
            new DataSize(0, BYTE),
            0,
            new Duration(0, MILLISECONDS));
}
@Test
public void testIntegration()
        throws IOException
{
    // tiny file
    TestingOrcDataSource orcDataSource = new TestingOrcDataSource(
            new FileOrcDataSource(tempFile.getFile(), new DataSize(1, Unit.MEGABYTE), new DataSize(1, Unit.MEGABYTE), new DataSize(1, Unit.MEGABYTE), true));
    doIntegration(orcDataSource, new DataSize(1, Unit.MEGABYTE), new DataSize(1, Unit.MEGABYTE), new DataSize(1, Unit.MEGABYTE));
    assertEquals(orcDataSource.getReadCount(), 1); // read entire file at once

    // tiny stripes
    orcDataSource = new TestingOrcDataSource(
            new FileOrcDataSource(tempFile.getFile(), new DataSize(1, Unit.MEGABYTE), new DataSize(1, Unit.MEGABYTE), new DataSize(1, Unit.MEGABYTE), true));
    doIntegration(orcDataSource, new DataSize(400, Unit.KILOBYTE), new DataSize(400, Unit.KILOBYTE), new DataSize(400, Unit.KILOBYTE));
    assertEquals(orcDataSource.getReadCount(), 3); // footer, first few stripes, last few stripes
}
@Test(
        expectedExceptions = ExceededMemoryLimitException.class,
        expectedExceptionsMessageRegExp = "Query exceeded per-node user memory limit of.*",
        dataProvider = "testMemoryLimitProvider")
public void testMemoryLimit(boolean parallelBuild, boolean buildHashEnabled)
{
    TaskContext taskContext = TestingTaskContext.createTaskContext(executor, scheduledExecutor, TEST_SESSION, new DataSize(100, BYTE));

    RowPagesBuilder buildPages = rowPagesBuilder(buildHashEnabled, Ints.asList(0), ImmutableList.of(VARCHAR, BIGINT, BIGINT))
            .addSequencePage(10, 20, 30, 40);
    BuildSideSetup buildSideSetup = setupBuildSide(parallelBuild, taskContext, Ints.asList(0), buildPages, Optional.empty(), false, SINGLE_STREAM_SPILLER_FACTORY);
    instantiateBuildDrivers(buildSideSetup, taskContext);
    buildLookupSource(buildSideSetup);
}
@Test
public void testExplicitPropertyMappings()
{
    Map<String, String> properties = new ImmutableMap.Builder<String, String>()
            .put("hive.parquet.writer.block-size", "234MB")
            .put("hive.parquet.writer.page-size", "11MB")
            .build();

    ParquetFileWriterConfig expected = new ParquetFileWriterConfig()
            .setBlockSize(new DataSize(234, MEGABYTE))
            .setPageSize(new DataSize(11, MEGABYTE));

    assertFullMapping(properties, expected);
}
private Operator interpretedFilterProject(Optional<Expression> filter, Expression projection, Type expectedType, Session session)
{
    Optional<PageFilter> pageFilter = filter
            .map(expression -> new InterpretedPageFilter(
                    expression,
                    SYMBOL_TYPES,
                    INPUT_MAPPING,
                    metadata,
                    SQL_PARSER,
                    session));

    PageProjection pageProjection = new InterpretedPageProjection(projection, SYMBOL_TYPES, INPUT_MAPPING, metadata, SQL_PARSER, session);
    assertEquals(pageProjection.getType(), expectedType);

    PageProcessor processor = new PageProcessor(pageFilter, ImmutableList.of(pageProjection));
    OperatorFactory operatorFactory = new FilterAndProjectOperatorFactory(
            0,
            new PlanNodeId("test"),
            () -> processor,
            ImmutableList.of(pageProjection.getType()),
            new DataSize(0, BYTE),
            0);
    return operatorFactory.createOperator(createDriverContext(session));
}
public static void assertExpectedDriverStats(DriverStats actual)
{
    assertEquals(actual.getLifespan(), Lifespan.driverGroup(21));

    assertEquals(actual.getUserMemoryReservation(), new DataSize(6, BYTE));
    assertEquals(actual.getRevocableMemoryReservation(), new DataSize(7, BYTE));
    assertEquals(actual.getSystemMemoryReservation(), new DataSize(8, BYTE));

    assertEquals(actual.getTotalScheduledTime(), new Duration(9, NANOSECONDS));
    assertEquals(actual.getTotalBlockedTime(), new Duration(12, NANOSECONDS));

    assertEquals(actual.getRawInputDataSize(), new DataSize(13, BYTE));
    assertEquals(actual.getRawInputPositions(), 14);
    assertEquals(actual.getRawInputReadTime(), new Duration(15, NANOSECONDS));

    assertEquals(actual.getProcessedInputDataSize(), new DataSize(16, BYTE));
    assertEquals(actual.getProcessedInputPositions(), 17);

    assertEquals(actual.getOutputDataSize(), new DataSize(18, BYTE));
    assertEquals(actual.getOutputPositions(), 19);

    assertEquals(actual.getPhysicalWrittenDataSize(), new DataSize(20, BYTE));

    assertEquals(actual.getOperatorStats().size(), 1);
}
@Test
public void testBufferCloseOnFinish()
        throws Exception
{
    SqlTask sqlTask = createInitialTask();

    OutputBuffers outputBuffers = createInitialEmptyOutputBuffers(PARTITIONED).withBuffer(OUT, 0).withNoMoreBufferIds();
    updateTask(sqlTask, EMPTY_SOURCES, outputBuffers);

    ListenableFuture<BufferResult> bufferResult = sqlTask.getTaskResults(OUT, 0, new DataSize(1, MEGABYTE));
    assertFalse(bufferResult.isDone());

    // close the sources (no splits will ever be added)
    updateTask(sqlTask, ImmutableList.of(new TaskSource(TABLE_SCAN_NODE_ID, ImmutableSet.of(), true)), outputBuffers);

    // finish the task by calling abort on it
    sqlTask.abortTaskResults(OUT);

    // buffer will be closed by cancel event (wait for event to fire)
    bufferResult.get(1, SECONDS);

    // verify the buffer is closed
    bufferResult = sqlTask.getTaskResults(OUT, 0, new DataSize(1, MEGABYTE));
    assertTrue(bufferResult.isDone());
    assertTrue(bufferResult.get().isBufferComplete());
}
@Test
public void testMergeGap()
{
    List<DiskRange> consistent10ByteGap = ImmutableList.of(new DiskRange(100, 90), new DiskRange(200, 90), new DiskRange(300, 90));
    assertEquals(mergeAdjacentDiskRanges(consistent10ByteGap, new DataSize(0, BYTE), new DataSize(1, GIGABYTE)), consistent10ByteGap);
    assertEquals(mergeAdjacentDiskRanges(consistent10ByteGap, new DataSize(9, BYTE), new DataSize(1, GIGABYTE)), consistent10ByteGap);
    assertEquals(mergeAdjacentDiskRanges(consistent10ByteGap, new DataSize(10, BYTE), new DataSize(1, GIGABYTE)), ImmutableList.of(new DiskRange(100, 290)));
    assertEquals(mergeAdjacentDiskRanges(consistent10ByteGap, new DataSize(100, BYTE), new DataSize(1, GIGABYTE)), ImmutableList.of(new DiskRange(100, 290)));

    List<DiskRange> middle10ByteGap = ImmutableList.of(new DiskRange(100, 80), new DiskRange(200, 90), new DiskRange(300, 80), new DiskRange(400, 90));
    assertEquals(mergeAdjacentDiskRanges(middle10ByteGap, new DataSize(0, BYTE), new DataSize(1, GIGABYTE)), middle10ByteGap);
    assertEquals(mergeAdjacentDiskRanges(middle10ByteGap, new DataSize(9, BYTE), new DataSize(1, GIGABYTE)), middle10ByteGap);
    assertEquals(mergeAdjacentDiskRanges(middle10ByteGap, new DataSize(10, BYTE), new DataSize(1, GIGABYTE)), ImmutableList.of(new DiskRange(100, 80), new DiskRange(200, 180), new DiskRange(400, 90)));
    assertEquals(mergeAdjacentDiskRanges(middle10ByteGap, new DataSize(100, BYTE), new DataSize(1, GIGABYTE)), ImmutableList.of(new DiskRange(100, 390)));
}
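The assertions above always pass a 1GB maxReadSize, so only the gap threshold is exercised. As a minimal sketch of what the second parameter does, assuming mergeAdjacentDiskRanges stops combining once a merged range would exceed maxReadSize (an assumption about the merge semantics, not verified by the tests shown here):

// Hypothetical: three 90-byte ranges with 10-byte gaps, as in consistent10ByteGap above.
List<DiskRange> ranges = ImmutableList.of(new DiskRange(100, 90), new DiskRange(200, 90), new DiskRange(300, 90));

// Assumed behavior: with a 200-byte maxReadSize, the first two ranges merge into a
// 190-byte read, but folding in the third range (290 bytes total) would exceed the
// cap, so it stays separate.
assertEquals(
        mergeAdjacentDiskRanges(ranges, new DataSize(10, BYTE), new DataSize(200, BYTE)),
        ImmutableList.of(new DiskRange(100, 190), new DiskRange(300, 90)));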
public DriverStats()
{
    this.lifespan = null;

    this.createTime = DateTime.now();
    this.startTime = null;
    this.endTime = null;
    this.queuedTime = new Duration(0, MILLISECONDS);
    this.elapsedTime = new Duration(0, MILLISECONDS);

    this.userMemoryReservation = new DataSize(0, BYTE);
    this.revocableMemoryReservation = new DataSize(0, BYTE);
    this.systemMemoryReservation = new DataSize(0, BYTE);

    this.totalScheduledTime = new Duration(0, MILLISECONDS);
    this.totalCpuTime = new Duration(0, MILLISECONDS);
    this.totalBlockedTime = new Duration(0, MILLISECONDS);
    this.fullyBlocked = false;
    this.blockedReasons = ImmutableSet.of();

    this.rawInputDataSize = new DataSize(0, BYTE);
    this.rawInputPositions = 0;
    this.rawInputReadTime = new Duration(0, MILLISECONDS);

    this.processedInputDataSize = new DataSize(0, BYTE);
    this.processedInputPositions = 0;

    this.outputDataSize = new DataSize(0, BYTE);
    this.outputPositions = 0;

    this.physicalWrittenDataSize = new DataSize(0, BYTE);

    this.operatorStats = ImmutableList.of();
}
@Test
public void testReadUserMetadata()
        throws Exception
{
    try (TempFile tempFile = new TempFile()) {
        Map<String, String> metadata = ImmutableMap.of(
                "a", "ala",
                "b", "ma",
                "c", "kota");
        createFileWithOnlyUserMetadata(tempFile.getFile(), metadata);

        OrcDataSource orcDataSource = new FileOrcDataSource(tempFile.getFile(), new DataSize(1, MEGABYTE), new DataSize(1, MEGABYTE), new DataSize(1, MEGABYTE), true);
        OrcReader orcReader = new OrcReader(orcDataSource, ORC, new DataSize(1, MEGABYTE), new DataSize(1, MEGABYTE), new DataSize(1, MEGABYTE), new DataSize(1, MEGABYTE));
        Footer footer = orcReader.getFooter();
        Map<String, String> readMetadata = Maps.transformValues(footer.getUserMetadata(), Slice::toStringAscii);
        assertEquals(readMetadata, metadata);
    }
}
@Test
public void testExplicitPropertyMappings()
{
    Map<String, String> properties = new ImmutableMap.Builder<String, String>()
            .put("experimental.max-spill-per-node", "10MB")
            .put("experimental.query-max-spill-per-node", "15 MB")
            .build();

    NodeSpillConfig expected = new NodeSpillConfig()
            .setMaxSpillPerNode(new DataSize(10, MEGABYTE))
            .setQueryMaxSpillPerNode(new DataSize(15, MEGABYTE));

    assertFullMapping(properties, expected);
}
private static RcFileReader createRcFileReader(TempFile tempFile, Type type, RcFileEncoding encoding)
        throws IOException
{
    RcFileDataSource rcFileDataSource = new FileRcFileDataSource(tempFile.getFile());
    RcFileReader rcFileReader = new RcFileReader(
            rcFileDataSource,
            encoding,
            ImmutableMap.of(0, type),
            new AircompressorCodecFactory(new HadoopCodecFactory(RcFileTester.class.getClassLoader())),
            0,
            tempFile.getFile().length(),
            new DataSize(8, MEGABYTE));
    assertEquals(rcFileReader.getColumnCount(), 1);
    return rcFileReader;
}
@Test
public void testMergeWithMemorySpill()
        0,
        new PlanNodeId("test"),
        ImmutableList.of(BIGINT),
        ImmutableList.of(0),
        ImmutableList.of(),
        Step.SINGLE,
        false,
        Optional.empty(),
        1,
        Optional.of(new DataSize(16, MEGABYTE)),
        true,
        new DataSize(smallPagesSpillThresholdSize, Unit.BYTE),
        succinctBytes(Integer.MAX_VALUE),
        spillerFactory,
@Test
public void testMergeAdjacent()
{
    List<DiskRange> diskRanges = mergeAdjacentDiskRanges(
            ImmutableList.of(new DiskRange(100, 100), new DiskRange(200, 100), new DiskRange(300, 100)),
            new DataSize(0, BYTE),
            new DataSize(1, GIGABYTE));
    assertEquals(diskRanges, ImmutableList.of(new DiskRange(100, 300)));
}
        0,
        0.0,
        new DataSize(0, BYTE),
        new DataSize(0, BYTE),
        new DataSize(0, BYTE),
        new Duration(0, MILLISECONDS),
        new Duration(0, MILLISECONDS),
        false,
        ImmutableSet.of(),
        new DataSize(0, BYTE),
        0,
        new DataSize(0, BYTE),
        0,
        new DataSize(0, BYTE),
        0,
        new DataSize(0, BYTE),
        0,
        new Duration(0, MILLISECONDS),
        ImmutableList.of());
@Test
public void testMoveQuery()
{
    QueryId testQuery = new QueryId("test_query");
    MemoryPool pool1 = new MemoryPool(new MemoryPoolId("test"), new DataSize(1000, BYTE));
    MemoryPool pool2 = new MemoryPool(new MemoryPoolId("test"), new DataSize(1000, BYTE));
    pool1.reserve(testQuery, "test_tag", 10);

    Map<String, Long> allocations = pool1.getTaggedMemoryAllocations().get(testQuery);
    assertEquals(allocations, ImmutableMap.of("test_tag", 10L));
    pool1.moveQuery(testQuery, pool2);
    assertNull(pool1.getTaggedMemoryAllocations().get(testQuery));
    allocations = pool2.getTaggedMemoryAllocations().get(testQuery);
    assertEquals(allocations, ImmutableMap.of("test_tag", 10L));

    assertEquals(pool1.getFreeBytes(), 1000);
    assertEquals(pool2.getFreeBytes(), 990);
    pool2.free(testQuery, "test", 10);
    assertEquals(pool2.getFreeBytes(), 1000);
}
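A short worked note on the accounting the test verifies, using only the MemoryPool calls exercised above (pool, query, and tag names here are illustrative): moveQuery transfers a query's reservation wholesale, so the source pool's free space snaps back to capacity while the destination's shrinks by the moved amount until the query frees it.

MemoryPool source = new MemoryPool(new MemoryPoolId("source"), new DataSize(1000, BYTE));
MemoryPool target = new MemoryPool(new MemoryPoolId("target"), new DataSize(1000, BYTE));
QueryId query = new QueryId("example_query");

source.reserve(query, "example_tag", 10);  // source free: 1000 - 10 = 990
source.moveQuery(query, target);           // the 10-byte reservation follows the query
// source free: back to 1000; target free: 990
target.free(query, "example_tag", 10);     // target free: back to 1000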
@Test(expectedExceptions = IllegalArgumentException.class)
public void testInvalidValues()
{
    NodeMemoryConfig config = new NodeMemoryConfig();
    config.setMaxQueryTotalMemoryPerNode(new DataSize(1, GIGABYTE));
    config.setHeapHeadroom(new DataSize(3.1, GIGABYTE));
    // With a 4GB heap and a 1GB per-node query memory limit, 4GB - 1GB = 3GB remains
    // for the general pool and the heap headroom together; a 3.1GB headroom alone
    // already exceeds that, so validation must fail.
    validateHeapHeadroom(config, new DataSize(4, GIGABYTE).toBytes());
}
public static void assertExpectedOperatorStats(OperatorStats actual)
{
    assertEquals(actual.getTotalDrivers(), 1);
    assertEquals(actual.getAddInputWall(), new Duration(3, NANOSECONDS));
    assertEquals(actual.getAddInputCpu(), new Duration(4, NANOSECONDS));
    assertEquals(actual.getRawInputDataSize(), new DataSize(5, BYTE));
    assertEquals(actual.getInputDataSize(), new DataSize(6, BYTE));
    assertEquals(actual.getInputPositions(), 7);
    assertEquals(actual.getSumSquaredInputPositions(), 8.0);

    assertEquals(actual.getGetOutputWall(), new Duration(10, NANOSECONDS));
    assertEquals(actual.getGetOutputCpu(), new Duration(11, NANOSECONDS));
    assertEquals(actual.getOutputDataSize(), new DataSize(12, BYTE));
    assertEquals(actual.getOutputPositions(), 13);

    assertEquals(actual.getPhysicalWrittenDataSize(), new DataSize(14, BYTE));

    assertEquals(actual.getBlockedWall(), new Duration(15, NANOSECONDS));

    assertEquals(actual.getUserMemoryReservation(), new DataSize(19, BYTE));
    assertEquals(actual.getRevocableMemoryReservation(), new DataSize(20, BYTE));
    assertEquals(actual.getSystemMemoryReservation(), new DataSize(21, BYTE));
    assertEquals(actual.getPeakUserMemoryReservation(), new DataSize(22, BYTE));
    assertEquals(actual.getPeakSystemMemoryReservation(), new DataSize(23, BYTE));
    assertEquals(actual.getPeakTotalMemoryReservation(), new DataSize(24, BYTE));

    assertEquals(actual.getSpilledDataSize(), new DataSize(25, BYTE));

    assertEquals(actual.getInfo().getClass(), SplitOperatorInfo.class);
    assertEquals(((SplitOperatorInfo) actual.getInfo()).getSplitInfo(), NON_MERGEABLE_INFO.getSplitInfo());
}
@Test
public void testExceedMemoryLimit()
        throws Exception
{
    List<Page> input = rowPagesBuilder(BIGINT)
            .row(1L)
            .build();
    DriverContext smallDriverContext = createTaskContext(executor, scheduledExecutor, TEST_SESSION, new DataSize(1, BYTE))
            .addPipelineContext(0, true, true, false)
            .addDriverContext();
    TopNOperatorFactory operatorFactory = new TopNOperatorFactory(
            0,
            new PlanNodeId("test"),
            ImmutableList.of(BIGINT),
            100,
            ImmutableList.of(0),
            ImmutableList.of(ASC_NULLS_LAST));
    try (Operator operator = operatorFactory.createOperator(smallDriverContext)) {
        operator.addInput(input.get(0));
        fail("must fail because of exceeding local memory limit");
    }
    catch (ExceededMemoryLimitException ignore) {
    }
}