/**
 * Returns the resources held by the given job back to the pool of currently
 * available cluster capacity, then sanity-checks against the cluster maximum.
 *
 * @param job the job whose required capacity is being released
 */
@Override
public void release(JobSpecification job) {
    final IClusterCapacity required = job.getRequiredClusterCapacity();
    final IClusterCapacity current = resourceManager.getCurrentCapacity();
    final long freedMemoryBytes = required.getAggregatedMemoryByteSize();
    final int freedCores = required.getAggregatedCores();
    // Add the job's memory and cores back to the available capacity.
    current.setAggregatedMemoryByteSize(current.getAggregatedMemoryByteSize() + freedMemoryBytes);
    current.setAggregatedCores(current.getAggregatedCores() + freedCores);
    ensureMaxCapacity();
}
/**
 * Sanity check: after a release, the available capacity should never exceed
 * the cluster's maximum capacity. Logs a warning if it does.
 */
private void ensureMaxCapacity() {
    final IClusterCapacity current = resourceManager.getCurrentCapacity();
    final IReadOnlyClusterCapacity maximum = resourceManager.getMaximumCapacity();
    final boolean coresExceeded = current.getAggregatedCores() > maximum.getAggregatedCores();
    final boolean memoryExceeded = current.getAggregatedMemoryByteSize() > maximum.getAggregatedMemoryByteSize();
    if (coresExceeded || memoryExceeded) {
        LOGGER.warn("Current cluster available capacity {} is more than its maximum capacity {}", current, maximum);
    }
}
}
/**
 * Computes the cluster capacity required by a job as the maximum memory and
 * core requirement over all of its plan stages.
 *
 * @param stages the plan stages of the job; must be non-empty
 * @param computationLocations the number of computation locations (partitions)
 * @param sortFrameLimit the frame limit for sort operators
 * @param groupFrameLimit the frame limit for group-by operators
 * @param joinFrameLimit the frame limit for join operators
 * @param textSearchFrameLimit the frame limit for text-search operators
 * @param frameSize the frame size in bytes
 * @return the aggregated capacity requirement
 * @throws IllegalStateException if {@code stages} is empty
 */
public static IClusterCapacity getStageBasedRequiredCapacity(List<PlanStage> stages, int computationLocations,
        int sortFrameLimit, int groupFrameLimit, int joinFrameLimit, int textSearchFrameLimit, int frameSize) {
    final OperatorResourcesComputer computer = new OperatorResourcesComputer(computationLocations, sortFrameLimit,
            groupFrameLimit, joinFrameLimit, textSearchFrameLimit, frameSize);
    final IClusterCapacity clusterCapacity = new ClusterCapacity();
    // Use primitive locals: the primitive streams already yield long/int, so
    // boxing them into Long/Integer was needless overhead.
    final long maxRequiredMemory = stages.stream().mapToLong(stage -> stage.getRequiredMemory(computer)).max()
            .orElseThrow(IllegalStateException::new);
    clusterCapacity.setAggregatedMemoryByteSize(maxRequiredMemory);
    final int maxRequiredCores = stages.stream().mapToInt(stage -> stage.getRequiredCores(computer)).max()
            .orElseThrow(IllegalStateException::new);
    clusterCapacity.setAggregatedCores(maxRequiredCores);
    return clusterCapacity;
}
}
private void addOutputBuffer(ILogicalOperator op) { if (op.getExecutionMode() == AbstractLogicalOperator.ExecutionMode.PARTITIONED || op.getExecutionMode() == AbstractLogicalOperator.ExecutionMode.LOCAL) { stageMemorySoFar += frameSize * numComputationPartitions; // every operator needs one output buffer. } else { stageMemorySoFar += frameSize; // every operator needs one output buffer. } clusterCapacity.setAggregatedMemoryByteSize(stageMemorySoFar); }
public RequiredCapacityVisitor(int numComputationPartitions, int sortFrameLimit, int groupFrameLimit, int joinFrameLimit, int frameSize, IClusterCapacity clusterCapacity) { this.numComputationPartitions = numComputationPartitions; this.frameSize = frameSize; this.groupByMemorySize = groupFrameLimit * (long) frameSize; this.joinMemorySize = joinFrameLimit * (long) frameSize; this.sortMemorySize = sortFrameLimit * (long) frameSize; this.clusterCapacity = clusterCapacity; this.clusterCapacity.setAggregatedCores(1); // At least one core is needed. }
/**
 * Asserts that the stage-based capacity computation yields the expected
 * aggregated memory requirement for the given stages.
 *
 * @param stages the plan stages to size
 * @param expectedMemory the expected aggregated memory in bytes
 */
private void assertRequiredMemory(List<PlanStage> stages, long expectedMemory) {
    final IClusterCapacity clusterCapacity = ResourceUtils.getStageBasedRequiredCapacity(stages, PARALLELISM,
            FRAME_LIMIT, FRAME_LIMIT, FRAME_LIMIT, FRAME_LIMIT, FRAME_SIZE);
    // JUnit convention: expected value first, actual second — the original
    // had them reversed, which produces misleading failure messages.
    Assert.assertEquals(expectedMemory, clusterCapacity.getAggregatedMemoryByteSize());
}
}
@Test public void testParallelGroupBy() throws AlgebricksException { IClusterCapacity clusterCapacity = new ClusterCapacity(); RequiredCapacityVisitor visitor = makeComputationCapacityVisitor(PARALLELISM, clusterCapacity); // Constructs a parallel group-by query plan. GroupByOperator globalGby = makeGroupByOperator(AbstractLogicalOperator.ExecutionMode.PARTITIONED); ExchangeOperator exchange = new ExchangeOperator(); exchange.setPhysicalOperator(new HashPartitionExchangePOperator(Collections.emptyList(), null)); GroupByOperator localGby = makeGroupByOperator(AbstractLogicalOperator.ExecutionMode.LOCAL); globalGby.getInputs().add(new MutableObject<>(exchange)); exchange.getInputs().add(new MutableObject<>(localGby)); // Verifies the calculated cluster capacity requirement for the test quer plan. globalGby.accept(visitor, null); Assert.assertTrue(clusterCapacity.getAggregatedCores() == PARALLELISM); Assert.assertTrue(clusterCapacity.getAggregatedMemoryByteSize() == 2 * MEMORY_BUDGET * PARALLELISM + 2 * FRAME_SIZE * PARALLELISM * PARALLELISM); }
/**
 * Adds the memory budget of a blocking operator to the running stage total,
 * scaling by the partition count for partitioned/local operators.
 *
 * @param op the blocking operator being visited
 * @param memSize the operator's memory budget in bytes
 * @throws AlgebricksException if visiting the operator's inputs fails
 */
private void calculateMemoryUsageForBlockingOperators(ILogicalOperator op, long memSize) throws AlgebricksException {
    visitInternal(op, false);
    final AbstractLogicalOperator.ExecutionMode mode = op.getExecutionMode();
    final boolean perPartition = mode == AbstractLogicalOperator.ExecutionMode.PARTITIONED
            || mode == AbstractLogicalOperator.ExecutionMode.LOCAL;
    // A partitioned/local operator holds its budget on every partition.
    stageMemorySoFar += perPartition ? memSize * numComputationPartitions : memSize;
    clusterCapacity.setAggregatedMemoryByteSize(stageMemorySoFar);
}
/**
 * Raises the required core count to the full partition count when the plan
 * contains a partitioned or local operator.
 *
 * @param op the operator whose execution mode determines the core requirement
 */
private void setAvailableCores(ILogicalOperator op) {
    final AbstractLogicalOperator.ExecutionMode mode = op.getExecutionMode();
    switch (mode) {
        case PARTITIONED:
        case LOCAL:
            clusterCapacity.setAggregatedCores((int) numComputationPartitions);
            break;
        default:
            // Unpartitioned operators do not change the core requirement.
            break;
    }
}
}
/**
 * Attempts to allocate cluster capacity for a job.
 *
 * @param job the job requesting capacity
 * @return {@code EXECUTE} if the job's requirement was deducted from the
 *         currently available capacity, or {@code QUEUE} if it must wait
 * @throws HyracksException if the requirement exceeds the cluster's maximum
 *         capacity and can therefore never be satisfied
 */
@Override
public JobSubmissionStatus allocate(JobSpecification job) throws HyracksException {
    final IClusterCapacity required = job.getRequiredClusterCapacity();
    final long requiredMemory = required.getAggregatedMemoryByteSize();
    final int requiredCores = required.getAggregatedCores();

    // Reject outright if the job can never fit on this cluster.
    final IReadOnlyClusterCapacity maximumCapacity = resourceManager.getMaximumCapacity();
    final boolean exceedsMaximum = requiredMemory > maximumCapacity.getAggregatedMemoryByteSize()
            || requiredCores > maximumCapacity.getAggregatedCores();
    if (exceedsMaximum) {
        throw HyracksException.create(ErrorCode.JOB_REQUIREMENTS_EXCEED_CAPACITY, required.toString(),
                maximumCapacity.toString());
    }

    // Queue the job if it fits the cluster but not the currently free capacity.
    final IClusterCapacity current = resourceManager.getCurrentCapacity();
    final long availableMemory = current.getAggregatedMemoryByteSize();
    final int availableCores = current.getAggregatedCores();
    if (requiredMemory > availableMemory || requiredCores > availableCores) {
        return JobSubmissionStatus.QUEUE;
    }

    // Deduct the job's requirement from the available capacity and run it.
    current.setAggregatedMemoryByteSize(availableMemory - requiredMemory);
    current.setAggregatedCores(availableCores - requiredCores);
    return JobSubmissionStatus.EXECUTE;
}
@Test public void testUnPartitionedGroupBy() throws AlgebricksException { IClusterCapacity clusterCapacity = new ClusterCapacity(); RequiredCapacityVisitor visitor = makeComputationCapacityVisitor(PARALLELISM, clusterCapacity); // Constructs a parallel group-by query plan. GroupByOperator globalGby = makeGroupByOperator(AbstractLogicalOperator.ExecutionMode.UNPARTITIONED); ExchangeOperator exchange = new ExchangeOperator(); exchange.setPhysicalOperator(new OneToOneExchangePOperator()); exchange.setExecutionMode(AbstractLogicalOperator.ExecutionMode.UNPARTITIONED); GroupByOperator localGby = makeGroupByOperator(AbstractLogicalOperator.ExecutionMode.UNPARTITIONED); globalGby.getInputs().add(new MutableObject<>(exchange)); exchange.getInputs().add(new MutableObject<>(localGby)); // Verifies the calculated cluster capacity requirement for the test quer plan. globalGby.accept(visitor, null); Assert.assertTrue(clusterCapacity.getAggregatedCores() == 1); Assert.assertTrue(clusterCapacity.getAggregatedMemoryByteSize() == 2 * MEMORY_BUDGET + FRAME_SIZE); }
/**
 * Adds the memory requirement of an exchange connector to the running stage
 * total. One-to-one and sort-merge exchanges only need a single output
 * buffer; all other exchanges need send/receive buffers between every pair
 * of partitions.
 *
 * @param op the exchange operator being visited
 * @throws AlgebricksException if visiting the operator's inputs fails
 */
private void calculateMemoryUsageForExchange(ExchangeOperator op) throws AlgebricksException {
    visitInternal(op, false);
    final PhysicalOperatorTag tag = op.getPhysicalOperator().getOperatorTag();
    final boolean pipelinedExchange =
            tag == PhysicalOperatorTag.ONE_TO_ONE_EXCHANGE || tag == PhysicalOperatorTag.SORT_MERGE_EXCHANGE;
    if (pipelinedExchange) {
        addOutputBuffer(op);
        return;
    }
    // Buffers in both directions for every partition pair (2L forces 64-bit
    // arithmetic for the whole product).
    stageMemorySoFar += 2L * MAX_BUFFER_PER_CONNECTION * numComputationPartitions * numComputationPartitions
            * frameSize;
    clusterCapacity.setAggregatedMemoryByteSize(stageMemorySoFar);
}
// NOTE(review): fragment of a test method whose enclosing definition is not visible here.
// Checks the computed capacity: PARALLELISM cores, and memory covering three partitioned
// operator budgets, two all-to-all exchange buffer pools, and three per-partition output
// buffers — presumably for a three-operator partitioned plan; confirm against the full test.
Assert.assertTrue(clusterCapacity.getAggregatedCores() == PARALLELISM); Assert.assertTrue(clusterCapacity.getAggregatedMemoryByteSize() == 3 * MEMORY_BUDGET * PARALLELISM + 2 * 2L * PARALLELISM * PARALLELISM * FRAME_SIZE + 3 * FRAME_SIZE * PARALLELISM);
@Test public void testUnPartitionedJoin() throws AlgebricksException { IClusterCapacity clusterCapacity = new ClusterCapacity(); RequiredCapacityVisitor visitor = makeComputationCapacityVisitor(PARALLELISM, clusterCapacity); // Constructs a join query plan. InnerJoinOperator join = makeJoinOperator(AbstractLogicalOperator.ExecutionMode.UNPARTITIONED); // Left child plan of the join. ExchangeOperator leftChildExchange = new ExchangeOperator(); leftChildExchange.setExecutionMode(AbstractLogicalOperator.ExecutionMode.UNPARTITIONED); leftChildExchange.setPhysicalOperator(new OneToOneExchangePOperator()); InnerJoinOperator leftChild = makeJoinOperator(AbstractLogicalOperator.ExecutionMode.UNPARTITIONED); join.getInputs().add(new MutableObject<>(leftChildExchange)); leftChildExchange.getInputs().add(new MutableObject<>(leftChild)); EmptyTupleSourceOperator ets = new EmptyTupleSourceOperator(); ets.setExecutionMode(AbstractLogicalOperator.ExecutionMode.UNPARTITIONED); leftChild.getInputs().add(new MutableObject<>(ets)); leftChild.getInputs().add(new MutableObject<>(ets)); // Right child plan of the join. ExchangeOperator rightChildExchange = new ExchangeOperator(); rightChildExchange.setExecutionMode(AbstractLogicalOperator.ExecutionMode.UNPARTITIONED); rightChildExchange.setPhysicalOperator(new OneToOneExchangePOperator()); GroupByOperator rightChild = makeGroupByOperator(AbstractLogicalOperator.ExecutionMode.UNPARTITIONED); join.getInputs().add(new MutableObject<>(rightChildExchange)); rightChildExchange.getInputs().add(new MutableObject<>(rightChild)); rightChild.getInputs().add(new MutableObject<>(ets)); // Verifies the calculated cluster capacity requirement for the test quer plan. join.accept(visitor, null); Assert.assertTrue(clusterCapacity.getAggregatedCores() == 1); Assert.assertTrue(clusterCapacity.getAggregatedMemoryByteSize() == 3 * MEMORY_BUDGET + 5L * FRAME_SIZE); }