@Test public void test() throws HyracksException { ClusterCapacity capacity = new ClusterCapacity(); String nodeId = "node1"; capacity.update(nodeId, new NodeCapacity(1024L, 8)); Assert.assertTrue(capacity.getAggregatedMemoryByteSize() == 1024L); Assert.assertTrue(capacity.getAggregatedCores() == 8); capacity.update(nodeId, new NodeCapacity(-1L, -2)); Assert.assertTrue(capacity.getAggregatedMemoryByteSize() == 0L); Assert.assertTrue(capacity.getAggregatedCores() == 0); capacity.getMemoryByteSize(nodeId); } catch (HyracksException e) { nodeNotExist = e.getErrorCode() == ErrorCode.NO_SUCH_NODE; nodeNotExist = false; try { capacity.getCores(nodeId); } catch (HyracksException e) { nodeNotExist = e.getErrorCode() == ErrorCode.NO_SUCH_NODE; capacity.update(nodeId, new NodeCapacity(1024L, 8)); capacity.update(nodeId, new NodeCapacity(4L, 0)); Assert.assertTrue(capacity.getAggregatedMemoryByteSize() == 0L); Assert.assertTrue(capacity.getAggregatedCores() == 0);
/**
 * Creates an empty job specification using the given frame size.
 *
 * @param frameSize size in bytes of the frames this job will use; validated/applied
 *                  by {@link #setFrameSize(int)}.
 */
public JobSpecification(int frameSize) {
    // Plan topology: root operators and the result sets the job produces.
    roots = new ArrayList<>();
    resultSetIds = new ArrayList<>();

    // Lookup tables for operators, connectors, and their wiring.
    opMap = new HashMap<>();
    connMap = new HashMap<>();
    opInputMap = new HashMap<>();
    opOutputMap = new HashMap<>();
    connectorOpMap = new HashMap<>();

    // Job-level configuration and scheduling constraints.
    properties = new HashMap<>();
    userConstraints = new HashSet<>();
    requiredClusterCapacity = new ClusterCapacity();

    // Counters and scheduling flags start at their neutral defaults.
    operatorIdCounter = 0;
    connectorIdCounter = 0;
    maxReattempts = 0;
    useConnectorPolicyForScheduling = false;

    // Applied last so any validation in the setter sees a fully initialized object.
    setFrameSize(frameSize);
}
/**
 * Computes the cluster capacity required to run a job from its plan stages.
 * Because the requirement is taken as the maximum over all stages (not the sum),
 * this reflects the peak concurrent demand rather than the job's total work.
 *
 * @param computationLocations number of locations (partitions) operators run on
 * @param sortFrameLimit       frame budget for sort operators
 * @param groupFrameLimit      frame budget for group-by operators
 * @param joinFrameLimit       frame budget for join operators
 * @param textSearchFrameLimit frame budget for inverted-index text-search operators
 * @param frameSize            size of a frame in bytes
 * @return the aggregated memory/core capacity the cluster must provide
 * @throws IllegalStateException if {@code stages} is empty (no maximum exists)
 */
public static IClusterCapacity getStageBasedRequiredCapacity(List<PlanStage> stages, int computationLocations,
        int sortFrameLimit, int groupFrameLimit, int joinFrameLimit, int textSearchFrameLimit, int frameSize) {
    final OperatorResourcesComputer computer = new OperatorResourcesComputer(computationLocations, sortFrameLimit,
            groupFrameLimit, joinFrameLimit, textSearchFrameLimit, frameSize);
    final IClusterCapacity clusterCapacity = new ClusterCapacity();
    // Primitive locals: LongStream.max()/IntStream.max() already yield primitives via
    // orElseThrow(); the previous Long/Integer declarations forced accidental autoboxing.
    final long maxRequiredMemory = stages.stream().mapToLong(stage -> stage.getRequiredMemory(computer)).max()
            .orElseThrow(IllegalStateException::new);
    clusterCapacity.setAggregatedMemoryByteSize(maxRequiredMemory);
    final int maxRequiredCores = stages.stream().mapToInt(stage -> stage.getRequiredCores(computer)).max()
            .orElseThrow(IllegalStateException::new);
    clusterCapacity.setAggregatedCores(maxRequiredCores);
    return clusterCapacity;
}
}
@Test public void testParallelGroupBy() throws AlgebricksException { IClusterCapacity clusterCapacity = new ClusterCapacity(); RequiredCapacityVisitor visitor = makeComputationCapacityVisitor(PARALLELISM, clusterCapacity); // Constructs a parallel group-by query plan. GroupByOperator globalGby = makeGroupByOperator(AbstractLogicalOperator.ExecutionMode.PARTITIONED); ExchangeOperator exchange = new ExchangeOperator(); exchange.setPhysicalOperator(new HashPartitionExchangePOperator(Collections.emptyList(), null)); GroupByOperator localGby = makeGroupByOperator(AbstractLogicalOperator.ExecutionMode.LOCAL); globalGby.getInputs().add(new MutableObject<>(exchange)); exchange.getInputs().add(new MutableObject<>(localGby)); // Verifies the calculated cluster capacity requirement for the test quer plan. globalGby.accept(visitor, null); Assert.assertTrue(clusterCapacity.getAggregatedCores() == PARALLELISM); Assert.assertTrue(clusterCapacity.getAggregatedMemoryByteSize() == 2 * MEMORY_BUDGET * PARALLELISM + 2 * FRAME_SIZE * PARALLELISM * PARALLELISM); }
@Test public void testUnPartitionedGroupBy() throws AlgebricksException { IClusterCapacity clusterCapacity = new ClusterCapacity(); RequiredCapacityVisitor visitor = makeComputationCapacityVisitor(PARALLELISM, clusterCapacity); // Constructs a parallel group-by query plan. GroupByOperator globalGby = makeGroupByOperator(AbstractLogicalOperator.ExecutionMode.UNPARTITIONED); ExchangeOperator exchange = new ExchangeOperator(); exchange.setPhysicalOperator(new OneToOneExchangePOperator()); exchange.setExecutionMode(AbstractLogicalOperator.ExecutionMode.UNPARTITIONED); GroupByOperator localGby = makeGroupByOperator(AbstractLogicalOperator.ExecutionMode.UNPARTITIONED); globalGby.getInputs().add(new MutableObject<>(exchange)); exchange.getInputs().add(new MutableObject<>(localGby)); // Verifies the calculated cluster capacity requirement for the test quer plan. globalGby.accept(visitor, null); Assert.assertTrue(clusterCapacity.getAggregatedCores() == 1); Assert.assertTrue(clusterCapacity.getAggregatedMemoryByteSize() == 2 * MEMORY_BUDGET + FRAME_SIZE); }
// NOTE(review): this method appears truncated in this view — only the capacity/visitor
// setup is present; the join plan construction, visitor invocation, and assertions
// (and the closing brace) are missing. Recover the full body from version control
// before building; left byte-identical here.
@Test public void testParallelJoin() throws AlgebricksException { IClusterCapacity clusterCapacity = new ClusterCapacity(); RequiredCapacityVisitor visitor = makeComputationCapacityVisitor(PARALLELISM, clusterCapacity);
@Test public void testUnPartitionedJoin() throws AlgebricksException { IClusterCapacity clusterCapacity = new ClusterCapacity(); RequiredCapacityVisitor visitor = makeComputationCapacityVisitor(PARALLELISM, clusterCapacity); // Constructs a join query plan. InnerJoinOperator join = makeJoinOperator(AbstractLogicalOperator.ExecutionMode.UNPARTITIONED); // Left child plan of the join. ExchangeOperator leftChildExchange = new ExchangeOperator(); leftChildExchange.setExecutionMode(AbstractLogicalOperator.ExecutionMode.UNPARTITIONED); leftChildExchange.setPhysicalOperator(new OneToOneExchangePOperator()); InnerJoinOperator leftChild = makeJoinOperator(AbstractLogicalOperator.ExecutionMode.UNPARTITIONED); join.getInputs().add(new MutableObject<>(leftChildExchange)); leftChildExchange.getInputs().add(new MutableObject<>(leftChild)); EmptyTupleSourceOperator ets = new EmptyTupleSourceOperator(); ets.setExecutionMode(AbstractLogicalOperator.ExecutionMode.UNPARTITIONED); leftChild.getInputs().add(new MutableObject<>(ets)); leftChild.getInputs().add(new MutableObject<>(ets)); // Right child plan of the join. ExchangeOperator rightChildExchange = new ExchangeOperator(); rightChildExchange.setExecutionMode(AbstractLogicalOperator.ExecutionMode.UNPARTITIONED); rightChildExchange.setPhysicalOperator(new OneToOneExchangePOperator()); GroupByOperator rightChild = makeGroupByOperator(AbstractLogicalOperator.ExecutionMode.UNPARTITIONED); join.getInputs().add(new MutableObject<>(rightChildExchange)); rightChildExchange.getInputs().add(new MutableObject<>(rightChild)); rightChild.getInputs().add(new MutableObject<>(ets)); // Verifies the calculated cluster capacity requirement for the test quer plan. join.accept(visitor, null); Assert.assertTrue(clusterCapacity.getAggregatedCores() == 1); Assert.assertTrue(clusterCapacity.getAggregatedMemoryByteSize() == 3 * MEMORY_BUDGET + 5L * FRAME_SIZE); }