private synchronized void updateNodes(MemoryPoolAssignmentsRequest assignments) { ImmutableSet.Builder<Node> builder = ImmutableSet.builder(); Set<Node> aliveNodes = builder .addAll(nodeManager.getNodes(ACTIVE)) .addAll(nodeManager.getNodes(SHUTTING_DOWN)) .build(); ImmutableSet<String> aliveNodeIds = aliveNodes.stream() .map(Node::getNodeIdentifier) .collect(toImmutableSet()); // Remove nodes that don't exist anymore // Make a copy to materialize the set difference Set<String> deadNodes = ImmutableSet.copyOf(difference(nodes.keySet(), aliveNodeIds)); nodes.keySet().removeAll(deadNodes); // Add new nodes for (Node node : aliveNodes) { if (!nodes.containsKey(node.getNodeIdentifier())) { nodes.put(node.getNodeIdentifier(), new RemoteNodeMemory(node, httpClient, memoryInfoCodec, assignmentsRequestJsonCodec, locationFactory.createMemoryInfoLocation(node))); } } // If work isn't scheduled on the coordinator (the current node) there is no point // in polling or updating (when moving queries to the reserved pool) its memory pools if (!isWorkScheduledOnCoordinator) { nodes.remove(nodeManager.getCurrentNode().getNodeIdentifier()); } // Schedule refresh for (RemoteNodeMemory node : nodes.values()) { node.asyncRefresh(assignments); } }
/**
 * Builds a {@link DataDefinitionExecution} for a DDL statement by looking up the
 * registered task for the statement's concrete class and starting a query state machine.
 *
 * @throws IllegalArgumentException if no task is registered for the statement type
 */
private <T extends Statement> DataDefinitionExecution<T> createDataDefinitionExecution(
        String query,
        Session session,
        ResourceGroupId resourceGroup,
        T statement,
        List<Expression> parameters,
        WarningCollector warningCollector)
{
    // The registry is keyed by the concrete Statement class, so when an entry
    // exists the cast to DataDefinitionTask<T> is safe.
    @SuppressWarnings("unchecked")
    DataDefinitionTask<T> ddlTask = (DataDefinitionTask<T>) tasks.get(statement.getClass());
    checkArgument(ddlTask != null, "no task for statement: %s", statement.getClass().getSimpleName());

    QueryStateMachine queryStateMachine = QueryStateMachine.begin(
            query,
            session,
            locationFactory.createQueryLocation(session.getQueryId()),
            resourceGroup,
            ddlTask.isTransactionControl(),
            transactionManager,
            accessControl,
            executor,
            metadata,
            warningCollector);
    // Surface the DDL verb (e.g. "CREATE TABLE") as the query's update type
    queryStateMachine.setUpdateType(ddlTask.getName());

    return new DataDefinitionExecution<>(ddlTask, statement, transactionManager, metadata, accessControl, queryStateMachine, parameters);
}
}
taskId, node.getNodeIdentifier(), locationFactory.createTaskLocation(node, taskId), fragment, initialSplits,
SqlStageExecution stage = createSqlStageExecution( stageId, locationFactory.createStageLocation(stageId), plan.getFragment(), remoteTaskFactory,
taskId -> createSqlTask( taskId, locationFactory.createLocalTaskLocation(taskId), nodeInfo.getNodeId(), queryContexts.getUnchecked(taskId.getQueryId()),
SqlStageExecution stage = createSqlStageExecution( stageId, locationFactory.createStageLocation(stageId), plan.getFragment(), remoteTaskFactory,
taskId -> createSqlTask( taskId, locationFactory.createLocalTaskLocation(taskId), nodeInfo.getNodeId(), queryContexts.getUnchecked(taskId.getQueryId()),
/**
 * Test helper: builds a single {@link SqlStageExecution} (stage 0 of a fixed query id)
 * backed by a mock remote-task factory, and finalizes its output buffer layout so the
 * stage can be scheduled.
 */
private SqlStageExecution createSqlStageExecution(StageExecutionPlan tableScanPlan, NodeTaskMap nodeTaskMap)
{
    StageId stageId = new StageId(new QueryId("query"), 0);
    SqlStageExecution stageExecution = SqlStageExecution.createSqlStageExecution(
            stageId,
            locationFactory.createStageLocation(stageId),
            tableScanPlan.getFragment(),
            new MockRemoteTaskFactory(queryExecutor, scheduledExecutor),
            TEST_SESSION,
            true,
            nodeTaskMap,
            queryExecutor,
            new NoOpFailureDetector(),
            new SplitSchedulerStats());
    // Declare the single output buffer and close the buffer set so no more can be added
    stageExecution.setOutputBuffers(createInitialEmptyOutputBuffers(PARTITIONED)
            .withBuffer(OUT, 0)
            .withNoMoreBufferIds());
    return stageExecution;
}
/**
 * Creates the execution object for a data-definition (DDL) statement.
 * Resolves the handler task registered for the statement's runtime class,
 * begins the query state machine, and wraps both in a DataDefinitionExecution.
 *
 * @throws IllegalArgumentException if the statement type has no registered task
 */
private <T extends Statement> DataDefinitionExecution<T> createDataDefinitionExecution(
        String query,
        Session session,
        ResourceGroupId resourceGroup,
        T statement,
        List<Expression> parameters,
        WarningCollector warningCollector)
{
    // Registry lookup by concrete class; the unchecked cast is sound whenever
    // a task was registered for exactly this Statement subtype.
    @SuppressWarnings("unchecked")
    DataDefinitionTask<T> task = (DataDefinitionTask<T>) tasks.get(statement.getClass());
    checkArgument(task != null, "no task for statement: %s", statement.getClass().getSimpleName());

    QueryStateMachine stateMachine = QueryStateMachine.begin(
            query,
            session,
            locationFactory.createQueryLocation(session.getQueryId()),
            resourceGroup,
            task.isTransactionControl(),
            transactionManager,
            accessControl,
            executor,
            metadata,
            warningCollector);
    // Record the DDL operation name as the query's update type
    stateMachine.setUpdateType(task.getName());

    return new DataDefinitionExecution<>(task, statement, transactionManager, metadata, accessControl, stateMachine, parameters);
}
}
private synchronized void updateNodes(MemoryPoolAssignmentsRequest assignments) { ImmutableSet.Builder<Node> builder = ImmutableSet.builder(); Set<Node> aliveNodes = builder .addAll(nodeManager.getNodes(ACTIVE)) .addAll(nodeManager.getNodes(SHUTTING_DOWN)) .build(); ImmutableSet<String> aliveNodeIds = aliveNodes.stream() .map(Node::getNodeIdentifier) .collect(toImmutableSet()); // Remove nodes that don't exist anymore // Make a copy to materialize the set difference Set<String> deadNodes = ImmutableSet.copyOf(difference(nodes.keySet(), aliveNodeIds)); nodes.keySet().removeAll(deadNodes); // Add new nodes for (Node node : aliveNodes) { if (!nodes.containsKey(node.getNodeIdentifier())) { nodes.put(node.getNodeIdentifier(), new RemoteNodeMemory(node, httpClient, memoryInfoCodec, assignmentsRequestJsonCodec, locationFactory.createMemoryInfoLocation(node))); } } // If work isn't scheduled on the coordinator (the current node) there is no point // in polling or updating (when moving queries to the reserved pool) its memory pools if (!isWorkScheduledOnCoordinator) { nodes.remove(nodeManager.getCurrentNode().getNodeIdentifier()); } // Schedule refresh for (RemoteNodeMemory node : nodes.values()) { node.asyncRefresh(assignments); } }
taskId, node.getNodeIdentifier(), locationFactory.createTaskLocation(node, taskId), fragment, initialSplits,
/**
 * Test fixture factory: constructs stage 0 of a synthetic query over the given
 * table-scan plan, using mocked remote tasks, then seals its output buffers
 * (one partitioned buffer) so the stage is ready for scheduling tests.
 */
private SqlStageExecution createSqlStageExecution(StageExecutionPlan tableScanPlan, NodeTaskMap nodeTaskMap)
{
    StageId id = new StageId(new QueryId("query"), 0);
    SqlStageExecution result = SqlStageExecution.createSqlStageExecution(
            id,
            locationFactory.createStageLocation(id),
            tableScanPlan.getFragment(),
            new MockRemoteTaskFactory(queryExecutor, scheduledExecutor),
            TEST_SESSION,
            true,
            nodeTaskMap,
            queryExecutor,
            new NoOpFailureDetector(),
            new SplitSchedulerStats());
    // One output buffer, then close the set — required before the stage can run
    result.setOutputBuffers(createInitialEmptyOutputBuffers(PARTITIONED)
            .withBuffer(OUT, 0)
            .withNoMoreBufferIds());
    return result;
}
query, session, locationFactory.createQueryLocation(session.getQueryId()), resourceGroup, preparedQuery,
query, session, locationFactory.createQueryLocation(session.getQueryId()), resourceGroup, preparedQuery,
session, query, locationFactory.createQueryLocation(queryId), Optional.ofNullable(selectionContext).map(SelectionContext::getResourceGroupId), queryExecutor,
session, query, locationFactory.createQueryLocation(queryId), Optional.ofNullable(selectionContext).map(SelectionContext::getResourceGroupId), queryExecutor,