/**
 * Returns the proxy for the primary cluster controller this node is attached to.
 *
 * @return the {@link IClusterController} registered under {@code primaryCcId}
 */
public IClusterController getPrimaryClusterController() {
    return getClusterController(primaryCcId);
}
/**
 * Forwards an application-level message to the given cluster controller,
 * tagging it with this node's id.
 *
 * @param ccId         id of the destination cluster controller
 * @param data         serialized message payload
 * @param deploymentId deployment the message belongs to
 * @throws Exception if the message could not be delivered
 */
public void sendApplicationMessageToCC(CcId ccId, byte[] data, DeploymentId deploymentId) throws Exception {
    IClusterController cc = getClusterController(ccId);
    cc.sendApplicationMessageToCC(data, deploymentId, id);
}
/** Answers the CC's ping so it knows this node is still alive; best-effort only. */
@Override
public void run() {
    try {
        ncs.getClusterController(ccId).notifyPingResponse(ncs.getId());
    } catch (Exception ex) {
        // A missed ping response is only logged; the CC will retry or time out.
        LOGGER.info("failed to respond to ping from cc {}", ccId, ex);
    }
}
}
// Removes the cached activity cluster graph for an undeployed job spec; on
// failure, best-effort notifies the originating CC that the undeploy failed
// on this node.
@Override
public void run() {
    try {
        ncs.removeActivityClusterGraph(deployedJobSpecId);
    } catch (HyracksException e) {
        try {
            // Report the failure back to the CC so it can surface it to the client.
            ncs.getClusterController(ccId).notifyDeployedJobSpecFailure(deployedJobSpecId, ncs.getId());
        } catch (Exception e1) {
            // NOTE(review): notification failure is swallowed via printStackTrace,
            // and the original HyracksException is never logged — prefer routing
            // both through the class logger if one is available; TODO confirm.
            e1.printStackTrace();
        }
    }
}
/**
 * Notifies the job's owning CC that this node has finished writing the given
 * result partition.
 *
 * @throws HyracksException if the notification could not be delivered
 */
@Override
public void reportPartitionWriteCompletion(JobId jobId, ResultSetId rsId, int partition) throws HyracksException {
    try {
        LOGGER.trace("Reporting partition write completion: JobId: {}:ResultSetId: {}:partition: {}", jobId, rsId,
                partition);
        ncs.getClusterController(jobId.getCcId()).reportResultPartitionWriteCompletion(jobId, rsId, partition);
    } catch (Exception ex) {
        throw HyracksException.create(ex);
    }
}
/**
 * Registers this node's interest in the given partitions. For each partition id
 * the collector is remembered locally, then a {@link PartitionRequest} is sent
 * to the CC owning the job.
 *
 * @throws Exception if a request could not be registered with the CC
 */
public synchronized void advertisePartitionRequest(TaskAttemptId taId, Collection<PartitionId> pids,
        IPartitionCollector collector, PartitionState minState) throws Exception {
    for (PartitionId pid : pids) {
        // Record the collector first so an incoming partition can be routed to it.
        partitionRequestMap.put(pid, collector);
        PartitionRequest request = new PartitionRequest(pid, nodeController.getId(), taId, minState);
        nodeController.getClusterController(jobId.getCcId()).registerPartitionRequest(request);
    }
}
/** Captures this node's state dump and ships it to the requesting CC as a UTF-8 string. */
@Override
protected void doRun() throws Exception {
    ByteArrayOutputStream buffer = new ByteArrayOutputStream();
    ncs.getContext().getStateDumpHandler().dumpState(buffer);
    ncs.getClusterController(ccId).notifyStateDump(ncs.getContext().getNodeId(), stateDumpId,
            buffer.toString("UTF-8"));
}
}
// Finalizes this joblet after its work is done: removes it from the NC's joblet
// map, fires the joblet-finish listener with the recorded cleanup status, closes
// the joblet, clears the pending flag, and finally tells the owning CC that
// cleanup completed on this node. The ordering of these steps is significant.
private void performCleanup() {
    nodeController.getJobletMap().remove(jobId);
    IJobletEventListener listener = getJobletEventListener();
    if (listener != null) {
        listener.jobletFinish(cleanupStatus);
    }
    close();
    cleanupPending = false;
    try {
        nodeController.getClusterController(jobId.getCcId()).notifyJobletCleanup(jobId, nodeController.getId());
    } catch (Exception e) {
        // NOTE(review): failure to notify the CC is swallowed via printStackTrace;
        // consider routing through the class logger instead — TODO confirm one exists.
        e.printStackTrace();
    }
}
// Deploys the given binaries on this node, then reports SUCCEED/FAIL back to
// the originating CC.
@Override
public void run() {
    DeploymentStatus status;
    try {
        DeploymentUtils.deploy(deploymentId, binaryURLs, ncs.getContext().getJobSerializerDeserializerContainer(),
                ncs.getServerContext(), true);
        status = DeploymentStatus.SUCCEED;
    } catch (Exception e) {
        status = DeploymentStatus.FAIL;
        // NOTE(review): the deployment failure detail only goes to stderr; the CC
        // sees just the FAIL status. Prefer the class logger if one is available —
        // TODO confirm.
        e.printStackTrace();
    }
    try {
        IClusterController ccs = ncs.getClusterController(ccId);
        ccs.notifyDeployBinary(deploymentId, ncs.getId(), status);
    } catch (Exception e) {
        // If the status cannot be reported at all, fail this work item loudly.
        throw new RuntimeException(e);
    }
}
@Override public void registerResultPartitionLocation(JobId jobId, ResultSetId rsId, int partition, int nPartitions, boolean orderedResult, boolean emptyResult) throws HyracksException { try { // Be sure to send the *public* network address to the CC ncs.getClusterController(jobId.getCcId()).registerResultPartitionLocation(jobId, rsId, orderedResult, emptyResult, partition, nPartitions, ncs.getResultNetworkManager().getPublicNetworkAddress()); } catch (Exception e) { throw HyracksException.create(e); } }
// Undeploys the given deployment from this node, then reports SUCCEED/FAIL back
// to the originating CC.
@Override
public void run() {
    DeploymentStatus status;
    try {
        DeploymentUtils.undeploy(deploymentId, ncs.getContext().getJobSerializerDeserializerContainer(),
                ncs.getServerContext());
        status = DeploymentStatus.SUCCEED;
    } catch (Exception e) {
        status = DeploymentStatus.FAIL;
        // Surface the undeploy failure detail (previously dropped silently with no
        // trace anywhere); mirrors the deploy work item's handling. The CC still
        // only sees the FAIL status.
        e.printStackTrace();
    }
    try {
        IClusterController ccs = ncs.getClusterController(ccId);
        ccs.notifyDeployBinary(deploymentId, ncs.getId(), status);
    } catch (Exception e) {
        // If the status cannot be reported at all, fail this work item loudly.
        throw new RuntimeException(e);
    }
}
// Caches a deployed job spec's activity cluster graph on this node. Unless this
// is an upsert, first guards against a duplicate spec id; any failure is
// reported back to the originating CC (best-effort).
@Override
public void run() {
    try {
        if (!upsert) {
            ncs.checkForDuplicateDeployedJobSpec(deployedJobSpecId);
        }
        ActivityClusterGraph acg = (ActivityClusterGraph) DeploymentUtils.deserialize(acgBytes, null, ncs.getContext());
        ncs.storeActivityClusterGraph(deployedJobSpecId, acg);
    } catch (HyracksException e) {
        try {
            ncs.getClusterController(ccId).notifyDeployedJobSpecFailure(deployedJobSpecId, ncs.getId());
        } catch (Exception e1) {
            // NOTE(review): failure to notify the CC is swallowed via printStackTrace;
            // prefer the class logger if one exists — TODO confirm.
            e1.printStackTrace();
        }
    }
}
/**
 * Advertises this node as a provider for the given partition at the given
 * state by registering a partition descriptor with the CC.
 *
 * @throws HyracksDataException if registration with the CC fails
 */
private void updatePartitionState(CcId ccId, PartitionId pid, TaskAttemptId taId, IPartition partition,
        PartitionState state) throws HyracksDataException {
    PartitionDescriptor descriptor = new PartitionDescriptor(pid, ncs.getId(), taId, partition.isReusable());
    descriptor.setState(state);
    try {
        ncs.getClusterController(ccId).registerPartitionProvider(descriptor);
    } catch (Exception ex) {
        throw HyracksDataException.create(ex);
    }
}
/**
 * Takes a JSON thread dump of this JVM and sends it to the requesting CC.
 * A failed dump is reported as {@code null}; delivery failures are only logged.
 */
@Override
public void run() {
    String result;
    try {
        result = ThreadDumpUtil.takeDumpJSONString();
    } catch (Exception ex) {
        LOGGER.log(Level.WARN, "Exception taking thread dump", ex);
        result = null;
    }
    try {
        ncs.getClusterController(ccId).notifyThreadDump(ncs.getContext().getNodeId(), requestId, result);
    } catch (Exception ex) {
        LOGGER.log(Level.WARN, "Exception sending thread dump to CC", ex);
    }
}
}
/**
 * Handles a failed task: logs the primary cause, aborts any result readers for
 * the job, notifies the owning CC of the failure, and detaches the task from
 * its joblet.
 */
@Override
public void run() {
    Exception primary = exceptions.get(0);
    // Interrupt-caused failures are expected during aborts; log them quietly.
    Level level = ExceptionUtils.causedByInterrupt(primary) ? Level.DEBUG : Level.WARN;
    LOGGER.log(level, "task " + taskId + " has failed", primary);
    try {
        IResultPartitionManager resultPartitionManager = ncs.getResultPartitionManager();
        if (resultPartitionManager != null) {
            resultPartitionManager.abortReader(jobId);
        }
        ncs.getClusterController(jobId.getCcId()).notifyTaskFailure(jobId, taskId, ncs.getId(), exceptions);
    } catch (Exception e) {
        LOGGER.log(Level.ERROR, "Failure reporting task failure to cluster controller", e);
    }
    if (task != null) {
        task.getJoblet().removeTask(task);
    }
}
/**
 * Reports successful task completion (with its profile) to the owning CC,
 * then detaches the task from its joblet. Notification failures are logged
 * but do not prevent the task from being removed.
 */
@Override
public void run() {
    TaskProfile profile =
            new TaskProfile(task.getTaskAttemptId(), task.getPartitionSendProfile(), task.getStatsCollector());
    try {
        ncs.getClusterController(task.getJobletContext().getJobId().getCcId()).notifyTaskComplete(
                task.getJobletContext().getJobId(), task.getTaskAttemptId(), ncs.getId(), profile);
    } catch (Exception e) {
        LOGGER.log(Level.ERROR, "Failed notifying task complete for " + task.getTaskAttemptId(), e);
    }
    task.getJoblet().removeTask(task);
}