/**
 * Method that defines the actions to be performed when a job finishes either successfully or with failure.
 * This method updates the state of the dag and performs clean up actions as necessary.
 */
private void onJobFinish(DagNode<JobExecutionPlan> dagNode) throws IOException {
  Dag<JobExecutionPlan> dag = this.jobToDag.get(dagNode);
  String dagId = DagManagerUtils.generateDagId(dag);
  String jobName = DagManagerUtils.getJobName(dagNode);
  ExecutionStatus jobStatus = DagManagerUtils.getExecutionStatus(dagNode);
  log.info("Job {} of Dag {} has finished with status {}", jobName, dagId, jobStatus.name());

  // The node is no longer running; remove its checkpointed job state before acting on the outcome.
  deleteJobState(dagId, dagNode);

  switch (jobStatus) {
    case COMPLETE:
      // Success: advance the dag by submitting the next eligible node(s).
      submitNext(dagId);
      break;
    case FAILED:
      // Failure: record the dag id under the bucket matching its configured failure option.
      if (DagManagerUtils.getFailureOption(dag) == FailureOption.FINISH_RUNNING) {
        this.failedDagIdsFinishRunning.add(dagId);
      } else {
        this.failedDagIdsFinishAllPossible.add(dagId);
      }
      break;
    default:
      // Any other status requires no action here.
      break;
  }
}
// Pull the serialized JobSpec and SpecExecutor sub-objects out of the serialized plan.
JsonObject jobSpecJson = (JsonObject) serializedJobExecutionPlan.get(SerializationConstants.JOB_SPEC_KEY);
JsonObject specExecutorJson = (JsonObject) serializedJobExecutionPlan.get(SerializationConstants.SPEC_EXECUTOR_KEY);
// The execution status was serialized via Enum.name(), so valueOf() round-trips it.
// NOTE(review): valueOf throws IllegalArgumentException on an unrecognized status string — presumably
// acceptable for data this code itself serialized; confirm no foreign producers write this field.
ExecutionStatus executionStatus = ExecutionStatus.valueOf(serializedJobExecutionPlan.
    get(SerializationConstants.EXECUTION_STATUS_KEY).getAsString());
// Attach the serialized SpecExecutor under its well-known key.
jobExecutionPlanJson.add(SerializationConstants.SPEC_EXECUTOR_KEY, specExecutorJson);
// Serialize the execution status by enum name so the deserializer can use ExecutionStatus.valueOf().
String executionStatus = jobExecutionPlan.getExecutionStatus().name();
jobExecutionPlanJson.addProperty(SerializationConstants.EXECUTION_STATUS_KEY, executionStatus);
/**
 * Poll the statuses of all currently running jobs and update each job's
 * {@link ExecutionStatus} on its {@link JobExecutionPlan}. Jobs that reached a
 * terminal state (COMPLETE, FAILED, or CANCELLED) are handed to
 * {@link #onJobFinish(DagNode)}; all other events leave the job RUNNING.
 *
 * <p>Fix: the previous javadoc claimed "@return List of JobStatuses" but this
 * method returns {@code void}; the stale tag has been removed.
 *
 * @throws IOException if acting on a finished job fails.
 */
private void pollJobStatuses() throws IOException {
  // Reset the finish-running failure bucket; it is rebuilt from this poll cycle's results.
  this.failedDagIdsFinishRunning.clear();
  for (DagNode<JobExecutionPlan> node : this.jobToDag.keySet()) {
    // Time each individual status poll and feed it into the instrumentation timer.
    long pollStartTime = System.nanoTime();
    JobStatus jobStatus = pollJobStatus(node);
    Instrumented.updateTimer(this.jobStatusPolledTimer, System.nanoTime() - pollStartTime, TimeUnit.NANOSECONDS);
    Preconditions.checkNotNull(jobStatus, "Received null job status for a running job " + DagManagerUtils.getJobName(node));
    JobExecutionPlan jobExecutionPlan = DagManagerUtils.getJobExecutionPlan(node);
    // Event names are ExecutionStatus enum names (statically imported valueOf).
    ExecutionStatus status = valueOf(jobStatus.getEventName());
    switch (status) {
      case COMPLETE:
        jobExecutionPlan.setExecutionStatus(COMPLETE);
        onJobFinish(node);
        break;
      case FAILED:
      case CANCELLED:
        // A cancelled job is treated as a failure for dag bookkeeping purposes.
        jobExecutionPlan.setExecutionStatus(FAILED);
        onJobFinish(node);
        break;
      default:
        // Any non-terminal event keeps the job in the RUNNING state.
        jobExecutionPlan.setExecutionStatus(RUNNING);
        break;
    }
  }
}
// Fixture: two COMPLETE jobs (job1, job2) in the same flow execution (flowExecutionId 0).
org.apache.gobblin.service.monitoring.JobStatus js1 = org.apache.gobblin.service.monitoring.JobStatus.builder().flowGroup("fgroup1")
    .flowName("flow1").jobGroup("jgroup1").jobName("job1").startTime(1000L).endTime(5000L)
    .eventName(ExecutionStatus.COMPLETE.name()).flowExecutionId(0).message("Test message 1")
    .processedCount(100).jobExecutionId(1).lowWatermark("watermark:1").highWatermark("watermark:2").build();
org.apache.gobblin.service.monitoring.JobStatus js2 = org.apache.gobblin.service.monitoring.JobStatus.builder().flowGroup("fgroup1")
    .flowName("flow1").jobGroup("jgroup1").jobName("job2").startTime(2000L).endTime(6000L)
    .eventName(ExecutionStatus.COMPLETE.name()).flowExecutionId(0).message("Test message 2")
    .processedCount(200).jobExecutionId(2).lowWatermark("watermark:2").highWatermark("watermark:3").build();
// Both statuses are returned together for this flow execution.
List<org.apache.gobblin.service.monitoring.JobStatus> jobStatusList = Lists.newArrayList(js1, js2);
// NOTE(review): fragment of a larger builder chain — the receiver and the tail of the
// .setJobState(...) call lie outside this view. Maps a queried JobStatus onto the REST model.
.setExecutionEndTime(queriedJobStatus.getEndTime())
.setProcessedCount(queriedJobStatus.getProcessedCount()))
// Event names round-trip through ExecutionStatus enum names.
.setExecutionStatus(ExecutionStatus.valueOf(queriedJobStatus.getEventName()))
.setMessage(queriedJobStatus.getMessage())
.setJobState(new JobState().setLowWatermark(queriedJobStatus.getLowWatermark()).
// Fixture: the same job (job1) completed in two different flow executions (ids 0 and 1);
// only js1 (execution 0) is placed in jobStatusList1.
org.apache.gobblin.service.monitoring.JobStatus js1 = org.apache.gobblin.service.monitoring.JobStatus.builder().flowGroup("fgroup1")
    .flowName("flow1").jobGroup("jgroup1").jobName("job1").startTime(1000L).endTime(5000L)
    .eventName(ExecutionStatus.COMPLETE.name()).flowExecutionId(0).message("Test message 1")
    .processedCount(100).jobExecutionId(1).lowWatermark("watermark:1").highWatermark("watermark:2").build();
org.apache.gobblin.service.monitoring.JobStatus js2 = org.apache.gobblin.service.monitoring.JobStatus.builder().flowGroup("fgroup1")
    .flowName("flow1").jobGroup("jgroup1").jobName("job1").startTime(2000L).endTime(6000L)
    .eventName(ExecutionStatus.COMPLETE.name()).flowExecutionId(1).message("Test message 2")
    .processedCount(200).jobExecutionId(2).lowWatermark("watermark:2").highWatermark("watermark:3").build();
List<org.apache.gobblin.service.monitoring.JobStatus> jobStatusList1 = Lists.newArrayList(js1);
// Extract the serialized JobSpec and SpecExecutor components of the plan.
JsonObject jobSpecJson = (JsonObject) serializedJobExecutionPlan.get(SerializationConstants.JOB_SPEC_KEY);
JsonObject specExecutorJson = (JsonObject) serializedJobExecutionPlan.get(SerializationConstants.SPEC_EXECUTOR_KEY);
// Status was written via Enum.name(), so ExecutionStatus.valueOf() restores it.
ExecutionStatus executionStatus = ExecutionStatus.valueOf(serializedJobExecutionPlan.
    get(SerializationConstants.EXECUTION_STATUS_KEY).getAsString());
// Fixture: job1 still RUNNING, job2 COMPLETE, both in flow execution 0.
org.apache.gobblin.service.monitoring.JobStatus js1 = org.apache.gobblin.service.monitoring.JobStatus.builder().flowGroup("fgroup1")
    .flowName("flow1").jobGroup("jgroup1").jobName("job1").startTime(1000L).endTime(5000L)
    .eventName(ExecutionStatus.RUNNING.name()).flowExecutionId(0).message("Test message 1").processedCount(100)
    .jobExecutionId(1).lowWatermark("watermark:1").highWatermark("watermark:2").build();
org.apache.gobblin.service.monitoring.JobStatus js2 = org.apache.gobblin.service.monitoring.JobStatus.builder().flowGroup("fgroup1")
    .flowName("flow1").jobGroup("jgroup1").jobName("job2").startTime(2000L).endTime(6000L)
    .eventName(ExecutionStatus.COMPLETE.name()).flowExecutionId(0).message("Test message 2")
    .processedCount(200).jobExecutionId(2).lowWatermark("watermark:2").highWatermark("watermark:3").build();
List<org.apache.gobblin.service.monitoring.JobStatus> jobStatusList = Lists.newArrayList(js1, js2);
/**
 * Poll the statuses of all currently running jobs, record the updated
 * {@link ExecutionStatus} on each {@link JobExecutionPlan}, and invoke
 * {@link #onJobFinish(DagNode)} for jobs whose latest event is terminal
 * (COMPLETE, FAILED, or CANCELLED).
 *
 * <p>Fix: dropped the stale "@return List of JobStatuses" javadoc tag — the
 * method returns {@code void}.
 *
 * @throws IOException if acting on a finished job fails.
 */
private void pollJobStatuses() throws IOException {
  // The finish-running failure set is rebuilt from scratch each polling cycle.
  this.failedDagIdsFinishRunning.clear();
  for (DagNode<JobExecutionPlan> node : this.jobToDag.keySet()) {
    // Measure the latency of each status poll for the instrumentation timer.
    long pollStartTime = System.nanoTime();
    JobStatus jobStatus = pollJobStatus(node);
    Instrumented.updateTimer(this.jobStatusPolledTimer, System.nanoTime() - pollStartTime, TimeUnit.NANOSECONDS);
    Preconditions.checkNotNull(jobStatus, "Received null job status for a running job " + DagManagerUtils.getJobName(node));
    JobExecutionPlan jobExecutionPlan = DagManagerUtils.getJobExecutionPlan(node);
    // Event names match ExecutionStatus enum names (valueOf is statically imported).
    ExecutionStatus status = valueOf(jobStatus.getEventName());
    switch (status) {
      case COMPLETE:
        jobExecutionPlan.setExecutionStatus(COMPLETE);
        onJobFinish(node);
        break;
      case FAILED:
      case CANCELLED:
        // Cancellation is folded into the FAILED state for dag bookkeeping.
        jobExecutionPlan.setExecutionStatus(FAILED);
        onJobFinish(node);
        break;
      default:
        // Non-terminal events leave the job RUNNING.
        jobExecutionPlan.setExecutionStatus(RUNNING);
        break;
    }
  }
}
// Fixture: job1 COMPLETE and job2 FAILED within the same flow execution (id 0).
org.apache.gobblin.service.monitoring.JobStatus js1 = org.apache.gobblin.service.monitoring.JobStatus.builder().flowGroup("fgroup1")
    .flowName("flow1").jobGroup("jgroup1").jobName("job1").startTime(1000L).endTime(5000L)
    .eventName(ExecutionStatus.COMPLETE.name()).flowExecutionId(0).message("Test message 1")
    .processedCount(100).jobExecutionId(1).lowWatermark("watermark:1").highWatermark("watermark:2").build();
org.apache.gobblin.service.monitoring.JobStatus js2 = org.apache.gobblin.service.monitoring.JobStatus.builder().flowGroup("fgroup1")
    .flowName("flow1").jobGroup("jgroup1").jobName("job2").startTime(2000L).endTime(6000L)
    .eventName(ExecutionStatus.FAILED.name()).flowExecutionId(0).message("Test message 2")
    .processedCount(200).jobExecutionId(2).lowWatermark("watermark:2").highWatermark("watermark:3").build();
List<org.apache.gobblin.service.monitoring.JobStatus> jobStatusList = Lists.newArrayList(js1, js2);
/**
 * Method that defines the actions to be performed when a job finishes either successfully or with failure.
 * This method updates the state of the dag and performs clean up actions as necessary.
 */
private void onJobFinish(DagNode<JobExecutionPlan> dagNode) throws IOException {
  Dag<JobExecutionPlan> dag = this.jobToDag.get(dagNode);
  String dagId = DagManagerUtils.generateDagId(dag);
  String jobName = DagManagerUtils.getJobName(dagNode);
  ExecutionStatus jobStatus = DagManagerUtils.getExecutionStatus(dagNode);
  log.info("Job {} of Dag {} has finished with status {}", jobName, dagId, jobStatus.name());
  // The job is no longer running: drop its persisted job state before handling the outcome.
  deleteJobState(dagId, dagNode);
  if (jobStatus == COMPLETE) {
    // On success, submit the next eligible job(s) in the dag.
    submitNext(dagId);
  } else if (jobStatus == FAILED) {
    // On failure, bucket the dag id according to its configured failure-handling option.
    if (DagManagerUtils.getFailureOption(dag) == FailureOption.FINISH_RUNNING) {
      this.failedDagIdsFinishRunning.add(dagId);
    } else {
      this.failedDagIdsFinishAllPossible.add(dagId);
    }
  }
}
// Store the serialized SpecExecutor under its dedicated key.
jobExecutionPlanJson.add(SerializationConstants.SPEC_EXECUTOR_KEY, specExecutorJson);
// Persist the execution status by enum name; the deserializer reads it back with valueOf().
String executionStatus = jobExecutionPlan.getExecutionStatus().name();
jobExecutionPlanJson.addProperty(SerializationConstants.EXECUTION_STATUS_KEY, executionStatus);