@Override
public void close() throws IOException {
  // Best-effort cancellation of any in-flight work; the superclass close()
  // must run regardless of whether cancellation throws.
  try {
    executeCancellation();
  } finally {
    super.close();
  }
}
// NOTE(review): garbled fragment — everything after the `throw new JobException(...)`
// is unreachable, and the braces do not balance. This looks like several pieces of the
// launcher's constructor/lock-acquisition/error-handling paths collapsed onto one line
// (`unlockJob(); throw e;` belongs to a catch block that is not visible here).
// Restore from the original source before editing; do not fix in place.
this.jobProps.putAll(jobProps); if (!tryLockJob(this.jobProps)) { throw new JobException(String.format("Previous instance of job %s is still running, skipping this scheduled run", this.jobProps.getProperty(ConfigurationKeys.JOB_NAME_KEY))); instanceBroker = createDefaultInstanceBroker(jobProps); this.eventSubmitter = buildEventSubmitter(metadataTags); unlockJob(); throw e;
// NOTE(review): garbled fragment — `cleanupStagingDataPerTask(jobState)` after the
// `return;` is unreachable and the if/else braces are unbalanced. The visible intent:
// bail out when the job has unfinished commit sequences; otherwise clean staging data
// either per-task or for the entire job. Recover the original structure before editing.
if (!canCleanStagingData(jobState)) { LOG.error("Job " + jobState.getJobName() + " has unfinished commit sequences. Will not clean up staging data."); return; cleanupStagingDataPerTask(jobState); } else { cleanupStagingDataForEntireJob(jobState);
// NOTE(review): heavily garbled fragment — multiple anonymous JobListenerAction bodies
// are missing their opening braces (`public void apply(...)` is immediately followed by
// statements), and a trailing `finally { try {` is cut off mid-block. The visible flow is
// the launcher's main sequence: JOB_PREPARE listeners -> unfinished-commit handling ->
// staging-data cleanup timer -> JOB_START listeners -> prepare/run work units ->
// finalize + commit + post-process -> JOB_CLEANUP timing in a finally. Restore the
// original launchJob() before making any change here.
notifyListeners(this.jobContext, jobListener, TimingEvent.LauncherTimings.JOB_PREPARE, new JobListenerAction() { @Override public void apply(JobListener jobListener, JobContext jobContext) executeUnfinishedCommitSequences(jobState.getJobName()); cleanLeftoverStagingData(workUnitStream, jobState); stagingDataCleanTimer.stop(this.eventMetadataGenerator.getMetadata(this.jobContext, EventName.MR_STAGING_DATA_CLEAN)); notifyListeners(this.jobContext, jobListener, TimingEvent.LauncherTimings.JOB_START, new JobListenerAction() { @Override public void apply(JobListener jobListener, JobContext jobContext) workUnitStream = prepareWorkUnits(workUnitStream, jobState); runWorkUnitStream(workUnitStream); jobRunTimer.stop(this.eventMetadataGenerator.getMetadata(this.jobContext,EventName.JOB_RUN)); this.jobContext.finalizeJobStateBeforeCommit(); this.jobContext.commit(); postProcessJobState(jobState); jobCommitTimer.stop(this.eventMetadataGenerator.getMetadata(this.jobContext, EventName.JOB_COMMIT)); } finally { try { TimingEvent jobCleanupTimer = this.eventSubmitter.getTimingEvent(TimingEvent.LauncherTimings.JOB_CLEANUP); cleanupStagingData(jobState); jobCleanupTimer.stop(this.eventMetadataGenerator.getMetadata(this.jobContext, EventName.JOB_CLEANUP));
// Connectivity-loss callback: treats loss of the underlying connection as a
// cancellation request by invoking executeCancellation(). The trailing `}));` closes
// an enclosing anonymous-class instantiation and registration call that is not
// visible on this line — this is a fragment of a larger expression.
@Override public void onLost() { executeCancellation(); } }));
// NOTE(review): unterminated fragment — this guard bails out of staging-data cleanup
// when the job still has unfinished commit sequences, but the enclosing braces are
// cut off at the end of this line.
if (!canCleanStagingData(jobState)) { LOG.error("Job " + jobState.getJobName() + " has unfinished commit sequences. Will not clean up staging data."); return;
// NOTE(review): heavily garbled fragment — anonymous JobListenerAction bodies are
// missing opening braces, and a `@Override` with no following method appears mid-line.
// Visible flow: JOB_PREPARE listeners -> unfinished-commit handling -> "no work units"
// short-circuit (mark job COMMITTED, fire JOB_COMPLETE) -> staging-data cleanup ->
// JOB_START listeners -> prepare/run work units -> finalize/commit/post-process.
// Restore the original launchJob() before making any change here.
notifyListeners(this.jobContext, jobListener, TimingEvent.LauncherTimings.JOB_PREPARE, new JobListenerAction() { @Override public void apply(JobListener jobListener, JobContext jobContext) executeUnfinishedCommitSequences(jobState.getJobName()); LOG.warn("No work units have been created for job " + jobId); jobState.setState(JobState.RunningState.COMMITTED); notifyListeners(this.jobContext, jobListener, TimingEvent.LauncherTimings.JOB_COMPLETE, new JobListenerAction() { @Override cleanLeftoverStagingData(workUnitStream, jobState); stagingDataCleanTimer.stop(this.eventMetadataGenerator.getMetadata(this.jobContext, EventName.MR_STAGING_DATA_CLEAN)); notifyListeners(this.jobContext, jobListener, TimingEvent.LauncherTimings.JOB_START, new JobListenerAction() { @Override public void apply(JobListener jobListener, JobContext jobContext) workUnitStream = prepareWorkUnits(workUnitStream, jobState); runWorkUnitStream(workUnitStream); jobRunTimer.stop(this.eventMetadataGenerator.getMetadata(this.jobContext,EventName.JOB_RUN)); this.jobContext.finalizeJobStateBeforeCommit(); this.jobContext.commit(); postProcessJobState(jobState); jobCommitTimer.stop(this.eventMetadataGenerator.getMetadata(this.jobContext, EventName.JOB_COMMIT));
// Cancellation-executor thread body: blocks until a cancellation request is signalled,
// performs the cancellation, then publishes "cancellation executed" to any waiters.
// - Waits on `cancellationRequest` in a loop (guards against spurious wakeups) until
//   `cancellationRequested` is set by the requesting thread.
// - On InterruptedException, re-asserts the thread's interrupt status rather than
//   swallowing it.
// - NOTE(review): the second synchronized block runs even when the wait was interrupted
//   before any cancellation was requested, so `cancellationExecuted` becomes true and
//   the job state is marked CANCELLED on that path too — confirm this is intended.
// The trailing `});` closes the enclosing anonymous-class/thread construction, which is
// not visible on this line.
@Override public void run() { synchronized (AbstractJobLauncher.this.cancellationRequest) { try { while (!AbstractJobLauncher.this.cancellationRequested) { // Wait for a cancellation request to arrive AbstractJobLauncher.this.cancellationRequest.wait(); } LOG.info("Cancellation has been requested for job " + AbstractJobLauncher.this.jobContext.getJobId()); executeCancellation(); LOG.info("Cancellation has been executed for job " + AbstractJobLauncher.this.jobContext.getJobId()); } catch (InterruptedException ie) { Thread.currentThread().interrupt(); } } synchronized (AbstractJobLauncher.this.cancellationExecution) { AbstractJobLauncher.this.cancellationExecuted = true; AbstractJobLauncher.this.jobContext.getJobState().setState(JobState.RunningState.CANCELLED); // Notify that the cancellation has been executed AbstractJobLauncher.this.cancellationExecution.notifyAll(); } } });
// NOTE(review): unterminated fragment (duplicate of an earlier line) — bails out of
// staging-data cleanup when unfinished commit sequences remain; enclosing braces are
// cut off at the end of this line.
if (!canCleanStagingData(jobState)) { LOG.error("Job " + jobState.getJobName() + " has unfinished commit sequences. Will not clean up staging data."); return;
// NOTE(review): garbled fragment (duplicate of an earlier line) — statements after the
// `throw new JobException(...)` are unreachable and braces do not balance; pieces of
// lock-acquisition and error-handling paths appear collapsed onto one line
// (`unlockJob(); throw e;` belongs to a catch block not visible here). Restore from
// the original source before editing.
this.jobProps.putAll(jobProps); if (!tryLockJob(this.jobProps)) { throw new JobException(String.format("Previous instance of job %s is still running, skipping this scheduled run", this.jobProps.getProperty(ConfigurationKeys.JOB_NAME_KEY))); instanceBroker = createDefaultInstanceBroker(jobProps); this.eventSubmitter = buildEventSubmitter(metadataTags); unlockJob(); throw e;
@Override public void close() throws IOException { try { // Stop all dependent services this.serviceManager.stopAsync().awaitStopped(5, TimeUnit.SECONDS); } catch (TimeoutException te) { LOG.warn("Timed out while waiting for the service manager to be stopped", te); } finally { super.close(); } }
// NOTE(review): garbled fragment (duplicate of an earlier line) — the call after
// `return;` is unreachable and the if/else braces are unbalanced. Visible intent: skip
// cleanup when unfinished commit sequences exist; otherwise clean staging data per-task
// or for the entire job. Recover the original structure before editing.
if (!canCleanStagingData(jobState)) { LOG.error("Job " + jobState.getJobName() + " has unfinished commit sequences. Will not clean up staging data."); return; cleanupStagingDataPerTask(jobState); } else { cleanupStagingDataForEntireJob(jobState);
// Connectivity-loss callback (duplicate of an earlier line): treats connection loss as
// a cancellation request via executeCancellation(). The trailing `}));` closes an
// enclosing anonymous-class instantiation and registration call not visible here.
@Override public void onLost() { executeCancellation(); } }));
@Override
public void close() throws IOException {
  try {
    // Kill the underlying Hadoop MR job if it was submitted and is still running.
    if (this.hadoopJobSubmitted && !this.job.isComplete()) {
      LOG.info("Killing the Hadoop MR job for job " + this.jobContext.getJobId());
      this.job.killJob();
    }
  } finally {
    try {
      cleanUpWorkingDirectory();
    } finally {
      // Fix: previously `super.close(); fs.close();` ran sequentially in the same
      // finally block, so an exception from super.close() skipped fs.close() and
      // leaked the FileSystem handle. Nesting the finally guarantees both run; the
      // first exception thrown still propagates to the caller.
      try {
        super.close();
      } finally {
        fs.close();
      }
    }
  }
}
// Cancellation-executor thread body (duplicate of an earlier line): waits for a
// cancellation request, executes it, then notifies waiters that cancellation completed.
// - Loops on `cancellationRequest.wait()` to guard against spurious wakeups.
// - Re-asserts the interrupt flag on InterruptedException instead of swallowing it.
// - NOTE(review): the second synchronized block also runs on the interrupted path, so
//   `cancellationExecuted` and the CANCELLED job state are set even if no cancellation
//   was actually performed — confirm this is intended.
// The trailing `});` closes the enclosing anonymous-class/thread construction, not
// visible on this line.
@Override public void run() { synchronized (AbstractJobLauncher.this.cancellationRequest) { try { while (!AbstractJobLauncher.this.cancellationRequested) { // Wait for a cancellation request to arrive AbstractJobLauncher.this.cancellationRequest.wait(); } LOG.info("Cancellation has been requested for job " + AbstractJobLauncher.this.jobContext.getJobId()); executeCancellation(); LOG.info("Cancellation has been executed for job " + AbstractJobLauncher.this.jobContext.getJobId()); } catch (InterruptedException ie) { Thread.currentThread().interrupt(); } } synchronized (AbstractJobLauncher.this.cancellationExecution) { AbstractJobLauncher.this.cancellationExecuted = true; AbstractJobLauncher.this.jobContext.getJobState().setState(JobState.RunningState.CANCELLED); // Notify that the cancellation has been executed AbstractJobLauncher.this.cancellationExecution.notifyAll(); } } });
@Override
public void close() throws IOException {
  // Attempt to cancel outstanding work first; closing the superclass resources
  // happens unconditionally in the finally.
  try {
    executeCancellation();
  } finally {
    super.close();
  }
}
@Override public void close() throws IOException { try { // Stop all dependent services this.serviceManager.stopAsync().awaitStopped(5, TimeUnit.SECONDS); } catch (TimeoutException te) { LOG.warn("Timed out while waiting for the service manager to be stopped", te); } finally { super.close(); } }
@Override
public void close() throws IOException {
  try {
    // Kill the underlying Hadoop MR job if it was submitted and is still running.
    if (this.hadoopJobSubmitted && !this.job.isComplete()) {
      LOG.info("Killing the Hadoop MR job for job " + this.jobContext.getJobId());
      this.job.killJob();
    }
  } finally {
    try {
      cleanUpWorkingDirectory();
    } finally {
      // Fix: the original ran `super.close(); fs.close();` back to back, so an
      // exception from super.close() skipped fs.close() and leaked the FileSystem
      // handle. Nested finally guarantees both run; the first exception propagates.
      try {
        super.close();
      } finally {
        fs.close();
      }
    }
  }
}