@Test
public void testIsUnsuccessful() {
    // FAILED is the only status in this set that reports unsuccessful.
    assertTrue(BatchStatus.FAILED.isUnsuccessful());
    for (BatchStatus status : new BatchStatus[] { BatchStatus.COMPLETED, BatchStatus.STARTED, BatchStatus.STARTING }) {
        assertFalse(status.isUnsuccessful());
    }
}
/** * Delegate execution to the {@link PartitionHandler} provided. The * {@link StepExecution} passed in here becomes the parent or master * execution for the partition, summarising the status on exit of the * logical grouping of work carried out by the {@link PartitionHandler}. The * individual step executions and their input parameters (through * {@link ExecutionContext}) for the partition elements are provided by the * {@link StepExecutionSplitter}. * * @param stepExecution the master step execution for the partition * * @see Step#execute(StepExecution) */ @Override protected void doExecute(StepExecution stepExecution) throws Exception { stepExecution.getExecutionContext().put(STEP_TYPE_KEY, this.getClass().getName()); // Wait for task completion and then aggregate the results Collection<StepExecution> executions = partitionHandler.handle(stepExecutionSplitter, stepExecution); stepExecution.upgradeStatus(BatchStatus.COMPLETED); stepExecutionAggregator.aggregate(stepExecution, executions); // If anything failed or had a problem we need to crap out if (stepExecution.getStatus().isUnsuccessful()) { throw new JobExecutionException("Partition handler returned an unsuccessful step"); } }
if (jobExecution.getStatus().isUnsuccessful()) {
stepExecutionAggregator.aggregate(stepExecution, stepExecutions); if (stepExecution.getStatus().isUnsuccessful()) { if (hasReducer) { reducer.rollbackPartitionedStep();
/**
 * Counts how many step executions of the configured step ended with an
 * unsuccessful {@link BatchStatus}, paging through the repository in
 * fixed-size chunks until an empty page is returned.
 *
 * @return the number of unsuccessful step executions
 */
public int getFailureCount() {
    final int pageSize = 100;
    int failures = 0;
    int offset = 0;
    Collection<StepExecution> page;
    do {
        page = jobService.listStepExecutionsForStep(jobName, stepName, offset, pageSize);
        offset += pageSize;
        for (StepExecution execution : page) {
            if (execution.getStatus().isUnsuccessful()) {
                failures++;
            }
        }
    } while (!page.isEmpty());
    return failures;
}
/**
 * Counts how many executions of the configured job ended with an
 * unsuccessful {@link BatchStatus}, paging through the repository in
 * fixed-size chunks until an empty page is returned.
 *
 * @return the number of unsuccessful job executions
 * @throws IllegalStateException if the job cannot be located
 */
public int getFailureCount() {
    final int pageSize = 100;
    int failures = 0;
    int offset = 0;
    Collection<JobExecution> page;
    do {
        try {
            page = jobService.listJobExecutionsForJob(jobName, offset, pageSize);
            offset += pageSize;
        }
        catch (NoSuchJobException e) {
            // A missing job at this point is a configuration error, not a recoverable condition.
            throw new IllegalStateException("Cannot locate job=" + jobName, e);
        }
        for (JobExecution execution : page) {
            if (execution.getStatus().isUnsuccessful()) {
                failures++;
            }
        }
    } while (!page.isEmpty());
    return failures;
}
/**
 * JMX metric: counts all job executions (across all jobs) whose
 * {@link BatchStatus} reports unsuccessful, paging through the repository
 * until an empty page is returned.
 *
 * @return the total number of unsuccessful job executions
 */
@ManagedMetric(metricType = MetricType.COUNTER, displayName = "Job Execution Failure Count")
public int getJobExecutionFailureCount() {
    final int pageSize = 100;
    int failures = 0;
    int offset = 0;
    Collection<JobExecution> page;
    do {
        page = jobService.listJobExecutions(offset, pageSize);
        offset += pageSize;
        for (JobExecution execution : page) {
            if (execution.getStatus().isUnsuccessful()) {
                failures++;
            }
        }
    } while (!page.isEmpty());
    return failures;
}
/**
 * Quartz entry point: starts a new instance of the batch job "mainJob" once
 * per generated date, passing the date as a job parameter. If any launched
 * execution ends unsuccessfully, a {@link JobExecutionException} is raised
 * so Quartz sees the failure.
 *
 * @param context the Quartz execution context (unused here)
 * @throws JobExecutionException if a launched batch execution is unsuccessful
 */
@Override
public void execute(JobExecutionContext context) throws JobExecutionException {
    try {
        List<LocalDate> dates = ExcelFileUtils.generateListOfDates();
        for (LocalDate date : dates) {
            JobParameters jobParameters = jobParametersExtractor.getJobParameters(null, null);
            JobParametersBuilder builder = new JobParametersBuilder(jobParameters);
            builder.addDate("date", Date.from(date.atStartOfDay(ZoneId.systemDefault()).toInstant()));
            JobExecution jobExecution = service.startNewInstance("mainJob", builder.toJobParameters());
            if (jobExecution.getStatus().isUnsuccessful()) {
                throw new JobExecutionException("Job 'mainJob' has FAILED!");
            }
        }
    } catch (JobExecutionException e) {
        // BUG FIX: previously the broad catch below swallowed this exception,
        // so Quartz never saw the failure. Rethrow it instead.
        throw e;
    } catch (Exception e) {
        LOGGER.error("Error when starting the job: ", e);
    }
} }
/**
 * Quartz entry point: starts a new instance of the batch job
 * "loadGtfsIntoDatabaseJob" once per generated date, passing the date as a
 * job parameter. If any launched execution ends unsuccessfully, a
 * {@link JobExecutionException} is raised so Quartz sees the failure.
 *
 * @param context the Quartz execution context (unused here)
 * @throws JobExecutionException if a launched batch execution is unsuccessful
 */
@Override
public void execute(JobExecutionContext context) throws JobExecutionException {
    try {
        List<LocalDate> dates = ExcelFileUtils.generateListOfDates();
        for (LocalDate date : dates) {
            JobParameters jobParameters = jobParametersExtractor.getJobParameters(null, null);
            JobParametersBuilder builder = new JobParametersBuilder(jobParameters);
            builder.addDate("date", Date.from(date.atStartOfDay(ZoneId.systemDefault()).toInstant()));
            JobExecution jobExecution = service.startNewInstance("loadGtfsIntoDatabaseJob", builder.toJobParameters());
            if (jobExecution.getStatus().isUnsuccessful()) {
                // BUG FIX: message previously named the wrong job ('mainJob').
                throw new JobExecutionException("Job 'loadGtfsIntoDatabaseJob' has FAILED!");
            }
        }
    } catch (JobExecutionException e) {
        // BUG FIX: previously the broad catch below swallowed this exception,
        // so Quartz never saw the failure. Rethrow it instead.
        throw e;
    } catch (Exception e) {
        LOGGER.error("Error when starting the job: ", e);
    }
}
/** * Delegate execution to the {@link PartitionHandler} provided. The * {@link StepExecution} passed in here becomes the parent or master * execution for the partition, summarising the status on exit of the * logical grouping of work carried out by the {@link PartitionHandler}. The * individual step executions and their input parameters (through * {@link ExecutionContext}) for the partition elements are provided by the * {@link StepExecutionSplitter}. * * @param stepExecution the master step execution for the partition * * @see Step#execute(StepExecution) */ @Override protected void doExecute(StepExecution stepExecution) throws Exception { stepExecution.getExecutionContext().put(STEP_TYPE_KEY, this.getClass().getName()); // Wait for task completion and then aggregate the results Collection<StepExecution> executions = partitionHandler.handle(stepExecutionSplitter, stepExecution); stepExecution.upgradeStatus(BatchStatus.COMPLETED); stepExecutionAggregator.aggregate(stepExecution, executions); // If anything failed or had a problem we need to crap out if (stepExecution.getStatus().isUnsuccessful()) { throw new JobExecutionException("Partition handler returned an unsuccessful step"); } }
/** * Delegate execution to the {@link PartitionHandler} provided. The * {@link StepExecution} passed in here becomes the parent or master * execution for the partition, summarising the status on exit of the * logical grouping of work carried out by the {@link PartitionHandler}. The * individual step executions and their input parameters (through * {@link ExecutionContext}) for the partition elements are provided by the * {@link StepExecutionSplitter}. * * @param stepExecution the master step execution for the partition * * @see Step#execute(StepExecution) */ @Override protected void doExecute(StepExecution stepExecution) throws Exception { stepExecution.getExecutionContext().put(STEP_TYPE_KEY, this.getClass().getName()); // Wait for task completion and then aggregate the results Collection<StepExecution> executions = partitionHandler.handle(stepExecutionSplitter, stepExecution); stepExecution.upgradeStatus(BatchStatus.COMPLETED); stepExecutionAggregator.aggregate(stepExecution, executions); // If anything failed or had a problem we need to crap out if (stepExecution.getStatus().isUnsuccessful()) { throw new JobExecutionException("Partition handler returned an unsuccessful step"); } }
/** * Delegate execution to the {@link PartitionHandler} provided. The * {@link StepExecution} passed in here becomes the parent or master * execution for the partition, summarising the status on exit of the * logical grouping of work carried out by the {@link PartitionHandler}. The * individual step executions and their input parameters (through * {@link ExecutionContext}) for the partition elements are provided by the * {@link StepExecutionSplitter}. * * @param stepExecution the master step execution for the partition * * @see Step#execute(StepExecution) */ @Override protected void doExecute(StepExecution stepExecution) throws Exception { // Wait for task completion and then aggregate the results Collection<StepExecution> executions = partitionHandler.handle(stepExecutionSplitter, stepExecution); stepExecution.upgradeStatus(BatchStatus.COMPLETED); stepExecutionAggregator.aggregate(stepExecution, executions); // If anything failed or had a problem we need to crap out if (stepExecution.getStatus().isUnsuccessful()) { throw new JobExecutionException("Partition handler returned an unsuccessful step"); } }
/** * Delegate execution to the {@link PartitionHandler} provided. The * {@link StepExecution} passed in here becomes the parent or master * execution for the partition, summarising the status on exit of the * logical grouping of work carried out by the {@link PartitionHandler}. The * individual step executions and their input parameters (through * {@link ExecutionContext}) for the partition elements are provided by the * {@link StepExecutionSplitter}. * * @param stepExecution the master step execution for the partition * * @see Step#execute(StepExecution) */ @Override protected void doExecute(StepExecution stepExecution) throws Exception { // Wait for task completion and then aggregate the results Collection<StepExecution> executions = partitionHandler.handle(stepExecutionSplitter, stepExecution); stepExecution.upgradeStatus(BatchStatus.COMPLETED); stepExecutionAggregator.aggregate(stepExecution, executions); // If anything failed or had a problem we need to crap out if (stepExecution.getStatus().isUnsuccessful()) { throw new JobExecutionException("Partition handler returned an unsuccessful step"); } }
if (lastJobExecution != null) { BatchStatus status = lastJobExecution.getStatus(); if (status.isUnsuccessful() && status!=BatchStatus.ABANDONED) { restart = true;
private JobParameters createJobParametersWithIncrementerIfAvailable(String parameters, Job job) throws JobParametersNotFoundException { JobParameters jobParameters = jobParametersConverter .getJobParameters(PropertiesConverter.stringToProperties(parameters)); // use JobParametersIncrementer to create JobParameters if incrementer is set and only if the job is no restart if (job.getJobParametersIncrementer() != null) { JobExecution lastJobExecution = jobRepository.getLastJobExecution(job.getName(), jobParameters); boolean restart = false; // check if job failed before if (lastJobExecution != null) { BatchStatus status = lastJobExecution.getStatus(); if (status.isUnsuccessful() && status != BatchStatus.ABANDONED) { restart = true; } } // if it's not a restart, create new JobParameters with the incrementer if (!restart) { JobParameters nextParameters = getNextJobParameters(job); Map<String, JobParameter> map = new HashMap<String, JobParameter>(nextParameters.getParameters()); map.putAll(jobParameters.getParameters()); jobParameters = new JobParameters(map); } } return jobParameters; }
private JobParameters createJobParametersWithIncrementerIfAvailable(String parameters, Job job) throws JobParametersNotFoundException { JobParameters jobParameters = jobParametersConverter .getJobParameters(PropertiesConverter.stringToProperties(parameters)); // use JobParametersIncrementer to create JobParameters if incrementer is set and only if the job is no restart if (job.getJobParametersIncrementer() != null) { JobExecution lastJobExecution = jobRepository.getLastJobExecution(job.getName(), jobParameters); boolean restart = false; // check if job failed before if (lastJobExecution != null) { BatchStatus status = lastJobExecution.getStatus(); if (status.isUnsuccessful() && status != BatchStatus.ABANDONED) { restart = true; } } // if it's not a restart, create new JobParameters with the incrementer if (!restart) { JobParameters nextParameters = getNextJobParameters(job); Map<String, JobParameter> map = new HashMap<String, JobParameter>(nextParameters.getParameters()); map.putAll(jobParameters.getParameters()); jobParameters = new JobParameters(map); } } return jobParameters; }
if (jobExecution.getStatus().isUnsuccessful()) {
/** * Execute the job provided by delegating to the {@link JobLauncher} to * prevent duplicate executions. The job parameters will be generated by the * {@link JobParametersExtractor} provided (if any), otherwise empty. On a * restart, the job parameters will be the same as the last (failed) * execution. * * @see AbstractStep#doExecute(StepExecution) */ @Override protected void doExecute(StepExecution stepExecution) throws Exception { ExecutionContext executionContext = stepExecution.getExecutionContext(); JobParameters jobParameters; if (executionContext.containsKey(JOB_PARAMETERS_KEY)) { jobParameters = (JobParameters) executionContext.get(JOB_PARAMETERS_KEY); } else { jobParameters = jobParametersExtractor.getJobParameters(job, stepExecution); executionContext.put(JOB_PARAMETERS_KEY, jobParameters); } JobExecution jobExecution = jobLauncher.run(job, jobParameters); if (jobExecution.getStatus().isUnsuccessful()) { // AbstractStep will take care of the step execution status throw new UnexpectedJobExecutionException("Step failure: the delegate Job failed in JobStep."); } }
/** * Execute the job provided by delegating to the {@link JobLauncher} to * prevent duplicate executions. The job parameters will be generated by the * {@link JobParametersExtractor} provided (if any), otherwise empty. On a * restart, the job parameters will be the same as the last (failed) * execution. * * @see AbstractStep#doExecute(StepExecution) */ @Override protected void doExecute(StepExecution stepExecution) throws Exception { ExecutionContext executionContext = stepExecution.getExecutionContext(); JobParameters jobParameters; if (executionContext.containsKey(JOB_PARAMETERS_KEY)) { jobParameters = (JobParameters) executionContext.get(JOB_PARAMETERS_KEY); } else { jobParameters = jobParametersExtractor.getJobParameters(job, stepExecution); executionContext.put(JOB_PARAMETERS_KEY, jobParameters); } JobExecution jobExecution = jobLauncher.run(job, jobParameters); if (jobExecution.getStatus().isUnsuccessful()) { // AbstractStep will take care of the step execution status throw new UnexpectedJobExecutionException("Step failure: the delegate Job failed in JobStep."); } }
stepExecutionAggregator.aggregate(stepExecution, stepExecutions); if (stepExecution.getStatus().isUnsuccessful()) { if (hasReducer) { reducer.rollbackPartitionedStep();