/** * Schedules a purge job to trigger right now. This will not block - it schedules the job to trigger but immediately * returns. This method will ensure that no two data purge jobs will execute at the same time (Quartz will ensure * this since {@link DataPurgeJob} is an implementation of {@link org.quartz.StatefulJob}). * * @throws Exception if failed to schedule the data purge for immediate execution */ public static void purgeNow() throws Exception { // there should always be a DataPurgeJob defined with a job name as the same as this class' name // let's trigger that job now. this ensures the job is only ever run once, never concurrently // note that you can't call this method again until the data purge job finished; otherwise, // you'll get an exception saying there is already a trigger defined - this is what we want, you // shouldn't ask for more than one data purge job to execute now - you have to wait for it to finish. SchedulerLocal scheduler = LookupUtil.getSchedulerBean(); SimpleTrigger trigger = new SimpleTrigger("DataPurgeJobNow", DataPurgeJob.class.getName()); trigger.setJobName(DataPurgeJob.class.getName()); trigger.setJobGroup(DataPurgeJob.class.getName()); scheduler.scheduleJob(trigger); }
/** * Schedules a calc job to trigger right now. This will not block - it schedules the job to trigger but immediately * returns. This method will ensure that no two data calc jobs will execute at the same time (Quartz will ensure * this since {@link DataCalcJob} is an implementation of {@link org.quartz.StatefulJob}). * * @throws Exception if failed to schedule the data calc for immediate execution */ public static void calcNow() throws Exception { // there should always be a DataCalcJob defined with a job name as the same as this class' name // let's trigger that job now. this ensures the job is only ever run once, never concurrently // note that you can't call this method again until the data calc job finished; otherwise, // you'll get an exception saying there is already a trigger defined - this is what we want, you // shouldn't ask for more than one data calc job to execute now - you have to wait for it to finish. SchedulerLocal scheduler = LookupUtil.getSchedulerBean(); SimpleTrigger trigger = new SimpleTrigger("DataCalcJobNow", DataCalcJob.class.getName()); trigger.setJobName(DataCalcJob.class.getName()); trigger.setJobGroup(DataCalcJob.class.getName()); scheduler.scheduleJob(trigger); }
/**
 * Schedules a one-shot {@code AgentRequestFullAvailabilityJob} to run now for the
 * given agents. The job detail is registered lazily on first use; every call
 * attaches a uniquely-named trigger so repeated requests never collide. Scheduling
 * failures are logged, not propagated.
 *
 * @param agents the agents that should be asked for a full availability report
 */
private void scheduleAgentRequestFullAvailabilityJob(Collection<Agent> agents) {
    Scheduler quartz = LookupUtil.getSchedulerBean();
    try {
        final String jobName = "AgentRequestFullAvailabilityJob";
        final String jobGroup = "AgentRequestFullAvailabilityGroup";
        // Unique trigger name per invocation so concurrent requests can coexist.
        final String triggerName = "AgentRequestFullAvailabilityTrigger" + " - " + UUID.randomUUID().toString();

        SimpleTrigger trigger = new SimpleTrigger(triggerName, jobGroup, new Date());

        // The job pulls its own trigger coordinates and the externalized agent
        // list back out of this data map when it fires.
        JobDataMap data = new JobDataMap();
        data.put(AgentRequestFullAvailabilityJob.KEY_TRIGGER_NAME, triggerName);
        data.put(AgentRequestFullAvailabilityJob.KEY_TRIGGER_GROUP_NAME, jobGroup);
        AgentRequestFullAvailabilityJob.externalizeJobValues(data, AgentRequestFullAvailabilityJob.AGENTS, agents);

        trigger.setJobName(jobName);
        trigger.setJobGroup(jobGroup);
        trigger.setJobDataMap(data);

        if (isJobScheduled(quartz, jobName, jobGroup)) {
            // The job detail already exists - just hang the new trigger off it.
            quartz.scheduleJob(trigger);
        } else {
            // First use: register the job detail together with its trigger.
            JobDetail detail = new JobDetail(jobName, jobGroup, AgentRequestFullAvailabilityJob.class);
            quartz.scheduleJob(detail, trigger);
        }
    } catch (SchedulerException e) {
        LOG.error("Failed to schedule AgentRequestFullAvailabilityJob.", e);
    }
}
// Point the trigger at the default agent job/group and hand it to Quartz.
// NOTE(review): the trigger construction and scheduler lookup happen before
// this chunk - confirm against the full enclosing method.
simpleTrigger.setJobName(DEFAULT_AGENT_JOB);
simpleTrigger.setJobGroup(DEFAULT_AGENT_GROUP);
scheduler.scheduleJob(simpleTrigger);
+ instanceId + "_" + String.valueOf(dumId++), Scheduler.DEFAULT_RECOVERY_GROUP, new Date(firedTime));
// Aim the recovery trigger at the job that was in flight, keeping its priority.
// NOTE(review): the statement this chunk completes (presumably a recovery
// SimpleTrigger construction) begins before this view - confirm in full file.
rcvryTrig.setJobName(jobName);
rcvryTrig.setJobGroup(jobGroup);
rcvryTrig.setPriority(priority);
+ instanceId + "_" + String.valueOf(dumId++), Scheduler.DEFAULT_RECOVERY_GROUP, new Date(firedTime));
// Aim the recovery trigger at the job that was in flight, keeping its priority.
// NOTE(review): the statement this chunk completes (presumably a recovery
// SimpleTrigger construction) begins before this view - confirm in full file.
rcvryTrig.setJobName(jobName);
rcvryTrig.setJobGroup(jobGroup);
rcvryTrig.setPriority(priority);
AgentInventoryStatusUpdateJob.SERVERS_COMMA_LIST, servers); // completes a call started before this chunk
// Bind the trigger to the inventory-status-update job and attach the data map
// so the job can read the serialized server list back at fire time.
trigger.setJobName(DEFAULT_JOB_NAME);
trigger.setJobGroup(DEFAULT_JOB_GROUP);
trigger.setJobDataMap(jobDataMap);
firedTime); // closes a trigger construction begun before this chunk
// The recovery trigger reuses the wrapped job detail's name/group so it
// re-fires the exact job that was running when the scheduler went down.
recoveryTrigger.setJobName(jobWrapper.jobDetail.getName());
recoveryTrigger.setJobGroup(jobWrapper.jobDetail.getGroup());
recoveryTrigger
new Date(ftRec.getFireTimestamp())); // closes a trigger construction begun before this chunk
// Recovery trigger mirrors the fired-trigger record's volatility and target
// job, and fires immediately if it misfires (MISFIRE_INSTRUCTION_FIRE_NOW).
rcvryTrig.setVolatility(ftRec.isTriggerIsVolatile());
rcvryTrig.setJobName(jKey.getName());
rcvryTrig.setJobGroup(jKey.getGroup());
rcvryTrig.setMisfireInstruction(SimpleTrigger.MISFIRE_INSTRUCTION_FIRE_NOW);
new Date(ftRec.getFireTimestamp())); // closes a trigger construction begun before this chunk
// Recovery trigger mirrors the fired-trigger record's volatility and target
// job, and fires immediately if it misfires (MISFIRE_INSTRUCTION_FIRE_NOW).
rcvryTrig.setVolatility(ftRec.isTriggerIsVolatile());
rcvryTrig.setJobName(jKey.getName());
rcvryTrig.setJobGroup(jKey.getGroup());
rcvryTrig.setMisfireInstruction(SimpleTrigger.MISFIRE_INSTRUCTION_FIRE_NOW);