@Override
public void start()
{
    // Submits a single (non-recurring) run via schedule(); presumably the job
    // re-schedules itself after each execution, since the delay field is named
    // "recurringPeriodMillis" yet scheduleRecurring is not used -- TODO confirm.
    handle = scheduler.schedule( Group.CHECKPOINT, job, recurringPeriodMillis, MILLISECONDS );
}
// Closes the job scheduler (if one exists) and clears the field.
// Wraps any close failure in a RuntimeException carrying the given message;
// note the field stays set when close() throws.
private void closeJobScheduler( String errorMessage )
{
    if ( jobScheduler == null )
    {
        return;
    }
    try
    {
        jobScheduler.close();
    }
    catch ( Exception e )
    {
        // Preserve the original cause for diagnostics.
        throw new RuntimeException( errorMessage, e );
    }
    jobScheduler = null;
}
@Override
public void start()
{
    // A non-positive interval disables transaction timeout monitoring entirely.
    if ( checkIntervalMillis <= 0 )
    {
        return;
    }
    monitorJobHandle = scheduler.scheduleRecurring( Group.TRANSACTION_TIMEOUT_MONITOR,
            kernelTransactionMonitor, checkIntervalMillis, TimeUnit.MILLISECONDS );
}
// Starts TTL processing when enabled via configuration: one initial job to
// create the TTL index, then a recurring job that expires nodes in batches.
public void start()
{
    // TTL is opt-in; bail out when the feature is disabled.
    boolean ttlEnabled = Util.toBoolean( ApocConfiguration.get( "ttl.enabled", null ) );
    if ( !ttlEnabled )
    {
        return;
    }

    long scheduleSeconds = Util.toLong( ApocConfiguration.get( "ttl.schedule", DEFAULT_SCHEDULE ) );

    // One-shot index creation, fired at 80% of the schedule so the index
    // exists before the first expiry sweep runs.
    ttlIndexJobHandle = scheduler.schedule( TTL_GROUP, this::createTTLIndex,
            (int) ( scheduleSeconds * 0.8 ), TimeUnit.SECONDS );

    long batchLimit = Util.toLong( ApocConfiguration.get( "ttl.limit", 1000L ) );
    // Recurring expiry sweep: initial delay and period are both the configured schedule.
    ttlJobHandle = scheduler.scheduleRecurring( TTL_GROUP, () -> expireNodes( batchLimit ),
            scheduleSeconds, scheduleSeconds, TimeUnit.SECONDS );
}
@Override
public void init()
{
    // Obtain the thread factory for file-watcher threads at init time.
    fileWatchers = jobScheduler.threadFactory( Group.FILE_WATCHER );
}
/** Delegates executor lookup for the given group to the wrapped scheduler. */
@Override
public Executor executor( Group group )
{
    return delegate.executor( group );
}
/** Forwards the top-level thread-group name to the wrapped scheduler. */
@Override
public void setTopLevelGroupName( String name )
{
    delegate.setTopLevelGroupName( name );
}
/** Delegates thread-factory lookup for the given group to the wrapped scheduler. */
@Override
public ThreadFactory threadFactory( Group group )
{
    return delegate.threadFactory( group );
}
// Convenience overload: derives the rotation executor from the scheduler's
// LOG_ROTATION group and forwards to the executor-based variant.
public Builder withRotation( long internalLogRotationThreshold, long internalLogRotationDelay,
        int maxInternalLogArchives, JobScheduler jobScheduler )
{
    return withRotation( internalLogRotationThreshold, internalLogRotationDelay,
            maxInternalLogArchives, jobScheduler.executor( Group.LOG_ROTATION ) );
}
// Label scheduler threads with this core member's identity for diagnostics.
platformModule.jobScheduler.setTopLevelGroupName( "Core " + myself );
@Override
public void start()
{
    // Kick off a one-shot sweep of collected feature-usage data, one day out.
    featureDecayJob = scheduler.schedule( Group.UDC, get( UsageDataKeys.features )::sweep, 1, DAYS );
}
}
@Override
public void start()
{
    // Run storage maintenance once per second for the lifetime of this component.
    jobHandle = scheduler.scheduleRecurring( Group.STORAGE_MAINTENANCE, this::maintenance,
            1, TimeUnit.SECONDS );
}
@Override
public void start()
{
    // Build the Bolt worker pool; worker thread names get the connector id appended.
    threadPool = executorFactory.create( corePoolSize, maxPoolSize, keepAlive, queueSize, true,
            new NameAppendingThreadFactory( connector,
                    scheduler.threadFactory( Group.BOLT_WORKER ) ) );
}
@After
public void tearDown() throws Exception
{
    // Release scheduler threads so they do not leak between tests.
    jobScheduler.close();
}
// Wires each deferred executor to the real executor of its associated group.
private void startDeferredExecutors( JobScheduler jobScheduler,
        Iterable<Pair<DeferredExecutor,Group>> deferredExecutors )
{
    for ( Pair<DeferredExecutor,Group> pair : deferredExecutors )
    {
        pair.first().satisfyWith( jobScheduler.executor( pair.other() ) );
    }
}
// Label scheduler threads with this read replica's identity for diagnostics.
platformModule.jobScheduler.setTopLevelGroupName( "ReadReplica " + myself );
@Override
public void execute( Runnable runnable )
{
    // Admit at most one in-flight task: the latch flips to true on successful
    // submission and is reset in a finally block once the task completes.
    // Tasks arriving while one is in flight are silently dropped.
    if ( !latch.compareAndSet( false, true ) )
    {
        return;
    }
    jobScheduler.schedule( group, () ->
    {
        try
        {
            runnable.run();
        }
        finally
        {
            latch.set( false );
        }
    } );
}
}
// Starts background index sampling, if enabled: re-samples updated indexes
// every ten seconds.
public void start()
{
    if ( !backgroundSampling )
    {
        return;
    }
    Runnable sampler = () -> sampleIndexes( BACKGROUND_REBUILD_UPDATED );
    backgroundSamplingHandle = scheduler.scheduleRecurring( Group.INDEX_SAMPLING, sampler, 10, SECONDS );
}
// Stubs the mocked scheduler so that a threadFactory lookup for any group
// resolves to the JDK default thread factory.
@Before public void setup() { when( jobScheduler.threadFactory( any() ) ).thenReturn( Executors.defaultThreadFactory() ); }
@After
public void tearDown() throws Exception
{
    // Shut the scheduler down after each test to stop its worker threads.
    jobScheduler.close();
}