@Override
public void start()
{
    // A non-positive check interval means transaction-timeout monitoring is disabled.
    if ( checkIntervalMillis <= 0 )
    {
        return;
    }
    // Keep the handle so the recurring monitor job can be cancelled later.
    monitorJobHandle = scheduler.scheduleRecurring( Group.TRANSACTION_TIMEOUT_MONITOR,
            kernelTransactionMonitor, checkIntervalMillis, TimeUnit.MILLISECONDS );
}
@Override
public void start()
{
    // Kick off the background maintenance task, repeating once per second;
    // the handle is retained for later cancellation.
    final Runnable maintenanceTask = this::maintenance;
    jobHandle = scheduler.scheduleRecurring( Group.STORAGE_MAINTENANCE, maintenanceTask, 1, TimeUnit.SECONDS );
}
public void start()
{
    // Nothing to schedule when background sampling is turned off.
    if ( !backgroundSampling )
    {
        return;
    }
    // Re-sample updated indexes every 10 seconds in the background.
    backgroundSamplingHandle = scheduler.scheduleRecurring( Group.INDEX_SAMPLING,
            () -> sampleIndexes( BACKGROUND_REBUILD_UPDATED ), 10, SECONDS );
}
@Override
public JobHandle scheduleRecurring( Group group, Runnable runnable, long initialDelay, long period, TimeUnit timeUnit )
{
    // Count the scheduling call, then hand straight off to the real scheduler.
    counter.incrementAndGet();
    return delegate.scheduleRecurring( group, runnable, initialDelay, period, timeUnit );
}
@Override
public JobHandle scheduleRecurring( Group group, Runnable runnable, long period, TimeUnit timeUnit )
{
    // Count the scheduling call, then hand straight off to the real scheduler.
    counter.incrementAndGet();
    return delegate.scheduleRecurring( group, runnable, period, timeUnit );
}
@Test
void startJobTransactionMonitor()
{
    // Stub the scheduler so it returns a mock handle for the monitor job.
    JobHandle jobHandle = Mockito.mock( JobHandle.class );
    when( jobScheduler.scheduleRecurring( eq( Group.TRANSACTION_TIMEOUT_MONITOR ), eq( transactionMonitor ),
            anyLong(), any( TimeUnit.class ) ) ).thenReturn( jobHandle );

    KernelTransactionMonitorScheduler monitorScheduler =
            new KernelTransactionMonitorScheduler( transactionMonitor, jobScheduler, 7 );

    // start() must schedule the monitor with the configured 7 ms interval...
    monitorScheduler.start();
    verify( jobScheduler ).scheduleRecurring( Group.TRANSACTION_TIMEOUT_MONITOR, transactionMonitor,
            7, TimeUnit.MILLISECONDS );

    // ...and stop() must cancel the returned handle.
    monitorScheduler.stop();
    verify( jobHandle ).cancel( true );
}
}
@Test
void scheduleRecurringMonitorJobIfConfigured()
{
    // A positive check interval (1 ms) must result in a recurring monitor job.
    KernelTransactionMonitorScheduler transactionMonitorScheduler = createMonitorScheduler( 1 );
    transactionMonitorScheduler.start();
    verify( scheduler ).scheduleRecurring( Group.TRANSACTION_TIMEOUT_MONITOR, transactionTimeoutMonitor,
            1, TimeUnit.MILLISECONDS );
}
private TransactionFacade createTransactionalActions() { final long timeoutMillis = getTransactionTimeoutMillis(); final Clock clock = Clocks.systemClock(); transactionRegistry = new TransactionHandleRegistry( clock, timeoutMillis, userLogProvider ); // ensure that this is > 0 long runEvery = round( timeoutMillis / 2.0 ); resolveDependency( JobScheduler.class ).scheduleRecurring( Group.SERVER_TRANSACTION_TIMEOUT, () -> { long maxAge = clock.millis() - timeoutMillis; transactionRegistry.rollbackSuspendedTransactionsIdleSince( maxAge ); }, runEvery, MILLISECONDS ); return new TransactionFacade( new TransitionalPeriodTransactionMessContainer( database.getGraph() ), resolveDependency( QueryExecutionEngine.class ), resolveDependency( GraphDatabaseQueryService.class ), transactionRegistry, userLogProvider ); }
// Starts the TTL (time-to-live) expiration machinery, if enabled via "ttl.enabled".
public void start() {
    boolean enabled = Util.toBoolean(ApocConfiguration.get("ttl.enabled", null));
    if (!enabled) return;
    // Period, in seconds, between expiration sweeps ("ttl.schedule", falling back to DEFAULT_SCHEDULE).
    long ttlSchedule = Util.toLong(ApocConfiguration.get("ttl.schedule", DEFAULT_SCHEDULE));
    // One-shot job that creates the TTL index at 80% of the schedule, i.e. before the first sweep.
    // NOTE(review): the (int) cast truncates for very large schedules — confirm expected config range.
    ttlIndexJobHandle = scheduler.schedule(TTL_GROUP, this::createTTLIndex, (int)(ttlSchedule*0.8), TimeUnit.SECONDS);
    // Upper bound on nodes expired per sweep ("ttl.limit", default 1000).
    long limit = Util.toLong(ApocConfiguration.get("ttl.limit", 1000L));
    // Recurring sweep: first run after one full schedule period, then repeating every period.
    ttlJobHandle = scheduler.scheduleRecurring(TTL_GROUP, () -> expireNodes(limit), ttlSchedule, ttlSchedule, TimeUnit.SECONDS);
}
// Schedules the recurring transaction-timeout monitor. A non-positive check
// interval leaves monitoring disabled and monitorJobHandle unset.
@Override public void start() { if ( checkIntervalMillis > 0 ) { monitorJobHandle = scheduler.scheduleRecurring( Group.TRANSACTION_TIMEOUT_MONITOR, kernelTransactionMonitor, checkIntervalMillis, TimeUnit.MILLISECONDS ); } }
// Schedules the background storage-maintenance task to run once per second,
// keeping the returned handle so the job can be cancelled later.
@Override public void start() { jobHandle = scheduler.scheduleRecurring( Group.STORAGE_MAINTENANCE, this::maintenance, 1, TimeUnit.SECONDS ); }
// When background sampling is enabled, schedules a recurring job (every 10 s)
// that re-samples indexes updated since their last sample.
public void start() { if ( backgroundSampling ) { Runnable samplingRunner = () -> sampleIndexes( BACKGROUND_REBUILD_UPDATED ); backgroundSamplingHandle = scheduler.scheduleRecurring( Group.INDEX_SAMPLING, samplingRunner, 10, SECONDS ); } }
private synchronized void scheduleProfile()
{
    // Skip scheduling entirely once the component is no longer available.
    if ( !available )
    {
        return;
    }
    // Profile the page cache at the configured warmup-profiling interval.
    final long intervalMillis =
            config.get( GraphDatabaseSettings.pagecache_warmup_profiling_interval ).toMillis();
    jobHandle = scheduler.scheduleRecurring( pageCacheIOHelper, this::doProfile, intervalMillis, TimeUnit.MILLISECONDS );
}
public JobScheduler.JobHandle scheduleRecurring( String name, long periodMillis, ThrowingAction<Exception> action )
{
    // Wrap the throwing action so any failure goes through the shared error handler,
    // then schedule it under a group named after the caller-supplied name.
    Runnable safeAction = () -> withErrorHandling( action );
    return delegate.scheduleRecurring( new JobScheduler.Group( name ), safeAction, periodMillis, MILLISECONDS );
}
/**
 * Recovers the segmented log state from disk and starts background reader-pool pruning.
 * Creates the log directory if missing, runs the recovery protocol, rotates away any
 * last segment holding more than a bare header, and schedules the pruner.
 *
 * @throws IOException if the log directory cannot be created
 * @throws DamagedLogStorageException if recovery finds damaged storage
 * @throws DisposedException if the log has been disposed
 */
@Override public synchronized void start() throws IOException, DamagedLogStorageException, DisposedException { if ( !directory.exists() && !directory.mkdirs() ) { throw new IOException( "Could not create: " + directory ); } state = new RecoveryProtocol( fileSystem, fileNames, readerPool, contentMarshal, logProvider ).run(); log.info( "log started with recovered state %s", state ); /*
 * Recovery guarantees that once complete the header of the last raft log file is intact. No such guarantee
 * is made for the last log entry in the last file (or any of the files for that matter). To complete
 * recovery we need to rotate away the last log file, so that any incomplete entries at the end of the last
 * do not have entries appended after them, which would result in unaligned (and therefore wrong) reads.
 * As an obvious optimization, we don't need to rotate if the file contains only the header, such as is
 * the case of a newly created log.
 */ if ( state.segments.last().size() > SegmentHeader.SIZE ) { rotateSegment( state.appendIndex, state.appendIndex, state.terms.latest() ); } /* Periodically evict pooled readers idle longer than READER_POOL_MAX_AGE minutes. */ readerPoolPruner = scheduler.scheduleRecurring( new JobScheduler.Group( "reader-pool-pruner" ), () -> readerPool.prune( READER_POOL_MAX_AGE, MINUTES ), READER_POOL_MAX_AGE, READER_POOL_MAX_AGE, MINUTES ); }
private TransactionFacade createTransactionalActions() { final long timeoutMillis = getTransactionTimeoutMillis(); final Clock clock = Clocks.systemClock(); transactionRegistry = new TransactionHandleRegistry( clock, timeoutMillis, userLogProvider ); // ensure that this is > 0 long runEvery = round( timeoutMillis / 2.0 ); resolveDependency( JobScheduler.class ).scheduleRecurring( Group.SERVER_TRANSACTION_TIMEOUT, () -> { long maxAge = clock.millis() - timeoutMillis; transactionRegistry.rollbackSuspendedTransactionsIdleSince( maxAge ); }, runEvery, MILLISECONDS ); return new TransactionFacade( new TransitionalPeriodTransactionMessContainer( database.getGraph() ), resolveDependency( QueryExecutionEngine.class ), resolveDependency( GraphDatabaseQueryService.class ), transactionRegistry, userLogProvider ); }