/**
 * Periodic catch-up check. Completes {@code catchUpFuture} exceptionally if the database is
 * unhealthy, successfully once this member is a voting member that has caught up with the
 * leader, and otherwise reschedules itself with a linearly increasing delay capped at
 * {@code maxCatchupLag}.
 */
@Override
public void run()
{
    // NOTE(review): dbHealthSupplier.get() is called twice; this assumes both calls return
    // the same (or an equivalent) health instance — confirm against the supplier's contract.
    if ( !dbHealthSupplier.get().isHealthy() )
    {
        catchUpFuture.completeExceptionally( dbHealthSupplier.get().cause() );
    }
    else if ( iAmAVotingMember() && caughtUpWithLeader() )
    {
        catchUpFuture.complete( Boolean.TRUE );
    }
    else
    {
        // Back off linearly by one second per attempt, capped at the configured maximum lag.
        currentCatchupDelayInMs += SECONDS.toMillis( 1 );
        long longerDelay = Math.min( currentCatchupDelayInMs, maxCatchupLag );
        jobScheduler.schedule( new JobScheduler.Group( MembershipWaiter.class.toString() ), this,
                longerDelay, MILLISECONDS );
    }
}
/**
 * Starts the segmented raft log: ensures the log directory exists, runs recovery to rebuild
 * in-memory state from the segment files on disk, rotates away a last segment that contains
 * more than just its header, and schedules periodic pruning of the reader pool.
 *
 * @throws IOException if the log directory does not exist and cannot be created
 * @throws DamagedLogStorageException if recovery detects damaged log storage
 * @throws DisposedException if the log has already been disposed
 */
@Override public synchronized void start() throws IOException, DamagedLogStorageException, DisposedException
{
    if ( !directory.exists() && !directory.mkdirs() )
    {
        throw new IOException( "Could not create: " + directory );
    }

    // Rebuild segment/term/append-index state from the files on disk.
    state = new RecoveryProtocol( fileSystem, fileNames, readerPool, contentMarshal, logProvider ).run();
    log.info( "log started with recovered state %s", state );
    /*
     * Recovery guarantees that once complete the header of the last raft log file is intact. No such guarantee
     * is made for the last log entry in the last file (or any of the files for that matter). To complete
     * recovery we need to rotate away the last log file, so that any incomplete entries at the end of the last
     * do not have entries appended after them, which would result in unaligned (and therefore wrong) reads.
     * As an obvious optimization, we don't need to rotate if the file contains only the header, such as is
     * the case of a newly created log.
     */
    if ( state.segments.last().size() > SegmentHeader.SIZE )
    {
        rotateSegment( state.appendIndex, state.appendIndex, state.terms.latest() );
    }

    // Recurring job that releases pooled readers that have been idle for READER_POOL_MAX_AGE.
    readerPoolPruner = scheduler.scheduleRecurring( new JobScheduler.Group( "reader-pool-pruner" ),
            () -> readerPool.prune( READER_POOL_MAX_AGE, MINUTES ), READER_POOL_MAX_AGE, READER_POOL_MAX_AGE, MINUTES );
}
/**
 * Builds the inbound raft message handler chain. The innermost handler applies messages to the
 * raft machine; around it are wrapped (outermost first) cluster-id binding, leader-availability
 * tracking, batching, and monitoring.
 */
private LifecycleMessageHandler<ReceivedInstantClusterIdAwareMessage<?>> createMessageHandlerChain( CoreServerModule coreServerModule )
{
    // Innermost handler: applies raft messages to the consensus state machine.
    RaftMessageApplier applier = new RaftMessageApplier( localDatabase, logProvider, consensusModule.raftMachine(),
            coreServerModule.downloadService(), coreServerModule.commandApplicationProcess(), catchupAddressProvider );

    int inQueueSize = platformModule.config.get( CausalClusteringSettings.raft_in_queue_size );
    int inQueueMaxBatch = platformModule.config.get( CausalClusteringSettings.raft_in_queue_max_batch );
    Function<Runnable,ContinuousJob> jobFactory = runnable -> new ContinuousJob(
            platformModule.jobScheduler.threadFactory( new JobScheduler.Group( "raft-batch-handler" ) ), runnable, logProvider );

    // Composable layers; applied below from innermost (monitoring) to outermost (cluster binding).
    ComposableMessageHandler monitoring = RaftMessageMonitoringHandler.composable( platformModule.clock, platformModule.monitors );
    ComposableMessageHandler batching = BatchingMessageHandler.composable( inQueueSize, inQueueMaxBatch, jobFactory, logProvider );
    ComposableMessageHandler leaderAvailability = LeaderAvailabilityHandler.composable(
            consensusModule.getLeaderAvailabilityTimers(), consensusModule.raftMachine()::term );
    ComposableMessageHandler clusterBinding = ClusterBindingHandler.composable( logProvider );

    return clusterBinding
            .compose( leaderAvailability )
            .compose( batching )
            .compose( monitoring )
            .apply( applier );
} }
/**
 * Schedules {@code action} to run once after {@code delayMillis} milliseconds, routing any
 * failure through the shared error-handling wrapper.
 *
 * @param name name used for the scheduler group
 * @param delayMillis delay before execution, in milliseconds
 * @param action the action to run
 * @return a handle that can be used to cancel the scheduled job
 */
public JobScheduler.JobHandle schedule( String name, long delayMillis, ThrowingAction<Exception> action )
{
    Runnable guarded = () -> withErrorHandling( action );
    return delegate.schedule( new JobScheduler.Group( name ), guarded, delayMillis, MILLISECONDS );
}
/**
 * Returns a future that completes once this member is a caught-up voting member of the raft
 * group (or exceptionally if the database becomes unhealthy). The check is performed by a
 * scheduled {@link Evaluator}; the polling job is cancelled as soon as the future settles.
 */
CompletableFuture<Boolean> waitUntilCaughtUpMember( RaftMachine raft )
{
    CompletableFuture<Boolean> caughtUp = new CompletableFuture<>();

    Evaluator evaluator = new Evaluator( raft, caughtUp, dbHealthSupplier );
    JobScheduler.JobHandle handle = jobScheduler.schedule( new JobScheduler.Group( getClass().toString() ),
            evaluator, currentCatchupDelayInMs, MILLISECONDS );

    // Whether the future completes normally or exceptionally, stop the polling job.
    caughtUp.whenComplete( ( ignoredResult, ignoredError ) -> handle.cancel( true ) );
    return caughtUp;
}
/**
 * Schedules {@code action} to run repeatedly every {@code periodMillis} milliseconds, routing
 * any failure through the shared error-handling wrapper.
 *
 * @param name name used for the scheduler group
 * @param periodMillis period between executions, in milliseconds
 * @param action the action to run on each tick
 * @return a handle that can be used to cancel the recurring job
 */
public JobScheduler.JobHandle scheduleRecurring( String name, long periodMillis, ThrowingAction<Exception> action )
{
    Runnable guarded = () -> withErrorHandling( action );
    return delegate.scheduleRecurring( new JobScheduler.Group( name ), guarded, periodMillis, MILLISECONDS );
}