/** Record {@code newLeaderId} as the known leader, logging the transition when it changes. */
void setLeader(RaftPeerId newLeaderId, String op) {
  if (Objects.equals(leaderId, newLeaderId)) {
    return; // unchanged — nothing to log or update
  }
  LOG.info("{}: change Leader from {} to {} at term {} for {}",
      selfId, leaderId, newLeaderId, getCurrentTerm(), op);
  leaderId = newLeaderId;
}
/** Switch this server to {@code newRole}, logging the old role, new role, term and reason. */
private void setRole(Role newRole, String op) {
  final Role previousRole = this.role;
  LOG.info("{} changes role from {} to {} at term {} for {}",
      getId(), previousRole, newRole, state.getCurrentTerm(), op);
  this.role = newRole;
}
/** @return the current Raft term, delegated to the server state. */
@Override public long getCurrentTerm() { return getState().getCurrentTerm(); }
/** @return the current Raft term, delegated to the server state. */
@Override public long getCurrentTerm() { return getState().getCurrentTerm(); }
/** Transition the role holder to {@code newRole}, logging the change and its reason. */
private void setRole(RaftPeerRole newRole, Object reason) {
  final Object previousRole = this.role;
  LOG.info("{} changes role from {} to {} at term {} for {}",
      getId(), previousRole, newRole, state.getCurrentTerm(), reason);
  this.role.transitionRole(newRole);
}
void setLeader(RaftPeerId newLeaderId, String op) { if (!Objects.equals(leaderId, newLeaderId)) { String suffix; if (newLeaderId == null) { // reset the time stamp when a null leader is assigned lastNoLeaderTime = Timestamp.currentTime(); suffix = ""; } else { Timestamp previous = lastNoLeaderTime; lastNoLeaderTime = null; suffix = ", leader elected after " + previous.elapsedTimeMs() + "ms"; } LOG.info("{}: change Leader from {} to {} at term {} for {}{}", selfId, leaderId, newLeaderId, getCurrentTerm(), op, suffix); leaderId = newLeaderId; } }
private boolean shouldWithholdVotes(long candidateTerm) { if (state.getCurrentTerm() < candidateTerm) { return false; } else if (isLeader()) { return true; } else { // following a leader and not yet timeout return isFollower() && state.hasLeader() && role.getFollowerState().map(FollowerState::shouldWithholdVotes).orElse(false); } }
/**
 * Build an InstallSnapshot request for follower {@code targetId} carrying the
 * given file {@code chunks} of {@code snapshot}.
 *
 * @param targetId     follower to install the snapshot on
 * @param requestId    id shared by all requests of one install-snapshot session
 * @param requestIndex position of this request within the session
 * @param snapshot     snapshot whose files are being transferred
 * @param chunks       the file chunks carried by this particular request
 * @param done         true iff this is the last request of the session
 * @return the assembled request proto
 */
synchronized InstallSnapshotRequestProto createInstallSnapshotRequest(
    RaftPeerId targetId, String requestId, int requestIndex,
    SnapshotInfo snapshot, List<FileChunkProto> chunks, boolean done) {
  // sum() yields 0 for an empty file list; the previous reduce(Long::sum) +
  // assert + getAsLong() threw on an empty snapshot when assertions were off.
  final long totalSize = snapshot.getFiles().stream()
      .mapToLong(FileInfo::getFileSize).sum();
  return ServerProtoUtils.toInstallSnapshotRequestProto(getId(), targetId,
      groupId, requestId, requestIndex, state.getCurrentTerm(),
      snapshot.getTermIndex(), chunks, totalSize, done);
}
/**
 * Build an InstallSnapshot request for follower {@code targetId} carrying the
 * given file {@code chunks} of {@code snapshot}.
 *
 * @param targetId     follower to install the snapshot on
 * @param requestId    id shared by all requests of one install-snapshot session
 * @param requestIndex position of this request within the session
 * @param snapshot     snapshot whose files are being transferred
 * @param chunks       the file chunks carried by this particular request
 * @param done         true iff this is the last request of the session
 * @return the assembled request proto
 */
synchronized InstallSnapshotRequestProto createInstallSnapshotRequest(
    RaftPeerId targetId, String requestId, int requestIndex,
    SnapshotInfo snapshot, List<FileChunkProto> chunks, boolean done) {
  // sum() yields 0 for an empty file list; the previous reduce(Long::sum) +
  // assert + getAsLong() threw on an empty snapshot when assertions were off.
  final long totalSize = snapshot.getFiles().stream()
      .mapToLong(FileInfo::getFileSize).sum();
  return ServerProtoUtils.toInstallSnapshotRequestProto(getId(), targetId,
      groupId, requestId, requestIndex, state.getCurrentTerm(),
      snapshot.getTermIndex(), chunks, totalSize, done);
}
/**
 * Build the exception reported when more than one leader is observed in the
 * same term — a Raft safety violation. {@code groupId} may be null.
 */
IllegalStateException newIllegalStateExceptionForMultipleLeaders(RaftGroupId groupId, List<RaftServerImpl> leaders) {
  final String groupSuffix = groupId == null ? "" : " for " + groupId;
  final StringBuilder message = new StringBuilder("Found multiple leaders")
      .append(groupSuffix)
      .append(" at the same term (=")
      .append(leaders.get(0).getState().getCurrentTerm())
      .append("), leaders.size() = ").append(leaders.size())
      .append(" > 1, leaders = ").append(leaders)
      .append(": ").append(printServers(groupId));
  return new IllegalStateException(message.toString());
}
/**
 * Decide whether to withhold a vote from a candidate at {@code candidateTerm}:
 * never when the candidate's term is ahead of ours; always while we are leader;
 * otherwise only while following a leader whose heartbeat is still fresh.
 */
private boolean shouldWithholdVotes(long candidateTerm) {
  if (state.getCurrentTerm() < candidateTerm) {
    return false; // candidate is ahead of us
  }
  if (isLeader()) {
    return true;
  }
  return isFollower() && state.hasLeader() && heartbeatMonitor.shouldWithholdVotes();
}
/** Initialize leader-side state: log appenders for every other peer, event queue, and pending requests. */
LeaderState(RaftServerImpl server, RaftProperties properties) {
  this.server = server;
  stagingCatchupGap = RaftServerConfigKeys.stagingCatchupGap(properties);
  syncInterval = RaftServerConfigKeys.Rpc.sleepTime(properties);

  final ServerState serverState = server.getState();
  this.raftLog = serverState.getLog();
  this.currentTerm = serverState.getCurrentTerm();

  eventQ = new ArrayBlockingQueue<>(4096);
  processor = new EventProcessor();
  pendingRequests = new PendingRequests(server);

  final RaftConfiguration conf = server.getRaftConf();
  Collection<RaftPeer> followers = conf.getOtherPeers(serverState.getSelfId());
  // Seed each appender's last-rpc time one full timeout in the past so they start immediately.
  final Timestamp initialRpcTime = new Timestamp().addTimeMs(-server.getMaxTimeoutMs());
  placeHolderIndex = raftLog.getNextIndex();
  senders = new SenderList(followers.stream()
      .map(p -> server.newLogAppender(this, p, initialRpcTime, placeHolderIndex, true))
      .toArray(LogAppender[]::new));
  voterLists = divideFollowers(conf);
}
private void applyOldNewConf() { final ServerState state = server.getState(); final RaftConfiguration current = server.getRaftConf(); final RaftConfiguration oldNewConf= stagingState.generateOldNewConf(current, state.getLog().getNextIndex()); // apply the (old, new) configuration to log, and use it as the current conf long index = state.getLog().append(state.getCurrentTerm(), oldNewConf); updateConfiguration(index, oldNewConf); this.stagingState = null; notifySenders(); }
private void applyOldNewConf() { final ServerState state = server.getState(); final RaftConfiguration current = server.getRaftConf(); final RaftConfiguration oldNewConf= stagingState.generateOldNewConf(current, state.getLog().getNextIndex()); // apply the (old, new) configuration to log, and use it as the current conf long index = state.getLog().append(state.getCurrentTerm(), oldNewConf); updateConfiguration(index, oldNewConf); this.stagingState = null; notifySenders(); }
/** List the leader's expected snapshot files for every index in [startIndex, endIndex). */
static List<File> getSnapshotFiles(MiniRaftCluster cluster, long startIndex, long endIndex) {
  final RaftServerImpl leader = cluster.getLeader();
  final SimpleStateMachine4Testing stateMachine = SimpleStateMachine4Testing.get(leader);
  final SimpleStateMachineStorage storage = stateMachine.getStateMachineStorage();
  final long currentTerm = leader.getState().getCurrentTerm();
  return LongStream.range(startIndex, endIndex)
      .mapToObj(index -> storage.getSnapshotFile(currentTerm, index))
      .collect(Collectors.toList());
}
/** Initialize leader-side state: log appenders for every other peer, pending and watch requests. */
LeaderState(RaftServerImpl server, RaftProperties properties) {
  this.server = server;
  stagingCatchupGap = RaftServerConfigKeys.stagingCatchupGap(properties);
  syncInterval = RaftServerConfigKeys.Rpc.sleepTime(properties);

  final ServerState serverState = server.getState();
  this.raftLog = serverState.getLog();
  this.currentTerm = serverState.getCurrentTerm();

  processor = new EventProcessor();
  this.pendingRequests = new PendingRequests(server.getId());
  this.watchRequests = new WatchRequests(server.getId(), properties);

  final RaftConfiguration conf = server.getRaftConf();
  Collection<RaftPeer> followers = conf.getOtherPeers(serverState.getSelfId());
  // Seed each appender's last-rpc time one full timeout in the past so they start immediately.
  final Timestamp initialRpcTime = Timestamp.currentTime().addTimeMs(-server.getMaxTimeoutMs());
  placeHolderIndex = raftLog.getNextIndex();
  senders = new SenderList(followers.stream()
      .map(p -> server.newLogAppender(this, p, initialRpcTime, placeHolderIndex, true))
      .toArray(LogAppender[]::new));
  voterLists = divideFollowers(conf);
}
LogEntryProto start() { // In the beginning of the new term, replicate a conf entry in order // to finally commit entries in the previous term. // Also this message can help identify the last committed index and the conf. final LogEntryProto placeHolder = ServerProtoUtils.toLogEntryProto( server.getRaftConf(), server.getState().getCurrentTerm(), raftLog.getNextIndex()); CodeInjectionForTesting.execute(APPEND_PLACEHOLDER, server.getId().toString(), null); raftLog.append(placeHolder); processor.start(); senders.forEach(LogAppender::startAppender); return placeHolder; }
/** Resolve the leader's expected snapshot file for log index {@code i} at the current term. */
static File getSnapshotFile(MiniRaftCluster cluster, int i) {
  final RaftServerImpl leader = cluster.getLeader();
  final SimpleStateMachine4Testing stateMachine = SimpleStateMachine4Testing.get(leader);
  final long currentTerm = leader.getState().getCurrentTerm();
  return stateMachine.getStateMachineStorage().getSnapshotFile(currentTerm, i);
}
/** * when the (old, new) log entry has been committed, should replicate (new): * 1) append (new) to log * 2) update conf to (new) * 3) update RpcSenders list * 4) start replicating the log entry */ private void replicateNewConf() { final RaftConfiguration conf = server.getRaftConf(); final RaftConfiguration newConf = RaftConfiguration.newBuilder() .setConf(conf) .setLogEntryIndex(raftLog.getNextIndex()) .build(); // stop the LogAppender if the corresponding follower is no longer in the conf updateSenders(newConf); long index = raftLog.append(server.getState().getCurrentTerm(), newConf); updateConfiguration(index, newConf); notifySenders(); }
void start() { // In the beginning of the new term, replicate an empty entry in order // to finally commit entries in the previous term. // Also this message can help identify the last committed index when // the leader peer is just started. final LogEntryProto placeHolder = LogEntryProto.newBuilder() .setTerm(server.getState().getCurrentTerm()) .setIndex(raftLog.getNextIndex()) .setNoOp(LeaderNoOp.newBuilder()).build(); CodeInjectionForTesting.execute(APPEND_PLACEHOLDER, server.getId().toString(), null); raftLog.append(placeHolder); processor.start(); senders.forEach(LogAppender::startAppender); }