/**
 * Tracks a client request as pending, pairing it with its transaction
 * context so the reply can be sent once the corresponding entry commits.
 *
 * @param request the client request to track
 * @param entry the transaction context holding the generated log entry
 * @return the newly registered PendingRequest
 */
PendingRequest addPendingRequest(RaftClientRequest request, TransactionContext entry) {
  if (LOG.isDebugEnabled()) {
    // Fixed: the format string previously ended with "entry=" and had only
    // two "{}" placeholders for three arguments, so the rendered log entry
    // was silently dropped from the debug output.
    LOG.debug("{}: addPendingRequest at {}, entry={}", server.getId(), request,
        ServerProtoUtils.toLogEntryString(entry.getLogEntry()));
  }
  return pendingRequests.add(request, entry);
}
/**
 * Registers the given client request as pending at the supplied raft log
 * index, delegating the bookkeeping to the pending-request table.
 *
 * @param index raft log index assigned to the request's entry
 * @param request the client request awaiting commit
 * @param entry transaction context associated with the entry
 * @return the PendingRequest created for this index
 */
PendingRequest addPendingRequest(long index, RaftClientRequest request, TransactionContext entry) {
  LOG.debug("{}: addPendingRequest at index={}, request={}", server.getId(), index, request);
  return pendingRequests.addPendingRequest(index, request, entry);
}
/**
 * Looks up the transaction context recorded for a raft log index.
 *
 * @param index raft log index of the entry
 * @return whatever the pending-request table has registered for the index
 */
TransactionContext getTransactionContext(long index) {
  return pendingRequests.getTransactionContext(index);
}
// Aborts a timed-out reconfiguration attempt: tears down appenders for
// peers that never caught up, clears the staging state, and fails the
// client's SetConfiguration request with a timeout exception.
void fail() {
  // Remove senders for peers that have not reached voting membership.
  stopAndRemoveSenders(s -> !s.getFollower().isAttendingVote());
  // Reset the enclosing LeaderState's staging state; reconfiguration is over.
  LeaderState.this.stagingState = null;
  // send back failure response to client's request
  pendingRequests.failSetConfiguration(
      new ReconfigurationTimeoutException("Fail to set configuration "
          + newConf + ". Timeout when bootstrapping new peers."));
}
}
/**
 * Completes the pending request recorded at {@code logIndex} by delivering
 * the given reply to its waiting client.
 *
 * @param logIndex raft log index whose request should be answered
 * @param reply the reply to deliver
 */
void replyPendingRequest(long logIndex, RaftClientReply reply) {
  pendingRequests.replyPendingRequest(logIndex, reply);
}
void stop() { this.running = false; // do not interrupt event processor since it may be in the middle of logSync senders.forEach(LogAppender::stopAppender); try { pendingRequests.sendNotLeaderResponses(); } catch (IOException e) { LOG.warn(server.getId() + ": Caught exception in sendNotLeaderResponses", e); } }
// Builds the leader-side bookkeeping when this server becomes leader:
// config knobs, event queue/processor, pending-request table, and one
// log appender per known peer.
LeaderState(RaftServerImpl server, RaftProperties properties) {
  this.server = server;
  // How far a staging (bootstrapping) peer may lag, and the appender sleep interval.
  stagingCatchupGap = RaftServerConfigKeys.stagingCatchupGap(properties);
  syncInterval = RaftServerConfigKeys.Rpc.sleepTime(properties);
  final ServerState state = server.getState();
  this.raftLog = state.getLog();
  this.currentTerm = state.getCurrentTerm();
  // Bounded queue of state-update events consumed by the EventProcessor thread.
  eventQ = new ArrayBlockingQueue<>(4096);
  processor = new EventProcessor();
  pendingRequests = new PendingRequests(server);
  final RaftConfiguration conf = server.getRaftConf();
  Collection<RaftPeer> others = conf.getOtherPeers(state.getSelfId());
  // Timestamp back-dated by the max timeout — presumably so followers start
  // out looking overdue for an RPC; TODO confirm against LogAppender usage.
  final Timestamp t = new Timestamp().addTimeMs(-server.getMaxTimeoutMs());
  placeHolderIndex = raftLog.getNextIndex();
  senders = new SenderList(others.stream().map(
      p -> server.newLogAppender(this, p, t, placeHolderIndex, true))
      .toArray(LogAppender[]::new));
  voterLists = divideFollowers(conf);
}
/**
 * Start bootstrapping new peers.
 *
 * Queues the SetConfiguration request as pending, records the staging
 * state for the peers that must catch up, and either applies the
 * old+new configuration immediately (no new peers) or starts log
 * appenders for the peers being bootstrapped.
 */
PendingRequest startSetConfiguration(SetConfigurationRequest request) {
  // Only one reconfiguration may be in flight at a time.
  Preconditions.assertTrue(running && !inStagingState());
  RaftPeer[] peersInNewConf = request.getPeersInNewConf();
  // Peers in the new conf that are not in the current conf need bootstrapping.
  Collection<RaftPeer> peersToBootStrap = RaftConfiguration
      .computeNewPeers(peersInNewConf, server.getRaftConf());
  // add the request to the pending queue
  final PendingRequest pending = pendingRequests.addConfRequest(request);
  ConfigurationStagingState stagingState = new ConfigurationStagingState(
      peersToBootStrap, new PeerConfiguration(Arrays.asList(peersInNewConf)));
  Collection<RaftPeer> newPeers = stagingState.getNewPeers();
  // set the staging state
  this.stagingState = stagingState;
  if (newPeers.isEmpty()) {
    // Nothing to bootstrap: move straight to the transitional (old+new) conf.
    applyOldNewConf();
  } else {
    // update the LeaderState's sender list
    addSenders(newPeers);
  }
  return pending;
}
private void checkAndUpdateConfiguration(TermIndex[] entriesToCheck) { final RaftConfiguration conf = server.getRaftConf(); if (committedConf(entriesToCheck)) { if (conf.isTransitional()) { replicateNewConf(); } else { // the (new) log entry has been committed LOG.debug("{} sends success to setConfiguration request", server.getId()); pendingRequests.replySetConfiguration(); // if the leader is not included in the current configuration, step down if (!conf.containsInConf(server.getId())) { LOG.info("{} is not included in the new configuration {}. Step down.", server.getId(), conf); try { // leave some time for all RPC senders to send out new conf entry Thread.sleep(server.getMinTimeoutMs()); } catch (InterruptedException ignored) { } // the pending request handler will send NotLeaderException for // pending client requests when it stops server.shutdown(); } } } }
pendingRequests.checkDelayedReplies(min);
// Aborts a timed-out reconfiguration attempt: tears down appenders for
// peers that never caught up, clears the staging state, and fails the
// client's SetConfiguration request with a timeout exception.
void fail() {
  // Remove senders for peers that have not reached voting membership.
  stopAndRemoveSenders(s -> !s.getFollower().isAttendingVote());
  // Reset the enclosing LeaderState's staging state; reconfiguration is over.
  LeaderState.this.stagingState = null;
  // send back failure response to client's request
  pendingRequests.failSetConfiguration(
      new ReconfigurationTimeoutException("Fail to set configuration "
          + newConf + ". Timeout when bootstrapping new peers."));
}
}
/**
 * Delivers {@code reply} to the pending request at {@code logIndex}.
 * When the pending-request table reports no matching request, an
 * UPDATE_COMMIT_EVENT is submitted instead.
 *
 * @param logIndex raft log index whose request should be answered
 * @param reply the reply to deliver
 */
void replyPendingRequest(long logIndex, RaftClientReply reply) {
  final boolean delivered = pendingRequests.replyPendingRequest(logIndex, reply);
  if (!delivered) {
    submitUpdateStateEvent(UPDATE_COMMIT_EVENT);
  }
}
void stop() { this.running = false; // do not interrupt event processor since it may be in the middle of logSync senders.forEach(LogAppender::stopAppender); final NotLeaderException nle = server.generateNotLeaderException(); final Collection<CommitInfoProto> commitInfos = server.getCommitInfos(); try { final Collection<TransactionContext> transactions = pendingRequests.sendNotLeaderResponses(nle, commitInfos); server.getStateMachine().notifyNotLeader(transactions); watchRequests.failWatches(nle); } catch (IOException e) { LOG.warn(server.getId() + ": Caught exception in sendNotLeaderResponses", e); } }
// Builds the leader-side bookkeeping when this server becomes leader:
// config knobs, event processor, pending-request and watch-request tables,
// and one log appender per known peer.
LeaderState(RaftServerImpl server, RaftProperties properties) {
  this.server = server;
  // How far a staging (bootstrapping) peer may lag, and the appender sleep interval.
  stagingCatchupGap = RaftServerConfigKeys.stagingCatchupGap(properties);
  syncInterval = RaftServerConfigKeys.Rpc.sleepTime(properties);
  final ServerState state = server.getState();
  this.raftLog = state.getLog();
  this.currentTerm = state.getCurrentTerm();
  processor = new EventProcessor();
  this.pendingRequests = new PendingRequests(server.getId());
  // Watch requests get their own table with properties-driven limits.
  this.watchRequests = new WatchRequests(server.getId(), properties);
  final RaftConfiguration conf = server.getRaftConf();
  Collection<RaftPeer> others = conf.getOtherPeers(state.getSelfId());
  // Timestamp back-dated by the max timeout — presumably so followers start
  // out looking overdue for an RPC; TODO confirm against LogAppender usage.
  final Timestamp t = Timestamp.currentTime().addTimeMs(-server.getMaxTimeoutMs());
  placeHolderIndex = raftLog.getNextIndex();
  senders = new SenderList(others.stream().map(
      p -> server.newLogAppender(this, p, t, placeHolderIndex, true))
      .toArray(LogAppender[]::new));
  voterLists = divideFollowers(conf);
}
/**
 * Start bootstrapping new peers.
 *
 * Queues the SetConfiguration request as pending, records the staging
 * state for the peers that must catch up, and either applies the
 * old+new configuration immediately (no new peers) or starts log
 * appenders for the peers being bootstrapped.
 */
PendingRequest startSetConfiguration(SetConfigurationRequest request) {
  // Only one reconfiguration may be in flight at a time.
  Preconditions.assertTrue(running && !inStagingState());
  RaftPeer[] peersInNewConf = request.getPeersInNewConf();
  // Peers in the new conf that are not in the current conf need bootstrapping.
  Collection<RaftPeer> peersToBootStrap = RaftConfiguration
      .computeNewPeers(peersInNewConf, server.getRaftConf());
  // add the request to the pending queue
  final PendingRequest pending = pendingRequests.addConfRequest(request);
  ConfigurationStagingState stagingState = new ConfigurationStagingState(
      peersToBootStrap, new PeerConfiguration(Arrays.asList(peersInNewConf)));
  Collection<RaftPeer> newPeers = stagingState.getNewPeers();
  // set the staging state
  this.stagingState = stagingState;
  if (newPeers.isEmpty()) {
    // Nothing to bootstrap: move straight to the transitional (old+new) conf.
    applyOldNewConf();
  } else {
    // update the LeaderState's sender list
    addSenders(newPeers);
  }
  return pending;
}
private void checkAndUpdateConfiguration(TermIndex[] entriesToCheck) { final RaftConfiguration conf = server.getRaftConf(); if (committedConf(entriesToCheck)) { if (conf.isTransitional()) { replicateNewConf(); } else { // the (new) log entry has been committed pendingRequests.replySetConfiguration(server::getCommitInfos); // if the leader is not included in the current configuration, step down if (!conf.containsInConf(server.getId())) { LOG.info("{} is not included in the new configuration {}. Step down.", server.getId(), conf); try { // leave some time for all RPC senders to send out new conf entry Thread.sleep(server.getMinTimeoutMs()); } catch (InterruptedException ignored) { } // the pending request handler will send NotLeaderException for // pending client requests when it stops server.shutdown(false); } } } }
PendingRequest addPendingRequest(long index, RaftClientRequest request, TransactionContext entry) { // externally synced for now Preconditions.assertTrue(request.is(RaftClientRequestProto.TypeCase.WRITE)); if (last != null && !(last.getRequest() instanceof SetConfigurationRequest)) { Preconditions.assertTrue(index == last.getIndex() + 1, () -> "index = " + index + " != last.getIndex() + 1, last=" + last); } return add(index, request, entry); }
/**
 * Looks up the transaction context recorded for a raft log index.
 *
 * @param index raft log index of the entry
 * @return whatever the pending-request table has registered for the index
 */
TransactionContext getTransactionContext(long index) {
  return pendingRequests.getTransactionContext(index);
}