/**
 * Logs the given exception at TRACE level and forwards it to the server's
 * RPC layer, tagged with the id of the follower this component serves.
 *
 * @param e the exception raised while communicating with the follower
 */
private void handleException(Exception e) {
  LOG.trace("TRACE", e);
  // Let the RPC layer react to the failure for this specific peer.
  // NOTE(review): the meaning of the 'false' flag is not visible here —
  // presumably "do not reconnect/close"; confirm against RaftServerRpc.
  server.getServerRpc().handleException(follower.getPeer().getId(), e, false);
}
/**
 * Traces the exception and reports it to the server RPC layer for the
 * follower's peer id.
 *
 * @param e the exception to report
 */
private void handleException(Exception e) {
  LOG.trace("TRACE", e);
  // The boolean argument's semantics are defined by the RPC layer
  // (not visible in this file) — TODO confirm what 'false' selects.
  server.getServerRpc().handleException(follower.getPeer().getId(), e, false);
}
/**
 * Installs a Raft configuration at the given log index: records it in the
 * configuration manager and registers its peers with the server RPC layer
 * so they become reachable.
 *
 * @param logIndex log index at which the configuration entry appears
 * @param conf the configuration to install
 */
void setRaftConf(long logIndex, RaftConfiguration conf) {
  configurationManager.addConfiguration(logIndex, conf);
  // Make every peer in the new configuration known to the RPC layer.
  server.getServerRpc().addPeers(conf.getPeers());
  LOG.info("{}: set configuration {} at {}", getSelfId(), conf, logIndex);
  LOG.trace("{}: {}", getSelfId(), configurationManager);
}
/**
 * Asynchronously sends a RequestVote RPC to every other peer for the given
 * election term.
 *
 * @param electionTerm the term for which votes are being requested
 * @param lastEntry the candidate's last log entry (term and index)
 * @return how many vote requests were submitted
 */
private int submitRequests(final long electionTerm, final TermIndex lastEntry) {
  int count = 0;
  for (final RaftPeer p : others) {
    final RequestVoteRequestProto request =
        server.createRequestVoteRequest(p.getId(), electionTerm, lastEntry);
    // Dispatch asynchronously; the executor handles the actual RPC call.
    service.submit(() -> server.getServerRpc().requestVote(request));
    ++count;
  }
  return count;
}
/**
 * Fires off one asynchronous RequestVote RPC per remote peer and reports
 * the number of requests that were handed to the executor service.
 *
 * @param electionTerm term for which votes are requested
 * @param lastEntry the candidate's last log entry
 * @return number of submitted vote requests
 */
private int submitRequests(final long electionTerm, final TermIndex lastEntry) {
  int numSubmitted = 0;
  for (final RaftPeer remote : others) {
    final RequestVoteRequestProto voteRequest =
        server.createRequestVoteRequest(remote.getId(), electionTerm, lastEntry);
    // Each RPC runs on the executor; replies are handled by the submitted task.
    service.submit(() -> server.getServerRpc().requestVote(voteRequest));
    numSubmitted++;
  }
  return numSubmitted;
}
/**
 * Creates a gRPC-based log appender for the given follower.
 *
 * @param server the local Raft server instance
 * @param leaderState leader-side state shared across appenders
 * @param f info about the follower this appender replicates to
 */
public GrpcLogAppender(RaftServerImpl server, LeaderState leaderState, FollowerInfo f) {
  super(server, leaderState, f);
  // The server's RPC layer is expected to be the gRPC implementation here.
  this.rpcService = (GrpcService) server.getServerRpc();
  // Upper bound on outstanding AppendEntries requests to this follower.
  maxPendingRequestsNum = GrpcConfigKeys.Server.leaderOutstandingAppendsMax(
      server.getProxy().getProperties());
  // Per-request timeout, read from the server's configured properties.
  requestTimeoutDuration = RaftServerConfigKeys.Rpc.requestTimeout(server.getProxy().getProperties());
  pendingRequests = new ConcurrentHashMap<>();
}
/**
 * Streams the given snapshot to the follower as a sequence of
 * InstallSnapshot requests.
 *
 * <p>On full success, records the snapshot index on the follower info and
 * returns the last reply. If the follower rejects any chunk, the failing
 * reply is returned immediately. On non-interrupt errors the exception is
 * logged and handled, and {@code null} is returned.
 *
 * @param snapshot the snapshot to install on the follower
 * @return the last reply, a failing reply, or {@code null} on error
 * @throws InterruptedIOException if the transfer is interrupted
 */
private InstallSnapshotReplyProto installSnapshot(SnapshotInfo snapshot) throws InterruptedIOException {
  // One request id identifies the whole multi-chunk transfer.
  String requestId = UUID.randomUUID().toString();
  InstallSnapshotReplyProto reply = null;
  try {
    for (InstallSnapshotRequestProto request : new SnapshotRequestIter(snapshot, requestId)) {
      // Bracket the synchronous RPC with send/response timestamps so the
      // leader's failure detection sees this follower as live.
      follower.updateLastRpcSendTime();
      reply = server.getServerRpc().installSnapshot(request);
      follower.updateLastRpcResponseTime();
      if (!reply.getServerReply().getSuccess()) {
        return reply;
      }
    }
  } catch (InterruptedIOException iioe) {
    // Propagate interruption unchanged so the caller can stop cleanly.
    throw iioe;
  } catch (Exception ioe) {
    LOG.warn("{}: Failed to installSnapshot {}: {}", this, snapshot, ioe);
    handleException(ioe);
    return null;
  }
  if (reply != null) {
    // All chunks accepted: remember the snapshot index for this follower.
    follower.setSnapshotIndex(snapshot.getTermIndex().getIndex());
    LOG.info("{}: install snapshot-{} successfully on follower {}",
        server.getId(), snapshot.getTermIndex().getIndex(), follower.getPeer());
  }
  return reply;
}
// Synchronous AppendEntries RPC to the follower; record the response time
// afterwards so liveness tracking reflects this successful round trip.
final AppendEntriesReplyProto r = server.getServerRpc().appendEntries(request);
follower.updateLastRpcResponseTime();
/**
 * Streams the given snapshot to the follower as a sequence of
 * InstallSnapshot requests.
 *
 * <p>On full success, advances the follower's match index to the snapshot
 * index and its next index to the entry after it, then returns the last
 * reply. If the follower rejects any chunk, the failing reply is returned
 * immediately. On non-interrupt errors the exception is logged and handled,
 * and {@code null} is returned.
 *
 * @param snapshot the snapshot to install on the follower
 * @return the last reply, a failing reply, or {@code null} on error
 * @throws InterruptedIOException if the transfer is interrupted
 */
private InstallSnapshotReplyProto installSnapshot(SnapshotInfo snapshot) throws InterruptedIOException {
  // A single request id ties all chunks of this transfer together.
  String requestId = UUID.randomUUID().toString();
  InstallSnapshotReplyProto reply = null;
  try {
    for (InstallSnapshotRequestProto request : new SnapshotRequestIter(snapshot, requestId)) {
      // Update send/response timestamps around the blocking RPC so the
      // follower is not falsely considered unresponsive.
      follower.updateLastRpcSendTime();
      reply = server.getServerRpc().installSnapshot(request);
      follower.updateLastRpcResponseTime();
      if (!reply.getServerReply().getSuccess()) {
        return reply;
      }
    }
  } catch (InterruptedIOException iioe) {
    // Interruption must reach the caller untouched.
    throw iioe;
  } catch (Exception ioe) {
    LOG.warn("{}: Failed to installSnapshot {}: {}", this, snapshot, ioe);
    handleException(ioe);
    return null;
  }
  if (reply != null) {
    // Snapshot fully installed: the follower now matches up to the snapshot
    // index, and replication resumes at the next entry.
    follower.updateMatchIndex(snapshot.getTermIndex().getIndex());
    follower.updateNextIndex(snapshot.getTermIndex().getIndex() + 1);
    LOG.info("{}: install snapshot-{} successfully on follower {}",
        server.getId(), snapshot.getTermIndex().getIndex(), follower.getPeer());
  }
  return reply;
}
/**
 * Rebuilds the configuration state from a batch of log entries. Any
 * configurations recorded at or after the first entry's index are dropped,
 * then each configuration entry in the batch is re-applied and its peers
 * are registered with the server RPC layer.
 *
 * @param entries the log entries to scan; ignored when null or empty
 */
void updateConfiguration(LogEntryProto[] entries) {
  if (entries == null || entries.length == 0) {
    return;
  }
  // Discard configurations superseded by this batch before re-applying.
  configurationManager.removeConfigurations(entries[0].getIndex());
  for (LogEntryProto entry : entries) {
    if (!ProtoUtils.isConfigurationLogEntry(entry)) {
      continue;
    }
    final RaftConfiguration conf = ServerProtoUtils.toRaftConfiguration(
        entry.getIndex(), entry.getConfigurationEntry());
    configurationManager.addConfiguration(entry.getIndex(), conf);
    server.getServerRpc().addPeers(conf.getPeers());
  }
}
// Blocking AppendEntries call; stamping the response time keeps the
// leader's view of this follower's liveness current.
final AppendEntriesReplyProto r = server.getServerRpc().appendEntries(request);
follower.updateLastRpcResponseTime();
// Register the peers of the new configuration with the RPC layer.
getServerRpc().addPeers(Arrays.asList(peersInNewConf));
// Make the RPC layer aware of every peer in the new configuration.
getServerRpc().addPeers(Arrays.asList(peersInNewConf));
/**
 * Verifies that a raft server proxy closes its RPC server when startup
 * fails because the raft storage directory is already locked, and that the
 * leader's address can subsequently be rebound.
 */
@Test
public void testServerRestartOnException() throws Exception {
  RaftProperties properties = new RaftProperties();
  final MiniRaftClusterWithGRpc cluster = MiniRaftClusterWithGRpc.FACTORY.newCluster(1, properties);
  cluster.start();
  RaftPeerId leaderId = RaftTestUtil.waitForLeader(cluster).getId();
  // Point new servers at the leader's current RPC port.
  GrpcConfigKeys.Server.setPort(properties, cluster.getLeader().getServerRpc().getInetSocketAddress().getPort());
  // Create a raft server proxy with server rpc bound to a different address
  // compared to leader. This helps in locking the raft storage directory to
  // be used by next raft server proxy instance.
  final StateMachine stateMachine = cluster.getLeader().getStateMachine();
  ServerImplUtils.newRaftServer(leaderId, cluster.getGroup(), stateMachine, properties, null);
  // Close the server rpc for leader so that new raft server can be bound to it.
  cluster.getLeader().getServerRpc().close();
  // Create a raft server proxy with server rpc bound to same address as
  // the leader. This step would fail as the raft storage has been locked by
  // the raft server proxy created earlier. Raft server proxy should close
  // the rpc server on failure.
  // NOTE(review): the repeated IOException.class presumably describes a
  // nested cause chain (IOException wrapping IOException wrapping
  // OverlappingFileLockException) — confirm against testFailureCase's contract.
  testFailureCase("start a new server with the same address",
      () -> ServerImplUtils.newRaftServer(leaderId, cluster.getGroup(), stateMachine, properties, null),
      IOException.class, IOException.class, OverlappingFileLockException.class);
  // Try to start a raft server rpc at the leader address.
  cluster.getServer(leaderId).getFactory().newRaftServerRpc(cluster.getServer(leaderId));
}
}
/**
 * Verifies that starting a raft server proxy fails cleanly (closing its RPC
 * server) when the raft storage directory is already locked by another
 * proxy, and that the leader's address can then be rebound.
 */
@Test
public void testServerRestartOnException() throws Exception {
  RaftProperties properties = new RaftProperties();
  final MiniRaftClusterWithGrpc cluster = MiniRaftClusterWithGrpc.FACTORY.newCluster(1, properties);
  cluster.start();
  RaftPeerId leaderId = RaftTestUtil.waitForLeader(cluster).getId();
  // Point new servers at the leader's current RPC port.
  GrpcConfigKeys.Server.setPort(properties, cluster.getLeader().getServerRpc().getInetSocketAddress().getPort());
  // Create a raft server proxy with server rpc bound to a different address
  // compared to leader. This helps in locking the raft storage directory to
  // be used by next raft server proxy instance.
  final StateMachine stateMachine = cluster.getLeader().getStateMachine();
  ServerImplUtils.newRaftServer(leaderId, cluster.getGroup(), gid -> stateMachine, properties, null);
  // Close the server rpc for leader so that new raft server can be bound to it.
  cluster.getLeader().getServerRpc().close();
  // Create a raft server proxy with server rpc bound to same address as
  // the leader. This step would fail as the raft storage has been locked by
  // the raft server proxy created earlier. Raft server proxy should close
  // the rpc server on failure.
  testFailureCase("start a new server with the same address",
      () -> ServerImplUtils.newRaftServer(leaderId, cluster.getGroup(), gid -> stateMachine, properties, null).start(),
      IOException.class, OverlappingFileLockException.class);
  // Try to start a raft server rpc at the leader address.
  cluster.getServer(leaderId).getFactory().newRaftServerRpc(cluster.getServer(leaderId));
}
}