/**
 * Handle an appendEntries reply from the follower:
 * 1. If the reply is success, update the follower's match index and submit
 *    an event to leaderState.
 * 2. If the reply is NOT_LEADER, step down.
 * 3. If the reply is INCONSISTENCY, decrease the follower's next index
 *    based on the response.
 */
@Override
public void onNext(AppendEntriesReplyProto reply) {
  if (LOG.isDebugEnabled()) {
    // Note whether this is the very first reply seen from this follower.
    final String which = firstResponseReceived ? "a" : "the first";
    LOG.debug("{}<-{}: received {} reply {} ",
        server.getId(), follower.getPeer(), which, ServerProtoUtils.toString(reply));
  }
  try {
    onNextImpl(reply);
  } catch (Throwable t) {
    // Never let a handler failure propagate into the RPC layer; log it instead.
    LOG.error("Failed onNext " + reply, t);
  }
}
/** Summarize this log: the last entry's term/index and the last committed entry. */
@Override
public String toString() {
  return new StringBuilder("last=")
      .append(getLastEntryTermIndex())
      .append(", committed=")
      .append(ServerProtoUtils.toString(get(getLastCommittedIndex())))
      .toString();
}
/** Render the log state as "last=&lt;term-index&gt;, committed=&lt;entry&gt;". */
@Override
public String toString() {
  final Object last = getLastEntryTermIndex();
  final String committed = ServerProtoUtils.toString(get(getLastCommittedIndex()));
  return "last=" + last + ", committed=" + committed;
}
/** Build a compact, comma-separated summary of an AppendEntries reply. */
public static String toString(AppendEntriesReplyProto reply) {
  final StringBuilder sb = new StringBuilder(toString(reply.getServerReply()));
  sb.append(',').append(reply.getResult());
  sb.append(",nextIndex:").append(reply.getNextIndex());
  sb.append(",term:").append(reply.getTerm());
  return sb.toString();
}
/**
 * Build a compact, comma-separated summary of an AppendEntries reply,
 * including the follower's commit index.
 */
public static String toString(AppendEntriesReplyProto reply) {
  return String.join(",",
      toString(reply.getServerReply()),
      String.valueOf(reply.getResult()),
      "nextIndex:" + reply.getNextIndex(),
      "term:" + reply.getTerm(),
      "followerCommit:" + reply.getFollowerCommit());
}
/**
 * Walk the received entries against the stored log to find where (if anywhere)
 * the two diverge in term, and therefore where the stored log must be truncated.
 *
 * @param failClientRequest invoked for every stored entry at or after the
 *        truncation point so its pending client request can be failed
 * @param entries the entries received from the leader; assumed non-empty and
 *        contiguous starting at entries[0].getIndex() — TODO confirm with callers
 * @return a TruncateIndices holding (a) the array index of the first received
 *         entry not already matched in the stored log, and (b) the log index to
 *         truncate from, or -1 if no conflicting entry was found
 */
TruncateIndices computeTruncateIndices(Consumer<TermIndex> failClientRequest, LogEntryProto... entries) {
  int arrayIndex = 0;
  long truncateIndex = -1;
  // Read-lock the closed segments while iterating stored term/index pairs.
  try(AutoCloseableLock readLock = closedSegments.readLock()) {
    final Iterator<TermIndex> i = iterator(entries[0].getIndex());
    // Advance through stored entries and received entries in lock-step.
    for(; i.hasNext() && arrayIndex < entries.length; arrayIndex++) {
      final TermIndex storedEntry = i.next();
      // Indices must line up exactly; a mismatch indicates a bug, not a conflict.
      Preconditions.assertTrue(storedEntry.getIndex() == entries[arrayIndex].getIndex(),
          "The stored entry's index %s is not consistent with the received entries[%s]'s index %s",
          storedEntry.getIndex(), arrayIndex, entries[arrayIndex].getIndex());
      if (storedEntry.getTerm() != entries[arrayIndex].getTerm()) {
        // Terms differ at the same index: the stored suffix conflicts with the
        // leader's log. we should truncate from the storedEntry's arrayIndex
        truncateIndex = storedEntry.getIndex();
        if (LOG.isTraceEnabled()) {
          LOG.trace("{}: truncate to {}, arrayIndex={}, ti={}, storedEntry={}, entries={}",
              name, truncateIndex, arrayIndex,
              ServerProtoUtils.toTermIndex(entries[arrayIndex]), storedEntry,
              ServerProtoUtils.toString(entries));
        }
        // fail all requests starting at truncateIndex
        failClientRequest.accept(storedEntry);
        for(; i.hasNext(); ) {
          failClientRequest.accept(i.next());
        }
        break;
      }
    }
  }
  return new TruncateIndices(arrayIndex, truncateIndex);
}
/**
 * Handle an incoming AppendEntries request from the gRPC stream.
 *
 * Replies must be sent back in request order even though appendEntriesAsync
 * completes asynchronously; this is enforced by chaining each request's
 * completion future onto the previous one via previousOnNext.
 */
@Override
public void onNext(AppendEntriesRequestProto request) {
  // "current" completes once THIS request's reply has been forwarded; swap it in
  // atomically so the next request will wait on us.
  final CompletableFuture<Void> current = new CompletableFuture<>();
  final CompletableFuture<Void> previous = previousOnNext.getAndSet(current);
  try {
    // thenCombine with "previous" guarantees the reply is emitted only after the
    // preceding request's reply has been emitted, preserving stream ordering.
    server.appendEntriesAsync(request).thenCombine(previous, (reply, v) -> {
      if (!isClosed.get()) {
        if (LOG.isDebugEnabled()) {
          LOG.debug(server.getId() + ": reply " + ServerProtoUtils.toString(reply));
        }
        responseObserver.onNext(reply);
      }
      // Unblock the next request in the chain regardless of whether we replied.
      current.complete(null);
      return null;
    });
  } catch (Throwable e) {
    GrpcUtil.warn(LOG, () -> getId() + ": Failed appendEntries "
        + ProtoUtils.toString(request.getServerRequest()), e);
    responseObserver.onError(GrpcUtil.wrapException(e, request.getServerRequest().getCallId()));
    // Complete exceptionally so later requests chained on "current" are not stuck.
    current.completeExceptionally(e);
  }
}
server.getId(), truncateIndex, index, ServerProtoUtils.toTermIndex(entries[index]), storedEntry, ServerProtoUtils.toString(entries));
/**
 * Assert that the state-machine entries in the log match the expected messages
 * under the expected term; configuration and metadata entries are ignored.
 */
static void assertLogEntries(RaftLog log, long expectedTerm, SimpleMessage... expectedMessages) {
  final List<LogEntryProto> stateMachineEntries = new ArrayList<>(expectedMessages.length);
  for (LogEntryProto entry : getLogEntryProtos(log)) {
    final String summary = ServerProtoUtils.toString(entry);
    if (entry.hasStateMachineLogEntry()) {
      LOG.info(summary + ", " + entry.getStateMachineLogEntry().toString().trim().replace("\n", ", "));
      stateMachineEntries.add(entry);
    } else if (entry.hasConfigurationEntry() || entry.hasMetadataEntry()) {
      // Both cases are logged identically and excluded from the comparison.
      LOG.info("Found {}, ignoring it.", summary);
    } else {
      throw new AssertionError("Unexpected LogEntryBodyCase " + entry.getLogEntryBodyCase() + " at " + summary);
    }
  }
  try {
    assertLogEntries(stateMachineEntries, expectedTerm, expectedMessages);
  } catch (Throwable t) {
    // Include the collected entries in the failure for easier debugging.
    throw new AssertionError("entries: " + stateMachineEntries, t);
  }
}
/**
 * Assert that the SMLOGENTRY entries in the log match the expected messages
 * under the expected term; NOOP entries are ignored.
 */
static void assertLogEntries(RaftLog log, long expectedTerm, SimpleMessage... expectedMessages) {
  final TermIndex[] termIndices = log.getEntries(1, Long.MAX_VALUE);
  final List<LogEntryProto> smEntries = new ArrayList<>(expectedMessages.length);
  for (TermIndex ti : termIndices) {
    final LogEntryProto entry;
    try {
      entry = log.get(ti.getIndex());
    } catch (IOException ioe) {
      throw new AssertionError("Failed to get log at " + ti, ioe);
    }
    switch (entry.getLogEntryBodyCase()) {
      case SMLOGENTRY:
        LOG.info(ServerProtoUtils.toString(entry) + ", "
            + entry.getSmLogEntry().toString().trim().replace("\n", ", "));
        smEntries.add(entry);
        break;
      case NOOP:
        LOG.info("Found " + LogEntryProto.LogEntryBodyCase.NOOP + " at " + ti + ", ignoring it.");
        break;
      default:
        throw new AssertionError("Unexpected LogEntryBodyCase " + entry.getLogEntryBodyCase()
            + " at " + ti + ": " + ServerProtoUtils.toString(entry));
    }
  }
  try {
    assertLogEntries(smEntries, expectedTerm, expectedMessages);
  } catch (Throwable t) {
    // Include the collected entries in the failure for easier debugging.
    throw new AssertionError("entries: " + smEntries, t);
  }
}
CompletableFuture<Message> applyLogToStateMachine(LogEntryProto next) { final StateMachine stateMachine = getStateMachine(); if (next.hasConfigurationEntry()) { // the reply should have already been set. only need to record // the new conf in the metadata file. state.writeRaftConfiguration(next); } else if (next.hasStateMachineLogEntry()) { // check whether there is a TransactionContext because we are the leader. TransactionContext trx = role.getLeaderState() .map(leader -> leader.getTransactionContext(next.getIndex())).orElseGet( () -> TransactionContext.newBuilder() .setServerRole(role.getCurrentRole()) .setStateMachine(stateMachine) .setLogEntry(next) .build()); // Let the StateMachine inject logic for committed transactions in sequential order. trx = stateMachine.applyTransactionSerial(trx); try { // TODO: This step can be parallelized CompletableFuture<Message> stateMachineFuture = stateMachine.applyTransaction(trx); return replyPendingRequest(next, stateMachineFuture); } catch (Throwable e) { LOG.error("{}: applyTransaction failed for index:{} proto:{}", getId(), next.getIndex(), ServerProtoUtils.toString(next), e.getMessage()); throw e; } } return null; }
+ leaderTerm + ", " + previous + ", " + leaderCommit + ", " + initializing + ", commits" + ProtoUtils.toString(commitInfos) + ", entries: " + ServerProtoUtils.toString(entries)); if (LOG.isDebugEnabled()) { LOG.debug("{}: inconsistency entries. Leader previous:{}, Reply:{}", getId(), previous, ServerProtoUtils.toString(reply)); logAppendEntries(isHeartbeat, () -> getId() + ": succeeded to handle AppendEntries. Reply: " + ServerProtoUtils.toString(reply)); return JavaUtils.allOf(futures) .thenApply(v -> {
+ previous + ", " + leaderCommit + ", " + initializing + ", commits" + ProtoUtils.toString(commitInfos) + ", entries: " + ServerProtoUtils.toString(entries)); final List<CompletableFuture<Long>> futures; if (LOG.isDebugEnabled()) { LOG.debug("{}: inconsistency entries. Leader previous:{}, Reply:{}", getId(), previous, ServerProtoUtils.toString(reply)); getId() + ": succeeded to handle AppendEntries. Reply: " + ServerProtoUtils.toString(reply)); return reply; });
if (LOG.isDebugEnabled()) { LOG.debug("{}: applying nextIndex={}, nextLog={}", this, nextIndex, ServerProtoUtils.toString(next));
if (LOG.isDebugEnabled()) { LOG.debug("{}: applying nextIndex={}, nextLog={}", this, nextIndex, ServerProtoUtils.toString(next));
if (request == null) { LOG.warn("{}: Request not found, ignoring reply: {}", this, ServerProtoUtils.toString(reply)); return;