/**
 * Handle an appendEntries reply:
 * 1. On SUCCESS, update the follower's match index and submit an event to the leader state.
 * 2. On NOT_LEADER, step down.
 * 3. On INCONSISTENCY, decrease the follower's next index based on the response.
 */
@Override
public void onNext(AppendEntriesReplyProto reply) {
  if (LOG.isDebugEnabled()) {
    // Note in the log whether this is the very first reply seen from this follower.
    final String ordinal = firstResponseReceived ? "a" : "the first";
    LOG.debug("{}<-{}: received {} reply {} ", server.getId(), follower.getPeer(),
        ordinal, ServerProtoUtils.toString(reply));
  }
  try {
    onNextImpl(reply);
  } catch (Throwable t) {
    // Never let a processing failure escape the gRPC stream observer.
    LOG.error("Failed onNext " + reply, t);
  }
}
/**
 * Build and cache the {@link LogEntryProto} for this transaction.
 * Only valid on the leader, exactly once (logEntry must still be null),
 * and only after the state machine log entry has been set.
 */
@Override
public LogEntryProto initLogEntry(long term, long index) {
  Preconditions.assertTrue(serverRole == RaftPeerRole.LEADER);
  Preconditions.assertNull(logEntry, "logEntry");
  Objects.requireNonNull(smLogEntryProto, "smLogEntryProto == null");
  logEntry = ServerProtoUtils.toLogEntryProto(smLogEntryProto, term, index);
  return logEntry;
}
/** Delegate to {@link ServerProtoUtils#toLogEntryString} for a readable form of the cached entry. */
@Override public String toString() { return ServerProtoUtils.toLogEntryString(logEntry); } }
/**
 * Build a copy of the given log entry with the supplied state machine data attached.
 * The entry must currently be a placeholder (i.e. {@link #shouldReadStateMachineData}
 * returns true for it); otherwise this throws via the precondition.
 *
 * @param stateMachineData state machine data to attach
 * @param entry log entry to rebuild
 * @return a new LogEntryProto carrying the state machine data
 */
static LogEntryProto addStateMachineData(ByteString stateMachineData, LogEntryProto entry) {
  Preconditions.assertTrue(shouldReadStateMachineData(entry),
      () -> "Failed to addStateMachineData to " + entry + " since shouldReadStateMachineData is false.");
  return rebuildLogEntryProto(entry, toStateMachineEntryProtoBuilder(stateMachineData));
}
/**
 * If the given entry carries non-empty state machine data, build a new entry
 * without it (recording only the original serialized size); otherwise return
 * the given entry unchanged.
 *
 * @return a new entry without the state machine data, or the given entry itself.
 */
static LogEntryProto removeStateMachineData(LogEntryProto entry) {
  final Optional<ByteString> data = getStateMachineData(entry);
  if (!data.isPresent() || data.get().isEmpty()) {
    // No state machine data to strip — hand back the original entry.
    return entry;
  }
  return rebuildLogEntryProto(entry, toStateMachineEntryProtoBuilder(entry.getSerializedSize()));
}
public LogEntryProto getEntry(TimeDuration timeout) throws RaftLogIOException, TimeoutException { LogEntryProto entryProto; if (future == null) { return logEntry; } try { entryProto = future.thenApply(data -> ServerProtoUtils.addStateMachineData(data, logEntry)) .get(timeout.getDuration(), timeout.getUnit()); } catch (TimeoutException t) { throw t; } catch (Throwable t) { final String err = selfId + ": Failed readStateMachineData for " + ServerProtoUtils.toLogEntryString(logEntry); LogAppender.LOG.error(err, t); throw new RaftLogIOException(err, JavaUtils.unwrapCompletionException(t)); } // by this time we have already read the state machine data, // so the log entry data should be set now if (ServerProtoUtils.shouldReadStateMachineData(entryProto)) { final String err = selfId + ": State machine data not set for " + ServerProtoUtils.toLogEntryString(logEntry); LogAppender.LOG.error(err); throw new RaftLogIOException(err); } return entryProto; }
TruncateIndices computeTruncateIndices(Consumer<TermIndex> failClientRequest, LogEntryProto... entries) { int arrayIndex = 0; long truncateIndex = -1; try(AutoCloseableLock readLock = closedSegments.readLock()) { final Iterator<TermIndex> i = iterator(entries[0].getIndex()); for(; i.hasNext() && arrayIndex < entries.length; arrayIndex++) { final TermIndex storedEntry = i.next(); Preconditions.assertTrue(storedEntry.getIndex() == entries[arrayIndex].getIndex(), "The stored entry's index %s is not consistent with the received entries[%s]'s index %s", storedEntry.getIndex(), arrayIndex, entries[arrayIndex].getIndex()); if (storedEntry.getTerm() != entries[arrayIndex].getTerm()) { // we should truncate from the storedEntry's arrayIndex truncateIndex = storedEntry.getIndex(); if (LOG.isTraceEnabled()) { LOG.trace("{}: truncate to {}, arrayIndex={}, ti={}, storedEntry={}, entries={}", name, truncateIndex, arrayIndex, ServerProtoUtils.toTermIndex(entries[arrayIndex]), storedEntry, ServerProtoUtils.toString(entries)); } // fail all requests starting at truncateIndex failClientRequest.accept(storedEntry); for(; i.hasNext(); ) { failClientRequest.accept(i.next()); } break; } } } return new TruncateIndices(arrayIndex, truncateIndex); }
// NOTE(review): fragment of an AppendEntries handler — the enclosing method signature,
// several braces, and intermediate statements are outside this view. Code kept byte-identical.
// Presumably replies NOT_LEADER when the sender is unrecognized and INCONSISTENCY on a
// previous-entry mismatch — confirm against the full method.
+ previous + ", " + leaderCommit + ", " + initializing + ", commits" + ProtoUtils.toString(commitInfos) + ", entries: " + ServerProtoUtils.toString(entries)); final List<CompletableFuture<Long>> futures; currentTerm = state.getCurrentTerm(); if (!recognized) { final AppendEntriesReplyProto reply = ServerProtoUtils.toAppendEntriesReplyProto( leaderId, getId(), groupId, currentTerm, followerCommit, nextIndex, NOT_LEADER, callId); if (LOG.isDebugEnabled()) { final AppendEntriesReplyProto reply = ServerProtoUtils.toAppendEntriesReplyProto( leaderId, getId(), groupId, currentTerm, followerCommit, Math.min(nextIndex, previous.getIndex()), INCONSISTENCY, callId); if (LOG.isDebugEnabled()) { LOG.debug("{}: inconsistency entries. Leader previous:{}, Reply:{}", getId(), previous, ServerProtoUtils.toString(reply)); state.updateStatemachine(leaderCommit, currentTerm); final long n = isHeartbeat? state.getLog().getNextIndex(): entries[entries.length - 1].getIndex() + 1; reply = ServerProtoUtils.toAppendEntriesReplyProto(leaderId, getId(), groupId, currentTerm, state.getLog().getLastCommittedIndex(), n, SUCCESS, callId); getId() + ": succeeded to handle AppendEntries. Reply: " + ServerProtoUtils.toString(reply)); return reply; });
WriteLog(LogEntryProto entry) { this.entry = ServerProtoUtils.removeStateMachineData(entry); if (this.entry == entry || stateMachine == null) { this.stateMachineFuture = null; } else { try { // this.entry != entry iff the entry has state machine data this.stateMachineFuture = stateMachine.writeStateMachineData(entry); } catch (Throwable e) { LOG.error(name + ": writeStateMachineData failed for index " + entry.getIndex() + ", entry=" + ServerProtoUtils.toLogEntryString(entry), e); throw e; } } this.combined = stateMachineFuture == null? super.getFuture() : super.getFuture().thenCombine(stateMachineFuture, (index, stateMachineResult) -> index); }
/**
 * Construct a {@link TransactionContext} from a client request.
 * Used by the state machine to start a transaction and send the log entry
 * representing the transaction data to be applied to the raft log.
 */
public TransactionContextImpl(
    StateMachine stateMachine, RaftClientRequest clientRequest,
    StateMachineLogEntryProto smLogEntryProto, Object stateMachineContext) {
  this(RaftPeerRole.LEADER, stateMachine);
  this.clientRequest = clientRequest;
  if (smLogEntryProto != null) {
    this.smLogEntryProto = smLogEntryProto;
  } else {
    // No entry supplied — derive one from the client request.
    this.smLogEntryProto = ServerProtoUtils.toStateMachineLogEntryProto(clientRequest, null, null);
  }
  this.stateMachineContext = stateMachineContext;
}
/**
 * Get the entry at the given index, pairing it with a pending state machine
 * data read when the stored entry is only a placeholder.
 *
 * @throws RaftLogIOException if reading the state machine data fails
 */
@Override
public EntryWithData getEntryWithData(long index) throws RaftLogIOException {
  final LogEntryProto entry = get(index);
  if (!ServerProtoUtils.shouldReadStateMachineData(entry)) {
    // Entry is complete as stored; no state machine read needed.
    return new EntryWithData(entry, null);
  }
  try {
    return server
        .map(s -> s.getStateMachine().readStateMachineData(entry))
        .map(dataFuture -> new EntryWithData(entry, dataFuture))
        .orElseGet(() -> new EntryWithData(entry, null));
  } catch (Throwable e) {
    final String err = getSelfId() + ": Failed readStateMachineData for " + ServerProtoUtils.toLogEntryString(entry);
    LOG.error(err, e);
    throw new RaftLogIOException(err, JavaUtils.unwrapCompletionException(e));
  }
}
/** @return the state machine data of the entry, if it has a state machine entry. */
static Optional<ByteString> getStateMachineData(LogEntryProto entry) {
  return getStateMachineEntry(entry).map(smEntry -> smEntry.getStateMachineData());
}
/** @return the serialized size of the entry, delegating to {@code ServerProtoUtils.getSerializedSize}. */
@Override int getSerializedSize() { return ServerProtoUtils.getSerializedSize(entry); }
/**
 * @return true iff the entry has state machine data that is present but empty —
 *         i.e. a placeholder whose real data must be read from the state machine.
 */
static boolean shouldReadStateMachineData(LogEntryProto entry) {
  // filter(isEmpty).isPresent() == map(isEmpty).orElse(false)
  return getStateMachineData(entry).filter(ByteString::isEmpty).isPresent();
}
// NOTE(review): fragment — a trace statement from the middle of a truncation routine;
// the enclosing method is outside this view. Code kept byte-identical.
LOG.trace("{}: truncate to {}, index={}, ti={}, storedEntry={}, entries={}", server.getId(), truncateIndex, index, ServerProtoUtils.toTermIndex(entries[index]), storedEntry, ServerProtoUtils.toString(entries));
// NOTE(review): fragment of an AppendEntries handler (an earlier/alternate variant of the
// one above it in this file) — signature and braces outside this view. Code kept byte-identical.
+ leaderTerm + ", " + previous + ", " + leaderCommit + ", " + initializing + ", commits" + ProtoUtils.toString(commitInfos) + ", entries: " + ServerProtoUtils.toString(entries)); currentTerm = state.getCurrentTerm(); if (!recognized) { final AppendEntriesReplyProto reply = ServerProtoUtils.toAppendEntriesReplyProto( leaderId, getId(), groupId, currentTerm, nextIndex, NOT_LEADER, callId); if (LOG.isDebugEnabled()) { ServerProtoUtils.toAppendEntriesReplyProto(leaderId, getId(), groupId, currentTerm, Math.min(nextIndex, previous.getIndex()), INCONSISTENCY, callId); if (LOG.isDebugEnabled()) { LOG.debug("{}: inconsistency entries. Leader previous:{}, Reply:{}", getId(), previous, ServerProtoUtils.toString(reply)); nextIndex = entries[entries.length - 1].getIndex() + 1; final AppendEntriesReplyProto reply = ServerProtoUtils.toAppendEntriesReplyProto( leaderId, getId(), groupId, currentTerm, nextIndex, SUCCESS, callId); logAppendEntries(isHeartbeat, () -> getId() + ": succeeded to handle AppendEntries. Reply: " + ServerProtoUtils.toString(reply)); return JavaUtils.allOf(futures) .thenApply(v -> {
// NOTE(review): fragment of a cache appendEntry path with unbalanced braces — the error
// branch and closing braces are outside this view. Code kept byte-identical. Appears to
// strip state machine data before caching when caching is enabled — confirm in full source.
if (LOG.isTraceEnabled()) { LOG.trace("{}: appendEntry {}", getSelfId(), ServerProtoUtils.toLogEntryString(entry)); if (stateMachineCachingEnabled) { cache.appendEntry(ServerProtoUtils.removeStateMachineData(entry)); } else { cache.appendEntry(entry); LOG.error(getSelfId() + ": Failed to append " + ServerProtoUtils.toLogEntryString(entry), throwable); throw throwable;
/**
 * Build a simple test operation whose payload is the given op string,
 * optionally duplicating the payload as state machine data.
 */
private SimpleOperation(ClientId clientId, long callId, String op, boolean hasStateMachineData) {
  this.op = Objects.requireNonNull(op);
  final ByteString bytes = ProtoUtils.toByteString(op);
  // When requested, the same bytes double as the state machine data.
  final ByteString stateMachineData = hasStateMachineData ? bytes : null;
  this.smLogEntryProto = ServerProtoUtils.toStateMachineLogEntryProto(clientId, callId, bytes, stateMachineData);
}
/**
 * @return the recorded original serialized size when the entry is a placeholder
 *         (state machine entry present with empty data); otherwise the entry's
 *         own serialized size.
 */
static int getSerializedSize(LogEntryProto entry) {
  final Optional<StateMachineEntryProto> smEntry = getStateMachineEntry(entry)
      .filter(e -> e.getStateMachineData().isEmpty());
  if (smEntry.isPresent()) {
    return smEntry.get().getLogEntryProtoSerializedSize();
  }
  return entry.getSerializedSize();
}
/** @return the serialized size of the cached log entry, delegating to {@code ServerProtoUtils.getSerializedSize}. */
public int getSerializedSize() { return ServerProtoUtils.getSerializedSize(logEntry); }