// A response that has a segment state is always preferred over one that
// does not.
if (r1.hasSegmentState() != r2.hasSegmentState()) {
  return Booleans.compare(r1.hasSegmentState(), r2.hasSegmentState());
}

if (!r1.hasSegmentState()) {
  // Neither response has a segment, so neither can be used for
  // recovery: they compare equal.
  return 0;
}

SegmentStateProto r1Seg = r1.getSegmentState();
SegmentStateProto r2Seg = r2.getSegmentState();

// Prefer the response seen by the highest epoch, where an accepted
// recovery counts as highly as the last writer's epoch.
long r1SeenEpoch = Math.max(r1.getAcceptedInEpoch(), r1.getLastWriterEpoch());
long r2SeenEpoch = Math.max(r2.getAcceptedInEpoch(), r2.getLastWriterEpoch());

return ComparisonChain.start()
    .compare(r1SeenEpoch, r2SeenEpoch)
    .compare(r1.getSegmentState().getEndTxId(),
             r2.getSegmentState().getEndTxId())
    .result();
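// A hedged usage sketch (not verbatim from this dump): the comparator
// above is used to pick the most authoritative prepare-recovery response
// across the quorum. "prepareResponses" (a map from each AsyncLogger to
// its PrepareRecoveryResponseProto) and the
// "SegmentRecoveryComparator.INSTANCE" singleton are assumed names here.
Entry<AsyncLogger, PrepareRecoveryResponseProto> bestEntry =
    Collections.max(prepareResponses.entrySet(),
        SegmentRecoveryComparator.INSTANCE);
PrepareRecoveryResponseProto bestResponse = bestEntry.getValue();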
if (bestResponse.hasAcceptedInEpoch()) {
  LOG.info("Using already-accepted recovery for segment " +
      "starting at txid " + segmentTxId + ": " + bestEntry);
} else if (bestResponse.hasSegmentState()) {
  LOG.info("Using longest log: " + bestEntry);
} else {
  // None of the responses had a segment at this txid; sanity-check that
  // no logger claimed one.
  for (PrepareRecoveryResponseProto resp : prepareResponses.values()) {
    assert !resp.hasSegmentState() :
        "One of the loggers had a response, but no best logger " +
        "was found.";
  }
  return;
}

SegmentStateProto logToSync = bestResponse.getSegmentState();
assert segmentTxId == logToSync.getStartTxId();

// Sanity check: no logger should have seen a committed txid beyond the
// end of the segment we decided to synchronize to.
for (Entry<AsyncLogger, PrepareRecoveryResponseProto> e :
    prepareResponses.entrySet()) {
  AsyncLogger logger = e.getKey();
  PrepareRecoveryResponseProto resp = e.getValue();
  if (resp.hasLastCommittedTxId() &&
      resp.getLastCommittedTxId() > logToSync.getEndTxId()) {
    throw new AssertionError("Decided to synchronize log to " + logToSync +
        " but logger " + logger + " had seen txid " +
        resp.getLastCommittedTxId() + " committed");
  }
}
public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto buildPartial() {
  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto result =
      new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto(this);
  int from_bitField0_ = bitField0_;
  int to_bitField0_ = 0;
  if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
    to_bitField0_ |= 0x00000001;
  }
  if (segmentStateBuilder_ == null) {
    result.segmentState_ = segmentState_;
  } else {
    result.segmentState_ = segmentStateBuilder_.build();
  }
  if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
    to_bitField0_ |= 0x00000002;
  }
  result.acceptedInEpoch_ = acceptedInEpoch_;
  if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
    to_bitField0_ |= 0x00000004;
  }
  result.lastWriterEpoch_ = lastWriterEpoch_;
  if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
    to_bitField0_ |= 0x00000008;
  }
  result.lastCommittedTxId_ = lastCommittedTxId_;
  result.bitField0_ = to_bitField0_;
  onBuilt();
  return result;
}
    return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto.getDefaultInstance();
  case 17:
    return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto.getDefaultInstance();
  case 18:
    return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto.getDefaultInstance();
// With no log segment yet, prepareRecovery reports neither an accepted
// recovery nor a segment state.
assertFalse(prep.hasAcceptedInEpoch());
assertFalse(prep.hasSegmentState());

// (A segment is written and prepareRecovery is called again here.)
// Now a segment state is returned, but still no accepted recovery.
assertFalse(prep.hasAcceptedInEpoch());
assertTrue(prep.hasSegmentState());

ch.acceptRecovery(prep.getSegmentState(), new URL("file:///dev/null")).get();

ch.setEpoch(2);
prep = ch.prepareRecovery(1L).get();
assertEquals(1L, prep.getAcceptedInEpoch());
assertEquals(1L, prep.getSegmentState().getEndTxId());

// An acceptRecovery issued from the earlier epoch must be rejected.
try {
  ch.acceptRecovery(prep.getSegmentState(), new URL("file:///dev/null")).get();
  fail("accept from earlier epoch not rejected");
} catch (ExecutionException ioe) {
@Test
public void testComparisons() {
  Entry<AsyncLogger, PrepareRecoveryResponseProto> INPROGRESS_1_3 =
      makeEntry(PrepareRecoveryResponseProto.newBuilder()
          .setSegmentState(SegmentStateProto.newBuilder()
              .setStartTxId(1L)
              .setEndTxId(3L)       // end txid restored from the constant's name
              .setIsInProgress(true))
          .build());
  Entry<AsyncLogger, PrepareRecoveryResponseProto> INPROGRESS_1_4 =
      makeEntry(PrepareRecoveryResponseProto.newBuilder()
          .setSegmentState(SegmentStateProto.newBuilder()
              .setStartTxId(1L)
              .setEndTxId(4L)       // end txid restored from the constant's name
              .setIsInProgress(true))
          .build());
  Entry<AsyncLogger, PrepareRecoveryResponseProto> INPROGRESS_1_4_ACCEPTED =
      makeEntry(PrepareRecoveryResponseProto.newBuilder()
          .setSegmentState(SegmentStateProto.newBuilder()
              .setStartTxId(1L)
              .setEndTxId(4L)
              .setIsInProgress(true))
          .setAcceptedInEpoch(1L)   // distinguishes the "accepted" variant
          .build());
  makeEntry(PrepareRecoveryResponseProto.newBuilder()
      .setSegmentState(SegmentStateProto.newBuilder()
          .setStartTxId(1L)
PrepareRecoveryResponseProto.newBuilder();
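// A hedged sketch of assembling a full response with this builder. The
// field names come from the generated proto shown in this section; the
// specific values are illustrative only. setSegmentState() accepts the
// nested builder directly via the generated Builder overload.
PrepareRecoveryResponseProto resp = PrepareRecoveryResponseProto.newBuilder()
    .setSegmentState(SegmentStateProto.newBuilder()
        .setStartTxId(1L)
        .setEndTxId(3L)
        .setIsInProgress(true))
    .setAcceptedInEpoch(1L)
    .setLastWriterEpoch(1L)
    .setLastCommittedTxId(3L)
    .build();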
result = result && (hasSegmentState() == other.hasSegmentState());
if (hasSegmentState()) {
  result = result && getSegmentState()
      .equals(other.getSegmentState());
}
result = result && (hasAcceptedInEpoch() == other.hasAcceptedInEpoch());
if (hasAcceptedInEpoch()) {
  result = result && (getAcceptedInEpoch()
      == other.getAcceptedInEpoch());
}
result = result && (hasLastWriterEpoch() == other.hasLastWriterEpoch());
if (hasLastWriterEpoch()) {
  result = result && (getLastWriterEpoch()
      == other.getLastWriterEpoch());
}
result = result && (hasLastCommittedTxId() == other.hasLastCommittedTxId());
if (hasLastCommittedTxId()) {
  result = result && (getLastCommittedTxId()
      == other.getLastCommittedTxId());
}
result = result &&
    getUnknownFields().equals(other.getUnknownFields());
return result;
@java.lang.Override
public int hashCode() {
  if (memoizedHashCode != 0) {
    return memoizedHashCode;
  }
  int hash = 41;
  hash = (19 * hash) + getDescriptorForType().hashCode();
  if (hasSegmentState()) {
    hash = (37 * hash) + SEGMENTSTATE_FIELD_NUMBER;
    hash = (53 * hash) + getSegmentState().hashCode();
  }
  if (hasAcceptedInEpoch()) {
    hash = (37 * hash) + ACCEPTEDINEPOCH_FIELD_NUMBER;
    hash = (53 * hash) + hashLong(getAcceptedInEpoch());
  }
  if (hasLastWriterEpoch()) {
    hash = (37 * hash) + LASTWRITEREPOCH_FIELD_NUMBER;
    hash = (53 * hash) + hashLong(getLastWriterEpoch());
  }
  if (hasLastCommittedTxId()) {
    hash = (37 * hash) + LASTCOMMITTEDTXID_FIELD_NUMBER;
    hash = (53 * hash) + hashLong(getLastCommittedTxId());
  }
  hash = (29 * hash) + getUnknownFields().hashCode();
  memoizedHashCode = hash;
  return hash;
}