/**
 * @return the current state of the given segment, or null if the
 * segment does not exist.
 */
@VisibleForTesting
SegmentStateProto getSegmentInfo(long segmentTxId) throws IOException {
  EditLogFile elf = fjm.getLogFile(segmentTxId);
  if (elf == null) {
    return null;
  }
  if (elf.isInProgress()) {
    elf.scanLog(Long.MAX_VALUE, false);
  }
  if (elf.getLastTxId() == HdfsServerConstants.INVALID_TXID) {
    LOG.info("Edit log file " + elf + " appears to be empty. " +
        "Moving it aside..." + " ; journal id: " + journalId);
    elf.moveAsideEmptyFile();
    return null;
  }
  SegmentStateProto ret = SegmentStateProto.newBuilder()
      .setStartTxId(segmentTxId)
      .setEndTxId(elf.getLastTxId())
      .setIsInProgress(elf.isInProgress())
      .build();
  LOG.info("getSegmentInfo(" + segmentTxId + "): " + elf + " -> " +
      TextFormat.shortDebugString(ret) + " ; journal id: " + journalId);
  return ret;
}
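// Hypothetical usage sketch (not from the source): how a test might probe a
// segment through the @VisibleForTesting accessor above. The `journal`
// variable and the txid 1L are assumptions for illustration only.
SegmentStateProto seg = journal.getSegmentInfo(1L);
if (seg == null) {
  // Either no log file starts at txid 1, or the file was empty and moved aside.
} else {
  assert seg.getStartTxId() == 1L;
  assert seg.getEndTxId() >= seg.getStartTxId();
}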
// Merge the newly parsed segmentState into the previously seen value,
// keeping the combined (possibly still partial) message.
subBuilder.mergeFrom(segmentState_);
segmentState_ = subBuilder.buildPartial();
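// Hedged reconstruction of the surrounding generated parsing code (assumed
// context; only the two lines above appear in the source). In protobuf-java
// 2.x generated parsers, this fragment runs when segmentState appears a
// second time on the wire, merging the new message into the earlier one:
case 10: {
  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder subBuilder = null;
  if (((bitField0_ & 0x00000001) == 0x00000001)) {
    subBuilder = segmentState_.toBuilder();
  }
  segmentState_ = input.readMessage(
      org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.PARSER,
      extensionRegistry);
  if (subBuilder != null) {
    subBuilder.mergeFrom(segmentState_);
    segmentState_ = subBuilder.buildPartial();
  }
  bitField0_ |= 0x00000001;
  break;
}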
// Same pattern for stateToAccept: fold the newly parsed message into the
// existing one rather than discarding previously read fields.
subBuilder.mergeFrom(stateToAccept_);
stateToAccept_ = subBuilder.buildPartial();
// In-progress segment [1, 3] from a writer in epoch 0.
makeEntry(PrepareRecoveryResponseProto.newBuilder()
    .setSegmentState(SegmentStateProto.newBuilder()
        .setStartTxId(1L)
        .setEndTxId(3L)
        .setIsInProgress(true))
    .setLastWriterEpoch(0L)
    .build());
// In-progress segment [1, 4] from a writer in epoch 0.
makeEntry(PrepareRecoveryResponseProto.newBuilder()
    .setSegmentState(SegmentStateProto.newBuilder()
        .setStartTxId(1L)
        .setEndTxId(4L)
        .setIsInProgress(true))
    .setLastWriterEpoch(0L)
    .build());
// In-progress segment [1, 4] whose recovery was already accepted in epoch 1.
makeEntry(PrepareRecoveryResponseProto.newBuilder()
    .setSegmentState(SegmentStateProto.newBuilder()
        .setStartTxId(1L)
        .setEndTxId(4L)
        .setIsInProgress(true))
    .setLastWriterEpoch(0L)
    .setAcceptedInEpoch(1L)
    .build());
// Finalized segment [1, 3]; lastWriterEpoch is a required field, so it is
// set here as well, matching the sibling entries.
makeEntry(PrepareRecoveryResponseProto.newBuilder()
    .setSegmentState(SegmentStateProto.newBuilder()
        .setStartTxId(1L)
        .setEndTxId(3L)
        .setIsInProgress(false))
    .setLastWriterEpoch(0L)
    .build());
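// Hedged sketch (assumed context): fixtures like those above typically feed
// a recovery comparator that prefers finalized segments, then higher
// accepted epochs, then longer segments. SegmentRecoveryComparator.INSTANCE
// and the local names below are assumptions about the surrounding suite:
Map.Entry<AsyncLogger, PrepareRecoveryResponseProto> inProgress1To3 =
    makeEntry(PrepareRecoveryResponseProto.newBuilder()
        .setSegmentState(SegmentStateProto.newBuilder()
            .setStartTxId(1L).setEndTxId(3L).setIsInProgress(true))
        .setLastWriterEpoch(0L)
        .build());
Map.Entry<AsyncLogger, PrepareRecoveryResponseProto> inProgress1To4 =
    makeEntry(PrepareRecoveryResponseProto.newBuilder()
        .setSegmentState(SegmentStateProto.newBuilder()
            .setStartTxId(1L).setEndTxId(4L).setIsInProgress(true))
        .setLastWriterEpoch(0L)
        .build());
// The longer in-progress segment should sort as "preferred" (greater):
assertTrue(SegmentRecoveryComparator.INSTANCE.compare(inProgress1To3, inProgress1To4) < 0);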
/**
 * @return the current state of the given segment, or null if the
 * segment does not exist.
 */
@VisibleForTesting
SegmentStateProto getSegmentInfo(long segmentTxId) throws IOException {
  EditLogFile elf = fjm.getLogFile(segmentTxId);
  if (elf == null) {
    return null;
  }
  if (elf.isInProgress()) {
    elf.scanLog();
  }
  if (elf.getLastTxId() == HdfsConstants.INVALID_TXID) {
    LOG.info("Edit log file " + elf + " appears to be empty. " +
        "Moving it aside...");
    elf.moveAsideEmptyFile();
    return null;
  }
  SegmentStateProto ret = SegmentStateProto.newBuilder()
      .setStartTxId(segmentTxId)
      .setEndTxId(elf.getLastTxId())
      .setIsInProgress(elf.isInProgress())
      .build();
  LOG.info("getSegmentInfo(" + segmentTxId + "): " + elf + " -> " +
      TextFormat.shortDebugString(ret));
  return ret;
}
/**
 * <code>required .hadoop.hdfs.qjournal.SegmentStateProto stateToAccept = 2;</code>
 *
 * <pre>
 ** Details on the segment to recover
 * </pre>
 */
public Builder mergeStateToAccept(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto value) {
  if (stateToAcceptBuilder_ == null) {
    if (((bitField0_ & 0x00000002) == 0x00000002) &&
        stateToAccept_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance()) {
      // Field already set: merge the incoming value into the existing message.
      stateToAccept_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.newBuilder(stateToAccept_).mergeFrom(value).buildPartial();
    } else {
      stateToAccept_ = value;
    }
    onChanged();
  } else {
    stateToAcceptBuilder_.mergeFrom(value);
  }
  bitField0_ |= 0x00000002;
  return this;
}
/**
 * <code>optional .hadoop.hdfs.qjournal.SegmentStateProto segmentState = 1;</code>
 */
public Builder mergeSegmentState(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto value) {
  if (segmentStateBuilder_ == null) {
    if (((bitField0_ & 0x00000001) == 0x00000001) &&
        segmentState_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance()) {
      segmentState_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.newBuilder(segmentState_).mergeFrom(value).buildPartial();
    } else {
      segmentState_ = value;
    }
    onChanged();
  } else {
    segmentStateBuilder_.mergeFrom(value);
  }
  bitField0_ |= 0x00000001;
  return this;
}
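// Hedged illustration (assumed context; not from the source) of the merge
// semantics implemented above: merging combines the incoming message with an
// already-set field, overwriting only the fields present in the new value.
PrepareRecoveryResponseProto.Builder b = PrepareRecoveryResponseProto.newBuilder()
    .setSegmentState(SegmentStateProto.newBuilder()
        .setStartTxId(1L).setEndTxId(3L).setIsInProgress(true).build());
// buildPartial() skips the required-field check, so only endTxId is set
// here; the merge updates endTxId while startTxId and isInProgress survive:
b.mergeSegmentState(SegmentStateProto.newBuilder()
    .setEndTxId(5L).buildPartial());
// b.getSegmentState() now reads { startTxId: 1, endTxId: 5, isInProgress: true }.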
/**
 * <code>required .hadoop.hdfs.qjournal.SegmentStateProto stateToAccept = 2;</code>
 *
 * <pre>
 ** Details on the segment to recover
 * </pre>
 */
public Builder setStateToAccept(
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder builderForValue) {
  if (stateToAcceptBuilder_ == null) {
    stateToAccept_ = builderForValue.build();
    onChanged();
  } else {
    stateToAcceptBuilder_.setMessage(builderForValue.build());
  }
  bitField0_ |= 0x00000002;
  return this;
}
/**
 * <code>required .hadoop.hdfs.qjournal.SegmentStateProto segmentState = 1;</code>
 */
public Builder mergeSegmentState(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto value) {
  if (segmentStateBuilder_ == null) {
    if (((bitField0_ & 0x00000001) == 0x00000001) &&
        segmentState_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance()) {
      segmentState_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.newBuilder(segmentState_).mergeFrom(value).buildPartial();
    } else {
      segmentState_ = value;
    }
    onChanged();
  } else {
    segmentStateBuilder_.mergeFrom(value);
  }
  bitField0_ |= 0x00000001;
  return this;
}
/**
 * <code>optional .hadoop.hdfs.qjournal.SegmentStateProto segmentState = 1;</code>
 */
public Builder setSegmentState(
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder builderForValue) {
  if (segmentStateBuilder_ == null) {
    segmentState_ = builderForValue.build();
    onChanged();
  } else {
    segmentStateBuilder_.setMessage(builderForValue.build());
  }
  bitField0_ |= 0x00000001;
  return this;
}
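// Hedged usage sketch (grounded in the test fixtures earlier in this
// section): the Builder overload above lets a caller nest a sub-builder
// directly, without calling build() on it first:
PrepareRecoveryResponseProto resp = PrepareRecoveryResponseProto.newBuilder()
    .setSegmentState(SegmentStateProto.newBuilder()
        .setStartTxId(1L)
        .setEndTxId(3L)
        .setIsInProgress(false))
    .setLastWriterEpoch(0L)
    .build();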