subBuilder.mergeFrom(nextMarker_);
nextMarker_ = subBuilder.buildPartial();
subBuilder.mergeFrom(block_);
block_ = subBuilder.buildPartial();
subBuilder.mergeFrom(block_);
block_ = subBuilder.buildPartial();
public Builder mergeFrom(
    com.google.protobuf.CodedInputStream input,
    com.google.protobuf.ExtensionRegistryLite extensionRegistry)
    throws java.io.IOException {
  org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto parsedMessage = null;
  try {
    parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
  } catch (com.google.protobuf.InvalidProtocolBufferException e) {
    parsedMessage = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto) e.getUnfinishedMessage();
    throw e;
  } finally {
    if (parsedMessage != null) {
      mergeFrom(parsedMessage);
    }
  }
  return this;
}

private int bitField0_;
public Builder clone() {
  return create().mergeFrom(buildPartial());
}
public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto prototype) {
  return newBuilder().mergeFrom(prototype);
}

public Builder toBuilder() {
  return newBuilder(this);
}
public Builder mergeFrom(com.google.protobuf.Message other) {
  if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto) {
    return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto) other);
  } else {
    super.mergeFrom(other);
    return this;
  }
}
subBuilder.mergeFrom(truncateBlock_);
truncateBlock_ = subBuilder.buildPartial();
subBuilder.mergeFrom(block_);
block_ = subBuilder.buildPartial();
subBuilder.mergeFrom(key_);
key_ = subBuilder.buildPartial();
subBuilder.mergeFrom(marker_);
marker_ = subBuilder.buildPartial();
subBuilder.mergeFrom(key_);
key_ = subBuilder.buildPartial();
/**
 * <code>optional .hadoop.hdfs.BlockProto truncateBlock = 3;</code>
 *
 * <pre>
 * New block for recovery (truncate)
 * </pre>
 */
public Builder mergeTruncateBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto value) {
  if (truncateBlockBuilder_ == null) {
    if (((bitField0_ & 0x00000004) == 0x00000004) &&
        truncateBlock_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance()) {
      truncateBlock_ =
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.newBuilder(truncateBlock_).mergeFrom(value).buildPartial();
    } else {
      truncateBlock_ = value;
    }
    onChanged();
  } else {
    truncateBlockBuilder_.mergeFrom(value);
  }
  bitField0_ |= 0x00000004;
  return this;
}
/**
 * <code>required .hadoop.hdfs.BlockProto block = 1;</code>
 *
 * <pre>
 * Block
 * </pre>
 */
public Builder mergeBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto value) {
  if (blockBuilder_ == null) {
    if (((bitField0_ & 0x00000001) == 0x00000001) &&
        block_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance()) {
      block_ =
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.newBuilder(block_).mergeFrom(value).buildPartial();
    } else {
      block_ = value;
    }
    onChanged();
  } else {
    blockBuilder_.mergeFrom(value);
  }
  bitField0_ |= 0x00000001;
  return this;
}
/**
 * <code>optional .hadoop.hdfs.BlockProto block = 3;</code>
 *
 * <pre>
 * block information
 * </pre>
 */
public Builder mergeBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto value) {
  if (blockBuilder_ == null) {
    if (((bitField0_ & 0x00000004) == 0x00000004) &&
        block_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance()) {
      block_ =
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.newBuilder(block_).mergeFrom(value).buildPartial();
    } else {
      block_ = value;
    }
    onChanged();
  } else {
    blockBuilder_.mergeFrom(value);
  }
  bitField0_ |= 0x00000004;
  return this;
}
/**
 * <code>optional .hadoop.hdfs.BlockProto marker = 1;</code>
 */
public Builder mergeMarker(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto value) {
  if (markerBuilder_ == null) {
    if (((bitField0_ & 0x00000001) == 0x00000001) &&
        marker_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance()) {
      marker_ =
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.newBuilder(marker_).mergeFrom(value).buildPartial();
    } else {
      marker_ = value;
    }
    onChanged();
  } else {
    markerBuilder_.mergeFrom(value);
  }
  bitField0_ |= 0x00000001;
  return this;
}
/**
 * <code>optional .hadoop.hdfs.BlockProto nextMarker = 2;</code>
 */
public Builder mergeNextMarker(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto value) {
  if (nextMarkerBuilder_ == null) {
    if (((bitField0_ & 0x00000002) == 0x00000002) &&
        nextMarker_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance()) {
      nextMarker_ =
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.newBuilder(nextMarker_).mergeFrom(value).buildPartial();
    } else {
      nextMarker_ = value;
    }
    onChanged();
  } else {
    nextMarkerBuilder_.mergeFrom(value);
  }
  bitField0_ |= 0x00000002;
  return this;
}
/**
 * <code>required .hadoop.hdfs.BlockProto key = 1;</code>
 */
public Builder mergeKey(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto value) {
  if (keyBuilder_ == null) {
    if (((bitField0_ & 0x00000001) == 0x00000001) &&
        key_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance()) {
      key_ =
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.newBuilder(key_).mergeFrom(value).buildPartial();
    } else {
      key_ = value;
    }
    onChanged();
  } else {
    keyBuilder_.mergeFrom(value);
  }
  bitField0_ |= 0x00000001;
  return this;
}
/**
 * <code>optional .hadoop.hdfs.BlockProto key = 1;</code>
 */
public Builder mergeKey(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto value) {
  if (keyBuilder_ == null) {
    if (((bitField0_ & 0x00000001) == 0x00000001) &&
        key_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance()) {
      key_ =
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.newBuilder(key_).mergeFrom(value).buildPartial();
    } else {
      key_ = value;
    }
    onChanged();
  } else {
    keyBuilder_.mergeFrom(value);
  }
  bitField0_ |= 0x00000001;
  return this;
}
/**
 * <code>required .hadoop.hdfs.BlockProto block = 1;</code>
 */
public Builder mergeBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto value) {
  if (blockBuilder_ == null) {
    if (((bitField0_ & 0x00000001) == 0x00000001) &&
        block_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance()) {
      block_ =
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.newBuilder(block_).mergeFrom(value).buildPartial();
    } else {
      block_ = value;
    }
    onChanged();
  } else {
    blockBuilder_.mergeFrom(value);
  }
  bitField0_ |= 0x00000001;
  return this;
}
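/*
 * Illustrative sketch (not part of the generated file): shows the field-by-field merge
 * semantics that the generated mergeBlock()/mergeKey()/mergeMarker() builders above rely
 * on when the target field is already populated. The BlockProto accessors used here
 * (setBlockId, setGenStamp, setNumBytes, getGenStamp, getNumBytes) are assumed from the
 * hdfs.proto BlockProto definition; adjust names if the message differs.
 */
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;

public class BlockProtoMergeSketch {
  public static void main(String[] args) {
    // An existing value with numBytes left unset.
    HdfsProtos.BlockProto base = HdfsProtos.BlockProto.newBuilder()
        .setBlockId(1L)
        .setGenStamp(100L)
        .buildPartial();

    // An incoming value carrying a newer genStamp and a length.
    HdfsProtos.BlockProto update = HdfsProtos.BlockProto.newBuilder()
        .setBlockId(1L)
        .setGenStamp(101L)
        .setNumBytes(4096L)
        .build();

    // Field-by-field merge: fields set in 'update' overwrite those in 'base', while
    // fields unset in 'update' keep the value from 'base'. This is the same
    // newBuilder(existing).mergeFrom(value).buildPartial() path taken by the merge
    // methods above.
    HdfsProtos.BlockProto merged = HdfsProtos.BlockProto.newBuilder(base)
        .mergeFrom(update)
        .buildPartial();

    System.out.println(merged.getGenStamp());  // 101
    System.out.println(merged.getNumBytes());  // 4096
  }
}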