/**
 * <code>required .hadoop.hdfs.ChecksumProto checksum = 1;</code>
 */
public Builder setChecksum(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto value) {
  if (checksumBuilder_ == null) {
    if (value == null) {
      throw new NullPointerException();
    }
    checksum_ = value;
    onChanged();
  } else {
    checksumBuilder_.setMessage(value);
  }
  bitField0_ |= 0x00000001;
  return this;
}
/**
 * <code>required .hadoop.hdfs.ChecksumProto checksum = 1;</code>
 */
public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.Builder getChecksumBuilder() {
  bitField0_ |= 0x00000001;
  onChanged();
  return getChecksumFieldBuilder().getBuilder();
}
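// Illustrative sketch, not part of the generated file: getChecksumBuilder()
// hands out a nested Builder that writes through to this message, so the
// checksum sub-message can be filled in place without building a separate
// ChecksumProto first. The setType/setBytesPerChecksum calls assume the
// ChecksumProto shape declared in datatransfer.proto.
//
//   builder.getChecksumBuilder()
//       .setType(HdfsProtos.ChecksumTypeProto.CHECKSUM_CRC32C)
//       .setBytesPerChecksum(512);
//   // hasChecksum() is now true on the parent builder.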
/**
 * <code>required .hadoop.hdfs.ChecksumProto checksum = 1;</code>
 */
public Builder mergeChecksum(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto value) {
  if (checksumBuilder_ == null) {
    if (((bitField0_ & 0x00000001) == 0x00000001) &&
        checksum_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.getDefaultInstance()) {
      checksum_ =
        org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.newBuilder(checksum_).mergeFrom(value).buildPartial();
    } else {
      checksum_ = value;
    }
    onChanged();
  } else {
    checksumBuilder_.mergeFrom(value);
  }
  bitField0_ |= 0x00000001;
  return this;
}
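// Illustrative sketch, not part of the generated file: unlike setChecksum(),
// which replaces the field wholesale, mergeChecksum() folds the incoming
// value into an existing non-default checksum field by field. The values
// below are hypothetical.
//
//   builder.setChecksum(ChecksumProto.newBuilder()
//       .setType(HdfsProtos.ChecksumTypeProto.CHECKSUM_CRC32)
//       .setBytesPerChecksum(512)
//       .build());
//   builder.mergeChecksum(ChecksumProto.newBuilder()
//       .setType(HdfsProtos.ChecksumTypeProto.CHECKSUM_CRC32C)
//       .buildPartial());  // buildPartial(): bytesPerChecksum left unset
//   // Result: type == CHECKSUM_CRC32C, bytesPerChecksum still 512.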
/**
 * <code>required uint64 chunkOffset = 2;</code>
 *
 * <pre>
 **
 * The offset into the block at which the first packet
 * will start. This is necessary since reads will align
 * backwards to a checksum chunk boundary.
 * </pre>
 */
public Builder setChunkOffset(long value) {
  bitField0_ |= 0x00000002;
  chunkOffset_ = value;
  onChanged();
  return this;
}
/**
 * <code>required uint64 chunkOffset = 2;</code>
 *
 * <pre>
 **
 * The offset into the block at which the first packet
 * will start. This is necessary since reads will align
 * backwards to a checksum chunk boundary.
 * </pre>
 */
public Builder clearChunkOffset() {
  bitField0_ = (bitField0_ & ~0x00000002);
  chunkOffset_ = 0L;
  onChanged();
  return this;
}
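// Worked example, not part of the generated file: per the Javadoc above,
// chunkOffset is the requested read offset rounded down to a checksum chunk
// boundary, so the first packet may begin before the byte the client asked
// for. With bytesPerChecksum = 512 and a requested offset of 1300:
//
//   long bytesPerChecksum = 512L;
//   long requestedOffset = 1300L;
//   long chunkOffset = requestedOffset - (requestedOffset % bytesPerChecksum);
//   builder.setChunkOffset(chunkOffset);  // 1024
//   // The client skips 1300 - 1024 = 276 bytes of the first packet to
//   // reach the data it actually requested.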
/**
 * <code>required .hadoop.hdfs.ChecksumProto checksum = 1;</code>
 */
public Builder setChecksum(
    org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.Builder builderForValue) {
  if (checksumBuilder_ == null) {
    checksum_ = builderForValue.build();
    onChanged();
  } else {
    checksumBuilder_.setMessage(builderForValue.build());
  }
  bitField0_ |= 0x00000001;
  return this;
}
/**
 * <code>required .hadoop.hdfs.ChecksumProto checksum = 1;</code>
 */
public Builder clearChecksum() {
  if (checksumBuilder_ == null) {
    checksum_ = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.getDefaultInstance();
    onChanged();
  } else {
    checksumBuilder_.clear();
  }
  bitField0_ = (bitField0_ & ~0x00000001);
  return this;
}
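// End-to-end sketch, not part of the generated file: the two fields above
// match the shape of ReadOpChecksumInfoProto in datatransfer.proto (an
// assumption, since the enclosing class is outside this excerpt). Both
// fields are required, so build() throws UninitializedMessageException
// unless both are set; buildPartial() skips that check.
//
//   DataTransferProtos.ReadOpChecksumInfoProto info =
//       DataTransferProtos.ReadOpChecksumInfoProto.newBuilder()
//           .setChecksum(DataTransferProtos.ChecksumProto.newBuilder()
//               .setType(HdfsProtos.ChecksumTypeProto.CHECKSUM_CRC32C)
//               .setBytesPerChecksum(512)
//               .build())
//           .setChunkOffset(1024L)
//           .build();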