// protoc-generated builder method — do not hand-edit; regenerate from DatanodeProtocol.proto.
// Clears the optional blkIdCmd field (field 8): without a nested builder it restores the
// shared default instance and fires onChanged(); with one it delegates to the builder's
// clear(). In both paths the has-bit 0x00000080 is cleared so hasBlkIdCmd() returns false.
/** * <code>optional .hadoop.hdfs.datanode.BlockIdCommandProto blkIdCmd = 8;</code> */ public Builder clearBlkIdCmd() { if (blkIdCmdBuilder_ == null) { blkIdCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto.getDefaultInstance(); onChanged(); } else { blkIdCmdBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000080); return this; } /**
// NOTE(review): this line is an incomplete fragment — it duplicates the interior of the
// clearBlkIdCmd() body seen elsewhere in this file (default-instance reset / builder clear)
// and is not a valid standalone definition. Presumably an extraction/duplication artifact;
// confirm against the regenerated DatanodeProtocolProtos.java before relying on this file.
blkIdCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto.getDefaultInstance(); } else { blkIdCmdBuilder_.clear();
// protoc-generated builder method — do not hand-edit; regenerate from DatanodeProtocol.proto.
// Merges the set fields of another BlockIdCommandProto into this builder: copies action when
// present; copies blockPoolId when present and sets its has-bit 0x00000002; for the repeated
// blockIds, adopts the other message's (immutable) list directly when the local list is empty
// (avoiding a copy, and clearing the local mutability bit 0x00000004), otherwise makes the
// local list mutable and appends. Finally merges unknown fields and returns this for chaining.
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto.getDefaultInstance()) return this; if (other.hasAction()) { setAction(other.getAction()); } if (other.hasBlockPoolId()) { bitField0_ |= 0x00000002; blockPoolId_ = other.blockPoolId_; onChanged(); } if (!other.blockIds_.isEmpty()) { if (blockIds_.isEmpty()) { blockIds_ = other.blockIds_; bitField0_ = (bitField0_ & ~0x00000004); } else { ensureBlockIdsIsMutable(); blockIds_.addAll(other.blockIds_); } onChanged(); } this.mergeUnknownFields(other.getUnknownFields()); return this; }
// NOTE(review): incomplete fragment, byte-identical to the one above — again the interior of a
// clearBlkIdCmd() body, not a valid standalone definition. Presumably the same extraction or
// duplication artifact; verify this file against freshly regenerated protoc output.
blkIdCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto.getDefaultInstance(); } else { blkIdCmdBuilder_.clear();
// protoc-generated builder method (duplicate occurrence of mergeFrom(BlockIdCommandProto)) —
// do not hand-edit. Short-circuits on the default instance; copies action and blockPoolId
// (has-bit 0x00000002) when set; adopts or appends the repeated blockIds list depending on
// whether the local list is empty; then merges unknown fields and returns this.
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto.getDefaultInstance()) return this; if (other.hasAction()) { setAction(other.getAction()); } if (other.hasBlockPoolId()) { bitField0_ |= 0x00000002; blockPoolId_ = other.blockPoolId_; onChanged(); } if (!other.blockIds_.isEmpty()) { if (blockIds_.isEmpty()) { blockIds_ = other.blockIds_; bitField0_ = (bitField0_ & ~0x00000004); } else { ensureBlockIdsIsMutable(); blockIds_.addAll(other.blockIds_); } onChanged(); } this.mergeUnknownFields(other.getUnknownFields()); return this; }
// protoc-generated builder method (another duplicate of mergeFrom(BlockIdCommandProto)) —
// do not hand-edit. Behavior identical to the copies above: merge set fields from `other`,
// preferring list adoption over element-wise copy when the local blockIds list is empty.
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto other) { if (other == org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto.getDefaultInstance()) return this; if (other.hasAction()) { setAction(other.getAction()); } if (other.hasBlockPoolId()) { bitField0_ |= 0x00000002; blockPoolId_ = other.blockPoolId_; onChanged(); } if (!other.blockIds_.isEmpty()) { if (blockIds_.isEmpty()) { blockIds_ = other.blockIds_; bitField0_ = (bitField0_ & ~0x00000004); } else { ensureBlockIdsIsMutable(); blockIds_.addAll(other.blockIds_); } onChanged(); } this.mergeUnknownFields(other.getUnknownFields()); return this; }
// protoc-generated builder method — do not hand-edit; regenerate from DatanodeProtocol.proto.
// Merges `value` into the optional blkIdCmd field (field 8). Without a nested builder: if the
// field is already set (has-bit 0x00000080) and is not the shared default instance, the existing
// message and `value` are combined via newBuilder(...).mergeFrom(value).buildPartial(); otherwise
// `value` replaces the field outright. With a nested builder, merging is delegated to it. The
// has-bit 0x00000080 is set on every path so hasBlkIdCmd() becomes true.
/** * <code>optional .hadoop.hdfs.datanode.BlockIdCommandProto blkIdCmd = 8;</code> */ public Builder mergeBlkIdCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto value) { if (blkIdCmdBuilder_ == null) { if (((bitField0_ & 0x00000080) == 0x00000080) && blkIdCmd_ != org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto.getDefaultInstance()) { blkIdCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto.newBuilder(blkIdCmd_).mergeFrom(value).buildPartial(); } else { blkIdCmd_ = value; } onChanged(); } else { blkIdCmdBuilder_.mergeFrom(value); } bitField0_ |= 0x00000080; return this; } /**
// protoc-generated builder method (duplicate occurrence of mergeBlkIdCmd) — do not hand-edit.
// Same logic as the copy above: merge into the existing blkIdCmd_ message when the field is set
// and non-default, otherwise assign; delegate to the nested builder when present; always set
// has-bit 0x00000080.
/** * <code>optional .hadoop.hdfs.datanode.BlockIdCommandProto blkIdCmd = 8;</code> */ public Builder mergeBlkIdCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto value) { if (blkIdCmdBuilder_ == null) { if (((bitField0_ & 0x00000080) == 0x00000080) && blkIdCmd_ != org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto.getDefaultInstance()) { blkIdCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto.newBuilder(blkIdCmd_).mergeFrom(value).buildPartial(); } else { blkIdCmd_ = value; } onChanged(); } else { blkIdCmdBuilder_.mergeFrom(value); } bitField0_ |= 0x00000080; return this; } /**
// protoc-generated accessor — do not hand-edit. Returns the shared, immutable default
// (all-fields-unset) instance of BlockIdCommandProto.
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto.getDefaultInstance(); }
// protoc-generated accessor (duplicate occurrence) — do not hand-edit. Returns the shared
// default instance of BlockIdCommandProto.
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto.getDefaultInstance(); }
// protoc-generated builder method (duplicate occurrence of clearBlkIdCmd) — do not hand-edit.
// Resets field 8 to the default instance (or delegates to the nested builder) and clears the
// has-bit 0x00000080.
/** * <code>optional .hadoop.hdfs.datanode.BlockIdCommandProto blkIdCmd = 8;</code> */ public Builder clearBlkIdCmd() { if (blkIdCmdBuilder_ == null) { blkIdCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto.getDefaultInstance(); onChanged(); } else { blkIdCmdBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000080); return this; } /**
// protoc-generated builder method (another duplicate of clearBlkIdCmd) — do not hand-edit.
// Identical to the copies above: restore default / clear nested builder, drop has-bit 0x00000080.
/** * <code>optional .hadoop.hdfs.datanode.BlockIdCommandProto blkIdCmd = 8;</code> */ public Builder clearBlkIdCmd() { if (blkIdCmdBuilder_ == null) { blkIdCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto.getDefaultInstance(); onChanged(); } else { blkIdCmdBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000080); return this; } /**
// protoc-generated builder method — do not hand-edit; regenerate from DatanodeProtocol.proto.
// Clears the required blockPoolId string (field 2): drops its has-bit 0x00000002, resets the
// backing field to the default instance's value, and fires onChanged().
/** * <code>required string blockPoolId = 2;</code> */ public Builder clearBlockPoolId() { bitField0_ = (bitField0_ & ~0x00000002); blockPoolId_ = getDefaultInstance().getBlockPoolId(); onChanged(); return this; } /**
// protoc-generated initializer — do not hand-edit; regenerate from DatanodeProtocol.proto.
// Sets every field of (presumably) DatanodeCommandProto to its default: cmdType to the first
// enum value (BalancerBandwidthCommand) and each optional sub-command message to its shared
// default instance. memoizedIsInitialized = -1 means "isInitialized not yet computed".
private void initFields() { cmdType_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Type.BalancerBandwidthCommand; balancerCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.getDefaultInstance(); blkCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.getDefaultInstance(); recoveryCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto.getDefaultInstance(); finalizeCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto.getDefaultInstance(); keyUpdateCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.getDefaultInstance(); registerCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto.getDefaultInstance(); blkIdCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto.getDefaultInstance(); } private byte memoizedIsInitialized = -1;
// protoc-generated builder method (duplicate occurrence of clearBlockPoolId) — do not hand-edit.
// Drops has-bit 0x00000002, restores the default blockPoolId value, and fires onChanged().
/** * <code>required string blockPoolId = 2;</code> */ public Builder clearBlockPoolId() { bitField0_ = (bitField0_ & ~0x00000002); blockPoolId_ = getDefaultInstance().getBlockPoolId(); onChanged(); return this; } /**
// protoc-generated initializer (duplicate occurrence of initFields) — do not hand-edit.
// Same as the copy above: default-initialize cmdType and all optional sub-command messages.
private void initFields() { cmdType_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Type.BalancerBandwidthCommand; balancerCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.getDefaultInstance(); blkCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.getDefaultInstance(); recoveryCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto.getDefaultInstance(); finalizeCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto.getDefaultInstance(); keyUpdateCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.getDefaultInstance(); registerCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto.getDefaultInstance(); blkIdCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto.getDefaultInstance(); } private byte memoizedIsInitialized = -1;
// protoc-generated builder method (another duplicate of mergeBlkIdCmd) — do not hand-edit.
// Merge `value` into an already-set, non-default blkIdCmd_ via buildPartial, else assign;
// delegate to the nested builder when present; always set has-bit 0x00000080.
/** * <code>optional .hadoop.hdfs.datanode.BlockIdCommandProto blkIdCmd = 8;</code> */ public Builder mergeBlkIdCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto value) { if (blkIdCmdBuilder_ == null) { if (((bitField0_ & 0x00000080) == 0x00000080) && blkIdCmd_ != org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto.getDefaultInstance()) { blkIdCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto.newBuilder(blkIdCmd_).mergeFrom(value).buildPartial(); } else { blkIdCmd_ = value; } onChanged(); } else { blkIdCmdBuilder_.mergeFrom(value); } bitField0_ |= 0x00000080; return this; } /**
// protoc-generated accessor (another duplicate occurrence) — do not hand-edit. Returns the
// shared default instance of BlockIdCommandProto.
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto getDefaultInstanceForType() { return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto.getDefaultInstance(); }
// protoc-generated builder method (another duplicate of clearBlockPoolId) — do not hand-edit.
// Drops has-bit 0x00000002, restores the default blockPoolId value, and fires onChanged().
/** * <code>required string blockPoolId = 2;</code> */ public Builder clearBlockPoolId() { bitField0_ = (bitField0_ & ~0x00000002); blockPoolId_ = getDefaultInstance().getBlockPoolId(); onChanged(); return this; } /**
// protoc-generated initializer — do not hand-edit; regenerate from DatanodeProtocol.proto.
// Extended variant of initFields seen above: additionally defaults blkECReconstructionCmd_,
// so this copy presumably comes from a newer generation of the .proto (erasure-coding support).
// All fields are set to their defaults; memoizedIsInitialized = -1 marks the cached
// isInitialized result as not yet computed.
private void initFields() { cmdType_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Type.BalancerBandwidthCommand; balancerCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.getDefaultInstance(); blkCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.getDefaultInstance(); recoveryCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto.getDefaultInstance(); finalizeCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto.getDefaultInstance(); keyUpdateCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.getDefaultInstance(); registerCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto.getDefaultInstance(); blkIdCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto.getDefaultInstance(); blkECReconstructionCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockECReconstructionCommandProto.getDefaultInstance(); } private byte memoizedIsInitialized = -1;