/**
 * <code>optional .hadoop.hdfs.datanode.KeyUpdateCommandProto keyUpdateCmd = 6;</code>
 *
 * Resets the keyUpdateCmd field to its default value and drops its has-bit,
 * so hasKeyUpdateCmd() reports false afterwards.
 */
public Builder clearKeyUpdateCmd() {
  if (keyUpdateCmdBuilder_ != null) {
    // A nested builder exists; delegate the reset to it.
    keyUpdateCmdBuilder_.clear();
  } else {
    // No nested builder: restore the shared default instance and notify listeners.
    keyUpdateCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.getDefaultInstance();
    onChanged();
  }
  // Unset the presence bit (0x20) for field 6 on either path.
  bitField0_ = (bitField0_ & ~0x00000020);
  return this;
}
/**
keyUpdateCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.getDefaultInstance(); } else { keyUpdateCmdBuilder_.clear();
/**
 * <code>optional .hadoop.hdfs.datanode.KeyUpdateCommandProto keyUpdateCmd = 6;</code>
 *
 * Merges {@code value} into the current keyUpdateCmd: if the field is already
 * present and non-default the two messages are merged field-by-field,
 * otherwise {@code value} replaces it. Sets the has-bit either way.
 */
public Builder mergeKeyUpdateCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto value) {
  if (keyUpdateCmdBuilder_ != null) {
    // Nested builder path: let the builder perform the merge.
    keyUpdateCmdBuilder_.mergeFrom(value);
  } else {
    // Merge only when a non-default value is already stored; otherwise adopt value directly.
    boolean hasExisting =
        ((bitField0_ & 0x00000020) == 0x00000020)
            && keyUpdateCmd_ != org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.getDefaultInstance();
    keyUpdateCmd_ = hasExisting
        ? org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.newBuilder(keyUpdateCmd_).mergeFrom(value).buildPartial()
        : value;
    onChanged();
  }
  // Record presence of field 6.
  bitField0_ |= 0x00000020;
  return this;
}
/**
keyUpdateCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.getDefaultInstance(); } else { keyUpdateCmdBuilder_.clear();
keyUpdateCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.getDefaultInstance(); } else { keyUpdateCmdBuilder_.clear();
/**
 * <code>optional .hadoop.hdfs.datanode.KeyUpdateCommandProto keyUpdateCmd = 6;</code>
 *
 * Merges {@code value} into the current keyUpdateCmd: if the field is already
 * present and non-default the two messages are merged field-by-field,
 * otherwise {@code value} replaces it. Sets the has-bit either way.
 */
public Builder mergeKeyUpdateCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto value) {
  if (keyUpdateCmdBuilder_ != null) {
    // Nested builder path: let the builder perform the merge.
    keyUpdateCmdBuilder_.mergeFrom(value);
  } else {
    // Merge only when a non-default value is already stored; otherwise adopt value directly.
    boolean hasExisting =
        ((bitField0_ & 0x00000020) == 0x00000020)
            && keyUpdateCmd_ != org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.getDefaultInstance();
    keyUpdateCmd_ = hasExisting
        ? org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.newBuilder(keyUpdateCmd_).mergeFrom(value).buildPartial()
        : value;
    onChanged();
  }
  // Record presence of field 6.
  bitField0_ |= 0x00000020;
  return this;
}
/**
/**
 * <code>optional .hadoop.hdfs.datanode.KeyUpdateCommandProto keyUpdateCmd = 6;</code>
 *
 * Merges {@code value} into the current keyUpdateCmd: if the field is already
 * present and non-default the two messages are merged field-by-field,
 * otherwise {@code value} replaces it. Sets the has-bit either way.
 */
public Builder mergeKeyUpdateCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto value) {
  if (keyUpdateCmdBuilder_ != null) {
    // Nested builder path: let the builder perform the merge.
    keyUpdateCmdBuilder_.mergeFrom(value);
  } else {
    // Merge only when a non-default value is already stored; otherwise adopt value directly.
    boolean hasExisting =
        ((bitField0_ & 0x00000020) == 0x00000020)
            && keyUpdateCmd_ != org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.getDefaultInstance();
    keyUpdateCmd_ = hasExisting
        ? org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.newBuilder(keyUpdateCmd_).mergeFrom(value).buildPartial()
        : value;
    onChanged();
  }
  // Record presence of field 6.
  bitField0_ |= 0x00000020;
  return this;
}
/**
/**
 * <code>optional .hadoop.hdfs.datanode.KeyUpdateCommandProto keyUpdateCmd = 6;</code>
 *
 * Resets the keyUpdateCmd field to its default value and drops its has-bit,
 * so hasKeyUpdateCmd() reports false afterwards.
 */
public Builder clearKeyUpdateCmd() {
  if (keyUpdateCmdBuilder_ != null) {
    // A nested builder exists; delegate the reset to it.
    keyUpdateCmdBuilder_.clear();
  } else {
    // No nested builder: restore the shared default instance and notify listeners.
    keyUpdateCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.getDefaultInstance();
    onChanged();
  }
  // Unset the presence bit (0x20) for field 6 on either path.
  bitField0_ = (bitField0_ & ~0x00000020);
  return this;
}
/**
/**
 * <code>optional .hadoop.hdfs.datanode.KeyUpdateCommandProto keyUpdateCmd = 6;</code>
 *
 * Resets the keyUpdateCmd field to its default value and drops its has-bit,
 * so hasKeyUpdateCmd() reports false afterwards.
 */
public Builder clearKeyUpdateCmd() {
  if (keyUpdateCmdBuilder_ != null) {
    // A nested builder exists; delegate the reset to it.
    keyUpdateCmdBuilder_.clear();
  } else {
    // No nested builder: restore the shared default instance and notify listeners.
    keyUpdateCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.getDefaultInstance();
    onChanged();
  }
  // Unset the presence bit (0x20) for field 6 on either path.
  bitField0_ = (bitField0_ & ~0x00000020);
  return this;
}
/**
// Returns the immutable default (all-fields-unset) KeyUpdateCommandProto
// instance, satisfying the MessageOrBuilder contract so generic protobuf
// code can obtain a type-correct default without reflection.
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto getDefaultInstanceForType() {
  return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.getDefaultInstance();
}
// Returns the immutable default (all-fields-unset) KeyUpdateCommandProto
// instance, satisfying the MessageOrBuilder contract so generic protobuf
// code can obtain a type-correct default without reflection.
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto getDefaultInstanceForType() {
  return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.getDefaultInstance();
}
// Merges every populated field of `other` into this builder; merging the
// shared default instance is a no-op. Unknown fields are carried over so
// round-tripping preserves data written by newer schema versions.
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto other) {
  if (other != org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.getDefaultInstance()) {
    // Only the `keys` field exists on this message; merge it when present.
    if (other.hasKeys()) {
      mergeKeys(other.getKeys());
    }
    this.mergeUnknownFields(other.getUnknownFields());
  }
  return this;
}
// Merges every populated field of `other` into this builder; merging the
// shared default instance is a no-op. Unknown fields are carried over so
// round-tripping preserves data written by newer schema versions.
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto other) {
  if (other != org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.getDefaultInstance()) {
    // Only the `keys` field exists on this message; merge it when present.
    if (other.hasKeys()) {
      mergeKeys(other.getKeys());
    }
    this.mergeUnknownFields(other.getUnknownFields());
  }
  return this;
}
// Seeds every singular field of DatanodeCommandProto with its proto2 default:
// the first enum constant for cmdType and the shared immutable default
// instance for each optional sub-command message. Presence bits remain unset.
// NOTE(review): presumably invoked from the generated constructors — the call
// site is outside this chunk.
private void initFields() {
  cmdType_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Type.BalancerBandwidthCommand;
  balancerCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.getDefaultInstance();
  blkCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.getDefaultInstance();
  recoveryCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto.getDefaultInstance();
  finalizeCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto.getDefaultInstance();
  keyUpdateCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.getDefaultInstance();
  registerCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto.getDefaultInstance();
  blkIdCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto.getDefaultInstance();
}
// Memoized result of isInitialized(): -1 = not yet computed, 1 = initialized, 0 = not.
private byte memoizedIsInitialized = -1;
// Seeds every singular field of DatanodeCommandProto with its proto2 default:
// the first enum constant for cmdType and the shared immutable default
// instance for each optional sub-command message. Presence bits remain unset.
// NOTE(review): presumably invoked from the generated constructors — the call
// site is outside this chunk.
private void initFields() {
  cmdType_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Type.BalancerBandwidthCommand;
  balancerCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.getDefaultInstance();
  blkCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.getDefaultInstance();
  recoveryCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto.getDefaultInstance();
  finalizeCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto.getDefaultInstance();
  keyUpdateCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.getDefaultInstance();
  registerCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto.getDefaultInstance();
  blkIdCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto.getDefaultInstance();
}
// Memoized result of isInitialized(): -1 = not yet computed, 1 = initialized, 0 = not.
private byte memoizedIsInitialized = -1;
// Returns the immutable default (all-fields-unset) KeyUpdateCommandProto
// instance, satisfying the MessageOrBuilder contract so generic protobuf
// code can obtain a type-correct default without reflection.
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto getDefaultInstanceForType() {
  return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.getDefaultInstance();
}
// Merges every populated field of `other` into this builder; merging the
// shared default instance is a no-op. Unknown fields are carried over so
// round-tripping preserves data written by newer schema versions.
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto other) {
  if (other != org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.getDefaultInstance()) {
    // Only the `keys` field exists on this message; merge it when present.
    if (other.hasKeys()) {
      mergeKeys(other.getKeys());
    }
    this.mergeUnknownFields(other.getUnknownFields());
  }
  return this;
}
// Seeds every singular field of DatanodeCommandProto with its proto2 default:
// the first enum constant for cmdType and the shared immutable default
// instance for each optional sub-command message (this variant also covers
// the erasure-coding reconstruction command). Presence bits remain unset.
// NOTE(review): presumably invoked from the generated constructors — the call
// site is outside this chunk.
private void initFields() {
  cmdType_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Type.BalancerBandwidthCommand;
  balancerCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.getDefaultInstance();
  blkCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.getDefaultInstance();
  recoveryCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto.getDefaultInstance();
  finalizeCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto.getDefaultInstance();
  keyUpdateCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.getDefaultInstance();
  registerCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto.getDefaultInstance();
  blkIdCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto.getDefaultInstance();
  blkECReconstructionCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockECReconstructionCommandProto.getDefaultInstance();
}
// Memoized result of isInitialized(): -1 = not yet computed, 1 = initialized, 0 = not.
private byte memoizedIsInitialized = -1;