@java.lang.Override
public boolean equals(final java.lang.Object obj) {
  if (obj == this) {
    return true;
  }
  if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto)) {
    return super.equals(obj);
  }
  org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto other =
      (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto) obj;

  boolean result = true;
  result = result && (hasBandwidth() == other.hasBandwidth());
  if (hasBandwidth()) {
    result = result && (getBandwidth() == other.getBandwidth());
  }
  result = result && getUnknownFields().equals(other.getUnknownFields());
  return result;
}
bitField0_ = (bitField0_ & ~0x00000001);
if (balancerCmdBuilder_ == null) {
  balancerCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.getDefaultInstance();
} else {
  balancerCmdBuilder_.clear();
}
@java.lang.Override
public int hashCode() {
  if (memoizedHashCode != 0) {
    return memoizedHashCode;
  }
  int hash = 41;
  hash = (19 * hash) + getDescriptorForType().hashCode();
  if (hasBandwidth()) {
    hash = (37 * hash) + BANDWIDTH_FIELD_NUMBER;
    hash = (53 * hash) + hashLong(getBandwidth());
  }
  hash = (29 * hash) + getUnknownFields().hashCode();
  memoizedHashCode = hash;
  return hash;
}
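// Illustrative sketch, not part of the generated sources: shows that the
// equals()/hashCode() pair above treat two messages carrying the same
// bandwidth as equal with matching hashes. Only the generated
// BalancerBandwidthCommandProto builder API (newBuilder/setBandwidth/build)
// is assumed.
private static boolean sameBandwidthMessagesAreEqual() {
  org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto a =
      org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto
          .newBuilder().setBandwidth(1048576L).build();
  org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto b =
      org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto
          .newBuilder().setBandwidth(1048576L).build();
  return a.equals(b) && a.hashCode() == b.hashCode();
}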
if (hasBalancerCmd()) {
  result = result && getBalancerCmd()
      .equals(other.getBalancerCmd());
}
hash = (53 * hash) + getBalancerCmd().hashCode();
/**
 * <code>optional .hadoop.hdfs.datanode.BalancerBandwidthCommandProto balancerCmd = 2;</code>
 *
 * <pre>
 * One of the following command is available when the corresponding
 * cmdType is set
 * </pre>
 */
public Builder mergeBalancerCmd(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto value) {
  if (balancerCmdBuilder_ == null) {
    if (((bitField0_ & 0x00000002) == 0x00000002) &&
        balancerCmd_ != org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.getDefaultInstance()) {
      balancerCmd_ =
          org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.newBuilder(balancerCmd_).mergeFrom(value).buildPartial();
    } else {
      balancerCmd_ = value;
    }
    onChanged();
  } else {
    balancerCmdBuilder_.mergeFrom(value);
  }
  bitField0_ |= 0x00000002;
  return this;
}
/**
 * <code>optional .hadoop.hdfs.datanode.BalancerBandwidthCommandProto balancerCmd = 2;</code>
 *
 * <pre>
 * One of the following command is available when the corresponding
 * cmdType is set
 * </pre>
 */
public Builder clearBalancerCmd() {
  if (balancerCmdBuilder_ == null) {
    balancerCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.getDefaultInstance();
    onChanged();
  } else {
    balancerCmdBuilder_.clear();
  }
  bitField0_ = (bitField0_ & ~0x00000002);
  return this;
}
public org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto getDefaultInstanceForType() {
  return org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.getDefaultInstance();
}
public static BalancerBandwidthCommand convert(
    BalancerBandwidthCommandProto balancerCmd) {
  return new BalancerBandwidthCommand(balancerCmd.getBandwidth());
}
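// Illustrative sketch, not part of the original file: the reverse conversion,
// from the internal BalancerBandwidthCommand back to its protobuf form.
// Assumes the command exposes its value via getBalancerBandwidthValue();
// that accessor name is an assumption here, not confirmed by this excerpt.
public static BalancerBandwidthCommandProto convert(BalancerBandwidthCommand cmd) {
  return BalancerBandwidthCommandProto.newBuilder()
      .setBandwidth(cmd.getBalancerBandwidthValue())
      .build();
}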