// Fragment of protobuf-generated Builder.buildPartial() for ClusterStatusProtos.RegionLoad:
// allocates the message from this builder and sets up from_bitField0_/to_bitField0_ so the
// builder's field-presence bits can be copied into the result.
// NOTE(review): this chunk is truncated — the per-field copy statements and the closing brace
// are outside the visible span. Generated code: regenerate from the .proto rather than hand-edit.
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad buildPartial() { org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad result = new org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0;
// Fragment of protobuf-generated RegionLoad.hashCode(): for each present optional field, mixes
// (37 * hash + FIELD_NUMBER) then (53 * hash + fieldValue) into the running hash. This variant
// treats storefile_index_size as a KB-named long field folded in via hashLong(...).
// NOTE(review): truncated mid-method — the closing braces of the if-guards are outside this
// chunk; in the full generated file these guards are sequential, not nested as they appear here.
hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasRegionSpecifier()) { hash = (37 * hash) + REGION_SPECIFIER_FIELD_NUMBER; hash = (53 * hash) + getRegionSpecifier().hashCode(); if (hasStores()) { hash = (37 * hash) + STORES_FIELD_NUMBER; hash = (53 * hash) + getStores(); if (hasStorefiles()) { hash = (37 * hash) + STOREFILES_FIELD_NUMBER; hash = (53 * hash) + getStorefiles(); if (hasStoreUncompressedSizeMB()) { hash = (37 * hash) + STORE_UNCOMPRESSED_SIZE_MB_FIELD_NUMBER; hash = (53 * hash) + getStoreUncompressedSizeMB(); if (hasStorefileSizeMB()) { hash = (37 * hash) + STOREFILE_SIZE_MB_FIELD_NUMBER; hash = (53 * hash) + getStorefileSizeMB(); if (hasMemstoreSizeMB()) { hash = (37 * hash) + MEMSTORE_SIZE_MB_FIELD_NUMBER; hash = (53 * hash) + getMemstoreSizeMB(); if (hasStorefileIndexSizeKB()) { hash = (37 * hash) + STOREFILE_INDEX_SIZE_KB_FIELD_NUMBER; hash = (53 * hash) + hashLong(getStorefileIndexSizeKB());
// Fragment of protobuf-generated Builder.mergeFrom(RegionLoad): no-ops on the default instance,
// then copies every field that is set on `other` into this builder — the message-typed
// region_specifier is merged (mergeRegionSpecifier), scalar fields are overwritten (setXxx).
// This variant uses the KB-named storefile_index_size accessors; other fragments in this file
// use MB-named ones — mixed generator versions, TODO confirm which matches the current .proto.
// NOTE(review): truncated right after the hasTotalCompactingKVs() guard; the remainder of the
// method (and all closing braces) is outside this chunk. Generated code — do not hand-edit.
public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad other) { if (other == org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad.getDefaultInstance()) return this; if (other.hasRegionSpecifier()) { mergeRegionSpecifier(other.getRegionSpecifier()); if (other.hasStores()) { setStores(other.getStores()); if (other.hasStorefiles()) { setStorefiles(other.getStorefiles()); if (other.hasStoreUncompressedSizeMB()) { setStoreUncompressedSizeMB(other.getStoreUncompressedSizeMB()); if (other.hasStorefileSizeMB()) { setStorefileSizeMB(other.getStorefileSizeMB()); if (other.hasMemstoreSizeMB()) { setMemstoreSizeMB(other.getMemstoreSizeMB()); if (other.hasStorefileIndexSizeKB()) { setStorefileIndexSizeKB(other.getStorefileIndexSizeKB()); if (other.hasReadRequestsCount()) { setReadRequestsCount(other.getReadRequestsCount()); if (other.hasWriteRequestsCount()) { setWriteRequestsCount(other.getWriteRequestsCount()); if (other.hasTotalCompactingKVs()) {
// NOTE(review): the next three lines are verbatim duplicates of the truncated
// Builder.buildPartial() fragment that already appears earlier in this file — this file looks
// like a concatenation of repeated chunks of the same generated source. Each line allocates the
// RegionLoad result from the builder and sets up the bit-field copy; the per-field copy
// statements and closing braces are outside the visible span.
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad buildPartial() { org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad result = new org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0;
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad buildPartial() { org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad result = new org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0;
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad buildPartial() { org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad result = new org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0;
// Three identical fragments of protobuf-generated RegionLoad.equals(): field-by-field
// comparison — presence flags must match, and when present, the message field region_specifier
// is compared via .equals() while the scalar fields are compared with ==.
// NOTE(review): truncated mid-method — the `result` declaration, the remaining fields, and the
// closing braces are outside this chunk; the if-guards are sequential in the full generated file.
result = result && (hasRegionSpecifier() == other.hasRegionSpecifier()); if (hasRegionSpecifier()) { result = result && getRegionSpecifier() .equals(other.getRegionSpecifier()); result = result && (hasStores() == other.hasStores()); if (hasStores()) { result = result && (getStores() == other.getStores()); result = result && (hasStorefiles() == other.hasStorefiles()); if (hasStorefiles()) { result = result && (getStorefiles() == other.getStorefiles()); result = result && (hasStoreUncompressedSizeMB() == other.hasStoreUncompressedSizeMB()); if (hasStoreUncompressedSizeMB()) { result = result && (getStoreUncompressedSizeMB() == other.getStoreUncompressedSizeMB()); result = result && (hasStorefileSizeMB() == other.hasStorefileSizeMB()); if (hasStorefileSizeMB()) { result = result && (getStorefileSizeMB() == other.getStorefileSizeMB()); result = result && (hasMemstoreSizeMB() == other.hasMemstoreSizeMB()); if (hasMemstoreSizeMB()) { result = result && (getMemstoreSizeMB() == other.getMemstoreSizeMB());
result = result && (hasRegionSpecifier() == other.hasRegionSpecifier()); if (hasRegionSpecifier()) { result = result && getRegionSpecifier() .equals(other.getRegionSpecifier()); result = result && (hasStores() == other.hasStores()); if (hasStores()) { result = result && (getStores() == other.getStores()); result = result && (hasStorefiles() == other.hasStorefiles()); if (hasStorefiles()) { result = result && (getStorefiles() == other.getStorefiles()); result = result && (hasStoreUncompressedSizeMB() == other.hasStoreUncompressedSizeMB()); if (hasStoreUncompressedSizeMB()) { result = result && (getStoreUncompressedSizeMB() == other.getStoreUncompressedSizeMB()); result = result && (hasStorefileSizeMB() == other.hasStorefileSizeMB()); if (hasStorefileSizeMB()) { result = result && (getStorefileSizeMB() == other.getStorefileSizeMB()); result = result && (hasMemstoreSizeMB() == other.hasMemstoreSizeMB()); if (hasMemstoreSizeMB()) { result = result && (getMemstoreSizeMB() == other.getMemstoreSizeMB());
result = result && (hasRegionSpecifier() == other.hasRegionSpecifier()); if (hasRegionSpecifier()) { result = result && getRegionSpecifier() .equals(other.getRegionSpecifier()); result = result && (hasStores() == other.hasStores()); if (hasStores()) { result = result && (getStores() == other.getStores()); result = result && (hasStorefiles() == other.hasStorefiles()); if (hasStorefiles()) { result = result && (getStorefiles() == other.getStorefiles()); result = result && (hasStoreUncompressedSizeMB() == other.hasStoreUncompressedSizeMB()); if (hasStoreUncompressedSizeMB()) { result = result && (getStoreUncompressedSizeMB() == other.getStoreUncompressedSizeMB()); result = result && (hasStorefileSizeMB() == other.hasStorefileSizeMB()); if (hasStorefileSizeMB()) { result = result && (getStorefileSizeMB() == other.getStorefileSizeMB()); result = result && (hasMemstoreSizeMB() == other.hasMemstoreSizeMB()); if (hasMemstoreSizeMB()) { result = result && (getMemstoreSizeMB() == other.getMemstoreSizeMB());
// Two fragments of protobuf-generated RegionLoad.hashCode(); both mix each present field in as
// (37 * hash + FIELD_NUMBER) then (53 * hash + value).
// NOTE(review): the two lines disagree on the storefile-index field — the first uses an MB-named
// int field (getStorefileIndexSizeMB, no hashLong), the second a KB-named long field hashed via
// hashLong(...). These are outputs of two different .proto revisions concatenated into one file;
// TODO confirm which revision is current before using either fragment.
// Both are truncated mid-method: closing braces of the sequential if-guards lie outside this chunk.
hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasRegionSpecifier()) { hash = (37 * hash) + REGION_SPECIFIER_FIELD_NUMBER; hash = (53 * hash) + getRegionSpecifier().hashCode(); if (hasStores()) { hash = (37 * hash) + STORES_FIELD_NUMBER; hash = (53 * hash) + getStores(); if (hasStorefiles()) { hash = (37 * hash) + STOREFILES_FIELD_NUMBER; hash = (53 * hash) + getStorefiles(); if (hasStoreUncompressedSizeMB()) { hash = (37 * hash) + STORE_UNCOMPRESSED_SIZE_MB_FIELD_NUMBER; hash = (53 * hash) + getStoreUncompressedSizeMB(); if (hasStorefileSizeMB()) { hash = (37 * hash) + STOREFILE_SIZE_MB_FIELD_NUMBER; hash = (53 * hash) + getStorefileSizeMB(); if (hasMemstoreSizeMB()) { hash = (37 * hash) + MEMSTORE_SIZE_MB_FIELD_NUMBER; hash = (53 * hash) + getMemstoreSizeMB(); if (hasStorefileIndexSizeMB()) { hash = (37 * hash) + STOREFILE_INDEX_SIZE_MB_FIELD_NUMBER; hash = (53 * hash) + getStorefileIndexSizeMB();
hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasRegionSpecifier()) { hash = (37 * hash) + REGION_SPECIFIER_FIELD_NUMBER; hash = (53 * hash) + getRegionSpecifier().hashCode(); if (hasStores()) { hash = (37 * hash) + STORES_FIELD_NUMBER; hash = (53 * hash) + getStores(); if (hasStorefiles()) { hash = (37 * hash) + STOREFILES_FIELD_NUMBER; hash = (53 * hash) + getStorefiles(); if (hasStoreUncompressedSizeMB()) { hash = (37 * hash) + STORE_UNCOMPRESSED_SIZE_MB_FIELD_NUMBER; hash = (53 * hash) + getStoreUncompressedSizeMB(); if (hasStorefileSizeMB()) { hash = (37 * hash) + STOREFILE_SIZE_MB_FIELD_NUMBER; hash = (53 * hash) + getStorefileSizeMB(); if (hasMemstoreSizeMB()) { hash = (37 * hash) + MEMSTORE_SIZE_MB_FIELD_NUMBER; hash = (53 * hash) + getMemstoreSizeMB(); if (hasStorefileIndexSizeKB()) { hash = (37 * hash) + STOREFILE_INDEX_SIZE_KB_FIELD_NUMBER; hash = (53 * hash) + hashLong(getStorefileIndexSizeKB());
// Jamon-generated template output: HTML-escapes each RegionLoad metric (store count, storefile
// count, uncompressed/storefile sizes in MB, static index/bloom sizes in KB, data locality) and
// writes it to jamonWriter. Part of a larger generated render method; `load` and `jamonWriter`
// are defined outside this chunk.
org.jamon.escaping.Escaping.HTML.write(org.jamon.emit.StandardEmitter.valueOf(load.getStores()), jamonWriter); org.jamon.escaping.Escaping.HTML.write(org.jamon.emit.StandardEmitter.valueOf(load.getStorefiles()), jamonWriter); org.jamon.escaping.Escaping.HTML.write(org.jamon.emit.StandardEmitter.valueOf(load.getStoreUncompressedSizeMB()), jamonWriter); org.jamon.escaping.Escaping.HTML.write(org.jamon.emit.StandardEmitter.valueOf(load.getStorefileSizeMB()), jamonWriter); org.jamon.escaping.Escaping.HTML.write(org.jamon.emit.StandardEmitter.valueOf(load.getTotalStaticIndexSizeKB()), jamonWriter); org.jamon.escaping.Escaping.HTML.write(org.jamon.emit.StandardEmitter.valueOf(load.getTotalStaticBloomSizeKB()), jamonWriter); org.jamon.escaping.Escaping.HTML.write(org.jamon.emit.StandardEmitter.valueOf(load.getDataLocality()), jamonWriter);
// Three fragments of protobuf-generated Builder.mergeFrom(RegionLoad): skip the default
// instance, merge the message-typed region_specifier, and overwrite each scalar field that is
// set on `other`.
// NOTE(review): the first line carries the MB-named storefile-index accessors
// (hasStorefileIndexSizeMB/setStorefileIndexSizeMB) while the second and third carry the
// KB-named ones — two different .proto revisions concatenated; TODO confirm which is current.
// All three lines are truncated right after the hasTotalCompactingKVs() guard.
public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad other) { if (other == org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad.getDefaultInstance()) return this; if (other.hasRegionSpecifier()) { mergeRegionSpecifier(other.getRegionSpecifier()); if (other.hasStores()) { setStores(other.getStores()); if (other.hasStorefiles()) { setStorefiles(other.getStorefiles()); if (other.hasStoreUncompressedSizeMB()) { setStoreUncompressedSizeMB(other.getStoreUncompressedSizeMB()); if (other.hasStorefileSizeMB()) { setStorefileSizeMB(other.getStorefileSizeMB()); if (other.hasMemstoreSizeMB()) { setMemstoreSizeMB(other.getMemstoreSizeMB()); if (other.hasStorefileIndexSizeMB()) { setStorefileIndexSizeMB(other.getStorefileIndexSizeMB()); if (other.hasReadRequestsCount()) { setReadRequestsCount(other.getReadRequestsCount()); if (other.hasWriteRequestsCount()) { setWriteRequestsCount(other.getWriteRequestsCount()); if (other.hasTotalCompactingKVs()) {
public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad other) { if (other == org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad.getDefaultInstance()) return this; if (other.hasRegionSpecifier()) { mergeRegionSpecifier(other.getRegionSpecifier()); if (other.hasStores()) { setStores(other.getStores()); if (other.hasStorefiles()) { setStorefiles(other.getStorefiles()); if (other.hasStoreUncompressedSizeMB()) { setStoreUncompressedSizeMB(other.getStoreUncompressedSizeMB()); if (other.hasStorefileSizeMB()) { setStorefileSizeMB(other.getStorefileSizeMB()); if (other.hasMemstoreSizeMB()) { setMemstoreSizeMB(other.getMemstoreSizeMB()); if (other.hasStorefileIndexSizeKB()) { setStorefileIndexSizeKB(other.getStorefileIndexSizeKB()); if (other.hasReadRequestsCount()) { setReadRequestsCount(other.getReadRequestsCount()); if (other.hasWriteRequestsCount()) { setWriteRequestsCount(other.getWriteRequestsCount()); if (other.hasTotalCompactingKVs()) {
public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad other) { if (other == org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad.getDefaultInstance()) return this; if (other.hasRegionSpecifier()) { mergeRegionSpecifier(other.getRegionSpecifier()); if (other.hasStores()) { setStores(other.getStores()); if (other.hasStorefiles()) { setStorefiles(other.getStorefiles()); if (other.hasStoreUncompressedSizeMB()) { setStoreUncompressedSizeMB(other.getStoreUncompressedSizeMB()); if (other.hasStorefileSizeMB()) { setStorefileSizeMB(other.getStorefileSizeMB()); if (other.hasMemstoreSizeMB()) { setMemstoreSizeMB(other.getMemstoreSizeMB()); if (other.hasStorefileIndexSizeKB()) { setStorefileIndexSizeKB(other.getStorefileIndexSizeKB()); if (other.hasReadRequestsCount()) { setReadRequestsCount(other.getReadRequestsCount()); if (other.hasWriteRequestsCount()) { setWriteRequestsCount(other.getWriteRequestsCount()); if (other.hasTotalCompactingKVs()) {
// Another copy of the protobuf-generated RegionLoad.hashCode() fragment (KB variant: the
// storefile-index field is a long hashed via hashLong(...)). Same per-field mixing scheme:
// (37 * hash + FIELD_NUMBER) then (53 * hash + value) for each present field.
// NOTE(review): truncated mid-method; the if-guards' closing braces are outside this chunk.
hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasRegionSpecifier()) { hash = (37 * hash) + REGION_SPECIFIER_FIELD_NUMBER; hash = (53 * hash) + getRegionSpecifier().hashCode(); if (hasStores()) { hash = (37 * hash) + STORES_FIELD_NUMBER; hash = (53 * hash) + getStores(); if (hasStorefiles()) { hash = (37 * hash) + STOREFILES_FIELD_NUMBER; hash = (53 * hash) + getStorefiles(); if (hasStoreUncompressedSizeMB()) { hash = (37 * hash) + STORE_UNCOMPRESSED_SIZE_MB_FIELD_NUMBER; hash = (53 * hash) + getStoreUncompressedSizeMB(); if (hasStorefileSizeMB()) { hash = (37 * hash) + STOREFILE_SIZE_MB_FIELD_NUMBER; hash = (53 * hash) + getStorefileSizeMB(); if (hasMemstoreSizeMB()) { hash = (37 * hash) + MEMSTORE_SIZE_MB_FIELD_NUMBER; hash = (53 * hash) + getMemstoreSizeMB(); if (hasStorefileIndexSizeKB()) { hash = (37 * hash) + STOREFILE_INDEX_SIZE_KB_FIELD_NUMBER; hash = (53 * hash) + hashLong(getStorefileIndexSizeKB());
// Jamon-generated compaction-progress snippet: formats percent-complete as
// 100 * currentCompactedKVs / totalCompactingKVs with two decimals plus "%". The `> 0` guard
// avoids dividing by zero (float division by zero would render Infinity/NaN rather than throw)
// and also skips regions with no compaction in progress. Then writes both raw counters,
// HTML-escaped, to jamonWriter.
// NOTE(review): the opening `if` brace is not closed within this chunk — fragment of a larger
// generated render method; `load`, `percentDone`, and `jamonWriter` are defined outside it.
if (load != null && load.getTotalCompactingKVs() > 0) { percentDone = String.format("%.2f", 100 * ((float) load.getCurrentCompactedKVs() / load.getTotalCompactingKVs())) + "%"; org.jamon.escaping.Escaping.HTML.write(org.jamon.emit.StandardEmitter.valueOf(load.getTotalCompactingKVs()), jamonWriter); org.jamon.escaping.Escaping.HTML.write(org.jamon.emit.StandardEmitter.valueOf(load.getCurrentCompactedKVs()), jamonWriter);
// Jamon-generated template output: HTML-escapes and writes the region's read/write request
// counters and memstore size (MB) to jamonWriter. Fragment of a larger generated render method;
// `load` and `jamonWriter` are defined outside this chunk.
org.jamon.escaping.Escaping.HTML.write(org.jamon.emit.StandardEmitter.valueOf(load.getReadRequestsCount()), jamonWriter); org.jamon.escaping.Escaping.HTML.write(org.jamon.emit.StandardEmitter.valueOf(load.getWriteRequestsCount()), jamonWriter);
org.jamon.escaping.Escaping.HTML.write(org.jamon.emit.StandardEmitter.valueOf(load.getMemstoreSizeMB()), jamonWriter);
/**
 * Checks that all required fields of this message (and its nested messages) are set.
 *
 * <p>The result is memoized in {@code memoizedIsInitialized}: -1 means not yet computed,
 * 1 means initialized, 0 means not initialized. Required: {@code region_specifier} must be
 * present and itself initialized; every {@code store_complete_sequence_id} entry must be
 * initialized as well.
 *
 * @return true if every required field is set, false otherwise
 */
public final boolean isInitialized() {
  final byte cached = memoizedIsInitialized;
  if (cached != -1) {
    // Previously computed — reuse the memoized answer.
    return cached == 1;
  }
  // region_specifier is required and must itself pass its own required-field check.
  if (!hasRegionSpecifier() || !getRegionSpecifier().isInitialized()) {
    memoizedIsInitialized = 0;
    return false;
  }
  // Every repeated store_complete_sequence_id entry must be initialized too.
  final int sequenceIdCount = getStoreCompleteSequenceIdCount();
  for (int index = 0; index < sequenceIdCount; index++) {
    if (!getStoreCompleteSequenceId(index).isInitialized()) {
      memoizedIsInitialized = 0;
      return false;
    }
  }
  memoizedIsInitialized = 1;
  return true;
}