/**
 * <pre>
 ** Information on the load of individual regions.
 * </pre>
 *
 * <code>repeated .hbase.pb.RegionLoad region_loads = 5;</code>
 */
public Builder addRegionLoads(
    int index,
    org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad.Builder builderForValue) {
  // Materialize the message once so both branches insert the identical instance.
  org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad value =
      builderForValue.build();
  if (regionLoadsBuilder_ != null) {
    // A repeated-field builder exists: delegate the insertion to it.
    regionLoadsBuilder_.addMessage(index, value);
  } else {
    // No builder yet: mutate the backing list directly and signal the change.
    ensureRegionLoadsIsMutable();
    regionLoads_.add(index, value);
    onChanged();
  }
  return this;
}
/**
// NOTE(review): this hunk is truncated — the tail of mergeFrom (closing braces, the
// unknown-fields merge, and the final "return this") is not visible here.
// Merges every populated field of {@code other} into this builder.
public Builder mergeFrom(org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad other) {
  // Merging the default instance is a no-op.
  if (other == org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad.getDefaultInstance()) return this;
  if (other.hasRegionSpecifier()) {
    mergeRegionSpecifier(other.getRegionSpecifier());
    // NOTE(review): in stock protobuf-generated code each scalar copy below is guarded by its
    // own hasX() check outside this block; as shown here every field is copied only when
    // region_specifier is set, and unset fields in {@code other} would clobber local values.
    // Verify against the full generated source — the missing guards may be a collapse artifact.
    setStores(other.getStores());
    setStorefiles(other.getStorefiles());
    setStoreUncompressedSizeMB(other.getStoreUncompressedSizeMB());
    setStorefileSizeMB(other.getStorefileSizeMB());
    setMemStoreSizeMB(other.getMemStoreSizeMB());
    setStorefileIndexSizeKB(other.getStorefileIndexSizeKB());
    setReadRequestsCount(other.getReadRequestsCount());
    setWriteRequestsCount(other.getWriteRequestsCount());
    setTotalCompactingKVs(other.getTotalCompactingKVs());
    setCurrentCompactedKVs(other.getCurrentCompactedKVs());
    setRootIndexSizeKB(other.getRootIndexSizeKB());
    setTotalStaticIndexSizeKB(other.getTotalStaticIndexSizeKB());
    setTotalStaticBloomSizeKB(other.getTotalStaticBloomSizeKB());
public static ClusterStatusProtos.RegionLoad toRegionLoad(RegionMetrics regionMetrics) { return ClusterStatusProtos.RegionLoad.newBuilder() .setRegionSpecifier(HBaseProtos.RegionSpecifier .newBuilder().setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME) .setValue(UnsafeByteOperations.unsafeWrap(regionMetrics.getRegionName())) .build()) .setTotalStaticBloomSizeKB((int) regionMetrics.getBloomFilterSize() .get(Size.Unit.KILOBYTE)) .setCurrentCompactedKVs(regionMetrics.getCompactedCellCount()) .setTotalCompactingKVs(regionMetrics.getCompactingCellCount()) .setCompleteSequenceId(regionMetrics.getCompletedSequenceId()) .setDataLocality(regionMetrics.getDataLocality()) .setFilteredReadRequestsCount(regionMetrics.getFilteredReadRequestCount()) .setTotalStaticIndexSizeKB((int) regionMetrics.getStoreFileUncompressedDataIndexSize() .get(Size.Unit.KILOBYTE)) .setLastMajorCompactionTs(regionMetrics.getLastMajorCompactionTimestamp()) .setMemStoreSizeMB((int) regionMetrics.getMemStoreSize().get(Size.Unit.MEGABYTE)) .setReadRequestsCount(regionMetrics.getReadRequestCount()) .setWriteRequestsCount(regionMetrics.getWriteRequestCount()) .setStorefileIndexSizeKB((long) regionMetrics.getStoreFileIndexSize() .get(Size.Unit.KILOBYTE)) .setRootIndexSizeKB((int) regionMetrics.getStoreFileRootLevelIndexSize() .get(Size.Unit.KILOBYTE)) .setStores(regionMetrics.getStoreCount()) .setStorefiles(regionMetrics.getStoreCount()) .setStorefileSizeMB((int) regionMetrics.getStoreFileSize().get(Size.Unit.MEGABYTE)) .addAllStoreCompleteSequenceId(toStoreSequenceId(regionMetrics.getStoreSequenceId())) .setStoreUncompressedSizeMB( (int) regionMetrics.getUncompressedStoreFileSize().get(Size.Unit.MEGABYTE)) .build();
/**
 * <pre>
 ** Information on the load of individual regions.
 * </pre>
 *
 * <code>repeated .hbase.pb.RegionLoad region_loads = 5;</code>
 */
public Builder setRegionLoads(
    int index,
    org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad.Builder builderForValue) {
  // Build the message once; both branches store the same instance at {@code index}.
  org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad value =
      builderForValue.build();
  if (regionLoadsBuilder_ != null) {
    // Delegate to the repeated-field builder when one has been materialized.
    regionLoadsBuilder_.setMessage(index, value);
  } else {
    // Otherwise replace the element in the backing list and signal the change.
    ensureRegionLoadsIsMutable();
    regionLoads_.set(index, value);
    onChanged();
  }
  return this;
}
/**
/**
 * <pre>
 ** Information on the load of individual regions.
 * </pre>
 *
 * <code>repeated .hbase.pb.RegionLoad region_loads = 5;</code>
 */
public Builder addRegionLoads(
    int index,
    org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad.Builder builderForValue) {
  // Build once; the same message instance is inserted on either path.
  org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad built =
      builderForValue.build();
  if (regionLoadsBuilder_ == null) {
    // List-backed path: copy-on-write guard, insert, then notify listeners.
    ensureRegionLoadsIsMutable();
    regionLoads_.add(index, built);
    onChanged();
  } else {
    // Builder-backed path: the nested builder manages change notification itself.
    regionLoadsBuilder_.addMessage(index, built);
  }
  return this;
}
/**
/**
 * <pre>
 ** Information on the load of individual regions.
 * </pre>
 *
 * <code>repeated .hbase.pb.RegionLoad region_loads = 5;</code>
 */
public Builder addRegionLoads(
    org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad.Builder builderForValue) {
  // Materialize the message once so both branches append the identical instance.
  org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad value =
      builderForValue.build();
  if (regionLoadsBuilder_ != null) {
    // Delegate to the repeated-field builder when one has been materialized.
    regionLoadsBuilder_.addMessage(value);
  } else {
    // Otherwise append to the backing list directly and signal the change.
    ensureRegionLoadsIsMutable();
    regionLoads_.add(value);
    onChanged();
  }
  return this;
}
/**
/**
 * Builds a {@link ClusterStatusProtos.ServerLoad} fixture carrying two hand-crafted
 * region loads for load-reporting tests.
 */
private ClusterStatusProtos.ServerLoad createServerLoadProto() {
  // Two region specifiers identified by fixed encoded region names.
  HBaseProtos.RegionSpecifier specOne = HBaseProtos.RegionSpecifier.newBuilder()
      .setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.ENCODED_REGION_NAME)
      .setValue(ByteString.copyFromUtf8("ASDFGQWERT"))
      .build();
  HBaseProtos.RegionSpecifier specTwo = HBaseProtos.RegionSpecifier.newBuilder()
      .setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.ENCODED_REGION_NAME)
      .setValue(ByteString.copyFromUtf8("QWERTYUIOP"))
      .build();
  // Request counters are pinned at Integer.MAX_VALUE to exercise the top of the int range.
  ClusterStatusProtos.RegionLoad loadOne = ClusterStatusProtos.RegionLoad.newBuilder()
      .setRegionSpecifier(specOne)
      .setStores(10)
      .setStorefiles(101)
      .setStoreUncompressedSizeMB(106)
      .setStorefileSizeMB(520)
      .setFilteredReadRequestsCount(100)
      .setStorefileIndexSizeKB(42)
      .setRootIndexSizeKB(201)
      .setReadRequestsCount(Integer.MAX_VALUE)
      .setWriteRequestsCount(Integer.MAX_VALUE)
      .build();
  ClusterStatusProtos.RegionLoad loadTwo = ClusterStatusProtos.RegionLoad.newBuilder()
      .setRegionSpecifier(specTwo)
      .setStores(3)
      .setStorefiles(13)
      .setStoreUncompressedSizeMB(23)
      .setStorefileSizeMB(300)
      .setFilteredReadRequestsCount(200)
      .setStorefileIndexSizeKB(40)
      .setRootIndexSizeKB(303)
      .setReadRequestsCount(Integer.MAX_VALUE)
      .setWriteRequestsCount(Integer.MAX_VALUE)
      .build();
  return ClusterStatusProtos.ServerLoad.newBuilder()
      .addRegionLoads(loadOne)
      .addRegionLoads(loadTwo)
      .build();
}
/**
 * Creates a two-region {@link ClusterStatusProtos.ServerLoad} protobuf used as
 * fixture data in server-load tests.
 */
private ClusterStatusProtos.ServerLoad createServerLoadProto() {
  // Fixed encoded region names keep the fixture deterministic.
  HBaseProtos.RegionSpecifier firstSpec = HBaseProtos.RegionSpecifier.newBuilder()
      .setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.ENCODED_REGION_NAME)
      .setValue(ByteString.copyFromUtf8("ASDFGQWERT"))
      .build();
  HBaseProtos.RegionSpecifier secondSpec = HBaseProtos.RegionSpecifier.newBuilder()
      .setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.ENCODED_REGION_NAME)
      .setValue(ByteString.copyFromUtf8("QWERTYUIOP"))
      .build();
  // Read/write counters sit at Integer.MAX_VALUE to probe overflow-adjacent handling.
  ClusterStatusProtos.RegionLoad firstLoad = ClusterStatusProtos.RegionLoad.newBuilder()
      .setRegionSpecifier(firstSpec)
      .setStores(10)
      .setStorefiles(101)
      .setStoreUncompressedSizeMB(106)
      .setStorefileSizeMB(520)
      .setFilteredReadRequestsCount(100)
      .setStorefileIndexSizeKB(42)
      .setRootIndexSizeKB(201)
      .setReadRequestsCount(Integer.MAX_VALUE)
      .setWriteRequestsCount(Integer.MAX_VALUE)
      .build();
  ClusterStatusProtos.RegionLoad secondLoad = ClusterStatusProtos.RegionLoad.newBuilder()
      .setRegionSpecifier(secondSpec)
      .setStores(3)
      .setStorefiles(13)
      .setStoreUncompressedSizeMB(23)
      .setStorefileSizeMB(300)
      .setFilteredReadRequestsCount(200)
      .setStorefileIndexSizeKB(40)
      .setRootIndexSizeKB(303)
      .setReadRequestsCount(Integer.MAX_VALUE)
      .setWriteRequestsCount(Integer.MAX_VALUE)
      .build();
  return ClusterStatusProtos.ServerLoad.newBuilder()
      .addRegionLoads(firstLoad)
      .addRegionLoads(secondLoad)
      .build();
}
/**
 * <code>repeated .hbase.pb.RegionLoad region_loads = 1;</code>
 */
public Builder setRegionLoads(
    int index,
    org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad.Builder builderForValue) {
  // Build the message once; both branches store the same instance at {@code index}.
  org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad value =
      builderForValue.build();
  if (regionLoadsBuilder_ != null) {
    // Delegate to the repeated-field builder when one has been materialized.
    regionLoadsBuilder_.setMessage(index, value);
  } else {
    // Otherwise replace the element in the backing list and signal the change.
    ensureRegionLoadsIsMutable();
    regionLoads_.set(index, value);
    onChanged();
  }
  return this;
}
/**
/**
 * Builds a RegionServerReportRequest for {@code sn} whose server load lists one
 * minimal RegionLoad (region specifier only) per supplied region.
 */
private RegionServerStatusProtos.RegionServerReportRequest.Builder
    makeRSReportRequestWithRegions(final ServerName sn, HRegionInfo... regions) {
  ClusterStatusProtos.ServerLoad.Builder loadBuilder = ClusterStatusProtos.ServerLoad.newBuilder();
  int idx = 0;
  for (HRegionInfo region : regions) {
    // Identify each region by its full region name.
    HBaseProtos.RegionSpecifier spec = HBaseProtos.RegionSpecifier.newBuilder()
        .setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME)
        .setValue(UnsafeByteOperations.unsafeWrap(region.getRegionName()))
        .build();
    // Insert at the running index to mirror the input ordering.
    loadBuilder.addRegionLoads(idx++,
        ClusterStatusProtos.RegionLoad.newBuilder().setRegionSpecifier(spec).build());
  }
  return RegionServerStatusProtos.RegionServerReportRequest.newBuilder()
      .setServer(ProtobufUtil.toServerName(sn))
      .setLoad(loadBuilder);
}
/**
 * <code>repeated .hbase.pb.RegionLoad region_loads = 1;</code>
 */
public Builder addRegionLoads(
    int index,
    org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad.Builder builderForValue) {
  // Build once; the same message instance is inserted on either path.
  org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad built =
      builderForValue.build();
  if (regionLoadsBuilder_ == null) {
    // List-backed path: copy-on-write guard, insert, then notify listeners.
    ensureRegionLoadsIsMutable();
    regionLoads_.add(index, built);
    onChanged();
  } else {
    // Builder-backed path: the nested builder manages change notification itself.
    regionLoadsBuilder_.addMessage(index, built);
  }
  return this;
}
/**
/**
 * <code>repeated .hbase.pb.RegionLoad region_loads = 1;</code>
 */
public Builder addRegionLoads(
    org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad.Builder builderForValue) {
  // Materialize the message once so both branches append the identical instance.
  org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad value =
      builderForValue.build();
  if (regionLoadsBuilder_ != null) {
    // Delegate to the repeated-field builder when one has been materialized.
    regionLoadsBuilder_.addMessage(value);
  } else {
    // Otherwise append to the backing list directly and signal the change.
    ensureRegionLoadsIsMutable();
    regionLoads_.add(value);
    onChanged();
  }
  return this;
}
/**
public static ClusterStatusProtos.RegionLoad toRegionLoad(RegionMetrics regionMetrics) { return ClusterStatusProtos.RegionLoad.newBuilder() .setRegionSpecifier(HBaseProtos.RegionSpecifier .newBuilder().setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME) .setValue(UnsafeByteOperations.unsafeWrap(regionMetrics.getRegionName())) .build()) .setTotalStaticBloomSizeKB((int) regionMetrics.getBloomFilterSize() .get(Size.Unit.KILOBYTE)) .setCurrentCompactedKVs(regionMetrics.getCompactedCellCount()) .setTotalCompactingKVs(regionMetrics.getCompactingCellCount()) .setCompleteSequenceId(regionMetrics.getCompletedSequenceId()) .setDataLocality(regionMetrics.getDataLocality()) .setFilteredReadRequestsCount(regionMetrics.getFilteredReadRequestCount()) .setTotalStaticIndexSizeKB((int) regionMetrics.getStoreFileUncompressedDataIndexSize() .get(Size.Unit.KILOBYTE)) .setLastMajorCompactionTs(regionMetrics.getLastMajorCompactionTimestamp()) .setMemStoreSizeMB((int) regionMetrics.getMemStoreSize().get(Size.Unit.MEGABYTE)) .setReadRequestsCount(regionMetrics.getReadRequestCount()) .setWriteRequestsCount(regionMetrics.getWriteRequestCount()) .setStorefileIndexSizeKB((long) regionMetrics.getStoreFileIndexSize() .get(Size.Unit.KILOBYTE)) .setRootIndexSizeKB((int) regionMetrics.getStoreFileRootLevelIndexSize() .get(Size.Unit.KILOBYTE)) .setStores(regionMetrics.getStoreCount()) .setStorefiles(regionMetrics.getStoreCount()) .setStorefileSizeMB((int) regionMetrics.getStoreFileSize().get(Size.Unit.MEGABYTE)) .addAllStoreCompleteSequenceId(toStoreSequenceId(regionMetrics.getStoreSequenceId())) .setStoreUncompressedSizeMB( (int) regionMetrics.getUncompressedStoreFileSize().get(Size.Unit.MEGABYTE)) .build();
// NOTE(review): this hunk is truncated — the tail of mergeFrom (closing braces, the
// unknown-fields merge, and the final "return this") is not visible here.
// Merges every populated field of {@code other} into this builder.
public Builder mergeFrom(org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad other) {
  // Merging the default instance is a no-op.
  if (other == org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad.getDefaultInstance()) return this;
  if (other.hasRegionSpecifier()) {
    mergeRegionSpecifier(other.getRegionSpecifier());
    // NOTE(review): in stock protobuf-generated code each scalar copy below is guarded by its
    // own hasX() check outside this block; as shown here every field is copied only when
    // region_specifier is set, and unset fields in {@code other} would clobber local values.
    // Verify against the full generated source — the missing guards may be a collapse artifact.
    setStores(other.getStores());
    setStorefiles(other.getStorefiles());
    setStoreUncompressedSizeMB(other.getStoreUncompressedSizeMB());
    setStorefileSizeMB(other.getStorefileSizeMB());
    setMemStoreSizeMB(other.getMemStoreSizeMB());
    setStorefileIndexSizeKB(other.getStorefileIndexSizeKB());
    setReadRequestsCount(other.getReadRequestsCount());
    setWriteRequestsCount(other.getWriteRequestsCount());
    setTotalCompactingKVs(other.getTotalCompactingKVs());
    setCurrentCompactedKVs(other.getCurrentCompactedKVs());
    setRootIndexSizeKB(other.getRootIndexSizeKB());
    setTotalStaticIndexSizeKB(other.getTotalStaticIndexSizeKB());
    setTotalStaticBloomSizeKB(other.getTotalStaticBloomSizeKB());
// NOTE(review): statement fragment from the middle of an unseen method (likely the region
// server's createRegionLoad) — the enclosing signature and the local counters it references
// (stores, storefiles, ...) are not visible here.
// Populate the RegionLoad builder from the per-region counters gathered above.
regionLoadBldr.setRegionSpecifier(regionSpecifier.build())
    .setStores(stores)
    .setStorefiles(storefiles)
    .setStoreUncompressedSizeMB(storeUncompressedSizeMB)
    .setStorefileSizeMB(storefileSizeMB)
    .setMemStoreSizeMB(memstoreSizeMB)
    .setStorefileIndexSizeKB(storefileIndexSizeKB)
    .setRootIndexSizeKB(rootLevelIndexSizeKB)
    .setTotalStaticIndexSizeKB(totalStaticIndexSizeKB)
    .setTotalStaticBloomSizeKB(totalStaticBloomSizeKB)
    .setReadRequestsCount(r.getReadRequestsCount())
    .setCpRequestsCount(r.getCpRequestsCount())
    .setFilteredReadRequestsCount(r.getFilteredReadRequestsCount())
    .setWriteRequestsCount(r.getWriteRequestsCount())
    .setTotalCompactingKVs(totalCompactingKVs)
    .setCurrentCompactedKVs(currentCompactedKVs)
    .setDataLocality(dataLocality)
    // presumably the oldest HFile timestamp stands in for the last major-compaction
    // time — TODO confirm against getOldestHfileTs semantics
    .setLastMajorCompactionTs(r.getOldestHfileTs(true));
// The region fills in its own complete/store sequence ids on the builder.
r.setCompleteSequenceId(regionLoadBldr);
return regionLoadBldr.build();
public static ClusterStatusProtos.RegionLoad toRegionLoad(RegionMetrics regionMetrics) { return ClusterStatusProtos.RegionLoad.newBuilder() .setRegionSpecifier(HBaseProtos.RegionSpecifier .newBuilder().setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME) .setValue(UnsafeByteOperations.unsafeWrap(regionMetrics.getRegionName())) .build()) .setTotalStaticBloomSizeKB((int) regionMetrics.getBloomFilterSize() .get(Size.Unit.KILOBYTE)) .setCurrentCompactedKVs(regionMetrics.getCompactedCellCount()) .setTotalCompactingKVs(regionMetrics.getCompactingCellCount()) .setCompleteSequenceId(regionMetrics.getCompletedSequenceId()) .setDataLocality(regionMetrics.getDataLocality()) .setFilteredReadRequestsCount(regionMetrics.getFilteredReadRequestCount()) .setTotalStaticIndexSizeKB((int) regionMetrics.getStoreFileUncompressedDataIndexSize() .get(Size.Unit.KILOBYTE)) .setLastMajorCompactionTs(regionMetrics.getLastMajorCompactionTimestamp()) .setMemStoreSizeMB((int) regionMetrics.getMemStoreSize().get(Size.Unit.MEGABYTE)) .setReadRequestsCount(regionMetrics.getReadRequestCount()) .setCpRequestsCount(regionMetrics.getCpRequestCount()) .setWriteRequestsCount(regionMetrics.getWriteRequestCount()) .setStorefileIndexSizeKB((long) regionMetrics.getStoreFileIndexSize() .get(Size.Unit.KILOBYTE)) .setRootIndexSizeKB((int) regionMetrics.getStoreFileRootLevelIndexSize() .get(Size.Unit.KILOBYTE)) .setStores(regionMetrics.getStoreCount()) .setStorefiles(regionMetrics.getStoreCount()) .setStorefileSizeMB((int) regionMetrics.getStoreFileSize().get(Size.Unit.MEGABYTE)) .addAllStoreCompleteSequenceId(toStoreSequenceId(regionMetrics.getStoreSequenceId())) .setStoreUncompressedSizeMB( (int) regionMetrics.getUncompressedStoreFileSize().get(Size.Unit.MEGABYTE))
/**
 * <pre>
 ** Information on the load of individual regions.
 * </pre>
 *
 * <code>repeated .hbase.pb.RegionLoad region_loads = 5;</code>
 */
public Builder addRegionLoads(
    org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad.Builder builderForValue) {
  // Build once; the same message instance is appended on either path.
  org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad built =
      builderForValue.build();
  if (regionLoadsBuilder_ == null) {
    // List-backed path: copy-on-write guard, append, then notify listeners.
    ensureRegionLoadsIsMutable();
    regionLoads_.add(built);
    onChanged();
  } else {
    // Builder-backed path: the nested builder manages change notification itself.
    regionLoadsBuilder_.addMessage(built);
  }
  return this;
}
/**
/**
 * Builds a two-region {@link ClusterStatusProtos.ServerLoad} fixture; the second region
 * additionally carries a coprocessor request count.
 */
private ClusterStatusProtos.ServerLoad createServerLoadProto() {
  // Fixed encoded region names keep the fixture deterministic.
  HBaseProtos.RegionSpecifier specOne = HBaseProtos.RegionSpecifier.newBuilder()
      .setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.ENCODED_REGION_NAME)
      .setValue(ByteString.copyFromUtf8("ASDFGQWERT"))
      .build();
  HBaseProtos.RegionSpecifier specTwo = HBaseProtos.RegionSpecifier.newBuilder()
      .setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.ENCODED_REGION_NAME)
      .setValue(ByteString.copyFromUtf8("QWERTYUIOP"))
      .build();
  // Request counters are pinned at Integer.MAX_VALUE to exercise the top of the int range.
  ClusterStatusProtos.RegionLoad loadOne = ClusterStatusProtos.RegionLoad.newBuilder()
      .setRegionSpecifier(specOne)
      .setStores(10)
      .setStorefiles(101)
      .setStoreUncompressedSizeMB(106)
      .setStorefileSizeMB(520)
      .setFilteredReadRequestsCount(100)
      .setStorefileIndexSizeKB(42)
      .setRootIndexSizeKB(201)
      .setReadRequestsCount(Integer.MAX_VALUE)
      .setWriteRequestsCount(Integer.MAX_VALUE)
      .build();
  ClusterStatusProtos.RegionLoad loadTwo = ClusterStatusProtos.RegionLoad.newBuilder()
      .setRegionSpecifier(specTwo)
      .setStores(3)
      .setStorefiles(13)
      .setStoreUncompressedSizeMB(23)
      .setStorefileSizeMB(300)
      .setFilteredReadRequestsCount(200)
      .setStorefileIndexSizeKB(40)
      .setRootIndexSizeKB(303)
      .setReadRequestsCount(Integer.MAX_VALUE)
      .setWriteRequestsCount(Integer.MAX_VALUE)
      .setCpRequestsCount(100)
      .build();
  return ClusterStatusProtos.ServerLoad.newBuilder()
      .addRegionLoads(loadOne)
      .addRegionLoads(loadTwo)
      .build();
}
/**
 * <pre>
 ** Information on the load of individual regions.
 * </pre>
 *
 * <code>repeated .hbase.pb.RegionLoad region_loads = 5;</code>
 */
public Builder setRegionLoads(
    int index,
    org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad.Builder builderForValue) {
  // Build once; the same message instance replaces the element on either path.
  org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad built =
      builderForValue.build();
  if (regionLoadsBuilder_ == null) {
    // List-backed path: copy-on-write guard, replace, then notify listeners.
    ensureRegionLoadsIsMutable();
    regionLoads_.set(index, built);
    onChanged();
  } else {
    // Builder-backed path: the nested builder manages change notification itself.
    regionLoadsBuilder_.setMessage(index, built);
  }
  return this;
}
/**
/**
 * Assembles a {@link ClusterStatusProtos.ServerLoad} containing two synthetic region
 * loads; fixture data for server-load serialization tests.
 */
private ClusterStatusProtos.ServerLoad createServerLoadProto() {
  // Region identities use fixed encoded names so assertions can match on them.
  HBaseProtos.RegionSpecifier regionA = HBaseProtos.RegionSpecifier.newBuilder()
      .setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.ENCODED_REGION_NAME)
      .setValue(ByteString.copyFromUtf8("ASDFGQWERT"))
      .build();
  HBaseProtos.RegionSpecifier regionB = HBaseProtos.RegionSpecifier.newBuilder()
      .setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.ENCODED_REGION_NAME)
      .setValue(ByteString.copyFromUtf8("QWERTYUIOP"))
      .build();
  // Counters pinned at Integer.MAX_VALUE exercise the top of the int range.
  ClusterStatusProtos.RegionLoad regionLoadA = ClusterStatusProtos.RegionLoad.newBuilder()
      .setRegionSpecifier(regionA)
      .setStores(10)
      .setStorefiles(101)
      .setStoreUncompressedSizeMB(106)
      .setStorefileSizeMB(520)
      .setFilteredReadRequestsCount(100)
      .setStorefileIndexSizeKB(42)
      .setRootIndexSizeKB(201)
      .setReadRequestsCount(Integer.MAX_VALUE)
      .setWriteRequestsCount(Integer.MAX_VALUE)
      .build();
  ClusterStatusProtos.RegionLoad regionLoadB = ClusterStatusProtos.RegionLoad.newBuilder()
      .setRegionSpecifier(regionB)
      .setStores(3)
      .setStorefiles(13)
      .setStoreUncompressedSizeMB(23)
      .setStorefileSizeMB(300)
      .setFilteredReadRequestsCount(200)
      .setStorefileIndexSizeKB(40)
      .setRootIndexSizeKB(303)
      .setReadRequestsCount(Integer.MAX_VALUE)
      .setWriteRequestsCount(Integer.MAX_VALUE)
      .build();
  return ClusterStatusProtos.ServerLoad.newBuilder()
      .addRegionLoads(regionLoadA)
      .addRegionLoads(regionLoadB)
      .build();
}