// NOTE(review): truncated fragment of protobuf-generated parsing code for an optional
// TimedQuota field (bit 0x00000002, presumably timedQuota). The enclosing parse loop
// and the closing braces are not visible here — do not edit in isolation.
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota.Builder subBuilder = null; if (((bitField0_ & 0x00000002) == 0x00000002)) { subBuilder = timedQuota_.toBuilder();
@Override protected ThrottleSettings merge(QuotaSettings other) throws IOException { if (other instanceof ThrottleSettings) { ThrottleSettings otherThrottle = (ThrottleSettings) other; // Make sure this and the other target the same "subject" validateQuotaTarget(other); QuotaProtos.ThrottleRequest.Builder builder = proto.toBuilder(); if (!otherThrottle.proto.hasType()) { return null; } QuotaProtos.ThrottleRequest otherProto = otherThrottle.proto; if (otherProto.hasTimedQuota()) { if (otherProto.hasTimedQuota()) { validateTimedQuota(otherProto.getTimedQuota()); } if (!proto.getType().equals(otherProto.getType())) { throw new IllegalArgumentException( "Cannot merge a ThrottleRequest for " + proto.getType() + " with " + otherProto.getType()); } QuotaProtos.TimedQuota.Builder timedQuotaBuilder = proto.getTimedQuota().toBuilder(); timedQuotaBuilder.mergeFrom(otherProto.getTimedQuota()); QuotaProtos.ThrottleRequest mergedReq = builder.setTimedQuota( timedQuotaBuilder.build()).build(); return new ThrottleSettings(getUserName(), getTableName(), getNamespace(), getRegionServer(), mergedReq); } } return this; }
// NOTE(review): truncated fragment of protobuf-generated parsing code covering seven
// optional TimedQuota fields (bits 0x01..0x40: reqNum_, reqSize_, writeNum_, writeSize_,
// readNum_, readSize_, reqCapacityUnit_). The repeated `subBuilder` declarations would
// not compile in a single scope — presumably these are separate case branches of the
// generated parse switch, collapsed together with their closing braces cut off.
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota.Builder subBuilder = null; if (((bitField0_ & 0x00000001) == 0x00000001)) { subBuilder = reqNum_.toBuilder(); org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota.Builder subBuilder = null; if (((bitField0_ & 0x00000002) == 0x00000002)) { subBuilder = reqSize_.toBuilder(); org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota.Builder subBuilder = null; if (((bitField0_ & 0x00000004) == 0x00000004)) { subBuilder = writeNum_.toBuilder(); org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota.Builder subBuilder = null; if (((bitField0_ & 0x00000008) == 0x00000008)) { subBuilder = writeSize_.toBuilder(); org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota.Builder subBuilder = null; if (((bitField0_ & 0x00000010) == 0x00000010)) { subBuilder = readNum_.toBuilder(); org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota.Builder subBuilder = null; if (((bitField0_ & 0x00000020) == 0x00000020)) { subBuilder = readSize_.toBuilder(); org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota.Builder subBuilder = null; if (((bitField0_ & 0x00000040) == 0x00000040)) { subBuilder = reqCapacityUnit_.toBuilder(); org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota.Builder subBuilder = null;
// NOTE(review): truncated fragment of protobuf-generated parsing code covering six
// optional TimedQuota fields (bits 0x01..0x20: reqNum_ through readSize_). Same caveat
// as the longer fragment above this appears paired with: the repeated `subBuilder`
// declarations look like collapsed, separate case branches of the generated parser,
// with their closing braces cut off — do not edit in isolation.
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota.Builder subBuilder = null; if (((bitField0_ & 0x00000001) == 0x00000001)) { subBuilder = reqNum_.toBuilder(); org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota.Builder subBuilder = null; if (((bitField0_ & 0x00000002) == 0x00000002)) { subBuilder = reqSize_.toBuilder(); org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota.Builder subBuilder = null; if (((bitField0_ & 0x00000004) == 0x00000004)) { subBuilder = writeNum_.toBuilder(); org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota.Builder subBuilder = null; if (((bitField0_ & 0x00000008) == 0x00000008)) { subBuilder = writeSize_.toBuilder(); org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota.Builder subBuilder = null; if (((bitField0_ & 0x00000010) == 0x00000010)) { subBuilder = readNum_.toBuilder(); org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota.Builder subBuilder = null; if (((bitField0_ & 0x00000020) == 0x00000020)) { subBuilder = readSize_.toBuilder();
// NOTE(review): duplicate of the truncated generated-parser fragment for the
// TimedQuota field at bit 0x00000002 (presumably timedQuota_); enclosing loop and
// closing braces not visible — do not edit in isolation.
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota.Builder subBuilder = null; if (((bitField0_ & 0x00000002) == 0x00000002)) { subBuilder = timedQuota_.toBuilder();
@Override protected ThrottleSettings merge(QuotaSettings other) throws IOException { if (other instanceof ThrottleSettings) { ThrottleSettings otherThrottle = (ThrottleSettings) other; // Make sure this and the other target the same "subject" validateQuotaTarget(other); QuotaProtos.ThrottleRequest.Builder builder = proto.toBuilder(); if (!otherThrottle.proto.hasType()) { return null; } QuotaProtos.ThrottleRequest otherProto = otherThrottle.proto; if (otherProto.hasTimedQuota()) { if (otherProto.hasTimedQuota()) { validateTimedQuota(otherProto.getTimedQuota()); } if (!proto.getType().equals(otherProto.getType())) { throw new IllegalArgumentException( "Cannot merge a ThrottleRequest for " + proto.getType() + " with " + otherProto.getType()); } QuotaProtos.TimedQuota.Builder timedQuotaBuilder = proto.getTimedQuota().toBuilder(); timedQuotaBuilder.mergeFrom(otherProto.getTimedQuota()); QuotaProtos.ThrottleRequest mergedReq = builder.setTimedQuota( timedQuotaBuilder.build()).build(); return new ThrottleSettings(getUserName(), getTableName(), getNamespace(), mergedReq); } } return this; }
// NOTE(review): truncated test-body fragment — two statements cut out of a larger test
// method (4-arg GlobalQuotaSettingsImpl ctor variant, namespace target). The surrounding
// method and the uses of `settings`/`writeQuota` are not visible here.
GlobalQuotaSettingsImpl settings = new GlobalQuotaSettingsImpl(null, null, ns, quota); QuotaProtos.TimedQuota writeQuota = REQUEST_THROTTLE.toBuilder() .setSoftLimit(500).build();
@Test public void testMergeThrottle() throws IOException { QuotaProtos.Quotas quota = QuotaProtos.Quotas.newBuilder() .setThrottle(THROTTLE).build(); QuotaProtos.TimedQuota writeQuota = REQUEST_THROTTLE.toBuilder() .setSoftLimit(500).build(); // Unset the req throttle, set a write throttle QuotaProtos.ThrottleRequest writeThrottle = QuotaProtos.ThrottleRequest.newBuilder() .setTimedQuota(writeQuota).setType(QuotaProtos.ThrottleType.WRITE_NUMBER).build(); GlobalQuotaSettingsImpl settings = new GlobalQuotaSettingsImpl("joe", null, null, quota); GlobalQuotaSettingsImpl merged = settings.merge( new ThrottleSettings("joe", null, null, writeThrottle)); QuotaProtos.Throttle mergedThrottle = merged.getThrottleProto(); // Verify the request throttle is in place assertTrue(mergedThrottle.hasReqNum()); QuotaProtos.TimedQuota actualReqNum = mergedThrottle.getReqNum(); assertEquals(REQUEST_THROTTLE.getSoftLimit(), actualReqNum.getSoftLimit()); // Verify the write throttle is in place assertTrue(mergedThrottle.hasWriteNum()); QuotaProtos.TimedQuota actualWriteNum = mergedThrottle.getWriteNum(); assertEquals(writeQuota.getSoftLimit(), actualWriteNum.getSoftLimit()); }
// Static factory: a Builder pre-populated from the given TimedQuota prototype.
// NOTE(review): the trailing `toBuilder()` opening is truncated in this fragment —
// its body is not visible here, so the line must not be edited in isolation.
public static Builder newBuilder(org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } public Builder toBuilder() {
// Static factory: a fresh Builder derived from the default instance.
// NOTE(review): the trailing `newBuilder(prototype)` overload is truncated — only its
// signature is visible in this fragment; do not edit the line in isolation.
public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota prototype) {
@Override protected ThrottleSettings merge(QuotaSettings other) throws IOException { if (other instanceof ThrottleSettings) { ThrottleSettings otherThrottle = (ThrottleSettings) other; // Make sure this and the other target the same "subject" validateQuotaTarget(other); QuotaProtos.ThrottleRequest.Builder builder = proto.toBuilder(); if (!otherThrottle.proto.hasType()) { return null; } QuotaProtos.ThrottleRequest otherProto = otherThrottle.proto; if (otherProto.hasTimedQuota()) { if (otherProto.hasTimedQuota()) { validateTimedQuota(otherProto.getTimedQuota()); } if (!proto.getType().equals(otherProto.getType())) { throw new IllegalArgumentException( "Cannot merge a ThrottleRequest for " + proto.getType() + " with " + otherProto.getType()); } QuotaProtos.TimedQuota.Builder timedQuotaBuilder = proto.getTimedQuota().toBuilder(); timedQuotaBuilder.mergeFrom(otherProto.getTimedQuota()); QuotaProtos.ThrottleRequest mergedReq = builder.setTimedQuota( timedQuotaBuilder.build()).build(); return new ThrottleSettings(getUserName(), getTableName(), getNamespace(), mergedReq); } } return this; }
// NOTE(review): truncated test-body fragment — two statements cut out of a larger test
// method (5-arg GlobalQuotaSettingsImpl ctor variant, namespace target). The surrounding
// method and the uses of `settings`/`writeQuota` are not visible here.
GlobalQuotaSettingsImpl settings = new GlobalQuotaSettingsImpl(null, null, ns, null, quota); QuotaProtos.TimedQuota writeQuota = REQUEST_THROTTLE.toBuilder() .setSoftLimit(500).build();
@Test public void testMergeThrottle() throws IOException { QuotaProtos.Quotas quota = QuotaProtos.Quotas.newBuilder() .setThrottle(THROTTLE).build(); QuotaProtos.TimedQuota writeQuota = REQUEST_THROTTLE.toBuilder() .setSoftLimit(500).build(); // Unset the req throttle, set a write throttle QuotaProtos.ThrottleRequest writeThrottle = QuotaProtos.ThrottleRequest.newBuilder() .setTimedQuota(writeQuota).setType(QuotaProtos.ThrottleType.WRITE_NUMBER).build(); GlobalQuotaSettingsImpl settings = new GlobalQuotaSettingsImpl("joe", null, null, null, quota); GlobalQuotaSettingsImpl merged = settings.merge( new ThrottleSettings("joe", null, null, null, writeThrottle)); QuotaProtos.Throttle mergedThrottle = merged.getThrottleProto(); // Verify the request throttle is in place assertTrue(mergedThrottle.hasReqNum()); QuotaProtos.TimedQuota actualReqNum = mergedThrottle.getReqNum(); assertEquals(REQUEST_THROTTLE.getSoftLimit(), actualReqNum.getSoftLimit()); // Verify the write throttle is in place assertTrue(mergedThrottle.hasWriteNum()); QuotaProtos.TimedQuota actualWriteNum = mergedThrottle.getWriteNum(); assertEquals(writeQuota.getSoftLimit(), actualWriteNum.getSoftLimit()); }
// Static factory: a Builder pre-populated from the given TimedQuota prototype.
// NOTE(review): duplicate hunk; the trailing `toBuilder()` opening is truncated —
// its body is not visible here, so the line must not be edited in isolation.
public static Builder newBuilder(org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } public Builder toBuilder() {
// Static factory: a fresh Builder derived from the default instance.
// NOTE(review): duplicate hunk; the trailing `newBuilder(prototype)` overload is
// truncated — only its signature is visible; do not edit the line in isolation.
public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota prototype) {