final Throttle throttle = Throttle.newBuilder()
    .setReadNum(TimedQuota.newBuilder()
        .setSoftLimit(readLimit)
        .setTimeUnit(HBaseProtos.TimeUnit.MINUTES)
        .build())
    .setWriteNum(TimedQuota.newBuilder()
        .setSoftLimit(writeLimit)
        .setTimeUnit(HBaseProtos.TimeUnit.MINUTES)
        .build())
    .build();
// Assumed completion: the original snippet breaks off after Quotas.newBuilder().
final Quotas quotas = Quotas.newBuilder()
    .setThrottle(throttle)
    .build();
@Test
public void testIncompatibleThrottleTypes() throws IOException {
  TimedQuota requestsQuota = TimedQuota.newBuilder().setSoftLimit(10)
      .setScope(QuotaProtos.QuotaScope.MACHINE)
      .setTimeUnit(HBaseProtos.TimeUnit.MINUTES).build();
  ThrottleRequest requestsQuotaReq = ThrottleRequest.newBuilder().setTimedQuota(requestsQuota)
      .setType(QuotaProtos.ThrottleType.REQUEST_NUMBER).build();
  ThrottleSettings orig = new ThrottleSettings("joe", null, null, requestsQuotaReq);

  TimedQuota readsQuota = TimedQuota.newBuilder().setSoftLimit(10)
      .setScope(QuotaProtos.QuotaScope.MACHINE)
      .setTimeUnit(HBaseProtos.TimeUnit.SECONDS).build();
  ThrottleRequest readsQuotaReq = ThrottleRequest.newBuilder().setTimedQuota(readsQuota)
      .setType(QuotaProtos.ThrottleType.READ_NUMBER).build();

  try {
    orig.merge(new ThrottleSettings("joe", null, null, readsQuotaReq));
    fail("A read throttle should not be capable of being merged with a request quota");
  } catch (IllegalArgumentException e) {
    // Pass
  }
}
@Test
public void testMerge() throws IOException {
  TimedQuota tq1 = TimedQuota.newBuilder().setSoftLimit(10)
      .setScope(QuotaProtos.QuotaScope.MACHINE)
      .setTimeUnit(HBaseProtos.TimeUnit.MINUTES).build();
  ThrottleRequest tr1 = ThrottleRequest.newBuilder().setTimedQuota(tq1)
      .setType(QuotaProtos.ThrottleType.REQUEST_NUMBER).build();
  ThrottleSettings orig = new ThrottleSettings("joe", null, null, tr1);

  TimedQuota tq2 = TimedQuota.newBuilder().setSoftLimit(10)
      .setScope(QuotaProtos.QuotaScope.MACHINE)
      .setTimeUnit(HBaseProtos.TimeUnit.SECONDS).build();
  ThrottleRequest tr2 = ThrottleRequest.newBuilder().setTimedQuota(tq2)
      .setType(QuotaProtos.ThrottleType.REQUEST_NUMBER).build();

  ThrottleSettings merged = orig.merge(new ThrottleSettings("joe", null, null, tr2));

  assertEquals(10, merged.getSoftLimit());
  assertEquals(ThrottleType.REQUEST_NUMBER, merged.getThrottleType());
  assertEquals(TimeUnit.SECONDS, merged.getTimeUnit());
}
/**
 * Build a protocol buffer TimedQuota
 *
 * @param limit the allowed number of request/data per timeUnit
 * @param timeUnit the limit time unit
 * @param scope the quota scope
 * @return the protocol buffer TimedQuota
 */
public static QuotaProtos.TimedQuota toTimedQuota(final long limit, final TimeUnit timeUnit,
    final QuotaScope scope) {
  return QuotaProtos.TimedQuota.newBuilder()
      .setSoftLimit(limit)
      .setTimeUnit(toProtoTimeUnit(timeUnit))
      .setScope(toProtoQuotaScope(scope))
      .build();
}
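/*
 * Usage sketch (added for illustration, not from the source): assuming the helper above is
 * HBase's ProtobufUtil.toTimedQuota and the relevant imports are available, a caller could
 * build a machine-scoped limit of 1000 operations per second as below. The method name
 * exampleTimedQuota and the values are illustrative only.
 */
static QuotaProtos.TimedQuota exampleTimedQuota() {
  // 1000 operations allowed per second, counted per machine (soft limit only)
  return ProtobufUtil.toTimedQuota(1000L, TimeUnit.SECONDS, QuotaScope.MACHINE);
}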
@Test
public void testNoThrottleReturnsOriginal() throws IOException {
  TimedQuota tq1 = TimedQuota.newBuilder().setSoftLimit(10)
      .setScope(QuotaProtos.QuotaScope.MACHINE)
      .setTimeUnit(HBaseProtos.TimeUnit.MINUTES).build();
  ThrottleRequest tr1 = ThrottleRequest.newBuilder().setTimedQuota(tq1)
      .setType(QuotaProtos.ThrottleType.REQUEST_NUMBER).build();
  ThrottleSettings orig = new ThrottleSettings("joe", null, null, tr1);

  ThrottleRequest tr2 = ThrottleRequest.newBuilder()
      .setType(QuotaProtos.ThrottleType.REQUEST_NUMBER).build();

  assertTrue(
      "The same object should be returned by merge, but it wasn't",
      orig == orig.merge(new ThrottleSettings("joe", null, null, tr2)));
}
}
/**
 * <code>optional .hbase.pb.TimedQuota read_capacity_unit = 9;</code>
 */
public Builder mergeReadCapacityUnit(org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota value) {
  if (readCapacityUnitBuilder_ == null) {
    if (((bitField0_ & 0x00000100) == 0x00000100) &&
        readCapacityUnit_ != null &&
        readCapacityUnit_ != org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota.getDefaultInstance()) {
      readCapacityUnit_ =
        org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota.newBuilder(readCapacityUnit_).mergeFrom(value).buildPartial();
    } else {
      readCapacityUnit_ = value;
    }
    onChanged();
  } else {
    readCapacityUnitBuilder_.mergeFrom(value);
  }
  bitField0_ |= 0x00000100;
  return this;
}
/**
 * <code>optional .hbase.pb.TimedQuota req_capacity_unit = 7;</code>
 */
public Builder mergeReqCapacityUnit(org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota value) {
  if (reqCapacityUnitBuilder_ == null) {
    if (((bitField0_ & 0x00000040) == 0x00000040) &&
        reqCapacityUnit_ != null &&
        reqCapacityUnit_ != org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota.getDefaultInstance()) {
      reqCapacityUnit_ =
        org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota.newBuilder(reqCapacityUnit_).mergeFrom(value).buildPartial();
    } else {
      reqCapacityUnit_ = value;
    }
    onChanged();
  } else {
    reqCapacityUnitBuilder_.mergeFrom(value);
  }
  bitField0_ |= 0x00000040;
  return this;
}
/**
 * <code>optional .hbase.pb.TimedQuota read_size = 6;</code>
 */
public Builder mergeReadSize(org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota value) {
  if (readSizeBuilder_ == null) {
    if (((bitField0_ & 0x00000020) == 0x00000020) &&
        readSize_ != null &&
        readSize_ != org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota.getDefaultInstance()) {
      readSize_ =
        org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota.newBuilder(readSize_).mergeFrom(value).buildPartial();
    } else {
      readSize_ = value;
    }
    onChanged();
  } else {
    readSizeBuilder_.mergeFrom(value);
  }
  bitField0_ |= 0x00000020;
  return this;
}
/**
 * <code>optional .hbase.pb.TimedQuota read_num = 5;</code>
 */
public Builder mergeReadNum(org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota value) {
  if (readNumBuilder_ == null) {
    if (((bitField0_ & 0x00000010) == 0x00000010) &&
        readNum_ != null &&
        readNum_ != org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota.getDefaultInstance()) {
      readNum_ =
        org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota.newBuilder(readNum_).mergeFrom(value).buildPartial();
    } else {
      readNum_ = value;
    }
    onChanged();
  } else {
    readNumBuilder_.mergeFrom(value);
  }
  bitField0_ |= 0x00000010;
  return this;
}
/**
 * <code>optional .hbase.pb.TimedQuota timed_quota = 2;</code>
 */
public Builder mergeTimedQuota(org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota value) {
  if (timedQuotaBuilder_ == null) {
    if (((bitField0_ & 0x00000002) == 0x00000002) &&
        timedQuota_ != null &&
        timedQuota_ != org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota.getDefaultInstance()) {
      timedQuota_ =
        org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota.newBuilder(timedQuota_).mergeFrom(value).buildPartial();
    } else {
      timedQuota_ = value;
    }
    onChanged();
  } else {
    timedQuotaBuilder_.mergeFrom(value);
  }
  bitField0_ |= 0x00000002;
  return this;
}
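/*
 * Merge-semantics sketch (added for illustration, not from the source): the generated
 * mergeTimedQuota above combines fields rather than replacing the whole message, so fields
 * set on the incoming TimedQuota override while unset fields keep their existing values.
 * Assumes the shaded QuotaProtos/HBaseProtos classes used in the tests above; the method
 * name exampleMergeTimedQuota is illustrative only.
 */
static QuotaProtos.ThrottleRequest exampleMergeTimedQuota() {
  QuotaProtos.ThrottleRequest.Builder builder = QuotaProtos.ThrottleRequest.newBuilder()
      .setTimedQuota(QuotaProtos.TimedQuota.newBuilder()
          .setSoftLimit(10)
          .setTimeUnit(HBaseProtos.TimeUnit.MINUTES)
          .build());
  // Only time_unit is set on the incoming message: it overrides MINUTES with SECONDS,
  // while the existing soft_limit of 10 is retained.
  builder.mergeTimedQuota(QuotaProtos.TimedQuota.newBuilder()
      .setTimeUnit(HBaseProtos.TimeUnit.SECONDS)
      .build());
  return builder.build();
}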
/**
 * <code>optional .hbase.pb.TimedQuota write_capacity_unit = 8;</code>
 */
public Builder mergeWriteCapacityUnit(org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota value) {
  if (writeCapacityUnitBuilder_ == null) {
    if (((bitField0_ & 0x00000080) == 0x00000080) &&
        writeCapacityUnit_ != null &&
        writeCapacityUnit_ != org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota.getDefaultInstance()) {
      writeCapacityUnit_ =
        org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota.newBuilder(writeCapacityUnit_).mergeFrom(value).buildPartial();
    } else {
      writeCapacityUnit_ = value;
    }
    onChanged();
  } else {
    writeCapacityUnitBuilder_.mergeFrom(value);
  }
  bitField0_ |= 0x00000080;
  return this;
}
@Test
public void testIncompatibleThrottleTypes() throws IOException {
  TimedQuota requestsQuota = TimedQuota.newBuilder().setSoftLimit(10)
      .setScope(QuotaProtos.QuotaScope.MACHINE)
      .setTimeUnit(HBaseProtos.TimeUnit.MINUTES).build();
  ThrottleRequest requestsQuotaReq = ThrottleRequest.newBuilder().setTimedQuota(requestsQuota)
      .setType(QuotaProtos.ThrottleType.REQUEST_NUMBER).build();
  ThrottleSettings orig = new ThrottleSettings("joe", null, null, null, requestsQuotaReq);

  TimedQuota readsQuota = TimedQuota.newBuilder().setSoftLimit(10)
      .setScope(QuotaProtos.QuotaScope.MACHINE)
      .setTimeUnit(HBaseProtos.TimeUnit.SECONDS).build();
  ThrottleRequest readsQuotaReq = ThrottleRequest.newBuilder().setTimedQuota(readsQuota)
      .setType(QuotaProtos.ThrottleType.READ_NUMBER).build();

  try {
    orig.merge(new ThrottleSettings("joe", null, null, null, readsQuotaReq));
    fail("A read throttle should not be capable of being merged with a request quota");
  } catch (IllegalArgumentException e) {
    // Pass
  }
}
/**
 * <code>optional .hbase.pb.TimedQuota write_size = 4;</code>
 */
public Builder mergeWriteSize(org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota value) {
  if (writeSizeBuilder_ == null) {
    if (((bitField0_ & 0x00000008) == 0x00000008) &&
        writeSize_ != null &&
        writeSize_ != org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota.getDefaultInstance()) {
      writeSize_ =
        org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota.newBuilder(writeSize_).mergeFrom(value).buildPartial();
    } else {
      writeSize_ = value;
    }
    onChanged();
  } else {
    writeSizeBuilder_.mergeFrom(value);
  }
  bitField0_ |= 0x00000008;
  return this;
}
/**
 * <code>optional .hbase.pb.TimedQuota req_num = 1;</code>
 */
public Builder mergeReqNum(org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota value) {
  if (reqNumBuilder_ == null) {
    if (((bitField0_ & 0x00000001) == 0x00000001) &&
        reqNum_ != null &&
        reqNum_ != org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota.getDefaultInstance()) {
      reqNum_ =
        org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota.newBuilder(reqNum_).mergeFrom(value).buildPartial();
    } else {
      reqNum_ = value;
    }
    onChanged();
  } else {
    reqNumBuilder_.mergeFrom(value);
  }
  bitField0_ |= 0x00000001;
  return this;
}
/**
 * <code>optional .hbase.pb.TimedQuota write_num = 3;</code>
 */
public Builder mergeWriteNum(org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota value) {
  if (writeNumBuilder_ == null) {
    if (((bitField0_ & 0x00000004) == 0x00000004) &&
        writeNum_ != null &&
        writeNum_ != org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota.getDefaultInstance()) {
      writeNum_ =
        org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota.newBuilder(writeNum_).mergeFrom(value).buildPartial();
    } else {
      writeNum_ = value;
    }
    onChanged();
  } else {
    writeNumBuilder_.mergeFrom(value);
  }
  bitField0_ |= 0x00000004;
  return this;
}
/**
 * <code>optional .hbase.pb.TimedQuota req_size = 2;</code>
 */
public Builder mergeReqSize(org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota value) {
  if (reqSizeBuilder_ == null) {
    if (((bitField0_ & 0x00000002) == 0x00000002) &&
        reqSize_ != null &&
        reqSize_ != org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota.getDefaultInstance()) {
      reqSize_ =
        org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota.newBuilder(reqSize_).mergeFrom(value).buildPartial();
    } else {
      reqSize_ = value;
    }
    onChanged();
  } else {
    reqSizeBuilder_.mergeFrom(value);
  }
  bitField0_ |= 0x00000002;
  return this;
}
@Test
public void testMerge() throws IOException {
  TimedQuota tq1 = TimedQuota.newBuilder().setSoftLimit(10)
      .setScope(QuotaProtos.QuotaScope.MACHINE)
      .setTimeUnit(HBaseProtos.TimeUnit.MINUTES).build();
  ThrottleRequest tr1 = ThrottleRequest.newBuilder().setTimedQuota(tq1)
      .setType(QuotaProtos.ThrottleType.REQUEST_NUMBER).build();
  ThrottleSettings orig = new ThrottleSettings("joe", null, null, null, tr1);

  TimedQuota tq2 = TimedQuota.newBuilder().setSoftLimit(10)
      .setScope(QuotaProtos.QuotaScope.MACHINE)
      .setTimeUnit(HBaseProtos.TimeUnit.SECONDS).build();
  ThrottleRequest tr2 = ThrottleRequest.newBuilder().setTimedQuota(tq2)
      .setType(QuotaProtos.ThrottleType.REQUEST_NUMBER).build();

  ThrottleSettings merged = orig.merge(new ThrottleSettings("joe", null, null, null, tr2));

  assertEquals(10, merged.getSoftLimit());
  assertEquals(ThrottleType.REQUEST_NUMBER, merged.getThrottleType());
  assertEquals(TimeUnit.SECONDS, merged.getTimeUnit());
}
@Test
public void testNoThrottleReturnsOriginal() throws IOException {
  TimedQuota tq1 = TimedQuota.newBuilder().setSoftLimit(10)
      .setScope(QuotaProtos.QuotaScope.MACHINE)
      .setTimeUnit(HBaseProtos.TimeUnit.MINUTES).build();
  ThrottleRequest tr1 = ThrottleRequest.newBuilder().setTimedQuota(tq1)
      .setType(QuotaProtos.ThrottleType.REQUEST_NUMBER).build();
  ThrottleSettings orig = new ThrottleSettings("joe", null, null, null, tr1);

  ThrottleRequest tr2 = ThrottleRequest.newBuilder()
      .setType(QuotaProtos.ThrottleType.REQUEST_NUMBER).build();

  assertTrue(
      "The same object should be returned by merge, but it wasn't",
      orig == orig.merge(new ThrottleSettings("joe", null, null, null, tr2)));
}
}
public Builder newBuilderForType() {
  return newBuilder();
}
public static Builder newBuilder() {