@Warmup(iterations = 5, time = 200, timeUnit = TimeUnit.MILLISECONDS)
@Measurement(iterations = 20, time = 100, timeUnit = TimeUnit.MILLISECONDS)
@Benchmark
@Threads(2)
public void testSampleHit50() {
    // Count every probe, and separately count the probes the tracker accepts.
    // NOTE(review): this hit(2) ("50") variant bumps the sampleHit10* fields —
    // presumably the counters are shared across variants; confirm against the
    // field declarations.
    sampleHit10Checks++;
    if (MethodTracker.hit(2)) {
        sampleHit10Sampled++;
    }
}
@Benchmark() @BenchmarkMode(Mode.AverageTime) // just one thread @Threads(1) @OutputTimeUnit(TimeUnit.MICROSECONDS) public void avgRequestTime_singleThread() { // using a single actor minimizes the context switching between actor threads. Hello hello = Actor.getReference(Hello.class, "hello"); Task<String> result = hello.sayHello("test"); if (cpuCount > 2) { // this takes one cpu but reduces latency by reducing context switching while (!result.isDone()) { } } result.join(); }
@Benchmark() @Threads(-1) @BenchmarkMode({ Mode.Throughput }) @OperationsPerInvocation(THROUGHPUT_BENCH_BATCH_SIZE) public void requestThroughput() { // use a different actor per thread, ideally one actor per core for this test Hello hello = Actor.getReference(Hello.class, "hello" + Thread.currentThread().getId()); List<Task<String>> results = new ArrayList<>(THROUGHPUT_BENCH_BATCH_SIZE); // doing a batch of operations reduces latency since the worker threads don't stop processing requests. // if just a single message were sent then join invoked // we'd be measuring context switching more than anything else for (int i = 0; i < THROUGHPUT_BENCH_BATCH_SIZE; i++) { results.add(hello.sayHello("test")); } Task<Void> result = Task.allOf(results); result.join(); }
@Benchmark() @BenchmarkMode(Mode.AverageTime) // just one thread @Threads(1) @OutputTimeUnit(TimeUnit.MICROSECONDS) @OperationsPerInvocation(REQ_TIME_BENCH_BATCH_SIZE) public void avgRequestTime_batched() { // using a single actor minimizes the context switching between actor threads. Hello hello = Actor.getReference(Hello.class, "hello"); List<Task<String>> results = new ArrayList<>(REQ_TIME_BENCH_BATCH_SIZE); // doing a batch of operations reduces latency since the worker threads don't stop processing requests. // context switching plays a big role in the result when a single message is sent. for (int i = 0; i < REQ_TIME_BENCH_BATCH_SIZE; i++) { results.add(hello.sayHello("test")); } Task<Void> result = Task.allOf(results); result.join(); }
@Warmup(iterations = 5, time = 200, timeUnit = TimeUnit.MILLISECONDS)
@Measurement(iterations = 20, time = 100, timeUnit = TimeUnit.MILLISECONDS)
@Benchmark
@Threads(2)
public void testSampleHit100() {
    // Count every probe, and separately count the probes the tracker accepts.
    // NOTE(review): this hit(3) ("100") variant bumps the sampleHit10* fields —
    // presumably the counters are shared across variants; confirm against the
    // field declarations.
    sampleHit10Checks++;
    if (MethodTracker.hit(3)) {
        sampleHit10Sampled++;
    }
}
@Warmup(iterations = 5, time = 500, timeUnit = TimeUnit.MILLISECONDS)
@Measurement(iterations = 5, time = 500, timeUnit = TimeUnit.MILLISECONDS)
@Benchmark
@Threads(1)
public void testGauge_1() {
    // Record a constant gauge value; measures the raw per-call cost of a
    // single gauge update on one thread.
    c.gauge("g1", 10);
}
@Warmup(iterations = 5, time = 200, timeUnit = TimeUnit.MILLISECONDS)
@Measurement(iterations = 20, time = 100, timeUnit = TimeUnit.MILLISECONDS)
@Benchmark
@Threads(2)
public void testSampleHit10() {
    // Count every probe, and separately count the probes the tracker accepts
    // for the hit(1) ("10") variant.
    sampleHit10Checks++;
    if (MethodTracker.hit(1)) {
        sampleHit10Sampled++;
    }
}
// JMH harness configuration for the MutableSpanConverter micro-benchmarks:
// 3 forks, 10x1s warmup and 5x1s measurement iterations, sample-time mode
// reported in microseconds, single-threaded with thread-scoped state.
// NOTE(review): the `converter` field's initializer is truncated in this view;
// the class body continues beyond this chunk — do not edit the code here.
@Measurement(iterations = 5, time = 1) @Warmup(iterations = 10, time = 1) @Fork(3) @BenchmarkMode(Mode.SampleTime) @OutputTimeUnit(TimeUnit.MICROSECONDS) @State(Scope.Thread) @Threads(1) public class MutableSpanConverterBenchmarks { final MutableSpanConverter converter =
@Warmup(iterations = 5, time = 500, timeUnit = TimeUnit.MILLISECONDS)
@Measurement(iterations = 5, time = 500, timeUnit = TimeUnit.MILLISECONDS)
@Benchmark
@Threads(1)
public void testOneMethodSingleThread() {
    // One matched entry/exit pair for a single method name on one thread;
    // measures the cost of recording a complete method invocation.
    mip1.recordEntry("a");
    mip1.recordExit("a", 1);
}
@Warmup(iterations = 5, time = 200, timeUnit = TimeUnit.MILLISECONDS)
@Measurement(iterations = 5, time = 2000, timeUnit = TimeUnit.MILLISECONDS)
@Threads(2)
@Benchmark
public void testSendCommandMulti2() {
    // Two concurrent senders pushing a no-op command through the bridge.
    br.send(new OkayCommand());
}
@Warmup(iterations = 5, time = 200, timeUnit = TimeUnit.MILLISECONDS)
@Measurement(iterations = 5, time = 2000, timeUnit = TimeUnit.MILLISECONDS)
@Threads(4)
@Benchmark
public void testSendCommandMulti4() {
    // Four concurrent senders pushing a no-op command through the bridge.
    br.send(new OkayCommand());
}