// Verifies the two-phase visibility model of StatsBuckets: samples recorded via
// addValue() become observable only after refresh() rotates the window.
@Test
public void test() {
    // Boundaries 10/20/30 produce 4 buckets: <=10, (10,20], (20,30], overflow.
    StatsBuckets stats = new StatsBuckets(10, 20, 30);
    // Freshly created: no samples, so the average is NaN and all counters are zero.
    // NOTE: TestNG assertEquals argument order is (actual, expected).
    assertEquals(stats.getAvg(), Double.NaN);
    assertEquals(stats.getSum(), 0);
    assertEquals(stats.getCount(), 0);
    assertEquals(stats.getBuckets(), new long[] { 0, 0, 0, 0 });
    stats.addValue(5);
    // A recorded sample is NOT visible yet — readers still see the old (empty) window.
    assertEquals(stats.getAvg(), Double.NaN);
    assertEquals(stats.getSum(), 0);
    assertEquals(stats.getCount(), 0);
    assertEquals(stats.getBuckets(), new long[] { 0, 0, 0, 0 });
    stats.refresh();
    // After refresh the sample (5) appears: it falls in the first bucket (<= 10).
    assertEquals(stats.getAvg(), 5.0);
    assertEquals(stats.getSum(), 5);
    assertEquals(stats.getCount(), 1);
    assertEquals(stats.getBuckets(), new long[] { 1, 0, 0, 0 });
    stats.addValue(15);
    // Again invisible until the next refresh().
    assertEquals(stats.getAvg(), 5.0);
    assertEquals(stats.getSum(), 5);
    assertEquals(stats.getCount(), 1);
    assertEquals(stats.getBuckets(), new long[] { 1, 0, 0, 0 });
    // NOTE(review): the method body appears truncated in this view (no closing brace).
/**
 * Creates a histogram with the given bucket boundaries.
 *
 * <p>One {@link LongAdder} counter is created per boundary plus a trailing
 * overflow bucket, so a boundaries array of length N yields N + 1 buckets.
 *
 * @param boundaries ascending upper bounds of the histogram buckets; must be non-empty
 * @throws IllegalArgumentException if {@code boundaries} is empty or not sorted
 */
public StatsBuckets(long... boundaries) {
    // Consistency fix: give the emptiness check a message like the sortedness check has.
    checkArgument(boundaries.length > 0, "Boundaries array must not be empty");
    checkArgument(isSorted(boundaries), "Boundaries array must be sorted");
    // Defensive copy: the varargs array is caller-owned and could be mutated after
    // construction, which would silently corrupt bucket classification.
    this.boundaries = boundaries.clone();
    this.sumCounter = new LongAdder();
    this.buckets = new LongAdder[boundaries.length + 1]; // +1 for the overflow bucket
    for (int i = 0; i < buckets.length; i++) {
        buckets[i] = new LongAdder();
    }
    // Snapshot buffer, same length as buckets; presumably filled by refresh() —
    // the unit test shows counts become visible only after refresh().
    this.values = new long[buckets.length];
}
// Namespace-level storage write-latency metrics: rotate the histogram window first so
// getBuckets()/getCount()/getSum() report the just-finished interval, then emit one
// metric per bucket plus the aggregate count and sum.
stats.storageWriteLatencyBuckets.refresh();
long[] latencyBuckets = stats.storageWriteLatencyBuckets.getBuckets();
metric(stream, cluster, namespace, "pulsar_storage_write_latency_le_0_5", latencyBuckets[0]);
metric(stream, cluster, namespace, "pulsar_storage_write_latency_le_1", latencyBuckets[1]);
// NOTE(review): indices 2..8 are not emitted in this view — confirm the intermediate
// le_* metric lines exist elsewhere; index 9 is the overflow bucket.
metric(stream, cluster, namespace, "pulsar_storage_write_latency_overflow", latencyBuckets[9]);
metric(stream, cluster, namespace, "pulsar_storage_write_latency_count", stats.storageWriteLatencyBuckets.getCount());
metric(stream, cluster, namespace, "pulsar_storage_write_latency_sum", stats.storageWriteLatencyBuckets.getSum());
// Namespace-level entry-size histogram, emitted the same way.
stats.entrySizeBuckets.refresh();
long[] entrySizeBuckets = stats.entrySizeBuckets.getBuckets();
metric(stream, cluster, namespace, "pulsar_entry_size_le_128", entrySizeBuckets[0]);
metric(stream, cluster, namespace, "pulsar_entry_size_le_512", entrySizeBuckets[1]);
// NOTE(review): indices 2..6 not shown here — verify against the full generator.
metric(stream, cluster, namespace, "pulsar_entry_size_le_1_mb", entrySizeBuckets[7]);
metric(stream, cluster, namespace, "pulsar_entry_size_le_overflow", entrySizeBuckets[8]);
metric(stream, cluster, namespace, "pulsar_entry_size_count", stats.entrySizeBuckets.getCount());
metric(stream, cluster, namespace, "pulsar_entry_size_sum", stats.entrySizeBuckets.getSum());
@Override
public long[] getEntrySizeBuckets() {
    // Per-bucket entry-size counts, as captured at the last refresh of entryStats.
    final long[] bucketCounts = entryStats.getBuckets();
    return bucketCounts;
}
// Topic-level metrics: backlog gauge followed by the write-latency and entry-size
// histograms. Unlike the namespace-level generator, no refresh() call is visible here —
// NOTE(review): presumably the histograms were refreshed upstream; confirm.
metric(stream, cluster, namespace, topic, "pulsar_msg_backlog", stats.msgBacklog);
long[] latencyBuckets = stats.storageWriteLatencyBuckets.getBuckets();
metric(stream, cluster, namespace, topic, "pulsar_storage_write_latency_le_0_5", latencyBuckets[0]);
metric(stream, cluster, namespace, topic, "pulsar_storage_write_latency_le_1", latencyBuckets[1]);
// NOTE(review): indices 2..8 are not emitted in this view — confirm the intermediate
// le_* metric lines exist elsewhere; index 9 is the overflow bucket.
metric(stream, cluster, namespace, topic, "pulsar_storage_write_latency_overflow", latencyBuckets[9]);
metric(stream, cluster, namespace, topic, "pulsar_storage_write_latency_count", stats.storageWriteLatencyBuckets.getCount());
metric(stream, cluster, namespace, topic, "pulsar_storage_write_latency_sum", stats.storageWriteLatencyBuckets.getSum());
long[] entrySizeBuckets = stats.entrySizeBuckets.getBuckets();
metric(stream, cluster, namespace, topic, "pulsar_entry_size_le_128", entrySizeBuckets[0]);
metric(stream, cluster, namespace, topic, "pulsar_entry_size_le_512", entrySizeBuckets[1]);
// NOTE(review): indices 2..6 not shown here — verify against the full generator.
metric(stream, cluster, namespace, topic, "pulsar_entry_size_le_1_mb", entrySizeBuckets[7]);
metric(stream, cluster, namespace, topic, "pulsar_entry_size_le_overflow", entrySizeBuckets[8]);
metric(stream, cluster, namespace, topic, "pulsar_entry_size_count", stats.entrySizeBuckets.getCount());
metric(stream, cluster, namespace, topic, "pulsar_entry_size_sum", stats.entrySizeBuckets.getSum());
@Override
public double getAddEntryLatencyAverageUsec() {
    // Average add-entry latency in microseconds from the last refreshed window.
    final double averageUsec = addEntryLatencyStatsUsec.getAvg();
    return averageUsec;
}
/**
 * Records a single add-entry latency sample.
 *
 * @param latency latency value, expressed in {@code unit}
 * @param unit    time unit of {@code latency}
 */
public void addAddEntryLatencySample(long latency, TimeUnit unit) {
    // The underlying stats are kept in microseconds, as the field name indicates.
    final long latencyUsec = unit.toMicros(latency);
    addEntryLatencyStatsUsec.addValue(latencyUsec);
}
/**
 * Rolls all rate counters and histograms over to a new measurement window.
 *
 * @param period length of the elapsed window
 * @param unit   time unit of {@code period}
 */
public void refreshStats(long period, TimeUnit unit) {
    // Window length in fractional seconds (millisecond resolution), used to
    // turn raw operation counts into per-second rates.
    final double periodSeconds = unit.toMillis(period) / 1000.0;

    addEntryOps.calculateRate(periodSeconds);
    addEntryOpsFailed.calculateRate(periodSeconds);
    readEntriesOps.calculateRate(periodSeconds);
    readEntriesOpsFailed.calculateRate(periodSeconds);
    markDeleteOps.calculateRate(periodSeconds);

    // Rotate the histograms so readers observe the just-finished window.
    addEntryLatencyStatsUsec.refresh();
    ledgerSwitchLatencyStatsUsec.refresh();
    entryStats.refresh();
}
/**
 * Zeroes every aggregated counter and clears nested stats so a new
 * aggregation pass can start from a clean slate.
 */
public void reset() {
    // Entity counts.
    topicsCount = 0;
    subscriptionsCount = 0;
    producersCount = 0;
    consumersCount = 0;

    // Message rates and throughput.
    rateIn = 0;
    rateOut = 0;
    throughputIn = 0;
    throughputOut = 0;

    // Storage figures.
    storageSize = 0;
    msgBacklog = 0;
    storageWriteRate = 0;
    storageReadRate = 0;

    // Nested aggregates and histograms.
    replicationStats.clear();
    storageWriteLatencyBuckets.reset();
    entrySizeBuckets.reset();
}
}
// Storage write-latency metrics: rotate the histogram window first so
// getBuckets()/getCount()/getSum() report the just-finished interval, then emit one
// metric per bucket plus the aggregate count and sum.
stats.storageWriteLatencyBuckets.refresh();
long[] latencyBuckets = stats.storageWriteLatencyBuckets.getBuckets();
metric(stream, cluster, namespace, "pulsar_storage_write_latency_le_0_5", latencyBuckets[0]);
metric(stream, cluster, namespace, "pulsar_storage_write_latency_le_1", latencyBuckets[1]);
// NOTE(review): indices 2..8 are not emitted in this view — confirm the intermediate
// le_* metric lines exist elsewhere; index 9 is the overflow bucket.
metric(stream, cluster, namespace, "pulsar_storage_write_latency_overflow", latencyBuckets[9]);
metric(stream, cluster, namespace, "pulsar_storage_write_latency_count", stats.storageWriteLatencyBuckets.getCount());
metric(stream, cluster, namespace, "pulsar_storage_write_latency_sum", stats.storageWriteLatencyBuckets.getSum());
// Entry-size histogram, emitted the same way.
stats.entrySizeBuckets.refresh();
long[] entrySizeBuckets = stats.entrySizeBuckets.getBuckets();
metric(stream, cluster, namespace, "pulsar_entry_size_le_128", entrySizeBuckets[0]);
metric(stream, cluster, namespace, "pulsar_entry_size_le_512", entrySizeBuckets[1]);
// NOTE(review): indices 2..6 not shown here — verify against the full generator.
metric(stream, cluster, namespace, "pulsar_entry_size_le_1_mb", entrySizeBuckets[7]);
metric(stream, cluster, namespace, "pulsar_entry_size_le_overflow", entrySizeBuckets[8]);
metric(stream, cluster, namespace, "pulsar_entry_size_count", stats.entrySizeBuckets.getCount());
metric(stream, cluster, namespace, "pulsar_entry_size_sum", stats.entrySizeBuckets.getSum());
@Override
public long[] getAddEntryLatencyBuckets() {
    // Histogram counts of add-entry latencies, captured at the last refresh.
    final long[] bucketCounts = addEntryLatencyStatsUsec.getBuckets();
    return bucketCounts;
}
@Override
public double getEntrySizeAverage() {
    // Mean entry size from the most recently refreshed stats window.
    final double average = entryStats.getAvg();
    return average;
}
/**
 * Records a single ledger-switch latency sample.
 *
 * @param latency latency value, expressed in {@code unit}
 * @param unit    time unit of {@code latency}
 */
public void addLedgerSwitchLatencySample(long latency, TimeUnit unit) {
    // The underlying stats are kept in microseconds, as the field name indicates.
    final long latencyUsec = unit.toMicros(latency);
    ledgerSwitchLatencyStatsUsec.addValue(latencyUsec);
}
/**
 * Recomputes per-second operation rates over the elapsed window and rotates
 * the latency and entry-size histograms.
 *
 * @param period length of the elapsed window
 * @param unit   time unit of {@code period}
 */
public void refreshStats(long period, TimeUnit unit) {
    final double elapsedSeconds = unit.toMillis(period) / 1000.0; // ms resolution

    // Per-second rates for each tracked operation type.
    addEntryOps.calculateRate(elapsedSeconds);
    addEntryOpsFailed.calculateRate(elapsedSeconds);
    readEntriesOps.calculateRate(elapsedSeconds);
    readEntriesOpsFailed.calculateRate(elapsedSeconds);
    markDeleteOps.calculateRate(elapsedSeconds);

    // Histogram window rotation: makes accumulated samples visible to readers.
    addEntryLatencyStatsUsec.refresh();
    ledgerSwitchLatencyStatsUsec.refresh();
    entryStats.refresh();
}
/**
 * Zeroes every aggregated counter and clears nested per-replication /
 * per-subscription stats so a new aggregation pass starts clean.
 */
public void reset() {
    // Entity counts.
    subscriptionsCount = 0;
    producersCount = 0;
    consumersCount = 0;

    // Message rates and throughput.
    rateIn = 0;
    rateOut = 0;
    throughputIn = 0;
    throughputOut = 0;

    // Storage figures.
    storageSize = 0;
    msgBacklog = 0;
    storageWriteRate = 0;
    storageReadRate = 0;

    // Nested aggregates and histograms.
    replicationStats.clear();
    subscriptionStats.clear();
    storageWriteLatencyBuckets.reset();
    entrySizeBuckets.reset();
}
@Override
public long[] getLedgerSwitchLatencyBuckets() {
    // Histogram counts of ledger-switch latencies, captured at the last refresh.
    final long[] bucketCounts = ledgerSwitchLatencyStatsUsec.getBuckets();
    return bucketCounts;
}
@Override
public double getLedgerSwitchLatencyAverageUsec() {
    // Average ledger-switch latency in microseconds for the last refreshed window.
    final double averageUsec = ledgerSwitchLatencyStatsUsec.getAvg();
    return averageUsec;
}