static EstimatedHistogram defaultPartitionSizeHistogram() { // EH of 150 can track a max value of 1697806495183, i.e., > 1.5PB return new EstimatedHistogram(150); }
/**
 * Two histograms are equal when they have identical bucket boundaries and
 * identical bucket counts (without resetting them on read).
 */
@Override
public boolean equals(Object o)
{
    if (o == this)
        return true;
    if (!(o instanceof EstimatedHistogram))
        return false;

    EstimatedHistogram other = (EstimatedHistogram) o;
    return Arrays.equals(getBucketOffsets(), other.getBucketOffsets())
        && Arrays.equals(getBuckets(false), other.getBuckets(false));
}
/**
 * Records one per-partition cell-count sample.
 *
 * @param cellCount number of cells observed in a partition
 * @return this collector, to allow fluent chaining
 */
public MetadataCollector addCellPerPartitionCount(long cellCount)
{
    estimatedCellPerPartitionCount.add(cellCount);
    return this;
}
// Bucket boundaries of a default-sized EstimatedHistogram — presumably shared
// by all histograms handled below. NOTE(review): statement fragment; its
// enclosing definition is outside this view.
long[] offsets = new EstimatedHistogram().getBucketOffsets();
/**
 * Converts raw histogram bucket counts into the seven standard metric slots:
 * the 50th/75th/95th/98th/99th percentiles, then min and max.
 * Every slot is NaN for an empty histogram; the five percentile slots are NaN
 * when the histogram has overflowed (min/max are still reported).
 *
 * @param counts raw bucket counts
 * @return array of length 7 as described above
 */
public double[] metricPercentilesAsArray(long[] counts)
{
    final double[] percentiles = new double[] { 0.5, 0.75, 0.95, 0.98, 0.99 };
    double[] values = new double[7];

    if (isEmpty(counts))
    {
        Arrays.fill(values, Double.NaN);
        return values;
    }

    EstimatedHistogram histogram = new EstimatedHistogram(counts);
    if (histogram.isOverflowed())
    {
        System.err.println(String.format("EstimatedHistogram overflowed larger than %s, unable to calculate percentiles", histogram.getLargestBucketOffset()));
        Arrays.fill(values, Double.NaN);
    }
    else
    {
        for (int i = 0; i < percentiles.length; i++)
            values[i] = histogram.percentile(percentiles[i]);
    }
    // min/max occupy the last two slots regardless of overflow
    values[5] = histogram.min();
    values[6] = histogram.max();
    return values;
}
/**
 * Estimates the fraction of this sstable's cells that are droppable
 * tombstones at the given point in time.
 *
 * @param gcBefore gc time in seconds
 * @return estimated droppable tombstone ratio at given gcBefore time,
 *         or 0.0 when no cells have been recorded.
 */
public double getEstimatedDroppableTombstoneRatio(int gcBefore)
{
    // Approximate total cell count: mean cells per partition times partition count.
    long estimatedColumnCount = this.estimatedColumnCount.mean() * this.estimatedColumnCount.count();
    if (estimatedColumnCount > 0)
    {
        double droppable = getDroppableTombstonesBefore(gcBefore);
        return droppable / estimatedColumnCount;
    }
    // Fix: use a double literal rather than 0.0f in a double-returning method.
    return 0.0;
}
// NOTE(review): fragment of a larger histogram-building method — the early
// return below is clearly guarded by a condition outside this view, and
// lowhalf/highhalf/ranges/values/count are declared elsewhere. Do not read
// this as straight-line code.
return new EstimatedHistogram(EMPTY_LONG_ARRAY, ZERO);
// Copy the upper half of the bucket boundaries after the lower half
// (presumably lowhalf was copied just before this — TODO confirm).
System.arraycopy(highhalf, 0, ranges, lowhalf.length + 1, highhalf.length);
// Fresh histogram over the combined boundaries; bucket counts start at zero,
// then each sampled value is added individually.
final EstimatedHistogram hist = new EstimatedHistogram(ranges, new long[ranges.length + 1]);
for (int i = 0 ; i < count ; i++)
    hist.add(values[i]);
return hist;
// Number of partition-size samples recorded in the sstable metadata.
long histogramCount = sstableMetadata.estimatedPartitionSize.count();
// NOTE(review): fragment — the ternary's alternative branch continues beyond
// this view. When the histogram has data and has not overflowed, the sample
// count itself serves as the key estimate.
long estimatedKeys = histogramCount > 0 && !sstableMetadata.estimatedPartitionSize.isOverflowed() ? histogramCount
/**
 * Logs the row-size-per-leaf histogram of every Merkle tree held here.
 *
 * @param logger destination logger
 */
public void logRowSizePerLeaf(Logger logger)
{
    merkleTrees.values().forEach(tree -> tree.histogramOfRowSizePerLeaf().log(logger));
}
/**
 * Renders the human-readable name of the bucket at {@code index},
 * delegating the formatting to {@code appendRange}.
 */
private static String nameOfRange(long[] bucketOffsets, int index)
{
    final StringBuilder name = new StringBuilder();
    appendRange(name, bucketOffsets, index);
    return name.toString();
}
// NOTE(review): as flattened, the brace-less for-loop body was only the name
// assignment, leaving the maxNameLength update outside the loop where it
// references the loop variable out of scope (a compile error). Both
// statements belong in the loop body.
for (int i = 0; i < nameCount; i++)
{
    names[i] = nameOfRange(bucketOffsets, i);
    maxNameLength = Math.max(maxNameLength, names[i].length());
}
/**
 * @return the recent range-latency histogram buckets, in microseconds.
 *         Passes {@code true} to getBuckets — presumably the reset flag, so
 *         the recent counts are cleared on read; TODO confirm.
 */
public long[] getRecentRangeLatencyHistogramMicros()
{
    final long[] buckets = rangeMetrics.recentLatencyHistogram.getBuckets(true);
    return buckets;
}
EstimatedHistogram partitionSizeHist = new EstimatedHistogram(estimatedPartitionSize);
EstimatedHistogram columnCountHist = new EstimatedHistogram(estimatedColumnCount);

// NOTE(review): as flattened, the NaN-fill loops and the percentile loops ran
// unconditionally in sequence, with trailing statements referencing the loop
// variable out of scope (a compile error). Restored the intended structure:
// fill percentile slots with NaN on overflow, otherwise compute percentiles.
if (partitionSizeHist.isOverflowed())
{
    System.err.println(String.format("Row sizes are larger than %s, unable to calculate percentiles", partitionSizeHist.getLargestBucketOffset()));
    for (int i = 0; i < offsetPercentiles.length; i++)
        estimatedRowSizePercentiles[i] = Double.NaN;
}
else
{
    for (int i = 0; i < offsetPercentiles.length; i++)
        estimatedRowSizePercentiles[i] = partitionSizeHist.percentile(offsetPercentiles[i]);
}

if (columnCountHist.isOverflowed())
{
    System.err.println(String.format("Column counts are larger than %s, unable to calculate percentiles", columnCountHist.getLargestBucketOffset()));
    for (int i = 0; i < estimatedColumnCountPercentiles.length; i++)
        estimatedColumnCountPercentiles[i] = Double.NaN;
}
else
{
    for (int i = 0; i < offsetPercentiles.length; i++)
        estimatedColumnCountPercentiles[i] = columnCountHist.percentile(offsetPercentiles[i]);
}

// min/max occupy the last two slots regardless of overflow.
estimatedRowSizePercentiles[5] = partitionSizeHist.min();
estimatedColumnCountPercentiles[5] = columnCountHist.min();
estimatedRowSizePercentiles[6] = partitionSizeHist.max();
estimatedColumnCountPercentiles[6] = columnCountHist.max();
/**
 * @param gcBefore gc time in seconds
 * @return estimated droppable tombstone ratio at given gcBefore time.
 */
public double getEstimatedDroppableTombstoneRatio(int gcBefore)
{
    // Total cells ~= mean cells per partition * number of partitions.
    long totalCells = this.estimatedColumnCount.mean() * this.estimatedColumnCount.count();
    if (totalCells <= 0)
        return 0.0f;

    double droppable = getDroppableTombstonesBefore(gcBefore);
    return droppable / totalCells;
}
// NOTE(review): fragment of a larger histogram-building method — the early
// return below must be guarded by a condition outside this view, and
// lowhalf/highhalf/ranges/values/count are declared elsewhere.
return new EstimatedHistogram(EMPTY_LONG_ARRAY, ZERO);
// Append the upper half of the bucket boundaries after the lower half
// (presumably lowhalf was copied just before this — TODO confirm).
System.arraycopy(highhalf, 0, ranges, lowhalf.length + 1, highhalf.length);
// New histogram over the combined boundaries with zeroed bucket counts;
// samples are then added one by one.
final EstimatedHistogram hist = new EstimatedHistogram(ranges, new long[ranges.length + 1]);
for (int i = 0 ; i < count ; i++)
    hist.add(values[i]);
return hist;
// Number of partition-size samples in the sstable metadata.
long histogramCount = sstableMetadata.estimatedPartitionSize.count();
// NOTE(review): fragment — the ternary continues past this view. With valid,
// non-overflowed data the sample count doubles as the estimated key count.
long estimatedKeys = histogramCount > 0 && !sstableMetadata.estimatedPartitionSize.isOverflowed() ? histogramCount
/**
 * Writes each Merkle tree's row-size-per-leaf histogram to the given logger.
 *
 * @param logger logger to write the histograms to
 */
public void logRowSizePerLeaf(Logger logger)
{
    for (MerkleTree tree : merkleTrees.values())
    {
        tree.histogramOfRowSizePerLeaf().log(logger);
    }
}
/**
 * Builds the display name for the histogram bucket at the given index.
 */
private static String nameOfRange(long[] bucketOffsets, int index)
{
    final StringBuilder buf = new StringBuilder();
    appendRange(buf, bucketOffsets, index);
    return buf.toString();
}
// NOTE(review): flattening dropped the loop braces, leaving the
// maxNameLength update outside the loop with its loop index out of scope
// (a compile error). Restored both statements to the loop body.
for (int i = 0; i < nameCount; i++)
{
    names[i] = nameOfRange(bucketOffsets, i);
    maxNameLength = Math.max(maxNameLength, names[i].length());
}
/**
 * @return the lifetime write-latency histogram buckets, in microseconds.
 *         Passes {@code false} to getBuckets — presumably the reset flag,
 *         so the running totals are left untouched; TODO confirm.
 */
public long[] getLifetimeWriteLatencyHistogramMicros()
{
    final long[] buckets = metric.writeLatency.totalLatencyHistogram.getBuckets(false);
    return buckets;
}