/**
 * Produce a distinct copy of this histogram, carrying the same configuration
 * and all recorded data.
 *
 * @return a new, independent {@link ConcurrentHistogram} equal in content to this one
 */
@Override
public ConcurrentHistogram copy() {
    final ConcurrentHistogram duplicate = new ConcurrentHistogram(this);
    duplicate.add(this);
    return duplicate;
}
/**
 * Record a double value, converted to this histogram's integer scale, {@code count} times.
 * <p>
 * The whole update is performed inside a writer critical section (entered/exited via
 * {@code wrp} — presumably a writer/reader phaser; TODO confirm) so that concurrent
 * readers flipping the active/inactive count arrays observe a consistent state.
 *
 * @param value the value to record, in double form
 * @param count the number of occurrences to record
 * @throws ArrayIndexOutOfBoundsException if the converted value falls outside the trackable range
 */
@Override
public void recordConvertedDoubleValueWithCount(final double value, final long count) throws ArrayIndexOutOfBoundsException {
    long criticalValue = wrp.writerCriticalSectionEnter();
    try {
        // Convert from double form to the integer scale of the active counts array.
        long integerValue = (long) (value * activeCounts.doubleToIntegerValueConversionRatio);
        int index = countsArrayIndex(integerValue);
        // Normalize the index against the active array's current offset before updating.
        activeCounts.addAndGet(
                normalizeIndex(index, activeCounts.getNormalizingIndexOffset(), activeCounts.length()),
                count);
        updateMinAndMax(integerValue);
        addToTotalCount(count);
    } finally {
        // Always exit the critical section, even if the index was out of bounds.
        wrp.writerCriticalSectionExit(criticalValue);
    }
}
/**
 * Produce a copy of this histogram with its recorded values corrected for
 * coordinated omission: for each recorded value larger than
 * {@code expectedIntervalBetweenValueSamples}, additional synthetic values are
 * added by the correcting add operation.
 *
 * @param expectedIntervalBetweenValueSamples the expected interval between value samples;
 *                                            correction applies to values exceeding it
 * @return a new, corrected {@link ConcurrentHistogram}
 */
@Override
public ConcurrentHistogram copyCorrectedForCoordinatedOmission(final long expectedIntervalBetweenValueSamples) {
    final ConcurrentHistogram corrected = new ConcurrentHistogram(this);
    corrected.addWhileCorrectingForCoordinatedOmission(this, expectedIntervalBetweenValueSamples);
    return corrected;
}
/**
 * Record a single occurrence of a double value, converted to this histogram's
 * integer scale.
 * <p>
 * The whole update is performed inside a writer critical section (entered/exited via
 * {@code wrp} — presumably a writer/reader phaser; TODO confirm) so that concurrent
 * readers flipping the active/inactive count arrays observe a consistent state.
 *
 * @param value the value to record, in double form
 */
@Override
void recordConvertedDoubleValue(final double value) {
    long criticalValue = wrp.writerCriticalSectionEnter();
    try {
        // Convert from double form to the integer scale of the active counts array.
        long integerValue = (long) (value * activeCounts.doubleToIntegerValueConversionRatio);
        int index = countsArrayIndex(integerValue);
        // Normalize the index against the active array's current offset before updating.
        activeCounts.incrementAndGet(
                normalizeIndex(index, activeCounts.getNormalizingIndexOffset(), activeCounts.length()));
        updateMinAndMax(integerValue);
        incrementTotalCount();
    } finally {
        // Always exit the critical section, even if the index was out of bounds.
        wrp.writerCriticalSectionExit(criticalValue);
    }
}
/**
 * Re-record the contents of the lowest (non-zero) half-bucket of the inactive counts
 * array at the post-shift scale, clearing the original slots.
 *
 * @param shiftAmount       the shift being applied, in index units (a whole number of
 *                          half-bucket sizes, judging by the conversion below)
 * @param preShiftZeroIndex the index of the 0-value slot before the shift
 */
private void shiftLowestInactiveHalfBucketContentsLeft(final int shiftAmount, final int preShiftZeroIndex) {
    // Convert the index-unit shift amount into binary orders of magnitude.
    final int numberOfBinaryOrdersOfMagnitude = shiftAmount >> subBucketHalfCountMagnitude;
    // The lowest inactive half-bucket (not including the 0 value) is special: unlike all other half
    // buckets, the lowest half bucket values cannot be scaled by simply changing the
    // normalizing offset. Instead, they must be individually re-recorded at the new
    // scale, and cleared from the current one.
    //
    // We know that all half buckets "below" the current lowest one are full of 0s, because
    // we would have overflowed otherwise. So we need to shift the values in the current
    // lowest half bucket into that range (including the current lowest half bucket itself).
    // Iterating up from the lowermost non-zero "from slot" and copying values to the newly
    // scaled "to slot" (and then zeroing the "from slot"), will work in a single pass,
    // because the scale "to slot" index will always be a lower index than its or any
    // preceding non-scaled "from slot" index:
    //
    // (Note that we specifically avoid slot 0, as it is directly handled in the outer case)
    for (int fromIndex = 1; fromIndex < subBucketHalfCount; fromIndex++) {
        // Scale the slot's value down by the shift and find its destination slot.
        long toValue = valueFromIndex(fromIndex) << numberOfBinaryOrdersOfMagnitude;
        int toIndex = countsArrayIndex(toValue);
        int normalizedToIndex = normalizeIndex(toIndex, inactiveCounts.getNormalizingIndexOffset(), inactiveCounts.length());
        // Move the count: copy to the destination, then zero the source slot.
        long countAtFromIndex = inactiveCounts.get(fromIndex + preShiftZeroIndex);
        inactiveCounts.lazySet(normalizedToIndex, countAtFromIndex);
        inactiveCounts.lazySet(fromIndex + preShiftZeroIndex, 0);
    }
    // Note that the above loop only creates O(N) work for histograms that have values in
    // the lowest half-bucket (excluding the 0 value). Histograms that never have values
    // there (e.g. all integer value histograms used as internal storage in DoubleHistograms)
    // will never loop, and their shifts will remain O(1).
}
// NOTE(review): this is a statement fragment from a resize path whose enclosing
// method header is outside this view; names (countsArrayLength, oldInactiveCounts,
// newHighestTrackableValue) are defined by that surrounding scope.
assert (countsArrayLength == inactiveCounts.length());
int newArrayLength = determineArrayLengthNeeded(newHighestTrackableValue);
int countsDelta = newArrayLength - countsArrayLength;
// Fix: copyInactiveCountsContentsOnResize() was invoked twice in a row with identical
// arguments and no state change in between; the second call only re-copied the same
// contents. A single call is sufficient.
copyInactiveCountsContentsOnResize(oldInactiveCounts, countsDelta);
establishSize(newHighestTrackableValue);
/**
 * Construct a new histogram by decoding it from a ByteBuffer.
 *
 * @param buffer The buffer to decode from
 * @param minBarForHighestTrackableValue Force highestTrackableValue to be set at least this high
 * @return The newly constructed histogram
 */
public static ConcurrentHistogram decodeFromByteBuffer(final ByteBuffer buffer,
                                                       final long minBarForHighestTrackableValue) {
    final ConcurrentHistogram decoded =
            (ConcurrentHistogram) decodeFromByteBuffer(buffer, ConcurrentHistogram.class,
                    minBarForHighestTrackableValue);
    return decoded;
}
/**
 * Construct a new histogram by decoding it from a compressed form in a ByteBuffer.
 *
 * @param buffer The buffer to decode from
 * @param minBarForHighestTrackableValue Force highestTrackableValue to be set at least this high
 * @return The newly constructed histogram
 * @throws java.util.zip.DataFormatException on error parsing/decompressing the buffer
 */
public static ConcurrentHistogram decodeFromCompressedByteBuffer(final ByteBuffer buffer,
                                                                 final long minBarForHighestTrackableValue)
        throws DataFormatException {
    final ConcurrentHistogram decoded =
            (ConcurrentHistogram) decodeFromCompressedByteBuffer(buffer, ConcurrentHistogram.class,
                    minBarForHighestTrackableValue);
    return decoded;
}
/**
 * Record a single occurrence of a double value, converted to this histogram's
 * integer scale.
 * <p>
 * The whole update is performed inside a writer critical section (entered/exited via
 * {@code wrp} — presumably a writer/reader phaser; TODO confirm) so that concurrent
 * readers flipping the active/inactive count arrays observe a consistent state.
 *
 * @param value the value to record, in double form
 */
@Override
void recordConvertedDoubleValue(final double value) {
    long criticalValue = wrp.writerCriticalSectionEnter();
    try {
        // Convert from double form to the integer scale of the active counts array.
        long integerValue = (long) (value * activeCounts.doubleToIntegerValueConversionRatio);
        int index = countsArrayIndex(integerValue);
        // Normalize the index against the active array's current offset before updating.
        activeCounts.incrementAndGet(
                normalizeIndex(index, activeCounts.getNormalizingIndexOffset(), activeCounts.length()));
        updateMinAndMax(integerValue);
        incrementTotalCount();
    } finally {
        // Always exit the critical section, even if the index was out of bounds.
        wrp.writerCriticalSectionExit(criticalValue);
    }
}
/**
 * Re-record the contents of the lowest (non-zero) half-bucket of the inactive counts
 * array at the post-shift scale, clearing the original slots.
 *
 * @param shiftAmount       the shift being applied, in index units (a whole number of
 *                          half-bucket sizes, judging by the conversion below)
 * @param preShiftZeroIndex the index of the 0-value slot before the shift
 */
private void shiftLowestInactiveHalfBucketContentsLeft(final int shiftAmount, final int preShiftZeroIndex) {
    // Convert the index-unit shift amount into binary orders of magnitude.
    final int numberOfBinaryOrdersOfMagnitude = shiftAmount >> subBucketHalfCountMagnitude;
    // The lowest inactive half-bucket (not including the 0 value) is special: unlike all other half
    // buckets, the lowest half bucket values cannot be scaled by simply changing the
    // normalizing offset. Instead, they must be individually re-recorded at the new
    // scale, and cleared from the current one.
    //
    // We know that all half buckets "below" the current lowest one are full of 0s, because
    // we would have overflowed otherwise. So we need to shift the values in the current
    // lowest half bucket into that range (including the current lowest half bucket itself).
    // Iterating up from the lowermost non-zero "from slot" and copying values to the newly
    // scaled "to slot" (and then zeroing the "from slot"), will work in a single pass,
    // because the scale "to slot" index will always be a lower index than its or any
    // preceding non-scaled "from slot" index:
    //
    // (Note that we specifically avoid slot 0, as it is directly handled in the outer case)
    for (int fromIndex = 1; fromIndex < subBucketHalfCount; fromIndex++) {
        // Scale the slot's value down by the shift and find its destination slot.
        long toValue = valueFromIndex(fromIndex) << numberOfBinaryOrdersOfMagnitude;
        int toIndex = countsArrayIndex(toValue);
        int normalizedToIndex = normalizeIndex(toIndex, inactiveCounts.getNormalizingIndexOffset(), inactiveCounts.length());
        // Move the count: copy to the destination, then zero the source slot.
        long countAtFromIndex = inactiveCounts.get(fromIndex + preShiftZeroIndex);
        inactiveCounts.lazySet(normalizedToIndex, countAtFromIndex);
        inactiveCounts.lazySet(fromIndex + preShiftZeroIndex, 0);
    }
    // Note that the above loop only creates O(N) work for histograms that have values in
    // the lowest half-bucket (excluding the 0 value). Histograms that never have values
    // there (e.g. all integer value histograms used as internal storage in DoubleHistograms)
    // will never loop, and their shifts will remain O(1).
}
// NOTE(review): this is a statement fragment from a resize path whose enclosing
// method header is outside this view; names (countsArrayLength, oldInactiveCounts,
// newHighestTrackableValue) are defined by that surrounding scope.
assert (countsArrayLength == inactiveCounts.length());
int newArrayLength = determineArrayLengthNeeded(newHighestTrackableValue);
int countsDelta = newArrayLength - countsArrayLength;
// Fix: copyInactiveCountsContentsOnResize() was invoked twice in a row with identical
// arguments and no state change in between; the second call only re-copied the same
// contents. A single call is sufficient.
copyInactiveCountsContentsOnResize(oldInactiveCounts, countsDelta);
establishSize(newHighestTrackableValue);
/**
 * Construct a new histogram by decoding it from a ByteBuffer.
 *
 * @param buffer The buffer to decode from
 * @param minBarForHighestTrackableValue Force highestTrackableValue to be set at least this high
 * @return The newly constructed histogram
 */
public static ConcurrentHistogram decodeFromByteBuffer(final ByteBuffer buffer,
                                                       final long minBarForHighestTrackableValue) {
    final ConcurrentHistogram decoded =
            (ConcurrentHistogram) decodeFromByteBuffer(buffer, ConcurrentHistogram.class,
                    minBarForHighestTrackableValue);
    return decoded;
}
/**
 * Construct a new histogram by decoding it from a compressed form in a ByteBuffer.
 *
 * @param buffer The buffer to decode from
 * @param minBarForHighestTrackableValue Force highestTrackableValue to be set at least this high
 * @return The newly constructed histogram
 * @throws java.util.zip.DataFormatException on error parsing/decompressing the buffer
 */
public static ConcurrentHistogram decodeFromCompressedByteBuffer(final ByteBuffer buffer,
                                                                 final long minBarForHighestTrackableValue)
        throws DataFormatException {
    final ConcurrentHistogram decoded =
            (ConcurrentHistogram) decodeFromCompressedByteBuffer(buffer, ConcurrentHistogram.class,
                    minBarForHighestTrackableValue);
    return decoded;
}
/**
 * Record a double value, converted to this histogram's integer scale, {@code count} times.
 * <p>
 * The whole update is performed inside a writer critical section (entered/exited via
 * {@code wrp} — presumably a writer/reader phaser; TODO confirm) so that concurrent
 * readers flipping the active/inactive count arrays observe a consistent state.
 *
 * @param value the value to record, in double form
 * @param count the number of occurrences to record
 * @throws ArrayIndexOutOfBoundsException if the converted value falls outside the trackable range
 */
@Override
public void recordConvertedDoubleValueWithCount(final double value, final long count) throws ArrayIndexOutOfBoundsException {
    long criticalValue = wrp.writerCriticalSectionEnter();
    try {
        // Convert from double form to the integer scale of the active counts array.
        long integerValue = (long) (value * activeCounts.doubleToIntegerValueConversionRatio);
        int index = countsArrayIndex(integerValue);
        // Normalize the index against the active array's current offset before updating.
        activeCounts.addAndGet(
                normalizeIndex(index, activeCounts.getNormalizingIndexOffset(), activeCounts.length()),
                count);
        updateMinAndMax(integerValue);
        addToTotalCount(count);
    } finally {
        // Always exit the critical section, even if the index was out of bounds.
        wrp.writerCriticalSectionExit(criticalValue);
    }
}
/**
 * Produce a distinct copy of this histogram, carrying the same configuration
 * and all recorded data.
 *
 * @return a new, independent {@link ConcurrentHistogram} equal in content to this one
 */
@Override
public ConcurrentHistogram copy() {
    final ConcurrentHistogram duplicate = new ConcurrentHistogram(this);
    duplicate.add(this);
    return duplicate;
}
/**
 * Produce a copy of this histogram with its recorded values corrected for
 * coordinated omission: for each recorded value larger than
 * {@code expectedIntervalBetweenValueSamples}, additional synthetic values are
 * added by the correcting add operation.
 *
 * @param expectedIntervalBetweenValueSamples the expected interval between value samples;
 *                                            correction applies to values exceeding it
 * @return a new, corrected {@link ConcurrentHistogram}
 */
@Override
public ConcurrentHistogram copyCorrectedForCoordinatedOmission(final long expectedIntervalBetweenValueSamples) {
    final ConcurrentHistogram corrected = new ConcurrentHistogram(this);
    corrected.addWhileCorrectingForCoordinatedOmission(this, expectedIntervalBetweenValueSamples);
    return corrected;
}