/** Verifies that a guarded counter forwards recordings and renders like its snapshot. */
@Test
public void guarded() {
  StatsCounter stats = StatsCounter.guardedStatsCounter(new ConcurrentStatsCounter());
  stats.recordHits(1);
  stats.recordMisses(1);
  stats.recordEviction();
  stats.recordEviction(10);
  stats.recordLoadSuccess(1);
  stats.recordLoadFailure(1);

  CacheStats expected = new CacheStats(1, 1, 1, 1, 2, 2, 10);
  assertThat(stats.snapshot(), is(expected));
  assertThat(stats.toString(), is(expected.toString()));
  assertThat(stats.snapshot().toString(), is(expected.toString()));
}
/** Returns the disabled (no-op) counter, since statistics are not recorded here. */
@Override
public StatsCounter statsCounter() {
  StatsCounter disabled = StatsCounter.disabledStatsCounter();
  return disabled;
}
/**
 * Looks up the mapping for {@code key}, optionally recording a hit or miss.
 *
 * @param key the key to look up
 * @param recordStats whether the lookup outcome is counted in the statistics
 * @return the mapped value, or {@code null} if absent
 */
@Override
public @Nullable V getIfPresent(Object key, boolean recordStats) {
  V found = data.get(key);
  if (recordStats) {
    if (found != null) {
      statsCounter.recordHits(1);
    } else {
      statsCounter.recordMisses(1);
    }
  }
  return found;
}
/** Verifies that concurrent recordings from multiple threads are all accumulated. */
@Test
public void concurrent() {
  StatsCounter stats = new ConcurrentStatsCounter();
  ConcurrentTestHarness.timeTasks(5, () -> {
    stats.recordHits(1);
    stats.recordMisses(1);
    stats.recordEviction();
    stats.recordEviction(10);
    stats.recordLoadSuccess(1);
    stats.recordLoadFailure(1);
  });
  CacheStats expected = new CacheStats(5, 5, 5, 5, 10, 10, 50);
  assertThat(stats.snapshot(), is(expected));
}
/**
 * Decorates the mapping function so that a miss, load time, and load outcome are
 * recorded when statistics are enabled; otherwise returns the function unchanged.
 *
 * @param mappingFunction the function computing a value for an absent key
 * @param recordLoad whether the load's success or failure should be recorded
 * @return the (possibly decorated) mapping function
 */
default Function<? super K, ? extends V> statsAware(
    Function<? super K, ? extends V> mappingFunction, boolean recordLoad) {
  if (!isRecordingStats()) {
    return mappingFunction;
  }
  return key -> {
    // Reaching the loader implies the key was absent, so count the miss up front.
    statsCounter().recordMisses(1);
    long start = statsTicker().read();
    V value;
    try {
      value = mappingFunction.apply(key);
    } catch (RuntimeException | Error error) {
      statsCounter().recordLoadFailure(statsTicker().read() - start);
      throw error;
    }
    long elapsed = statsTicker().read() - start;
    if (recordLoad) {
      // A null result is treated as a failed load, mirroring Guava's semantics.
      if (value != null) {
        statsCounter().recordLoadSuccess(elapsed);
      } else {
        statsCounter().recordLoadFailure(elapsed);
      }
    }
    return value;
  };
}
/**
 * Computes a new mapping for {@code key} by adapting the synchronous remapping function
 * onto the underlying async cache. The old value is resolved outside the computation,
 * so the operation retries until the future observed before the compute matches the one
 * seen inside it (i.e. no concurrent change slipped in between the two reads).
 */
@Override public @Nullable V compute(K key,
    BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
  requireNonNull(remappingFunction);
  // Flag (array for capture by the lambda) marking that our remapping actually ran.
  boolean[] computed = { false };
  for (;;) {
    CompletableFuture<V> future = delegate.get(key);
    // May be null if absent or if the future failed; resolved before taking the lock.
    V oldValue = Async.getWhenSuccessful(future);
    CompletableFuture<V> valueFuture = delegate.compute(key, (k, oldValueFuture) -> {
      // A different future means a concurrent update raced us: leave the mapping
      // untouched and retry the whole read-then-compute sequence.
      if (future != oldValueFuture) {
        return oldValueFuture;
      }
      computed[0] = true;
      long startTime = delegate.statsTicker().read();
      V newValue = remappingFunction.apply(key, oldValue);
      long loadTime = delegate.statsTicker().read() - startTime;
      if (newValue == null) {
        // Null removes the mapping and counts as a failed load.
        delegate.statsCounter().recordLoadFailure(loadTime);
        return null;
      }
      delegate.statsCounter().recordLoadSuccess(loadTime);
      return CompletableFuture.completedFuture(newValue);
    }, /* recordMiss */ false, /* recordLoad */ false);
    if (computed[0]) {
      return Async.getWhenSuccessful(valueFuture);
    }
  }
}
/** Forwards the eviction recording, logging rather than propagating any failure. */
@Override
public void recordEviction(int weight) {
  try {
    delegate.recordEviction(weight);
  } catch (Throwable error) {
    logger.log(Level.WARNING, "Exception thrown by stats counter", error);
  }
}
/** Forwards to the delegate's snapshot, substituting empty stats if it throws. */
@Override
public CacheStats snapshot() {
  try {
    return delegate.snapshot();
  } catch (Throwable error) {
    logger.log(Level.WARNING, "Exception thrown by stats counter", error);
    return CacheStats.empty();
  }
}
/** Forwards the hit recording, logging rather than propagating any failure. */
@Override
public void recordHits(int count) {
  try {
    delegate.recordHits(count);
  } catch (Throwable error) {
    logger.log(Level.WARNING, "Exception thrown by stats counter", error);
  }
}
/** Forwards the miss recording, logging rather than propagating any failure. */
@Override
public void recordMisses(int count) {
  try {
    delegate.recordMisses(count);
  } catch (Throwable error) {
    logger.log(Level.WARNING, "Exception thrown by stats counter", error);
  }
}
/** Forwards the failed-load recording, logging rather than propagating any failure. */
@Override
public void recordLoadFailure(long loadTime) {
  try {
    delegate.recordLoadFailure(loadTime);
  } catch (Throwable error) {
    logger.log(Level.WARNING, "Exception thrown by stats counter", error);
  }
}
/** Forwards the successful-load recording, logging rather than propagating any failure. */
@Override
public void recordLoadSuccess(long loadTime) {
  try {
    delegate.recordLoadSuccess(loadTime);
  } catch (Throwable error) {
    logger.log(Level.WARNING, "Exception thrown by stats counter", error);
  }
}
/**
 * Enables the accumulation of {@link CacheStats} while the cache operates, using
 * counters produced by the given supplier. Without this call, {@link Cache#stats}
 * reports zero for every statistic. Recording adds bookkeeping to each cache
 * operation and therefore carries a performance cost. The supplied counters are
 * wrapped so that any exception they throw is logged and suppressed.
 *
 * @param statsCounterSupplier produces a fresh {@link StatsCounter} per cache
 * @return this {@code Caffeine} instance (for chaining)
 */
@NonNull
public Caffeine<K, V> recordStats(
    @NonNull Supplier<? extends StatsCounter> statsCounterSupplier) {
  // State is validated first so a repeated call fails the same way regardless of
  // the argument; then the argument itself is checked.
  requireState(this.statsCounterSupplier == null, "Statistics recording was already set");
  requireNonNull(statsCounterSupplier);
  this.statsCounterSupplier =
      () -> StatsCounter.guardedStatsCounter(statsCounterSupplier.get());
  return this;
}
// NOTE(review): this fragment appears garbled by extraction — statements follow an
// unconditional `return;` (unreachable code) and the braces do not balance, so the
// enclosing method is not fully visible here. Left byte-identical; reconcile against
// the complete method before editing.
if (valueFuture.isCompletedExceptionally() || (valueFuture.isDone() && (valueFuture.join() == null))) { cache().statsCounter().recordLoadFailure(0L); cache().remove(key); return; cache().statsCounter().recordLoadFailure(loadTime); } else { cache().statsCounter().recordLoadSuccess(loadTime);
/**
 * Returns the present mappings for the requested keys, de-duplicated in encounter
 * order, recording one miss per absent key and one hit per found key.
 *
 * @param keys the keys to look up
 * @return an unmodifiable map of the keys that were found
 */
@Override
public Map<K, V> getAllPresent(Iterable<?> keys) {
  Set<Object> uniqueKeys = new LinkedHashSet<>();
  for (Object key : keys) {
    uniqueKeys.add(key);
  }

  int misses = 0;
  Map<Object, Object> found = new LinkedHashMap<>(uniqueKeys.size());
  for (Object key : uniqueKeys) {
    Object value = data.get(key);
    if (value != null) {
      found.put(key, value);
    } else {
      misses++;
    }
  }
  statsCounter.recordMisses(misses);
  statsCounter.recordHits(found.size());

  @SuppressWarnings("unchecked")
  Map<K, V> typedResult = (Map<K, V>) found;
  return Collections.unmodifiableMap(typedResult);
}
// NOTE(review): this fragment appears garbled by extraction — a `catch` clause has no
// matching `try`, braces do not balance, and `u`, `t`, `startTime`, and `loadTime` are
// not declared in the visible span. Left byte-identical; reconcile against the
// complete enclosing method before editing.
R result; if ((u == null) && recordMiss) { statsCounter().recordMisses(1); result = remappingFunction.apply(t, u); } catch (RuntimeException | Error e) { statsCounter().recordLoadFailure(statsTicker().read() - startTime); throw e; if (recordLoad) { if (result == null) { statsCounter().recordLoadFailure(loadTime); } else { statsCounter().recordLoadSuccess(loadTime);
/** Forwards the (deprecated) unweighted eviction recording, logging any failure. */
@Override
@SuppressWarnings("deprecation")
public void recordEviction() {
  try {
    delegate.recordEviction();
  } catch (Throwable error) {
    logger.log(Level.WARNING, "Exception thrown by stats counter", error);
  }
}
/** Returns an immutable snapshot of this cache's accumulated statistics. */
@Override
default CacheStats stats() {
  StatsCounter counter = cache().statsCounter();
  return counter.snapshot();
}
@Override public V computeIfAbsent(K key, Function<? super K, ? extends V> mappingFunction, boolean recordStats, boolean recordLoad) { requireNonNull(mappingFunction); // optimistic fast path due to computeIfAbsent always locking V value = data.get(key); if (value != null) { if (recordStats) { statsCounter.recordHits(1); } return value; } boolean[] missed = new boolean[1]; value = data.computeIfAbsent(key, k -> { // Do not communicate to CacheWriter on a load missed[0] = true; return recordStats ? statsAware(mappingFunction, recordLoad).apply(key) : mappingFunction.apply(key); }); if (!missed[0] && recordStats) { statsCounter.recordHits(1); } return value; }
/**
 * Loads each key one at a time, de-duplicated in encounter order. If a load throws,
 * the keys not yet attempted are recorded as misses before the error propagates.
 *
 * @param keys the keys to load
 * @return an unmodifiable map of the keys whose load produced a value
 */
default Map<K, V> loadSequentially(Iterable<? extends K> keys) {
  Set<K> uniqueKeys = new LinkedHashSet<>();
  for (K key : keys) {
    uniqueKeys.add(key);
  }

  int attempted = 0;
  Map<K, V> result = new LinkedHashMap<>(uniqueKeys.size());
  try {
    for (K key : uniqueKeys) {
      attempted++;
      V value = get(key);
      if (value != null) {
        result.put(key, value);
      }
    }
  } catch (Throwable t) {
    // Remaining keys were never looked up; count them as misses so the
    // statistics still reflect the full request.
    cache().statsCounter().recordMisses(uniqueKeys.size() - attempted);
    throw t;
  }
  return Collections.unmodifiableMap(result);
}