// NOTE(review): this fragment appears truncated/garbled. The first `throw` is not
// followed by a closing brace, so the cacheSize recomputation that follows it is
// unreachable as written, and the braces are unbalanced (no matching `finally`
// unlock is visible). Reconstruct from the canonical assertConsistent() before
// relying on this code; left byte-identical here.
void assertConsistent() { lock.lock(); try { if (requiresEviction()) { throw new AssertionError("requires evictions: size=" + mostRecentlyUsedQueries.size() + ", maxSize=" + maxSize + ", ramBytesUsed=" + ramBytesUsed() + ", maxRamBytesUsed=" + maxRamBytesUsed); recomputedCacheSize += leafCache.cache.size(); if (recomputedCacheSize != getCacheSize()) { throw new AssertionError("cacheSize mismatch : " + getCacheSize() + " != " + recomputedCacheSize);
/** * Clear the content of this cache. */ public void clear() { lock.lock(); try { cache.clear(); // Note that this also clears the uniqueQueries map since mostRecentlyUsedQueries is the uniqueQueries.keySet view: mostRecentlyUsedQueries.clear(); onClear(); } finally { lock.unlock(); } }
/** Records that a doc-id set occupying {@code bytes} was added to this leaf cache. */
private void onDocIdSetCache(long bytes) {
  // Grow this leaf's footprint, then report to the enclosing cache's accounting.
  this.ramBytesUsed += bytes;
  LRUQueryCache.this.onDocIdSetCache(key, bytes);
}
/**
 * Returns how many times a {@link Query} has been looked up in this
 * {@link QueryCache}. The counter is bumped once per segment, so running a
 * cached query a single time increments it by the number of segments wrapped
 * by the searcher. By definition this equals {@link #getHitCount()} plus
 * {@link #getMissCount()}.
 *
 * @see #getHitCount()
 * @see #getMissCount()
 */
public final long getTotalCount() {
  return getMissCount() + getHitCount();
}
/**
 * Returns how many cache entries have been removed, either to stay under the
 * configured size/RAM limits or because a segment was closed. A high eviction
 * count may indicate that queries are not being reused, or that the
 * {@link QueryCachingPolicy caching policy} caches too aggressively on NRT
 * segments that get merged early.
 *
 * @see #getCacheCount()
 * @see #getCacheSize()
 */
public final long getEvictionCount() {
  final long everCached = getCacheCount();
  final long stillCached = getCacheSize();
  return everCached - stillCached;
}
DocIdSet get(Query key, LeafReaderContext context, IndexReader.CacheHelper cacheHelper) { assert lock.isHeldByCurrentThread(); assert key instanceof BoostQuery == false; assert key instanceof ConstantScoreQuery == false; final IndexReader.CacheKey readerKey = cacheHelper.getKey(); final LeafCache leafCache = cache.get(readerKey); if (leafCache == null) { onMiss(readerKey, key); return null; } // this get call moves the query to the most-recently-used position final Query singleton = uniqueQueries.get(key); if (singleton == null) { onMiss(readerKey, key); return null; } final DocIdSet cached = leafCache.get(singleton); if (cached == null) { onMiss(readerKey, singleton); } else { onHit(readerKey, singleton); } return cached; }
/**
 * Builds stats from the given cache. The last two arguments are the hit and
 * miss percentages. When the cache has never been queried (total count == 0)
 * the ratios are reported as 0 instead of the NaN that an unguarded
 * {@code x / 0} float division would produce.
 */
private QueryCacheStats(final LRUQueryCache queryCache) {
  this(queryCache.getCacheCount(),
      queryCache.getCacheSize(),
      queryCache.getEvictionCount(),
      queryCache.getHitCount(),
      queryCache.getMissCount(),
      queryCache.getTotalCount(),
      percentage(queryCache.getHitCount(), queryCache.getTotalCount()),
      percentage(queryCache.getMissCount(), queryCache.getTotalCount()));
}

/** Percentage of {@code part} relative to {@code total}; 0 when {@code total} is 0. */
private static float percentage(long part, long total) {
  return total == 0 ? 0f : (float) (part * 100) / total;
}
/** Clear all entries that belong to the given index. */ public void clearIndex(String index) { final Set<Object> coreCacheKeys = shardKeyMap.getCoreKeysForIndex(index); for (Object coreKey : coreCacheKeys) { cache.clearCoreCacheKey(coreKey); } // This cache stores two things: filters, and doc id sets. Calling // clear only removes the doc id sets, but if we reach the situation // that the cache does not contain any DocIdSet anymore, then it // probably means that the user wanted to remove everything. if (cache.getCacheSize() == 0) { cache.clear(); } }
/** Records that one doc-id set occupying {@code bytes} was evicted from this leaf cache. */
private void onDocIdSetEviction(long bytes) {
  // Shrink this leaf's footprint and notify the outer cache (exactly one entry evicted).
  this.ramBytesUsed -= bytes;
  LRUQueryCache.this.onDocIdSetEviction(key, 1, bytes);
}
// NOTE(review): this fragment is truncated (no enclosing signature or closing braces
// visible); only the null-singleton accounting bug is fixed here.
if (singleton == null) {
  uniqueQueries.put(query, query);
  // BUGFIX: on this branch `singleton` is provably null — the accounting call must
  // receive the newly cached `query` (cf. the sibling putIfAbsent, which passes
  // `query` to onQueryCache), not `singleton`.
  onQueryCache(query, LINKED_HASHTABLE_RAM_BYTES_PER_ENTRY + ramBytesUsed(query));
} else {
  query = singleton;
  evictIfNecessary();
/** Evicts {@code singleton}: releases its accounting and purges it from every leaf cache. Must hold {@code lock}. */
private void onEviction(Query singleton) {
  assert lock.isHeldByCurrentThread();
  // Account for the evicted query entry itself (hashtable slot + default query size),
  // then drop its cached doc-id sets from each per-leaf cache.
  onQueryEviction(singleton, LINKED_HASHTABLE_RAM_BYTES_PER_ENTRY + QUERY_DEFAULT_RAM_BYTES_USED);
  for (LeafCache perLeaf : cache.values()) {
    perLeaf.remove(singleton);
  }
}
/** Evicts {@code singleton}: releases its RAM accounting and removes it from all leaf caches. */
private void onEviction(Query singleton) {
  // Release the hashtable-slot plus measured query footprint, then drop the
  // query's cached doc-id sets from each per-leaf cache.
  onQueryEviction(singleton, LINKED_HASHTABLE_RAM_BYTES_PER_ENTRY + ramBytesUsed(singleton));
  for (LeafCache entry : cache.values()) {
    entry.remove(singleton);
  }
}
/** Hook invoked when a query is cached; accumulates its footprint into the shared RAM counter. */
@Override
protected void onQueryCache(Query filter, long bytes) {
  super.onQueryCache(filter, bytes);
  // Cached queries are accounted in the shared counter rather than per shard.
  sharedRamBytesUsed += bytes;
}
@Override
protected void onMiss(Object readerCoreKey, Query filter) {
  super.onMiss(readerCoreKey, filter);
  // A miss may be the very first event seen for this core key, so the stats
  // entry is created on demand (contrast with onHit, which uses a plain lookup).
  final Stats shardStats = getOrCreateStats(readerCoreKey);
  shardStats.missCount += 1;
}
// NOTE(review): the closing brace below appears to end the enclosing (possibly
// anonymous) class — confirm against the full file; left untouched here.
}
/** Hook invoked on a cache hit; bumps the per-shard hit counter. */
@Override
protected void onHit(Object readerCoreKey, Query filter) {
  super.onHit(readerCoreKey, filter);
  // A hit implies a prior miss for this core key, which already created the
  // stats entry via getOrCreateStats — so a plain lookup should succeed here.
  // TODO(review): confirm getStats can never return null on the hit path.
  final Stats stats = getStats(readerCoreKey);
  stats.hitCount += 1;
}
/**
 * Caches {@code set} as the doc-id set for {@code query} on the segment
 * identified by {@code cacheHelper}, unless an entry already exists.
 * Canonicalizes the query through {@code uniqueQueries} so equal queries share
 * one instance, lazily creates the per-segment {@link LeafCache} (registering a
 * close listener so the entry is dropped when the segment closes), and finally
 * evicts entries if the cache exceeds its configured limits.
 */
void putIfAbsent(Query query, LeafReaderContext context, DocIdSet set, IndexReader.CacheHelper cacheHelper) {
  assert query instanceof BoostQuery == false;
  assert query instanceof ConstantScoreQuery == false;
  // under a lock to make sure that mostRecentlyUsedQueries and cache remain sync'ed
  lock.lock();
  try {
    Query singleton = uniqueQueries.putIfAbsent(query, query);
    if (singleton == null) {
      // First time this query is seen: account for its hashtable slot + default size.
      onQueryCache(query, LINKED_HASHTABLE_RAM_BYTES_PER_ENTRY + QUERY_DEFAULT_RAM_BYTES_USED);
    } else {
      // An equal query is already tracked — reuse the canonical instance.
      query = singleton;
    }
    final IndexReader.CacheKey key = cacheHelper.getKey();
    LeafCache leafCache = cache.get(key);
    if (leafCache == null) {
      leafCache = new LeafCache(key);
      final LeafCache previous = cache.put(key, leafCache);
      ramBytesUsed += HASHTABLE_RAM_BYTES_PER_ENTRY;
      assert previous == null;
      // we just created a new leaf cache, need to register a close listener
      cacheHelper.addClosedListener(this::clearCoreCacheKey);
    }
    leafCache.putIfAbsent(query, set);
    // May evict least-recently-used entries if size/RAM limits are now exceeded.
    evictIfNecessary();
  } finally {
    lock.unlock();
  }
}
/** Delegates to the default query cache's current entry count. */
@Override
public long getCacheSize() {
  return indexSearcherProvider.getDefaultQueryCache().getCacheSize();
}
/** Delegates to the default query cache's total cached-entry count. */
@Override
public long getCacheCount() {
  return indexSearcherProvider.getDefaultQueryCache().getCacheCount();
}
/** Delegates to the default query cache's hit count. */
@Override
public long getHitCount() {
  return indexSearcherProvider.getDefaultQueryCache().getHitCount();
}
/** Clear all entries that belong to the given index. */
// NOTE(review): this method is byte-identical to an earlier clearIndex
// definition in this file — likely a duplicated chunk from extraction;
// confirm and deduplicate.
public void clearIndex(String index) {
  final Set<Object> coreCacheKeys = shardKeyMap.getCoreKeysForIndex(index);
  for (Object coreKey : coreCacheKeys) {
    cache.clearCoreCacheKey(coreKey);
  }
  // This cache stores two things: filters, and doc id sets. Calling
  // clear only removes the doc id sets, but if we reach the situation
  // that the cache does not contain any DocIdSet anymore, then it
  // probably means that the user wanted to remove everything.
  if (cache.getCacheSize() == 0) {
    cache.clear();
  }
}