/**
 * Inserts the given bucket entry into backingMap, evicting any distinct entry already cached
 * under the same key. The replacement matters for correctness: two threads may each cache a
 * block for one key, copy it to the io-engine and register a bucket entry. If the superseded
 * entry were not evicted, its io-engine space would leak -- the entry vanishes from the map but
 * the bucketAllocator still considers its region in use.
 * @see BlockCacheUtil#shouldReplaceExistingCacheBlock(BlockCache blockCache,BlockCacheKey
 *      cacheKey, Cacheable newBlock)
 * @param key Block cache key
 * @param bucketEntry Bucket entry to put into backingMap.
 */
private void putIntoBackingMap(BlockCacheKey key, BucketEntry bucketEntry) {
  BucketEntry replaced = backingMap.put(key, bucketEntry);
  boolean staleEntryToFree = replaced != null && replaced != bucketEntry;
  if (staleEntryToFree) {
    // Serialize against readers/evictors of the stale entry's offset before freeing it.
    ReentrantReadWriteLock offsetGuard = offsetLock.getLock(replaced.offset());
    offsetGuard.writeLock().lock();
    try {
      blockEvicted(key, replaced, false);
    } finally {
      offsetGuard.writeLock().unlock();
    }
  }
}
/** * This method will find the buckets that are minimally occupied * and are not reference counted and will free them completely * without any constraint on the access times of the elements, * and as a process will completely free at most the number of buckets * passed, sometimes it might not due to changing refCounts * * @param completelyFreeBucketsNeeded number of buckets to free **/ private void freeEntireBuckets(int completelyFreeBucketsNeeded) { if (completelyFreeBucketsNeeded != 0) { // First we will build a set where the offsets are reference counted, usually // this set is small around O(Handler Count) unless something else is wrong Set<Integer> inUseBuckets = new HashSet<Integer>(); for (BucketEntry entry : backingMap.values()) { if (entry.getRefCount() != 0) { inUseBuckets.add(bucketAllocator.getBucketIndex(entry.offset())); } } Set<Integer> candidateBuckets = bucketAllocator.getLeastFilledBuckets( inUseBuckets, completelyFreeBucketsNeeded); for (Map.Entry<BlockCacheKey, BucketEntry> entry : backingMap.entrySet()) { if (candidateBuckets.contains(bucketAllocator .getBucketIndex(entry.getValue().offset()))) { evictBlock(entry.getKey(), false); } } } }
ReentrantReadWriteLock lock = offsetLock.getLock(bucketEntry.offset()); try { lock.writeLock().lock();
if (bucketEntry != null) { long start = System.nanoTime(); ReentrantReadWriteLock lock = offsetLock.getLock(bucketEntry.offset()); try { lock.readLock().lock(); int len = bucketEntry.getLength(); ByteBuffer bb = ByteBuffer.allocate(len); int lenRead = ioEngine.read(bb, bucketEntry.offset()); if (lenRead != len) { throw new RuntimeException("Only " + lenRead + " bytes read, " + len + " expected");
long foundOffset = entry.getValue().offset(); int foundLen = entry.getValue().getLength(); int bucketSizeIndex = -1;
ReentrantReadWriteLock lock = offsetLock.getLock(bucketEntry.offset()); try { lock.writeLock().lock();
@Test
public void testMemoryLeak() throws Exception {
  final BlockCacheKey cacheKey = new BlockCacheKey("dummy", 1L);
  cacheAndWaitUntilFlushedToBucket(cache, cacheKey,
      new CacheTestUtils.ByteArrayCacheable(new byte[10]));
  long lockId = cache.backingMap.get(cacheKey).offset();
  ReentrantReadWriteLock lock = cache.offsetLock.getLock(lockId);
  // Hold the offset lock so a concurrent eviction parks on it.
  lock.writeLock().lock();
  Thread evictThread = new Thread("evict-block") {
    @Override
    public void run() {
      cache.evictBlock(cacheKey);
    }
  };
  evictThread.start();
  cache.offsetLock.waitForWaiters(lockId, 1);
  // While the evictor is blocked, evict the entry ourselves and cache a fresh block under the
  // same key; the stale eviction must not remove the new block once it resumes.
  cache.blockEvicted(cacheKey, cache.backingMap.remove(cacheKey), true);
  cacheAndWaitUntilFlushedToBucket(cache, cacheKey,
      new CacheTestUtils.ByteArrayCacheable(new byte[10]));
  lock.writeLock().unlock();
  evictThread.join();
  // The freshly cached block must survive the stale eviction attempt.
  assertEquals(1L, cache.getBlockCount());
  assertTrue(cache.getCurrentSize() > 0L);
  assertTrue("We should have a block!", cache.iterator().hasNext());
}
/**
 * Releases the io-engine region held by an evicted entry and updates cache bookkeeping.
 * @param cacheKey key of the evicted block
 * @param bucketEntry entry whose allocator space is returned
 * @param decrementBlockNumber whether to decrement the cached-block counter as well
 */
@VisibleForTesting
void blockEvicted(BlockCacheKey cacheKey, BucketEntry bucketEntry,
    boolean decrementBlockNumber) {
  // Hand the entry's region back to the allocator first so the space can be reused.
  bucketAllocator.freeBlock(bucketEntry.offset());
  realCacheSize.addAndGet(-bucketEntry.getLength());
  blocksByHFile.remove(cacheKey.getHfileName(), cacheKey);
  if (decrementBlockNumber) {
    this.blockNumber.decrementAndGet();
  }
}
@Test public void testOffsetProducesPositiveOutput() { //This number is picked because it produces negative output if the values isn't ensured to be positive. //See HBASE-18757 for more information. long testValue = 549888460800L; BucketCache.BucketEntry bucketEntry = new BucketCache.BucketEntry(testValue, 10, 10L, true); assertEquals(testValue, bucketEntry.offset()); }
while (iterator.hasNext()) { Map.Entry<BlockCacheKey, BucketEntry> entry = iterator.next(); long foundOffset = entry.getValue().offset(); int foundLen = entry.getValue().getLength(); int bucketSizeIndex = -1;
if (bucketEntry != null) { long start = System.nanoTime(); ReentrantReadWriteLock lock = offsetLock.getLock(bucketEntry.offset()); try { lock.readLock().lock(); LOG.trace("Read offset=" + bucketEntry.offset() + ", len=" + len); Cacheable cachedBlock = ioEngine.read(bucketEntry.offset(), len, bucketEntry.deserializerReference()); long timeTaken = System.nanoTime() - start;
/**
 * Forcibly evicts the block for the given key from both the ramCache and the backingMap.
 * @param cacheKey key of the block to evict
 * @return true if a block was evicted from either cache level, false otherwise
 */
private boolean forceEvict(BlockCacheKey cacheKey) {
  if (!cacheEnabled) {
    return false;
  }
  RAMQueueEntry removedBlock = checkRamCache(cacheKey);
  BucketEntry bucketEntry = backingMap.get(cacheKey);
  if (bucketEntry == null) {
    if (removedBlock != null) {
      // Block never reached the io-engine; count the ram eviction only.
      cacheStats.evicted(0, cacheKey.isPrimary());
      return true;
    } else {
      return false;
    }
  }
  ReentrantReadWriteLock lock = offsetLock.getLock(bucketEntry.offset());
  // Acquire the lock BEFORE the try block: if lock() sat inside try and failed, the finally
  // clause would unlock a lock this thread never held, throwing IllegalMonitorStateException
  // and masking the original failure.
  lock.writeLock().lock();
  try {
    // Conditional remove: only evict if the map still holds the exact entry we looked up,
    // so we never free space belonging to a concurrently re-cached block.
    if (backingMap.remove(cacheKey, bucketEntry)) {
      blockEvicted(cacheKey, bucketEntry, removedBlock == null);
    } else {
      return false;
    }
  } finally {
    lock.writeLock().unlock();
  }
  cacheStats.evicted(bucketEntry.getCachedTime(), cacheKey.isPrimary());
  return true;
}
@Test
public void testMemoryLeak() throws Exception {
  final BlockCacheKey cacheKey = new BlockCacheKey("dummy", 1L);
  // Cache a block and wait for it to land in the bucket (backingMap).
  cacheAndWaitUntilFlushedToBucket(cache, cacheKey, new CacheTestUtils.ByteArrayCacheable(
      new byte[10]));
  long lockId = cache.backingMap.get(cacheKey).offset();
  ReentrantReadWriteLock lock = cache.offsetLock.getLock(lockId);
  // Hold the offset write lock so the eviction thread below blocks on it.
  lock.writeLock().lock();
  Thread evictThread = new Thread("evict-block") {
    @Override
    public void run() {
      cache.evictBlock(cacheKey);
    }
  };
  evictThread.start();
  // Wait until the evictor is parked on the offset lock we hold.
  cache.offsetLock.waitForWaiters(lockId, 1);
  // While the evictor is blocked: evict the entry ourselves, then cache a NEW block under the
  // same key. The stale eviction must not remove the new block when it resumes.
  cache.blockEvicted(cacheKey, cache.backingMap.remove(cacheKey), true);
  cacheAndWaitUntilFlushedToBucket(cache, cacheKey, new CacheTestUtils.ByteArrayCacheable(
      new byte[10]));
  lock.writeLock().unlock();
  evictThread.join();
  // The freshly cached block must have survived the stale eviction attempt.
  assertEquals(1L, cache.getBlockCount());
  assertTrue(cache.getCurrentSize() > 0L);
  assertTrue("We should have a block!", cache.iterator().hasNext());
}
@Test public void testOffsetProducesPositiveOutput() { //This number is picked because it produces negative output if the values isn't ensured to be positive. //See HBASE-18757 for more information. long testValue = 549888460800L; BucketCache.BucketEntry bucketEntry = new BucketCache.BucketEntry(testValue, 10, 10L, true); assertEquals(testValue, bucketEntry.offset()); }
/**
 * Releases the io-engine region held by an evicted entry and updates cache bookkeeping.
 * @param cacheKey key of the evicted block
 * @param bucketEntry entry whose allocator space is returned
 * @param decrementBlockNumber whether to decrement the cached-block counter as well
 */
@VisibleForTesting
void blockEvicted(BlockCacheKey cacheKey, BucketEntry bucketEntry,
    boolean decrementBlockNumber) {
  // Hand the entry's region back to the allocator first so the space can be reused.
  bucketAllocator.freeBlock(bucketEntry.offset());
  realCacheSize.add(-bucketEntry.getLength());
  blocksByHFile.remove(cacheKey);
  if (decrementBlockNumber) {
    this.blockNumber.decrement();
  }
}
/**
 * Converts an in-memory {@code BucketCache.BucketEntry} into its protobuf representation.
 * @param entry the entry to serialize
 * @return the protobuf message mirroring the entry's fields
 */
private static BucketCacheProtos.BucketEntry toPB(BucketCache.BucketEntry entry) {
  BucketCacheProtos.BucketEntry.Builder builder = BucketCacheProtos.BucketEntry.newBuilder();
  builder.setOffset(entry.offset());
  builder.setLength(entry.getLength());
  builder.setDeserialiserIndex(entry.deserialiserIndex);
  builder.setAccessCounter(entry.getAccessCounter());
  builder.setPriority(toPB(entry.getPriority()));
  return builder.build();
}