// Choose the BucketEntry flavor. In BucketCache this nested ternary is
// guarded by ioEngine.usesSharedMemory() and, within that, by whether
// unaligned Unsafe access is available.
BucketEntry bucketEntry = ioEngine.usesSharedMemory()
    ? UnsafeAvailChecker.unaligned()
        ? new UnsafeSharedMemoryBucketEntry(offset, len, accessCounter, inMemory)
        : new SharedMemoryBucketEntry(offset, len, accessCounter, inMemory)
    : new BucketEntry(offset, len, accessCounter, inMemory);
bucketEntry.setDeserialiserReference(data.getDeserializer());
try {
  if (data instanceof HFileBlock) {
if (bucketEntry != null) {
  long start = System.nanoTime();
  ReentrantReadWriteLock lock = offsetLock.getLock(bucketEntry.offset());
  try {
    lock.readLock().lock();
    int len = bucketEntry.getLength();
    if (LOG.isTraceEnabled()) {
      LOG.trace("Read offset=" + bucketEntry.offset() + ", len=" + len);
    }
    Cacheable cachedBlock = ioEngine.read(bucketEntry.offset(), len,
        bucketEntry.deserializerReference());
    long timeTaken = System.nanoTime() - start;
    if (updateCacheMetrics) {
      cacheStats.ioHit(timeTaken);
    }
    // Pin the entry so a concurrent eviction cannot free the backing bucket
    // while this block is still in use by the reader.
    bucketEntry.incrementRefCountAndGet();
    bucketEntry.access(accessCount.incrementAndGet());
    if (this.ioErrorStartTime > 0) {
      ioErrorStartTime = -1;
    }
switch (bucketEntryWithKey.getValue().getPriority()) {
  case SINGLE: {
    bucketSingle.add(bucketEntryWithKey);
if (bucketEntry != null) {
  long start = System.nanoTime();
  ReentrantReadWriteLock lock = offsetLock.getLock(bucketEntry.offset());
  try {
    lock.readLock().lock();
    int len = bucketEntry.getLength();
    ByteBuffer bb = ByteBuffer.allocate(len);
    int lenRead = ioEngine.read(bb, bucketEntry.offset());
    if (lenRead != len) {
      throw new RuntimeException("Only " + lenRead + " bytes read, " + len + " expected");
    }
    CacheableDeserializer<Cacheable> deserializer =
        bucketEntry.deserializerReference(this.deserialiserMap);
    Cacheable cachedBlock = deserializer.deserialize(bb, true);
    long timeTaken = System.nanoTime() - start;
    cacheStats.ioHit(timeTaken);
    bucketEntry.access(accessCount.incrementAndGet());
    if (this.ioErrorStartTime > 0) {
      ioErrorStartTime = -1;
    }
BucketEntry bucketEntry = new BucketEntry(offset, len, accessCounter, inMemory);
bucketEntry.setDeserialiserReference(data.getDeserializer(), deserialiserMap);
try {
  if (data instanceof HFileBlock) {
ReentrantReadWriteLock lock = offsetLock.getLock(bucketEntry.offset());
try {
  lock.writeLock().lock();
  // ... remove the entry from backingMap and free its bucket here ...
} finally {
  lock.writeLock().unlock();
}
cacheStats.evicted(bucketEntry.getCachedTime(), cacheKey.isPrimary());
return true;
/**
 * Attempt to add the specified entry to this queue.
 * <p>
 * If the queue is smaller than the max size, or if the specified element is
 * ordered after the smallest element in the queue, the element will be added
 * to the queue. Otherwise, there is no side effect of this call.
 * @param entry a bucket entry with key to try to add to the queue
 */
public void add(Map.Entry<BlockCacheKey, BucketEntry> entry) {
  if (cacheSize < maxSize) {
    queue.add(entry);
    cacheSize += entry.getValue().getLength();
  } else {
    BucketEntry head = queue.peek().getValue();
    if (BucketEntry.COMPARATOR.compare(entry.getValue(), head) > 0) {
      cacheSize += entry.getValue().getLength();
      cacheSize -= head.getLength();
      if (cacheSize > maxSize) {
        queue.poll();
      } else {
        cacheSize += head.getLength();
      }
      queue.add(entry);
    }
  }
}
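// For illustration: a minimal, self-contained sketch of the size-bounded
// queue behavior documented above. Names (SizeBoundedQueueSketch, Entry) are
// hypothetical stand-ins, and TreeSet stands in for the MinMaxPriorityQueue
// the real CachedEntryQueue uses (so comparator ties are not kept here).
import java.util.Comparator;
import java.util.TreeSet;

public class SizeBoundedQueueSketch {
  record Entry(long accessCounter, int length) {}

  private final long maxSize;
  private long cacheSize = 0;
  // first() is the entry ordered smallest, i.e. the one a larger entry may displace.
  private final TreeSet<Entry> queue =
      new TreeSet<>(Comparator.comparingLong(Entry::accessCounter));

  SizeBoundedQueueSketch(long maxSize) {
    this.maxSize = maxSize;
  }

  public void add(Entry entry) {
    if (cacheSize < maxSize) {
      queue.add(entry);                  // still under budget: always accept
      cacheSize += entry.length();
    } else {
      Entry head = queue.first();
      if (entry.accessCounter() > head.accessCounter()) {
        cacheSize += entry.length();
        cacheSize -= head.length();
        if (cacheSize > maxSize) {
          queue.pollFirst();             // displace the smallest entry
        } else {
          cacheSize += head.length();    // there was room for both after all
        }
        queue.add(entry);
      }                                  // else: ordered before head, no side effect
    }
  }

  public static void main(String[] args) {
    SizeBoundedQueueSketch q = new SizeBoundedQueueSketch(100);
    for (int i = 0; i < 10; i++) {
      q.add(new Entry(i, 30));           // later (larger) entries displace earlier ones
    }
    System.out.println(q.queue.size() + " entries retained");
  }
}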
@Test
public void testMemoryLeak() throws Exception {
  final BlockCacheKey cacheKey = new BlockCacheKey("dummy", 1L);
  cacheAndWaitUntilFlushedToBucket(cache, cacheKey,
      new CacheTestUtils.ByteArrayCacheable(new byte[10]));
  long lockId = cache.backingMap.get(cacheKey).offset();
  ReentrantReadWriteLock lock = cache.offsetLock.getLock(lockId);
  lock.writeLock().lock();
  // Start an eviction that will block on the write lock we hold.
  Thread evictThread = new Thread("evict-block") {
    @Override
    public void run() {
      cache.evictBlock(cacheKey);
    }
  };
  evictThread.start();
  cache.offsetLock.waitForWaiters(lockId, 1);
  // Race a second eviction and a re-cache of the same key past the
  // blocked evictor, then let it proceed.
  cache.blockEvicted(cacheKey, cache.backingMap.remove(cacheKey), true);
  cacheAndWaitUntilFlushedToBucket(cache, cacheKey,
      new CacheTestUtils.ByteArrayCacheable(new byte[10]));
  lock.writeLock().unlock();
  evictThread.join();
  assertEquals(1L, cache.getBlockCount());
  assertTrue(cache.getCurrentSize() > 0L);
  assertTrue("We should have a block!", cache.iterator().hasNext());
}
public void add(Map.Entry<BlockCacheKey, BucketEntry> block) {
  totalSize += block.getValue().getLength();
  queue.add(block);
}
BucketEntry(long offset, int length, long accessCounter, boolean inMemory) {
  setOffset(offset);
  this.length = length;
  this.accessCounter = accessCounter;
  if (inMemory) {
    this.priority = BlockPriority.MEMORY;
  } else {
    this.priority = BlockPriority.SINGLE;
  }
}
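// Context sketch (not the HBase source): how the priority set in this
// constructor evolves afterwards. In BucketEntry, access() records the
// latest access counter and promotes a SINGLE block to MULTI on re-access,
// while MEMORY entries keep their priority. Class and enum here are
// illustrative stand-ins.
class PriorityPromotionSketch {
  enum BlockPriority { SINGLE, MULTI, MEMORY }

  private BlockPriority priority;
  private long accessCounter;

  PriorityPromotionSketch(boolean inMemory) {
    // Same rule as the constructor above.
    this.priority = inMemory ? BlockPriority.MEMORY : BlockPriority.SINGLE;
  }

  void access(long accessCounter) {
    this.accessCounter = accessCounter;
    if (this.priority == BlockPriority.SINGLE) {
      this.priority = BlockPriority.MULTI;   // second touch promotes the block
    }
  }
}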
@VisibleForTesting
void blockEvicted(BlockCacheKey cacheKey, BucketEntry bucketEntry,
    boolean decrementBlockNumber) {
  bucketAllocator.freeBlock(bucketEntry.offset());
  realCacheSize.addAndGet(-1 * bucketEntry.getLength());
  blocksByHFile.remove(cacheKey.getHfileName(), cacheKey);
  if (decrementBlockNumber) {
    this.blockNumber.decrementAndGet();
  }
}
public long free(long toFree) {
  Map.Entry<BlockCacheKey, BucketEntry> entry;
  long freedBytes = 0;
  while ((entry = queue.pollLast()) != null) {
    evictBlock(entry.getKey());
    freedBytes += entry.getValue().getLength();
    if (freedBytes >= toFree) {
      return freedBytes;
    }
  }
  return freedBytes;
}
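// Illustrative, self-contained sketch of how the pieces around free() fit
// together: candidates are partitioned by priority (the switch on
// getPriority() above) and each group frees bytes from its own queue until
// the overall target is met. All names here (Candidate, Group, freeSpace)
// are hypothetical stand-ins, not the HBase API, and draining groups in
// order is a simplification of the cache's proportional freeing.
import java.util.ArrayDeque;
import java.util.Deque;
import java.util.List;

public class FreeSpaceSketch {
  enum Priority { SINGLE, MULTI, MEMORY }
  record Candidate(Priority priority, int length) {}

  static class Group {
    final Deque<Candidate> queue = new ArrayDeque<>();

    long free(long toFree) {
      long freedBytes = 0;
      Candidate c;
      // Mirror of free(long) above: evict until the target is reached
      // or the group runs out of candidates.
      while ((c = queue.pollLast()) != null) {
        freedBytes += c.length();
        if (freedBytes >= toFree) {
          return freedBytes;
        }
      }
      return freedBytes;
    }
  }

  static long freeSpace(List<Candidate> candidates, long bytesToFree) {
    Group single = new Group(), multi = new Group(), memory = new Group();
    for (Candidate c : candidates) {
      switch (c.priority()) {            // same dispatch as the switch above
        case SINGLE -> single.queue.add(c);
        case MULTI -> multi.queue.add(c);
        case MEMORY -> memory.queue.add(c);
      }
    }
    long freed = 0;
    for (Group g : List.of(single, multi, memory)) {
      freed += g.free(bytesToFree - freed);
      if (freed >= bytesToFree) break;
    }
    return freed;
  }

  public static void main(String[] args) {
    List<Candidate> cs = List.of(
        new Candidate(Priority.SINGLE, 40),
        new Candidate(Priority.MULTI, 40),
        new Candidate(Priority.MEMORY, 40));
    System.out.println(freeSpace(cs, 60)); // prints 80
  }
}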
@Test
public void testOffsetProducesPositiveOutput() {
  // This number is picked because it produces negative output if the value
  // isn't ensured to be positive. See HBASE-18757 for more information.
  long testValue = 549888460800L;
  BucketCache.BucketEntry bucketEntry =
      new BucketCache.BucketEntry(testValue, 10, 10L, true);
  assertEquals(testValue, bucketEntry.offset());
}
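// Sketch of the offset packing that makes this test meaningful. Field and
// method names mirror BucketCache.BucketEntry, but this is an illustrative
// standalone class, not the HBase source. The offset is 256-byte aligned and
// stored as an int plus one byte; HBASE-18757 added the unsigned masking so
// that offsets with the 2^39 bit set do not come back negative.
public class OffsetPackingSketch {
  private int offsetBase; // low 32 bits of (offset >> 8)
  private byte offset1;   // next 8 bits of (offset >> 8)

  void setOffset(long value) {
    assert (value & 0xFF) == 0; // bucket offsets are multiples of 256
    value >>= 8;
    offsetBase = (int) value;
    offset1 = (byte) (value >> 32);
  }

  long offset() {
    // Masking with 0xFFFFFFFFL prevents sign extension when offsetBase has
    // its high bit set; without it, the test value above reads back negative.
    long o = ((long) offsetBase) & 0xFFFFFFFFL;
    o += (((long) offset1) & 0xFF) << 32;
    return o << 8;
  }

  public static void main(String[] args) {
    OffsetPackingSketch s = new OffsetPackingSketch();
    s.setOffset(549888460800L);
    System.out.println(s.offset()); // prints 549888460800
  }
}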
ReentrantReadWriteLock lock = offsetLock.getLock(bucketEntry.offset());
try {
  lock.writeLock().lock();
  int refCount = bucketEntry.getRefCount();
  if (refCount == 0) {
    // No readers hold the block; safe to drop the mapping and free the bucket.
    if (backingMap.remove(cacheKey, bucketEntry)) {
      // ... free the underlying bucket and update accounting here ...
    }
  } else {
    // Readers still reference the block, so it cannot be freed yet;
    // mark it and let the last reader release it.
    if (LOG.isDebugEnabled()) {
      LOG.debug("This block " + cacheKey + " is still referred by " + refCount
          + " readers. Can not be freed now. Hence will mark this"
          + " for evicting at a later point");
    }
    bucketEntry.markForEvict();
  }
} finally {
  lock.writeLock().unlock();
}
cacheStats.evicted(bucketEntry.getCachedTime(), cacheKey.isPrimary());
return true;
BlockCacheKey key = new BlockCacheKey(protoKey.getHfilename(), protoKey.getOffset(),
    protoKey.getPrimaryReplicaBlock(), fromPb(protoKey.getBlockType()));
BucketCacheProtos.BucketEntry protoValue = entry.getValue();
BucketCache.BucketEntry value = new BucketCache.BucketEntry(
    protoValue.getOffset(),
    protoValue.getLength(),