    BucketCache.DEFAULT_ERROR_TOLERATION_DURATION);
bucketCache = new BucketCache(bucketCacheIOEngineName, bucketCacheSize, blockSize, bucketSizes,
    writerThreads, writerQueueLen, persistentPath, ioErrorsTolerationDuration, c);
/**
 * Put the new bucket entry into the backingMap. Note that we are allowed to replace the existing
 * cache block with a new one for the same cache key. There is a corner case: one thread caches a
 * block in the ramCache, copies it to the IOEngine, and adds a bucket entry to the backingMap.
 * Caching another new block with the same cache key does the same thing, so if the previous
 * bucket entry is not evicted, memory is leaked because the previous bucketEntry is gone but the
 * bucketAllocator never frees its space.
 * @see BlockCacheUtil#shouldReplaceExistingCacheBlock(BlockCache blockCache, BlockCacheKey
 *      cacheKey, Cacheable newBlock)
 * @param key Block cache key
 * @param bucketEntry Bucket entry to put into backingMap.
 */
private void putIntoBackingMap(BlockCacheKey key, BucketEntry bucketEntry) {
  BucketEntry previousEntry = backingMap.put(key, bucketEntry);
  if (previousEntry != null && previousEntry != bucketEntry) {
    ReentrantReadWriteLock lock = offsetLock.getLock(previousEntry.offset());
    lock.writeLock().lock();
    try {
      blockEvicted(key, previousEntry, false);
    } finally {
      lock.writeLock().unlock();
    }
  }
}
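As a rough illustration of the corner case described above, the sketch below is an assumption
modeled on the TestBucketCache helpers that appear later in this section (cacheAndWaitUntilFlushedToBucket,
getAllocator().getUsedSize(), getBlockCount()); it is not code from the source. It caches two
blocks under the same key and checks that only one backing entry and one allocation survive.

// Hypothetical test sketch; "cache" and cacheAndWaitUntilFlushedToBucket come from TestBucketCache.
BlockCacheKey sameKey = new BlockCacheKey("dummy-replace", 0L);
// First block reaches the IOEngine and gets a bucket entry in backingMap.
cacheAndWaitUntilFlushedToBucket(cache, sameKey, new CacheTestUtils.ByteArrayCacheable(new byte[10]));
long usedAfterFirst = cache.getAllocator().getUsedSize();
// Caching a second block under the same key replaces the backingMap entry; putIntoBackingMap
// evicts the previous BucketEntry so its space is returned to the BucketAllocator.
cacheAndWaitUntilFlushedToBucket(cache, sameKey, new CacheTestUtils.ByteArrayCacheable(new byte[10]));
assertEquals(1L, cache.getBlockCount());
assertEquals(usedAfterFirst, cache.getAllocator().getUsedSize());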
/**
 * Cache the block with the specified name and buffer.
 * @param cacheKey block's cache key
 * @param buf block buffer
 */
@Override
public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf) {
  cacheBlock(cacheKey, buf, false);
}
@Override
public void shutdown() {
  disableCache();
  LOG.info("Shutdown bucket cache: IO persistent=" + ioEngine.isPersistent() + "; path to write="
      + persistencePath);
  if (ioEngine.isPersistent() && persistencePath != null) {
    try {
      join();
      persistToFile();
    } catch (IOException ex) {
      LOG.error("Unable to persist data on exit: " + ex.toString(), ex);
    } catch (InterruptedException e) {
      LOG.warn("Failed to persist data on exit", e);
    }
  }
}
private void validateGetPartitionSize(BucketCache bucketCache, float partitionFactor,
    float minFactor) {
  long expectedOutput =
      (long) Math.floor(bucketCache.getAllocator().getTotalSize() * partitionFactor * minFactor);
  assertEquals(expectedOutput, bucketCache.getPartitionSize(partitionFactor));
}
private boolean forceEvict(BlockCacheKey cacheKey) {
  if (!cacheEnabled) {
    return false;
  }
  RAMQueueEntry removedBlock = checkRamCache(cacheKey);
  BucketEntry bucketEntry = backingMap.get(cacheKey);
  if (bucketEntry == null) {
    if (removedBlock != null) {
      cacheStats.evicted(0, cacheKey.isPrimary());
      return true;
    } else {
      return false;
    }
  }
  ReentrantReadWriteLock lock = offsetLock.getLock(bucketEntry.offset());
  try {
    lock.writeLock().lock();
    if (backingMap.remove(cacheKey, bucketEntry)) {
      blockEvicted(cacheKey, bucketEntry, removedBlock == null);
    } else {
      return false;
    }
  } finally {
    lock.writeLock().unlock();
  }
  cacheStats.evicted(bucketEntry.getCachedTime(), cacheKey.isPrimary());
  return true;
}
String persistencePath = testDir + "/bucket.persistence";
BucketCache bucketCache = new BucketCache(ioEngineName, capacitySize, constructedBlockSize,
    constructedBlockSizes, writeThreads, writerQLen, persistencePath);
long usedSize = bucketCache.getAllocator().getUsedSize();
assertEquals(0, usedSize);

bucketCache.cacheBlock(block.getBlockName(), block.getBlock());
usedSize = bucketCache.getAllocator().getUsedSize();
assertNotEquals(0, usedSize);
bucketCache.shutdown();
assertTrue(new File(persistencePath).exists());

bucketCache = new BucketCache(ioEngineName, capacitySize, constructedBlockSize,
    constructedBlockSizes, writeThreads, writerQLen, persistencePath);
assertFalse(new File(persistencePath).exists());
assertEquals(usedSize, bucketCache.getAllocator().getUsedSize());
bucketCache.shutdown();
assertTrue(new File(persistencePath).exists());

bucketCache = new BucketCache(ioEngineName, capacitySize, constructedBlockSize, smallBucketSizes,
    writeThreads, writerQLen, persistencePath);
assertFalse(new File(persistencePath).exists());
assertEquals(0, bucketCache.getAllocator().getUsedSize());
assertEquals(0, bucketCache.backingMap.size());
@Test
public void testValidBucketCacheConfigs() throws IOException {
  Configuration conf = HBaseConfiguration.create();
  conf.setFloat(BucketCache.ACCEPT_FACTOR_CONFIG_NAME, 0.9f);
  conf.setFloat(BucketCache.MIN_FACTOR_CONFIG_NAME, 0.5f);
  conf.setFloat(BucketCache.EXTRA_FREE_FACTOR_CONFIG_NAME, 0.5f);
  conf.setFloat(BucketCache.SINGLE_FACTOR_CONFIG_NAME, 0.1f);
  conf.setFloat(BucketCache.MULTI_FACTOR_CONFIG_NAME, 0.7f);
  conf.setFloat(BucketCache.MEMORY_FACTOR_CONFIG_NAME, 0.2f);

  BucketCache cache = new BucketCache(ioEngineName, capacitySize, constructedBlockSize,
      constructedBlockSizes, writeThreads, writerQLen, persistencePath, 100, conf);

  assertEquals(BucketCache.ACCEPT_FACTOR_CONFIG_NAME + " failed to propagate.", 0.9f,
      cache.getAcceptableFactor(), 0);
  assertEquals(BucketCache.MIN_FACTOR_CONFIG_NAME + " failed to propagate.", 0.5f,
      cache.getMinFactor(), 0);
  assertEquals(BucketCache.EXTRA_FREE_FACTOR_CONFIG_NAME + " failed to propagate.", 0.5f,
      cache.getExtraFreeFactor(), 0);
  assertEquals(BucketCache.SINGLE_FACTOR_CONFIG_NAME + " failed to propagate.", 0.1f,
      cache.getSingleFactor(), 0);
  assertEquals(BucketCache.MULTI_FACTOR_CONFIG_NAME + " failed to propagate.", 0.7f,
      cache.getMultiFactor(), 0);
  assertEquals(BucketCache.MEMORY_FACTOR_CONFIG_NAME + " failed to propagate.", 0.2f,
      cache.getMemoryFactor(), 0);
}
@Test
public void testMemoryLeak() throws Exception {
  final BlockCacheKey cacheKey = new BlockCacheKey("dummy", 1L);
  cacheAndWaitUntilFlushedToBucket(cache, cacheKey,
      new CacheTestUtils.ByteArrayCacheable(new byte[10]));
  long lockId = cache.backingMap.get(cacheKey).offset();
  ReentrantReadWriteLock lock = cache.offsetLock.getLock(lockId);
  lock.writeLock().lock();
  Thread evictThread = new Thread("evict-block") {
    @Override
    public void run() {
      cache.evictBlock(cacheKey);
    }
  };
  evictThread.start();
  cache.offsetLock.waitForWaiters(lockId, 1);
  cache.blockEvicted(cacheKey, cache.backingMap.remove(cacheKey), true);
  cacheAndWaitUntilFlushedToBucket(cache, cacheKey,
      new CacheTestUtils.ByteArrayCacheable(new byte[10]));
  lock.writeLock().unlock();
  evictThread.join();
  assertEquals(1L, cache.getBlockCount());
  assertTrue(cache.getCurrentSize() > 0L);
  assertTrue("We should have a block!", cache.iterator().hasNext());
}
@Override
public boolean evictBlock(BlockCacheKey cacheKey) {
  return evictBlock(cacheKey, true);
}
bucketAllocator = ((BucketCache) bc).getAllocator();
org.jamon.escaping.Escaping.HTML.write(
    org.jamon.emit.StandardEmitter.valueOf(((BucketCache) bc).getIoEngine()), jamonWriter);
private static void doDrainOfOneEntry(final BucketCache bc, final BucketCache.WriterThread wt,
    final BlockingQueue<RAMQueueEntry> q) throws InterruptedException {
  List<RAMQueueEntry> rqes = BucketCache.getRAMQueueEntries(q, new ArrayList<>(1));
  wt.doDrain(rqes);
  assertTrue(q.isEmpty());
  assertTrue(bc.ramCache.isEmpty());
  assertEquals(0, bc.heapSize());
}
}
/**
 * Do IOE. Take the RAMQueueEntry that was on the queue, doctor it to throw an exception, then
 * put it back and process it.
 * @throws IOException
 * @throws InterruptedException
 */
@SuppressWarnings("unchecked")
@Test
public void testIOE() throws IOException, InterruptedException {
  this.bc.cacheBlock(this.plainKey, plainCacheable);
  RAMQueueEntry rqe = q.remove();
  RAMQueueEntry spiedRqe = Mockito.spy(rqe);
  Mockito.doThrow(new IOException("Mocked!")).when(spiedRqe)
      .writeToCache(Mockito.any(), Mockito.any(), Mockito.any());
  this.q.add(spiedRqe);
  doDrainOfOneEntry(bc, wt, q);
  // The cache is disabled once IO errors persist without ever healing.
  assertTrue(!bc.isCacheEnabled());
}
cache.evictBlock(key);
assertNull(cache.getBlock(key, false, false, false));
CacheTestUtils.getBlockAndAssertEquals(cache, key, blockWithoutNextBlockMetadata, actualBuffer,
    block2Buffer);
@Override
public void run() {
  List<RAMQueueEntry> entries = new ArrayList<>();
  try {
    while (cacheEnabled && writerEnabled) {
      try {
        try {
          // Blocks
          entries = getRAMQueueEntries(inputQueue, entries);
        } catch (InterruptedException ie) {
          if (!cacheEnabled) break;
        }
        doDrain(entries);
      } catch (Exception ioe) {
        LOG.error("WriterThread encountered error", ioe);
      }
    }
  } catch (Throwable t) {
    LOG.warn("Failed doing drain", t);
  }
  LOG.info(this.getName() + " exiting, cacheEnabled=" + cacheEnabled);
}
@Test
public void testBucketAllocator() throws BucketAllocatorException {
  BucketAllocator mAllocator = cache.getAllocator();
/**
 * Cache the block with the specified name and buffer.
 * @param cacheKey block's cache key
 * @param cachedItem block buffer
 * @param inMemory if block is in-memory
 */
@Override
public void cacheBlock(BlockCacheKey cacheKey, Cacheable cachedItem, boolean inMemory) {
  cacheBlockWithWait(cacheKey, cachedItem, inMemory, wait_when_cache);
}
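For reference, a minimal caller-side sketch of this API, an assumption built only from calls that
appear elsewhere in this section (cacheBlock, getBlock, CacheTestUtils.ByteArrayCacheable), not
code from the source:

// Hypothetical usage; "bucketCache" is a BucketCache constructed as in the tests above.
BlockCacheKey key = new BlockCacheKey("example-file", 0L);
Cacheable block = new CacheTestUtils.ByteArrayCacheable(new byte[10]);
bucketCache.cacheBlock(key, block, true); // inMemory=true targets the in-memory bucket area
// Writer threads flush asynchronously, so getBlock may return null until the drain completes.
Cacheable cached = bucketCache.getBlock(key, false, false, false);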
/**
 * Check whether we tolerate the IO error this time. If the duration of the IOEngine throwing
 * errors exceeds ioErrorsTolerationDuration, we will disable the cache.
 */
private void checkIOErrorIsTolerated() {
  long now = EnvironmentEdgeManager.currentTime();
  if (this.ioErrorStartTime > 0) {
    if (cacheEnabled && (now - ioErrorStartTime) > this.ioErrorsTolerationDuration) {
      LOG.error("IO errors duration time has exceeded " + ioErrorsTolerationDuration
          + "ms, disabling cache, please check your IOEngine");
      disableCache();
    }
  } else {
    this.ioErrorStartTime = now;
  }
}
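A hedged sketch of how the toleration window is supplied: the constructor arguments mirror the
ones used in testValidBucketCacheConfigs above; the specific duration value here is an assumption,
not from the source.

// Hypothetical: build a cache that disables itself after 100ms of continuous IOEngine errors.
Configuration conf = HBaseConfiguration.create();
BucketCache cache = new BucketCache(ioEngineName, capacitySize, constructedBlockSize,
    constructedBlockSizes, writeThreads, writerQLen, persistencePath,
    100 /* ioErrorsTolerationDuration, ms */, conf);
// While errors keep occurring, checkIOErrorIsTolerated() compares "now - ioErrorStartTime"
// against this duration and calls disableCache() once it is exceeded.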
  checkIOErrorIsTolerated();
} finally {
  lock.readLock().unlock();
@Override
public long getDataBlockCount() {
  return getBlockCount();
}