@Test(timeOut = 5000)
void testReadMissingMultiple() throws Exception {
    ReadHandle lh = getLedgerHandle();
    when(lh.getId()).thenReturn((long) 0);

    EntryCacheManager cacheManager = factory.getEntryCacheManager();
    EntryCache entryCache = cacheManager.getEntryCache(ml);

    // Pre-populate only a subset of [0..9]; the read below must transparently
    // fill the gaps (1, 3, 4, 6, 7, 9) from the ledger.
    byte[] data = new byte[10];
    for (int entryId : new int[] { 0, 2, 5, 8 }) {
        entryCache.insert(EntryImpl.create(0, entryId, data));
    }

    final CountDownLatch done = new CountDownLatch(1);
    entryCache.asyncReadEntry(lh, 0, 9, false, new ReadEntriesCallback() {
        @Override
        public void readEntriesComplete(List<Entry> entries, Object ctx) {
            // All 10 entries must come back, cached and non-cached alike.
            assertEquals(entries.size(), 10);
            done.countDown();
        }

        @Override
        public void readEntriesFailed(ManagedLedgerException exception, Object ctx) {
            Assert.fail("should not have failed");
        }
    }, null);

    done.await();
}
/**
 * Removes the cache registered under the given name, if present, and releases
 * everything it held. No-op when no cache is registered for that name.
 */
void removeEntryCache(String name) {
    EntryCache removed = caches.remove(name);
    if (removed != null) {
        long releasedSize = removed.getSize();
        removed.clear();
        if (log.isDebugEnabled()) {
            log.debug("Removed cache for {} - Size: {} -- Current Size: {}", name, releasedSize / MB,
                    currentSize.get() / MB);
        }
    }
}
// Evict every cached entry belonging to this ledger; the cast is needed because
// entryCache is a field of the impl class, not of the ManagedLedger interface.
// NOTE(review): presumably currentLedger identifies the ledger whose entries must go — confirm against caller.
((ManagedLedgerImpl) ledger).entryCache.invalidateAllEntries(currentLedger);
// Accumulate global totals and mark this cache as an eviction candidate.
totalSize += cache.getSize();
cachesToEvictTotalSize += entryCache.getSize();
cachesToEvict.add(entryCache);
log.debug("Added cache {} with size {}", entryCache.getName(), entryCache.getSize());
// Free from this cache an amount proportional to its share of the candidates' total size.
// NOTE(review): the denominator is the running total — if selection and eviction happen in the
// same pass, earlier candidates see a smaller denominator; confirm these are separate passes.
long singleCacheSizeToFree = (long) (sizeToFree * (entryCache.getSize() / (double) cachesToEvictTotalSize));
Pair<Integer, Long> evicted = entryCache.evictEntries(singleCacheSizeToFree);
// Pair carries (evicted entry count, evicted bytes).
evictedEntries += evicted.getLeft();
evictedSize += evicted.getRight();
@Override public void operationComplete(Void result, Stat stat) { cursor.asyncDeleteCursorLedger(); cursors.removeCursor(consumerName); // Redo invalidation of entries in cache PositionImpl slowestConsumerPosition = cursors.getSlowestReaderPosition(); if (slowestConsumerPosition != null) { if (log.isDebugEnabled()) { log.debug("Doing cache invalidation up to {}", slowestConsumerPosition); } entryCache.invalidateEntries(slowestConsumerPosition); } else { entryCache.clear(); } trimConsumedLedgersInBackground(); log.info("[{}] [{}] Deleted cursor", name, consumerName); callback.deleteCursorComplete(ctx); }
// Build up cache1 (7 bytes): no eviction expected yet.
EntryCache cache2 = cacheManager.getEntryCache(ml2);
cache1.insert(EntryImpl.create(1, 1, new byte[4]));
cache1.insert(EntryImpl.create(1, 0, new byte[3]));
assertEquals(cache1.getSize(), 7);
assertEquals(cacheManager.getSize(), 7);
assertEquals(cacheManager.mlFactoryMBean.getNumberOfCacheEvictions(), 0);

// Fill cache2 up to the manager's total of 10 bytes.
cache2.insert(EntryImpl.create(2, 0, new byte[1]));
cache2.insert(EntryImpl.create(2, 1, new byte[1]));
cache2.insert(EntryImpl.create(2, 2, new byte[1]));
assertEquals(cache2.getSize(), 3);
assertEquals(cacheManager.getSize(), 10);

// One more byte pushes the manager over its limit and triggers eviction:
// cache1 shrinks from 7 to 4 bytes. (A duplicated identical assertion on
// cache2.getSize() was removed here.)
cache2.insert(EntryImpl.create(2, 3, new byte[1]));
assertEquals(cache1.getSize(), 4);
assertEquals(cache2.getSize(), 3);

// Invalidating up to position (2, 1) leaves a single 1-byte entry cached.
cache2.invalidateEntries(new PositionImpl(2, 1));
assertEquals(cacheManager.getSize(), 1);
assertEquals(cache2.getSize(), 1);
/**
 * @return the current total size reported by the entry cache, in bytes
 */
public long getCacheSize() {
    final long cachedBytes = entryCache.getSize();
    return cachedBytes;
}
@Test
void doubleInsert() throws Exception {
    // Cache large enough that both distinct inserts fit without eviction.
    ManagedLedgerFactoryConfig conf = new ManagedLedgerFactoryConfig();
    conf.setMaxCacheSize(10);
    conf.setCacheEvictionWatermark(0.8);
    factory = new ManagedLedgerFactoryImpl(bkc, bkc.getZkHandle(), conf);

    EntryCacheManager cacheManager = factory.getEntryCacheManager();
    EntryCache cache1 = cacheManager.getEntryCache(ml1);

    // First-time inserts at distinct positions are accepted.
    assertEquals(cache1.insert(EntryImpl.create(1, 1, new byte[4])), true);
    assertEquals(cache1.insert(EntryImpl.create(1, 0, new byte[3])), true);
    assertEquals(cache1.getSize(), 7);
    assertEquals(cacheManager.getSize(), 7);

    // Re-inserting at an already-cached position is rejected, even with a
    // different payload, and the sizes stay unchanged.
    assertEquals(cache1.insert(EntryImpl.create(1, 0, new byte[5])), false);
    assertEquals(cache1.getSize(), 7);
    assertEquals(cacheManager.getSize(), 7);
}
/**
 * Clears every registered cache, releasing all of their entries.
 */
public void clear() {
    // Method reference is the idiomatic form of the trivial lambda cache -> cache.clear().
    caches.values().forEach(EntryCache::clear);
}
/**
 * Reads the entry range [firstEntry, lastEntry] through the entry cache. When a read timeout is
 * configured ({@code managedLedgerReadEntryTimeoutSeconds > 0}), the callback is wrapped so a
 * scheduled watchdog can fail the read if it has not completed within the timeout.
 */
protected void asyncReadEntry(ReadHandle ledger, long firstEntry, long lastEntry, boolean isSlowestReader,
        OpReadEntry opReadEntry, Object ctx) {
    long timeout = config.getReadEntryTimeoutSeconds();
    boolean checkTimeout = timeout > 0;
    if (checkTimeout) {
        // set readOpCount to uniquely validate if ReadEntryCallbackWrapper is already recycled
        long readOpCount = READ_OP_COUNT_UPDATER.incrementAndGet(this);
        ReadEntryCallbackWrapper readCallback = ReadEntryCallbackWrapper.create(name, ledger.getId(), firstEntry,
                opReadEntry, readOpCount, ctx);
        final ScheduledFuture<?> task = scheduledExecutor.schedule(() -> {
            // validate ReadEntryCallbackWrapper object is not recycled by bk-client callback (by validating
            // readOpCount) and fail the callback if read is not completed yet
            if (readCallback.readOpCount == readOpCount
                    && ReadEntryCallbackWrapper.READ_COMPLETED_UPDATER.get(readCallback) == FALSE) {
                log.warn("[{}]-{} read entry timeout for {}-{} after {} sec", this.name, ledger.getId(),
                        firstEntry, lastEntry, timeout);
                readCallback.readEntriesFailed(createManagedLedgerException(BKException.Code.TimeoutException),
                        readOpCount);
            }
        }, timeout, TimeUnit.SECONDS);
        // NOTE(review): the task is attached after scheduling; presumably the wrapper cancels or
        // ignores it once the read completes — confirm against ReadEntryCallbackWrapper.
        readCallback.task = task;
        entryCache.asyncReadEntry(ledger, firstEntry, lastEntry, isSlowestReader, readCallback, readOpCount);
    } else {
        // No timeout configured: pass the caller's callback straight through.
        entryCache.asyncReadEntry(ledger, firstEntry, lastEntry, isSlowestReader, opReadEntry, ctx);
    }
}
/**
 * Invalidates cached entries made obsolete by a cursor moving to a new position.
 * A null result from the tracker means nothing changed, so nothing is dropped.
 */
void discardEntriesFromCache(ManagedCursorImpl cursor, PositionImpl newPosition) {
    Pair<PositionImpl, PositionImpl> updatedRange = activeCursors.cursorUpdated(cursor, newPosition);
    if (updatedRange == null) {
        return;
    }
    entryCache.invalidateEntries(updatedRange.getRight());
}
// Offer the entry to the cache, then drop our reference.
// NOTE(review): assumes insert() retains whatever it needs (accepted or not),
// so releasing here is safe — confirm against the EntryCache.insert contract.
ml.entryCache.insert(entry);
entry.release();
// Accumulate global totals and mark this cache as an eviction candidate.
totalSize += cache.getSize();
cachesToEvictTotalSize += entryCache.getSize();
cachesToEvict.add(entryCache);
log.debug("Added cache {} with size {}", entryCache.getName(), entryCache.getSize());
// Free from this cache an amount proportional to its share of the candidates' total size.
// NOTE(review): the denominator is the running total — if selection and eviction happen in the
// same pass, earlier candidates see a smaller denominator; confirm these are separate passes.
long singleCacheSizeToFree = (long) (sizeToFree * (entryCache.getSize() / (double) cachesToEvictTotalSize));
// NOTE(review): this Pair exposes public fields (first/second) rather than getLeft()/getRight()
// as elsewhere — presumably a different Pair type; confirm which one this file imports.
Pair<Integer, Long> evicted = entryCache.evictEntries(singleCacheSizeToFree);
// Pair carries (evicted entry count, evicted bytes).
evictedEntries += evicted.first;
evictedSize += evicted.second;
@Override public void operationComplete(Void result, Stat stat) { cursor.asyncDeleteCursorLedger(); cursors.removeCursor(consumerName); // Redo invalidation of entries in cache PositionImpl slowestConsumerPosition = cursors.getSlowestReaderPosition(); if (slowestConsumerPosition != null) { if (log.isDebugEnabled()) { log.debug("Doing cache invalidation up to {}", slowestConsumerPosition); } entryCache.invalidateEntries(slowestConsumerPosition); } else { entryCache.clear(); } trimConsumedLedgersInBackground(); log.info("[{}] [{}] Deleted cursor", name, consumerName); callback.deleteCursorComplete(ctx); }
/**
 * Orders caches by current size, ascending (smaller cache sorts first).
 */
@Override
public int compareTo(EntryCache other) {
    // Long.compare is the stdlib equivalent of Guava's Longs.compare (Java 7+):
    // same semantics, no third-party dependency, no subtraction-overflow risk.
    return Long.compare(getSize(), other.getSize());
}
@Test
void cacheDisabled() throws Exception {
    // A max cache size of 0 must hand out the disabled (no-op) cache implementation.
    ManagedLedgerFactoryConfig conf = new ManagedLedgerFactoryConfig();
    conf.setMaxCacheSize(0);
    conf.setCacheEvictionWatermark(0.8);
    factory = new ManagedLedgerFactoryImpl(bkc, bkc.getZkHandle(), conf);

    EntryCacheManager cacheManager = factory.getEntryCacheManager();
    EntryCache cache1 = cacheManager.getEntryCache(ml1);
    EntryCache cache2 = cacheManager.getEntryCache(ml2);
    assertTrue(cache1 instanceof EntryCacheManager.EntryCacheDisabled);
    assertTrue(cache2 instanceof EntryCacheManager.EntryCacheDisabled);

    // Inserts into the disabled cache leave all sizes at zero.
    cache1.insert(EntryImpl.create(1, 1, new byte[4]));
    cache1.insert(EntryImpl.create(1, 0, new byte[3]));
    assertEquals(cache1.getSize(), 0);
    assertEquals(cacheManager.getSize(), 0);

    // Every MBean stat must report an idle, empty cache.
    cacheManager.mlFactoryMBean.refreshStats(1, TimeUnit.SECONDS);
    assertEquals(cacheManager.mlFactoryMBean.getCacheMaxSize(), 0);
    assertEquals(cacheManager.mlFactoryMBean.getCacheUsedSize(), 0);
    assertEquals(cacheManager.mlFactoryMBean.getCacheHitsRate(), 0.0);
    assertEquals(cacheManager.mlFactoryMBean.getCacheMissesRate(), 0.0);
    assertEquals(cacheManager.mlFactoryMBean.getCacheHitsThroughput(), 0.0);
    assertEquals(cacheManager.mlFactoryMBean.getNumberOfCacheEvictions(), 0);

    cache2.insert(EntryImpl.create(2, 0, new byte[1]));
    cache2.insert(EntryImpl.create(2, 1, new byte[1]));
    cache2.insert(EntryImpl.create(2, 2, new byte[1]));
    assertEquals(cache2.getSize(), 0);
    assertEquals(cacheManager.getSize(), 0);
}
// Release every entry cached for this managed ledger.
ledger.entryCache.clear();
/**
 * Reads the single entry at the given position through the entry cache. When a read timeout is
 * configured ({@code managedLedgerReadEntryTimeoutSeconds > 0}), the callback is wrapped so a
 * scheduled watchdog can fail the read if it has not completed within the timeout.
 */
protected void asyncReadEntry(ReadHandle ledger, PositionImpl position, ReadEntryCallback callback, Object ctx) {
    long timeout = config.getReadEntryTimeoutSeconds();
    boolean checkTimeout = timeout > 0;
    if (checkTimeout) {
        // set readOpCount to uniquely validate if ReadEntryCallbackWrapper is already recycled
        long readOpCount = READ_OP_COUNT_UPDATER.incrementAndGet(this);
        ReadEntryCallbackWrapper readCallback = ReadEntryCallbackWrapper.create(name, position.getLedgerId(),
                position.getEntryId(), callback, readOpCount, ctx);
        final ScheduledFuture<?> task = scheduledExecutor.schedule(() -> {
            // validate ReadEntryCallbackWrapper object is not recycled by bk-client callback (by validating
            // readOpCount) and fail the callback if read is not completed yet
            if (readCallback.readOpCount == readOpCount
                    && ReadEntryCallbackWrapper.READ_COMPLETED_UPDATER.get(readCallback) == FALSE) {
                log.warn("[{}]-{} read entry timeout for {} after {} sec", this.name, ledger.getId(), position,
                        timeout);
                readCallback.readEntryFailed(createManagedLedgerException(BKException.Code.TimeoutException),
                        readOpCount);
            }
        }, timeout, TimeUnit.SECONDS);
        // NOTE(review): the task is attached after scheduling; presumably the wrapper cancels or
        // ignores it once the read completes — confirm against ReadEntryCallbackWrapper.
        readCallback.task = task;
        entryCache.asyncReadEntry(ledger, position, readCallback, readOpCount);
    } else {
        // No timeout configured: pass the caller's callback straight through.
        entryCache.asyncReadEntry(ledger, position, callback, ctx);
    }
}
/**
 * Invalidates cached entries made obsolete by a cursor moving to a new position.
 * A null result from the tracker means nothing changed, so nothing is dropped.
 */
void discardEntriesFromCache(ManagedCursorImpl cursor, PositionImpl newPosition) {
    Pair<PositionImpl, PositionImpl> updatedRange = activeCursors.cursorUpdated(cursor, newPosition);
    if (updatedRange == null) {
        return;
    }
    entryCache.invalidateEntries(updatedRange.second);
}
// Offer the entry to the cache, then drop our reference.
// NOTE(review): assumes insert() retains whatever it needs (accepted or not),
// so releasing here is safe — confirm against the EntryCache.insert contract.
ml.entryCache.insert(entry);
entry.release();