/**
 * Builds the factory: shared executors, metadata store, cache manager and the periodic
 * stats-refresh task.
 *
 * @param bookKeeper          BookKeeper client used by all managed ledgers from this factory
 * @param isBookkeeperManaged whether this factory owns the BK client lifecycle (it also
 *                            controls whether the ZK handle is retained below)
 * @param zooKeeper           ZooKeeper handle backing the metadata store
 * @param config              sizing/tuning knobs for the executors and caches
 * @throws Exception if executor or metadata-store construction fails
 */
private ManagedLedgerFactoryImpl(BookKeeper bookKeeper, boolean isBookkeeperManaged, ZooKeeper zooKeeper,
        ManagedLedgerFactoryConfig config) throws Exception {
    // Scheduler for timed tasks (stats refresh, deferred trimming, metadata-op timeouts).
    scheduledExecutor = OrderedScheduler.newSchedulerBuilder()
            .numThreads(config.getNumManagedLedgerSchedulerThreads())
            .name("bookkeeper-ml-scheduler")
            .build();
    // Ordered executor: tasks keyed by ledger name run serially per key.
    orderedExecutor = OrderedExecutor.newBuilder()
            .numThreads(config.getNumManagedLedgerWorkerThreads())
            .name("bookkeeper-ml-workers")
            .build();
    this.bookKeeper = bookKeeper;
    this.isBookkeeperManaged = isBookkeeperManaged;
    // Keep the ZK handle only when we manage BookKeeper ourselves; the metadata store
    // below still uses the passed-in handle either way.
    this.zookeeper = isBookkeeperManaged ? zooKeeper : null;
    this.store = new MetaStoreImplZookeeper(zooKeeper, orderedExecutor);
    this.config = config;
    this.mbean = new ManagedLedgerFactoryMBeanImpl(this);
    this.entryCacheManager = new EntryCacheManager(this);
    // Periodic stats snapshot; first run immediately, then every StatsPeriodSeconds.
    this.statsTask = scheduledExecutor.scheduleAtFixedRate(() -> refreshStats(), 0, StatsPeriodSeconds, TimeUnit.SECONDS);
}
/** Creates the executors shared by all tests in this class. */
@BeforeClass
public void setUpClass() throws Exception {
    // The two pools are independent; order of creation does not matter.
    cachedExecutor = Executors.newCachedThreadPool();
    executor = OrderedScheduler.newSchedulerBuilder()
            .numThreads(2)
            .name("test")
            .build();
}
/**
 * Best-effort deletion of a ledger's offloaded copy, retried with exponential
 * jittered backoff (1s initial, capped at 1h, at most 10 attempts). A delete that
 * still fails after all retries is only logged — cleanup is not allowed to fail hard.
 */
private void cleanupOffloaded(long ledgerId, UUID uuid,
        String offloadDriverName, /* TODO: use driver name to identify offloader */
        Map<String, String> offloadDriverMetadata, String cleanupReason) {
    Retries.run(Backoff.exponentialJittered(TimeUnit.SECONDS.toMillis(1), TimeUnit.SECONDS.toHours(1)).limit(10),
            Retries.NonFatalPredicate,
            () -> config.getLedgerOffloader().deleteOffloaded(ledgerId, uuid, offloadDriverMetadata),
            scheduledExecutor, name)
            .whenComplete((unused, t) -> {
                if (t == null) {
                    return;
                }
                log.warn("Error cleaning up offload for {}, (cleanup reason: {})", ledgerId, cleanupReason, t);
            });
}
/** Prepares two mocked managed ledgers sharing one single-threaded scheduler. */
@BeforeClass
void setup() throws Exception {
    OrderedScheduler scheduler = OrderedScheduler.newSchedulerBuilder().numThreads(1).build();
    ml1 = newMockLedger(scheduler, "cache1");
    ml2 = newMockLedger(scheduler, "cache2");
}

/** Builds a mocked {@code ManagedLedgerImpl} with the given scheduler and name. */
private static ManagedLedgerImpl newMockLedger(OrderedScheduler scheduler, String name) {
    ManagedLedgerImpl ml = mock(ManagedLedgerImpl.class);
    when(ml.getScheduledExecutor()).thenReturn(scheduler);
    when(ml.getName()).thenReturn(name);
    return ml;
}
/**
 * Runs a ledger-trim pass on the ordered executor keyed by this ledger's name, so trims
 * are serialized with other per-ledger operations; {@code promise} completes with the pass.
 */
private void trimConsumedLedgersInBackground(CompletableFuture<?> promise) {
    executor.executeOrdered(name, safeRun(() -> internalTrimConsumedLedgers(promise)));
}
/**
 * Shuts down the executors created in {@code setUpClass}.
 *
 * <p>Uses try/finally so that an exception from the first shutdown cannot leak the
 * second executor's threads (the original sequential calls would skip the second
 * shutdown if the first one threw).
 */
@AfterClass
public void tearDownClass() throws Exception {
    try {
        executor.shutdown();
    } finally {
        cachedExecutor.shutdown();
    }
}
/**
 * Start cluster: a mock ZooKeeper with the ledger-manager layout pre-created, the
 * configured number of fake bookie registrations, and a mock BookKeeper client on top.
 *
 * @throws Exception if mock-ZK node creation fails
 */
protected void startBookKeeper() throws Exception {
    zkc = MockZooKeeper.newInstance();
    // Register numBookies fake bookies under /ledgers/available so the BK client sees
    // a usable ensemble. NOTE(review): null ACL/CreateMode args look tolerated by
    // MockZooKeeper — confirm against its implementation.
    for (int i = 0; i < numBookies; i++) {
        ZkUtils.createFullPathOptimistic(zkc, "/ledgers/available/192.168.1.1:" + (5000 + i), "".getBytes(), null, null);
    }
    // Ledger-manager layout marker expected by the BK client ("flat" layout, version 1).
    zkc.create("/ledgers/LAYOUT", "1\nflat:1".getBytes(), null, null);
    bkc = new PulsarMockBookKeeper(zkc, executor.chooseThread(this));
}
/**
 * Drains the queue of cursors parked waiting for new entries, waking each one on the
 * executor so the notification runs off the caller's thread.
 */
void notifyCursors() {
    ManagedCursorImpl cursor;
    while ((cursor = waitingCursors.poll()) != null) {
        // Copy to an effectively-final local for capture by the lambda.
        final ManagedCursorImpl c = cursor;
        executor.execute(safeRun(c::notifyEntriesAvailable));
    }
}
/**
 * Retries a trim pass after a short fixed delay; used when trimming cannot proceed
 * right now (e.g. contention on ledger metadata).
 */
private void scheduleDeferredTrimming(CompletableFuture<?> promise) {
    final long delayMillis = 100;
    scheduledExecutor.schedule(
            safeRun(() -> trimConsumedLedgersInBackground(promise)),
            delayMillis, TimeUnit.MILLISECONDS);
}
/**
 * Appends an entry asynchronously. The operation object is created on the caller's
 * thread but enqueued and started on the ledger's ordered executor, so all writers
 * funnel through a single thread per ledger.
 *
 * @param buffer   payload to append (ownership per OpAddEntry's contract)
 * @param callback invoked on completion or failure of the add
 * @param ctx      opaque context passed back to the callback
 */
@Override
public void asyncAddEntry(ByteBuf buffer, AddEntryCallback callback, Object ctx) {
    if (log.isDebugEnabled()) {
        log.debug("[{}] asyncAddEntry size={} state={}", name, buffer.readableBytes(), state);
    }
    OpAddEntry addOperation = OpAddEntry.create(this, buffer, callback, ctx);
    // Jump to specific thread to avoid contention from writers writing from different threads.
    // Enqueue and submit happen together on that thread, keeping pendingAddEntries in
    // the same order the adds are issued to BookKeeper.
    executor.executeOrdered(name, safeRun(() -> {
        pendingAddEntries.add(addOperation);
        internalAsyncAddEntry(addOperation);
    }));
}
/**
 * Persists the updated ledgers list to the metadata store after a ledger rollover.
 * If another update is in flight (mutex held), the attempt is re-scheduled 100ms later
 * instead of blocking.
 *
 * NOTE(review): ledgersListMutex is acquired via tryLock here but not released in this
 * method — presumably the asyncUpdateLedgerIds callback path unlocks it; verify, since a
 * missed unlock would wedge all future rollover updates.
 */
private void updateLedgersListAfterRollover(MetaStoreCallback<Void> callback) {
    if (!ledgersListMutex.tryLock()) {
        // Defer update for later
        scheduledExecutor.schedule(() -> updateLedgersListAfterRollover(callback), 100, TimeUnit.MILLISECONDS);
        return;
    }
    if (log.isDebugEnabled()) {
        log.debug("[{}] Updating ledgers ids with new ledger. version={}", name, ledgersStat);
    }
    // ledgersStat carries the expected znode version for optimistic concurrency.
    store.asyncUpdateLedgerIds(name, getManagedLedgerInfo(), ledgersStat, callback);
}
/**
 * Kicks off an offload-eligibility check on the ordered executor, unless automatic
 * offload is disabled (negative size threshold).
 */
private void maybeOffloadInBackground(CompletableFuture<PositionImpl> promise) {
    if (config.getOffloadAutoTriggerSizeThresholdBytes() < 0) {
        // Auto-trigger disabled; nothing to schedule.
        return;
    }
    executor.executeOrdered(name, safeRun(() -> maybeOffload(promise)));
}
/**
 * Deletes a BookKeeper ledger, retrying up to {@code retry} times with a fixed backoff.
 * An already-deleted ledger is treated as success (warn only).
 *
 * @param ledgerId ledger to delete
 * @param retry    remaining attempts; when it reaches 0 the failure is logged and given up
 */
private void asyncDeleteLedger(long ledgerId, long retry) {
    if (retry <= 0) {
        log.warn("[{}] Failed to delete ledger after retries {}", name, ledgerId);
        return;
    }
    bookKeeper.asyncDeleteLedger(ledgerId, (rc, ctx) -> {
        if (rc == BKException.Code.NoSuchLedgerExistsException) {
            // Already gone — nothing to retry.
            log.warn("[{}] Ledger was already deleted {}", name, ledgerId);
        } else if (rc != BKException.Code.OK) {
            // BUGFIX: the original format string had only two placeholders, so the
            // BKException.getMessage(rc) argument was silently dropped by SLF4J and the
            // failure reason never appeared in the log.
            log.error("[{}] Error deleting ledger {} : {}", name, ledgerId, BKException.getMessage(rc));
            scheduledExecutor.schedule(safeRun(() -> {
                asyncDeleteLedger(ledgerId, retry - 1);
            }), DEFAULT_LEDGER_DELETE_BACKOFF_TIME_SEC, TimeUnit.SECONDS);
        } else {
            if (log.isDebugEnabled()) {
                log.debug("[{}] Deleted ledger {}", name, ledgerId);
            }
        }
    }, null);
}
/**
 * It handles add failure on the given ledger. It can be triggered when add-entry fails
 * or times out.
 *
 * @param ledger the ledger handle the failed write targeted
 */
void handleAddFailure(final LedgerHandle ledger) {
    // If we get a write error, we will try to create a new ledger and re-submit the pending writes. If the
    // ledger creation fails (persistent bk failure, another instance owning the ML, ...), then the writes will
    // be marked as failed.
    ml.mbean.recordAddEntryError();
    ml.getExecutor().executeOrdered(ml.getName(), SafeRun.safeRun(() -> {
        // Force the creation of a new ledger. Doing it in a background thread to avoid acquiring ML lock
        // from a BK callback.
        ml.ledgerClosed(ledger);
    }));
}
/**
 * Create ledger async and schedule a timeout task to check ledger-creation is complete,
 * else it fails the callback with TimeoutException.
 *
 * @param bookKeeper BK client to create the ledger with
 * @param config     ensemble/quorum sizes, password and metadata-op timeout
 * @param digestType digest type for the new ledger
 * @param cb         completion callback (receives TimeoutException code on timeout)
 * @param emptyMap   unused; kept for signature compatibility with existing callers
 */
protected void asyncCreateLedger(BookKeeper bookKeeper, ManagedLedgerConfig config, DigestType digestType,
        CreateCallback cb, Map<Object, Object> emptyMap) {
    // The flag is shared (as ctx) with the create callback so exactly one side —
    // creation or timeout — reports to cb.
    AtomicBoolean ledgerCreated = new AtomicBoolean(false);
    bookKeeper.asyncCreateLedger(config.getEnsembleSize(), config.getWriteQuorumSize(), config.getAckQuorumSize(),
            digestType, config.getPassword(), cb, ledgerCreated, Collections.emptyMap());
    scheduledExecutor.schedule(() -> {
        // BUGFIX: the original get()-then-set() was a check-then-act race — the real
        // create callback could fire between the two calls, letting both sides invoke
        // cb.createComplete. compareAndSet makes the claim atomic.
        if (ledgerCreated.compareAndSet(false, true)) {
            cb.createComplete(BKException.Code.TimeoutException, null, null);
        }
    }, config.getMetadataOperationsTimeoutSeconds(), TimeUnit.SECONDS);
}
/**
 * Deletes the managed-ledger znode and reports the outcome to the callback on the
 * ordered executor (keyed by ledger name).
 */
@Override
public void removeManagedLedger(String ledgerName, MetaStoreCallback<Void> callback) {
    log.info("[{}] Remove ManagedLedger", ledgerName);
    zk.delete(prefix + ledgerName, -1,
            (rc, path, ctx) -> executor.executeOrdered(ledgerName, safeRun(() -> {
                Code code = Code.get(rc);
                if (log.isDebugEnabled()) {
                    log.debug("[{}] zk delete done. rc={}", ledgerName, code);
                }
                if (code == Code.OK) {
                    callback.operationComplete(null, null);
                } else {
                    callback.operationFailed(new MetaStoreException(KeeperException.create(code)));
                }
            })), null);
}
/**
 * Deletes the consumer (cursor) znode under the ledger and reports the outcome to the
 * callback on the ordered executor (keyed by ledger name).
 */
@Override
public void asyncRemoveCursor(final String ledgerName, final String consumerName,
        final MetaStoreCallback<Void> callback) {
    log.info("[{}] Remove consumer={}", ledgerName, consumerName);
    zk.delete(prefix + ledgerName + "/" + consumerName, -1,
            (rc, path, ctx) -> executor.executeOrdered(ledgerName, safeRun(() -> {
                Code code = Code.get(rc);
                if (log.isDebugEnabled()) {
                    log.debug("[{}] [{}] zk delete done. rc={}", ledgerName, consumerName, code);
                }
                if (code == Code.OK) {
                    callback.operationComplete(null, null);
                } else {
                    callback.operationFailed(new MetaStoreException(KeeperException.create(code)));
                }
            })), null);
}
/**
 * BookKeeper write-completion callback for this add operation.
 *
 * On failure, routes to handleAddFailure (ledger rollover + re-submit). On success,
 * first races against the pending timeout task; only the winner proceeds, and the
 * user-facing completion is then re-dispatched on the ML-name-ordered executor.
 *
 * @param rc      BK result code
 * @param lh      ledger handle the entry was written to (must match our ledger)
 * @param entryId entry id assigned by BookKeeper
 * @param ctx     must be the ctx this operation was created with
 */
@Override
public void addComplete(int rc, final LedgerHandle lh, long entryId, Object ctx) {
    if (ledger.getId() != lh.getId()) {
        log.warn("[{}] ledgerId {} doesn't match with acked ledgerId {}", ml.getName(), ledger.getId(), lh.getId());
    }
    checkArgument(ledger.getId() == lh.getId(), "ledgerId %s doesn't match with acked ledgerId %s", ledger.getId(),
            lh.getId());
    checkArgument(this.ctx == ctx);
    this.entryId = entryId;
    if (log.isDebugEnabled()) {
        log.debug("[{}] [{}] write-complete: ledger-id={} entry-id={} size={} rc={}", this, ml.getName(), lh.getId(),
                entryId, dataLength, rc);
    }
    if (rc != BKException.Code.OK) {
        handleAddFailure(lh);
    } else {
        // If the timeout task already fired for this op, it owns completion — bail out.
        if (!checkAndCompleteTimeoutTask()) {
            return;
        }
        // Trigger addComplete callback in a thread hashed on the managed ledger name
        ml.getExecutor().executeOrdered(ml.getName(), this);
    }
}
@Override public void asyncResetCursor(Position newPos, AsyncCallbacks.ResetCursorCallback callback) { checkArgument(newPos instanceof PositionImpl); final PositionImpl newPosition = (PositionImpl) newPos; // order trim and reset operations on a ledger ledger.getExecutor().executeOrdered(ledger.getName(), safeRun(() -> { if (ledger.isValidPosition(newPosition) || newPosition.equals(PositionImpl.earliest) || newPosition.equals(PositionImpl.latest)) { internalResetCursor(newPosition, callback); } else { // caller (replay) should handle this error and retry cursor reset callback.resetFailed(new ManagedLedgerException.InvalidCursorPositionException(newPosition.toString()), newPosition); } })); }