/**
 * Performs a one-time initialization of the wrapped BookKeeperLog. If the underlying initialization
 * fails, the initialized flag is rolled back so that a subsequent call may retry.
 *
 * @throws DurableDataLogException If the wrapped log could not be initialized.
 */
private void initialize() throws DurableDataLogException {
    if (!this.initialized.compareAndSet(false, true)) {
        // Already initialized (or another caller won the race); nothing to do.
        return;
    }

    try {
        this.log.initialize(DEFAULT_TIMEOUT);
    } catch (Exception ex) {
        // Reset the flag so a future attempt can retry the initialization.
        this.initialized.set(false);
        throw ex;
    }
}
/**
 * Loads a fresh copy of the BookKeeperLog's metadata from ZooKeeper. This is a read-only operation:
 * it performs no fencing and does not modify the metadata in any way.
 *
 * @return A new {@link ReadOnlyLogMetadata} instance, or null if no metadata exists yet (most likely
 * because this log has never been accessed before).
 * @throws DataLogInitializationException If an Exception occurred while fetching the metadata.
 */
public ReadOnlyLogMetadata fetchMetadata() throws DataLogInitializationException {
    return this.log.loadMetadata();
}
/**
 * Creates a new instance of the DebugLogWrapper class.
 *
 * @param logId      The Id of the BookKeeperLog to wrap.
 * @param zkClient   A pointer to the CuratorFramework client to use.
 * @param bookKeeper A pointer to the BookKeeper client to use.
 * @param config     BookKeeperConfig to use.
 * @param executor   An Executor to use for async operations.
 */
DebugLogWrapper(int logId, CuratorFramework zkClient, BookKeeper bookKeeper, BookKeeperConfig config, ScheduledExecutorService executor) {
    this.bkClient = bookKeeper;
    this.config = config;
    this.initialized = new AtomicBoolean();
    this.log = new BookKeeperLog(logId, zkClient, bookKeeper, config, executor);
}
/**
 * Truncates the log up to (and including) the given address. The actual truncation is performed
 * asynchronously on the configured executor.
 *
 * @param upToAddress The address up to which to truncate; must be a {@link LedgerAddress}.
 * @param timeout     Timeout for the operation (unused by the async runner itself).
 * @return A CompletableFuture that completes when the truncation has been attempted.
 */
@Override
public CompletableFuture<Void> truncate(LogAddress upToAddress, Duration timeout) {
    ensurePreconditions();
    Preconditions.checkArgument(upToAddress instanceof LedgerAddress, "upToAddress must be of type LedgerAddress.");
    LedgerAddress ledgerAddress = (LedgerAddress) upToAddress;
    return CompletableFuture.runAsync(() -> tryTruncate(ledgerAddress), this.executorService);
}
/**
 * Creates a new reader over this log's contents, starting from the beginning.
 *
 * @return A new {@link CloseableIterator} over the log's {@code ReadItem}s.
 * @throws DurableDataLogException If the log is not in a state that permits reading.
 */
@Override
public CloseableIterator<ReadItem, DurableDataLogException> getReader() throws DurableDataLogException {
    ensurePreconditions();
    return new LogReader(getLogMetadata(), this.bookKeeper, this.config);
}
log.initialize(TIMEOUT); val currentMetadata = log.loadMetadata(); val lastLedger = currentMetadata.getLedgers().get(currentMetadata.getLedgers().size() - 1); allLedgers.add(new AbstractMap.SimpleImmutableEntry<>(lastLedger.getLedgerId(), log.append(new ByteArraySegment(getWriteData()), TIMEOUT).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
@Override public void disable() throws DurableDataLogException { // Get the current metadata, disable it, and then persist it back. synchronized (this.lock) { ensurePreconditions(); LogMetadata metadata = getLogMetadata(); Preconditions.checkState(metadata.isEnabled(), "BookKeeperLog is already disabled."); metadata = this.logMetadata.asDisabled(); persistMetadata(metadata, false); this.logMetadata = metadata; log.info("{}: Disabled (Epoch = {}, UpdateVersion = {}).", this.traceObjectId, metadata.getEpoch(), metadata.getUpdateVersion()); } // Close this instance of the BookKeeperLog. This ensures the proper cancellation of any ongoing writes. close(); }
@Override public CompletableFuture<LogAddress> append(ArrayView data, Duration timeout) { ensurePreconditions(); long traceId = LoggerHelpers.traceEnterWithContext(log, this.traceObjectId, "append", data.getLength()); if (data.getLength() > getMaxAppendLength()) { return Futures.failedFuture(new WriteTooLongException(data.getLength(), getMaxAppendLength())); } Timer timer = new Timer(); // Queue up the write. CompletableFuture<LogAddress> result = new CompletableFuture<>(); this.writes.add(new Write(data, getWriteLedger(), result)); // Trigger Write Processor. this.writeProcessor.runAsync(); // Post append tasks. We do not need to wait for these to happen before returning the call. result.whenCompleteAsync((address, ex) -> { if (ex != null) { handleWriteException(ex); } else { // Update metrics and take care of other logging tasks. this.metrics.writeCompleted(timer.getElapsed()); LoggerHelpers.traceLeave(log, this.traceObjectId, "append", traceId, data.getLength(), address); } }, this.executorService); return result; }
LogMetadata oldMetadata = loadMetadata(); newMetadata = updateMetadata(oldMetadata, newLedger, true); LedgerMetadata ledgerMetadata = newMetadata.getLedger(newLedger.getId()); assert ledgerMetadata != null : "cannot find newly added ledger metadata"; this.writeLedger = new WriteLedger(newLedger, ledgerMetadata); this.logMetadata = newMetadata; ledgersToDelete = getLedgerIdsToDelete(oldMetadata, newMetadata);
@Override public void enable() throws DurableDataLogException { Exceptions.checkNotClosed(this.closed.get(), this); synchronized (this.lock) { Preconditions.checkState(this.writeLedger == null, "BookKeeperLog is already initialized; cannot re-enable."); assert this.logMetadata == null : "writeLedger == null but logMetadata != null"; // Load existing metadata. Inexistent metadata means the BookKeeperLog has never been accessed, and therefore // enabled by default. LogMetadata metadata = loadMetadata(); Preconditions.checkState(metadata != null && !metadata.isEnabled(), "BookKeeperLog is already enabled."); metadata = metadata.asEnabled(); persistMetadata(metadata, false); log.info("{}: Enabled (Epoch = {}, UpdateVersion = {}).", this.traceObjectId, metadata.getEpoch(), metadata.getUpdateVersion()); } }
close(); LoggerHelpers.traceLeave(log, this.traceObjectId, "processPendingWrites", traceId, cs); return false; List<Write> toExecute = getWritesToExecute(); success = executeWrites(toExecute);
completeWrite(write); return; handleWriteException(rc, write); } catch (Throwable ex) { write.fail(ex, !isRetryable(ex)); } finally {
WriteLedger currentLedger = getWriteLedger(); Map<Long, Long> lastAddsConfirmed = new HashMap<>(); boolean anythingChanged = false; long lac = fetchLastAddConfirmed(w.getWriteLedger(), lastAddsConfirmed); if (w.getEntryId() >= 0 && w.getEntryId() <= lac) { completeWrite(w); anythingChanged = true; } else if (currentLedger.ledger.getId() != w.getWriteLedger().ledger.getId()) {
/**
 * Closes this wrapper by closing the underlying BookKeeperLog.
 */
@Override public void close() { this.log.close(); }
/**
 * Updates the Metadata for this BookKeeperLog in ZooKeeper by setting its Enabled flag to true.
 * Delegates directly to the wrapped log's enable(); no prior initialization of this wrapper is required.
 *
 * @throws DurableDataLogException If an exception occurred while updating the metadata.
 */
public void enable() throws DurableDataLogException { this.log.enable(); }
/**
 * Open-Fences the BookKeeperLog (initializes it), then updates its Metadata in ZooKeeper by setting
 * the Enabled flag to false.
 *
 * @throws DurableDataLogException If an exception occurred while initializing or disabling the log.
 */
public void disable() throws DurableDataLogException {
    // Fence the log first; disabling requires an initialized (and therefore fenced) log.
    initialize();
    this.log.disable();
}
/** * Creates a new instance of the BookKeeper log class. * * @param containerId The Id of the Container whose BookKeeperLog to open. * @param zkClient A reference to the CuratorFramework client to use. * @param bookKeeper A reference to the BookKeeper client to use. * @param config Configuration to use. * @param executorService An Executor to use for async operations. */ BookKeeperLog(int containerId, CuratorFramework zkClient, BookKeeper bookKeeper, BookKeeperConfig config, ScheduledExecutorService executorService) { Preconditions.checkArgument(containerId >= 0, "containerId must be a non-negative integer."); this.zkClient = Preconditions.checkNotNull(zkClient, "zkClient"); this.bookKeeper = Preconditions.checkNotNull(bookKeeper, "bookKeeper"); this.config = Preconditions.checkNotNull(config, "config"); this.executorService = Preconditions.checkNotNull(executorService, "executorService"); this.closed = new AtomicBoolean(); this.logNodePath = HierarchyUtils.getPath(containerId, this.config.getZkHierarchyDepth()); this.traceObjectId = String.format("Log[%d]", containerId); this.writes = new WriteQueue(); val retry = createRetryPolicy(this.config.getMaxWriteAttempts(), this.config.getBkWriteTimeoutMillis()); this.writeProcessor = new SequentialAsyncProcessor(this::processWritesSync, retry, this::handleWriteProcessorFailures, this.executorService); this.rolloverProcessor = new SequentialAsyncProcessor(this::rollover, retry, this::handleRolloverFailure, this.executorService); this.metrics = new BookKeeperMetrics.BookKeeperLog(containerId); this.metricReporter = this.executorService.scheduleWithFixedDelay(this::reportMetrics, REPORT_INTERVAL, REPORT_INTERVAL, TimeUnit.MILLISECONDS); }
/**
 * Gets the current Epoch of this log, as recorded in its metadata.
 *
 * @return The Epoch value.
 */
@Override
public long getEpoch() {
    ensurePreconditions();
    LogMetadata metadata = getLogMetadata();
    return metadata.getEpoch();
}
/**
 * Invoked when the rollover processor has exhausted its retries. Logs the failure and closes this
 * log, since we can no longer guarantee proper ledger rollovers.
 *
 * @param exception The exception that caused the final rollover failure.
 */
private void handleRolloverFailure(Throwable exception) {
    log.warn("{}: Too many rollover failures; closing.", this.traceObjectId, exception);
    close();
}
/**
 * Creates a special DurableDataLog wrapping the BookKeeperLog that only supports reading from the log.
 * It does not support initialization or other modifications to the log. Accessing this log will not
 * interfere with other active writes to this log (i.e., it will not fence anyone out or close Ledgers
 * that shouldn't be closed).
 *
 * @return A new DurableDataLog instance.
 * @throws DataLogInitializationException If an exception occurred fetching metadata from ZooKeeper.
 */
public DurableDataLog asReadOnly() throws DataLogInitializationException {
    return new ReadOnlyBooKeeperLog(this.log.loadMetadata());
}