@Override
public void registerSuccessfulValue(long l) {
    // Broadcast the sampled value to both backing loggers, primary first.
    firstLogger.registerSuccessfulValue(l);
    secondLogger.registerSuccessfulValue(l);
}
@Override
public void registerSuccessfulValue(long l) {
    // Record the value against each delegate in turn so both stat
    // providers observe the same sample.
    firstLogger.registerSuccessfulValue(l);
    secondLogger.registerSuccessfulValue(l);
}
@Override
public void registerSuccessfulValue(long l) {
    // Plain pass-through to the wrapped stats logger.
    statsLogger.registerSuccessfulValue(l);
}
@Override
public void onSuccess(LogRecordWithDLSN value) {
    // Record how long the last-entry recovery took, then how many
    // records were scanned to find it.
    long elapsedMicros = stopwatch.stop().elapsed(TimeUnit.MICROSECONDS);
    recoverLastEntryStats.registerSuccessfulEvent(elapsedMicros, TimeUnit.MICROSECONDS);
    recoverScannedEntriesStats.registerSuccessfulValue(numRecordsScanned.get());
}
@Override
public void run() {
    // Force the pending batch to disk and complete the waiting futures.
    long now = System.currentTimeMillis();
    try {
        synch();
        // Record the size of this sync batch (entry count and bytes).
        syncSize.registerSuccessfulValue(unsyncedCount);
        syncBytes.registerSuccessfulValue(unsyncedBytes);
        for (LogEntryHolderFuture e : syncNeeded) {
            // Per-entry latency from enqueue timestamp to durable sync.
            statsEntrySyncLatency.registerSuccessfulEvent(now - e.timestamp, TimeUnit.MILLISECONDS);
            e.syncDone();
        }
        // Batch fully acknowledged; drop the reference.
        syncNeeded = null;
    } catch (Throwable t) {
        // Any failure here poisons the whole commit log.
        failed = true;
        LOGGER.log(Level.SEVERE, "general commit log failure on " + FileCommitLog.this.logDirectory, t);
        // NOTE(review): entries are only counted as failed in stats; their
        // futures are not completed exceptionally here — confirm callers
        // observe the 'failed' flag. Also, if e.syncDone() throws mid-loop,
        // already-completed entries are re-registered as failed — verify.
        for (LogEntryHolderFuture e : syncNeeded) {
            statsEntrySyncLatency.registerFailedEvent(now - e.timestamp, TimeUnit.MILLISECONDS);
        }
    }
}
private CompletableFuture<?> publishSuspectedLedgersAsync(Collection<String> missingBookies, Set<Long> ledgers) { if (null == ledgers || ledgers.size() == 0) { // there is no ledgers available for this bookie and just // ignoring the bookie failures LOG.info("There is no ledgers for the failed bookie: {}", missingBookies); return FutureUtils.Void(); } LOG.info("Following ledgers: {} of bookie: {} are identified as underreplicated", ledgers, missingBookies); numUnderReplicatedLedger.registerSuccessfulValue(ledgers.size()); return FutureUtils.processList( Lists.newArrayList(ledgers), ledgerId -> ledgerUnderreplicationManager.markLedgerUnderreplicatedAsync(ledgerId, missingBookies), null ); }
@Override public void handleBookiesThatLeft(Set<BookieSocketAddress> leftBookies) { for (BookieSocketAddress addr : leftBookies) { try { BookieNode node = knownBookies.remove(addr); if (null != node) { topology.remove(node); if (this.isWeighted) { this.bookieInfoMap.remove(node); } bookiesLeftCounter.registerSuccessfulValue(1L); if (LOG.isDebugEnabled()) { LOG.debug("Cluster changed : bookie {} left from cluster.", addr); } } } catch (Throwable t) { LOG.error("Unexpected exception while handling leaving bookie {}", addr, t); if (bookiesLeftCounter != null) { bookiesLeftCounter.registerFailedValue(1L); } // no need to re-throw; we want to process the rest of the bookies // exception anyways will be caught/logged/suppressed in the ZK's event handler } } }
@Override
public void writeComplete(int rc, long ledgerId, long entryId, BookieSocketAddress addr, Object ctx) {
    // Completion callback for replicating one entry to a replacement bookie.
    if (rc != BKException.Code.OK) {
        LOG.error("BK error writing entry for ledgerId: {}, entryId: {}, bookie: {}",
                ledgerId, entryId, addr, BKException.create(rc));
        // First failure wins: the CAS ensures the fragment callback fires once.
        if (completed.compareAndSet(false, true)) {
            ledgerFragmentEntryMcb.processResult(rc, null, null);
        }
    } else {
        numEntriesWritten.inc();
        // ctx appears to carry the entry size in bytes when it is a Long —
        // TODO(review): confirm against the caller that sets ctx.
        if (ctx instanceof Long) {
            numBytesWritten.registerSuccessfulValue((Long) ctx);
        }
        if (LOG.isDebugEnabled()) {
            LOG.debug("Success writing ledger id {}, entry id {} to a new bookie {}!",
                    ledgerId, entryId, addr);
        }
        // Only complete once every replacement bookie has acked this entry,
        // again guarded by the CAS so the callback fires exactly once.
        if (numCompleted.incrementAndGet() == newBookies.size() && completed.compareAndSet(false, true)) {
            ledgerFragmentEntryMcb.processResult(rc, null, null);
        }
    }
}
};
/**
 * Publishes the recovery read/write counters and settles the promise
 * according to the result code.
 */
private void submitCallback(int rc) {
    // Failure path first: record failed counts and propagate the error.
    if (BKException.Code.OK != rc) {
        clientCtx.getClientStats().getRecoverAddCountLogger().registerFailedValue(writeCount.get());
        clientCtx.getClientStats().getRecoverReadCountLogger().registerFailedValue(readCount.get());
        promise.completeExceptionally(BKException.create(rc));
        return;
    }
    // Success: record the counts and hand back the recovered handle.
    clientCtx.getClientStats().getRecoverAddCountLogger().registerSuccessfulValue(writeCount.get());
    clientCtx.getClientStats().getRecoverReadCountLogger().registerSuccessfulValue(readCount.get());
    promise.complete(lh);
}
@Override public void handleBookiesThatJoined(Set<BookieSocketAddress> joinedBookies) { // node joined for (BookieSocketAddress addr : joinedBookies) { try { BookieNode node = createBookieNode(addr); topology.add(node); knownBookies.put(addr, node); if (this.isWeighted) { this.bookieInfoMap.putIfAbsent(node, new BookieInfo()); } bookiesJoinedCounter.registerSuccessfulValue(1L); if (LOG.isDebugEnabled()) { LOG.debug("Cluster changed : bookie {} joined the cluster.", addr); } } catch (Throwable t) { // topology.add() throws unchecked exception LOG.error("Unexpected exception while handling joining bookie {}", addr, t); bookiesJoinedCounter.registerFailedValue(1L); // no need to re-throw; we want to process the rest of the bookies // exception anyways will be caught/logged/suppressed in the ZK's event handler } } }
dbLedgerStorageStats.getReadAheadBatchCountStats().registerSuccessfulValue(count); dbLedgerStorageStats.getReadAheadBatchSizeStats().registerSuccessfulValue(size); } catch (Exception e) { if (log.isDebugEnabled()) {
.registerSuccessfulValue(numReqInLastForceWrite); numReqInLastForceWrite = 0;
/**
 * Tries to take one permit. Over-limit acquisitions are rolled back and
 * counted as failures unless dark mode is enabled.
 *
 * @return true if the permit was granted (or dark mode bypasses the limit)
 */
@Override
public boolean acquire() {
    // Sample the in-flight permit count before attempting to take one.
    permitsMetric.registerSuccessfulValue(permitsUpdater.get(this));
    boolean withinLimit = permitsUpdater.incrementAndGet(this) <= permitsMax;
    if (withinLimit || isDarkmode()) {
        return true;
    }
    // Over the limit and not in dark mode: undo the increment and record the rejection.
    acquireFailureCounter.inc();
    permitsUpdater.decrementAndGet(this);
    return false;
}
dbLedgerStorageStats.getFlushSizeStats().registerSuccessfulValue(sizeToFlush); } catch (IOException e) {
/**
 * Reads a single entry from a ledger, recording latency and byte stats.
 *
 * @param ledgerId the ledger to read from
 * @param entryId the entry to read
 * @return the entry payload; caller owns the returned buffer
 * @throws IOException on storage errors
 * @throws NoLedgerException if the ledger does not exist
 */
public ByteBuf readEntry(long ledgerId, long entryId) throws IOException, NoLedgerException {
    long requestNanos = MathUtils.nowInNano();
    boolean success = false;
    int entrySize = 0;
    try {
        LedgerDescriptor handle = handles.getReadOnlyHandle(ledgerId);
        if (LOG.isTraceEnabled()) {
            LOG.trace("Reading {}@{}", entryId, ledgerId);
        }
        ByteBuf entry = handle.readEntry(entryId);
        // FIX: entrySize was never assigned, so the byte-size stats below
        // always recorded 0; capture the real size once and reuse it.
        entrySize = entry.readableBytes();
        bookieStats.getReadBytes().add(entrySize);
        success = true;
        return entry;
    } finally {
        long elapsedNanos = MathUtils.elapsedNanos(requestNanos);
        if (success) {
            bookieStats.getReadEntryStats().registerSuccessfulEvent(elapsedNanos, TimeUnit.NANOSECONDS);
            bookieStats.getReadBytesStats().registerSuccessfulValue(entrySize);
        } else {
            bookieStats.getReadEntryStats().registerFailedEvent(elapsedNanos, TimeUnit.NANOSECONDS);
            // FIX: the failed size was registered on getReadEntryStats() (the
            // latency logger); use getReadBytesStats() to match the success path.
            bookieStats.getReadBytesStats().registerFailedValue(entrySize);
        }
    }
}
if (success) { bookieStats.getRecoveryAddEntryStats().registerSuccessfulEvent(elapsedNanos, TimeUnit.NANOSECONDS); bookieStats.getAddBytesStats().registerSuccessfulValue(entrySize); } else { bookieStats.getRecoveryAddEntryStats().registerFailedEvent(elapsedNanos, TimeUnit.NANOSECONDS);
numFragmentsPerLedger.registerSuccessfulValue(lh.getNumFragments()); numBookiesPerLedger.registerSuccessfulValue(lh.getNumBookies()); numLedgersChecked.inc(); } else if (Code.NoSuchLedgerExistsException == rc) {
if (success) { bookieStats.getAddEntryStats().registerSuccessfulEvent(elapsedNanos, TimeUnit.NANOSECONDS); bookieStats.getAddBytesStats().registerSuccessfulValue(entrySize); } else { bookieStats.getAddEntryStats().registerFailedEvent(elapsedNanos, TimeUnit.NANOSECONDS);
final long dataLength = data.length; numEntriesRead.inc(); numBytesRead.registerSuccessfulValue(dataLength); ByteBufList toSend = lh.getDigestManager() .computeDigestAndPackageForSending(entryId,
readReorderedCounter.registerSuccessfulValue(1); writeSet.set(i, writeSet.get(i) & ~MASK_BITS); readReorderedCounter.registerSuccessfulValue(1); return writeSet;