@Override
public Future<BulkWriteResponse> writeBulkWithContext(final String stream, List<ByteBuffer> data, WriteContext ctx) {
    // Track one more in-flight bulk write and account for every record received.
    bulkWritePendingStat.inc();
    receivedRecordCounter.add(data.size());
    final BulkWriteOp bulkOp = new BulkWriteOp(stream, data, statsLogger, perStreamStatsLogger,
            getChecksum(ctx), featureChecksumDisabled, accessControlManager);
    executeStreamOp(bulkOp);
    // Decrement the pending gauge once the operation's future is satisfied,
    // regardless of success or failure.
    Future<BulkWriteResponse> response = bulkOp.result().ensure(new Function0<BoxedUnit>() {
        public BoxedUnit apply() {
            bulkWritePendingStat.dec();
            return null;
        }
    });
    return response;
}
@Override
public Future<Rep> apply(Req req, Service<Req, Rep> service) {
    // Times only the synchronous dispatch of the request; the eventual
    // completion of the returned future is not measured here.
    outstandingAsync.inc();
    final Stopwatch dispatchWatch = Stopwatch.createStarted();
    Future<Rep> response = null;
    try {
        response = service.apply(req);
        serviceExec.registerSuccessfulEvent(dispatchWatch.stop().elapsed(TimeUnit.MICROSECONDS));
    } finally {
        outstandingAsync.dec();
        // A null response means service.apply threw before producing a future,
        // so record the dispatch as failed; the exception still propagates.
        if (response == null) {
            serviceExec.registerFailedEvent(dispatchWatch.stop().elapsed(TimeUnit.MICROSECONDS));
        }
    }
    return response;
}
// NOTE(review): this span is a truncated fragment of a larger flush loop — the
// second `else if (qe != null && ((bufferedEntriesThreshold > 0 && ...` condition
// is cut off mid-expression (parentheses never close before the next statement),
// so this cannot compile as shown; the missing halves of the method lie outside
// this view. The visible intent is per-branch flush accounting: max-wait,
// max-outstanding-bytes, and empty-queue counters, plus byte/queue-size stats
// for real (non-force-ledger) entries. Verify against the full Journal source.
journalStats.getFlushMaxWaitCounter().inc(); } else if (qe != null && ((bufferedEntriesThreshold > 0 && toFlush.size() > bufferedEntriesThreshold) journalStats.getFlushMaxOutstandingBytesCounter().inc(); } else if (qe == null) { journalStats.getFlushEmptyQueueCounter().inc(); } else if (qe.entryId != Bookie.METAENTRY_ID_FORCE_LEDGER) { int entrySize = qe.entry.readableBytes(); journalStats.getJournalWriteBytes().add(entrySize); journalStats.getJournalQueueSize().dec();
@Override
public Long get() {
    // Gauge sample: reads the counter without synchronizing against writers,
    // so the reported value may lag concurrent updates.
    // Eventually consistent.
    return firstCounter.get();
}
};
// Completion callback (tail of an anonymous Function0 begun outside this view):
// unconditionally decrements the pending-bulk-write gauge when invoked.
public BoxedUnit apply() {
    bulkWritePendingStat.dec();
    return null;
}
});
@Override
public Long get() {
    // Gauge sample: reads the counter without synchronizing against writers,
    // so the reported value may lag concurrent updates.
    // Eventually consistent.
    return firstCounter.get();
}
};
@Override
public void onEntryComplete(int rc, LedgerHandle lh, LedgerEntry entry, Object ctx) {
    // Counts every completed entry read; the increment is unconditional, so
    // failures (non-OK `rc`) are counted the same as successes.
    readCounter.inc();
}
}, readConcurrency);
/**
 * Records throughput and end-to-end latency stats for one received message,
 * then blocks while consumers are globally paused.
 *
 * @param data             the received payload; only its length is used
 * @param publishTimestamp publish time in epoch millis, used to derive latency
 */
@Override
public void messageReceived(byte[] data, long publishTimestamp) {
    messagesReceived.increment();
    totalMessagesReceived.increment();
    messagesReceivedCounter.inc();
    bytesReceived.add(data.length);
    bytesReceivedCounter.add(data.length);

    long now = System.currentTimeMillis();
    long endToEndLatencyMicros = TimeUnit.MILLISECONDS.toMicros(now - publishTimestamp);
    // Guard against non-positive values (clock skew between publisher and
    // consumer can make publishTimestamp appear to be in the future).
    if (endToEndLatencyMicros > 0) {
        endToEndCumulativeLatencyRecorder.recordValue(endToEndLatencyMicros);
        endToEndLatencyRecorder.recordValue(endToEndLatencyMicros);
        endToEndLatencyStats.registerSuccessfulEvent(endToEndLatencyMicros, TimeUnit.MICROSECONDS);
    }

    // Back-pressure: park this consumer while the benchmark has paused consumption.
    while (consumersArePaused) {
        try {
            Thread.sleep(1000);
        } catch (InterruptedException e) {
            // Fix: the previous code swallowed the interrupt via printStackTrace(),
            // losing the interrupt status and making the pause loop un-interruptible.
            // Restore the flag and stop blocking so shutdown can proceed.
            Thread.currentThread().interrupt();
            break;
        }
    }
}
@VisibleForTesting void errorOutPendingRequests(Throwable cause, boolean errorOutWriter) { final List<PendingLogRecord> pendingRequestsSnapshot; synchronized (this) { pendingRequestsSnapshot = pendingRequests; encounteredError = errorOutWriter; pendingRequests = null; if (null != rollingFuture) { FutureUtils.setException(rollingFuture, cause); } rollingFuture = null; } pendingRequestDispatch.add(pendingRequestsSnapshot.size()); // After erroring out the writer above, no more requests // will be enqueued to pendingRequests for (PendingLogRecord pendingLogRecord : pendingRequestsSnapshot) { pendingLogRecord.promise.setException(cause); } }
// Completion callback (tail of an anonymous Function0 begun outside this view):
// unconditionally decrements the pending-write gauge when invoked.
public BoxedUnit apply() {
    writePendingStat.dec();
    return null;
}
});
@Override
public Long get() {
    // Gauge sample: reads the counter without synchronizing against writers,
    // so the reported value may lag concurrent updates.
    // Eventually consistent.
    return firstCounter.get();
}
};
@Override
public void onEntryComplete(int rc, LedgerHandle lh, LedgerEntry entry, Object ctx) {
    // Counts every completed entry read; the increment is unconditional, so
    // failures (non-OK `rc`) are counted the same as successes.
    readCounter.inc();
}
}, readConcurrency);