@VisibleForTesting void errorOutPendingRequests(Throwable cause, boolean errorOutWriter) { final List<PendingLogRecord> pendingRequestsSnapshot; synchronized (this) { pendingRequestsSnapshot = pendingRequests; encounteredError = errorOutWriter; pendingRequests = null; if (null != rollingFuture) { FutureUtils.setException(rollingFuture, cause); } rollingFuture = null; } pendingRequestDispatch.add(pendingRequestsSnapshot.size()); // After erroring out the writer above, no more requests // will be enqueued to pendingRequests for (PendingLogRecord pendingLogRecord : pendingRequestsSnapshot) { pendingLogRecord.promise.setException(cause); } }
@Override
public void onSuccess(WriteResponse response) {
    // Classify the response first, then record latency once; byte counters
    // are bumped only for successful writes.
    final boolean ok = response.getHeader().getCode() == StatusCode.SUCCESS;
    final long elapsedMicros = stopwatch().elapsed(TimeUnit.MICROSECONDS);
    if (ok) {
        latencyStat.registerSuccessfulEvent(elapsedMicros);
        bytes.add(size);
        writeBytes.add(size);
    } else {
        latencyStat.registerFailedEvent(elapsedMicros);
    }
}

@Override
@Compression public void readFully(DataInputStream in) throws IOException { Preconditions.checkNotNull(in); // Make sure we're reading the right versioned entry. byte version = in.readByte(); if (version != this.version) { throw new IOException(String.format("Version mismatch while reading. Received: %d," + " Required: %d", version, this.version)); } header.read(in); payloadCompressed.read(in); // Decompress CompressionCodec codec = CompressionUtils.getCompressionCodec(header.compressionType); byte[] decompressed = codec.decompress( payloadCompressed.payload, 0, payloadCompressed.length, header.decompressedSize, decompressionStat); this.payloadDecompressed = new Payload(decompressed.length, decompressed); this.compressedEntryBytes.add(payloadCompressed.length); this.decompressedEntryBytes.add(payloadDecompressed.length); }
@Override
public void onSuccess(BulkWriteResponse response) {
    // One latency sample per response; payload-size counters only when the
    // bulk write actually succeeded.
    final boolean succeeded = StatusCode.SUCCESS == response.getHeader().getCode();
    final long micros = stopwatch().elapsed(TimeUnit.MICROSECONDS);
    if (!succeeded) {
        latencyStat.registerFailedEvent(micros);
        return;
    }
    latencyStat.registerSuccessfulEvent(micros);
    bytes.add(size);
    bulkWriteBytes.add(size);
}

@Override
@Compression public void writeFully(DataOutputStream out) throws IOException { Preconditions.checkNotNull(out); if (!isReady()) { throw new IOException("Entry not writable"); } // Version out.writeByte(version); // Header header.write(out); // Compress CompressionCodec codec = CompressionUtils.getCompressionCodec(header.compressionType); byte[] compressed = codec.compress( payloadDecompressed.payload, 0, payloadDecompressed.length, compressionStat); this.payloadCompressed = new Payload(compressed.length, compressed); this.compressedEntryBytes.add(payloadCompressed.length); this.decompressedEntryBytes.add(payloadDecompressed.length); payloadCompressed.write(out); }
@Override
public Future<BulkWriteResponse> writeBulkWithContext(final String stream,
                                                      List<ByteBuffer> data,
                                                      WriteContext ctx) {
    // Track in-flight bulk writes and how many records arrived in this batch.
    bulkWritePendingStat.inc();
    receivedRecordCounter.add(data.size());
    BulkWriteOp bulkWriteOp = new BulkWriteOp(stream, data, statsLogger, perStreamStatsLogger,
            getChecksum(ctx), featureChecksumDisabled, accessControlManager);
    executeStreamOp(bulkWriteOp);
    // Decrement the pending gauge once the op completes, success or failure.
    return bulkWriteOp.result().ensure(new Function0<BoxedUnit>() {
        public BoxedUnit apply() {
            bulkWritePendingStat.dec();
            return null;
        }
    });
}
@Override
public void onSuccess(BKLogSegmentWriter writer) {
    // The new log segment writer is ready: drain every record queued while the
    // roll was in progress, then complete the roll future. All of this happens
    // under the outer writer's lock so no new records interleave.
    try {
        synchronized (BKAsyncLogWriter.this) {
            for (PendingLogRecord pendingLogRecord : pendingRequests) {
                // Test hook: can be armed to fail while issuing pending writes.
                FailpointUtils.checkFailPoint(FailpointUtils.FailPointName.FP_LogWriterIssuePending);
                // Each pending record listens for its own write completion.
                writer.asyncWrite(pendingLogRecord.record, pendingLogRecord.flush)
                        .addEventListener(pendingLogRecord);
            }
            if (null != rollingFuture) {
                FutureUtils.setValue(rollingFuture, writer);
            }
            // Order matters: clear the roll state and record the dispatch count
            // before dropping the queue reference.
            rollingFuture = null;
            pendingRequestDispatch.add(pendingRequests.size());
            pendingRequests = null;
        }
    } catch (IOException ioe) {
        // A failpoint/write failure here fails the queued requests and the writer.
        errorOutPendingRequestsAndWriter(ioe);
    }
}

@Override
// NOTE(review): fragment of a larger read loop — the enclosing method and the
// close of this if-block are outside the visible chunk.
blockingReadStats.registerSuccessfulEvent(elapsedMicros);
if (!records.isEmpty()) {
    readCounter.add(records.size());
    // Remember the transaction id of the last record returned — presumably so
    // the next read resumes after it; confirm against the enclosing method.
    LogRecordWithDLSN lastRecord = records.get(records.size() - 1);
    lastTxId = lastRecord.getTransactionId();
// Thin adapter: forwards the delta straight to the backing counter.
@Override
public void add(long delta) {
    counter.add(delta);
}
@VisibleForTesting void errorOutPendingRequests(Throwable cause, boolean errorOutWriter) { final List<PendingLogRecord> pendingRequestsSnapshot; synchronized (this) { pendingRequestsSnapshot = pendingRequests; encounteredError = errorOutWriter; pendingRequests = null; if (null != rollingFuture) { FutureUtils.completeExceptionally(rollingFuture, cause); } rollingFuture = null; } pendingRequestDispatch.add(pendingRequestsSnapshot.size()); // After erroring out the writer above, no more requests // will be enqueued to pendingRequests for (PendingLogRecord pendingLogRecord : pendingRequestsSnapshot) { pendingLogRecord.promise.completeExceptionally(cause); } }
@Override
public void onSuccess(BulkWriteResponse response) {
    // Record one latency sample per response; size counters only on success.
    final boolean ok = response.getHeader().getCode() == StatusCode.SUCCESS;
    final long elapsedMicros = stopwatch().elapsed(TimeUnit.MICROSECONDS);
    if (ok) {
        latencyStat.registerSuccessfulEvent(elapsedMicros);
        bytes.add(size);
        bulkWriteBytes.add(size);
    } else {
        latencyStat.registerFailedEvent(elapsedMicros);
    }
}

@Override
@Override
public void onSuccess(WriteResponse response) {
    // Failure path records only latency; success also bumps byte counters.
    final boolean failed = response.getHeader().getCode() != StatusCode.SUCCESS;
    final long micros = stopwatch().elapsed(TimeUnit.MICROSECONDS);
    if (failed) {
        latencyStat.registerFailedEvent(micros);
        return;
    }
    latencyStat.registerSuccessfulEvent(micros);
    bytes.add(size);
    writeBytes.add(size);
}

@Override
/**
 * Garbage collect those entry loggers which are not associated with any active ledgers.
 */
private void doGcEntryLogs() {
    // Get a cumulative count, don't update until complete
    AtomicLong totalEntryLogSizeAcc = new AtomicLong(0L);
    // Loop through all of the entry logs and remove the non-active ledgers.
    entryLogMetaMap.forEach((entryLogId, meta) -> {
        // Drop ledgers that no longer exist from this entry log's metadata.
        removeIfLedgerNotExists(meta);
        if (meta.isEmpty()) {
            // This means the entry log is not associated with any active ledgers anymore.
            // We can remove this entry log file now.
            LOG.info("Deleting entryLogId " + entryLogId + " as it has no active ledgers!");
            removeEntryLog(entryLogId);
            gcStats.getReclaimedSpaceViaDeletes().add(meta.getTotalSize());
        }
        // Accumulated even for a just-deleted log — presumably getRemainingSize()
        // is 0 once the metadata is empty; confirm against EntryLogMetadata.
        totalEntryLogSizeAcc.getAndAdd(meta.getRemainingSize());
    });
    // Publish the totals only after the full sweep completes, so readers never
    // observe a partially-accumulated size.
    this.totalEntryLogSize = totalEntryLogSizeAcc.get();
    this.numActiveEntryLogs = entryLogMetaMap.keySet().size();
}
/**
 * Benchmark consumer callback: records throughput/latency stats for one
 * received message, then blocks while consumers are administratively paused.
 *
 * @param data             message payload (only its length is used)
 * @param publishTimestamp publish time in epoch millis, used for e2e latency
 */
@Override
public void messageReceived(byte[] data, long publishTimestamp) {
    messagesReceived.increment();
    totalMessagesReceived.increment();
    messagesReceivedCounter.inc();
    bytesReceived.add(data.length);
    bytesReceivedCounter.add(data.length);

    // End-to-end latency from publish to receipt; non-positive values
    // (clock skew between publisher and consumer) are dropped, not recorded.
    long now = System.currentTimeMillis();
    long endToEndLatencyMicros = TimeUnit.MILLISECONDS.toMicros(now - publishTimestamp);
    if (endToEndLatencyMicros > 0) {
        endToEndCumulativeLatencyRecorder.recordValue(endToEndLatencyMicros);
        endToEndLatencyRecorder.recordValue(endToEndLatencyMicros);
        endToEndLatencyStats.registerSuccessfulEvent(endToEndLatencyMicros, TimeUnit.MICROSECONDS);
    }

    // Block while paused. FIX: the original swallowed InterruptedException via
    // printStackTrace() and kept sleeping, making the thread unstoppable while
    // paused; restore the interrupt flag and stop waiting so shutdown works.
    while (consumersArePaused) {
        try {
            Thread.sleep(1000);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            break;
        }
    }
}
@Override
public Future<BulkWriteResponse> writeBulkWithContext(final String stream,
                                                      List<ByteBuffer> data,
                                                      WriteContext ctx) {
    // Count this batch as in-flight and record how many records it carries.
    bulkWritePendingStat.inc();
    receivedRecordCounter.add(data.size());
    BulkWriteOp bulkWriteOp = new BulkWriteOp(stream, data, statsLogger, perStreamStatsLogger,
            streamPartitionConverter, getChecksum(ctx), featureChecksumDisabled,
            accessControlManager);
    executeStreamOp(bulkWriteOp);
    // Decrement the pending gauge when the op finishes, regardless of outcome.
    return bulkWriteOp.result().ensure(new Function0<BoxedUnit>() {
        public BoxedUnit apply() {
            bulkWritePendingStat.dec();
            return null;
        }
    });
}