@Override
public void onCompleteLazyPersist(String bpId, long blockId,
    long creationTime, File[] savedFiles, FsVolumeImpl targetVolume) {
  try (AutoCloseableLock lock = datasetLock.acquire()) {
    ramDiskReplicaTracker.recordEndLazyPersist(bpId, blockId, savedFiles);

    targetVolume.incDfsUsedAndNumBlocks(bpId,
        savedFiles[0].length() + savedFiles[1].length());

    // Update metrics (ignore the metadata file size)
    datanode.getMetrics().incrRamDiskBlocksLazyPersisted();
    datanode.getMetrics().incrRamDiskBytesLazyPersisted(savedFiles[1].length());
    datanode.getMetrics().addRamDiskBlocksLazyPersistWindowMs(
        Time.monotonicNow() - creationTime);

    if (LOG.isDebugEnabled()) {
      LOG.debug("LazyWriter: Finish persisting RamDisk block: "
          + " block pool Id: " + bpId + " block id: " + blockId
          + " to block file " + savedFiles[1] + " and meta file " + savedFiles[0]
          + " on target volume " + targetVolume);
    }
  }
}
@Override
public void run() {
  Value value;

  if (shouldDefer()) {
    deferredUncachingExecutor.schedule(
        this, revocationPollingMs, TimeUnit.MILLISECONDS);
    return;
  }

  synchronized (FsDatasetCache.this) {
    value = mappableBlockMap.get(key);
  }
  Preconditions.checkNotNull(value);
  Preconditions.checkArgument(value.state == State.UNCACHING);

  IOUtils.closeQuietly(value.mappableBlock);
  synchronized (FsDatasetCache.this) {
    mappableBlockMap.remove(key);
  }
  long newUsedBytes = release(value.mappableBlock.getLength());
  numBlocksCached.addAndGet(-1);
  dataset.datanode.getMetrics().incrBlocksUncached(1);
  if (revocationTimeMs != 0) {
    LOG.debug("Uncaching of {} completed. usedBytes = {}",
        key, newUsedBytes);
  } else {
    LOG.debug("Deferred uncaching of {} completed. usedBytes = {}",
        key, newUsedBytes);
  }
}
VolumeScanner(Conf conf, DataNode datanode, FsVolumeReference ref) {
  this.conf = conf;
  this.datanode = datanode;
  this.metrics = datanode.getMetrics();
  this.ref = ref;
  this.volume = ref.getVolume();
  ScanResultHandler handler;
  try {
    handler = conf.resultHandler.newInstance();
  } catch (Throwable e) {
    LOG.error("unable to instantiate {}", conf.resultHandler, e);
    handler = new ScanResultHandler();
  }
  this.resultHandler = handler;
  setName("VolumeScannerThread(" + volume + ")");
  setDaemon(true);
}
private void sendLifelineIfDue() throws IOException {
  long startTime = scheduler.monotonicNow();
  if (!scheduler.isLifelineDue(startTime)) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Skipping sending lifeline for " + BPServiceActor.this
          + ", because it is not due.");
    }
    return;
  }
  if (dn.areHeartbeatsDisabledForTests()) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Skipping sending lifeline for " + BPServiceActor.this
          + ", because heartbeats are disabled for tests.");
    }
    return;
  }
  sendLifeline();
  dn.getMetrics().addLifeline(scheduler.monotonicNow() - startTime);
  scheduler.scheduleNextLifeline(scheduler.monotonicNow());
}
BPServiceActor(InetSocketAddress nnAddr, InetSocketAddress lifelineNnAddr,
    BPOfferService bpos) {
  this.bpos = bpos;
  this.dn = bpos.getDataNode();
  this.nnAddr = nnAddr;
  this.lifelineSender = lifelineNnAddr != null ?
      new LifelineSender(lifelineNnAddr) : null;
  this.initialRegistrationComplete = lifelineNnAddr != null ?
      new CountDownLatch(1) : null;
  this.dnConf = dn.getDnConf();
  this.ibrManager = new IncrementalBlockReportManager(
      dnConf.ibrInterval, dn.getMetrics());
  prevBlockReportId = ThreadLocalRandom.current().nextLong();
  scheduler = new Scheduler(dnConf.heartBeatInterval,
      dnConf.getLifelineIntervalMs(), dnConf.blockReportInterval,
      dnConf.outliersReportIntervalMs);
  // get the value of maxDataLength.
  this.maxDataLength = dnConf.getMaxDataLength();
}
private void reconstructTargets(int toReconstructLen) throws IOException {
  ByteBuffer[] inputs = getStripedReader().getInputBuffers(toReconstructLen);

  int[] erasedIndices = stripedWriter.getRealTargetIndices();
  ByteBuffer[] outputs = stripedWriter.getRealTargetBuffers(toReconstructLen);

  long start = System.nanoTime();
  getDecoder().decode(inputs, erasedIndices, outputs);
  long end = System.nanoTime();
  this.getDatanode().getMetrics().incrECDecodingTime(end - start);

  stripedWriter.updateRealTargetBuffers(toReconstructLen);
}
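// The decode call above follows the RawErasureDecoder contract: inputs holds
// the surviving units in unit order (null entries mark unavailable ones),
// erasedIndices names the units to rebuild, and outputs receives the
// reconstructed data. A minimal standalone sketch of that contract; the
// codec, cell size, and erased index below are illustrative, not taken from
// the code above.

import java.io.IOException;
import java.nio.ByteBuffer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.erasurecode.CodecUtil;
import org.apache.hadoop.io.erasurecode.ErasureCodeConstants;
import org.apache.hadoop.io.erasurecode.ErasureCoderOptions;
import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureDecoder;

class DecodeSketch {
  static void decodeOneErasedUnit() throws IOException {
    // RS(3,2): three data units, two parity units.
    ErasureCoderOptions options = new ErasureCoderOptions(3, 2);
    RawErasureDecoder decoder = CodecUtil.createRawDecoder(
        new Configuration(), ErasureCodeConstants.RS_CODEC_NAME, options);

    int cellSize = 1024;
    ByteBuffer[] inputs = new ByteBuffer[5]; // data + parity, in unit order
    for (int i = 0; i < inputs.length; i++) {
      if (i != 1) { // pretend unit 1 was lost; leave its slot null
        inputs[i] = ByteBuffer.allocate(cellSize);
      }
    }
    ByteBuffer[] outputs = { ByteBuffer.allocate(cellSize) };
    decoder.decode(inputs, new int[] {1}, outputs); // rebuild unit 1
  }
}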
  dataset.datanode.getShortCircuitRegistry().processBlockMlockEvent(key);
  numBlocksCached.addAndGet(1);
  dataset.datanode.getMetrics().incrBlocksCached(1);
  success = true;
} finally {
DatanodeCommand cacheReport() throws IOException {
  // If caching is disabled, do not send a cache report
  if (dn.getFSDataset().getCacheCapacity() == 0) {
    return null;
  }
  // send cache report if timer has expired.
  DatanodeCommand cmd = null;
  final long startTime = monotonicNow();
  if (startTime - lastCacheReport > dnConf.cacheReportInterval) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Sending cacheReport from service actor: " + this);
    }
    lastCacheReport = startTime;

    String bpid = bpos.getBlockPoolId();
    List<Long> blockIds = dn.getFSDataset().getCacheReport(bpid);
    long createTime = monotonicNow();

    cmd = bpNamenode.cacheReport(bpRegistration, bpid, blockIds);
    long sendTime = monotonicNow();
    long createCost = createTime - startTime;
    long sendCost = sendTime - createTime;
    dn.getMetrics().addCacheReport(sendCost);
    if (LOG.isDebugEnabled()) {
      LOG.debug("CacheReport of " + blockIds.size()
          + " block(s) took " + createCost + " msecs to generate and "
          + sendCost + " msecs for RPC and NN processing");
    }
  }
  return cmd;
}
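// Most of these call sites share one pattern: capture a monotonic start time,
// perform the RPC or I/O, then record the elapsed milliseconds against a rate
// metric. A minimal sketch of that pattern using the metrics2 library; the
// class and metric names here are hypothetical (DataNodeMetrics itself builds
// its rates from @Metric annotations rather than an explicit registry call).

import org.apache.hadoop.metrics2.lib.MetricsRegistry;
import org.apache.hadoop.metrics2.lib.MutableRate;
import org.apache.hadoop.util.Time;

class ExampleMetrics {
  private final MetricsRegistry registry = new MetricsRegistry("example");
  private final MutableRate exampleOp =
      registry.newRate("exampleOp", "Time taken by the example operation");

  void addExampleOp(long latencyMs) {
    exampleOp.add(latencyMs); // updates both ExampleOpNumOps and ExampleOpAvgTime
  }

  void timedOperation() {
    final long startTime = Time.monotonicNow();
    // ... perform the RPC or disk operation here ...
    addExampleOp(Time.monotonicNow() - startTime);
  }
}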
if (replicaInfo != null) {
  if (!replicaInfo.getIsPersisted()) {
    datanode.getMetrics().incrRamDiskBlocksDeletedBeforeLazyPersisted();
dn.getMetrics().addBlockReport(brSendCost);
final int nCmds = cmds.size();
LOG.info((success ? "S" : "Uns") +
@Override // FsDatasetSpi
public InputStream getBlockInputStream(ExtendedBlock b,
    long seekOffset) throws IOException {

  ReplicaInfo info;
  synchronized (this) {
    info = volumeMap.get(b.getBlockPoolId(), b.getLocalBlock());
  }

  if (info != null && info.getVolume().isTransientStorage()) {
    ramDiskReplicaTracker.touch(b.getBlockPoolId(), b.getBlockId());
    datanode.getMetrics().incrRamDiskBlocksReadHits();
  }

  if (info != null && info.blockDataExists()) {
    return info.getDataInputStream(seekOffset);
  } else {
    throw new IOException("No data exists for block " + b);
  }
}
datanode.getMetrics().incrRamDiskBlocksEvicted();
datanode.getMetrics().addRamDiskBlocksEvictionWindowMs(
    Time.monotonicNow() - replicaState.getCreationTime());
if (replicaState.getNumReads() == 0) {
  datanode.getMetrics().incrRamDiskBlocksEvictedWithoutRead();
dn.getMetrics().addHeartbeat(scheduler.monotonicNow() - startTime);
dn.getMetrics().addHeartbeatTotal(
    scheduler.monotonicNow() - startTime);
@Override
public void run() {
  try {
    initDecoderIfNecessary();

    getStripedReader().init();

    stripedWriter.init();

    reconstruct();

    stripedWriter.endTargetBlocks();

    // Currently we don't check the acks for packets; this is similar to
    // block replication.
  } catch (Throwable e) {
    LOG.warn("Failed to reconstruct striped block: {}", getBlockGroup(), e);
    getDatanode().getMetrics().incrECFailedReconstructionTasks();
  } finally {
    getDatanode().decrementXmitsInProgress(getXmits());
    final DataNodeMetrics metrics = getDatanode().getMetrics();
    metrics.incrECReconstructionTasks();
    metrics.incrECReconstructionBytesRead(getBytesRead());
    metrics.incrECReconstructionRemoteBytesRead(getRemoteBytesRead());
    metrics.incrECReconstructionBytesWritten(getBytesWritten());
    getStripedReader().close();
    stripedWriter.close();
    cleanup();
  }
}
datanode.getMetrics().addWriteBlockOp(elapsed());
datanode.getMetrics().incrWritesFromClient(peer.isLocal(), size);
@Override
void reconstruct() throws IOException {
  while (getPositionInBlock() < getMaxTargetLength()) {
    DataNodeFaultInjector.get().stripedBlockReconstruction();
    long remaining = getMaxTargetLength() - getPositionInBlock();
    final int toReconstructLen = (int) Math.min(
        getStripedReader().getBufferSize(), remaining);

    long start = Time.monotonicNow();
    // step1: read from the minimum source DNs required for reconstruction.
    // The returned success list contains the source DNs we actually read from.
    getStripedReader().readMinimumSources(toReconstructLen);
    long readEnd = Time.monotonicNow();

    // step2: decode to reconstruct targets
    reconstructTargets(toReconstructLen);
    long decodeEnd = Time.monotonicNow();

    // step3: transfer data
    if (stripedWriter.transferData2Targets() == 0) {
      String error = "Transfer failed for all targets.";
      throw new IOException(error);
    }
    long writeEnd = Time.monotonicNow();

    // Only successful reconstructions are recorded.
    final DataNodeMetrics metrics = getDatanode().getMetrics();
    metrics.incrECReconstructionReadTime(readEnd - start);
    metrics.incrECReconstructionDecodingTime(decodeEnd - readEnd);
    metrics.incrECReconstructionWriteTime(writeEnd - decodeEnd);

    updatePositionInBlock(toReconstructLen);

    clearBuffers();
  }
}
  datanode.getMetrics().incrRamDiskBlocksWrite();
} catch (DiskOutOfSpaceException de) {
  datanode.getMetrics().incrRamDiskBlocksWriteFallback();
ramDiskReplicaTracker.addReplica(
    bpid, replicaInfo.getBlockId(), v, replicaInfo.getNumBytes());
datanode.getMetrics().addRamDiskBytesWrite(replicaInfo.getNumBytes());
  @Override
  public Boolean get() {
    MetricsRecordBuilder dnMetrics = getMetrics(dn.getMetrics().name());
    long blocksCached =
        MetricsAsserts.getLongCounter("BlocksCached", dnMetrics);
    return blocksCached > 0;
  }
}, 1000, 30000);
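// The supplier above is a fragment; judging by the trailing "}, 1000, 30000);"
// it is presumably the condition passed to GenericTestUtils.waitFor with a
// 1-second polling interval and a 30-second timeout. A sketch of the full call
// under that assumption (older Hadoop versions take a
// com.google.common.base.Supplier instead of java.util.function.Supplier):

GenericTestUtils.waitFor(new Supplier<Boolean>() {
  @Override
  public Boolean get() {
    MetricsRecordBuilder dnMetrics =
        MetricsAsserts.getMetrics(dn.getMetrics().name());
    return MetricsAsserts.getLongCounter("BlocksCached", dnMetrics) > 0;
  }
}, 1000, 30000);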
static void logIbrCounts(List<DataNode> datanodes) {
  final String name = "IncrementalBlockReportsNumOps";
  for (DataNode dn : datanodes) {
    final MetricsRecordBuilder m = MetricsAsserts.getMetrics(
        dn.getMetrics().name());
    final long ibr = MetricsAsserts.getLongCounter(name, m);
    LOG.info(dn.getDisplayName() + ": " + name + "=" + ibr);
  }
}
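// Both test snippets read counters by name from the DataNode's published
// metrics record. For a one-shot check, MetricsAsserts can also assert a
// counter value directly. A sketch of such a hypothetical helper; the
// expected value is supplied by the caller and is illustrative:

import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.test.MetricsAsserts;

static void assertIbrCount(DataNode dn, long expected) {
  MetricsRecordBuilder rb = MetricsAsserts.getMetrics(dn.getMetrics().name());
  MetricsAsserts.assertCounter("IncrementalBlockReportsNumOps", expected, rb);
}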