DataNode.getMetrics

Best Java code snippets using org.apache.hadoop.hdfs.server.datanode.DataNode.getMetrics (Showing top 20 results out of 315)

Common ways to obtain DataNode:

  • BlockScanner.Servlet blockScannerServlet; String str;
    DataNode d = (DataNode) blockScannerServlet.getServletContext().getAttribute(str);
  • BPOfferService bPOfferService;
    DataNode d = bPOfferService.getDataNode();
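
Every snippet on this page follows the same pattern: fetch the node's DataNodeMetrics via DataNode.getMetrics(), then increment a counter or record a latency on it. Below is a minimal sketch of that pattern, assuming you already hold a DataNode reference (for example via bPOfferService.getDataNode() above); the wrapper class and the Runnable parameter are illustrative only, while DataNodeMetrics, addHeartbeat and Time.monotonicNow are the same types and calls used in the heartbeat snippet further down.

import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics;
import org.apache.hadoop.util.Time;

/** Sketch only: the recurring getMetrics() pattern shown in the snippets below. */
class MetricsPatternSketch {
  // Record the latency of some datanode-side operation on the node's metrics.
  static void recordOperation(DataNode dn, Runnable work) {
    long start = Time.monotonicNow();
    work.run();                                   // the operation being timed (caller-supplied)
    DataNodeMetrics metrics = dn.getMetrics();    // shared metrics instance for this DataNode
    metrics.addHeartbeat(Time.monotonicNow() - start);  // same call as in BPServiceActor's heartbeat path
  }
}
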
origin: org.apache.hadoop/hadoop-hdfs

@Override
public void onCompleteLazyPersist(String bpId, long blockId,
  long creationTime, File[] savedFiles, FsVolumeImpl targetVolume) {
 try (AutoCloseableLock lock = datasetLock.acquire()) {
  ramDiskReplicaTracker.recordEndLazyPersist(bpId, blockId, savedFiles);
  targetVolume.incDfsUsedAndNumBlocks(bpId, savedFiles[0].length()
    + savedFiles[1].length());
  // Update metrics (ignore the metadata file size)
  datanode.getMetrics().incrRamDiskBlocksLazyPersisted();
  datanode.getMetrics().incrRamDiskBytesLazyPersisted(savedFiles[1].length());
  datanode.getMetrics().addRamDiskBlocksLazyPersistWindowMs(
    Time.monotonicNow() - creationTime);
  if (LOG.isDebugEnabled()) {
   LOG.debug("LazyWriter: Finish persisting RamDisk block: "
     + " block pool Id: " + bpId + " block id: " + blockId
     + " to block file " + savedFiles[1] + " and meta file " + savedFiles[0]
     + " on target volume " + targetVolume);
  }
 }
}
origin: org.apache.hadoop/hadoop-hdfs

 @Override
 public void run() {
  Value value;
  if (shouldDefer()) {
   deferredUncachingExecutor.schedule(
     this, revocationPollingMs, TimeUnit.MILLISECONDS);
   return;
  }
  synchronized (FsDatasetCache.this) {
   value = mappableBlockMap.get(key);
  }
  Preconditions.checkNotNull(value);
  Preconditions.checkArgument(value.state == State.UNCACHING);
  IOUtils.closeQuietly(value.mappableBlock);
  synchronized (FsDatasetCache.this) {
   mappableBlockMap.remove(key);
  }
  long newUsedBytes = release(value.mappableBlock.getLength());
  numBlocksCached.addAndGet(-1);
  dataset.datanode.getMetrics().incrBlocksUncached(1);
  if (revocationTimeMs != 0) {
   LOG.debug("Uncaching of {} completed. usedBytes = {}",
     key, newUsedBytes);
  } else {
   LOG.debug("Deferred uncaching of {} completed. usedBytes = {}",
     key, newUsedBytes);
  }
 }
}
origin: org.apache.hadoop/hadoop-hdfs

VolumeScanner(Conf conf, DataNode datanode, FsVolumeReference ref) {
 this.conf = conf;
 this.datanode = datanode;
 this.metrics = datanode.getMetrics();
 this.ref = ref;
 this.volume = ref.getVolume();
 ScanResultHandler handler;
 try {
  handler = conf.resultHandler.newInstance();
 } catch (Throwable e) {
  LOG.error("unable to instantiate {}", conf.resultHandler, e);
  handler = new ScanResultHandler();
 }
 this.resultHandler = handler;
 setName("VolumeScannerThread(" + volume + ")");
 setDaemon(true);
}
origin: org.apache.hadoop/hadoop-hdfs

private void sendLifelineIfDue() throws IOException {
 long startTime = scheduler.monotonicNow();
 if (!scheduler.isLifelineDue(startTime)) {
  if (LOG.isDebugEnabled()) {
   LOG.debug("Skipping sending lifeline for " + BPServiceActor.this
     + ", because it is not due.");
  }
  return;
 }
 if (dn.areHeartbeatsDisabledForTests()) {
  if (LOG.isDebugEnabled()) {
   LOG.debug("Skipping sending lifeline for " + BPServiceActor.this
     + ", because heartbeats are disabled for tests.");
  }
  return;
 }
 sendLifeline();
 dn.getMetrics().addLifeline(scheduler.monotonicNow() - startTime);
 scheduler.scheduleNextLifeline(scheduler.monotonicNow());
}
origin: org.apache.hadoop/hadoop-hdfs

BPServiceActor(InetSocketAddress nnAddr, InetSocketAddress lifelineNnAddr,
  BPOfferService bpos) {
 this.bpos = bpos;
 this.dn = bpos.getDataNode();
 this.nnAddr = nnAddr;
 this.lifelineSender = lifelineNnAddr != null ?
   new LifelineSender(lifelineNnAddr) : null;
 this.initialRegistrationComplete = lifelineNnAddr != null ?
   new CountDownLatch(1) : null;
 this.dnConf = dn.getDnConf();
 this.ibrManager = new IncrementalBlockReportManager(
   dnConf.ibrInterval,
   dn.getMetrics());
 prevBlockReportId = ThreadLocalRandom.current().nextLong();
 scheduler = new Scheduler(dnConf.heartBeatInterval,
   dnConf.getLifelineIntervalMs(), dnConf.blockReportInterval,
   dnConf.outliersReportIntervalMs);
 // get the value of maxDataLength.
 this.maxDataLength = dnConf.getMaxDataLength();
}
origin: org.apache.hadoop/hadoop-hdfs

private void reconstructTargets(int toReconstructLen) throws IOException {
 ByteBuffer[] inputs = getStripedReader().getInputBuffers(toReconstructLen);
 int[] erasedIndices = stripedWriter.getRealTargetIndices();
 ByteBuffer[] outputs = stripedWriter.getRealTargetBuffers(toReconstructLen);
 long start = System.nanoTime();
 getDecoder().decode(inputs, erasedIndices, outputs);
 long end = System.nanoTime();
 this.getDatanode().getMetrics().incrECDecodingTime(end - start);
 stripedWriter.updateRealTargetBuffers(toReconstructLen);
}
origin: org.apache.hadoop/hadoop-hdfs

 dataset.datanode.getShortCircuitRegistry().processBlockMlockEvent(key);
 numBlocksCached.addAndGet(1);
 dataset.datanode.getMetrics().incrBlocksCached(1);
 success = true;
} finally {
origin: org.apache.hadoop/hadoop-hdfs

DatanodeCommand cacheReport() throws IOException {
 // If caching is disabled, do not send a cache report
 if (dn.getFSDataset().getCacheCapacity() == 0) {
  return null;
 }
 // send cache report if timer has expired.
 DatanodeCommand cmd = null;
 final long startTime = monotonicNow();
 if (startTime - lastCacheReport > dnConf.cacheReportInterval) {
  if (LOG.isDebugEnabled()) {
   LOG.debug("Sending cacheReport from service actor: " + this);
  }
  lastCacheReport = startTime;
  String bpid = bpos.getBlockPoolId();
  List<Long> blockIds = dn.getFSDataset().getCacheReport(bpid);
  long createTime = monotonicNow();
  cmd = bpNamenode.cacheReport(bpRegistration, bpid, blockIds);
  long sendTime = monotonicNow();
  long createCost = createTime - startTime;
  long sendCost = sendTime - createTime;
  dn.getMetrics().addCacheReport(sendCost);
  if (LOG.isDebugEnabled()) {
   LOG.debug("CacheReport of " + blockIds.size()
     + " block(s) took " + createCost + " msec to generate and "
     + sendCost + " msecs for RPC and NN processing");
  }
 }
 return cmd;
}
origin: org.apache.hadoop/hadoop-hdfs

if (replicaInfo != null) {
 if (!replicaInfo.getIsPersisted()) {
  datanode.getMetrics().incrRamDiskBlocksDeletedBeforeLazyPersisted();
origin: org.apache.hadoop/hadoop-hdfs

dn.getMetrics().addBlockReport(brSendCost);
final int nCmds = cmds.size();
LOG.info((success ? "S" : "Uns") +
origin: org.apache.hadoop/hadoop-hdfs

@Override // FsDatasetSpi
public InputStream getBlockInputStream(ExtendedBlock b,
  long seekOffset) throws IOException {
 ReplicaInfo info;
 synchronized(this) {
  info = volumeMap.get(b.getBlockPoolId(), b.getLocalBlock());
 }
 if (info != null && info.getVolume().isTransientStorage()) {
  ramDiskReplicaTracker.touch(b.getBlockPoolId(), b.getBlockId());
  datanode.getMetrics().incrRamDiskBlocksReadHits();
 }
 if(info != null && info.blockDataExists()) {
  return info.getDataInputStream(seekOffset);
 } else {
  throw new IOException("No data exists for block " + b);
 }
}
origin: org.apache.hadoop/hadoop-hdfs

datanode.getMetrics().incrRamDiskBlocksEvicted();
datanode.getMetrics().addRamDiskBlocksEvictionWindowMs(
  Time.monotonicNow() - replicaState.getCreationTime());
if (replicaState.getNumReads() == 0) {
 datanode.getMetrics().incrRamDiskBlocksEvictedWithoutRead();
origin: org.apache.hadoop/hadoop-hdfs

 dn.getMetrics().addHeartbeat(scheduler.monotonicNow() - startTime);
dn.getMetrics().addHeartbeatTotal(
  scheduler.monotonicNow() - startTime);
origin: org.apache.hadoop/hadoop-hdfs

@Override
public void run() {
 try {
  initDecoderIfNecessary();
  getStripedReader().init();
  stripedWriter.init();
  reconstruct();
  stripedWriter.endTargetBlocks();
  // Currently we don't check the acks for packets, this is similar as
  // block replication.
 } catch (Throwable e) {
  LOG.warn("Failed to reconstruct striped block: {}", getBlockGroup(), e);
  getDatanode().getMetrics().incrECFailedReconstructionTasks();
 } finally {
  getDatanode().decrementXmitsInProgress(getXmits());
  final DataNodeMetrics metrics = getDatanode().getMetrics();
  metrics.incrECReconstructionTasks();
  metrics.incrECReconstructionBytesRead(getBytesRead());
  metrics.incrECReconstructionRemoteBytesRead(getRemoteBytesRead());
  metrics.incrECReconstructionBytesWritten(getBytesWritten());
  getStripedReader().close();
  stripedWriter.close();
  cleanup();
 }
}
origin: org.apache.hadoop/hadoop-hdfs

datanode.getMetrics().addWriteBlockOp(elapsed());
datanode.getMetrics().incrWritesFromClient(peer.isLocal(), size);
origin: org.apache.hadoop/hadoop-hdfs

@Override
void reconstruct() throws IOException {
 while (getPositionInBlock() < getMaxTargetLength()) {
  DataNodeFaultInjector.get().stripedBlockReconstruction();
  long remaining = getMaxTargetLength() - getPositionInBlock();
  final int toReconstructLen =
    (int) Math.min(getStripedReader().getBufferSize(), remaining);
  long start = Time.monotonicNow();
  // step1: read from minimum source DNs required for reconstruction.
  // The returned success list is the source DNs we do real read from
  getStripedReader().readMinimumSources(toReconstructLen);
  long readEnd = Time.monotonicNow();
  // step2: decode to reconstruct targets
  reconstructTargets(toReconstructLen);
  long decodeEnd = Time.monotonicNow();
  // step3: transfer data
  if (stripedWriter.transferData2Targets() == 0) {
   String error = "Transfer failed for all targets.";
   throw new IOException(error);
  }
  long writeEnd = Time.monotonicNow();
  // Only the succeed reconstructions are recorded.
  final DataNodeMetrics metrics = getDatanode().getMetrics();
  metrics.incrECReconstructionReadTime(readEnd - start);
  metrics.incrECReconstructionDecodingTime(decodeEnd - readEnd);
  metrics.incrECReconstructionWriteTime(writeEnd - decodeEnd);
  updatePositionInBlock(toReconstructLen);
  clearBuffers();
 }
}
origin: org.apache.hadoop/hadoop-hdfs

 datanode.getMetrics().incrRamDiskBlocksWrite();
} catch (DiskOutOfSpaceException de) {
datanode.getMetrics().incrRamDiskBlocksWriteFallback();
origin: org.apache.hadoop/hadoop-hdfs

ramDiskReplicaTracker.addReplica(
  bpid, replicaInfo.getBlockId(), v, replicaInfo.getNumBytes());
datanode.getMetrics().addRamDiskBytesWrite(replicaInfo.getNumBytes());
origin: ch.cern.hadoop/hadoop-hdfs

 @Override
 public Boolean get() {
  MetricsRecordBuilder dnMetrics = getMetrics(dn.getMetrics().name());
  long blocksCached =
    MetricsAsserts.getLongCounter("BlocksCached", dnMetrics);
  return blocksCached > 0;
 }
}, 1000, 30000);
origin: ch.cern.hadoop/hadoop-hdfs

static void logIbrCounts(List<DataNode> datanodes) {
 final String name = "IncrementalBlockReportsNumOps";
 for(DataNode dn : datanodes) {
  final MetricsRecordBuilder m = MetricsAsserts.getMetrics(
    dn.getMetrics().name());
  final long ibr = MetricsAsserts.getLongCounter(name, m);
  LOG.info(dn.getDisplayName() + ": " + name + "=" + ibr);
 }
}
org.apache.hadoop.hdfs.server.datanode.DataNode.getMetrics

Popular methods of DataNode

  • shutdown
    Shut down this instance of the datanode. Returns only after shutdown is complete.
  • createDataNode
    Instantiate & start a single datanode daemon and wait for it to finish.
  • createInterDataNodeProtocolProxy
  • getConf
  • instantiateDataNode
    Instantiate a single datanode object, along with its secure resources.
  • runDatanodeDaemon
    Start a single datanode daemon and wait for it to finish.
  • <init>
    Create the DataNode given a configuration, an array of dataDirs, and a namenode proxy.
  • getStartupOption
  • getXceiverCount
    Number of concurrent xceivers per node.
  • parseArguments
    Parse and verify command line arguments and set configuration parameters.
  • recoverBlocks
  • syncBlock
    Block synchronization.
  • checkDiskError
  • handleDiskError
  • join
  • makeInstance
  • newSocket
  • notifyNamenodeDeletedBlock
  • notifyNamenodeReceivedBlock
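
To see a few of these methods used together, here is a hedged test-style sketch, assuming a running MiniDFSCluster from the hadoop-hdfs test artifacts; the class name InspectDataNodes is hypothetical, while getDataNodes, getDisplayName, getXceiverCount, getMetrics().name() and shutdown are the calls listed above or used in the snippets.

import java.util.List;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.datanode.DataNode;

/** Sketch only: exercise a few popular DataNode methods against a test cluster. */
class InspectDataNodes {
  static void inspectAndStop(MiniDFSCluster cluster) {
    List<DataNode> datanodes = cluster.getDataNodes();
    for (DataNode dn : datanodes) {
      // getXceiverCount: number of concurrent xceivers on this node
      System.out.println(dn.getDisplayName() + ": xceivers=" + dn.getXceiverCount());
      // getMetrics().name() identifies this node's metrics record (as in logIbrCounts above)
      System.out.println(dn.getDisplayName() + ": metrics record=" + dn.getMetrics().name());
      // shutdown() returns only after shutdown is complete
      dn.shutdown();
    }
  }
}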
