@Override // ClientDatanodeProtocol public void triggerBlockReport(BlockReportOptions options) throws IOException { checkSuperuserPrivilege(); for (BPOfferService bpos : blockPoolManager.getAllNamenodeThreads()) { if (bpos != null) { for (BPServiceActor actor : bpos.getBPServiceActors()) { actor.triggerBlockReport(options); } } } }
/**
 * Return the parent directory path where this replica is located.
 *
 * @return the parent directory path where this replica is located
 */
protected File getDir() {
  if (hasSubdirs) {
    // Replica lives in a hashed subdirectory derived from its block id.
    return DatanodeUtil.idToBlockDir(baseDir, getBlockId());
  }
  return baseDir;
}
/**
 * Queue a bad-block report for the given block with every service actor.
 *
 * @param block the corrupt block to report
 * @param storageUuid UUID of the storage holding the block
 * @param storageType type of the storage holding the block
 */
void reportBadBlocks(ExtendedBlock block, String storageUuid,
    StorageType storageType) {
  checkBlock(block);
  for (BPServiceActor actor : bpServices) {
    // One action instance per actor, matching each actor's own queue.
    ReportBadBlockAction action =
        new ReportBadBlockAction(block, storageUuid, storageType);
    actor.bpThreadEnqueue(action);
  }
}
LOG.info("killing datanode " + name + " / " + lookup); ipcPort = dn.ipcServer.getListenerAddress().getPort(); dn.shutdown(); LOG.info("killed datanode " + name + " / " + lookup); break;
/**
 * A data node is considered to be up if one of the bp services is up.
 *
 * @return true if at least one block-pool service reports itself alive
 */
public boolean isDatanodeUp() {
  for (BPOfferService bp : blockPoolManager.getAllNamenodeThreads()) {
    // Guard against null entries, consistently with triggerBlockReport()
    // which null-checks elements of the same list.
    if (bp != null && bp.isAlive()) {
      return true;
    }
  }
  return false;
}
/**
 * Ask each of the actors to schedule a block report after
 * the specified delay.
 *
 * @param delay delay before the report (units per the scheduler's contract)
 */
void scheduleBlockReport(long delay) {
  bpServices.forEach(actor -> actor.getScheduler().scheduleBlockReport(delay));
}
/**
 * Wait for every namenode thread to terminate.
 */
void joinAll() {
  for (BPOfferService bpos : getAllNamenodeThreads()) {
    bpos.join();
  }
}
// Stop the periodic background scanners: first the directory scanner,
// then drop all per-volume block scanners.
private void shutdownPeriodicScanners() {
  shutdownDirectoryScanner();
  blockScanner.removeAllVolumeScanners();
}
@Override // ReplicaInPipeline public synchronized ChunkChecksum getLastChecksumAndDataLen() { return new ChunkChecksum(getBytesOnDisk(), lastChecksum); }
/**
 * Arrange for the data node to send a block report to every namenode
 * at the next heartbeat.
 *
 * @param delay delay before the report is scheduled
 */
public void scheduleAllBlockReport(long delay) {
  blockPoolManager.getAllNamenodeThreads()
      .forEach(bpos -> bpos.scheduleBlockReport(delay));
}
/**
 * Report a bad block on another DN (eg if we received a corrupt replica
 * from a remote host).
 *
 * @param srcDataNode the DN hosting the bad block
 * @param block the block itself
 * @throws IOException if the report cannot be delivered
 */
public void reportRemoteBadBlock(DatanodeInfo srcDataNode, ExtendedBlock block)
    throws IOException {
  getBPOSForBlock(block).reportRemoteBadBlock(srcDataNode, block);
}
/**
 * Called by the DN to report an error to the NNs.
 *
 * @param errCode error code to report
 * @param errMsg human-readable error message
 */
void trySendErrorReport(int errCode, String errMsg) {
  for (BPServiceActor actor : bpServices) {
    actor.bpThreadEnqueue(new ErrorReportAction(errCode, errMsg));
  }
}
/**
 * Cancels a running plan. Requires superuser privilege.
 *
 * @param planID - Hash string that identifies a plan
 * @throws IOException if the caller lacks privilege or cancellation fails
 */
@Override
public void cancelDiskBalancePlan(String planID) throws IOException {
  checkSuperuserPrivilege();
  getDiskBalancer().cancelPlan(planID);
}
/**
 * Returns the status of current or last executed work plan.
 * Requires superuser privilege.
 *
 * @return DiskBalancerWorkStatus
 * @throws IOException if the caller lacks privilege or the query fails
 */
@Override
public DiskBalancerWorkStatus queryDiskBalancerPlan() throws IOException {
  checkSuperuserPrivilege();
  return getDiskBalancer().queryWorkStatus();
}
/**
 * Open an output stream to this replica's block file.
 *
 * @param append whether to append to the existing file
 * @return the opened stream
 * @throws IOException if the file cannot be opened
 */
@Override
public OutputStream getDataOutputStream(boolean append) throws IOException {
  final FileIoProvider ioProvider = getFileIoProvider();
  return ioProvider.getFileOutputStream(getVolume(), getBlockFile(), append);
}
/**
 * Delete this replica's metadata file.
 *
 * @return true if the file was fully deleted
 */
@Override
public boolean deleteMetadata() {
  final FileIoProvider ioProvider = getFileIoProvider();
  return ioProvider.fullyDelete(getVolume(), getMetaFile());
}
/**
 * Inflates bytesCopied and returns true or false. This allows us to stop
 * copying if we have reached close enough.
 *
 * @param item DiskBalancerWorkItem
 * @return false if we need to copy more, true if we are done
 */
private boolean isCloseEnough(DiskBalancerWorkItem item) {
  // Bytes copied, inflated by the per-item tolerance percentage.
  long tolerated = item.getBytesCopied()
      + ((item.getBytesCopied() * getBlockTolerancePercentage(item)) / 100);
  // Done once the target falls within the tolerated amount already copied.
  // (Replaces the redundant "(cond) ? false : true" ternary.)
  return item.getBytesToCopy() < tolerated;
}
@Override // DataNodeMXBean public String getDiskBalancerStatus() { try { return getDiskBalancer().queryWorkStatus().toJsonString(); } catch (IOException ex) { LOG.debug("Reading diskbalancer Status failed. ex:{}", ex); return ""; } }
/**
 * Block until the cluster has at least {@code numReplicas} datanodes and
 * every datanode has fully started.
 *
 * @param numReplicas minimum number of datanodes to wait for
 * @throws Exception if interrupted while sleeping
 */
public void waitForNumReplicas(int numReplicas) throws Exception {
  // Wait for the cluster to spin up enough datanodes.
  while (UTIL.getDFSCluster().getDataNodes().size() < numReplicas) {
    Thread.sleep(100);
  }
  // One pass over all datanodes is sufficient; the original wrapped this in
  // a redundant "for (i < numReplicas)" loop that re-checked the same
  // datanodes numReplicas times with no added guarantee.
  for (DataNode dn : UTIL.getDFSCluster().getDataNodes()) {
    while (!dn.isDatanodeFullyStarted()) {
      Thread.sleep(100);
    }
  }
}
}