/** Test that getInvalidateBlocks observes the max limit. */
public void testGetInvalidateBlocks() throws Exception {
  final int MAX_BLOCKS = 10;
  final int REMAINING_BLOCKS = 2;
  final int MAX_LIMIT = MAX_BLOCKS - REMAINING_BLOCKS;

  DatanodeDescriptor dd = new DatanodeDescriptor();
  ArrayList<Block> blockList = new ArrayList<Block>(MAX_BLOCKS);
  for (int i = 0; i < MAX_BLOCKS; i++) {
    blockList.add(new Block(i, 0, GenerationStamp.FIRST_VALID_STAMP));
  }
  dd.addBlocksToBeInvalidated(blockList);

  // First request must be capped at the limit, not return all 10 blocks.
  // Note: assertEquals takes (expected, actual) — the original had them
  // swapped, which produces misleading failure messages.
  BlockCommand bc = dd.getInvalidateBlocks(MAX_LIMIT);
  assertEquals(MAX_LIMIT, bc.getBlocks().length);

  // Second request drains whatever is left over.
  bc = dd.getInvalidateBlocks(MAX_LIMIT);
  assertEquals(REMAINING_BLOCKS, bc.getBlocks().length);
}
/** returns true if the node does not already exists and is added. * false if the node already exists.*/ boolean addNode(Block b, DatanodeDescriptor node, int replication) { // insert into the map if not there yet BlockInfo info = checkBlockInfo(b, replication); // add block to the data-node list and the node to the block info return node.addBlock(info); }
private void updateStats(DatanodeDescriptor node, boolean isAdded) { // // The statistics are protected by the heartbeat lock // For decommissioning/decommissioned nodes, only used capacity // is counted. // assert (Thread.holdsLock(heartbeats)); if (isAdded) { capacityUsed += node.getDfsUsed(); capacityNamespaceUsed += node.getNamespaceUsed(); totalLoad += node.getXceiverCount(); if (!(node.isDecommissionInProgress() || node.isDecommissioned())) { capacityTotal += node.getCapacity(); capacityRemaining += node.getRemaining(); } else { capacityTotal += node.getDfsUsed(); } } else { capacityUsed -= node.getDfsUsed(); capacityNamespaceUsed -= node.getNamespaceUsed(); totalLoad -= node.getXceiverCount(); if (!(node.isDecommissionInProgress() || node.isDecommissioned())) { capacityTotal -= node.getCapacity(); capacityRemaining -= node.getRemaining(); } else { capacityTotal -= node.getDfsUsed(); } } }
/** This method is defined for compatibility reason. */
private static DatanodeDescriptor[] readDatanodeDescriptorArray(DataInput in)
    throws IOException {
  // The stream carries the element count first, then each descriptor in
  // the legacy FSEditLog format.
  final int count = in.readInt();
  final DatanodeDescriptor[] locations = new DatanodeDescriptor[count];
  for (int idx = 0; idx < count; idx++) {
    DatanodeDescriptor descriptor = new DatanodeDescriptor();
    descriptor.readFieldsFromFSEditLog(in);
    locations[idx] = descriptor;
  }
  return locations;
}
/**
 * Stop decommissioning the specified datanode.
 *
 * @param node the datanode whose decommissioning should be cancelled
 * @throws IOException declared for caller compatibility — NOTE(review):
 *         neither call below visibly throws it; confirm before removing
 */
public void stopDecommission (DatanodeDescriptor node) throws IOException {
  LOG.info("Stop Decommissioning node " + node.getName());
  node.stopDecommission();
}
/**
 * Add ({@code isAdded == true}) or subtract ({@code isAdded == false})
 * the given datanode's capacity, usage and load figures from the
 * cluster-wide aggregate statistics.
 */
private void updateStats(DatanodeDescriptor node, boolean isAdded) {
  //
  // The statistics are protected by the heartbeat lock
  //
  assert(Thread.holdsLock(heartbeats));
  if (isAdded) {
    // Node joined: fold its figures into the cluster totals.
    capacityTotal += node.getCapacity();
    capacityUsed += node.getDfsUsed();
    capacityRemaining += node.getRemaining();
    totalLoad += node.getXceiverCount();
  } else {
    // Node left: remove its contribution again.
    capacityTotal -= node.getCapacity();
    capacityUsed -= node.getDfsUsed();
    capacityRemaining -= node.getRemaining();
    totalLoad -= node.getXceiverCount();
  }
}
/**
Log logr = FSNamesystem.LOG; if (node.isDecommissionInProgress() || node.isDecommissioned()) { logr.debug("Node "+NodeBase.getPath(node)+ " is not chosen because the node is (being) decommissioned"); long remaining = node.getRemaining() - (node.getBlocksScheduled() * blockSize); avgLoad = (double)fs.getTotalLoad()/size; if (node.getXceiverCount() > (2.0 * avgLoad)) { logr.debug("Node "+NodeBase.getPath(node)+ " is not chosen because the node is too busy"); String rackname = node.getNetworkLocation(); int counter=1; for(Iterator<DatanodeDescriptor> iter = results.iterator();
+ "node from name: " + nodeN.getName()); + "node " + nodeS.getName() + " is replaced by " + nodeReg.getName() + " with the same storageID " + nodeS.updateRegInfo(nodeReg); nodeS.setHostName(hostName); heartbeats.add(nodeS); nodeS.updateHeartbeat(0L, 0L, 0L, 0); nodeS.isAlive = true; = new DatanodeDescriptor(nodeReg, NetworkTopology.DEFAULT_RACK, hostName); resolveNetworkLocation(nodeDescr); unprotectedAddDatanode(nodeDescr);
boolean isDead = isDatanodeDead(dn); mustList.remove(dn.getName()); mustList.remove(dn.getHost()); mustList.remove(dn.getHostName()); mustList.remove(dn.getHostName() + ":" + dn.getPort()); if (!isDead && listLiveNodes && this.inHostsList(dn, null)) { nodes.add(dn); for (Iterator<String> it = mustList.iterator(); it.hasNext();) { DatanodeDescriptor dn = new DatanodeDescriptor(new DatanodeID(it.next())); dn.setStartTime(0); dn.setLastUpdate(0); nodes.add(dn);
dd.addBlockToBeReplicated( new Block(i, 0, GenerationStamp.FIRST_VALID_STAMP), ONE_TARGET); nodeReg, dd.getCapacity(), dd.getDfsUsed(), dd.getRemaining(), 0, 0, 0); assertEquals(1, cmds.length); assertEquals(DatanodeProtocol.DNA_TRANSFER, cmds[0].getAction()); blockList.add(new Block(i, 0, GenerationStamp.FIRST_VALID_STAMP)); dd.addBlocksToBeInvalidated(blockList); nodeReg, dd.getCapacity(), dd.getDfsUsed(), dd.getRemaining(), 0, 0, 0); assertEquals(2, cmds.length); assertEquals(DatanodeProtocol.DNA_TRANSFER, cmds[0].getAction()); nodeReg, dd.getCapacity(), dd.getDfsUsed(), dd.getRemaining(), 0, 0, 0); assertEquals(2, cmds.length); assertEquals(DatanodeProtocol.DNA_TRANSFER, cmds[0].getAction()); nodeReg, dd.getCapacity(), dd.getDfsUsed(), dd.getRemaining(), 0, 0, 0); assertEquals(1, cmds.length); assertEquals(DatanodeProtocol.DNA_INVALIDATE, cmds[0].getAction()); nodeReg, dd.getCapacity(), dd.getDfsUsed(), dd.getRemaining(), 0, 0, 0); assertEquals(null, cmds);
if (nodeinfo != null && nodeinfo.isDisallowed()) { setDatanodeDead(nodeinfo); throw new DisallowedDatanodeException(nodeinfo); nodeinfo.updateHeartbeat(capacity, dfsUsed, remaining, namespaceUsed, xceiverCount); updateStats(nodeinfo, true); cmd = nodeinfo.getLeaseRecoveryCommand(Integer.MAX_VALUE); if (cmd != null) { return new DatanodeCommand[]{cmd}; cmd = nodeinfo.getReplicationCommand(maxReplicationStreams - xmitsInProgress); if (cmd != null) { cmd = nodeinfo.getInvalidateBlocks(blockInvalidateLimit); if (cmd != null) { cmds.add(cmd);
/**
 * Start decommissioning the specified datanode.
 *
 * @param node the datanode to decommission
 */
private void startDecommission(DatanodeDescriptor node) throws IOException {
  if (!node.isDecommissionInProgress() && !node.isDecommissioned()) {
    LOG.info("Start Decommissioning node " + node.getName()
        + " with " + node.numBlocks() + " blocks.");
    synchronized (heartbeats) {
      // Remove the node's stats under its old admin state, flip the
      // state, then re-add them so the decommission accounting rules in
      // updateStats apply to it from now on.
      updateStats(node, false);
      node.startDecommission();
      updateStats(node, true);
    }
    // Hand the node to the decommission monitor; record a start time
    // only if the monitor newly accepted it.
    if (((Monitor) dnthread.getRunnable()).startDecommision(node)) {
      node.setStartTime(now());
    }
  } else if (node.isDecommissionInProgress()) {
    // Already decommissioning — NOTE(review): presumably this re-registers
    // the node with the monitor and refreshes its start time; confirm
    // against Monitor.startDecommision's contract.
    if (((Monitor) dnthread.getRunnable()).startDecommision(node)) {
      node.setStartTime(now());
    }
  }
}
mustList.remove(dn.getName()); mustList.remove(dn.getHost()); mustList.remove(dn.getHostName()); for (Iterator<String> it = mustList.keySet().iterator(); it.hasNext();) { DatanodeDescriptor dn = new DatanodeDescriptor(new DatanodeID(it.next())); dn.setLastUpdate(0); nodes.add(dn);
/**
 * Public method that serializes the information about a
 * Datanode to be stored in the fsImage.
 *
 * @param out the stream to write the serialized form to
 * @throws IOException if writing to the stream fails
 */
public void write(DataOutput out) throws IOException {
  // Write the node's identity fields via DatanodeID first.
  new DatanodeID(node).write(out);
  // Then the dynamic fields, in a fixed order the reader must match.
  out.writeLong(node.getCapacity());
  out.writeLong(node.getRemaining());
  out.writeLong(node.getLastUpdate());
  out.writeInt(node.getXceiverCount());
}
/** * Start decommissioning the specified datanode. */ private void startDecommission (DatanodeDescriptor node) throws IOException { if (!node.isDecommissionInProgress() && !node.isDecommissioned()) { LOG.info("Start Decommissioning node " + node.getName()); node.startDecommission(); // // all the blocks that reside on this node have to be // replicated. Iterator<Block> decommissionBlocks = node.getBlockIterator(); while(decommissionBlocks.hasNext()) { Block block = decommissionBlocks.next(); updateNeededReplications(block, -1, 0); } } }
public void testBlocksCounter() throws Exception { DatanodeDescriptor dd = new DatanodeDescriptor(); assertEquals(0, dd.numBlocks()); BlockInfo blk = new BlockInfo(new Block(1L), 1); BlockInfo blk1 = new BlockInfo(new Block(2L), 2); // add first block assertTrue(dd.addBlock(blk)); assertEquals(1, dd.numBlocks()); // remove a non-existent block assertFalse(dd.removeBlock(blk1)); assertEquals(1, dd.numBlocks()); // add an existent block assertFalse(dd.addBlock(blk)); assertEquals(1, dd.numBlocks()); // add second block assertTrue(dd.addBlock(blk1)); assertEquals(2, dd.numBlocks()); // remove first block assertTrue(dd.removeBlock(blk)); assertEquals(1, dd.numBlocks()); // remove second block assertTrue(dd.removeBlock(blk1)); assertEquals(0, dd.numBlocks()); } }
nodeBeingCheck.getName()); fsnamesystem.readLock(); int numOfBlocks; try { if (!nodeBeingCheck.isDecommissionInProgress()) { return true; numOfBlocks = nodeBeingCheck.numBlocks(); } finally { fsnamesystem.readUnlock(); fsnamesystem.writeLock(); try { Iterator<Block> it = nodeBeingCheck.getBlockIterator(); fsnamesystem.readLock(); try { for( Iterator<Block> it = nodeBeingCheck.getBlockIterator(); it.hasNext(); ) { final Block block = fsnamesystem.isReplicationInProgress( try { handlePendingStopDecommission(); if (!nodeBeingCheck.isDecommissionInProgress()) { return true; nodeBeingCheck.setDecommissioned(); LOG.info("Decommission complete for node " + nodeBeingCheck.getName()); return true;
used = datanode.getDfsUsed(); remaining = datanode.getRemaining(); nonDFSUsed = datanode.getNonDfsUsed(); configCapacity = datanode.getCapacity(); percentUsed = datanode.getDfsUsedPercent(); percentRemaining = datanode.getRemainingPercent();
NameNode.stateChangeLog.info( "BLOCK* NameSystem.addStoredBlock: " + "Targets updated: block " + block + " on " + node.getName() + " is added as a target for block " + storedBlock + " with size " + block.getNumBytes()); boolean added = node.addBlock(storedBlock); LOG.warn("Mark new replica " + block + " from " + node.getName() + "as corrupt because its length " + block.getNumBytes() + " is not valid"); } else if (cursize != block.getNumBytes()) { String logMsg = "Inconsistent size for block " + block + " reported from " + node.getName() + " current size is " + cursize + " reported size is " + block.getNumBytes(); LOG.warn("Mark new replica " + block + " from " + node.getName() + "as corrupt because its length is shorter than existing ones"); markBlockAsCorrupt(block, node); for (; it != null && it.hasNext(); ) { DatanodeDescriptor dd = it.next(); if (!dd.equals(node)) { nodes[count++] = dd; LOG.warn("Mark existing replica " + block + " from " + node.getName() + " as corrupt because its length is shorter than the new one");
/** * Change, if appropriate, the admin state of a datanode to * decommission completed. Return true if decommission is complete. */ boolean checkDecommissionStateInternal(DatanodeDescriptor node) { // // Check to see if all blocks in this decommissioned // node has reached their target replication factor. // if (node.isDecommissionInProgress()) { if (!isReplicationInProgress(node)) { node.setDecommissioned(); LOG.info("Decommission complete for node " + node.getName()); } } if (node.isDecommissioned()) { return true; } return false; }