/** * Get all valid locations of the block & add the block to results * return the length of the added block; 0 if the block is not added */ private long addBlock(Block block, List<BlockWithLocations> results) { ArrayList<String> machineSet = new ArrayList<String>(blocksMap.numNodes(block)); for(Iterator<DatanodeDescriptor> it = blocksMap.nodeIterator(block); it.hasNext();) { String storageID = it.next().getStorageID(); // filter invalidate replicas Collection<Block> blocks = recentInvalidateSets.get(storageID); if(blocks==null || !blocks.contains(block)) { machineSet.add(storageID); } } if(machineSet.size() == 0) { return 0; } else { results.add(new BlockWithLocations(block, machineSet.toArray(new String[machineSet.size()]))); return block.getNumBytes(); } }
/**
 * Registers block {@code b} in the map as belonging to file {@code iNode}.
 *
 * @param b     the block to add
 * @param iNode the file inode the block belongs to
 * @return the {@link BlockInfo} entry now stored for the block
 */
BlockInfo addINode(Block b, INodeFile iNode) {
  final BlockInfo entry = checkBlockInfo(b, iNode.getReplication());
  entry.inode = iNode;
  return entry;
}
/**
 * Logs a single INFO line summarizing the replication state of
 * {@code block}: expected vs. live/corrupt/decommissioned/excess replica
 * counts, the datanodes holding it, and the decommission status of
 * {@code srcNode}.
 *
 * @param block   the block being reported on
 * @param srcNode the datanode from whose perspective this is logged
 * @param num     precomputed replica counters for the block
 */
private void logBlockReplicationInfo(Block block, DatanodeDescriptor srcNode,
    NumberReplicas num) {
  int curReplicas = num.liveReplicas();
  int curExpectedReplicas = getReplication(block);
  INode fileINode = blocksMap.getINode(block);
  Iterator<DatanodeDescriptor> nodeIter = blocksMap.nodeIterator(block);
  // StringBuilder instead of StringBuffer: the buffer is method-local, so
  // StringBuffer's synchronization buys nothing.
  StringBuilder nodeList = new StringBuilder();
  while (nodeIter.hasNext()) {
    DatanodeDescriptor node = nodeIter.next();
    nodeList.append(node.name);
    nodeList.append(" ");
  }
  FSNamesystem.LOG.info("Block: " + block + ", Expected Replicas: "
      + curExpectedReplicas + ", live replicas: " + curReplicas
      + ", corrupt replicas: " + num.corruptReplicas()
      + ", decommissioned replicas: " + num.decommissionedReplicas()
      + ", excess replicas: " + num.excessReplicas()
      + ", Is Open File: " + fileINode.isUnderConstruction()
      + ", Datanodes having this block: " + nodeList
      + ", Current Datanode: " + srcNode.name
      + ", Is current datanode decommissioning: "
      + srcNode.isDecommissionInProgress());
}
@Override public LocatedBlockWithFileName getBlockInfo(long blockId) throws IOException { Block block = new Block(blockId); BlockInfo blockInfo = namesystem.blocksMap.getBlockInfo(block); if (null == blockInfo) { return null; } INodeFile inode = blockInfo.getINode(); if (null == inode) { return null; } String fileName = inode.getFullPathName(); // get the location info List<DatanodeInfo> diList = new ArrayList<DatanodeInfo>(); for (Iterator<DatanodeDescriptor> it = namesystem.blocksMap.nodeIterator(block); it.hasNext();) { diList.add(it.next()); } return new LocatedBlockWithFileName(block, diList.toArray(new DatanodeInfo[] {}), fileName); }
// NOTE(review): fragment cut off mid-loop — the enclosing method is not
// visible here, so only comments are added; code is untouched.
// When the last block of a file is shorter than the file's preferred block
// size, this collects the datanodes currently holding that block into
// `targets` — presumably to set up recovery/lease handling for an
// under-construction last block; confirm against the full method.
if (blocks != null && blocks.length > 0) { Block last = blocks[blocks.length-1]; BlockInfo storedBlock = blocksMap.getStoredBlock(last); if (file.getPreferredBlockSize() > storedBlock.getNumBytes()) { long fileLength = file.computeContentSummary().getLength(); DatanodeDescriptor[] targets = new DatanodeDescriptor[blocksMap.numNodes(last)]; Iterator<DatanodeDescriptor> it = blocksMap.nodeIterator(last); for (int i = 0; it != null && it.hasNext(); i++) { targets[i] = it.next();
/**
 * Counts the live datanodes among those recorded in the blocksMap as
 * holding block {@code b}. Delegates to
 * {@code countLiveNodes(Block, Iterator)}.
 *
 * @param b the block whose replica holders are examined
 * @return the number of live nodes holding a replica of {@code b}
 */
int countLiveNodes(Block b) {
  final Iterator<DatanodeDescriptor> holders = blocksMap.nodeIterator(b);
  return countLiveNodes(b, holders);
}
// NOTE(review): fragment with unbalanced braces (starts inside a string
// concatenation and a throw whose block is not closed here) — the enclosing
// method is not visible, so only comments are added; code is untouched.
// Replaces the stored entry for `lastblock`: removes the old BlockInfo and
// re-registers the block under `pendingFile` — looks like block
// synchronization/commit handling; confirm against the full method.
+ ", deleteBlock=" + deleteblock + ")"); final BlockInfo oldblockinfo = blocksMap.getStoredBlock(lastblock); if (oldblockinfo == null) { throw new IOException("Block (=" + lastblock + ") not found"); blocksMap.removeBlock(oldblockinfo); final BlockInfo newblockinfo = blocksMap.addINode(lastblock, pendingFile);
// NOTE(review): fragment — the method signature starts before this view and
// the loop body is cut off, so only comments are added; code is untouched.
// Under the write lock, resolves the stored block (falling back to a lookup
// that ignores the generation stamp when the exact match is missing) and
// then walks the datanodes recorded for the block — presumably part of
// block-report processing; confirm against the full method.
boolean initialBlockReport) { assert (hasWriteLock()); BlockInfo storedBlock = blocksMap.getStoredBlock(block); if (storedBlock == null) { storedBlock = blocksMap.getStoredBlockWithoutMatchingGS(block); int numNodes = blocksMap.numNodes(block); int count = 0; DatanodeDescriptor nodes[] = new DatanodeDescriptor[numNodes]; Iterator<DatanodeDescriptor> it = blocksMap.nodeIterator(block); for (; it != null && it.hasNext(); ) { DatanodeDescriptor dd = it.next();
// NOTE(review): test fragment cut off mid-block (Mockito stubbing) — the
// enclosing test method is not visible, so only comments are added.
// Stubs b1.getINode() to return the mocked inode and registers the block in
// the real blocksMap so later assertions see it as stored.
if (setStoredBlock) { when(b1.getINode()).thenReturn(iNFmock); fsn.blockManager.blocksMap.addINode(b1, iNFmock);
public void testSetrepIncWithUnderReplicatedBlocks() throws Exception { Configuration conf = new HdfsConfiguration(); final short REPLICATION_FACTOR = 2; final String FILE_NAME = "/testFile"; final Path FILE_PATH = new Path(FILE_NAME); MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION_FACTOR + 1).build(); try { // create a file with one block with a replication factor of 2 final FileSystem fs = cluster.getFileSystem(); DFSTestUtil.createFile(fs, FILE_PATH, 1L, REPLICATION_FACTOR, 1L); DFSTestUtil.waitReplication(fs, FILE_PATH, REPLICATION_FACTOR); // remove one replica from the blocksMap so block becomes under-replicated // but the block does not get put into the under-replicated blocks queue final FSNamesystem namesystem = cluster.getNamesystem(); Block b = DFSTestUtil.getFirstBlock(fs, FILE_PATH); DatanodeDescriptor dn = namesystem.blockManager.blocksMap.nodeIterator(b).next(); namesystem.blockManager.addToInvalidates(b, dn); namesystem.blockManager.blocksMap.removeNode(b, dn); // increment this file's replication factor FsShell shell = new FsShell(conf); assertEquals(0, shell.run(new String[]{ "-setrep", "-w", Integer.toString(1+REPLICATION_FACTOR), FILE_NAME})); } finally { cluster.shutdown(); } }
/** * Add a block to the file. Returns a reference to the added block. */ Block addBlock(String path, INode[] inodes, Block block) throws IOException { waitForReady(); synchronized (rootDir) { INodeFile fileNode = (INodeFile) inodes[inodes.length-1]; // check quota limits and updated space consumed updateCount(inodes, inodes.length-1, 0, fileNode.getPreferredBlockSize()*fileNode.getReplication()); // associate the new list of blocks with this file namesystem.blocksMap.addINode(block, fileNode); BlockInfo blockInfo = namesystem.blocksMap.getStoredBlock(block); fileNode.addBlock(blockInfo); NameNode.stateChangeLog.debug("DIR* FSDirectory.addFile: " + path + " with " + block + " block is added to the in-memory " + "file system"); } return block; }
// NOTE(review): fragment with unbalanced braces — the enclosing method
// (apparently removeStoredBlock) is not visible, so only comments are added;
// code is untouched.
// Removes the node from the block's location list in the blocksMap; if the
// block is still referenced by a file inode, the safe-block count is
// decremented — confirm the surrounding control flow against the full method.
NameNode.stateChangeLog.debug("BLOCK* NameSystem.removeStoredBlock: " +block + " from "+node.getName()); if (!blocksMap.removeNode(block, node)) { NameNode.stateChangeLog.debug("BLOCK* NameSystem.removeStoredBlock: " +block+" has already been removed from node "+node); INode fileINode = blocksMap.getINode(block); if (fileINode != null) { decrementSafeBlockCount(block);
/**
 * Reports whether block {@code b} is still pointed to by some file, i.e.
 * the blocksMap has an inode recorded for it.
 *
 * @param b the block to check
 * @return {@code true} when an owning inode exists for the block
 */
private boolean isValidBlock(Block b) {
  return blocksMap.getINode(b) != null;
}
// NOTE(review): fragment — starts inside a string concatenation and a debug
// call that is cut off mid-statement; the enclosing method is not visible,
// so only comments are added; code is untouched.
// Variant of removeStoredBlock handling: removes the node from the block's
// location list (debug-guarded logging) and resolves the owning inode via
// the stored BlockInfo — confirm against the full method.
+ block + " from " + node.getName()); if (!blocksMap.removeNode(block, node)) { if (NameNode.stateChangeLog.isDebugEnabled()) { NameNode.stateChangeLog.debug("BLOCK* NameSystem.removeStoredBlock: " BlockInfo storedBlock = blocksMap.getStoredBlock(block); INodeFile fileINode = storedBlock == null ? null : storedBlock.getINode(); if (fileINode != null &&
// NOTE(review): fragment — the loop body is cut off and the trailing LOG
// lines belong after it; the enclosing method is not visible, so only
// comments are added; code is untouched.
// Scans every block in the blocksMap, classifying blocks with no owning
// inode as invalid and counting under-/over-replicated ones, then logs the
// totals — looks like processMisReplicatedBlocks; confirm against the full
// method.
long nrInvalid = 0, nrOverReplicated = 0, nrUnderReplicated = 0; neededReplications.clear(); for(BlocksMap.BlockInfo block : blocksMap.getBlocks()) { INodeFile fileINode = block.getINode(); if(fileINode == null) { LOG.info("Total number of blocks = " + blocksMap.size()); LOG.info("Number of invalid blocks = " + nrInvalid); LOG.info("Number of under-replicated blocks = " + nrUnderReplicated);
/** * Add block b belonging to the specified file inode to the map, this * overwrites the map with the new block information. */ BlockInfo updateINode(BlockInfo oldBlock, Block newBlock, INodeFile iNode) throws IOException { // If the old block is not same as the new block, probably the GS was // bumped up, hence remove the old block and replace it with the new one. if (oldBlock != null && !oldBlock.equals(newBlock)) { if (oldBlock.getBlockId() != newBlock.getBlockId()) { throw new IOException("block ids don't match : " + oldBlock + ", " + newBlock); } removeBlock(oldBlock); } BlockInfo info = checkBlockInfo(newBlock, iNode.getReplication()); info.set(newBlock.getBlockId(), newBlock.getNumBytes(), newBlock.getGenerationStamp()); info.inode = iNode; return info; }
/**
 * Returns the total number of blocks currently tracked in the blocksMap.
 *
 * @return the block count for the whole system
 */
public long getBlocksTotal() {
  return blocksMap.size();
}
/**
 * Verifies that {@code block} is associated with a file that holds a lease
 * (is under construction) and, if so, increments and returns the next
 * generation stamp.
 *
 * @param block the block for which a new generation stamp is requested
 * @return the next generation stamp
 * @throws IOException if the block is no longer stored, its file is not
 *         under construction, or a recovery for it is already in progress
 */
synchronized long nextGenerationStampForBlock(Block block)
    throws IOException {
  BlockInfo storedBlock = blocksMap.getStoredBlock(block);
  if (storedBlock == null) {
    // Message typo fixed: "commited" -> "committed".
    String msg = block + " is already committed, storedBlock == null.";
    LOG.info(msg);
    throw new IOException(msg);
  }
  INodeFile fileINode = storedBlock.getINode();
  if (!fileINode.isUnderConstruction()) {
    String msg = block
        + " is already committed, !fileINode.isUnderConstruction().";
    LOG.info(msg);
    throw new IOException(msg);
  }
  // Reject concurrent recovery attempts for the same block.
  // Message typo fixed: "beening" -> "being".
  if (!((INodeFileUnderConstruction) fileINode)
      .setLastRecoveryTime(now())) {
    String msg = block + " is being recovered, ignoring this request.";
    LOG.info(msg);
    throw new IOException(msg);
  }
  return nextGenerationStamp();
}
// NOTE(review): fragment — disjoint statements from an edit-log replay /
// block-update path with an orphaned string piece (" abandoned"); the
// enclosing method is not visible, so only comments are added; code is
// untouched.
// Updates the stored entry for each block under `file` and removes the last
// of the old blocks from the blocksMap — confirm against the full method.
blockInfo[i] = getFSNamesystem().blocksMap.updateINode(oldblock, blocks[i], file); " abandoned"); getFSNamesystem().blocksMap.removeBlock(oldblocks[oldblocks.length - 1]);