// Grab the DFS client handle, its unique client name, and the namenode RPC
// proxy, then read the async-create retry limit from configuration (falling
// back to the compiled-in default). NOTE(review): this is a fragment of a
// larger method — the enclosing definition is not visible in this view.
DFSClient client = dfs.getClient(); String clientName = client.getClientName(); ClientProtocol namenode = client.getNamenode(); int createMaxRetries = conf.getInt(ASYNC_DFS_OUTPUT_CREATE_MAX_RETRIES, DEFAULT_ASYNC_DFS_OUTPUT_CREATE_MAX_RETRIES);
/** * Get the internet address of the currently-active NN. This should rarely be * used, since callers of this method who connect directly to the NN using the * resulting InetSocketAddress will not be able to connect to the active NN if * a failover were to occur after this method has been called. * * @param fs the file system to get the active address of. * @return the internet address of the currently-active NN. * @throws IOException if an error occurs while resolving the active NN. */ public static InetSocketAddress getAddressOfActive(FileSystem fs) throws IOException { if (!(fs instanceof DistributedFileSystem)) { throw new IllegalArgumentException("FileSystem " + fs + " is not a DFS."); } // force client address resolution. fs.exists(new Path("/")); DistributedFileSystem dfs = (DistributedFileSystem) fs; DFSClient dfsClient = dfs.getClient(); return RPC.getServerAddress(dfsClient.getNamenode()); }
// NOTE(review): this span appears garbled/truncated — the second println's
// string concatenation is never closed, a getECBlockGroupStats() call is
// spliced into it, and both trailing printlns end mid-expression. The
// original report-printing code (replicated vs. erasure-coded block stats)
// has likely been mangled in extraction; restore from the source file.
dfs.getClient().getNamenode().getReplicatedBlockStats(); System.out.println("Replicated Blocks:"); System.out.println("\tUnder replicated blocks: " + dfs.getClient().getNamenode().getECBlockGroupStats(); System.out.println("Erasure Coded Block Groups: "); System.out.println("\tLow redundancy block groups: " +
/** * Get the internet address of the currently-active NN. This should rarely be * used, since callers of this method who connect directly to the NN using the * resulting InetSocketAddress will not be able to connect to the active NN if * a failover were to occur after this method has been called. * * @param fs the file system to get the active address of. * @return the internet address of the currently-active NN. * @throws IOException if an error occurs while resolving the active NN. */ public static InetSocketAddress getAddressOfActive(FileSystem fs) throws IOException { if (!(fs instanceof DistributedFileSystem)) { throw new IllegalArgumentException("FileSystem " + fs + " is not a DFS."); } // force client address resolution. fs.exists(new Path("/")); DistributedFileSystem dfs = (DistributedFileSystem) fs; DFSClient dfsClient = dfs.getClient(); return RPC.getServerAddress(dfsClient.getNamenode()); }
/** * Get the internet address of the currently-active NN. This should rarely be * used, since callers of this method who connect directly to the NN using the * resulting InetSocketAddress will not be able to connect to the active NN if * a failover were to occur after this method has been called. * * @param fs the file system to get the active address of. * @return the internet address of the currently-active NN. * @throws IOException if an error occurs while resolving the active NN. */ public static InetSocketAddress getAddressOfActive(FileSystem fs) throws IOException { if (!(fs instanceof DistributedFileSystem)) { throw new IllegalArgumentException("FileSystem " + fs + " is not a DFS."); } // force client address resolution. fs.exists(new Path("/")); DistributedFileSystem dfs = (DistributedFileSystem) fs; DFSClient dfsClient = dfs.getClient(); return RPC.getServerAddress(dfsClient.getNamenode()); }
/** * Get the list of Blocks for a file. */ public List<LocatedBlock> getFileBlocks(Path filepath, int sizeKB) throws IOException { // Return the blocks we just wrote DFSClient dfsclient = getDFSClient(); return dfsclient.getNamenode().getBlockLocations( filepath.toString(), 0, sizeKB * 1024).getLocatedBlocks(); }
@Test /** Abandon a block while creating a file */ public void testAbandonBlock() throws IOException { String src = FILE_NAME_PREFIX + "foo"; // Start writing a file but do not close it FSDataOutputStream fout = fs.create(new Path(src), true, 4096, (short)1, 512L); for (int i = 0; i < 1024; i++) { fout.write(123); } fout.hflush(); // Now abandon the last block DFSClient dfsclient = ((DistributedFileSystem)fs).getClient(); LocatedBlocks blocks = dfsclient.getNamenode().getBlockLocations(src, 0, 1); LocatedBlock b = blocks.getLastLocatedBlock(); dfsclient.getNamenode().abandonBlock(b.getBlock(), src, dfsclient.clientName); // And close the file fout.close(); }
/**
 * Polls the namenode every 100ms until the file's single block has
 * {@code replFactor} replicas and every datanode in {@code includeNodes}
 * hosts one of them.
 *
 * @param includeNodes datanodes that must each hold a replica
 * @param fileName     path of the file to check
 * @param fileLen      length of the file in bytes
 * @param replFactor   expected replication factor
 * @param client       DFS client used to query the namenode
 * @throws IOException if a namenode call fails
 */
private void checkBlocks(DatanodeInfo[] includeNodes, String fileName,
    long fileLen, short replFactor, DFSClient client) throws IOException {
  // Primitive boolean: the original boxed Boolean invited accidental
  // identity comparison and served no purpose here.
  boolean notDone;
  do {
    try {
      Thread.sleep(100);
    } catch (InterruptedException ignored) {
      // Deliberately ignored: this is a best-effort polling delay in a
      // test helper; the loop simply polls again.
    }
    List<LocatedBlock> blocks = client.getNamenode()
        .getBlockLocations(fileName, 0, fileLen).getLocatedBlocks();
    assertEquals(1, blocks.size());
    DatanodeInfo[] nodes = blocks.get(0).getLocations();
    notDone = (nodes.length != replFactor);
    if (notDone) {
      LOG.info("Expected replication factor is " + replFactor
          + " but the real replication factor is " + nodes.length);
    } else {
      // Replication count matches; now verify placement on each node.
      List<DatanodeInfo> nodeLocations = Arrays.asList(nodes);
      for (DatanodeInfo node : includeNodes) {
        if (!nodeLocations.contains(node)) {
          notDone = true;
          LOG.info("Block is not located at " + node.getName());
          break;
        }
      }
    }
  } while (notDone);
}
/**
 * Reports whether the namenode considers every replica of block
 * {@code blockNo} of {@code file} to be corrupt.
 *
 * @param cluster mini cluster hosting the namenode
 * @param file    file whose block is inspected
 * @param blockNo index of the block within the file
 * @return true iff all replicas of that block are marked corrupt
 * @throws IOException if the namenode call fails
 */
public static boolean allBlockReplicasCorrupt(MiniDFSCluster cluster, Path file,
    int blockNo) throws IOException {
  DFSClient dfsClient = new DFSClient(
      new InetSocketAddress("localhost", cluster.getNameNodePort()),
      cluster.getConfiguration());
  LocatedBlocks located;
  try {
    located = dfsClient.getNamenode().getBlockLocations(file.toString(), 0,
        Long.MAX_VALUE);
  } finally {
    // Always release the client, even if the RPC failed.
    dfsClient.close();
  }
  return located.get(blockNo).isCorrupt();
}
/** * Get the list of Blocks for a file. */ public List<LocatedBlock> getFileBlocks(Path filepath, int sizeKB) throws IOException { // Return the blocks we just wrote DFSClient dfsclient = getDFSClient(); return dfsclient.getNamenode().getBlockLocations( filepath.toString(), 0, sizeKB * 1024).getLocatedBlocks(); }
@Override
void invoke() throws Exception {
  // Ask the namenode to reopen the file for append under this client's name.
  final EnumSetWritable<CreateFlag> appendFlags =
      new EnumSetWritable<>(EnumSet.of(CreateFlag.APPEND));
  lbk = client.getNamenode().append(fileName, client.getClientName(),
      appendFlags);
}
/**
 * Asserts that the corruption flag reported by the namenode for the first
 * block of {@code filePath} equals {@code isCorrupted}.
 */
private void verifyFirstBlockCorrupted(Path filePath, boolean isCorrupted)
    throws AccessControlException, FileNotFoundException,
    UnresolvedLinkException, IOException {
  final LocatedBlock firstBlock = dfs.dfs.getNamenode()
      .getBlockLocations(filePath.toUri().getPath(), 0, Long.MAX_VALUE)
      .get(0);
  Assert.assertEquals(isCorrupted, firstBlock.isCorrupt());
}
/** * Verify the number of corrupted block replicas by fetching the block * location from name node. */ private void verifyCorruptedBlockCount(Path filePath, int expectedReplicas) throws AccessControlException, FileNotFoundException, UnresolvedLinkException, IOException { final LocatedBlocks lBlocks = dfs.dfs.getNamenode().getBlockLocations( filePath.toUri().getPath(), 0, Long.MAX_VALUE); // we expect only the first block of the file is used for this test LocatedBlock firstLocatedBlock = lBlocks.get(0); Assert.assertEquals(expectedReplicas, firstLocatedBlock.getLocations().length); }
@Override
void invoke() throws Exception {
  // Issue the create RPC directly against the namenode (no overwrite),
  // advertising encryption-zone support.
  final EnumSetWritable<CreateFlag> createFlags =
      new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE));
  this.status = client.getNamenode().create(fileName,
      FsPermission.getFileDefault(), client.getClientName(), createFlags,
      false, DataNodes, BlockSize,
      new CryptoProtocolVersion[] {CryptoProtocolVersion.ENCRYPTION_ZONES});
}
/** * Test DFS Raid */ public void testBlockMissingException() throws Exception { LOG.info("Test testBlockMissingException started."); long blockSize = 1024L; int numBlocks = 4; conf = new HdfsConfiguration(); try { dfs = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATANODES).build(); dfs.waitActive(); fileSys = (DistributedFileSystem)dfs.getFileSystem(); Path file1 = new Path("/user/dhruba/raidtest/file1"); createOldFile(fileSys, file1, 1, numBlocks, blockSize); // extract block locations from File system. Wait till file is closed. LocatedBlocks locations = null; locations = fileSys.dfs.getNamenode().getBlockLocations(file1.toString(), 0, numBlocks * blockSize); // remove block of file LOG.info("Remove first block of file"); corruptBlock(file1, locations.get(0).getBlock()); // validate that the system throws BlockMissingException validateFile(fileSys, file1); } finally { if (fileSys != null) fileSys.close(); if (dfs != null) dfs.shutdown(); } LOG.info("Test testBlockMissingException completed."); }
/**
 * Reports whether the namenode marks every replica of block {@code blockNo}
 * of {@code file} as corrupt. Uses the configuration of namenode 0.
 *
 * @param cluster mini cluster hosting the namenode
 * @param file    file whose block is inspected
 * @param blockNo index of the block within the file
 * @return true iff all replicas of that block are marked corrupt
 * @throws IOException if the namenode call fails
 */
public static boolean allBlockReplicasCorrupt(MiniDFSCluster cluster, Path file,
    int blockNo) throws IOException {
  DFSClient probeClient = new DFSClient(
      new InetSocketAddress("localhost", cluster.getNameNodePort()),
      cluster.getConfiguration(0));
  LocatedBlocks located;
  try {
    located = probeClient.getNamenode().getBlockLocations(file.toString(), 0,
        Long.MAX_VALUE);
  } finally {
    // Always release the client, even when the RPC throws.
    probeClient.close();
  }
  return located.get(blockNo).isCorrupt();
}
public void removeBlocks(MiniDFSCluster cluster) throws AccessControlException, FileNotFoundException, UnresolvedLinkException, IOException { for (int corruptIdx : blocksToCorrupt) { // Corrupt a block by deleting it ExtendedBlock block = dfsClient.getNamenode().getBlockLocations( name, blockSize * corruptIdx, Long.MAX_VALUE).get(0).getBlock(); for (int i = 0; i < numDataNodes; i++) { File blockFile = cluster.getBlockFile(i, block); if(blockFile != null && blockFile.exists()) { assertTrue(blockFile.delete()); } } } }
@Override void invoke() throws Exception { DatanodeInfo[] newNodes = new DatanodeInfo[2]; newNodes[0] = nodes[0]; newNodes[1] = nodes[1]; String[] storageIDs = {"s0", "s1"}; client.getNamenode().updatePipeline(client.getClientName(), oldBlock, newBlock, newNodes, storageIDs); // close can fail if the out.close() commit the block after block received // notifications from Datanode. // Since datanodes and output stream have still old genstamps, these // blocks will be marked as corrupt after HDFS-5723 if RECEIVED // notifications reaches namenode first and close() will fail. DFSTestUtil.abortStream((DFSOutputStream) out.getWrappedStream()); }
@Override void prepare() throws Exception { final Path filePath = new Path(file); DFSTestUtil.createFile(dfs, filePath, BlockSize, DataNodes, 0); // append to the file and leave the last block under construction out = this.client.append(file, BlockSize, EnumSet.of(CreateFlag.APPEND), null, null); byte[] appendContent = new byte[100]; new Random().nextBytes(appendContent); out.write(appendContent); ((HdfsDataOutputStream) out).hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH)); LocatedBlocks blks = dfs.getClient() .getLocatedBlocks(file, BlockSize + 1); assertEquals(1, blks.getLocatedBlocks().size()); nodes = blks.get(0).getLocations(); oldBlock = blks.get(0).getBlock(); LocatedBlock newLbk = client.getNamenode().updateBlockForPipeline( oldBlock, client.getClientName()); newBlock = new ExtendedBlock(oldBlock.getBlockPoolId(), oldBlock.getBlockId(), oldBlock.getNumBytes(), newLbk.getBlock().getGenerationStamp()); }
/** * Create a file with one block and corrupt some/all of the block replicas. */ private void createAFileWithCorruptedBlockReplicas(Path filePath, short repl, int corruptBlockCount) throws IOException, AccessControlException, FileNotFoundException, UnresolvedLinkException, InterruptedException, TimeoutException { DFSTestUtil.createFile(dfs, filePath, BLOCK_SIZE, repl, 0); DFSTestUtil.waitReplication(dfs, filePath, repl); // Locate the file blocks by asking name node final LocatedBlocks locatedblocks = dfs.dfs.getNamenode() .getBlockLocations(filePath.toString(), 0L, BLOCK_SIZE); Assert.assertEquals(repl, locatedblocks.get(0).getLocations().length); // The file only has one block LocatedBlock lblock = locatedblocks.get(0); DatanodeInfo[] datanodeinfos = lblock.getLocations(); ExtendedBlock block = lblock.getBlock(); // corrupt some /all of the block replicas for (int i = 0; i < corruptBlockCount; i++) { DatanodeInfo dninfo = datanodeinfos[i]; final DataNode dn = cluster.getDataNode(dninfo.getIpcPort()); corruptBlock(block, dn); LOG.debug("Corrupted block " + block.getBlockName() + " on data node " + dninfo); } }