@Before public void setUp() throws Exception { Configuration c = TEST_UTIL.getConfiguration(); c.setBoolean("dfs.support.append", true); TEST_UTIL.startMiniCluster(1); table = TEST_UTIL.createMultiRegionTable(TABLE_NAME, FAMILY); TEST_UTIL.loadTable(table, FAMILY); // setup the hdfssnapshots client = new DFSClient(TEST_UTIL.getDFSCluster().getURI(), TEST_UTIL.getConfiguration()); String fullUrIPath = TEST_UTIL.getDefaultRootDirPath().toString(); String uriString = TEST_UTIL.getTestFileSystem().getUri().toString(); baseDir = StringUtils.removeStart(fullUrIPath, uriString); client.allowSnapshot(baseDir); }
/** Builds a {@link DFSClient} addressing the namenode with the given id. */
private static DFSClient newDfsClient(String nnId, Configuration conf) throws IOException {
  return new DFSClient(URI.create(HDFS_URI_SCHEME + "://" + nnId), conf);
}
private void copyBlocksToLostFound(String parent, HdfsFileStatus file, LocatedBlocks blocks) throws IOException { final DFSClient dfs = new DFSClient(DFSUtilClient.getNNAddress(conf), conf); final String fullName = file.getFullName(parent); OutputStream fos = null;
/** Creates a {@link DFSClient} for the namenode identified by {@code nnId}. */
private static DFSClient newDfsClient(String nnId, Configuration conf) throws IOException {
  // Address the namenode through the configured HDFS URI scheme.
  final URI namenodeUri = URI.create(HDFS_URI_SCHEME + "://" + nnId);
  return new DFSClient(namenodeUri, conf);
}
/** Returns a new {@link DFSClient} talking to the namenode {@code nnId}. */
private static DFSClient newDfsClient(String nnId, Configuration conf) throws IOException {
  String address = HDFS_URI_SCHEME + "://" + nnId;
  return new DFSClient(URI.create(address), conf);
}
@Override public DFSClient run() throws Exception { return new DFSClient(null, mcp, conf, null); } });
@Override public DFSClient run() throws IOException { return new DFSClient(NetUtils.createSocketAddr(addr), conf); } });
/**
 * Get DFSClient for a namenode corresponding to the BPID from a datanode.
 *
 * @param request servlet request carrying the namenode address parameter
 * @param conf    configuration used to build the client
 * @return a client bound to the namenode named in the request
 * @throws IOException if the client cannot be created
 * @throws InterruptedException declared for caller compatibility
 */
public static DFSClient getDFSClient(final HttpServletRequest request,
    final Configuration conf) throws IOException, InterruptedException {
  // The namenode address arrives as an HTTP request parameter.
  final String namenodeAddress = request.getParameter(JspHelper.NAMENODE_ADDRESS);
  return new DFSClient(DFSUtil.getSocketAddress(namenodeAddress), conf);
}
@Override public DFSClient run() throws IOException { return new DFSClient(NetUtils.createSocketAddr(addr), conf); } });
/**
 * Get the DFSClient.
 *
 * @return a client connected to the mini cluster's namenode on localhost
 * @throws IOException if the client cannot be created
 */
public DFSClient getDFSClient() throws IOException {
  final InetSocketAddress namenode =
      new InetSocketAddress("localhost", cluster.getNameNodePort());
  return new DFSClient(namenode, conf);
}
/**
 * Creates a {@link DFSClient} on behalf of the given user via {@code doAs}.
 * NOTE(review): this builds the client over the shared {@code mcp} namenode
 * proxy rather than a socket address — assumes {@code mcp} is initialized
 * elsewhere in this class; confirm before reuse.
 */
static public DFSClient createDFSClientAs(UserGroupInformation ugi, final Configuration conf) throws Exception { return ugi.doAs(new PrivilegedExceptionAction<DFSClient>() { @Override public DFSClient run() throws Exception { return new DFSClient(null, mcp, conf, null); } }); } }
// Initializes this filesystem against the namenode named by the URI's
// authority. Statement order matters: setConf runs before the DFSClient is
// built so the client sees the supplied configuration, and this.uri is
// derived from the resolved namenode address, not the raw input URI.
public void initialize(URI uri, Configuration conf) throws IOException { setConf(conf); String host = uri.getHost(); if (host == null) { throw new IOException("Incomplete HDFS URI, no host: "+ uri); } InetSocketAddress namenode = NameNode.getAddress(uri.getAuthority()); this.dfs = new DFSClient(namenode, conf, statistics); this.uri = NameNode.getUri(namenode); this.workingDir = getHomeDirectory(); }
/**
 * Get the DFSClient.
 *
 * @return a client bound to the mini cluster's local namenode port
 * @throws IOException if the client cannot be created
 */
public DFSClient getDFSClient() throws IOException {
  return new DFSClient(
      new InetSocketAddress("localhost", cluster.getNameNodePort()), conf);
}
// Variant of initialize that first delegates to the superclass, then wires a
// DFSClient to the namenode named by the URI's authority. A host-less URI is
// rejected up front. The PathValidator is created last, after the filesystem
// state (dfs, uri, workingDir) is fully established.
public void initialize(URI uri, Configuration conf) throws IOException { super.initialize(uri, conf); setConf(conf); String host = uri.getHost(); if (host == null) { throw new IOException("Incomplete HDFS URI, no host: "+ uri); } InetSocketAddress namenode = NameNode.getAddress(uri.getAuthority()); this.dfs = new DFSClient(namenode, conf, statistics); this.uri = NameNode.getUri(namenode); this.workingDir = getHomeDirectory(); pathValidator = new PathValidator(conf); }
/** Creating a symlink targeting a read-only .snapshot path must be rejected. */
@Test(timeout=60000, expected = SnapshotAccessControlException.class)
public void testCreateSymlink() throws Exception {
  @SuppressWarnings("deprecation")
  DFSClient client = new DFSClient(conf);
  client.createSymlink(sub2.toString(), "/TestSnapshot/sub1/.snapshot", false);
}
}
/** Creating a file under a snapshot path must be rejected by the namenode. */
@Test(timeout=60000, expected = SnapshotAccessControlException.class)
public void testCreate() throws Exception {
  @SuppressWarnings("deprecation")
  DFSClient snapshotClient = new DFSClient(conf);
  snapshotClient.create(objInSnapshot.toString(), true);
}
@BeforeClass
public static void testSetUp() throws Exception {
  // Use a tiny listing limit so directory listings are paged during tests.
  conf = new HdfsConfiguration();
  conf.setInt(DFSConfigKeys.DFS_LIST_LIMIT, 2);
  cluster = new MiniDFSCluster.Builder(conf).build();
  fs = cluster.getFileSystem();
  fc = FileContext.getFileContext(cluster.getURI(), conf);
  hftpfs = cluster.getHftpFileSystem();
  dfsClient = new DFSClient(NameNode.getAddress(conf), conf);
  // Seed one data file for the status tests.
  file1 = new Path("filestatus.dat");
  writeFile(fs, file1, 1, fileSize, blockSize);
}
/**
 * Returns whether block {@code blockNo} of {@code file} is marked corrupt on
 * the namenode (i.e. all of its replicas are corrupt).
 *
 * @param cluster mini cluster whose namenode is queried
 * @param file    path of the file to inspect
 * @param blockNo zero-based index of the block within the file
 * @return true if the located block is flagged corrupt
 * @throws IOException if the namenode RPC fails
 */
public static boolean allBlockReplicasCorrupt(MiniDFSCluster cluster, Path file,
    int blockNo) throws IOException {
  // try-with-resources replaces the manual try/finally: the client is closed
  // even if the RPC throws, with identical behavior otherwise.
  try (DFSClient client = new DFSClient(
      new InetSocketAddress("localhost", cluster.getNameNodePort()),
      cluster.getConfiguration(0))) {
    LocatedBlocks blocks = client.getNamenode().getBlockLocations(
        file.toString(), 0, Long.MAX_VALUE);
    // isCorrupt reads already-fetched local state, so evaluating it inside
    // the try (before close) does not change the result.
    return blocks.get(blockNo).isCorrupt();
  }
}
@Before
public void startUpCluster() throws IOException {
  conf = new Configuration();
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATANODES).build();
  fs = cluster.getFileSystem();
  // Direct namenode client, bypassing the FileSystem abstraction.
  client = new DFSClient(
      new InetSocketAddress("localhost", cluster.getNameNodePort()),
      cluster.getConfiguration(0));
  // Cache the first datanode and its registration for the block pool.
  dn0 = cluster.getDataNodes().get(0);
  poolId = cluster.getNamesystem().getBlockPoolId();
  dn0Reg = dn0.getDNRegistrationForBP(poolId);
}
@BeforeClass
public static void testSetUp() throws Exception {
  // Small listing limit forces paged directory listings in the tests.
  conf = new HdfsConfiguration();
  conf.setInt(DFSConfigKeys.DFS_LIST_LIMIT, 2);
  cluster = new MiniDFSCluster.Builder(conf).build();
  fs = cluster.getFileSystem();
  // Index 0: this variant addresses the first (only) namenode explicitly.
  fc = FileContext.getFileContext(cluster.getURI(0), conf);
  hftpfs = cluster.getHftpFileSystem(0);
  dfsClient = new DFSClient(NameNode.getAddress(conf), conf);
  // Seed one data file for the status tests.
  file1 = new Path("filestatus.dat");
  writeFile(fs, file1, 1, fileSize, blockSize);
}