@Before public void setUp() throws Exception { Configuration c = TEST_UTIL.getConfiguration(); c.setBoolean("dfs.support.append", true); TEST_UTIL.startMiniCluster(1); table = TEST_UTIL.createMultiRegionTable(TABLE_NAME, FAMILY); TEST_UTIL.loadTable(table, FAMILY); // setup the hdfssnapshots client = new DFSClient(TEST_UTIL.getDFSCluster().getURI(), TEST_UTIL.getConfiguration()); String fullUrIPath = TEST_UTIL.getDefaultRootDirPath().toString(); String uriString = TEST_UTIL.getTestFileSystem().getUri().toString(); baseDir = StringUtils.removeStart(fullUrIPath, uriString); client.allowSnapshot(baseDir); }
// NOTE(review): incomplete fragment — several blocks opened here are
// closed outside this excerpt, so the enclosing method cannot be
// reviewed as a unit. What is visible: when the FS is a
// DistributedFileSystem and the file exists, it scans
// dfs.getStoragePolicies() for the policy whose id matches
// storagePolicyId. `unspecifiedStoragePolicyId` semantics are not
// visible here — confirm against the full source.
if (this.fs instanceof DistributedFileSystem) { DistributedFileSystem dfs = (DistributedFileSystem) this.fs; HdfsFileStatus status = dfs.getClient().getFileInfo(path.toUri().getPath()); if (null != status) { if (unspecifiedStoragePolicyId < 0) { BlockStoragePolicy[] policies = dfs.getStoragePolicies(); for (BlockStoragePolicy policy : policies) { if (policy.getId() == storagePolicyId) {
@Override public List<HdfsFileStatusWithId> listLocatedHdfsStatus( FileSystem fs, Path p, PathFilter filter) throws IOException { DistributedFileSystem dfs = ensureDfs(fs); DFSClient dfsc = dfs.getClient(); final String src = p.toUri().getPath(); DirectoryListing current = dfsc.listPaths(src, org.apache.hadoop.hdfs.protocol.HdfsFileStatus.EMPTY_NAME, true); if (current == null) { // the directory does not exist throw new FileNotFoundException("File " + p + " does not exist."); } final URI fsUri = fs.getUri(); List<HdfsFileStatusWithId> result = new ArrayList<HdfsFileStatusWithId>( current.getPartialListing().length); while (current != null) { org.apache.hadoop.hdfs.protocol.HdfsFileStatus[] hfss = current.getPartialListing(); for (int i = 0; i < hfss.length; ++i) { HdfsLocatedFileStatus next = (HdfsLocatedFileStatus)(hfss[i]); if (filter != null) { Path filterPath = next.getFullPath(p).makeQualified(fsUri, null); if (!filter.accept(filterPath)) continue; } LocatedFileStatus lfs = next.makeQualifiedLocated(fsUri, p); result.add(new HdfsFileStatusWithIdImpl(lfs, next.getFileId())); } current = current.hasMore() ? dfsc.listPaths(src, current.getLastName(), true) : null; } return result; }
private void lostFoundInit(DFSClient dfs) { lfInited = true; try { String lfName = "/lost+found"; final HdfsFileStatus lfStatus = dfs.getFileInfo(lfName); if (lfStatus == null) { // not exists lfInitedOk = dfs.mkdirs(lfName, null, true); lostFound = lfName; } else if (!lfStatus.isDirectory()) { // exists but not a directory LOG.warn("Cannot use /lost+found : a regular file with this name exists."); lfInitedOk = false; } else { // exists and is a directory lostFound = lfName; lfInitedOk = true; } } catch (Exception e) { e.printStackTrace(); lfInitedOk = false; } if (lostFound == null) { LOG.warn("Cannot initialize /lost+found ."); lfInitedOk = false; internalError = true; } }
// NOTE(review): incomplete fragment — the enclosing handler method is
// outside this excerpt. Visible behavior: creates the file through
// DFSClient with the given permission/flags/replication/blockSize,
// wraps the stream (presumably for encryption — confirm), then sets the
// Location, Content-Length (0) and CORS headers on the HTTP response.
OutputStream out = dfsClient.createWrappedOutputStream(dfsClient.create( path, permission, flags, createParent, replication, blockSize, null, bufferSize, null), null); final URI uri = new URI(HDFS_URI_SCHEME, nnId, path, null, null); resp.headers().set(LOCATION, uri.toString()); resp.headers().set(CONTENT_LENGTH, 0); resp.headers().set(ACCESS_CONTROL_ALLOW_ORIGIN, "*");
// NOTE(review): garbled fragment — `h`, `p`, and `hf` are never declared
// in this excerpt, and `h.put(p)` sits between unrelated statements, so
// this looks like a mangled extraction of a WAL-lookup test. Visible
// intent: compute the region server's WAL directory path, list it via
// DFSClient.listPaths, and stat one log file. Reconstruct from the full
// original before relying on any of it.
String rootDir = new Path(FSUtils.getWALRootDir(conf) + "/" + HConstants.HREGION_LOGDIR_NAME + "/" + targetRs.getServerName().toString()).toUri().getPath(); h.put(p); DirectoryListing dl = dfs.getClient().listPaths(rootDir, HdfsFileStatus.EMPTY_NAME); HdfsFileStatus[] hfs = dl.getPartialListing(); LOG.info("Log file found: " + hf.getLocalName() + " in " + rootDir); String logFile = rootDir + "/" + hf.getLocalName(); FileStatus fsLog = rfs.getFileStatus(new Path(logFile));
/**
 * Initializes this filesystem instance against the NameNode identified
 * by {@code uri}: stores the configuration, connects a DFSClient, and
 * seeds the working directory from the user's home directory.
 */
public void initialize(URI uri, Configuration conf) throws IOException {
  setConf(conf);

  // A URI without a host cannot identify a NameNode.
  if (uri.getHost() == null) {
    throw new IOException("Incomplete HDFS URI, no host: "+ uri);
  }

  InetSocketAddress nnAddress = NameNode.getAddress(uri.getAuthority());
  this.dfs = new DFSClient(nnAddress, conf, statistics);
  this.uri = NameNode.getUri(nnAddress);
  this.workingDir = getHomeDirectory();
}
/**
 * close() must flush output streams first, then process deleteOnExit
 * paths, and shut the client down last — verified via Mockito ordering.
 */
@Test
public void testDFSCloseOrdering() throws Exception {
  DistributedFileSystem fileSystem = new MyDistributedFileSystem();
  Path toDelete = new Path("/a");
  fileSystem.deleteOnExit(toDelete);
  fileSystem.close();

  InOrder ordered = inOrder(fileSystem.dfs);
  ordered.verify(fileSystem.dfs).closeOutputStreams(eq(false));
  ordered.verify(fileSystem.dfs).delete(eq(toDelete.toString()), eq(true));
  ordered.verify(fileSystem.dfs).close();
}
/**
 * Asserts that {@code file} appears in its parent directory's listing
 * and delegates the per-entry policy check; fails the test if the file
 * is absent from the listing.
 */
void verifyFile(final Path file, final Byte expectedPolicyId) throws Exception {
  final Path parent = file.getParent();
  final String target = file.getName();
  DirectoryListing listing = dfs.getClient().listPaths(
      parent.toString(), HdfsFileStatus.EMPTY_NAME, true);
  for (HdfsFileStatus entry : listing.getPartialListing()) {
    if (!entry.getLocalName().equals(target)) {
      continue;
    }
    verifyFile(parent, entry, expectedPolicyId);
    return;
  }
  Assert.fail("File " + file + " not found.");
}
public HdfsEncryptionShim(URI uri, Configuration conf) throws IOException { DistributedFileSystem dfs = (DistributedFileSystem)FileSystem.get(uri, conf); this.conf = conf; this.keyProvider = dfs.getClient().getKeyProvider(); this.hdfsAdmin = new HdfsAdmin(uri, conf); }
/**
 * Creating an encryption zone should eagerly populate the KMS
 * provider's EDEK cache for the zone key.
 */
@Test(timeout = 120000)
public void testCreateEZPopulatesEDEKCache() throws Exception {
  final Path zonePath = new Path("/TestEncryptionZone");
  fsWrapper.mkdir(zonePath, FsPermission.getDirDefault(), false);
  dfsAdmin.createEncryptionZone(zonePath, TEST_KEY);
  KMSClientProvider provider = (KMSClientProvider) fs.getClient().getKeyProvider();
  assertTrue(provider.getEncKeyQueueSize(TEST_KEY) > 0);
}
/**
 * Returns every live datanode of the cluster as a ServerName; port and
 * start code are set to -1 since only hostnames are known here.
 */
public ServerName[] getDataNodes() throws IOException {
  DistributedFileSystem fileSystem =
      (DistributedFileSystem) FSUtils.getRootDir(getConf()).getFileSystem(getConf());
  DFSClient client = fileSystem.getClient();
  List<ServerName> servers = new LinkedList<>();
  for (DatanodeInfo node : client.datanodeReport(HdfsConstants.DatanodeReportType.LIVE)) {
    servers.add(ServerName.valueOf(node.getHostName(), -1, -1));
  }
  return servers.toArray(new ServerName[servers.size()]);
}
}
protected final LocatedBlocks ensureFileReplicasOnStorageType( Path path, StorageType storageType) throws IOException { // Ensure that returned block locations returned are correct! LOG.info("Ensure path: " + path + " is on StorageType: " + storageType); assertThat(fs.exists(path), is(true)); long fileLength = client.getFileInfo(path.toString()).getLen(); LocatedBlocks locatedBlocks = client.getLocatedBlocks(path.toString(), 0, fileLength); for (LocatedBlock locatedBlock : locatedBlocks.getLocatedBlocks()) { assertThat(locatedBlock.getStorageTypes()[0], is(storageType)); } return locatedBlocks; }
/**
 * Creating a symlink inside the read-only .snapshot namespace must be
 * rejected with SnapshotAccessControlException (declared as the
 * expected exception on the annotation).
 */
@Test(timeout=60000, expected = SnapshotAccessControlException.class)
public void testCreateSymlink() throws Exception {
  @SuppressWarnings("deprecation")
  DFSClient client = new DFSClient(conf);
  client.createSymlink(sub2.toString(), "/TestSnapshot/sub1/.snapshot", false);
}
}
/**
 * Returns the status of {@code f} qualified against this filesystem's
 * URI.
 *
 * @throws FileNotFoundException when the path does not exist
 */
@Override
public FileStatus getFileStatus(Path f)
    throws IOException, UnresolvedLinkException {
  HdfsFileStatus status = dfs.getFileInfo(getUriPath(f));
  if (status == null) {
    throw new FileNotFoundException("File does not exist: " + f.toString());
  }
  return status.makeQualified(getUri(), f);
}
// Closes the filesystem: first flushes/closes all streams still open on
// this instance (false = close, not abort), then runs the superclass
// close; the DFS client itself is closed in finally so it is released
// even when super.close() throws.
@Override public void close() throws IOException { try { dfs.closeOutputStreams(false); super.close(); } finally { dfs.close(); } }
// Closes the filesystem: processes deleteOnExit paths and shuts down
// the DFS client first; super.close() runs in finally so the FileSystem
// cache entry is removed even when client shutdown throws.
/** {@inheritDoc} */ public void close() throws IOException { try { super.processDeleteOnExit(); dfs.close(); } finally { super.close(); } }
// NOTE(review): this block is garbled/truncated in the source —
// `partialListing`, `listing`, and `fileStatus` are never declared, a
// `return stats;` sits inside the loop ahead of unreachable statements,
// and the braces do not balance with the trailing `} while (...)`. It
// reads like a mangled merge of listStatusInternal's two paths (full
// listing returned at once vs. paged listing accumulated into a list).
// Reconstruct from the original file; do not infer behavior from this
// text as-is.
private FileStatus[] listStatusInternal(Path p) throws IOException { String src = getPathName(p); DirectoryListing thisListing = dfs.listPaths( src, HdfsFileStatus.EMPTY_NAME); FileStatus[] stats = new FileStatus[partialListing.length]; for (int i = 0; i < partialListing.length; i++) { stats[i] = partialListing[i].makeQualified(getUri(), p); statistics.incrementReadOps(1); return stats; listing.add(fileStatus.makeQualified(getUri(), p)); statistics.incrementLargeReadOps(1); thisListing = dfs.listPaths(src, thisListing.getLastName()); listing.add(fileStatus.makeQualified(getUri(), p)); statistics.incrementLargeReadOps(1); } while (thisListing.hasMore());