public static boolean isDefaultFs(DistributedFileSystem fs) {
  URI uri = fs.getUri();
  String scheme = uri.getScheme();
  if (scheme == null) return true; // Assume that relative URI resolves to default FS.
  URI defaultUri = FileSystem.getDefaultUri(fs.getConf());
  if (!defaultUri.getScheme().equalsIgnoreCase(scheme)) return false; // Mismatch.
  String defaultAuthority = defaultUri.getAuthority(), authority = uri.getAuthority();
  if (authority == null) return true; // Schemes match, no authority - assume default.
  if (defaultAuthority == null) return false; // TODO: What does this even mean?
  if (!defaultUri.getHost().equalsIgnoreCase(uri.getHost())) return false; // Mismatch.
  int defaultPort = defaultUri.getPort(), port = uri.getPort();
  if (port == -1) return true; // No port, assume default.
  // Note - this makes assumptions that are DFS-specific; DFS::getDefaultPort is not visible.
  return (defaultPort == -1) ? (port == NameNode.DEFAULT_PORT) : (port == defaultPort);
}
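// A standalone sketch of the same scheme/authority/port comparison, using plain
// java.net.URI so it runs without a cluster. The URIs and the 8020 stand-in for
// NameNode.DEFAULT_PORT are illustrative assumptions, not taken from the code above.
import java.net.URI;

public class DefaultFsCheckDemo {
  private static final int ASSUMED_DEFAULT_PORT = 8020; // stand-in for NameNode.DEFAULT_PORT

  static boolean matchesDefault(URI defaultUri, URI uri) {
    if (uri.getScheme() == null) return true;                                    // relative URI -> default FS
    if (!defaultUri.getScheme().equalsIgnoreCase(uri.getScheme())) return false; // scheme mismatch
    if (uri.getAuthority() == null) return true;                                 // scheme only -> default FS
    if (defaultUri.getAuthority() == null) return false;
    if (!defaultUri.getHost().equalsIgnoreCase(uri.getHost())) return false;     // host mismatch
    int port = uri.getPort(), defaultPort = defaultUri.getPort();
    if (port == -1) return true;                                                 // no explicit port
    return defaultPort == -1 ? port == ASSUMED_DEFAULT_PORT : port == defaultPort;
  }

  public static void main(String[] args) {
    URI fsDefault = URI.create("hdfs://nn1.example.com");
    System.out.println(matchesDefault(fsDefault, URI.create("hdfs://nn1.example.com:8020"))); // true
    System.out.println(matchesDefault(fsDefault, URI.create("hdfs://nn2.example.com")));      // false
  }
}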
/**
 * Checks to see if the specified file system is available.
 *
 * @param fs filesystem
 * @throws IOException e
 */
public static void checkFileSystemAvailable(final FileSystem fs) throws IOException {
  if (!(fs instanceof DistributedFileSystem)) {
    return;
  }
  IOException exception = null;
  DistributedFileSystem dfs = (DistributedFileSystem) fs;
  try {
    if (dfs.exists(new Path("/"))) {
      return;
    }
  } catch (IOException e) {
    exception = e instanceof RemoteException
        ? ((RemoteException) e).unwrapRemoteException()
        : e;
  }
  try {
    fs.close();
  } catch (Exception e) {
    LOG.error("file system close failed: ", e);
  }
  IOException io = new IOException("File system is not available");
  io.initCause(exception);
  throw io;
}
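// A hedged usage sketch: probing HDFS availability at startup and failing fast.
// FSUtils is assumed to be the class holding checkFileSystemAvailable() above; the
// surrounding startup code is illustrative.
Configuration conf = new Configuration();
FileSystem fs = FileSystem.get(conf);
try {
  FSUtils.checkFileSystemAvailable(fs);
} catch (IOException e) {
  // The helper has already closed the filesystem; abort startup rather than limp along.
  throw new RuntimeException("HDFS is not available, aborting startup", e);
}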
if (this.fs instanceof DistributedFileSystem) {
  DistributedFileSystem dfs = (DistributedFileSystem) this.fs;
  HdfsFileStatus status = dfs.getClient().getFileInfo(path.toUri().getPath());
  if (null != status) {
    if (unspecifiedStoragePolicyId < 0) {
      BlockStoragePolicy[] policies = dfs.getStoragePolicies();
      for (BlockStoragePolicy policy : policies) {
        if (policy.getId() == storagePolicyId) {
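// A self-contained sketch of the lookup the fragment above is performing: resolving a
// numeric storage-policy id to its policy by scanning getStoragePolicies(). The setup
// and the example id are assumptions; the array-returning getStoragePolicies() matches
// the Hadoop 2.x API used in the fragment.
Configuration conf = new Configuration();
DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf); // assumes fs.defaultFS is hdfs://
byte storagePolicyId = 7; // illustrative id only
String policyName = null;
for (BlockStoragePolicy policy : dfs.getStoragePolicies()) {
  if (policy.getId() == storagePolicyId) {
    policyName = policy.getName();
    break;
  }
}
System.out.println("Storage policy " + storagePolicyId + " -> " + policyName);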
Configuration conf) {
  Set<InetSocketAddress> addresses = new HashSet<>();
  String serviceName = fs.getCanonicalServiceName();
  URI uri = fs.getUri();
  int port = uri.getPort();
  if (port < 0) {
    int idx = serviceName.indexOf(':');
    port = Integer.parseInt(serviceName.substring(idx + 1));
  }
  InetSocketAddress addr = new InetSocketAddress(uri.getHost(), port);
  addresses.add(addr);
dfs.recoverLease(path);
sw.start();
boolean isClosed = dfs.isFileClosed(path);
while (!isClosed && deadline.hasTimeLeft()) {
  try {
    Thread.sleep(500L); // poll interval assumed; the sleep and its catch were lost in the excerpt
  } catch (InterruptedException e1) {
    throw new IOException("Recovering the lease failed: ", e1);
  }
  isClosed = dfs.isFileClosed(path);
}
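// A self-contained sketch of the same recover-and-poll pattern as a helper method.
// Only public HDFS client APIs are used; the 500 ms poll interval and the explicit
// timeout parameter are assumptions, not taken from the excerpt.
import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

/** Requests lease recovery and polls until the file is closed or the timeout elapses. */
static boolean waitForLeaseRecovery(DistributedFileSystem dfs, Path path, long timeoutMillis)
    throws IOException, InterruptedException {
  if (dfs.recoverLease(path)) {
    return true; // lease recovered immediately and the file is already closed
  }
  long deadline = System.currentTimeMillis() + timeoutMillis;
  while (System.currentTimeMillis() < deadline) {
    if (dfs.isFileClosed(path)) {
      return true;
    }
    Thread.sleep(500L); // assumed poll interval
  }
  return false; // timed out; the caller may retry recoverLease()
}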
String[] racks = null;
String[] hosts = null;
miniMR = new MiniMRCluster(numTaskTrackers, miniDFS.getFileSystem().getUri().toString(),
    numTaskTrackerDirectories, racks, hosts, new JobConf(conf));
JobConf jobConf = miniMR.createJobConf(new JobConf(conf));
System.out.println("-------" + jobConf.get("fs.defaultFS"));
System.out.println("-------" + miniDFS.getFileSystem().getUri().toString());
System.setProperty("mapred.job.tracker", jobConf.get("mapred.job.tracker"));
} catch (IOException e) {
private static DistCpOptions getOptions(int nMaps) throws Exception {
  Path sourcePath = new Path(cluster.getFileSystem().getUri().toString() + "/tmp/source");
  Path targetPath = new Path(cluster.getFileSystem().getUri().toString() + "/tmp/target");
  List<Path> sourceList = new ArrayList<Path>();
  sourceList.add(sourcePath);
  final DistCpOptions distCpOptions = new DistCpOptions(sourceList, targetPath);
  distCpOptions.setMaxMaps(nMaps);
  return distCpOptions;
}
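// A hedged sketch of how options built this way are typically handed to DistCp itself;
// the configuration and the map count are illustrative.
Configuration conf = new Configuration();
DistCpOptions options = getOptions(20);
DistCp distCp = new DistCp(conf, options);
distCp.execute(); // submits the copy job and waits for it to finish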
Configuration sconf = new Configuration();
MiniDFSCluster cluster = new MiniDFSCluster(sconf, 1, true, null);
final int nameNodePort = cluster.getNameNodePort();
FileSystem fs = cluster.getFileSystem();
assertTrue(fs instanceof DistributedFileSystem);
DistributedFileSystem directDfs = (DistributedFileSystem) fs;

// Client configuration pointing at the same NameNode through a custom socket factory.
// The cconf setup lines were missing from the excerpt; the reconstruction below is an
// assumption based on the dangling DummySocketFactory argument.
Configuration cconf = new Configuration();
FileSystem.setDefaultUri(cconf, "hdfs://localhost:" + nameNodePort);
cconf.set("hadoop.rpc.socket.factory.class.default",
    "org.apache.hadoop.ipc.DummySocketFactory");
fs = FileSystem.get(cconf);
assertTrue(fs instanceof DistributedFileSystem);
DistributedFileSystem dfs = (DistributedFileSystem) fs;

Path filePath = new Path("/dir");
assertFalse(directDfs.exists(filePath));
assertFalse(dfs.exists(filePath));
directDfs.mkdirs(filePath);
assertTrue(directDfs.exists(filePath));
assertTrue(dfs.exists(filePath));
dfs.close();
directDfs.close();
@Before
public void setup() throws Exception {
  Configuration hdfsConf = new HdfsConfiguration();
  hdfsConf.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 0);
  String namenodeDir = new File(MiniDFSCluster.getBaseDirectory(), "name").getAbsolutePath();
  hdfsConf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, namenodeDir);
  hdfsConf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, namenodeDir);
  hdfsConf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCKSIZE);
  cluster = new MiniDFSCluster.Builder(hdfsConf).numDataNodes(15).build();
  fs = cluster.getFileSystem();
  fs.enableErasureCodingPolicy(ecPolicy.getName());
  fs.setErasureCodingPolicy(new Path("/"), ecPolicy.getName());
  cluster.waitActive();
  conf = new Configuration();
  submitDir = new Path("/");
  testFile = new Path("/testfile");
  DFSTestUtil.writeFile(fs, testFile, StripedFileTestUtil.generateBytes(BLOCKSIZE));
  conf.set(FileInputFormat.INPUT_DIR, fs.getUri().toString() + testFile.toString());
}
DistributedFileSystem dfs = new DistributedFileSystem();
// The NameNode URI below is a hypothetical placeholder (the original read "URI to HDFS",
// which is not a valid URI literal); substitute your cluster's actual address.
dfs.initialize(new URI("hdfs://namenode.example.com:8020"), new Configuration());
Path path = new Path("/user/hadoop-user/bar.txt");
if (!dfs.exists(path)) {
  dfs.createNewFile(path);
}
// Note: create() truncates and recreates the file by default, so the createNewFile()
// call above is redundant unless an empty file must exist before this point.
FSDataOutputStream dos = dfs.create(path);
Configuration conf = new Configuration();
dfs.waitActive();
fileSys = dfs.getFileSystem();
if (!fileSys.mkdirs(inDir)) {
  throw new IOException("Mkdirs failed to create " + inDir.toString());
}
UtilsForTests.writeFile(dfs.getNameNode(), conf, new Path(inDir + "/file"), (short) 1);
namenode = (dfs.getFileSystem()).getUri().getHost() + ":" +
    (dfs.getFileSystem()).getUri().getPort();
mr = new MiniMRCluster(taskTrackers, namenode, 1,
    new String[] {rack2}, new String[] {"host2.com"}, jc);
mr.shutdown();
} finally {
  if (null != fileSys) {
    fileSys.delete(inDir, true);
    fileSys.delete(outputPath, true);
@Test
public void testRun() throws Exception {
  final URI uri = cluster.getFileSystem().getUri();
  final String pathString = uri.toString();
  Path fileSystemPath = new Path(pathString);
  Path source = new Path(fileSystemPath.toString() + "/tmp/source");
  Path target = new Path(fileSystemPath.toString() + "/tmp/target");
  Path listingPath = new Path(fileSystemPath.toString() + "/tmp/META/fileList.seq");
  DistCpOptions options = new DistCpOptions(Arrays.asList(source), target);
  options.setTargetPathExists(false);
  new GlobbedCopyListing(new Configuration(), CREDENTIALS).buildListing(listingPath, options);
  verifyContents(listingPath);
}
/** Permit paths which explicitly specify the default port. */
protected void checkPath(Path path) {
  URI thisUri = this.getUri();
  URI thatUri = path.toUri();
  String thatAuthority = thatUri.getAuthority();
  if (thatUri.getScheme() != null
      && thatUri.getScheme().equalsIgnoreCase(thisUri.getScheme())
      && thatUri.getPort() == NameNode.DEFAULT_PORT
      && thisUri.getPort() == -1
      && thatAuthority.substring(0, thatAuthority.indexOf(":"))
          .equalsIgnoreCase(thisUri.getAuthority())) {
    return;
  }
  super.checkPath(path);
}
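// Illustration of what the override permits, written from inside the subclass since
// checkPath() is protected. The host is made up and 8020 is assumed for
// NameNode.DEFAULT_PORT; with fs.defaultFS = hdfs://nn.example.com (no port):
checkPath(new Path("hdfs://nn.example.com/data/a"));       // same authority, accepted by super
checkPath(new Path("hdfs://nn.example.com:8020/data/a"));  // explicit default port, accepted here
// checkPath(new Path("hdfs://nn.example.com:9000/data/a")); // non-default port: falls through
//                                                           // to super.checkPath() and throws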
public void testPipes() throws IOException {
  if (System.getProperty("compile.c++") == null) {
    LOG.info("compile.c++ is not defined, so skipping TestPipes");
    return;
  }
  MiniDFSCluster dfs = null;
  MiniMRCluster mr = null;
  Path inputPath = new Path("testing/in");
  Path outputPath = new Path("testing/out");
  try {
    final int numSlaves = 2;
    Configuration conf = new Configuration();
    dfs = new MiniDFSCluster.Builder(conf).numDataNodes(numSlaves).build();
    mr = new MiniMRCluster(numSlaves, dfs.getFileSystem().getUri().toString(), 1);
    writeInputFile(dfs.getFileSystem(), inputPath);
    runProgram(mr, dfs, wordCountSimple, inputPath, outputPath, 3, 2, twoSplitOutput, null);
    cleanup(dfs.getFileSystem(), outputPath);
    runProgram(mr, dfs, wordCountSimple, inputPath, outputPath, 3, 0, noSortOutput, null);
    cleanup(dfs.getFileSystem(), outputPath);
    runProgram(mr, dfs, wordCountPart, inputPath, outputPath, 3, 2, fixedPartitionOutput, null);
    runNonPipedProgram(mr, dfs, wordCountNoPipes, null);
    mr.waitUntilIdle();
  } finally {
    // Guard against NPEs when cluster startup itself failed.
    if (mr != null) {
      mr.shutdown();
    }
    if (dfs != null) {
      dfs.shutdown();
    }
  }
}
fs = FileSystem.newInstance(new URI(pathStr), getConf(), null);
} catch (URISyntaxException e) {
  // The tail of this message was truncated in the excerpt; completing it with the
  // exception text and an error exit is an assumption.
  System.err.println("URISyntaxException for " + pathStr + ": " + e.getMessage());
  return 1;
} catch (ClassCastException e) {
  System.err.println("Invalid filesystem for path " + pathStr + ": " +
      "needed scheme hdfs, but got: " + fs.getScheme());
  return 1;
}
IOException ioe = null;
try {
  recovered = dfs.recoverLease(new Path(pathStr));
} catch (FileNotFoundException e) {
  System.err.println("recoverLease got exception: " + e.getMessage());
@SuppressWarnings("deprecation") @BeforeClass public static void setUp() throws Exception { System.setProperty("hadoop.log.dir", "logs"); Configuration conf = new Configuration(); dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(numSlaves) .build(); jConf = new JobConf(conf); FileSystem.setDefaultUri(conf, dfsCluster.getFileSystem().getUri().toString()); mrCluster = MiniMRClientClusterFactory.create(TestMRCredentials.class, 1, jConf); createKeysAsJson("keys.json"); }
cluster.startDataNodes(conf, 1, true, null, new String[]{"/r4"}, new String[]{host4}, null);
cluster.waitClusterUp();
String rootDir = new Path(FSUtils.getWALRootDir(conf) + "/" + HConstants.HREGION_LOGDIR_NAME +
    "/" + targetRs.getServerName().toString()).toUri().getPath();
h.put(p);
DirectoryListing dl = dfs.getClient().listPaths(rootDir, HdfsFileStatus.EMPTY_NAME);
HdfsFileStatus[] hfs = dl.getPartialListing();
// The loop header binding `hf` was dropped from the excerpt; iterating over the
// partial listing is the assumed reconstruction.
for (HdfsFileStatus hf : hfs) {
  LOG.info("Log file found: " + hf.getLocalName() + " in " + rootDir);
  String logFile = rootDir + "/" + hf.getLocalName();
  FileStatus fsLog = rfs.getFileStatus(new Path(logFile));
@Override
public List<HdfsFileStatusWithId> listLocatedHdfsStatus(
    FileSystem fs, Path p, PathFilter filter) throws IOException {
  DistributedFileSystem dfs = ensureDfs(fs);
  DFSClient dfsc = dfs.getClient();
  final String src = p.toUri().getPath();
  DirectoryListing current = dfsc.listPaths(src,
      org.apache.hadoop.hdfs.protocol.HdfsFileStatus.EMPTY_NAME, true);
  if (current == null) { // the directory does not exist
    throw new FileNotFoundException("File " + p + " does not exist.");
  }
  final URI fsUri = fs.getUri();
  List<HdfsFileStatusWithId> result = new ArrayList<HdfsFileStatusWithId>(
      current.getPartialListing().length);
  while (current != null) {
    org.apache.hadoop.hdfs.protocol.HdfsFileStatus[] hfss = current.getPartialListing();
    for (int i = 0; i < hfss.length; ++i) {
      HdfsLocatedFileStatus next = (HdfsLocatedFileStatus) (hfss[i]);
      if (filter != null) {
        Path filterPath = next.getFullPath(p).makeQualified(fsUri, null);
        if (!filter.accept(filterPath)) continue;
      }
      LocatedFileStatus lfs = next.makeQualifiedLocated(fsUri, p);
      result.add(new HdfsFileStatusWithIdImpl(lfs, next.getFileId()));
    }
    current = current.hasMore() ? dfsc.listPaths(src, current.getLastName(), true) : null;
  }
  return result;
}
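// A hedged usage sketch for the shim method above; the ShimLoader entry point and the
// directory path are illustrative assumptions.
FileSystem fs = FileSystem.get(new Configuration());
Path dir = new Path("/warehouse/db/table"); // illustrative
List<HdfsFileStatusWithId> files =
    ShimLoader.getHadoopShims().listLocatedHdfsStatus(fs, dir, null); // null filter accepts all
for (HdfsFileStatusWithId f : files) {
  System.out.println(f.getFileStatus().getPath() + " fileId=" + f.getFileId());
}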
Path partPath = new Path(file);
try {
  Path partPendingPath = getPendingPathFor(partPath);
  Path partInProgressPath = getInProgressPathFor(partPath);
  if (fs.exists(partPendingPath)) {
    LOG.debug("In-progress file {} has been moved to pending after checkpoint, moving to final location.", partPath);
    fs.rename(partPendingPath, partPath);
  } else if (fs.exists(partInProgressPath)) {
    LOG.debug("In-progress file {} is still in-progress, moving to final location.", partPath);
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    LOG.debug("Trying to recover file lease {}", partPath);
    dfs.recoverLease(partPath);
    boolean isclosed = dfs.isFileClosed(partPath);
    StopWatch sw = new StopWatch();
    sw.start();
    // The polling loop body was lost in the excerpt; a bounded sleep-and-recheck loop
    // (500 ms interval, `asyncTimeout` bound) is assumed here.
    while (!isclosed && sw.getTime() < asyncTimeout) {
      try {
        Thread.sleep(500L);
      } catch (InterruptedException e1) {
        // ignore and keep polling
      }
      isclosed = dfs.isFileClosed(partPath);
    }
conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY,
    conf.get(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, ""));
URI dfsUri = dfs.getUri();
boolean isHaEnabled = HAUtilClient.isLogicalUri(conf, dfsUri);
String nsId = dfsUri.getHost();
// The excerpt fused the HA and non-HA code paths into one statement; the if/else
// dispatch below is restored from context.
if (isHaEnabled) {
  List<ProxyAndInfo<RefreshCallQueueProtocol>> proxies =
      HAUtil.getProxiesForAllNameNodesInNameservice(conf, nsId,
          RefreshCallQueueProtocol.class);
  // ... refresh the call queue on each proxy (elided in the excerpt) ...
} else {
  RefreshCallQueueProtocol refreshProtocol =
      NameNodeProxies.createProxy(conf, FileSystem.getDefaultUri(conf),
          RefreshCallQueueProtocol.class).getProxy();
}