private int countNonZeroLengthFiles(String path) throws IOException {
  Path p = new Path(path);
  int nonZero = 0;
  for (FileStatus file : fs.listStatus(p)) {
    if (file.getLen() > 0) {
      nonZero++;
    }
  }
  return nonZero;
}
private int countZeroLengthFiles(String path) throws IOException {
  Path p = new Path(path);
  int zeroLength = 0;
  for (FileStatus file : fs.listStatus(p)) {
    if (file.getLen() == 0) {
      zeroLength++;
    }
  }
  return zeroLength;
}
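A minimal sketch of how the two counting helpers above might back a test assertion; the outputDir path, the expected count of 3, and the JUnit assertEquals calls are assumptions for illustration, not part of the original examples:

// Hypothetical check: every reducer wrote a non-empty part file and nothing was left empty.
String outputDir = "/tmp/job-output";                // assumed output path
assertEquals(3, countNonZeroLengthFiles(outputDir)); // assumed number of reducers
assertEquals(0, countZeroLengthFiles(outputDir));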
private void verifyAllAvroFiles(String path) throws IOException {
  Path p = new Path(path);
  for (FileStatus file : fs.listStatus(p)) {
    if (file.getLen() > 0) {
      fileIsGoodAvro(file.getPath());
    }
  }
}
@After
public void tearDown() throws Exception {
  FileStatus[] entries = FS.listStatus(new Path("/"));
  for (FileStatus dir : entries) {
    FS.delete(dir.getPath(), true);
  }
}
List<Path> paths = Lists.newArrayList();
Path firstPart = null;
for (FileStatus outputFile : fs.listStatus(outputPath)) {
  if (outputFile.getPath().getName().startsWith("part")) {
    if (firstPart == null) {
/**
 * List the statuses of the files/directories in the given path if the path
 * is a directory.
 *
 * @param location the path to list
 * @return the statuses of the files/directories under the given path
 * @throws IOException if the listing fails
 */
public FileStatus[] listStatus(Path location) throws IOException {
  return dfs.listStatus(location);
}
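A minimal sketch of how this wrapper might be exercised; the /user/test path and the printing loop are assumptions, not part of the original example:

// Hypothetical usage of the wrapper above: list a directory and print each entry.
FileStatus[] statuses = listStatus(new Path("/user/test"));
for (FileStatus status : statuses) {
  System.out.println(status.getPath() + " len=" + status.getLen());
}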
public static void copyToLocalDir(DistributedFileSystem dfs, Path dfsPath,
    File outDir, HiveConf hiveConf) {
  try {
    for (FileStatus file : dfs.listStatus(dfsPath)) {
      if (file.isDirectory()) {
        copyToLocalDir(dfs, file.getPath(), outDir, hiveConf);
      } else {
        String[] filePathTokens = file.getPath().toString().split("/");
        File outFile = new File(outDir.getAbsoluteFile() + "/"
            + filePathTokens[filePathTokens.length - 1]);
        // NOTE: Avoid dfs.copyToLocalFile(src, dst).
        // dfs.copyToLocalFile(src, dst) eventually calls FileUtil.copy(), which
        // internally creates a Configuration object that differs from the
        // generated hiveConf for the miniCluster, and hence fails to find the
        // source files.
        FileUtil.copy(dfs, file.getPath(), outFile, false, hiveConf);
      }
    }
  } catch (IOException e) {
    e.printStackTrace();
  }
}
/**
 * List the files of the directory.
 *
 * @return the files in the directory, or null if the directory cannot be read
 */
public FileObject[] listFiles() {
  if (!hasReadPermission()) {
    log.debug("No read permission : " + path);
    return null;
  }
  try {
    DistributedFileSystem dfs = HdfsOverFtpSystem.getDfs();
    FileStatus[] fileStats = dfs.listStatus(path);
    FileObject[] fileObjects = new FileObject[fileStats.length];
    for (int i = 0; i < fileStats.length; i++) {
      fileObjects[i] = new HdfsFileObject(fileStats[i].getPath().toString(), user);
    }
    return fileObjects;
  } catch (IOException e) {
    log.debug("", e);
    return null;
  }
}
String checkSnapshot(String snapshot) throws IOException {
  final StringBuilder b = new StringBuilder("checkSnapshot: ").append(snapshot);

  final File subDir = new File(localDir, snapshot);
  Assert.assertTrue(subDir.exists());

  final File[] localFiles = subDir.listFiles(FILE_ONLY);
  final Path p = snapshotPaths.get(snapshot);
  final FileStatus[] statuses = dfs.listStatus(p);
  Assert.assertEquals(localFiles.length, statuses.length);
  b.append(p).append(" vs ").append(subDir).append(", ")
      .append(statuses.length).append(" entries");

  Arrays.sort(localFiles);
  Arrays.sort(statuses);
  for (int i = 0; i < statuses.length; i++) {
    FileWorker.checkFullFile(statuses[i].getPath(), localFiles[i]);
  }
  return b.toString();
}
@After
public void tearDown() throws IOException {
  if (fs.exists(new Path("/sub1"))) {
    if (fs.exists(new Path("/sub1/.snapshot"))) {
      for (FileStatus st : fs.listStatus(new Path("/sub1/.snapshot"))) {
        fs.deleteSnapshot(new Path("/sub1"), st.getPath().getName());
      }
      fs.disallowSnapshot(new Path("/sub1"));
    }
    fs.delete(new Path("/sub1"), true);
  }
}
/**
 * Check the functionality of a snapshot.
 *
 * @param hdfs DistributedFileSystem instance
 * @param snapshotRoot The root of the snapshot
 * @param snapshottedDir The snapshotted directory
 */
public static void checkSnapshotCreation(DistributedFileSystem hdfs,
    Path snapshotRoot, Path snapshottedDir) throws Exception {
  // Currently we only check if the snapshot was created successfully
  assertTrue(hdfs.exists(snapshotRoot));
  // Compare the snapshot with the current dir
  FileStatus[] currentFiles = hdfs.listStatus(snapshottedDir);
  FileStatus[] snapshotFiles = hdfs.listStatus(snapshotRoot);
  assertEquals("snapshottedDir=" + snapshottedDir + ", snapshotRoot="
      + snapshotRoot, currentFiles.length, snapshotFiles.length);
}
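A minimal sketch of how checkSnapshotCreation might be driven from a test; the /foo directory and the snapshot name "s1" are assumptions, while allowSnapshot and createSnapshot are the standard DistributedFileSystem calls for taking a snapshot:

// Hypothetical driver for the helper above (directory and snapshot name are made up).
Path dir = new Path("/foo");
hdfs.mkdirs(dir);
hdfs.allowSnapshot(dir);                             // enable snapshots on the directory
Path snapshotRoot = hdfs.createSnapshot(dir, "s1");  // e.g. /foo/.snapshot/s1
checkSnapshotCreation(hdfs, snapshotRoot, dir);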
private void verifyCopy(FileStatus s, FileStatus t, boolean compareName)
    throws Exception {
  Assert.assertEquals(s.isDirectory(), t.isDirectory());
  if (compareName) {
    Assert.assertEquals(s.getPath().getName(), t.getPath().getName());
  }
  if (!s.isDirectory()) {
    // verify the file content is the same
    byte[] sbytes = DFSTestUtil.readFileBuffer(dfs, s.getPath());
    byte[] tbytes = DFSTestUtil.readFileBuffer(dfs, t.getPath());
    Assert.assertArrayEquals(sbytes, tbytes);
  } else {
    FileStatus[] slist = dfs.listStatus(s.getPath());
    FileStatus[] tlist = dfs.listStatus(t.getPath());
    Assert.assertEquals(slist.length, tlist.length);
    for (int i = 0; i < slist.length; i++) {
      verifyCopy(slist[i], tlist[i], true);
    }
  }
}
@Test(timeout = 120000)
public void testListRecursive() throws Exception {
  Path rootPath = new Path("/");
  Path p = rootPath;
  for (int i = 0; i < 3; i++) {
    p = new Path(p, "dir" + i);
    fs.mkdirs(p);
  }

  Path curPath = new Path("/.reserved/raw");
  int cnt = 0;
  FileStatus[] fileStatuses = fs.listStatus(curPath);
  while (fileStatuses != null && fileStatuses.length > 0) {
    FileStatus f = fileStatuses[0];
    assertMatches(f.getPath().toString(), "/.reserved/raw");
    curPath = Path.getPathWithoutSchemeAndAuthority(f.getPath());
    cnt++;
    fileStatuses = fs.listStatus(curPath);
  }
  assertEquals(3, cnt);
}
@Override
public Object run() throws Exception {
  final DistributedFileSystem fs = cluster.getFileSystem();
  try {
    fs.listStatus(ezRawEncFile);
    fail("access to /.reserved/raw is superuser-only operation");
  } catch (AccessControlException e) {
    assertExceptionContains("Superuser privilege is required", e);
  }
  return null;
}
});
public void test001DirectDirDeletion() throws Exception {
  Configuration hconf = getMiniClusterConf();
  int numDataNodes = 1;
  initMiniCluster(hconf, numDataNodes);

  byte[] keyBytes1 = BlobHelper.serializeToBlob("key-1");
  final HoplogOrganizer<SortedHoplogPersistedEvent> organizer =
      new HdfsSortedOplogOrganizer(regionManager, 1);

  int count = 10;
  ArrayList<TestEvent> items = new ArrayList<TestEvent>();
  for (int i = 0; i < count; i++) {
    items.add(new TestEvent(("key-" + i), ("value-" + System.nanoTime())));
  }
  organizer.flush(items.iterator(), count);
  assertTrue(((String) organizer.read(keyBytes1).getValue()).startsWith("value-"));

  FileStatus[] hoplogs = getBucketHoplogs(getName() + "/" + 1, "");
  assertEquals(1, hoplogs.length);

  FileStatus[] testDir = cluster.getFileSystem().listStatus(testDataDir);
  assertNotNull(testDir);
  // Note that we also have a "cleanUpInterval" file
  assertEquals(2, testDir.length);

  cluster.getFileSystem().delete(testDataDir, true);

  organizer.flush(items.iterator(), count);
  hoplogs = getBucketHoplogs(getName() + "/" + 1, "");
  assertEquals(1, hoplogs.length);
}
private static void checkOuterConsistency(Job job, Path[] src) throws IOException {
  Path outf = FileOutputFormat.getOutputPath(job);
  FileStatus[] outlist = cluster.getFileSystem().listStatus(outf,
      new Utils.OutputFileUtils.OutputFilesFilter());
  assertEquals("number of part files is more than 1. It is " + outlist.length,
      1, outlist.length);
  assertTrue("output file with zero length " + outlist[0].getLen(),
      0 < outlist[0].getLen());
  SequenceFile.Reader r = new SequenceFile.Reader(cluster.getFileSystem(),
      outlist[0].getPath(), job.getConfiguration());
  IntWritable k = new IntWritable();
  IntWritable v = new IntWritable();
  while (r.next(k, v)) {
    assertEquals("counts do not match", v.get(),
        countProduct(k, src, job.getConfiguration()));
  }
  r.close();
}