@Test
public void testWriteLargeChunk() throws IOException, InterruptedException, ExecutionException {
  // Write a single 50 MB chunk through the fan-out output and verify it
  // round-trips intact through HDFS.
  Path file = new Path("/" + name.getMethodName());
  EventLoop loop = EVENT_LOOP_GROUP.next();
  FanOutOneBlockAsyncDFSOutput output = FanOutOneBlockAsyncDFSOutputHelper.createOutput(FS, file,
      true, false, (short) 3, 1024 * 1024 * 1024, loop, CHANNEL_CLASS);
  byte[] payload = new byte[50 * 1024 * 1024];
  ThreadLocalRandom.current().nextBytes(payload);
  output.write(payload);
  output.flush(false);
  // The second flush reports the total acked length so far.
  assertEquals(payload.length, output.flush(false).get().longValue());
  output.close();
  assertEquals(payload.length, FS.getFileStatus(file).getLen());
  byte[] readBack = new byte[payload.length];
  try (FSDataInputStream in = FS.open(file)) {
    in.readFully(readBack);
  }
  assertArrayEquals(payload, readBack);
}
}
// NOTE(review): truncated fragment — starts a do/while that presumably polls
// block locations for the file at p; the loop body and exit condition are not
// visible here, so behavior past this line cannot be confirmed.
FileStatus f = dfs.getFileStatus(p); BlockLocation[] lbs; do {
@Test public void testRecover() throws IOException, InterruptedException, ExecutionException { Path f = new Path("/" + name.getMethodName()); EventLoop eventLoop = EVENT_LOOP_GROUP.next(); FanOutOneBlockAsyncDFSOutput out = FanOutOneBlockAsyncDFSOutputHelper.createOutput(FS, f, true, false, (short) 3, FS.getDefaultBlockSize(), eventLoop, CHANNEL_CLASS); byte[] b = new byte[10]; ThreadLocalRandom.current().nextBytes(b); out.write(b, 0, b.length); out.flush(false).get(); // restart one datanode which causes one connection broken TEST_UTIL.getDFSCluster().restartDataNode(0); out.write(b, 0, b.length); try { out.flush(false).get(); fail("flush should fail"); } catch (ExecutionException e) { // we restarted one datanode so the flush should fail LOG.info("expected exception caught", e); } out.recoverAndClose(null); assertEquals(b.length, FS.getFileStatus(f).getLen()); byte[] actual = new byte[b.length]; try (FSDataInputStream in = FS.open(f)) { in.readFully(actual); } assertArrayEquals(b, actual); }
// NOTE(review): truncated fragment — iterates file URIs, stats each one on the
// mini-cluster filesystem, and begins resolving its block locations; the
// statement continues past this view and cannot be fully documented from here.
for (String file : files) { FileStatus status = TEST_UTIL.getDFSCluster().getFileSystem(). getFileStatus(new Path(new URI(file).getPath())); BlockLocation[] lbks = ((DistributedFileSystem)TEST_UTIL.getDFSCluster().getFileSystem())
/**
 * Returns the status information about the file at the given path, delegating
 * to the wrapped {@code DistributedFileSystem}.
 *
 * @param location path of the file or directory to stat
 * @return the {@code FileStatus} describing {@code location}
 * @throws IOException if the underlying filesystem call fails
 */
public FileStatus getFileStatus(Path location) throws IOException {
  return dfs.getFileStatus(location);
}
/**
 * Returns the stat information about the file, delegating to the underlying
 * filesystem obtained via {@code getDFS()}.
 *
 * @param f path of the file or directory to stat
 * @return the {@code FileStatus} describing {@code f}
 * @throws IOException if the underlying filesystem call fails
 */
@Override
public FileStatus getFileStatus(Path f) throws IOException {
  return getDFS().getFileStatus(f);
}
/**
 * Returns the stat information about the file, delegating to the underlying
 * filesystem obtained via {@code getDFS()}.
 *
 * @param f path of the file or directory to stat
 * @return the {@code FileStatus} describing {@code f}
 * @throws IOException if the underlying filesystem call fails
 */
@Override
public FileStatus getFileStatus(Path f) throws IOException {
  return getDFS().getFileStatus(f);
}
/**
 * Returns this file's last modification time.
 *
 * @return modification time in milliseconds since the epoch, or 0 if the
 *         status cannot be read (errors are reported and swallowed)
 */
public long getLastModified() {
  try {
    DistributedFileSystem fileSystem = HdfsOverFtpSystem.getDfs();
    FileStatus status = fileSystem.getFileStatus(path);
    return status.getModificationTime();
  } catch (IOException e) {
    // Best-effort contract: report the failure and fall back to 0.
    e.printStackTrace();
    return 0;
  }
}
/** * Checks if the object does exist * * @return true if the object does exist */ public boolean doesExist() { try { DistributedFileSystem dfs = HdfsOverFtpSystem.getDfs(); dfs.getFileStatus(path); return true; } catch (IOException e) { // log.debug(path + " does not exist", e); return false; } }
@Override
boolean checkNamenodeBeforeReturn() throws Exception {
  // Poll the namenode until the target file reaches the expected concatenated
  // length, retrying up to CHECKTIMES with a one-second pause between checks.
  Path targetPath = new Path(target);
  long expectedLen = BlockSize * (srcs.length + 1);
  boolean done = dfs.getFileStatus(targetPath).getLen() == expectedLen;
  int attempts = 0;
  while (!done && attempts < CHECKTIMES) {
    Thread.sleep(1000);
    done = dfs.getFileStatus(targetPath).getLen() == expectedLen;
    attempts++;
  }
  return done;
}
/**
 * Asserts the DFS file's length matches the local copy and fits in an int.
 *
 * @return the file length, narrowed to int
 */
static int checkLength(Path file, File localFile) throws IOException {
  long dfsLength = dfs.getFileStatus(file).getLen();
  Assert.assertEquals(localFile.length(), dfsLength);
  Assert.assertTrue(dfsLength <= Integer.MAX_VALUE);
  return (int) dfsLength;
}
/** Runs a sync and verifies the target now mirrors the source. */
private void syncAndVerify() throws Exception {
  Assert.assertTrue(sync());
  FileStatus sourceStatus = dfs.getFileStatus(source);
  FileStatus targetStatus = dfs.getFileStatus(target);
  verifyCopy(sourceStatus, targetStatus, false);
}
@Test public void testTruncateShellCommandWithWaitOption() throws Exception { final Path parent = new Path("/test"); final Path src = new Path("/test/testTruncateShellCommandWithWaitOption"); final int oldLength = 2 * BLOCK_SIZE + 1; final int newLength = BLOCK_SIZE + 1; String[] argv = new String[]{"-truncate", "-w", String.valueOf(newLength), src.toString()}; runTruncateShellCommand(src, oldLength, argv); // shouldn't need to wait for block recovery assertThat(fs.getFileStatus(src).getLen(), is((long) newLength)); fs.delete(parent, true); }
@Test public void testTruncateShellCommandOnBlockBoundary() throws Exception { final Path parent = new Path("/test"); final Path src = new Path("/test/testTruncateShellCommandOnBoundary"); final int oldLength = 2 * BLOCK_SIZE; final int newLength = BLOCK_SIZE; String[] argv = new String[]{"-truncate", String.valueOf(newLength), src.toString()}; runTruncateShellCommand(src, oldLength, argv); // shouldn't need to wait for block recovery assertThat(fs.getFileStatus(src).getLen(), is((long) newLength)); fs.delete(parent, true); }
@Override
void prepare() throws Exception {
  // Create the concat target plus every source file, each one block long,
  // then sanity-check the target's initial length.
  final Path targetPath = new Path(target);
  DFSTestUtil.createFile(dfs, targetPath, BlockSize, DataNodes, 0);
  for (Path srcPath : srcPaths) {
    DFSTestUtil.createFile(dfs, srcPath, BlockSize, DataNodes, 0);
  }
  assertEquals(BlockSize, dfs.getFileStatus(targetPath).getLen());
}
@Test public void testTruncateShellCommand() throws Exception { final Path parent = new Path("/test"); final Path src = new Path("/test/testTruncateShellCommand"); final int oldLength = 2*BLOCK_SIZE + 1; final int newLength = BLOCK_SIZE + 1; String[] argv = new String[]{"-truncate", String.valueOf(newLength), src.toString()}; runTruncateShellCommand(src, oldLength, argv); // wait for block recovery checkBlockRecovery(src); assertThat(fs.getFileStatus(src).getLen(), is((long) newLength)); fs.delete(parent, true); }
// Executed as a non-superuser (enclosing doAs context is outside this view):
// any access under /.reserved/raw must be rejected.
@Override public Object run() throws Exception {
  final DistributedFileSystem fs = cluster.getFileSystem();
  try {
    fs.getFileStatus(ezRawEncFile);
    fail("access to /.reserved/raw is superuser-only operation");
  } catch (AccessControlException e) {
    // Expected rejection for a non-superuser caller.
    assertExceptionContains("Superuser privilege is required", e);
  }
  return null;
} });
/**
 * Asserts that both encryption-zone directories and the files inside them
 * report encrypted status, that each zone file's decrypted contents match its
 * plaintext baseline, and that the raw (ciphertext) views of the two zone
 * files differ from each other.
 */
private void verifyEncryption() throws Exception {
  assertEquals("Top EZ dir is encrypted", true, fs.getFileStatus(topEZDir).isEncrypted());
  assertEquals("Nested EZ dir is encrypted", true, fs.getFileStatus(nestedEZDir).isEncrypted());
  assertEquals("Top zone file is encrypted", true, fs.getFileStatus(topEZFile).isEncrypted());
  assertEquals("Nested zone file is encrypted", true, fs.getFileStatus(nestedEZFile).isEncrypted());
  // Decrypted contents must match the unencrypted baseline copies.
  DFSTestUtil.verifyFilesEqual(fs, topEZBaseFile, topEZFile, len);
  DFSTestUtil.verifyFilesEqual(fs, nestedEZBaseFile, nestedEZFile, len);
  // The raw ciphertext of the two zones' files must not match.
  DFSTestUtil.verifyFilesNotEqual(fs, topEZRawFile, nestedEZRawFile, len);
}
}
@Test
public void testConcatRelativeTargetPath() throws IOException {
  // Concat must resolve a relative target path against the working directory.
  final Path dir = new Path("/dir");
  final Path relativeTarget = new Path("trg");
  final Path source = new Path(dir, "src");
  dfs.setWorkingDirectory(dir);
  DFSTestUtil.createFile(dfs, relativeTarget, blockSize, REPL_FACTOR, 1);
  DFSTestUtil.createFile(dfs, source, blockSize, REPL_FACTOR, 1);
  dfs.concat(relativeTarget, new Path[] { source });
  // Target absorbed the source's block; the source itself is gone.
  assertEquals(blockSize * 2, dfs.getFileStatus(relativeTarget).getLen());
  assertFalse(dfs.exists(source));
}
}
/**
 * Runs a sync, logging recursive listings of source and target before and
 * after, then verifies the target matches the source.
 */
private void syncAndVerify() throws Exception {
  final FsShell shell = new FsShell(conf);
  lsrSource("Before sync source: ", shell, source);
  lsr("Before sync target: ", shell, target);
  Assert.assertTrue(sync());
  lsrSource("After sync source: ", shell, source);
  lsr("After sync target: ", shell, target);
  FileStatus sourceStatus = dfs.getFileStatus(source);
  FileStatus targetStatus = dfs.getFileStatus(target);
  verifyCopy(sourceStatus, targetStatus, false);
}