// Open an append stream and immediately close it (closeStream ignores close
// errors), then delete the file recursively.
// NOTE(review): fragment — presumably the brief append is meant to exercise
// lease/append handling on idPath before deletion; confirm with surrounding code.
IOUtils.closeStream(fs.append(idPath)); fs.delete(idPath, true);
/**
 * Opens an existing file for append by delegating to the
 * {@link CreateFlag}-based overload with only {@code CreateFlag.APPEND} set.
 *
 * @param f          the file to append to
 * @param bufferSize buffer size to use for the stream
 * @param progress   progress reporter (presumably may be null — confirm with callers)
 * @return an output stream positioned at the end of the file
 * @throws IOException if the append cannot be started
 */
@Override public FSDataOutputStream append(Path f, final int bufferSize, final Progressable progress) throws IOException { return append(f, EnumSet.of(CreateFlag.APPEND), bufferSize, progress); }
/**
 * Appends to {@code f}, forwarding to the flag-based overload with the
 * {@code APPEND} flag only.
 *
 * @param f          the file to append to
 * @param bufferSize buffer size to use for the stream
 * @param progress   progress reporter (presumably may be null — confirm with callers)
 * @return an output stream positioned at the end of the file
 * @throws IOException if the append cannot be started
 */
@Override public FSDataOutputStream append(Path f, final int bufferSize, final Progressable progress) throws IOException { return append(f, EnumSet.of(CreateFlag.APPEND), bufferSize, progress); }
/**
 * Append entry point; simply delegates to the {@link EnumSet}-of-flags
 * overload requesting {@code CreateFlag.APPEND}.
 *
 * @param f          the file to append to
 * @param bufferSize buffer size to use for the stream
 * @param progress   progress reporter (presumably may be null — confirm with callers)
 * @return an output stream positioned at the end of the file
 * @throws IOException if the append cannot be started
 */
@Override public FSDataOutputStream append(Path f, final int bufferSize, final Progressable progress) throws IOException { return append(f, EnumSet.of(CreateFlag.APPEND), bufferSize, progress); }
/**
 * Opens {@code file} for append, writes {@code length} random bytes, and
 * returns the still-open stream so the caller decides when (or whether) to
 * close it.
 *
 * @param file   file to append to
 * @param length number of random bytes to write
 * @return the open append stream
 * @throws IOException if the append or write fails
 */
private HdfsDataOutputStream appendFileWithoutClosing(Path file, int length)
    throws IOException {
  final byte[] payload = new byte[length];
  new Random().nextBytes(payload);
  final HdfsDataOutputStream stream = (HdfsDataOutputStream) hdfs.append(file);
  stream.write(payload);
  return stream;
}
/** * TC1: Append on block boundary. * @throws IOException an exception might be thrown */ public void testTC1() throws Exception { final Path p = new Path("/TC1/foo"); System.out.println("p=" + p); //a. Create file and write one block of data. Close file. final int len1 = (int)BLOCK_SIZE; { FSDataOutputStream out = fs.create(p, false, buffersize, REPLICATION, BLOCK_SIZE); AppendTestUtil.write(out, 0, len1); out.close(); } // Reopen file to append. Append half block of data. Close file. final int len2 = (int)BLOCK_SIZE/2; { FSDataOutputStream out = fs.append(p); AppendTestUtil.write(out, len1, len2); out.close(); } //b. Reopen file and read 1.5 blocks worth of data. Close file. AppendTestUtil.check(fs, p, len1 + len2); }
/**
 * Appends {@code length} random bytes to {@code file} and hands the open
 * stream back to the caller — the stream is deliberately NOT closed here,
 * so the file remains under construction.
 *
 * @param file   file to append to
 * @param length number of random bytes to append
 * @return the open append stream
 * @throws IOException if the append or write fails
 */
private HdfsDataOutputStream appendFileWithoutClosing(Path file, int length)
    throws IOException {
  final byte[] data = new byte[length];
  new Random().nextBytes(data);
  final HdfsDataOutputStream appendStream =
      (HdfsDataOutputStream) hdfs.append(file);
  appendStream.write(data);
  return appendStream;
}
/**
 * Verifies that appending to an object inside a snapshot is rejected:
 * the call is expected to throw {@link SnapshotAccessControlException}.
 */
@Test(timeout=60000, expected = SnapshotAccessControlException.class) public void testAppend() throws Exception { fs.append(objInSnapshot, 65535, null); }
/** * TC5: Only one simultaneous append. * @throws IOException an exception might be thrown */ public void testTC5() throws Exception { final Path p = new Path("/TC5/foo"); System.out.println("p=" + p); //a. Create file on Machine M1. Write half block to it. Close file. { FSDataOutputStream out = fs.create(p, false, buffersize, REPLICATION, BLOCK_SIZE); AppendTestUtil.write(out, 0, (int)(BLOCK_SIZE/2)); out.close(); } //b. Reopen file in "append" mode on Machine M1. FSDataOutputStream out = fs.append(p); //c. On Machine M2, reopen file in "append" mode. This should fail. try { AppendTestUtil.createHdfsWithDifferentUsername(conf).append(p); fail("This should fail."); } catch(IOException ioe) { AppendTestUtil.LOG.info("GOOD: got an exception", ioe); } //d. On Machine M1, close file. out.close(); }
void doSmallAppends(Path file, DistributedFileSystem fs, int iterations) throws IOException { for (int i = 0; i < iterations; i++) { FSDataOutputStream stm; try { stm = fs.append(file); } catch (IOException e) { // If another thread is already appending, skip this time. continue; } // Failure in write or close will be terminal. AppendTestUtil.write(stm, 0, 123); stm.close(); } }
/**
 * TC2: Append on non-block boundary.
 * Creates a 1.5-block file, appends a quarter block, and verifies the
 * resulting 1.75 blocks of data read back correctly.
 * @throws IOException an exception might be thrown
 */
public void testTC2() throws Exception {
  final Path p = new Path("/TC2/foo");
  System.out.println("p=" + p);

  //a. Create file with one and a half block of data. Close file.
  final int len1 = (int)(BLOCK_SIZE + BLOCK_SIZE/2);
  {
    FSDataOutputStream out = fs.create(p, false, buffersize, REPLICATION,
        BLOCK_SIZE);
    AppendTestUtil.write(out, 0, len1);
    out.close();
  }
  // Verify the initial contents before appending.
  AppendTestUtil.check(fs, p, len1);

  // Reopen file to append quarter block of data. Close file.
  final int len2 = (int)BLOCK_SIZE/4;
  {
    FSDataOutputStream out = fs.append(p);
    AppendTestUtil.write(out, len1, len2);
    out.close();
  }

  //b. Reopen file and read 1.75 blocks of data. Close file.
  AppendTestUtil.check(fs, p, len1 + len2);
}
String append(int n) throws IOException { final StringBuilder b = new StringBuilder("append ") .append(n).append(" bytes to ").append(file.getName()); final byte[] bytes = new byte[n]; DFSUtil.getRandom().nextBytes(bytes); { // write to local file final FileOutputStream out = new FileOutputStream(localFile, true); out.write(bytes, 0, bytes.length); out.close(); } { final FSDataOutputStream out = dfs.append(file); out.write(bytes, 0, bytes.length); out.close(); } return b.toString(); }
/**
 * Appends {@code length} random bytes to the file at {@code p} on the test
 * cluster's filesystem, always closing the stream.
 *
 * @param p      file to append to
 * @param length number of random bytes to append
 * @throws IOException if the append or write fails
 */
private static void appendFile(Path p, int length) throws IOException {
  final byte[] payload = new byte[length];
  new Random().nextBytes(payload);
  final FSDataOutputStream stream = cluster.getFileSystem().append(p);
  try {
    stream.write(payload);
  } finally {
    // closeStream swallows secondary close failures so a write exception
    // is never masked.
    IOUtils.closeStream(stream);
  }
}
/**
 * Append specified length of bytes to a given file.
 * The appended data is random; only the length matters to callers.
 *
 * @param p      file to append to
 * @param length number of random bytes to append
 * @throws IOException if the append or write fails
 */
private static void appendFile(Path p, int length) throws IOException {
  byte[] toAppend = new byte[length];
  Random random = new Random();
  random.nextBytes(toAppend);
  FSDataOutputStream out = cluster.getFileSystem().append(p);
  try {
    out.write(toAppend);
  } finally {
    // closeStream ignores close errors so a write exception is not masked.
    IOUtils.closeStream(out);
  }
}
/**
 * Opens a file for append, rejecting the request with an
 * {@link AclException} when the file owner's permission action is
 * read-only.
 *
 * @param f          file to append to
 * @param bufferSize buffer size to use for the stream
 * @param progress   progress reporter passed through to the parent
 * @return an output stream positioned at the end of the file
 * @throws IOException if the append fails, including {@code AclException}
 *         when the owner permission is READ
 */
@Override
public FSDataOutputStream append(Path f, int bufferSize, Progressable progress)
    throws IOException {
  final FsAction ownerAction = getFileStatus(f).getPermission().getUserAction();
  if (ownerAction == FsAction.READ) {
    throw new AclException(f.getName());
  }
  return super.append(f, bufferSize, progress);
}
private void recoverLeaseUsingCreate2(Path filepath) throws Exception { FileSystem dfs2 = getFSAsAnotherUser(conf); int size = AppendTestUtil.nextInt(FILE_SIZE); DistributedFileSystem dfsx = (DistributedFileSystem) dfs2; //create file using dfsx Path filepath2 = new Path("/immediateRecoverLease-x2"); FSDataOutputStream stm = dfsx.create(filepath2, true, BUF_SIZE, REPLICATION_NUM, BLOCK_SIZE); assertTrue(dfsx.dfs.exists("/immediateRecoverLease-x2")); try {Thread.sleep(10000);} catch (InterruptedException e) {} dfsx.append(filepath); }
/**
 * Same scenario as TC1 (append on block boundary) but through the
 * flag-based append API with {@code CreateFlag.NEW_BLOCK}, so the appended
 * half block is written into a new block rather than extending the last one.
 */
@Test public void testTC1ForAppend2() throws Exception {
  final Path p = new Path("/TC1/foo2");

  //a. Create file and write one block of data. Close file.
  final int len1 = (int) BLOCK_SIZE;
  {
    FSDataOutputStream out = fs.create(p, false, buffersize, REPLICATION,
        BLOCK_SIZE);
    AppendTestUtil.write(out, 0, len1);
    out.close();
  }

  // Reopen file to append. Append half block of data. Close file.
  final int len2 = (int) BLOCK_SIZE / 2;
  {
    FSDataOutputStream out = fs.append(p,
        EnumSet.of(CreateFlag.APPEND, CreateFlag.NEW_BLOCK), 4096, null);
    AppendTestUtil.write(out, len1, len2);
    out.close();
  }

  // b. Reopen file and read 1.5 blocks worth of data. Close file.
  AppendTestUtil.check(fs, p, len1 + len2);
}
/**
 * TC1: Append on block boundary.
 * Writes exactly one block, reopens the file for append, adds half a block,
 * then verifies all 1.5 blocks read back correctly.
 * @throws IOException an exception might be thrown
 */
@Test
public void testTC1() throws Exception {
  final Path p = new Path("/TC1/foo");
  System.out.println("p=" + p);

  //a. Create file and write one block of data. Close file.
  final int len1 = (int)BLOCK_SIZE;
  {
    FSDataOutputStream out = fs.create(p, false, buffersize, REPLICATION,
        BLOCK_SIZE);
    AppendTestUtil.write(out, 0, len1);
    out.close();
  }

  // Reopen file to append. Append half block of data. Close file.
  final int len2 = (int)BLOCK_SIZE/2;
  {
    FSDataOutputStream out = fs.append(p);
    AppendTestUtil.write(out, len1, len2);
    out.close();
  }

  //b. Reopen file and read 1.5 blocks worth of data. Close file.
  AppendTestUtil.check(fs, p, len1 + len2);
}
/**
 * Similar with testRenameUCFileInSnapshot, but do renaming first and then
 * append file without closing it. Unit test for HDFS-5425.
 */
@Test
public void testAppendFileAfterRenameInSnapshot() throws Exception {
  final Path test = new Path("/test");
  final Path foo = new Path(test, "foo");
  final Path bar = new Path(foo, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
  SnapshotTestHelper.createSnapshot(hdfs, test, "s0");
  // rename bar --> bar2
  final Path bar2 = new Path(foo, "bar2");
  hdfs.rename(bar, bar2);
  // append file and keep it as underconstruction.
  FSDataOutputStream out = hdfs.append(bar2);
  out.writeByte(0);
  // hsync with UPDATE_LENGTH so the appended byte is reflected in the file
  // length while the stream stays open (file remains under construction).
  ((DFSOutputStream) out.getWrappedStream()).hsync(
      EnumSet.of(SyncFlag.UPDATE_LENGTH));
  // save namespace and restart; the image must round-trip cleanly with the
  // renamed, still-under-construction file present.
  restartClusterAndCheckImage(true);
}
// Reopen the file for append, write a single byte, and close the stream.
// NOTE(review): fragment — 'out' and 'file' are declared in surrounding code.
out = hdfs.append(file); out.write(1); out.close();