/**
 * Creates a text file at {@code file} containing {@code lineCount} lines of the
 * form "line N" followed by the platform line separator.
 *
 * @param file      destination path on the test filesystem
 * @param lineCount number of lines to write
 * @throws IOException if the file cannot be created or written
 */
private void createTextFile(Path file, int lineCount) throws IOException {
  // try-with-resources guarantees the stream is closed even if a write fails.
  // The original also accumulated a byte count into an unused local ("size"),
  // rebuilding each line a second time; that dead work is removed.
  try (FSDataOutputStream os = fs.create(file)) {
    for (int i = 0; i < lineCount; i++) {
      os.writeBytes("line " + i + System.lineSeparator());
    }
  }
}
/**
 * One-time setup: starts a mini DFS cluster, enables erasure coding (EC) on it via
 * reflection, verifies that a stream created under the EC policy does NOT report the
 * hflush capability, then starts the HBase mini cluster with stream-capability
 * enforcement turned on. Reflection is used because the EC APIs only exist on newer
 * Hadoop versions; on older Hadoop the whole test class is skipped via Assume.
 */
@BeforeClass
public static void setup() throws Exception {
  try {
    MiniDFSCluster cluster = util.startMiniDFSCluster(3); // Need 3 DNs for RS-3-2 policy
    DistributedFileSystem fs = cluster.getFileSystem();
    // Looked up reflectively: throws NoSuchMethodException on older Hadoop (caught below).
    Method enableAllECPolicies =
        DFSTestUtil.class.getMethod("enableAllECPolicies", DistributedFileSystem.class);
    enableAllECPolicies.invoke(null, fs);
    DFSClient client = fs.getClient();
    Method setErasureCodingPolicy =
        DFSClient.class.getMethod("setErasureCodingPolicy", String.class, String.class);
    setErasureCodingPolicy.invoke(client, "/", "RS-3-2-1024k"); // try a built-in policy
    try (FSDataOutputStream out = fs.create(new Path("/canary"))) {
      // If this comes back as having hflush then some test setup assumption is wrong.
      // Fail the test so that a developer has to look and triage
      assertFalse("Did not enable EC!", CommonFSUtils.hasCapability(out, HFLUSH));
    }
  } catch (NoSuchMethodException e) {
    // We're not testing anything interesting if EC is not available, so skip the rest of the test
    Assume.assumeNoException("Using an older version of hadoop; EC not available.", e);
  }
  // Enforce stream capabilities so the code under test sees the EC streams' real capabilities.
  util.getConfiguration().setBoolean(CommonFSUtils.UNSAFE_STREAM_CAPABILITY_ENFORCE, true);
  util.startMiniCluster();
}
// Write the Avro record schema to a well-known HDFS path so the test can reference it.
// NOTE(review): the stream is not closed in this visible snippet — confirm the
// surrounding code closes it (otherwise the write may not be flushed).
FSDataOutputStream out = miniDfs.getFileSystem().create(
    new Path("/path/to/schema/schema.avsc"));
out.writeBytes(RECORD_SCHEMA);
// Create the file with the requested replication factor and write a known double —
// presumably read back and verified elsewhere in the test.
// NOTE(review): fop is not closed in this visible snippet — confirm the surrounding
// code closes it.
FSDataOutputStream fop = dfs.create(p, (short) repCount);
final double toWrite = 875.5613;
fop.writeDouble(toWrite);
/**
 * Test that we can start and stop multiple time a cluster
 * with the same HBaseTestingUtility. Restarting must wipe DFS state: a file
 * created in the first cluster incarnation must not exist in the second.
 */
@Test
public void testMultipleStartStop() throws Exception {
  HBaseTestingUtility htu1 = new HBaseTestingUtility();
  Path foo = new Path("foo");

  htu1.startMiniCluster();
  // Close the stream immediately: the original leaked the open output stream
  // (and its writer lease); the test only needs the file to exist.
  htu1.getDFSCluster().getFileSystem().create(foo).close();
  assertTrue(htu1.getDFSCluster().getFileSystem().exists(foo));
  htu1.shutdownMiniCluster();

  htu1.startMiniCluster();
  // Fresh cluster: the file from the previous incarnation must be gone.
  assertFalse(htu1.getDFSCluster().getFileSystem().exists(foo));
  htu1.getDFSCluster().getFileSystem().create(foo).close();
  assertTrue(htu1.getDFSCluster().getFileSystem().exists(foo));
  htu1.shutdownMiniCluster();
}
// Create the file with the requested replication factor and write a known double —
// presumably read back and verified elsewhere in the test.
// NOTE(review): fop is not closed in this visible snippet — confirm the surrounding
// code closes it.
FSDataOutputStream fop = dfs.create(p, (short) repCount);
final double toWrite = 875.5613;
fop.writeDouble(toWrite);
/**
 * Create an empty (zero-length) file on the given path, like the HDFS shell's
 * {@code -touchz}. (The previous javadoc said "directory", but {@code create}
 * makes a file, not a directory.)
 * <p/>
 * @param location The path to the new file, its name included.
 * @throws java.io.IOException if the file cannot be created
 */
public void touchz(Path location) throws IOException {
  dfs.create(location).close();
}
@Override
public HdfsDataOutputStream next(final FileSystem fs, final Path p) throws IOException {
  // Favored nodes are an HDFS-specific hint; only DistributedFileSystem can honor them.
  if (fs instanceof DistributedFileSystem) {
    DistributedFileSystem myDfs = (DistributedFileSystem)fs;
    return myDfs.create(p, permission, overwrite, bufferSize, replication,
        blockSize, progress, favoredNodes);
  }
  // The symlink resolved to a non-HDFS filesystem where favored nodes make no sense.
  throw new UnsupportedOperationException("Cannot create with" +
      " favoredNodes through a symlink to a non-DistributedFileSystem: " + f + " -> " + p);
}
}.resolve(this, absF);
/**
 * Creates a file, translating the boolean {@code overwrite} flag into the
 * corresponding {@link CreateFlag} set and delegating to the flag-based
 * {@code create} overload with no favored nodes.
 */
@Override
public FSDataOutputStream create(Path f, FsPermission permission, boolean overwrite,
    int bufferSize, short replication, long blockSize, Progressable progress)
    throws IOException {
  EnumSet<CreateFlag> flags = EnumSet.of(CreateFlag.CREATE);
  if (overwrite) {
    flags.add(CreateFlag.OVERWRITE);
  }
  return this.create(f, permission, flags, bufferSize, replication, blockSize,
      progress, null);
}
/**
 * Creates a file, translating the boolean {@code overwrite} flag into the
 * corresponding {@link CreateFlag} set and delegating to the flag-based
 * {@code create} overload with no favored nodes.
 */
@Override
public FSDataOutputStream create(Path f, FsPermission permission, boolean overwrite,
    int bufferSize, short replication, long blockSize, Progressable progress)
    throws IOException {
  EnumSet<CreateFlag> flags = EnumSet.of(CreateFlag.CREATE);
  if (overwrite) {
    flags.add(CreateFlag.OVERWRITE);
  }
  return this.create(f, permission, flags, bufferSize, replication, blockSize,
      progress, null);
}
/**
 * Creates a file, translating the boolean {@code overwrite} flag into the
 * corresponding {@link CreateFlag} set and delegating to the flag-based
 * {@code create} overload with no favored nodes.
 */
@Override
public FSDataOutputStream create(Path f, FsPermission permission, boolean overwrite,
    int bufferSize, short replication, long blockSize, Progressable progress)
    throws IOException {
  EnumSet<CreateFlag> flags = EnumSet.of(CreateFlag.CREATE);
  if (overwrite) {
    flags.add(CreateFlag.OVERWRITE);
  }
  return this.create(f, permission, flags, bufferSize, replication, blockSize,
      progress, null);
}
/**
 * Creates a file, filling in the bytes-per-checksum from configuration and
 * delegating to the overload that takes it explicitly.
 */
public FSDataOutputStream create(Path f, FsPermission permission, boolean overwrite,
    int bufferSize, short replication, long blockSize, Progressable progress)
    throws IOException {
  // Configured checksum chunk size; 512 bytes if unset.
  int bytesPerChecksum = getConf().getInt("io.bytes.per.checksum", 512);
  return create(f, permission, overwrite, bufferSize, replication, blockSize,
      bytesPerChecksum, progress);
}
/**
 * Builds a writer thread named "SlowWriter:&lt;filepath&gt;" that opens
 * {@code filepath} for writing with the test's REPLICATION factor.
 * NOTE(review): sleepms is only stored here — presumably the run loop sleeps
 * between writes; confirm in the thread body.
 */
SlowWriter(DistributedFileSystem fs, Path filepath, final long sleepms
    ) throws IOException {
  super(SlowWriter.class.getSimpleName() + ":" + filepath);
  this.filepath = filepath;
  // Cast relied upon: DistributedFileSystem.create is expected to return an
  // HdfsDataOutputStream here — TODO confirm against the Hadoop version in use.
  this.out = (HdfsDataOutputStream)fs.create(filepath, REPLICATION);
  this.sleepms = sleepms;
}
/**
 * Writes the first {@code fileLength} bytes of {@code contents} to {@code p},
 * overwriting any existing file.
 *
 * @param contents   source bytes
 * @param fileLength number of bytes from {@code contents} to write
 * @param p          destination path
 * @throws IOException if the file cannot be created or written
 */
static void writeContents(byte[] contents, int fileLength, Path p) throws IOException {
  // NOTE(review): BLOCK_SIZE is passed as both the buffer size and the block size
  // in this overload — confirm that is intentional.
  // try-with-resources: the stream is closed even if the write fails.
  try (FSDataOutputStream out = fs.create(p, true, BLOCK_SIZE, REPLICATION, BLOCK_SIZE)) {
    out.write(contents, 0, fileLength);
  }
}
/**
 * Writes each entry of {@code lines} as one line of {@code path}, overwriting any
 * existing file.
 *
 * @param path  destination path on the test DFS
 * @param lines lines to write, one println each
 * @throws IOException if the underlying file cannot be created
 */
private static void writePropertiesFile(Path path, String[] lines) throws IOException {
  // try-with-resources closes (and thereby flushes) the stream even if a write
  // fails, fixing the leak on the exceptional path; the explicit flush() before
  // close() was redundant. Note PrintStream itself swallows IOExceptions from
  // the underlying stream — preserved behavior.
  try (PrintStream out = new PrintStream(dfs.create(path, true))) {
    for (String line : lines) {
      out.println(line);
    }
  }
}
/**
 * Builds a worker for {@code filename}: creates an empty local file and an empty
 * HDFS file (non-overwriting) with a 4096-byte buffer, the test's REPLICATION
 * factor and BLOCK_SIZE.
 */
FileWorker(Path dir, File localDir, String filename) throws IOException {
  super(filename);
  this.file = new Path(dir, filename);
  this.localFile = new File(localDir, filename);
  // Return value ignored: createNewFile() is false if the file already exists —
  // presumably acceptable for this test; confirm if pre-existing files matter.
  localFile.createNewFile();
  // overwrite=false: create fails if the HDFS file exists; close immediately to
  // leave a zero-length file behind.
  dfs.create(file, false, 4096, REPLICATION, BLOCK_SIZE).close();
}
/**
 * Writes a small file named {@code fileName} under {@code dir} containing the
 * string "teststring" via {@code writeChars} (two bytes per char).
 *
 * @param dfs      filesystem to write to
 * @param dir      parent directory of the new file
 * @param fileName simple name of the new file
 * @throws IOException if the file cannot be created or written
 */
private void writeFile(final DistributedFileSystem dfs, Path dir, String fileName)
    throws IOException {
  Path filePath = new Path(dir.toString() + Path.SEPARATOR + fileName);
  // try-with-resources: the stream is closed even if the write fails.
  try (FSDataOutputStream out = dfs.create(filePath)) {
    out.writeChars("teststring");
  }
}
/**
 * Writes ~1MB (10 chunks of 100KB random alphabetic text) to {@code testFileName}
 * on the test DFS, hsyncs to force the data to the datanodes, and closes the file.
 *
 * @param testFileName path of the file to create
 * @throws Exception if the file cannot be created or written
 */
private void writeTestFile(String testFileName) throws Exception {
  Path filePath = new Path(testFileName);
  // try-with-resources guarantees the stream is closed even if a write fails.
  try (FSDataOutputStream stream = dfs.create(filePath)) {
    for (int i = 0; i < 10; i++) {
      // Explicit charset: the data is alphabetic ASCII, so the bytes match the old
      // platform-default getBytes() on any ASCII-compatible default, but this no
      // longer depends on the platform charset.
      byte[] data = RandomStringUtils.randomAlphabetic(102400)
          .getBytes(java.nio.charset.StandardCharsets.UTF_8);
      stream.write(data);
    }
    // Persist to datanodes before close.
    stream.hsync();
  }
}
/**
 * Creates a file named "&lt;seed&gt;_&lt;numBlocks&gt;" under {@code dir} and fills
 * it with {@code numBlocks} deterministic chunks derived from {@code seed}.
 *
 * @return the path of the newly written file
 */
static Path createFile(Path dir, int numBlocks, long seed, DistributedFileSystem dfs)
    throws IOException {
  final Path target = new Path(dir, seed + "_" + numBlocks);
  // Reuse the thread-local scratch buffer for chunk generation.
  final byte[] buffer = IO_BUF.get();
  try (FSDataOutputStream stream = dfs.create(target)) {
    for (int block = 0; block < numBlocks; block++) {
      stream.write(nextBytes(block, seed, buffer));
    }
  }
  return target;
}
/**
 * Verifies the recoverLease admin command: missing -path yields a usage error,
 * and recovering the lease of a closed file succeeds.
 */
@Test(timeout = 60000)
public void testRecoverLease() throws Exception {
  // No -path argument: the command must fail with the usage message.
  final String usage = "ret: 1, You must supply a -path argument to recoverLease.";
  assertEquals(usage, runCmd(new String[]{"recoverLease", "-retries", "1"}));

  // Write and close a one-byte file, then lease recovery on it must succeed.
  final Path target = new Path("/foo");
  FSDataOutputStream stream = fs.create(target);
  stream.write(123);
  stream.close();
  assertEquals("ret: 0, recoverLease SUCCEEDED on /foo",
      runCmd(new String[]{"recoverLease", "-path", "/foo"}));
}