/**
 * Recursively deletes {@code stepOutputPath} on the filesystem resolved from
 * {@code conf}, if it exists.
 *
 * @param conf           job configuration used to resolve the filesystem
 * @param stepOutputPath path to remove
 * @throws IOException if the filesystem cannot be reached or the delete fails
 */
public static void deletePathIfExists(JobConf conf, String stepOutputPath) throws IOException {
    Path path = new Path(stepOutputPath);
    FileSystem fs = path.getFileSystem(conf);
    // FileSystem.delete() returns false (without throwing) when the path is
    // absent, so the prior exists() probe was redundant and introduced a
    // check-then-act race against concurrent writers.
    fs.delete(path, true);
}
/**
 * Task entry point: recursively removes the indexing working directory named
 * by the first command-line argument.
 *
 * @param args {@code args[0]} is the Hadoop working path to delete
 * @return always {@code null}
 * @throws Exception if the filesystem cannot be resolved or the delete fails
 */
public static String runTask(String[] args) throws Exception {
    final String workingPath = args[0];
    log.info("Deleting indexing hadoop working path [%s].", workingPath);
    final Path target = new Path(workingPath);
    final FileSystem fs = target.getFileSystem(new Configuration());
    fs.delete(target, true);
    return null;
}
}
/**
 * Best-effort removal of a temporary file. Failures are logged as warnings
 * rather than propagated, since cleanup is non-critical.
 */
private void cleanupFile(Path file) {
    try {
        fileSystem.delete(file, false);
        // delete() may return false without throwing; verify the file is gone
        // and surface a failure through the same warning path.
        if (!fileSystem.exists(file)) {
            return;
        }
        throw new IOException("Delete failed");
    } catch (IOException e) {
        log.warn(e, "Failed to delete temporary file: " + file);
    }
}
// Deployment-failure hook: cancels the in-flight YARN deployment, then removes
// the staged Flink files from the cluster filesystem.
@Override public void run() {
    LOG.info("Cancelling deployment from Deployment Failure Hook");
    // Stop the YARN application first so no container still holds the files.
    failSessionDuringDeployment(yarnClient, yarnApplication);
    LOG.info("Deleting files in {}.", yarnFilesDir);
    try {
        FileSystem fs = FileSystem.get(yarnConfiguration);
        // delete() returns false on failure without throwing; convert that
        // into an exception so the error branch below logs it.
        if (!fs.delete(yarnFilesDir, true)) {
            throw new IOException("Deleting files in " + yarnFilesDir + " was unsuccessful");
        }
        // NOTE(review): FileSystem.get() may return a JVM-wide cached instance;
        // closing it here could affect other users of that instance — confirm
        // this is intended. Also note close() is skipped when delete throws.
        fs.close();
    } catch (IOException e) {
        LOG.error("Failed to delete Flink Jar and configuration files in HDFS", e);
    }
}
}
@After public void cleanUp() throws IOException { // delete and recreate the test directory, ensuring a clean test dir between tests Path testDir = UTIL.getDataTestDir(); FileSystem fs = UTIL.getTestFileSystem(); fs.delete(testDir, true); if (!fs.mkdirs(testDir)) throw new IOException("Failed mkdir " + testDir); }
/**
 * Flattens task output: moves the contents of each child directory of
 * {@code specPath} up into {@code specPath} itself, then deletes the
 * now-drained child directories.
 *
 * @throws IOException   on filesystem errors
 * @throws HiveException if the rename/move helper fails
 */
private void moveUpFiles(Path specPath, Configuration hconf, Logger log) throws IOException, HiveException {
    FileSystem fs = specPath.getFileSystem(hconf);
    if (!fs.exists(specPath)) {
        return;
    }
    FileStatus[] children = fs.listStatus(specPath);
    if (children == null) {
        return;
    }
    for (FileStatus child : children) {
        Utilities.renameOrMoveFiles(fs, child.getPath(), specPath);
        fs.delete(child.getPath(), true);
    }
}
/**
 * Verifies that the instrumented-file scheme resolves to an
 * {@link InstrumentedLocalFileSystem} wrapping a {@link LocalFileSystem},
 * and that basic create/list/delete operations behave as expected.
 */
@Test
public void testFromInstrumentedScheme() throws Exception {
    File tmpDir = Files.createTempDir();
    tmpDir.deleteOnExit();
    FileSystem fs = FileSystem.get(new URI(InstrumentedLocalFileSystem.SCHEME + ":///"), new Configuration());
    Assert.assertTrue(fs instanceof InstrumentedLocalFileSystem);
    Assert.assertTrue(DecoratorUtils.resolveUnderlyingObject(fs) instanceof LocalFileSystem);
    Assert.assertEquals(fs.getFileStatus(new Path("/tmp")).getPath(), new Path("instrumented-file:///tmp"));
    Assert.assertEquals(fs.getUri().getScheme(), "instrumented-file");
    Path basePath = new Path(tmpDir.getAbsolutePath());
    Assert.assertTrue(fs.exists(basePath));
    Path file = new Path(basePath, "file");
    Assert.assertFalse(fs.exists(file));
    // Close the stream immediately: the original leaked the FSDataOutputStream
    // returned by create(), leaving the file handle open for the rest of the test.
    fs.create(new Path(basePath, "file")).close();
    Assert.assertTrue(fs.exists(file));
    Assert.assertEquals(fs.getFileStatus(file).getLen(), 0);
    Assert.assertEquals(fs.listStatus(basePath).length, 1);
    fs.delete(file, false);
    Assert.assertFalse(fs.exists(file));
}
/**
 * Sets up the benchmark: picks a local or default filesystem, derives the
 * RCFile/SequenceFile target paths (from {@code file} if given, otherwise
 * under the test tmp dir), and clears any leftovers from earlier runs.
 *
 * @param local whether to benchmark against the local filesystem
 * @param file  optional base path for the output files; may be {@code null}
 * @throws IOException if the filesystem cannot be obtained or cleaned
 */
public PerformTestRCFileAndSeqFile(boolean local, String file) throws IOException {
    fs = local ? FileSystem.getLocal(conf) : FileSystem.get(conf);
    conf.setInt(RCFile.Writer.COLUMNS_BUFFER_SIZE_CONF_STR, 1 * 1024 * 1024);
    if (file == null) {
        Path dir = new Path(System.getProperty("test.tmp.dir", ".") + "/mapred");
        testRCFile = new Path(dir, "test_rcfile");
        testSeqFile = new Path(dir, "test_seqfile");
    } else {
        testRCFile = new Path(file + "-rcfile");
        testSeqFile = new Path(file + "-seqfile");
    }
    // Start from a clean slate; stale output from a previous run would skew results.
    fs.delete(testRCFile, true);
    fs.delete(testSeqFile, true);
    System.out.println("RCFile:" + testRCFile.toString());
    System.out.println("SequenceFile:" + testSeqFile.toString());
}
/**
 * Corrupts a data file in place by rewriting it with a different length:
 * a positive {@code addRemoveBytes} pads with zero bytes, a negative value
 * truncates, and {@link Integer#MIN_VALUE} empties the file entirely. The
 * corrupted copy is written alongside as {@code <name>.corrupt} and then
 * renamed over the original.
 *
 * @throws Exception on any filesystem error
 */
private void corruptDataFile(final String file, final Configuration conf, final int addRemoveBytes) throws Exception {
    Path bPath = new Path(file);
    Path cPath = new Path(bPath.getParent(), bPath.getName() + ".corrupt");
    FileSystem fs = bPath.getFileSystem(conf);
    FileStatus fileStatus = fs.getFileStatus(bPath);
    // MIN_VALUE is a sentinel meaning "truncate to zero bytes".
    int len = addRemoveBytes == Integer.MIN_VALUE ? 0 : (int) fileStatus.getLen() + addRemoveBytes;
    byte[] buffer = new byte[len];
    // try-with-resources closes the streams even when an I/O call throws;
    // the original leaked both streams on failure.
    try (FSDataInputStream fdis = fs.open(bPath)) {
        // Copy at most the smaller of (original length, new length); any
        // remaining tail of buffer stays zero-filled.
        fdis.readFully(0, buffer, 0, (int) Math.min(fileStatus.getLen(), buffer.length));
    }
    try (FSDataOutputStream fdos = fs.create(cPath, true)) {
        fdos.write(buffer, 0, buffer.length);
    }
    fs.delete(bPath, false);
    fs.rename(cPath, bPath);
}
/**
 * Verifies create() semantics when the target file already exists:
 * while the file is open (lease held) a second non-overwrite create fails
 * with AlreadyBeingCreatedException; after close it fails with
 * FileAlreadyExistsException; after delete it succeeds again.
 */
@Test public void testDoubleCreateSemantics() throws Exception {
    //1 create an already existing open file w/o override flag
    Path file1 = new Path(dir.toString() + Path.SEPARATOR_CHAR + "file1");
    try (FSDataOutputStream os1 = fs.create(file1, false)) {
        // The lease on file1 is still held by os1, so this create must be
        // rejected server-side (surfaced as a wrapped RemoteException).
        fs.create(file1, false); // should fail
        fail("Create did not throw an exception");
    } catch (RemoteException e) {
        Assert.assertEquals(AlreadyBeingCreatedException.class, e.unwrapRemoteException().getClass());
    }
    //2 close file and retry creation
    // os1 was closed by try-with-resources above; the file now simply exists.
    try {
        fs.create(file1, false); // should still fail
        fail("Create did not throw an exception");
    } catch (FileAlreadyExistsException e) {
        // expecting this exception
    }
    //3 delete file and retry creation
    fs.delete(file1, false);
    try (FSDataOutputStream os2 = fs.create(file1, false)) {
        Assert.assertNotNull(os2);
    }
}
/**
 * Points the HBase root dir at a fresh test subdirectory, removing any
 * leftover data from earlier runs first.
 *
 * @return the configured root dir as a string
 * @throws IOException on filesystem errors
 */
private String setRootDirAndCleanIt(final HBaseTestingUtility htu, final String subdir) throws IOException {
    final Path testdir = htu.getDataTestDir(subdir);
    final FileSystem fs = FileSystem.get(htu.getConfiguration());
    if (fs.exists(testdir)) {
        // The delete must succeed or subsequent tests would see stale state.
        assertTrue(fs.delete(testdir, true));
    }
    FSUtils.setRootDir(htu.getConfiguration(), testdir);
    return FSUtils.getRootDir(htu.getConfiguration()).toString();
}
/**
 * Recursively deletes every segment file under the configured HDFS storage
 * directory.
 *
 * @throws IOException if the filesystem cannot be resolved or the delete fails
 */
@Override
public void killAll() throws IOException {
    log.info("Deleting all segment files from hdfs dir [%s].", storageDirectory.toUri().toString());
    storageDirectory.getFileSystem(config).delete(storageDirectory, true);
}
/**
 * Deletes the Presto schema file inside {@code metadataDirectory}, wrapping
 * any failure (including a false return from delete) in a PrestoException.
 *
 * @param type label ("table"/"database"-style) used only in error messages
 */
private void deleteSchemaFile(String type, Path metadataDirectory) {
    Path schemaFile = new Path(metadataDirectory, PRESTO_SCHEMA_FILE_NAME);
    try {
        boolean deleted = metadataFileSystem.delete(schemaFile, false);
        if (!deleted) {
            throw new PrestoException(HIVE_METASTORE_ERROR, "Could not delete " + type + " schema");
        }
    } catch (IOException e) {
        throw new PrestoException(HIVE_METASTORE_ERROR, "Could not delete " + type + " schema", e);
    }
}
/**
 * Writes a SequenceFile of {@code rowCount} (IntWritable, Text) pairs at
 * {@code file}, replacing any existing file. I/O failures are caught and
 * printed rather than propagated (best-effort test fixture setup).
 *
 * @param fs       target filesystem
 * @param file     output path (overwritten if present)
 * @param rowCount number of rows to append
 */
private static void createSeqFile(FileSystem fs, Path file, int rowCount) throws IOException {
    Configuration conf = new Configuration();
    try {
        if (fs.exists(file)) {
            fs.delete(file, false);
        }
        // try-with-resources guarantees the writer is closed even when
        // append() throws; the original leaked it on exception.
        try (SequenceFile.Writer w = SequenceFile.createWriter(fs, conf, file, IntWritable.class, Text.class)) {
            for (int i = 0; i < rowCount; i++) {
                w.append(new IntWritable(i), new Text("line " + i));
            }
        }
        System.out.println("done");
    } catch (IOException e) {
        // Deliberate best-effort swallow kept from the original; note the
        // declared throws IOException is effectively unreachable.
        e.printStackTrace();
    }
}
/** * Creates the HDFS filesystem to store output files. * * @param conf Hadoop configuration */ private void createHdfsFilesystem(Configuration conf) throws Exception { // Inits HDFS file system object mFileSystem = FileSystem.get(URI.create(conf.get("fs.defaultFS")), conf); mOutputFilePath = new Path("./MapReduceOutputFile"); if (mFileSystem.exists(mOutputFilePath)) { mFileSystem.delete(mOutputFilePath, true); } }