/**
 * Converts a Flink {@link Path} into the equivalent Hadoop {@code Path},
 * going through the path's URI representation.
 *
 * @param path the Flink path to convert
 * @return a Hadoop path addressing the same location
 */
public static org.apache.hadoop.fs.Path toHadoopPath(Path path) {
    return new org.apache.hadoop.fs.Path(path.toUri());
}
/**
 * Looks up the {@link FileSystem} instance that is responsible for this path,
 * based on the scheme and authority of this path's URI.
 *
 * @return the file system that owns this path
 * @throws IOException
 *         thrown if the file system could not be retrieved
 */
public FileSystem getFileSystem() throws IOException {
    return FileSystem.get(toUri());
}
/**
 * Creates a new {@code RocksDBStateBackend} that stores its checkpoint data in the
 * file system and location defined by the given URI.
 *
 * <p>A state backend that stores checkpoints in HDFS or S3 must specify the file system
 * host and port in the URI, or have the Hadoop configuration that describes the file system
 * (host / high-availability group / possibly credentials) either referenced from the Flink
 * config, or included in the classpath.
 *
 * <p>This constructor delegates to the URI-based constructor after parsing the string
 * through Flink's {@code Path}, which normalizes the string into a valid URI.
 *
 * @param checkpointDataUri The URI describing the filesystem and path to the checkpoint data directory.
 * @throws IOException Thrown, if no file system can be found for the scheme in the URI.
 */
public RocksDBStateBackend(String checkpointDataUri) throws IOException { this(new Path(checkpointDataUri).toUri()); }
/**
 * Creates a new {@code RocksDBStateBackend} that stores its checkpoint data in the
 * file system and location defined by the given URI.
 *
 * <p>A state backend that stores checkpoints in HDFS or S3 must specify the file system
 * host and port in the URI, or have the Hadoop configuration that describes the file system
 * (host / high-availability group / possibly credentials) either referenced from the Flink
 * config, or included in the classpath.
 *
 * <p>This constructor delegates to the URI-based constructor after parsing the string
 * through Flink's {@code Path}, which normalizes the string into a valid URI.
 *
 * @param checkpointDataUri The URI describing the filesystem and path to the checkpoint data directory.
 * @param enableIncrementalCheckpointing True if incremental checkpointing is enabled.
 * @throws IOException Thrown, if no file system can be found for the scheme in the URI.
 */
public RocksDBStateBackend(String checkpointDataUri, boolean enableIncrementalCheckpointing) throws IOException { this(new Path(checkpointDataUri).toUri(), enableIncrementalCheckpointing); }
/**
 * Reads the values of the given option from the command line and converts each
 * distinct value into a file URL (resolved against the local file system).
 *
 * <p>Returns {@code null} when the option is not present on the command line,
 * so callers can distinguish "absent" from "present but empty".
 *
 * @param line the parsed command line
 * @param option the option whose values should be converted to URLs
 * @return the list of distinct URLs, or {@code null} if the option was not given
 */
private static List<URL> checkUrls(CommandLine line, Option option) {
    if (!line.hasOption(option.getOpt())) {
        return null;
    }
    final String[] values = line.getOptionValues(option.getOpt());
    return Arrays.stream(values)
            .distinct()
            .map(
                    (value) -> {
                        try {
                            return Path.fromLocalFile(new File(value).getAbsoluteFile())
                                    .toUri()
                                    .toURL();
                        } catch (Exception e) {
                            throw new SqlClientException(
                                    "Invalid path for option '" + option.getLongOpt() + "': " + value,
                                    e);
                        }
                    })
            .collect(Collectors.toList());
}
/**
 * Converts the given Path to a File for this file system.
 *
 * <p>If the path is not absolute, it is interpreted relative to this FileSystem's working directory.
 */
public File pathToFile(Path path) {
    final Path resolved = path.isAbsolute() ? path : new Path(getWorkingDirectory(), path);
    return new File(resolved.toUri().getPath());
}
/**
 * Creates a future that is immediately complete with the local path of the given entry.
 *
 * <p>Relative paths are resolved against the local file system's working directory;
 * absolute paths are stripped down to their raw path component.
 *
 * @param entry the (local) path of the cached entry
 * @throws RuntimeException if the entry cannot be resolved against the local file system,
 *         e.g. because it refers to a non-local file system
 */
public CompletedFuture(Path entry) {
    try {
        LocalFileSystem fs =
                (LocalFileSystem) FileSystem.getUnguardedFileSystem(entry.toUri());
        result = entry.isAbsolute()
                ? new Path(entry.toUri().getPath())
                : new Path(fs.getWorkingDirectory(), entry);
    } catch (Exception e) {
        // preserve the original exception as the cause instead of swallowing it,
        // so the actual failure (e.g. a ClassCastException for non-local FS) is diagnosable
        throw new RuntimeException(
                "DistributedCache supports only local files for Collection Environments", e);
    }
}
@Override
public void run() {
    try {
        // Resolve the file system that owns the split's path and open an input stream on it.
        final FileSystem fs = FileSystem.get(this.split.getPath().toUri());
        this.fdis = fs.open(this.split.getPath());

        // check for canceling and close the stream in that case, because no one will obtain it
        // NOTE(review): the stream is nulled out BEFORE closing, presumably so that a
        // concurrent reader of this.fdis never sees an already-closed stream — confirm
        // against the code that consumes this.fdis after an abort.
        if (this.aborted) {
            final FSDataInputStream f = this.fdis;
            this.fdis = null;
            f.close();
        }
    } catch (Throwable t) {
        // Record any failure (including Errors) for the thread that joins this opener
        // to inspect; nothing is rethrown from this runnable.
        this.error = t;
    }
}
/**
 * Obtains a {@code TestRecoverableWriter} backed by the local file system owning the
 * given path, failing the test if the path does not resolve to a local file system.
 *
 * @param path the path whose file system should back the writer
 * @return the test writer, or {@code null} only on the (test-failing) error path
 */
private static TestRecoverableWriter getRecoverableWriter(Path path) {
    try {
        final FileSystem fs = FileSystem.get(path.toUri());
        if (!(fs instanceof LocalFileSystem)) {
            fail("Expected Local FS but got a " + fs.getClass().getName() + " for path: " + path);
        }
        return new TestRecoverableWriter((LocalFileSystem) fs);
    } catch (IOException e) {
        // include the failure context instead of a bare fail(), so the test report
        // shows WHY the file system could not be retrieved
        fail("Could not retrieve file system for path " + path + ": " + e.getMessage());
    }
    return null;
}
@Test
public void testSuffix() {
    // plain suffix appended to the last path segment
    Path path = new Path("/my/path").suffix("_123");
    assertEquals("/my/path_123", path.toUri().getPath());

    // suffix starting with a separator, applied to a path with a trailing slash
    path = new Path("/my/path/").suffix("/abc");
    assertEquals("/my/path/abc", path.toUri().getPath());

    // Windows-style path: the drive letter ends up behind a leading slash in the URI
    path = new Path("C:/my/windows/path").suffix("/abc");
    assertEquals("/C:/my/windows/path/abc", path.toUri().getPath());
}
@Test public void testSetPathsSingleWithMulti() { final MultiDummyFileInputFormat format = new MultiDummyFileInputFormat(); final String myPath = "/an/imaginary/path"; format.setFilePaths(myPath); final Path[] filePaths = format.getFilePaths(); Assert.assertEquals(1, filePaths.length); Assert.assertEquals(myPath, filePaths[0].toUri().toString()); // ensure backwards compatibility Assert.assertEquals(myPath, format.filePath.toUri().toString()); }
@Test public void testSetPathOnMulti() { final MultiDummyFileInputFormat format = new MultiDummyFileInputFormat(); final String myPath = "/an/imaginary/path"; format.setFilePath(myPath); final Path[] filePaths = format.getFilePaths(); Assert.assertEquals(1, filePaths.length); Assert.assertEquals(myPath, filePaths[0].toUri().toString()); // ensure backwards compatibility Assert.assertEquals(myPath, format.filePath.toUri().toString()); }
@Test
public void testSetPathsMulti() {
    final String firstPath = "/an/imaginary/path";
    final String secondPath = "/an/imaginary/path2";
    final MultiDummyFileInputFormat format = new MultiDummyFileInputFormat();
    format.setFilePaths(firstPath, secondPath);

    final Path[] paths = format.getFilePaths();
    Assert.assertEquals(2, paths.length);
    Assert.assertEquals(firstPath, paths[0].toUri().toString());
    Assert.assertEquals(secondPath, paths[1].toUri().toString());
}
@AfterClass
public static void destroyHDFS() throws Exception {
    if (hdfsCluster == null) {
        return;
    }
    // remove the test data before tearing the mini cluster down
    hdfsCluster
            .getFileSystem()
            .delete(new org.apache.hadoop.fs.Path(basePath.toUri()), true);
    hdfsCluster.shutdown();
}
@AfterClass
public static void destroyHDFS() throws Exception {
    if (hdfsCluster == null) {
        return;
    }
    // remove the test data before tearing the mini cluster down
    hdfsCluster
            .getFileSystem()
            .delete(new org.apache.hadoop.fs.Path(basePath.toUri()), true);
    hdfsCluster.shutdown();
}
@Test
public void testGetStatistics() throws IOException {
    final String firstContent = "my mocked line 1\nmy mocked line 2\n";
    final Path firstFile = createTempFilePath(firstContent);

    final String secondContent = "my mocked line 1\nmy mocked line 2\nanother mocked line3\n";
    final Path secondFile = createTempFilePath(secondContent);

    final long expectedTotalSize = firstContent.length() + secondContent.length();

    final DelimitedInputFormat<String> format = new MyTextInputFormat();
    format.setFilePaths(firstFile.toUri().toString(), secondFile.toUri().toString());

    final FileInputFormat.FileBaseStatistics stats = format.getStatistics(null);
    assertNotNull(stats);
    assertEquals(
            "The file size from the statistics is wrong.",
            expectedTotalSize,
            stats.getTotalInputSize());
}
@Test
public void testGetParent() {
    // plain absolute paths, with and without trailing slash
    assertEquals("/my/fancy", new Path("/my/fancy/path").getParent().toUri().getPath());
    assertEquals("/my/other/fancy", new Path("/my/other/fancy/path/").getParent().toUri().getPath());

    // scheme-qualified paths
    assertEquals("/my", new Path("hdfs:///my/path").getParent().toUri().getPath());
    assertEquals("/", new Path("hdfs:///myPath/").getParent().toUri().getPath());

    // the root has no parent
    assertNull(new Path("/").getParent());

    // Windows-style path: the drive letter ends up behind a leading slash in the URI
    assertEquals("/C:/my/windows", new Path("C:/my/windows/path").getParent().toUri().getPath());
}
@Test
public void testPathAndScheme() throws Exception {
    final Path base = getBasePath();
    assertEquals(fs.getUri(), base.getFileSystem().getUri());
    assertEquals(fs.getUri().getScheme(), base.toUri().getScheme());
}
private void setupRocksDB(int fileSizeThreshold, boolean incrementalCheckpoints) throws IOException { String rocksDb = tempFolder.newFolder().getAbsolutePath(); String backups = tempFolder.newFolder().getAbsolutePath(); // we use the fs backend with small threshold here to test the behaviour with file // references, not self contained byte handles RocksDBStateBackend rdb = new RocksDBStateBackend( new FsStateBackend( new Path("file://" + backups).toUri(), fileSizeThreshold), incrementalCheckpoints); rdb.setDbStoragePath(rocksDb); this.stateBackend = rdb; }