final org.apache.flink.core.fs.FileSystem flinkFs =
    org.apache.flink.core.fs.FileSystem.getUnguardedFileSystem(path.toUri());
final FileSystem hadoopFs = (flinkFs instanceof HadoopFileSystem)
    ? ((HadoopFileSystem) flinkFs).getHadoopFileSystem()
    : null;
if (hadoopFs != null) {
  hadoopConf = hadoopFs.getConf();
}
// ...
URI genericHdfsUri = URI.create("hdfs://localhost:12345/");
org.apache.flink.core.fs.FileSystem accessor =
    org.apache.flink.core.fs.FileSystem.getUnguardedFileSystem(genericHdfsUri);
hadoopConf = ((HadoopFileSystem) accessor).getHadoopFileSystem().getConf();
// ...
finalConf = new org.apache.hadoop.conf.Configuration(hadoopConf);
finalConf.set(key, extraUserConf.getString(key, null));
// ...
URI fsUri = path.toUri();
final String scheme = fsUri.getScheme();
final String authority = fsUri.getAuthority();
// ...
fsUri = FileSystem.getDefaultUri(finalConf);
// ...
return ((LocalFileSystem) fs).getRaw();
/**
 * If libjars are set in the conf, parse the libjars.
 * @param conf configuration to read the "tmpjars" property from
 * @return the libjar URLs, or null if no libjars are set
 * @throws IOException if a libjar path cannot be resolved
 */
public static URL[] getLibJars(Configuration conf) throws IOException {
  String jars = conf.get("tmpjars");
  if (jars == null || jars.trim().isEmpty()) {
    return null;
  }
  String[] files = jars.split(",");
  List<URL> cp = new ArrayList<URL>();
  for (String file : files) {
    Path tmp = new Path(file);
    if (tmp.getFileSystem(conf).equals(FileSystem.getLocal(conf))) {
      cp.add(FileSystem.getLocal(conf).pathToFile(tmp).toURI().toURL());
    } else {
      LOG.warn("The libjars file " + tmp + " is not on the local "
          + "filesystem. It will not be added to the local classpath.");
    }
  }
  return cp.toArray(new URL[0]);
}
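A minimal usage sketch, assuming the method above is in scope: the "tmpjars" value, the jar paths, and the LibJarsDemo class are all made up for illustration.

import java.net.URL;
import java.net.URLClassLoader;
import org.apache.hadoop.conf.Configuration;

public class LibJarsDemo { // hypothetical driver
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("tmpjars", "file:///tmp/dep1.jar,file:///tmp/dep2.jar"); // made-up jars
    URL[] libJars = getLibJars(conf); // method shown above
    if (libJars != null) {
      // Extend the context classloader with the resolved local jars.
      Thread.currentThread().setContextClassLoader(new URLClassLoader(
          libJars, Thread.currentThread().getContextClassLoader()));
    }
  }
}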
@Test
public void testWriteCuboidStatistics() throws IOException {
  final Configuration conf = HadoopUtil.getCurrentConfiguration();
  File tmp = File.createTempFile("cuboidstatistics", "");
  final Path outputPath = new Path(tmp.getParent().toString()
      + File.separator + RandomUtil.randomUUID().toString());
  if (!FileSystem.getLocal(conf).exists(outputPath)) {
    // FileSystem.getLocal(conf).create(outputPath);
  }
  System.out.println(outputPath);

  Map<Long, HLLCounter> cuboidHLLMap = Maps.newHashMap();
  CubeStatsWriter.writeCuboidStatistics(conf, outputPath, cuboidHLLMap, 100);
  FileSystem.getLocal(conf).delete(outputPath, true);
}
private static String getFSIdentifier(URI uri) {
  if (new LocalFileSystem().getScheme().equals(uri.getScheme())) {
    return "localhost";
  } else {
    return ClustersNames.getInstance().getClusterName(uri.toString());
  }
}
/**
 * Create the directory or check permissions if it already exists.
 *
 * The semantics of mkdirsWithExistsAndPermissionCheck differ from the
 * mkdirs method provided in Sun's java.io.File class in the following way:
 * while creating the non-existent parent directories, this method checks
 * for the existence of those directories if the mkdir fails at any point
 * (since that directory might have just been created by some other
 * process). Only if both the mkdir() and the exists() check fail for any
 * seemingly non-existent directory do we signal an error; Sun's mkdir
 * would signal an error (return false) if a directory it is attempting to
 * create already exists or the mkdir fails.
 *
 * @param localFS local filesystem
 * @param dir directory to be created or checked
 * @param expected expected permission
 * @throws IOException
 */
static void mkdirsWithExistsAndPermissionCheck(
    LocalFileSystem localFS, Path dir, FsPermission expected)
    throws IOException {
  File directory = localFS.pathToFile(dir);
  boolean created = false;

  if (!directory.exists()) {
    created = mkdirsWithExistsCheck(directory);
  }

  if (created || !localFS.getFileStatus(dir).getPermission().equals(expected)) {
    localFS.setPermission(dir, expected);
  }
}
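For context, a typical call looks like the sketch below; the directory path and the 0755 permission are assumptions, and mkdirsWithExistsAndPermissionCheck is the method above.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

LocalFileSystem localFS = FileSystem.getLocal(new Configuration());
// Create (or verify) a local scratch dir with 0755 permissions;
// the path is made up for this example.
mkdirsWithExistsAndPermissionCheck(
    localFS, new Path("/tmp/app-local-dir"), new FsPermission((short) 0755));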
conf.setVar(ConfVars.HIVEADDEDJARS,
    Utilities.getResourceFiles(conf, SessionState.ResourceType.JAR));
Path planPath = new Path(ctx.getLocalTmpPath(), "plan.xml");
MapredLocalWork plan = getWork();
LOG.info("Generating plan file " + planPath.toString());
out = FileSystem.getLocal(conf).create(planPath);
SerializationUtilities.serializePlan(plan, out);
out.close();
// ...
String cmdLine = hadoopExec + " jar " + jarCmd + " -localtask -plan "
    + planPath.toString() + " " + isSilent + " " + hiveConfArgs;
cmdLine = cmdLine + " -files " + files;
workDir = ctx.getLocalTmpPath().toUri().getPath();
// ...
Path p = new Path(f);
String target = p.toUri().getPath();
String link = workDir + Path.SEPARATOR + p.getName();
if (FileUtil.symLink(target, link) != 0) {
  // ...
@SuppressWarnings("deprecation") @Test public void testGetTokensForNamenodes() throws IOException, URISyntaxException { Path TEST_ROOT_DIR = new Path(System.getProperty("test.build.data", "test/build/data")); // ick, but need fq path minus file:/ String binaryTokenFile = FileSystem.getLocal(conf) .makeQualified(new Path(TEST_ROOT_DIR, "tokenFile")).toUri() .getPath(); MockFileSystem fs1 = createFileSystemForServiceName("service1"); Credentials creds = new Credentials(); Token<?> token1 = fs1.getDelegationToken(renewer); creds.addToken(token1.getService(), token1); // wait to set, else the obtain tokens call above will fail with FNF conf.set(MRJobConfig.MAPREDUCE_JOB_CREDENTIALS_BINARY, binaryTokenFile); creds.writeTokenStorageFile(new Path(binaryTokenFile), conf); TokenCache.obtainTokensForNamenodesInternal(fs1, creds, conf, renewer); String fs_addr = fs1.getCanonicalServiceName(); Token<?> nnt = TokenCache.getDelegationToken(creds, fs_addr); assertNotNull("Token for nn is null", nnt); } }
/**
 * Load the properties from the specified file into a {@link Properties} object.
 *
 * @param fileName the name of the file to load properties from
 * @param conf configuration object to determine the file system to be used
 * @return a new {@link Properties} instance
 * @throws IOException if the file cannot be opened
 * @throws ConfigurationException if the file cannot be parsed as properties
 */
public static Properties fileToProperties(String fileName, Configuration conf)
    throws IOException, ConfigurationException {
  PropertiesConfiguration propsConfig = new PropertiesConfiguration();
  Path filePath = new Path(fileName);
  URI fileURI = filePath.toUri();

  // A path with neither scheme nor authority resolves against the local FS.
  if (fileURI.getScheme() == null && fileURI.getAuthority() == null) {
    propsConfig.load(FileSystem.getLocal(conf).open(filePath));
  } else {
    propsConfig.load(filePath.getFileSystem(conf).open(filePath));
  }
  return ConfigurationConverter.getProperties(propsConfig);
}
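A usage sketch under assumptions: the file path and property key below are made up, and fileToProperties is the method above.

import java.util.Properties;
import org.apache.hadoop.conf.Configuration;

// "/etc/myapp/app.properties" has no scheme or authority, so the method
// above reads it through the local filesystem.
Properties props =
    fileToProperties("/etc/myapp/app.properties", new Configuration());
System.out.println(props.getProperty("some.key")); // made-up key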
@Test
public void testWriterTblProperties() throws Exception {
  Path root = new Path(workDir, "testWriterTblProperties");
  Configuration conf = new Configuration();
  FileSystem fs = FileSystem.getLocal(conf).getRaw();
  ObjectInspector inspector;
  synchronized (TestOrcFile.class) {
    // ...
  }
  // ...
  ByteArrayOutputStream myOut = new ByteArrayOutputStream();
  System.setOut(new PrintStream(myOut));
  FileDump.main(new String[]{root.toUri().toString()});
  System.out.flush();
  String outDump = new String(myOut.toByteArray());
@BeforeClass
public static void makeTestFile() throws Exception {
  FileOutputStream fos = new FileOutputStream(testFilePath);
  fos.write("hello".getBytes("UTF-8"));
  fos.close();

  Configuration conf = new Configuration();
  FileSystem rawFS = FileSystem.getLocal(conf).getRaw();
  FileStatus stat = rawFS.getFileStatus(new Path(testFilePath.toString()));
  realOwner = stat.getOwner();
  realGroup = stat.getGroup();
}
Configuration conf = new Configuration();
OrcOutputFormat of = new OrcOutputFormat();
FileSystem fs = FileSystem.getLocal(conf).getRaw();
Path root = new Path(tmpDir, "testRecordReaderIncompleteDelta").makeQualified(fs);
fs.delete(root, true);
ObjectInspector inspector;
synchronized (TestOrcFile.class) {
  // ...
}
// ...
JobConf job = new JobConf();
job.set(ValidTxnList.VALID_TXNS_KEY,
    new ValidReadTxnList(new long[0], new BitSet(), 1000, Long.MAX_VALUE)
        .writeToString());
job.set("mapred.input.dir", root.toString());
job.set("bucket_count", "2");
job.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS, MyRow.getColumnNamesProperty());
// ...
Path sideFile = new Path(root + "/"
    + (use130Format ? AcidUtils.deltaSubdir(10, 19, 0) : AcidUtils.deltaSubdir(10, 19))
    + "/bucket_00001_flush_length");
assertEquals(true, fs.exists(sideFile));
assertEquals(32, fs.getFileStatus(sideFile).getLen());
public enum LsOption { Recursive, WithBlockSize };
@Test
public void testCopyFileFromWindowsLocalPath() throws Exception {
  assumeTrue(Path.WINDOWS);
  String windowsTestRootPath =
      (new File(testRootDir.toUri().getPath().toString())).getAbsolutePath();
  Path testRoot = new Path(windowsTestRootPath, "testPutFile");
  lfs.delete(testRoot, true);
  lfs.mkdirs(testRoot);

  Path targetDir = new Path(testRoot, "target");
  Path filePath = new Path(testRoot, new Path("srcFile"));
  lfs.create(filePath).close();
  checkPut(filePath, targetDir, true);
}
@Override
public void open(String filePath, CompressionCodec codec, CompressionType cType)
    throws IOException {
  Configuration conf = new Configuration();
  Path dstPath = new Path(filePath);
  FileSystem hdfs = dstPath.getFileSystem(conf);
  if (useRawLocalFileSystem) {
    if (hdfs instanceof LocalFileSystem) {
      hdfs = ((LocalFileSystem) hdfs).getRaw();
    } else {
      logger.warn("useRawLocalFileSystem is set to true but file system "
          + "is not of type LocalFileSystem: " + hdfs.getClass().getName());
    }
  }
  // ...
  if (conf.getBoolean("hdfs.append.support", false) == true
      && hdfs.isFile(dstPath)) {
    fsOut = hdfs.append(dstPath);
    appending = true;
  } else {
    fsOut = hdfs.create(dstPath);
  }
    // ... (truncated signature)
    int partitions) throws IOException, HiveException {
  JobConf conf = new JobConf();
  Utilities.clearWorkMap(conf);
  conf.set("hive.exec.plan", workDir.toString());
  conf.set("mapred.job.tracker", "local");
  String isVectorizedString = Boolean.toString(isVectorized);
  conf.set("hive.vectorized.execution.enabled", isVectorizedString);
  conf.set("fs.mock.impl", MockFileSystem.class.getName());
  conf.set("mapred.mapper.class", ExecMapper.class.getName());
  Path root = new Path(warehouseDir, tableName);
  ((MockFileSystem) root.getFileSystem(conf)).clear();
  // ...
  FileSystem localFs = FileSystem.getLocal(conf).getRaw();
  Path mapXml = new Path(workDir, "map.xml");
  localFs.delete(mapXml, true);
  FSDataOutputStream planStream = localFs.create(mapXml);
  SerializationUtilities.serializePlan(mapWork, planStream);
  conf.setBoolean(Utilities.HAS_MAP_WORK, true);
private static void mergeBinaryTokens(Credentials creds, Configuration conf) {
  String binaryTokenFilename =
      conf.get(MRJobConfig.MAPREDUCE_JOB_CREDENTIALS_BINARY);
  if (binaryTokenFilename != null) {
    Credentials binary;
    try {
      binary = Credentials.readTokenStorageFile(
          FileSystem.getLocal(conf).makeQualified(new Path(binaryTokenFilename)),
          conf);
    } catch (IOException e) {
      throw new RuntimeException(e);
    }
    // supplement existing tokens with the tokens in the binary file
    creds.mergeAll(binary);
  }
}
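A hedged usage sketch: the token-file path below is made up, and mergeBinaryTokens is the method above (a no-op when the property is unset).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.security.Credentials;

Configuration conf = new Configuration();
// Point the job at a previously written delegation-token file (made-up path).
conf.set(MRJobConfig.MAPREDUCE_JOB_CREDENTIALS_BINARY, "/tmp/tokens.bin");
Credentials creds = new Credentials();
mergeBinaryTokens(creds, conf); // tokens from the file are merged into creds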
public void deleteLocalFiles(String subdir) throws IOException {
  String[] localDirs = getLocalDirs();
  for (int i = 0; i < localDirs.length; i++) {
    FileSystem.getLocal(this).delete(new Path(localDirs[i], subdir), true);
  }
}
@Test
public void testWriter() throws Exception {
  Path root = new Path(workDir, "testWriter");
  Configuration conf = new Configuration();
  FileSystem fs = FileSystem.getLocal(conf).getRaw();
  ObjectInspector inspector;
  synchronized (TestOrcFile.class) {
    // ...
  }
  // ...
  DataInputStream side = fs.open(sidePath);
  // ...
  assertEquals(6L, updater.getStats().getRowCount());
  // ...
  assertEquals(false, fs.exists(sidePath));
public static void main(String... args) throws Exception {
  Configuration config = new Configuration();
  FileSystem hdfs = FileSystem.get(config);

  Class<?> codecClass = Class.forName(args[0]);
  CompressionCodec codec =
      (CompressionCodec) ReflectionUtils.newInstance(codecClass, config);

  InputStream is = FileSystem.getLocal(config).open(new Path(args[1]));
  OutputStream os = hdfs.create(new Path(args[2] + codec.getDefaultExtension()));
  OutputStream cos = codec.createOutputStream(os);

  IOUtils.copyBytes(is, cos, config, true);
  IOUtils.closeStream(os);
  IOUtils.closeStream(is);
}
@BeforeClass
public static void setup() throws Exception {
  conf = new Configuration();
  shell = new FsShell(conf);
  lfs = FileSystem.getLocal(conf);
  testRootDir = lfs.makeQualified(new Path(
      System.getProperty("test.build.data", "test/build/data"),
      "testShellCopy"));

  lfs.mkdirs(testRootDir);
  srcPath = new Path(testRootDir, "srcFile");
  dstPath = new Path(testRootDir, "dstFile");
}