@Override
public FileSystem getNonCachedFileSystem(URI uri, Configuration conf) throws IOException {
  return FileSystem.newInstance(uri, conf);
}
@Override
public FileSystem run() throws IOException {
  return newInstance(uri, conf);
}
});
/**
 * Get a unique local FileSystem object.
 * @param conf the configuration to configure the FileSystem with
 * @return a new LocalFileSystem object.
 * @throws IOException FS creation or initialization failure.
 */
public static LocalFileSystem newInstanceLocal(Configuration conf) throws IOException {
  return (LocalFileSystem) newInstance(LocalFileSystem.NAME, conf);
}
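As the Javadoc above states, newInstanceLocal(conf) always hands back a fresh LocalFileSystem rather than a cached one. A minimal usage sketch under that assumption (class name and the probed path are illustrative only):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;

public class NewInstanceLocalExample {
  public static void main(String[] args) throws Exception {
    // Each call yields a distinct LocalFileSystem, unlike FileSystem.getLocal(conf),
    // which may return a shared, cached instance; closing this one is therefore safe.
    try (LocalFileSystem localFs = FileSystem.newInstanceLocal(new Configuration())) {
      System.out.println(localFs.exists(new Path("/tmp")));
    }
  }
}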
private static FileSystem buildFileSystem(Properties jobProps, Configuration configuration) throws IOException {
  URI fsUri = URI.create(jobProps.getProperty(ConfigurationKeys.FS_URI_KEY, ConfigurationKeys.LOCAL_FS_URI));
  return FileSystem.newInstance(fsUri, configuration);
}
@Override
public Object run() throws Exception {
  // New FileSystem object to be obtained in user context for doAs flow.
  try (FileSystem doAsFs = FileSystem.newInstance(eventsDir.getPath().toUri(), conf)) {
    doAsFs.delete(eventsDir.getPath(), true);
  }
  return null;
}
});
public static FileSystem openHdfsConnect() {
  Configuration conf = new Configuration();
  conf.set("fs.defaultFS", SERVER_PATH);
  FileSystem fs = null;
  try {
    fs = FileSystem.newInstance(new URI(SERVER_PATH), conf);
  } catch (IOException e) {
    e.printStackTrace();
  } catch (URISyntaxException e) {
    e.printStackTrace();
  }
  return fs;
}

public static void closeHdfsConnect(FileSystem fs) {
/**
 * Returns a unique configured FileSystem implementation for the default
 * filesystem of the supplied configuration.
 * This always returns a new FileSystem object.
 * @param conf the configuration to use
 * @return the new FS instance
 * @throws IOException FS creation or initialization failure.
 */
public static FileSystem newInstance(Configuration conf) throws IOException {
  return newInstance(getDefaultUri(conf), conf);
}
@Inject
public HdfsDataSegmentPusher(HdfsDataSegmentPusherConfig config, Configuration hadoopConfig, ObjectMapper jsonMapper) {
  this.hadoopConfig = hadoopConfig;
  this.jsonMapper = jsonMapper;
  Path storageDir = new Path(config.getStorageDirectory());
  this.fullyQualifiedStorageDirectory = Suppliers.memoize(
      () -> {
        try {
          return FileSystem.newInstance(storageDir.toUri(), hadoopConfig)
                           .makeQualified(storageDir)
                           .toUri()
                           .toString();
        } catch (Exception e) {
          throw new RuntimeException(e);
        }
      }
  );
  log.info("Configured HDFS as deep storage");
}
/**
 * Returns the FileSystem for this URI's scheme and authority.
 * The entire URI is passed to the FileSystem instance's initialize method.
 * This always returns a new FileSystem object.
 * @param uri FS URI
 * @param config configuration to use
 * @return the new FS instance
 * @throws IOException FS creation or initialization failure.
 */
public static FileSystem newInstance(URI uri, Configuration config) throws IOException {
  String scheme = uri.getScheme();
  String authority = uri.getAuthority();

  if (scheme == null) {                         // no scheme: use default FS
    return newInstance(config);
  }

  if (authority == null) {                      // no authority
    URI defaultUri = getDefaultUri(config);
    if (scheme.equals(defaultUri.getScheme())   // if scheme matches default
        && defaultUri.getAuthority() != null) { // & default has authority
      return newInstance(defaultUri, config);   // return default
    }
  }
  return CACHE.getUnique(uri, config);
}
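Because newInstance(uri, config) bypasses the shared FileSystem cache, the caller owns the returned object and is responsible for closing it. A minimal caller-side sketch (the namenode URI and probed path are placeholders, not taken from any snippet above):

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class NewInstanceExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // try-with-resources is appropriate here because the instance is private to this
    // caller; a FileSystem.get() instance may be cached and shared, so closing it
    // there could break other users of the same cached object.
    try (FileSystem fs = FileSystem.newInstance(URI.create("hdfs://namenode:8020/"), conf)) {
      System.out.println(fs.exists(new Path("/tmp")));
    }
  }
}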
Path p = new Path(srcPath);
if (srcFs == null) {
  srcFs = FileSystem.newInstance(p.toUri(), conf);
public Avro2OrcStaleDatasetCleaner(String jobId, Properties props) throws IOException {
  super(jobId, log);
  props.setProperty(HiveDatasetFinder.HIVE_DATASET_CONFIG_PREFIX_KEY, HIVE_DATASET_CONFIG_AVRO_PREFIX);
  this.graceTimeInMillis = TimeUnit.DAYS.toMillis(Long.parseLong(props
      .getProperty(HIVE_PARTITION_DELETION_GRACE_TIME_IN_DAYS, DEFAULT_HIVE_PARTITION_DELETION_GRACE_TIME_IN_DAYS)));
  Config config = ConfigFactory.parseProperties(props);
  this.fs = FileSystem.newInstance(new Configuration());
  this.metricContext = Instrumented.getMetricContext(ConfigUtils.configToState(config), ValidationJob.class);
  this.eventSubmitter = new EventSubmitter.Builder(this.metricContext, EventConstants.CONVERSION_NAMESPACE).build();
  this.datasetFinder = new ConvertibleHiveDatasetFinder(this.fs, props, this.eventSubmitter);
}
private static List<Partition> getPartitions(String completeTableName) {
  List<String> tableList = At_SPLITTER.splitToList(completeTableName);
  if (tableList.size() != 2) {
    log.warn("Invalid table name " + completeTableName);
    return Collections.EMPTY_LIST;
  }
  try (AutoReturnableObject<IMetaStoreClient> client = ComplianceRetentionJob.pool.getClient()) {
    Table table = client.get().getTable(tableList.get(0), tableList.get(1));
    HiveDataset dataset = new HiveDataset(FileSystem.newInstance(new Configuration()), ComplianceRetentionJob.pool,
        new org.apache.hadoop.hive.ql.metadata.Table(table), new Properties());
    return dataset.getPartitionsFromDataset();
  } catch (IOException | TException e) {
    log.warn("Unable to get Partitions for table " + completeTableName + " " + e.getMessage());
  }
  return Collections.EMPTY_LIST;
}
}
srcFs = FileSystem.newInstance(p.toUri(), conf);
@Override
public SharedResourceFactoryResponse<FileSystem> createResource(SharedResourcesBroker<S> broker,
    ScopedConfigView<S, FileSystemKey> config) throws NotConfiguredException {
  try {
    FileSystemKey key = config.getKey();
    URI uri = key.getUri();
    Configuration hadoopConf = key.getConfiguration();

    log.info("Creating instrumented FileSystem for uri " + uri);

    Class<? extends FileSystem> fsClass = FileSystem.getFileSystemClass(uri.getScheme(), hadoopConf);
    if (InstrumentedFileSystem.class.isAssignableFrom(fsClass)) {
      InstrumentedFileSystem tmpfs = (InstrumentedFileSystem) fsClass.newInstance();
      hadoopConf = new Configuration(hadoopConf);
      String schemeKey = "fs." + uri.getScheme() + ".impl";
      hadoopConf.set(schemeKey, tmpfs.underlyingFs.getClass().getName());
    }

    FileSystem fs = FileSystem.newInstance(uri, hadoopConf);
    ServiceLoader<FileSystemInstrumentationFactory> loader = ServiceLoader.load(FileSystemInstrumentationFactory.class);
    for (FileSystemInstrumentationFactory instrumentationFactory : loader) {
      fs = instrumentationFactory.instrumentFileSystem(fs, broker, config);
    }

    return new ResourceInstance<>(fs);
  } catch (IOException | ReflectiveOperationException ioe) {
    throw new RuntimeException(ioe);
  }
}
public void initDatasetFinder(Properties properties) throws IOException {
  Preconditions.checkArgument(properties.containsKey(GOBBLIN_COMPLIANCE_DATASET_FINDER_CLASS),
      "Missing required property " + GOBBLIN_COMPLIANCE_DATASET_FINDER_CLASS);
  String finderClass = properties.getProperty(GOBBLIN_COMPLIANCE_DATASET_FINDER_CLASS);
  this.finder = GobblinConstructorUtils.invokeConstructor(DatasetsFinder.class, finderClass, new State(properties));

  Iterator<HiveDataset> datasetsIterator =
      new HiveDatasetFinder(FileSystem.newInstance(new Configuration()), properties).getDatasetsIterator();

  while (datasetsIterator.hasNext()) {
    // Drop partitions from empty tables if property is set, otherwise skip the table
    HiveDataset hiveDataset = datasetsIterator.next();
    List<Partition> partitionsFromDataset = hiveDataset.getPartitionsFromDataset();
    String completeTableName = hiveDataset.getTable().getCompleteName();
    if (!partitionsFromDataset.isEmpty()) {
      this.tableNamesList.add(completeTableName);
      continue;
    }
    if (!Boolean.parseBoolean(properties.getProperty(ComplianceConfigurationKeys.SHOULD_DROP_EMPTY_TABLES,
        ComplianceConfigurationKeys.DEFAULT_SHOULD_DROP_EMPTY_TABLES))) {
      continue;
    }
    if (completeTableName.contains(ComplianceConfigurationKeys.TRASH)
        || completeTableName.contains(ComplianceConfigurationKeys.BACKUP)
        || completeTableName.contains(ComplianceConfigurationKeys.STAGING)) {
      this.tablesToDrop.add(hiveDataset);
    }
  }
}
@Test
public void testFromConfigurationOverride() throws Exception {
  Configuration configuration = new Configuration();
  configuration.set("fs.file.impl", InstrumentedLocalFileSystem.class.getName());
  FileSystem fs = FileSystem.newInstance(new URI("file:///"), configuration);
  Assert.assertTrue(fs instanceof InstrumentedLocalFileSystem);
  Assert.assertTrue(DecoratorUtils.resolveUnderlyingObject(fs) instanceof LocalFileSystem);
  Assert.assertEquals(fs.getFileStatus(new Path("/tmp")).getPath(), new Path("file:///tmp"));
  Assert.assertEquals(fs.getUri().getScheme(), "file");
}
private void createFileSystem(String uri) throws IOException, InterruptedException, URISyntaxException {
  if (this.state.getPropAsBoolean(ConfigurationKeys.SHOULD_FS_PROXY_AS_USER,
      ConfigurationKeys.DEFAULT_SHOULD_FS_PROXY_AS_USER)) {
    // Initialize file system as a proxy user.
    this.fs = new ProxiedFileSystemWrapper().getProxiedFileSystem(this.state, ProxiedFileSystemWrapper.AuthType.TOKEN,
        this.state.getProp(ConfigurationKeys.FS_PROXY_AS_USER_TOKEN_FILE), uri, configuration);
  } else {
    // Initialize file system as the current user.
    this.fs = FileSystem.newInstance(URI.create(uri), this.configuration);
  }
}
@Override
public void run() {
  try {
    int curThreadNum = threadNum.getAndIncrement();

    if ("file".equals(uri.getScheme()))
      FileSystem.get(uri, cfg).setWorkingDirectory(new Path("file:///user/user" + curThreadNum));

    changeUserPhase.countDown();
    changeUserPhase.await();

    newUserInitWorkDir[curThreadNum] = FileSystem.get(uri, cfg).getWorkingDirectory();

    FileSystem.get(uri, cfg).setWorkingDirectory(new Path("folder" + curThreadNum));

    changeDirPhase.countDown();
    changeDirPhase.await();

    newWorkDir[curThreadNum] = FileSystem.get(uri, cfg).getWorkingDirectory();

    FileSystem.get(uri, cfg).setWorkingDirectory(new Path("/folder" + curThreadNum));

    changeAbsDirPhase.countDown();
    changeAbsDirPhase.await();

    newAbsWorkDir[curThreadNum] = FileSystem.get(uri, cfg).getWorkingDirectory();

    newInstanceWorkDir[curThreadNum] = FileSystem.newInstance(uri, cfg).getWorkingDirectory();

    finishPhase.countDown();
  } catch (InterruptedException | IOException e) {
    error("Failed to execute test thread.", e);
    fail();
  }
}
}, THREAD_COUNT, "filesystems-test");
private static FileSystem getWriterFsUsingKeytab(State state, URI uri) throws IOException {
  FileSystem fs = FileSystem.newInstance(uri, new Configuration());
  try {
    Preconditions.checkArgument(state.contains(ConfigurationKeys.FS_PROXY_AS_USER_NAME),
        "Missing required property " + ConfigurationKeys.FS_PROXY_AS_USER_NAME);
    Preconditions.checkArgument(state.contains(ConfigurationKeys.SUPER_USER_NAME_TO_PROXY_AS_OTHERS),
        "Missing required property " + ConfigurationKeys.SUPER_USER_NAME_TO_PROXY_AS_OTHERS);
    Preconditions.checkArgument(state.contains(ConfigurationKeys.SUPER_USER_KEY_TAB_LOCATION),
        "Missing required property " + ConfigurationKeys.SUPER_USER_KEY_TAB_LOCATION);
    String user = state.getProp(ConfigurationKeys.FS_PROXY_AS_USER_NAME);
    String superUser = state.getProp(ConfigurationKeys.SUPER_USER_NAME_TO_PROXY_AS_OTHERS);
    Path keytabLocation = new Path(state.getProp(ConfigurationKeys.SUPER_USER_KEY_TAB_LOCATION));
    return ProxiedFileSystemCache.fromKeytab().userNameToProxyAs(user).fsURI(uri)
        .superUserKeytabLocation(keytabLocation).superUserName(superUser).conf(HadoopUtils.newConfiguration())
        .referenceFS(fs).build();
  } catch (ExecutionException e) {
    throw new IOException(e);
  }
}
assertEquals(workDir, FileSystem.newInstance(uri, cfg).getWorkingDirectory());