private static void deleteDir(HdfsContext context, HdfsEnvironment hdfsEnvironment, Path path, boolean recursive)
{
    try {
        hdfsEnvironment.getFileSystem(context, path).delete(path, recursive);
    }
    catch (Exception e) {
        // don't fail if unable to delete path
        log.warn(e, "Failed to delete path: " + path.toString());
    }
}
public static FileHiveMetastore createTestingFileHiveMetastore(File catalogDirectory)
{
    HiveClientConfig hiveClientConfig = new HiveClientConfig();
    HdfsConfiguration hdfsConfiguration = new HiveHdfsConfiguration(new HdfsConfigurationUpdater(hiveClientConfig));
    HdfsEnvironment hdfsEnvironment = new HdfsEnvironment(hdfsConfiguration, hiveClientConfig, new NoHdfsAuthentication());
    return new FileHiveMetastore(hdfsEnvironment, catalogDirectory.toURI().toString(), "test");
}
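A hedged sketch of how the helper above might be exercised from a test; the temporary-directory handling, the getAllDatabases() call, and the Guava-based cleanup are assumptions for illustration, not code from the connector:

@Test
public void testCreateTestingFileHiveMetastore()
        throws Exception
{
    // hypothetical test: build a file-backed metastore over a fresh temporary directory
    File catalogDirectory = java.nio.file.Files.createTempDirectory("file-hive-metastore").toFile();
    try {
        FileHiveMetastore metastore = createTestingFileHiveMetastore(catalogDirectory);
        // assumption: a brand-new catalog directory contains no databases yet
        assertTrue(metastore.getAllDatabases().isEmpty());
    }
    finally {
        // assumes static imports of Guava's MoreFiles.deleteRecursively and RecursiveDeleteOption.ALLOW_INSECURE
        deleteRecursively(catalogDirectory.toPath(), ALLOW_INSECURE);
    }
}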
public FileSystem getFileSystem(HdfsContext context, Path path)
        throws IOException
{
    return getFileSystem(context.getIdentity().getUser(), path, getConfiguration(context, path));
}
// obtain (and thereby initialize) the FileSystem for the split path; the return value is not used here
this.hdfsEnvironment.getFileSystem(session.getUser(), path, configuration);

RecordReader<?, ?> recordReader = hdfsEnvironment.doAs(session.getUser(),
        () -> HiveUtil.createRecordReader(configuration, path, start, length, schema, columns));
private List<String> computeFileNamesForMissingBuckets(
        ConnectorSession session,
        Table table,
        HiveStorageFormat storageFormat,
        Path targetPath,
        String filePrefix,
        int bucketCount,
        PartitionUpdate partitionUpdate)
{
    if (partitionUpdate.getFileNames().size() == bucketCount) {
        // fast path for common case
        return ImmutableList.of();
    }

    HdfsContext hdfsContext = new HdfsContext(session, table.getDatabaseName(), table.getTableName());
    JobConf conf = toJobConf(hdfsEnvironment.getConfiguration(hdfsContext, targetPath));
    String fileExtension = HiveWriterFactory.getFileExtension(conf, fromHiveStorageFormat(storageFormat));
    Set<String> fileNames = ImmutableSet.copyOf(partitionUpdate.getFileNames());

    ImmutableList.Builder<String> missingFileNamesBuilder = ImmutableList.builder();
    for (int i = 0; i < bucketCount; i++) {
        String fileName = HiveWriterFactory.computeBucketedFileName(filePrefix, i) + fileExtension;
        if (!fileNames.contains(fileName)) {
            missingFileNamesBuilder.add(fileName);
        }
    }
    List<String> missingFileNames = missingFileNamesBuilder.build();
    verify(fileNames.size() + missingFileNames.size() == bucketCount);
    return missingFileNames;
}
@Override
public void abort()
{
    // Must be wrapped in doAs entirely
    // Implicit FileSystem initializations are possible in HiveRecordWriter#rollback -> RecordWriter#close
    hdfsEnvironment.doAs(session.getUser(), this::doAbort);
}
@Test
public void testIsViewFileSystem()
{
    HdfsEnvironment hdfsEnvironment = createTestHdfsEnvironment(new HiveClientConfig());
    Path viewfsPath = new Path("viewfs://ns-default/test-folder");
    Path nonViewfsPath = new Path("hdfs://localhost/test-dir/test-folder");

    // ViewFS check requires the mount point config
    hdfsEnvironment.getConfiguration(CONTEXT, viewfsPath).set("fs.viewfs.mounttable.ns-default.link./test-folder", "hdfs://localhost/app");

    assertTrue(isViewFileSystem(CONTEXT, hdfsEnvironment, viewfsPath));
    assertFalse(isViewFileSystem(CONTEXT, hdfsEnvironment, nonViewfsPath));
}
@Override
public CompletableFuture<Collection<Slice>> finish()
{
    // Must be wrapped in doAs entirely
    // Implicit FileSystem initializations are possible in HiveRecordWriter#commit -> RecordWriter#close
    ListenableFuture<Collection<Slice>> result = hdfsEnvironment.doAs(session.getUser(), this::doFinish);
    return MoreFutures.toCompletableFuture(result);
}
public static boolean pathExists(HdfsContext context, HdfsEnvironment hdfsEnvironment, Path path)
{
    try {
        return hdfsEnvironment.getFileSystem(context, path).exists(path);
    }
    catch (IOException e) {
        throw new PrestoException(HIVE_FILESYSTEM_ERROR, "Failed checking path: " + path, e);
    }
}
@Override
protected ExtendedHiveMetastore createMetastore(File tempDir)
{
    File baseDir = new File(tempDir, "metastore");
    HiveClientConfig hiveConfig = new HiveClientConfig();
    HdfsConfigurationUpdater updater = new HdfsConfigurationUpdater(hiveConfig);
    HdfsConfiguration hdfsConfiguration = new HiveHdfsConfiguration(updater);
    HdfsEnvironment hdfsEnvironment = new HdfsEnvironment(hdfsConfiguration, hiveConfig, new NoHdfsAuthentication());
    return new FileHiveMetastore(hdfsEnvironment, baseDir.toURI().toString(), "test");
}
        entry -> session.getProperty(entry.getName(), entry.getJavaType()).toString()));

Configuration conf = hdfsEnvironment.getConfiguration(new HdfsContext(session, schemaName, tableName), writePath);
this.conf = toJobConf(conf);

// obtain (and thereby initialize) the FileSystem for the write path; the return value is not used here
hdfsEnvironment.getFileSystem(session.getUser(), writePath, conf);
Path path = new Path(hiveSplit.getPath());
Configuration configuration = hdfsEnvironment.getConfiguration(new HdfsContext(session, hiveSplit.getDatabase(), hiveSplit.getTable()), path);
@Override
public CompletableFuture<?> appendPage(Page page)
{
    if (page.getPositionCount() > 0) {
        // Must be wrapped in doAs entirely
        // Implicit FileSystem initializations are possible in HiveRecordWriter#addRow or #createWriter
        hdfsEnvironment.doAs(session.getUser(), () -> doAppend(page));
    }
    return NOT_BLOCKED;
}
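The three methods above (appendPage, finish, abort) share one pattern: any operation that might lazily create a FileSystem runs inside doAs so that Hadoop impersonation happens as the session user. A minimal sketch of the same pattern applied to an arbitrary file-system call, using the exception-propagating doAs overload already seen in finish(); the helper name and the mkdirs call are illustrative assumptions:

private boolean createDirectoryAsSessionUser(FileSystem fileSystem, Path path)
        throws IOException
{
    // run mkdirs as the session user so any implicit FileSystem initialization
    // also happens under that identity (same reasoning as the comments above)
    return hdfsEnvironment.doAs(session.getUser(), () -> fileSystem.mkdirs(path));
}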
/**
 * Attempts to remove the file or directory, including any directory contents.
 *
 * @return true if the location no longer exists
 */
private static boolean deleteRecursivelyIfExists(HdfsContext context, HdfsEnvironment hdfsEnvironment, Path path)
{
    FileSystem fileSystem;
    try {
        fileSystem = hdfsEnvironment.getFileSystem(context, path);
    }
    catch (IOException ignored) {
        return false;
    }

    return deleteIfExists(fileSystem, path, true);
}
/**
 * GlueHiveMetastore currently uses the AWS Default Credential Provider Chain.
 * See https://docs.aws.amazon.com/sdk-for-java/v1/developer-guide/credentials.html#credentials-default
 * for ways to set the AWS credentials needed to run this test.
 */
@Override
protected ExtendedHiveMetastore createMetastore(File tempDir)
{
    HiveClientConfig hiveClientConfig = new HiveClientConfig();
    HdfsConfiguration hdfsConfiguration = new HiveHdfsConfiguration(new HdfsConfigurationUpdater(hiveClientConfig));
    HdfsEnvironment hdfsEnvironment = new HdfsEnvironment(hdfsConfiguration, hiveClientConfig, new NoHdfsAuthentication());
    GlueHiveMetastoreConfig glueConfig = new GlueHiveMetastoreConfig();
    glueConfig.setDefaultWarehouseDir(tempDir.toURI().toString());
    return new GlueHiveMetastore(hdfsEnvironment, glueConfig);
}
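Because the credentials are resolved by the AWS SDK rather than by Presto configuration, one way (among several documented at the link above) to supply them when running this test locally is through the SDK's Java system properties; the values below are placeholders:

// assumption: credentials are provided via the AWS SDK v1 default chain; the environment
// variables AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY or ~/.aws/credentials work just as well
System.setProperty("aws.accessKeyId", "<your-access-key-id>");
System.setProperty("aws.secretKey", "<your-secret-access-key>");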
Configuration configuration = hdfsEnvironment.getConfiguration(hdfsContext, path);
InputFormat<?, ?> inputFormat = getInputFormat(configuration, schema, false);
FileSystem fs = hdfsEnvironment.getFileSystem(hdfsContext, path);
boolean s3SelectPushdownEnabled = shouldEnablePushdownForTable(session, table, path.toString(), partition.getPartition());

FileSystem targetFilesystem = hdfsEnvironment.getFileSystem(hdfsContext, targetPath);
JobConf targetJob = toJobConf(targetFilesystem.getConf());
targetJob.setInputFormat(TextInputFormat.class);
private void createEmptyFile(ConnectorSession session, Path path, Table table, Optional<Partition> partition, List<String> fileNames)
{
    JobConf conf = toJobConf(hdfsEnvironment.getConfiguration(new HdfsContext(session, table.getDatabaseName(), table.getTableName()), path));

    Properties schema;
    StorageFormat format;
    if (partition.isPresent()) {
        schema = getHiveSchema(partition.get(), table);
        format = partition.get().getStorage().getStorageFormat();
    }
    else {
        schema = getHiveSchema(table);
        format = table.getStorage().getStorageFormat();
    }

    for (String fileName : fileNames) {
        writeEmptyFile(session, new Path(path, fileName), conf, schema, format.getSerDe(), format.getOutputFormat());
    }
}
public static void createDirectory(HdfsContext context, HdfsEnvironment hdfsEnvironment, Path path)
{
    try {
        if (!hdfsEnvironment.getFileSystem(context, path).mkdirs(path, ALL_PERMISSIONS)) {
            throw new IOException("mkdirs returned false");
        }
    }
    catch (IOException e) {
        throw new PrestoException(HIVE_FILESYSTEM_ERROR, "Failed to create directory: " + path, e);
    }

    // explicitly set permission since the default umask overrides it on creation
    try {
        hdfsEnvironment.getFileSystem(context, path).setPermission(path, ALL_PERMISSIONS);
    }
    catch (IOException e) {
        throw new PrestoException(HIVE_FILESYSTEM_ERROR, "Failed to set permission on directory: " + path, e);
    }
}
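The static helpers shown in this section (pathExists, createDirectory, deleteDir) compose naturally, since they all take the same HdfsContext/HdfsEnvironment pair. A hedged sketch of such a composition; the method name and the staging path are illustrative assumptions, not connector code:

private static void ensureStagingDirectory(HdfsContext context, HdfsEnvironment hdfsEnvironment, Path stagingPath)
{
    // create the directory only when it does not already exist; both helpers
    // translate IOException into PrestoException(HIVE_FILESYSTEM_ERROR)
    if (!pathExists(context, hdfsEnvironment, stagingPath)) {
        createDirectory(context, hdfsEnvironment, stagingPath);
    }
}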
HdfsEnvironment hdfsEnvironment = new HdfsEnvironment(hdfsConfiguration, hiveClientConfig, new NoHdfsAuthentication());
Configuration configuration = hdfsEnvironment.getConfiguration(path);
InputFormat<?, ?> inputFormat = getInputFormat(configuration, schema, false);
FileSystem targetFilesystem = hdfsEnvironment.getFileSystem(split.getPath());
FileStatus file = targetFilesystem.getFileStatus(split.getPath());
hiveSplitSource.addToQueue(createHiveSplits(

FileSystem fs = hdfsEnvironment.getFileSystem(path);
if (bucket.isPresent()) {
    Optional<FileStatus> bucketFile = getBucketFile(bucket.get(), fs, path);