public static Path createTemporaryPath(ConnectorSession session, HdfsContext context, HdfsEnvironment hdfsEnvironment, Path targetPath)
{
    // use a per-user temporary directory to avoid permission problems
    String temporaryPrefix = getTemporaryStagingDirectoryPath(session)
            .replace("${USER}", context.getIdentity().getUser());

    // use relative temporary directory on ViewFS
    if (isViewFileSystem(context, hdfsEnvironment, targetPath)) {
        temporaryPrefix = ".hive-staging";
    }

    // create a temporary directory on the same filesystem
    Path temporaryRoot = new Path(targetPath, temporaryPrefix);
    Path temporaryPath = new Path(temporaryRoot, randomUUID().toString());

    createDirectory(context, hdfsEnvironment, temporaryPath);

    return temporaryPath;
}
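// A minimal, hypothetical sketch (not part of the original sources) of the Path
// arithmetic above. Hadoop resolves an absolute child path against the parent's
// scheme and authority, so an absolute staging prefix such as "/tmp/presto-alice"
// stays on the *target* filesystem, while the relative ".hive-staging" prefix
// used for ViewFS nests under the target directory itself. All values below are
// made up for illustration; only org.apache.hadoop.fs.Path is required.
import org.apache.hadoop.fs.Path;

import static java.util.UUID.randomUUID;

public final class TemporaryPathDemo
{
    public static void main(String[] args)
    {
        Path targetPath = new Path("hdfs://namenode:8020/warehouse/sales/orders");

        // absolute prefix: takes the parent's scheme and authority
        System.out.println(new Path(targetPath, "/tmp/presto-alice"));
        // -> hdfs://namenode:8020/tmp/presto-alice

        // relative prefix (the ViewFS case): nests under the target directory
        System.out.println(new Path(targetPath, ".hive-staging"));
        // -> hdfs://namenode:8020/warehouse/sales/orders/.hive-staging

        // each write gets its own random staging subdirectory
        System.out.println(new Path(new Path(targetPath, "/tmp/presto-alice"), randomUUID().toString()));
    }
}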
private static void renameDirectory(HdfsContext context, HdfsEnvironment hdfsEnvironment, Path source, Path target, Runnable runWhenPathDoesntExist)
{
    if (pathExists(context, hdfsEnvironment, target)) {
        throw new PrestoException(
                HIVE_PATH_ALREADY_EXISTS,
                format("Unable to rename from %s to %s: target directory already exists", source, target));
    }

    if (!pathExists(context, hdfsEnvironment, target.getParent())) {
        createDirectory(context, hdfsEnvironment, target.getParent());
    }

    // The runnable will assume that if rename fails, it will be okay to delete the directory (if the directory is empty).
    // This is not technically true because a race condition still exists.
    runWhenPathDoesntExist.run();

    try {
        if (!hdfsEnvironment.getFileSystem(context, source).rename(source, target)) {
            throw new PrestoException(HIVE_FILESYSTEM_ERROR, format("Failed to rename %s to %s: rename returned false", source, target));
        }
    }
    catch (IOException e) {
        throw new PrestoException(HIVE_FILESYSTEM_ERROR, format("Failed to rename %s to %s", source, target), e);
    }
}
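// A hypothetical caller sketch (assumed; not from the original sources) showing
// the intent of the runWhenPathDoesntExist hook: the caller registers a cleanup
// action for the target directory at the last possible moment, after the
// existence checks pass and immediately before rename(source, target), so a
// later abort can delete the directory even when the rename itself failed.
// cleanUpTasksForAbort and deleteIfEmpty are illustrative names only;
// prepareAddPartition later in this excerpt applies the same pattern with
// DirectoryCleanUpTask.
List<Runnable> cleanUpTasksForAbort = new ArrayList<>();
renameDirectory(
        context,
        hdfsEnvironment,
        stagingPath,
        targetPath,
        () -> cleanUpTasksForAbort.add(() -> deleteIfEmpty(context, targetPath)));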
@Override
public void createDatabase(Database database)
{
    if (!database.getLocation().isPresent() && defaultDir.isPresent()) {
        String databaseLocation = new Path(defaultDir.get(), database.getDatabaseName()).toString();
        database = Database.builder(database)
                .setLocation(Optional.of(databaseLocation))
                .build();
    }

    try {
        DatabaseInput databaseInput = GlueInputConverter.convertDatabase(database);
        glueClient.createDatabase(new CreateDatabaseRequest().withDatabaseInput(databaseInput));
    }
    catch (AlreadyExistsException e) {
        throw new SchemaAlreadyExistsException(database.getDatabaseName());
    }
    catch (AmazonServiceException e) {
        throw new PrestoException(HIVE_METASTORE_ERROR, e);
    }

    if (database.getLocation().isPresent()) {
        HiveWriteUtils.createDirectory(hdfsContext, hdfsEnvironment, new Path(database.getLocation().get()));
    }
}
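// Illustration of the default-location fallback above, with assumed values:
// given defaultDir = "hdfs://namenode:8020/warehouse" and a database named
// "sales" created without an explicit location, the database is placed at
//
//     new Path("hdfs://namenode:8020/warehouse", "sales")
//         -> hdfs://namenode:8020/warehouse/sales
//
// and the trailing createDirectory call then materializes that directory.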
createDirectory(context, hdfsEnvironment, targetPath);
private void prepareAddPartition(HdfsContext context, PartitionAndMore partitionAndMore)
{
    deleteOnly = false;

    Partition partition = partitionAndMore.getPartition();
    String targetLocation = partition.getStorage().getLocation();
    Path currentPath = partitionAndMore.getCurrentLocation();
    Path targetPath = new Path(targetLocation);

    SchemaTableName schemaTableName = new SchemaTableName(partition.getDatabaseName(), partition.getTableName());
    PartitionAdder partitionAdder = partitionAdders.computeIfAbsent(
            schemaTableName,
            ignored -> new PartitionAdder(partition.getDatabaseName(), partition.getTableName(), delegate, PARTITION_COMMIT_BATCH_SIZE));

    if (pathExists(context, hdfsEnvironment, currentPath)) {
        if (!targetPath.equals(currentPath)) {
            renameDirectory(
                    context,
                    hdfsEnvironment,
                    currentPath,
                    targetPath,
                    () -> cleanUpTasksForAbort.add(new DirectoryCleanUpTask(context, targetPath, true)));
        }
    }
    else {
        cleanUpTasksForAbort.add(new DirectoryCleanUpTask(context, targetPath, true));
        createDirectory(context, hdfsEnvironment, targetPath);
    }

    String partitionName = getPartitionName(partition.getDatabaseName(), partition.getTableName(), partition.getValues());
    partitionAdder.addPartition(new PartitionWithStatistics(partition, partitionName, partitionAndMore.getStatisticsUpdate()));
}
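// A rough sketch (assumptions: Guava's Lists.partition is available, and the
// delegate metastore accepts partitions in bulk; PartitionAdder's real
// internals are not shown in this excerpt) of how a batch size such as
// PARTITION_COMMIT_BATCH_SIZE is typically applied when the buffered
// partitions are flushed at commit time:
for (List<PartitionWithStatistics> batch : Lists.partition(bufferedPartitions, PARTITION_COMMIT_BATCH_SIZE)) {
    delegate.addPartitions(databaseName, tableName, batch);
}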
@Override
public void triggerConflict(ConnectorSession session, SchemaTableName tableName, ConnectorInsertTableHandle insertTableHandle, List<PartitionUpdate> partitionUpdates)
{
    Path writePath = getStagingPathRoot(insertTableHandle);
    Path targetPath = getTargetPathRoot(insertTableHandle);
    if (writePath.equals(targetPath)) {
        // This conflict does not apply. Trigger a rollback right away so that this test case passes.
        throw new TestingRollbackException();
    }
    path = new Path(targetPath + "/pk1=b/pk2=add2");
    context = new HdfsContext(session, tableName.getSchemaName(), tableName.getTableName());
    createDirectory(context, hdfsEnvironment, path);
}
public static Path createTemporaryPath(HdfsEnvironment hdfsEnvironment, Path targetPath)
{
    // use a per-user temporary directory to avoid permission problems
    // TODO: this should use Hadoop UserGroupInformation
    String temporaryPrefix = "/tmp/presto-" + StandardSystemProperty.USER_NAME.value();

    // create a temporary directory on the same filesystem
    Path temporaryRoot = new Path(targetPath, temporaryPrefix);
    Path temporaryPath = new Path(temporaryRoot, randomUUID().toString());

    createDirectory(hdfsEnvironment, temporaryPath);

    return temporaryPath;
}
public static void renameDirectory(HdfsEnvironment hdfsEnvironment, String schemaName, String tableName, Path source, Path target)
{
    if (pathExists(hdfsEnvironment, target)) {
        throw new PrestoException(
                HIVE_PATH_ALREADY_EXISTS,
                format("Unable to commit creation of table '%s.%s': target directory already exists: %s", schemaName, tableName, target));
    }

    if (!pathExists(hdfsEnvironment, target.getParent())) {
        createDirectory(hdfsEnvironment, target.getParent());
    }

    try {
        if (!hdfsEnvironment.getFileSystem(source).rename(source, target)) {
            throw new PrestoException(HIVE_FILESYSTEM_ERROR, format("Failed to rename %s to %s: rename returned false", source, target));
        }
    }
    catch (IOException e) {
        throw new PrestoException(HIVE_FILESYSTEM_ERROR, format("Failed to rename %s to %s", source, target), e);
    }
}
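// A hypothetical end-to-end sketch (writeDataTo is a made-up placeholder) of
// the staging protocol the two helpers above implement together: write into a
// temporary directory created on the target filesystem, then publish the
// result with a single rename so readers never observe a half-written table
// directory.
Path targetPath = new Path("/warehouse/sales/orders");
Path temporaryPath = createTemporaryPath(hdfsEnvironment, targetPath);
writeDataTo(temporaryPath); // hypothetical: produce the table files under the staging directory
renameDirectory(hdfsEnvironment, "sales", "orders", temporaryPath, targetPath);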
@Override
public void createTable(ConnectorSession session, ConnectorTableMetadata tableMetadata)
{
    checkArgument(!isNullOrEmpty(tableMetadata.getOwner()), "Table owner is null or empty");

    SchemaTableName schemaTableName = tableMetadata.getTable();
    String schemaName = schemaTableName.getSchemaName();
    String tableName = schemaTableName.getTableName();
    List<String> partitionedBy = getPartitionedBy(tableMetadata.getProperties());

    List<HiveColumnHandle> columnHandles = getColumnHandles(connectorId, tableMetadata, ImmutableSet.copyOf(partitionedBy));
    HiveStorageFormat hiveStorageFormat = getHiveStorageFormat(tableMetadata.getProperties());
    Map<String, String> additionalTableParameters = tableParameterCodec.encode(tableMetadata.getProperties());

    LocationHandle locationHandle = locationService.forNewTable(session.getQueryId(), schemaName, tableName);
    Path targetPath = locationService.targetPathRoot(locationHandle);
    createDirectory(hdfsEnvironment, targetPath);

    createTable(schemaName, tableName, tableMetadata.getOwner(), columnHandles, hiveStorageFormat, partitionedBy, additionalTableParameters, targetPath);
}
LocationHandle locationHandle = locationService.forNewTable(session.getQueryId(), schemaName, tableName);
Path targetPath = locationService.targetPathRoot(locationHandle);
HiveWriteUtils.createDirectory(hdfsEnvironment, targetPath);