HiveOutputTableHandle result = new HiveOutputTableHandle(
        schemaName,
        tableName,
        /* ... remaining constructor arguments truncated in the original snippet ... */);

metastore.declareIntentionToWrite(session, writeInfo.getWriteMode(), writeInfo.getWritePath(), result.getFilePrefix(), schemaTableName);
WriteInfo writeInfo = locationService.getQueryWriteInfo(handle.getLocationHandle());
Table table = buildTableObject(
        session.getQueryId(),
        handle.getSchemaName(),
        handle.getTableName(),
        handle.getTableOwner(),
        handle.getInputColumns(),
        handle.getTableStorageFormat(),
        handle.getPartitionedBy(),
        handle.getBucketProperty(),
        handle.getAdditionalTableParameters(),
        writeInfo.getTargetPath(),
        false,
        prestoVersion);
PrincipalPrivileges principalPrivileges = buildInitialPrivilegeSet(handle.getTableOwner());

if (handle.getBucketProperty().isPresent()) {
    ImmutableList<PartitionUpdate> partitionUpdatesForMissingBuckets = computePartitionUpdatesForMissingBuckets(session, handle, table, partitionUpdates);
    /* ... snippet truncated ... */
}

Map<String, Type> columnTypes = handle.getInputColumns().stream()
        .collect(toImmutableMap(HiveColumnHandle::getName, column -> column.getHiveType().getType(typeManager)));
Map<List<String>, ComputedStatistics> partitionComputedStatistics = createComputedStatisticsToPartitionMap(computedStatistics, handle.getPartitionedBy(), columnTypes);

if (!handle.getPartitionedBy().isEmpty()) {
    if (isRespectTableFormat(session)) {
        Verify.verify(handle.getPartitionStorageFormat() == handle.getTableStorageFormat());
    }
    metastore.addPartition(
            session,
            /* ... snippet truncated ... */
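// The fragments passed to finishCreateTable are the JSON-encoded PartitionUpdate
// values emitted by the page sinks. A minimal sketch of the decoding step, assuming
// a partitionUpdateCodec built with JsonCodec.jsonCodec(PartitionUpdate.class), as in
// the page-sink test helper shown later in this section:
List<PartitionUpdate> partitionUpdates = fragments.stream()
        .map(Slice::getBytes)
        .map(partitionUpdateCodec::fromJson)
        .collect(toList());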
List<PartitionUpdate> partitionUpdates = fragments.stream()
        .map(Slice::getBytes)
        .map(partitionUpdateCodec::fromJson)
        .collect(toList());

Path targetPath = locationService.targetPathRoot(handle.getLocationHandle());
Path writePath = locationService.writePathRoot(handle.getLocationHandle()).get();

if (!targetPath.equals(writePath)) {
    if (pathExists(targetPath)) {
        throw new PrestoException(HIVE_PATH_ALREADY_EXISTS, format("Unable to commit creation of table '%s.%s': target directory already exists: %s",
                handle.getSchemaName(),
                handle.getTableName(),
                targetPath));
    }
    renameDirectory(hdfsEnvironment, handle.getSchemaName(), handle.getTableName(), writePath, targetPath);
}

PartitionCommitter partitionCommitter = new PartitionCommitter(handle.getSchemaName(), handle.getTableName(), metastore, PARTITION_COMMIT_BATCH_SIZE);
try {
    partitionUpdates = PartitionUpdate.mergePartitionUpdates(partitionUpdates);

    Table table = buildTableObject(
            handle.getSchemaName(),
            handle.getTableName(),
            handle.getTableOwner(),
            handle.getInputColumns(),
            handle.getHiveStorageFormat(),
            handle.getPartitionedBy(),
            handle.getAdditionalTableParameters(),
            targetPath);

    if (!handle.getPartitionedBy().isEmpty()) {
        partitionUpdates.stream()
                .map(partitionUpdate -> createPartition(table, partitionUpdate))
                /* ... snippet truncated ... */
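// PartitionCommitter exists to avoid one metastore round trip per partition: it
// buffers created partitions and flushes them in batches of PARTITION_COMMIT_BATCH_SIZE.
// An illustrative sketch of that batching idea only; the class and method names here
// are assumptions, and the real implementation also handles abort and close:
class BatchingPartitionCommitter
{
    private final String schemaName;
    private final String tableName;
    private final HiveMetastore metastore;
    private final int batchSize;
    private final List<Partition> batch = new ArrayList<>();

    BatchingPartitionCommitter(String schemaName, String tableName, HiveMetastore metastore, int batchSize)
    {
        this.schemaName = schemaName;
        this.tableName = tableName;
        this.metastore = metastore;
        this.batchSize = batchSize;
    }

    public void addPartition(Partition partition)
    {
        batch.add(partition);
        if (batch.size() >= batchSize) {
            flush();
        }
    }

    public void flush()
    {
        if (!batch.isEmpty()) {
            // assumed batch-add metastore call; the real interface may differ
            metastore.addPartitions(schemaName, tableName, ImmutableList.copyOf(batch));
            batch.clear();
        }
    }
}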
final Path targetPath = new Path(handle.getTargetPath());
if (handle.hasTemporaryPath()) {
    if (pathExists(targetPath)) {
        final SchemaTableName table = new SchemaTableName(handle.getSchemaName(), handle.getTableName());
        throw new PrestoException(HiveErrorCode.HIVE_PATH_ALREADY_EXISTS,
                String.format("Unable to commit creation of table '%s': target directory already exists: %s", table, targetPath));
    }
    rename(new Path(handle.getTemporaryPath()), targetPath);
}

final List<String> types = handle.getColumnTypes().stream()
        .map(HiveType::toHiveType)
        .map(HiveType::getHiveTypeName)
        .collect(Collectors.toList());

for (int i = 0; i < handle.getColumnNames().size(); i++) {
    final String name = handle.getColumnNames().get(i);
    final String type = types.get(i);
    final Field field = new Field();
    /* ... snippet truncated ... */
}

final HiveStorageFormat hiveStorageFormat = handle.getHiveStorageFormat();
final Database database = databaseDao.getBySourceDatabaseName(connectorId.toString(), handle.getSchemaName());
table.setName(handle.getTableName());
table.setDatabase(database);
info.setInputFormat(hiveStorageFormat.getInputFormat());
info.setOutputFormat(hiveStorageFormat.getOutputFormat());
info.setOwner(handle.getTableOwner());
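// The commit path above is the classic write-to-staging-then-rename pattern: write
// into a temporary directory, verify the target does not exist, then publish with a
// single directory rename. A minimal sketch against Hadoop's FileSystem API; the
// method itself is hypothetical, while exists() and rename() are real FileSystem calls:
static void publishStagedWrite(FileSystem fileSystem, Path temporaryPath, Path targetPath)
        throws IOException
{
    if (fileSystem.exists(targetPath)) {
        throw new IOException("target directory already exists: " + targetPath);
    }
    // exists() and rename() are separate calls, so this guards against stale state,
    // not against a concurrent writer racing between the two operations
    if (!fileSystem.rename(temporaryPath, targetPath)) {
        throw new IOException("rename failed: " + temporaryPath + " -> " + targetPath);
    }
}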
private static ConnectorPageSink createPageSink(HiveTransactionHandle transaction, HiveClientConfig config, ExtendedHiveMetastore metastore, Path outputPath, HiveWriterStats stats)
{
    HiveOutputTableHandle handle = new HiveOutputTableHandle(
            SCHEMA_NAME,
            TABLE_NAME,
            /* ... remaining arguments truncated in the original snippet ... */);
    /* ... snippet truncated ... */
}
protected Path getStagingPathRoot(ConnectorOutputTableHandle outputTableHandle)
{
    HiveOutputTableHandle handle = (HiveOutputTableHandle) outputTableHandle;
    return getLocationService()
            .getQueryWriteInfo(handle.getLocationHandle())
            .getWritePath();
}
protected Set<String> listAllDataFiles(ConnectorOutputTableHandle tableHandle)
        throws IOException
{
    HiveOutputTableHandle hiveOutputTableHandle = (HiveOutputTableHandle) tableHandle;
    Path writePath = new Path(getLocationService(hiveOutputTableHandle.getSchemaName()).writePathRoot(hiveOutputTableHandle.getLocationHandle()).get().toString());
    return listAllDataFiles(writePath);
}
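// In the tests this helper typically backs assertions that data is staged before
// commit and moved out of the staging directory afterwards. A hedged sketch of that
// pattern, assuming the newer finishCreateTable signature with computed statistics:
Set<String> stagedFiles = listAllDataFiles(outputHandle);
assertFalse(stagedFiles.isEmpty());

metadata.finishCreateTable(session, outputHandle, fragments, ImmutableList.of());

// uses the Path overload referenced inside listAllDataFiles above
assertTrue(listAllDataFiles(getStagingPathRoot(outputHandle)).isEmpty());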
private void rollbackCreateTable(ConnectorOutputTableHandle tableHandle)
{
    HiveOutputTableHandle handle = checkType(tableHandle, HiveOutputTableHandle.class, "tableHandle");
    cleanupTempDirectory(locationService.writePathRoot(handle.getLocationHandle()).get().toString(), handle.getFilePrefix(), "create table");
    // Note: there is no need to cleanup the target directory as it will only be written
    // to during the commit call and the commit call cleans up after failures.
}
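// beginCreateTable (shown below) registers this cleanup with
// setRollback(() -> rollbackCreateTable(result)). A minimal sketch of the
// checkNoRollback/setRollback machinery, assuming a single AtomicReference slot:
private final AtomicReference<Runnable> rollbackAction = new AtomicReference<>();

private void checkNoRollback()
{
    checkState(rollbackAction.get() == null, "rollback action is already set");
}

private void setRollback(Runnable action)
{
    checkState(rollbackAction.compareAndSet(null, action), "rollback action is already set");
}

public void rollback()
{
    // one-shot: clear the slot so the cleanup runs at most once
    Runnable action = rollbackAction.getAndSet(null);
    if (action != null) {
        action.run();
    }
}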
protected String getFilePrefix(ConnectorOutputTableHandle outputTableHandle)
{
    return ((HiveOutputTableHandle) outputTableHandle).getFilePrefix();
}
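// The file prefix (the query ID in the handles above) is what lets rollback delete
// only this query's files from a shared staging directory. A sketch of both sides,
// with hypothetical variable names; listStatus() and delete() are real Hadoop
// FileSystem calls, randomUUID() is java.util.UUID.randomUUID():
String fileName = filePrefix + "_" + randomUUID();  // writer side: prefix every data file

// cleanup side: remove only files carrying this query's prefix, leaving
// concurrent writers' files in the same directory untouched
for (FileStatus status : fileSystem.listStatus(stagingPath)) {
    if (status.getPath().getName().startsWith(filePrefix)) {
        fileSystem.delete(status.getPath(), false);
    }
}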
createDirectories(temporaryPath);

return new HiveOutputTableHandle(
        connectorId.toString(),
        schemaName,
        /* ... remaining arguments truncated in the original snippet ... */);
metastoreClient.updateTableLocation(
        database,
        tableName.getTableName(),
        locationService.getTableWriteInfo(((HiveOutputTableHandle) outputHandle).getLocationHandle()).getTargetPath().toString());
private static ConnectorPageSink createPageSink(HiveTransactionHandle transaction, HiveClientConfig config, HiveMetastore metastore, Path outputPath)
{
    LocationHandle locationHandle = new LocationHandle(outputPath, Optional.of(outputPath), false);
    HiveOutputTableHandle handle = new HiveOutputTableHandle(
            CLIENT_ID,
            SCHEMA_NAME,
            TABLE_NAME,
            getColumnHandles(),
            "test",
            locationHandle,
            config.getHiveStorageFormat(),
            ImmutableList.of(),
            "test",
            ImmutableMap.of());
    JsonCodec<PartitionUpdate> partitionUpdateCodec = JsonCodec.jsonCodec(PartitionUpdate.class);
    HdfsEnvironment hdfsEnvironment = createHdfsEnvironment(config);
    HivePageSinkProvider provider = new HivePageSinkProvider(hdfsEnvironment, metastore, new GroupByHashPageIndexerFactory(), TYPE_MANAGER, config, new HiveLocationService(metastore, hdfsEnvironment), partitionUpdateCodec);
    return provider.createPageSink(transaction, getSession(config), handle);
}
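// A hedged usage sketch for this helper: append a page and finish the sink, assuming
// the asynchronous finish() that returns a future and airlift's MoreFutures.getFutureValue.
// Each returned Slice is a JSON-encoded PartitionUpdate, decodable with the codec above.
ConnectorPageSink pageSink = createPageSink(transaction, config, metastore, outputPath);
pageSink.appendPage(page);
Collection<Slice> fragments = getFutureValue(pageSink.finish());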
@Override
public HiveOutputTableHandle beginCreateTable(ConnectorSession session, ConnectorTableMetadata tableMetadata, Optional<ConnectorNewTableLayout> layout)
{
    checkNoRollback();
    verifyJvmTimeZone();

    checkArgument(!isNullOrEmpty(tableMetadata.getOwner()), "Table owner is null or empty");

    HiveStorageFormat hiveStorageFormat = getHiveStorageFormat(tableMetadata.getProperties());
    List<String> partitionedBy = getPartitionedBy(tableMetadata.getProperties());
    Map<String, String> additionalTableParameters = tableParameterCodec.encode(tableMetadata.getProperties());

    // get the root directory for the database
    SchemaTableName schemaTableName = tableMetadata.getTable();
    String schemaName = schemaTableName.getSchemaName();
    String tableName = schemaTableName.getTableName();

    List<HiveColumnHandle> columnHandles = getColumnHandles(connectorId, tableMetadata, ImmutableSet.copyOf(partitionedBy));

    HiveOutputTableHandle result = new HiveOutputTableHandle(
            connectorId,
            schemaName,
            tableName,
            columnHandles,
            session.getQueryId(),
            locationService.forNewTable(session.getQueryId(), schemaName, tableName),
            hiveStorageFormat,
            partitionedBy,
            tableMetadata.getOwner(),
            additionalTableParameters);

    setRollback(() -> rollbackCreateTable(result));
    return result;
}
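// For context, a simplified sketch of the call sequence around this method during
// CREATE TABLE AS. This is not engine code: fragments come from the page sinks shown
// elsewhere, and the commit call assumes the newer finishCreateTable signature.
HiveOutputTableHandle outputHandle = metadata.beginCreateTable(session, tableMetadata, Optional.empty());
try {
    // ... workers write data through page sinks and produce fragments ...
    metadata.finishCreateTable(session, outputHandle, fragments, ImmutableList.of());
}
catch (Exception e) {
    metadata.rollback();  // runs the rollbackCreateTable action registered above
    throw e;
}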
metastoreClient.updateTableLocation(
        database,
        tableName.getTableName(),
        locationService.writePath(outputHandle.getLocationHandle(), Optional.empty()).get().toString());