HiveOutputTableHandle result = new HiveOutputTableHandle( schemaName, tableName,
private static ConnectorPageSink createPageSink(HiveTransactionHandle transaction, HiveClientConfig config, ExtendedHiveMetastore metastore, Path outputPath, HiveWriterStats stats) HiveOutputTableHandle handle = new HiveOutputTableHandle( SCHEMA_NAME, TABLE_NAME,
createDirectories(temporaryPath); return new HiveOutputTableHandle( connectorId.toString(), schemaName,
/**
 * Builds a {@link ConnectorPageSink} that writes a brand-new table rooted at
 * {@code outputPath}, using the storage format configured in {@code config}.
 *
 * <p>The sink is obtained from a freshly assembled {@link HivePageSinkProvider};
 * the table handle describes a CREATE TABLE target in {@code SCHEMA_NAME.TABLE_NAME}
 * with no partition columns.
 */
private static ConnectorPageSink createPageSink(HiveTransactionHandle transaction, HiveClientConfig config, HiveMetastore metastore, Path outputPath)
{
    // Environment and codec are independent of the table being written
    HdfsEnvironment environment = createHdfsEnvironment(config);
    JsonCodec<PartitionUpdate> updateCodec = JsonCodec.jsonCodec(PartitionUpdate.class);

    // Target location: write directly at outputPath (no staging directory)
    LocationHandle location = new LocationHandle(outputPath, Optional.of(outputPath), false);

    HiveOutputTableHandle tableHandle = new HiveOutputTableHandle(
            CLIENT_ID,
            SCHEMA_NAME,
            TABLE_NAME,
            getColumnHandles(),
            "test",
            location,
            config.getHiveStorageFormat(),
            ImmutableList.of(),
            "test",
            ImmutableMap.of());

    HivePageSinkProvider sinkProvider = new HivePageSinkProvider(
            environment,
            metastore,
            new GroupByHashPageIndexerFactory(),
            TYPE_MANAGER,
            config,
            new HiveLocationService(metastore, environment),
            updateCodec);

    return sinkProvider.createPageSink(transaction, getSession(config), tableHandle);
}
@Override public HiveOutputTableHandle beginCreateTable(ConnectorSession session, ConnectorTableMetadata tableMetadata, Optional<ConnectorNewTableLayout> layout) { checkNoRollback(); verifyJvmTimeZone(); checkArgument(!isNullOrEmpty(tableMetadata.getOwner()), "Table owner is null or empty"); HiveStorageFormat hiveStorageFormat = getHiveStorageFormat(tableMetadata.getProperties()); List<String> partitionedBy = getPartitionedBy(tableMetadata.getProperties()); Map<String, String> additionalTableParameters = tableParameterCodec.encode(tableMetadata.getProperties()); // get the root directory for the database SchemaTableName schemaTableName = tableMetadata.getTable(); String schemaName = schemaTableName.getSchemaName(); String tableName = schemaTableName.getTableName(); List<HiveColumnHandle> columnHandles = getColumnHandles(connectorId, tableMetadata, ImmutableSet.copyOf(partitionedBy)); HiveOutputTableHandle result = new HiveOutputTableHandle( connectorId, schemaName, tableName, columnHandles, session.getQueryId(), locationService.forNewTable(session.getQueryId(), schemaName, tableName), hiveStorageFormat, partitionedBy, tableMetadata.getOwner(), additionalTableParameters); setRollback(() -> rollbackCreateTable(result)); return result; }
private static ConnectorPageSink createPageSink(HiveTransactionHandle transaction, HiveClientConfig config, ExtendedHiveMetastore metastore, Path outputPath, HiveWriterStats stats) HiveOutputTableHandle handle = new HiveOutputTableHandle( SCHEMA_NAME, TABLE_NAME,