@Override
public void dropColumn(String databaseName, String tableName, String columnName)
{
    verifyCanDropColumn(this, databaseName, tableName, columnName);
    Table oldTable = getTableOrElseThrow(databaseName, tableName);

    // Fail fast if the column does not exist, so callers get a precise error
    // instead of a silent no-op replace.
    if (!oldTable.getColumn(columnName).isPresent()) {
        SchemaTableName name = new SchemaTableName(databaseName, tableName);
        throw new ColumnNotFoundException(name, columnName);
    }

    // Copy every data column except the dropped one. A plain loop (matching
    // renameColumn's style) avoids the stream().forEach(builder::add)
    // side-effect anti-pattern of the previous implementation.
    ImmutableList.Builder<Column> newDataColumns = ImmutableList.builder();
    for (Column column : oldTable.getDataColumns()) {
        if (!column.getName().equals(columnName)) {
            newDataColumns.add(column);
        }
    }

    Table newTable = Table.builder(oldTable)
            .setDataColumns(newDataColumns.build())
            .build();
    replaceTable(databaseName, tableName, newTable, null);
}
@Override
public void renameColumn(String databaseName, String tableName, String oldColumnName, String newColumnName)
{
    Table oldTable = getTableOrElseThrow(databaseName, tableName);

    // Partition columns are part of the physical layout and cannot be renamed.
    if (oldTable.getPartitionColumns().stream().anyMatch(c -> c.getName().equals(oldColumnName))) {
        throw new PrestoException(NOT_SUPPORTED, "Renaming partition columns is not supported");
    }

    // Consistency fix: previously a missing source column silently produced a
    // no-op replace. Throw ColumnNotFoundException, matching dropColumn.
    if (!oldTable.getColumn(oldColumnName).isPresent()) {
        throw new ColumnNotFoundException(new SchemaTableName(databaseName, tableName), oldColumnName);
    }

    // Rebuild the data-column list, swapping in the renamed column while
    // preserving its type and comment.
    ImmutableList.Builder<Column> newDataColumns = ImmutableList.builder();
    for (Column column : oldTable.getDataColumns()) {
        if (column.getName().equals(oldColumnName)) {
            newDataColumns.add(new Column(newColumnName, column.getType(), column.getComment()));
        }
        else {
            newDataColumns.add(column);
        }
    }

    Table newTable = Table.builder(oldTable)
            .setDataColumns(newDataColumns.build())
            .build();
    replaceTable(databaseName, tableName, newTable, null);
}
public static Table convertTable(com.amazonaws.services.glue.model.Table glueTable, String dbName)
{
    // requireNonNull returns its argument, so validation and assignment fold
    // into a single step.
    StorageDescriptor sd = requireNonNull(glueTable.getStorageDescriptor(), "Table StorageDescriptor is null");

    Table.Builder tableBuilder = Table.builder()
            .setDatabaseName(dbName)
            .setTableName(glueTable.getName())
            .setOwner(nullToEmpty(glueTable.getOwner()))
            .setTableType(glueTable.getTableType())
            .setDataColumns(sd.getColumns().stream()
                    .map(GlueToPrestoConverter::convertColumn)
                    .collect(toList()))
            .setParameters(firstNonNull(glueTable.getParameters(), ImmutableMap.of()))
            .setViewOriginalText(Optional.ofNullable(glueTable.getViewOriginalText()))
            .setViewExpandedText(Optional.ofNullable(glueTable.getViewExpandedText()));

    // Glue returns null (not an empty list) for unpartitioned tables.
    if (glueTable.getPartitionKeys() == null) {
        tableBuilder.setPartitionColumns(new ArrayList<>());
    }
    else {
        tableBuilder.setPartitionColumns(glueTable.getPartitionKeys().stream()
                .map(GlueToPrestoConverter::convertColumn)
                .collect(toList()));
    }

    setStorageBuilder(sd, tableBuilder.getStorageBuilder());
    return tableBuilder.build();
}
/**
 * Builds a randomly-named external Presto test table in {@code dbName} with one
 * data column and one partition column, using the shared STORAGE_CONSUMER.
 */
public static com.facebook.presto.hive.metastore.Table getPrestoTestTable(String dbName)
{
    com.facebook.presto.hive.metastore.Table.Builder builder = com.facebook.presto.hive.metastore.Table.builder();
    builder.setDatabaseName(dbName);
    builder.setTableName("test-tbl" + generateRandom());
    builder.setOwner("owner");
    builder.setParameters(ImmutableMap.of());
    builder.setTableType(TableType.EXTERNAL_TABLE.name());
    builder.setDataColumns(ImmutableList.of(getPrestoTestColumn()));
    builder.setPartitionColumns(ImmutableList.of(getPrestoTestColumn()));
    builder.setViewOriginalText(Optional.of("originalText"));
    builder.setViewExpandedText(Optional.of("expandedText"));
    return builder.withStorage(STORAGE_CONSUMER).build();
}
private void alterBucketProperty(SchemaTableName schemaTableName, Optional<HiveBucketProperty> bucketProperty) { try (Transaction transaction = newTransaction()) { ConnectorSession session = newSession(); String tableOwner = session.getUser(); String schemaName = schemaTableName.getSchemaName(); String tableName = schemaTableName.getTableName(); Optional<Table> table = transaction.getMetastore(schemaName).getTable(schemaName, tableName); Table.Builder tableBuilder = Table.builder(table.get()); tableBuilder.getStorageBuilder().setBucketProperty(bucketProperty); PrincipalPrivileges principalPrivileges = testingPrincipalPrivilege(tableOwner); // hack: replaceView can be used as replaceTable despite its name transaction.getMetastore(schemaName).replaceView(schemaName, tableName, tableBuilder.build(), principalPrivileges); transaction.commit(); } }
public void updateTableLocation(String databaseName, String tableName, String location) { Optional<Table> table = getTable(databaseName, tableName); if (!table.isPresent()) { throw new TableNotFoundException(new SchemaTableName(databaseName, tableName)); } Table.Builder tableBuilder = Table.builder(table.get()); tableBuilder.getStorageBuilder().setLocation(location); // NOTE: this clears the permissions replaceTable(databaseName, tableName, tableBuilder.build(), new PrincipalPrivileges(ImmutableMultimap.of(), ImmutableMultimap.of())); }
@Override public void createTable(Table table, PrincipalPrivileges privileges) { // hack to work around the metastore not being configured for S3 or other FS Table.Builder tableBuilder = Table.builder(table); tableBuilder.getStorageBuilder().setLocation("/"); super.createTable(tableBuilder.build(), privileges); }
.setDataColumns(dataColumns); transaction.getMetastore(schemaName).replaceView(schemaName, tableName, newTable.build(), principalPrivileges);
.setLocation(targetPath.toString()); return tableBuilder.build();
.setStorageFormat(VIEW_STORAGE_FORMAT) .setLocation(""); Table table = tableBuilder.build(); PrincipalPrivileges principalPrivileges = buildInitialPrivilegeSet(session.getUser());
/**
 * Drops the table and, when {@code deleteData} is true, deletes every data
 * location that was associated with it.
 *
 * Ordering here is deliberate:
 * 1. capture all data paths BEFORE touching the table;
 * 2. rewrite the table's location to "/" so the metastore drop does not try
 *    to reach a filesystem (S3 etc.) it is not configured for;
 * 3. drop the table via the delegate with deleteData=false;
 * 4. delete the captured paths ourselves through hdfsEnvironment.
 */
@Override
public void dropTable(String databaseName, String tableName, boolean deleteData)
{
    try {
        Optional<Table> table = getTable(databaseName, tableName);
        if (!table.isPresent()) {
            throw new TableNotFoundException(new SchemaTableName(databaseName, tableName));
        }

        // hack to work around the metastore not being configured for S3 or other FS
        // Snapshot the data paths first — after replaceTable below, the table's
        // recorded location no longer points at the real data.
        List<String> locations = listAllDataPaths(databaseName, tableName);

        Table.Builder tableBuilder = Table.builder(table.get());
        tableBuilder.getStorageBuilder().setLocation("/");

        // drop table
        // NOTE: the empty privilege multimaps clear permissions as part of the replace.
        replaceTable(databaseName, tableName, tableBuilder.build(), new PrincipalPrivileges(ImmutableMultimap.of(), ImmutableMultimap.of()));
        delegate.dropTable(databaseName, tableName, false);

        // drop data
        if (deleteData) {
            for (String location : locations) {
                Path path = new Path(location);
                // recursive delete of each captured data directory
                hdfsEnvironment.getFileSystem(TESTING_CONTEXT, path).delete(path, true);
            }
        }
    }
    catch (IOException e) {
        throw new UncheckedIOException(e);
    }
    finally {
        // Always evict cached state, even if the drop or delete failed midway.
        invalidateTable(databaseName, tableName);
    }
}
transaction.getMetastore(schemaName).createTable(session, tableBuilder.build(), principalPrivileges, Optional.empty(), true, EMPTY_TABLE_STATISTICS);
/**
 * Builds a managed test table with a fixed HDFS location, ORC serde, and the
 * supplied partition columns and bucket property.
 *
 * NOTE(review): the input and output format are both
 * "org.apache.hadoop.hive.ql.io.RCFileInputFormat" — the output-format slot
 * reusing the *Input*Format class looks suspicious; confirm it is intentional
 * test data before changing it.
 */
private static Table table(
        List<Column> partitionColumns,
        Optional<HiveBucketProperty> bucketProperty)
{
    Table.Builder builder = Table.builder()
            .setDatabaseName("test_dbname")
            .setOwner("testOwner")
            .setTableName("test_table")
            .setTableType(TableType.MANAGED_TABLE.toString())
            .setDataColumns(ImmutableList.of(new Column("col1", HIVE_STRING, Optional.empty())))
            .setParameters(ImmutableMap.of())
            .setPartitionColumns(partitionColumns);

    builder.getStorageBuilder()
            .setStorageFormat(
                    StorageFormat.create(
                            "com.facebook.hive.orc.OrcSerde",
                            "org.apache.hadoop.hive.ql.io.RCFileInputFormat",
                            "org.apache.hadoop.hive.ql.io.RCFileInputFormat"))
            .setLocation("hdfs://VOL1:9000/db_name/table_name")
            .setSkewed(false)
            .setBucketProperty(bucketProperty);

    return builder.build();
}
/**
 * Converts a Thrift metastore API table plus an explicit column schema into a
 * Presto metastore {@code Table}.
 *
 * @param table  Thrift table; must carry a storage descriptor
 * @param schema data-column schema to use instead of the descriptor's columns
 * @throws PrestoException (HIVE_INVALID_METADATA) when the storage descriptor is missing
 */
public static Table fromMetastoreApiTable(org.apache.hadoop.hive.metastore.api.Table table, List<FieldSchema> schema)
{
    StorageDescriptor storageDescriptor = table.getSd();
    if (storageDescriptor == null) {
        throw new PrestoException(HIVE_INVALID_METADATA, "Table is missing storage descriptor");
    }
    Table.Builder tableBuilder = Table.builder()
            .setDatabaseName(table.getDbName())
            .setTableName(table.getTableName())
            .setOwner(nullToEmpty(table.getOwner()))
            .setTableType(table.getTableType())
            // Data columns come from the caller-supplied schema, not the descriptor.
            .setDataColumns(schema.stream()
                    .map(ThriftMetastoreUtil::fromMetastoreApiFieldSchema)
                    .collect(toList()))
            // NOTE(review): getPartitionKeys() is dereferenced without a null
            // check — the Glue converter guards the equivalent field against
            // null; confirm the Thrift API always returns a (possibly empty)
            // list here, or add the same guard.
            .setPartitionColumns(table.getPartitionKeys().stream()
                    .map(ThriftMetastoreUtil::fromMetastoreApiFieldSchema)
                    .collect(toList()))
            // Parameters may legitimately be null in the Thrift model.
            .setParameters(table.getParameters() == null ? ImmutableMap.of() : table.getParameters())
            // Empty view texts are normalized to absent.
            .setViewOriginalText(Optional.ofNullable(emptyToNull(table.getViewOriginalText())))
            .setViewExpandedText(Optional.ofNullable(emptyToNull(table.getViewExpandedText())));
    fromMetastoreApiStorageDescriptor(storageDescriptor, tableBuilder.getStorageBuilder(), table.getTableName());
    return tableBuilder.build();
}
/**
 * Builds a managed ORC table owned by the session user, stamped with the test
 * server version and the given query id, stored at {@code targetPath}.
 */
private static Table createSimpleTable(SchemaTableName schemaTableName, List<Column> columns, ConnectorSession session, Path targetPath, String queryId)
{
    ImmutableMap<String, String> parameters = ImmutableMap.of(
            PRESTO_VERSION_NAME, TEST_SERVER_VERSION,
            PRESTO_QUERY_ID_NAME, queryId);

    return Table.builder()
            .setDatabaseName(schemaTableName.getSchemaName())
            .setTableName(schemaTableName.getTableName())
            .setOwner(session.getUser())
            .setTableType(TableType.MANAGED_TABLE.name())
            .setParameters(parameters)
            .setDataColumns(columns)
            .withStorage(storage -> storage
                    .setLocation(targetPath.toString())
                    .setStorageFormat(fromHiveStorageFormat(ORC))
                    .setSerdeParameters(ImmutableMap.of()))
            .build();
}
private void alterBucketProperty(SchemaTableName schemaTableName, Optional<HiveBucketProperty> bucketProperty) { try (Transaction transaction = newTransaction()) { ConnectorSession session = newSession(); String tableOwner = session.getUser(); String schemaName = schemaTableName.getSchemaName(); String tableName = schemaTableName.getTableName(); Optional<Table> table = transaction.getMetastore(schemaName).getTable(schemaName, tableName); Table.Builder tableBuilder = Table.builder(table.get()); tableBuilder.getStorageBuilder().setBucketProperty(bucketProperty); PrincipalPrivileges principalPrivileges = testingPrincipalPrivilege(tableOwner); // hack: replaceView can be used as replaceTable despite its name transaction.getMetastore(schemaName).replaceView(schemaName, tableName, tableBuilder.build(), principalPrivileges); transaction.commit(); } }
/**
 * Creates an external Presto test table with a random name suffix, one data
 * column, one partition column, and view texts set; storage is configured by
 * STORAGE_CONSUMER.
 */
public static com.facebook.presto.hive.metastore.Table getPrestoTestTable(String dbName)
{
    String randomTableName = "test-tbl" + generateRandom();
    return com.facebook.presto.hive.metastore.Table.builder()
            .setDatabaseName(dbName)
            .setTableName(randomTableName)
            .setOwner("owner")
            .setTableType(TableType.EXTERNAL_TABLE.name())
            .setParameters(ImmutableMap.of())
            .setDataColumns(ImmutableList.of(getPrestoTestColumn()))
            .setPartitionColumns(ImmutableList.of(getPrestoTestColumn()))
            .setViewOriginalText(Optional.of("originalText"))
            .setViewExpandedText(Optional.of("expandedText"))
            .withStorage(STORAGE_CONSUMER)
            .build();
}
public void updateTableLocation(String databaseName, String tableName, String location) { Optional<Table> table = getTable(databaseName, tableName); if (!table.isPresent()) { throw new TableNotFoundException(new SchemaTableName(databaseName, tableName)); } Table.Builder tableBuilder = Table.builder(table.get()); tableBuilder.getStorageBuilder().setLocation(location); // NOTE: this clears the permissions replaceTable(databaseName, tableName, tableBuilder.build(), new PrincipalPrivileges(ImmutableMultimap.of(), ImmutableMultimap.of())); }
@Override public void createTable(Table table, PrincipalPrivileges privileges) { // hack to work around the metastore not being configured for S3 or other FS Table.Builder tableBuilder = Table.builder(table); tableBuilder.getStorageBuilder().setLocation("/"); super.createTable(tableBuilder.build(), privileges); }
/**
 * Appends a data column to an existing table; a null comment becomes absent.
 */
@Override
public void addColumn(String databaseName, String tableName, String columnName, HiveType columnType, String columnComment)
{
    Table existing = getTableOrElseThrow(databaseName, tableName);
    Column newColumn = new Column(columnName, columnType, Optional.ofNullable(columnComment));
    Table updated = Table.builder(existing)
            .addDataColumn(newColumn)
            .build();
    replaceTable(databaseName, tableName, updated, null);
}