/**
 * Returns true when the two tables expose the same data-column schema:
 * equal column counts, and every existing column has a counterpart in the
 * new table with an equal name and equal type (order-insensitive).
 * Partition columns are not compared.
 */
private boolean hasTheSameSchema(Table newTable, Table existingTable)
{
    List<Column> newColumns = newTable.getDataColumns();
    List<Column> existingColumns = existingTable.getDataColumns();

    // A differing column count can never match, so short-circuit the scan
    if (newColumns.size() != existingColumns.size()) {
        return false;
    }

    // Every existing column must have a same-name, same-type match in the new table
    return existingColumns.stream()
            .allMatch(existingColumn -> newColumns.stream()
                    .anyMatch(newColumn -> newColumn.getName().equals(existingColumn.getName())
                            && newColumn.getType().equals(existingColumn.getType())));
}
public static Properties getHiveSchema(Table table) { // Mimics function in Hive: MetaStoreUtils.getTableMetadata(Table) return getHiveSchema( table.getStorage(), table.getDataColumns(), table.getDataColumns(), table.getParameters(), table.getDatabaseName(), table.getTableName(), table.getPartitionColumns()); }
/**
 * Validates that {@code columnName} may be dropped from the given table.
 *
 * @throws TableNotFoundException if the table does not exist in the metastore
 * @throws PrestoException (NOT_SUPPORTED) if the column is a partition column,
 *         or if it is the table's only non-partition column
 */
public static void verifyCanDropColumn(ExtendedHiveMetastore metastore, String databaseName, String tableName, String columnName)
{
    Table table = metastore.getTable(databaseName, tableName)
            .orElseThrow(() -> new TableNotFoundException(new SchemaTableName(databaseName, tableName)));

    // Dropping a partition column would invalidate the partition layout
    for (Column partitionColumn : table.getPartitionColumns()) {
        if (partitionColumn.getName().equals(columnName)) {
            throw new PrestoException(NOT_SUPPORTED, "Cannot drop partition columns");
        }
    }

    // A table must retain at least one non-partition column
    if (table.getDataColumns().size() <= 1) {
        throw new PrestoException(NOT_SUPPORTED, "Cannot drop the only non-partition column in a table");
    }
}
}
public static List<HiveColumnHandle> getRegularColumnHandles(Table table) { ImmutableList.Builder<HiveColumnHandle> columns = ImmutableList.builder(); int hiveColumnIndex = 0; for (Column field : table.getDataColumns()) { // ignore unsupported types rather than failing HiveType hiveType = field.getType(); if (hiveType.isSupportedType()) { columns.add(new HiveColumnHandle(field.getName(), hiveType, hiveType.getTypeSignature(), hiveColumnIndex, REGULAR, field.getComment())); } hiveColumnIndex++; } return columns.build(); }
for (Column column : table.getDataColumns()) { hiveTypes.put(column.getName(), column.getType());
table.getDataColumns().stream().map(Column::getName).forEach(columnNames::add); List<String> allColumnNames = columnNames.build(); if (allColumnNames.size() > Sets.newHashSet(allColumnNames).size()) { List<Column> tableColumns = table.getDataColumns(); ImmutableMap.Builder<String, Optional<String>> builder = ImmutableMap.builder(); for (Column field : concat(tableColumns, table.getPartitionColumns())) {
/**
 * Decides whether pushdown can be enabled for the table: the data must live
 * on S3 and the serde, input format, and all data-column types must be
 * supported. Checks run in the same short-circuit order as before.
 */
private static boolean shouldEnablePushdownForTable(Table table, String path, Properties schema)
{
    if (!isS3Storage(path)) {
        return false;
    }
    if (!isSerdeSupported(schema)) {
        return false;
    }
    return isInputFormatSupported(schema) && areColumnTypesSupported(table.getDataColumns());
}
}
/**
 * Drops a data column from the table by replacing the table with a copy
 * whose data-column list no longer contains {@code columnName}.
 *
 * @throws ColumnNotFoundException if the column does not exist on the table
 */
@Override
public void dropColumn(String databaseName, String tableName, String columnName)
{
    // Rejects partition columns and the last remaining data column
    verifyCanDropColumn(this, databaseName, tableName, columnName);

    Table oldTable = getTableOrElseThrow(databaseName, tableName);
    if (!oldTable.getColumn(columnName).isPresent()) {
        throw new ColumnNotFoundException(new SchemaTableName(databaseName, tableName), columnName);
    }

    // Keep every data column except the one being dropped
    ImmutableList.Builder<Column> remainingColumns = ImmutableList.builder();
    for (Column column : oldTable.getDataColumns()) {
        if (!column.getName().equals(columnName)) {
            remainingColumns.add(column);
        }
    }

    Table newTable = Table.builder(oldTable)
            .setDataColumns(remainingColumns.build())
            .build();
    replaceTable(databaseName, tableName, newTable, null);
}
/**
 * Renames a data column by replacing the table with a copy in which the
 * matching column carries the new name (type and comment unchanged).
 * Renaming partition columns is rejected.
 */
@Override
public void renameColumn(String databaseName, String tableName, String oldColumnName, String newColumnName)
{
    Table oldTable = getTableOrElseThrow(databaseName, tableName);

    boolean renamesPartitionColumn = oldTable.getPartitionColumns().stream()
            .map(Column::getName)
            .anyMatch(oldColumnName::equals);
    if (renamesPartitionColumn) {
        throw new PrestoException(NOT_SUPPORTED, "Renaming partition columns is not supported");
    }

    // Rebuild the column list, substituting the renamed column in place
    ImmutableList.Builder<Column> newDataColumns = ImmutableList.builder();
    oldTable.getDataColumns().stream()
            .map(column -> column.getName().equals(oldColumnName)
                    ? new Column(newColumnName, column.getType(), column.getComment())
                    : column)
            .forEach(newDataColumns::add);

    Table newTable = Table.builder(oldTable)
            .setDataColumns(newDataColumns.build())
            .build();
    replaceTable(databaseName, tableName, newTable, null);
}
/**
 * Converts a Presto metastore {@code Table} into a Glue {@code TableInput}
 * suitable for create/update calls against the Glue catalog.
 * Data columns travel inside the storage descriptor; partition keys are
 * converted separately. View texts are only set when present.
 */
public static TableInput convertTable(Table table) { TableInput input = new TableInput(); input.setName(table.getTableName()); input.setOwner(table.getOwner()); input.setTableType(table.getTableType()); input.setStorageDescriptor(convertStorage(table.getStorage(), table.getDataColumns())); input.setPartitionKeys(table.getPartitionColumns().stream().map(GlueInputConverter::convertColumn).collect(toList())); input.setParameters(table.getParameters()); table.getViewOriginalText().ifPresent(input::setViewOriginalText); table.getViewExpandedText().ifPresent(input::setViewExpandedText); return input; }
List<Column> tableColumns = table.getDataColumns(); List<Column> partitionColumns = partition.getColumns(); if ((tableColumns == null) || (partitionColumns == null)) {
/**
 * Captures a snapshot of a table's metadata together with per-column
 * statistics. The storage format is mapped back to a known
 * {@code HiveStorageFormat} when one matches; otherwise it is left empty.
 */
public TableMetadata(Table table, Map<String, HiveColumnStatistics> columnStatistics)
{
    owner = table.getOwner();
    tableType = table.getTableType();
    dataColumns = table.getDataColumns();
    partitionColumns = table.getPartitionColumns();
    parameters = table.getParameters();

    // Find the HiveStorageFormat whose canonical StorageFormat equals the table's
    StorageFormat rawFormat = table.getStorage().getStorageFormat();
    storageFormat = Arrays.stream(HiveStorageFormat.values())
            .filter(format -> rawFormat.equals(StorageFormat.fromHiveStorageFormat(format)))
            .findFirst();

    bucketProperty = table.getStorage().getBucketProperty();
    serdeParameters = table.getStorage().getSerdeParameters();

    // Only external tables record an explicit storage location
    externalLocation = tableType.equals(TableType.EXTERNAL_TABLE.name())
            ? Optional.of(table.getStorage().getLocation())
            : Optional.empty();

    viewOriginalText = table.getViewOriginalText();
    viewExpandedText = table.getViewExpandedText();
    this.columnStatistics = ImmutableMap.copyOf(requireNonNull(columnStatistics, "columnStatistics is null"));
}
/**
 * Builds the Hive {@code Properties} schema for a single partition.
 * Mimics function in Hive: MetaStoreUtils.getSchema(Partition, Table).
 * Note the argument pairing: the partition supplies the storage and the
 * physical columns, while the table supplies the logical data columns,
 * parameters, names, and partition keys.
 */
public static Properties getHiveSchema(Partition partition, Table table) { // Mimics function in Hive: MetaStoreUtils.getSchema(Partition, Table) return getHiveSchema( partition.getStorage(), partition.getColumns(), table.getDataColumns(), table.getParameters(), table.getDatabaseName(), table.getTableName(), table.getPartitionColumns()); }
List<Column> tableColumns = table.getDataColumns(); List<Column> existingPartitionColumns = partition.get().getColumns(); for (int i = 0; i < min(existingPartitionColumns.size(), tableColumns.size()); i++) {
/**
 * Converts a Presto metastore {@code Table} plus its principal privileges
 * into the Thrift {@code org.apache.hadoop.hive.metastore.api.Table}
 * representation. Partition columns become Thrift field schemas, data
 * columns go into the storage descriptor, and absent view texts map to null
 * (the Thrift API uses null, not Optional).
 */
public static org.apache.hadoop.hive.metastore.api.Table toMetastoreApiTable(Table table, PrincipalPrivileges privileges) { org.apache.hadoop.hive.metastore.api.Table result = new org.apache.hadoop.hive.metastore.api.Table(); result.setDbName(table.getDatabaseName()); result.setTableName(table.getTableName()); result.setOwner(table.getOwner()); result.setTableType(table.getTableType()); result.setParameters(table.getParameters()); result.setPartitionKeys(table.getPartitionColumns().stream().map(ThriftMetastoreUtil::toMetastoreApiFieldSchema).collect(toList())); result.setSd(makeStorageDescriptor(table.getTableName(), table.getDataColumns(), table.getStorage())); result.setPrivileges(toMetastoreApiPrincipalPrivilegeSet(table.getOwner(), privileges)); result.setViewOriginalText(table.getViewOriginalText().orElse(null)); result.setViewExpandedText(table.getViewExpandedText().orElse(null)); return result; }
for (Column column : table.get().getDataColumns()) { if (!isWritableType(column.getType())) { throw new PrestoException(
/**
 * Builds the metastore {@code Partition} object for a finished partition
 * write. The partition inherits the table's data columns, bucket property,
 * and serde parameters; its values are parsed from the partition name in
 * the update. The storage format comes from the table when the session asks
 * to respect the table format, otherwise from the session's configured
 * Hive storage format. Parameters record the Presto version and query id
 * that created the partition.
 */
private Partition buildPartitionObject(ConnectorSession session, Table table, PartitionUpdate partitionUpdate) { return Partition.builder() .setDatabaseName(table.getDatabaseName()) .setTableName(table.getTableName()) .setColumns(table.getDataColumns()) .setValues(extractPartitionValues(partitionUpdate.getName())) .setParameters(ImmutableMap.<String, String>builder() .put(PRESTO_VERSION_NAME, prestoVersion) .put(PRESTO_QUERY_ID_NAME, session.getQueryId()) .build()) .withStorage(storage -> storage .setStorageFormat(isRespectTableFormat(session) ? table.getStorage().getStorageFormat() : fromHiveStorageFormat(HiveSessionProperties.getHiveStorageFormat(session))) .setLocation(partitionUpdate.getTargetPath().toString()) .setBucketProperty(table.getStorage().getBucketProperty()) .setSerdeParameters(table.getStorage().getSerdeParameters())) .build(); }
/**
 * Verifies that GlueInputConverter.convertTable copies every field of the
 * Presto table into the Glue TableInput: identity fields, parameters,
 * storage descriptor (including data columns), partition keys, and both
 * view texts.
 */
@Test
public void testConvertTable()
{
    TableInput input = GlueInputConverter.convertTable(testTbl);

    // Identity and parameter fields
    assertEquals(input.getName(), testTbl.getTableName());
    assertEquals(input.getOwner(), testTbl.getOwner());
    assertEquals(input.getTableType(), testTbl.getTableType());
    assertEquals(input.getParameters(), testTbl.getParameters());

    // Columns and storage
    assertColumnList(input.getStorageDescriptor().getColumns(), testTbl.getDataColumns());
    assertColumnList(input.getPartitionKeys(), testTbl.getPartitionColumns());
    assertStorage(input.getStorageDescriptor(), testTbl.getStorage());

    // View texts (the test fixture always populates both Optionals)
    assertEquals(input.getViewExpandedText(), testTbl.getViewExpandedText().get());
    assertEquals(input.getViewOriginalText(), testTbl.getViewOriginalText().get());
}
/**
 * Verifies that GlueToPrestoConverter.convertTable maps every field of the
 * Glue table onto the Presto metastore table: names (database name comes
 * from the enclosing Glue database), type, owner, parameters, columns,
 * partition keys, storage, and both view texts.
 */
@Test
public void testConvertTable()
{
    com.facebook.presto.hive.metastore.Table converted = GlueToPrestoConverter.convertTable(testTbl, testDb.getName());

    // Identity and parameter fields
    assertEquals(converted.getTableName(), testTbl.getName());
    assertEquals(converted.getDatabaseName(), testDb.getName());
    assertEquals(converted.getTableType(), testTbl.getTableType());
    assertEquals(converted.getOwner(), testTbl.getOwner());
    assertEquals(converted.getParameters(), testTbl.getParameters());

    // Columns and storage
    assertColumnList(converted.getDataColumns(), testTbl.getStorageDescriptor().getColumns());
    assertColumnList(converted.getPartitionColumns(), testTbl.getPartitionKeys());
    assertStorage(converted.getStorage(), testTbl.getStorageDescriptor());

    // View texts are wrapped in Optionals on the Presto side
    assertEquals(converted.getViewOriginalText().get(), testTbl.getViewOriginalText());
    assertEquals(converted.getViewExpandedText().get(), testTbl.getViewExpandedText());
}
/**
 * Creates a synthetic ORC-format partition for tests. The partition reuses
 * the table's data columns, derives its values from the partition name, and
 * carries fixed Presto version/query-id parameters.
 */
protected Partition createDummyPartition(Table table, String partitionName)
{
    SchemaTableName schemaTableName = new SchemaTableName(table.getDatabaseName(), table.getTableName());
    Map<String, String> parameters = ImmutableMap.of(
            PRESTO_VERSION_NAME, "testversion",
            PRESTO_QUERY_ID_NAME, "20180101_123456_00001_x1y2z");
    return Partition.builder()
            .setDatabaseName(table.getDatabaseName())
            .setTableName(table.getTableName())
            .setColumns(table.getDataColumns())
            .setValues(toPartitionValues(partitionName))
            .withStorage(storage -> storage
                    .setStorageFormat(fromHiveStorageFormat(HiveStorageFormat.ORC))
                    .setLocation(partitionTargetPath(schemaTableName, partitionName)))
            .setParameters(parameters)
            .build();
}