/**
 * Resolves the Hive schema for a read: the partition-level schema when a
 * partition is present, otherwise the table-level schema.
 */
private static Properties getPartitionSchema(Table table, Optional<Partition> partition)
{
    return partition
            .map(value -> getHiveSchema(value, table))
            .orElseGet(() -> getHiveSchema(table));
}
/**
 * Loads a table from the metastore and verifies it is not marked offline.
 *
 * @throws TableNotFoundException if the table does not exist
 */
private Table getTable(SemiTransactionalHiveMetastore metastore, SchemaTableName tableName)
{
    Table table = metastore.getTable(tableName.getSchemaName(), tableName.getTableName())
            .orElseThrow(() -> new TableNotFoundException(tableName));
    verifyOnline(tableName, Optional.empty(), getProtectMode(table), table.getParameters());
    return table;
}
// Builds the canonical Hive partition-name string ("col=value/...") for each partition.
private static List<String> buildPartitionNames(List<Column> partitionColumns, List<Partition> partitions)
{
    return partitions.stream()
            .map(Partition::getValues)
            .map(values -> makePartName(partitionColumns, values))
            .collect(toList());
}
// NOTE(review): this is a mid-method fragment — the guard that leads to this
// throw (and its closing brace) lies outside the visible chunk.
throw new PrestoException(GENERIC_INTERNAL_ERROR, "Partition not loaded: " + hivePartition); // Rebuild the canonical partition name so offline-verification errors can identify the partition.
String partName = makePartName(table.getPartitionColumns(), partition.getValues()); verifyOnline(tableName, Optional.of(partName), getProtectMode(partition), partition.getParameters());
/** Parses the protect mode recorded in the partition's parameter map. */
public static ProtectMode getProtectMode(Partition partition)
{
    Map<String, String> parameters = partition.getParameters();
    return getProtectMode(parameters);
}
/**
 * Drops a data column from the table via an in-place table alteration.
 *
 * @throws ColumnNotFoundException if the column does not exist on the table
 */
@Override
public synchronized void dropColumn(String databaseName, String tableName, String columnName)
{
    alterTable(databaseName, tableName, oldTable -> {
        // Delegated check: rejects drops that are not allowed (e.g. partition columns)
        verifyCanDropColumn(this, databaseName, tableName, columnName);
        if (!oldTable.getColumn(columnName).isPresent()) {
            throw new ColumnNotFoundException(new SchemaTableName(databaseName, tableName), columnName);
        }
        ImmutableList.Builder<Column> retainedColumns = ImmutableList.builder();
        oldTable.getDataColumns().stream()
                .filter(column -> !column.getName().equals(columnName))
                .forEach(retainedColumns::add);
        return oldTable.withDataColumns(retainedColumns.build());
    });
}
private static void checkWritable( SchemaTableName tableName, Optional<String> partitionName, ProtectMode protectMode, Map<String, String> parameters, Storage storage) { String tablePartitionDescription = "Table '" + tableName + "'"; if (partitionName.isPresent()) { tablePartitionDescription += " partition '" + partitionName.get() + "'"; } // verify online verifyOnline(tableName, partitionName, protectMode, parameters); // verify not read only if (protectMode.readOnly) { throw new HiveReadOnlyException(tableName, partitionName); } // verify skew info if (storage.isSkewed()) { throw new PrestoException(NOT_SUPPORTED, format("Inserting into bucketed tables with skew is not supported. %s", tablePartitionDescription)); } }
// NOTE(review): mid-method fragment — the enclosing schema-building method is outside the visible chunk.
// Records the accumulated per-column comments and the Thrift DDL string in the Hive schema properties.
schema.setProperty("columns.comments", columnCommentBuilder.toString()); schema.setProperty(SERIALIZATION_DDL, toThriftDdl(tableName, dataColumns));
/** Parses the protect mode recorded in the table's parameter map. */
public static ProtectMode getProtectMode(Table table)
{
    Map<String, String> parameters = table.getParameters();
    return getProtectMode(parameters);
}
// Drops a column by removing it from the storage descriptor's column list and
// re-writing the table through the underlying metastore.
//
// NOTE(review): unlike the other dropColumn implementations in this codebase,
// this variant does not throw ColumnNotFoundException when the column is
// absent — removeIf matches nothing and the table is altered unchanged.
// Confirm whether the lenient behavior is intentional.
@Override
public void dropColumn(String databaseName, String tableName, String columnName)
{
    verifyCanDropColumn(this, databaseName, tableName, columnName);
    org.apache.hadoop.hive.metastore.api.Table table = delegate.getTable(databaseName, tableName)
            .orElseThrow(() -> new TableNotFoundException(new SchemaTableName(databaseName, tableName)));
    table.getSd().getCols().removeIf(fieldSchema -> fieldSchema.getName().equals(columnName));
    alterTable(databaseName, tableName, table);
}
static boolean shouldEnablePushdownForTable(ConnectorSession session, Table table, String path, Optional<Partition> optionalPartition) { if (!isS3SelectPushdownEnabled(session)) { return false; } if (path == null) { return false; } // Hive table partitions could be on different storages, // as a result, we have to check each individual optionalPartition Properties schema = optionalPartition .map(partition -> getHiveSchema(partition, table)) .orElseGet(() -> getHiveSchema(table)); return shouldEnablePushdownForTable(table, path, schema); }
@Override public HiveTableHandle getTableHandle(ConnectorSession session, SchemaTableName tableName) { requireNonNull(tableName, "tableName is null"); Optional<Table> table = metastore.getTable(tableName.getSchemaName(), tableName.getTableName()); if (!table.isPresent()) { return null; } if (isPartitionsSystemTable(tableName)) { // We must not allow $partitions table due to how permissions are checked in PartitionsAwareAccessControl.checkCanSelectFromTable() throw new PrestoException(StandardErrorCode.NOT_SUPPORTED, format("Unexpected table %s present in Hive metastore", tableName)); } verifyOnline(tableName, Optional.empty(), getProtectMode(table.get()), table.get().getParameters()); return new HiveTableHandle(tableName.getSchemaName(), tableName.getTableName()); }
/** Verifies that the given partition accepts writes (online, not read-only, not skewed). */
public static void checkPartitionIsWritable(String partitionName, Partition partition)
{
    SchemaTableName schemaTableName = new SchemaTableName(partition.getDatabaseName(), partition.getTableName());
    checkWritable(
            schemaTableName,
            Optional.of(partitionName),
            getProtectMode(partition),
            partition.getParameters(),
            partition.getStorage());
}
// Resolves the metadata directory for a partition identified by its column values,
// by first deriving the canonical partition name.
private Path getPartitionMetadataDirectory(Table table, List<String> values)
{
    return getPartitionMetadataDirectory(table, makePartName(table.getPartitionColumns(), values));
}
/**
 * Drops a data column by rebuilding the table without it and replacing the
 * stored table definition.
 *
 * @throws ColumnNotFoundException if the column does not exist on the table
 */
@Override
public void dropColumn(String databaseName, String tableName, String columnName)
{
    verifyCanDropColumn(this, databaseName, tableName, columnName);
    Table oldTable = getTableOrElseThrow(databaseName, tableName);

    if (!oldTable.getColumn(columnName).isPresent()) {
        throw new ColumnNotFoundException(new SchemaTableName(databaseName, tableName), columnName);
    }

    ImmutableList.Builder<Column> retainedColumns = ImmutableList.builder();
    for (Column column : oldTable.getDataColumns()) {
        if (!column.getName().equals(columnName)) {
            retainedColumns.add(column);
        }
    }

    Table newTable = Table.builder(oldTable)
            .setDataColumns(retainedColumns.build())
            .build();
    replaceTable(databaseName, tableName, newTable, null);
}
// The schema derived by MetastoreUtil must match Hive's own MetaStoreUtils output,
// even for a table containing unsupported field types.
@Test
public void testHiveSchemaTable()
{
    Properties actual = MetastoreUtil.getHiveSchema(
            ThriftMetastoreUtil.fromMetastoreApiTable(TEST_TABLE_WITH_UNSUPPORTED_FIELDS, TEST_SCHEMA));
    Properties expected = MetaStoreUtils.getTableMetadata(TEST_TABLE_WITH_UNSUPPORTED_FIELDS);
    assertEquals(actual, expected);
}
/**
 * Verifies that writes to the given table are allowed: non-managed tables are
 * writable only when the feature is enabled, and the table must also pass the
 * standard writability checks (online, not read-only, not skewed).
 */
public static void checkTableIsWritable(Table table, boolean writesToNonManagedTablesEnabled)
{
    boolean managed = table.getTableType().equals(MANAGED_TABLE.toString());
    if (!managed && !writesToNonManagedTablesEnabled) {
        throw new PrestoException(NOT_SUPPORTED, "Cannot write to non-managed Hive table");
    }
    checkWritable(
            new SchemaTableName(table.getDatabaseName(), table.getTableName()),
            Optional.empty(),
            getProtectMode(table),
            table.getParameters(),
            table.getStorage());
}
/**
 * Lists all partition names for the table, or empty if the table does not
 * exist. Names are derived from the partition value directories found under
 * the table's metadata directory.
 */
@Override
public synchronized Optional<List<String>> getPartitionNames(String databaseName, String tableName)
{
    requireNonNull(databaseName, "databaseName is null");
    requireNonNull(tableName, "tableName is null");

    Optional<Table> tableReference = getTable(databaseName, tableName);
    if (!tableReference.isPresent()) {
        return Optional.empty();
    }
    Table table = tableReference.get();
    List<Column> partitionColumns = table.getPartitionColumns();

    Path tableMetadataDirectory = getTableMetadataDirectory(table);
    ImmutableList.Builder<String> partitionNames = ImmutableList.builder();
    for (ArrayDeque<String> partitionValues : listPartitions(tableMetadataDirectory, partitionColumns)) {
        partitionNames.add(makePartName(partitionColumns, ImmutableList.copyOf(partitionValues)));
    }
    return Optional.of(partitionNames.build());
}
/**
 * Writes a zero-row file for each of the given file names, using the storage
 * format and schema of the partition when present, otherwise the table's.
 */
private void createEmptyFile(ConnectorSession session, Path path, Table table, Optional<Partition> partition, List<String> fileNames)
{
    JobConf conf = toJobConf(hdfsEnvironment.getConfiguration(new HdfsContext(session, table.getDatabaseName(), table.getTableName()), path));

    // A partition's storage may differ from the table's, so prefer partition metadata when given
    Properties schema = partition.isPresent()
            ? getHiveSchema(partition.get(), table)
            : getHiveSchema(table);
    StorageFormat format = partition.isPresent()
            ? partition.get().getStorage().getStorageFormat()
            : table.getStorage().getStorageFormat();

    for (String fileName : fileNames) {
        writeEmptyFile(session, new Path(path, fileName), conf, schema, format.getSerDe(), format.getOutputFormat());
    }
}
// Partition schema derivation must match Hive's own MetaStoreUtils output,
// even for a partition carrying unsupported field types.
@Test
public void testHiveSchemaPartition()
{
    Properties actual = MetastoreUtil.getHiveSchema(
            ThriftMetastoreUtil.fromMetastoreApiPartition(TEST_PARTITION_WITH_UNSUPPORTED_FIELDS),
            ThriftMetastoreUtil.fromMetastoreApiTable(TEST_TABLE_WITH_UNSUPPORTED_FIELDS, TEST_SCHEMA));
    Properties expected = MetaStoreUtils.getPartitionMetadata(TEST_PARTITION_WITH_UNSUPPORTED_FIELDS, TEST_TABLE_WITH_UNSUPPORTED_FIELDS);
    assertEquals(actual, expected);
}