/**
 * Resolves the effective Hive schema for a split: the partition's own schema
 * when a partition is present, otherwise the table-level schema.
 */
private static Properties getPartitionSchema(Table table, Optional<Partition> partition)
{
    return partition
            .map(value -> getHiveSchema(value, table))
            .orElseGet(() -> getHiveSchema(table));
}
static boolean shouldEnablePushdownForTable(ConnectorSession session, Table table, String path, Optional<Partition> optionalPartition) { if (!isS3SelectPushdownEnabled(session)) { return false; } if (path == null) { return false; } // Hive table partitions could be on different storages, // as a result, we have to check each individual optionalPartition Properties schema = optionalPartition .map(partition -> getHiveSchema(partition, table)) .orElseGet(() -> getHiveSchema(table)); return shouldEnablePushdownForTable(table, path, schema); }
@Test
public void testHiveSchemaTable()
{
    // The schema derived from the converted (connector-side) table must match
    // what Hive's own MetaStoreUtils produces for the raw Thrift table,
    // including fields with unsupported types.
    Properties actual = MetastoreUtil.getHiveSchema(
            ThriftMetastoreUtil.fromMetastoreApiTable(TEST_TABLE_WITH_UNSUPPORTED_FIELDS, TEST_SCHEMA));
    Properties expected = MetaStoreUtils.getTableMetadata(TEST_TABLE_WITH_UNSUPPORTED_FIELDS);
    assertEquals(actual, expected);
}
/**
 * Writes zero-length placeholder files with the correct serde/output format
 * under {@code path}, one per entry in {@code fileNames}. The schema and
 * storage format come from the partition when present, otherwise from the
 * table, because partition storage can differ from the table's declared
 * storage.
 */
private void createEmptyFile(ConnectorSession session, Path path, Table table, Optional<Partition> partition, List<String> fileNames)
{
    JobConf conf = toJobConf(hdfsEnvironment.getConfiguration(
            new HdfsContext(session, table.getDatabaseName(), table.getTableName()),
            path));

    Properties schema = partition
            .map(value -> getHiveSchema(value, table))
            .orElseGet(() -> getHiveSchema(table));
    StorageFormat format = partition
            .map(value -> value.getStorage().getStorageFormat())
            .orElseGet(() -> table.getStorage().getStorageFormat());

    for (String fileName : fileNames) {
        writeEmptyFile(session, new Path(path, fileName), conf, schema, format.getSerDe(), format.getOutputFormat());
    }
}
@Test
public void testHiveSchemaPartition()
{
    // The partition schema derived from the converted (connector-side)
    // partition/table pair must match what Hive's own MetaStoreUtils produces
    // for the raw Thrift objects, including fields with unsupported types.
    Properties actual = MetastoreUtil.getHiveSchema(
            ThriftMetastoreUtil.fromMetastoreApiPartition(TEST_PARTITION_WITH_UNSUPPORTED_FIELDS),
            ThriftMetastoreUtil.fromMetastoreApiTable(TEST_TABLE_WITH_UNSUPPORTED_FIELDS, TEST_SCHEMA));
    Properties expected = MetaStoreUtils.getPartitionMetadata(TEST_PARTITION_WITH_UNSUPPORTED_FIELDS, TEST_TABLE_WITH_UNSUPPORTED_FIELDS);
    assertEquals(actual, expected);
}
/**
 * Builds the Hive {@link Properties} schema for an unpartitioned table.
 *
 * @param table the metastore table to derive the schema from
 * @return Hive-compatible schema properties for the table
 */
public static Properties getHiveSchema(Table table)
{
    // Mimics function in Hive: MetaStoreUtils.getTableMetadata(Table)
    // getDataColumns() is deliberately passed twice: with no partition in
    // play, the storage-level columns and the table-level columns are the
    // same (compare the Partition overload, which passes partition columns
    // for the first argument).
    return getHiveSchema(
            table.getStorage(),
            table.getDataColumns(),
            table.getDataColumns(),
            table.getParameters(),
            table.getDatabaseName(),
            table.getTableName(),
            table.getPartitionColumns());
}
schema = getHiveSchema(table); schema = getHiveSchema(partition.get(), table); schema = getHiveSchema(table);
/**
 * Builds the Hive {@link Properties} schema for a single partition of a table.
 *
 * @param partition the partition supplying storage and partition-level columns
 * @param table the owning table supplying table-level columns, parameters, and names
 * @return Hive-compatible schema properties for the partition
 */
public static Properties getHiveSchema(Partition partition, Table table)
{
    // Mimics function in Hive: MetaStoreUtils.getSchema(Partition, Table)
    // Storage and the first column list come from the partition (partition
    // storage can differ from the table's); everything else comes from the
    // table.
    return getHiveSchema(
            partition.getStorage(),
            partition.getColumns(),
            table.getDataColumns(),
            table.getParameters(),
            table.getDatabaseName(),
            table.getTableName(),
            table.getPartitionColumns());
}
@Test
public void testHiveSchemaTable()
{
    // Compare the schema Hive's own MetaStoreUtils derives from the raw
    // Thrift table against the schema our MetastoreUtil derives from the
    // converted (connector-side) table; they must agree even for fields
    // with unsupported types.
    Properties expected = MetaStoreUtils.getTableMetadata(TEST_TABLE_WITH_UNSUPPORTED_FIELDS);
    Properties actual = MetastoreUtil.getHiveSchema(ThriftMetastoreUtil.fromMetastoreApiTable(TEST_TABLE_WITH_UNSUPPORTED_FIELDS, TEST_SCHEMA));
    assertEquals(actual, expected);
}
@Test
public void testHiveSchemaPartition()
{
    // Compare the partition schema Hive's own MetaStoreUtils derives from
    // the raw Thrift partition/table pair against the schema our
    // MetastoreUtil derives from the converted (connector-side) objects;
    // they must agree even for fields with unsupported types.
    Properties expected = MetaStoreUtils.getPartitionMetadata(TEST_PARTITION_WITH_UNSUPPORTED_FIELDS, TEST_TABLE_WITH_UNSUPPORTED_FIELDS);
    Properties actual = MetastoreUtil.getHiveSchema(ThriftMetastoreUtil.fromMetastoreApiPartition(TEST_PARTITION_WITH_UNSUPPORTED_FIELDS), ThriftMetastoreUtil.fromMetastoreApiTable(TEST_TABLE_WITH_UNSUPPORTED_FIELDS, TEST_SCHEMA));
    assertEquals(actual, expected);
}