/**
 * Builds a {@link Partition} for the given database/table using this builder's
 * columns, parameters, and storage settings.
 *
 * @param location default storage location, used only when no external location was set
 */
public Partition toPartition(String databaseName, String tableName, List<String> values, String location)
{
    // An explicitly configured external location always wins over the caller-supplied default.
    String effectiveLocation = externalLocation.orElse(location);
    Storage storage = Storage.builder()
            .setLocation(effectiveLocation)
            .setStorageFormat(storageFormat
                    .map(StorageFormat::fromHiveStorageFormat)
                    .orElse(VIEW_STORAGE_FORMAT))
            .setBucketProperty(bucketProperty)
            .setSerdeParameters(serdeParameters)
            .build();
    return new Partition(databaseName, tableName, values, storage, columns, parameters);
}
}
/**
 * Builds a {@link Table} for the given database/table using this builder's owner,
 * type, columns, parameters, view texts, and storage settings.
 *
 * @param location default storage location, used only when no external location was set
 */
public Table toTable(String databaseName, String tableName, String location)
{
    // An explicitly configured external location always wins over the caller-supplied default.
    String effectiveLocation = externalLocation.orElse(location);
    Storage storage = Storage.builder()
            .setLocation(effectiveLocation)
            .setStorageFormat(storageFormat
                    .map(StorageFormat::fromHiveStorageFormat)
                    .orElse(VIEW_STORAGE_FORMAT))
            .setBucketProperty(bucketProperty)
            .setSerdeParameters(serdeParameters)
            .build();
    return new Table(
            databaseName,
            tableName,
            owner,
            tableType,
            storage,
            dataColumns,
            partitionColumns,
            parameters,
            viewOriginalText,
            viewExpandedText);
}
}
public Partition getAugmentedPartitionForInTransactionRead() { // This method augments the location field of the partition to the staging location. // This way, if the partition is accessed in an ongoing transaction, staged data // can be found and accessed. Partition partition = this.partition; String currentLocation = this.currentLocation.toString(); if (!currentLocation.equals(partition.getStorage().getLocation())) { partition = Partition.builder(partition) .withStorage(storage -> storage.setLocation(currentLocation)) .build(); } return partition; }
public void updateTableLocation(String databaseName, String tableName, String location) { Optional<Table> table = getTable(databaseName, tableName); if (!table.isPresent()) { throw new TableNotFoundException(new SchemaTableName(databaseName, tableName)); } Table.Builder tableBuilder = Table.builder(table.get()); tableBuilder.getStorageBuilder().setLocation(location); // NOTE: this clears the permissions replaceTable(databaseName, tableName, tableBuilder.build(), new PrincipalPrivileges(ImmutableMultimap.of(), ImmutableMultimap.of())); }
@Override public void createTable(Table table, PrincipalPrivileges privileges) { // hack to work around the metastore not being configured for S3 or other FS Table.Builder tableBuilder = Table.builder(table); tableBuilder.getStorageBuilder().setLocation("/"); super.createTable(tableBuilder.build(), privileges); }
@Test
public void testRoundTrip()
{
    // Serialize a populated Storage to JSON and back, and verify the result is equal.
    StorageFormat format = StorageFormat.create("abc", "in", "out");
    Storage expected = Storage.builder()
            .setStorageFormat(format)
            .setLocation("/test")
            .build();
    String json = CODEC.toJson(expected);
    assertEquals(CODEC.fromJson(json), expected);
}
}
.withStorage(storage -> storage .setStorageFormat(fromHiveStorageFormat(DWRF)) .setLocation(partitionTargetPath(tableName, partitionName))) .build(); metastoreClient.alterPartition(tableName.getSchemaName(), tableName.getTableName(), new PartitionWithStatistics(modifiedPartition, partitionName, statsForAllColumns2)); .withStorage(storage -> storage .setStorageFormat(fromHiveStorageFormat(TEXTFILE)) .setLocation(partitionTargetPath(tableName, partitionName))) .build(); metastoreClient.alterPartition(tableName.getSchemaName(), tableName.getTableName(), new PartitionWithStatistics(modifiedPartition, partitionName, statsForSubsetOfColumns)); .withStorage(storage -> storage .setStorageFormat(fromHiveStorageFormat(TEXTFILE)) .setLocation(partitionTargetPath(tableName, partitionName))) .build(); metastoreClient.alterPartition(tableName.getSchemaName(), tableName.getTableName(), new PartitionWithStatistics(modifiedPartition, partitionName, emptyStatistics));
.setStorageFormat(fromHiveStorageFormat(hiveStorageFormat)) .setBucketProperty(bucketProperty) .setLocation(targetPath.toString());
.setLocation(""); Table table = tableBuilder.build(); PrincipalPrivileges principalPrivileges = buildInitialPrivilegeSet(session.getUser());
.setLocation(targetPath.toString()) .setStorageFormat(StorageFormat.create(hiveStorageFormat.getSerDe(), hiveStorageFormat.getInputFormat(), hiveStorageFormat.getOutputFormat())) .setBucketProperty(bucketProperty)
@Override public void dropTable(String databaseName, String tableName, boolean deleteData) { try { Optional<Table> table = getTable(databaseName, tableName); if (!table.isPresent()) { throw new TableNotFoundException(new SchemaTableName(databaseName, tableName)); } // hack to work around the metastore not being configured for S3 or other FS List<String> locations = listAllDataPaths(databaseName, tableName); Table.Builder tableBuilder = Table.builder(table.get()); tableBuilder.getStorageBuilder().setLocation("/"); // drop table replaceTable(databaseName, tableName, tableBuilder.build(), new PrincipalPrivileges(ImmutableMultimap.of(), ImmutableMultimap.of())); delegate.dropTable(databaseName, tableName, false); // drop data if (deleteData) { for (String location : locations) { Path path = new Path(location); hdfsEnvironment.getFileSystem(TESTING_CONTEXT, path).delete(path, true); } } } catch (IOException e) { throw new UncheckedIOException(e); } finally { invalidateTable(databaseName, tableName); } }
/**
 * Populates a {@link Storage.Builder} from a Thrift {@link StorageDescriptor},
 * translating format, location, bucketing, skew, and serde information.
 *
 * @throws PrestoException if 'numBuckets' is set without 'bucketCols'
 */
private static void setStorageBuilder(StorageDescriptor sd, Storage.Builder storageBuilder)
{
    requireNonNull(sd.getSerdeInfo(), "StorageDescriptor SerDeInfo is null");
    SerDeInfo serdeInfo = sd.getSerdeInfo();

    Optional<HiveBucketProperty> bucketProperty = Optional.empty();
    if (sd.getNumberOfBuckets() > 0) {
        if (isNullOrEmpty(sd.getBucketColumns())) {
            throw new PrestoException(HIVE_INVALID_METADATA, "Table/partition metadata has 'numBuckets' set, but 'bucketCols' is not set");
        }
        // Sort columns are optional even when bucketing is present.
        List<SortingColumn> sortedBy = isNullOrEmpty(sd.getSortColumns())
                ? ImmutableList.of()
                : sd.getSortColumns().stream()
                        .map(column -> new SortingColumn(
                                column.getColumn(),
                                Order.fromMetastoreApiOrder(column.getSortOrder(), "unknown")))
                        .collect(toImmutableList());
        bucketProperty = Optional.of(new HiveBucketProperty(sd.getBucketColumns(), sd.getNumberOfBuckets(), sortedBy));
    }

    storageBuilder
            .setStorageFormat(StorageFormat.createNullable(serdeInfo.getSerializationLibrary(), sd.getInputFormat(), sd.getOutputFormat()))
            .setLocation(nullToEmpty(sd.getLocation()))
            .setBucketProperty(bucketProperty)
            .setSkewed(sd.getSkewedInfo() != null && !isNullOrEmpty(sd.getSkewedInfo().getSkewedColumnNames()))
            .setSerdeParameters(firstNonNull(serdeInfo.getParameters(), ImmutableMap.of()))
            .build();
}
/**
 * Builds a fixture {@link Table} named {@code test_dbname.test_table} with a single
 * string data column and the given partition columns and bucket property.
 */
private static Table table(
        List<Column> partitionColumns,
        Optional<HiveBucketProperty> bucketProperty)
{
    Table.Builder builder = Table.builder()
            .setDatabaseName("test_dbname")
            .setOwner("testOwner")
            .setTableName("test_table")
            .setTableType(TableType.MANAGED_TABLE.toString())
            .setDataColumns(ImmutableList.of(new Column("col1", HIVE_STRING, Optional.empty())))
            .setParameters(ImmutableMap.of())
            .setPartitionColumns(partitionColumns);
    // NOTE(review): RCFileInputFormat is used for both the input and output format;
    // looks like a copy-paste (RCFileOutputFormat expected?) — confirm before changing,
    // since existing tests may rely on these exact strings.
    builder.getStorageBuilder()
            .setStorageFormat(
                    StorageFormat.create(
                            "com.facebook.hive.orc.OrcSerde",
                            "org.apache.hadoop.hive.ql.io.RCFileInputFormat",
                            "org.apache.hadoop.hive.ql.io.RCFileInputFormat"))
            .setLocation("hdfs://VOL1:9000/db_name/table_name")
            .setSkewed(false)
            .setBucketProperty(bucketProperty);
    return builder.build();
}
/**
 * Builds a managed ORC table fixture owned by the session user, tagged with the
 * test server version and the given query id.
 */
private static Table createSimpleTable(SchemaTableName schemaTableName, List<Column> columns, ConnectorSession session, Path targetPath, String queryId)
{
    ImmutableMap<String, String> tableParameters = ImmutableMap.of(
            PRESTO_VERSION_NAME, TEST_SERVER_VERSION,
            PRESTO_QUERY_ID_NAME, queryId);
    return Table.builder()
            .setDatabaseName(schemaTableName.getSchemaName())
            .setTableName(schemaTableName.getTableName())
            .setOwner(session.getUser())
            .setTableType(TableType.MANAGED_TABLE.name())
            .setParameters(tableParameters)
            .setDataColumns(columns)
            .withStorage(storage -> storage
                    .setLocation(targetPath.toString())
                    .setStorageFormat(fromHiveStorageFormat(ORC))
                    .setSerdeParameters(ImmutableMap.of()))
            .build();
}
/**
 * Builds a {@link Partition} for the given table from a {@link PartitionUpdate},
 * tagging it with the Presto version and query id, and inheriting the table's
 * bucket property and serde parameters.
 */
private Partition buildPartitionObject(ConnectorSession session, Table table, PartitionUpdate partitionUpdate)
{
    ImmutableMap<String, String> partitionParameters = ImmutableMap.<String, String>builder()
            .put(PRESTO_VERSION_NAME, prestoVersion)
            .put(PRESTO_QUERY_ID_NAME, session.getQueryId())
            .build();
    // Either keep the table's format or use the session's configured format,
    // depending on the respect-table-format session property.
    StorageFormat format = isRespectTableFormat(session)
            ? table.getStorage().getStorageFormat()
            : fromHiveStorageFormat(HiveSessionProperties.getHiveStorageFormat(session));
    return Partition.builder()
            .setDatabaseName(table.getDatabaseName())
            .setTableName(table.getTableName())
            .setColumns(table.getDataColumns())
            .setValues(extractPartitionValues(partitionUpdate.getName()))
            .setParameters(partitionParameters)
            .withStorage(storage -> storage
                    .setStorageFormat(format)
                    .setLocation(partitionUpdate.getTargetPath().toString())
                    .setBucketProperty(table.getStorage().getBucketProperty())
                    .setSerdeParameters(table.getStorage().getSerdeParameters()))
            .build();
}
/**
 * Builds a dummy ORC partition for the given table and partition name, with
 * fixed test version and query-id parameters.
 */
protected Partition createDummyPartition(Table table, String partitionName)
{
    SchemaTableName schemaTableName = new SchemaTableName(table.getDatabaseName(), table.getTableName());
    ImmutableMap<String, String> dummyParameters = ImmutableMap.of(
            PRESTO_VERSION_NAME, "testversion",
            PRESTO_QUERY_ID_NAME, "20180101_123456_00001_x1y2z");
    return Partition.builder()
            .setDatabaseName(table.getDatabaseName())
            .setTableName(table.getTableName())
            .setColumns(table.getDataColumns())
            .setValues(toPartitionValues(partitionName))
            .withStorage(storage -> storage
                    .setStorageFormat(fromHiveStorageFormat(HiveStorageFormat.ORC))
                    .setLocation(partitionTargetPath(schemaTableName, partitionName)))
            .setParameters(dummyParameters)
            .build();
}
public void updateTableLocation(String databaseName, String tableName, String location) { Optional<Table> table = getTable(databaseName, tableName); if (!table.isPresent()) { throw new TableNotFoundException(new SchemaTableName(databaseName, tableName)); } Table.Builder tableBuilder = Table.builder(table.get()); tableBuilder.getStorageBuilder().setLocation(location); // NOTE: this clears the permissions replaceTable(databaseName, tableName, tableBuilder.build(), new PrincipalPrivileges(ImmutableMultimap.of(), ImmutableMultimap.of())); }
@Test
public void testRoundTrip()
{
    // A Storage value must survive a JSON encode/decode cycle unchanged.
    Storage original = Storage.builder()
            .setLocation("/test")
            .setStorageFormat(StorageFormat.create("abc", "in", "out"))
            .build();
    Storage decoded = CODEC.fromJson(CODEC.toJson(original));
    assertEquals(decoded, original);
}
}
/**
 * Populates a {@link Storage.Builder} from a metastore-API {@link StorageDescriptor},
 * translating format, location, bucketing, skew, and serde parameters.
 *
 * @param tablePartitionName used for error reporting when deriving the bucket property
 * @throws PrestoException if the descriptor has no SerDe info
 */
public static void fromMetastoreApiStorageDescriptor(StorageDescriptor storageDescriptor, Storage.Builder builder, String tablePartitionName)
{
    SerDeInfo serdeInfo = storageDescriptor.getSerdeInfo();
    if (serdeInfo == null) {
        throw new PrestoException(HIVE_INVALID_METADATA, "Table storage descriptor is missing SerDe info");
    }

    boolean skewed = storageDescriptor.isSetSkewedInfo()
            && storageDescriptor.getSkewedInfo().isSetSkewedColNames()
            && !storageDescriptor.getSkewedInfo().getSkewedColNames().isEmpty();
    Map<String, String> serdeParameters = serdeInfo.getParameters() == null
            ? ImmutableMap.of()
            : serdeInfo.getParameters();

    builder.setStorageFormat(StorageFormat.createNullable(serdeInfo.getSerializationLib(), storageDescriptor.getInputFormat(), storageDescriptor.getOutputFormat()))
            .setLocation(nullToEmpty(storageDescriptor.getLocation()))
            .setBucketProperty(HiveBucketProperty.fromStorageDescriptor(storageDescriptor, tablePartitionName))
            .setSkewed(skewed)
            .setSerdeParameters(serdeParameters);
}
@Override public void createTable(Table table, PrincipalPrivileges privileges) { // hack to work around the metastore not being configured for S3 or other FS Table.Builder tableBuilder = Table.builder(table); tableBuilder.getStorageBuilder().setLocation("/"); super.createTable(tableBuilder.build(), privileges); }