/**
 * Human-readable description of this action, used for diagnostics/logging.
 */
public String getDescription()
{
    String databaseName = newTable.getDatabaseName();
    String tableName = newTable.getTableName();
    return format("add table %s.%s", databaseName, tableName);
}
/**
 * Rolls back the action: drops the table, but only if creation actually happened.
 * The final boolean argument to dropTable is false — presumably "deleteData";
 * confirm against the ExtendedHiveMetastore contract.
 */
public void undo(ExtendedHiveMetastore metastore)
{
    if (tableCreated) {
        metastore.dropTable(newTable.getDatabaseName(), newTable.getTableName(), false);
    }
}
}
/**
 * Resolves the metadata directory for a table by delegating to the
 * name-based overload.
 */
private Path getTableMetadataDirectory(Table table)
{
    String databaseName = table.getDatabaseName();
    String tableName = table.getTableName();
    return getTableMetadataDirectory(databaseName, tableName);
}
/**
 * Builds a bucket handle from the table's bucketing property, or returns empty
 * when the table is not bucketed.
 *
 * @throws PrestoException (HIVE_INVALID_METADATA) if a bucketing column does not
 *         exist among the table's regular columns
 */
public static Optional<HiveBucketHandle> getHiveBucketHandle(Table table)
{
    Optional<HiveBucketProperty> bucketProperty = table.getStorage().getBucketProperty();
    if (!bucketProperty.isPresent()) {
        return Optional.empty();
    }

    // Index the non-partition columns by name for bucket-column lookup.
    Map<String, HiveColumnHandle> handlesByName = getRegularColumnHandles(table).stream()
            .collect(Collectors.toMap(HiveColumnHandle::getName, identity()));

    ImmutableList.Builder<HiveColumnHandle> bucketColumnHandles = ImmutableList.builder();
    for (String columnName : bucketProperty.get().getBucketedBy()) {
        HiveColumnHandle handle = handlesByName.get(columnName);
        if (handle == null) {
            throw new PrestoException(
                    HIVE_INVALID_METADATA,
                    format("Table '%s.%s' is bucketed on non-existent column '%s'", table.getDatabaseName(), table.getTableName(), columnName));
        }
        bucketColumnHandles.add(handle);
    }

    // The same count is used for both handle bucket-count arguments.
    int bucketCount = bucketProperty.get().getBucketCount();
    return Optional.of(new HiveBucketHandle(bucketColumnHandles.build(), bucketCount, bucketCount));
}
/**
 * Returns the partition key name/value pairs for the given partition, or an
 * empty list when no partition is present (unpartitioned table).
 *
 * @throws PrestoException (NOT_SUPPORTED) for unsupported partition key types,
 *         (HIVE_INVALID_PARTITION_VALUE) for null values
 */
private static List<HivePartitionKey> getPartitionKeys(Table table, Optional<Partition> partition)
{
    if (!partition.isPresent()) {
        return ImmutableList.of();
    }

    List<Column> keys = table.getPartitionColumns();
    List<String> values = partition.get().getValues();
    // Key columns and partition values must line up one-to-one.
    checkCondition(keys.size() == values.size(), HIVE_INVALID_METADATA, "Expected %s partition key values, but got %s", keys.size(), values.size());

    ImmutableList.Builder<HivePartitionKey> result = ImmutableList.builder();
    for (int index = 0; index < keys.size(); index++) {
        Column keyColumn = keys.get(index);
        String name = keyColumn.getName();
        HiveType hiveType = keyColumn.getType();
        if (!hiveType.isSupportedType()) {
            throw new PrestoException(NOT_SUPPORTED, format("Unsupported Hive type %s found in partition keys of table %s.%s", hiveType, table.getDatabaseName(), table.getTableName()));
        }
        String value = values.get(index);
        checkCondition(value != null, HIVE_INVALID_PARTITION_VALUE, "partition key value cannot be null for field: %s", name);
        result.add(new HivePartitionKey(name, value));
    }
    return result.build();
}
@Override
public void createTable(Table table, PrincipalPrivileges principalPrivileges)
{
    try {
        delegate.createTable(table, principalPrivileges);
    }
    finally {
        // Invalidate even on failure: the delegate's state may have changed partially.
        invalidateTable(table.getDatabaseName(), table.getTableName());
    }
}
/**
 * Builds column handles for all partition key columns of the table.
 *
 * @throws PrestoException (NOT_SUPPORTED) when a partition key has an unsupported Hive type
 */
public static List<HiveColumnHandle> getPartitionKeyColumnHandles(Table table)
{
    ImmutableList.Builder<HiveColumnHandle> handles = ImmutableList.builder();
    for (Column partitionColumn : table.getPartitionColumns()) {
        HiveType hiveType = partitionColumn.getType();
        if (!hiveType.isSupportedType()) {
            throw new PrestoException(NOT_SUPPORTED, format("Unsupported Hive type %s found in partition keys of table %s.%s", hiveType, table.getDatabaseName(), table.getTableName()));
        }
        // -1 ordinal: partition keys are not physical columns in the data files.
        handles.add(new HiveColumnHandle(partitionColumn.getName(), hiveType, hiveType.getTypeSignature(), -1, PARTITION_KEY, partitionColumn.getComment()));
    }
    return handles.build();
}
@Override
public void replaceTable(String databaseName, String tableName, Table newTable, PrincipalPrivileges principalPrivileges)
{
    try {
        delegate.replaceTable(databaseName, tableName, newTable, principalPrivileges);
    }
    finally {
        // Invalidate both the old and the new coordinates: a replace may move the table.
        invalidateTable(databaseName, tableName);
        invalidateTable(newTable.getDatabaseName(), newTable.getTableName());
    }
}
// Stages the operations needed to commit an INSERT into an existing (unpartitioned) table:
// registers abort-time cleanup, moves staged files into the table directory if a staging
// location was used, and queues a statistics update.
private void prepareInsertExistingTable(HdfsContext context, TableAndMore tableAndMore)
{
    // This transaction writes data, so it is no longer a pure-delete commit.
    deleteOnly = false;
    Table table = tableAndMore.getTable();
    Path targetPath = new Path(table.getStorage().getLocation());
    Path currentPath = tableAndMore.getCurrentLocation().get();
    // If the commit is aborted, clean up the target directory. The trailing boolean is
    // passed as false — presumably "delete the directory itself"; confirm against
    // DirectoryCleanUpTask before relying on this.
    cleanUpTasksForAbort.add(new DirectoryCleanUpTask(context, targetPath, false));
    if (!targetPath.equals(currentPath)) {
        // Files were written to a staging location; rename them into the table directory
        // asynchronously on the rename executor.
        asyncRename(hdfsEnvironment, renameExecutor, fileRenameCancelled, fileRenameFutures, context, currentPath, targetPath, tableAndMore.getFileNames().get());
    }
    // Queue a table-level statistics update; the trailing true flag's semantics
    // (NOTE(review): looks like "merge with existing stats") should be confirmed
    // against UpdateStatisticsOperation.
    updateStatisticsOperations.add(new UpdateStatisticsOperation(
            new SchemaTableName(table.getDatabaseName(), table.getTableName()),
            Optional.empty(),
            tableAndMore.getStatisticsUpdate(),
            true));
}
@Override
public void createTable(Table table, PrincipalPrivileges principalPrivileges)
{
    try {
        TableInput tableInput = GlueInputConverter.convertTable(table);
        CreateTableRequest request = new CreateTableRequest()
                .withDatabaseName(table.getDatabaseName())
                .withTableInput(tableInput);
        glueClient.createTable(request);
    }
    catch (AlreadyExistsException e) {
        // Glue reports a duplicate table; surface it as the connector-level exception.
        throw new TableAlreadyExistsException(new SchemaTableName(table.getDatabaseName(), table.getTableName()));
    }
    catch (EntityNotFoundException e) {
        // The target database does not exist in Glue.
        throw new SchemaNotFoundException(table.getDatabaseName());
    }
    catch (AmazonServiceException e) {
        throw new PrestoException(HIVE_METASTORE_ERROR, e);
    }
}
/**
 * Verifies that the given table may be written to: non-managed tables are rejected
 * unless explicitly enabled, then the standard writability checks are applied.
 *
 * @throws PrestoException (NOT_SUPPORTED) when writes to non-managed tables are disabled
 */
public static void checkTableIsWritable(Table table, boolean writesToNonManagedTablesEnabled)
{
    if (!writesToNonManagedTablesEnabled) {
        boolean managed = table.getTableType().equals(MANAGED_TABLE.toString());
        if (!managed) {
            throw new PrestoException(NOT_SUPPORTED, "Cannot write to non-managed Hive table");
        }
    }
    checkWritable(
            new SchemaTableName(table.getDatabaseName(), table.getTableName()),
            Optional.empty(),
            getProtectMode(table),
            table.getParameters(),
            table.getStorage());
}
@Override
public LocationHandle forExistingTable(SemiTransactionalHiveMetastore metastore, ConnectorSession session, Table table)
{
    HdfsContext context = new HdfsContext(session, table.getDatabaseName(), table.getTableName());
    Path targetPath = new Path(table.getStorage().getLocation());

    if (!shouldUseTemporaryDirectory(session, context, targetPath)) {
        // Write straight into the existing table directory.
        return new LocationHandle(targetPath, targetPath, true, DIRECT_TO_TARGET_EXISTING_DIRECTORY);
    }

    // Stage writes in a temporary directory and move them into place on commit.
    Path writePath = createTemporaryPath(session, context, hdfsEnvironment, targetPath);
    return new LocationHandle(targetPath, writePath, true, STAGE_AND_MOVE_TO_TARGET_DIRECTORY);
}
/**
 * Writes zero-row files with the correct schema and storage format at the given
 * path — one per entry in {@code fileNames}. The schema/format come from the
 * partition when present, otherwise from the table itself.
 */
private void createEmptyFile(ConnectorSession session, Path path, Table table, Optional<Partition> partition, List<String> fileNames)
{
    HdfsContext hdfsContext = new HdfsContext(session, table.getDatabaseName(), table.getTableName());
    JobConf conf = toJobConf(hdfsEnvironment.getConfiguration(hdfsContext, path));

    Properties schema;
    StorageFormat format;
    if (partition.isPresent()) {
        schema = getHiveSchema(partition.get(), table);
        format = partition.get().getStorage().getStorageFormat();
    }
    else {
        schema = getHiveSchema(table);
        format = table.getStorage().getStorageFormat();
    }

    for (String fileName : fileNames) {
        Path filePath = new Path(path, fileName);
        writeEmptyFile(session, filePath, conf, schema, format.getSerDe(), format.getOutputFormat());
    }
}
public static Properties getHiveSchema(Partition partition, Table table) { // Mimics function in Hive: MetaStoreUtils.getSchema(Partition, Table) return getHiveSchema( partition.getStorage(), partition.getColumns(), table.getDataColumns(), table.getParameters(), table.getDatabaseName(), table.getTableName(), table.getPartitionColumns()); }
public static Properties getHiveSchema(Table table) { // Mimics function in Hive: MetaStoreUtils.getTableMetadata(Table) return getHiveSchema( table.getStorage(), table.getDataColumns(), table.getDataColumns(), table.getParameters(), table.getDatabaseName(), table.getTableName(), table.getPartitionColumns()); }
/**
 * Converts a Presto table representation plus its privileges into the
 * Thrift metastore API table object.
 */
public static org.apache.hadoop.hive.metastore.api.Table toMetastoreApiTable(Table table, PrincipalPrivileges privileges)
{
    org.apache.hadoop.hive.metastore.api.Table metastoreTable = new org.apache.hadoop.hive.metastore.api.Table();
    metastoreTable.setDbName(table.getDatabaseName());
    metastoreTable.setTableName(table.getTableName());
    metastoreTable.setOwner(table.getOwner());
    metastoreTable.setTableType(table.getTableType());
    metastoreTable.setParameters(table.getParameters());
    metastoreTable.setPartitionKeys(table.getPartitionColumns().stream()
            .map(ThriftMetastoreUtil::toMetastoreApiFieldSchema)
            .collect(toList()));
    metastoreTable.setSd(makeStorageDescriptor(table.getTableName(), table.getDataColumns(), table.getStorage()));
    metastoreTable.setPrivileges(toMetastoreApiPrincipalPrivilegeSet(table.getOwner(), privileges));
    // View texts are optional; absent values map to null in the Thrift object.
    metastoreTable.setViewOriginalText(table.getViewOriginalText().orElse(null));
    metastoreTable.setViewExpandedText(table.getViewExpandedText().orElse(null));
    return metastoreTable;
}
// Test helper: builds an all-at-once split source over SIMPLE_TABLE with
// fixed queue sizing (1 outstanding split, 1 batch, 32MB max outstanding size).
private static HiveSplitSource hiveSplitSource(
        BackgroundHiveSplitLoader backgroundHiveSplitLoader,
        TupleDomain<HiveColumnHandle> compactEffectivePredicate)
{
    DataSize maxOutstandingSplitsSize = new DataSize(32, MEGABYTE);
    return HiveSplitSource.allAtOnce(
            SESSION,
            SIMPLE_TABLE.getDatabaseName(),
            SIMPLE_TABLE.getTableName(),
            compactEffectivePredicate,
            1,
            1,
            maxOutstandingSplitsSize,
            backgroundHiveSplitLoader,
            EXECUTOR,
            new CounterStat());
}
/**
 * Builds the metastore Partition object for a finished partition write,
 * stamping it with the Presto version and query id.
 */
private Partition buildPartitionObject(ConnectorSession session, Table table, PartitionUpdate partitionUpdate)
{
    Map<String, String> parameters = ImmutableMap.<String, String>builder()
            .put(PRESTO_VERSION_NAME, prestoVersion)
            .put(PRESTO_QUERY_ID_NAME, session.getQueryId())
            .build();

    // Keep the table's format when the session says to respect it; otherwise use
    // the session-configured storage format.
    StorageFormat storageFormat = isRespectTableFormat(session)
            ? table.getStorage().getStorageFormat()
            : fromHiveStorageFormat(HiveSessionProperties.getHiveStorageFormat(session));

    return Partition.builder()
            .setDatabaseName(table.getDatabaseName())
            .setTableName(table.getTableName())
            .setColumns(table.getDataColumns())
            .setValues(extractPartitionValues(partitionUpdate.getName()))
            .setParameters(parameters)
            .withStorage(storage -> storage
                    .setStorageFormat(storageFormat)
                    .setLocation(partitionUpdate.getTargetPath().toString())
                    .setBucketProperty(table.getStorage().getBucketProperty())
                    .setSerdeParameters(table.getStorage().getSerdeParameters()))
            .build();
}
@Test
public void testConvertTable()
{
    com.facebook.presto.hive.metastore.Table converted = GlueToPrestoConverter.convertTable(testTbl, testDb.getName());

    // Scalar fields carried over from the Glue table.
    assertEquals(converted.getTableName(), testTbl.getName());
    assertEquals(converted.getDatabaseName(), testDb.getName());
    assertEquals(converted.getTableType(), testTbl.getTableType());
    assertEquals(converted.getOwner(), testTbl.getOwner());
    assertEquals(converted.getParameters(), testTbl.getParameters());

    // Column lists and storage descriptor.
    assertColumnList(converted.getDataColumns(), testTbl.getStorageDescriptor().getColumns());
    assertColumnList(converted.getPartitionColumns(), testTbl.getPartitionKeys());
    assertStorage(converted.getStorage(), testTbl.getStorageDescriptor());

    // View texts are wrapped in Optional on the Presto side.
    assertEquals(converted.getViewOriginalText().get(), testTbl.getViewOriginalText());
    assertEquals(converted.getViewExpandedText().get(), testTbl.getViewExpandedText());
}
/**
 * Test helper: builds a placeholder ORC partition for the given table and
 * partition name, with fixed version/query-id parameters.
 */
protected Partition createDummyPartition(Table table, String partitionName)
{
    SchemaTableName schemaTableName = new SchemaTableName(table.getDatabaseName(), table.getTableName());
    return Partition.builder()
            .setDatabaseName(table.getDatabaseName())
            .setTableName(table.getTableName())
            .setColumns(table.getDataColumns())
            .setValues(toPartitionValues(partitionName))
            .withStorage(storage -> storage
                    .setStorageFormat(fromHiveStorageFormat(HiveStorageFormat.ORC))
                    .setLocation(partitionTargetPath(schemaTableName, partitionName)))
            .setParameters(ImmutableMap.of(
                    PRESTO_VERSION_NAME, "testversion",
                    PRESTO_QUERY_ID_NAME, "20180101_123456_00001_x1y2z"))
            .build();
}