/**
 * Human-readable summary of this operation, used for logging and diagnostics.
 */
public String getDescription()
{
    String databaseName = newTable.getDatabaseName();
    String tableName = newTable.getTableName();
    return format("add table %s.%s", databaseName, tableName);
}
/**
 * Rolls back this operation: drops the table, but only if this operation
 * actually created it (no-op otherwise).
 */
public void undo(ExtendedHiveMetastore metastore)
{
    if (tableCreated) {
        // third argument false: presumably "do not delete data" on rollback — confirm against dropTable contract
        metastore.dropTable(newTable.getDatabaseName(), newTable.getTableName(), false);
    }
}
}
/**
 * Resolves the metadata directory for a table by delegating to the
 * (databaseName, tableName) overload.
 */
private Path getTableMetadataDirectory(Table table)
{
    String databaseName = table.getDatabaseName();
    String tableName = table.getTableName();
    return getTableMetadataDirectory(databaseName, tableName);
}
/**
 * Creates the table via the delegate metastore and invalidates any cached
 * entry for it — even when creation fails, since the delegate's state may
 * have changed partially.
 */
@Override
public void createTable(Table table, PrincipalPrivileges principalPrivileges)
{
    String databaseName = table.getDatabaseName();
    String tableName = table.getTableName();
    try {
        delegate.createTable(table, principalPrivileges);
    }
    finally {
        invalidateTable(databaseName, tableName);
    }
}
/**
 * Builds a column handle for every partition key of the table.
 *
 * @throws PrestoException (NOT_SUPPORTED) if a partition key uses a Hive type
 *         that is not supported
 */
public static List<HiveColumnHandle> getPartitionKeyColumnHandles(Table table)
{
    ImmutableList.Builder<HiveColumnHandle> handles = ImmutableList.builder();
    for (Column partitionKey : table.getPartitionColumns()) {
        HiveType type = partitionKey.getType();
        if (!type.isSupportedType()) {
            throw new PrestoException(NOT_SUPPORTED, format(
                    "Unsupported Hive type %s found in partition keys of table %s.%s",
                    type,
                    table.getDatabaseName(),
                    table.getTableName()));
        }
        // ordinal -1: partition keys presumably have no position in the data file layout — confirm HiveColumnHandle convention
        handles.add(new HiveColumnHandle(partitionKey.getName(), type, type.getTypeSignature(), -1, PARTITION_KEY, partitionKey.getComment()));
    }
    return handles.build();
}
/**
 * Replaces the table via the delegate metastore, then invalidates cached
 * entries for both the old identity and the (possibly renamed) new identity.
 * Invalidation runs even on failure.
 */
@Override
public void replaceTable(String databaseName, String tableName, Table newTable, PrincipalPrivileges principalPrivileges)
{
    try {
        delegate.replaceTable(databaseName, tableName, newTable, principalPrivileges);
    }
    finally {
        String newDatabaseName = newTable.getDatabaseName();
        String newTableName = newTable.getTableName();
        invalidateTable(databaseName, tableName);
        invalidateTable(newDatabaseName, newTableName);
    }
}
// Prepares commit of an INSERT into an existing (unpartitioned) table:
// schedules renames of staged files into the table directory (when data was
// written to a temporary location), registers abort cleanup, and queues a
// statistics update for the table.
private void prepareInsertExistingTable(HdfsContext context, TableAndMore tableAndMore)
{
    // An insert adds data; this commit is not delete-only.
    deleteOnly = false;
    Table table = tableAndMore.getTable();
    Path targetPath = new Path(table.getStorage().getLocation());
    // .get(): current write location is expected to be present for an insert — NOTE(review) confirm TableAndMore invariant
    Path currentPath = tableAndMore.getCurrentLocation().get();
    // Register cleanup of the target directory if the commit is aborted;
    // the boolean false presumably disables recursive/full deletion — confirm DirectoryCleanUpTask contract
    cleanUpTasksForAbort.add(new DirectoryCleanUpTask(context, targetPath, false));
    if (!targetPath.equals(currentPath)) {
        // Data was staged elsewhere: asynchronously rename each written file into the table directory.
        asyncRename(hdfsEnvironment, renameExecutor, fileRenameCancelled, fileRenameFutures, context, currentPath, targetPath, tableAndMore.getFileNames().get());
    }
    // Queue a statistics update for the table (no partition: Optional.empty());
    // the trailing true presumably means "merge with existing statistics" — confirm UpdateStatisticsOperation contract
    updateStatisticsOperations.add(new UpdateStatisticsOperation(
            new SchemaTableName(table.getDatabaseName(), table.getTableName()),
            Optional.empty(),
            tableAndMore.getStatisticsUpdate(),
            true));
}
/**
 * Converts a Presto metastore {@code Table} into a Glue {@code TableInput}
 * suitable for create/update requests.
 */
public static TableInput convertTable(Table table)
{
    TableInput tableInput = new TableInput();
    tableInput.setName(table.getTableName());
    tableInput.setOwner(table.getOwner());
    tableInput.setTableType(table.getTableType());
    tableInput.setStorageDescriptor(convertStorage(table.getStorage(), table.getDataColumns()));
    tableInput.setPartitionKeys(table.getPartitionColumns().stream()
            .map(GlueInputConverter::convertColumn)
            .collect(toList()));
    tableInput.setParameters(table.getParameters());
    // View texts are optional; only populate them when present.
    table.getViewOriginalText().ifPresent(tableInput::setViewOriginalText);
    table.getViewExpandedText().ifPresent(tableInput::setViewExpandedText);
    return tableInput;
}
/**
 * Verifies a table may be written to.
 *
 * @throws PrestoException (NOT_SUPPORTED) if the table is not managed and
 *         writes to non-managed tables are disabled
 */
public static void checkTableIsWritable(Table table, boolean writesToNonManagedTablesEnabled)
{
    boolean managed = table.getTableType().equals(MANAGED_TABLE.toString());
    if (!managed && !writesToNonManagedTablesEnabled) {
        throw new PrestoException(NOT_SUPPORTED, "Cannot write to non-managed Hive table");
    }
    // Delegate remaining checks (protect mode, parameters, storage) to the shared helper.
    checkWritable(
            new SchemaTableName(table.getDatabaseName(), table.getTableName()),
            Optional.empty(),
            getProtectMode(table),
            table.getParameters(),
            table.getStorage());
}
/**
 * Converts a Presto metastore {@code Table} plus its privileges into the
 * Thrift metastore API representation.
 */
public static org.apache.hadoop.hive.metastore.api.Table toMetastoreApiTable(Table table, PrincipalPrivileges privileges)
{
    String owner = table.getOwner();
    org.apache.hadoop.hive.metastore.api.Table apiTable = new org.apache.hadoop.hive.metastore.api.Table();
    apiTable.setDbName(table.getDatabaseName());
    apiTable.setTableName(table.getTableName());
    apiTable.setOwner(owner);
    apiTable.setTableType(table.getTableType());
    apiTable.setParameters(table.getParameters());
    apiTable.setPartitionKeys(table.getPartitionColumns().stream()
            .map(ThriftMetastoreUtil::toMetastoreApiFieldSchema)
            .collect(toList()));
    apiTable.setSd(makeStorageDescriptor(table.getTableName(), table.getDataColumns(), table.getStorage()));
    apiTable.setPrivileges(toMetastoreApiPrincipalPrivilegeSet(owner, privileges));
    // View texts are nullable in the Thrift model.
    apiTable.setViewOriginalText(table.getViewOriginalText().orElse(null));
    apiTable.setViewExpandedText(table.getViewExpandedText().orElse(null));
    return apiTable;
}
/**
 * Writes one empty file per requested name under {@code path}, using the
 * schema and storage format of the partition when present, otherwise of the
 * table.
 */
private void createEmptyFile(ConnectorSession session, Path path, Table table, Optional<Partition> partition, List<String> fileNames)
{
    HdfsContext context = new HdfsContext(session, table.getDatabaseName(), table.getTableName());
    JobConf conf = toJobConf(hdfsEnvironment.getConfiguration(context, path));
    Properties schema;
    StorageFormat format;
    if (partition.isPresent()) {
        schema = getHiveSchema(partition.get(), table);
        format = partition.get().getStorage().getStorageFormat();
    }
    else {
        schema = getHiveSchema(table);
        format = table.getStorage().getStorageFormat();
    }
    for (String fileName : fileNames) {
        Path filePath = new Path(path, fileName);
        writeEmptyFile(session, filePath, conf, schema, format.getSerDe(), format.getOutputFormat());
    }
}
/**
 * Chooses write locations for inserting into an existing table: either write
 * directly into the table directory, or stage into a temporary directory and
 * move files to the target on commit.
 */
@Override
public LocationHandle forExistingTable(SemiTransactionalHiveMetastore metastore, ConnectorSession session, Table table)
{
    HdfsContext context = new HdfsContext(session, table.getDatabaseName(), table.getTableName());
    Path targetPath = new Path(table.getStorage().getLocation());
    if (!shouldUseTemporaryDirectory(session, context, targetPath)) {
        // Write directly into the existing table directory.
        return new LocationHandle(targetPath, targetPath, true, DIRECT_TO_TARGET_EXISTING_DIRECTORY);
    }
    Path writePath = createTemporaryPath(session, context, hdfsEnvironment, targetPath);
    return new LocationHandle(targetPath, writePath, true, STAGE_AND_MOVE_TO_TARGET_DIRECTORY);
}
public static Properties getHiveSchema(Partition partition, Table table) { // Mimics function in Hive: MetaStoreUtils.getSchema(Partition, Table) return getHiveSchema( partition.getStorage(), partition.getColumns(), table.getDataColumns(), table.getParameters(), table.getDatabaseName(), table.getTableName(), table.getPartitionColumns()); }
public static Properties getHiveSchema(Table table) { // Mimics function in Hive: MetaStoreUtils.getTableMetadata(Table) return getHiveSchema( table.getStorage(), table.getDataColumns(), table.getDataColumns(), table.getParameters(), table.getDatabaseName(), table.getTableName(), table.getPartitionColumns()); }
/**
 * Test helper: creates an all-at-once HiveSplitSource over SIMPLE_TABLE with
 * the given loader and effective predicate.
 */
private static HiveSplitSource hiveSplitSource(
        BackgroundHiveSplitLoader backgroundHiveSplitLoader,
        TupleDomain<HiveColumnHandle> compactEffectivePredicate)
{
    DataSize maxOutstandingSplitsSize = new DataSize(32, MEGABYTE);
    return HiveSplitSource.allAtOnce(
            SESSION,
            SIMPLE_TABLE.getDatabaseName(),
            SIMPLE_TABLE.getTableName(),
            compactEffectivePredicate,
            1,
            1,
            maxOutstandingSplitsSize,
            backgroundHiveSplitLoader,
            EXECUTOR,
            new CounterStat());
}
/**
 * Builds a Partition object for a newly written partition, inheriting columns
 * and serde configuration from the table and tagging it with the Presto
 * version and query id.
 */
private Partition buildPartitionObject(ConnectorSession session, Table table, PartitionUpdate partitionUpdate)
{
    ImmutableMap<String, String> parameters = ImmutableMap.<String, String>builder()
            .put(PRESTO_VERSION_NAME, prestoVersion)
            .put(PRESTO_QUERY_ID_NAME, session.getQueryId())
            .build();
    return Partition.builder()
            .setDatabaseName(table.getDatabaseName())
            .setTableName(table.getTableName())
            .setColumns(table.getDataColumns())
            .setValues(extractPartitionValues(partitionUpdate.getName()))
            .setParameters(parameters)
            .withStorage(storage -> storage
                    // Use the table's format unless the session overrides it.
                    .setStorageFormat(isRespectTableFormat(session)
                            ? table.getStorage().getStorageFormat()
                            : fromHiveStorageFormat(HiveSessionProperties.getHiveStorageFormat(session)))
                    .setLocation(partitionUpdate.getTargetPath().toString())
                    .setBucketProperty(table.getStorage().getBucketProperty())
                    .setSerdeParameters(table.getStorage().getSerdeParameters()))
            .build();
}
/**
 * Verifies GlueInputConverter.convertTable copies every field of the test
 * table into the TableInput, including optional view texts.
 */
@Test
public void testConvertTable()
{
    TableInput input = GlueInputConverter.convertTable(testTbl);
    assertEquals(input.getName(), testTbl.getTableName());
    assertEquals(input.getOwner(), testTbl.getOwner());
    assertEquals(input.getTableType(), testTbl.getTableType());
    assertEquals(input.getParameters(), testTbl.getParameters());
    assertColumnList(input.getStorageDescriptor().getColumns(), testTbl.getDataColumns());
    assertColumnList(input.getPartitionKeys(), testTbl.getPartitionColumns());
    assertStorage(input.getStorageDescriptor(), testTbl.getStorage());
    assertEquals(input.getViewExpandedText(), testTbl.getViewExpandedText().get());
    assertEquals(input.getViewOriginalText(), testTbl.getViewOriginalText().get());
}
/**
 * Verifies GlueToPrestoConverter.convertTable maps every Glue table field
 * onto the Presto metastore model, including the database name and view texts.
 */
@Test
public void testConvertTable()
{
    com.facebook.presto.hive.metastore.Table converted = GlueToPrestoConverter.convertTable(testTbl, testDb.getName());
    assertEquals(converted.getTableName(), testTbl.getName());
    assertEquals(converted.getDatabaseName(), testDb.getName());
    assertEquals(converted.getTableType(), testTbl.getTableType());
    assertEquals(converted.getOwner(), testTbl.getOwner());
    assertEquals(converted.getParameters(), testTbl.getParameters());
    assertColumnList(converted.getDataColumns(), testTbl.getStorageDescriptor().getColumns());
    assertColumnList(converted.getPartitionColumns(), testTbl.getPartitionKeys());
    assertStorage(converted.getStorage(), testTbl.getStorageDescriptor());
    assertEquals(converted.getViewOriginalText().get(), testTbl.getViewOriginalText());
    assertEquals(converted.getViewExpandedText().get(), testTbl.getViewExpandedText());
}
/**
 * Creates a table in Glue, translating Glue/AWS exceptions into the
 * corresponding Presto exceptions.
 *
 * @throws TableAlreadyExistsException if the table already exists
 * @throws SchemaNotFoundException if the database does not exist
 * @throws PrestoException (HIVE_METASTORE_ERROR) for other AWS service failures
 */
@Override
public void createTable(Table table, PrincipalPrivileges principalPrivileges)
{
    try {
        TableInput tableInput = GlueInputConverter.convertTable(table);
        CreateTableRequest request = new CreateTableRequest()
                .withDatabaseName(table.getDatabaseName())
                .withTableInput(tableInput);
        glueClient.createTable(request);
    }
    catch (AlreadyExistsException e) {
        throw new TableAlreadyExistsException(new SchemaTableName(table.getDatabaseName(), table.getTableName()));
    }
    catch (EntityNotFoundException e) {
        throw new SchemaNotFoundException(table.getDatabaseName());
    }
    catch (AmazonServiceException e) {
        throw new PrestoException(HIVE_METASTORE_ERROR, e);
    }
}
/**
 * Test helper: builds a dummy ORC partition for the given table and partition
 * name, tagged with fixed test version/query-id parameters.
 */
protected Partition createDummyPartition(Table table, String partitionName)
{
    SchemaTableName schemaTableName = new SchemaTableName(table.getDatabaseName(), table.getTableName());
    return Partition.builder()
            .setDatabaseName(table.getDatabaseName())
            .setTableName(table.getTableName())
            .setColumns(table.getDataColumns())
            .setValues(toPartitionValues(partitionName))
            .withStorage(storage -> storage
                    .setStorageFormat(fromHiveStorageFormat(HiveStorageFormat.ORC))
                    .setLocation(partitionTargetPath(schemaTableName, partitionName)))
            .setParameters(ImmutableMap.of(
                    PRESTO_VERSION_NAME, "testversion",
                    PRESTO_QUERY_ID_NAME, "20180101_123456_00001_x1y2z"))
            .build();
}