/**
 * Returns the partition key column handles for the given table.
 *
 * @param tableName schema-qualified table name
 * @return handles for the table's partition key columns (empty for unpartitioned tables)
 * @throws TableNotFoundException if the table does not exist in the metastore
 */
private List<HiveColumnHandle> getPartitionColumns(SchemaTableName tableName)
{
    // Bug fix: the original called Optional.get() unchecked, so a missing table
    // surfaced as an opaque NoSuchElementException. Throw TableNotFoundException
    // instead, matching how sibling methods in this file report a missing table.
    Table sourceTable = metastore.getTable(tableName.getSchemaName(), tableName.getTableName())
            .orElseThrow(() -> new TableNotFoundException(tableName));
    return getPartitionKeyColumnHandles(sourceTable);
}
// NOTE(review): this fragment appears truncated — the method body's opening brace and
// the code following the empty-map early return are not visible in this chunk, so only
// the visible behavior is documented: when the table is absent, empty statistics are
// returned rather than throwing.
public synchronized Map<String, PartitionStatistics> getPartitionStatistics(String databaseName, String tableName, Set<String> partitionNames) Optional<Table> table = getTable(databaseName, tableName); if (!table.isPresent()) { return ImmutableMap.of();
/**
 * Builds the Hive partition name (e.g. {@code col1=a/col2=b}) for the given
 * partition values, using the table's partition column order.
 *
 * @throws TableNotFoundException if the table does not exist
 */
private String getPartitionName(String databaseName, String tableName, List<String> partitionValues)
{
    SchemaTableName schemaTableName = new SchemaTableName(databaseName, tableName);
    Table table = getTable(databaseName, tableName)
            .orElseThrow(() -> new TableNotFoundException(schemaTableName));
    // Collect partition column names in declaration order to pair with the values
    ImmutableList.Builder<String> columnNames = ImmutableList.builder();
    for (Column column : table.getPartitionColumns()) {
        columnNames.add(column.getName());
    }
    return makePartName(columnNames.build(), partitionValues);
}
/**
 * Produces page-sink metadata for the given table: the table itself (if present)
 * plus a snapshot of all partitions modified within this transaction.
 * Returns empty metadata when the table does not exist.
 */
public synchronized HivePageSinkMetadata generatePageSinkMetadata(SchemaTableName schemaTableName)
{
    checkReadable();
    Optional<Table> table = getTable(schemaTableName.getSchemaName(), schemaTableName.getTableName());
    if (!table.isPresent()) {
        return new HivePageSinkMetadata(schemaTableName, Optional.empty(), ImmutableMap.of());
    }
    // Snapshot the partitions touched by this transaction; absent entries denote drops
    ImmutableMap.Builder<List<String>, Optional<Partition>> modifiedPartitions = ImmutableMap.builder();
    Map<List<String>, Action<PartitionAndMore>> actions = partitionActions.get(schemaTableName);
    if (actions != null) {
        actions.forEach((partitionValues, action) ->
                modifiedPartitions.put(partitionValues, getPartitionFromPartitionAction(action)));
    }
    return new HivePageSinkMetadata(schemaTableName, table, modifiedPartitions.build());
}
/**
 * Returns all column handles for the table, keyed by column name.
 *
 * @throws TableNotFoundException if the table does not exist
 */
@Override
public Map<String, ColumnHandle> getColumnHandles(ConnectorSession session, ConnectorTableHandle tableHandle)
{
    SchemaTableName tableName = schemaTableName(tableHandle);
    Table table = metastore.getTable(tableName.getSchemaName(), tableName.getTableName())
            .orElseThrow(() -> new TableNotFoundException(tableName));
    ImmutableMap.Builder<String, ColumnHandle> handles = ImmutableMap.builder();
    for (HiveColumnHandle column : hiveColumnHandles(table)) {
        handles.put(column.getName(), column);
    }
    return handles.build();
}
/**
 * Loads the table from the given metastore and verifies it is online
 * (not marked offline via Hive protect mode).
 *
 * @throws TableNotFoundException if the table does not exist
 */
private Table getTable(SemiTransactionalHiveMetastore metastore, SchemaTableName tableName)
{
    Table table = metastore.getTable(tableName.getSchemaName(), tableName.getTableName())
            .orElseThrow(() -> new TableNotFoundException(tableName));
    verifyOnline(tableName, Optional.empty(), getProtectMode(table), table.getParameters());
    return table;
}
/**
 * Rejects ALTER TABLE operations on tables whose schema is driven by an
 * external Avro schema URL, since the metastore columns are not authoritative.
 *
 * @throws TableNotFoundException if the table does not exist
 * @throws PrestoException (NOT_SUPPORTED) when the Avro schema url property is set
 */
private void failIfAvroSchemaIsSet(HiveTableHandle handle)
{
    SchemaTableName name = new SchemaTableName(handle.getSchemaName(), handle.getTableName());
    Table table = metastore.getTable(handle.getSchemaName(), handle.getTableName())
            .orElseThrow(() -> new TableNotFoundException(name));
    if (table.getParameters().get(AVRO_SCHEMA_URL_KEY) != null) {
        throw new PrestoException(NOT_SUPPORTED, "ALTER TABLE not supported when Avro schema url is set");
    }
}
/**
 * Returns the required write layout for inserting into the table: for bucketed
 * tables, a partitioning handle over the bucketing columns; empty otherwise.
 *
 * @throws TableNotFoundException if the table does not exist
 * @throws PrestoException (NOT_SUPPORTED) when writing to bucketed sorted tables is disabled
 */
@Override
public Optional<ConnectorNewTableLayout> getInsertLayout(ConnectorSession session, ConnectorTableHandle tableHandle)
{
    SchemaTableName tableName = ((HiveTableHandle) tableHandle).getSchemaTableName();
    Table table = metastore.getTable(tableName.getSchemaName(), tableName.getTableName())
            .orElseThrow(() -> new TableNotFoundException(tableName));
    Optional<HiveBucketHandle> bucketHandle = getHiveBucketHandle(table);
    if (!bucketHandle.isPresent()) {
        // Non-bucketed tables impose no insert layout
        return Optional.empty();
    }
    HiveBucketHandle bucketing = bucketHandle.get();
    HiveBucketProperty bucketProperty = table.getStorage().getBucketProperty()
            .orElseThrow(() -> new NoSuchElementException("Bucket property should be set"));
    if (!bucketProperty.getSortedBy().isEmpty() && !isSortedWritingEnabled(session)) {
        throw new PrestoException(NOT_SUPPORTED, "Writing to bucketed sorted Hive tables is disabled");
    }
    HivePartitioningHandle partitioningHandle = new HivePartitioningHandle(
            bucketing.getTableBucketCount(),
            bucketing.getColumns().stream()
                    .map(HiveColumnHandle::getHiveType)
                    .collect(Collectors.toList()),
            OptionalInt.of(bucketing.getTableBucketCount()));
    List<String> partitionColumns = bucketing.getColumns().stream()
            .map(HiveColumnHandle::getName)
            .collect(Collectors.toList());
    return Optional.of(new ConnectorNewTableLayout(partitioningHandle, partitionColumns));
}
@Override public HiveTableHandle getTableHandle(ConnectorSession session, SchemaTableName tableName) { requireNonNull(tableName, "tableName is null"); Optional<Table> table = metastore.getTable(tableName.getSchemaName(), tableName.getTableName()); if (!table.isPresent()) { return null; } if (isPartitionsSystemTable(tableName)) { // We must not allow $partitions table due to how permissions are checked in PartitionsAwareAccessControl.checkCanSelectFromTable() throw new PrestoException(StandardErrorCode.NOT_SUPPORTED, format("Unexpected table %s present in Hive metastore", tableName)); } verifyOnline(tableName, Optional.empty(), getProtectMode(table.get()), table.get().getParameters()); return new HiveTableHandle(tableName.getSchemaName(), tableName.getTableName()); }
/**
 * Returns the view definitions matching the given prefix. Only tables that are
 * Presto views (per HiveUtil.isPrestoView) are included; plain tables and
 * missing names are silently skipped.
 */
@Override
public Map<SchemaTableName, ConnectorViewDefinition> getViews(ConnectorSession session, SchemaTablePrefix prefix)
{
    // A fully-qualified prefix names exactly one candidate; otherwise list all views in scope
    List<SchemaTableName> tableNames = (prefix.getTableName() == null)
            ? listViews(session, prefix.getSchemaName())
            : ImmutableList.of(new SchemaTableName(prefix.getSchemaName(), prefix.getTableName()));
    ImmutableMap.Builder<SchemaTableName, ConnectorViewDefinition> views = ImmutableMap.builder();
    for (SchemaTableName name : tableNames) {
        Optional<Table> table = metastore.getTable(name.getSchemaName(), name.getTableName());
        if (!table.isPresent() || !HiveUtil.isPrestoView(table.get())) {
            continue;
        }
        Table view = table.get();
        views.put(name, new ConnectorViewDefinition(
                name,
                Optional.ofNullable(view.getOwner()),
                decodeViewData(view.getViewOriginalText().get())));
    }
    return views.build();
}
/**
 * Authorizes DROP TABLE: requires the feature to be enabled, the table to
 * exist, and the session user to be the table owner. Each deny* call throws,
 * so control never proceeds past a failed check.
 */
@Override
public void checkCanDropTable(ConnectorTransactionHandle transaction, Identity identity, SchemaTableName tableName)
{
    if (!allowDropTable) {
        denyDropTable(tableName.toString());
    }
    Optional<Table> table = metastoreProvider.apply((HiveTransactionHandle) transaction)
            .getTable(tableName.getSchemaName(), tableName.getTableName());
    if (!table.isPresent()) {
        denyDropTable(tableName.toString(), "Table not found");
    }
    // table is present here: denyDropTable above throws AccessDeniedException
    if (!identity.getUser().equals(table.get().getOwner())) {
        denyDropTable(tableName.toString(), "Owner of the table is different from session user");
    }
}
/**
 * Deletes all data files of an unpartitioned managed table, leaving the table
 * definition intact. The actual deletion is deferred to commit time via
 * setExclusive.
 *
 * @throws TableNotFoundException if the table does not exist
 * @throws PrestoException (NOT_SUPPORTED) for non-managed tables
 * @throws IllegalArgumentException if the table is partitioned
 */
public synchronized void truncateUnpartitionedTable(ConnectorSession session, String databaseName, String tableName)
{
    checkReadable();
    SchemaTableName schemaTableName = new SchemaTableName(databaseName, tableName);
    Table table = getTable(databaseName, tableName)
            .orElseThrow(() -> new TableNotFoundException(schemaTableName));
    if (!table.getTableType().equals(MANAGED_TABLE.toString())) {
        throw new PrestoException(NOT_SUPPORTED, "Cannot delete from non-managed Hive table");
    }
    if (!table.getPartitionColumns().isEmpty()) {
        throw new IllegalArgumentException("Table is partitioned");
    }
    Path path = new Path(table.getStorage().getLocation());
    HdfsContext context = new HdfsContext(session, databaseName, tableName);
    // Defer the recursive delete to commit; any undeletable items fail the operation
    setExclusive((delegate, hdfsEnvironment) -> {
        RecursiveDeleteResult result = recursiveDeleteFiles(hdfsEnvironment, context, path, ImmutableList.of(""), false);
        if (!result.getNotDeletedEligibleItems().isEmpty()) {
            throw new PrestoException(HIVE_FILESYSTEM_ERROR, format(
                    "Error deleting from unpartitioned table %s. These items can not be deleted: %s",
                    schemaTableName,
                    result.getNotDeletedEligibleItems()));
        }
    });
}
/**
 * Drops the table, verifying existence first so a missing table surfaces as
 * TableNotFoundException rather than failing inside the metastore call.
 */
@Override
public void dropTable(ConnectorSession session, ConnectorTableHandle tableHandle)
{
    HiveTableHandle handle = (HiveTableHandle) tableHandle;
    boolean exists = metastore.getTable(handle.getSchemaName(), handle.getTableName()).isPresent();
    if (!exists) {
        throw new TableNotFoundException(schemaTableName(tableHandle));
    }
    metastore.dropTable(session, handle.getSchemaName(), handle.getTableName());
}
public static List<String> listAllDataPaths(SemiTransactionalHiveMetastore metastore, String schemaName, String tableName) { ImmutableList.Builder<String> locations = ImmutableList.builder(); Table table = metastore.getTable(schemaName, tableName).get(); if (table.getStorage().getLocation() != null) { // For partitioned table, there should be nothing directly under this directory. // But including this location in the set makes the directory content assert more // extensive, which is desirable. locations.add(table.getStorage().getLocation()); } Optional<List<String>> partitionNames = metastore.getPartitionNames(schemaName, tableName); if (partitionNames.isPresent()) { metastore.getPartitionsByNames(schemaName, tableName, partitionNames.get()).values().stream() .map(Optional::get) .map(partition -> partition.getStorage().getLocation()) .filter(location -> !location.startsWith(table.getStorage().getLocation())) .forEach(locations::add); } return locations.build(); }
// NOTE(review): fragment — the enclosing method's signature and the code after this
// existence check are not visible in this chunk. Visible behavior: load the table
// and fail fast with TableNotFoundException when it is absent.
Optional<Table> table = metastore.getTable(tableName.getSchemaName(), tableName.getTableName()); if (!table.isPresent()) { throw new TableNotFoundException(tableName);
// NOTE(review): fragment — the enclosing method (presumably a createView-style
// operation; TODO confirm against the full file) is not fully visible in this chunk.
// Visible behavior: build the creator's initial privilege set, then check whether a
// table already exists under the view name; the branch body that handles a
// non-replaceable or non-view conflict is cut off.
PrincipalPrivileges principalPrivileges = buildInitialPrivilegeSet(session.getUser()); Optional<Table> existing = metastore.getTable(viewName.getSchemaName(), viewName.getTableName()); if (existing.isPresent()) { if (!replace || !HiveUtil.isPrestoView(existing.get())) {
@Override public OptionalLong metadataDelete(ConnectorSession session, ConnectorTableHandle tableHandle, ConnectorTableLayoutHandle tableLayoutHandle) { HiveTableHandle handle = (HiveTableHandle) tableHandle; HiveTableLayoutHandle layoutHandle = (HiveTableLayoutHandle) tableLayoutHandle; Optional<Table> table = metastore.getTable(handle.getSchemaName(), handle.getTableName()); if (!table.isPresent()) { throw new TableNotFoundException(handle.getSchemaTableName()); } if (table.get().getPartitionColumns().isEmpty()) { metastore.truncateUnpartitionedTable(session, handle.getSchemaName(), handle.getTableName()); } else { for (HivePartition hivePartition : getOrComputePartitions(layoutHandle, session, tableHandle)) { metastore.dropPartition(session, handle.getSchemaName(), handle.getTableName(), toPartitionValues(hivePartition.getPartitionId())); } } // it is too expensive to determine the exact number of deleted rows return OptionalLong.empty(); }
/**
 * Computes the target write path for the named partition of an existing table,
 * using a throwaway transaction.
 */
protected String partitionTargetPath(SchemaTableName schemaTableName, String partitionName)
{
    try (Transaction transaction = newTransaction()) {
        ConnectorSession session = newSession();
        SemiTransactionalHiveMetastore metastore = transaction.getMetastore(schemaTableName.getSchemaName());
        // Test helper: table is expected to exist, so the unchecked get() is acceptable here
        Table table = metastore.getTable(schemaTableName.getSchemaName(), schemaTableName.getTableName()).get();
        LocationService locationService = getLocationService();
        LocationHandle locationHandle = locationService.forExistingTable(metastore, session, table);
        return locationService.getPartitionWriteInfo(locationHandle, Optional.empty(), partitionName)
                .getTargetPath()
                .toString();
    }
}
// NOTE(review): fragment — the receiver expression and assignment target precede this
// chunk. Visible behavior: the table is required to exist; absence fails the test via
// AssertionError (this idiom suggests test code — confirm against the full file).
.getTable(tableName.getSchemaName(), tableName.getTableName()) .orElseThrow(AssertionError::new);
private void alterBucketProperty(SchemaTableName schemaTableName, Optional<HiveBucketProperty> bucketProperty) { try (Transaction transaction = newTransaction()) { ConnectorSession session = newSession(); String tableOwner = session.getUser(); String schemaName = schemaTableName.getSchemaName(); String tableName = schemaTableName.getTableName(); Optional<Table> table = transaction.getMetastore(schemaName).getTable(schemaName, tableName); Table.Builder tableBuilder = Table.builder(table.get()); tableBuilder.getStorageBuilder().setBucketProperty(bucketProperty); PrincipalPrivileges principalPrivileges = testingPrincipalPrivilege(tableOwner); // hack: replaceView can be used as replaceTable despite its name transaction.getMetastore(schemaName).replaceView(schemaName, tableName, tableBuilder.build(), principalPrivileges); transaction.commit(); } }