/**
 * Update a resource with the given metadata.
 *
 * @param requestContext The request context
 * @param tableInfo      The resource metadata
 */
@Override
public void update(final ConnectorRequestContext requestContext, final TableInfo tableInfo) {
    final Table existingTable = hiveMetacatConverters.fromTableInfo(get(requestContext, tableInfo.getName()));
    update(requestContext, existingTable, tableInfo);
}
final QualifiedName name = tableInfo.getName();
final String tableName = (name != null) ? name.getTableName() : "";
final String databaseName = (name != null) ? name.getDatabaseName() : "";
final StorageInfo storageInfo = tableInfo.getSerde();
final String owner = (storageInfo != null && storageInfo.getOwner() != null) ? storageInfo.getOwner() : "";
final AuditInfo auditInfo = tableInfo.getAudit();
final int createTime = (auditInfo != null && auditInfo.getCreatedDate() != null)
    ? dateToEpochSeconds(auditInfo.getCreatedDate()) : 0;
final Map<String, String> params = (tableInfo.getMetadata() != null) ? tableInfo.getMetadata() : new HashMap<>();

final List<FieldInfo> fields = tableInfo.getFields();
List<FieldSchema> partitionFields = Collections.emptyList();
List<FieldSchema> nonPartitionFields = Collections.emptyList();
// Reconstructed: the fragment declared these lists but the split of fields into
// partition keys and data columns was missing; this follows the same pattern as
// the converter snippets below. metacatToHiveField(...) is the converter helper
// seen in those snippets.
if (fields != null) {
    partitionFields = new ArrayList<>(fields.size());
    nonPartitionFields = new ArrayList<>(fields.size());
    for (final FieldInfo fieldInfo : fields) {
        if (fieldInfo.isPartitionKey()) {
            partitionFields.add(metacatToHiveField(fieldInfo));
        } else {
            nonPartitionFields.add(metacatToHiveField(fieldInfo));
        }
    }
}

final ViewInfo viewInfo = tableInfo.getView();
final String tableType = (null != viewInfo && !Strings.isNullOrEmpty(viewInfo.getViewOriginalText()))
    ? TableType.VIRTUAL_VIEW.name() : TableType.EXTERNAL_TABLE.name();
// Reconstructed: the head of this Hive metastore Table constructor call was
// missing from the fragment; the two zeros are lastAccessTime and retention,
// and fromStorageInfo(...) is assumed to build the StorageDescriptor.
return new Table(tableName,
    databaseName,
    owner,
    createTime,
    0,
    0,
    fromStorageInfo(storageInfo, nonPartitionFields),
    partitionFields,
    params,
    tableType.equals(TableType.VIRTUAL_VIEW.name()) ? tableInfo.getView().getViewOriginalText() : null,
    tableType.equals(TableType.VIRTUAL_VIEW.name()) ? tableInfo.getView().getViewExpandedText() : null,
    tableType);
/**
 * {@inheritDoc}
 */
@Override
protected void setTableInfoDetails(final Connection connection, final TableInfo tableInfo) {
    final QualifiedName tableName = tableInfo.getName();
    try (final PreparedStatement statement = connection.prepareStatement(SQL_GET_AUDIT_INFO)) {
        statement.setString(1, tableName.getDatabaseName());
        statement.setString(2, tableName.getTableName());
        try (final ResultSet resultSet = statement.executeQuery()) {
            if (resultSet.next()) {
                final AuditInfo auditInfo = AuditInfo.builder()
                    .createdDate(resultSet.getDate(COL_CREATE_TIME))
                    .lastModifiedDate(resultSet.getDate(COL_UPDATE_TIME))
                    .build();
                tableInfo.setAudit(auditInfo);
            }
        }
    } catch (final Exception ignored) {
        log.info("Ignoring. Error getting the audit info for table {}", tableName);
    }
}
@Override
public void create(@Nonnull final ConnectorRequestContext context, @Nonnull final TableInfo tableInfo) {
    log.debug("Start: Create table {}", tableInfo.getName());
    Preconditions.checkArgument(tableInfo.getSerde() == null
            || !Strings.isNullOrEmpty(tableInfo.getSerde().getOwner()),
        "Table owner is null or empty");
    final QualifiedName tableName = tableInfo.getName();
    if (tableDao.getBySourceDatabaseTableName(catalogName, tableName.getDatabaseName(),
            tableName.getTableName()) != null) {
        throw new TableAlreadyExistsException(tableName);
    }
    final Database database = databaseDao.getBySourceDatabaseName(catalogName, tableName.getDatabaseName());
    if (database == null) {
        throw new DatabaseNotFoundException(QualifiedName.ofDatabase(catalogName, tableName.getDatabaseName()));
    }
    tableDao.save(infoConverter.fromTableInfo(database, tableInfo));
    log.debug("End: Create table {}", tableInfo.getName());
}
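The precondition above rejects a table whose serde is present but whose owner is blank, while a table with no serde at all passes the check. A minimal usage sketch of that contract; the service and context variables are hypothetical, QualifiedName.ofTable is assumed by analogy with ofDatabase/ofPartition elsewhere in these snippets, and StorageInfo is assumed to have the same Lombok-style builder as AuditInfo and TableInfo:

// Hypothetical usage sketch of the owner precondition in create().
final TableInfo noSerde = TableInfo.builder()
    .name(QualifiedName.ofTable("prod", "mydb", "mytable"))
    .build();
service.create(context, noSerde); // accepted: no serde means the owner check is skipped

final TableInfo blankOwner = TableInfo.builder()
    .name(QualifiedName.ofTable("prod", "mydb", "mytable"))
    .serde(StorageInfo.builder().owner("").build())
    .build();
service.create(context, blankOwner); // throws IllegalArgumentException("Table owner is null or empty")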
final TableInfo result = TableInfo.builder().name(name).fields(fields.build()).build();
setTableInfoDetails(connection, result);
log.debug("Finished getting table metadata for qualified name {} for request {}", name, context);
if (tableInfo.getMetadata() != null) {
    table.getParameters().putAll(tableInfo.getMetadata());
}

// Reconstructed: the ternary below was missing its assignment target.
final String location = tableInfo.getSerde() == null ? null : tableInfo.getSerde().getUri();
if (location != null) {
    sd.setLocation(location);
} else if (sd.getLocation() == null) {
    // Default the table location to <database location>/<table name>.
    final String locationStr = hiveConnectorDatabaseService.get(requestContext,
        QualifiedName.ofDatabase(tableInfo.getName().getCatalogName(),
            tableInfo.getName().getDatabaseName())).getUri();
    final Path databasePath = new Path(locationStr);
    final Path targetPath = new Path(databasePath, tableInfo.getName().getTableName());
    sd.setLocation(targetPath.toString());
}

serdeInfo.setName(tableInfo.getName().getTableName());
final StorageInfo storageInfo = tableInfo.getSerde();
if (storageInfo != null) {
    if (!Strings.isNullOrEmpty(storageInfo.getSerializationLib())) {
        serdeInfo.setSerializationLib(storageInfo.getSerializationLib());
    }
}

// Reconstructed completion: split the converted fields into partition keys and
// data columns, as in the converter snippets; the two list names are assumed.
final List<FieldSchema> partitionFields = new ArrayList<>();
final List<FieldSchema> nonPartitionFields = new ArrayList<>();
if (tableInfo.getFields() != null) {
    for (final FieldInfo column : tableInfo.getFields()) {
        final FieldSchema field = hiveMetacatConverters.metacatToHiveField(column);
        if (column.isPartitionKey()) {
            partitionFields.add(field);
        } else {
            nonPartitionFields.add(field);
        }
    }
}
@Override
public PartitionInfo toPartitionInfo(final TableInfo tableInfo, final Partition partition) {
    final QualifiedName tableName = tableInfo.getName();
    final StorageInfo storageInfo = tableInfo.getSerde();
    storageInfo.setUri(partition.getUri());
    final AuditInfo auditInfo = AuditInfo.builder()
        .createdDate(partition.getCreatedDate())
        .lastModifiedDate(partition.getLastUpdatedDate())
        .build();
    final AuditInfo tableAuditInfo = tableInfo.getAudit();
    if (tableAuditInfo != null) {
        auditInfo.setCreatedBy(tableAuditInfo.getCreatedBy());
        auditInfo.setLastModifiedBy(tableAuditInfo.getLastModifiedBy());
    }
    return PartitionInfo.builder()
        .name(QualifiedName.ofPartition(tableName.getCatalogName(), tableName.getDatabaseName(),
            tableName.getTableName(), partition.getName()))
        .serde(storageInfo)
        .auditInfo(auditInfo)
        .build();
}
        final TableInfo tableInfo, final PartitionListRequest partitionsRequest) {
    final QualifiedName tableName = tableInfo.getName();
    final com.netflix.iceberg.Table icebergTable = this.icebergTableHandler.getIcebergTable(tableName,
        HiveTableUtil.getIcebergTableMetadataLocation(tableInfo));
    final AuditInfo tableAuditInfo = tableInfo.getAudit();
final QualifiedName tableName = tableInfo.getName();
final Map<String, String> newTableMetadata = tableInfo.getMetadata();
final List<FieldInfo> fields = tableInfo.getFields();
List<FieldSchema> fieldSchemas = Collections.emptyList();
if (notNull(fields)) {
    // Reconstructed body: convert the metacat fields to Hive field schemas.
    fieldSchemas = fields.stream()
        .map(this::metacatToHiveField)
        .collect(Collectors.toList());
}

// Reconstructed head: the first clause of this condition was missing; a guard on
// sd.getSerdeInfo() is assumed, since the third clause dereferences it.
if (notNull(sd.getSerdeInfo())
    && notNull(tableInfo.getSerde())
    && Strings.isNullOrEmpty(sd.getSerdeInfo().getSerializationLib())) {
    sd.getSerdeInfo().setSerializationLib(tableInfo.getSerde().getSerializationLib());
}
private String getNameFromPartVals(final TableInfo tableInfo, final List<String> partVals) {
    final List<String> partitionKeys = getPartitionKeys(tableInfo.getFields());
    if (partitionKeys.size() != partVals.size()) {
        throw new IllegalArgumentException("Not the same number of partition columns and partition values");
    }
    final StringBuilder builder = new StringBuilder();
    for (int i = 0; i < partitionKeys.size(); i++) {
        if (builder.length() > 0) {
            builder.append('/');
        }
        builder.append(partitionKeys.get(i))
            .append('=')
            .append(partVals.get(i));
    }
    return builder.toString();
}
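For concreteness, an illustration of the name this helper produces; the partition keys and values below are made up:

// Illustration only: for a table partitioned by (dateint, hour),
// getNameFromPartVals(tableInfo, Arrays.asList("20240101", "00"))
// returns the Hive-style name "dateint=20240101/hour=00";
// mismatched list sizes throw IllegalArgumentException.
final String partName = getNameFromPartVals(tableInfo, Arrays.asList("20240101", "00"));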
/**
 * getTable.
 *
 * @param requestContext The request context
 * @param name           The qualified name of the resource to get
 * @return table info
 */
@Override
public TableInfo get(final ConnectorRequestContext requestContext, final QualifiedName name) {
    final TableInfo info = super.get(requestContext, name);
    if (!connectorContext.getConfig().isIcebergEnabled() || !HiveTableUtil.isIcebergTable(info)) {
        return info;
    }
    final String tableLoc = HiveTableUtil.getIcebergTableMetadataLocation(info);
    final com.netflix.iceberg.Table icebergTable = this.icebergTableHandler.getIcebergTable(name, tableLoc);
    return this.hiveMetacatConverters.fromIcebergTableToTableInfo(name, icebergTable, tableLoc, info.getAudit());
}
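HiveTableUtil.isIcebergTable is not shown in these snippets. A plausible sketch, assuming the common convention of tagging Iceberg tables through a table-type entry in the table parameters; the parameter name and value here are assumptions, not verified source:

// Sketch under assumptions: an Iceberg table is conventionally identified by a
// "table_type" metadata entry equal to "ICEBERG".
public static boolean isIcebergTable(final TableInfo tableInfo) {
    final Map<String, String> metadata = tableInfo.getMetadata();
    return metadata != null && "ICEBERG".equalsIgnoreCase(metadata.get("table_type"));
}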
/**
 * Creates location.
 *
 * @param tableInfo table info
 * @return location
 */
public Location toLocation(final TableInfo tableInfo) {
    final Location location = fromStorageInfo(tableInfo.getSerde());
    final Schema schema = new Schema();
    schema.setLocation(location);
    schema.setFields(toFields(tableInfo, schema));
    location.setSchema(schema);
    return location;
}
/**
 * Get the iceberg table metadata location.
 *
 * @param tableInfo table info
 * @return the iceberg table metadata location
 */
public static String getIcebergTableMetadataLocation(final TableInfo tableInfo) {
    return tableInfo.getMetadata().get(DirectSqlTable.PARAM_METADATA_LOCATION);
}
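Note that this helper dereferences getMetadata() without a null check, unlike the defensive style used in the converter snippets above. A hedged null-safe variant:

// Defensive variant (sketch): returns null rather than throwing a
// NullPointerException when the table carries no metadata map.
public static String getIcebergTableMetadataLocation(final TableInfo tableInfo) {
    final Map<String, String> metadata = tableInfo.getMetadata();
    return metadata == null ? null : metadata.get(DirectSqlTable.PARAM_METADATA_LOCATION);
}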
/**
 * Converts to PartitionInfo.
 *
 * @param tableInfo table info
 * @param partition connector partition
 * @return Metacat partition info
 */
@Override
public PartitionInfo toPartitionInfo(
    final TableInfo tableInfo,
    final Partition partition
) {
    final QualifiedName tableName = tableInfo.getName();
    final QualifiedName partitionName = QualifiedName.ofPartition(tableName.getCatalogName(),
        tableName.getDatabaseName(), tableName.getTableName(),
        getNameFromPartVals(tableInfo, partition.getValues()));
    final String owner = notNull(tableInfo.getSerde()) ? tableInfo.getSerde().getOwner() : "";
    final AuditInfo auditInfo = AuditInfo.builder()
        .createdDate(epochSecondsToDate(partition.getCreateTime()))
        .lastModifiedDate(epochSecondsToDate(partition.getLastAccessTime()))
        .build();
    return PartitionInfo.builder()
        .serde(toStorageInfo(partition.getSd(), owner))
        .name(partitionName)
        .auditInfo(auditInfo)
        .metadata(partition.getParameters())
        .build();
}