/**
 * Builds a synthetic URI identifying an Iceberg table partition.
 *
 * <p>The URI has the shape {@code scheme://database.table/partitionName/snapshot_time=...},
 * where the scheme is taken from the connector configuration. When a data timestamp is
 * available it is rendered as epoch seconds; otherwise the partition name's hash code is
 * substituted — presumably to keep the URI deterministic per partition (TODO confirm with
 * callers that consumers treat snapshot_time as opaque in that case).
 *
 * @param databaseName        database name
 * @param tableName           table name
 * @param partitionName       partition name
 * @param dataTimestampMillis partition data timestamp in epoch milliseconds; may be null
 * @return the formatted partition URI
 */
private String getIcebergPartitionURI(final String databaseName,
                                      final String tableName,
                                      final String partitionName,
                                      @Nullable final Long dataTimestampMillis) {
    return String.format("%s://%s.%s/%s/snapshot_time=%s",
        context.getConfig().getIcebergPartitionUriScheme(),
        databaseName, tableName, partitionName,
        // no timestamp -> fall back to the partition name's hash; otherwise millis -> seconds
        (dataTimestampMillis == null) ? partitionName.hashCode()
            : Instant.ofEpochMilli(dataTimestampMillis).getEpochSecond());
}
}
/**
 * Constructs the URI string for an Iceberg partition.
 *
 * <p>Format: {@code scheme://database.table/partitionName/snapshot_time=<value>}. The
 * snapshot_time component is the timestamp converted to epoch seconds when one is supplied,
 * or the partition name's hash code when it is not.
 *
 * @param databaseName        database name
 * @param tableName           table name
 * @param partitionName       partition name
 * @param dataTimestampMillis partition data timestamp in epoch milliseconds; may be null
 * @return the formatted partition URI
 */
private String getIcebergPartitionURI(final String databaseName,
                                      final String tableName,
                                      final String partitionName,
                                      @Nullable final Long dataTimestampMillis) {
    // Pick the snapshot_time component up front so the format call stays flat.
    final Object snapshotTime;
    if (dataTimestampMillis == null) {
        snapshotTime = partitionName.hashCode();
    } else {
        snapshotTime = Instant.ofEpochMilli(dataTimestampMillis).getEpochSecond();
    }
    return String.format("%s://%s.%s/%s/snapshot_time=%s",
        context.getConfig().getIcebergPartitionUriScheme(),
        databaseName, tableName, partitionName, snapshotTime);
}
}
/**
 * Creates the thread service manager bean for the hive connector.
 *
 * <p>The maximum thread count comes from the connector configuration; the literal
 * {@code 1000} and the name {@code "hive"} are passed straight through — their exact
 * semantics (presumably queue capacity and thread-name prefix) are defined by
 * {@code ThreadServiceManager}; confirm against its constructor.
 *
 * @param connectorContext connector config
 * @return threadServiceManager
 */
@Bean
public ThreadServiceManager threadServiceManager(final ConnectorContext connectorContext) {
    return new ThreadServiceManager(connectorContext.getRegistry(),
        connectorContext.getConfig().getServiceMaxNumberOfThreads(), 1000, "hive");
}
}
/**
 * Thread service manager bean definition.
 *
 * @param connectorContext connector config
 * @return threadServiceManager
 */
@Bean
public ThreadServiceManager threadServiceManager(final ConnectorContext connectorContext) {
    // Thread-pool sizing is driven by the connector configuration.
    final int maxThreads = connectorContext.getConfig().getServiceMaxNumberOfThreads();
    return new ThreadServiceManager(connectorContext.getRegistry(), maxThreads, 1000, "hive");
}
}
/**
 * Re-reads the table metadata from the stored metadata location, retrying up to the
 * configured number of attempts, then returns the (now current) metadata.
 *
 * @return the refreshed table metadata
 */
@Override
public TableMetadata refresh() {
    refreshFromMetadataLocation(this.location,
        connectorContext.getConfig().getIcebergRefreshFromMetadataLocationRetryNumber());
    return current();
}
/**
 * {@inheritDoc}.
 *
 * <p>When Iceberg support is enabled and the table is an Iceberg table, partition names
 * are extracted from the Iceberg partition infos; otherwise the call is delegated to the
 * direct-SQL partition reader.
 */
@Override
public List<String> getPartitionKeys(final ConnectorRequestContext requestContext,
                                     final QualifiedName tableName,
                                     final PartitionListRequest partitionsRequest,
                                     final TableInfo tableInfo) {
    return context.getConfig().isIcebergEnabled() && HiveTableUtil.isIcebergTable(tableInfo)
        ? getIcebergPartitionInfos(tableInfo, partitionsRequest)
            .stream().map(info -> info.getName().getPartitionName()).collect(Collectors.toList())
        : directSqlGetPartition.getPartitionKeys(requestContext, tableName, partitionsRequest);
}
/**
 * {@inheritDoc}.
 *
 * <p>Iceberg tables (when enabled) are answered from the Iceberg partition infos; all
 * other tables go through the direct-SQL path.
 */
@Override
public List<String> getPartitionKeys(final ConnectorRequestContext requestContext,
                                     final QualifiedName tableName,
                                     final PartitionListRequest partitionsRequest,
                                     final TableInfo tableInfo) {
    // Guard clause: serve Iceberg tables from the Iceberg metadata.
    if (context.getConfig().isIcebergEnabled() && HiveTableUtil.isIcebergTable(tableInfo)) {
        return getIcebergPartitionInfos(tableInfo, partitionsRequest).stream()
            .map(partitionInfo -> partitionInfo.getName().getPartitionName())
            .collect(Collectors.toList());
    }
    return directSqlGetPartition.getPartitionKeys(requestContext, tableName, partitionsRequest);
}
/**
 * Reloads table metadata from this table's metadata location and returns the result.
 *
 * @return the current table metadata after the reload
 */
@Override
public TableMetadata refresh() {
    // Retry budget for reading the metadata file is configurable.
    final int retryCount =
        connectorContext.getConfig().getIcebergRefreshFromMetadataLocationRetryNumber();
    refreshFromMetadataLocation(this.location, retryCount);
    return current();
}
/**
 * Constructor.
 *
 * @param connectorContext connector context
 * @param jdbcTemplate JDBC template
 * @param sequenceGeneration sequence generator
 * @param fastServiceMetric fast service metric
 */
public DirectSqlSavePartition(final ConnectorContext connectorContext,
                              final JdbcTemplate jdbcTemplate,
                              final SequenceGeneration sequenceGeneration,
                              final HiveConnectorFastServiceMetric fastServiceMetric) {
    // Pull the registry, catalog name, and metastore batch size out of the context once.
    this.registry = connectorContext.getRegistry();
    this.catalogName = connectorContext.getCatalogName();
    this.batchSize = connectorContext.getConfig().getHiveMetastoreBatchSize();
    this.jdbcTemplate = jdbcTemplate;
    this.sequenceGeneration = sequenceGeneration;
    this.fastServiceMetric = fastServiceMetric;
}
/**
 * Constructor.
 *
 * @param connectorContext connector context
 * @param jdbcTemplate JDBC template
 * @param sequenceGeneration sequence generator
 * @param fastServiceMetric fast service metric
 */
public DirectSqlSavePartition(final ConnectorContext connectorContext,
                              final JdbcTemplate jdbcTemplate,
                              final SequenceGeneration sequenceGeneration,
                              final HiveConnectorFastServiceMetric fastServiceMetric) {
    // Collaborators handed in directly.
    this.jdbcTemplate = jdbcTemplate;
    this.sequenceGeneration = sequenceGeneration;
    this.fastServiceMetric = fastServiceMetric;
    // Values derived from the connector context.
    this.registry = connectorContext.getRegistry();
    this.catalogName = connectorContext.getCatalogName();
    this.batchSize = connectorContext.getConfig().getHiveMetastoreBatchSize();
}
/**
 * {@inheritDoc}.
 *
 * <p>When Iceberg support is enabled and the table is an Iceberg table, partition infos
 * come from the Iceberg metadata; otherwise the request is delegated to the direct-SQL
 * partition reader.
 */
@Override
public List<PartitionInfo> getPartitions(
    final ConnectorRequestContext requestContext,
    final QualifiedName tableName,
    final PartitionListRequest partitionsRequest,
    final TableInfo tableInfo) {
    return context.getConfig().isIcebergEnabled() && HiveTableUtil.isIcebergTable(tableInfo)
        ? getIcebergPartitionInfos(tableInfo, partitionsRequest)
        : directSqlGetPartition.getPartitions(requestContext, tableName, partitionsRequest);
}
/**
 * {@inheritDoc}.
 *
 * <p>Iceberg tables (when enabled) are resolved via the Iceberg partition infos; every
 * other table uses the direct-SQL path.
 */
@Override
public List<PartitionInfo> getPartitions(
    final ConnectorRequestContext requestContext,
    final QualifiedName tableName,
    final PartitionListRequest partitionsRequest,
    final TableInfo tableInfo) {
    // Guard clause: Iceberg tables never hit the direct-SQL reader.
    if (context.getConfig().isIcebergEnabled() && HiveTableUtil.isIcebergTable(tableInfo)) {
        return getIcebergPartitionInfos(tableInfo, partitionsRequest);
    }
    return directSqlGetPartition.getPartitions(requestContext, tableName, partitionsRequest);
}
/**
 * {@inheritDoc}.
 *
 * <p>Not supported for Iceberg tables: when Iceberg support is enabled and the table is an
 * Iceberg table, a {@code MetacatNotSupportedException} is thrown. All other tables are
 * delegated to the direct-SQL partition reader.
 */
@Override
public List<String> getPartitionUris(
    final ConnectorRequestContext requestContext,
    final QualifiedName tableName,
    final PartitionListRequest partitionsRequest,
    final TableInfo tableInfo
) {
    if (context.getConfig().isIcebergEnabled() && HiveTableUtil.isIcebergTable(tableInfo)) {
        throw new MetacatNotSupportedException("IcebergTable Unsupported Operation!");
    }
    return directSqlGetPartition.getPartitionUris(requestContext, tableName, partitionsRequest);
}
/**
 * {@inheritDoc}.
 *
 * <p>Rejects Iceberg tables (the operation is unsupported for them) and delegates all
 * other tables to the direct-SQL partition reader.
 */
@Override
public List<String> getPartitionUris(
    final ConnectorRequestContext requestContext,
    final QualifiedName tableName,
    final PartitionListRequest partitionsRequest,
    final TableInfo tableInfo
) {
    final boolean isIceberg =
        context.getConfig().isIcebergEnabled() && HiveTableUtil.isIcebergTable(tableInfo);
    if (isIceberg) {
        throw new MetacatNotSupportedException("IcebergTable Unsupported Operation!");
    }
    return directSqlGetPartition.getPartitionUris(requestContext, tableName, partitionsRequest);
}
/**
 * Number of partitions for the given table.
 *
 * <p>Not supported for Iceberg tables: when Iceberg support is enabled and the table is an
 * Iceberg table, a {@code MetacatNotSupportedException} is thrown. All other tables are
 * delegated to the direct-SQL partition reader.
 *
 * @param tableName tableName
 * @return Number of partitions
 */
@Override
public int getPartitionCount(
    final ConnectorRequestContext requestContext,
    final QualifiedName tableName,
    final TableInfo tableInfo
) {
    if (context.getConfig().isIcebergEnabled() && HiveTableUtil.isIcebergTable(tableInfo)) {
        throw new MetacatNotSupportedException("IcebergTable Unsupported Operation!");
    }
    return directSqlGetPartition.getPartitionCount(requestContext, tableName);
}
/**
 * Returns the number of partitions for the given table.
 *
 * <p>Iceberg tables are rejected with a {@code MetacatNotSupportedException}; everything
 * else goes through the direct-SQL partition reader.
 *
 * @param tableName tableName
 * @return Number of partitions
 */
@Override
public int getPartitionCount(
    final ConnectorRequestContext requestContext,
    final QualifiedName tableName,
    final TableInfo tableInfo
) {
    final boolean isIceberg =
        context.getConfig().isIcebergEnabled() && HiveTableUtil.isIcebergTable(tableInfo);
    if (isIceberg) {
        throw new MetacatNotSupportedException("IcebergTable Unsupported Operation!");
    }
    return directSqlGetPartition.getPartitionCount(requestContext, tableName);
}
/**
 * get iceberg partition map.
 *
 * <p>With a filter, the scan is filtered and capped at the configured maximum partition
 * threshold, throwing if the limit is exceeded ({@code throwIfLimited}). Without a filter,
 * the top N entries are returned (N = configured summary fetch size) and the limit is NOT
 * enforced with an exception — note the asymmetry is deliberate in the code as written.
 *
 * @param icebergTable iceberg table
 * @param filter iceberg filter expression
 * @return scan summary map
 */
public Map<String, ScanSummary.PartitionMetrics> getPartitionMetricsMap(final Table icebergTable,
                                                                        @Nullable final Expression filter) {
    return (filter != null)
        ? ScanSummary.of(icebergTable.newScan().filter(filter))
            .limit(connectorContext.getConfig().getMaxPartitionsThreshold())
            .throwIfLimited()
            .build()
        : ScanSummary.of(icebergTable.newScan())
            //the top x records
            .limit(connectorContext.getConfig().getIcebergTableSummaryFetchSize())
            .build();
}
}
/** * get iceberg partition map. * * @param icebergTable iceberg table * @param filter iceberg filter expression * @return scan summary map */ public Map<String, ScanSummary.PartitionMetrics> getPartitionMetricsMap(final Table icebergTable, @Nullable final Expression filter) { return (filter != null) ? ScanSummary.of(icebergTable.newScan().filter(filter)) .limit(connectorContext.getConfig().getMaxPartitionsThreshold()) .throwIfLimited() .build() : ScanSummary.of(icebergTable.newScan()) //the top x records .limit(connectorContext.getConfig().getIcebergTableSummaryFetchSize()) .build(); } }
/**
 * getTable.
 *
 * <p>Fetches the table via the parent implementation; if Iceberg support is enabled and
 * the table is an Iceberg table, the result is re-built from the Iceberg table metadata
 * at the table's metadata location (preserving the original audit info).
 *
 * @param requestContext The request context
 * @param name The qualified name of the resource to get
 * @return table dto
 */
@Override
public TableInfo get(final ConnectorRequestContext requestContext, final QualifiedName name) {
    final TableInfo info = super.get(requestContext, name);
    // Plain Hive tables (or Iceberg disabled): return the parent's result untouched.
    if (!connectorContext.getConfig().isIcebergEnabled() || !HiveTableUtil.isIcebergTable(info)) {
        return info;
    }
    final String tableLoc = HiveTableUtil.getIcebergTableMetadataLocation(info);
    final com.netflix.iceberg.Table icebergTable =
        this.icebergTableHandler.getIcebergTable(name, tableLoc);
    return this.hiveMetacatConverters.fromIcebergTableToTableInfo(name, icebergTable, tableLoc,
        info.getAudit());
}
/**
 * getTable.
 *
 * <p>Delegates to the parent lookup, then — for enabled Iceberg tables only — replaces the
 * result with one converted from the Iceberg table metadata, keeping the original audit.
 *
 * @param requestContext The request context
 * @param name The qualified name of the resource to get
 * @return table dto
 */
@Override
public TableInfo get(final ConnectorRequestContext requestContext, final QualifiedName name) {
    final TableInfo info = super.get(requestContext, name);
    // Same short-circuit order as before: the table-type check only runs when enabled.
    final boolean icebergTable =
        connectorContext.getConfig().isIcebergEnabled() && HiveTableUtil.isIcebergTable(info);
    if (!icebergTable) {
        return info;
    }
    final String tableLoc = HiveTableUtil.getIcebergTableMetadataLocation(info);
    final com.netflix.iceberg.Table icebergTable2 =
        this.icebergTableHandler.getIcebergTable(name, tableLoc);
    return this.hiveMetacatConverters.fromIcebergTableToTableInfo(
        name, icebergTable2, tableLoc, info.getAudit());
}