/**
 * Table creation is not supported by this implementation.
 *
 * @throws MetacatNotSupportedException always
 */
@Override
public Table create(final Schema schema,
                    final PartitionSpec spec,
                    final Map<String, String> properties,
                    final String table) {
    throw new MetacatNotSupportedException("Not supported");
}
/**
 * Table creation is not supported by this implementation.
 *
 * @throws MetacatNotSupportedException always
 */
@Override
public Table create(final Schema schema, final PartitionSpec spec, final String tables) {
    throw new MetacatNotSupportedException("Not supported");
}
/**
 * Table creation is not supported by this implementation.
 *
 * @throws MetacatNotSupportedException always
 */
@Override
public Table create(final Schema schema,
                    final PartitionSpec spec,
                    final String database,
                    final String table) {
    // Message capitalization fixed to match the sibling create(...) overloads,
    // which all throw with "Not supported".
    throw new MetacatNotSupportedException("Not supported");
}
/**
 * Unsupported operation: this catalog does not allow creating tables.
 *
 * @throws MetacatNotSupportedException always
 */
@Override
public Table create(final Schema schema, final PartitionSpec spec, final String tables) {
    throw new MetacatNotSupportedException("Not supported");
}
/**
 * Unsupported operation: this catalog does not allow creating tables.
 *
 * @throws MetacatNotSupportedException always
 */
@Override
public Table create(final Schema schema,
                    final PartitionSpec spec,
                    final Map<String, String> properties,
                    final String table) {
    throw new MetacatNotSupportedException("Not supported");
}
/**
 * Table creation is not supported by this implementation.
 *
 * @throws MetacatNotSupportedException always
 */
@Override
public Table create(final Schema schema,
                    final PartitionSpec spec,
                    final String database,
                    final String table) {
    // Message capitalization fixed to match the sibling create(...) overloads,
    // which all throw with "Not supported".
    throw new MetacatNotSupportedException("Not supported");
}
/**
 * Creates a new catalog.
 *
 * <p>Catalog creation is not currently implemented; this endpoint always
 * rejects the request.
 *
 * @param createCatalogDto catalog
 * @throws MetacatNotSupportedException always
 */
@RequestMapping(
    method = RequestMethod.POST,
    path = "/catalog",
    consumes = MediaType.APPLICATION_JSON_VALUE
)
@ResponseStatus(HttpStatus.CREATED)
@ApiOperation(
    position = 3,
    value = "Creates a new catalog",
    notes = "Returns success if there were no errors creating the catalog"
)
@ApiResponses(
    {
        @ApiResponse(
            code = HttpURLConnection.HTTP_NOT_IMPLEMENTED,
            message = "Not yet implemented"
        )
    }
)
public void createCatalog(@Valid @RequestBody final CreateCatalogDto createCatalogDto) {
    throw new MetacatNotSupportedException("Create catalog is not supported.");
}
// NOTE(review): fragment of a larger exception-mapping handler — the enclosing try and
// the preceding catch clause are not visible here. This piece records a metric for the
// caught exception type, logs it, and re-maps it to MetacatNotSupportedException before
// falling through to an IllegalArgumentException handler (whose body is cut off).
collectRequestExceptionMetrics(tags, e.getClass().getSimpleName()); log.error(e.getMessage(), e); throw new MetacatNotSupportedException("Catalog does not support the operation"); } catch (IllegalArgumentException e) { collectRequestExceptionMetrics(tags, e.getClass().getSimpleName());
/**
 * {@inheritDoc}.
 */
@Override
public List<String> getPartitionUris(
    final ConnectorRequestContext requestContext,
    final QualifiedName tableName,
    final PartitionListRequest partitionsRequest,
    final TableInfo tableInfo
) {
    // Iceberg tables do not expose partition URIs through this path.
    final boolean iceberg =
        context.getConfig().isIcebergEnabled() && HiveTableUtil.isIcebergTable(tableInfo);
    if (iceberg) {
        throw new MetacatNotSupportedException("IcebergTable Unsupported Operation!");
    }
    return directSqlGetPartition.getPartitionUris(requestContext, tableName, partitionsRequest);
}
/**
 * {@inheritDoc}.
 */
@Override
public List<String> getPartitionUris(
    final ConnectorRequestContext requestContext,
    final QualifiedName tableName,
    final PartitionListRequest partitionsRequest,
    final TableInfo tableInfo
) {
    if (context.getConfig().isIcebergEnabled() && HiveTableUtil.isIcebergTable(tableInfo)) {
        // Partition URIs are not available for Iceberg tables via this connector.
        throw new MetacatNotSupportedException("IcebergTable Unsupported Operation!");
    }
    return directSqlGetPartition.getPartitionUris(requestContext, tableName, partitionsRequest);
}
/**
 * Number of partitions for the given table.
 *
 * @param tableName tableName
 * @return Number of partitions
 */
@Override
public int getPartitionCount(
    final ConnectorRequestContext requestContext,
    final QualifiedName tableName,
    final TableInfo tableInfo
) {
    // Counting partitions via direct SQL is not applicable to Iceberg tables.
    final boolean iceberg =
        context.getConfig().isIcebergEnabled() && HiveTableUtil.isIcebergTable(tableInfo);
    if (iceberg) {
        throw new MetacatNotSupportedException("IcebergTable Unsupported Operation!");
    }
    return directSqlGetPartition.getPartitionCount(requestContext, tableName);
}
// NOTE(review): fragment of a larger exception-mapping handler — the enclosing try and
// the preceding catch clause are not visible here. This piece records a metric, logs the
// error, and re-maps it to MetacatNotSupportedException before falling through to a
// handler for the "already exists" family of exceptions (whose body is cut off).
collectRequestExceptionMetrics(tags, e.getClass().getSimpleName()); log.error(e.getMessage(), e); throw new MetacatNotSupportedException("Catalog does not support the operation"); } catch (DatabaseAlreadyExistsException | TableAlreadyExistsException | PartitionAlreadyExistsException e) { collectRequestExceptionMetrics(tags, e.getClass().getSimpleName());
/**
 * Number of partitions for the given table.
 *
 * @param tableName tableName
 * @return Number of partitions
 */
@Override
public int getPartitionCount(
    final ConnectorRequestContext requestContext,
    final QualifiedName tableName,
    final TableInfo tableInfo
) {
    if (context.getConfig().isIcebergEnabled() && HiveTableUtil.isIcebergTable(tableInfo)) {
        // Direct-SQL partition counting does not apply to Iceberg tables.
        throw new MetacatNotSupportedException("IcebergTable Unsupported Operation!");
    }
    return directSqlGetPartition.getPartitionCount(requestContext, tableName);
}
/**
 * {@inheritDoc}.
 */
@Override
public void delete(final ConnectorRequestContext requestContext, final QualifiedName name) {
    try {
        this.metacatHiveClient.dropDatabase(name.getDatabaseName());
    } catch (NoSuchObjectException e) {
        // Database missing in the metastore.
        throw new DatabaseNotFoundException(name, e);
    } catch (MetaException e) {
        throw new InvalidMetaException(name, e);
    } catch (InvalidOperationException e) {
        throw new MetacatNotSupportedException(e.getMessage());
    } catch (TException e) {
        // Catch-all Thrift failure; must stay last since the above are subtypes.
        throw new ConnectorException(String.format("Failed delete hive database %s", name), e);
    }
}
/**
 * {@inheritDoc}.
 */
@Override
public void delete(final ConnectorRequestContext requestContext, final QualifiedName name) {
    final String databaseName = name.getDatabaseName();
    try {
        this.metacatHiveClient.dropDatabase(databaseName);
    } catch (NoSuchObjectException nsoe) {
        throw new DatabaseNotFoundException(name, nsoe);
    } catch (MetaException me) {
        throw new InvalidMetaException(name, me);
    } catch (InvalidOperationException ioe) {
        throw new MetacatNotSupportedException(ioe.getMessage());
    } catch (TException te) {
        // Generic Thrift failure handled last — the specific exceptions above extend it.
        throw new ConnectorException(String.format("Failed delete hive database %s", name), te);
    }
}
/** * {@inheritDoc}. */ @Override public void deletePartitions( final ConnectorRequestContext requestContext, final QualifiedName tableName, final List<String> partitionNames, final TableInfo tableInfo ) { //TODO: implemented as next step if (context.getConfig().isIcebergEnabled() && HiveTableUtil.isIcebergTable(tableInfo)) { throw new MetacatNotSupportedException("IcebergTable Unsupported Operation!"); } //The direct sql based deletion doesn't check if the partition is valid if (Boolean.parseBoolean(getContext().getConfiguration() .getOrDefault(HiveConfigConstants.USE_FAST_DELETION, "false"))) { directSqlSavePartition.delete(tableName, partitionNames); } else { //will throw exception if the partitions are invalid super.deletePartitions(requestContext, tableName, partitionNames, tableInfo); } }
/** * {@inheritDoc}. */ @Override public void deletePartitions( final ConnectorRequestContext requestContext, final QualifiedName tableName, final List<String> partitionNames, final TableInfo tableInfo ) { //TODO: implemented as next step if (context.getConfig().isIcebergEnabled() && HiveTableUtil.isIcebergTable(tableInfo)) { throw new MetacatNotSupportedException("IcebergTable Unsupported Operation!"); } //The direct sql based deletion doesn't check if the partition is valid if (Boolean.parseBoolean(getContext().getConfiguration() .getOrDefault(HiveConfigConstants.USE_FAST_DELETION, "false"))) { directSqlSavePartition.delete(tableName, partitionNames); } else { //will throw exception if the partitions are invalid super.deletePartitions(requestContext, tableName, partitionNames, tableInfo); } }
// NOTE(review): lone statement from a larger method not visible here — presumably the
// guard rejecting cross-source table copies; confirm against the enclosing method.
throw new MetacatNotSupportedException("Cannot copy a table from a different source");
// NOTE(review): interior of a switch (enclosing statement not visible) that maps HTTP
// status codes from a remote Metacat response to client-side exceptions: 501/415 become
// MetacatNotSupportedException, 400 becomes MetacatBadRequestException.
case 501: //NOT IMPLEMENTED case 415: //UNSUPPORTED_MEDIA_TYPE return new MetacatNotSupportedException(message); case 400: //BAD_REQUEST return new MetacatBadRequestException(message);