/**
 * Resolves this table's stored type string to its {@link TableType} constant.
 *
 * @return the {@link TableType} named by {@code tTable.getTableType()}
 * @throws IllegalArgumentException if the stored string matches no constant
 * @throws NullPointerException if the stored type string is null
 */
public TableType getTableType() {
  return TableType.valueOf(tTable.getTableType());
}
/**
 * Reports whether the given table is a materialized view.
 *
 * @param table the table to inspect; may be null
 * @return true iff {@code table} is non-null and its type string equals MATERIALIZED_VIEW
 */
public static boolean isMaterializedViewTable(Table table) {
  // Constant-first equals keeps the check null-safe for the type string.
  return table != null
      && TableType.MATERIALIZED_VIEW.toString().equals(table.getTableType());
}
/**
 * Reports whether the given table is a (virtual) view.
 *
 * @param table the table to inspect; may be null
 * @return true iff {@code table} is non-null and its type string equals VIRTUAL_VIEW
 */
public static boolean isView(Table table) {
  // Constant-first equals keeps the check null-safe for the type string.
  return table != null
      && TableType.VIRTUAL_VIEW.toString().equals(table.getTableType());
}
/**
 * Determines whether the table is a virtual view, or reports "unknown".
 *
 * @param t the metastore table; its tableType field may be unset
 * @return {@code TRUE}/{@code FALSE} when the type is set, {@code null} when it is not
 */
private static Boolean isViewTable(Table t) {
  if (!t.isSetTableType()) {
    // Type not recorded — caller must treat the answer as undetermined.
    return null;
  }
  return TableType.VIRTUAL_VIEW.toString().equals(t.getTableType());
}
/**
 * Builds a create-table notification message, embedding the table serialized as JSON.
 *
 * @param server the notification server URL
 * @param servicePrincipal the service principal issuing the message
 * @param tableObj the table that was created
 * @param fileIter iterator over the table's initial files; may be null
 * @param timestamp event time
 * @throws IllegalArgumentException if the table object cannot be serialized
 */
public JSONCreateTableMessage(String server, String servicePrincipal, Table tableObj,
    Iterator<String> fileIter, Long timestamp) {
  this(server, servicePrincipal, tableObj.getDbName(), tableObj.getTableName(),
      tableObj.getTableType(), timestamp);
  try {
    this.tableObjJson = MessageBuilder.createTableObjJson(tableObj);
  } catch (TException e) {
    throw new IllegalArgumentException("Could not serialize: ", e);
  }
  if (fileIter != null) {
    this.files = Lists.newArrayList(fileIter);
  } else {
    this.files = Lists.newArrayList();
  }
}
/**
 * Builds a drop-partition notification message, embedding the table serialized as JSON.
 *
 * @param server the notification server URL
 * @param servicePrincipal the service principal issuing the message
 * @param tableObj the table whose partitions were dropped
 * @param partitionKeyValues key/value maps identifying each dropped partition
 * @param timestamp event time
 * @throws IllegalArgumentException if the table object cannot be serialized
 */
public JSONDropPartitionMessage(String server, String servicePrincipal, Table tableObj,
    List<Map<String, String>> partitionKeyValues, long timestamp) {
  this(server, servicePrincipal, tableObj.getDbName(), tableObj.getTableName(),
      tableObj.getTableType(), partitionKeyValues, timestamp);
  try {
    this.tableObjJson = MessageBuilder.createTableObjJson(tableObj);
  } catch (TException e) {
    throw new IllegalArgumentException("Could not serialize: ", e);
  }
}
/**
 * Builds a drop-table notification message, embedding the table serialized as JSON.
 *
 * @param server the notification server URL
 * @param servicePrincipal the service principal issuing the message
 * @param tableObj the table that was dropped
 * @param timestamp event time
 * @throws IllegalArgumentException if the table object cannot be serialized
 */
public JSONDropTableMessage(String server, String servicePrincipal, Table tableObj,
    Long timestamp) {
  this(server, servicePrincipal, tableObj.getDbName(), tableObj.getTableName(),
      tableObj.getTableType(), timestamp);
  try {
    this.tableObjJson = MessageBuilder.createTableObjJson(tableObj);
  } catch (TException e) {
    throw new IllegalArgumentException("Could not serialize: ", e);
  }
  // Validate the fully-constructed message before handing it back.
  checkValid();
}
@Override public void preCreateTable(Table table) throws MetaException { if (!table.getTableType().equals(TableType.EXTERNAL_TABLE.toString())) { throw new MetaException(KAFKA_STORAGE_HANDLER + " supports only " + TableType.EXTERNAL_TABLE); } Arrays.stream(KafkaTableProperties.values()) .filter(KafkaTableProperties::isMandatory) .forEach(key -> Preconditions.checkNotNull(table.getParameters().get(key.getName()), "Set Table property " + key.getName())); // Put all the default at the pre create. Arrays.stream(KafkaTableProperties.values()).forEach((key) -> { if (table.getParameters().get(key.getName()) == null) { table.putToParameters(key.getName(), key.getDefaultValue()); } }); }
/**
 * Creates a JSON create-table notification for the given table, stamped with the current time.
 *
 * @param table the newly created table
 * @return the create-table message
 */
@Override
public CreateTableMessage buildCreateTableMessage(Table table) {
  String db = table.getDbName();
  String name = table.getTableName();
  String type = table.getTableType();
  return new JSONCreateTableMessage(HCAT_SERVER_URL, HCAT_SERVICE_PRINCIPAL, db, name, type, now());
}
/**
 * Creates a JSON drop-table notification for the given table, stamped with the current time.
 *
 * @param table the dropped table
 * @return the drop-table message
 */
@Override
public DropTableMessage buildDropTableMessage(Table table) {
  String db = table.getDbName();
  String name = table.getTableName();
  String type = table.getTableType();
  return new JSONDropTableMessage(HCAT_SERVER_URL, HCAT_SERVICE_PRINCIPAL, db, name, type, now());
}
/**
 * Creates a JSON insert notification for the given table, stamped with the current time.
 *
 * @param db database name parameter (note: the message uses {@code table.getDbName()})
 * @param table the table receiving the insert
 * @param partKeyVals partition key/values identifying the target partition
 * @param files files added by the insert
 * @return the insert message
 */
@Override
public InsertMessage buildInsertMessage(String db, Table table, Map<String, String> partKeyVals,
    List<String> files) {
  String dbName = table.getDbName();
  String tableName = table.getTableName();
  String tableType = table.getTableType();
  return new JSONInsertMessage(HCAT_SERVER_URL, HCAT_SERVICE_PRINCIPAL, dbName, tableName,
      tableType, partKeyVals, files, now());
}
/**
 * Converts a Thrift metastore table into Presto's table representation.
 *
 * @param table the Thrift table; must carry a storage descriptor
 * @param schema the resolved data-column schema for the table
 * @return the converted table
 * @throws PrestoException if the table has no storage descriptor
 */
public static Table fromMetastoreApiTable(org.apache.hadoop.hive.metastore.api.Table table, List<FieldSchema> schema) {
  StorageDescriptor sd = table.getSd();
  if (sd == null) {
    throw new PrestoException(HIVE_INVALID_METADATA, "Table is missing storage descriptor");
  }

  Map<String, String> parameters = table.getParameters();
  Table.Builder tableBuilder = Table.builder()
      .setDatabaseName(table.getDbName())
      .setTableName(table.getTableName())
      .setOwner(nullToEmpty(table.getOwner()))
      .setTableType(table.getTableType())
      .setDataColumns(schema.stream()
          .map(ThriftMetastoreUtil::fromMetastoreApiFieldSchema)
          .collect(toList()))
      .setPartitionColumns(table.getPartitionKeys().stream()
          .map(ThriftMetastoreUtil::fromMetastoreApiFieldSchema)
          .collect(toList()))
      // A missing parameters map is normalized to an empty immutable map.
      .setParameters(parameters == null ? ImmutableMap.of() : parameters)
      // View texts are stored as Optional; empty strings are treated as absent.
      .setViewOriginalText(Optional.ofNullable(emptyToNull(table.getViewOriginalText())))
      .setViewExpandedText(Optional.ofNullable(emptyToNull(table.getViewExpandedText())));

  fromMetastoreApiStorageDescriptor(sd, tableBuilder.getStorageBuilder(), table.getTableName());
  return tableBuilder.build();
}
/**
 * Creates a JSON alter-table notification, stamped with the current time.
 * Identification fields are taken from the pre-alter table.
 *
 * @param before the table before the alteration
 * @param after the table after the alteration (not embedded in this message form)
 * @param writeId the transactional write id of the alteration
 * @return the alter-table message
 */
@Override
public AlterTableMessage buildAlterTableMessage(Table before, Table after, Long writeId) {
  String db = before.getDbName();
  String name = before.getTableName();
  String type = before.getTableType();
  return new JSONAlterTableMessage(HCAT_SERVER_URL, HCAT_SERVICE_PRINCIPAL, db, name, type,
      writeId, now());
}
/**
 * Creates a JSON drop-partition notification, stamped with the current time.
 *
 * @param table the table whose partitions were dropped
 * @param partitions iterator over the dropped partitions
 * @return the drop-partition message
 */
@Override
public DropPartitionMessage buildDropPartitionMessage(Table table, Iterator<Partition> partitions) {
  String db = table.getDbName();
  String name = table.getTableName();
  String type = table.getTableType();
  return new JSONDropPartitionMessage(HCAT_SERVER_URL, HCAT_SERVICE_PRINCIPAL, db, name, type,
      MessageBuilder.getPartitionKeyValues(table, partitions), now());
}
/**
 * Creates a JSON add-partition notification, stamped with the current time.
 *
 * @param table the table receiving the partitions
 * @param partitionsIterator iterator over the added partitions
 * @return the add-partition message
 */
@Override
public AddPartitionMessage buildAddPartitionMessage(Table table,
    Iterator<Partition> partitionsIterator) {
  String db = table.getDbName();
  String name = table.getTableName();
  String type = table.getTableType();
  return new JSONAddPartitionMessage(HCAT_SERVER_URL, HCAT_SERVICE_PRINCIPAL, db, name, type,
      MessageBuilder.getPartitionKeyValues(table, partitionsIterator), now());
}
@Override public synchronized void dropTable(String databaseName, String tableName, boolean deleteData) { List<String> locations = listAllDataPaths(this, databaseName, tableName); SchemaTableName schemaTableName = new SchemaTableName(databaseName, tableName); Table table = relations.remove(schemaTableName); if (table == null) { throw new TableNotFoundException(schemaTableName); } views.remove(schemaTableName); partitions.keySet().removeIf(partitionName -> partitionName.matches(databaseName, tableName)); // remove data if (deleteData && table.getTableType().equals(MANAGED_TABLE.name())) { for (String location : locations) { if (location != null) { File directory = new File(new Path(location).toUri()); checkArgument(isParentDir(directory, baseDirectory), "Table directory must be inside of the metastore base directory"); deleteDirectory(directory); } } } }
/**
 * Upgrades a table descriptor by running the Hive strict-managed-tables migration:
 * the migration type is determined automatically from the table and config, then
 * applied in place to {@code tableObj}.
 *
 * NOTE(review): the {@code rv} parameter is not used in this body — presumably kept
 * for signature compatibility with callers; confirm before removing.
 *
 * @param tableObj the table descriptor to migrate (mutated in place)
 * @param rv replication metadata (unused here)
 * @param x wrapper context providing config, logger, and the metastore client
 * @throws IOException, TException, HiveException propagated from the migration calls
 */
private static void upgradeTableDesc(org.apache.hadoop.hive.metastore.api.Table tableObj, MetaData rv,
    EximUtil.SemanticAnalyzerWrapperContext x)
    throws IOException, TException, HiveException {
  x.getLOG().debug("Converting table " + tableObj.getTableName() + " of type "
      + tableObj.getTableType() + " with para " + tableObj.getParameters());
  //TODO : isPathOwnedByHive is hard coded to true, need to get it from repl dump metadata.
  TableType tableType = TableType.valueOf(tableObj.getTableType());
  HiveStrictManagedMigration.TableMigrationOption migrationOption =
      HiveStrictManagedMigration.determineMigrationTypeAutomatically(tableObj, tableType,
          null, x.getConf(), x.getHive().getMSC(), true);
  // Mutates tableObj according to the chosen migration option.
  HiveStrictManagedMigration.migrateTable(tableObj, tableType, migrationOption, false,
      getHiveUpdater(x.getConf()), x.getHive().getMSC(), x.getConf());
  x.getLOG().debug("Converted table " + tableObj.getTableName() + " of type "
      + tableObj.getTableType() + " with para " + tableObj.getParameters());
}
/**
 * Creates a JSON alter-partition notification, stamped with the current time.
 * Database and table names come from the pre-alter partition; the type from the table.
 *
 * @param table the table owning the partition
 * @param before the partition before the alteration (also identifies the message)
 * @param after the partition after the alteration (not embedded in this message form)
 * @param writeId the transactional write id of the alteration
 * @return the alter-partition message
 */
@Override
public AlterPartitionMessage buildAlterPartitionMessage(Table table, Partition before,
    Partition after, Long writeId) {
  String db = before.getDbName();
  String name = before.getTableName();
  String type = table.getTableType();
  return new JSONAlterPartitionMessage(HCAT_SERVER_URL, HCAT_SERVICE_PRINCIPAL, db, name, type,
      MessageBuilder.getPartitionKeyValues(table, before), writeId, now());
}
/**
 * Fetches a table from the Hive metastore with retry on transient failures,
 * translating metastore errors into Presto exceptions.
 *
 * @param databaseName database containing the table
 * @param tableName name of the table to fetch
 * @return the table, or empty if it does not exist
 * @throws HiveViewNotSupportedException if the table is a Hive view not created by Presto
 * @throws PrestoException on Thrift-level metastore failures
 */
@Override
public Optional<Table> getTable(String databaseName, String tableName) {
  try {
    return retry()
        // These outcomes are definitive; retrying cannot change them.
        .stopOn(NoSuchObjectException.class, HiveViewNotSupportedException.class)
        .stopOnIllegalExceptions()
        .run("getTable", stats.getGetTable().wrap(() -> {
          try (HiveMetastoreClient client = clientProvider.createMetastoreClient()) {
            Table table = client.getTable(databaseName, tableName);
            // Hive views not created through Presto cannot be interpreted here.
            if (table.getTableType().equals(TableType.VIRTUAL_VIEW.name()) && !isPrestoView(table)) {
              throw new HiveViewNotSupportedException(new SchemaTableName(databaseName, tableName));
            }
            return Optional.of(table);
          }
        }));
  } catch (NoSuchObjectException e) {
    // A missing table is an expected outcome, not an error.
    return Optional.empty();
  } catch (TException e) {
    throw new PrestoException(HIVE_METASTORE_ERROR, e);
  } catch (Exception e) {
    throw propagate(e);
  }
}
/**
 * Wraps a metastore API table, deep-copying it and back-filling a missing table
 * type before initialization. The inference order matters: external, then
 * materialized view, then (no storage location) virtual view, else managed.
 *
 * @param apiTable the table to wrap; never mutated (a deep copy is taken)
 */
public TableWrapper(org.apache.hadoop.hive.metastore.api.Table apiTable) {
  // Deep copy so the back-filled type never leaks into the caller's object.
  org.apache.hadoop.hive.metastore.api.Table wrapperApiTable = apiTable.deepCopy();
  if (wrapperApiTable.getTableType() == null){
    // TableType specified was null, we need to figure out what type it was.
    if (MetaStoreUtils.isExternalTable(wrapperApiTable)){
      wrapperApiTable.setTableType(TableType.EXTERNAL_TABLE.toString());
    } else if (MetaStoreUtils.isMaterializedViewTable(wrapperApiTable)) {
      wrapperApiTable.setTableType(TableType.MATERIALIZED_VIEW.toString());
    } else if ((wrapperApiTable.getSd() == null) || (wrapperApiTable.getSd().getLocation() == null)) {
      // No storage descriptor / location means the table must be a virtual view.
      wrapperApiTable.setTableType(TableType.VIRTUAL_VIEW.toString());
    } else {
      wrapperApiTable.setTableType(TableType.MANAGED_TABLE.toString());
    }
  }
  initialize(wrapperApiTable);
}
}