/** * Drop a given table or some partitions. DropTableDesc is currently used for both. * * @param db * The database in question. * @param dropTbl * This is the table we're dropping. * @throws HiveException * Throws this exception if an unexpected error occurs. */ private void dropTableOrPartitions(Hive db, DropTableDesc dropTbl) throws HiveException { // We need to fetch the table before it is dropped so that it can be passed to // post-execution hook Table tbl = null; try { tbl = db.getTable(dropTbl.getTableName()); } catch (InvalidTableException e) { // drop table is idempotent } if (dropTbl.getPartSpecs() == null) { dropTable(db, tbl, dropTbl); } else { dropPartitions(db, tbl, dropTbl); } }
/**
 * Builds the DDL task that replays a DROP TABLE replication event.
 *
 * @param context replication-load context carrying the event payload and config
 * @return a single-element list containing the drop-table task
 * @throws SemanticException if the event cannot be translated into a task
 */
@Override
public List<Task<? extends Serializable>> handle(Context context) throws SemanticException {
  DropTableMessage msg = deserializer.getDropTableMessage(context.dmd.getPayload());
  // Names supplied by the context win; otherwise fall back to the event payload.
  String dbName = context.isDbNameEmpty() ? msg.getDB() : context.dbName;
  String tblName = context.isTableNameEmpty() ? msg.getTable() : context.tableName;
  DropTableDesc desc = new DropTableDesc(
      dbName + "." + tblName, null, true, true, context.eventOnlyReplicationSpec(), false);
  Task<DDLWork> dropTask =
      TaskFactory.get(new DDLWork(readEntitySet, writeEntitySet, desc), context.hiveConf);
  context.log.debug("Added drop tbl task : {}:{}", dropTask.getId(), desc.getTableName());
  // Record that the whole database-level metadata advanced to this event id.
  updatedMetadata.set(context.dmd.getEventTo().toString(), dbName, null, null);
  return Collections.singletonList(dropTask);
}
}
// NOTE(review): truncated fragment of the DROP TABLE/VIEW validation logic — braces are
// unbalanced and several `return;` statements are immediately followed by further
// conditions, so the real control flow cannot be verified from this excerpt.
// Also note the odd spacing in `tbl!= null` near the end; confirm against the full source.
if (tbl != null && dropTbl.getValidationRequired()) { if (tbl.isView()) { if (!dropTbl.getExpectView()) { if (dropTbl.getIfExists()) { return; if (dropTbl.getExpectMaterializedView()) { throw new HiveException("Cannot drop a view with DROP MATERIALIZED VIEW"); } else { if (!dropTbl.getExpectMaterializedView()) { if (dropTbl.getIfExists()) { return; if (dropTbl.getExpectView()) { throw new HiveException("Cannot drop a materialized view with DROP VIEW"); } else { if (dropTbl.getExpectView()) { if (dropTbl.getIfExists()) { return; } else if (dropTbl.getExpectMaterializedView()) { if (dropTbl.getIfExists()) { return; ReplicationSpec replicationSpec = dropTbl.getReplicationSpec(); if ((tbl!= null) && replicationSpec.isInReplicationScope()){
// NOTE(review): truncated fragment of dropPartitions. The statement `= db.dropPartitions(...)`
// is missing its left-hand side — presumably `List<Partition> droppedParts`, which is the name
// read by the loop that follows; TODO confirm against the full source. The surrounding
// per-partSpec for-loop wrapping a bulk dropPartitions(all specs) call also looks suspect.
private void dropPartitions(Hive db, Table tbl, DropTableDesc dropTbl) throws HiveException { ReplicationSpec replicationSpec = dropTbl.getReplicationSpec(); if (replicationSpec.isInReplicationScope()){ for (DropTableDesc.PartSpec partSpec : dropTbl.getPartSpecs()){ List<Partition> partitions = new ArrayList<>(); try { = db.dropPartitions(dropTbl.getTableName(), dropTbl.getPartSpecs(), PartitionDropOptions.instance() .deleteData(true) .ifExists(true) .purgeData(dropTbl.getIfPurge())); for (Partition partition : droppedParts) { console.printInfo("Dropped the partition " + partition.getName());
// NOTE(review): truncated fragment mixing table-drop and partition-drop logic; braces are
// unbalanced and `partitionNames` / `partition` are used without visible declarations.
// The empty catch of InvalidTableException presumably implements idempotent drop — confirm.
tbl = db.getTable(dropTbl.getTableName()); } catch (InvalidTableException e) { if (dropTbl.getPartSpecs() == null) { if (!dropTbl.getExpectView()) { throw new HiveException("Cannot drop a view with DROP TABLE"); if (dropTbl.getExpectView()) { throw new HiveException("Cannot drop a base table with DROP VIEW"); db.dropTable(dropTbl.getTableName()); if (tbl != null) { work.getOutputs().add(new WriteEntity(tbl)); validateAlterTableType( tbl, AlterTableDesc.AlterTableTypes.DROPPARTITION, dropTbl.getExpectView()); db.getPartitionNames(dropTbl.getTableName(), (short) -1); Set<Map<String, String>> partitions = new HashSet<Map<String, String>>(); for (String partitionName : partitionNames) { for (Map<String, String> partSpec : dropTbl.getPartSpecs()) { Iterator<Map<String, String>> it = partitions.iterator(); while (it.hasNext()) { db.dropPartition(dropTbl.getTableName(), partition.getValues(), true); work.getOutputs().add(new WriteEntity(partition));
/**
 * Builds a DDL task that drops the single partition described by {@code partSpec},
 * or returns {@code null} when the spec expands to no partition expressions.
 *
 * @param table the table owning the partition
 * @param partSpec key/value partition specification to drop
 * @return the drop-partition task, or null if there is nothing to drop
 * @throws SemanticException if the partition spec cannot be converted to expressions
 */
private Task<?> dropPartitionTask(Table table, Map<String, String> partSpec) throws SemanticException {
  Map<Integer, List<ExprNodeGenericFuncDesc>> specExprs =
      ReplUtils.genPartSpecs(table, Collections.singletonList(partSpec));
  if (specExprs.isEmpty()) {
    // Nothing matched the spec: no task to schedule.
    return null;
  }
  DropTableDesc desc = new DropTableDesc(
      table.getFullyQualifiedName(), specExprs, null, true, event.replicationSpec());
  DDLWork work = new DDLWork(new HashSet<>(), new HashSet<>(), desc);
  return TaskFactory.get(work, context.hiveConf);
}
// NOTE(review): truncated fragment of dropPartitions. `for (Partition p : Iterables.filter(`
// is left dangling, and `= db.dropPartitions(...)` lacks its left-hand side (presumably
// `List<Partition> droppedParts`, the name read by the following loop) — TODO confirm
// against the full source before relying on this excerpt.
private void dropPartitions(Hive db, Table tbl, DropTableDesc dropTbl) throws HiveException { ReplicationSpec replicationSpec = dropTbl.getReplicationSpec(); if (replicationSpec.isInReplicationScope()){ for (DropTableDesc.PartSpec partSpec : dropTbl.getPartSpecs()){ try { for (Partition p : Iterables.filter( = db.dropPartitions(dropTbl.getTableName(), dropTbl.getPartSpecs(), PartitionDropOptions.instance() .deleteData(true) .ifExists(true) .purgeData(dropTbl.getIfPurge())); for (Partition partition : droppedParts) { console.printInfo("Dropped the partition " + partition.getName());
/**
 * Builds a DDL task that drops {@code table} under this event's replication spec.
 *
 * @param table the table to drop; must not be null
 * @return the drop-table task
 */
private Task<?> dropTableTask(Table table) {
  assert(table != null);
  DropTableDesc desc = new DropTableDesc(
      table.getFullyQualifiedName(), table.getTableType(), true, false, event.replicationSpec());
  // No read/write entities are tracked for this replication-driven drop.
  DDLWork work = new DDLWork(new HashSet<>(), new HashSet<>(), desc);
  return TaskFactory.get(work, context.hiveConf);
}
}
// NOTE(review): truncated fragment of DROP TABLE/VIEW validation plus the actual
// `db.dropTable(...)` call — braces are unbalanced and `return;` statements are followed
// by further code, so the intended nesting cannot be recovered from this excerpt.
// Note the odd `tbl!= null` spacing; confirm against the full source.
if (!dropTbl.getExpectView()) { if (dropTbl.getIfExists()) { return; if (dropTbl.getExpectMaterializedView()) { throw new HiveException("Cannot drop a view with DROP MATERIALIZED VIEW"); } else { if (!dropTbl.getExpectMaterializedView()) { if (dropTbl.getIfExists()) { return; if (dropTbl.getExpectView()) { throw new HiveException("Cannot drop a materialized view with DROP VIEW"); } else { if (dropTbl.getExpectView()) { if (dropTbl.getIfExists()) { return; } else if (dropTbl.getExpectMaterializedView()) { if (dropTbl.getIfExists()) { return; ReplicationSpec replicationSpec = dropTbl.getReplicationSpec(); if ((tbl!= null) && replicationSpec.isInReplicationScope()){ db.dropTable(dropTbl.getTableName(), dropTbl.getIfPurge()); if (tbl != null) {
/** * Drop a given table or some partitions. DropTableDesc is currently used for both. * * @param db * The database in question. * @param dropTbl * This is the table we're dropping. * @throws HiveException * Throws this exception if an unexpected error occurs. */ private void dropTableOrPartitions(Hive db, DropTableDesc dropTbl) throws HiveException { // We need to fetch the table before it is dropped so that it can be passed to // post-execution hook Table tbl = null; try { tbl = db.getTable(dropTbl.getTableName()); } catch (InvalidTableException e) { // drop table is idempotent } if (dropTbl.getPartSpecs() == null) { dropTable(db, tbl, dropTbl); } else { dropPartitions(db, tbl, dropTbl); } }
// NOTE(review): truncated fragment of an older dropPartitions variant (note the
// `.ignoreProtection(...)` option, absent from the other copies in this file).
// `for (Partition p : Iterables.filter(` dangles and `= db.dropPartitions(...)` is missing
// its left-hand side (presumably `List<Partition> droppedParts`) — TODO confirm upstream.
private void dropPartitions(Hive db, Table tbl, DropTableDesc dropTbl) throws HiveException { ReplicationSpec replicationSpec = dropTbl.getReplicationSpec(); if (replicationSpec.isInReplicationScope()){ for (DropTableDesc.PartSpec partSpec : dropTbl.getPartSpecs()){ try { for (Partition p : Iterables.filter( = db.dropPartitions(dropTbl.getTableName(), dropTbl.getPartSpecs(), PartitionDropOptions.instance() .deleteData(true) .ignoreProtection(dropTbl.getIgnoreProtection()) .ifExists(true) .purgeData(dropTbl.getIfPurge())); for (Partition partition : droppedParts) { console.printInfo("Dropped the partition " + partition.getName());
// NOTE(review): truncated fragment of a drop-partition replication handler.
// `TaskFactory.get( );` has an empty argument list — the DDLWork/conf arguments were
// lost in extraction — and `partSpecs` is used without a visible declaration.
// Reconstruct from the full source before reuse.
ReplUtils.genPartSpecs(new Table(msg.getTableObj()), msg.getPartitions()); if (partSpecs.size() > 0) { DropTableDesc dropPtnDesc = new DropTableDesc(actualDbName + "." + actualTblName, partSpecs, null, true, context.eventOnlyReplicationSpec()); Task<DDLWork> dropPtnTask = TaskFactory.get( ); context.log.debug("Added drop ptn task : {}:{},{}", dropPtnTask.getId(), dropPtnDesc.getTableName(), msg.getPartitions()); updatedMetadata.set(context.dmd.getEventTo().toString(), actualDbName, actualTblName, null); return Collections.singletonList(dropPtnTask);
/**
 * Builds a DDL task that drops {@code table}, reusing the analyzer context's
 * input/output entity sets and configuration.
 *
 * @param table the table to drop
 * @param x wrapper around the semantic analyzer's inputs, outputs and conf
 * @return the drop-table task
 */
private static Task<?> dropTableTask(Table table, EximUtil.SemanticAnalyzerWrapperContext x){
  // No expected-type check and no replication spec for this variant (both null/true/true
  // as in the original call).
  DropTableDesc desc = new DropTableDesc(table.getTableName(), null, true, true, null);
  DDLWork work = new DDLWork(x.getInputs(), x.getOutputs(), desc);
  return TaskFactory.get(work, x.getConf());
}
// NOTE(review): heavily truncated fragment of drop-table validation and the drop call;
// braces are unbalanced and `return;` statements run straight into further conditions,
// so the original nesting is unrecoverable from this excerpt. Note the `tbl!= null`
// spacing oddity — confirm against the full source.
if (!dropTbl.getExpectView()) { if (dropTbl.getIfExists()) { return; if (dropTbl.getExpectView()) { if (dropTbl.getIfExists()) { return; ReplicationSpec replicationSpec = dropTbl.getReplicationSpec(); if ((tbl!= null) && replicationSpec.isInReplicationScope()){ db.dropTable(dropTbl.getTableName(), dropTbl.getIfPurge()); if (tbl != null) {
// NOTE(review): suspicious fragment — the guard `getPartSpecs() == null` is immediately
// followed by a for-each over `getPartSpecs()`, which would throw a NullPointerException
// if the branch were actually entered. The condition likely should be `!= null` (or the
// loop belongs to the else-branch); the fragment is truncated, so confirm against the
// full source before changing it.
if (dropTable.getPartSpecs() == null) { for (DropTableDesc.PartSpec partSpec : dropTable.getPartSpecs()) { Table table = hive.getTable(SessionState.get().getCurrentDatabase(), dropTable.getTableName()); List<Partition> partitions = null; try {
// NOTE(review): truncated fragment merging a drop-table and a drop-partition replication
// handler. Both `new DropTableDesc(` constructor calls are cut off mid-argument-list,
// `dropTableTask`/`dropPtnTask` are used without visible construction, and an unrelated
// `dropPartitionMessage.getPartitions());` line is spliced in. Reconstruct from the
// full source before reuse.
String actualDbName = ((dbName == null) || dbName.isEmpty() ? dropTableMessage.getDB() : dbName); String actualTblName = ((tblName == null) || tblName.isEmpty() ? dropTableMessage.getTable() : tblName); DropTableDesc dropTableDesc = new DropTableDesc( actualDbName + "." + actualTblName, null, true, true, LOG.debug("Added drop tbl task : {}:{}", dropTableTask.getId(), dropTableDesc.getTableName()); dbsUpdated.put(actualDbName,dmd.getEventTo()); return tasks; dropPartitionMessage.getPartitions()); if (partSpecs.size() > 0) { DropTableDesc dropPtnDesc = new DropTableDesc( actualDbName + "." + actualTblName, partSpecs, null, true, tasks.add(dropPtnTask); LOG.debug("Added drop ptn task : {}:{},{}", dropPtnTask.getId(), dropPtnDesc.getTableName(), dropPartitionMessage.getPartitions()); dbsUpdated.put(actualDbName, dmd.getEventTo()); tablesUpdated.put(actualDbName + "." + actualTblName, dmd.getEventTo());
/**
 * Builds a DDL task that drops {@code table} under the given replication spec,
 * passing the table's own type through to the descriptor.
 *
 * @param table the table to drop
 * @param x wrapper around the semantic analyzer's inputs, outputs and conf
 * @param replicationSpec replication scope/metadata for this drop
 * @return the drop-table task
 */
private static Task<?> dropTableTask(Table table, EximUtil.SemanticAnalyzerWrapperContext x,
                                     ReplicationSpec replicationSpec) {
  DropTableDesc desc =
      new DropTableDesc(table.getTableName(), table.getTableType(), true, false, replicationSpec);
  DDLWork work = new DDLWork(x.getInputs(), x.getOutputs(), desc);
  return TaskFactory.get(work, x.getConf());
}
/** * Drop a given table or some partitions. DropTableDesc is currently used for both. * * @param db * The database in question. * @param dropTbl * This is the table we're dropping. * @throws HiveException * Throws this exception if an unexpected error occurs. */ private void dropTableOrPartitions(Hive db, DropTableDesc dropTbl) throws HiveException { // We need to fetch the table before it is dropped so that it can be passed to // post-execution hook Table tbl = null; try { tbl = db.getTable(dropTbl.getTableName()); } catch (InvalidTableException e) { // drop table is idempotent } if (dropTbl.getPartSpecs() == null) { dropTable(db, tbl, dropTbl); } else { dropPartitions(db, tbl, dropTbl); } }
private void analyzeDropTable(ASTNode ast, TableType expectedType) throws SemanticException { String tableName = getUnescapedName((ASTNode) ast.getChild(0)); boolean ifExists = (ast.getFirstChildWithType(HiveParser.TOK_IFEXISTS) != null); // we want to signal an error if the table/view doesn't exist and we're // configured not to fail silently boolean throwException = !ifExists && !HiveConf.getBoolVar(conf, ConfVars.DROPIGNORESNONEXISTENT); ReplicationSpec replicationSpec = new ReplicationSpec(ast); Table tab = getTable(tableName, throwException); if (tab != null) { inputs.add(new ReadEntity(tab)); outputs.add(new WriteEntity(tab, WriteEntity.WriteType.DDL_EXCLUSIVE)); } boolean ifPurge = (ast.getFirstChildWithType(HiveParser.KW_PURGE) != null); DropTableDesc dropTblDesc = new DropTableDesc(tableName, expectedType, ifExists, ifPurge, replicationSpec); rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), dropTblDesc))); }
// NOTE(review): duplicate of the suspicious fragment above in this file — a
// `getPartSpecs() == null` guard directly wrapping iteration over `getPartSpecs()`
// would NPE if entered; the condition likely should be `!= null`. The fragment is
// truncated (unclosed try/for), so verify against the full source before fixing.
if (dropTable.getPartSpecs() == null) { for (DropTableDesc.PartSpec partSpec : dropTable.getPartSpecs()) { Table table = hive.getTable(SessionState.get().getCurrentDatabase(), dropTable.getTableName()); List<Partition> partitions = null; try {