private List<Task<?>> alterTableOrSinglePartition( AlterTableDesc alterTbl, Table tbl, Partition part) throws HiveException { EnvironmentContext environmentContext = alterTbl.getEnvironmentContext(); if (environmentContext == null) { environmentContext = new EnvironmentContext(); alterTbl.setEnvironmentContext(environmentContext); if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.RENAME) { tbl.setDbName(Utilities.getDatabaseName(alterTbl.getNewName())); tbl.setTableName(Utilities.getTableName(alterTbl.getNewName())); } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ADDCOLS) { StorageDescriptor sd = retrieveStorageDescriptor(tbl, part); String serializationLib = sd.getSerdeInfo().getSerializationLib(); List<FieldSchema> oldCols = (part == null ? tbl.getColsForMetastore() : part.getColsForMetastore()); List<FieldSchema> newCols = alterTbl.getNewCols(); if (serializationLib.equals( "org.apache.hadoop.hive.serde.thrift.columnsetSerDe")) { } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.RENAMECOLUMN) { StorageDescriptor sd = retrieveStorageDescriptor(tbl, part); String serializationLib = sd.getSerdeInfo().getSerializationLib(); List<FieldSchema> newCols = new ArrayList<FieldSchema>(); Iterator<FieldSchema> iterOldCols = oldCols.iterator(); String oldName = alterTbl.getOldColName(); String newName = alterTbl.getNewColName(); String type = alterTbl.getNewColType(); String comment = alterTbl.getNewColComment();
if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.RENAME) { String names[] = Utilities.getDbTableName(alterTbl.getOldName()); if (Utils.isBootstrapDumpInProgress(db, names[0])) { LOG.error("DDLTask: Rename Table not allowed as bootstrap dump in progress"); Table tbl = db.getTable(alterTbl.getOldName()); if (alterTbl.getPartSpec() != null) { Map<String, String> partSpec = alterTbl.getPartSpec(); if (DDLSemanticAnalyzer.isFullSpec(tbl, partSpec)) { allPartitions = new ArrayList<Partition>(); StringUtils.join(alterTbl.getPartSpec().keySet(), ',') + " for table " + alterTbl.getOldName()); allPartitions = db.getPartitions(tbl, alterTbl.getPartSpec()); EnvironmentContext environmentContext = alterTbl.getEnvironmentContext(); if (environmentContext == null) { environmentContext = new EnvironmentContext(); environmentContext.putToProperties(HiveMetaHook.ALTER_TABLE_OPERATION_TYPE, alterTbl.getOp().name()); if (allPartitions == null) { db.alterTable(alterTbl.getOldName(), tbl, alterTbl.getIsCascade(), environmentContext, true); } else { boolean isTxn = alterTbl.getPartSpec() != null && alterTbl.getOp() == AlterTableTypes.ADDPROPS; db.alterPartitions(Warehouse.getQualifiedName(tbl.getTTable()), allPartitions, environmentContext, isTxn);
/**
 * Builds a DDL task that stamps a replication-checkpoint property on a table
 * (or on one of its partitions, when {@code partSpec} is non-null).
 *
 * The checkpoint value is the dump root path, stored under
 * {@code REPL_CHECKPOINT_KEY} via an ADDPROPS alter-table operation, so a
 * re-run of the same load can recognize already-imported objects.
 *
 * @param tableDesc describes the table being imported (supplies db/table name)
 * @param partSpec  partition to stamp, or null to stamp the table itself
 * @param dumpRoot  dump root path recorded as the checkpoint value
 * @param conf      Hive configuration used to materialize the task
 * @return a task wrapping the ADDPROPS alter-table work
 * @throws SemanticException if task construction fails
 */
public static Task<?> getTableCheckpointTask(ImportTableDesc tableDesc, HashMap<String, String> partSpec, String dumpRoot, HiveConf conf) throws SemanticException {
  AlterTableDesc checkpointDesc = new AlterTableDesc(AlterTableDesc.AlterTableTypes.ADDPROPS);

  // Single property: checkpoint key -> dump root of this replication run.
  HashMap<String, String> checkpointProps = new HashMap<>();
  checkpointProps.put(REPL_CHECKPOINT_KEY, dumpRoot);
  checkpointDesc.setProps(checkpointProps);

  String qualifiedName =
      StatsUtils.getFullyQualifiedTableName(tableDesc.getDatabaseName(), tableDesc.getTableName());
  checkpointDesc.setOldName(qualifiedName);

  // Target a specific partition only when the caller supplied a spec.
  if (partSpec != null) {
    checkpointDesc.setPartSpec(partSpec);
  }

  return TaskFactory.get(new DDLWork(new HashSet<>(), new HashSet<>(), checkpointDesc), conf);
}
@Override public boolean mayNeedWriteId() { switch (getOp()) { case ADDPROPS: { return isExplicitStatsUpdate || AcidUtils.isToInsertOnlyTable(null, getProps()) || (AcidUtils.isTransactionalTable(getProps()) && !isFullAcidConversion); } case DROPPROPS: return isExplicitStatsUpdate; // The check for the following ones is performed before setting AlterTableDesc into the acid field. // These need write ID and stuff because they invalidate column stats. case RENAMECOLUMN: case RENAME: case REPLACECOLS: case ADDCOLS: case ALTERLOCATION: case UPDATECOLUMNS: return true; // RENAMEPARTITION is handled in RenamePartitionDesc default: return false; } }
/**
 * Queues a DDL task that removes the COLUMN_STATS_ACCURATE marker from a
 * non-native table — such tables cannot carry accurate basic stats.
 *
 * @param dbName    database of the target table
 * @param tableName name of the target table
 * @throws SemanticException if task construction fails
 */
@SuppressWarnings("unchecked")
private void setStatsForNonNativeTable(String dbName, String tableName) throws SemanticException {
  String fullTableName = DDLSemanticAnalyzer.getDotName(new String[] { dbName, tableName });

  // DROPPROPS with dropIfExists: removing an absent property is a no-op
  // rather than an error.
  AlterTableDesc dropStatsDesc = new AlterTableDesc(AlterTableTypes.DROPPROPS, null, false);
  dropStatsDesc.setOldName(fullTableName);
  dropStatsDesc.setDropIfExists(true);

  HashMap<String, String> propsToDrop = new HashMap<>();
  propsToDrop.put(StatsSetupConst.COLUMN_STATS_ACCURATE, null);
  dropStatsDesc.setProps(propsToDrop);

  this.rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), dropStatsDesc)));
}
Table tbl = db.getTable(alterTbl.getOldName()); if(alterTbl.getPartSpec() != null) { part = db.getPartition(tbl, alterTbl.getPartSpec(), false); if(part == null) { console.printError("Partition : " + alterTbl.getPartSpec().toString() + " does not exist."); return 1; validateAlterTableType(tbl, alterTbl.getOp(), alterTbl.getExpectView()); if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.RENAME) { tbl.setTableName(alterTbl.getNewName()); } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ADDCOLS) { List<FieldSchema> newCols = alterTbl.getNewCols(); List<FieldSchema> oldCols = tbl.getCols(); if (tbl.getSerializationLib().equals( } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.RENAMECOLUMN) { List<FieldSchema> oldCols = tbl.getCols(); List<FieldSchema> newCols = new ArrayList<FieldSchema>(); Iterator<FieldSchema> iterOldCols = oldCols.iterator(); String oldName = alterTbl.getOldColName(); String newName = alterTbl.getNewColName(); String type = alterTbl.getNewColType(); String comment = alterTbl.getNewColComment(); boolean first = alterTbl.getFirst(); String afterCol = alterTbl.getAfterCol(); FieldSchema column = null;
if (alterTable != null) { Table table = hive.getTable(SessionState.get().getCurrentDatabase(), Utilities.getDbTableName(alterTable.getOldName())[1], false); if (alterTable.getPartSpec() != null) { part = hive.getPartition(table, alterTable.getPartSpec(), false); String newLocation = alterTable.getNewLocation(); if (alterTable.getOp() == AlterTableDesc.AlterTableTypes.ALTERLOCATION) { if (part != null) {
private void addInputsOutputsAlterTable(String tableName, Map<String, String> partSpec, AlterTableDesc desc, AlterTableTypes op) throws SemanticException { boolean isCascade = desc != null && desc.getIsCascade(); boolean alterPartitions = partSpec != null && !partSpec.isEmpty(); } else { if (!AlterTableDesc.doesAlterTableTypeSupportPartialPartitionSpec(op)) { throw new SemanticException( ErrorMsg.ALTER_TABLE_TYPE_PARTIAL_PARTITION_SPEC_NO_SUPPORTED, op.getName()); validateAlterTableType(tab, op, desc.getExpectView()); if (op == AlterTableDesc.AlterTableTypes.DROPPROPS && !desc.getIsDropIfExists()) { Map<String, String> tableParams = tab.getTTable().getParameters(); for (String currKey : desc.getProps().keySet()) { if (!tableParams.containsKey(currKey)) { String errorMsg =
AlterTableDesc addConstraintsDesc = new AlterTableDesc(actualDbName + "." + actualTblName, new ArrayList<SQLPrimaryKey>(), new ArrayList<SQLForeignKey>(),
new AlterTableDesc(AlterTableTypes.ALTERPROTECTMODE); alterTblDesc.setOldName(tableName); alterTblDesc.setPartSpec(partSpec); alterTblDesc.setProtectModeEnable(true); break; case HiveParser.TOK_DISABLE: alterTblDesc.setProtectModeEnable(false); break; default: switch (grandChild.getToken().getType()) { case HiveParser.TOK_OFFLINE: alterTblDesc.setProtectModeType(AlterTableDesc.ProtectModeType.OFFLINE); break; case HiveParser.TOK_NO_DROP: if (grandChild.getChildCount() > 0) { alterTblDesc.setProtectModeType(AlterTableDesc.ProtectModeType.NO_DROP_CASCADE); alterTblDesc.setProtectModeType(AlterTableDesc.ProtectModeType.NO_DROP);
/** * Process "alter table <name> skewed by .. on .. stored as directories * @param ast * @param tableName * @param tab * @throws SemanticException */ private void handleAlterTableSkewedBy(ASTNode ast, String tableName, Table tab) throws SemanticException { List<String> skewedColNames = new ArrayList<String>(); List<List<String>> skewedValues = new ArrayList<List<String>>(); /* skewed column names. */ ASTNode skewedNode = (ASTNode) ast.getChild(0); skewedColNames = analyzeSkewedTablDDLColNames(skewedColNames, skewedNode); /* skewed value. */ analyzeDDLSkewedValues(skewedValues, skewedNode); // stored as directories boolean storedAsDirs = analyzeStoredAdDirs(skewedNode); AlterTableDesc alterTblDesc = new AlterTableDesc(tableName, false, skewedColNames, skewedValues); alterTblDesc.setStoredAsSubDirectories(storedAsDirs); /** * Validate information about skewed table */ alterTblDesc.setTable(tab); alterTblDesc.validate(); rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), alterTblDesc))); }
ReplicationSpec.KEY.CURR_STATE_ID.toString(), tablesUpdated.get(tableName).toString()); AlterTableDesc alterTblDesc = new AlterTableDesc( AlterTableDesc.AlterTableTypes.ADDPROPS, null, false); alterTblDesc.setProps(mapProp); alterTblDesc.setOldName(tableName); Task<? extends Serializable> updateReplIdTask = TaskFactory.get( new DDLWork(inputs, outputs, alterTblDesc), conf);
alterTblDesc = new AlterTableDesc(AlterTableTypes.DROPPROPS, partSpec, expectView); if (ast.getChild(1) != null) { alterTblDesc.setDropIfExists(true); alterTblDesc = new AlterTableDesc(AlterTableTypes.ADDPROPS, partSpec, expectView); alterTblDesc.setProps(mapProp); alterTblDesc.setEnvironmentContext(environmentContext); alterTblDesc.setOldName(tableName); alterTblDesc.setIsFullAcidConversion(isAcidConversion); setAcidDdlDesc(alterTblDesc); ddlWork.setNeedLock(true); // Hmm... why don't many other operations here need locks? Table table = getTable(qualified, true); if (AcidUtils.isTransactionalTable(table)) { alterTblDesc.setIsExplicitStatsUpdate(true); setAcidDdlDesc(alterTblDesc);
/**
 * Processes "ALTER TABLE name NOT STORED AS DIRECTORIES".
 *
 * Only meaningful for a table that is already skewed: the existing skewed
 * column names/values are kept and the stored-as-subdirectories flag is
 * turned off.
 *
 * @param tableName table being altered
 * @param tab       resolved table, supplies the current skewed metadata
 * @throws SemanticException if the table is not skewed
 */
private void handleAlterTableDisableStoredAsDirs(String tableName, Table tab)
    throws SemanticException {
  List<String> skewedColNames = tab.getSkewedColNames();
  List<List<String>> skewedColValues = tab.getSkewedColValues();

  // Reject tables with no skew metadata — there are no skew directories to
  // disable.
  boolean noSkewedCols = skewedColNames == null || skewedColNames.isEmpty();
  boolean noSkewedVals = skewedColValues == null || skewedColValues.isEmpty();
  if (noSkewedCols || noSkewedVals) {
    throw new SemanticException(ErrorMsg.ALTER_TBL_STOREDASDIR_NOT_SKEWED.getMsg(tableName));
  }

  AlterTableDesc disableDesc =
      new AlterTableDesc(tableName, false, skewedColNames, skewedColValues);
  disableDesc.setStoredAsSubDirectories(false);

  rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), disableDesc)));
}
Table tbl = db.getTable(alterTbl.getOldName()); if (alterTbl.getPartSpec() != null) { Map<String, String> partSpec = alterTbl.getPartSpec(); if (DDLSemanticAnalyzer.isFullSpec(tbl, partSpec)) { allPartitions = new ArrayList<Partition>(); StringUtils.join(alterTbl.getPartSpec().keySet(), ',') + " for table " + alterTbl.getOldName()); allPartitions = db.getPartitions(tbl, alterTbl.getPartSpec()); db.alterTable(alterTbl.getOldName(), tbl, alterTbl.getIsCascade()); } else { db.alterPartitions(tbl.getTableName(), allPartitions);
/**
 * Convenience overload: registers read/write entities for an alter-table
 * statement, deriving the operation type from {@code desc.getOp()} and
 * delegating to the full overload.
 *
 * @param tableName        table being altered
 * @param partSpec         partition spec, or null for a table-level alter
 * @param desc             alter-table descriptor (supplies the op type)
 * @param doForceExclusive whether to force an exclusive lock on the entity
 * @throws SemanticException propagated from the delegate
 */
private void addInputsOutputsAlterTable(String tableName, Map<String, String> partSpec, AlterTableDesc desc, boolean doForceExclusive) throws SemanticException { addInputsOutputsAlterTable(tableName, partSpec, desc, desc.getOp(), doForceExclusive); }
if (!allowOperationInReplicationScope(db, alterTbl.getOldName(), null, alterTbl.getReplicationSpec())) { LOG.debug("DDLTask: Alter Table is skipped as table {} is newer than update", alterTbl.getOldName()); return 0; if (alterTbl.getOp() == AlterTableTypes.DROPCONSTRAINT ) { return dropConstraint(db, alterTbl); } else if (alterTbl.getOp() == AlterTableTypes.ADDCONSTRAINT) { return addConstraints(db, alterTbl); } else {
/**
 * Processes "ALTER TABLE name UPDATE COLUMNS [CASCADE]".
 *
 * Builds an UPDATECOLUMNS descriptor (propagating CASCADE if present in the
 * AST), marks it as an ACID DDL when the table is transactional, and queues
 * the alter-table task.
 *
 * @param ast       AST of the statement, checked for a TOK_CASCADE child
 * @param tableName table being altered
 * @param partSpec  partition spec, or null for a table-level alter
 * @throws SemanticException if the table cannot be resolved
 */
private void analyzeAlterTableUpdateColumns(ASTNode ast, String tableName,
    HashMap<String, String> partSpec) throws SemanticException {
  boolean cascade = ast.getFirstChildWithType(HiveParser.TOK_CASCADE) != null;

  AlterTableDesc updateColsDesc = new AlterTableDesc(AlterTableTypes.UPDATECOLUMNS);
  updateColsDesc.setOldName(tableName);
  updateColsDesc.setIsCascade(cascade);
  updateColsDesc.setPartSpec(partSpec);

  // Transactional tables need the descriptor recorded for ACID handling
  // (write-ID allocation).
  if (AcidUtils.isTransactionalTable(getTable(tableName))) {
    setAcidDdlDesc(updateColsDesc);
  }

  rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), updateColsDesc), conf));
}
/**
 * Drops the constraint named in {@code alterTbl} from its table.
 *
 * @param db       metastore client handle
 * @param alterTbl carries the qualified table name and the constraint name
 * @return 0 on success
 * @throws HiveException if the constraint (or table) does not exist, with
 *         the metastore's NoSuchObjectException as the cause
 */
private int dropConstraint(Hive db, AlterTableDesc alterTbl) throws SemanticException, HiveException {
  String qualifiedName = alterTbl.getOldName();
  String dbName = Utilities.getDatabaseName(qualifiedName);
  String tblName = Utilities.getTableName(qualifiedName);
  try {
    db.dropConstraint(dbName, tblName, alterTbl.getConstraintName());
  } catch (NoSuchObjectException e) {
    // Surface a missing constraint as a HiveException, preserving the cause.
    throw new HiveException(e);
  }
  return 0;
}
/**
 * Returns the fully qualified name of the table this descriptor targets;
 * for alter-table this is the (old) name the statement was issued against.
 */
@Override public String getFullTableName() { return getOldName(); }