.getWork().getCreateTblDesc(); if (desc == null) {
return createDatabase(db, createDatabaseDesc); return dropDatabase(db, dropDatabaseDesc); return lockDatabase(db, lockDatabaseDesc); return unlockDatabase(db, unlockDatabaseDesc); return switchDatabase(db, switchDatabaseDesc); return descDatabase(db, descDatabaseDesc); return alterDatabase(db, alterDatabaseDesc); return createTable(db, crtTbl); return createTableLike(db, crtTblLike); dropTableOrPartitions(db, dropTbl); return 0; if (!allowOperationInReplicationScope(db, alterTbl.getOldName(), null, alterTbl.getReplicationSpec())) { return dropConstraint(db, alterTbl); } else if (alterTbl.getOp() == AlterTableTypes.ADDCONSTRAINT) { return addConstraints(db, alterTbl); } else { return alterTable(db, alterTbl);
if(partitionInCustomLocation(tbl, p)) { String message = String.format("ARCHIVE cannot run for partition " + "groups with custom locations like %s", p.getLocation()); originalDir = new Path(getOriginalLocation(p)); } else { originalDir = p.getDataLocation(); if (pathExists(intermediateArchivedDir) || pathExists(intermediateOriginalDir)) { recovery = true; console.printInfo("Starting recovery after failed ARCHIVE"); if (!pathExists(intermediateArchivedDir) && !pathExists(intermediateOriginalDir)) { if (pathExists(intermediateArchivedDir)) { throw new HiveException("The intermediate archive directory already exists."); if (pathExists(intermediateArchivedDir)) { console.printInfo("Intermediate archive directory " + intermediateArchivedDir + " already exists. Assuming it contains an archived version of the partition"); if (!pathExists(intermediateOriginalDir)) { console.printInfo("Moving " + originalDir + " to " + intermediateOriginalDir); moveDir(fs, originalDir, intermediateOriginalDir); } else { console.printInfo(intermediateOriginalDir + " already exists. " +
return createDatabase(db, createDatabaseDesc); return dropDatabase(db, dropDatabaseDesc); return lockDatabase(db, lockDatabaseDesc); return unlockDatabase(db, unlockDatabaseDesc); return switchDatabase(db, switchDatabaseDesc); return descDatabase(db, descDatabaseDesc); return alterDatabase(db, alterDatabaseDesc); return createTable(db, crtTbl); return createIndex(db, crtIndex); return alterIndex(db, alterIndex); return dropIndex(db, dropIdx); return createTableLike(db, crtTblLike); dropTableOrPartitions(db, dropTbl); return 0; if (alterTbl != null) { if (alterTbl.getOp() == AlterTableTypes.DROPCONSTRAINT ) {
if(partitionInCustomLocation(tbl, p)) { String message = String.format("UNARCHIVE cannot run for partition " + "groups with custom locations like %s", p.getLocation()); Partition p = partitions.get(0); if(ArchiveUtils.isArchived(p)) { originalDir = new Path(getOriginalLocation(p)); } else { originalDir = new Path(p.getLocation()); originalDir.getName() + INTERMEDIATE_EXTRACTED_DIR_SUFFIX); boolean recovery = false; if(pathExists(intermediateArchivedDir) || pathExists(intermediateExtractedDir)) { recovery = true; console.printInfo("Starting recovery after failed UNARCHIVE"); checkArchiveProperty(partSpecLevel, recovery, p); if(!pathExists(intermediateArchivedDir) && !pathExists(archivePath)) { throw new HiveException("Haven't found any archive where it should be"); if (!pathExists(intermediateExtractedDir) && !pathExists(intermediateArchivedDir)) { try { if (!pathExists(intermediateArchivedDir)) { try { console.printInfo("Moving " + originalDir + " to " + intermediateArchivedDir);
return createDatabase(db, createDatabaseDesc); return dropDatabase(db, dropDatabaseDesc); return switchDatabase(db, switchDatabaseDesc); return descDatabase(descDatabaseDesc); return alterDatabase(alterDatabaseDesc); return createTable(db, crtTbl); return createIndex(db, crtIndex); return alterIndex(db, alterIndex); return dropIndex(db, dropIdx); return createTableLike(db, crtTblLike); return dropTable(db, dropTbl); return alterTable(db, alterTbl); return createView(db, crtView); return addPartition(db, addPartitionDesc); if (simpleDesc != null) {
validateAlterTableType(tbl, AlterTableDesc.AlterTableTypes.ARCHIVE); if (isArchived(p)) { Path originalDir = new Path(getOriginalLocation(p)); Path leftOverIntermediateOriginal = new Path(originalDir.getParent(), originalDir.getName() + INTERMEDIATE_ORIGINAL_DIR_SUFFIX); if (pathExists(leftOverIntermediateOriginal)) { console.printInfo("Deleting " + leftOverIntermediateOriginal + " left over from a previous archiving operation"); deleteDir(leftOverIntermediateOriginal); if (!pathExists(intermediateArchivedDir) && !pathExists(intermediateOriginalDir)) { if (pathExists(intermediateArchivedDir)) { throw new HiveException("The intermediate archive directory already exists."); if (pathExists(intermediateArchivedDir)) { console.printInfo("Intermediate archive directory " + intermediateArchivedDir + " already exists. Assuming it contains an archived version of the partition"); if (!pathExists(intermediateOriginalDir)) { console.printInfo("Moving " + originalDir + " to " + intermediateOriginalDir); moveDir(fs, originalDir, intermediateOriginalDir); } else { console.printInfo(intermediateOriginalDir + " already exists. " +
validateAlterTableType(tbl, AlterTableDesc.AlterTableTypes.UNARCHIVE); if (!isArchived(p)) { Path location = new Path(p.getLocation()); Path leftOverArchiveDir = new Path(location.getParent(), location.getName() + INTERMEDIATE_ARCHIVED_DIR_SUFFIX); if (pathExists(leftOverArchiveDir)) { console.printInfo("Deleting " + leftOverArchiveDir + " left over " + "from a previous unarchiving operation"); deleteDir(leftOverArchiveDir); Path originalLocation = new Path(getOriginalLocation(p)); Path sourceDir = new Path(p.getLocation()); Path intermediateArchiveDir = new Path(originalLocation.getParent(), if (!pathExists(intermediateExtractedDir) && !pathExists(intermediateArchiveDir)) { try { if (!pathExists(intermediateArchiveDir)) { try { console.printInfo("Moving " + originalLocation + " to " + intermediateArchiveDir); if (!pathExists(originalLocation)) { try { console.printInfo("Moving " + intermediateExtractedDir + " to " + originalLocation); setUnArchived(p);
return createDatabase(db, createDatabaseDesc); return dropDatabase(db, dropDatabaseDesc); return lockDatabase(lockDatabaseDesc); return unlockDatabase(unlockDatabaseDesc); return switchDatabase(db, switchDatabaseDesc); return descDatabase(descDatabaseDesc); return alterDatabase(alterDatabaseDesc); return createTable(db, crtTbl); return createIndex(db, crtIndex); return alterIndex(db, alterIndex); return dropIndex(db, dropIdx); return createTableLike(db, crtTblLike); dropTableOrPartitions(db, dropTbl); return 0; return alterTable(db, alterTbl);
if(partitionInCustomLocation(tbl, p)) { String message = String.format("UNARCHIVE cannot run for partition " + "groups with custom locations like %s", p.getLocation()); Partition p = partitions.get(0); if(ArchiveUtils.isArchived(p)) { originalDir = new Path(getOriginalLocation(p)); } else { originalDir = new Path(p.getLocation()); originalDir.getName() + INTERMEDIATE_EXTRACTED_DIR_SUFFIX); boolean recovery = false; if(pathExists(intermediateArchivedDir) || pathExists(intermediateExtractedDir)) { recovery = true; console.printInfo("Starting recovery after failed UNARCHIVE"); checkArchiveProperty(partSpecLevel, recovery, p); if(!pathExists(intermediateArchivedDir) && !pathExists(archivePath)) { throw new HiveException("Haven't found any archive where it should be"); if (!pathExists(intermediateExtractedDir) && !pathExists(intermediateArchivedDir)) { try { if (!pathExists(intermediateArchivedDir)) { try { console.printInfo("Moving " + originalDir + " to " + intermediateArchivedDir);
if(partitionInCustomLocation(tbl, p)) { String message = String.format("ARCHIVE cannot run for partition " + "groups with custom locations like %s", p.getLocation()); originalDir = new Path(getOriginalLocation(p)); } else { originalDir = p.getDataLocation(); if (pathExists(intermediateArchivedDir) || pathExists(intermediateOriginalDir)) { recovery = true; console.printInfo("Starting recovery after failed ARCHIVE"); if (!pathExists(intermediateArchivedDir) && !pathExists(intermediateOriginalDir)) { if (pathExists(intermediateArchivedDir)) { throw new HiveException("The intermediate archive directory already exists."); if (pathExists(intermediateArchivedDir)) { console.printInfo("Intermediate archive directory " + intermediateArchivedDir + " already exists. Assuming it contains an archived version of the partition"); if (!pathExists(intermediateOriginalDir)) { console.printInfo("Moving " + originalDir + " to " + intermediateOriginalDir); moveDir(fs, originalDir, intermediateOriginalDir); } else { console.printInfo(intermediateOriginalDir + " already exists. " +
if(partitionInCustomLocation(tbl, p)) { String message = String.format("UNARCHIVE cannot run for partition " + "groups with custom locations like %s", p.getLocation()); Partition p = partitions.get(0); if(ArchiveUtils.isArchived(p)) { originalDir = new Path(getOriginalLocation(p)); } else { originalDir = new Path(p.getLocation()); originalDir.getName() + INTERMEDIATE_EXTRACTED_DIR_SUFFIX); boolean recovery = false; if(pathExists(intermediateArchivedDir) || pathExists(intermediateExtractedDir)) { recovery = true; console.printInfo("Starting recovery after failed UNARCHIVE"); checkArchiveProperty(partSpecLevel, recovery, p); if(!pathExists(intermediateArchivedDir) && !pathExists(archivePath)) { throw new HiveException("Haven't found any archive where it should be"); if (!pathExists(intermediateExtractedDir) && !pathExists(intermediateArchivedDir)) { try { if (!pathExists(intermediateArchivedDir)) { try { console.printInfo("Moving " + originalDir + " to " + intermediateArchivedDir);
/**
 * Compiles the given query and inspects the resulting plan to recover the
 * declared type of the first column in the CREATE TABLE descriptor.
 *
 * @param query a CREATE TABLE statement to compile
 * @return the type string of the first column, or {@code null} if the
 *         query fails to compile
 */
private String getColumnType(String query) {
    Driver driver = createDriver();
    if (driver.compile(query) != 0) {
        // Compilation failed — there is no plan to inspect.
        return null;
    }
    // The compiled plan's single root task is expected to be the DDL task
    // carrying the CREATE TABLE descriptor.
    DDLTask ddlTask = (DDLTask) driver.getPlan().getRootTasks().get(0);
    CreateTableDesc tableDesc = ddlTask.getWork().getCreateTblDesc();
    FieldSchema firstColumn = tableDesc.getCols().get(0);
    return firstColumn.getType();
}
if(partitionInCustomLocation(tbl, p)) { String message = String.format("ARCHIVE cannot run for partition " + "groups with custom locations like %s", p.getLocation()); originalDir = new Path(getOriginalLocation(p)); } else { originalDir = p.getDataLocation(); if (pathExists(intermediateArchivedDir) || pathExists(intermediateOriginalDir)) { recovery = true; console.printInfo("Starting recovery after failed ARCHIVE"); if (!pathExists(intermediateArchivedDir) && !pathExists(intermediateOriginalDir)) { if (pathExists(intermediateArchivedDir)) { throw new HiveException("The intermediate archive directory already exists."); if (pathExists(intermediateArchivedDir)) { console.printInfo("Intermediate archive directory " + intermediateArchivedDir + " already exists. Assuming it contains an archived version of the partition"); if (!pathExists(intermediateOriginalDir)) { console.printInfo("Moving " + originalDir + " to " + intermediateOriginalDir); moveDir(fs, originalDir, intermediateOriginalDir); } else { console.printInfo(intermediateOriginalDir + " already exists. " +
DDLWork work = ddlTask.getWork(); String tableName = null; boolean retrieveAndInclude = false;
.getWork().getCreateTblDesc(); if (desc == null) {
.getWork().getCreateTblDesc(); if (desc == null) {
.getWork().getCreateTblDesc(); if (desc == null) {
.getWork().getCreateTblDesc(); if (desc == null) {