Refine search
/** * Creates path where partitions matching prefix should lie in filesystem * @param tbl table in which partition is * @return expected location of partitions matching prefix in filesystem */ public Path createPath(Table tbl) throws HiveException { String prefixSubdir; try { prefixSubdir = Warehouse.makePartName(fields, values); } catch (MetaException e) { throw new HiveException("Unable to get partitions directories prefix", e); } Path tableDir = tbl.getDataLocation(); if (tableDir == null) { throw new HiveException("Table has no location set"); } return new Path(tableDir, prefixSubdir); } /**
/**
 * Recursively deletes {@code dir} via a freshly constructed {@link Warehouse}.
 *
 * @param dir directory to remove
 * @param db  database the directory belongs to (passed through to the warehouse delete)
 * @throws HiveException if the warehouse-level delete fails
 */
private void deleteDir(Path dir, Database db) throws HiveException {
  try {
    new Warehouse(conf).deleteDir(dir, true, db);
  } catch (MetaException e) {
    throw new HiveException(e);
  }
}
/**
 * Parses a partition name (e.g. {@code "a=1/b=2"}) into an ordered key/value spec.
 *
 * @param name partition name to parse; must be non-null and non-empty
 * @return partition spec preserving the key order found in {@code name}
 * @throws MetaException if {@code name} is null or empty
 */
public static LinkedHashMap<String, String> makeSpecFromName(String name) throws MetaException {
  if (name == null || name.isEmpty()) {
    throw new MetaException("Partition name is invalid. " + name);
  }
  LinkedHashMap<String, String> spec = new LinkedHashMap<>();
  // Delegates the actual parsing to the Path-based overload.
  makeSpecFromName(spec, new Path(name), null);
  return spec;
}
/**
 * Returns the external warehouse root, computing and caching it on first use.
 * Falls back to the managed warehouse root when no external root is configured.
 *
 * @return the (cached) external warehouse root path
 * @throws MetaException if resolving the path fails
 */
public Path getWhRootExternal() throws MetaException {
  if (whRootExternal == null) {
    whRootExternal = hasExternalWarehouseRoot()
        ? getDnsPath(new Path(whRootExternalString))
        : getWhRoot();
  }
  return whRootExternal;
}
/**
 * Walks up from {@code parent} for at most {@code depth} levels, deleting each
 * directory that is writable, a directory, and empty. Recursion continues to the
 * next ancestor even when the current level is not deleted.
 *
 * @param parent      starting directory (may be null, which stops the walk)
 * @param depth       maximum number of ancestor levels to consider
 * @param mustPurge   whether deletion should bypass trash
 * @param needRecycle whether deleted data must go through the change manager
 */
private void deleteParentRecursive(Path parent, int depth, boolean mustPurge, boolean needRecycle)
    throws IOException, MetaException {
  // Guard clause mirrors the original short-circuit order exactly.
  if (depth <= 0 || parent == null || !wh.isWritable(parent)) {
    return;
  }
  if (wh.isDir(parent) && wh.isEmpty(parent)) {
    wh.deleteDir(parent, true, mustPurge, needRecycle);
  }
  deleteParentRecursive(parent.getParent(), depth - 1, mustPurge, needRecycle);
}
/**
 * Resolves the target filesystem location for an imported partition and records
 * it in {@code partSpec}. Preference order: explicit table-desc location, then
 * the table's current data location, then the database's default table path.
 * The chosen location is verified empty before being set.
 */
private static void fixLocationInPartSpec(
    FileSystem fs, ImportTableDesc tblDesc, Table table, Warehouse wh,
    ReplicationSpec replicationSpec, AddPartitionDesc.OnePartitionDesc partSpec,
    EximUtil.SemanticAnalyzerWrapperContext x)
    throws MetaException, HiveException, IOException {
  final Path tgtPath;
  if (tblDesc.getLocation() != null) {
    tgtPath = new Path(tblDesc.getLocation(),
        Warehouse.makePartPath(partSpec.getPartSpec()));
  } else if (table.getDataLocation() != null) {
    tgtPath = new Path(table.getDataLocation().toString(),
        Warehouse.makePartPath(partSpec.getPartSpec()));
  } else {
    // No location anywhere: derive the default table path from the parent database.
    Database parentDb = x.getHive().getDatabase(tblDesc.getDatabaseName());
    tgtPath = new Path(
        wh.getDefaultTablePath(parentDb, tblDesc.getTableName()),
        Warehouse.makePartPath(partSpec.getPartSpec()));
  }
  FileSystem tgtFs = FileSystem.get(tgtPath.toUri(), x.getConf());
  checkTargetLocationEmpty(tgtFs, tgtPath, replicationSpec, x);
  partSpec.setLocation(tgtPath.toString());
}
// NOTE(review): truncated excerpt — braces are unbalanced in this snippet; code left byte-identical.
// Qualifies the repl load path against its FileSystem and fails with REPL_LOAD_PATH_NOT_FOUND
// when it does not exist; then (in the truncated continuation) enables the
// REPL_ENABLE_MOVE_OPTIMIZATION flag when ifEnableMoveOptimization() approves the warehouse root.
Path loadPath = new Path(path); final FileSystem fs = loadPath.getFileSystem(conf); loadPath = fs.makeQualified(loadPath); if (!fs.exists(loadPath)) { LOG.error("File not found " + loadPath.toUri().toString()); throw new FileNotFoundException(ErrorMsg.REPL_LOAD_PATH_NOT_FOUND.getMsg()); Warehouse wh = new Warehouse(conf); Path filePath = wh.getWhRoot(); if (ifEnableMoveOptimization(filePath, conf)) { conf.setBoolVar(REPL_ENABLE_MOVE_OPTIMIZATION, true); LOG.info(" Set move optimization to true for warehouse " + filePath.toString());
// NOTE(review): truncated test-setup excerpt (unclosed braces); code left byte-identical.
// Picks a free metastore port, suffixes the warehouse dir with it, points
// MetastoreConf.WAREHOUSE at the new dir, and — if the warehouse root is missing —
// creates it with permissions rwxrwxrwx (presumably to let any test user write; verify).
int metaStorePort = findFreePort(); Path postfixedWarehouseDir = new Path(warehouseDir, String.valueOf(metaStorePort)); MetastoreConf.setVar(conf, ConfVars.WAREHOUSE, postfixedWarehouseDir.toString()); Warehouse wh = new Warehouse(conf); if (!wh.isDir(wh.getWhRoot())) { FileSystem fs = wh.getWhRoot().getFileSystem(conf); fs.mkdirs(wh.getWhRoot()); fs.setPermission(wh.getWhRoot(), new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL)); LOG.info("MetaStore warehouse root dir ({}) is created", postfixedWarehouseDir);
// NOTE(review): truncated metadata-export listener excerpt (IHMSHandler/Configuration variant);
// unbalanced braces, code left byte-identical. Resolves the table's FileSystem, chooses the
// export root (home directory when the configured export path is the empty string), dumps the
// table metadata via EximUtil.createExportDump, and — when moveMetadataToTrash is set —
// deletes metaPath through the warehouse.
Table tbl = tableEvent.getTable(); String name = tbl.getTableName(); org.apache.hadoop.hive.ql.metadata.Table mTbl = new org.apache.hadoop.hive.ql.metadata.Table( tbl); IHMSHandler handler = tableEvent.getHandler(); Configuration conf = handler.getConf(); Warehouse wh = new Warehouse(conf); Path tblPath = new Path(tbl.getSd().getLocation()); fs = wh.getFs(tblPath); Date now = new Date(); SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd-HH-mm-ss"); Path exportPath = null; if (exportPathString != null && exportPathString.length() == 0) { exportPath = fs.getHomeDirectory(); } else { exportPath = new Path(exportPathString); Path outFile = new Path(metaPath, name + EximUtil.METADATA_NAME); try { SessionState.getConsole().printInfo("Beginning metadata export"); EximUtil.createExportDump(fs, outFile, mTbl, null, null, new HiveConf(conf, MetaDataExportListener.class)); if (moveMetadataToTrash == true) { wh.deleteDir(metaPath, true, false, false);
// NOTE(review): truncated alter/rename-partition excerpt (unbalanced braces); code byte-identical.
// Computes the renamed partition's destination path, creates the destination parent, renames the
// partition directory (CM-aware when the db is a replication source), then updates column stats;
// on a later failure it attempts to roll the data move back by renaming destPath to srcPath.
destPath = wh.getPartitionPath(db, tbl, new_part.getValues()); destPath = constructRenamedPath(destPath, new Path(new_part.getSd().getLocation())); } catch (NoSuchObjectException e) { LOG.debug("Didn't find object in metastore ", e); newPartLoc = destPath.toString(); oldPartLoc = oldPart.getSd().getLocation(); LOG.info("srcPath:" + oldPartLoc); LOG.info("descPath:" + newPartLoc); srcPath = new Path(oldPartLoc); srcFs = wh.getFs(srcPath); destFs = wh.getFs(destPath); Path destParentPath = destPath.getParent(); if (!wh.mkdirs(destParentPath)) { throw new MetaException("Unable to create path " + destParentPath); wh.renameDir(srcPath, destPath, ReplChangeManager.isSourceOfReplication(db)); LOG.info("Partition directory rename from " + srcPath + " to " + destPath + " done."); dataWasMoved = true; String newPartName = Warehouse.makePartName(tbl.getPartitionKeys(), new_part.getValues()); ColumnStatistics cs = updateOrGetPartitionColumnStats(msdb, catName, dbname, name, oldPart.getValues(), oldPart.getSd().getCols(), tbl, new_part, null, null); LOG.error("Revert the data move in renaming a partition."); try { if (destFs.exists(destPath)) { wh.renameDir(destPath, srcPath, false);
// NOTE(review): truncated metadata-export listener excerpt (older HMSHandler/HiveConf variant of
// the IHMSHandler snippet elsewhere in this dump); unbalanced braces, code byte-identical.
// Reads METADATA_EXPORT_LOCATION and MOVE_EXPORTED_METADATA_TO_TRASH from HiveConf, builds a
// timestamped export directory, dumps table metadata, and optionally deletes metaPath.
Table tbl = tableEvent.getTable(); String name = tbl.getTableName(); org.apache.hadoop.hive.ql.metadata.Table mTbl = new org.apache.hadoop.hive.ql.metadata.Table( tbl); HMSHandler handler = tableEvent.getHandler(); HiveConf hiveconf = handler.getHiveConf(); Warehouse wh = new Warehouse(hiveconf); Path tblPath = new Path(tbl.getSd().getLocation()); fs = wh.getFs(tblPath); Date now = new Date(); SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd-HH-mm-ss"); String dateString = sdf.format(now); String exportPathString = hiveconf.getVar(HiveConf.ConfVars.METADATA_EXPORT_LOCATION); boolean moveMetadataToTrash = hiveconf .getBoolVar(HiveConf.ConfVars.MOVE_EXPORTED_METADATA_TO_TRASH); Path exportPath = null; if (exportPathString != null && exportPathString.length() == 0) { exportPath = fs.getHomeDirectory(); } else { exportPath = new Path(exportPathString); Path metaPath = new Path(exportPath, name + "." + dateString); SessionState.getConsole().printInfo("Beginning metadata export"); EximUtil.createExportDump(fs, outFile, mTbl, null, null); if (moveMetadataToTrash == true) { wh.deleteDir(metaPath, true);
// NOTE(review): truncated partition-initialization excerpt (unbalanced braces); code byte-identical.
// Skips location handling for views; for partitioned tables with no SD location set, derives the
// default partition location as tableLocation/makePartName(partCols, values).
setTPartition(tPartition); if (table.isView()) { return; if (table.isPartitioned()) { try { if (tPartition.getSd().getLocation() == null) { if (table.getDataLocation() != null) { Path partPath = new Path(table.getDataLocation(), Warehouse.makePartName(table.getPartCols(), tPartition.getValues())); tPartition.getSd().setLocation(partPath.toString()); throw new HiveException("Invalid partition for table " + table.getTableName(), e);
// NOTE(review): truncated temp-table creation excerpt — begins mid-signature; code byte-identical.
// Requires an active SessionState; DNS-qualifies the table location, creates the directory if
// missing, writes the resolved location back into the SD, and registers the table in the
// session's per-database temp-table map.
MetaException, NoSuchObjectException, TException { SessionState ss = SessionState.get(); if (ss == null) { throw new MetaException("No current SessionState, cannot create temporary table" Path tblPath = wh.getDnsPath(new Path(tbl.getSd().getLocation())); if (tblPath == null) { throw new MetaException("Temp table path not set for " + tbl.getTableName()); } else { if (!wh.isDir(tblPath)) { if (!wh.mkdirs(tblPath, true)) { throw new MetaException(tblPath + " is not a directory or unable to create one"); tbl.getSd().setLocation(tblPath.toString()); Table tTable = new Table(tbl); if (tables == null) { tables = new HashMap<String, Table>(); ss.getTempTables().put(dbName, tables);
/**
 * Checks if a partition lives in a custom (non-standard) location.
 *
 * @param tbl table in which the partition is
 * @param p   partition to check
 * @return true if the partition location is custom, false if it is standard
 * @throws HiveException if the partition directory name cannot be built
 *                       or the table has no location set
 */
boolean partitionInCustomLocation(Table tbl, Partition p) throws HiveException {
  final String partSubdir;
  try {
    partSubdir = Warehouse.makePartName(tbl.getPartCols(), p.getValues());
  } catch (MetaException e) {
    throw new HiveException("Unable to get partition's directory", e);
  }
  Path tableDir = tbl.getDataLocation();
  if (tableDir == null) {
    throw new HiveException("Table has no location set");
  }
  String standardLocation = new Path(tableDir, partSubdir).toString();
  // Archived partitions are compared against their original (pre-archive) location.
  String actualLocation = ArchiveUtils.isArchived(p)
      ? getOriginalLocation(p)
      : p.getLocation();
  return !actualLocation.equals(standardLocation);
}
// NOTE(review): truncated warehouse-migration option excerpt (unbalanced braces); code byte-identical.
// Disables shouldModifyManagedTableLocation when the old warehouse root equals the current one,
// when their qualified filesystems differ, or when the old root is not on HDFS; otherwise builds
// an old-warehouse Warehouse handle with METASTOREWAREHOUSE pointed at the old root.
runOptions.shouldModifyManagedTableLocation = false; } else { String curWarehouseRoot = HiveConf.getVar(conf, HiveConf.ConfVars.METASTOREWAREHOUSE); if (arePathsEqual(conf, runOptions.oldWarehouseRoot, curWarehouseRoot)) { LOG.info("oldWarehouseRoot is the same as the current warehouse root {}." runOptions.shouldModifyManagedTableLocation = false; } else { Path oldWhRootPath = new Path(runOptions.oldWarehouseRoot); curWhRootPath = new Path(curWarehouseRoot); FileSystem oldWhRootFs = oldWhRootPath.getFileSystem(conf); FileSystem curWhRootFs = curWhRootPath.getFileSystem(conf); oldWhRootPath = oldWhRootFs.makeQualified(oldWhRootPath); curWhRootPath = curWhRootFs.makeQualified(curWhRootPath); if (!FileUtils.equalsFileSystem(oldWhRootFs, curWhRootFs)) { LOG.info("oldWarehouseRoot {} has a different FS than the current warehouse root {}." if (!isHdfs(oldWhRootFs)) { LOG.info("Warehouse is using non-HDFS FileSystem {}. Disabling shouldModifyManagedTableLocation", oldWhRootFs.getUri()); runOptions.shouldModifyManagedTableLocation = false; } else { HiveConf.setVar(oldWhConf, HiveConf.ConfVars.METASTOREWAREHOUSE, runOptions.oldWarehouseRoot); oldWh = new Warehouse(oldWhConf);
/**
 * Generates a unique location for a temporary table under the session's
 * temp-table space.
 *
 * @param conf configuration used to resolve the temp-table space and DNS path
 * @return fully qualified location string for the temp table
 * @throws MetaException if the DNS path cannot be resolved
 */
public static String generateTempTableLocation(Configuration conf) throws MetaException {
  // Random UUID keeps concurrent temp tables from colliding under the same root.
  Path tmpPath = new Path(SessionState.getTempTableSpace(conf), UUID.randomUUID().toString());
  return Warehouse.getDnsPath(tmpPath, conf).toString();
}
// NOTE(review): truncated drop-database excerpt (unbalanced braces); code byte-identical.
// Before deleting the database directory it verifies writability of the db's parent, of each
// materialized view's parent location, and of each to-be-deleted table's parent location,
// raising MetaException otherwise; the final delete failure is logged rather than rethrown.
Path path = new Path(db.getLocationUri()).getParent(); if (!wh.isWritable(path)) { throw new MetaException("Database not dropped since " + path + " is not writable by " + Path databasePath = wh.getDnsPath(wh.getDatabasePath(db)); for (Table materializedView : materializedViews) { if (materializedView.getSd().getLocation() != null) { Path materializedViewPath = wh.getDnsPath(new Path(materializedView.getSd().getLocation())); if (!wh.isWritable(materializedViewPath.getParent())) { throw new MetaException("Database metadata not deleted since table: " + materializedView.getTableName() + " has a parent location " + materializedViewPath.getParent() + boolean tableDataShouldBeDeleted = checkTableDataShouldBeDeleted(table, deleteData); if (table.getSd().getLocation() != null && tableDataShouldBeDeleted) { tablePath = wh.getDnsPath(new Path(table.getSd().getLocation())); if (!wh.isWritable(tablePath.getParent())) { throw new MetaException("Database metadata not deleted since table: " + table.getTableName() + " has a parent location " + tablePath.getParent() + wh.deleteDir(new Path(db.getLocationUri()), true, db); } catch (Exception e) { LOG.error("Failed to delete database directory: " + db.getLocationUri() +
// NOTE(review): truncated exchange-partition excerpt (unbalanced braces); code byte-identical.
// Computes source/destination partition paths from makePartName, rejects the exchange when a
// destination partition name already exists, adds the new partition objects to the metastore,
// creates the destination parent dir, renames the source dir into place, and on transaction
// rollback renames the directory back if the move had happened.
Path sourcePath = new Path(sourceTable.getSd().getLocation(), Warehouse.makePartName(partitionKeysPresent, partValsPresent)); Path destPath = new Path(destinationTable.getSd().getLocation(), Warehouse.makePartName(partitionKeysPresent, partValsPresent)); List<Partition> destPartitions = new ArrayList<>(); for (Partition partition : partitionsToExchange) { String partToExchangeName = Warehouse.makePartName(destinationTable.getPartitionKeys(), partition.getValues()); if (destPartitionNames.contains(partToExchangeName)) { throw new MetaException("The partition " + partToExchangeName destPartition.setDbName(parsedDestDbName[DB_NAME]); destPartition.setTableName(destinationTable.getTableName()); Path destPartitionPath = new Path(destinationTable.getSd().getLocation(), Warehouse.makePartName(destinationTable.getPartitionKeys(), partition.getValues())); destPartition.getSd().setLocation(destPartitionPath.toString()); ms.addPartition(destPartition); Path destParentPath = destPath.getParent(); if (!wh.isDir(destParentPath)) { if (!wh.mkdirs(destParentPath)) { throw new MetaException("Unable to create path " + destParentPath); pathCreated = wh.renameDir(sourcePath, destPath, false); ms.rollbackTransaction(); if (pathCreated) { wh.renameDir(destPath, sourcePath, false);
// NOTE(review): truncated semantic-analyzer excerpt (unbalanced braces); code byte-identical.
// Deep-copies the resolved table, then chooses a scratch location: the table-desc location when
// HIVESTATSAUTOGATHER is enabled, otherwise the destination database's path obtained from a
// fresh Warehouse; MetaException is surfaced as SemanticException.
if (tab != null) { tab = new Table(tab.getTTable().deepCopy()); tab.getDbName().equals(SessionState.get().getCurrentDatabase())) { Table materializedTab = ctx.getMaterializedTable(cteName); if (materializedTab == null) { if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVESTATSAUTOGATHER)) { location = new Path(qb.getTableDesc().getLocation()); } else { String[] names = Utilities.getDbTableName(tableName); try { Warehouse wh = new Warehouse(conf); destTableDb = names[0]; location = wh.getDatabasePath(db.getDatabase(destTableDb)); } catch (MetaException e) { throw new SemanticException(e); "Error creating temporary folder on: " + location.toString()), e); if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVESTATSAUTOGATHER)) { TableSpec ts = new TableSpec(db, conf, this.ast);
private static void createPartitionIfNotExists(HiveEndPoint ep, IMetaStoreClient msClient, HiveConf conf) throws PartitionCreationFailed { if (ep.partitionVals.isEmpty()) { return; } try { org.apache.hadoop.hive.ql.metadata.Table tableObject = new org.apache.hadoop.hive.ql.metadata.Table(msClient.getTable(ep.database, ep.table)); Map<String, String> partSpec = Warehouse.makeSpecFromValues(tableObject.getPartitionKeys(), ep.partitionVals); AddPartitionDesc addPartitionDesc = new AddPartitionDesc(ep.database, ep.table, true); String partLocation = new Path(tableObject.getDataLocation(), Warehouse.makePartPath(partSpec)).toString(); addPartitionDesc.addPartition(partSpec, partLocation); Partition partition = Hive.convertAddSpecToMetaPartition(tableObject, addPartitionDesc.getPartition(0), conf); msClient.add_partition(partition); } catch (AlreadyExistsException e) { //ignore this - multiple clients may be trying to create the same partition //AddPartitionDesc has ifExists flag but it's not propagated to // HMSHnalder.add_partitions_core() and so it throws... } catch(HiveException|TException e) { LOG.error("Failed to create partition : " + ep, e); throw new PartitionCreationFailed(ep, e); } }