/**
 * Escapes special characters in a path component, delegating to the
 * two-argument overload with a {@code null} default string.
 *
 * @param path the raw path component to escape
 * @return the escaped path name
 */
public static String escapePathName(String path) { return escapePathName(path, null); }
if (equalsFileSystem(srcFs, destFs)) { return copy(srcFs, srcPath, destFs, destPath, true, // delete source false, // overwrite destination
/**
 * Builds a partition name from the given column names and values, delegating
 * to the three-argument overload with a {@code null} default string.
 *
 * @param partCols the partition column names
 * @param vals the partition values, parallel to {@code partCols}
 * @return the constructed partition name
 */
public static String makePartName(List<String> partCols, List<String> vals) { return makePartName(partCols, vals, null); }
/**
 * Unescapes a path component by delegating to {@code FileUtils.unescapePathName}.
 *
 * @param path the escaped path component
 * @return the unescaped path name
 */
private static String unescapePathName(String path) { return FileUtils.unescapePathName(path); }
if (!FileUtils.equalsFileSystem(srcFs, destFs)) { throw new InvalidOperationException("table new location " + destPath + " is on a different file system than the old location "
!org.apache.hadoop.hive.metastore.utils.FileUtils.isDirEmpty(fs, finalOutputPath)) {
/**
 * Lists every file under the given storage-description location, recursing
 * into subdirectories.
 *
 * @param location the storage location to scan
 * @return the {@code FileStatus} objects for all files under the location;
 *         {@code null} only if the exception helper returns normally
 *         (NOTE(review): logAndThrowMetaException presumably always throws,
 *         making the trailing return unreachable — confirm)
 * @throws MetaException if listing the location fails with an IOException
 */
public List<FileStatus> getFileStatusesForLocation(String location) throws MetaException {
  try {
    final Path locationPath = new Path(location);
    final FileSystem fileSystem = locationPath.getFileSystem(conf);
    return FileUtils.getFileStatusRecurse(locationPath, -1, fileSystem);
  } catch (IOException ioe) {
    MetaStoreUtils.logAndThrowMetaException(ioe);
  }
  return null;
}
/**
 * Creates the given directory on its file system.
 *
 * @param f the directory path to create
 * @return {@code true} if the directory was created; {@code false} only if
 *         the exception helper returns normally after an IOException
 * @throws MetaException if resolving the file system or creating the
 *         directory fails with an IOException
 */
public boolean mkdirs(Path f) throws MetaException {
  try {
    final FileSystem fileSystem = getFs(f);
    return FileUtils.mkdir(fileSystem, f);
  } catch (IOException ioe) {
    MetaStoreUtils.logAndThrowMetaException(ioe);
  }
  return false;
}
public boolean renameDir(Path sourcePath, Path destPath, boolean needCmRecycle) throws MetaException { try { if (needCmRecycle) { // Copy the source files to cmroot. As the client will move the source files to another // location, we should make a copy of the files to cmroot instead of moving it. cm.recycle(sourcePath, RecycleType.COPY, true); } FileSystem srcFs = getFs(sourcePath); FileSystem destFs = getFs(destPath); return FileUtils.rename(srcFs, destFs, sourcePath, destPath); } catch (Exception ex) { MetaStoreUtils.logAndThrowMetaException(ex); } return false; }
FileSystem fs = location.getFileSystem(getConf()); if (!org.apache.hadoop.hive.metastore.utils.HdfsUtils.isPathEncrypted(getConf(), fs.getUri(), location) && !FileUtils.pathHasSnapshotSubDir(location, fs)) { HdfsUtils.HadoopFileStatus status = new HdfsUtils.HadoopFileStatus(getConf(), fs, location); FileStatus targetStatus = fs.getFileStatus(location);
/**
 * Splits an escaped partition name of the form {@code k1=v1/k2=v2} into its
 * unescaped values, in order.
 *
 * @param name the partition name, or {@code null}
 * @return the list of unescaped partition values, or {@code null} when
 *         {@code name} is {@code null}
 */
@VisibleForTesting
public static List<String> partNameToVals(String name) {
  if (name == null) {
    return null;
  }
  final List<String> values = new ArrayList<>();
  for (String component : name.split("/")) {
    // Everything after the first '=' is the value; when '=' is absent,
    // indexOf returns -1 and the whole component is taken as the value.
    values.add(FileUtils.unescapePathName(component.substring(component.indexOf('=') + 1)));
  }
  return values;
}
destFs = wh.getFs(destPath); if (!FileUtils.equalsFileSystem(srcFs, destFs)) { throw new InvalidOperationException("New table location " + destPath + " is on a different file system than the old location "
private void checkImmutableTable(QB qb, Table dest_tab, Path dest_path, boolean isPart) throws SemanticException { // If the query here is an INSERT_INTO and the target is an immutable table, // verify that our destination is empty before proceeding if (!dest_tab.isImmutable() || !qb.getParseInfo().isInsertIntoTable( dest_tab.getDbName(), dest_tab.getTableName())) { return; } try { FileSystem fs = dest_path.getFileSystem(conf); if (! org.apache.hadoop.hive.metastore.utils.FileUtils.isDirEmpty(fs,dest_path)){ LOG.warn("Attempted write into an immutable table : " + dest_tab.getTableName() + " : " + dest_path); throw new SemanticException( ErrorMsg.INSERT_INTO_IMMUTABLE_TABLE.getMsg(dest_tab.getTableName())); } } catch (IOException ioe) { LOG.warn("Error while trying to determine if immutable table " + (isPart ? "partition " : "") + "has any data : " + dest_tab.getTableName() + " : " + dest_path); throw new SemanticException(ErrorMsg.INSERT_INTO_IMMUTABLE_TABLE.getMsg(ioe.getMessage())); } }
/**
 * Lists every file making up the given unpartitioned table, recursing into
 * subdirectories of the table's location.
 *
 * @param db the database holding the table
 * @param table the unpartitioned table whose files are listed
 * @return the {@code FileStatus} objects for all files under the table
 *         location; {@code null} only if the exception helper returns
 *         normally after an IOException
 * @throws MetaException if listing the table location fails
 */
public List<FileStatus> getFileStatusesForUnpartitionedTable(Database db, Table table)
    throws MetaException {
  final Path tablePath = getDnsPath(new Path(table.getSd().getLocation()));
  try {
    final FileSystem fileSystem = tablePath.getFileSystem(conf);
    return FileUtils.getFileStatusRecurse(tablePath, -1, fileSystem);
  } catch (IOException ioe) {
    MetaStoreUtils.logAndThrowMetaException(ioe);
  }
  return null;
}
/**
 * Creates the directory {@code f} on the file system it resolves to.
 *
 * @param f the directory path to create
 * @return {@code true} when creation succeeds; {@code false} only if the
 *         exception helper returns normally after an IOException
 * @throws MetaException if the file system lookup or directory creation
 *         fails with an IOException
 */
public boolean mkdirs(Path f) throws MetaException {
  try {
    return FileUtils.mkdir(getFs(f), f);
  } catch (IOException ioe) {
    MetaStoreUtils.logAndThrowMetaException(ioe);
  }
  return false;
}
public boolean renameDir(Path sourcePath, Path destPath, boolean needCmRecycle) throws MetaException { try { if (needCmRecycle) { // Copy the source files to cmroot. As the client will move the source files to another // location, we should make a copy of the files to cmroot instead of moving it. cm.recycle(sourcePath, RecycleType.COPY, true); } FileSystem srcFs = getFs(sourcePath); FileSystem destFs = getFs(destPath); return FileUtils.rename(srcFs, destFs, sourcePath, destPath); } catch (Exception ex) { MetaStoreUtils.logAndThrowMetaException(ex); } return false; }
FileSystem fs = location.getFileSystem(getConf()); if (!org.apache.hadoop.hive.metastore.utils.HdfsUtils.isPathEncrypted(getConf(), fs.getUri(), location) && !FileUtils.pathHasSnapshotSubDir(location, fs)) { HdfsUtils.HadoopFileStatus status = new HdfsUtils.HadoopFileStatus(getConf(), fs, location); FileStatus targetStatus = fs.getFileStatus(location);
/**
 * Makes a valid partition name.
 *
 * @param partCols the partition columns
 * @param vals the partition values, parallel to {@code partCols}
 * @param defaultStr the default name given to a partition value if the
 *        respective value is empty or null
 * @return an escaped, valid partition name
 * @throws MetaException if the key and value counts differ or both are empty
 */
public static String makePartName(List<FieldSchema> partCols, List<String> vals,
    String defaultStr) throws MetaException {
  if (partCols.size() != vals.size() || partCols.isEmpty()) {
    // Build the diagnostic with a StringBuilder instead of repeated String
    // concatenation in a loop (accidental O(n^2)). The message format is
    // unchanged, including the trailing ", " after each element.
    StringBuilder errorStr = new StringBuilder("Invalid partition key & values; keys [");
    for (FieldSchema fs : partCols) {
      errorStr.append(fs.getName()).append(", ");
    }
    errorStr.append("], values [");
    for (String val : vals) {
      errorStr.append(val).append(", ");
    }
    throw new MetaException(errorStr.append(']').toString());
  }
  // Presize: one name per partition column.
  List<String> colNames = new ArrayList<>(partCols.size());
  for (FieldSchema col : partCols) {
    colNames.add(col.getName());
  }
  return FileUtils.makePartName(colNames, vals, defaultStr);
}
/**
 * Escapes special characters in a path component by delegating to
 * {@code FileUtils.escapePathName}.
 *
 * @param path the raw path component to escape
 * @return the escaped path name
 */
public static String escapePathName(String path) { return FileUtils.escapePathName(path); }
/**
 * Unescapes a path component by delegating to {@code FileUtils.unescapePathName}.
 *
 * @param path the escaped path component
 * @return the unescaped path name
 */
private static String unescapePathName(String path) { return FileUtils.unescapePathName(path); }