/**
 * Construct an FSTableDescriptors instance using the hbase root dir of the given
 * conf and the filesystem where that root dir lives.
 * This instance can do write operations (is not read only).
 */
public FSTableDescriptors(final Configuration conf) throws IOException {
  this(conf, FSUtils.getCurrentFileSystem(conf), FSUtils.getRootDir(conf));
}
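A minimal usage sketch, not taken from the source: constructing the descriptor store from a bare Configuration and looking up one table. TableName.valueOf and TableDescriptors#get are standard HBase API, but the table name is illustrative, and in older branches (as elsewhere in this listing) the return type is HTableDescriptor rather than TableDescriptor.

Configuration conf = HBaseConfiguration.create();
FSTableDescriptors fstd = new FSTableDescriptors(conf);
// Per the TableDescriptors contract, get(...) returns null when no descriptor
// exists on disk for the given table. "exampleTable" is illustrative.
TableDescriptor htd = fstd.get(TableName.valueOf("exampleTable"));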
/**
 * Returns a unique temporary directory under the working directory, named with a random UUID;
 * the directory is created on the filesystem only when withDirCreated is true.
 */
private Path generateUniqTempDir(boolean withDirCreated) throws IOException {
  FileSystem fs = FSUtils.getCurrentFileSystem(getConf());
  Path dir = new Path(fs.getWorkingDirectory(), NAME);
  if (!fs.exists(dir)) {
    fs.mkdirs(dir);
  }
  Path newDir = new Path(dir, UUID.randomUUID().toString());
  if (withDirCreated) {
    fs.mkdirs(newDir);
  }
  return newDir;
}
/**
 * Create a snapshot file cache for all snapshots under the specified [root]/.snapshot on the
 * filesystem.
 * <p>
 * Immediately loads the file cache.
 * @param conf to extract the configured {@link FileSystem} where the snapshots are stored and
 *          hbase root directory
 * @param cacheRefreshPeriod frequency (ms) with which the cache should be refreshed
 * @param refreshThreadName name of the cache refresh thread
 * @param inspectSnapshotFiles Filter to apply to each snapshot to extract the files.
 * @throws IOException if the {@link FileSystem} or root directory cannot be loaded
 */
public SnapshotFileCache(Configuration conf, long cacheRefreshPeriod, String refreshThreadName,
    SnapshotFileInspector inspectSnapshotFiles) throws IOException {
  this(FSUtils.getCurrentFileSystem(conf), FSUtils.getRootDir(conf), 0, cacheRefreshPeriod,
    refreshThreadName, inspectSnapshotFiles);
}
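For comparison, a hedged construction sketch using a lambda in place of the anonymous SnapshotFileInspector seen in the cleaner further down this listing; this assumes SnapshotFileInspector is a single-method interface (as that anonymous class suggests), and the refresh period and thread name are illustrative values.

SnapshotFileCache cache = new SnapshotFileCache(conf, 300_000L,
  "example-snapshot-cache-refresher",
  // Same extraction logic as the cleaner below: list every HFile a snapshot references.
  snapshotDir -> SnapshotReferenceUtil.getHFileNames(conf,
    FSUtils.getCurrentFileSystem(conf), snapshotDir));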
private static void recoverLease(final Configuration conf, final Path path) {
  try {
    final FileSystem dfs = FSUtils.getCurrentFileSystem(conf);
    FSUtils fsUtils = FSUtils.getInstance(dfs, conf);
    fsUtils.recoverFileLease(dfs, path, conf, new CancelableProgressable() {
      @Override
      public boolean progress() {
        LOG.debug("Still trying to recover WAL lease: " + path);
        return true;
      }
    });
  } catch (IOException e) {
    LOG.warn("Unable to recover lease for WAL: " + path, e);
  }
}
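A hypothetical call site sketch, assuming a caller in the same class: recover the lease on a WAL left open by a dead server before replaying it. The WAL path is illustrative.

Configuration conf = HBaseConfiguration.create();
// Illustrative path; a real caller would get this from a WAL directory listing.
recoverLease(conf, new Path("/hbase/WALs/server.example.com,16020,1/wal.1"));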
/**
 * Clean up directories with prefix "exportSnapshot-", which are generated when exporting
 * snapshots.
 * @throws IOException exception
 */
protected static void cleanupExportSnapshotLog(Configuration conf) throws IOException {
  FileSystem fs = FSUtils.getCurrentFileSystem(conf);
  Path stagingDir = new Path(conf.get(BackupRestoreConstants.CONF_STAGING_ROOT,
    fs.getWorkingDirectory().toString()));
  FileStatus[] files = FSUtils.listStatus(fs, stagingDir);
  if (files == null) {
    return;
  }
  for (FileStatus file : files) {
    if (file.getPath().getName().startsWith("exportSnapshot-")) {
      LOG.debug("Deleting log files of exported snapshot: " + file.getPath().getName());
      if (!FSUtils.delete(fs, file.getPath(), true)) {
        LOG.warn("Could not delete " + file.getPath());
      }
    }
  }
}
/**
 * Get the archived WAL file path.
 * @param path active WAL file path
 * @param conf configuration
 * @return the archived path if it exists, otherwise the original path
 * @throws IOException exception
 */
public static Path getArchivedLogPath(Path path, Configuration conf) throws IOException {
  Path rootDir = FSUtils.getRootDir(conf);
  Path oldLogDir = new Path(rootDir, HConstants.HREGION_OLDLOGDIR_NAME);
  if (conf.getBoolean(SEPARATE_OLDLOGDIR, DEFAULT_SEPARATE_OLDLOGDIR)) {
    ServerName serverName = getServerNameFromWALDirectoryName(path);
    if (serverName == null) {
      LOG.error("Couldn't locate log: " + path);
      return path;
    }
    oldLogDir = new Path(oldLogDir, serverName.getServerName());
  }
  Path archivedLogLocation = new Path(oldLogDir, path.getName());
  final FileSystem fs = FSUtils.getCurrentFileSystem(conf);
  if (fs.exists(archivedLogLocation)) {
    LOG.info("Log " + path + " was moved to " + archivedLogLocation);
    return archivedLogLocation;
  } else {
    LOG.error("Couldn't locate log: " + path);
    return path;
  }
}
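A hypothetical call site sketch: resolve a WAL that may have been moved to the archive directory before opening it. The WAL path is illustrative.

Configuration conf = HBaseConfiguration.create();
Path wal = new Path("/hbase/WALs/server.example.com,16020,1/wal.1"); // illustrative path
Path resolved = getArchivedLogPath(wal, conf);
FileSystem fs = FSUtils.getCurrentFileSystem(conf);
if (fs.exists(resolved)) {
  // 'resolved' now points at either the active WAL or its archived copy
  // under oldWALs; safe to open a reader on it here.
}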
@Override
public void setConf(final Configuration conf) {
  super.setConf(conf);
  try {
    long cacheRefreshPeriod = conf.getLong(HFILE_CACHE_REFRESH_PERIOD_CONF_KEY,
      DEFAULT_HFILE_CACHE_REFRESH_PERIOD);
    final FileSystem fs = FSUtils.getCurrentFileSystem(conf);
    Path rootDir = FSUtils.getRootDir(conf);
    cache = new SnapshotFileCache(fs, rootDir, cacheRefreshPeriod, cacheRefreshPeriod,
      "snapshot-hfile-cleaner-cache-refresher", new SnapshotFileCache.SnapshotFileInspector() {
        @Override
        public Collection<String> filesUnderSnapshot(final Path snapshotDir) throws IOException {
          return SnapshotReferenceUtil.getHFileNames(conf, fs, snapshotDir);
        }
      });
  } catch (IOException e) {
    LOG.error("Failed to create cleaner util", e);
  }
}
FileSystem fs = FSUtils.getCurrentFileSystem(getConf());
if (!fs.delete(this.bulkloadDir, true)) {
  LOG.error("Deleting folder " + bulkloadDir + " failed!");
}
@Override
public FSDataOutputStream call() throws IOException {
  try {
    FileSystem fs = FSUtils.getCurrentFileSystem(this.conf);
    FsPermission defaultPerms = FSUtils.getFilePermissions(fs, this.conf,
      HConstants.DATA_FILE_UMASK_KEY);
    Path tmpDir = getTmpDir(conf);
    this.hbckLockPath = new Path(tmpDir, HBCK_LOCK_FILE);
    fs.mkdirs(tmpDir);
    final FSDataOutputStream out = createFileWithRetries(fs, this.hbckLockPath, defaultPerms);
    out.writeBytes(InetAddress.getLocalHost().toString());
    // Add a note into the file we write on why hbase2 is writing out an hbck1 lock file.
    out.writeBytes(" Written by an hbase-2.x Master to block an "
      + "attempt by an hbase-1.x HBCK tool making modification to state. "
      + "See 'HBCK must match HBase server version' in the hbase refguide.");
    out.flush();
    return out;
  } catch (RemoteException e) {
    if (AlreadyBeingCreatedException.class.getName().equals(e.getClassName())) {
      return null;
    } else {
      throw e;
    }
  }
}
private void unlockHbck() {
  if (isExclusive() && hbckLockCleanup.compareAndSet(true, false)) {
    RetryCounter retryCounter = lockFileRetryCounterFactory.create();
    do {
      try {
        IOUtils.closeQuietly(hbckOutFd);
        FSUtils.delete(FSUtils.getCurrentFileSystem(getConf()), HBCK_LOCK_PATH, true);
        LOG.info("Finishing hbck");
        return;
      } catch (IOException ioe) {
        LOG.info("Failed to delete " + HBCK_LOCK_PATH + ", try="
          + (retryCounter.getAttemptTimes() + 1) + " of " + retryCounter.getMaxAttempts());
        LOG.debug("Failed to delete " + HBCK_LOCK_PATH, ioe);
        try {
          retryCounter.sleepUntilNextRetry();
        } catch (InterruptedException ie) {
          Thread.currentThread().interrupt();
          LOG.warn("Interrupted while deleting lock file " + HBCK_LOCK_PATH);
          return;
        }
      }
    } while (retryCounter.shouldRetry());
  }
}
FileSystem fileSystem = FSUtils.getCurrentFileSystem(conf);
public void initialize(InputSplit split, Configuration conf) throws IOException {
  this.scan = TableMapReduceUtil.convertStringToScan(split.getScan());
  this.split = split;
  TableDescriptor htd = split.htd;
  HRegionInfo hri = this.split.getRegionInfo();
  FileSystem fs = FSUtils.getCurrentFileSystem(conf);
  // region is immutable, this should be fine,
  // otherwise we have to set the thread read point
  scan.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED);
  // disable caching of data blocks
  scan.setCacheBlocks(false);
  scan.setScanMetricsEnabled(true);
  scanner =
    new ClientSideRegionScanner(conf, fs, new Path(split.restoreDir), htd, hri, scan, null);
}
@VisibleForTesting
HRegionFileSystem getFileSystem(Connection connection) throws IOException {
  // The Admin is only needed to reach the cluster configuration; close it once done.
  try (Admin admin = connection.getAdmin()) {
    Configuration conf = admin.getConfiguration();
    return HRegionFileSystem.openRegionFromFileSystem(conf,
      FSUtils.getCurrentFileSystem(conf),
      FSUtils.getTableDir(FSUtils.getRootDir(conf), region.getTable()), region, true);
  }
}
public void init(final Connection conn, final String backupId, BackupRequest request)
    throws IOException {
  if (request.getBackupType() == BackupType.FULL) {
    backupManager = new BackupManager(conn, conn.getConfiguration());
  } else {
    backupManager = new IncrementalBackupManager(conn, conn.getConfiguration());
  }
  this.backupId = backupId;
  this.tableList = request.getTableList();
  this.conn = conn;
  this.conf = conn.getConfiguration();
  this.fs = FSUtils.getCurrentFileSystem(conf);
  backupInfo = backupManager.createBackupInfo(backupId, request.getBackupType(), tableList,
    request.getTargetRootDir(), request.getTotalTasks(), request.getBandwidth());
  if (tableList == null || tableList.isEmpty()) {
    this.tableList = new ArrayList<>(backupInfo.getTables());
  }
  // Start new session
  backupManager.startBackupSession();
}
tableDirs = FSUtils.getTableDirs(FSUtils.getCurrentFileSystem(getConf()), rootdir);
cleaner.setConf(CONF);
FileSystem fs = FSUtils.getCurrentFileSystem(CONF);
List<Path> fileNames =
  listHFileNames(fs, FSUtils.getTableDir(FSUtils.getRootDir(CONF), TABLE_NAME));
private void copyLocalIndexHFiles(Configuration conf, RegionInfo fromRegion, RegionInfo toRegion,
    boolean move) throws IOException {
  Path root = FSUtils.getRootDir(conf);
  Path secondRegion = new Path(FSUtils.getTableDir(root, fromRegion.getTable()) + Path.SEPARATOR
    + fromRegion.getEncodedName() + Path.SEPARATOR + "L#0/");
  Path hfilePath =
    FSUtils.getCurrentFileSystem(conf).listFiles(secondRegion, true).next().getPath();
  Path firstRegionPath = new Path(FSUtils.getTableDir(root, toRegion.getTable()) + Path.SEPARATOR
    + toRegion.getEncodedName() + Path.SEPARATOR + "L#0/");
  FileSystem currentFileSystem = FSUtils.getCurrentFileSystem(conf);
  assertTrue(FileUtil.copy(currentFileSystem, hfilePath, currentFileSystem, firstRegionPath,
    move, conf));
}
/**
 * Deletes a table's directory from the file system if it exists. Used in unit tests.
 */
public static void deleteTableDescriptorIfExists(String tableName, Configuration conf)
    throws IOException {
  FileSystem fs = FSUtils.getCurrentFileSystem(conf);
  FileStatus status = getTableInfoPath(fs, FSUtils.getRootDir(conf), tableName);
  // The below deleteDirectory works for either a file or a directory.
  if (status != null && fs.exists(status.getPath())) {
    FSUtils.deleteDirectory(fs, status.getPath());
  }
}
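A hedged test-style usage, assuming the static helper is visible to the caller; the table name is illustrative.

// Drop the on-disk descriptor for a (hypothetical) table created during a test.
deleteTableDescriptorIfExists("exampleTable", HBaseConfiguration.create());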
public void initialize(InputSplit split, Configuration conf) throws IOException {
  this.scan = TableMapReduceUtil.convertStringToScan(split.getScan());
  this.split = split;
  HTableDescriptor htd = split.htd;
  HRegionInfo hri = this.split.getRegionInfo();
  FileSystem fs = FSUtils.getCurrentFileSystem(conf);
  // region is immutable, this should be fine,
  // otherwise we have to set the thread read point
  scan.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED);
  // disable caching of data blocks
  scan.setCacheBlocks(false);
  scanner =
    new ClientSideRegionScanner(conf, fs, new Path(split.restoreDir), htd, hri, scan, null);
}