Refine search
/**
 * Resolves the temporary directory this tool writes to, located directly
 * under the HBase root directory.
 *
 * @param conf cluster configuration used to resolve the HBase root dir
 * @return the tmp dir this tool writes to
 * @throws IOException if the root dir cannot be resolved
 */
@VisibleForTesting
public static Path getTmpDir(Configuration conf) throws IOException {
  Path rootDir = FSUtils.getRootDir(conf);
  return new Path(rootDir, HConstants.HBASE_TEMP_DIRECTORY);
}
/**
 * Lazily computes and caches the sideline directory for this run. The path
 * has the form {@code <rootdir>/<HBCK_SIDELINEDIR_NAME>/<rootdir-name>-<startMillis>},
 * so each invocation of the tool sidelines into a distinct, timestamped dir.
 *
 * @return the cached sideline directory path
 * @throws IOException if the HBase root dir cannot be resolved
 */
private Path getSidelineDir() throws IOException {
  if (sidelineDir != null) {
    return sidelineDir;
  }
  Path hbaseDir = FSUtils.getRootDir(getConf());
  Path hbckDir = new Path(hbaseDir, HConstants.HBCK_SIDELINEDIR_NAME);
  sidelineDir = new Path(hbckDir, hbaseDir.getName() + "-" + startMillis);
  return sidelineDir;
}
/**
 * Initializes the bulk-load endpoint state: secure RNG, user provider, UGI
 * reference counter, filesystem handle, and the staging directory (created
 * with hidden permissions if absent).
 *
 * @throws IOException if the filesystem cannot be reached or the staging
 *           directory cannot be created
 */
public void start() throws IOException {
  this.random = new SecureRandom();
  this.userProvider = UserProvider.instantiate(conf);
  this.ugiReferenceCounter = new ConcurrentHashMap<>();
  this.fs = FileSystem.get(conf);
  this.baseStagingDir = new Path(FSUtils.getRootDir(conf), HConstants.BULKLOAD_STAGING_DIR_NAME);
  // The old knob is no longer honored; warn operators who still set it.
  if (conf.get("hbase.bulkload.staging.dir") != null) {
    LOG.warn("hbase.bulkload.staging.dir " + " is deprecated. Bulkload staging directory is "
        + baseStagingDir);
  }
  if (!fs.exists(baseStagingDir)) {
    fs.mkdirs(baseStagingDir, PERM_HIDDEN);
  }
}
/**
 * Command-line entry point for the restore tool. Pins the default
 * filesystem to the one backing the HBase root dir before delegating to
 * {@link RestoreDriver} via {@link ToolRunner}.
 *
 * @param args command-line arguments, forwarded unchanged
 * @throws Exception if tool setup or execution fails
 */
public static void main(String[] args) throws Exception {
  Configuration conf = HBaseConfiguration.create();
  Path rootDir = FSUtils.getRootDir(conf);
  URI rootFsUri = rootDir.getFileSystem(conf).getUri();
  FSUtils.setFsDefault(conf, new Path(rootFsUri));
  System.exit(ToolRunner.run(conf, new RestoreDriver(), args));
}
/**
 * Command-line entry point for the backup tool. Pins the default
 * filesystem to the one backing the HBase root dir before delegating to
 * {@link BackupDriver} via {@link ToolRunner}.
 *
 * @param args command-line arguments, forwarded unchanged
 * @throws Exception if tool setup or execution fails
 */
public static void main(String[] args) throws Exception {
  Configuration conf = HBaseConfiguration.create();
  Path rootDir = FSUtils.getRootDir(conf);
  URI rootFsUri = rootDir.getFileSystem(conf).getUri();
  FSUtils.setFsDefault(conf, new Path(rootFsUri));
  System.exit(ToolRunner.run(conf, new BackupDriver(), args));
}
/** * Main program * * @param args * @throws Exception */ public static void main(String[] args) throws Exception { // create a fsck object Configuration conf = HBaseConfiguration.create(); Path hbasedir = FSUtils.getRootDir(conf); URI defaultFs = hbasedir.getFileSystem(conf).getUri(); FSUtils.setFsDefault(conf, new Path(defaultFs)); int ret = ToolRunner.run(new HBaseFsckTool(conf), args); System.exit(ret); }
/** * Given a path, generates a new path to where we move a corrupted hfile (bad * trailer, no trailer). * * @param hFile * Path to a corrupt hfile (assumes that it is HBASE_DIR/ table * /region/cf/file) * @return path to where corrupted files are stored. This should be * HBASE_DIR/.corrupt/table/region/cf/file. */ Path createQuarantinePath(Path hFile) throws IOException { // extract the normal dirs structure Path cfDir = hFile.getParent(); Path regionDir = cfDir.getParent(); Path tableDir = regionDir.getParent(); // build up the corrupted dirs structure Path corruptBaseDir = new Path(FSUtils.getRootDir(conf), HConstants.CORRUPT_DIR_NAME); if (conf.get("hbase.hfile.quarantine.dir") != null) { LOG.warn("hbase.hfile.quarantine.dir is deprecated. Default to " + corruptBaseDir); } Path corruptTableDir = new Path(corruptBaseDir, tableDir.getName()); Path corruptRegionDir = new Path(corruptTableDir, regionDir.getName()); Path corruptFamilyDir = new Path(corruptRegionDir, cfDir.getName()); Path corruptHfile = new Path(corruptFamilyDir, hFile.getName()); return corruptHfile; }
/**
 * Resolves the table directory and the split-marker file for a table.
 *
 * @param conf cluster configuration used to resolve the HBase root dir
 * @param tableName table whose directory is wanted
 * @return A Pair where first item is table dir and second is the split file.
 * @throws IOException if a remote or network exception occurs
 */
private static Pair<Path, Path> getTableDirAndSplitFile(final Configuration conf,
    final TableName tableName) throws IOException {
  final Path rootDir = FSUtils.getRootDir(conf);
  final Path tableDir = FSUtils.getTableDir(rootDir, tableName);
  return new Pair<>(tableDir, new Path(tableDir, "_balancedSplit"));
}
Path rootDir = FSUtils.getRootDir(conf); FileSystem fs = rootDir.getFileSystem(conf); restoreDir = new Path(restoreDir, UUID.randomUUID().toString());
/**
 * Restores the peer cluster's snapshot into a scanner-readable temp
 * location on the peer filesystem. Builds a peer-scoped configuration from
 * the quorum address, points it at the peer root dir, then copies the
 * snapshot for scanning.
 *
 * @param conf base configuration to derive the peer configuration from
 * @param peerQuorumAddress ZooKeeper quorum address of the peer cluster
 * @throws IOException if the peer filesystem or snapshot cannot be accessed
 */
private void restoreSnapshotForPeerCluster(Configuration conf, String peerQuorumAddress)
    throws IOException {
  Configuration peerConf =
      HBaseConfiguration.createClusterConf(conf, peerQuorumAddress, PEER_CONFIG_PREFIX);
  FileSystem.setDefaultUri(peerConf, peerFSAddress);
  FSUtils.setRootDir(peerConf, new Path(peerFSAddress, peerHBaseRootAddress));
  FileSystem peerFs = FileSystem.get(peerConf);
  Path peerRootDir = FSUtils.getRootDir(peerConf);
  Path restoreTmpDir = new Path(peerFSAddress, peerSnapshotTmpDir);
  RestoreSnapshotHelper.copySnapshotForScanner(peerConf, peerFs, peerRootDir, restoreTmpDir,
      peerSnapshotName);
}
/**
 * Get the archived WAL file path.
 *
 * @param path - active WAL file path
 * @param conf - configuration
 * @return archived path if exists, path - otherwise
 * @throws IOException exception
 */
public static Path getArchivedLogPath(Path path, Configuration conf) throws IOException {
  Path oldLogDir = new Path(FSUtils.getRootDir(conf), HConstants.HREGION_OLDLOGDIR_NAME);
  // With per-server old-log dirs enabled, archives live one level deeper,
  // keyed by the server name parsed out of the active WAL's directory.
  if (conf.getBoolean(SEPARATE_OLDLOGDIR, DEFAULT_SEPARATE_OLDLOGDIR)) {
    ServerName serverName = getServerNameFromWALDirectoryName(path);
    if (serverName == null) {
      LOG.error("Couldn't locate log: " + path);
      return path;
    }
    oldLogDir = new Path(oldLogDir, serverName.getServerName());
  }
  Path archivedLogLocation = new Path(oldLogDir, path.getName());
  final FileSystem fs = FSUtils.getCurrentFileSystem(conf);
  if (!fs.exists(archivedLogLocation)) {
    LOG.error("Couldn't locate log: " + path);
    return path;
  }
  LOG.info("Log " + path + " was moved to " + archivedLogLocation);
  return archivedLogLocation;
}
/**
 * Delete the region directory if exists.
 *
 * @param conf cluster configuration used to resolve the root dir
 * @param hri region whose directory should be removed
 * @return True if deleted the region directory.
 * @throws IOException if the filesystem cannot be reached
 */
public static boolean deleteRegionDir(final Configuration conf, final HRegionInfo hri)
    throws IOException {
  final Path rootDir = getRootDir(conf);
  final FileSystem fs = rootDir.getFileSystem(conf);
  final Path regionDir =
      new Path(getTableDir(rootDir, hri.getTable()), hri.getEncodedName());
  return deleteDirectory(fs, regionDir);
}
public HFileReplicator(Configuration sourceClusterConf, String sourceBaseNamespaceDirPath, String sourceHFileArchiveDirPath, Map<String, List<Pair<byte[], List<String>>>> tableQueueMap, Configuration conf, Connection connection) throws IOException { this.sourceClusterConf = sourceClusterConf; this.sourceBaseNamespaceDirPath = sourceBaseNamespaceDirPath; this.sourceHFileArchiveDirPath = sourceHFileArchiveDirPath; this.bulkLoadHFileMap = tableQueueMap; this.conf = conf; this.connection = connection; userProvider = UserProvider.instantiate(conf); fsDelegationToken = new FsDelegationToken(userProvider, "renewer"); this.hbaseStagingDir = new Path(FSUtils.getRootDir(conf), HConstants.BULKLOAD_STAGING_DIR_NAME); this.maxCopyThreads = this.conf.getInt(REPLICATION_BULKLOAD_COPY_MAXTHREADS_KEY, REPLICATION_BULKLOAD_COPY_MAXTHREADS_DEFAULT); ThreadFactoryBuilder builder = new ThreadFactoryBuilder(); builder.setNameFormat("HFileReplicationCallable-%1$d"); this.exec = new ThreadPoolExecutor(maxCopyThreads, maxCopyThreads, 60, TimeUnit.SECONDS, new LinkedBlockingQueue<>(), builder.build()); this.exec.allowCoreThreadTimeOut(true); this.copiesPerThread = conf.getInt(REPLICATION_BULKLOAD_COPY_HFILES_PERTHREAD_KEY, REPLICATION_BULKLOAD_COPY_HFILES_PERTHREAD_DEFAULT); sinkFs = FileSystem.get(conf); }
public static List<InputSplit> getSplits(Configuration conf) throws IOException { String snapshotName = getSnapshotName(conf); Path rootDir = FSUtils.getRootDir(conf); FileSystem fs = rootDir.getFileSystem(conf); SnapshotManifest manifest = getSnapshotManifest(conf, snapshotName, rootDir, fs); List<HRegionInfo> regionInfos = getRegionInfosFromManifest(manifest); // TODO: mapred does not support scan as input API. Work around for now. Scan scan = extractScanFromConf(conf); // the temp dir where the snapshot is restored Path restoreDir = new Path(conf.get(RESTORE_DIR_KEY)); RegionSplitter.SplitAlgorithm splitAlgo = getSplitAlgo(conf); int numSplits = conf.getInt(NUM_SPLITS_PER_REGION, 1); return getSplits(scan, manifest, regionInfos, restoreDir, conf, splitAlgo, numSplits); }
/**
 * Wires up the master's view of the filesystem: resolves the root and WAL
 * dirs (and their filesystems), pins the default FS, records security
 * settings, and lays out the initial directory structure.
 *
 * @param conf cluster configuration; mutated to set the default filesystem
 * @throws IOException if a filesystem cannot be reached or layout creation fails
 */
public MasterFileSystem(Configuration conf) throws IOException { this.conf = conf; // Set filesystem to be that of this.rootdir else we get complaints about // mismatched filesystems if hbase.rootdir is hdfs and fs.defaultFS is // default localfs. Presumption is that rootdir is fully-qualified before // we get to here with appropriate fs scheme. this.rootdir = FSUtils.getRootDir(conf); this.tempdir = new Path(this.rootdir, HConstants.HBASE_TEMP_DIRECTORY); // Cover both bases, the old way of setting default fs and the new. // We're supposed to run on 0.20 and 0.21 anyways. this.fs = this.rootdir.getFileSystem(conf); this.walRootDir = FSUtils.getWALRootDir(conf); this.walFs = FSUtils.getWALFileSystem(conf); /* NOTE(review): setFsDefault is called twice — first with the WAL fs URI, then with the root fs URI, so the root fs wins as the default. Presumably the WAL-fs call only matters transiently for walFs.setConf; confirm this ordering is intentional before restructuring. */ FSUtils.setFsDefault(conf, new Path(this.walFs.getUri())); walFs.setConf(conf); FSUtils.setFsDefault(conf, new Path(this.fs.getUri())); // make sure the fs has the same conf fs.setConf(conf); // Root subdirs get this permission when security is on (default 700). this.secureRootSubDirPerms = new FsPermission(conf.get("hbase.rootdir.perms", "700")); this.isSecurityEnabled = "kerberos".equalsIgnoreCase(conf.get("hbase.security.authentication")); // setup the filesystem variable createInitialFileSystemLayout(); HFileSystem.addLocationsOrderInterceptor(conf); }
/**
 * Resets test state for a fresh run against the given table: points the
 * base path at {@code <rootdir>/mobdir/<table>/<family>}, generates fresh
 * random suffixes for mob and delete files, and clears the tracking lists.
 *
 * @param tableName table under test
 * @throws Exception if the filesystem or root dir cannot be resolved
 */
private void init(String tableName) throws Exception {
  fs = FileSystem.get(conf);
  Path rootDir = FSUtils.getRootDir(conf);
  Path mobDir = new Path(rootDir, MobConstants.MOB_DIR_NAME);
  basePath = new Path(new Path(mobDir, tableName), family);

  mobSuffix = TEST_UTIL.getRandomUUID().toString().replaceAll("-", "");
  delSuffix = TEST_UTIL.getRandomUUID().toString().replaceAll("-", "") + "_del";

  allFiles.clear();
  mobFiles.clear();
  delFiles.clear();
}
if (remoteDir != null) { URI defaultFs = remoteDir.getFileSystem(conf).getUri(); FSUtils.setFsDefault(conf, new Path(defaultFs)); FSUtils.setRootDir(conf, remoteDir); rootDir = FSUtils.getRootDir(conf); fs = FileSystem.get(rootDir.toUri(), conf); LOG.debug("fs=" + fs.getUri().toString() + " root=" + rootDir);
Path rootDir = FSUtils.getRootDir(conf); FileSystem fs = rootDir.getFileSystem(conf); Path target = new Path(backupInfo.getTableBackupDir(table)); FileSystem targetFs = target.getFileSystem(conf); FSTableDescriptors descriptors = new FSTableDescriptors(conf, targetFs, FSUtils.getRootDir(conf)); descriptors.createTableDescriptorForTableDirectory(target, orig, false); LOG.debug("Attempting to copy table info for:" + table + " target: " + target for (RegionInfo regionInfo : regions) { Path regionDir = HRegion.getRegionDir(new Path(backupInfo.getTableBackupDir(table)), regionInfo); regionDir = new Path(backupInfo.getTableBackupDir(table), regionDir.getName()); writeRegioninfoOnFilesystem(conf, targetFs, regionDir, regionInfo);
new Path(FSUtils.getRootDir(getConf()), HConstants.HBASE_TEMP_DIRECTORY), filePath); if (fs.exists(hfilePath)) { return false; return false; hfilePath = HFileLink.getHFileFromBackReference(FSUtils.getRootDir(getConf()), filePath); return !fs.exists(hfilePath); } catch (IOException e) {
/** * We don't have an easy way to verify that a flush completed, so we loop until we find a * legitimate hfile and return it. * @param fs * @param table * @return Path of a flushed hfile. * @throws IOException */ Path getFlushedHFile(FileSystem fs, TableName table) throws IOException { Path tableDir= FSUtils.getTableDir(FSUtils.getRootDir(conf), table); Path regionDir = FSUtils.getRegionDirs(fs, tableDir).get(0); Path famDir = new Path(regionDir, FAM_STR); // keep doing this until we get a legit hfile while (true) { FileStatus[] hfFss = fs.listStatus(famDir); if (hfFss.length == 0) { continue; } for (FileStatus hfs : hfFss) { if (!hfs.isDirectory()) { return hfs.getPath(); } } } }