/**
 * Renders this region's consistency state for diagnostic output: the meta
 * entry, the HDFS region directory, the servers it is deployed on, and the
 * replica id. Synchronized because deployedEntries may be updated concurrently.
 */
@Override
public synchronized String toString() {
  String meta = (metaEntry != null) ? metaEntry.getRegionNameAsString() : "null";
  StringBuilder sb = new StringBuilder("{ meta => ");
  sb.append(meta)
      .append(", hdfs => ").append(getHdfsRegionDir())
      .append(", deployed => ").append(Joiner.on(", ").join(deployedEntries))
      .append(", replicaId => ").append(getReplicaId())
      .append(" }");
  return sb.toString();
}
/**
 * To get the column family list according to the column family dirs:
 * adds one entry to {@code columns} per family directory found under the
 * region's HDFS directory.
 * @param columns set that discovered column family names are added to
 * @param hbi region whose HDFS region directory is scanned
 * @return a set of column families (the same {@code columns} instance)
 * @throws IOException if listing the region directory fails
 */
private Set<String> getColumnFamilyList(Set<String> columns, HbckInfo hbi)
    throws IOException {
  Path regionDir = hbi.getHdfsRegionDir();
  FileSystem fs = regionDir.getFileSystem(getConf());
  // Family directories are exactly the subdirs accepted by FamilyDirFilter.
  for (FileStatus familyDir : fs.listStatus(regionDir, new FSUtils.FamilyDirFilter(fs))) {
    columns.add(familyDir.getPath().getName());
  }
  return columns;
}
HdfsEntry he = new HdfsEntry(); synchronized (hbi) { if (hbi.getHdfsRegionDir() != null) { errors.print("Directory " + encodedName + " duplicate??" + hbi.getHdfsRegionDir());
/** * Read the .regioninfo file from the file system. If there is no * .regioninfo, add it to the orphan hdfs region list. */ private void loadHdfsRegioninfo(HbckInfo hbi) throws IOException { Path regionDir = hbi.getHdfsRegionDir(); if (regionDir == null) { LOG.warn("No HDFS region dir found: " + hbi + " meta=" + hbi.metaEntry); return; } if (hbi.hdfsEntry.hri != null) { // already loaded data return; } FileSystem fs = FileSystem.get(getConf()); HRegionInfo hri = HRegionFileSystem.loadRegionInfoFileContent(fs, regionDir); LOG.debug("HRegionInfo read: " + hri.toString()); hbi.hdfsEntry.hri = hri; }
/**
 * To get the column family list according to the column family dirs
 * @param columns set that discovered column family names are added to
 * @param hbi region whose HDFS region directory is scanned
 * @return the same {@code columns} set, with one entry per family directory
 * @throws IOException if listing the region directory fails
 */
private Set<String> getColumnFamilyList(Set<String> columns, HbckInfo hbi) throws IOException {
  Path regionDir = hbi.getHdfsRegionDir();
  FileSystem fs = regionDir.getFileSystem(getConf());
  // FamilyDirFilter restricts the listing to valid column family directories.
  FileStatus[] subDirs = fs.listStatus(regionDir, new FSUtils.FamilyDirFilter(fs));
  for (FileStatus subdir : subDirs) {
    // The directory name is the column family name.
    String columnfamily = subdir.getPath().getName();
    columns.add(columnfamily);
  }
  return columns;
}
/**
 * To get the column family list according to the column family dirs.
 * Each family directory under the region's HDFS dir contributes its name.
 * @param columns set that family names are accumulated into
 * @param hbi region whose HDFS directory is inspected
 * @return a set of column families (the {@code columns} argument)
 * @throws IOException if the region directory cannot be listed
 */
private Set<String> getColumnFamilyList(Set<String> columns, HbckInfo hbi)
    throws IOException {
  Path regionDir = hbi.getHdfsRegionDir();
  FileSystem fs = regionDir.getFileSystem(getConf());
  FileStatus[] familyDirs = fs.listStatus(regionDir, new FSUtils.FamilyDirFilter(fs));
  for (FileStatus dir : familyDirs) {
    String family = dir.getPath().getName();
    columns.add(family);
  }
  return columns;
}
/**
 * Iterates through the list of all orphan/invalid regiondirs, attempting to
 * adopt each one in turn.
 * @param orphanHdfsDirs regions whose HDFS dirs were flagged orphan/invalid
 * @throws IOException if adopting any orphan fails
 */
private void adoptHdfsOrphans(Collection<HbckInfo> orphanHdfsDirs) throws IOException {
  for (HbckInfo orphan : orphanHdfsDirs) {
    LOG.info("Attempting to handle orphan hdfs dir: " + orphan.getHdfsRegionDir());
    adoptHdfsOrphan(orphan);
  }
}
/**
 * Diagnostic rendering of this region: meta entry, HDFS region dir,
 * deployment locations, and replica id. Synchronized to get a consistent
 * snapshot of mutable state.
 */
@Override
public synchronized String toString() {
  String meta = (metaEntry != null) ? metaEntry.getRegionNameAsString() : "null";
  return "{ meta => " + meta
      + ", hdfs => " + getHdfsRegionDir()
      + ", deployed => " + Joiner.on(", ").join(deployedEntries)
      + ", replicaId => " + getReplicaId()
      + " }";
}
/**
 * Iterates through the list of all orphan/invalid regiondirs and hands each
 * one to {@link #adoptHdfsOrphan}.
 * @param orphanHdfsDirs the collected orphan region descriptors
 * @throws IOException if any adoption attempt fails
 */
private void adoptHdfsOrphans(Collection<HbckInfo> orphanHdfsDirs) throws IOException {
  for (HbckInfo candidate : orphanHdfsDirs) {
    // Log before attempting so a failure is attributable to a specific dir.
    LOG.info("Attempting to handle orphan hdfs dir: " + candidate.getHdfsRegionDir());
    adoptHdfsOrphan(candidate);
  }
}
/**
 * Diagnostic rendering of this region: the meta entry, the HDFS region
 * directory, and the servers it is deployed on. Synchronized for a
 * consistent snapshot of mutable state.
 */
@Override // overrides Object#toString; annotation guards against signature drift
public synchronized String toString() {
  StringBuilder sb = new StringBuilder();
  sb.append("{ meta => ");
  sb.append((metaEntry != null)? metaEntry.getRegionNameAsString() : "null");
  sb.append( ", hdfs => " + getHdfsRegionDir());
  sb.append( ", deployed => " + Joiner.on(", ").join(deployedEntries));
  sb.append(" }");
  return sb.toString();
}
boolean inMeta = hbi.metaEntry != null; boolean inHdfs = !shouldCheckHdfs() || hbi.getHdfsRegionDir() != null; boolean hasMetaAssignment = inMeta && hbi.metaEntry.regionServer != null; boolean isDeployed = !hbi.deployedOn.isEmpty(); && Bytes.compareTo(region.getStartKey(), hri.getEndKey()) <= 0) { if(region.isSplit() || region.isOffline()) continue; Path regionDir = hbi.getHdfsRegionDir(); FileSystem fs = regionDir.getFileSystem(getConf()); List<Path> familyDirs = FSUtils.getFamilyDirs(fs, regionDir); errors.reportError(ERROR_CODE.NOT_IN_META, "Region " + descriptiveName + " not in META, but deployed on " + Joiner.on(", ").join(hbi.deployedOn)); debugLsr(hbi.getHdfsRegionDir()); if (hbi.getReplicaId() != RegionInfo.DEFAULT_REPLICA_ID) {
Path p = hi.getHdfsRegionDir(); FileSystem fs = p.getFileSystem(getConf()); FileStatus[] dirs = fs.listStatus(p);
LOG.info("[" + thread + "] Sidelined parent region dir "+ parent.getHdfsRegionDir() + " into " + getSidelineDir()); debugLsr(parent.getHdfsRegionDir());
String parentDir, HbckInfo hi) throws IOException { TableName tableName = hi.getTableName(); Path regionDir = hi.getHdfsRegionDir();
Path p = hbi.getHdfsRegionDir(); if (p == null) { errors.report("No regioninfo in Meta or HDFS. " + hbi);
String thread = Thread.currentThread().getName(); LOG.debug("[" + thread + "] Contained region dir after close and pause"); debugLsr(contained.getHdfsRegionDir()); FileStatus[] dirs = null; try { dirs = fs.listStatus(contained.getHdfsRegionDir()); } catch (FileNotFoundException fnfe) { if (!fs.exists(contained.getHdfsRegionDir())) { LOG.warn("[" + thread + "] HDFS region dir " + contained.getHdfsRegionDir() + " is missing. Assuming already sidelined or moved."); } else { if (!fs.exists(contained.getHdfsRegionDir())) { LOG.warn("[" + thread + "] HDFS region dir " + contained.getHdfsRegionDir() + " already sidelined."); } else { LOG.info("[" + thread + "] Sidelined region dir "+ contained.getHdfsRegionDir() + " into " + getSidelineDir()); debugLsr(contained.getHdfsRegionDir());
debugLsr(hi.getHdfsRegionDir()); try { LOG.info("[" + thread + "] Closing region: " + hi);
/**
 * Loads this worker's region's .regioninfo from HDFS if it has not been
 * loaded yet. On failure the region is recorded as an orphan HDFS dir and
 * the exception is rethrown so callers see the load failure.
 */
@Override
public synchronized Void call() throws IOException {
  // only load entries that haven't been loaded yet.
  if (hbi.getHdfsHRI() == null) {
    try {
      errors.progress();
      hbck.loadHdfsRegioninfo(hbi);
    } catch (IOException ioe) {
      String msg = "Orphan region in HDFS: Unable to load .regioninfo from table "
          + hbi.getTableName() + " in hdfs dir "
          + hbi.getHdfsRegionDir()
          + "!  It may be an invalid format or version file.  Treating as "
          + "an orphaned regiondir.";
      errors.reportError(ERROR_CODE.ORPHAN_HDFS_REGION, msg);
      try {
        // Dump the dir contents for the operator before giving up on it.
        hbck.debugLsr(hbi.getHdfsRegionDir());
      } catch (IOException ioe2) {
        LOG.error("Unable to read directory " + hbi.getHdfsRegionDir(), ioe2);
        throw ioe2;
      }
      // Remember the orphan so a later pass can attempt adoption.
      hbck.orphanHdfsDirs.add(hbi);
      throw ioe;
    }
  }
  return null;
}
}
/** * Read the .regioninfo file from the file system. If there is no * .regioninfo, add it to the orphan hdfs region list. */ private void loadHdfsRegioninfo(HbckInfo hbi) throws IOException { Path regionDir = hbi.getHdfsRegionDir(); if (regionDir == null) { if (hbi.getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID) { // Log warning only for default/ primary replica with no region dir LOG.warn("No HDFS region dir found: " + hbi + " meta=" + hbi.metaEntry); } return; } if (hbi.hdfsEntry.hri != null) { // already loaded data return; } FileSystem fs = FileSystem.get(getConf()); RegionInfo hri = HRegionFileSystem.loadRegionInfoFileContent(fs, regionDir); LOG.debug("RegionInfo read: " + hri.toString()); hbi.hdfsEntry.hri = hri; }
/**
 * Iterates through the list of all orphan/invalid regiondirs; each entry is
 * logged and then passed to {@link #adoptHdfsOrphan}.
 * @param orphanHdfsDirs orphan region descriptors collected earlier
 * @throws IOException when handling an orphan dir fails
 */
private void adoptHdfsOrphans(Collection<HbckInfo> orphanHdfsDirs) throws IOException {
  for (HbckInfo orphanInfo : orphanHdfsDirs) {
    LOG.info("Attempting to handle orphan hdfs dir: " + orphanInfo.getHdfsRegionDir());
    adoptHdfsOrphan(orphanInfo);
  }
}