// NOTE(review): the three lines below are incomplete duplicated fragments of the
// checkRegionChain invocation inside checkHdfsIntegrity (braces are unbalanced here);
// they appear to be copy/paste residue — confirm against the full file before removal.
TableIntegrityErrorHandler handler = tInfo.new IntegrityFixSuggester(tInfo, errors); if (!tInfo.checkRegionChain(handler)) { errors.report("Found inconsistency in table " + tInfo.getName());
TableIntegrityErrorHandler handler = tInfo.new IntegrityFixSuggester(tInfo, errors); if (!tInfo.checkRegionChain(handler)) { errors.report("Found inconsistency in table " + tInfo.getName());
TableIntegrityErrorHandler handler = tInfo.new IntegrityFixSuggester(tInfo, errors); if (!tInfo.checkRegionChain(handler)) { errors.report("Found inconsistency in table " + tInfo.getName());
private SortedMap<TableName, TableInfo> checkHdfsIntegrity(boolean fixHoles, boolean fixOverlaps) throws IOException { LOG.info("Checking HBase region split map from HDFS data..."); logParallelMerge(); for (TableInfo tInfo : tablesInfo.values()) { TableIntegrityErrorHandler handler; if (fixHoles || fixOverlaps) { handler = tInfo.new HDFSIntegrityFixer(tInfo, errors, getConf(), fixHoles, fixOverlaps); } else { handler = tInfo.new IntegrityFixSuggester(tInfo, errors); } if (!tInfo.checkRegionChain(handler)) { // should dump info as well. errors.report("Found inconsistency in table " + tInfo.getName()); } } return tablesInfo; }
private SortedMap<String, TableInfo> checkHdfsIntegrity(boolean fixHoles, boolean fixOverlaps) throws IOException { LOG.info("Checking HBase region split map from HDFS data..."); for (TableInfo tInfo : tablesInfo.values()) { TableIntegrityErrorHandler handler; if (fixHoles || fixOverlaps) { handler = tInfo.new HDFSIntegrityFixer(tInfo, errors, getConf(), fixHoles, fixOverlaps); } else { handler = tInfo.new IntegrityFixSuggester(tInfo, errors); } if (!tInfo.checkRegionChain(handler)) { // should dump info as well. errors.report("Found inconsistency in table " + tInfo.getName()); } } return tablesInfo; }
/**
 * Builds a suggest-only handler: problems found while walking the region
 * chain are reported to {@code errors} but nothing is modified.
 */
IntegrityFixSuggester(TableInfo ti, ErrorReporter errors) {
  setTableInfo(ti); // order of these two independent statements is arbitrary
  this.errors = errors;
}
/**
 * Builds a suggest-only handler: problems found while walking the region
 * chain are reported to {@code errors} but nothing is modified.
 */
IntegrityFixSuggester(TableInfo ti, ErrorReporter errors) {
  setTableInfo(ti); // order of these two independent statements is arbitrary
  this.errors = errors;
}
/** Reports a region whose start key equals its end key. */
@Override
public void handleDegenerateRegion(HbckInfo hi) throws IOException {
  String message = "Region has the same start and end key.";
  errors.reportError(ERROR_CODE.DEGENERATE_REGION, message, getTableInfo(), hi);
}
/** Reports two regions whose key ranges overlap in the chain. */
@Override
public void handleOverlapInRegionChain(HbckInfo hi1, HbckInfo hi2) throws IOException {
  String message = "There is an overlap in the region chain.";
  errors.reportError(ERROR_CODE.OVERLAP_IN_REGION_CHAIN, message, getTableInfo(), hi1, hi2);
}
@Override public void handleDuplicateStartKeys(HbckInfo r1, HbckInfo r2) throws IOException{ byte[] key = r1.getStartKey(); // dup start key errors.reportError(ERROR_CODE.DUPE_STARTKEYS, "Multiple regions have the same startkey: " + Bytes.toStringBinary(key), getTableInfo(), r1); errors.reportError(ERROR_CODE.DUPE_STARTKEYS, "Multiple regions have the same startkey: " + Bytes.toStringBinary(key), getTableInfo(), r2); }
/** Reports that the chain's last region does not end with the empty key. */
@Override
public void handleRegionEndKeyNotEmpty(byte[] curEndKey) throws IOException {
  String message = "Last region should end with an empty key. You need to "
      + "create a new region and regioninfo in HDFS to plug the hole.";
  errors.reportError(ERROR_CODE.LAST_REGION_ENDKEY_NOT_EMPTY, message, getTableInfo());
}
/** Reports that the chain's first region does not start with the empty key. */
@Override
public void handleRegionStartKeyNotEmpty(HbckInfo hi) throws IOException {
  // Fix: the original concatenation ("You need to " + " create") emitted a
  // double space in the message; now matches the end-key counterpart's wording.
  errors.reportError(ERROR_CODE.FIRST_REGION_STARTKEY_NOT_EMPTY,
      "First region should start with an empty key. You need to "
          + "create a new region and regioninfo in HDFS to plug the hole.",
      getTableInfo(), hi);
}
/**
 * Walks each table's region chain with a suggest-only handler so that
 * integrity problems are reported but nothing is repaired.
 *
 * @param tablesInfo tables to inspect, keyed by table name
 * @throws IOException if walking a table's region chain fails
 */
private void suggestFixes(SortedMap<TableName, TableInfo> tablesInfo) throws IOException {
  logParallelMerge();
  for (TableInfo tableInfo : tablesInfo.values()) {
    tableInfo.checkRegionChain(tableInfo.new IntegrityFixSuggester(tableInfo, errors));
  }
}
/** Reports that the chain's first region does not start with the empty key. */
@Override
public void handleRegionStartKeyNotEmpty(HbckInfo hi) throws IOException {
  // Fix: the original concatenation ("You need to " + " create") emitted a
  // double space in the message; now matches the end-key counterpart's wording.
  errors.reportError(ERROR_CODE.FIRST_REGION_STARTKEY_NOT_EMPTY,
      "First region should start with an empty key. You need to "
          + "create a new region and regioninfo in HDFS to plug the hole.",
      getTableInfo(), hi);
}
/** Reports that the chain's last region does not end with the empty key. */
@Override
public void handleRegionEndKeyNotEmpty(byte[] curEndKey) throws IOException {
  String message = "Last region should end with an empty key. You need to "
      + "create a new region and regioninfo in HDFS to plug the hole.";
  errors.reportError(ERROR_CODE.LAST_REGION_ENDKEY_NOT_EMPTY, message, getTableInfo());
}
/**
 * Walks each table's region chain with a suggest-only handler so that
 * integrity problems are reported but nothing is repaired.
 *
 * @param tablesInfo tables to inspect, keyed by table name
 * @throws IOException if walking a table's region chain fails
 */
private void suggestFixes(SortedMap<String, TableInfo> tablesInfo) throws IOException {
  for (TableInfo tableInfo : tablesInfo.values()) {
    tableInfo.checkRegionChain(tableInfo.new IntegrityFixSuggester(tableInfo, errors));
  }
}
/** Reports two regions whose key ranges overlap in the chain. */
@Override
public void handleOverlapInRegionChain(HbckInfo hi1, HbckInfo hi2) throws IOException {
  String message = "There is an overlap in the region chain.";
  errors.reportError(ERROR_CODE.OVERLAP_IN_REGION_CHAIN, message, getTableInfo(), hi1, hi2);
}
@Override public void handleDuplicateStartKeys(HbckInfo r1, HbckInfo r2) throws IOException{ byte[] key = r1.getStartKey(); // dup start key errors.reportError(ERROR_CODE.DUPE_STARTKEYS, "Multiple regions have the same startkey: " + Bytes.toStringBinary(key), getTableInfo(), r1); errors.reportError(ERROR_CODE.DUPE_STARTKEYS, "Multiple regions have the same startkey: " + Bytes.toStringBinary(key), getTableInfo(), r2); }
/** Reports a region whose start key equals its end key. */
@Override
public void handleDegenerateRegion(HbckInfo hi) throws IOException {
  String message = "Region has the same start and end key.";
  errors.reportError(ERROR_CODE.DEGENERATE_REGION, message, getTableInfo(), hi);
}
private SortedMap<TableName, TableInfo> checkHdfsIntegrity(boolean fixHoles, boolean fixOverlaps) throws IOException { LOG.info("Checking HBase region split map from HDFS data..."); logParallelMerge(); for (TableInfo tInfo : tablesInfo.values()) { TableIntegrityErrorHandler handler; if (fixHoles || fixOverlaps) { handler = tInfo.new HDFSIntegrityFixer(tInfo, errors, getConf(), fixHoles, fixOverlaps); } else { handler = tInfo.new IntegrityFixSuggester(tInfo, errors); } if (!tInfo.checkRegionChain(handler)) { // should dump info as well. errors.report("Found inconsistency in table " + tInfo.getName()); } } return tablesInfo; }