Refine search
@Override
public int run(String[] args) throws Exception {
  // Exactly one argument — the table to refresh — is required.
  if (args.length != 1) {
    // Fixed grammar of the usage message ("there are ... are sharing",
    // "use this tool manually sync") so operators get readable help text.
    String message = "When multiple HBase clusters are sharing a common root dir, "
        + "especially for a read replica cluster (see detail in HBASE-18477), please consider "
        + "using this tool to manually sync the flushed HFiles from the source cluster.";
    message += "\nUsage: " + this.getClass().getName() + " tableName";
    System.out.println(message);
    return -1;
  }
  final TableName tableName = TableName.valueOf(args[0]);
  try {
    refreshHFiles(tableName);
  } catch (Throwable t) {
    // Catch Throwable so any failure is logged before signalling via exit code.
    LOG.error("Refresh HFiles from table " + tableName.getNameAsString() + " failed: ", t);
    return -1;
  }
  return 0;
}
@Test
public void testLegalHTableNamesRegex() {
  // Every name in the known-legal list must satisfy the user-table regex
  // after normalization through TableName.valueOf().
  for (final String legalName : legalTableNames) {
    final String normalized = TableName.valueOf(legalName).getNameAsString();
    final boolean matches = Pattern.matches(TableName.VALID_USER_TABLE_REGEX, normalized);
    assertTrue("Testing: '" + legalName + "'", matches);
  }
}
@Test
public void testLocalDistributedColumnSelect() throws Exception {
  // Physical plan that projects a single column; expects 3 result rows.
  final String plan = "/hbase/hbase_scan_screen_physical_column_select.json";
  final String table = HBaseTestsSuite.TEST_TABLE_1.getNameAsString();
  runHBasePhysicalVerifyCount(plan, table, 3);
}
@Test
public void testLocalDistributedFamilySelect() throws Exception {
  // Physical plan that selects a column family; expects 4 result rows.
  final String plan = "/hbase/hbase_scan_screen_physical_family_select.json";
  final String table = HBaseTestsSuite.TEST_TABLE_1.getNameAsString();
  runHBasePhysicalVerifyCount(plan, table, 4);
}
@Test
public void testCloneTableSchemaPreservingSplits() throws Exception {
  // Clone into "<table>_new", asking the helper to preserve region split points.
  final String cloneName = tableName.getNameAsString() + "_new";
  testCloneTableSchema(tableName, TableName.valueOf(cloneName), true);
}
@Override
public void addToBackupSet(String name, TableName[] tables) throws IOException {
  // Adds the given tables to the named backup set after verifying that
  // each one exists; throws IOException naming the first missing table.
  String[] tableNames = new String[tables.length];
  try (final BackupSystemTable table = new BackupSystemTable(conn);
      final Admin admin = conn.getAdmin()) {
    for (int i = 0; i < tables.length; i++) {
      tableNames[i] = tables[i].getNameAsString();
      // Use the TableName we already have instead of re-parsing its
      // string form back through TableName.valueOf().
      if (!admin.tableExists(tables[i])) {
        throw new IOException("Cannot add " + tableNames[i] + " because it doesn't exist");
      }
    }
    table.addToBackupSet(name, tableNames);
    LOG.info("Added tables [" + StringUtils.join(tableNames, " ") + "] to '" + name
        + "' backup set");
  }
}
@Test
public void testLocalDistributed() throws Exception {
  // Full-scan physical plan; expects 8 result rows.
  final String plan = "/hbase/hbase_scan_screen_physical.json";
  final String table = HBaseTestsSuite.TEST_TABLE_1.getNameAsString();
  runHBasePhysicalVerifyCount(plan, table, 8);
}
@Test
public void testLegalTableNamesRegex() {
  // Each legal table name, once normalized via TableName.valueOf(),
  // must match the user-table validation regex.
  for (final String candidate : legalTableNames) {
    final TableName parsed = TableName.valueOf(candidate);
    assertTrue("Testing: '" + candidate + "'",
        Pattern.matches(TableName.VALID_USER_TABLE_REGEX, parsed.getNameAsString()));
  }
}
protected void runVerifyCommonTable(String outputDir, int numReducers, long expectedNumNodes, int index) throws Exception { LOG.info("Verifying common table with index " + index); sleep(SLEEP_IN_MS); Path outputPath = new Path(outputDir); UUID uuid = UUID.randomUUID(); // create a random UUID. Path iterationOutput = new Path(outputPath, uuid.toString()); Verify verify = new VisibilityVerify(TableName.valueOf(COMMON_TABLE_NAME).getNameAsString(), index); verify(numReducers, expectedNumNodes, iterationOutput, verify); }
@Test public void testSkipEmptyColumns() throws Exception { Path bulkOutputPath = new Path(util.getDataTestDirOnTestFS(tn.getNameAsString()), "hfiles"); args.put(ImportTsv.BULK_OUTPUT_CONF_KEY, bulkOutputPath.toString()); args.put(ImportTsv.COLUMNS_CONF_KEY, "HBASE_ROW_KEY,HBASE_TS_KEY,FAM:A,FAM:B"); args.put(ImportTsv.SEPARATOR_CONF_KEY, ","); args.put(ImportTsv.SKIP_EMPTY_COLUMNS, "true"); // 2 Rows of data as input. Both rows are valid and only 3 columns are no-empty among 4 String data = "KEY,1234,VALUE1,VALUE2\nKEY,1235,,VALUE2\n"; doMROnTableTest(util, tn, FAMILY, data, args, 1, 3); util.deleteTable(tn); }
@Test
public void testCloneTableSchema() throws Exception {
  // Clone into "<table>_new" without preserving region split points.
  final String cloneName = tableName.getNameAsString() + "_new";
  testCloneTableSchema(tableName, TableName.valueOf(cloneName), false);
}
/**
 * Builds a FLUSH-type SnapshotDescription from a ProcedureDescription.
 * @throws IOException if the snapshot name or the "table" property is missing
 */
private SnapshotDescription toSnapshotDescription(ProcedureDescription desc) throws IOException {
  if (!desc.hasInstance()) {
    throw new IOException("Snapshot name is not defined: " + desc.toString());
  }
  final String snapshotName = desc.getInstance();
  // Scan the configuration pairs for a case-insensitive "table" entry;
  // if it appears more than once the last occurrence wins.
  String table = null;
  for (NameStringPair prop : desc.getConfigurationList()) {
    if ("table".equalsIgnoreCase(prop.getName())) {
      table = prop.getValue();
    }
  }
  if (table == null) {
    throw new IOException("Snapshot table is not defined: " + desc.toString());
  }
  final TableName tableName = TableName.valueOf(table);
  SnapshotDescription.Builder builder = SnapshotDescription.newBuilder();
  builder.setTable(tableName.getNameAsString());
  builder.setName(snapshotName);
  builder.setType(SnapshotDescription.Type.FLUSH);
  return builder.build();
}
}
@Test public void testExecProcedureWithRet() throws Exception { verifyAdminCheckForAction((admin) -> { // Using existing table instead of creating a new one. admin.execProcedureWithReturn("flush-table-proc", TableName.META_TABLE_NAME.getNameAsString(), new HashMap<>()); }); }
@Test public void testCloneSnapshot() throws Exception { byte[] FAMILY = Bytes.toBytes("test"); String snapshotName = tableName.getNameAsString() + "_snap"; TableName clonedTableName = TableName.valueOf(tableName.getNameAsString() + "_clone"); // create base table TEST_UTIL.createTable(tableName, FAMILY); // create snapshot admin.snapshot(snapshotName, tableName); // clone admin.cloneSnapshot(snapshotName, clonedTableName); }
/**
 * Creates a fresh HRegion for the current test method, wiping any
 * leftover region directory from a previous run first.
 */
private HRegion createHRegion(WALFactory wals, Durability durability) throws IOException {
  // Sanitize the test-method name into a legal table name.
  final String safeName = name.getMethodName().replaceAll("[^A-Za-z0-9-_]", "_");
  final TableName tableName = TableName.valueOf(safeName);
  final TableDescriptor htd = TableDescriptorBuilder.newBuilder(tableName)
      .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY)).build();
  final RegionInfo info = RegionInfoBuilder.newBuilder(tableName).build();
  final Path path = new Path(DIR, tableName.getNameAsString());
  // Start from a clean directory; failing to delete is fatal for the test.
  if (FS.exists(path) && !FS.delete(path, true)) {
    throw new IOException("Failed delete of " + path);
  }
  ChunkCreator.initialize(MemStoreLABImpl.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null);
  return HRegion.createHRegion(info, path, CONF, htd, wals.getWAL(info));
}
@Test public void testExecProcedure() throws Exception { verifyAdminCheckForAction((admin) -> { // Using existing table instead of creating a new one. admin.execProcedure("flush-table-proc", TableName.META_TABLE_NAME.getNameAsString(), new HashMap<>()); }); }
@Test
public void testCloneTableSchemaPreservingSplits() throws Exception {
  // Derive both table names from the running test's method name;
  // split points are preserved in the clone.
  final TableName source = TableName.valueOf(name.getMethodName());
  final TableName target = TableName.valueOf(source.getNameAsString() + "_new");
  testCloneTableSchema(source, target, true);
}
private static void setupTables() throws IOException { // Get the table name. tableName = TableName.valueOf(util.getConfiguration() .get("hbase.IntegrationTestMTTR.tableName", "IntegrationTestMTTR")); loadTableName = TableName.valueOf(util.getConfiguration() .get("hbase.IntegrationTestMTTR.loadTableName", "IntegrationTestMTTRLoadTestTool")); if (util.getAdmin().tableExists(tableName)) { util.deleteTable(tableName); } if (util.getAdmin().tableExists(loadTableName)) { util.deleteTable(loadTableName); } // Create the table. If this fails then fail everything. TableDescriptor tableDescriptor = util.getAdmin().getDescriptor(tableName); TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableDescriptor); // Make the max file size huge so that splits don't happen during the test. builder.setMaxFileSize(Long.MAX_VALUE); ColumnFamilyDescriptorBuilder colDescriptorBldr = ColumnFamilyDescriptorBuilder.newBuilder(FAMILY); colDescriptorBldr.setMaxVersions(1); builder.setColumnFamily(colDescriptorBldr.build()); util.getAdmin().createTable(builder.build()); // Setup the table for LoadTestTool int ret = loadTool.run(new String[]{"-tn", loadTableName.getNameAsString(), "-init_only"}); assertEquals("Failed to initialize LoadTestTool", 0, ret); }
/**
 * Verify that full backup fails on tableset containing real and fake tables.
 *
 * @throws Exception if doing the full backup fails
 */
@Test(expected = IOException.class)
public void testFullBackupMixExistAndDNE() throws Exception {
  LOG.info("create full backup fails on tableset containing real and fake table");
  // One table that exists plus one that does not; the backup must throw.
  final List<TableName> tables = toList(table1.getNameAsString(), "tabledne");
  fullTableBackup(tables);
}
}
@Test
public void testCloneTableSchema() throws Exception {
  // Derive both table names from the running test's method name;
  // split points are NOT preserved in the clone.
  final TableName source = TableName.valueOf(name.getMethodName());
  final TableName target = TableName.valueOf(source.getNameAsString() + "_new");
  testCloneTableSchema(source, target, false);
}