/**
 * Restarts the HBase cluster after it was shut down earlier in a test,
 * leaving DFS and ZooKeeper running. Delegates to the two-argument
 * overload with no explicit master list.
 *
 * @param servers number of region servers to start
 * @throws IOException if the cluster fails to come back up
 * @throws InterruptedException if interrupted while waiting for startup
 */
public void restartHBaseCluster(int servers) throws IOException, InterruptedException {
  restartHBaseCluster(servers, null);
}
/**
 * Bounces the mini HBase cluster for this test: shuts it down, pauses
 * briefly, restarts it with one fewer region server than the base slave
 * count, and re-runs initialize() to rebuild test state.
 * DFS/ZooKeeper are not touched.
 */
private void restartHBaseCluster() throws Exception {
  LOG.info("\n\nShutting down cluster");
  TEST_UTIL.shutdownMiniHBaseCluster();
  LOG.info("\n\nSleeping a bit");
  // Short pause to let the shutdown settle before restarting.
  Thread.sleep(2000);
  // NOTE(review): restarts with NUM_SLAVES_BASE - 1 servers — presumably
  // deliberate (one server fewer than initial startup); confirm with callers.
  TEST_UTIL.restartHBaseCluster(NUM_SLAVES_BASE - 1);
  initialize();
}
}
@Test public void testConnectionRideOverClusterRestart() throws IOException, InterruptedException { Configuration config = new Configuration(TEST_UTIL.getConfiguration()); final TableName tableName = TableName.valueOf(name.getMethodName()); TEST_UTIL.createTable(tableName, new byte[][] {FAM_NAM}).close(); Connection connection = ConnectionFactory.createConnection(config); Table table = connection.getTable(tableName); // this will cache the meta location and table's region location table.get(new Get(Bytes.toBytes("foo"))); // restart HBase TEST_UTIL.shutdownMiniHBaseCluster(); TEST_UTIL.restartHBaseCluster(2); // this should be able to discover new locations for meta and table's region table.get(new Get(Bytes.toBytes("foo"))); TEST_UTIL.deleteTable(tableName); table.close(); connection.close(); }
// NOTE(review): this fragment is truncated — the method body continues
// beyond this chunk, so only the visible portion is documented.
private void mimicSyncUpAfterPut() throws Exception {
  LOG.debug("mimicSyncUpAfterPut");
  // Bounce the source cluster, then the peer cluster, so replication
  // sync-up runs against freshly started region servers.
  utility1.restartHBaseCluster(1);
  utility2.shutdownMiniHBaseCluster();
  utility2.restartHBaseCluster(1);
  // Expected counts at the peer after sync-up: 100 rows in t1, 200 in t2.
  // On mismatch, restart the source and recount its rows for the debug log.
  if (rowCount_ht1TargetAtPeer1 != 100 || rowCount_ht2TargetAtPeer1 != 200) {
    utility1.restartHBaseCluster(1);
    rowCount_ht1Source = utility1.countRows(ht1Source);
    LOG.debug("t1_syncup should have 102 rows at source, and it is " + rowCount_ht1Source);
// NOTE(review): truncated fragment — the enclosing method's start and end
// are not visible in this chunk.
utility2.restartHBaseCluster(1);
// Expected counts at the peer after sync-up: 50 rows in t1, 100 in t2.
// On mismatch, restart the source cluster and log its actual row count.
if (rowCount_ht1TargetAtPeer1 != 50 || rowCount_ht2TargetAtPeer1 != 100) {
  utility1.restartHBaseCluster(1);
  rowCount_ht1Source = utility1.countRows(ht1Source);
  LOG.debug("t1_syncup should have 51 rows at source, and it is " + rowCount_ht1Source);
@Test public void testFlushedSequenceIdPersistLoad() throws Exception { Configuration conf = TEST_UTIL.getConfiguration(); int msgInterval = conf.getInt("hbase.regionserver.msginterval", 100); // insert some data into META TableName tableName = TableName.valueOf("testFlushSeqId"); HTableDescriptor desc = new HTableDescriptor(tableName); desc.addFamily(new HColumnDescriptor(Bytes.toBytes("cf"))); Table table = TEST_UTIL.createTable(desc, null); // flush META region TEST_UTIL.flush(TableName.META_TABLE_NAME); // wait for regionserver report Threads.sleep(msgInterval * 2); // record flush seqid before cluster shutdown Map<byte[], Long> regionMapBefore = TEST_UTIL.getHBaseCluster().getMaster().getServerManager() .getFlushedSequenceIdByRegion(); // restart hbase cluster which will cause flushed sequence id persist and reload TEST_UTIL.getMiniHBaseCluster().shutdown(); TEST_UTIL.restartHBaseCluster(2); TEST_UTIL.waitUntilNoRegionsInTransition(); // check equality after reloading flushed sequence id map Map<byte[], Long> regionMapAfter = TEST_UTIL.getHBaseCluster().getMaster().getServerManager() .getFlushedSequenceIdByRegion(); assertTrue(regionMapBefore.equals(regionMapAfter)); }
// NOTE(review): truncated fragment — the surrounding method is not visible.
// Switch the master load balancer to the favored-node stochastic balancer,
// restart the cluster, and spin until the master reports initialized.
UTIL.getConfiguration().set(HConstants.HBASE_MASTER_LOADBALANCER_CLASS,
    FavoredStochasticBalancer.class.getName());
UTIL.restartHBaseCluster(SLAVES);
while (!UTIL.getMiniHBaseCluster().getMaster().isInitialized()) {
  Threads.sleep(1);
// NOTE(review): truncated fragment — the enclosing method's start and end
// are not visible in this chunk.
utility2.restartHBaseCluster(1);
// Expected counts at the peer after sync-up: 200 rows in t1, 400 in t2.
// On mismatch, restart the source cluster and log its actual row count.
if (rowCount_ht1TargetAtPeer1 != 200 || rowCount_ht2TargetAtPeer1 != 400) {
  utility1.restartHBaseCluster(1);
  rowCount_ht1Source = utility1.countRows(ht1Source);
  LOG.debug("t1_syncup should have 206 rows at source, and it is " + rowCount_ht1Source);
/**
 * Exercises the HBase connection manager: runs a baseline query, closes the
 * cached connection to force automatic re-establishment, then bounces the
 * mini HBase cluster and verifies queries still succeed without restarting
 * the Drill cluster.
 */
@Test
public void testHBaseConnectionManager() throws Exception {
  setColumnWidth(8);
  final String query = "SELECT\n"
      + "row_key\n"
      + "FROM\n"
      + " hbase.`[TABLE_NAME]` tableName";
  // Baseline: the query succeeds against a healthy cluster.
  runHBaseSQLVerifyCount(query, 8);
  /*
   * Simulate HBase connection close and ensure that the connection
   * will be reestablished automatically.
   */
  storagePlugin.getConnection().close();
  runHBaseSQLVerifyCount(query, 8);
  /*
   * Simulate HBase cluster restart and ensure that running query against
   * HBase does not require Drill cluster restart.
   */
  HBaseTestsSuite.getHBaseTestingUtility().shutdownMiniHBaseCluster();
  HBaseTestsSuite.getHBaseTestingUtility().restartHBaseCluster(1);
  runHBaseSQLVerifyCount(query, 8);
}
// NOTE(review): truncated fragment — the surrounding method is not visible.
// Wait for the cluster to fully shut down, restart it with 3 servers on the
// same ports, then wait (up to 10s) for master initialization before
// looking up the region's server node from the assignment manager.
UTIL.getHBaseCluster().waitUntilShutDown();
LOG.info("Starting cluster the second time");
UTIL.restartHBaseCluster(3, ports);
UTIL.waitFor(10000, () -> UTIL.getHBaseCluster().getMaster().isInitialized());
serverNode = UTIL.getHBaseCluster().getMaster().getAssignmentManager().getRegionStates()
/**
 * Bounces the mini HBase cluster for this test: shuts it down, pauses
 * briefly, restarts it with one fewer region server than the base slave
 * count, and re-runs initialize() to rebuild test state.
 * DFS/ZooKeeper are not touched.
 */
private void restartHBaseCluster() throws Exception {
  LOG.info("\n\nShutting down cluster");
  TEST_UTIL.shutdownMiniHBaseCluster();
  LOG.info("\n\nSleeping a bit");
  // Short pause to let the shutdown settle before restarting.
  Thread.sleep(2000);
  // NOTE(review): restarts with NUM_SLAVES_BASE - 1 servers — presumably
  // deliberate (one server fewer than initial startup); confirm with callers.
  TEST_UTIL.restartHBaseCluster(NUM_SLAVES_BASE - 1);
  initialize();
}
}
@Test public void testConnectionRideOverClusterRestart() throws IOException, InterruptedException { Configuration config = new Configuration(TEST_UTIL.getConfiguration()); final TableName tableName = TableName.valueOf(name.getMethodName()); TEST_UTIL.createTable(tableName, new byte[][] {FAM_NAM}).close(); Connection connection = ConnectionFactory.createConnection(config); Table table = connection.getTable(tableName); // this will cache the meta location and table's region location table.get(new Get(Bytes.toBytes("foo"))); // restart HBase TEST_UTIL.shutdownMiniHBaseCluster(); TEST_UTIL.restartHBaseCluster(2); // this should be able to discover new locations for meta and table's region table.get(new Get(Bytes.toBytes("foo"))); TEST_UTIL.deleteTable(tableName); table.close(); connection.close(); }
// NOTE(review): this fragment is truncated — the method body continues
// beyond this chunk, so only the visible portion is documented.
private void mimicSyncUpAfterPut() throws Exception {
  LOG.debug("mimicSyncUpAfterPut");
  // Bounce the source cluster, then the peer cluster, so replication
  // sync-up runs against freshly started region servers.
  utility1.restartHBaseCluster(1);
  utility2.shutdownMiniHBaseCluster();
  utility2.restartHBaseCluster(1);
  // Expected counts at the peer after sync-up: 100 rows in t1, 200 in t2.
  // On mismatch, restart the source and recount its rows for the debug log.
  if (rowCount_ht1TargetAtPeer1 != 100 || rowCount_ht2TargetAtPeer1 != 200) {
    utility1.restartHBaseCluster(1);
    rowCount_ht1Source = utility1.countRows(ht1Source);
    LOG.debug("t1_syncup should have 102 rows at source, and it is " + rowCount_ht1Source);
// NOTE(review): truncated fragment — the enclosing method's start and end
// are not visible in this chunk.
utility2.restartHBaseCluster(1);
// Expected counts at the peer after sync-up: 50 rows in t1, 100 in t2.
// On mismatch, restart the source cluster and log its actual row count.
if (rowCount_ht1TargetAtPeer1 != 50 || rowCount_ht2TargetAtPeer1 != 100) {
  utility1.restartHBaseCluster(1);
  rowCount_ht1Source = utility1.countRows(ht1Source);
  LOG.debug("t1_syncup should have 51 rows at source, and it is " + rowCount_ht1Source);
// NOTE(review): truncated fragment — the surrounding method is not visible.
// Switch the master load balancer to the favored-node stochastic balancer,
// restart the cluster, and spin until the master reports initialized.
UTIL.getConfiguration().set(HConstants.HBASE_MASTER_LOADBALANCER_CLASS,
    FavoredStochasticBalancer.class.getName());
UTIL.restartHBaseCluster(SLAVES);
while (!UTIL.getMiniHBaseCluster().getMaster().isInitialized()) {
  Threads.sleep(1);
// NOTE(review): truncated fragment — the enclosing method's start and end
// are not visible in this chunk.
utility2.restartHBaseCluster(1);
// Expected counts at the peer after sync-up: 200 rows in t1, 400 in t2.
// On mismatch, restart the source cluster and log its actual row count.
if (rowCount_ht1TargetAtPeer1 != 200 || rowCount_ht2TargetAtPeer1 != 400) {
  utility1.restartHBaseCluster(1);
  rowCount_ht1Source = utility1.countRows(ht1Source);
  LOG.debug("t1_syncup should have 206 rows at source, and it is " + rowCount_ht1Source);
// NOTE(review): truncated fragment — the enclosing method's start and end
// are not visible in this chunk.
utility2.restartHBaseCluster(1);
// Expected counts at the peer after sync-up: 200 rows in t1, 400 in t2.
// On mismatch, restart the source cluster and log its actual row count.
if (rowCount_ht1TargetAtPeer1 != 200 || rowCount_ht2TargetAtPeer1 != 400) {
  utility1.restartHBaseCluster(1);
  rowCount_ht1Source = utility1.countRows(ht1Source);
  LOG.debug("t1_syncup should have 206 rows at source, and it is " + rowCount_ht1Source);
// NOTE(review): truncated fragment — the second statement continues beyond
// this chunk. Restart the mini HBase cluster, then verify a query still runs.
HBaseTestsSuite.getHBaseTestingUtility().restartHBaseCluster(1);
runHBaseSQLVerifyCount("SELECT\n" + "row_key\n"