@BeforeClass
public static void setUpBeforeClass() throws Exception {
  // Bring up a single-DataNode mini DFS cluster shared by all tests in this class.
  TEST_UTIL.startMiniDFSCluster(1);
  FS = TEST_UTIL.getDFSCluster().getFileSystem();
  Path rootDir = TEST_UTIL.createRootDir();
  // Start from clean log directories. FileSystem.delete() simply returns false
  // when the path does not exist, so the previous exists() pre-checks were
  // redundant extra round-trips to the NameNode.
  oldLogDir = new Path(rootDir, HConstants.HREGION_OLDLOGDIR_NAME);
  FS.delete(oldLogDir, true);
  logDir = new Path(rootDir, HConstants.HREGION_LOGDIR_NAME);
  FS.delete(logDir, true);
}
@After public void tearDown() throws Exception { try { wals.shutdown(); } catch (IOException exception) { // one of our tests splits out from under our wals. LOG.warn("Ignoring failure to close wal factory. " + exception.getMessage()); LOG.debug("details of failure to close wal factory.", exception); } TEST_UTIL.getDFSCluster().getFileSystem().delete(this.hbaseRootDir, true); TEST_UTIL.getDFSCluster().getFileSystem().delete(this.hbaseWALRootDir, true); }
@Before public void setUp() throws Exception { htu = new HBaseTestingUtility(); htu.getConfiguration().setInt("dfs.blocksize", 1024);// For the test with multiple blocks htu.getConfiguration().setInt("dfs.replication", 3); htu.startMiniDFSCluster(3, new String[]{"/r1", "/r2", "/r3"}, new String[]{host1, host2, host3}); conf = htu.getConfiguration(); cluster = htu.getDFSCluster(); dfs = (DistributedFileSystem) FileSystem.get(conf); }
@BeforeClass
public static void setUp() throws Exception {
  // One three-DataNode mini DFS cluster is shared by every test in this class.
  UTIL.startMiniDFSCluster(3);
  FS = UTIL.getDFSCluster().getFileSystem();
}
@Before public void setUp() throws Exception { htu = new HBaseTestingUtility(); htu.getConfiguration().setInt("dfs.blocksize", 1024);// For the test with multiple blocks htu.getConfiguration().setInt("dfs.replication", 3); htu.startMiniDFSCluster(3, new String[]{"/r1", "/r2", "/r3"}, new String[]{host1, host2, host3}); conf = htu.getConfiguration(); cluster = htu.getDFSCluster(); dfs = (DistributedFileSystem) FileSystem.get(conf); }
@Before public void setUp() throws Exception { htu = new HBaseTestingUtility(); htu.getConfiguration().setInt("dfs.blocksize", 1024);// For the test with multiple blocks htu.getConfiguration().setInt("dfs.replication", 3); htu.startMiniDFSCluster(3, new String[]{"/r1", "/r2", "/r3"}, new String[]{host1, host2, host3}); conf = htu.getConfiguration(); cluster = htu.getDFSCluster(); dfs = (DistributedFileSystem) FileSystem.get(conf); }
@BeforeClass public static void setUpBeforeClass() throws Exception { Configuration conf = TEST_UTIL.getConfiguration(); // The below config supported by 0.20-append and CDH3b2 conf.setInt("dfs.client.block.recovery.retries", 2); TEST_UTIL.startMiniCluster(3); Path hbaseRootDir = TEST_UTIL.getDFSCluster().getFileSystem().makeQualified(new Path("/hbase")); LOG.info("hbase.rootdir=" + hbaseRootDir); FSUtils.setRootDir(conf, hbaseRootDir); }
// NOTE(review): deliberately NOT annotated with @After (the suppressed
// warning says as much) — presumably invoked manually by the tests; confirm
// against callers before adding the annotation.
@SuppressWarnings("JUnit4TearDownNotRun")
public void tearDown() throws Exception {
  store.stop(false);
  UTIL.getDFSCluster().getFileSystem().delete(store.getWALDir(), true);
  try {
    UTIL.shutdownMiniCluster();
  } catch (Exception e) {
    // Best effort: a failed shutdown must not mask the test's own result.
    LOG.warn("failure shutting down cluster", e);
  }
}
@BeforeClass
public static void setUp() throws Exception {
  // Shorten the DFS client socket timeout so read-timeout paths fire quickly.
  TEST_UTIL.getConfiguration().setInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY, READ_TIMEOUT_MS);
  TEST_UTIL.startMiniDFSCluster(3);
  FS = TEST_UTIL.getDFSCluster().getFileSystem();
  // Netty NIO transport used by the tests.
  EVENT_LOOP_GROUP = new NioEventLoopGroup();
  CHANNEL_CLASS = NioSocketChannel.class;
}
@Test
public void testStreamCreate() throws IOException {
  Path path = new Path("/testStreamCreate");
  // Streams created for WAL use must advertise hflush support.
  try (FSDataOutputStream out =
      CommonFSUtils.createForWal(util.getDFSCluster().getFileSystem(), path, true)) {
    assertTrue(CommonFSUtils.hasCapability(out, HFLUSH));
  }
}
@BeforeClass
public static void startCluster() throws Exception {
  // Single-DataNode mini DFS cluster; cache the filesystem and root dir.
  UTIL.startMiniDFSCluster(1);
  fs = UTIL.getDFSCluster().getFileSystem();
  rootDir = UTIL.getDefaultRootDirPath();
}
@After
public void tearDown() throws Exception {
  // Release WAL resources first, then wipe the root dir so each test
  // starts clean.
  this.wals.close();
  TEST_UTIL.getDFSCluster().getFileSystem().delete(this.hbaseRootDir, true);
}
@Before
public void setUp() throws Exception {
  fs = TEST_UTIL.getDFSCluster().getFileSystem();
  // Key the per-test directory and WAL factory by the running test's name
  // so successive tests do not collide.
  String testName = currentTest.getMethodName();
  dir = new Path(TEST_UTIL.createRootDir(), testName);
  wals = new WALFactory(TEST_UTIL.getConfiguration(), testName);
}
/**
 * Builds an {@link HRegionFileSystem} for the single region of the given
 * table, sanity-checking the on-disk layout (one region dir, two family dirs).
 */
private HRegionFileSystem getHRegionFS(HTable table, Configuration conf) throws IOException {
  FileSystem fileSystem = TEST_UTIL.getDFSCluster().getFileSystem();
  Path tableDir = FSUtils.getTableDir(TEST_UTIL.getDefaultRootDirPath(), table.getName());
  List<Path> regionDirs = FSUtils.getRegionDirs(fileSystem, tableDir);
  assertEquals(1, regionDirs.size());
  List<Path> familyDirs = FSUtils.getFamilyDirs(fileSystem, regionDirs.get(0));
  assertEquals(2, familyDirs.size());
  RegionInfo hri = table.getRegionLocator().getAllRegionLocations().get(0).getRegionInfo();
  return new HRegionFileSystem(conf, new HFileSystem(fileSystem), tableDir, hri);
}
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  CONF = TEST_UTIL.getConfiguration();
  // One DataNode is enough for the durability tests.
  TEST_UTIL.startMiniDFSCluster(1);
  CLUSTER = TEST_UTIL.getDFSCluster();
  FS = CLUSTER.getFileSystem();
  DIR = TEST_UTIL.getDataTestDirOnTestFS("TestDurability");
  FSUtils.setRootDir(CONF, DIR);
}
/**
 * Set up the cluster configuration, start the mini cluster, and cache the
 * handles (filesystem, master, root and archive dirs) the tests need.
 */
@BeforeClass
public static void setupCluster() throws Exception {
  setupConf(UTIL.getConfiguration());
  UTIL.startMiniCluster(NUM_RS);
  fs = UTIL.getDFSCluster().getFileSystem();
  master = UTIL.getMiniHBaseCluster().getMaster();
  rootDir = master.getMasterFileSystem().getRootDir();
  // Archived HFiles land under the archive directory beneath the root dir.
  archiveDir = new Path(rootDir, HConstants.HFILE_ARCHIVE_DIRECTORY);
}
@Before
public void startCluster() throws Exception {
  TEST_UTIL.startMiniCluster(SLAVES);
  // Wait for HDFS and for an active, ready master (up to two minutes)
  // before the tests poke at the cluster.
  TEST_UTIL.getDFSCluster().waitClusterUp();
  TEST_UTIL.getHBaseCluster().waitForActiveAndReadyMaster(120 * 1000);
  cluster = TEST_UTIL.getMiniHBaseCluster();
  admin = TEST_UTIL.getAdmin();
  // The balancer would move regions out from under the tests; keep it off.
  admin.setBalancerRunning(false, true);
}
@Before
public void startCluster() throws Exception {
  TEST_UTIL.startMiniCluster(SLAVES);
  // Make sure HDFS is fully up before continuing.
  TEST_UTIL.getDFSCluster().waitClusterUp();
  cluster = TEST_UTIL.getMiniHBaseCluster();
  master = cluster.getMaster();
  admin = TEST_UTIL.getAdmin();
  // Disable balancing so regions stay put during the tests.
  admin.setBalancerRunning(false, true);
}
/**
 * Set up the cluster configuration and bring up the mini cluster, caching
 * the filesystem and root directory for the tests.
 */
@BeforeClass
public static void setupCluster() throws Exception {
  setupConf(TEST_UTIL.getConfiguration());
  TEST_UTIL.startMiniCluster(NUM_RS);
  fs = TEST_UTIL.getDFSCluster().getFileSystem();
  rootDir = TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem().getRootDir();
}
@Before public void setUp() throws Exception { // Use 2 DataNodes and default values for other StartMiniCluster options. TEST_UTIL.startMiniCluster(StartMiniClusterOption.builder().numDataNodes(2).build()); cluster = TEST_UTIL.getHBaseCluster(); dfsCluster = TEST_UTIL.getDFSCluster(); fs = TEST_UTIL.getTestFileSystem(); admin = TEST_UTIL.getAdmin(); // disable region rebalancing (interferes with log watching) cluster.getMaster().balanceSwitch(false); }