/**
 * Returns a unique {@link Path} on the test filesystem (see {@link #getTestFileSystem()})
 * for writing temporary test data. Invoke only after the mini DFS cluster is up if the
 * test depends on it.
 * @param subdirName name of the subdirectory to create under the base test dir
 * @return a unique path in the test filesystem
 */
public Path getDataTestDirOnTestFS(final String subdirName) throws IOException {
  // Resolve the per-test base dir first, then append the requested child.
  Path baseDir = getDataTestDirOnTestFS();
  return new Path(baseDir, subdirName);
}
/**
 * Ensures {@code hbase.fs.tmp.dir} is set in the configuration, defaulting it to a
 * "hbase-staging" subdirectory on the test filesystem when absent. Logs the value used.
 * @throws IOException if the test-filesystem staging path cannot be resolved
 */
private void setHBaseFsTmpDir() throws IOException {
  // Hoist the repeated key literal and avoid re-reading the conf value we just wrote.
  final String key = "hbase.fs.tmp.dir";
  String hbaseFsTmpDirInString = this.conf.get(key);
  if (hbaseFsTmpDirInString == null) {
    String stagingDir = getDataTestDirOnTestFS("hbase-staging").toString();
    this.conf.set(key, stagingDir);
    LOG.info("Setting hbase.fs.tmp.dir to " + stagingDir);
  } else {
    LOG.info("The hbase.fs.tmp.dir is set to " + hbaseFsTmpDirInString);
  }
}
/**
 * Deletes a subdirectory under the test data directory on the test filesystem.
 * @param subdirName name of the subdirectory to remove
 * @return true if the child was removed
 * @throws IOException on filesystem error
 */
public boolean cleanupDataTestDirOnTestFS(String subdirName) throws IOException {
  // Recursive delete of the named child under the per-test base dir.
  return getTestFileSystem().delete(getDataTestDirOnTestFS(subdirName), true);
}
@Before
public void setUp() throws Exception {
  // Per-class root on the test FS; the store dir is the FAMILY child beneath it.
  Path root = UTILITY.getDataTestDirOnTestFS("TestMajorCompactionRequest");
  rootRegionDir = root;
  regionStoreDir = new Path(root, FAMILY);
}
/**
 * Builds a per-test, per-index path on the test filesystem, derived from the current
 * test method name.
 * @param index distinguishes multiple paths within one test method
 * @return path of the form {@code <testDir>/<sanitizedMethodName>-<index>}
 */
private Path getPath(int index) throws IOException {
  // Sanitize the method name so it is a legal filesystem path component.
  String safeName = name.getMethodName().replaceAll("[^A-Za-z0-9_-]", "_");
  String child = safeName + "-" + index;
  return new Path(UTIL.getDataTestDirOnTestFS(), child);
}
// Resolves a fresh per-test root directory on the shared test filesystem before each test.
@Before public void setup() throws IOException { root = TEST_UTIL.getDataTestDirOnTestFS(); }
@Test public void testSkipEmptyColumns() throws Exception { Path bulkOutputPath = new Path(util.getDataTestDirOnTestFS(tn.getNameAsString()), "hfiles"); args.put(ImportTsv.BULK_OUTPUT_CONF_KEY, bulkOutputPath.toString()); args.put(ImportTsv.COLUMNS_CONF_KEY, "HBASE_ROW_KEY,HBASE_TS_KEY,FAM:A,FAM:B"); args.put(ImportTsv.SEPARATOR_CONF_KEY, ","); args.put(ImportTsv.SKIP_EMPTY_COLUMNS, "true"); // 2 Rows of data as input. Both rows are valid and only 3 columns are no-empty among 4 String data = "KEY,1234,VALUE1,VALUE2\nKEY,1235,,VALUE2\n"; doMROnTableTest(util, tn, FAMILY, data, args, 1, 3); util.deleteTable(tn); }
@Test
public void testBulkOutputWithTsvImporterTextMapper() throws Exception {
  // Route output through the text mapper into a per-table "hfiles" dir.
  Path hfileDir = new Path(util.getDataTestDirOnTestFS(tn.getNameAsString()), "hfiles");
  args.put(ImportTsv.BULK_OUTPUT_CONF_KEY, hfileDir.toString());
  args.put(ImportTsv.MAPPER_CONF_KEY, "org.apache.hadoop.hbase.mapreduce.TsvImporterTextMapper");
  // Single row, \u001b-separated fields.
  String data = "KEY\u001bVALUE4\u001bVALUE8\n";
  doMROnTableTest(data, 4);
  util.deleteTable(tn);
}
@Test public void testDryModeWithBulkModeAndTableDoesNotExistsCreateTableSetToYes() throws Exception { // Prepare the arguments required for the test. Path hfiles = new Path(util.getDataTestDirOnTestFS(tn.getNameAsString()), "hfiles"); args.put(ImportTsv.BULK_OUTPUT_CONF_KEY, hfiles.toString()); args.put(ImportTsv.DRY_RUN_CONF_KEY, "true"); args.put(ImportTsv.CREATE_TABLE_CONF_KEY, "yes"); doMROnTableTest(null, 1); // Verify temporary table was deleted. exception.expect(TableNotFoundException.class); util.deleteTable(tn); }
@Test public void testBulkOutputWithoutAnExistingTable() throws Exception { // Prepare the arguments required for the test. Path hfiles = new Path(util.getDataTestDirOnTestFS(tn.getNameAsString()), "hfiles"); args.put(ImportTsv.BULK_OUTPUT_CONF_KEY, hfiles.toString()); doMROnTableTest(null, 3); util.deleteTable(tn); }
// One-time setup: start the mini cluster, raise the max-row-size limit, and resolve the
// per-class root dir on the test filesystem.
// NOTE(review): TABLE_MAX_ROWSIZE_KEY is set *after* startMiniCluster(); whether already
// started servers observe this configuration change cannot be confirmed from here — verify.
@BeforeClass public static void before() throws Exception { HTU.startMiniCluster(); HTU.getConfiguration().setLong(HConstants.TABLE_MAX_ROWSIZE_KEY, 10 * 1024 * 1024L); rootRegionDir = HTU.getDataTestDirOnTestFS("TestRowTooBig"); }
@Test public void testBulkOutputWithAnExistingTableNoStrictTrue() throws Exception { util.createTable(tn, FAMILY); // Prepare the arguments required for the test. Path hfiles = new Path(util.getDataTestDirOnTestFS(tn.getNameAsString()), "hfiles"); args.put(ImportTsv.BULK_OUTPUT_CONF_KEY, hfiles.toString()); args.put(ImportTsv.NO_STRICT_COL_FAMILY, "true"); doMROnTableTest(null, 3); util.deleteTable(tn); }
@Test public void testDryModeWithBulkOutputAndTableExists() throws Exception { util.createTable(tn, FAMILY); // Prepare the arguments required for the test. Path hfiles = new Path(util.getDataTestDirOnTestFS(tn.getNameAsString()), "hfiles"); args.put(ImportTsv.BULK_OUTPUT_CONF_KEY, hfiles.toString()); args.put(ImportTsv.DRY_RUN_CONF_KEY, "true"); doMROnTableTest(null, 1); // Dry mode should not delete an existing table. If it's not present, // this will throw TableNotFoundException. util.deleteTable(tn); }
/**
 * Creates a directory of HFiles for the given table on the test filesystem.
 * @param table table the files are built for
 * @param value seed used both in the directory name and in the generated files
 * @return the directory containing the generated HFiles
 */
private Path buildBulkFiles(TableName table, int value) throws Exception {
  FileSystem fs = util.getTestFileSystem();
  String tableStr = table.getNameAsString();
  // Layout: <testDir>/<table>/<table><value>
  Path bulkDir = new Path(util.getDataTestDirOnTestFS(tableStr), tableStr + value);
  buildHFiles(fs, bulkDir, value);
  return bulkDir;
}
// One-time setup: tune replication knobs (must happen before the cluster starts), bring up
// a 3-node mini cluster with the balancer off, and create the replicated-WAL log dir on the
// test filesystem.
@BeforeClass public static void setUpBeforeClass() throws Exception { UTIL.getConfiguration().setInt("replication.source.nb.capacity", 10); UTIL.getConfiguration().setLong("replication.sleep.before.failover", 1000); UTIL.getConfiguration().setLong("hbase.serial.replication.waiting.ms", 100); UTIL.startMiniCluster(3); // disable balancer so regions stay where the test puts them
    UTIL.getAdmin().balancerSwitch(false, true); LOG_DIR = UTIL.getDataTestDirOnTestFS("replicated"); FS = UTIL.getTestFileSystem(); FS.mkdirs(LOG_DIR); }
// One-time setup: pick the NIO netty transport, start a 3-node mini DFS cluster, make sure
// the test data dir exists on the test FS, and build the WALFactory used by the tests.
@BeforeClass public static void setUpBeforeClass() throws Exception { EVENT_LOOP_GROUP = new NioEventLoopGroup(); CHANNEL_CLASS = NioSocketChannel.class; UTIL.startMiniDFSCluster(3); UTIL.getTestFileSystem().mkdirs(UTIL.getDataTestDirOnTestFS()); WALS = new WALFactory(UTIL.getConfiguration(), TestCombinedAsyncWriter.class.getSimpleName()); }
/**
 * Creates an HRegion backed by a fresh WAL for the calling test method.
 * @param tableName table the region belongs to
 * @param startKey region start key
 * @param stopKey region stop key
 * @param callingMethod used to give each caller its own WAL directory
 * @param conf configuration for the WAL
 * @param isReadOnly whether the region is opened read-only
 * @param families column families to create
 * @return the initialized region
 */
protected HRegion initHRegion(TableName tableName, byte[] startKey, byte[] stopKey,
    String callingMethod, Configuration conf, boolean isReadOnly, byte[]... families)
    throws IOException {
  // WAL directory is keyed by the calling method so parallel tests do not collide.
  final Path walDir = TEST_UTIL.getDataTestDirOnTestFS(callingMethod + ".log");
  ChunkCreator.initialize(MemStoreLABImpl.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null);
  final HRegionInfo regionInfo = new HRegionInfo(tableName, startKey, stopKey);
  final WAL wal = HBaseTestingUtility.createWal(conf, walDir, regionInfo);
  return initHRegion(tableName, startKey, stopKey, isReadOnly, Durability.SYNC_WAL, wal,
      families);
}
// Per-test setup: fresh testing utility, a 1-node mini DFS cluster, and the HBase root dir
// pointed at this test's data dir on the test filesystem.
@Before public void setup() throws Exception { testUtil = new HBaseTestingUtility(); testUtil.startMiniDFSCluster(1); testDir = testUtil.getDataTestDirOnTestFS(); FSUtils.setRootDir(testUtil.getConfiguration(), testDir); }
// One-time setup: start a 1-node mini DFS cluster, capture its filesystem, and point the
// HBase root dir at the per-class "TestDurability" dir on the test filesystem.
@BeforeClass public static void setUpBeforeClass() throws Exception { CONF = TEST_UTIL.getConfiguration(); TEST_UTIL.startMiniDFSCluster(1); CLUSTER = TEST_UTIL.getDFSCluster(); FS = CLUSTER.getFileSystem(); DIR = TEST_UTIL.getDataTestDirOnTestFS("TestDurability"); FSUtils.setRootDir(CONF, DIR); }
// Per-test setup: resolve the test-FS root dir, register a replication peer (cluster key of
// the test cluster, sync replication off), and enroll that peer for HFile-ref tracking.
@Before public void setup() throws ReplicationException, IOException { root = TEST_UTIL.getDataTestDirOnTestFS(); rp.getPeerStorage().addPeer(peerId, ReplicationPeerConfig.newBuilder().setClusterKey(TEST_UTIL.getClusterKey()).build(), true, SyncReplicationState.NONE); rq.addPeerToHFileRefs(peerId); }