/**
 * Close both the region {@code r} and its underlying WAL. For use in tests.
 *
 * @param r the region to close; must be an {@code HRegion} (it is cast below)
 * @throws IOException if closing the region or its WAL fails
 */
public static void closeRegionAndWAL(final Region r) throws IOException {
  closeRegionAndWAL((HRegion)r);
}
/**
 * Closes the {@code meta} region and its WAL. Test cleanup helper.
 *
 * @throws IOException if closing the region or its WAL fails
 */
protected void closeRootAndMeta() throws IOException {
  HBaseTestingUtility.closeRegionAndWAL(meta);
}
@Override public void doWork() { try { startingPuts.await(); // Give some time for the batch mutate to get in. // We don't want to race with the mutate Thread.sleep(10); startingClose.countDown(); HBaseTestingUtility.closeRegionAndWAL(region); region = null; } catch (IOException e) { throw new RuntimeException(e); } catch (InterruptedException e) { throw new RuntimeException(e); } } };
/**
 * Closes the test region and its WAL after each test.
 */
@After
public void tearDown() throws Exception {
  HBaseTestingUtility.closeRegionAndWAL(this.region);
}
/**
 * Closes the test region and its WAL after each test.
 */
@After
public void tearDown() throws Exception {
  HBaseTestingUtility.closeRegionAndWAL(region);
}
/**
 * Closes the test region and its WAL after each test.
 */
@After
public void tearDown() throws Exception {
  HBaseTestingUtility.closeRegionAndWAL(region);
}
/**
 * Resets shared counters/latch state and releases the region after each test,
 * then cleans the test directory.
 */
@After
public void tearDown() throws IOException {
  // Reset shared mutable state so it does not leak into the next test.
  counter.set(0);
  scanCompletedCounter.set(0);
  latch = new CountDownLatch(3);
  HBaseTestingUtility.closeRegionAndWAL(region);
  testUtil.cleanupTestDir();
}
/**
 * Closes the reader and both regions (primary and secondary replica) and
 * resets the environment edge. Each close is chained in a {@code finally}
 * block so a failure closing one resource does not prevent the remaining
 * resources from being cleaned up.
 */
@After
public void tearDown() throws Exception {
  try {
    if (reader != null) {
      reader.close();
    }
  } finally {
    try {
      if (primaryRegion != null) {
        HBaseTestingUtility.closeRegionAndWAL(primaryRegion);
      }
    } finally {
      try {
        if (secondaryRegion != null) {
          HBaseTestingUtility.closeRegionAndWAL(secondaryRegion);
        }
      } finally {
        // Always restore the environment edge, even if a close above threw.
        EnvironmentEdgeManagerTestHelper.reset();
      }
    }
  }
}
/**
 * Closes the test region and its WAL, then cleans the test directory.
 */
@After
public void tearDown() throws Exception {
  HBaseTestingUtility.closeRegionAndWAL(region);
  testUtil.cleanupTestDir();
}
/**
 * Closes the region (if one was opened) and restores the global lazy-seek
 * flag to its default.
 */
@After
public void tearDown() throws IOException {
  if (region != null) {
    HBaseTestingUtility.closeRegionAndWAL(region);
  }
  // We have to re-set the lazy seek flag back to the default so that other
  // unit tests are not affected.
  StoreScanner.enableLazySeekGlobally(
      StoreScanner.LAZY_SEEK_ENABLED_BY_DEFAULT);
}
/**
 * Closes the region (double-close is tolerated), resets the environment edge,
 * and cleans the test directory.
 */
@After
public void tearDown() throws IOException {
  // Region may have been closed, but it is still no harm if we close it again here using HTU.
  HBaseTestingUtility.closeRegionAndWAL(region);
  EnvironmentEdgeManagerTestHelper.reset();
  LOG.info("Cleaning test directory: " + TEST_UTIL.getDataTestDir());
  TEST_UTIL.cleanupTestDir();
}
@Test public void testRegionObserverScanTimeStacking() throws Exception { byte[] ROW = Bytes.toBytes("testRow"); byte[] TABLE = Bytes.toBytes(getClass().getName()); byte[] A = Bytes.toBytes("A"); byte[][] FAMILIES = new byte[][] { A }; // Use new HTU to not overlap with the DFS cluster started in #CompactionStacking Configuration conf = new HBaseTestingUtility().getConfiguration(); HRegion region = initHRegion(TABLE, getClass().getName(), conf, FAMILIES); RegionCoprocessorHost h = region.getCoprocessorHost(); h.load(NoDataFromScan.class, Coprocessor.PRIORITY_HIGHEST, conf); h.load(EmptyRegionObsever.class, Coprocessor.PRIORITY_USER, conf); Put put = new Put(ROW); put.addColumn(A, A, A); region.put(put); Get get = new Get(ROW); Result r = region.get(get); assertNull( "Got an unexpected number of rows - no data should be returned with the NoDataFromScan coprocessor. Found: " + r, r.listCells()); HBaseTestingUtility.closeRegionAndWAL(region); }
/** * Tests to do a concurrent flush (using a 2nd thread) while scanning. This tests both * the StoreScanner update readers and the transition from memstore -> snapshot -> store file. * * @throws Exception */ @Test public void testScanAndRealConcurrentFlush() throws Exception { this.region = TEST_UTIL.createLocalHRegion(TESTTABLEDESC, null, null); Table hri = new RegionAsTable(region); try { LOG.info("Added: " + HBaseTestCase.addContent(hri, Bytes.toString(HConstants.CATALOG_FAMILY), Bytes.toString(HConstants.REGIONINFO_QUALIFIER))); int count = count(hri, -1, false); assertEquals(count, count(hri, 100, true)); // do a true concurrent background thread flush } catch (Exception e) { LOG.error("Failed", e); throw e; } finally { HBaseTestingUtility.closeRegionAndWAL(this.region); } }
/** * Tests to do a sync flush during the middle of a scan. This is testing the StoreScanner * update readers code essentially. This is not highly concurrent, since its all 1 thread. * HBase-910. * @throws Exception */ @Test public void testScanAndSyncFlush() throws Exception { this.region = TEST_UTIL.createLocalHRegion(TESTTABLEDESC, null, null); Table hri = new RegionAsTable(region); try { LOG.info("Added: " + HBaseTestCase.addContent(hri, Bytes.toString(HConstants.CATALOG_FAMILY), Bytes.toString(HConstants.REGIONINFO_QUALIFIER))); int count = count(hri, -1, false); assertEquals(count, count(hri, 100, false)); // do a sync flush. } catch (Exception e) { LOG.error("Failed", e); throw e; } finally { HBaseTestingUtility.closeRegionAndWAL(this.region); } }
@Test public void testRegionObserverFlushTimeStacking() throws Exception { byte[] ROW = Bytes.toBytes("testRow"); byte[] TABLE = Bytes.toBytes(getClass().getName()); byte[] A = Bytes.toBytes("A"); byte[][] FAMILIES = new byte[][] { A }; // Use new HTU to not overlap with the DFS cluster started in #CompactionStacking Configuration conf = new HBaseTestingUtility().getConfiguration(); HRegion region = initHRegion(TABLE, getClass().getName(), conf, FAMILIES); RegionCoprocessorHost h = region.getCoprocessorHost(); h.load(NoDataFromFlush.class, Coprocessor.PRIORITY_HIGHEST, conf); h.load(EmptyRegionObsever.class, Coprocessor.PRIORITY_USER, conf); // put a row and flush it to disk Put put = new Put(ROW); put.addColumn(A, A, A); region.put(put); region.flush(true); Get get = new Get(ROW); Result r = region.get(get); assertNull( "Got an unexpected number of rows - no data should be returned with the NoDataFromScan coprocessor. Found: " + r, r.listCells()); HBaseTestingUtility.closeRegionAndWAL(region); }
/**
 * Exercises scanning with a PrefixFilter and then with a
 * WhileMatchFilter-wrapped InclusiveStopFilter against a local region.
 */
@Test
public void testFilters() throws IOException {
  try {
    this.region = TEST_UTIL.createLocalHRegion(TESTTABLEDESC, null, null);
    HBaseTestCase.addContent(this.region, HConstants.CATALOG_FAMILY);

    // Scan only rows whose keys start with "ab".
    byte[] rowPrefix = Bytes.toBytes("ab");
    Scan prefixScan = new Scan();
    prefixScan.setFilter(new PrefixFilter(rowPrefix));
    rowPrefixFilter(prefixScan);

    // Scan up to and including the stop row "bbc".
    byte[] stopRow = Bytes.toBytes("bbc");
    Scan stopScan = new Scan();
    stopScan.setFilter(new WhileMatchFilter(new InclusiveStopFilter(stopRow)));
    rowInclusiveStopFilter(stopScan, stopRow);
  } finally {
    HBaseTestingUtility.closeRegionAndWAL(this.region);
  }
}
/**
 * Creates three store files with overlapping column sets and scans an
 * explicit column set across them. The region is now closed in a
 * {@code finally} block — consistent with the other tests in this file — so
 * its WAL is released even when store-file creation or the scan fails.
 */
@Test
public void testThreeStoreFiles() throws IOException {
  region = TEST_UTIL.createTestRegion(TABLE_NAME,
      new HColumnDescriptor(FAMILY)
          .setCompressionType(Compression.Algorithm.GZ)
          .setBloomFilterType(bloomType)
          .setMaxVersions(TestMultiColumnScanner.MAX_VERSIONS));
  try {
    createStoreFile(new int[] {1, 2, 6});
    createStoreFile(new int[] {1, 2, 3, 7});
    createStoreFile(new int[] {1, 9});
    // Request columns {1,4,6,7}; only {1,6,7} exist across the store files.
    scanColSet(new int[]{1, 4, 6, 7}, new int[]{1, 6, 7});
  } finally {
    HBaseTestingUtility.closeRegionAndWAL(region);
  }
}
/**
 * Verifies lazy-seek reads in the presence of a delete-family marker: columns
 * written below the marker's timestamp are masked, while a column written
 * above it survives.
 */
@Test
public void testLazySeekBlocksReadWithDelete() throws Exception {
  byte[] TABLE = Bytes.toBytes("testLazySeekBlocksReadWithDelete");
  String FAMILY = "cf1";
  this.region = initHRegion(TABLE, testName.getMethodName(), conf, FAMILY);
  try {
    // Delete the whole family at ts=200, then write 100 columns at
    // timestamps below the marker and one column ("col99") above it.
    deleteFamily(FAMILY, "row", 200);
    for (int col = 0; col < 100; col++) {
      putData(FAMILY, "row", "col" + col, col);
    }
    putData(FAMILY, "row", "col99", 201);
    region.flush(true);

    // col0 (ts=0) is masked by the family delete at ts=200.
    Cell[] kvs = getData(FAMILY, "row", Arrays.asList("col0"), 2);
    assertEquals(0, kvs.length);
    // col99 (ts=201) is newer than the delete marker, so it is visible.
    kvs = getData(FAMILY, "row", Arrays.asList("col99"), 2);
    assertEquals(1, kvs.length);
    verifyData(kvs[0], "row", "col99", 201);
  } finally {
    HBaseTestingUtility.closeRegionAndWAL(this.region);
    this.region = null;
  }
}
/** * The ExplicitColumnTracker does not support "raw" scanning. */ @Test public void testRawScanWithColumns() throws Exception { HTableDescriptor htd = hbu.createTableDescriptor(name.getMethodName(), 0, 3, HConstants.FOREVER, KeepDeletedCells.TRUE); Region region = hbu.createLocalHRegion(htd, null, null); Scan s = new Scan(); s.setRaw(true); s.setMaxVersions(); s.addColumn(c0, c0); try { region.getScanner(s); fail("raw scanner with columns should have failed"); } catch (org.apache.hadoop.hbase.DoNotRetryIOException dnre) { // ok! } HBaseTestingUtility.closeRegionAndWAL(region); }
/**
 * Verifies that the .regioninfo file is written when a region is created and
 * is NOT rewritten when the region is later re-opened (file mod time is
 * unchanged), and that the file content can be loaded back.
 */
@Test
public void testReadAndWriteRegionInfoFile() throws IOException, InterruptedException {
  HBaseTestingUtility htu = new HBaseTestingUtility();
  RegionInfo ri = RegionInfoBuilder.FIRST_META_REGIONINFO;
  Path basedir = htu.getDataTestDir();
  // Create a region. That'll write the .regioninfo file.
  FSTableDescriptors fsTableDescriptors = new FSTableDescriptors(htu.getConfiguration());
  HRegion r = HBaseTestingUtility.createRegionAndWAL(convert(ri), basedir, htu.getConfiguration(),
      fsTableDescriptors.get(TableName.META_TABLE_NAME));
  // Get modtime on the file.
  long modtime = getModTime(r);
  HBaseTestingUtility.closeRegionAndWAL(r);
  // Sleep past one second so a rewrite on reopen would show a different mod time.
  Thread.sleep(1001);
  r = HRegion.openHRegion(basedir, convert(ri),
      fsTableDescriptors.get(TableName.META_TABLE_NAME), null, htu.getConfiguration());
  // Ensure the file is not written for a second time.
  long modtime2 = getModTime(r);
  assertEquals(modtime, modtime2);
  // Now load the file.
  // NOTE(review): deserializedRi is not asserted on — the load apparently just
  // verifies the .regioninfo content parses without error.
  RegionInfo deserializedRi = HRegionFileSystem.loadRegionInfoFileContent(
      r.getRegionFileSystem().getFileSystem(), r.getRegionFileSystem().getRegionDir());
  HBaseTestingUtility.closeRegionAndWAL(r);
}