@Override
public boolean hasTooManyStoreFiles() {
  // Too many means strictly more store files than the configured blocking threshold.
  final int currentFileCount = getStorefilesCount();
  return currentFileCount > this.blockingFileCount;
}
@Override
public boolean evaluate() throws Exception {
  // The wait condition holds once compaction has collapsed the store to a single file.
  final int fileCount = store.getStorefilesCount();
  return fileCount == 1;
}
// Human-readable reason reported if the surrounding wait predicate times out.
@Override public String explainFailure() throws Exception { return "The store file count " + store.getStorefilesCount() + " is still greater than 1"; } });
public int countStoreFiles() {
  // Total number of store files across all stores of this region.
  return stores.values().stream().mapToInt(HStore::getStorefilesCount).sum();
} }
/**
 * Polls (100 ms steps) until the store reports the expected file count or the timeout
 * elapses, then asserts the count was reached.
 */
private void waitForStoreFileCount(HStore store, int count, int timeout)
    throws InterruptedException {
  final long start = System.currentTimeMillis();
  final long deadline = start + timeout;
  while (System.currentTimeMillis() < deadline && store.getStorefilesCount() != count) {
    Thread.sleep(100);
  }
  System.out.println("start=" + start + ", now=" + System.currentTimeMillis() + ", cur="
      + store.getStorefilesCount());
  assertEquals(count, store.getStorefilesCount());
}
/**
 * Spin-waits in 100 ms steps for the store file count to reach the target, bounded by the
 * given timeout (milliseconds), and asserts the final count.
 */
private void waitForStoreFileCount(HStore store, int count, int timeout)
    throws InterruptedException {
  final long begin = System.currentTimeMillis();
  while (begin + timeout > System.currentTimeMillis()
      && store.getStorefilesCount() != count) {
    Thread.sleep(100);
  }
  System.out.println("start=" + begin + ", now=" + System.currentTimeMillis() + ", cur="
      + store.getStorefilesCount());
  assertEquals(count, store.getStorefilesCount());
}
private int countStoreFiles() throws IOException {
  // Number of store files currently held by this region's target column family.
  return region.getStore(COLUMN_FAMILY).getStorefilesCount();
}
/**
 * Major-compacts the prepared ten store files with throughput limiting disabled and returns
 * the wall-clock duration of the compaction in milliseconds.
 */
private long testCompactionWithoutThroughputLimit() throws Exception {
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.set(StoreEngine.STORE_ENGINE_CLASS_KEY, DefaultStoreEngine.class.getName());
  // Thresholds are far above the actual file count so only the explicit major compact runs.
  conf.setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MIN_KEY, 100);
  conf.setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MAX_KEY, 200);
  conf.setInt(HStore.BLOCKING_STOREFILES_KEY, 10000);
  conf.set(CompactionThroughputControllerFactory.HBASE_THROUGHPUT_CONTROLLER_KEY,
      NoLimitThroughputController.class.getName());
  TEST_UTIL.startMiniCluster(1);
  try {
    HStore store = prepareData();
    assertEquals(10, store.getStorefilesCount());
    long compactionStart = System.currentTimeMillis();
    TEST_UTIL.getAdmin().majorCompact(tableName);
    // Busy-wait until the ten files have been compacted down to one.
    while (store.getStorefilesCount() != 1) {
      Thread.sleep(20);
    }
    return System.currentTimeMillis() - compactionStart;
  } finally {
    TEST_UTIL.shutdownMiniCluster();
  }
}
// NOTE(review): this line duplicates part of the body of testCompactionWithoutThroughputLimit
// elsewhere in this file and is cut off mid-loop — it looks like an extraction artifact;
// confirm against the full source before editing.
try { HStore store = prepareData(); assertEquals(10, store.getStorefilesCount()); long startTime = System.currentTimeMillis(); TEST_UTIL.getAdmin().majorCompact(tableName); while (store.getStorefilesCount() != 1) { Thread.sleep(20);
private void loadFlushAndCompact(HRegion region, byte[] family) throws IOException { // create two hfiles in the region createHFileInRegion(region, family); createHFileInRegion(region, family); HStore s = region.getStore(family); int count = s.getStorefilesCount(); assertTrue("Don't have the expected store files, wanted >= 2 store files, but was:" + count, count >= 2); // compact the two files into one file to get files in the archive LOG.debug("Compacting stores"); region.compact(true); }
@Test public void testRefreshStoreFilesNotChanged() throws IOException { init(name.getMethodName()); assertEquals(0, this.store.getStorefilesCount()); // add some data, flush this.store.add(new KeyValue(row, family, qf1, 1, (byte[])null), null); flush(1); // add one more file addStoreFile(); HStore spiedStore = spy(store); // call first time after files changed spiedStore.refreshStoreFiles(); assertEquals(2, this.store.getStorefilesCount()); verify(spiedStore, times(1)).replaceStoreFiles(any(), any()); // call second time spiedStore.refreshStoreFiles(); //ensure that replaceStoreFiles is not called if files are not refreshed verify(spiedStore, times(0)).replaceStoreFiles(null, null); }
/** * Writes Puts to the table and flushes few times. * @return {@link Pair} of (throughput, duration). */ private Pair<Double, Long> generateAndFlushData(Table table) throws IOException { // Internally, throughput is controlled after every cell write, so keep value size less for // better control. final int NUM_FLUSHES = 3, NUM_PUTS = 50, VALUE_SIZE = 200 * 1024; Random rand = new Random(); long duration = 0; for (int i = 0; i < NUM_FLUSHES; i++) { // Write about 10M (10 times of throughput rate) per iteration. for (int j = 0; j < NUM_PUTS; j++) { byte[] value = new byte[VALUE_SIZE]; rand.nextBytes(value); table.put(new Put(Bytes.toBytes(i * 10 + j)).addColumn(family, qualifier, value)); } long startTime = System.nanoTime(); hbtu.getAdmin().flush(tableName); duration += System.nanoTime() - startTime; } HStore store = getStoreWithName(tableName); assertEquals(NUM_FLUSHES, store.getStorefilesCount()); double throughput = (double)store.getStorefilesSize() / TimeUnit.NANOSECONDS.toSeconds(duration); return new Pair<>(throughput, duration); }
// NOTE(review): fragment — the count is read twice back-to-back, so this assertion only
// verifies the count is stable across consecutive reads; confirm no intervening code was
// lost during extraction.
int initialFiles = s.getStorefilesCount(); assertEquals(initialFiles, s.getStorefilesCount());
// NOTE(review): extraction artifact — these assertions expect different counts
// (0, 0, 1, 1, 2, 2, 5, 5, 4, 4, 1, 0) with only refreshStoreFiles()/addStoreFile() calls
// visible between them; the original test almost certainly had file add/remove steps in
// between. Confirm against the full source before editing.
init(name.getMethodName()); assertEquals(0, this.store.getStorefilesCount()); assertEquals(0, this.store.getStorefilesCount()); assertEquals(1, this.store.getStorefilesCount()); assertEquals(1, this.store.getStorefilesCount()); store.refreshStoreFiles(); assertEquals(2, this.store.getStorefilesCount()); addStoreFile(); assertEquals(2, this.store.getStorefilesCount()); store.refreshStoreFiles(); assertEquals(5, this.store.getStorefilesCount()); assertEquals(5, this.store.getStorefilesCount()); store.refreshStoreFiles(); assertEquals(4, this.store.getStorefilesCount()); assertEquals(4, this.store.getStorefilesCount()); store.refreshStoreFiles(); assertEquals(1, this.store.getStorefilesCount()); assertEquals(0, this.store.getStorefilesCount());
@Test public void test() throws Exception { // sleep every 10 loops to give memstore compaction enough time to finish before reaching the // flush size. doIncrement(10); assertSum(); HStore store = UTIL.getHBaseCluster().findRegionsForTable(NAME).get(0).getStore(FAMILY); // should have no store files created as we have done aggregating all in memory assertEquals(0, store.getStorefilesCount()); } }
// Blocks until, for every store of every region of the table, the store has exactly one
// store file and at least one of its files is not marked compacted-away.
// NOTE(review): there is no overall timeout, so this spins forever if compaction never
// completes. The Thread.sleep(100) inside the per-file loop presumably waits for
// compacted-away files to be cleaned up before re-checking — confirm intent before
// restructuring; the double `compacted = false` assignment (declaration and loop body) is
// redundant but harmless.
private static void waitForCompaction(TableName tableName) throws IOException, InterruptedException { boolean compacted = false; for (Region region : TEST_UTIL.getRSForFirstRegionInTable(tableName) .getRegions(tableName)) { for (HStore store : ((HRegion) region).getStores()) { compacted = false; while (!compacted) { if (store.getStorefiles() != null) { while (store.getStorefilesCount() != 1) { Thread.sleep(100); } for (HStoreFile storefile : store.getStorefiles()) { if (!storefile.isCompactedAway()) { compacted = true; break; } Thread.sleep(100); } } else { break; } } } } }
// Verifies that a major compaction collapses the prepared 10 store files down to a single
// file within 30 seconds; the ExplainingPredicate reports the remaining file count if the
// wait times out.
@Test public void testPurgeExpiredFiles() throws Exception { HStore store = prepareData(); assertEquals(10, store.getStorefilesCount()); TEST_UTIL.getAdmin().majorCompact(tableName); TEST_UTIL.waitFor(30000, new ExplainingPredicate<Exception>() { @Override public boolean evaluate() throws Exception { return store.getStorefilesCount() == 1; } @Override public String explainFailure() throws Exception { return "The store file count " + store.getStorefilesCount() + " is still greater than 1"; } }); }
// End-to-end expiration test: writes rows with back-dated timestamps, advances the
// expire-before marker, flushes/major-compacts, and checks only rows newer than the marker
// survive. Finally waits up to 30s for the major compaction to leave a single store file.
// NOTE(review): the Thread.sleep(5000) calls after setExpireBefore presumably give the
// marker time to propagate (e.g. via ZooKeeper, given KeeperException in the signature) —
// confirm before changing the timings.
@Test public void test() throws IOException, KeeperException, InterruptedException { long now = System.currentTimeMillis(); put(0, 100, now - 10000); assertValueEquals(0, 100); setExpireBefore(now - 5000); Thread.sleep(5000); UTIL.getAdmin().flush(NAME); assertNotExists(0, 100); put(0, 50, now - 1000); UTIL.getAdmin().flush(NAME); put(50, 100, now - 100); UTIL.getAdmin().flush(NAME); assertValueEquals(0, 100); setExpireBefore(now - 500); Thread.sleep(5000); UTIL.getAdmin().majorCompact(NAME); UTIL.waitFor(30000, () -> UTIL.getHBaseCluster().getRegions(NAME).iterator().next() .getStore(FAMILY).getStorefilesCount() == 1); assertNotExists(0, 50); assertValueEquals(50, 100); } }
// NOTE(review): isolated assertion fragment — expects exactly two store files at this point;
// the surrounding context appears to have been lost during extraction.
assertEquals(2, this.store.getStorefilesCount());
@Test public void test() throws Exception { doIncrement(0); assertSum(); // we do not hack scan operation so using scan we could get the original values added into the // table. try (ResultScanner scanner = TABLE.getScanner(new Scan().withStartRow(ROW) .withStopRow(ROW, true).addFamily(FAMILY).readAllVersions().setAllowPartialResults(true))) { Result r = scanner.next(); assertTrue(r.rawCells().length > 2); } UTIL.flush(NAME); HRegion region = UTIL.getHBaseCluster().findRegionsForTable(NAME).get(0); HStore store = region.getStore(FAMILY); for (;;) { region.compact(true); if (store.getStorefilesCount() == 1) { break; } } assertSum(); // Should only have two cells after flush and major compaction try (ResultScanner scanner = TABLE.getScanner(new Scan().withStartRow(ROW) .withStopRow(ROW, true).addFamily(FAMILY).readAllVersions().setAllowPartialResults(true))) { Result r = scanner.next(); assertEquals(2, r.rawCells().length); } } }