@Override public long getHFilesSize() { // Include only StoreFiles which are HFiles return getStorefilesSize(this.storeEngine.getStoreFileManager().getStorefiles(), HStoreFile::isHFile); }
@Override public long getStorefilesSize() { // Include all StoreFiles return getStorefilesSize(this.storeEngine.getStoreFileManager().getStorefiles(), sf -> true); }
/** * Writes Puts to the table and flushes few times. * @return {@link Pair} of (throughput, duration). */ private Pair<Double, Long> generateAndFlushData(Table table) throws IOException { // Internally, throughput is controlled after every cell write, so keep value size less for // better control. final int NUM_FLUSHES = 3, NUM_PUTS = 50, VALUE_SIZE = 200 * 1024; Random rand = new Random(); long duration = 0; for (int i = 0; i < NUM_FLUSHES; i++) { // Write about 10M (10 times of throughput rate) per iteration. for (int j = 0; j < NUM_PUTS; j++) { byte[] value = new byte[VALUE_SIZE]; rand.nextBytes(value); table.put(new Put(Bytes.toBytes(i * 10 + j)).addColumn(family, qualifier, value)); } long startTime = System.nanoTime(); hbtu.getAdmin().flush(tableName); duration += System.nanoTime() - startTime; } HStore store = getStoreWithName(tableName); assertEquals(NUM_FLUSHES, store.getStorefilesCount()); double throughput = (double)store.getStorefilesSize() / TimeUnit.NANOSECONDS.toSeconds(duration); return new Pair<>(throughput, duration); }
// Throughput in bytes/sec: bytes/ms scaled by 1000. Assumes `duration` is in
// milliseconds here — TODO confirm against the caller that computes it.
double throughput = (double) store.getStorefilesSize() / duration * 1000;
// Account the loaded files in the store's size counter (all files, no filtering).
this.storeSize.addAndGet(getStorefilesSize(hStoreFiles, sf -> true));
// NOTE(review): "Umcompressed" is the existing helper's (misspelled) name; kept as-is.
this.totalUncompressedBytes.addAndGet(getTotalUmcompressedBytes(hStoreFiles));
// Register the files with the store engine's file manager.
this.storeEngine.getStoreFileManager().loadFiles(hStoreFiles);
byte[] regionName = region.getRegionInfo().getRegionName();
HStore store = region.getStore(Bytes.toBytes("f"));
// Fix: assert non-null BEFORE dereferencing. The original called getStorefilesSize()
// first, so a null store would throw NPE instead of failing the assertNotNull.
Assert.assertNotNull(store);
long expectedStoreFilesSize = store.getStorefilesSize();
// The store's reported size should match the sum of its store file sizes.
Assert.assertEquals(expectedStoreFilesSize, store.getSize());
// Expect more than 15,000 bytes of store files — presumably produced by the
// preceding writes in this test; confirm against the surrounding steps.
assertTrue(s.getStorefilesSize() > 15*1000);
// The store's overall reported size should equal the total size of its store files.
assertEquals(store.getSize(), store.getStorefilesSize());
// Aggregate per-store metrics; byte sizes are converted to whole MB (truncating).
storefiles += store.getStorefilesCount();
storeUncompressedSizeMB += (int) (store.getStoreSizeUncompressed() / 1024 / 1024);
storefileSizeMB += (int) (store.getStorefilesSize() / 1024 / 1024);
/** * Writes Puts to the table and flushes few times. * @return {@link Pair} of (throughput, duration). */ private Pair<Double, Long> generateAndFlushData(Table table) throws IOException { // Internally, throughput is controlled after every cell write, so keep value size less for // better control. final int NUM_FLUSHES = 3, NUM_PUTS = 50, VALUE_SIZE = 200 * 1024; Random rand = new Random(); long duration = 0; for (int i = 0; i < NUM_FLUSHES; i++) { // Write about 10M (10 times of throughput rate) per iteration. for (int j = 0; j < NUM_PUTS; j++) { byte[] value = new byte[VALUE_SIZE]; rand.nextBytes(value); table.put(new Put(Bytes.toBytes(i * 10 + j)).addColumn(family, qualifier, value)); } long startTime = System.nanoTime(); hbtu.getAdmin().flush(tableName); duration += System.nanoTime() - startTime; } HStore store = getStoreWithName(tableName); assertEquals(NUM_FLUSHES, store.getStorefilesCount()); double throughput = (double)store.getStorefilesSize() / TimeUnit.NANOSECONDS.toSeconds(duration); return new Pair<>(throughput, duration); }
// Throughput in bytes/sec: bytes/ms scaled by 1000. Assumes `duration` is in
// milliseconds here — TODO confirm against the caller that computes it.
double throughput = (double) store.getStorefilesSize() / duration * 1000;
byte[] regionName = region.getRegionInfo().getRegionName();
HStore store = region.getStore(Bytes.toBytes("f"));
// Fix: assert non-null BEFORE dereferencing. The original called getStorefilesSize()
// first, so a null store would throw NPE instead of failing the assertNotNull.
Assert.assertNotNull(store);
long expectedStoreFilesSize = store.getStorefilesSize();
// The store's reported size should match the sum of its store file sizes.
Assert.assertEquals(expectedStoreFilesSize, store.getSize());
// Expect more than 15,000 bytes of store files — presumably produced by the
// preceding writes in this test; confirm against the surrounding steps.
assertTrue(s.getStorefilesSize() > 15*1000);
// The store's overall reported size should equal the total size of its store files.
assertEquals(store.getSize(), store.getStorefilesSize());