@Override
public Object call() throws Exception {
  // Look up the HDFS sorted-oplog statistics registered under this region's
  // path and report how many reads have gone to HDFS so far.
  SortedOplogStatistics regionStats =
      HDFSRegionDirector.getInstance().getHdfsRegionStats("/" + regionName);
  return regionStats.getRead().getCount();
} };
@Override
public Object call() throws Exception {
  // Return the current HDFS read count for this region, fetched from the
  // director's per-region sorted-oplog statistics.
  SortedOplogStatistics hdfsStats = HDFSRegionDirector.getInstance()
      .getHdfsRegionStats("/" + regionName);
  long reads = hdfsStats.getRead().getCount();
  return reads;
} };
@Override public Object call() throws Exception { Region r = getCache().getRegion(regionName); LocalRegion lr = (LocalRegion) r; SortedOplogStatistics stats = HDFSRegionDirector.getInstance().getHdfsRegionStats("/" + regionName); long readsFromHDFS = stats.getRead().getCount(); assertEquals(0, readsFromHDFS); if (isPutAll) { Map m = new HashMap(); // map with only one entry m.put("key0", "value0"); DistributedPutAllOperation ev = lr.newPutAllOperation(m); lr.basicPutAll(m, ev, null); m.clear(); // map with multiple entries for (int i=1; i<100; i++) { m.put("key"+i, "value"+i); } ev = lr.newPutAllOperation(m); lr.basicPutAll(m, ev, null); } else { for (int i=0; i<100; i++) { r.put("key"+i, "value"+i); } } return null; } });
// Point lookup of a single key in this hoplog file, recording read timing,
// bytes read, and bloom-filter statistics for the operation.
@Override
public ByteBuffer read(byte[] key) throws IOException {
  assert key != null;
  if (logger.finestEnabled()) {
    logger.finest(String.format("Reading key %s from %s", Hex.toHex(key), path));
  }
  // Start the read timer; every exit path below must close it via
  // end(...) or error(...) with this start token.
  long start = sopConfig.getStatistics().getRead().begin();
  try {
    // NOTE(review): the two booleans are presumably cacheBlocks/pread per the
    // HFile API — confirm against the HFileScanner javadoc.
    HFileScanner seek = reader.getScanner(true, true);
    if (seek.seekTo(key) == 0) {
      // seekTo(...) == 0 indicates an exact key match (HFileScanner contract).
      ByteBuffer val = seek.getValue();
      sopConfig.getStatistics().getRead().end(val.remaining(), start);
      return val;
    }
    // Key not present: close the timer with zero bytes and count a
    // bloom-filter false positive (the bloom check let this read through).
    sopConfig.getStatistics().getRead().end(start);
    sopConfig.getStatistics().getBloom().falsePositive();
    return null;
  } catch (IOException e) {
    sopConfig.getStatistics().getRead().error(start);
    // Refill the stack trace so it points at this read path before rethrowing.
    throw (IOException) e.fillInStackTrace();
  }
}
// Flushes 100 events into bucket 0, then verifies that point reads advance
// the sorted-oplog read statistics and the store-wide block-cache statistics.
public void testReadStats() throws Exception {
  HoplogOrganizer<SortedHoplogPersistedEvent> bucket = regionManager.create(0);
  ArrayList<TestEvent> items = new ArrayList<TestEvent>();
  for (int i = 0; i < 100; i++) {
    items.add(new TestEvent("key-" + i, "value-" + System.nanoTime()));
  }
  bucket.flush(items.iterator(), 100);
  // validate read stats: nothing has been read back yet
  assertEquals(0, stats.getRead().getCount());
  assertEquals(0, stats.getRead().getBytes());
  // number of bytes read must be greater than size of key and value and must be increasing
  // NOTE(review): the "value=1233232" literal is only a rough lower bound on a
  // key/value pair's size, not an exact payload — it just needs to be smaller
  // than what one read actually transfers.
  int bytesRead = "key-1".getBytes().length + "value=1233232".getBytes().length;
  for (int i = 0; i < 5; i++) {
    long previousRead = stats.getRead().getBytes();
    PersistedEventImpl e = bucket.read(BlobHelper.serializeToBlob("key-" + i));
    assertNotNull(e);
    // exactly one more read recorded per lookup, and the byte counter must
    // have grown by more than one pair's worth
    assertEquals(i + 1, stats.getRead().getCount());
    assertTrue( (bytesRead + previousRead) < stats.getRead().getBytes());
  }
  //Make sure the block cache stats are being updated.
  assertTrue(storeStats.getBlockCache().getMisses() > 0);
  assertTrue(storeStats.getBlockCache().getBytesCached() > 0);
  assertTrue(storeStats.getBlockCache().getCached() > 0);
  //Do a duplicate read to make sure we get a hit in the cache
  PersistedEventImpl e = bucket.read(BlobHelper.serializeToBlob("key-" + 0));
  assertTrue(storeStats.getBlockCache().getHits() > 0);
}
@Override
public Object call() throws Exception {
  Region region = getCache().getRegion(regionName);
  SortedOplogStatistics hdfsStats =
      HDFSRegionDirector.getInstance().getHdfsRegionStats("/" + regionName);
  // No reads should have reached HDFS before any data is written.
  long readsSoFar = hdfsStats.getRead().getCount();
  assertEquals(0, readsSoFar);
  // Write 100 entries, logging each put for trace correlation.
  for (int i = 0; i < 100; i++) {
    region.getCache().getLogger().info("SWAP:DOING PUT:key" + i);
    region.put("key" + i, "value" + i);
  }
  return null;
} });
// Verifies PUT DML semantics against HDFS: a plain put performs a
// get-before-update (one HDFS read each), whereas the validatedPut/PUT-DML
// path must not read from HDFS at all.
public void test010PUTDMLSupport() {
  Region<Integer, String> r = createRegion(getName());
  SortedOplogStatistics stats = HDFSRegionDirector.getInstance().getHdfsRegionStats("/" + getName());
  assertEquals(0, stats.getRead().getCount());
  for (int i=0; i<100; i++) {
    r.put(i, "value"+i);
  }
  // each of the 100 puts incurred one read (get-before-update)
  assertEquals(100, stats.getRead().getCount());
  // let the async queue drain to HDFS, then drop the in-memory copies so the
  // next operations must consult HDFS (or the queue) for old values
  sleep(r.getFullPath());
  clearBackingCHM(r);
  LocalRegion lr = (LocalRegion) r;
  for (int i=0; i<200; i++) {
    // PUT DML path: blind write, no read of the existing value
    EntryEventImpl ev = lr.newPutEntryEvent(i, "value"+i, null);
    lr.validatedPut(ev, System.currentTimeMillis());
  }
  // verify that read count on HDFS does not change
  assertEquals(100, stats.getRead().getCount());
  sleep(r.getFullPath());
  clearBackingCHM(r);
  for (int i=0; i<200; i++) {
    assertEquals("value"+i, r.get(i));
  }
  if (getBatchTimeInterval() > 1000) {
    // reads were served from the still-undrained async queue, not HDFS
    assertEquals(100, stats.getRead().getCount());
  } else {
    // initial 100 put-reads + 200 gets served from HDFS
    assertEquals(300, stats.getRead().getCount());
  }
}
// NOTE(review): this method appears to be a truncated/garbled extraction —
// the braces do not balance, `expectedReadsFromHDFS` and `lr` are used with
// no visible declaration, and the inner loop redeclares `i`. Preserved
// byte-for-byte below; recover the full method from version control before
// attempting any edit. Intent (from the assertions): track expected HDFS
// read counts across puts and operational-data gets with
// allowReadFromHDFS=false.
public void test020GetOperationalData() throws Exception { Region<Integer, String> r = createRegion(getName()); SortedOplogStatistics stats = HDFSRegionDirector.getInstance().getHdfsRegionStats("/" + getName()); assertEquals(0, stats.getRead().getCount()); for (int i=0; i<100; i++) { r.put(i, "value"+i); assertEquals(expectedReadsFromHDFS, stats.getRead().getCount()); sleep(r.getFullPath()); clearBackingCHM(r); expectedReadsFromHDFS = 300; // initial 100 + 200 for reads assertEquals(expectedReadsFromHDFS, stats.getRead().getCount()); for (int i=0; i<200; i++) { assertNull(lr.get(i, null, true, false, false, null, lr.discoverJTA(), null, null, false, false/*allowReadFromHDFS*/)); assertEquals(expectedReadsFromHDFS, stats.getRead().getCount()); assertEquals(expectedReadsFromHDFS, stats.getRead().getCount());
// NOTE(review): like test020 above in the original file, this method is a
// truncated/garbled extraction — braces do not balance and
// `expectedReadsFromHDFS` is used without a visible declaration. Preserved
// byte-for-byte; recover the full method from version control before editing.
// Intent (from the assertions): verify HDFS read counts around puts and
// gets with allowReadFromHDFS=false after removing operational data.
public void test030RemoveOperationalData() throws Exception { Region<Integer, String> r = createRegion(getName()); SortedOplogStatistics stats = HDFSRegionDirector.getInstance().getHdfsRegionStats("/" + getName()); assertEquals(0, stats.getRead().getCount()); for (int i=0; i<100; i++) { r.put(i, "value"+i); assertEquals(expectedReadsFromHDFS, stats.getRead().getCount()); sleep(r.getFullPath()); PartitionedRegion lr = (PartitionedRegion) r; expectedReadsFromHDFS = 250; // initial 100 + 200 for reads + 50 for assertEquals(expectedReadsFromHDFS, stats.getRead().getCount()); for (int i=0; i<50; i++) { assertNull(lr.get(i, null, true, false, false, null, lr.discoverJTA(), null, null, false, false/*allowReadFromHDFS*/)); assertEquals(expectedReadsFromHDFS, stats.getRead().getCount());