@Override
public String toString() {
  StringBuilder sb = new StringBuilder(super.toString());
  sb.append(";bytes=").append(getBytes());
  return sb.toString();
}
}
startTime = compactionStats.begin();

// Skip work that is already major compacted; the actual guard condition is
// elided in this extract (alreadyMajorCompacted is a placeholder name).
if (alreadyMajorCompacted) {
  compactionStats.end(0, startTime);
  return true;
}

try {
  byteCount = fillCompactionHoplog(isMajor, targets, compactedHoplog, lastKnownMajorCSeqNum);
  compactionStats.end(byteCount, startTime);
} catch (InterruptedException e) {
  if (logger.isDebugEnabled())
    logger.debug("{}Compaction execution suspended", logPrefix);
  compactionStats.error(startTime);
  return false;
} catch (ForceReattemptException e) {
  if (logger.isDebugEnabled())
    logger.debug("{}Compaction execution suspended", logPrefix);
  compactionStats.error(startTime);
  return false;
} catch (IOException e) { // exception type assumed; elided in this extract
  compactionStats.error(startTime);
  throw e;
} finally {
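// A minimal, self-contained sketch of the begin/end/error timing contract
// that compactionStats relies on above. All names here are hypothetical;
// the real SortedOplogStatistics updates GemFire statistics by descriptor
// id rather than by plain fields.
import java.util.concurrent.atomic.AtomicLong;

final class TimedOpSketch {
  private final AtomicLong count = new AtomicLong();      // completed operations
  private final AtomicLong inProgress = new AtomicLong(); // currently running
  private final AtomicLong timeNanos = new AtomicLong();  // total elapsed time
  private final AtomicLong bytes = new AtomicLong();      // total bytes moved
  private final AtomicLong errors = new AtomicLong();     // failed operations

  long begin() {                         // returns the start timestamp
    inProgress.incrementAndGet();
    return System.nanoTime();
  }

  void end(long byteCount, long start) { // successful completion
    inProgress.decrementAndGet();
    count.incrementAndGet();
    bytes.addAndGet(byteCount);
    timeNanos.addAndGet(System.nanoTime() - start);
  }

  void error(long start) {               // failed or suspended completion
    inProgress.decrementAndGet();
    errors.incrementAndGet();
    timeNanos.addAndGet(System.nanoTime() - start);
  }
}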
@Override
public Object call() throws Exception {
  Region r = getCache().getRegion(regionName);
  for (int i = 0; i < 500; i++) {
    r.put("key" + i, "value" + i);
    if (i % 100 == 0) {
      // wait for flush
      pause(3000);
    }
  }
  pause(3000);

  PartitionedRegion pr = (PartitionedRegion) r;
  long lastCompactionTS = pr.lastMajorHDFSCompaction();
  assertEquals(0, lastCompactionTS);

  long beforeCompact = System.currentTimeMillis();
  pr.forceHDFSCompaction(true, isSynchronous ? 0 : 1);
  if (isSynchronous) {
    final SortedOplogStatistics stats = HDFSRegionDirector.getInstance()
        .getHdfsRegionStats("/" + regionName);
    assertTrue(stats.getMajorCompaction().getCount() > 0);
    assertTrue(pr.lastMajorHDFSCompaction() >= beforeCompact);
  }
  return null;
}
});
});

read = new IOOperation(readCount.getId(), readInProgress.getId(), readTime.getId(),
    readBytes.getId(), readErrors.getId());
scan = new ScanOperation(scanCount.getId(), scanInProgress.getId(), scanTime.getId(),
    scanBytes.getId(), scanErrors.getId(), scanIterations.getId(), scanIterationTime.getId());
write = new IOOperation(writeCount.getId(), writeInProgress.getId(), writeTime.getId(),
    writeBytes.getId(), writeErrors.getId());
put = new IOOperation(putCount.getId(), putInProgress.getId(), putTime.getId(),
    putBytes.getId(), putErrors.getId());
flush = new IOOperation(flushCount.getId(), flushInProgress.getId(), flushTime.getId(),
    flushBytes.getId(), flushErrors.getId());
minorCompaction = new IOOperation(minorCompactionCount.getId(), minorCompactionInProgress.getId(),
    minorCompactionTime.getId(), minorCompactionBytes.getId(), minorCompactionErrors.getId());
majorCompaction = new IOOperation(majorCompactionCount.getId(), majorCompactionInProgress.getId(),
    majorCompactionTime.getId(), majorCompactionBytes.getId(), majorCompactionErrors.getId());
bloom = new BloomOperation(bloomCount.getId(), bloomInProgress.getId(), bloomTime.getId(),
    bloomErrors.getId(), bloomFalsePositive.getId());
clear = new TimedOperation(clearCount.getId(), clearInProgress.getId(), clearTime.getId(),
    clearErrors.getId());
destroy = new TimedOperation(destroyCount.getId(), destroyInProgress.getId(), destroyTime.getId(),
    destroyErrors.getId());
blockRead = new IOOperation(brCount.getId(), brInProgress.getId(), brTime.getId(),
    brBytes.getId(), brErrors.getId());
blockCache = new CacheOperation(bcMisses.getId(), bcHits.getId(), bcCached.getId(),
    bcBytesCached.getId(), bcBytesEvicted.getId());
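// The assignments above capture the integer ids of the statistics descriptors
// registered earlier on the type, so hot-path updates can increment by id
// instead of looking descriptors up by name on every operation. A hedged
// sketch of that holder shape (names are illustrative, not the real classes):
final class IOOperationIdsSketch {
  final int countId, inProgressId, timeId, bytesId, errorsId;

  IOOperationIdsSketch(int countId, int inProgressId, int timeId,
      int bytesId, int errorsId) {
    this.countId = countId;           // completed-operation counter id
    this.inProgressId = inProgressId; // in-flight gauge id
    this.timeId = timeId;             // cumulative time counter id
    this.bytesId = bytesId;           // cumulative bytes counter id
    this.errorsId = errorsId;         // error counter id
  }
}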
long start = stats.getFlush().begin();
int byteCount = 0;
try {
  // hoplog write elided in this extract; on success the writer is released,
  // the timer is closed out, and disk usage is charged for the new file
  writer = null;
  stats.getFlush().end(byteCount, start);
  incrementDiskUsage(so.getSize());
} catch (IOException e) {
  stats.getFlush().error(start);
  e = handleWriteHdfsIOError(writer, so, e);
  deleteTmpFile(writer, so);
  writer = null;
  logger.warn(LocalizedStrings.HOPLOG_FLUSH_OPERATION_FAILED, e);
  throw e;
} catch (BucketMovedException e) {
  stats.getFlush().error(start);
  deleteTmpFile(writer, so);
  writer = null;
  throw e;
}
startTime = compactionStats.begin();

// Skip work that is already major compacted; the actual guard condition is
// elided in this extract (alreadyMajorCompacted is a placeholder name).
if (alreadyMajorCompacted) {
  logger.fine("Skipping compaction, no need to compact a major compacted file. Major? " + isMajor);
  compactionStats.end(0, startTime);
  return true;
}

try {
  byteCount = fillCompactionHoplog(isMajor, targets, compactedHoplog, lastKnownMajorCSeqNum);
  compactionStats.end(byteCount, startTime);
} catch (InterruptedException e) {
  logger.fine("Compaction execution suspended");
  compactionStats.error(startTime);
  return false;
} catch (ForceReattemptException e) {
  logger.fine("Compaction execution suspended");
  compactionStats.error(startTime);
  return false;
} catch (IOException e) { // exception type assumed; elided in this extract
  compactionStats.error(startTime);
  throw e;
} finally {
long start = stats.getFlush().begin();
int byteCount = 0;
if (writer == null) {
  // writer creation elided in this extract
}
HeapDataOutputStream out = new HeapDataOutputStream(1024, null);
while (bufferIter.hasNext()) {
  if (abortFlush) {
    stats.getFlush().end(byteCount, start);
    throw new CacheClosedException("Either the region has been cleared "
        + "or closed. Aborting the ongoing flush operation.");
  }
  // serialization of the next buffered entry into 'out' elided in this extract
}
// error path: stats.getFlush().error(start); success path:
// stats.getFlush().end(byteCount, start) (both elided here)
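// Self-contained sketch of the abort-aware flush loop pattern above
// (hypothetical types): the volatile abort flag is checked once per
// buffered entry so a region clear/close interrupts the flush promptly
// instead of only after the whole buffer has been written.
import java.util.Iterator;

final class AbortableFlushSketch {
  volatile boolean abortFlush; // set by clear()/close() on another thread

  int flush(Iterator<byte[]> bufferIter) {
    int byteCount = 0;
    while (bufferIter.hasNext()) {
      if (abortFlush) {
        throw new IllegalStateException(
            "Region cleared or closed; aborting the ongoing flush.");
      }
      byteCount += bufferIter.next().length; // actual HDFS write elided
    }
    return byteCount;
  }
}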
public void test030RemoveOperationalData() throws Exception {
  Region<Integer, String> r = createRegion(getName());
  SortedOplogStatistics stats = HDFSRegionDirector.getInstance().getHdfsRegionStats("/" + getName());
  assertEquals(0, stats.getRead().getCount());

  for (int i = 0; i < 100; i++) {
    r.put(i, "value" + i);
  }
  assertEquals(expectedReadsFromHDFS, stats.getRead().getCount());
  sleep(r.getFullPath());

  PartitionedRegion lr = (PartitionedRegion) r;
  expectedReadsFromHDFS = 250; // initial 100 + 200 for reads + 50 for
  assertEquals(expectedReadsFromHDFS, stats.getRead().getCount());

  // gets with allowReadFromHDFS == false must not add HDFS reads
  for (int i = 0; i < 50; i++) {
    assertNull(lr.get(i, null, true, false, false, null, lr.discoverJTA(),
        null, null, false, false/*allowReadFromHDFS*/));
  }
  assertEquals(expectedReadsFromHDFS, stats.getRead().getCount());
public void test020GetOperationalData() throws Exception {
  Region<Integer, String> r = createRegion(getName());
  SortedOplogStatistics stats = HDFSRegionDirector.getInstance().getHdfsRegionStats("/" + getName());
  assertEquals(0, stats.getRead().getCount());

  for (int i = 0; i < 100; i++) {
    r.put(i, "value" + i);
  }
  assertEquals(expectedReadsFromHDFS, stats.getRead().getCount());
  sleep(r.getFullPath());
  clearBackingCHM(r);

  expectedReadsFromHDFS = 300; // initial 100 + 200 for reads
  assertEquals(expectedReadsFromHDFS, stats.getRead().getCount());

  // gets with allowReadFromHDFS == false must not add HDFS reads
  LocalRegion lr = (LocalRegion) r;
  for (int i = 0; i < 200; i++) {
    assertNull(lr.get(i, null, true, false, false, null, lr.discoverJTA(),
        null, null, false, false/*allowReadFromHDFS*/));
  }
  assertEquals(expectedReadsFromHDFS, stats.getRead().getCount());
public void test010PUTDMLSupport() {
  Region<Integer, String> r = createRegion(getName());
  SortedOplogStatistics stats = HDFSRegionDirector.getInstance().getHdfsRegionStats("/" + getName());
  assertEquals(0, stats.getRead().getCount());

  for (int i = 0; i < 100; i++) {
    r.put(i, "value" + i);
  }
  assertEquals(100, stats.getRead().getCount());
  sleep(r.getFullPath());
  clearBackingCHM(r);

  LocalRegion lr = (LocalRegion) r;
  for (int i = 0; i < 200; i++) {
    EntryEventImpl ev = lr.newPutEntryEvent(i, "value" + i, null);
    lr.validatedPut(ev, System.currentTimeMillis());
  }
  // verify that read count on HDFS does not change
  assertEquals(100, stats.getRead().getCount());

  sleep(r.getFullPath());
  clearBackingCHM(r);
  for (int i = 0; i < 200; i++) {
    assertEquals("value" + i, r.get(i));
  }
  if (getBatchTimeInterval() > 1000) {
    // reads from async queue
    assertEquals(100, stats.getRead().getCount());
  } else {
    assertEquals(300, stats.getRead().getCount());
  }
}
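// Sketch of why the blind "PUT DML" path above leaves the HDFS read count
// unchanged (a simplified model with hypothetical names, not the real API):
// a regular put must fetch the old value, which costs one store read, while
// a blind put skips that lookup entirely.
import java.util.HashMap;
import java.util.Map;

final class BlindPutSketch {
  static int storeReads = 0;
  static final Map<Integer, String> store = new HashMap<>();

  static String put(int k, String v) {    // read-your-old-value put
    storeReads++;                         // must consult the store first
    return store.put(k, v);
  }

  static void blindPut(int k, String v) { // PUT DML style write
    store.put(k, v);                      // no old-value lookup
  }

  public static void main(String[] args) {
    for (int i = 0; i < 100; i++) put(i, "value" + i);
    int after = storeReads;               // 100 reads caused by the puts
    for (int i = 0; i < 200; i++) blindPut(i, "value" + i);
    if (storeReads != after) throw new AssertionError("blind puts read the store");
    System.out.println("store reads: " + storeReads); // prints 100
  }
}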
assertEquals(0, stats.getFlush().getCount());
assertEquals(0, stats.getFlush().getBytes());
assertEquals(0, stats.getActiveFileCount());

int bytesSent = 0;
// inside the flush loop (body elided in this extract), after the j-th flush:
assertEquals(j + 1, stats.getFlush().getCount());
assertTrue(stats.getFlush().getBytes() > bytesSent);
assertEquals(j + 1, stats.getActiveFileCount());

assertEquals(0, stats.getMinorCompaction().getCount());
assertEquals(0, stats.getMinorCompaction().getBytes());
assertEquals(0, stats.getInactiveFileCount());

bucket.getCompactor().compact(false, false);
assertEquals(1, stats.getMinorCompaction().getCount());
assertEquals(1, stats.getActiveFileCount());
assertEquals(0, stats.getInactiveFileCount());
assertEquals(stats.getMinorCompaction().getBytes(), stats.getFlush().getBytes());
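// Sketch of the invariants the assertions above check (a simplified model,
// with hypothetical names): each flush adds one active hoplog, and a minor
// compaction merges every active file into a single one while rewriting
// every flushed byte, so compaction bytes equal flush bytes.
final class CompactionModelSketch {
  int activeFiles;
  long flushBytes, minorCompactionBytes;

  void flush(long bytes) { // each flush creates one new active hoplog
    activeFiles++;
    flushBytes += bytes;
  }

  void compactMinor() {    // merge all active files into one
    minorCompactionBytes += flushBytes; // every flushed byte is rewritten
    activeFiles = 1;
  }

  public static void main(String[] args) {
    CompactionModelSketch m = new CompactionModelSketch();
    for (int j = 0; j < 5; j++) m.flush(1024);
    m.compactMinor();
    // mirrors the assertions above: one active file, bytes conserved
    System.out.println(m.activeFiles == 1 && m.minorCompactionBytes == m.flushBytes);
  }
}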
@Override
public Object call() throws Exception {
  Region r = getCache().getRegion(regionName);
  LocalRegion lr = (LocalRegion) r;
  SortedOplogStatistics stats = HDFSRegionDirector.getInstance().getHdfsRegionStats("/" + regionName);
  long readsFromHDFS = stats.getRead().getCount();
  assertEquals(0, readsFromHDFS);

  if (isPutAll) {
    // map with only one entry
    Map m = new HashMap();
    m.put("key0", "value0");
    DistributedPutAllOperation ev = lr.newPutAllOperation(m);
    lr.basicPutAll(m, ev, null);

    // map with multiple entries
    m.clear();
    for (int i = 1; i < 100; i++) {
      m.put("key" + i, "value" + i);
    }
    ev = lr.newPutAllOperation(m);
    lr.basicPutAll(m, ev, null);
  } else {
    for (int i = 0; i < 100; i++) {
      r.put("key" + i, "value" + i);
    }
  }
  return null;
}
});
@Override
public ByteBuffer read(byte[] key) throws IOException {
  assert key != null;
  if (logger.finestEnabled()) {
    logger.finest(String.format("Reading key %s from %s", Hex.toHex(key), path));
  }

  long start = sopConfig.getStatistics().getRead().begin();
  try {
    HFileScanner seek = reader.getScanner(true, true);
    if (seek.seekTo(key) == 0) { // exact match found
      ByteBuffer val = seek.getValue();
      sopConfig.getStatistics().getRead().end(val.remaining(), start);
      return val;
    }
    // the bloom filter admitted the key, but the file does not contain it
    sopConfig.getStatistics().getRead().end(start);
    sopConfig.getStatistics().getBloom().falsePositive();
    return null;
  } catch (IOException e) {
    sopConfig.getStatistics().getRead().error(start);
    throw (IOException) e.fillInStackTrace();
  }
}
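// Sketch of the bloom accounting pattern in read() above (toy single-hash
// filter with hypothetical names): the file is only consulted because the
// bloom filter said "maybe present", so a miss in the file is, by
// definition, a bloom false positive.
import java.util.Arrays;
import java.util.BitSet;
import java.util.HashMap;
import java.util.Map;

final class BloomReadSketch {
  private final BitSet bloom = new BitSet(1 << 16);   // toy single-hash bloom filter
  private final Map<String, byte[]> file = new HashMap<>();
  long falsePositives;

  private int hash(byte[] key) {
    return Arrays.hashCode(key) & 0xFFFF;
  }

  void put(byte[] key, byte[] value) {
    bloom.set(hash(key));
    file.put(Arrays.toString(key), value);
  }

  byte[] read(byte[] key) {
    if (!bloom.get(hash(key))) {
      return null;                 // definite miss: the file is never touched
    }
    byte[] val = file.get(Arrays.toString(key));
    if (val == null) {
      falsePositives++;            // filter admitted a key the file lacks
    }
    return val;
  }
}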