// NOTE(review): fragment — the statements after each "return true;" presumably
// belong to other branches of the enclosing compaction method; cannot tell from
// this view. TODO confirm against the full file.
// NOTE(review): the debug call concatenates isMajor into the format string
// ("...Major?" + isMajor) while passing logPrefix as a {} parameter — it likely
// should be "...Major? {}" with isMajor passed as a second argument so the
// concatenation is skipped when debug logging is disabled. Verify and fix at source.
compactionStats.end(0, startTime); return true; logger.debug("{}Skipping compaction, no need to compact a major compacted file. Major?" + isMajor, logPrefix); compactionStats.end(0, startTime); return true; try { byteCount = fillCompactionHoplog(isMajor, targets, compactedHoplog, lastKnownMajorCSeqNum); compactionStats.end(byteCount, startTime); } catch (InterruptedException e) { if (logger.isDebugEnabled())
// NOTE(review): fragment — the body of this if is cut off mid-block. On a closed
// organizer the flush is acknowledged (handler.complete()) and the flush statistic
// is closed with zero bytes written.
if (isClosed()) { handler.complete(); factory.getConfiguration().getStatistics().getFlush().end(0, start);
// NOTE(review): fragment — appears to be the JUL-logger variant of the snippet on
// the line above; statements after each "return true;" presumably belong to other
// branches of the enclosing method. TODO confirm against the full file.
// NOTE(review): the logger.fine message builds "...Major?" + isMajor eagerly; an
// isLoggable/fineEnabled guard (as used elsewhere in this file) would avoid the
// concatenation when fine logging is off. Verify and fix at source.
compactionStats.end(0, startTime); return true; logger.fine("Skipping compaction, no need to compact a major compacted file. Major?" + isMajor); compactionStats.end(0, startTime); return true; try { byteCount = fillCompactionHoplog(isMajor, targets, compactedHoplog, lastKnownMajorCSeqNum); compactionStats.end(byteCount, startTime); } catch (InterruptedException e) { logger.fine("Compaction execution suspended");
// NOTE(review): fragment — the catch body is cut off. Records flushed byte count
// against the flush statistic and grows the disk-usage counter by the new
// soplog's size; BucketMovedException handling continues outside this view.
stats.getFlush().end(byteCount, start); incrementDiskUsage(so.getSize()); } catch (BucketMovedException e) {
// NOTE(review): fragment — byte-identical to the previous line; presumably the
// same hunk captured twice, or two call sites sharing this pattern. TODO confirm.
stats.getFlush().end(byteCount, start); incrementDiskUsage(so.getSize()); } catch (BucketMovedException e) {
// NOTE(review): fragment — the try block is cut off (no catch/finally visible).
// Accumulates whether any level compacted and closes the minor-compaction
// statistic; note end(start) takes no byte count here, unlike the flush stats.
try { compacted |= level.compact(aborted); factory.getConfiguration().getStatistics().getMinorCompaction().end(start);
@Override
public ByteBuffer read(byte[] key) throws IOException {
  assert key != null;

  if (logger.finestEnabled()) {
    logger.finest(String.format("Reading key %s from %s", Hex.toHex(key), path));
  }

  long startTime = sopConfig.getStatistics().getRead().begin();
  try {
    HFileScanner scanner = reader.getScanner(true, true);
    if (scanner.seekTo(key) != 0) {
      // Key is not present: close out the read stat without a byte count and
      // charge the miss to the bloom filter as a false positive.
      sopConfig.getStatistics().getRead().end(startTime);
      sopConfig.getStatistics().getBloom().falsePositive();
      return null;
    }

    // Exact match: record the value size against the read statistic.
    ByteBuffer result = scanner.getValue();
    sopConfig.getStatistics().getRead().end(result.remaining(), startTime);
    return result;

  } catch (IOException e) {
    sopConfig.getStatistics().getRead().error(startTime);
    throw (IOException) e.fillInStackTrace();
  }
}
// NOTE(review): the trailing "};" closes an anonymous class whose declaration
// starts outside this view, so the span cannot be rewritten in isolation.
// Flush task: hands the soplog to the compactor, triggers a (non-forced) minor
// compaction, drops the flushed buffer from the unflushed queue, then completes
// the handler and records flushed bytes against the flush statistic. On any
// exception, handleError is invoked with the aborted flag instead.
@Override public void run() { try { compactor.add(soplog); compactor.compact(false, null); unflushed.removeFirstOccurrence(buffer); // TODO need to invoke this while NOT holding write lock handler.complete(); factory.getConfiguration().getStatistics().getFlush().end(buffer.dataSize(), start); } catch (Exception e) { handleError(e, aborted); return; } } };
@Override
public void abortBeforeRun() {
  // The flush was cancelled before it ever ran: acknowledge completion to the
  // caller and close out the flush statistic without recording any bytes.
  handler.complete();
  factory.getConfiguration().getStatistics().getFlush().end(start);
}
@Override
public void put(byte[] key, byte[] value) {
  assert key != null;
  assert value != null;

  // Time the write and charge the value's size to the put statistic.
  long begin = factory.getConfiguration().getStatistics().getPut().begin();
  getCurrent().put(key, value);
  factory.getConfiguration().getStatistics().getPut().end(value.length, begin);
}