@Override
public void downloaded(String blobId, long timeTaken, TimeUnit unit, long size) {
    downloadHisto.update(size);
    downloadSizeSeries.mark(size);
    downloadTimeSeries.mark(recordedTimeUnit.convert(timeTaken, unit));
    opsLogger.debug("Downloaded {} - {} bytes in {} ms", blobId, size, unit.toMillis(timeTaken));
}

@Override
public void uploaded(long timeTaken, TimeUnit unit, long size) {
    uploadHisto.update(size);
    //Recording an upload this way is not accurate. A more accurate approach
    //would be to mark the meter while the upload or download is in progress;
    //that, however, would add quite a bit of overhead.
    //The approach below records an upload/download at the moment it
    //completes, so it acts as a rough approximation.
    uploadSizeSeries.mark(size);
    uploadTimeSeries.mark(recordedTimeUnit.convert(timeTaken, unit));
    opsLogger.debug("Uploaded {} bytes in {} ms", size, unit.toMillis(timeTaken));
}

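/*
 * A minimal sketch, not the actual Oak wiring, of how the histograms and
 * meters used by downloaded()/uploaded() above could be built with Dropwizard
 * Metrics. The registry, the metric names, and the recordedTimeUnit choice
 * are assumptions for illustration.
 */
import java.util.concurrent.TimeUnit;

import com.codahale.metrics.Histogram;
import com.codahale.metrics.Meter;
import com.codahale.metrics.MetricRegistry;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

class BlobTransferStatsSketch {
    private static final Logger opsLogger = LoggerFactory.getLogger("oak.blobstore.ops");

    private final MetricRegistry registry = new MetricRegistry();

    //Distribution of individual transfer sizes
    private final Histogram downloadHisto = registry.histogram("BLOB_DOWNLOADS");
    private final Histogram uploadHisto = registry.histogram("BLOB_UPLOADS");

    //Rates: bytes moved and time spent, marked once per completed transfer
    private final Meter downloadSizeSeries = registry.meter("BLOB_DOWNLOAD_SIZE");
    private final Meter downloadTimeSeries = registry.meter("BLOB_DOWNLOAD_TIME");
    private final Meter uploadSizeSeries = registry.meter("BLOB_UPLOAD_SIZE");
    private final Meter uploadTimeSeries = registry.meter("BLOB_UPLOAD_TIME");

    //Unit in which transfer durations are recorded on the time meters
    private final TimeUnit recordedTimeUnit = TimeUnit.MILLISECONDS;
}
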
@Override
public void done() throws CommitFailedException {
    try {
        int docCount = Integer.parseInt(luceneIndexMBean.getDocCount(indexPath));
        HistogramStats docCountHistogram =
                statisticsProvider.getHistogram(indexPath + NO_DOCS, StatsOptions.METRICS_ONLY);
        docCountHistogram.update(docCount);

        long indexSize = Long.parseLong(luceneIndexMBean.getSize(indexPath));
        HistogramStats indexSizeHistogram =
                statisticsProvider.getHistogram(indexPath + INDEX_SIZE, StatsOptions.METRICS_ONLY);
        indexSizeHistogram.update(indexSize);

        log.debug("{} stats updated; docCount {}, size {}", indexPath, docCount, indexSize);
    } catch (IOException e) {
        log.debug("Could not update no_docs/index_size stats for index at {}", indexPath, e);
    }
}

@Override
public void onCreateNodeObject(String path) {
    nodePathDepths.update(PathUtils.getDepth(path));
    if (countPaths) {
        updatePathCount(path);
    }
}

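/*
 * For context: PathUtils.getDepth (org.apache.jackrabbit.oak.commons) returns
 * the number of path segments, e.g. getDepth("/a/b/c") == 3 and
 * getDepth("/") == 0, so nodePathDepths tracks how deep created nodes sit in
 * the tree. A minimal usage sketch; the histogram name is assumed:
 */
import com.codahale.metrics.Histogram;
import com.codahale.metrics.MetricRegistry;
import org.apache.jackrabbit.oak.commons.PathUtils;

class NodeDepthStatsSketch {
    private final Histogram nodePathDepths =
            new MetricRegistry().histogram("NODE_PATH_DEPTH");

    void record(String path) {
        nodePathDepths.update(PathUtils.getDepth(path));
    }
}
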
public void doneOneCycle(long timeInMillis, long updates) {
    indexerExecutionCountMeter.mark();
    indexedNodeCountMeter.mark(updates);
    indexerTimer.update(timeInMillis, TimeUnit.MILLISECONDS);
    indexedNodePerCycleHisto.update(updates);
}

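/*
 * A usage sketch for doneOneCycle() above: the caller times one indexing pass
 * and reports the elapsed milliseconds plus the node count. AsyncIndexStats
 * and runIndexingPass() are hypothetical names used only for illustration.
 */
import java.util.concurrent.TimeUnit;

class IndexingLoopSketch {
    interface AsyncIndexStats {
        void doneOneCycle(long timeInMillis, long updates);
    }

    static void runOneCycle(AsyncIndexStats stats) {
        long start = System.nanoTime();
        long updates = runIndexingPass();  //hypothetical: nodes indexed in this pass
        long elapsed = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start);
        stats.doneOneCycle(elapsed, updates);
    }

    static long runIndexingPass() {
        return 0;  //placeholder body; the real pass would index pending changes
    }
}
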
@Override
public void doneMerge(int numRetries, long time, boolean suspended, boolean exclusive) {
    mergeSuccessRate.mark();
    mergeSuccessRetries.update(numRetries);
    mergeSuccessTime.update(time, TimeUnit.MILLISECONDS);
    if (exclusive) {
        mergeSuccessExclusive.mark();
    }
    if (suspended) {
        mergeSuccessSuspended.mark();
    }
}

public void close() throws IOException {
    if (closed) {
        return;
    }
    log.debug("[{}] Closing NRTIndex [{}]", definition.getIndexPath(), getName());

    decrementReaderUseCount(dirReaderUsedForPrevious);

    //'readers' already contains dirReader, so no need to close it explicitly
    decrementReaderUseCount(readers);
    assertAllReadersAreClosed();

    if (indexWriter != null) {
        //TODO The close call could possibly be sped up by avoiding the merge
        //and dropping in-memory state, i.e. indexWriter.close(waitForMerges).
        //To be explored.
        indexWriter.close();
        sizeHisto.update(dirSize(directory));
        directory.close();
        FileUtils.deleteQuietly(indexDir);
        log.debug("[{}] Removed directory [{}]", this, indexDir);
    }
    //Null the reference to 'previous' so that it can be garbage collected.
    //It would not be accessed post close.
    previous = null;
    closed = true;
    openTime.stop();
}

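/*
 * A minimal sketch of the dirSize() helper used in close() above, assuming it
 * sums the length of every file in the Lucene Directory; the real
 * implementation may differ.
 */
import java.io.IOException;

import org.apache.lucene.store.Directory;

final class DirSizeSketch {
    static long dirSize(Directory directory) throws IOException {
        long totalSize = 0;
        for (String file : directory.listAll()) {
            totalSize += directory.fileLength(file);  //size of one index file in bytes
        }
        return totalSize;
    }
}
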
@Override
public void doneMerge(int numRetries, long timeMillis, long suspendMillis, boolean exclusive) {
    mergeSuccessRate.mark();
    mergeSuccessRetries.update(numRetries);
    mergeSuccessTime.update(timeMillis, TimeUnit.MILLISECONDS);
    if (exclusive) {
        mergeSuccessExclusive.mark();
    }
    mergeSuspendTime.update(suspendMillis, TimeUnit.MILLISECONDS);
    if (suspendMillis > 0) {
        mergeSuccessSuspended.mark();
    }
}

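/*
 * The two doneMerge() overloads above record the same success metrics; the
 * newer one replaces the boolean 'suspended' flag with the measured suspend
 * time. The metric types can be inferred from the calls: mark() implies a
 * Meter, update(int) a Histogram, and update(long, TimeUnit) a Timer. A
 * hedged construction sketch with assumed Dropwizard metric names:
 */
import com.codahale.metrics.Histogram;
import com.codahale.metrics.Meter;
import com.codahale.metrics.MetricRegistry;
import com.codahale.metrics.Timer;

class MergeStatsSketch {
    private final MetricRegistry registry = new MetricRegistry();
    private final Meter mergeSuccessRate = registry.meter("MERGE_SUCCESS");
    private final Meter mergeSuccessExclusive = registry.meter("MERGE_SUCCESS_EXCLUSIVE");
    private final Meter mergeSuccessSuspended = registry.meter("MERGE_SUCCESS_SUSPENDED");
    private final Histogram mergeSuccessRetries = registry.histogram("MERGE_SUCCESS_RETRIES");
    private final Timer mergeSuccessTime = registry.timer("MERGE_SUCCESS_TIME");
    private final Timer mergeSuspendTime = registry.timer("MERGE_SUSPEND_TIME");
}
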
@Override
public void doneBackgroundUpdate(BackgroundWriteStats stats) {
    writeClean.update(stats.clean, TimeUnit.MILLISECONDS);
    writeSplit.update(stats.split, TimeUnit.MILLISECONDS);
    writeSweep.update(stats.sweep, TimeUnit.MILLISECONDS);
    writeWrite.update(stats.write, TimeUnit.MILLISECONDS);
    writeTotal.update(stats.totalWriteTime, TimeUnit.MILLISECONDS);
    writeNum.update(stats.num);

    //Record the rate of background writes pushed per second
    numWritesRate.mark(stats.num);
}

@Override
public void doneBackgroundRead(BackgroundReadStats stats) {
    readHead.update(stats.readHead, TimeUnit.MILLISECONDS);
    readCacheInvalidate.update(stats.cacheInvalidationTime, TimeUnit.MILLISECONDS);
    readDiffCache.update(stats.populateDiffCache, TimeUnit.MILLISECONDS);
    readLock.update(stats.lock, TimeUnit.MILLISECONDS);
    readDispatch.update(stats.dispatchChanges, TimeUnit.MILLISECONDS);
    readTotalTime.update(stats.totalReadTime, TimeUnit.MILLISECONDS);

    //Record the rate of external changes pulled per second
    numChangesRate.mark(stats.numExternalChanges);
    numChangesHisto.update(stats.numExternalChanges);

    //Update the lag of external changes
    changesLag.mark(stats.externalChangesLag);
}

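/*
 * A sketch of the stats carriers the two callbacks above read, inferred only
 * from the fields they access; the real classes may hold more state, and the
 * field comments are assumptions. All durations are in milliseconds, matching
 * the TimeUnit.MILLISECONDS conversions above.
 */
class BackgroundWriteStatsSketch {
    long clean, split, sweep, write;  //time spent in each write phase, ms
    long totalWriteTime;              //total time for the background write, ms
    long num;                         //number of documents written
}

class BackgroundReadStatsSketch {
    long readHead;                    //time to read the head revision, ms
    long cacheInvalidationTime;       //time to invalidate caches, ms
    long populateDiffCache;           //time to populate the diff cache, ms
    long lock;                        //time waiting for the background lock, ms
    long dispatchChanges;             //time to dispatch observed changes, ms
    long totalReadTime;               //total time for the background read, ms
    long numExternalChanges;          //number of external changes pulled
    long externalChangesLag;          //lag of external changes, ms
}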