ctx.stop(); log.debug("Added {} documents for {} indexes from external changes", indexedCount, indexPaths.size());
tryFlush(); } finally { timer.stop();
ctx.stop(); log.debug("Added {} documents for {} indexes from external changes", indexedCount, indexPaths.size());
/**
 * Closes this NRT index: releases all open readers, closes the underlying
 * index writer and directory, and removes the on-disk index directory.
 * Idempotent — subsequent calls after the first are no-ops.
 *
 * @throws IOException if closing the index writer or directory fails
 */
public void close() throws IOException {
    if (closed) {
        // Already closed; nothing to release.
        return;
    }
    log.debug("[{}] Closing NRTIndex [{}]", definition.getIndexPath(), getName());
    decrementReaderUseCount(dirReaderUsedForPrevious);
    //'readers' already has dirReader so no need to close it explicitly
    decrementReaderUseCount(readers);
    // Sanity check: every reader handed out must have been released by now.
    assertAllReadersAreClosed();
    if (indexWriter != null) {
        //TODO Close call can possibly be speeded up by
        //avoiding merge and dropping stuff in memory. To be explored
        //indexWrite.close(waitForMerges)
        indexWriter.close();
        // Record final on-disk size before the directory goes away.
        sizeHisto.update(dirSize(directory));
        directory.close();
        // Best-effort delete; failures are swallowed deliberately.
        FileUtils.deleteQuietly(indexDir);
        log.debug("[{}] Removed directory [{}]", this, indexDir);
    }
    //Null the reference to previous so as to let it
    //garbage collect. It would not be accessed post close
    previous = null;
    closed = true;
    openTime.stop();
}
/**
 * Tears down this NRT index instance. Releases reader references, closes
 * the writer and directory, deletes the index directory from disk, and
 * marks the instance closed so repeat calls return immediately.
 *
 * @throws IOException on failure to close the writer or directory
 */
public void close() throws IOException {
    if (closed) {
        return;
    }
    log.debug("[{}] Closing NRTIndex [{}]", definition.getIndexPath(), getName());
    decrementReaderUseCount(dirReaderUsedForPrevious);
    //'readers' already has dirReader so no need to close it explicitly
    decrementReaderUseCount(readers);
    // All handed-out readers must be back before we tear down the directory.
    assertAllReadersAreClosed();
    if (indexWriter != null) {
        //TODO Close call can possibly be speeded up by
        //avoiding merge and dropping stuff in memory. To be explored
        //indexWrite.close(waitForMerges)
        indexWriter.close();
        sizeHisto.update(dirSize(directory));
        directory.close();
        // deleteQuietly: removal is best-effort, errors intentionally ignored.
        FileUtils.deleteQuietly(indexDir);
        log.debug("[{}] Removed directory [{}]", this, indexDir);
    }
    //Null the reference to previous so as to let it
    //garbage collect. It would not be accessed post close
    previous = null;
    closed = true;
    openTime.stop();
}
/**
 * Exercises the basic SimpleStats API: mark/inc/dec with and without deltas,
 * update() unit conversion, and the timer context.
 */
@Test
public void usageTest() throws Exception {
    AtomicLong counter = new AtomicLong();
    SimpleStats stats = new SimpleStats(counter, SimpleStats.Type.COUNTER);

    stats.mark();
    assertEquals(1, counter.get());
    assertEquals(1, stats.getCount());

    stats.inc();
    assertEquals(2, counter.get());
    stats.dec();
    assertEquals(1, counter.get());

    stats.inc(7);
    assertEquals(8, counter.get());
    stats.dec(7);
    assertEquals(1, counter.get());

    stats.mark(2);
    assertEquals(3, counter.get());

    // update() stores the given duration converted to milliseconds.
    counter.set(0);
    stats.update(100, TimeUnit.SECONDS);
    assertEquals(TimeUnit.MILLISECONDS.convert(100, TimeUnit.SECONDS), counter.get());

    counter.set(0);
    TimerStats.Context context = stats.time();
    // BUGFIX: the sleep must happen while the timer is running. The original
    // slept AFTER context.stop(), so the pause was never measured and the
    // sleep was dead code.
    TimeUnit.MILLISECONDS.sleep(42);
    long delta = context.stop();
    // The counter records the same elapsed time (in ms) that stop() returned (in ns).
    assertEquals(TimeUnit.NANOSECONDS.toMillis(delta), counter.get());
}
/**
 * Verifies that the no-op stats implementation ignores every mutator:
 * the reported count stays at zero no matter what is called.
 */
@Test
public void noopTest() throws Exception {
    NoopStats stats = NoopStats.INSTANCE;
    assertEquals(0, stats.getCount());

    // Marks and increments of any size must leave the count untouched.
    stats.mark();
    assertEquals(0, stats.getCount());
    stats.mark(10);
    assertEquals(0, stats.getCount());

    // Same for decrements and paired inc/dec with deltas.
    stats.dec();
    assertEquals(0, stats.getCount());
    stats.inc();
    assertEquals(0, stats.getCount());
    stats.inc(5);
    assertEquals(0, stats.getCount());
    stats.dec(7);
    assertEquals(0, stats.getCount());

    // Timed updates are discarded as well.
    stats.update(100, TimeUnit.SECONDS);
    assertEquals(0, stats.getCount());

    // A no-op timer context reports zero elapsed time.
    TimerStats.Context ctx = stats.time();
    assertEquals(0, ctx.stop());
}
private V asyncReadIfPresent(K key) { TimerStats.Context ctx = stats.startReadTimer(); try { MultiGenerationMap.ValueWithGenerationInfo<V> v = map.readValue(key); if (v == null) { return null; } if (v.isCurrentGeneration() && !cache.needSwitch()) { // don't persist again on eviction memCacheMetadata.putFromPersistenceAndIncrement(key); } else { // persist again during eviction memCacheMetadata.increment(key); } return v.getValue(); } finally { ctx.stop(); } }
@Override public V get(K key, Callable<? extends V> valueLoader) throws ExecutionException { // Get stats covered in getIfPresent V value = getIfPresent(key); if (value != null) { return value; } // Track entry load time TimerStats.Context ctx = stats.startLoaderTimer(); try { memCacheMetadata.increment(key); value = memCache.get(key, valueLoader); ctx.stop(); if (!async) { write((K) key, value); } broadcast(key, value); return value; } catch (ExecutionException e) { stats.markException(); throw e; } }
/**
 * Executes the given query, recording execution count and duration, and
 * wraps the engine result for JCR consumption.
 *
 * @param statement       the query statement
 * @param language        the query language (e.g. JCR-SQL2, XPath)
 * @param limit           maximum number of results
 * @param offset          number of results to skip
 * @param bindVariableMap bind variables, converted to engine values
 * @return the wrapped query result
 * @throws InvalidQueryException if the statement is malformed
 * @throws RepositoryException   on other repository errors
 */
public QueryResult executeQuery(String statement, String language, long limit,
        long offset, HashMap<String, Value> bindVariableMap) throws RepositoryException {
    try {
        Map<String, PropertyValue> bindMap = convertMap(bindVariableMap);
        TimerStats.Context context = queryDuration.time();
        Result r = queryEngine.executeQuery(
                statement, language, limit, offset, bindMap,
                sessionContext.getSessionLocalMappings());
        queryCount.mark();
        long millis = TimeUnit.NANOSECONDS.toMillis(context.stop());
        queryOpsLogger.debug("Executed query [{}] in [{}] ms", statement, millis);
        sessionContext.getStatisticManager()
                .logQueryEvaluationTime(language, statement, millis);
        return new QueryResultImpl(sessionContext, r);
    } catch (IllegalArgumentException | ParseException e) {
        // IDIOM: multi-catch replaces two catch blocks with identical bodies;
        // both exceptions indicate a malformed query.
        throw new InvalidQueryException(e);
    }
}
/**
 * Reads {@code key} from the persisted map, timing the read, and updates
 * the in-memory cache metadata based on the value's generation.
 *
 * @param key the cache key
 * @return the cached value, or {@code null} if not present
 */
private V asyncReadIfPresent(K key) {
    TimerStats.Context ctx = stats.startReadTimer();
    try {
        MultiGenerationMap.ValueWithGenerationInfo<V> v = map.readValue(key);
        if (v == null) {
            // Miss: timer still stopped via finally.
            return null;
        }
        if (v.isCurrentGeneration() && !cache.needSwitch()) {
            // don't persist again on eviction
            memCacheMetadata.putFromPersistenceAndIncrement(key);
        } else {
            // persist again during eviction
            memCacheMetadata.increment(key);
        }
        return v.getValue();
    } finally {
        // Record read time for hits, misses, and exceptional exits alike.
        ctx.stop();
    }
}
@Override public V get(K key, Callable<? extends V> valueLoader) throws ExecutionException { // Get stats covered in getIfPresent V value = getIfPresent(key); if (value != null) { return value; } // Track entry load time TimerStats.Context ctx = stats.startLoaderTimer(); try { memCacheMetadata.increment(key); value = memCache.get(key, valueLoader); ctx.stop(); if (!async) { write((K) key, value); } broadcast(key, value); return value; } catch (ExecutionException e) { stats.markException(); throw e; } }
/**
 * Runs the query through the engine, tracking count and duration metrics,
 * and returns a JCR-facing result wrapper.
 *
 * @param statement       query text
 * @param language        query language identifier
 * @param limit           result limit
 * @param offset          result offset
 * @param bindVariableMap JCR bind variables
 * @return wrapped result
 * @throws InvalidQueryException for malformed statements
 * @throws RepositoryException   for other failures
 */
public QueryResult executeQuery(String statement, String language, long limit,
        long offset, HashMap<String, Value> bindVariableMap) throws RepositoryException {
    try {
        Map<String, PropertyValue> bindMap = convertMap(bindVariableMap);
        TimerStats.Context context = queryDuration.time();
        Result r = queryEngine.executeQuery(
                statement, language, limit, offset, bindMap,
                sessionContext.getSessionLocalMappings());
        queryCount.mark();
        long millis = TimeUnit.NANOSECONDS.toMillis(context.stop());
        queryOpsLogger.debug("Executed query [{}] in [{}] ms", statement, millis);
        sessionContext.getStatisticManager()
                .logQueryEvaluationTime(language, statement, millis);
        return new QueryResultImpl(sessionContext, r);
    } catch (IllegalArgumentException | ParseException e) {
        // IDIOM: collapsed two identical catch blocks into a multi-catch.
        throw new InvalidQueryException(e);
    }
}
/**
 * Executes a repository query with metrics (invocation count, duration)
 * and per-statement evaluation-time logging.
 *
 * @param statement       query text
 * @param language        query language
 * @param limit           maximum results to return
 * @param offset          results to skip
 * @param bindVariableMap bind variables by name
 * @return the query result
 * @throws InvalidQueryException when the statement cannot be parsed
 * @throws RepositoryException   on other repository errors
 */
public QueryResult executeQuery(String statement, String language, long limit,
        long offset, HashMap<String, Value> bindVariableMap) throws RepositoryException {
    try {
        Map<String, PropertyValue> bindMap = convertMap(bindVariableMap);
        TimerStats.Context context = queryDuration.time();
        Result r = queryEngine.executeQuery(
                statement, language, limit, offset, bindMap,
                sessionContext.getSessionLocalMappings());
        queryCount.mark();
        long millis = TimeUnit.NANOSECONDS.toMillis(context.stop());
        queryOpsLogger.debug("Executed query [{}] in [{}] ms", statement, millis);
        sessionContext.getStatisticManager()
                .logQueryEvaluationTime(language, statement, millis);
        return new QueryResultImpl(sessionContext, r);
    } catch (IllegalArgumentException | ParseException e) {
        // IDIOM: both exception types signal an invalid query; multi-catch
        // removes the duplicated handler body.
        throw new InvalidQueryException(e);
    }
}
@Override
public Integer call() throws Exception {
    try {
        // Time the upload of this file to the backend store.
        final TimerStats.Context uploadContext = cacheStats.startUpLoaderTimer();
        uploader.write(id, upload);
        LOG.debug("File added to backend [{}]", upload);
        // NOTE(review): the timer is only stopped on the success path, so a
        // failed upload contributes no timing sample — confirm intended.
        uploadContext.stop();
        // One file uploaded.
        return 1;
    } catch (Exception e) {
        LOG.error("Error adding file to backend", e);
        throw e;
    }
}
});
@Override
public Integer call() throws Exception {
    try {
        // Measure how long the backend write takes.
        final TimerStats.Context uploadContext = cacheStats.startUpLoaderTimer();
        uploader.write(id, upload);
        LOG.debug("File added to backend [{}]", upload);
        // Timer stops only on success; exception path records no sample.
        uploadContext.stop();
        // Returns 1 to indicate a single completed upload.
        return 1;
    } catch (Exception e) {
        LOG.error("Error adding file to backend", e);
        throw e;
    }
}
});
/**
 * Verifies that closing a CompositeStats timer context records one sample
 * in the underlying Timer and stores the elapsed time (in ms) in the counter.
 */
@Test
public void timerContext() throws Exception {
    AtomicLong elapsedMillis = new AtomicLong();
    VirtualClock clock = new VirtualClock();
    Timer backingTimer = new Timer(new ExponentiallyDecayingReservoir(), clock);
    TimerStats stats = new CompositeStats(elapsedMillis, backingTimer);

    // Start timing, advance the virtual clock by 314 s, then close the context.
    TimerStats.Context ctx = stats.time();
    clock.tick = TimeUnit.SECONDS.toNanos(314);
    ctx.close();

    // Exactly one sample recorded; counter holds the elapsed millis.
    assertEquals(1, backingTimer.getCount());
    assertEquals(TimeUnit.SECONDS.toMillis(314), elapsedMillis.get());
}
/**
 * Reads {@code key} from the persisted map after giving the cache a chance
 * to switch generations, timing the read and updating cache metadata on a hit.
 *
 * @param key the cache key
 * @return the cached value, or {@code null} if absent
 */
private V syncReadIfPresent(K key) {
    cache.switchGenerationIfNeeded();
    TimerStats.Context ctx = stats.startReadTimer();
    V v;
    try {
        v = map.get(key);
    } finally {
        // FIX: stop the read timer even if map.get throws; the original
        // leaked the timer context on the exception path, unlike the
        // async read path which already uses try/finally.
        ctx.stop();
    }
    if (v != null) {
        memCacheMetadata.putFromPersistenceAndIncrement(key);
    }
    return v;
}