/**
 * Returns {@code buffer} to the wrapped allocator and updates the pool's
 * deallocation accounting.
 *
 * <p>The peak-allocation accumulator is fed the current allocation size
 * <em>before</em> this buffer's capacity is subtracted, so the recorded
 * maximum reflects the high-water mark that still included this buffer.
 *
 * <p>NOTE(review): the accumulate/subtract pair is not atomic, so under
 * concurrent alloc/free the recorded peak is best-effort, not exact.
 *
 * @param buffer the buffer to release; its capacity is counted as the
 *               deallocated size
 */
public void free(final ByteBuf buffer) {
    int size = buffer.capacity();
    bytesDeallocatedAdder.add(size);
    // Capture the high-water mark while this buffer is still counted.
    maxAllocationSizeAcc.accumulate(currentAllocationSize.longValue());
    currentAllocationSize.addAndGet(-size);
    buffersDeallocatedAdder.increment();
    // Delegate the actual release to the underlying allocator.
    impl.free(buffer);
}
/**
 * Returns the value currently held by the latest-query-time accumulator.
 *
 * @return the latest recorded query time
 */
public long getLatestQueryTime() {
    final long latest = _latestQueryTime.get();
    return latest;
}
/**
 * Per-test fixture setup: a dropped-message counter, a max-tracking
 * accumulator seeded at {@code Long.MIN_VALUE}, and a single-threaded
 * producer executor.
 */
@Before
public void setup() {
    droppedCount = new AtomicLong();
    maxRingBufferPending = new LongAccumulator(Long::max, Long.MIN_VALUE);
    producerExecutor = Executors.newSingleThreadExecutor();
}
/**
 * Resets the total and max statistics so that both reflect only the
 * current value.
 */
public void reset() {
    final long snapshot = _current.get();
    _total.reset();
    _max.reset();
    _total.add(snapshot);
    _max.accumulate(snapshot);
}
/**
 * Demo of {@code LongAccumulator}: ten tasks accumulate the values 0..9
 * into an accumulator seeded with 1 using the operator {@code 2 * x + y}.
 *
 * <p>NOTE(review): {@code 2 * x + y} is neither commutative nor
 * associative, while LongAccumulator only guarantees a well-defined result
 * for order-independent functions — the printed value can therefore vary
 * from run to run.
 */
private static void testAccumulate() {
    LongBinaryOperator op = (x, y) -> 2 * x + y;
    LongAccumulator accumulator = new LongAccumulator(op, 1L);
    ExecutorService executor = Executors.newFixedThreadPool(2);
    IntStream.range(0, 10)
        .forEach(i -> executor.submit(() -> accumulator.accumulate(i)));
    // Project helper: presumably shuts the executor down and awaits
    // termination before the read below — TODO confirm.
    ConcurrentUtils.stop(executor);
    // getThenReset() drains the accumulator back to its identity value.
    System.out.format("Add: %d\n", accumulator.getThenReset());
}
}
/**
 * Serializes {@code corfuMsg} into {@code byteBuf} and, when debug logging
 * is enabled, tracks the largest write-buffer size observed so far.
 *
 * <p>NOTE(review): serialization failures are logged and swallowed, so a
 * failing message is silently dropped rather than propagated to Netty.
 * The prev/accumulate/curr sequence is also not atomic: concurrent encodes
 * may miss or duplicate the "new max" debug line.
 */
@Override
protected void encode(ChannelHandlerContext channelHandlerContext, CorfuMsg corfuMsg, ByteBuf byteBuf) throws Exception {
    try {
        corfuMsg.serialize(byteBuf);
        if(log.isDebugEnabled()) {
            long prev = maxValue.get();
            maxValue.accumulate(byteBuf.readableBytes());
            long curr = maxValue.get();
            // The max value has been updated.
            if (prev < curr) {
                log.debug("encode: New max write buffer found {}", curr);
            }
        }
    } catch (Exception e) {
        log.error("encode: Error during serialization!", e);
    }
}
}
/** * Return the current statistics resetting the internal values if reset is true. */ private DTimeMetricStats getStatistics(boolean reset) { if (reset) { // Note these values are not guaranteed to be consistent wrt each other // but should be reasonably consistent (small time between count and total) final long beans = beanCount.sumThenReset(); final long maxVal = max.getThenReset(); final long totalVal = total.sumThenReset(); final long countVal = count.sumThenReset(); final long startTimeVal = startTime.getAndSet(System.currentTimeMillis()); return new DTimeMetricStats(metricType, name, startTimeVal, countVal, totalVal, maxVal, beans); } else { return new DTimeMetricStats(metricType, name, startTime.get(), count.sum(), total.sum(), max.get(), beanCount.sum()); } }
/**
 * Copy the mappings stored in a {@link LocatableResolver} to the specified
 * replica.
 *
 * @param replica the {@link LocatableResolver} to copy to; must live in the
 *                same database as this resolver's runner
 * @return a future that completes when the copy (and the replica's window
 *         update) is finished
 * @throws IllegalArgumentException if {@code replica} belongs to a
 *         different database
 */
public CompletableFuture<Void> copyToAsync(@Nonnull final LocatableResolver replica) {
    final boolean sameDatabase = replica.getDatabase().equals(runner.getDatabase());
    if (!sameDatabase) {
        throw new IllegalArgumentException("copy must be within same database");
    }
    final AtomicInteger copiedCounter = new AtomicInteger();
    final LongAccumulator maxSeen = new LongAccumulator(Long::max, 0L);
    // After copying, advance the replica's allocation window past the
    // largest value observed during the copy.
    return copyInternal(replica, maxSeen, copiedCounter)
        .thenCompose(unused -> replica.setWindow(maxSeen.get()));
}
/**
 * Clears all collected statistics: count, total, max and the scaled
 * variance accumulator.
 */
public void reset() {
    _count.set(0);
    _total.set(0);
    _max.reset();
    _totalVariance100.reset();
}
// Drain-style snapshot fields: each initializer atomically reads the
// corresponding counter from the enclosing SimpleReservoir and resets it,
// so taking a snapshot consumes the accumulated window.
// NOTE(review): sumThenReset/getThenReset imply num and sum are LongAdders
// while max and min are LongAccumulators — confirm against the outer class.
private final long num = SimpleReservoir.this.num.sumThenReset();
private final long sum = SimpleReservoir.this.sum.sumThenReset();
private final long max = SimpleReservoir.this.max.getThenReset();
private final long min = SimpleReservoir.this.min.getThenReset();
/**
 * Resets the max, total and current value to the given parameter. Only
 * positive values are folded back into the total and max statistics.
 *
 * @param value the new current value
 */
public void reset(final long value) {
    _current.set(value);
    _total.reset();
    _max.reset();
    if (value <= 0) {
        return;
    }
    _total.add(value);
    _max.accumulate(value);
}
/**
 * Demo of {@code LongAccumulator}: ten tasks accumulate the values 0..9
 * into an accumulator seeded with 1 using the operator {@code 2 * x + y}.
 *
 * <p>NOTE(review): {@code 2 * x + y} is neither commutative nor
 * associative, while LongAccumulator only guarantees a well-defined result
 * for order-independent functions — the printed value can therefore vary
 * from run to run.
 */
private static void testAccumulate() {
    LongBinaryOperator op = (x, y) -> 2 * x + y;
    LongAccumulator accumulator = new LongAccumulator(op, 1L);
    ExecutorService executor = Executors.newFixedThreadPool(2);
    IntStream.range(0, 10)
        .forEach(i -> executor.submit(() -> accumulator.accumulate(i)));
    // Project helper: presumably shuts the executor down and awaits
    // termination before the read below — TODO confirm.
    ConcurrentUtils.stop(executor);
    // getThenReset() drains the accumulator back to its identity value.
    System.out.format("Add: %d\n", accumulator.getThenReset());
}
}
/** * Return the current statistics reseting the internal values if reset is true. */ public ValueStatistics getStatistics(boolean reset) { if (reset) { // Note these values are not guaranteed to be consistent wrt each other // but should be reasonably consistent (small time between count and total) final long startTimeVal = startTime.getAndSet(System.currentTimeMillis()); final long countVal = count.sumThenReset(); final long totalVal = total.sumThenReset(); final long maxVal = max.getThenReset(); return new DefaultValueStatistics(startTimeVal, countVal, totalVal, maxVal); } else { return new DefaultValueStatistics(startTime.get(), count.sum(), total.sum(), max.get()); } }
/**
 * Reset all the internal counters and roll the start time forward to now.
 */
@Override
public void reset() {
    startTime.set(System.currentTimeMillis());
    beanCount.reset();
    count.reset();
    max.reset();
    total.reset();
}
/**
 * Simulated consumer work: records the processor's pending depth into the
 * max accumulator, sleeps for the configured consumer latency, and passes
 * the value through unchanged.
 *
 * @param value the event payload, returned as-is
 * @return {@code value}, unmodified
 */
private Object complicatedCalculation(Object value) {
    final long pendingDepth = processor.getPending();
    maxRingBufferPending.accumulate(pendingDepth);
    sleep(CONSUMER_LATENCY);
    return value;
}
/**
 * Returns the value currently held by the latest-query-time accumulator.
 *
 * @return the latest recorded query time
 */
public long getLatestQueryTime() {
    final long latest = latestQueryTime.get();
    return latest;
}
}
/**
 * Resets the total and max statistics so that both reflect only the
 * current value.
 */
public void reset() {
    final long snapshot = _current.get();
    _total.reset();
    _max.reset();
    _total.add(snapshot);
    _max.accumulate(snapshot);
}
/** * Return a Snapshot of the query execution statistics potentially resetting the internal counters. */ Snapshot getSnapshot(boolean reset) { List<MetaQueryPlanOriginCount> origins = getOrigins(reset); // not guaranteed to be consistent due to time gaps between getting each value out of LongAdders but can live with that // relative to the cost of making sure count and totalTime etc are all guaranteed to be consistent if (reset) { return new Snapshot(queryPlan, count.sumThenReset(), totalTime.sumThenReset(), totalBeans.sumThenReset(), maxTime.getThenReset(), startTime.getAndSet(System.currentTimeMillis()), lastQueryTime, origins); } return new Snapshot(queryPlan, count.sum(), totalTime.sum(), totalBeans.sum(), maxTime.get(), startTime.get(), lastQueryTime, origins); }
/**
 * Bootstraps the server instance: builds the metrics, data manager, query
 * executor, scheduler and request handler via {@link ServerBuilder}, then
 * wires the handler into a Netty server.
 *
 * @param serverConf    server configuration handed to the builder
 * @param propertyStore Helix property store handed to the builder
 * @throws Exception if any build step fails
 */
public void init(@Nonnull ServerConf serverConf, @Nonnull ZkHelixPropertyStore<ZNRecord> propertyStore) throws Exception {
    LOGGER.info("Initializing server instance");
    _serverConf = serverConf;
    ServerBuilder serverBuilder = new ServerBuilder(_serverConf, propertyStore);
    _serverMetrics = serverBuilder.getServerMetrics();
    _instanceDataManager = serverBuilder.buildInstanceDataManager();
    _queryExecutor = serverBuilder.buildQueryExecutor(_instanceDataManager);
    // Max-accumulator seeded at 0: keeps the largest query time observed.
    _latestQueryTime = new LongAccumulator(Long::max, 0);
    _queryScheduler = serverBuilder.buildQueryScheduler(_queryExecutor, _latestQueryTime);
    _requestHandler = new ScheduledRequestHandler(_queryScheduler, _serverMetrics);
    // Every connection is served by the same scheduled request handler.
    _nettyServer = serverBuilder.buildNettyServer(new RequestHandlerFactory() {
        @Override
        public NettyServer.RequestHandler createNewRequestHandler() {
            return _requestHandler;
        }
    });
    LOGGER.info("Finish initializing server instance");
}
/**
 * Resets the max-used accumulator back to its identity value so a new
 * peak can be tracked from scratch.
 */
public void resetMaxUsedCount() {
    maxUsed.reset();
}