protected void configureFileBackedBuffer(final MeterConfig config) { // Persistent buffer for in-memory samples try { final boolean deleteFilesOnClose = config.getShutdownSaveMode().equals("save_all_timelines"); final FileBackedBuffer fileBackedBuffer = new FileBackedBuffer(config.getSpoolDir(), "TimelineEventHandler", deleteFilesOnClose, config.getSegmentsSize(), config.getMaxNbSegments()); bind(FileBackedBuffer.class).toInstance(fileBackedBuffer); } catch (IOException e) { throw new RuntimeException(e); } }
public void runAggregationThread() {
    // Schedule periodic aggregation: first run after one interval, then repeated
    // with the same fixed delay between the end of one run and the start of the next.
    final long aggregationIntervalMillis = config.getAggregationInterval().getMillis();
    aggregatorThread.scheduleWithFixedDelay(new Runnable() {
        @Override
        public void run() {
            getAndProcessTimelineAggregationCandidates();
        }
    }, aggregationIntervalMillis, aggregationIntervalMillis, TimeUnit.MILLISECONDS);
}
private void maybePerformBackgroundWrites() { // If already running background writes, just return maybePerformBackgroundWritesCount.incrementAndGet(); if (!doingWritesNow.compareAndSet(false, true)) { return; } else { try { if (shuttingDown.get()) { performBackgroundWrites(); } final int pendingCount = pendingChunkCount.get(); if (pendingCount > 0) { if (pendingCount >= config.getBackgroundWriteBatchSize() || new DateTime().isBefore(lastWriteTime.plusMillis((int) config.getBackgroundWriteMaxDelay().getMillis()))) { performBackgroundWrites(); lastWriteTime = new DateTime(); } } } finally { doingWritesNow.set(false); } } }
private synchronized void purgeFilesAndAccumulators(final DateTime purgeAccumulatorsIfBefore, final DateTime purgeFilesIfBefore) {
    // Drop stale in-memory sources/accumulators first, then prune the on-disk
    // spool files older than the given cutoff.
    purgeOldSourcesAndAccumulators(purgeAccumulatorsIfBefore);
    new Replayer(config.getSpoolDir()).purgeOldFiles(purgeFilesIfBefore);
}
final String[] chunkCountsToAggregate = config.getChunksToAggregate().split(","); for (int aggregationLevel = 0; aggregationLevel < config.getMaxAggregationLevel(); aggregationLevel++) { final long startingAggregatesCreated = aggregatesCreated.get(); final Map<String, Long> initialCounters = captureAggregatorCounters();
private void performWrites() { final InternalCallContext context = createCallContext(); // This is the atomic operation: bulk insert the new aggregated TimelineChunk objects, and delete // or invalidate the ones that were aggregated. This should be very fast. final long startWriteTime = System.currentTimeMillis(); aggregatorSqlDao.begin(); timelineDao.bulkInsertTimelineChunks(chunksToWrite, context); if (config.getDeleteAggregatedChunks()) { aggregatorSqlDao.deleteTimelineChunks(chunkIdsToInvalidateOrDelete, context); } else { aggregatorSqlDao.makeTimelineChunksInvalid(chunkIdsToInvalidateOrDelete, context); } aggregatorSqlDao.commit(); msWritingDb.addAndGet(System.currentTimeMillis() - startWriteTime); timelineChunksWritten.addAndGet(chunksToWrite.size()); timelineChunksInvalidatedOrDeleted.addAndGet(chunkIdsToInvalidateOrDelete.size()); chunksToWrite.clear(); chunkIdsToInvalidateOrDelete.clear(); final long sleepMs = config.getAggregationSleepBetweenBatches().getMillis(); if (sleepMs > 0) { final long timeBeforeSleep = System.currentTimeMillis(); try { Thread.sleep(sleepMs); } catch (InterruptedException e) { Thread.currentThread().interrupt(); } msSpentSleeping.addAndGet(System.currentTimeMillis() - timeBeforeSleep); } timelineChunkBatchesProcessed.incrementAndGet(); }
public void runBackgroundWriteThread() {
    // Background flushing is only needed when writes are not performed inline.
    if (performForegroundWrites) {
        return;
    }
    // Check for pending chunks at a fixed delay; the first check happens one
    // interval after startup.
    final long checkIntervalMillis = config.getBackgroundWriteCheckInterval().getMillis();
    backgroundWriteThread.scheduleWithFixedDelay(new Runnable() {
        @Override
        public void run() {
            maybePerformBackgroundWrites();
        }
    }, checkIntervalMillis, checkIntervalMillis, TimeUnit.MILLISECONDS);
}
timelineChunksQueuedForCreation.incrementAndGet(); if (chunkIdsToInvalidateOrDelete.size() >= config.getMaxChunkIdsToInvalidateOrDelete()) { performWrites();
@BeforeMethod(groups = "fast")
public void setUp() throws Exception {
    Assert.assertTrue(basePath.mkdir());
    // The spool directory and timeline length must be set as system properties
    // before the config object is materialized from them.
    System.setProperty("killbill.usage.timelines.spoolDir", basePath.getAbsolutePath());
    System.setProperty("killbill.usage.timelines.length", "60s");
    final MeterConfig config = new ConfigurationObjectFactory(System.getProperties()).build(MeterConfig.class);
    // Wire the handler with its collaborators: a background chunk writer and a
    // file-backed spool buffer in the scratch directory.
    final BackgroundDBChunkWriter chunkWriter = new BackgroundDBChunkWriter(dao, config, internalCallContextFactory);
    final FileBackedBuffer spoolBuffer = new FileBackedBuffer(config.getSpoolDir(), "TimelineEventHandler", 1024 * 1024, 10);
    timelineEventHandler = new TimelineEventHandler(config, dao, timelineCoder, sampleCoder, chunkWriter, spoolBuffer);
    dao.getOrAddSource(HOST_UUID.toString(), internalCallContext);
}
@BeforeMethod(alwaysRun = true)
public void setUp() throws Exception {
    Assert.assertTrue(basePath.mkdir());
    // The spool directory must be set as a system property before the config
    // object is materialized from the system properties.
    System.setProperty("killbill.usage.timelines.spoolDir", basePath.getAbsolutePath());
    final MeterConfig config = new ConfigurationObjectFactory(System.getProperties()).build(MeterConfig.class);
    // Wire the handler with its collaborators: a background chunk writer and a
    // file-backed spool buffer in the scratch directory.
    final BackgroundDBChunkWriter chunkWriter = new BackgroundDBChunkWriter(dao, config, internalCallContextFactory);
    final FileBackedBuffer spoolBuffer = new FileBackedBuffer(config.getSpoolDir(), "TimelineEventHandler", 1024 * 1024, 10);
    timelineEventHandler = new TimelineEventHandler(config, dao, timelineCoder, sampleCoder, chunkWriter, spoolBuffer);
    dao.getOrAddSource(HOST_UUID.toString(), internalCallContext);
    eventTypeId = dao.getOrAddEventCategory(EVENT_TYPE, internalCallContext);
}