/**
 * Returns the {@code commit.json} file located in this task's base persist directory.
 */
private File computeCommitFile()
{
  final File baseDir = tuningConfig.getBasePersistDirectory();
  return new File(baseDir, "commit.json");
}
/**
 * Re-arms the time-based persist trigger: the next flush fires one
 * intermediatePersistPeriod after the current UTC time.
 */
private void resetNextFlush()
{
  nextFlush = DateTimes.nowUtc()
                       .plus(tuningConfig.getIntermediatePersistPeriod())
                       .getMillis();
}
cachePopulatorStats ); maxBytesTuningConfig = TuningConfigs.getMaxBytesInMemoryOrDefault(tuningConfig.getMaxBytesInMemory()); log.info("Created Appenderator for dataSource[%s].", schema.getDataSource());
/**
 * Convenience overload: delegates to the (maxRowsPerSegment, maxTotalRows)
 * variant using the limits carried by the given tuning config.
 */
public boolean isPushRequired(AppenderatorConfig tuningConfig)
{
  return isPushRequired(
      tuningConfig.getMaxRowsPerSegment(),
      tuningConfig.getMaxTotalRows()
  );
}
final File baseDir = tuningConfig.getBasePersistDirectory(); if (!baseDir.exists()) { return null; identifier.getShardSpec(), identifier.getVersion(), tuningConfig.getMaxRowsInMemory(), maxBytesTuningConfig, tuningConfig.isReportParseExceptions(), null, hydrants
final IndexSpec indexSpec = tuningConfig.getIndexSpec(); persistedFile = indexMerger.persist( indexToPersist.getIndex(), new File(persistDir, String.valueOf(indexToPersist.getCount())), indexSpec, tuningConfig.getSegmentWriteOutMediumFactory() );
/**
 * Returns the sink for the given segment identifier, lazily creating it on first use.
 *
 * <p>A newly created sink is announced via the segment announcer (failures are
 * alerted but do not prevent local use of the sink), registered in {@code sinks}
 * and the sink timeline, and reflected in the sink-count metric.
 */
private Sink getOrCreateSink(final SegmentIdWithShardSpec identifier)
{
  final Sink existing = sinks.get(identifier);
  if (existing != null) {
    return existing;
  }

  final Sink sink = new Sink(
      identifier.getInterval(),
      schema,
      identifier.getShardSpec(),
      identifier.getVersion(),
      tuningConfig.getMaxRowsInMemory(),
      maxBytesTuningConfig,
      tuningConfig.isReportParseExceptions(),
      null
  );

  try {
    segmentAnnouncer.announceSegment(sink.getSegment());
  }
  catch (IOException e) {
    // Announcement failure is surfaced as an alert; the sink is still usable locally.
    log.makeAlert(e, "Failed to announce new segment[%s]", schema.getDataSource())
       .addData("interval", sink.getInterval())
       .emit();
  }

  sinks.put(identifier, sink);
  metrics.setSinkCount(sinks.size());
  sinkTimeline.add(sink.getInterval(), sink.getVersion(), identifier.getShardSpec().createChunk(sink));

  return sink;
}
private void initializeExecutors() { final int maxPendingPersists = tuningConfig.getMaxPendingPersists(); if (persistExecutor == null) { // use a blocking single threaded executor to throttle the firehose when write to disk is slow persistExecutor = MoreExecutors.listeningDecorator( Execs.newBlockingSingleThreaded( "appenderator_persist_%d", maxPendingPersists ) ); } if (pushExecutor == null) { // use a blocking single threaded executor to throttle the firehose when write to disk is slow pushExecutor = MoreExecutors.listeningDecorator( Execs.newBlockingSingleThreaded( "appenderator_merge_%d", 1 ) ); } if (intermediateTempExecutor == null) { // use single threaded executor with SynchronousQueue so that all abandon operations occur sequentially intermediateTempExecutor = MoreExecutors.listeningDecorator( Execs.newBlockingSingleThreaded( "appenderator_abandon_%d", 0 ) ); } }
)); if (rowsCurrentlyInMemory.get() >= tuningConfig.getMaxRowsInMemory()) { persist = true; persistReasons.add(StringUtils.format( "rowsCurrentlyInMemory[%d] is greater than maxRowsInMemory[%d]", rowsCurrentlyInMemory.get(), tuningConfig.getMaxRowsInMemory() ));
final File baseDir = tuningConfig.getBasePersistDirectory(); if (!baseDir.exists()) { return null; identifier.getShardSpec(), identifier.getVersion(), tuningConfig.getMaxRowsInMemory(), maxBytesTuningConfig, tuningConfig.isReportParseExceptions(), null, hydrants
schema.getAggregators(), mergedTarget, tuningConfig.getIndexSpec(), tuningConfig.getSegmentWriteOutMediumFactory() );
/**
 * Returns true when a push is required: either this segment has reached
 * maxRowsPerSegment, or (when a total-row limit is configured) the whole
 * appenderator has reached maxTotalRows.
 */
public boolean isPushRequired(AppenderatorConfig tuningConfig)
{
  final boolean segmentAtLimit = getNumRowsInSegment() >= tuningConfig.getMaxRowsPerSegment();

  // A null maxTotalRows means "no appenderator-wide limit".
  final Long maxTotalRows = tuningConfig.getMaxTotalRows();
  final boolean totalAtLimit =
      maxTotalRows != null && getTotalNumRowsInAppenderator() >= maxTotalRows;

  return segmentAtLimit || totalAtLimit;
}
/**
 * Returns the sink for the given segment identifier, lazily creating it on first use.
 *
 * <p>A newly created sink is announced via the segment announcer (failures are
 * alerted but do not prevent local use of the sink), registered in {@code sinks}
 * and the sink timeline, and reflected in the sink-count metric.
 */
private Sink getOrCreateSink(final SegmentIdentifier identifier)
{
  final Sink existing = sinks.get(identifier);
  if (existing != null) {
    return existing;
  }

  final Sink sink = new Sink(
      identifier.getInterval(),
      schema,
      identifier.getShardSpec(),
      identifier.getVersion(),
      tuningConfig.getMaxRowsInMemory(),
      maxBytesTuningConfig,
      tuningConfig.isReportParseExceptions(),
      null
  );

  try {
    segmentAnnouncer.announceSegment(sink.getSegment());
  }
  catch (IOException e) {
    // Announcement failure is surfaced as an alert; the sink is still usable locally.
    log.makeAlert(e, "Failed to announce new segment[%s]", schema.getDataSource())
       .addData("interval", sink.getInterval())
       .emit();
  }

  sinks.put(identifier, sink);
  metrics.setSinkCount(sinks.size());
  sinkTimeline.add(sink.getInterval(), sink.getVersion(), identifier.getShardSpec().createChunk(sink));

  return sink;
}
private void initializeExecutors() { final int maxPendingPersists = tuningConfig.getMaxPendingPersists(); if (persistExecutor == null) { // use a blocking single threaded executor to throttle the firehose when write to disk is slow persistExecutor = MoreExecutors.listeningDecorator( Execs.newBlockingSingleThreaded( "appenderator_persist_%d", maxPendingPersists ) ); } if (pushExecutor == null) { // use a blocking single threaded executor to throttle the firehose when write to disk is slow pushExecutor = MoreExecutors.listeningDecorator( Execs.newBlockingSingleThreaded( "appenderator_merge_%d", 1 ) ); } if (intermediateTempExecutor == null) { // use single threaded executor with SynchronousQueue so that all abandon operations occur sequentially intermediateTempExecutor = MoreExecutors.listeningDecorator( Execs.newBlockingSingleThreaded( "appenderator_abandon_%d", 0 ) ); } }
)); if (rowsCurrentlyInMemory.get() >= tuningConfig.getMaxRowsInMemory()) { persist = true; persistReasons.add(StringUtils.format( "rowsCurrentlyInMemory[%d] is greater than maxRowsInMemory[%d]", rowsCurrentlyInMemory.get(), tuningConfig.getMaxRowsInMemory() ));
final IndexSpec indexSpec = tuningConfig.getIndexSpec(); persistedFile = indexMerger.persist( indexToPersist.getIndex(), new File(persistDir, String.valueOf(indexToPersist.getCount())), indexSpec, tuningConfig.getSegmentWriteOutMediumFactory() );
/**
 * Returns the hidden {@code .lock} file located in this task's base persist directory.
 */
private File computeLockFile()
{
  final File baseDir = tuningConfig.getBasePersistDirectory();
  return new File(baseDir, ".lock");
}
/**
 * Re-arms the time-based persist trigger: the next flush fires one
 * intermediatePersistPeriod after the current UTC time.
 */
private void resetNextFlush()
{
  nextFlush = DateTimes.nowUtc()
                       .plus(tuningConfig.getIntermediatePersistPeriod())
                       .getMillis();
}
cachePopulatorStats ); maxBytesTuningConfig = TuningConfigs.getMaxBytesInMemoryOrDefault(tuningConfig.getMaxBytesInMemory()); log.info("Created Appenderator for dataSource[%s].", schema.getDataSource());
schema.getAggregators(), mergedTarget, tuningConfig.getIndexSpec(), tuningConfig.getSegmentWriteOutMediumFactory() );