@LifecycleStart
public void start()
{
  serverAnnouncer.announce();
  fireChiefExecutor = Execs.multiThreaded(fireDepartments.size(), "chief-%d");
  for (final FireDepartment fireDepartment : fireDepartments) {
    final DataSchema schema = fireDepartment.getDataSchema();
    final FireChief chief = new FireChief(fireDepartment, conglomerate);
    // Register each chief under its dataSource, keyed by partition number.
    chiefs.computeIfAbsent(schema.getDataSource(), k -> new HashMap<>())
          .put(fireDepartment.getTuningConfig().getShardSpec().getPartitionNum(), chief);
    fireChiefExecutor.submit(chief);
  }
}
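A minimal sketch of the two-level registry that start() builds, assuming plain JDK types only; the dataSource name and the String standing in for FireChief are hypothetical:

import java.util.HashMap;
import java.util.Map;

public class ChiefRegistrySketch
{
  public static void main(String[] args)
  {
    // dataSource -> partitionNum -> chief (String stands in for FireChief)
    final Map<String, Map<Integer, String>> chiefs = new HashMap<>();

    // computeIfAbsent creates the inner map the first time a dataSource is
    // seen, so departments on the same dataSource share one partition map.
    chiefs.computeIfAbsent("wikipedia", k -> new HashMap<>()).put(0, "chief-0");
    chiefs.computeIfAbsent("wikipedia", k -> new HashMap<>()).put(1, "chief-1");

    System.out.println(chiefs); // {wikipedia={0=chief-0, 1=chief-1}}
  }
}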
"%s-overseer-%d", schema.getDataSource(), config.getShardSpec().getPartitionNum() ); ThreadRenamingCallable<ScheduledExecutors.Signal> threadRenamingCallable =
"%s-overseer-%d", schema.getDataSource(), config.getShardSpec().getPartitionNum() ); ThreadRenamingCallable<ScheduledExecutors.Signal> threadRenamingCallable =
private static String makeTaskId(FireDepartment fireDepartment)
{
  return makeTaskId(
      fireDepartment.getDataSchema().getDataSource(),
      fireDepartment.getTuningConfig().getShardSpec().getPartitionNum(),
      DateTimes.nowUtc(),
      makeRandomId()
  );
}
@Override
public Appenderator build(
    final DataSchema schema,
    final RealtimeTuningConfig config,
    final FireDepartmentMetrics metrics
)
{
  return Appenderators.createRealtime(
      schema,
      config.withBasePersistDirectory(
          makeBasePersistSubdirectory(
              config.getBasePersistDirectory(),
              schema.getDataSource(),
              config.getShardSpec()
          )
      ),
      metrics,
      dataSegmentPusher,
      objectMapper,
      indexIO,
      indexMerger,
      conglomerate,
      segmentAnnouncer,
      emitter,
      queryExecutorService,
      cache,
      cacheConfig,
      cachePopulatorStats
  );
}
sinkInterval,
versioningPolicy.getVersion(sinkInterval),
config.getShardSpec()

sinkInterval,
schema,
config.getShardSpec(),
versioningPolicy.getVersion(sinkInterval),
config.getMaxRowsInMemory(),
interval,
schema,
config.getShardSpec(),
version,
config.getMaxRowsInMemory(),
"%s-flusher-%d", getSchema().getDataSource(), getConfig().getShardSpec().getPartitionNum() ); ThreadRenamingCallable<ScheduledExecutors.Signal> threadRenamingCallable =
private SegmentIdWithShardSpec getSegmentIdentifier(long timestamp)
{
  if (!rejectionPolicy.accept(timestamp)) {
    return null;
  }

  final Granularity segmentGranularity = schema.getGranularitySpec().getSegmentGranularity();
  final VersioningPolicy versioningPolicy = config.getVersioningPolicy();

  // Truncate the event timestamp to the start of its segment-granularity
  // bucket and use the bucket's start millis as the cache key.
  DateTime truncatedDateTime = segmentGranularity.bucketStart(DateTimes.utc(timestamp));
  final long truncatedTime = truncatedDateTime.getMillis();

  SegmentIdWithShardSpec retVal = segments.get(truncatedTime);
  if (retVal == null) {
    final Interval interval = new Interval(
        truncatedDateTime,
        segmentGranularity.increment(truncatedDateTime)
    );

    retVal = new SegmentIdWithShardSpec(
        schema.getDataSource(),
        interval,
        versioningPolicy.getVersion(interval),
        config.getShardSpec()
    );
    addSegment(retVal);
  }

  return retVal;
}
private Sink getSink(long timestamp)
{
  if (!rejectionPolicy.accept(timestamp)) {
    return null;
  }

  final Granularity segmentGranularity = schema.getGranularitySpec().getSegmentGranularity();
  final VersioningPolicy versioningPolicy = config.getVersioningPolicy();

  // Same bucketing as getSegmentIdentifier: key sinks by the start millis of
  // the segment-granularity bucket containing the timestamp.
  DateTime truncatedDateTime = segmentGranularity.bucketStart(DateTimes.utc(timestamp));
  final long truncatedTime = truncatedDateTime.getMillis();

  Sink retVal = sinks.get(truncatedTime);
  if (retVal == null) {
    final Interval sinkInterval = new Interval(
        truncatedDateTime,
        segmentGranularity.increment(truncatedDateTime)
    );

    retVal = new Sink(
        sinkInterval,
        schema,
        config.getShardSpec(),
        versioningPolicy.getVersion(sinkInterval),
        config.getMaxRowsInMemory(),
        TuningConfigs.getMaxBytesInMemoryOrDefault(config.getMaxBytesInMemory()),
        config.isReportParseExceptions(),
        config.getDedupColumn()
    );
    addSink(retVal);
  }

  return retVal;
}
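getSegmentIdentifier and getSink share one get-or-create pattern: truncate the event timestamp to its segment-granularity bucket and key a map by the bucket's start millis. A minimal self-contained sketch of that pattern, assuming only joda-time, with hourly truncation standing in for segmentGranularity.bucketStart():

import java.util.HashMap;
import java.util.Map;

import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
import org.joda.time.Interval;

public class BucketLookupSketch
{
  private static final Map<Long, Interval> BUCKETS = new HashMap<>();

  static Interval bucketFor(long timestamp)
  {
    // Truncate to the bucket start (hourly here) and cache by its millis.
    final DateTime start = new DateTime(timestamp, DateTimeZone.UTC).hourOfDay().roundFloorCopy();
    return BUCKETS.computeIfAbsent(
        start.getMillis(),
        millis -> new Interval(start, start.plusHours(1))
    );
  }

  public static void main(String[] args)
  {
    final Interval a = bucketFor(DateTime.parse("2014-12-01T12:34:56.789Z").getMillis());
    final Interval b = bucketFor(DateTime.parse("2014-12-01T12:59:59.999Z").getMillis());
    System.out.println(a.equals(b)); // true: same hourly bucket, same cached interval
  }
}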
Intervals.utc(0, TimeUnit.HOURS.toMillis(1)),
schema,
tuningConfig.getShardSpec(),
DateTimes.of("2014-12-01T12:34:56.789").toString(),
tuningConfig.getMaxRowsInMemory(),
new SegmentDescriptor(
    sink.getInterval(),
    sink.getVersion(),
    config.getShardSpec().getPartitionNum()
),
mergeExecutor,
new Runnable()
testInterval,
schema2,
tuningConfig.getShardSpec(),
DateTimes.of("2014-12-01T12:34:56.789").toString(),
tuningConfig.getMaxRowsInMemory(),
@Test
public void testSerdeWithDefaults() throws Exception
{
  String jsonStr = "{\"type\":\"realtime\"}";

  ObjectMapper mapper = TestHelper.makeJsonMapper();
  // Round-trip: deserialize, reserialize, deserialize again, then verify
  // that every default value survived serde intact.
  RealtimeTuningConfig config = (RealtimeTuningConfig) mapper.readValue(
      mapper.writeValueAsString(
          mapper.readValue(
              jsonStr,
              TuningConfig.class
          )
      ),
      TuningConfig.class
  );

  Assert.assertNotNull(config.getBasePersistDirectory());
  Assert.assertEquals(0, config.getHandoffConditionTimeout());
  Assert.assertEquals(0, config.getAlertTimeout());
  Assert.assertEquals(new IndexSpec(), config.getIndexSpec());
  Assert.assertEquals(new Period("PT10M"), config.getIntermediatePersistPeriod());
  Assert.assertEquals(NoneShardSpec.instance(), config.getShardSpec());
  Assert.assertEquals(0, config.getMaxPendingPersists());
  Assert.assertEquals(1000000, config.getMaxRowsInMemory());
  Assert.assertEquals(0, config.getMergeThreadPriority());
  Assert.assertEquals(0, config.getPersistThreadPriority());
  Assert.assertEquals(new Period("PT10M"), config.getWindowPeriod());
  Assert.assertEquals(false, config.isReportParseExceptions());
}
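The test's read-write-read round trip is the standard way to check that Jackson defaults survive serde. A minimal sketch of the same pattern, assuming only jackson-databind; the Config POJO and its field are stand-ins, not RealtimeTuningConfig:

import com.fasterxml.jackson.databind.ObjectMapper;

public class SerdeRoundTripSketch
{
  // Tiny stand-in for RealtimeTuningConfig; the field keeps its default
  // when the JSON document omits it.
  public static class Config
  {
    public int maxRowsInMemory = 1000000;
  }

  public static void main(String[] args) throws Exception
  {
    final ObjectMapper mapper = new ObjectMapper();
    final Config once = mapper.readValue("{}", Config.class);
    final Config twice = mapper.readValue(mapper.writeValueAsString(once), Config.class);
    System.out.println(twice.maxRowsInMemory); // 1000000: the default survived serde
  }
}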
Assert.assertEquals(new IndexSpec(), config.getIndexSpec());
Assert.assertEquals(new Period("PT1H"), config.getIntermediatePersistPeriod());
Assert.assertEquals(NoneShardSpec.instance(), config.getShardSpec());
Assert.assertEquals(100, config.getMaxPendingPersists());
Assert.assertEquals(100, config.getMaxRowsInMemory());
interval,
schema,
tuningConfig.getShardSpec(),
version,
tuningConfig.getMaxRowsInMemory(),
Intervals.of("0/P5000Y"), schema, tuningConfig.getShardSpec(), DateTimes.nowUtc().toString(), tuningConfig.getMaxRowsInMemory(), Intervals.of("0/P5000Y"), schema2, tuningConfig.getShardSpec(), DateTimes.nowUtc().toString(), tuningConfig.getMaxRowsInMemory(),
interval,
schema,
tuningConfig.getShardSpec(),
version,
tuningConfig.getMaxRowsInMemory(),