public DruidRecordWriter(DataSchema dataSchema, RealtimeTuningConfig realtimeTuningConfig,
    DataSegmentPusher dataSegmentPusher, int maxPartitionSize,
    final Path segmentsDescriptorsDir, final FileSystem fileSystem) {
  // Give every writer its own persist directory so concurrent writers do not collide.
  File basePersistDir = new File(realtimeTuningConfig.getBasePersistDirectory(), UUID.randomUUID().toString());
  this.tuningConfig = Preconditions.checkNotNull(
      realtimeTuningConfig.withBasePersistDirectory(basePersistDir), "realtimeTuningConfig is null");
  this.dataSchema = Preconditions.checkNotNull(dataSchema, "data schema is null");
  appenderator = Appenderators.createOffline(this.dataSchema, tuningConfig, new FireDepartmentMetrics(),
      dataSegmentPusher, DruidStorageHandlerUtils.JSON_MAPPER, DruidStorageHandlerUtils.INDEX_IO,
      DruidStorageHandlerUtils.INDEX_MERGER_V9);
  this.maxPartitionSize = maxPartitionSize;
  appenderator.startJob();
  this.segmentsDescriptorDir = Preconditions.checkNotNull(segmentsDescriptorsDir, "segmentsDescriptorsDir is null");
  this.fileSystem = Preconditions.checkNotNull(fileSystem, "file system is null");
  this.segmentGranularity = this.dataSchema.getGranularitySpec().getSegmentGranularity();
  // No per-row commit metadata is tracked; the appenderator always gets a no-op committer.
  committerSupplier = Suppliers.ofInstance(Committers.nil())::get;
}
// Fragment of an argument list: a per-data-source base persist directory and a
// CustomVersioningPolicy, with the surrounding arguments left as null.
null, new File(basePersistDirectory, dataSource), new CustomVersioningPolicy(version), null, null,
// Open a first partition (number 0) for the interval.
new SegmentIdentifier(dataSchema.getDataSource(), interval,
    tuningConfig.getVersioningPolicy().getVersion(interval), new LinearShardSpec(0));
return currentOpenSegment;

// Roll to the next partition number within the same interval and push the segment being closed.
new SegmentIdentifier(dataSchema.getDataSource(), interval,
    tuningConfig.getVersioningPolicy().getVersion(interval),
    new LinearShardSpec(currentOpenSegment.getShardSpec().getPartitionNum() + 1));
pushSegments(Lists.newArrayList(currentOpenSegment));

// Start a fresh interval at partition 0 and push the previously open segment.
new SegmentIdentifier(dataSchema.getDataSource(), interval,
    tuningConfig.getVersioningPolicy().getVersion(interval), new LinearShardSpec(0));
pushSegments(Lists.newArrayList(currentOpenSegment));
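/*
 * A minimal sketch of how the three SegmentIdentifier constructions above typically
 * fit together. The method name getOrRollSegment, the branch conditions, and the use
 * of appenderator.getRowCount(...) against maxPartitionSize are assumptions made here
 * for illustration, not taken verbatim from the fragments above.
 */
private SegmentIdentifier getOrRollSegment(Interval interval) {
  final String version = tuningConfig.getVersioningPolicy().getVersion(interval);
  if (currentOpenSegment == null) {
    // Nothing open yet: start the interval at partition 0.
    currentOpenSegment = new SegmentIdentifier(dataSchema.getDataSource(), interval, version,
        new LinearShardSpec(0));
  } else if (currentOpenSegment.getInterval().equals(interval)) {
    if (appenderator.getRowCount(currentOpenSegment) >= maxPartitionSize) {
      // Same interval but the partition is full: push it and open the next partition number.
      pushSegments(Lists.newArrayList(currentOpenSegment));
      currentOpenSegment = new SegmentIdentifier(dataSchema.getDataSource(), interval, version,
          new LinearShardSpec(currentOpenSegment.getShardSpec().getPartitionNum() + 1));
    }
  } else {
    // A new interval: push the previously open segment and start again at partition 0.
    pushSegments(Lists.newArrayList(currentOpenSegment));
    currentOpenSegment = new SegmentIdentifier(dataSchema.getDataSource(), interval, version,
        new LinearShardSpec(0));
  }
  return currentOpenSegment;
}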
@Override
public Object startJob() {
  computeBaseDir(schema).mkdirs();
  initializeExecutors();
  handoffNotifier.start();
  Object retVal = bootstrapSinksFromDisk();
  startPersistThread();
  // Push pending sinks bootstrapped from previous run
  mergeAndPush();
  resetNextFlush();
  return retVal;
}
@Override
public ScheduledExecutors.Signal call() throws Exception {
  log.info("Abandoning segment %s", sink.getSegment().getIdentifier());
  abandonSegment(truncatedTime, sink);
  return ScheduledExecutors.Signal.STOP;
}
}
@Override
public void run() {
  abandonSegment(sink.getInterval().getStartMillis(), sink);
  metrics.incrementHandOffCount();
}
}
private Sink getSink(long timestamp) {
  if (theSink.getInterval().contains(timestamp)) {
    return theSink;
  } else {
    return null;
  }
}
private boolean runFirehose(Firehose firehose) {
  final Supplier<Committer> committerSupplier = Committers.supplierFromFirehose(firehose);
  while (firehose.hasMore()) {
    if (Thread.interrupted() || stopping) {
      return false;
    }
    Plumbers.addNextRow(committerSupplier, firehose, plumber, config.isReportParseExceptions(), metrics);
  }
  return true;
}
@Override
public String apply(Sink input) {
  return input.getSegment().getIdentifier();
}
}
public Plumber findPlumber() {
  return plumberSchool.findPlumber(schema, metrics);
}
/**
 * If currIndex is A, creates a new index B, sets currIndex to B and returns A.
 *
 * @return the index (A) that was current before the swap
 */
public FireHydrant swap() {
  return makeNewCurrIndex(interval.getStartMillis(), schema);
}
// Fragment of a periodic scheduling call: run checkForSegmentHandoffs() immediately
// and then every pollDurationMillis milliseconds.
@Override
public void run() {
  checkForSegmentHandoffs();
}
}, 0L, pollDurationMillis, TimeUnit.MILLISECONDS
@Override
public boolean accept(long timestamp) {
  long maxTimestamp = this.maxTimestamp;
  if (timestamp > maxTimestamp) {
    maxTimestamp = tryUpdateMaxTimestamp(timestamp);
  }
  // Accept only rows no older than windowMillis behind the newest timestamp seen so far.
  return timestamp >= (maxTimestamp - windowMillis);
}
@Override
public SegmentHandoffNotifier createSegmentHandoffNotifier(String dataSource) {
  return new CoordinatorBasedSegmentHandoffNotifier(dataSource, client, config);
}
}
public CoordinatorBasedSegmentHandoffNotifier(
    String dataSource,
    CoordinatorClient coordinatorClient,
    CoordinatorBasedSegmentHandoffNotifierConfig config
) {
  this.dataSource = dataSource;
  this.coordinatorClient = coordinatorClient;
  this.pollDurationMillis = config.getPollDuration().getMillis();
}
@Override
public void close() {
  super.close();
  handoffNotifier.close();
}
@Override
public Sink getSink(long timestamp) {
  if (theSink.getInterval().contains(timestamp)) {
    return theSink;
  } else {
    return null;
  }
}
new SegmentIdentifier(dataSchema.getDataSource(), interval,
    tuningConfig.getVersioningPolicy().getVersion(interval), new LinearShardSpec(partitionNumber));
/**
 * If currHydrant is A, creates a new index B, sets currHydrant to B and returns A.
 *
 * @return the hydrant (A) that was current before the swap
 */
public FireHydrant swap() {
  return makeNewCurrIndex(interval.getStartMillis(), schema);
}