/**
 * Starts the plumber's background machinery and returns whatever
 * {@code bootstrapSinksFromDisk()} produced (presumably commit/offset
 * metadata recovered from a previous run — confirm against the interface's
 * contract, which is not visible in this chunk).
 *
 * Call order matters: executors must exist before the persist thread starts,
 * and sinks must be bootstrapped before they can be merged and pushed.
 */
@Override
public Object startJob()
{
  // Ensure the on-disk base directory for this schema exists.
  computeBaseDir(schema).mkdirs();
  initializeExecutors();
  // Begin listening for segment-handoff notifications.
  handoffNotifier.start();
  // Recover sinks persisted by a previous run; its return value is
  // propagated to the caller unchanged.
  Object retVal = bootstrapSinksFromDisk();
  startPersistThread();
  // Push pending sinks bootstrapped from previous run
  mergeAndPush();
  resetNextFlush();
  return retVal;
}
persistAndMerge(entry.getKey(), entry.getValue()); shutdownExecutors();
// Persist-thread work unit: flushes every queued hydrant to disk, counting the
// rows written, then runs the caller-supplied commit action. The commit runs
// only after ALL hydrants have been persisted — order matters here.
@Override
public void doRun()
{
  for (Pair<FireHydrant, Interval> pair : indexesToPersist) {
    // persistHydrant returns the number of rows written for this hydrant.
    metrics.incrementRowOutputCount(persistHydrant(pair.lhs, schema, pair.rhs));
  }
  // Signal that this batch of persists is complete (e.g. commit offsets —
  // the runnable's origin is not visible in this chunk).
  commitRunnable.run();
}
} // closes the enclosing anonymous class, whose declaration is outside this chunk
/**
 * Lifecycle start hook (void variant): prepares the on-disk layout, starts the
 * executors, restores sinks persisted by a previous run, subscribes to server
 * view updates, and finally starts the background persist thread.
 *
 * The sequence is order-dependent — executors must be initialized before
 * bootstrapping, and the persist thread is started last.
 */
@Override
public void startJob()
{
  // Ensure the base directory for this schema exists on disk.
  computeBaseDir(schema).mkdirs();
  initializeExecutors();
  // Restore any sinks left behind by a previous run.
  bootstrapSinksFromDisk();
  // Watch the cluster's server view (presumably to detect handoff — confirm
  // against registerServerViewCallback, not visible here).
  registerServerViewCallback();
  startPersistThread();
}
if (!hydrant.hasSwapped()) { log.info("Hydrant[%s] hasn't swapped yet, swapping. Sink[%s]", hydrant, sink); final int rowCount = persistHydrant(hydrant, schema, interval); metrics.incrementRowOutputCount(rowCount); final File mergedTarget = new File(computePersistDir(schema, interval), "merged"); if (mergedTarget.exists()) { log.info("Skipping already-merged sink: %s", sink); if (shuttingDown) { abandonSegment(truncatedTime, sink);
File baseDir = computeBaseDir(schema); if (baseDir == null || !baseDir.exists()) { return null; File corruptSegmentDir = computeCorruptedFileDumpDir(segmentDir, schema); log.info("Renaming %s to %s", segmentDir.getAbsolutePath(), corruptSegmentDir.getAbsolutePath()); FileUtils.copyDirectory(segmentDir, corruptSegmentDir); hydrants ); addSink(currSink);
/**
 * Resolves the directory under the schema's base dir where data for
 * {@code interval} is persisted. The interval's ISO-8601 string contains a
 * {@code '/'} separator, which is not a legal path component, so it is
 * substituted with {@code '_'}.
 */
protected File computePersistDir(Schema schema, Interval interval)
{
  final String dirName = interval.toString().replace("/", "_");
  return new File(computeBaseDir(schema), dirName);
}
/**
 * Unannounces a given sink and removes all local references to it.
 *
 * Steps, in order: stop advertising the segment, delete its persist directory,
 * drop it from the sink map and timeline, then wake any threads blocked on
 * {@code handoffCondition}. An {@link IOException} anywhere in that sequence
 * aborts the remaining steps and is reported via an alert rather than thrown.
 *
 * @param truncatedTime key under which the sink is registered in {@code sinks}
 *                      (presumably the segment interval's truncated start
 *                      millis — confirm against callers)
 * @param sink          the sink to abandon
 */
protected void abandonSegment(final long truncatedTime, final Sink sink)
{
  try {
    // Stop advertising this segment to the cluster.
    segmentAnnouncer.unannounceSegment(sink.getSegment());
    // Remove the persisted data for this sink's interval from disk.
    FileUtils.deleteDirectory(computePersistDir(schema, sink.getInterval()));
    log.info("Removing sinkKey %d for segment %s", truncatedTime, sink.getSegment().getIdentifier());
    // Drop local bookkeeping: the keyed sink map and the version timeline.
    sinks.remove(truncatedTime);
    sinkTimeline.remove(
        sink.getInterval(),
        sink.getVersion(),
        new SingleElementPartitionChunk<>(sink)
    );
    // Wake up anyone waiting for a handoff to complete (e.g. finishJob).
    synchronized (handoffCondition) {
      handoffCondition.notifyAll();
    }
  }
  catch (IOException e) {
    // Best-effort cleanup: emit an alert instead of propagating. NOTE(review):
    // if deleteDirectory fails, the sink remains in sinks/sinkTimeline.
    log.makeAlert(e, "Unable to abandon old segment for dataSource[%s]", schema.getDataSource())
       .addData("interval", sink.getInterval())
       .emit();
  }
}
try { segmentAnnouncer.unannounceSegment(sink.getSegment()); removeSegment(sink, computePersistDir(schema, sink.getInterval())); log.info("Removing sinkKey %d for segment %s", truncatedTime, sink.getSegment().getIdentifier()); sinks.remove(truncatedTime);
// Handoff callback: once the segment has been picked up elsewhere, drop all
// local state for the sink (keyed by its interval's start millis) and bump
// the handoff metric.
@Override
public void run()
{
  abandonSegment(sink.getInterval().getStartMillis(), sink);
  metrics.incrementHandOffCount();
}
} // closes the enclosing anonymous class, whose declaration is outside this chunk
/**
 * Creates a {@link Plumber} for the given schema: verifies this school's
 * injected state, then wires a new {@code RealtimePlumber} with the school's
 * collaborators plus a handoff notifier scoped to the schema's dataSource.
 *
 * @param schema  data schema the plumber will ingest
 * @param config  realtime tuning configuration
 * @param metrics sink for ingestion metrics
 * @return a freshly constructed RealtimePlumber (a new notifier is created
 *         per call — one per plumber)
 */
@Override
public Plumber findPlumber(
    final DataSchema schema,
    final RealtimeTuningConfig config,
    final FireDepartmentMetrics metrics
)
{
  // Fails fast if required collaborators were not injected.
  verifyState();
  return new RealtimePlumber(
      schema,
      config,
      metrics,
      emitter,
      conglomerate,
      segmentAnnouncer,
      queryExecutorService,
      dataSegmentPusher,
      segmentPublisher,
      handoffNotifierFactory.createSegmentHandoffNotifier(schema.getDataSource()),
      indexMergerV9,
      indexIO,
      cache,
      cacheConfig,
      objectMapper
  );
}
persistAndMerge(entry.getKey(), entry.getValue());
final File persistDir = computePersistDir(schema, interval); final File mergedTarget = new File(persistDir, "merged"); final File isPushedMarker = new File(persistDir, "isPushedMarker"); if (!hydrant.hasSwapped()) { log.info("Hydrant[%s] hasn't swapped yet, swapping. Sink[%s]", hydrant, sink); final int rowCount = persistHydrant(hydrant, schema, interval, null); metrics.incrementRowOutputCount(rowCount); abandonSegment(truncatedTime, sink);
/**
 * Returns the persist directory for {@code interval} beneath the schema's
 * base directory. ISO-8601 interval strings use {@code '/'} as the
 * start/end separator, which cannot appear in a file name, so it is
 * replaced with {@code '_'}.
 */
protected File computePersistDir(DataSchema schema, Interval interval)
{
  final File baseDir = computeBaseDir(schema);
  // Single-character replace: '/' -> '_' (equivalent to replace("/", "_")).
  return new File(baseDir, interval.toString().replace('/', '_'));
}
new File(computePersistDir(schema, interval), String.valueOf(indexToPersist.getCount())) );
if (segmentVersion.compareTo(sinkVersion) >= 0) { log.info("Segment version[%s] >= sink version[%s]", segmentVersion, sinkVersion); abandonSegment(sinkKey, sink);
/**
 * Creates a {@link Plumber} for the given schema (legacy two-argument form):
 * verifies injected state, builds the window-period rejection policy, logs it,
 * and constructs a {@code RealtimePlumber} from this school's collaborators.
 *
 * @param schema  data schema the plumber will ingest
 * @param metrics sink for ingestion metrics
 * @return a freshly constructed RealtimePlumber
 */
@Override
public Plumber findPlumber(final Schema schema, final FireDepartmentMetrics metrics)
{
  // Fail fast if required collaborators were not injected.
  verifyState();

  // Decide how late/early rows are rejected, based on the window period.
  final RejectionPolicy policy = rejectionPolicyFactory.create(windowPeriod);
  log.info("Creating plumber using rejectionPolicy[%s]", policy);

  return new RealtimePlumber(
      windowPeriod,
      basePersistDirectory,
      segmentGranularity,
      schema,
      metrics,
      policy,
      emitter,
      conglomerate,
      segmentAnnouncer,
      queryExecutorService,
      versioningPolicy,
      dataSegmentPusher,
      segmentPublisher,
      serverView,
      maxPendingPersists
  );
}
persistAndMerge(entry.getKey(), entry.getValue()); shutdownExecutors();
protected void bootstrapSinksFromDisk() File baseDir = computeBaseDir(schema); if (baseDir == null || !baseDir.exists()) { return;
indexToPersist.getIndex(), interval, new File(computePersistDir(schema, interval), String.valueOf(indexToPersist.getCount())), indexSpec, config.getSegmentWriteOutMediumFactory()