/**
 * Return the exclusive end of the granularity-sized bucket containing a particular DateTime.
 */
public DateTime bucketEnd(DateTime time)
{
  return increment(bucketStart(time));
}
@Override
public Interval next()
{
  if (!hasNext()) {
    throw new NoSuchElementException("There are no more intervals");
  }
  Interval retVal = new Interval(currStart, currEnd);
  currStart = currEnd;
  currEnd = increment(currStart);
  return retVal;
}
/**
 * Return a granularity-sized Interval containing a particular DateTime.
 */
public final Interval bucket(DateTime t)
{
  DateTime start = bucketStart(t);
  return new Interval(start, increment(start));
}
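// A minimal usage sketch, assuming Druid's Granularities.DAY constant and the
// DateTimes helper used elsewhere in this section: bucketStart() truncates down to
// the bucket boundary, bucketEnd() is the increment of that start, and bucket()
// packages the two as a half-open Interval.
DateTime t = DateTimes.of("2019-05-07T13:45:00Z");
DateTime start = Granularities.DAY.bucketStart(t); // 2019-05-07T00:00:00Z
DateTime end = Granularities.DAY.bucketEnd(t);     // 2019-05-08T00:00:00Z
Interval bucket = Granularities.DAY.bucket(t);     // 2019-05-07/2019-05-08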
private IntervalIterator(Interval inputInterval)
{
  this.inputInterval = inputInterval;
  currStart = bucketStart(inputInterval.getStart());
  currEnd = increment(currStart);
}
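// Hedged sketch of the iteration this constructor sets up, assuming the public
// Granularity.getIterable() wrapper around it: the first bucket starts at
// bucketStart() of the input's start, and each next() call advances one granule
// via increment() until the input interval is covered.
Interval span = new Interval(DateTimes.of("2019-05-07T06:00Z"), DateTimes.of("2019-05-09T18:00Z"));
for (Interval bucket : Granularities.DAY.getIterable(span)) {
  // Yields 2019-05-07/05-08, 2019-05-08/05-09, 2019-05-09/05-10.
}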
Duration initialDelay = new Duration(
    System.currentTimeMillis(),
    segmentGranularity.increment(truncatedNow).getMillis() + windowMillis
);
Duration rate = new Duration(truncatedNow, segmentGranularity.increment(truncatedNow));
ScheduledExecutors.scheduleAtFixedRate(scheduledExecutor, initialDelay, rate, threadRenamingCallable);
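// Worked example of the arithmetic above, with hypothetical values: for HOUR
// segment granularity, windowMillis of 10 minutes, and "now" at 10:17, truncatedNow
// is 10:00 and increment(truncatedNow) is 11:00, so initialDelay expires at 11:10
// (segment end plus window) and the task then repeats at rate = one hour.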
for (FileStatus fileStatus : FSSpideringIterator.spiderIterable(fs, new Path(bucketOutput))) {
  if (fileStatus.getModificationTime() > mTime) {
    bucketsToRun.add(new Interval(timeBucket, segmentGranularity.increment(timeBucket)));
    break;
  }
}
public Interval getInterval()
{
  DateTime min = DateTimes.utc(minTimestamp);
  return new Interval(min, isEmpty() ? min : gran.increment(DateTimes.utc(getMaxTimeMillis())));
}
Duration initialDelay = new Duration(
    System.currentTimeMillis(),
    schema.getGranularitySpec().getSegmentGranularity().increment(truncatedNow).getMillis() + windowMillis
);
Duration rate = new Duration(truncatedNow, segmentGranularity.increment(truncatedNow));
ScheduledExecutors.scheduleAtFixedRate(flushScheduledExec, initialDelay, rate, threadRenamingCallable);
IncrementalIndexCursor(
    VirtualColumns virtualColumns,
    boolean descending,
    Filter filter,
    Interval interval,
    Interval actualInterval,
    Granularity gran
)
{
  currEntry = new IncrementalIndexRowHolder();
  columnSelectorFactory = new IncrementalIndexColumnSelectorFactory(index, virtualColumns, descending, currEntry);
  // Set maxRowIndex before creating the filterMatcher. See https://github.com/apache/incubator-druid/pull/6340
  maxRowIndex = index.getLastRowIndex();
  filterMatcher = filter == null ? BooleanValueMatcher.of(true) : filter.makeMatcher(columnSelectorFactory);
  numAdvanced = -1;
  final long timeStart = Math.max(interval.getStartMillis(), actualInterval.getStartMillis());
  cursorIterable = index.getFacts().timeRangeIterable(
      descending,
      timeStart,
      Math.min(actualInterval.getEndMillis(), gran.increment(interval.getStart()).getMillis())
  );
  emptyRange = !cursorIterable.iterator().hasNext();
  time = gran.toDateTime(interval.getStartMillis());
  reset();
}
private SegmentIdWithShardSpec getSegmentIdentifier(long timestamp)
{
  if (!rejectionPolicy.accept(timestamp)) {
    return null;
  }

  final Granularity segmentGranularity = schema.getGranularitySpec().getSegmentGranularity();
  final VersioningPolicy versioningPolicy = config.getVersioningPolicy();

  DateTime truncatedDateTime = segmentGranularity.bucketStart(DateTimes.utc(timestamp));
  final long truncatedTime = truncatedDateTime.getMillis();

  SegmentIdWithShardSpec retVal = segments.get(truncatedTime);
  if (retVal == null) {
    final Interval interval = new Interval(
        truncatedDateTime,
        segmentGranularity.increment(truncatedDateTime)
    );

    retVal = new SegmentIdWithShardSpec(
        schema.getDataSource(),
        interval,
        versioningPolicy.getVersion(interval),
        config.getShardSpec()
    );
    addSegment(retVal);
  }

  return retVal;
}
private Sink getSink(long timestamp)
{
  if (!rejectionPolicy.accept(timestamp)) {
    return null;
  }

  final Granularity segmentGranularity = schema.getGranularitySpec().getSegmentGranularity();
  final VersioningPolicy versioningPolicy = config.getVersioningPolicy();

  DateTime truncatedDateTime = segmentGranularity.bucketStart(DateTimes.utc(timestamp));
  final long truncatedTime = truncatedDateTime.getMillis();

  Sink retVal = sinks.get(truncatedTime);
  if (retVal == null) {
    final Interval sinkInterval = new Interval(
        truncatedDateTime,
        segmentGranularity.increment(truncatedDateTime)
    );

    retVal = new Sink(
        sinkInterval,
        schema,
        config.getShardSpec(),
        versioningPolicy.getVersion(sinkInterval),
        config.getMaxRowsInMemory(),
        TuningConfigs.getMaxBytesInMemoryOrDefault(config.getMaxBytesInMemory()),
        config.isReportParseExceptions(),
        config.getDedupColumn()
    );
    addSink(retVal);
  }

  return retVal;
}
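// Hedged sketch of the bucket-keyed caching both methods above rely on, assuming
// HOUR segment granularity: every timestamp inside one granularity bucket truncates
// to the same millis key, so repeated lookups resolve to the same cached segment
// identifier or sink.
long key1 = Granularities.HOUR.bucketStart(DateTimes.of("2019-05-07T10:05Z")).getMillis();
long key2 = Granularities.HOUR.bucketStart(DateTimes.of("2019-05-07T10:55Z")).getMillis();
// key1 == key2, so both rows land in the sink for 2019-05-07T10/2019-05-07T11.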
final long timeEnd = Math.min(
    interval.getEndMillis(),
    gran.increment(inputInterval.getStart()).getMillis()
);
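// Hedged reading of the clamp above: the cursor's time range ends at whichever
// comes first, the query interval's end or the end of the current granularity
// bucket, so one cursor never spans more than a single bucket. For example, with
// DAY granularity, a bucket starting 2019-05-07T00:00Z, and a query interval
// ending 2019-05-07T18:00Z, timeEnd = min(05-07T18:00, 05-08T00:00) = 18:00.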