// Snaps the raw epoch-millis timestamp to the start of its segment-granularity bucket, then
// builds the covering interval [bucketStart, bucketStart + one granularity period).
// NOTE(review): fragment of a larger method — `timestamp` and `segmentGranularity` are defined upstream.
DateTime truncatedDateTime = segmentGranularity.bucketStart(DateTimes.utc(timestamp)); final Interval interval = new Interval(truncatedDateTime, segmentGranularity.increment(truncatedDateTime));
/**
 * Builds the Druid {@link GranularitySpec} for an indexing task from table properties,
 * falling back to Hive configuration defaults when a property is absent.
 *
 * @param configuration   Hive configuration supplying default segment granularity and rollup
 * @param tableProperties table-level overrides for segment granularity, rollup, and query granularity
 * @return a {@code UniformGranularitySpec} with the resolved segment granularity, query
 *         granularity (defaulting to "NONE"), rollup flag, and no explicit intervals
 */
public static GranularitySpec getGranularitySpec(Configuration configuration, Properties tableProperties) {
  final String segmentGranularity =
      tableProperties.getProperty(Constants.DRUID_SEGMENT_GRANULARITY) != null
          ? tableProperties.getProperty(Constants.DRUID_SEGMENT_GRANULARITY)
          : HiveConf.getVar(configuration, HiveConf.ConfVars.HIVE_DRUID_INDEXING_GRANULARITY);
  // BUG FIX: the rollup flag was previously parsed from the DRUID_SEGMENT_GRANULARITY property
  // (a copy-paste error); it must read the DRUID_ROLLUP property that was just null-checked.
  final boolean rollup =
      tableProperties.getProperty(DruidConstants.DRUID_ROLLUP) != null
          ? Boolean.parseBoolean(tableProperties.getProperty(DruidConstants.DRUID_ROLLUP))
          : HiveConf.getBoolVar(configuration, HiveConf.ConfVars.HIVE_DRUID_ROLLUP);
  return new UniformGranularitySpec(
      Granularity.fromString(segmentGranularity),
      Granularity.fromString(
          tableProperties.getProperty(DruidConstants.DRUID_QUERY_GRANULARITY) == null
              ? "NONE"
              : tableProperties.getProperty(DruidConstants.DRUID_QUERY_GRANULARITY)),
      rollup,
      null);
}
// Truncates the row's default timestamp column to the start of its DAY bucket, as epoch millis.
// NOTE(review): fragment — the trailing closing parens and .build() belong to an enclosing
// builder expression that is not visible here.
Granularities .DAY .bucketStart(new DateTime((long) input.get(DruidConstants.DEFAULT_TIMESTAMP_COLUMN))) .getMillis()) .build()))
// Truncates the row timestamp to the rollup granularity (as epoch millis), then computes the
// segment-granularity bucket interval containing the row's epoch timestamp.
// NOTE(review): fragment of a larger method — the first expression is an argument to an
// enclosing call not visible here.
rollupGranularity.bucketStart(inputRow.getTimestamp()).getMillis(), inputRow ); interval = config.getGranularitySpec() .getSegmentGranularity() .bucket(DateTimes.utc(inputRow.getTimestampFromEpoch()));
// Derives the bucket DateTime from a segment file path, then spiders the segment-output
// directory for that time bucket: if any file there was modified after the recorded input
// modification time, the bucket's interval is queued for (re)processing.
// NOTE(review): fragment — enclosing loop and string-format call are partially cut off;
// braces here are unbalanced by design of the excerpt.
final DateTime key = segmentGranularity.toDate(status.getPath().toString()); final Long currVal = inputModifiedTimes.get(key.getMillis()); final long mTime = status.getModificationTime(); "%s/%s", config.getSchema().getIOConfig().getSegmentOutputPath(), segmentGranularity.toPath(timeBucket) ); for (FileStatus fileStatus : FSSpideringIterator.spiderIterable(fs, new Path(bucketOutput))) { if (fileStatus.getModificationTime() > mTime) { bucketsToRun.add(new Interval(timeBucket, segmentGranularity.increment(timeBucket))); break;
/**
 * Cursor over the incremental index restricted to a single granularity bucket.
 *
 * Initialization order matters here: the row holder and column selector factory are created
 * first, then the filter matcher (which captures this cursor), and only then the iteration
 * range. maxRowIndex is pinned at construction so rows appended to the index afterwards are
 * not observed by this cursor. The scan range starts at max(bucket start, clamped query
 * interval start) and ends at min(clamped query interval end, end of the granularity bucket
 * containing the bucket start). emptyRange is precomputed so callers can short-circuit.
 */
IncrementalIndexCursor( VirtualColumns virtualColumns, boolean descending, Filter filter, Interval interval, Interval actualInterval, Granularity gran ) { currEntry = new TimeAndDimsHolder(); columnSelectorFactory = new IncrementalIndexColumnSelectorFactory(index, virtualColumns, descending, currEntry); filterMatcher = makeFilterMatcher(filter, this); numAdvanced = -1; maxRowIndex = index.getLastRowIndex(); final long timeStart = Math.max(interval.getStartMillis(), actualInterval.getStartMillis()); cursorIterable = index.getFacts().timeRangeIterable( descending, timeStart, Math.min(actualInterval.getEndMillis(), gran.increment(interval.getStart()).getMillis()) ); emptyRange = !cursorIterable.iterator().hasNext(); time = gran.toDateTime(interval.getStartMillis()); reset(); }
/**
 * Returns the current bucket interval and slides the window forward to the next bucket.
 *
 * @return the interval [currStart, currEnd) prior to advancing
 * @throws NoSuchElementException if the iteration is exhausted
 */
@Override
public Interval next() {
  if (!hasNext()) {
    throw new NoSuchElementException("There are no more intervals");
  }
  final Interval current = new Interval(currStart, currEnd);
  // The old end becomes the new start; the new end is one granularity step later.
  currStart = currEnd;
  currEnd = increment(currStart);
  return current;
}
/**
 * Jackson-deserializable uniform granularity spec. Null arguments fall back to defaults:
 * query granularity -> DEFAULT_QUERY_GRANULARITY, rollup -> true, segment granularity ->
 * DEFAULT_SEGMENT_GRANULARITY. When input intervals are supplied, each is expanded into
 * segment-granularity buckets and an ArbitraryGranularitySpec wrapping the bucketed list is
 * created; otherwise inputIntervals and wrappedSpec remain null.
 *
 * NOTE(review): the wrapped spec receives the raw (possibly null) queryGranularity/rollup
 * arguments rather than the defaulted fields — confirm ArbitraryGranularitySpec applies the
 * same null defaults.
 */
@JsonCreator public UniformGranularitySpec( @JsonProperty("segmentGranularity") Granularity segmentGranularity, @JsonProperty("queryGranularity") Granularity queryGranularity, @JsonProperty("rollup") Boolean rollup, @JsonProperty("intervals") List<Interval> inputIntervals ) { this.queryGranularity = queryGranularity == null ? DEFAULT_QUERY_GRANULARITY : queryGranularity; this.rollup = rollup == null ? Boolean.TRUE : rollup; this.segmentGranularity = segmentGranularity == null ? DEFAULT_SEGMENT_GRANULARITY : segmentGranularity; if (inputIntervals != null) { List<Interval> granularIntervals = Lists.newArrayList(); for (Interval inputInterval : inputIntervals) { Iterables.addAll(granularIntervals, this.segmentGranularity.getIterable(inputInterval)); } this.inputIntervals = ImmutableList.copyOf(inputIntervals); this.wrappedSpec = new ArbitraryGranularitySpec(queryGranularity, rollup, granularIntervals); } else { this.inputIntervals = null; this.wrappedSpec = null; } }
/**
 * Returns every standard granularity at least as fine as {@code gran0}, sorted from coarsest
 * to finest. ALL and NONE are excluded: when asked for granularities finer than TEN_MINUTE
 * the expected answer is FIVE_MINUTE, MINUTE and SECOND — neither ALL nor NONE belongs in
 * that list. A period-based input propagates its origin and time zone to the candidates.
 *
 * @param gran0 the reference granularity
 * @return candidates whose epoch bucket duration is {@code <=} that of {@code gran0}
 */
public static List<Granularity> granularitiesFinerThan(final Granularity gran0) {
  final DateTime origin = (gran0 instanceof PeriodGranularity) ? ((PeriodGranularity) gran0).getOrigin() : null;
  final DateTimeZone tz = (gran0 instanceof PeriodGranularity) ? ((PeriodGranularity) gran0).getTimeZone() : null;
  // Duration of the reference bucket at the epoch; loop-invariant, so computed once.
  final long referenceMillis = gran0.bucket(DateTimes.EPOCH).toDurationMillis();
  final List<Granularity> finer = Lists.newArrayList();
  for (GranularityType type : GranularityType.values()) {
    if (type == GranularityType.ALL || type == GranularityType.NONE) {
      continue;
    }
    final Granularity candidate = type.create(origin, tz);
    if (candidate.bucket(DateTimes.EPOCH).toDurationMillis() <= referenceMillis) {
      finer.add(candidate);
    }
  }
  // Coarsest first: descending by epoch-bucket duration.
  finer.sort((a, b) -> Longs.compare(
      b.bucket(DateTimes.EPOCH).toDurationMillis(),
      a.bucket(DateTimes.EPOCH).toDurationMillis()));
  return finer;
}
private SegmentIdentifier tryAllocateFirstSegment(TaskActionToolbox toolbox, Task task, Interval rowInterval) throws IOException { // No existing segments for this row, but there might still be nearby ones that conflict with our preferred // segment granularity. Try that first, and then progressively smaller ones if it fails. final List<Interval> tryIntervals = Granularity.granularitiesFinerThan(preferredSegmentGranularity) .stream() .map(granularity -> granularity.bucket(timestamp)) .collect(Collectors.toList()); for (Interval tryInterval : tryIntervals) { if (tryInterval.contains(rowInterval)) { final SegmentIdentifier identifier = tryAllocate(toolbox, task, tryInterval, rowInterval, false); if (identifier != null) { return identifier; } } } return null; }
// Expands the input interval into data-granularity buckets, trimming each bucket to the
// input bounds, and renders a path for time `t` — via the custom formatter when one is set,
// otherwise via the granularity's default path format.
// NOTE(review): fragment — the if/else braces belong to an enclosing method not fully visible.
for (Interval interval : dataGranularity.getIterable(inputInterval)) { intervals.add(trim(inputInterval, interval)); intervalPath = customFormatter.print(t); } else { intervalPath = dataGranularity.toPath(t);
/**
 * Creates a lazy sequence of cursors, one per granularity bucket that overlaps both the
 * query interval and the data actually held by this index.
 *
 * @return an empty sequence when the index holds no rows or the query interval misses the data
 */
@Override
public Sequence<Cursor> makeCursors(
    final Filter filter,
    final Interval interval,
    final VirtualColumns virtualColumns,
    final Granularity gran,
    final boolean descending,
    @Nullable QueryMetrics<?> queryMetrics
)
{
  if (index.isEmpty()) {
    return Sequences.empty();
  }
  // Span covered by the index, extended to the end of the bucket containing the max time.
  final Interval dataInterval = new Interval(getMinTime(), gran.bucketEnd(getMaxTime()));
  if (!interval.overlaps(dataInterval)) {
    return Sequences.empty();
  }
  // Clamp the query interval to the data, then enumerate its granularity buckets.
  final Interval actualInterval = interval.overlap(dataInterval);
  Iterable<Interval> buckets = gran.getIterable(actualInterval);
  if (descending) {
    buckets = Lists.reverse(ImmutableList.copyOf(buckets));
  }
  return Sequences
      .simple(buckets)
      .map(bucket -> new IncrementalIndexCursor(virtualColumns, descending, filter, bucket, actualInterval, gran));
}
// Parses a granularity-bucketed path string into its bucket DateTime using the default
// path formatter; convenience overload delegating to toDate(String, Formatter).
public DateTime toDate(String filePath) { return toDate(filePath, Formatter.DEFAULT); }
/**
 * Formats an epoch-millis value truncated to the start of its granularity bucket.
 * When no formatter is configured, the raw truncated millis are returned as a string.
 */
@Override
public String apply(long value) {
  final long bucketStartMillis = granularity.bucketStart(DateTimes.utc(value)).getMillis();
  if (formatter == null) {
    return String.valueOf(bucketStartMillis);
  }
  return formatter.print(bucketStartMillis);
}
// Resolves the segment bucket interval for the row: directly from the segment granularity
// when no explicit intervals were configured, otherwise by looking the timestamp up in the
// spec's configured buckets. The query-granularity bucket start is passed along with the row.
// NOTE(review): fragment — the else-branch body and the enclosing call are cut off.
interval = granularitySpec.getSegmentGranularity().bucket(inputRow.getTimestamp()); } else { final Optional<Interval> optInterval = granularitySpec.bucketInterval(inputRow.getTimestamp()); queryGranularity.bucketStart(inputRow.getTimestamp()).getMillis(), inputRow );
// Ends the scan at whichever comes first: the interval's end or the end of the granularity
// bucket containing the input interval's start; also records that bucket's start DateTime.
// NOTE(review): fragment — `columnCache` and the closing paren belong to an enclosing call
// not visible here.
final long timeEnd = Math.min( interval.getEndMillis(), gran.increment(inputInterval.getStart()).getMillis() ); columnCache ); final DateTime myBucket = gran.toDateTime(inputInterval.getStartMillis());
/**
 * Returns the overall time span of this index: from the minimum timestamp to the end of the
 * granularity bucket following the maximum time. An empty index yields a zero-length
 * interval anchored at the minimum timestamp.
 */
public Interval getInterval() {
  final DateTime start = DateTimes.utc(minTimestamp);
  if (isEmpty()) {
    return new Interval(start, start);
  }
  return new Interval(start, gran.increment(DateTimes.utc(getMaxTimeMillis())));
}
/**
 * If "query" has a single universal timestamp, return it. Otherwise return null. This is useful
 * for keeping timestamps in sync across partial queries that may have different intervals.
 *
 * @param query the query
 *
 * @return universal timestamp, or null
 */
public static DateTime getUniversalTimestamp(final GroupByQuery query) {
  final String fudgeTimestamp = query.getContextValue(CTX_KEY_FUDGE_TIMESTAMP, "");
  if (!fudgeTimestamp.isEmpty()) {
    // An explicit context override wins over anything derived from the granularity.
    return DateTimes.utc(Long.parseLong(fudgeTimestamp));
  }
  final Granularity gran = query.getGranularity();
  if (Granularities.ALL.equals(gran)) {
    // With ALL granularity every row lands in a single bucket; use that bucket's start.
    final DateTime timeStart = query.getIntervals().get(0).getStart();
    return gran.getIterable(new Interval(timeStart, timeStart.plus(1))).iterator().next().getStart();
  }
  return null;
}
// The query-granularity bucket (as an Interval) containing the row timestamp.
final Interval rowInterval = queryGranularity.bucket(timestamp);
// Snaps the already-truncated epoch millis to the start of its segment-granularity bucket and
// builds the covering interval [bucketStart, bucketStart + one granularity period).
// NOTE(review): fragment — `truncatedTime` and `segmentGranularity` are defined upstream.
DateTime truncatedDateTime = segmentGranularity.bucketStart(DateTimes.utc(truncatedTime)); final Interval interval = new Interval(truncatedDateTime, segmentGranularity.increment(truncatedDateTime));