@Override
public ScheduledExecutors.Signal doCall()
{
  if (stopped) {
    log.info("Stopping flusher thread");
    return ScheduledExecutors.Signal.STOP;
  }

  // Flush every sink whose bucket key precedes bucketStart(currMaxTime - windowMillis).
  final long flushThreshold = segmentGranularity.bucketStart(
      getRejectionPolicy().getCurrMaxTime().minus(windowMillis)
  ).getMillis();

  // Collect first, then flush, so we don't mutate while iterating the sink map.
  final List<Map.Entry<Long, Sink>> eligible = Lists.newArrayList();
  for (Map.Entry<Long, Sink> candidate : getSinks().entrySet()) {
    if (candidate.getKey() < flushThreshold) {
      log.info("Adding entry[%s] to flush.", candidate);
      eligible.add(candidate);
    }
  }

  for (final Map.Entry<Long, Sink> flushable : eligible) {
    flushAfterDuration(flushable.getKey(), flushable.getValue());
  }

  // Re-check after flushing: stop may have been requested while we worked.
  if (stopped) {
    log.info("Stopping flusher thread");
    return ScheduledExecutors.Signal.STOP;
  }
  return ScheduledExecutors.Signal.REPEAT;
}
};
@Override
public Sink getSink(long timestamp)
{
  // Timestamps outside the rejection window never get a sink.
  if (!rejectionPolicy.accept(timestamp)) {
    return null;
  }

  final long bucketKey = segmentGranularity.truncate(timestamp);
  Sink sink = sinks.get(bucketKey);

  if (sink == null) {
    // Lazily create a sink covering exactly one segment-granularity bucket.
    final DateTime bucketStart = new DateTime(bucketKey);
    final Interval sinkInterval = new Interval(bucketStart, segmentGranularity.increment(bucketStart));
    sink = new Sink(sinkInterval, schema, versioningPolicy.getVersion(sinkInterval));

    try {
      // Announce before registering; on failure the sink is returned but NOT cached
      // or added to the timeline (matches the original ordering).
      segmentAnnouncer.announceSegment(sink.getSegment());
      sinks.put(bucketKey, sink);
      sinkTimeline.add(sink.getInterval(), sink.getVersion(), new SingleElementPartitionChunk<Sink>(sink));
    }
    catch (IOException e) {
      log.makeAlert(e, "Failed to announce new segment[%s]", schema.getDataSource())
         .addData("interval", sink.getInterval())
         .emit();
    }
  }

  return sink;
}
private SegmentIdentifier getSegmentIdentifier(long timestamp)
{
  // Rows outside the rejection window map to no segment.
  if (!rejectionPolicy.accept(timestamp)) {
    return null;
  }

  final Granularity segmentGranularity = schema.getGranularitySpec().getSegmentGranularity();
  final VersioningPolicy versioningPolicy = config.getVersioningPolicy();

  final DateTime bucketStart = segmentGranularity.bucketStart(DateTimes.utc(timestamp));
  final long bucketKey = bucketStart.getMillis();

  // Reuse a previously created identifier for this bucket if we have one.
  final SegmentIdentifier existing = segments.get(bucketKey);
  if (existing != null) {
    return existing;
  }

  final Interval interval = new Interval(bucketStart, segmentGranularity.increment(bucketStart));
  final SegmentIdentifier identifier = new SegmentIdentifier(
      schema.getDataSource(),
      interval,
      versioningPolicy.getVersion(interval),
      config.getShardSpec()
  );
  addSegment(identifier);
  return identifier;
}
@Override
public ScheduledExecutors.Signal doCall()
{
  if (stopped) {
    log.info("Stopping flusher thread");
    return ScheduledExecutors.Signal.STOP;
  }

  // Flush every sink whose bucket key precedes truncate(currMaxTime - windowMillis).
  final long flushThreshold = getSegmentGranularity().truncate(
      getRejectionPolicy().getCurrMaxTime().minus(windowMillis)
  ).getMillis();

  // Collect first, then flush, so we don't mutate while iterating the sink map.
  final List<Map.Entry<Long, Sink>> eligible = Lists.newArrayList();
  for (Map.Entry<Long, Sink> candidate : getSinks().entrySet()) {
    if (candidate.getKey() < flushThreshold) {
      log.info("Adding entry[%s] to flush.", candidate);
      eligible.add(candidate);
    }
  }

  for (final Map.Entry<Long, Sink> flushable : eligible) {
    flushAfterDuration(flushable.getKey(), flushable.getValue());
  }

  // Re-check after flushing: stop may have been requested while we worked.
  if (stopped) {
    log.info("Stopping flusher thread");
    return ScheduledExecutors.Signal.STOP;
  }
  return ScheduledExecutors.Signal.REPEAT;
}
}
private Sink getSink(long timestamp)
{
  // Timestamps outside the rejection window never get a sink.
  if (!rejectionPolicy.accept(timestamp)) {
    return null;
  }

  final Granularity segmentGranularity = schema.getGranularitySpec().getSegmentGranularity();
  final VersioningPolicy versioningPolicy = config.getVersioningPolicy();

  final DateTime bucketStart = segmentGranularity.bucketStart(DateTimes.utc(timestamp));
  final long bucketKey = bucketStart.getMillis();

  // Reuse an existing sink for this bucket if one was already created.
  final Sink existing = sinks.get(bucketKey);
  if (existing != null) {
    return existing;
  }

  // Otherwise build a sink spanning exactly one segment-granularity bucket and register it.
  final Interval sinkInterval = new Interval(bucketStart, segmentGranularity.increment(bucketStart));
  final Sink sink = new Sink(
      sinkInterval,
      schema,
      config.getShardSpec(),
      versioningPolicy.getVersion(sinkInterval),
      config.getMaxRowsInMemory(),
      config.isReportParseExceptions()
  );
  addSink(sink);
  return sink;
}
rejectionPolicy.getCurrMaxTime().minus(windowMillis) ); long minTimestamp = minTimestampAsDate.getMillis();
// Compute the cutoff for merge-and-push: bucketStart(max(windowMillis, currMaxTime) - windowMillis).
// The Math.max clamp guarantees the subtraction never produces a negative epoch-millis value
// when currMaxTime is smaller than the window.
// NOTE(review): presumably the loop that follows (not visible here) pushes sinks whose bucket
// key is < minTimestamp — confirm against the surrounding method.
log.info("Starting merge and push."); DateTime minTimestampAsDate = segmentGranularity.bucketStart( DateTimes.utc(Math.max(windowMillis, rejectionPolicy.getCurrMaxTime().getMillis()) - windowMillis) ); long minTimestamp = minTimestampAsDate.getMillis();
// Compute the cutoff for merge-and-push: bucketStart(max(windowMillis, currMaxTime) - windowMillis).
// The Math.max clamp guarantees the subtraction never produces a negative epoch-millis value
// when currMaxTime is smaller than the window.
// NOTE(review): presumably the loop that follows (not visible here) pushes sinks whose bucket
// key is < minTimestamp — confirm against the surrounding method.
log.info("Starting merge and push."); DateTime minTimestampAsDate = segmentGranularity.bucketStart( DateTimes.utc(Math.max(windowMillis, rejectionPolicy.getCurrMaxTime().getMillis()) - windowMillis) ); long minTimestamp = minTimestampAsDate.getMillis();