/**
 * Creates a {@link Timestamp} from a legacy {@link Date}.
 *
 * @param d the date to convert; must not be {@code null}
 * @return a timestamp carrying the same millisecond epoch offset as {@code d}
 */
public static Timestamp fromDate(Date d) {
    long epochMillis = d.getTime();
    return fromEpochMillis(epochMillis);
}
/**
 * Returns a human-readable form, e.g. {@code Timestamp[<date>]}, built from
 * the runtime class name and the date representation of this timestamp.
 */
@Override
public String toString() {
    String simpleName = getClass().getSimpleName();
    return String.format("%s[%s]", simpleName, asDate());
}
/**
 * Parses a timestamp from user input: a string of digits is treated as epoch
 * seconds; anything else is handed to the configured date/time parser.
 *
 * @param input the raw value to parse
 * @return the parsed timestamp
 * @throws Exception if the input is neither numeric nor a parseable date
 */
@Override
protected Timestamp parse(String input) throws Exception {
    if (input.matches("^[\\d]+$")) {
        // Parse as long: the original Integer.valueOf overflowed for epoch
        // seconds beyond 2147483647 (dates after 2038-01-19), and boxing the
        // value only to widen it to long was needless.
        return Timestamp.fromEpochSeconds(Long.parseLong(input));
    }
    DateTime dateTime = parser.parseDateTime(input);
    return Timestamp.fromEpochMillis(dateTime.getMillis());
}
/**
 * Rewrites a sample's timestamp by scaling its epoch-millisecond value by the
 * configured factor and adding the configured offset; all other sample fields
 * are carried over unchanged.
 *
 * @param s the sample to transform
 * @return a new sample identical to {@code s} except for the adjusted timestamp
 */
@Override
public Sample call(Sample s) {
    Timestamp original = s.getTimestamp();
    long scaledMillis = Math.round(original.asMillis() / m_timescaleFactor);
    Timestamp adjusted = Timestamp.fromEpochMillis(m_timeoffset + scaledMillis);
    return new Sample(adjusted, s.getResource(), s.getName(), s.getType(), s.getValue());
}
/**
 * Constructs an aggregation pass over measurement rows.
 * <p>
 * Output rows are produced one per {@code resolution} step, aligned to
 * resolution boundaries covering [start, end]. The constructor primes the
 * first input row and first output row, then discards any input rows that
 * fall entirely before the first output interval.
 *
 * @param resource         the resource being aggregated
 * @param start            start of the requested range (not retained, only validated/aligned)
 * @param end              end of the requested range (not retained, only validated/aligned)
 * @param resultDescriptor describes the metrics and their collection interval
 * @param resolution       the aggregation step; must be a whole multiple of the interval
 * @param input            the stream of measurement rows to aggregate
 * @throws NullPointerException     if any argument is {@code null}
 * @throws IllegalArgumentException if {@code resolution} is not a multiple of the interval
 */
Aggregation(Resource resource, Timestamp start, Timestamp end, ResultDescriptor resultDescriptor, Duration resolution, Iterator<Row<Measurement>> input) {
    m_resultDescriptor = checkNotNull(resultDescriptor, "result descriptor argument");
    m_resource = checkNotNull(resource, "resource argument");
    checkNotNull(start, "start argument");
    checkNotNull(end, "end argument");
    m_resolution = checkNotNull(resolution, "resolution argument");
    m_input = checkNotNull(input, "input argument");
    Duration interval = resultDescriptor.getInterval();
    checkArgument(resolution.isMultiple(interval), "resolution must be a multiple of interval");
    // One output timestamp per resolution step, expanded outward to aligned boundaries.
    m_timestamps = new IntervalGenerator(start.stepFloor(m_resolution), end.stepCeiling(m_resolution), m_resolution);
    // How many collection intervals make up one aggregated interval (used for xff-style accounting downstream).
    m_intervalsPer = (double) resolution.divideBy(interval);
    // Prime the first input row and the first pending output row (either may be absent).
    m_working = m_input.hasNext() ? m_input.next() : null;
    m_nextOut = m_timestamps.hasNext() ? new Row<Measurement>(m_timestamps.next(), m_resource) : null;
    // If the input stream contains any Samples earlier than what's relevant, iterate past them.
    if (m_nextOut != null) {
        while (m_working != null && m_working.getTimestamp().lte(m_nextOut.getTimestamp().minus(m_resolution))) {
            m_working = nextWorking();
        }
    }
}
final Timestamp start = Timestamp.now().minus(m_ttl, TimeUnit.SECONDS); final Timestamp end = Timestamp.now(); for (Timestamp partition : new IntervalGenerator(start.stepFloor(resourceShard), end.stepFloor(resourceShard), resourceShard)) { BoundStatement bindStatement = m_deleteStatement.bind(); bindStatement.setString(SchemaConstants.F_CONTEXT, context.getId()); bindStatement.setInt(SchemaConstants.F_PARTITION, (int) partition.asSeconds()); bindStatement.setString(SchemaConstants.F_RESOURCE, resource.getId()); Timestamp end = Timestamp.now(); Timestamp start = end.minus(DELETION_INTERVAL, TimeUnit.DAYS); for (Timestamp partition : new IntervalGenerator(start.stepFloor(resourceShard), end.stepFloor(resourceShard), resourceShard)) { BoundStatement bindStatement = m_deleteStatement.bind(); bindStatement.setString(SchemaConstants.F_CONTEXT, context.getId()); bindStatement.setInt(SchemaConstants.F_PARTITION, (int) partition.asSeconds()); bindStatement.setString(SchemaConstants.F_RESOURCE, resource.getId()); start = end.minus(DELETION_INTERVAL, TimeUnit.DAYS);
Timestamp now = Timestamp.now(); int ttl = m_ttl; if (calculateTimeToLive) { ttl -= (int) (now.asSeconds() - m.getTimestamp().asSeconds()); if (ttl <= 0) { LOG.debug("Skipping expired sample: {}", m); .value(SchemaConstants.F_PARTITION, m.getTimestamp().stepFloor(resourceShard).asSeconds()) .value(SchemaConstants.F_RESOURCE, m.getResource().getId()) .value(SchemaConstants.F_COLLECTED, m.getTimestamp().asMillis()) .value(SchemaConstants.F_METRIC_NAME, m.getName()) .value(SchemaConstants.F_VALUE, ValueType.decompose(m.getValue()));
private Iterator<com.datastax.driver.core.Row> cassandraSelect(Context context, Resource resource, Timestamp start, Timestamp end) { List<Future<ResultSet>> futures = Lists.newArrayList(); Duration resourceShard = m_contextConfigurations.getResourceShard(context); Timestamp lower = start.stepFloor(resourceShard); Timestamp upper = end.stepFloor(resourceShard); for (Timestamp partition : new IntervalGenerator(lower, upper, resourceShard)) { BoundStatement bindStatement = m_selectStatement.bind(); bindStatement.setString(SchemaConstants.F_CONTEXT, context.getId()); bindStatement.setInt(SchemaConstants.F_PARTITION, (int) partition.asSeconds()); bindStatement.setString(SchemaConstants.F_RESOURCE, resource.getId()); bindStatement.setTimestamp("start", start.asDate()); bindStatement.setTimestamp("end", end.asDate()); // Use the context specific consistency level bindStatement.setConsistencyLevel(m_contextConfigurations.getReadConsistency(context)); futures.add(m_session.executeAsync(bindStatement)); } return new ConcurrentResultWrapper(futures); }
if (intervalCeiling.lt(last.getTimestamp())) { break; if (lastIntervalCeiling != null && lastIntervalCeiling.gt(lowerBound)) { lowerBound = lastIntervalCeiling; if (intervalCeiling.lt(upperBound)) { upperBound = intervalCeiling; if (lowerBound.gt(upperBound)) { lowerBound = upperBound; Duration elapsedWithinInterval = upperBound.minus(lowerBound); Duration elapsedBetweenSamples = current.getTimestamp().minus(last.getTimestamp());
/**
 * Tells whether the current working row falls inside the half-open interval
 * {@code (nextOut - resolution, nextOut]} of the pending output row.
 *
 * @return {@code true} only when both a working row and a pending output row
 *         exist and the working row's timestamp lies in that interval
 */
private boolean inRange() {
    if (m_working == null || m_nextOut == null) {
        return false;
    }
    Timestamp upper = m_nextOut.getTimestamp();
    Timestamp lower = upper.minus(m_resolution);
    Timestamp candidate = m_working.getTimestamp();
    return candidate.lte(upper) && candidate.gt(lower);
}
/**
 * Computes the elapsed duration from {@code t} (the earlier timestamp) up to
 * this one, expressed in the finer of the two timestamps' units.
 *
 * @param t the earlier timestamp to subtract
 * @return the non-negative duration between the two timestamps
 * @throws IllegalArgumentException if {@code t} is later than this timestamp
 */
public Duration minus(Timestamp t) {
    if (t.gt(this)) {
        throw new IllegalArgumentException("you can only subtract an earlier date from a later one... negative durations don't make sense");
    }
    // Convert both operands to the finer unit before subtracting so no
    // precision is lost.
    TimeUnit finest = finest(m_unit, t.getUnit());
    long difference = convert(finest) - t.convert(finest);
    return new Duration(difference, finest);
}
PrimaryData(Resource resource, Timestamp start, Timestamp end, ResultDescriptor resultDescriptor, Iterator<Row<Sample>> input) { m_resultDescriptor = checkNotNull(resultDescriptor, "result descriptor argument"); m_resource = checkNotNull(resource, "resource argument"); checkNotNull(start, "start argument"); checkNotNull(end, "end argument"); m_interval = resultDescriptor.getInterval(); m_timestamps = new IntervalGenerator(start.stepFloor(m_interval), end.stepCeiling(m_interval), m_interval); // Gather the whole collection of rows. // We need these since the next sample for a given metric may only appear a few rows ahead Iterators.addAll(m_samples, checkNotNull(input, "input argument")); }
/**
 * Selects stored samples for {@code resource} within the given bounds.
 * <p>
 * When {@code end} is absent, "now" is used; when {@code start} is absent, the
 * window defaults to the 24 hours preceding the upper bound.
 *
 * @param context  the context whose configuration governs the query
 * @param resource the resource whose samples are fetched
 * @param start    optional inclusive lower bound
 * @param end      optional inclusive upper bound
 * @return all matching samples, grouped into rows
 */
@Override
public Results<Sample> select(Context context, Resource resource, Optional<Timestamp> start, Optional<Timestamp> end) {
    Timer.Context timer = m_sampleSelectTimer.time();
    // Stop the timer on every exit path. The original stopped it only around
    // the final return, so an exception from validation, the Cassandra query,
    // or row iteration leaked the timer context and skewed the metric.
    try {
        validateSelect(start, end);
        Timestamp upper = end.isPresent() ? end.get() : Timestamp.now();
        // Default window: the 24 hours (86400s) preceding the upper bound.
        Timestamp lower = start.isPresent() ? start.get() : upper.minus(Duration.seconds(86400));
        LOG.debug("Querying database for resource {}, from {} to {}", resource, lower, upper);
        Results<Sample> samples = new Results<>();
        DriverAdapter driverAdapter = new DriverAdapter(cassandraSelect(context, resource, lower, upper));
        for (Row<Sample> row : driverAdapter) {
            samples.addRow(row);
        }
        LOG.debug("{} results returned from database", driverAdapter.getResultCount());
        m_samplesSelected.mark(driverAdapter.getResultCount());
        return samples;
    } finally {
        timer.stop();
    }
}
/**
 * Streams sample rows for a measurement query over [start - step, end],
 * restricted to the source metrics named by {@code descriptor}.
 * <p>
 * NOTE(review): the lower bound is widened by one {@code step} — presumably so
 * downstream processing has a sample preceding the range; confirm against callers.
 *
 * @param context    the context whose configuration governs the query
 * @param resource   the resource whose samples are fetched
 * @param start      inclusive lower bound (before widening)
 * @param end        inclusive upper bound
 * @param descriptor names the source metrics to keep
 * @param step       the amount by which the lower bound is widened
 * @return an iterable of matching sample rows
 */
public Iterable<Results.Row<Sample>> select(Context context, Resource resource, Timestamp start, Timestamp end, ResultDescriptor descriptor, Duration step) {
    Timestamp widenedStart = start.minus(step);
    Iterator<com.datastax.driver.core.Row> rows = cassandraSelect(context, resource, widenedStart, end);
    return new DriverAdapter(rows, descriptor.getSourceNames());
}
/**
 * Creates a millisecond-precision {@link Timestamp} from an epoch offset.
 *
 * @param millis milliseconds since the UNIX epoch
 * @return the corresponding timestamp
 */
public static Timestamp fromEpochMillis(long millis) {
    return new Timestamp(millis, TimeUnit.MILLISECONDS);
}
/**
 * Parses one line of plain-text input into a {@link Sample}.
 * <p>
 * Expected shape (per the tokenizers configured on this class):
 * {@code <dotted.path> <value> <epoch-seconds>}. All path elements but the
 * last name the resource; the final element is the metric name. A
 * single-element path is used as the resource with the metric name "value".
 *
 * @param line the raw input line
 * @return the parsed sample
 * @throws NumberFormatException if the value or timestamp field is not numeric
 */
static Sample parseSample(String line) {
    List<String> parts = s_lineTokenizer.splitToList(line);
    // new String[0] is the idiomatic (and on modern JVMs fastest) seed array.
    String[] path = s_pathTokenizer.splitToList(parts.get(0)).toArray(new String[0]);
    Resource resource = parseResource(path.length > 1 ? Arrays.copyOf(path, path.length - 1) : path);
    String name = path.length > 1 ? path[path.length - 1] : "value";
    // Parse straight to primitives; the original boxed these as Double/Long
    // only to unbox them immediately.
    double value = Double.parseDouble(parts.get(1));
    long stamp = Long.parseLong(parts.get(2));
    return sample(fromEpochSeconds(stamp), resource, name, value);
}