PrimaryData(Resource resource, Timestamp start, Timestamp end, ResultDescriptor resultDescriptor, Iterator<Row<Sample>> input) { m_resultDescriptor = checkNotNull(resultDescriptor, "result descriptor argument"); m_resource = checkNotNull(resource, "resource argument"); checkNotNull(start, "start argument"); checkNotNull(end, "end argument"); m_interval = resultDescriptor.getInterval(); m_timestamps = new IntervalGenerator(start.stepFloor(m_interval), end.stepCeiling(m_interval), m_interval); // Gather the whole collection of rows. // We need these since the next sample for a given metric may only appear a few rows ahead Iterators.addAll(m_samples, checkNotNull(input, "input argument")); }
/**
 * Convenience overload: floors this timestamp to the given {@link Duration}
 * by delegating to the two-argument {@code stepFloor} with the duration's
 * magnitude and unit.
 *
 * @param d the step size to floor to
 * @return the floored timestamp
 */
public Timestamp stepFloor(Duration d) {
    return stepFloor(d.getDuration(), d.getUnit());
}
/**
 * Stage that rolls interval-resolution measurements up to a coarser resolution.
 *
 * @param resource the resource whose measurements are being aggregated
 * @param start lower bound of the requested range
 * @param end upper bound of the requested range
 * @param resultDescriptor describes the datasources and the base interval
 * @param resolution the output resolution; must be a whole multiple of the interval
 * @param input the interval-resolution measurement rows to consume
 */
Aggregation(Resource resource, Timestamp start, Timestamp end, ResultDescriptor resultDescriptor, Duration resolution, Iterator<Row<Measurement>> input) {
    m_resultDescriptor = checkNotNull(resultDescriptor, "result descriptor argument");
    m_resource = checkNotNull(resource, "resource argument");
    checkNotNull(start, "start argument");
    checkNotNull(end, "end argument");
    m_resolution = checkNotNull(resolution, "resolution argument");
    m_input = checkNotNull(input, "input argument");
    Duration interval = resultDescriptor.getInterval();
    // Each output row aggregates a whole number of input intervals; reject anything else.
    checkArgument(resolution.isMultiple(interval), "resolution must be a multiple of interval");
    // Output timestamps sit on resolution boundaries fully enclosing [start, end].
    m_timestamps = new IntervalGenerator(start.stepFloor(m_resolution), end.stepCeiling(m_resolution), m_resolution);
    // Number of input intervals folded into each output row (used as a double downstream).
    m_intervalsPer = (double) resolution.divideBy(interval);
    // Seed the pipeline: first input row (if any) and first output slot (if any).
    m_working = m_input.hasNext() ? m_input.next() : null;
    m_nextOut = m_timestamps.hasNext() ? new Row<Measurement>(m_timestamps.next(), m_resource) : null;
    // If the input stream contains any Samples earlier than what's relevant, iterate past them.
    // A row belongs to the first output slot only if it falls after (nextOut - resolution).
    if (m_nextOut != null) {
        while (m_working != null && m_working.getTimestamp().lte(m_nextOut.getTimestamp().minus(m_resolution))) {
            m_working = nextWorking();
        }
    }
}
private Iterator<com.datastax.driver.core.Row> cassandraSelect(Context context, Resource resource, Timestamp start, Timestamp end) { List<Future<ResultSet>> futures = Lists.newArrayList(); Duration resourceShard = m_contextConfigurations.getResourceShard(context); Timestamp lower = start.stepFloor(resourceShard); Timestamp upper = end.stepFloor(resourceShard); for (Timestamp partition : new IntervalGenerator(lower, upper, resourceShard)) { BoundStatement bindStatement = m_selectStatement.bind(); bindStatement.setString(SchemaConstants.F_CONTEXT, context.getId()); bindStatement.setInt(SchemaConstants.F_PARTITION, (int) partition.asSeconds()); bindStatement.setString(SchemaConstants.F_RESOURCE, resource.getId()); bindStatement.setTimestamp("start", start.asDate()); bindStatement.setTimestamp("end", end.asDate()); // Use the context specific consistency level bindStatement.setConsistencyLevel(m_contextConfigurations.getReadConsistency(context)); futures.add(m_session.executeAsync(bindStatement)); } return new ConcurrentResultWrapper(futures); }
for (Timestamp partition : new IntervalGenerator(start.stepFloor(resourceShard), end.stepFloor(resourceShard), resourceShard)) { BoundStatement bindStatement = m_deleteStatement.bind(); for (Timestamp partition : new IntervalGenerator(start.stepFloor(resourceShard), end.stepFloor(resourceShard), resourceShard)) { BoundStatement bindStatement = m_deleteStatement.bind();
.value(SchemaConstants.F_PARTITION, m.getTimestamp().stepFloor(resourceShard).asSeconds()) .value(SchemaConstants.F_RESOURCE, m.getResource().getId()) .value(SchemaConstants.F_COLLECTED, m.getTimestamp().asMillis())