@Override
public String toString() {
    // We wrap this in a toString() method to avoid building the string
    // unless the log message is actually printed
    return samples.stream()
            .map(s -> s.getResource().getId())
            .distinct()
            .collect(Collectors.joining(", "));
}
});
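This fragment is the tail of an anonymous class handed to a parameterized logger call. A minimal sketch of the surrounding pattern, assuming an SLF4J logger and the Newts Sample type from these snippets (the method and logger names are illustrative, not taken from the source):

import java.util.List;
import java.util.stream.Collectors;
import org.opennms.newts.api.Sample;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class LazyLogArg {
    private static final Logger LOG = LoggerFactory.getLogger(LazyLogArg.class);

    static void logResourceIds(List<Sample> samples) {
        // SLF4J only calls toString() on the argument when the message is
        // actually rendered, so the stream/join below is skipped entirely
        // when DEBUG logging is disabled.
        LOG.debug("Inserted samples for resources {}", new Object() {
            @Override
            public String toString() {
                return samples.stream()
                        .map(s -> s.getResource().getId())
                        .distinct()
                        .collect(Collectors.joining(", "));
            }
        });
    }
}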
@Override
public String toString() {
    return String.format("%s[timestamp=%s, context=%s, resource=%s, name=%s, type=%s, value=%s]",
            getClass().getSimpleName(),
            getTimestamp(),
            getContext(),
            getResource(),
            getName(),
            getType(),
            getValue());
}
/**
 * Construct a new {@link DriverAdapter}.
 *
 * @param input
 *            an iterator over the rows of a Cassandra driver {@link com.datastax.driver.core.ResultSet}
 * @param metrics
 *            the set of result metrics to include; an empty set indicates that all metrics
 *            should be included
 */
DriverAdapter(Iterator<com.datastax.driver.core.Row> input, Set<String> metrics) {
    m_results = checkNotNull(input, "input argument");
    m_metrics = checkNotNull(metrics, "metrics argument");

    if (m_results.hasNext()) {
        Sample m = getNextSample();
        m_next = new Results.Row<>(m.getTimestamp(), m.getResource());
        addSample(m_next, m);
    }
}
@Override
public void onEvent(SampleBatchEvent event) throws Exception {
    // We'd expect the logs from this thread to be in collectd.log
    Logging.putPrefix("collectd");

    List<Sample> samples = event.getSamples();

    // Decrement our entry counter
    m_numEntriesOnRingBuffer.decrementAndGet();

    // Partition the samples into collections smaller than max_batch_size
    for (List<Sample> batch : Lists.partition(samples, m_maxBatchSize)) {
        try {
            if (event.isIndexOnly() && !NewtsUtils.DISABLE_INDEXING) {
                LOG.debug("Indexing {} samples", batch.size());
                m_indexer.update(batch);
            } else {
                LOG.debug("Inserting {} samples", batch.size());
                m_sampleRepository.insert(batch);
            }

            if (LOG.isDebugEnabled()) {
                String uniqueResourceIds = batch.stream()
                        .map(s -> s.getResource().getId())
                        .distinct()
                        .collect(Collectors.joining(", "));
                LOG.debug("Successfully inserted samples for resources with ids {}", uniqueResourceIds);
            }
        } catch (Throwable t) {
            RATE_LIMITED_LOGGER.error("An error occurred while inserting samples. Some samples may be lost.", t);
        }
    }
}
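The batching above relies on Guava's Lists.partition, which splits a list into consecutive sublists of at most the given size (the final sublist may be smaller). A self-contained sketch of those semantics, with illustrative sizes:

import com.google.common.collect.Lists;
import java.util.Arrays;
import java.util.List;

public class PartitionDemo {
    public static void main(String[] args) {
        List<Integer> samples = Arrays.asList(1, 2, 3, 4, 5, 6, 7);
        // Prints [1, 2, 3], [4, 5, 6], [7] -- consecutive views of the
        // backing list, each no larger than the requested batch size.
        for (List<Integer> batch : Lists.partition(samples, 3)) {
            System.out.println(batch);
        }
    }
}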
maybeIndexResource(cacheQueue, generators, sample.getContext(), sample.getResource());
maybeIndexResourceAttributes(cacheQueue, generators, sample.getContext(), sample.getResource());
maybeAddMetricName(cacheQueue, generators, sample.getContext(), sample.getResource(), sample.getName());
@Override
public Sample call(Sample s) {
    Timestamp oldTs = s.getTimestamp();
    Timestamp newTs = Timestamp.fromEpochMillis(m_timeoffset + Math.round(oldTs.asMillis() / m_timescaleFactor));
    return new Sample(newTs, s.getResource(), s.getName(), s.getType(), s.getValue());
}
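Assuming m_timeoffset is an epoch-millisecond base and m_timescaleFactor a floating-point divisor (assumptions; their declarations are not shown in these snippets), the remapping scales the original timestamp and shifts it onto the new base. A worked example with illustrative values:

// Hypothetical values, for illustration only:
long timeoffset = 1_000_000_000_000L; // new epoch base, in ms
double timescaleFactor = 2.0;          // 2x compression of the original timeline

long oldMillis = 600_000;              // original timestamp (kept small for readability)
long newMillis = timeoffset + Math.round(oldMillis / timescaleFactor);
// newMillis == 1_000_000_300_000: the sample lands 300 seconds after the new base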
@Override
public Results.Row<Sample> next() {
    if (!hasNext()) throw new NoSuchElementException();

    Results.Row<Sample> nextNext = null;

    // Drain samples that share the current row's timestamp; the first sample
    // with a later timestamp starts the next row and is buffered for the
    // following call to next().
    while (m_results.hasNext()) {
        Sample m = getNextSample();

        if (m.getTimestamp().gt(m_next.getTimestamp())) {
            nextNext = new Results.Row<>(m.getTimestamp(), m.getResource());
            addSample(nextNext, m);
            break;
        }

        addSample(m_next, m);
    }

    // Return the completed row, then advance the lookahead buffer.
    try {
        return m_next;
    } finally {
        m_next = nextNext;
    }
}
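Taken together with the constructor above, the adapter groups consecutive driver rows into per-timestamp Results.Row buckets. A hedged usage sketch; the query plumbing and the getElements() accessor are assumptions, not shown in these snippets:

// Hypothetical wiring, for illustration only: obtain a row iterator from a
// driver ResultSet (ResultSet implements Iterable<Row>) and walk the groups.
Iterator<com.datastax.driver.core.Row> rows = resultSet.iterator();
DriverAdapter adapter = new DriverAdapter(rows, Collections.emptySet()); // empty set: keep all metrics

while (adapter.hasNext()) {
    Results.Row<Sample> row = adapter.next();
    // All samples in this row share a single timestamp.
    System.out.println(row.getTimestamp() + " -> " + row.getElements());
}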
private Sample getRate(Sample sample) {
    ValueType<?> value = NAN;
    Sample previous = m_prevSamples.get(sample.getName());

    if (previous != null) {
        long elapsed = sample.getTimestamp().asSeconds() - previous.getTimestamp().asSeconds();
        try {
            // Rate is the delta between successive values divided by the elapsed seconds
            value = new Gauge(sample.getValue().delta(previous.getValue()).doubleValue() / elapsed);
        } catch (ArithmeticException e) {
            value = NAN;
        }
    }

    return new Sample(sample.getTimestamp(), sample.getResource(), sample.getName(), GAUGE, value, sample.getAttributes());
}
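A worked example of the counter-to-rate conversion, assuming a monotonically increasing counter (the numbers are illustrative):

// Previous sample: counter = 1000 at t = 0s
// Current sample:  counter = 1600 at t = 30s
double delta = 1600 - 1000;    // counter increase between the two samples
long elapsed = 30;             // seconds between the two samples
double rate = delta / elapsed; // 20.0 counts per second, emitted as a GAUGE sample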
.value(SchemaConstants.F_CONTEXT, m.getContext().getId())
.value(SchemaConstants.F_PARTITION, m.getTimestamp().stepFloor(resourceShard).asSeconds())
.value(SchemaConstants.F_RESOURCE, m.getResource().getId())
.value(SchemaConstants.F_COLLECTED, m.getTimestamp().asMillis())
.value(SchemaConstants.F_METRIC_NAME, m.getName())
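These chained calls are the middle of an insert statement. A minimal sketch of the enclosing statement, assuming the DataStax QueryBuilder and a samples-table constant (the T_SAMPLES name is an assumption; the snippet does not show it):

import static com.datastax.driver.core.querybuilder.QueryBuilder.insertInto;
import com.datastax.driver.core.querybuilder.Insert;

// Hypothetical reconstruction of the enclosing statement:
Insert insert = insertInto(SchemaConstants.T_SAMPLES)
        .value(SchemaConstants.F_CONTEXT, m.getContext().getId())
        .value(SchemaConstants.F_PARTITION, m.getTimestamp().stepFloor(resourceShard).asSeconds())
        .value(SchemaConstants.F_RESOURCE, m.getResource().getId())
        .value(SchemaConstants.F_COLLECTED, m.getTimestamp().asMillis())
        .value(SchemaConstants.F_METRIC_NAME, m.getName());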
@Override
public String call(List<Sample> samples) {
    JSONBuilder bldr = new JSONBuilder();

    for (Sample sample : samples) {
        if (isNaN(sample)) continue;
        //System.err.println("Importing: " + sample);

        bldr.newObject();
        bldr.attr("timestamp", sample.getTimestamp().asMillis());
        bldr.attr("resource", sample.getResource().getId());
        bldr.attr("name", sample.getName());
        bldr.attr("type", sample.getType().name());

        if (sample.getType() == MetricType.GAUGE) {
            bldr.attr("value", sample.getValue().doubleValue());
        } else {
            bldr.attr("value", sample.getValue().longValue());
        }
    }

    return bldr.toString();
}
};
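Assuming JSONBuilder emits one object per newObject() call (an assumption; the class is not shown in these snippets), the serialized output for a single gauge sample would look roughly like:

// Hypothetical output shape, inferred from the attr() calls above;
// the field values are illustrative:
// [ { "timestamp": 1510000000000,
//     "resource": "node[1].interfaceSnmp[eth0]",
//     "name": "ifInOctets",
//     "type": "GAUGE",
//     "value": 42.5 } ]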