/**
 * Reports dataset write metrics for a completed batch of write operations.
 * No-op when no metrics collector has been configured.
 *
 * @param numOps   number of write operations performed (also counted toward total ops)
 * @param dataSize number of bytes written
 */
private void reportWrite(int numOps, int dataSize) {
  if (metricsCollector == null) {
    return;
  }
  metricsCollector.increment(Constants.Metrics.Name.Dataset.WRITE_COUNT, numOps);
  metricsCollector.increment(Constants.Metrics.Name.Dataset.WRITE_BYTES, dataSize);
  metricsCollector.increment(Constants.Metrics.Name.Dataset.OP_COUNT, numOps);
}
/**
 * Emits the write-side dataset metrics (operation count, byte count, and the
 * aggregate op count) for one batch. Silently skips reporting when the
 * metrics collector is absent.
 *
 * @param numOps   how many write operations were executed
 * @param dataSize total bytes written by those operations
 */
private void reportWrite(int numOps, int dataSize) {
  if (metricsCollector != null) {
    // Three counters track the same event from different angles: writes,
    // bytes, and the dataset-wide operation total.
    metricsCollector.increment(Constants.Metrics.Name.Dataset.WRITE_COUNT, numOps);
    metricsCollector.increment(Constants.Metrics.Name.Dataset.WRITE_BYTES, dataSize);
    metricsCollector.increment(Constants.Metrics.Name.Dataset.OP_COUNT, numOps);
  }
}
/**
 * Increments the named metric by the given delta, if a metrics context is
 * available; otherwise does nothing.
 *
 * @param metricName name of the metric to increment
 * @param value      amount to add to the metric
 */
private void incrementMetric(String metricName, long value) {
  if (metrics == null) {
    return;
  }
  metrics.increment(metricName, value);
}
/**
 * Adds {@code value} to the metric identified by {@code metricName}.
 * A missing metrics context makes this a no-op rather than an error.
 *
 * @param metricName metric to bump
 * @param value      increment amount
 */
private void incrementMetric(String metricName, long value) {
  // Metrics are optional; only report when a context was injected.
  if (metrics != null) {
    metrics.increment(metricName, value);
  }
}
// NOTE(review): this span appears to be a corrupted/garbled fragment of a message-persist
// routine, not a complete definition. "return null;" is immediately followed by a dangling
// argument list (pendingStoreRequest.getEndTimestamp(), ...), and the trailing
// "throw new IOException(..." is unterminated — this cannot compile as written. It also
// increments "persist.success" unconditionally right after "persist.requested", before any
// outcome is known, which looks like lines spliced out of order during extraction.
// Recover the original method from version control before editing; left byte-identical here.
pendingStoreQueue.enqueue(pendingStoreRequest); metricsCollector.increment("persist.requested", 1L); metricsCollector.increment("persist.success", 1L); if (!pendingStoreRequest.isTransactional()) { return null; pendingStoreRequest.getEndTimestamp(), pendingStoreRequest.getEndSequenceId()); } else { metricsCollector.increment("persist.failure", 1L); Throwables.propagateIfInstanceOf(pendingStoreRequest.getFailureCause(), IOException.class); throw new IOException("Unable to write message to " + storeRequest.getTopicId(),
// NOTE(review): duplicated copy of the corrupted persist fragment above-described behavior:
// a dangling expression follows "return null;" and the final "throw new IOException(..." is
// unterminated, so this text is syntactically invalid in isolation. The success metric is
// bumped before any failure branch is evaluated — presumably an artifact of line scrambling,
// not intent. Do not edit in place; restore the full method from the original source.
pendingStoreQueue.enqueue(pendingStoreRequest); metricsCollector.increment("persist.requested", 1L); metricsCollector.increment("persist.success", 1L); if (!pendingStoreRequest.isTransactional()) { return null; pendingStoreRequest.getEndTimestamp(), pendingStoreRequest.getEndSequenceId()); } else { metricsCollector.increment("persist.failure", 1L); Throwables.propagateIfInstanceOf(pendingStoreRequest.getFailureCause(), IOException.class); throw new IOException("Unable to write message to " + storeRequest.getTopicId(),
public void add(List<Fact> facts) { // Simply collecting all rows/cols/values that need to be put to the underlying table. NavigableMap<byte[], NavigableMap<byte[], byte[]>> gaugesTable = Maps.newTreeMap(Bytes.BYTES_COMPARATOR); NavigableMap<byte[], NavigableMap<byte[], byte[]>> incrementsTable = Maps.newTreeMap(Bytes.BYTES_COMPARATOR); for (Fact fact : facts) { for (Measurement measurement : fact.getMeasurements()) { byte[] rowKey = codec.createRowKey(fact.getDimensionValues(), measurement.getName(), fact.getTimestamp()); byte[] column = codec.createColumn(fact.getTimestamp()); if (MeasureType.COUNTER == measurement.getType()) { inc(incrementsTable, rowKey, column, measurement.getValue()); } else { set(gaugesTable, rowKey, column, Bytes.toBytes(measurement.getValue())); } } } NavigableMap<byte[], NavigableMap<byte[], Long>> convertedIncrementsTable = Maps.transformValues(incrementsTable, TRANSFORM_MAP_BYTE_ARRAY_TO_LONG); NavigableMap<byte[], NavigableMap<byte[], Long>> convertedGaugesTable = Maps.transformValues(gaugesTable, TRANSFORM_MAP_BYTE_ARRAY_TO_LONG); // todo: replace with single call, to be able to optimize rpcs in underlying table timeSeriesTable.put(convertedGaugesTable); timeSeriesTable.increment(convertedIncrementsTable); if (metrics != null) { metrics.increment(putCountMetric, convertedGaugesTable.size()); metrics.increment(incrementCountMetric, convertedIncrementsTable.size()); } }