/**
 * Adds {@code ms} to the total time this spout spent skipping emits
 * due to downstream back pressure.
 *
 * @param ms milliseconds to add to the back-pressure counter
 */
public void skippedBackPressureMs(long ms) {
    this.skippedBackPressureMs.incrBy(ms);
}
}
/**
 * Returns the counter registered under {@code key}, creating and storing a
 * fresh {@link CountMetric} on first use.
 *
 * @param key scope name identifying the counter
 * @return the (possibly newly created) counter for that key
 */
public CountMetric scope(String key) {
    // computeIfAbsent replaces the get/null-check/put dance with one idiomatic call.
    return _value.computeIfAbsent(key, k -> new CountMetric());
}
/** Bumps the reported-error counter by exactly one. */
public void incrReportedErrorCount() {
    reportedErrorCount.incr();
}
/**
 * Snapshots every scoped counter into a fresh map, resetting each counter
 * as its value is read.
 *
 * @return map from scope name to that counter's value-and-reset result
 */
public Map<String, Object> getValueAndReset() {
    final Map<String, Object> snapshot = new HashMap<>();
    _value.forEach((scope, counter) -> snapshot.put(scope, counter.getValueAndReset()));
    return snapshot;
}
}
// NOTE(review): fragment of a larger fetch/emit method — the enclosing method's
// signature and remainder (including the close of the if-block) are outside this view.
// Messages the broker omitted, plus any gap between the previously emitted offset
// and the new fetch offset, are all counted as lost.
_lostMessageCount.incrBy(omitted.size());
_lostMessageCount.incrBy(offset - _emittedToOffset);
// Advance the emit cursor to the broker-provided offset.
_emittedToOffset = offset;
LOG.warn("{} Using new offset: {}", _partition, _emittedToOffset);
// Record fetch-API latency (max and mean) and one API call for this round trip.
_fetchAPILatencyMax.update(millis);
_fetchAPILatencyMean.update(millis);
_fetchAPICallCount.incr();
if (msgs != null) { int numMessages = 0;
// numMessages is still 0 here — presumably incremented by counting code past this
// view before being added to the metric; TODO confirm against the full method.
_fetchAPIMessageCount.incrBy(numMessages);
/**
 * Snapshots this receiver's metrics, resetting each as it is read.
 * Names are prefixed with the partition id so several partitions can
 * report side by side.
 *
 * @return map from prefixed metric name to its value-and-reset result
 */
@Override
public Map<String, Object> getMetricsData() {
    final String prefix = partitionId + "/";
    final Map<String, Object> data = new HashMap<>();
    data.put(prefix + "receiveApiLatencyMean", receiveApiLatencyMean.getValueAndReset());
    data.put(prefix + "receiveApiCallCount", receiveApiCallCount.getValueAndReset());
    data.put(prefix + "receiveMessageCount", receiveMessageCount.getValueAndReset());
    return data;
}
}
/**
 * Adds {@code ms} to the total time this spout spent skipping emits
 * because the max-spout-pending limit was reached.
 *
 * @param ms milliseconds to add to the counter
 */
public void skippedMaxSpoutMs(long ms) {
    this.skippedMaxSpoutMs.incrBy(ms);
}
/**
 * Synchronously receives a single event from the partition receiver.
 *
 * <p>Fetches one message at a time to preserve backward-compatible behaviour,
 * records receive latency and call/message counts, and wraps the event with a
 * {@link MessageId} built from the partition id, offset, and sequence number.
 *
 * @return the received event wrapper, or {@code null} if nothing was received
 *         or the receive call failed
 */
@Override
public EventDataWrap receive() {
    long start = System.currentTimeMillis();
    Iterable<EventData> receivedEvents;
    /*Get one message at a time for backward compatibility behaviour*/
    try {
        receivedEvents = receiver.receiveSync(1);
    } catch (ServiceBusException e) {
        // Pass the exception as the throwable argument so SLF4J logs the full
        // stack trace (string concatenation dropped it).
        logger.error("Exception occurred during receive", e);
        return null;
    }
    long millis = System.currentTimeMillis() - start;
    receiveApiLatencyMean.update(millis);
    receiveApiCallCount.incr();
    // iterator().hasNext() is a reliable emptiness check even for iterables whose
    // spliterator cannot report an exact size (getExactSizeIfKnown() returns -1).
    if (receivedEvents == null || !receivedEvents.iterator().hasNext()) {
        return null;
    }
    receiveMessageCount.incr();
    EventData receivedEvent = receivedEvents.iterator().next();
    MessageId messageId = new MessageId(partitionId,
        receivedEvent.getSystemProperties().getOffset(),
        receivedEvent.getSystemProperties().getSequenceNumber());
    return EventDataWrap.create(receivedEvent, messageId);
}
/**
 * Creates a receiver bound to one Event Hub partition and initializes the
 * per-partition metrics it reports.
 *
 * @param config      spout configuration supplying connection details
 * @param partitionId id of the partition this receiver reads from
 */
public EventHubReceiverImpl(EventHubSpoutConfig config, String partitionId) {
    this.partitionId = partitionId;
    this.connectionString = config.getConnectionString();
    this.entityName = config.getEntityPath();
    this.consumerGroupName = config.getConsumerGroupName();
    this.receiveApiLatencyMean = new ReducedMetric(new MeanReducer());
    this.receiveApiCallCount = new CountMetric();
    this.receiveMessageCount = new CountMetric();
}
/**
 * Snapshots every scoped counter, resetting each as its value is read.
 *
 * @return map from scope name to that counter's value-and-reset result
 */
public Object getValueAndReset() {
    // Parameterized types replace the raw HashMap/Map the original used
    // (the declared return type stays Object for caller compatibility).
    Map<String, Object> ret = new HashMap<>();
    for (Map.Entry<String, CountMetric> e : _value.entrySet()) {
        ret.put(e.getKey(), e.getValue().getValueAndReset());
    }
    return ret;
}
}
/**
 * Adds {@code ms} to the total time this spout spent skipping emits
 * while inactive.
 *
 * @param ms milliseconds to add to the counter
 */
public void skippedInactiveMs(long ms) {
    this.skippedInactiveMs.incrBy(ms);
}
/**
 * Marks the tuple identified by {@code msgId} (an SCN) as successfully
 * processed: removes it from pending/failure tracking and refreshes the
 * success, failure-size, and pending-size metrics.
 *
 * @param msgId the SCN of the acked tuple, boxed as a {@link Long}
 */
@Override
public void ack(Object msgId) {
    LOGGER.trace("Acking For... {} Current TimeInMillis since epoch {}", msgId, System.currentTimeMillis());
    final long sequence = (Long) msgId;
    pendingMessagesToBeAcked.remove(sequence);
    failureMessages.remove(sequence);
    failureMetric.update(failureMessages.size());
    successCountMetric.incr();
    pendingMessageSize.update(pendingMessagesToBeAcked.size());
}
/**
 * Returns the counter registered under {@code key}, creating and storing a
 * fresh {@link CountMetric} on first use.
 *
 * @param key scope name identifying the counter
 * @return the (possibly newly created) counter for that key
 */
public CountMetric scope(String key) {
    // computeIfAbsent replaces the get/null-check/put dance with one idiomatic call.
    return _value.computeIfAbsent(key, k -> new CountMetric());
}
/**
 * Snapshots every scoped counter, resetting each as its value is read.
 *
 * @return map from scope name to that counter's value-and-reset result
 */
public Object getValueAndReset() {
    final Map<String, Object> snapshot = new HashMap<>();
    value.forEach((scope, counter) -> snapshot.put(scope, counter.getValueAndReset()));
    return snapshot;
}
}
/**
 * Bumps the reported-error counter by {@code n}.
 *
 * @param n amount to add to the counter
 */
public void incrReportedErrorCountBy(long n) {
    reportedErrorCount.incrBy(n);
}
// Handles a failed tuple at the given Kafka offset. Tuples that have fallen more
// than maxOffsetBehind the emit cursor are dropped (logged and skipped); otherwise
// the offset is handed to the retry manager or, when retries are exhausted,
// permanently abandoned and its state cleaned up.
public void fail(Long offset) {
    if (offset < _emittedToOffset - _spoutConfig.maxOffsetBehind) {
        // Too far behind to be worth replaying; dropping bounds the retry backlog.
        LOG.info(
            "Skipping failed tuple at offset={}" + " because it's more than maxOffsetBehind={}" + " behind _emittedToOffset={} for {}",
            offset, _spoutConfig.maxOffsetBehind, _emittedToOffset, _partition
        );
    } else {
        LOG.debug("Failing at offset={} with _pending.size()={} pending and _emittedToOffset={} for {}",
            offset, _pending.size(), _emittedToOffset, _partition);
        numberFailed++;
        // Fail fast: if nothing has ever been acked yet failures keep accumulating,
        // the spout is likely misconfigured — abort rather than retry forever.
        if (numberAcked == 0 && numberFailed > _spoutConfig.maxOffsetBehind) {
            throw new RuntimeException("Too many tuple failures");
        }
        // Offset may not be considered for retry by failedMsgRetryManager
        if (this._failedMsgRetryManager.retryFurther(offset)) {
            this._failedMsgRetryManager.failed(offset);
        } else {
            // state for the offset should be cleaned up
            LOG.warn("Will not retry failed kafka offset {} further", offset);
            _messageIneligibleForRetryCount.incr();
            _pending.remove(offset);
            // acked() releases the retry manager's bookkeeping for this offset even
            // though the tuple was never actually acked — intentional cleanup path.
            this._failedMsgRetryManager.acked(offset);
        }
    }
}
/**
 * Returns the counter registered under {@code key}, creating and storing a
 * fresh {@link CountMetric} on first use.
 *
 * @param key scope name identifying the counter
 * @return the (possibly newly created) counter for that key
 */
public CountMetric scope(String key) {
    // computeIfAbsent replaces the get/null-check/put dance with one idiomatic call.
    return value.computeIfAbsent(key, k -> new CountMetric());
}
public Map getMetricsDataMap() { String[] metricPrefixes = new String[] { _partition.getId(), // Correct metric prefix, see STORM-2775 _partition.toString() // Old prefix, kept for backwards compatibility }; Map<String, Object> ret = new HashMap<>(); for (String metricPrefix : metricPrefixes) { ret.put(metricPrefix + "/fetchAPILatencyMax", _fetchAPILatencyMax.getValueAndReset()); ret.put(metricPrefix + "/fetchAPILatencyMean", _fetchAPILatencyMean.getValueAndReset()); ret.put(metricPrefix + "/fetchAPICallCount", _fetchAPICallCount.getValueAndReset()); ret.put(metricPrefix + "/fetchAPIMessageCount", _fetchAPIMessageCount.getValueAndReset()); ret.put(metricPrefix + "/lostMessageCount", _lostMessageCount.getValueAndReset()); ret.put(metricPrefix + "/messageIneligibleForRetryCount", _messageIneligibleForRetryCount.getValueAndReset()); } return ret; }
/**
 * Counts one outgoing bulk request before it is sent, regardless of how many
 * individual actions the request carries.
 */
@Override
public void beforeBulk(long executionId, BulkRequest request) {
    eventCounter.scope("BulkRequest").incrBy(1L);
}