/** Increments the reported-error counter by one. */
public void incrReportedErrorCount() {
    reportedErrorCount.incr();
}
// Fragment (mid-method): records fetch-API latency/call-count metrics, then begins
// counting messages in the fetched batch. NOTE(review): `millis` and `msgs` are
// defined earlier in the enclosing method, outside this view; the `if` block opened
// here is closed later, also outside this view.
_fetchAPILatencyMax.update(millis); _fetchAPILatencyMean.update(millis); _fetchAPICallCount.incr(); if (msgs != null) { int numMessages = 0;
/**
 * Receives a single event from the underlying Event Hubs partition receiver.
 * <p>
 * Pulls one message at a time (for backward-compatible behaviour), records
 * receive latency and call-count metrics, and wraps the event together with a
 * {@code MessageId} built from the event's offset and sequence number.
 *
 * @return the received event wrapped with its message id, or {@code null} if
 *         the receive failed or no event was available.
 */
@Override
public EventDataWrap receive() {
    long start = System.currentTimeMillis();
    Iterable<EventData> receivedEvents = null;
    /* Get one message at a time for backward compatibility behaviour */
    try {
        receivedEvents = receiver.receiveSync(1);
    } catch (ServiceBusException e) {
        // Pass the throwable to the logger so the full stack trace is preserved
        // (previously only e.toString() was concatenated into the message).
        logger.error("Exception occurred during receive", e);
        return null;
    }
    long end = System.currentTimeMillis();
    long millis = (end - start);
    receiveApiLatencyMean.update(millis);
    receiveApiCallCount.incr();
    // Treat both "no batch" and "empty batch" as nothing received.
    if (receivedEvents == null || receivedEvents.spliterator().getExactSizeIfKnown() == 0) {
        return null;
    }
    receiveMessageCount.incr();
    EventData receivedEvent = receivedEvents.iterator().next();
    MessageId messageId = new MessageId(partitionId,
            receivedEvent.getSystemProperties().getOffset(),
            receivedEvent.getSystemProperties().getSequenceNumber());
    return EventDataWrap.create(receivedEvent, messageId);
}
/**
 * Acknowledges successful processing of the tuple identified by {@code msgId}
 * (an SCN boxed as a {@code Long}): removes it from the pending and failure
 * maps, then refreshes the related metrics.
 */
@Override
public void ack(Object msgId) {
    LOGGER.trace("Acking For... {} Current TimeInMillis since epoch {}", msgId, System.currentTimeMillis());
    final long sequence = (Long) msgId;
    // Drop the acked sequence from both tracking maps first...
    pendingMessagesToBeAcked.remove(sequence);
    failureMessages.remove(sequence);
    // ...then publish metrics reflecting the post-ack state.
    failureMetric.update(failureMessages.size());
    successCountMetric.incr();
    pendingMessageSize.update(pendingMessagesToBeAcked.size());
}
/**
 * Handles a failed tuple at the given Kafka offset.
 * <p>
 * If the offset has fallen more than {@code maxOffsetBehind} below
 * {@code _emittedToOffset}, the failure is skipped (logged at INFO) — it is too
 * old to be worth replaying. Otherwise the failure is counted; if nothing has
 * ever been acked and failures exceed {@code maxOffsetBehind}, the spout gives
 * up with a RuntimeException. Eligible offsets are handed to
 * {@code _failedMsgRetryManager} for retry; ineligible ones are cleaned up as
 * if acked. NOTE(review): order matters here — {@code retryFurther} is consulted
 * before {@code failed}/{@code acked} mutate the retry manager's state.
 */
public void fail(Long offset) { if (offset < _emittedToOffset - _spoutConfig.maxOffsetBehind) { LOG.info( "Skipping failed tuple at offset={}" + " because it's more than maxOffsetBehind={}" + " behind _emittedToOffset={} for {}", offset, _spoutConfig.maxOffsetBehind, _emittedToOffset, _partition ); } else { LOG.debug("Failing at offset={} with _pending.size()={} pending and _emittedToOffset={} for {}", offset, _pending.size(), _emittedToOffset, _partition); numberFailed++; if (numberAcked == 0 && numberFailed > _spoutConfig.maxOffsetBehind) { throw new RuntimeException("Too many tuple failures"); } // Offset may not be considered for retry by failedMsgRetryManager if (this._failedMsgRetryManager.retryFurther(offset)) { this._failedMsgRetryManager.failed(offset); } else { // state for the offset should be cleaned up LOG.warn("Will not retry failed kafka offset {} further", offset); _messageIneligibleForRetryCount.incr(); _pending.remove(offset); this._failedMsgRetryManager.acked(offset); } } }
// Fragment (mid-method): sidelines a failed SCN — clears it from the failure and
// pending-ack maps, bumps the sideline counter, logs the abandoned transaction,
// and nulls the retained event reference so it can be garbage-collected.
// NOTE(review): `failedScn` and `txRetrEvent` are defined earlier in the
// enclosing method, outside this view.
this.failureMessages.remove(failedScn); this.pendingMessagesToBeAcked.remove(failedScn); this.sidelineCountMetric.incr(); LOGGER.info("Sidelining SCN = {} Tx = {}", failedScn, txRetrEvent); txRetrEvent = null;
// Fragment: non-contiguous excerpts from an S3-backed caching bolt — per-outcome
// metric counters (already_in_cache / key_too_large / cached / s3_exception)
// surrounding a putObject upload, with the tuple re-emitted on the cache-hit and
// oversized-key paths. NOTE(review): braces are unbalanced here (`} finally { try {`);
// the enclosing try/finally structure lies outside this view.
eventCounter.scope("already_in_cache").incr(); _collector.emit(tuple, new Values(url, content, metadata)); eventCounter.scope("key_too_large").incr(); _collector.emit(tuple, new Values(url, content, metadata)); PutObjectResult result = client.putObject(bucketName, getKeyPrefix() + key, input, md); eventCounter.scope("cached").incr(); eventCounter.scope("s3_exception").incr(); } finally { try {