/**
 * Pauses this consumer thread before the next poll attempt.
 *
 * <p>The pause length comes from {@code idleTimeCalculator.increaseIdleTime()}, which also
 * grows the back-off for subsequent idle rounds. The time spent sleeping is recorded in the
 * per-subscription {@code CONSUMER_IDLE_TIME} timer.
 */
private void awaitUntilNextPoll() {
    Timer idleTimer = metrics.timer(CONSUMER_IDLE_TIME, subscription.getTopicName(), subscription.getName());
    try (Timer.Context ignored = idleTimer.time()) {
        long backoffMillis = idleTimeCalculator.increaseIdleTime();
        Thread.sleep(backoffMillis);
    } catch (InterruptedException e) {
        // Re-assert the interrupt flag so the enclosing consumer loop can observe it and stop.
        Thread.currentThread().interrupt();
    }
}
// Pauses the consumer between polls: sleeps for the (increasing) idle back-off supplied by
// idleTimeCalculator and records the pause in the per-subscription CONSUMER_IDLE_TIME timer.
private void awaitUntilNextPoll() {
    try (Timer.Context ctx = metrics.timer(CONSUMER_IDLE_TIME, subscription.getTopicName(), subscription.getName()).time()) {
        Thread.sleep(idleTimeCalculator.increaseIdleTime());
    } catch (InterruptedException ex) {
        // Restore the interrupt flag so callers up the stack can see the interruption and shut down.
        Thread.currentThread().interrupt();
    }
}
/**
 * Returns the {@code SUBSCRIPTION_LATENCY} timer scoped to the given subscription
 * (keyed by its topic name and subscription name).
 */
public Timer subscriptionLatencyTimer(Subscription subscription) {
    return timer(
            SUBSCRIPTION_LATENCY,
            subscription.getTopicName(),
            subscription.getName());
}
/**
 * Caches a {@link Topic} together with every meter, timer, histogram and counter used on the
 * publishing hot path, so metric lookups are done once per topic instead of per request.
 *
 * <p>Producer/broker latency timers are selected by the topic's ack mode: ack-ALL topics use
 * the {@code ACK_ALL_*} metrics, everything else uses the {@code ACK_LEADER_*} metrics.
 *
 * @param topic         the cached topic
 * @param hermesMetrics metric registry facade used to resolve all per-topic and global metrics
 * @param kafkaTopics   Kafka-level topic names backing this Hermes topic
 * @param blacklisted   whether publishing to this topic is currently blacklisted
 */
public CachedTopic(Topic topic, HermesMetrics hermesMetrics, KafkaTopics kafkaTopics, boolean blacklisted) {
    this.topic = topic;
    this.kafkaTopics = kafkaTopics;
    this.hermesMetrics = hermesMetrics;
    this.blacklisted = blacklisted;

    globalRequestMeter = hermesMetrics.meter(Meters.METER);
    topicRequestMeter = hermesMetrics.meter(Meters.TOPIC_METER, topic.getName());
    globalDelayedProcessingMeter = hermesMetrics.meter(Meters.DELAYED_PROCESSING);
    topicDelayedProcessingMeter = hermesMetrics.meter(Meters.TOPIC_DELAYED_PROCESSING, topic.getName());
    globalRequestReadLatencyTimer = hermesMetrics.timer(Timers.PARSING_REQUEST);
    topicRequestReadLatencyTimer = hermesMetrics.timer(Timers.TOPIC_PARSING_REQUEST, topic.getName());
    globalMessageCreationTimer = hermesMetrics.timer(Timers.MESSAGE_CREATION_LATENCY);
    topicMessageCreationTimer = hermesMetrics.timer(Timers.MESSAGE_CREATION_TOPIC_LATENCY, topic.getName());
    topicMessageContentSize = hermesMetrics.messageContentSizeHistogram(topic.getName());
    globalMessageContentSize = hermesMetrics.messageContentSizeHistogram();
    published = hermesMetrics.counter(Counters.PUBLISHED, topic.getName());
    globalThroughputMeter = hermesMetrics.meter(Meters.THROUGHPUT_BYTES);
    topicThroughputMeter = hermesMetrics.meter(Meters.TOPIC_THROUGHPUT_BYTES, topic.getName());

    if (Topic.Ack.ALL.equals(topic.getAck())) {
        // FIX: the global/topic assignments were swapped — the topic-scoped timer was bound to the
        // global metric (no topic name) while the global timer was bound to the per-topic metric.
        // Global timers now use the unqualified metric; topic timers use the TOPIC_* metric keyed
        // by topic name, matching the pairing used for all other metrics in this constructor.
        globalProducerLatencyTimer = hermesMetrics.timer(Timers.ACK_ALL_LATENCY);
        topicProducerLatencyTimer = hermesMetrics.timer(Timers.ACK_ALL_TOPIC_LATENCY, topic.getName());
        globalBrokerLatencyTimer = hermesMetrics.timer(Timers.ACK_ALL_BROKER_LATENCY);
        topicBrokerLatencyTimer = hermesMetrics.timer(Timers.ACK_ALL_BROKER_TOPIC_LATENCY, topic.getName());
    } else {
        globalProducerLatencyTimer = hermesMetrics.timer(Timers.ACK_LEADER_LATENCY);
        topicProducerLatencyTimer = hermesMetrics.timer(Timers.ACK_LEADER_TOPIC_LATENCY, topic.getName());
        globalBrokerLatencyTimer = hermesMetrics.timer(Timers.ACK_LEADER_BROKER_LATENCY);
        topicBrokerLatencyTimer = hermesMetrics.timer(Timers.ACK_LEADER_BROKER_TOPIC_LATENCY, topic.getName());
    }
}
@Override public void run() { try (Timer.Context c = metrics.timer("offset-committer.duration").time()) { // committed offsets need to be drained first so that there is no possibility of new committed offsets // showing up after inflight queue is drained - this would lead to stall in committing offsets ReducingConsumer committedOffsetsReducer = processCommittedOffsets(); Map<SubscriptionPartition, Long> maxCommittedOffsets = committedOffsetsReducer.reduced; ReducingConsumer inflightOffsetReducer = processInflightOffsets(committedOffsetsReducer.all); Map<SubscriptionPartition, Long> minInflightOffsets = inflightOffsetReducer.reduced; int scheduledToCommit = 0; OffsetsToCommit offsetsToCommit = new OffsetsToCommit(); for (SubscriptionPartition partition : Sets.union(minInflightOffsets.keySet(), maxCommittedOffsets.keySet())) { long offset = Math.min( minInflightOffsets.getOrDefault(partition, Long.MAX_VALUE), maxCommittedOffsets.getOrDefault(partition, Long.MAX_VALUE) ); if (offset >= 0 && offset < Long.MAX_VALUE) { scheduledToCommit++; offsetsToCommit.add(new SubscriptionPartitionOffset(partition, offset)); } } messageCommitter.commitOffsets(offsetsToCommit); metrics.counter("offset-committer.committed").inc(scheduledToCommit); cleanupUnusedSubscriptions(); } catch (Exception exception) { logger.error("Failed to run offset committer: {}", exception.getMessage(), exception); } }
// Scheduled offset-committer pass: computes, per subscription partition, the highest offset
// that is safe to commit and hands the batch to messageCommitter. The whole pass is timed
// under "offset-committer.duration".
@Override
public void run() {
    try (Timer.Context c = metrics.timer("offset-committer.duration").time()) {
        // committed offsets need to be drained first so that there is no possibility of new committed offsets
        // showing up after inflight queue is drained - this would lead to stall in committing offsets
        ReducingConsumer committedOffsetsReducer = processCommittedOffsets();
        Map<SubscriptionPartition, Long> maxCommittedOffsets = committedOffsetsReducer.reduced;
        ReducingConsumer inflightOffsetReducer = processInflightOffsets(committedOffsetsReducer.all);
        Map<SubscriptionPartition, Long> minInflightOffsets = inflightOffsetReducer.reduced;
        int scheduledToCommit = 0;
        OffsetsToCommit offsetsToCommit = new OffsetsToCommit();
        // For each partition seen in either map, the committable offset is the smaller of
        // (min inflight, max committed). Long.MAX_VALUE is the "absent" sentinel, so a
        // partition present in only one map takes its value from the other.
        for (SubscriptionPartition partition : Sets.union(minInflightOffsets.keySet(), maxCommittedOffsets.keySet())) {
            long offset = Math.min(
                    minInflightOffsets.getOrDefault(partition, Long.MAX_VALUE),
                    maxCommittedOffsets.getOrDefault(partition, Long.MAX_VALUE)
            );
            // Skip the sentinel (no offset known for this partition) and negative offsets.
            if (offset >= 0 && offset < Long.MAX_VALUE) {
                scheduledToCommit++;
                offsetsToCommit.add(new SubscriptionPartitionOffset(partition, offset));
            }
        }
        messageCommitter.commitOffsets(offsetsToCommit);
        metrics.counter("offset-committer.committed").inc(scheduledToCommit);
        cleanupUnusedSubscriptions();
    } catch (Exception exception) {
        // Log-and-continue: an exception here must not terminate the scheduled executor's task.
        logger.error("Failed to run offset committer: {}", exception.getMessage(), exception);
    }
}