private void moveMessagesToTheirQueue(SystemStreamPartition ssp, List<IncomingMessageEnvelope> envelopes) { long nextOffset = nextOffsets.get(ssp); for (IncomingMessageEnvelope env : envelopes) { sink.addMessage(ssp, env); // move message to the BlockingEnvelopeMap's queue LOG.trace("IncomingMessageEnvelope. got envelope with offset:{} for ssp={}", env.getOffset(), ssp); nextOffset = Long.valueOf(env.getOffset()) + 1; } nextOffsets.put(ssp, nextOffset); }
// Collects the SSPs whose queues can accept more messages, to drive the next fetch.
// NOTE(review): this block is truncated in the current view — the remainder of
// fetchMessages() (loop/body closings and the actual fetch call) is not visible here.
private void fetchMessages() {
  Set<SystemStreamPartition> sspsToFetch = new HashSet<>();
  // Only consider partitions whose sink reports room for more messages.
  for (SystemStreamPartition ssp : nextOffsets.keySet()) {
    if (sink.needsMoreMessages(ssp)) {
      sspsToFetch.add(ssp);
private void populateCurrentLags(Set<SystemStreamPartition> ssps) { Map<MetricName, ? extends Metric> consumerMetrics = kafkaConsumer.metrics(); // populate the MetricNames first time if (perPartitionMetrics.isEmpty()) { HashMap<String, String> tags = new HashMap<>(); tags.put("client-id", clientId); // this is required by the KafkaConsumer to get the metrics for (SystemStreamPartition ssp : ssps) { TopicPartition tp = KafkaSystemConsumer.toTopicPartition(ssp); perPartitionMetrics.put(ssp, new MetricName(tp + ".records-lag", "consumer-fetch-manager-metrics", "", tags)); } } for (SystemStreamPartition ssp : ssps) { MetricName mn = perPartitionMetrics.get(ssp); Metric currentLagMetric = consumerMetrics.get(mn); // High watermark is fixed to be the offset of last available message, // so the lag is now at least 0, which is the same as Samza's definition. // If the lag is not 0, then isAtHead is not true, and kafkaClient keeps polling. long currentLag = (currentLagMetric != null) ? (long) currentLagMetric.value() : -1L; latestLags.put(ssp, currentLag); // calls the setIsAtHead for the BlockingEnvelopeMap sink.setIsAtHighWatermark(ssp, currentLag == 0); } }
// Gathers the set of SSPs that should be included in the next poll.
// NOTE(review): truncated in this view — the closing braces and the fetch itself
// are outside the visible span; documenting only what is shown.
private void fetchMessages() {
  Set<SystemStreamPartition> sspsToFetch = new HashSet<>();
  // Ask the sink (BlockingEnvelopeMap) whether each partition is under its buffering threshold.
  for (SystemStreamPartition ssp : nextOffsets.keySet()) {
    if (sink.needsMoreMessages(ssp)) {
      sspsToFetch.add(ssp);
// Builds the list of partitions eligible for fetching on this iteration.
// NOTE(review): this fragment is cut off mid-method in the current view;
// the rest of fetchMessages() is not visible here.
private void fetchMessages() {
  Set<SystemStreamPartition> sspsToFetch = new HashSet<>();
  // A partition is fetched only while the sink still wants more messages for it.
  for (SystemStreamPartition ssp : nextOffsets.keySet()) {
    if (sink.needsMoreMessages(ssp)) {
      sspsToFetch.add(ssp);
// NOTE(review): fragment of a test method — the setup (consumer creation, ssp0/ssp1,
// ime* envelopes, the configured fetch threshold) is outside this view.
consumer.register(ssp1, "0");
consumer.start();
// One message on ssp0: expected to stay under the fetch threshold here.
consumer.messageSink.addMessage(ssp0, ime0);
Assert.assertEquals(true, consumer.messageSink.needsMoreMessages(ssp0));
consumer.messageSink.addMessage(ssp1, ime1);
Assert.assertEquals(true, consumer.messageSink.needsMoreMessages(ssp1));
// A second message on ssp1 is expected to push it over the threshold.
consumer.messageSink.addMessage(ssp1, ime11);
Assert.assertEquals(false, consumer.messageSink.needsMoreMessages(ssp1));
// NOTE(review): fragment of a test method — setup not visible. Unlike the sibling
// fragment, this one expects ssp0 to be full after a single message, presumably
// because this test configures a tighter threshold — confirm against the full test.
consumer.register(ssp1, "0");
consumer.start();
consumer.messageSink.addMessage(ssp0, ime0);
Assert.assertEquals(false, consumer.messageSink.needsMoreMessages(ssp0));
consumer.messageSink.addMessage(ssp1, ime1);
Assert.assertEquals(true, consumer.messageSink.needsMoreMessages(ssp1));
// Second message on ssp1 reaches the threshold, so no more messages are needed.
consumer.messageSink.addMessage(ssp1, ime11);
Assert.assertEquals(false, consumer.messageSink.needsMoreMessages(ssp1));
/** * Create a KafkaSystemConsumer for the provided {@code systemName} * @param kafkaConsumer kafka Consumer object to be used by this system consumer * @param systemName system name for which we create the consumer * @param config application config * @param clientId clientId from the kafka consumer to be used in the KafkaConsumerProxy * @param metrics metrics for this KafkaSystemConsumer * @param clock system clock */ public KafkaSystemConsumer(Consumer<K, V> kafkaConsumer, String systemName, Config config, String clientId, KafkaSystemConsumerMetrics metrics, Clock clock) { super(metrics.registry(), clock, metrics.getClass().getName()); this.kafkaConsumer = kafkaConsumer; this.clientId = clientId; this.systemName = systemName; this.config = config; this.metrics = metrics; fetchThresholdBytesEnabled = new KafkaConfig(config).isConsumerFetchThresholdBytesEnabled(systemName); // create a sink for passing the messages between the proxy and the consumer messageSink = new KafkaConsumerMessageSink(); // Create the proxy to do the actual message reading. String metricName = String.format("%s-%s", systemName, clientId); proxy = new KafkaConsumerProxy(kafkaConsumer, systemName, clientId, messageSink, metrics, metricName); LOG.info("{}: Created KafkaConsumerProxy {} ", this, proxy); }
/**
 * Create a KafkaSystemConsumer for the provided {@code systemName}.
 * Wires up the message sink and the {@link KafkaConsumerProxy} that does the polling.
 * @param kafkaConsumer kafka Consumer object to be used by this system consumer
 * @param systemName system name for which we create the consumer
 * @param config application config
 * @param clientId clientId from the kafka consumer to be used in the KafkaConsumerProxy
 * @param metrics metrics for this KafkaSystemConsumer
 * @param clock system clock
 */
public KafkaSystemConsumer(Consumer<K, V> kafkaConsumer, String systemName, Config config, String clientId,
    KafkaSystemConsumerMetrics metrics, Clock clock) {
  super(metrics.registry(), clock, metrics.getClass().getName());
  this.kafkaConsumer = kafkaConsumer;
  this.clientId = clientId;
  this.systemName = systemName;
  this.config = config;
  this.metrics = metrics;
  // Whether fetch thresholds are expressed in bytes (vs. message count) for this system.
  fetchThresholdBytesEnabled = new KafkaConfig(config).isConsumerFetchThresholdBytesEnabled(systemName);
  // create a sink for passing the messages between the proxy and the consumer
  messageSink = new KafkaConsumerMessageSink();
  // Create the proxy to do the actual message reading.
  String metricName = String.format("%s-%s", systemName, clientId);
  proxy = new KafkaConsumerProxy(kafkaConsumer, systemName, clientId, messageSink, metrics, metricName);
  LOG.info("{}: Created KafkaConsumerProxy {} ", this, proxy);
}
/**
 * Refreshes the per-partition consumer lag from the KafkaConsumer's metrics, caches it in
 * latestLags, and notifies the sink whether each partition is at the high watermark.
 */
private void populateCurrentLags(Set<SystemStreamPartition> ssps) {
  Map<MetricName, ? extends Metric> consumerMetrics = kafkaConsumer.metrics();

  // populate the MetricNames first time
  // NOTE(review): entries are built only when the map is empty — an SSP first seen on a
  // later call would get a null MetricName and fall through to lag == -1; verify ssps is fixed.
  if (perPartitionMetrics.isEmpty()) {
    HashMap<String, String> tags = new HashMap<>();
    tags.put("client-id", clientId); // this is required by the KafkaConsumer to get the metrics
    for (SystemStreamPartition ssp : ssps) {
      TopicPartition tp = KafkaSystemConsumer.toTopicPartition(ssp);
      perPartitionMetrics.put(ssp, new MetricName(tp + ".records-lag", "consumer-fetch-manager-metrics", "", tags));
    }
  }

  for (SystemStreamPartition ssp : ssps) {
    MetricName mn = perPartitionMetrics.get(ssp);
    Metric currentLagMetric = consumerMetrics.get(mn);

    // High watermark is fixed to be the offset of last available message,
    // so the lag is now at least 0, which is the same as Samza's definition.
    // If the lag is not 0, then isAtHead is not true, and kafkaClient keeps polling.
    long currentLag = (currentLagMetric != null) ? (long) currentLagMetric.value() : -1L;
    latestLags.put(ssp, currentLag);

    // calls the setIsAtHead for the BlockingEnvelopeMap
    sink.setIsAtHighWatermark(ssp, currentLag == 0);
  }
}
/** * Create a KafkaSystemConsumer for the provided {@code systemName} * @param kafkaConsumer kafka Consumer object to be used by this system consumer * @param systemName system name for which we create the consumer * @param config application config * @param clientId clientId from the kafka consumer to be used in the KafkaConsumerProxy * @param metrics metrics for this KafkaSystemConsumer * @param clock system clock */ public KafkaSystemConsumer(Consumer<K, V> kafkaConsumer, String systemName, Config config, String clientId, KafkaSystemConsumerMetrics metrics, Clock clock) { super(metrics.registry(), clock, metrics.getClass().getName()); this.kafkaConsumer = kafkaConsumer; this.clientId = clientId; this.systemName = systemName; this.config = config; this.metrics = metrics; fetchThresholdBytesEnabled = new KafkaConfig(config).isConsumerFetchThresholdBytesEnabled(systemName); // create a sink for passing the messages between the proxy and the consumer messageSink = new KafkaConsumerMessageSink(); // Create the proxy to do the actual message reading. String metricName = String.format("%s-%s", systemName, clientId); proxy = new KafkaConsumerProxy(kafkaConsumer, systemName, clientId, messageSink, metrics, metricName); LOG.info("{}: Created KafkaConsumerProxy {} ", this, proxy); }
private void populateCurrentLags(Set<SystemStreamPartition> ssps) { Map<MetricName, ? extends Metric> consumerMetrics = kafkaConsumer.metrics(); // populate the MetricNames first time if (perPartitionMetrics.isEmpty()) { HashMap<String, String> tags = new HashMap<>(); tags.put("client-id", clientId); // this is required by the KafkaConsumer to get the metrics for (SystemStreamPartition ssp : ssps) { TopicPartition tp = KafkaSystemConsumer.toTopicPartition(ssp); perPartitionMetrics.put(ssp, new MetricName(tp + ".records-lag", "consumer-fetch-manager-metrics", "", tags)); } } for (SystemStreamPartition ssp : ssps) { MetricName mn = perPartitionMetrics.get(ssp); Metric currentLagMetric = consumerMetrics.get(mn); // High watermark is fixed to be the offset of last available message, // so the lag is now at least 0, which is the same as Samza's definition. // If the lag is not 0, then isAtHead is not true, and kafkaClient keeps polling. long currentLag = (currentLagMetric != null) ? (long) currentLagMetric.value() : -1L; latestLags.put(ssp, currentLag); // calls the setIsAtHead for the BlockingEnvelopeMap sink.setIsAtHighWatermark(ssp, currentLag == 0); } }
private void initializeLags() { // This is expensive, so only do it once at the beginning. After the first poll, we can rely on metrics for lag. Map<TopicPartition, Long> endOffsets = kafkaConsumer.endOffsets(topicPartitionToSSP.keySet()); endOffsets.forEach((tp, offset) -> { SystemStreamPartition ssp = topicPartitionToSSP.get(tp); long startingOffset = nextOffsets.get(ssp); // End offsets are the offset of the newest message + 1 // If the message we are about to consume is < end offset, we are starting with a lag. long initialLag = endOffsets.get(tp) - startingOffset; LOG.info("Initial lag for SSP {} is {} (end={}, startOffset={})", ssp, initialLag, endOffsets.get(tp), startingOffset); latestLags.put(ssp, initialLag); sink.setIsAtHighWatermark(ssp, initialLag == 0); }); // initialize lag metrics refreshLagMetrics(); }
/**
 * Seeds the per-partition lag from a one-time endOffsets query and marks partitions that
 * start out already at the high watermark.
 */
private void initializeLags() {
  // This is expensive, so only do it once at the beginning. After the first poll, we can rely on metrics for lag.
  Map<TopicPartition, Long> endOffsets = kafkaConsumer.endOffsets(topicPartitionToSSP.keySet());
  endOffsets.forEach((tp, offset) -> {
    SystemStreamPartition ssp = topicPartitionToSSP.get(tp);
    long startingOffset = nextOffsets.get(ssp);
    // End offsets are the offset of the newest message + 1
    // If the message we are about to consume is < end offset, we are starting with a lag.
    long initialLag = endOffsets.get(tp) - startingOffset;

    LOG.info("Initial lag for SSP {} is {} (end={}, startOffset={})", ssp, initialLag, endOffsets.get(tp), startingOffset);
    latestLags.put(ssp, initialLag);
    // A lag of exactly 0 means the partition starts at the high watermark.
    sink.setIsAtHighWatermark(ssp, initialLag == 0);
  });

  // initialize lag metrics
  refreshLagMetrics();
}
private void initializeLags() { // This is expensive, so only do it once at the beginning. After the first poll, we can rely on metrics for lag. Map<TopicPartition, Long> endOffsets = kafkaConsumer.endOffsets(topicPartitionToSSP.keySet()); endOffsets.forEach((tp, offset) -> { SystemStreamPartition ssp = topicPartitionToSSP.get(tp); long startingOffset = nextOffsets.get(ssp); // End offsets are the offset of the newest message + 1 // If the message we are about to consume is < end offset, we are starting with a lag. long initialLag = endOffsets.get(tp) - startingOffset; LOG.info("Initial lag for SSP {} is {} (end={}, startOffset={})", ssp, initialLag, endOffsets.get(tp), startingOffset); latestLags.put(ssp, initialLag); sink.setIsAtHighWatermark(ssp, initialLag == 0); }); // initialize lag metrics refreshLagMetrics(); }
private void moveMessagesToTheirQueue(SystemStreamPartition ssp, List<IncomingMessageEnvelope> envelopes) { long nextOffset = nextOffsets.get(ssp); for (IncomingMessageEnvelope env : envelopes) { sink.addMessage(ssp, env); // move message to the BlockingEnvelopeMap's queue LOG.trace("IncomingMessageEnvelope. got envelope with offset:{} for ssp={}", env.getOffset(), ssp); nextOffset = Long.valueOf(env.getOffset()) + 1; } nextOffsets.put(ssp, nextOffset); }
/**
 * Drains the given envelopes into the BlockingEnvelopeMap's queue (via the sink) and
 * updates nextOffsets with the offset to request next for this partition.
 */
private void moveMessagesToTheirQueue(SystemStreamPartition ssp, List<IncomingMessageEnvelope> envelopes) {
  long nextOffset = nextOffsets.get(ssp);
  for (IncomingMessageEnvelope env : envelopes) {
    sink.addMessage(ssp, env); // move message to the BlockingEnvelopeMap's queue
    LOG.trace("IncomingMessageEnvelope. got envelope with offset:{} for ssp={}", env.getOffset(), ssp);
    // next offset to fetch = last seen offset + 1 (offsets here are numeric strings)
    nextOffset = Long.valueOf(env.getOffset()) + 1;
  }
  nextOffsets.put(ssp, nextOffset);
}