KafkaSystemConsumerMetrics

How to use KafkaSystemConsumerMetrics in org.apache.samza.system.kafka

Best Java code snippets using org.apache.samza.system.kafka.KafkaSystemConsumerMetrics
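Taken together, the snippets below follow one lifecycle: construct the metrics object with a system name and a metrics registry, register the topic-partitions and client proxies up front, then update counters and gauges as records are consumed. Here is a minimal sketch of that lifecycle, assuming org.apache.samza.metrics.MetricsRegistryMap and kafka.common.TopicAndPartition (the same types the snippets rely on); the topic name and client id are hypothetical.

import kafka.common.TopicAndPartition;
import org.apache.samza.metrics.MetricsRegistryMap;
import org.apache.samza.system.kafka.KafkaSystemConsumerMetrics;

public class KafkaConsumerMetricsSketch {
  public static void main(String[] args) {
    // One metrics holder per Samza "system" (the Kafka cluster alias in config).
    MetricsRegistryMap registry = new MetricsRegistryMap();
    KafkaSystemConsumerMetrics metrics = new KafkaSystemConsumerMetrics("kafka", registry);

    // Register partitions and the client proxy before recording anything against them.
    TopicAndPartition tp = new TopicAndPartition("PageViewEvent", 0); // hypothetical topic
    String clientId = "samza-consumer-1";                            // hypothetical client id
    metrics.registerTopicAndPartition(tp);
    metrics.registerClientProxy(clientId);

    // Per-record updates, mirroring updateMetrics() further down the page.
    metrics.incReads(tp);
    metrics.incBytesReads(tp, 512);
    metrics.setOffsets(tp, 42);
    metrics.incClientBytesReads(clientId, 512);

    // Periodic lag refresh, mirroring refreshLagMetrics() below.
    metrics.setHighWatermarkValue(tp, 100);
    metrics.setLagValue(tp, 58);
  }
}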

origin: apache/samza

KafkaConsumerProxy(Consumer<K, V> kafkaConsumer, String systemName, String clientId,
  KafkaSystemConsumer.KafkaConsumerMessageSink messageSink, KafkaSystemConsumerMetrics samzaConsumerMetrics,
  String metricName) {
 this.kafkaConsumer = kafkaConsumer;
 this.systemName = systemName;
 this.sink = messageSink;
 this.kafkaConsumerMetrics = samzaConsumerMetrics;
 this.metricName = metricName;
 this.clientId = clientId;
 this.kafkaConsumerMetrics.registerClientProxy(metricName);
 consumerPollThread = new Thread(createProxyThreadRunnable());
 consumerPollThread.setDaemon(true);
 consumerPollThread.setName(
   "Samza KafkaConsumerProxy Poll " + consumerPollThread.getName() + " - " + systemName);
 LOG.info("Creating KafkaConsumerProxy with systeName={}, clientId={}, metricsName={}", systemName, clientId, metricName);
}
origin: org.apache.samza/samza-kafka

private void refreshLagMetrics() {
 for (Map.Entry<SystemStreamPartition, Long> e : nextOffsets.entrySet()) {
  SystemStreamPartition ssp = e.getKey();
  Long offset = e.getValue();
  TopicAndPartition tp = new TopicAndPartition(ssp.getStream(), ssp.getPartition().getPartitionId());
  Long lag = latestLags.get(ssp);
  LOG.trace("Latest offset of {} is  {}; lag = {}", ssp, offset, lag);
  if (lag != null && offset != null && lag >= 0) {
   long streamEndOffset = offset.longValue() + lag.longValue();
   // update the metrics
   kafkaConsumerMetrics.setHighWatermarkValue(tp, streamEndOffset);
   kafkaConsumerMetrics.setLagValue(tp, lag.longValue());
  }
 }
}
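The arithmetic above is worth spelling out: the map value is the next offset this consumer will read, and the cached lag is the broker-reported distance to the end of the partition, so the high watermark is reconstructed as their sum. A purely illustrative example with made-up numbers:

// Hypothetical numbers, illustrating the arithmetic in refreshLagMetrics():
long nextOffset = 1000L;                  // next offset to consume (the nextOffsets map value)
long lag = 25L;                           // cached broker-reported lag for the same SSP
long streamEndOffset = nextOffset + lag;  // 1025, published via setHighWatermarkValue(tp, ...)
// lag itself is published unchanged via setLagValue(tp, lag).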
origin: apache/samza

/**
 * Add a new partition to the list of polled partitions.
 * Must only be called before {@link KafkaConsumerProxy#start} is called.
 */
public void addTopicPartition(SystemStreamPartition ssp, long nextOffset) {
 LOG.info(String.format("Adding new topicPartition %s with offset %s to queue for consumer %s", ssp, nextOffset,
   this));
 topicPartitionToSSP.put(KafkaSystemConsumer.toTopicPartition(ssp), ssp); //registered SSPs
 // this offset has already been vetted, so there is no need to validate it
 nextOffsets.put(ssp, nextOffset);
 kafkaConsumerMetrics.setNumTopicPartitions(metricName, nextOffsets.size());
}
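addTopicPartition() and the snippets that follow convert between Samza's SystemStreamPartition and the two Kafka-side partition types via KafkaSystemConsumer.toTopicPartition and toTopicAndPartition, neither of which is reproduced on this page. The sketch below shows what those conversions most plausibly amount to; treat it as an assumption, not the actual Samza source.

import kafka.common.TopicAndPartition;
import org.apache.kafka.common.TopicPartition;
import org.apache.samza.system.SystemStreamPartition;

// Assumed shape of the conversions used throughout these snippets.
final class PartitionConversions {
  static TopicPartition toTopicPartition(SystemStreamPartition ssp) {
    return new TopicPartition(ssp.getStream(), ssp.getPartition().getPartitionId());
  }

  static TopicAndPartition toTopicAndPartition(TopicPartition tp) {
    return new TopicAndPartition(tp.topic(), tp.partition());
  }
}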
origin: apache/samza

KafkaSystemConsumerMetrics metrics = new KafkaSystemConsumerMetrics(systemName, registry);
metrics.registerTopicAndPartition(tp1);
metrics.registerTopicAndPartition(tp2);
metrics.registerClientProxy(clientName);
metrics.setOffsets(tp1, 1001);
metrics.setOffsets(tp2, 1002);
expectedValues.put(metrics.offsets().get(tp1).getName(), "1001");
expectedValues.put(metrics.offsets().get(tp2).getName(), "1002");
metrics.incBytesReads(tp1, 10);
metrics.incBytesReads(tp1, 5); // total 15
expectedValues.put(metrics.bytesRead().get(tp1).getName(), "15");
metrics.incReads(tp1);
metrics.incReads(tp1); // total 2
expectedValues.put(metrics.reads().get(tp1).getName(), "2");
metrics.setHighWatermarkValue(tp2, 1000);
metrics.setHighWatermarkValue(tp2, 1001); // final value 1001
expectedValues.put(metrics.highWatermark().get(tp2).getName(), "1001");
metrics.setLagValue(tp1, 200);
metrics.setLagValue(tp1, 201); // final value 201
expectedValues.put(metrics.lag().get(tp1).getName(), "201");
metrics.incClientBytesReads(clientName, 100); // broker-bytes-read
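The fragment above comes from a unit test and leaves its fixtures out of frame (tp1, tp2, clientName, registry, and expectedValues are declared elsewhere). A sketch of the setup it implies, with hypothetical names and values; the real test's fixtures may differ.

import java.util.HashMap;
import java.util.Map;
import kafka.common.TopicAndPartition;
import org.apache.samza.metrics.MetricsRegistryMap;

// Hypothetical fixtures for the test fragment above.
class MetricsTestFixtures {
  String systemName = "kafka";
  String clientName = "client-0";
  TopicAndPartition tp1 = new TopicAndPartition("topic1", 1);
  TopicAndPartition tp2 = new TopicAndPartition("topic2", 2);
  MetricsRegistryMap registry = new MetricsRegistryMap();   // implements MetricsRegistry
  Map<String, String> expectedValues = new HashMap<>();     // metric name -> expected rendered value
}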
origin: org.apache.samza/samza-kafka

private void updateMetrics(ConsumerRecord<K, V> r, TopicPartition tp) {
 TopicAndPartition tap = KafkaSystemConsumer.toTopicAndPartition(tp);
 SystemStreamPartition ssp = new SystemStreamPartition(systemName, tp.topic(), new Partition(tp.partition()));
 Long lag = latestLags.get(ssp);
 if (lag == null) {
  throw new SamzaException("Unknown/unregistered ssp in latestLags. ssp=" + ssp + "; system=" + systemName);
 }
 long currentSSPLag = lag.longValue(); // lag between the current offset and the highwatermark
 if (currentSSPLag < 0) {
  return;
 }
 long recordOffset = r.offset();
 long highWatermark = recordOffset + currentSSPLag; // derived value for the highwatermark
 int size = getRecordSize(r);
 kafkaConsumerMetrics.incReads(tap);
 kafkaConsumerMetrics.incBytesReads(tap, size);
 kafkaConsumerMetrics.setOffsets(tap, recordOffset);
 kafkaConsumerMetrics.incClientBytesReads(metricName, size);
 kafkaConsumerMetrics.setHighWatermarkValue(tap, highWatermark);
}
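updateMetrics() calls a getRecordSize(r) helper that the snippet does not show. One plausible way to compute it from a kafka-clients ConsumerRecord is sketched below; this is an assumption, not the actual Samza implementation.

import org.apache.kafka.clients.consumer.ConsumerRecord;

// Assumed helper: size a record by its serialized key and value.
// serializedKeySize()/serializedValueSize() return -1 when the key/value is null.
static <K, V> int getRecordSize(ConsumerRecord<K, V> r) {
  int keySize = Math.max(r.serializedKeySize(), 0);
  int valueSize = Math.max(r.serializedValueSize(), 0);
  return keySize + valueSize;
}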
origin: org.apache.samza/samza-kafka_2.11

// Fragment from KafkaConsumerProxy's poll loop; the surrounding statements were elided by the snippet extractor.
kafkaConsumerMetrics.incClientReads(metricName);
// ... when there are no topic-partitions to fetch, the proxy instead sleeps for
// SLEEP_MS_WHILE_NO_TOPIC_PARTITION and records the skipped fetch:
kafkaConsumerMetrics.incClientSkippedFetchRequests(metricName);
origin: apache/samza

/**
 * Record the ssp and the offset. Do not submit it to the consumer yet.
 * @param systemStreamPartition ssp to register
 * @param offset offset to register with
 */
@Override
public void register(SystemStreamPartition systemStreamPartition, String offset) {
 if (started.get()) {
  String msg = String.format("%s: Trying to register partition after consumer has been started. ssp=%s", this,
    systemStreamPartition);
  throw new SamzaException(msg);
 }
 if (!systemStreamPartition.getSystem().equals(systemName)) {
  LOG.warn("{}: ignoring SSP {}, because this consumer's system doesn't match.", this, systemStreamPartition);
  return;
 }
 LOG.info("{}: Registering ssp = {} with offset {}", this, systemStreamPartition, offset);
 super.register(systemStreamPartition, offset);
 TopicPartition tp = toTopicPartition(systemStreamPartition);
 topicPartitionsToSSP.put(tp, systemStreamPartition);
 String existingOffset = topicPartitionsToOffset.get(tp);
 // register the older (of the two) offset in the consumer, to guarantee we do not miss any messages.
 if (existingOffset == null || compareOffsets(existingOffset, offset) > 0) {
  topicPartitionsToOffset.put(tp, offset);
 }
 metrics.registerTopicAndPartition(toTopicAndPartition(tp));
}
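register() leans on a compareOffsets helper to keep the older of two requested starting offsets for the same partition. Since Kafka offsets are numeric strings, a plausible implementation is a plain numeric comparison; the sketch below is an assumption, not necessarily the exact Samza code.

// Assumed helper: compare two Kafka offsets given as numeric strings.
// Returns a positive value when offset1 is ahead of offset2, matching the "keep the older offset" check above.
static int compareOffsets(String offset1, String offset2) {
  return Long.compare(Long.parseLong(offset1), Long.parseLong(offset2));
}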
org.apache.samza.system.kafka.KafkaSystemConsumerMetrics

Most used methods

  • incBytesReads
  • incClientBytesReads
  • incClientReads
  • incReads
  • registerClientProxy
  • registerTopicAndPartition
  • setHighWatermarkValue
  • setLagValue
  • setNumTopicPartitions
  • setOffsets
  • incClientSkippedFetchRequests
  • registry
  • <init>
  • bytesRead
  • clientBytesRead
  • clientReads
  • group
  • highWatermark
  • lag
  • offsets
