@Override
public String toString() {
    // Rendered as "<topic-partition>@<offset>".
    final String position = topicPartition.toString();
    return position + "@" + offset;
}
}
@Override
public String toString() {
    // Delegate directly to the topic-partition's own representation.
    final String repr = topicPartition.toString();
    return repr;
}
}
@Override
public String getSource() {
    // The source identifier for this component is its topic-partition name.
    final String sourceName = _topicPartition.toString();
    return sourceName;
}
/**
 * Starts this extractor by restoring the last committed watermark for the assigned
 * partition from {@code watermarkStorage} and seeking the consumer accordingly:
 * to the beginning of the partition when no watermark was committed, otherwise to
 * one offset past the committed low watermark.
 *
 * @param watermarkStorage storage holding previously committed watermarks; must not be null.
 * @throws IOException if reading committed watermarks fails.
 */
@Override
public void start(WatermarkStorage watermarkStorage) throws IOException {
    Preconditions.checkArgument(watermarkStorage != null, "Watermark Storage should not be null");
    // Committed watermarks are keyed by the partition's string name.
    Map<String, CheckpointableWatermark> watermarkMap =
        watermarkStorage.getCommittedWatermarks(KafkaWatermark.class,
            Collections.singletonList(_partition.toString()));
    KafkaWatermark watermark = (KafkaWatermark) watermarkMap.get(_partition.toString());
    if (watermark == null) {
        // No committed offset for this partition: consume from the earliest available record.
        LOG.info("Offset is null - seeking to beginning of topic and partition for {} ", _partition.toString());
        _consumer.seekToBeginning(_partition);
    } else {
        // seek needs to go one past the last committed offset, so the committed record
        // itself is not re-emitted.
        LOG.info("Offset found in consumer for partition {}. Seeking to one past what we found : {}",
            _partition.toString(), watermark.getLwm().getValue() + 1);
        _consumer.seek(_partition, watermark.getLwm().getValue() + 1);
    }
    _isStarted.set(true);
}
/**
 * Poll more records from the Kafka Broker.
 *
 * Refreshes {@code records}, resets {@code consumerRecordIterator} to the new batch,
 * and records the consumer's post-poll position in {@code consumerPosition}.
 *
 * @throws PollTimeoutException if poll returns 0 record and consumer's position < requested endOffset.
 */
private void pollRecords() {
    // Only pay the stopwatch cost when trace logging is actually enabled.
    if (LOG.isTraceEnabled()) {
        stopwatch.reset().start();
    }
    records = consumer.poll(pollTimeoutDurationMs);
    if (LOG.isTraceEnabled()) {
        stopwatch.stop();
        LOG.trace("Pulled [{}] records in [{}] ms", records.count(), stopwatch.elapsed(TimeUnit.MILLISECONDS));
    }
    // Fail if we can not poll within one lap of pollTimeoutMs.
    // An empty batch while still short of endOffset means the broker did not respond in time.
    if (records.isEmpty() && consumer.position(topicPartition) < endOffset) {
        throw new PollTimeoutException(String.format(ERROR_POLL_TIMEOUT_FORMAT, pollTimeoutMs,
            topicPartition.toString(), startOffset, consumer.position(topicPartition), endOffset));
    }
    consumerRecordIterator = records.iterator();
    consumerPosition = consumer.position(topicPartition);
}
? Double.compare(r1.load().expectedUtilizationFor(resource()), r2.load().expectedUtilizationFor(resource())) : Double.compare(r2.load().expectedUtilizationFor(resource()), r1.load().expectedUtilizationFor(resource())); return result == 0 ? r1.topicPartition().toString().compareTo(r2.topicPartition().toString()) : result; });
.thenComparing(r -> r.topicPartition().toString()));
/**
 * Verifies that every fetcher metric registered at runtime (except the built-in
 * "kafka-metrics-count" group) has a matching template in {@code FetcherMetricsRegistry},
 * after normalizing per-partition metric names back to their {topic}-{partition} placeholders.
 */
@Test
public void testFetcherMetricsTemplates() throws Exception {
    // Rebuild metrics and fetcher with a known client-id tag so the tag sets
    // match the registry's templates.
    metrics.close();
    Map<String, String> clientTags = Collections.singletonMap("client-id", "clientA");
    metrics = new Metrics(new MetricConfig().tags(clientTags));
    metricsRegistry = new FetcherMetricsRegistry(clientTags.keySet(), "consumer" + groupId);
    fetcher.close();
    fetcher = createFetcher(subscriptions, metrics);

    // Fetch from topic to generate topic metrics
    subscriptions.assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 0);
    assertEquals(1, fetcher.sendFetches());
    client.prepareResponse(fullFetchResponse(tp0, this.records, Errors.NONE, 100L, 0));
    consumerClient.poll(time.timer(0));
    assertTrue(fetcher.hasCompletedFetches());
    Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> partitionRecords = fetcher.fetchedRecords();
    assertTrue(partitionRecords.containsKey(tp0));

    // Create throttle metrics
    Fetcher.throttleTimeSensor(metrics, metricsRegistry);

    // Verify that all metrics except metrics-count have registered templates
    Set<MetricNameTemplate> allMetrics = new HashSet<>();
    for (MetricName n : metrics.metrics().keySet()) {
        // Replace the concrete "<topic>-<partition>" fragment with template placeholders.
        String name = n.name().replaceAll(tp0.toString(), "{topic}-{partition}");
        if (!n.group().equals("kafka-metrics-count"))
            allMetrics.add(new MetricNameTemplate(name, n.group(), "", n.tags().keySet()));
    }
    TestUtils.checkEquals(allMetrics, new HashSet<>(metricsRegistry.getAllTemplates()), "metrics", "templates");
}
@Override
public String toString() {
    // Rendered as "<topic-partition>: <preferred-leader-broker>".
    return topicPartition.toString() + ": " + preferredLeaderBroker;
}
}
@Override
public String toString() {
    // This object is fully described by its topic-partition.
    final String repr = topicPartition.toString();
    return repr;
}
}
@Override
public String toString() {
    // Identity of this instance is its topic-partition name.
    final String text = topicPartition.toString();
    return text;
}
}
/** * Produces a string representation containing useful information about a Task starting with the given indent. * This is useful in debugging scenarios. * * @return A string representation of the Task instance. */ public String toString(final String indent) { final StringBuilder sb = new StringBuilder(); sb.append(indent); sb.append("TaskId: "); sb.append(id); sb.append("\n"); // print topology if (topology != null) { sb.append(indent).append(topology.toString(indent + "\t")); } // print assigned partitions if (partitions != null && !partitions.isEmpty()) { sb.append(indent).append("Partitions ["); for (final TopicPartition topicPartition : partitions) { sb.append(topicPartition.toString()).append(", "); } sb.setLength(sb.length() - 2); sb.append("]\n"); } return sb.toString(); }
@Override
public String toString() {
    // Rendered as "<topic-partition>: <source-broker> -> <dest-broker>".
    return topicPartition.toString() + ": " + source.name() + " -> " + dest.name();
}
}
@Override public void flush(Map<TopicPartition, OffsetAndMetadata> partitionOffsets) { log.debug("Flushing..."); // Process results of all the outstanding futures specified by each TopicPartition. for (Map.Entry<TopicPartition, OffsetAndMetadata> partitionOffset : partitionOffsets.entrySet()) { log.trace("Received flush for partition " + partitionOffset.getKey().toString()); Map<Integer, OutstandingFuturesForPartition> outstandingFuturesForTopic = allOutstandingFutures.get(partitionOffset.getKey().topic()); if (outstandingFuturesForTopic == null) { continue; } OutstandingFuturesForPartition outstandingFutures = outstandingFuturesForTopic.get(partitionOffset.getKey().partition()); if (outstandingFutures == null) { continue; } try { ApiFutures.allAsList(outstandingFutures.futures).get(); } catch (Exception e) { throw new RuntimeException(e); } finally { outstandingFutures.futures.clear(); } } allOutstandingFutures.clear(); }
/**
 * Poll more records from the Kafka Broker.
 *
 * On success, {@code records}, {@code consumerRecordIterator} and {@code consumerPosition}
 * are updated to reflect the newly fetched batch.
 *
 * @throws PollTimeoutException if poll returns 0 record and consumer's position < requested endOffset.
 */
private void pollRecords() {
    // Stopwatch is only exercised when trace logging is enabled.
    if (LOG.isTraceEnabled()) {
        stopwatch.reset().start();
    }
    records = consumer.poll(pollTimeoutDurationMs);
    if (LOG.isTraceEnabled()) {
        stopwatch.stop();
        LOG.trace("Pulled [{}] records in [{}] ms", records.count(), stopwatch.elapsed(TimeUnit.MILLISECONDS));
    }
    // Fail if we can not poll within one lap of pollTimeoutMs.
    // Empty batch + position still below endOffset implies the broker timed out on us.
    if (records.isEmpty() && consumer.position(topicPartition) < endOffset) {
        throw new PollTimeoutException(String.format(ERROR_POLL_TIMEOUT_FORMAT, pollTimeoutMs,
            topicPartition.toString(), startOffset, consumer.position(topicPartition), endOffset));
    }
    consumerRecordIterator = records.iterator();
    consumerPosition = consumer.position(topicPartition);
}
consumerRecord.value().getSender(), consumerRecord.value().getReceivers().get(0), consumerRecord.value().getPayloadClass(), consumerRecord.offset(), topicPartition.toString()));