@Override
public Set<TopicPartition> assignment() {
    return delegate.assignment();
}
private void throwIfEmittingForUnassignedPartition(TopicPartition currBatchTp) {
    final Set<TopicPartition> assignments = consumer.assignment();
    if (!assignments.contains(currBatchTp)) {
        throw new IllegalStateException("The spout is asked to emit tuples on a partition it is not assigned."
            + " This indicates a bug in the TopicFilter or ManualPartitioner implementations."
            + " The current partition is [" + currBatchTp + "], the assigned partitions are [" + assignments + "].");
    }
}
private Collection<TopicPartition> pauseTopicPartitions(TopicPartition excludedTp) {
    final Set<TopicPartition> pausedTopicPartitions = new HashSet<>(consumer.assignment());
    LOG.debug("Currently assigned topic-partitions {}", pausedTopicPartitions);
    pausedTopicPartitions.remove(excludedTp);
    consumer.pause(pausedTopicPartitions);
    LOG.debug("Paused topic-partitions {}", pausedTopicPartitions);
    return pausedTopicPartitions;
}
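The helper returns the paused set so the caller can restore the previous poll state afterwards. A minimal sketch of the intended pause/poll/resume cycle; the poll timeout and the emit step are assumptions for illustration, not from the source:

// Sketch: assumes `consumer` is assigned and `currBatchTp` is the partition to emit from.
final Collection<TopicPartition> paused = pauseTopicPartitions(currBatchTp);
try {
    // Only currBatchTp can return records while the others are paused.
    ConsumerRecords<K, V> records = consumer.poll(Duration.ofMillis(200));
    // ... emit the records ...
} finally {
    consumer.resume(paused); // restore the previous poll set
}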
/**
 * Check whether consumption is done. Consumption is done when the consumer has caught up with the
 * log end offset on every partition, or when all remaining partitions are paused.
 *
 * @param endOffsets the log end offset of each partition.
 * @return true if the consumption is done, false otherwise.
 */
private boolean consumptionDone(Map<TopicPartition, Long> endOffsets) {
    Set<TopicPartition> partitionsNotPaused = new HashSet<>(_metricConsumer.assignment());
    partitionsNotPaused.removeAll(_metricConsumer.paused());
    for (TopicPartition tp : partitionsNotPaused) {
        if (_metricConsumer.position(tp) < endOffsets.get(tp)) {
            return false;
        }
    }
    return true;
}
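A hedged sketch of the loop a predicate like this typically drives, capturing the end offsets once before consuming; the driver itself is illustrative, not from the source:

// Illustrative driver: consume until every unpaused partition reaches its end offset.
Map<TopicPartition, Long> endOffsets = _metricConsumer.endOffsets(_metricConsumer.assignment());
while (!consumptionDone(endOffsets)) {
    ConsumerRecords<byte[], byte[]> records = _metricConsumer.poll(10);
    // ... add sampled metrics, pausing partitions that are finished ...
}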
/**
 * Assign partitions to the KafkaConsumer.
 *
 * @param <K> The consumer key type
 * @param <V> The consumer value type
 * @param consumer The Kafka consumer to assign partitions to
 * @param newAssignment The partitions to assign
 * @param listener The rebalance listener to call back on when the assignment changes
 */
public <K, V> void assignPartitions(Consumer<K, V> consumer, Set<TopicPartition> newAssignment,
        ConsumerRebalanceListener listener) {
    Set<TopicPartition> currentAssignment = consumer.assignment();
    if (!newAssignment.equals(currentAssignment)) {
        listener.onPartitionsRevoked(currentAssignment);
        consumer.assign(newAssignment);
        listener.onPartitionsAssigned(newAssignment);
    }
}
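A minimal usage sketch for the helper above; the topic name, partition numbers, and listener bodies are hypothetical:

// Hypothetical caller: move the consumer to partitions 0 and 1 of "events",
// notifying the listener only if the assignment actually changed.
void reassign(Consumer<String, String> consumer) {
    Set<TopicPartition> wanted = new HashSet<>(Arrays.asList(
        new TopicPartition("events", 0),
        new TopicPartition("events", 1)));
    assignPartitions(consumer, wanted, new ConsumerRebalanceListener() {
        @Override
        public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
            // Checkpoint work for the partitions being given up.
        }
        @Override
        public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
            // Seek to stored offsets for the newly assigned partitions.
        }
    });
}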
Set<TopicPartition> assignment = consumer.assignment();
if (!isAtLeastOnceProcessing()) {
    return new PollablePartitionsInfo(assignment, Collections.emptyMap());
}
private ConsumerRecords<K, V> pollKafkaBroker(PollablePartitionsInfo pollablePartitionsInfo) {
    doSeekRetriableTopicPartitions(pollablePartitionsInfo.pollableEarliestRetriableOffsets);
    Set<TopicPartition> pausedPartitions = new HashSet<>(consumer.assignment());
    pausedPartitions.removeIf(pollablePartitionsInfo.pollablePartitions::contains);
    try {
        consumer.pause(pausedPartitions);
        final ConsumerRecords<K, V> consumerRecords = consumer.poll(kafkaSpoutConfig.getPollTimeoutMs());
        ackRetriableOffsetsIfCompactedAway(pollablePartitionsInfo.pollableEarliestRetriableOffsets, consumerRecords);
        final int numPolledRecords = consumerRecords.count();
        LOG.debug("Polled [{}] records from Kafka", numPolledRecords);
        if (kafkaSpoutConfig.getProcessingGuarantee() == KafkaSpoutConfig.ProcessingGuarantee.AT_MOST_ONCE) {
            // Commit polled records immediately to ensure delivery is at-most-once.
            Map<TopicPartition, OffsetAndMetadata> offsetsToCommit =
                createFetchedOffsetsMetadata(consumer.assignment());
            consumer.commitSync(offsetsToCommit);
            LOG.debug("Committed offsets {} to Kafka", offsetsToCommit);
        }
        return consumerRecords;
    } finally {
        consumer.resume(pausedPartitions);
    }
}
/**
 * Execute poll using the pause API just to send a heartbeat, without polling messages.
 */
void retainConnection() {
    pollingLock.lock();
    TopicPartition[] assignments = null;
    try {
        final Set<TopicPartition> assignmentSet = kafkaConsumer.assignment();
        if (assignmentSet.isEmpty()) {
            return;
        }
        if (logger.isDebugEnabled()) {
            logger.debug("Pausing " + assignmentSet);
        }
        assignments = assignmentSet.toArray(new TopicPartition[assignmentSet.size()]);
        kafkaConsumer.pause(assignments);
        kafkaConsumer.poll(0);
        if (logger.isDebugEnabled()) {
            // Log the set rather than the array to get readable output instead of an array hash.
            logger.debug("Resuming " + assignmentSet);
        }
    } finally {
        try {
            if (assignments != null) {
                kafkaConsumer.resume(assignments);
            }
        } finally {
            pollingLock.unlock();
        }
    }
}
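Because every partition is paused before poll(), the call services the consumer's coordinator traffic without delivering records. A hypothetical way to schedule it while real processing is suspended; the executor and interval are assumptions, not from the source:

// Hypothetical keep-alive scheduling (illustrative only):
ScheduledExecutorService keepAlive = Executors.newSingleThreadScheduledExecutor();
keepAlive.scheduleAtFixedRate(this::retainConnection, 0L, 30L, TimeUnit.SECONDS);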
while (_metricConsumer.assignment().isEmpty()) {
    pollerCount++;
    _metricConsumer.poll(10);
}
for (TopicPartition tp : _metricConsumer.assignment()) {
    timestampToSeek.put(tp, startTimeMs);
}
Set<TopicPartition> assignment = new HashSet<>(_metricConsumer.assignment());
Map<TopicPartition, Long> endOffsets = _metricConsumer.endOffsets(assignment);
Map<TopicPartition, OffsetAndTimestamp> offsetsForTimes = _metricConsumer.offsetsForTimes(timestampToSeek);
LOG.debug("Starting consuming from metrics reporter topic partitions {}.", _metricConsumer.assignment());
// ...
LOG.debug("Finished sampling from topic partitions {} in time range [{},{}]. Collected {} metrics.",
    _metricConsumer.assignment(), startTimeMs, endTimeMs, totalMetricsAdded);
@Override
public void nextTuple() {
    try {
        if (refreshAssignmentTimer.isExpiredResetOnTrue()) {
            refreshAssignment();
        }
        if (commitTimer != null && commitTimer.isExpiredResetOnTrue()) {
            if (isAtLeastOnceProcessing()) {
                commitOffsetsForAckedTuples();
            } else if (kafkaSpoutConfig.getProcessingGuarantee() == ProcessingGuarantee.NO_GUARANTEE) {
                Map<TopicPartition, OffsetAndMetadata> offsetsToCommit =
                    createFetchedOffsetsMetadata(consumer.assignment());
                consumer.commitAsync(offsetsToCommit, null);
                LOG.debug("Committed offsets {} to Kafka", offsetsToCommit);
            }
        }
        PollablePartitionsInfo pollablePartitionsInfo = getPollablePartitionsInfo();
        if (pollablePartitionsInfo.shouldPoll()) {
            try {
                setWaitingToEmit(pollKafkaBroker(pollablePartitionsInfo));
            } catch (RetriableException e) {
                LOG.error("Failed to poll from kafka.", e);
            }
        }
        emitIfWaitingNotEmitted();
    } catch (InterruptException e) {
        throwKafkaConsumerInterruptedException();
    }
}
@Override
public String getSampleMessage(final String topic) {
    String message = null;
    if (listTopics().contains(topic)) {
        try (Consumer<String, String> kafkaConsumer = kafkaConsumerFactory.createConsumer()) {
            kafkaConsumer.assign(kafkaConsumer.partitionsFor(topic).stream()
                .map(partitionInfo -> new TopicPartition(topic, partitionInfo.partition()))
                .collect(Collectors.toList()));
            // Seek each partition back by one record so the poll returns the latest message.
            kafkaConsumer.assignment().stream()
                .filter(p -> (kafkaConsumer.position(p) - 1) >= 0)
                .forEach(p -> kafkaConsumer.seek(p, kafkaConsumer.position(p) - 1));
            final ConsumerRecords<String, String> records = kafkaConsumer.poll(KAFKA_CONSUMER_TIMEOUT);
            message = records.isEmpty() ? null : records.iterator().next().value();
            kafkaConsumer.unsubscribe();
        }
    }
    return message;
}
@Override
public Set<TopicPartition> assignment() {
    return consumer.assignment();
}
@Override
public Set<TopicPartition> assignment() {
    return delegate.assignment();
}
@Override
public Set<TopicPartition> assignment() {
    return kafkaConsumer.assignment();
}
    return null;
}).given(consumer).commitSync(any(Map.class));
given(consumer.assignment()).willReturn(records.keySet());
final CountDownLatch pauseLatch = new CountDownLatch(2);
willAnswer(i -> {
    return null;
}).given(consumer).commitSync(any(Map.class));
given(consumer.assignment()).willReturn(records1.keySet());
TopicPartitionInitialOffset[] topicPartitionOffset = new TopicPartitionInitialOffset[] {
    new TopicPartitionInitialOffset("foo", 0) };
private Set<TopicPartition> getAssignedPartitions() {
    Set<TopicPartition> assignedPartitions = consumer.assignment();
    if (assignedPartitions.isEmpty()) {
        // Polling with an immediate timeout will initialize the assignments for a fresh consumer.
        pollRecords(0L);
        assignedPartitions = consumer.assignment();
    }
    return assignedPartitions;
}
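With a subscribe()-based consumer, the assignment stays empty until the first poll() completes the group join, which is why the helper polls once with a zero timeout. An illustrative first use; the topic name is a placeholder:

// Illustrative first use after subscribing ("events" is an assumed topic name).
consumer.subscribe(Collections.singletonList("events"));
Set<TopicPartition> partitions = getAssignedPartitions(); // triggers the initial rebalance if needed
LOG.debug("Assigned partitions: {}", partitions);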
private static Set<TopicPartition> localPartitions(Consumer<byte[], byte[]> consumer, String topic) {
    Set<TopicPartition> result = new HashSet<>();
    Set<TopicPartition> assignment = consumer.assignment();
    for (TopicPartition tp : assignment) {
        if (tp.topic().equals(topic)) {
            result.add(tp);
        }
    }
    return result;
}
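The same filter can be written more compactly with the Stream API; a behavior-equivalent sketch:

// Stream-based equivalent of the loop above (requires java.util.stream.Collectors).
private static Set<TopicPartition> localPartitions(Consumer<byte[], byte[]> consumer, String topic) {
    return consumer.assignment().stream()
        .filter(tp -> tp.topic().equals(topic))
        .collect(Collectors.toSet());
}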
@Override
public KafkaReadStream<K, V> assignment(Handler<AsyncResult<Set<TopicPartition>>> handler) {
    this.submitTask((consumer, future) -> {
        Set<TopicPartition> partitions = consumer.assignment();
        if (future != null) {
            future.complete(partitions);
        }
    }, handler);
    return this;
}
@Override
public void subscribe(Collection<String> topics, ConsumerRebalanceListener callback) {
    Set<String> newSubscription = new HashSet<>(topics);
    // TODO: This is a hot fix for KAFKA-3664 and should be removed after the issue is fixed.
    commitSync();
    for (TopicPartition tp : _kafkaConsumer.assignment()) {
        if (!newSubscription.contains(tp.topic())) {
            _consumerRecordsProcessor.clear(tp);
        }
    }
    _consumerRebalanceListener.setUserListener(callback);
    _kafkaConsumer.subscribe(new ArrayList<>(topics), _consumerRebalanceListener);
}