/**
 * Verifies pause/resume bookkeeping on a manually-assigned consumer:
 * pause() must be reflected by paused(), resume() must clear it, and
 * unsubscribe() must also clear the paused set.
 */
@Test public void testPause() {
    KafkaConsumer<byte[], byte[]> consumer = newConsumer(groupId);

    // Manual assignment (no group subscription); paused set starts empty.
    consumer.assign(singletonList(tp0));
    assertEquals(singleton(tp0), consumer.assignment());
    assertTrue(consumer.paused().isEmpty());

    // pause() adds the partition to the paused set; resume() removes it.
    consumer.pause(singleton(tp0));
    assertEquals(singleton(tp0), consumer.paused());
    consumer.resume(singleton(tp0));
    assertTrue(consumer.paused().isEmpty());

    // Unsubscribing drops the assignment and therefore any paused state.
    consumer.unsubscribe();
    assertTrue(consumer.paused().isEmpty());

    consumer.close();
}
// NOTE(review): fragment of a larger if/else — the enclosing method is not visible here.
// Toggles a single partition between resumed and paused depending on a condition
// established above this snippet (presumably backpressure state — TODO confirm in the full file).
reconsumer.resume(Arrays.asList(partitionRecords.getKey())); } else { reconsumer.pause(Arrays.asList(partitionRecords.getKey()));
// NOTE(review): Mockito in-order verification fragment — asserts the consumer was
// paused, then polled, then resumed for partitionTwo, in exactly that sequence.
inOrder.verify(consumerMock).pause(Collections.singleton(partitionTwo)); inOrder.verify(consumerMock).poll(anyLong()); inOrder.verify(consumerMock).resume(Collections.singleton(partitionTwo));
// NOTE(review): fragment — pauses the given partitions, then moves their fetch
// position to the log end (seekToEnd is lazy; it takes effect on the next poll/position call).
consumer.pause(partitions); consumer.seekToEnd(partitions);
/**
 * Pauses every partition currently assigned to the underlying consumer,
 * unless reading is already paused. Idempotent; guarded by this object's
 * monitor, so the check-then-set on the flag is race-free.
 */
protected synchronized void pauseAssignedPartitions() {
    // Already paused — nothing to do.
    if (isReadingPaused.get()) {
        return;
    }
    logger.info("Pausing kafka reading");
    realConsumer.pause(realConsumer.assignment());
    isReadingPaused.set(true);
}
// NOTE(review): fragment — applies a precomputed partition-state delta:
// pauses the to-be-paused set, then resumes the to-be-resumed set.
kafka.pause(partitions.partitionsToBePaused()); kafka.resume(partitions.partitionsToBeResumed());
/**
 * Pause the specified partition so subsequent polls return no records from it.
 *
 * @param tp partition to pause
 */
@Override
public void pausePartition(TopicPartition tp) {
    // KafkaConsumer.pause takes a Collection<TopicPartition> (the varargs form was
    // removed after 0.9); wrap the single partition, consistent with the sibling
    // implementation that uses Arrays.asList(tp). Fully qualified to avoid a new import.
    consumer.pause(java.util.Collections.singletonList(tp));
}
/**
 * Pause the specified partition so subsequent polls return no records from it.
 *
 * @param tp partition to pause
 */
@Override
public void pausePartition(TopicPartition tp) {
    // KafkaConsumer.pause takes a Collection<TopicPartition> (the varargs form was
    // removed after 0.9); wrap the single partition, consistent with the sibling
    // implementation that uses Arrays.asList(tp). Fully qualified to avoid a new import.
    consumer.pause(java.util.Collections.singletonList(tp));
}
/**
 * Pause the specified partition so subsequent polls return no records from it.
 *
 * @param tp partition to pause
 */
@Override
public void pausePartition(TopicPartition tp) {
    // The collection-based pause API needs a list; a singleton list is the
    // minimal wrapper (fully qualified so no new import is required).
    consumer.pause(java.util.Collections.singletonList(tp));
}
/** Pauses every partition currently assigned to the consumer. */
private void pauseAll() {
    consumer.pause(consumer.assignment());
}
/**
 * Pauses all assigned topic-partitions except the given one, leaving only
 * {@code excludedTp} pollable.
 *
 * @param excludedTp the single partition that should remain unpaused
 * @return the set of partitions that were paused
 */
private Collection<TopicPartition> pauseTopicPartitions(TopicPartition excludedTp) {
    // Snapshot the current assignment, then drop the partition we keep polling.
    final Set<TopicPartition> toPause = new HashSet<>(kafkaConsumer.assignment());
    LOG.debug("Currently assigned topic-partitions {}", toPause);
    toPause.remove(excludedTp);
    kafkaConsumer.pause(toPause);
    LOG.debug("Paused topic-partitions {}", toPause);
    return toPause;
}
/**
 * Records the given partitions as paused and, unless the whole connector is
 * paused, pauses them on the underlying consumer immediately.
 *
 * @param partitions partitions to pause; must be currently assigned to this task
 * @throws IllegalWorkerStateException if the task is not yet initialized, or if
 *         any partition is not assigned to this task (wraps the consumer's
 *         IllegalStateException, preserving it as the cause)
 */
@Override public void pause(TopicPartition... partitions) {
    if (consumer == null) {
        throw new IllegalWorkerStateException("SinkTaskContext may not be used to pause consumption until the task is initialized");
    }
    try {
        // Always remember the request so it can be re-applied later (e.g. after
        // a rebalance or when the connector itself is un-paused).
        Collections.addAll(pausedPartitions, partitions);
        if (sinkTask.shouldPause()) {
            // Connector-level pause already halts consumption; defer the
            // per-partition pause until the connector resumes.
            log.debug("{} Connector is paused, so not pausing consumer's partitions {}", this, partitions);
        } else {
            consumer.pause(Arrays.asList(partitions));
            log.debug("{} Pausing partitions {}. Connector is not paused.", this, partitions);
        }
    } catch (IllegalStateException e) {
        // The consumer throws IllegalStateException for unassigned partitions;
        // rethrow in worker terms, keeping the original as the cause.
        throw new IllegalWorkerStateException("SinkTasks may not pause partitions that are not currently assigned to them.", e);
    }
}
// NOTE(review): fragment — pause is issued AFTER poll inside an infinite loop, so the
// first poll can still fetch from both partitions; if the intent is to never consume
// these partitions, pause before the loop instead. Also note the ~100000000 ms
// (~27 hour) poll timeout — presumably a placeholder; verify. Two consecutive pause
// calls are additive (pause does not un-pause previously paused partitions).
while (true) { ConsumerRecords<String, String> records = consumer.poll(100000000); consumer.pause(Arrays.asList(new TopicPartition(topic, 0))); consumer.pause(Arrays.asList(new TopicPartition(topic, 1))); records.forEach(record -> { System.out.printf("client : %s , topic: %s , partition: %d , offset = %d, key = %s, value = %s%n", clientid, record.topic(),
// NOTE(review): fragment — pauses the full assignment while the fetched batch is
// processed on an executor thread; the caller presumably keeps calling poll() (which
// returns nothing while paused) to stay in the group, then resumes later — TODO confirm.
consumer.pause(consumer.assignment()); Future<Boolean> future = executor.submit(new ConsumeRecords(records, partitionToUncommittedOffsetMap)); futures.add(future);
// NOTE(review): fragment — pauses a single partition and removes it from the
// "running" bookkeeping keyed by partition number.
consumer.pause(singleton(topicPartition)); running.remove(topicPartition.partition());
/**
 * Pauses all currently assigned partitions, flags this instance as paused,
 * and emits a structured "pause" log event.
 */
public void pause() {
    consumer.pause(partitionsAssigned);
    pause = true;
    // Structured event log entry for observability.
    Log.create(LOGGER)
        .setLogType(LOG_TYPE)
        .setEvent("pause")
        .info();
}
// NOTE(review): fragment of an if/else chain (the leading condition is not visible) —
// either pauses everything, or re-applies the context's recorded paused-partition set.
pauseAll(); else if (!context.pausedPartitions().isEmpty()) consumer.pause(context.pausedPartitions());
/**
 * Polls Kafka for records from only the pollable partitions: every other
 * assigned partition is paused for the duration of the poll and resumed in a
 * finally block, so a failure mid-poll cannot leave partitions stuck paused.
 *
 * For the AT_MOST_ONCE guarantee, offsets are committed immediately after the
 * poll, before the records are processed.
 *
 * @param pollablePartitionsInfo the partitions eligible for polling plus their
 *        earliest retriable offsets (used to re-seek before polling)
 * @return the records fetched by this poll
 */
private ConsumerRecords<K, V> pollKafkaBroker(PollablePartitionsInfo pollablePartitionsInfo) {
    doSeekRetriableTopicPartitions(pollablePartitionsInfo.pollableEarliestRetriableOffsets);
    // pausedPartitions = assignment minus the pollable set.
    Set<TopicPartition> pausedPartitions = new HashSet<>(kafkaConsumer.assignment());
    Iterator<TopicPartition> pausedIter = pausedPartitions.iterator();
    while (pausedIter.hasNext()) {
        if (pollablePartitionsInfo.pollablePartitions.contains(pausedIter.next())) {
            pausedIter.remove();
        }
    }
    try {
        kafkaConsumer.pause(pausedPartitions);
        final ConsumerRecords<K, V> consumerRecords = kafkaConsumer.poll(kafkaSpoutConfig.getPollTimeoutMs());
        ackRetriableOffsetsIfCompactedAway(pollablePartitionsInfo.pollableEarliestRetriableOffsets, consumerRecords);
        final int numPolledRecords = consumerRecords.count();
        LOG.debug("Polled [{}] records from Kafka", numPolledRecords);
        if (kafkaSpoutConfig.getProcessingGuarantee() == KafkaSpoutConfig.ProcessingGuarantee.AT_MOST_ONCE) {
            //Commit polled records immediately to ensure delivery is at-most-once.
            Map<TopicPartition, OffsetAndMetadata> offsetsToCommit =
                createFetchedOffsetsMetadata(kafkaConsumer.assignment());
            kafkaConsumer.commitSync(offsetsToCommit);
            LOG.debug("Committed offsets {} to Kafka", offsetsToCommit);
        }
        return consumerRecords;
    } finally {
        // Always undo the temporary pause, even if poll/commit threw.
        kafkaConsumer.resume(pausedPartitions);
    }
}
// NOTE(review): fragment — passes a bare TopicPartition to pause(), which only
// compiles against the legacy (0.9) varargs API; the modern KafkaConsumer.pause
// takes a Collection<TopicPartition>. Flag for migration if the client is upgraded.
int partitionid = partitionInfo.partition(); TopicPartition partition = new TopicPartition(fieryConfig.getKafkatopic(), partitionid); consumer.pause(partition);