/** Returns the set of partitions currently paused, as reported by the wrapped consumer. */
@Override
public Set<TopicPartition> paused() {
    // Pure pass-through to the delegate; no caching or filtering here.
    final Set<TopicPartition> pausedPartitions = delegate.paused();
    return pausedPartitions;
}
/**
 * Checks whether consumption has finished. Consumption is considered done when every
 * assigned partition is either paused or has a consumer position that has reached the
 * log end offset.
 *
 * @param endOffsets the log end offset for each partition.
 * @return true if the consumption is done, false otherwise.
 */
private boolean consumptionDone(Map<TopicPartition, Long> endOffsets) {
    Set<TopicPartition> activePartitions = new HashSet<>(_metricConsumer.assignment());
    activePartitions.removeAll(_metricConsumer.paused());
    // Done only when no still-active partition lags behind its end offset.
    // NOTE(review): assumes endOffsets has an entry for every assigned partition;
    // a missing key would NPE on unboxing — identical to the original behavior.
    return activePartitions.stream()
            .allMatch(tp -> _metricConsumer.position(tp) >= endOffsets.get(tp));
}
// NOTE(review): fragment of a larger method — the surrounding consume loop is not visible here.
LOG.debug("Starting consuming from metrics reporter topic partitions {}.", _metricConsumer.assignment());
// Un-pause anything previously paused so the upcoming polls cover all assigned partitions.
_metricConsumer.resume(_metricConsumer.paused());
// Accumulators presumably updated by the (not shown) poll loop that follows — TODO confirm.
int totalMetricsAdded = 0;
long maxTimeStamp = -1L;
/** Returns the partitions currently paused on the underlying Kafka consumer. */
@Override
public Set<TopicPartition> paused() {
    // Straight delegation — no additional state is consulted.
    final Set<TopicPartition> currentlyPaused = _kafkaConsumer.paused();
    return currentlyPaused;
}
/** Returns the partitions the wrapped consumer currently reports as paused. */
@Override
public Set<TopicPartition> paused() {
    // Pass-through; the wrapped consumer owns the pause state.
    final Set<TopicPartition> result = consumer.paused();
    return result;
}
/** Returns the set of currently paused partitions, delegating to the inner consumer. */
@Override
public Set<TopicPartition> paused() {
    // No local bookkeeping — the inner consumer is the source of truth.
    final Set<TopicPartition> pausedSet = consumer.paused();
    return pausedSet;
}
/** Returns the paused partitions as reported by the backing Kafka consumer. */
@Override
public Set<TopicPartition> paused() {
    // Simple delegation; nothing is filtered or copied here.
    final Set<TopicPartition> pausedPartitions = kafkaConsumer.paused();
    return pausedPartitions;
}
/** Returns the paused-partition set of the delegate consumer. */
@Override
public Set<TopicPartition> paused() {
    // Forwarding call — the delegate tracks pause/resume state.
    final Set<TopicPartition> delegatePaused = delegate.paused();
    return delegatePaused;
}
// NOTE(review): mid-test fragment; the enclosing willAnswer(...) blocks are not fully visible.
return null;
}).given(consumer).pause(records.keySet());
// Stub paused() so the container sees exactly the partitions paused above.
given(consumer.paused()).willReturn(records.keySet());
// Latch counted down twice — presumably one count per expected resume — TODO confirm against the full test.
final CountDownLatch resumeLatch = new CountDownLatch(2);
willAnswer(i -> {
@Override
public void close() throws IOException {
    // This method does not throw a KafkaException
    // Decrement the paused-partitions gauge if this partition was still paused at close time.
    if (consumer.paused().contains(topicPartition))
        PAUSED_PARTITIONS.dec();
}
// NOTE(review): the brace below closes the enclosing (not fully visible) class/anonymous scope.
}
/**
 * Asynchronously fetches the set of currently paused partitions and delivers it
 * to {@code handler} via the worker task queue.
 */
@Override
public void paused(Handler<AsyncResult<Set<TopicPartition>>> handler) {
    // The lookup always runs on the worker context; the future may legitimately
    // be null (fire-and-forget), in which case the result is simply discarded.
    this.submitTask((kafkaConsumer, fut) -> {
        final Set<TopicPartition> pausedSet = kafkaConsumer.paused();
        if (fut == null) {
            return;
        }
        fut.complete(pausedSet);
    }, handler);
}
// NOTE(review): this span fuses several non-adjacent snippets of the original test
// (stubbing, a record-map build cut mid-call, and InOrder verification); kept verbatim.
return null;
}).given(consumer).pause(anyCollection());
// paused() reflects whatever the test has stored in the 'paused' atomic reference.
willAnswer(i -> paused.get()).given(consumer).paused();
Map<TopicPartition, List<ConsumerRecord>> records1 = new LinkedHashMap<>();
records1.put(topicPartition, Arrays.asList(
consumer.paused(); // need some other interaction with mock between polls for InOrder
InOrder inOrder = inOrder(consumer, log1, log2);
inOrder.verify(consumer).poll(any(Duration.class));
inOrder.verify(consumer).paused();
inOrder.verify(consumer).poll(any(Duration.class));
// NOTE(review): fused non-adjacent snippets of one test — stubbing, interleaved
// receive()/paused() calls, and the InOrder verification chain; kept verbatim.
return null;
}).given(consumer).pause(anyCollection());
// paused() mirrors the test-controlled 'paused' atomic reference.
willAnswer(i -> paused.get()).given(consumer).paused();
Map<TopicPartition, List<ConsumerRecord>> records1 = new LinkedHashMap<>();
records1.put(topicPartition, Collections.singletonList(
consumer.paused(); // need some other interaction with mock between polls for InOrder
Message<?> received2 = source.receive();
consumer.paused(); // need some other interaction with mock between polls for InOrder
Message<?> received3 = source.receive();
consumer.paused(); // need some other interaction with mock between polls for InOrder
Message<?> received4 = source.receive();
consumer.paused(); // need some other interaction with mock between polls for InOrder
Message<?> received5 = source.receive();
consumer.paused(); // need some other interaction with mock between polls for InOrder
Message<?> received6 = source.receive();
StaticMessageHeaderAccessor.getAcknowledgmentCallback(received3)
inOrder.verify(consumer).subscribe(anyCollection(), any(ConsumerRebalanceListener.class));
inOrder.verify(consumer).poll(any(Duration.class));
inOrder.verify(consumer).paused();
inOrder.verify(consumer).poll(any(Duration.class));
inOrder.verify(consumer).paused();
inOrder.verify(consumer).poll(any(Duration.class));
inOrder.verify(consumer).paused();
inOrder.verify(consumer).poll(any(Duration.class));
inOrder.verify(consumer).paused();
// Two commits expected: offsets 3 and then 6 — presumably after each record batch.
inOrder.verify(consumer).commitSync(Collections.singletonMap(topicPartition, new OffsetAndMetadata(3L)));
inOrder.verify(consumer).commitSync(Collections.singletonMap(topicPartition, new OffsetAndMetadata(6L)));
// NOTE(review): mid-test fragment; the enclosing willAnswer(...) blocks are not fully visible.
return null;
}).given(consumer).pause(records.keySet());
// paused() reports exactly the partitions paused above.
given(consumer.paused()).willReturn(records.keySet());
// Single-count latch — presumably released on the one expected resume — TODO confirm.
final CountDownLatch resumeLatch = new CountDownLatch(1);
willAnswer(i -> {
// NOTE(review): mid-test fragment; cut off inside the Arrays.asList(...) call.
return null;
}).given(consumer).pause(anyCollection());
// paused() mirrors the test-controlled 'paused' atomic reference.
willAnswer(i -> paused.get()).given(consumer).paused();
Map<TopicPartition, List<ConsumerRecord>> records1 = new LinkedHashMap<>();
records1.put(topicPartition, Arrays.asList(
// NOTE(review): mid-test fragment; cut off inside the Arrays.asList(...) call.
return null;
}).given(consumer).pause(anyCollection());
// paused() mirrors the test-controlled 'paused' atomic reference.
willAnswer(i -> paused.get()).given(consumer).paused();
Map<TopicPartition, List<ConsumerRecord>> records1 = new LinkedHashMap<>();
records1.put(topicPartition, Arrays.asList(
/** Exercises the read-only and pause/seek surface of the consumer wrapper. */
@Test
public void consumerMethods() throws Exception {
    // Simple accessors must reflect the configured assignment/subscription state.
    testConsumerMethod(consumer -> assertEquals(this.assignedPartitions, consumer.assignment()));
    testConsumerMethod(consumer -> assertEquals(Collections.singleton(topic), consumer.subscription()));
    testConsumerMethod(consumer -> assertEquals(2, consumer.partitionsFor(topics.get(2)).size()));
    testConsumerMethod(consumer -> assertEquals(topics.size(), consumer.listTopics().size()));
    testConsumerMethod(consumer -> assertEquals(0, consumer.metrics().size()));
    // pause() must be observable via paused(); resume() restores the initial state.
    testConsumerMethod(consumer -> {
        Collection<TopicPartition> toPause = Collections.singleton(new TopicPartition(topic, 1));
        consumer.pause(toPause);
        assertEquals(toPause, consumer.paused());
        consumer.resume(toPause);
    });
    // Seek to beginning and end, then restore the original position.
    testConsumerMethod(consumer -> {
        TopicPartition part = new TopicPartition(topic, 1);
        Collection<TopicPartition> parts = Collections.singleton(part);
        long originalPosition = consumer.position(part);
        consumer.seekToBeginning(parts);
        assertEquals(0, consumer.position(part));
        consumer.seekToEnd(parts);
        assertTrue("Did not seek to end", consumer.position(part) > 0);
        consumer.seek(part, originalPosition);
    });
}