@Test public void verifyPollTimesOutDuringMetadataUpdate() { final Time time = new MockTime(); Metadata metadata = createMetadata(); final MockClient client = new MockClient(time, metadata); initMetadata(client, Collections.singletonMap(topic, 1)); Node node = metadata.fetch().nodes().get(0); final PartitionAssignor assignor = new RoundRobinAssignor(); final KafkaConsumer<String, String> consumer = newConsumer(time, client, metadata, assignor, true); consumer.subscribe(singleton(topic), getConsumerRebalanceListener(consumer)); prepareRebalance(client, node, assignor, singletonList(tp0), null); consumer.poll(Duration.ZERO); // The underlying client should NOT get a fetch request final Queue<ClientRequest> requests = client.requests(); Assert.assertEquals(0, requests.size()); }
@Test public void testPollThrowsInterruptExceptionIfInterrupted() { final Time time = new MockTime(); final Metadata metadata = createMetadata(); final MockClient client = new MockClient(time, metadata); initMetadata(client, Collections.singletonMap(topic, 1)); Node node = metadata.fetch().nodes().get(0); final PartitionAssignor assignor = new RoundRobinAssignor(); KafkaConsumer<String, String> consumer = newConsumer(time, client, metadata, assignor, false); consumer.subscribe(singleton(topic), getConsumerRebalanceListener(consumer)); prepareRebalance(client, node, assignor, singletonList(tp0), null); consumer.updateAssignmentMetadataIfNeeded(time.timer(Long.MAX_VALUE)); consumer.poll(Duration.ZERO); // interrupt the thread and call poll try { Thread.currentThread().interrupt(); expectedException.expect(InterruptException.class); consumer.poll(Duration.ZERO); } finally { // clear interrupted state again since this thread may be reused by JUnit Thread.interrupted(); } consumer.close(Duration.ofMillis(0)); }
@SuppressWarnings("deprecation") @Test public void verifyDeprecatedPollDoesNotTimeOutDuringMetadataUpdate() { final Time time = new MockTime(); Metadata metadata = createMetadata(); final MockClient client = new MockClient(time, metadata); initMetadata(client, Collections.singletonMap(topic, 1)); Node node = metadata.fetch().nodes().get(0); final PartitionAssignor assignor = new RoundRobinAssignor(); final KafkaConsumer<String, String> consumer = newConsumer(time, client, metadata, assignor, true); consumer.subscribe(singleton(topic), getConsumerRebalanceListener(consumer)); prepareRebalance(client, node, assignor, singletonList(tp0), null); consumer.poll(0L); // The underlying client SHOULD get a fetch request final Queue<ClientRequest> requests = client.requests(); Assert.assertEquals(1, requests.size()); final Class<? extends AbstractRequest.Builder> aClass = requests.peek().requestBuilder().getClass(); Assert.assertEquals(FetchRequest.Builder.class, aClass); }
@Test
public void testChangingRegexSubscription() {
    PartitionAssignor roundRobin = new RoundRobinAssignor();
    String secondTopic = "other";
    TopicPartition secondTopicPartition = new TopicPartition(secondTopic, 0);

    Time mockTime = new MockTime();
    Metadata consumerMetadata = createMetadata();
    MockClient networkClient = new MockClient(mockTime, consumerMetadata);

    Map<String, Integer> partitionCounts = new HashMap<>();
    partitionCounts.put(topic, 1);
    partitionCounts.put(secondTopic, 1);
    initMetadata(networkClient, partitionCounts);
    Node broker = consumerMetadata.fetch().nodes().get(0);

    KafkaConsumer<String, String> consumer =
        newConsumer(mockTime, networkClient, consumerMetadata, roundRobin, false);

    // First pattern matches only the primary topic.
    Node coordinator =
        prepareRebalance(networkClient, broker, singleton(topic), roundRobin, singletonList(tp0), null);
    consumer.subscribe(Pattern.compile(topic), getConsumerRebalanceListener(consumer));
    consumer.updateAssignmentMetadataIfNeeded(mockTime.timer(Long.MAX_VALUE));
    consumer.poll(Duration.ZERO);
    assertEquals(singleton(topic), consumer.subscription());

    // Switching the pattern must trigger a metadata refresh and a new rebalance
    // that lands on the other topic's partition.
    consumer.subscribe(Pattern.compile(secondTopic), getConsumerRebalanceListener(consumer));
    networkClient.prepareMetadataUpdate(TestUtils.metadataUpdateWith(1, partitionCounts));
    prepareRebalance(networkClient, broker, singleton(secondTopic), roundRobin,
        singletonList(secondTopicPartition), coordinator);
    consumer.poll(Duration.ZERO);
    assertEquals(singleton(secondTopic), consumer.subscription());

    consumer.close(Duration.ofMillis(0));
}
@Test
public void testRegexSubscription() {
    String nonMatchingTopic = "unmatched";
    Time mockTime = new MockTime();
    Metadata consumerMetadata = createMetadata();
    MockClient networkClient = new MockClient(mockTime, consumerMetadata);

    Map<String, Integer> partitionCounts = new HashMap<>();
    partitionCounts.put(topic, 1);
    partitionCounts.put(nonMatchingTopic, 1);
    initMetadata(networkClient, partitionCounts);
    Node broker = consumerMetadata.fetch().nodes().get(0);

    PartitionAssignor roundRobin = new RoundRobinAssignor();
    KafkaConsumer<String, String> consumer =
        newConsumer(mockTime, networkClient, consumerMetadata, roundRobin, true);

    prepareRebalance(networkClient, broker, singleton(topic), roundRobin, singletonList(tp0), null);
    consumer.subscribe(Pattern.compile(topic), getConsumerRebalanceListener(consumer));
    networkClient.prepareMetadataUpdate(TestUtils.metadataUpdateWith(1, partitionCounts));
    consumer.updateAssignmentMetadataIfNeeded(mockTime.timer(Long.MAX_VALUE));

    // Only the matching topic ends up subscribed/assigned; "unmatched" is ignored.
    assertEquals(singleton(topic), consumer.subscription());
    assertEquals(singleton(tp0), consumer.assignment());

    consumer.close(Duration.ofMillis(0));
}
@Test public void verifyNoCoordinatorLookupForManualAssignmentWithSeek() { Time time = new MockTime(); Metadata metadata = createMetadata(); MockClient client = new MockClient(time, metadata); initMetadata(client, Collections.singletonMap(topic, 1)); PartitionAssignor assignor = new RoundRobinAssignor(); KafkaConsumer<String, String> consumer = newConsumer(time, client, metadata, assignor, true); consumer.assign(singleton(tp0)); consumer.seekToBeginning(singleton(tp0)); // there shouldn't be any need to lookup the coordinator or fetch committed offsets. // we just lookup the starting position and send the record fetch. client.prepareResponse(listOffsetsResponse(Collections.singletonMap(tp0, 50L))); client.prepareResponse(fetchResponse(tp0, 50L, 5)); ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1)); assertEquals(5, records.count()); assertEquals(55L, consumer.position(tp0)); consumer.close(Duration.ofMillis(0)); }
@Test(expected = InvalidTopicException.class) public void testSubscriptionOnInvalidTopic() { Time time = new MockTime(); Metadata metadata = createMetadata(); MockClient client = new MockClient(time, metadata); initMetadata(client, Collections.singletonMap(topic, 1)); Cluster cluster = metadata.fetch(); PartitionAssignor assignor = new RoundRobinAssignor(); String invalidTopicName = "topic abc"; // Invalid topic name due to space List<MetadataResponse.TopicMetadata> topicMetadata = new ArrayList<>(); topicMetadata.add(new MetadataResponse.TopicMetadata(Errors.INVALID_TOPIC_EXCEPTION, invalidTopicName, false, Collections.emptyList())); MetadataResponse updateResponse = new MetadataResponse(cluster.nodes(), cluster.clusterResource().clusterId(), cluster.controller().id(), topicMetadata); client.prepareMetadataUpdate(updateResponse); KafkaConsumer<String, String> consumer = newConsumer(time, client, metadata, assignor, true); consumer.subscribe(singleton(invalidTopicName), getConsumerRebalanceListener(consumer)); consumer.poll(Duration.ZERO); } }
@Test
public void testProtocolMetadataOrder() {
    RoundRobinAssignor roundRobinAssignor = new RoundRobinAssignor();
    RangeAssignor rangeAssignor = new RangeAssignor();

    // The coordinator must advertise assignor protocols in the exact order
    // they were configured — verify both orderings.
    try (Metrics firstMetrics = new Metrics(time)) {
        ConsumerCoordinator coordinator = buildCoordinator(firstMetrics,
            Arrays.<PartitionAssignor>asList(roundRobinAssignor, rangeAssignor),
            ConsumerConfig.DEFAULT_EXCLUDE_INTERNAL_TOPICS, false, true);
        List<ProtocolMetadata> protocols = coordinator.metadata();
        assertEquals(2, protocols.size());
        assertEquals(roundRobinAssignor.name(), protocols.get(0).name());
        assertEquals(rangeAssignor.name(), protocols.get(1).name());
    }

    try (Metrics secondMetrics = new Metrics(time)) {
        ConsumerCoordinator coordinator = buildCoordinator(secondMetrics,
            Arrays.<PartitionAssignor>asList(rangeAssignor, roundRobinAssignor),
            ConsumerConfig.DEFAULT_EXCLUDE_INTERNAL_TOPICS, false, true);
        List<ProtocolMetadata> protocols = coordinator.metadata();
        assertEquals(2, protocols.size());
        assertEquals(rangeAssignor.name(), protocols.get(0).name());
        assertEquals(roundRobinAssignor.name(), protocols.get(1).name());
    }
}
@Test(expected = NoOffsetForPartitionException.class) public void testMissingOffsetNoResetPolicy() { Time time = new MockTime(); Metadata metadata = createMetadata(); MockClient client = new MockClient(time, metadata); initMetadata(client, Collections.singletonMap(topic, 1)); Node node = metadata.fetch().nodes().get(0); PartitionAssignor assignor = new RoundRobinAssignor(); KafkaConsumer<String, String> consumer = newConsumer(time, client, metadata, assignor, OffsetResetStrategy.NONE, true, groupId); consumer.assign(singletonList(tp0)); client.prepareResponseFrom(new FindCoordinatorResponse(Errors.NONE, node), node); Node coordinator = new Node(Integer.MAX_VALUE - node.id(), node.host(), node.port()); // lookup committed offset and find nothing client.prepareResponseFrom(offsetResponse(Collections.singletonMap(tp0, -1L), Errors.NONE), coordinator); consumer.poll(Duration.ZERO); }
@Test public void verifyHeartbeatSentWhenFetchedDataReady() throws Exception { Time time = new MockTime(); Metadata metadata = createMetadata(); MockClient client = new MockClient(time, metadata); initMetadata(client, Collections.singletonMap(topic, 1)); Node node = metadata.fetch().nodes().get(0); PartitionAssignor assignor = new RoundRobinAssignor(); KafkaConsumer<String, String> consumer = newConsumer(time, client, metadata, assignor, true); consumer.subscribe(singleton(topic), getConsumerRebalanceListener(consumer)); Node coordinator = prepareRebalance(client, node, assignor, singletonList(tp0), null); consumer.updateAssignmentMetadataIfNeeded(time.timer(Long.MAX_VALUE)); consumer.poll(Duration.ZERO); // respond to the outstanding fetch so that we have data available on the next poll client.respondFrom(fetchResponse(tp0, 0, 5), node); client.poll(0, time.milliseconds()); client.prepareResponseFrom(fetchResponse(tp0, 5, 0), node); AtomicBoolean heartbeatReceived = prepareHeartbeatResponse(client, coordinator); time.sleep(heartbeatIntervalMs); Thread.sleep(heartbeatIntervalMs); consumer.poll(Duration.ZERO); assertTrue(heartbeatReceived.get()); consumer.close(Duration.ofMillis(0)); }
@Test public void verifyHeartbeatSent() throws Exception { Time time = new MockTime(); Metadata metadata = createMetadata(); MockClient client = new MockClient(time, metadata); initMetadata(client, Collections.singletonMap(topic, 1)); Node node = metadata.fetch().nodes().get(0); PartitionAssignor assignor = new RoundRobinAssignor(); KafkaConsumer<String, String> consumer = newConsumer(time, client, metadata, assignor, true); consumer.subscribe(singleton(topic), getConsumerRebalanceListener(consumer)); Node coordinator = prepareRebalance(client, node, assignor, singletonList(tp0), null); // initial fetch client.prepareResponseFrom(fetchResponse(tp0, 0, 0), node); consumer.updateAssignmentMetadataIfNeeded(time.timer(Long.MAX_VALUE)); assertEquals(singleton(tp0), consumer.assignment()); AtomicBoolean heartbeatReceived = prepareHeartbeatResponse(client, coordinator); // heartbeat interval is 2 seconds time.sleep(heartbeatIntervalMs); Thread.sleep(heartbeatIntervalMs); consumer.updateAssignmentMetadataIfNeeded(time.timer(Long.MAX_VALUE)); assertTrue(heartbeatReceived.get()); consumer.close(Duration.ofMillis(0)); }
@Test public void testAutoCommitSentBeforePositionUpdate() { Time time = new MockTime(); Metadata metadata = createMetadata(); MockClient client = new MockClient(time, metadata); initMetadata(client, Collections.singletonMap(topic, 1)); Node node = metadata.fetch().nodes().get(0); PartitionAssignor assignor = new RoundRobinAssignor(); KafkaConsumer<String, String> consumer = newConsumer(time, client, metadata, assignor, true); consumer.subscribe(singleton(topic), getConsumerRebalanceListener(consumer)); Node coordinator = prepareRebalance(client, node, assignor, singletonList(tp0), null); consumer.updateAssignmentMetadataIfNeeded(time.timer(Long.MAX_VALUE)); consumer.poll(Duration.ZERO); // respond to the outstanding fetch so that we have data available on the next poll client.respondFrom(fetchResponse(tp0, 0, 5), node); client.poll(0, time.milliseconds()); time.sleep(autoCommitIntervalMs); client.prepareResponseFrom(fetchResponse(tp0, 5, 0), node); // no data has been returned to the user yet, so the committed offset should be 0 AtomicBoolean commitReceived = prepareOffsetCommitResponse(client, coordinator, tp0, 0); consumer.poll(Duration.ZERO); assertTrue(commitReceived.get()); consumer.close(Duration.ofMillis(0)); }
// NOTE(review): fragment of a test method whose body is not fully visible in
// this chunk — presumably the standard broker-node + round-robin assignor
// setup shared by the surrounding tests; verify against the full file.
Node node = metadata.fetch().nodes().get(0); PartitionAssignor assignor = new RoundRobinAssignor();
// NOTE(review): fragment of a test method whose body is not fully visible in
// this chunk — presumably the standard broker-node + round-robin assignor
// setup shared by the surrounding tests; verify against the full file.
Node node = metadata.fetch().nodes().get(0); PartitionAssignor assignor = new RoundRobinAssignor();
@Test
public void testResetToCommittedOffset() {
    Time mockTime = new MockTime();
    Metadata consumerMetadata = createMetadata();
    MockClient networkClient = new MockClient(mockTime, consumerMetadata);
    initMetadata(networkClient, Collections.singletonMap(topic, 1));
    Node broker = consumerMetadata.fetch().nodes().get(0);

    PartitionAssignor roundRobin = new RoundRobinAssignor();
    KafkaConsumer<String, String> consumer = newConsumer(mockTime, networkClient,
        consumerMetadata, roundRobin, OffsetResetStrategy.NONE, true, groupId);
    consumer.assign(singletonList(tp0));

    networkClient.prepareResponseFrom(new FindCoordinatorResponse(Errors.NONE, broker), broker);
    Node coordinator = new Node(Integer.MAX_VALUE - broker.id(), broker.host(), broker.port());

    // A committed offset of 539 exists, so the position resets to it.
    networkClient.prepareResponseFrom(
        offsetResponse(Collections.singletonMap(tp0, 539L), Errors.NONE), coordinator);
    consumer.poll(Duration.ZERO);
    assertEquals(539L, consumer.position(tp0));
}
@Test
public void testResetUsingAutoResetPolicy() {
    Time mockTime = new MockTime();
    Metadata consumerMetadata = createMetadata();
    MockClient networkClient = new MockClient(mockTime, consumerMetadata);
    initMetadata(networkClient, Collections.singletonMap(topic, 1));
    Node broker = consumerMetadata.fetch().nodes().get(0);

    PartitionAssignor roundRobin = new RoundRobinAssignor();
    KafkaConsumer<String, String> consumer = newConsumer(mockTime, networkClient,
        consumerMetadata, roundRobin, OffsetResetStrategy.LATEST, true, groupId);
    consumer.assign(singletonList(tp0));

    networkClient.prepareResponseFrom(new FindCoordinatorResponse(Errors.NONE, broker), broker);
    Node coordinator = new Node(Integer.MAX_VALUE - broker.id(), broker.host(), broker.port());

    // No committed offset (-1), so the LATEST policy resets to the log-end
    // offset returned by the list-offsets request (50).
    networkClient.prepareResponseFrom(
        offsetResponse(Collections.singletonMap(tp0, -1L), Errors.NONE), coordinator);
    networkClient.prepareResponse(listOffsetsResponse(Collections.singletonMap(tp0, 50L)));

    consumer.poll(Duration.ZERO);
    assertEquals(50L, consumer.position(tp0));
}
@Test public void testCommitsFetchedDuringAssign() { long offset1 = 10000; long offset2 = 20000; Time time = new MockTime(); Metadata metadata = createMetadata(); MockClient client = new MockClient(time, metadata); initMetadata(client, Collections.singletonMap(topic, 2)); Node node = metadata.fetch().nodes().get(0); PartitionAssignor assignor = new RoundRobinAssignor(); KafkaConsumer<String, String> consumer = newConsumer(time, client, metadata, assignor, true); consumer.assign(singletonList(tp0)); // lookup coordinator client.prepareResponseFrom(new FindCoordinatorResponse(Errors.NONE, node), node); Node coordinator = new Node(Integer.MAX_VALUE - node.id(), node.host(), node.port()); // fetch offset for one topic client.prepareResponseFrom(offsetResponse(Collections.singletonMap(tp0, offset1), Errors.NONE), coordinator); assertEquals(offset1, consumer.committed(tp0).offset()); consumer.assign(Arrays.asList(tp0, tp1)); // fetch offset for two topics Map<TopicPartition, Long> offsets = new HashMap<>(); offsets.put(tp0, offset1); client.prepareResponseFrom(offsetResponse(offsets, Errors.NONE), coordinator); assertEquals(offset1, consumer.committed(tp0).offset()); offsets.remove(tp0); offsets.put(tp1, offset2); client.prepareResponseFrom(offsetResponse(offsets, Errors.NONE), coordinator); assertEquals(offset2, consumer.committed(tp1).offset()); consumer.close(Duration.ofMillis(0)); }
// NOTE(review): fragment of a test method whose body is not fully visible in
// this chunk — presumably the standard broker-node + round-robin assignor
// setup shared by the surrounding tests; verify against the full file.
Node node = metadata.fetch().nodes().get(0); PartitionAssignor assignor = new RoundRobinAssignor();
/**
 * Computes partition-to-thread assignments using Kafka's round-robin strategy.
 *
 * @param threads number of consuming threads to spread partitions across
 * @param streams map of stream/topic name to its partition count
 * @return one partition list per thread, as produced by {@code assignments}
 */
public static List<List<LogPartition>> roundRobinAssignments(int threads, Map<String, Integer> streams) {
    // Delegate to the generic helper with a freshly constructed assignor.
    return assignments(new RoundRobinAssignor(), threads, streams);
}