private Node getControllerNode(int controllerId, Collection<Node> brokers) {
    for (Node broker : brokers) {
        if (broker.id() == controllerId)
            return broker;
    }
    return null;
}
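// A minimal usage sketch (hypothetical helper, not from the original source): callers that
// prefer a non-null sentinel could wrap the lookup above with Kafka's Node.noNode().
private Node controllerOrNoNode(int controllerId, Collection<Node> brokers) {
    Node controller = getControllerNode(controllerId, brokers);
    return controller == null ? Node.noNode() : controller;
}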
@Test
public void testLeastLoadedNode() {
    client.ready(node, time.milliseconds());
    awaitReady(client, node);
    client.poll(1, time.milliseconds());
    assertTrue("The client should be ready", client.isReady(node, time.milliseconds()));

    // leastLoadedNode should be our single node
    Node leastNode = client.leastLoadedNode(time.milliseconds());
    assertEquals("There should be one least-loaded node", node.id(), leastNode.id());

    // sleep for longer than the reconnect backoff
    time.sleep(reconnectBackoffMsTest);

    // close the node
    selector.serverDisconnect(node.idString());
    client.poll(1, time.milliseconds());
    assertFalse("After the forced disconnection the client should no longer be ready",
            client.ready(node, time.milliseconds()));

    leastNode = client.leastLoadedNode(time.milliseconds());
    assertNull("There should be no least-loaded node", leastNode);
}
/** Returns the leader's id, or -1 if no leader is currently known. */
public int leaderId() {
    return leader == null ? -1 : leader.id();
}
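// Hedged companion sketch (hypothetical, not from the original class): a guard that makes
// the -1 sentinel explicit at call sites instead of comparing against the magic value.
public boolean hasLeader() {
    return leaderId() != -1;
}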
@Override
public void onFailure(RuntimeException e) {
    // Propagate the failure to this node's fetch session handler, if one exists.
    synchronized (Fetcher.this) {
        FetchSessionHandler handler = sessionHandler(fetchTarget.id());
        if (handler != null) {
            handler.handleError(e);
        }
    }
}
});
private Node controller(MetadataResponse response) {
    if (response.controller() == null || response.controller().id() == MetadataResponse.NO_CONTROLLER_ID)
        return null;
    return response.controller();
}
@VisibleForTesting
public int getTransactionCoordinatorId() {
    // Reflectively reach into the producer's internal TransactionManager to
    // resolve the current transaction coordinator.
    Object transactionManager = getValue(kafkaProducer, "transactionManager");
    Node node = (Node) invoke(transactionManager, "coordinator", FindCoordinatorRequest.CoordinatorType.TRANSACTION);
    return node.id();
}
public KafkaTopicPartitionLeader(KafkaTopicPartition topicPartition, Node leader) {
    this.topicPartition = topicPartition;
    if (leader == null) {
        this.leaderId = -1;
        this.leaderHost = null;
        this.leaderPort = -1;
    } else {
        this.leaderId = leader.id();
        this.leaderPort = leader.port();
        this.leaderHost = leader.host();
    }
    // Precompute the hash once; 14 is an arbitrary non-zero seed for the leaderless case.
    int cachedHash = (leader == null) ? 14 : leader.hashCode();
    this.cachedHash = 31 * cachedHash + topicPartition.hashCode();
}
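// Presumably the precomputed value backs hashCode(); a minimal sketch of the assumed
// override (the actual class may differ):
@Override
public int hashCode() {
    return cachedHash;
}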
private MetadataResponse newMetadataResponse(String topic, Errors error) {
    List<MetadataResponse.PartitionMetadata> partitionsMetadata = new ArrayList<>();
    if (error == Errors.NONE) {
        Optional<MetadataResponse.TopicMetadata> foundMetadata = initialUpdateResponse.topicMetadata()
                .stream()
                .filter(topicMetadata -> topicMetadata.topic().equals(topic))
                .findFirst();
        foundMetadata.ifPresent(topicMetadata -> partitionsMetadata.addAll(topicMetadata.partitionMetadata()));
    }

    MetadataResponse.TopicMetadata topicMetadata = new MetadataResponse.TopicMetadata(error, topic, false, partitionsMetadata);
    List<Node> brokers = new ArrayList<>(initialUpdateResponse.brokers());
    return new MetadataResponse(brokers, initialUpdateResponse.clusterId(),
            initialUpdateResponse.controller().id(), Collections.singletonList(topicMetadata));
}
@Test
public void testPartialDrain() throws Exception {
    RecordAccumulator accum = createTestRecordAccumulator(
            1024 + DefaultRecordBatch.RECORD_BATCH_OVERHEAD, 10 * 1024, CompressionType.NONE, 10L);
    int appends = 1024 / msgSize + 1;
    List<TopicPartition> partitions = asList(tp1, tp2);
    for (TopicPartition tp : partitions) {
        for (int i = 0; i < appends; i++)
            accum.append(tp, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs);
    }
    assertEquals("Partition's leader should be ready", Collections.singleton(node1),
            accum.ready(cluster, time.milliseconds()).readyNodes);

    List<ProducerBatch> batches = accum.drain(cluster, Collections.singleton(node1), 1024, 0).get(node1.id());
    assertEquals("Due to the size bound, only one partition should have been retrieved", 1, batches.size());
}
@Override
public int getLeaderToShutDown(String topic) throws Exception {
    // Close the AdminClient after use so its network threads don't leak.
    try (AdminClient client = AdminClient.create(getStandardProperties())) {
        TopicDescription result = client.describeTopics(Collections.singleton(topic)).all().get().get(topic);
        return result.partitions().get(0).leader().id();
    }
}
@Test
public void testNoEpoch() {
    metadata.update(emptyMetadataResponse(), 0L);
    MetadataResponse metadataResponse = TestUtils.metadataUpdateWith("dummy", 1, Collections.emptyMap(),
            Collections.singletonMap("topic-1", 1),
            (error, partition, leader, leaderEpoch, replicas, isr, offlineReplicas) ->
                    new MetadataResponse.PartitionMetadata(error, partition, leader, Optional.empty(), replicas, isr, offlineReplicas));
    metadata.update(metadataResponse, 10L);

    TopicPartition tp = new TopicPartition("topic-1", 0);

    // the update carried no leader epoch
    assertFalse(metadata.lastSeenLeaderEpoch(tp).isPresent());

    // partition metadata is still usable without an epoch
    assertTrue(metadata.partitionInfoIfCurrent(tp).isPresent());
    assertEquals(0, metadata.partitionInfoIfCurrent(tp).get().partition());
    assertEquals(0, metadata.partitionInfoIfCurrent(tp).get().leader().id());
}
@Test
public void testLinger() throws Exception {
    long lingerMs = 10L;
    RecordAccumulator accum = createTestRecordAccumulator(
            1024 + DefaultRecordBatch.RECORD_BATCH_OVERHEAD, 10 * 1024, CompressionType.NONE, lingerMs);
    accum.append(tp1, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs);
    assertEquals("No partitions should be ready", 0, accum.ready(cluster, time.milliseconds()).readyNodes.size());

    // advance past the linger time
    time.sleep(lingerMs);
    assertEquals("Our partition's leader should be ready", Collections.singleton(node1),
            accum.ready(cluster, time.milliseconds()).readyNodes);

    List<ProducerBatch> batches = accum.drain(cluster, Collections.singleton(node1), Integer.MAX_VALUE, 0).get(node1.id());
    assertEquals(1, batches.size());
    ProducerBatch batch = batches.get(0);

    Iterator<Record> iter = batch.records().records().iterator();
    Record record = iter.next();
    assertEquals("Keys should match", ByteBuffer.wrap(key), record.key());
    assertEquals("Values should match", ByteBuffer.wrap(value), record.value());
    assertFalse("No more records", iter.hasNext());
}
@Override
protected Struct toStruct(short version) {
    Struct struct = new Struct(ApiKeys.FIND_COORDINATOR.responseSchema(version));
    struct.setIfExists(THROTTLE_TIME_MS, throttleTimeMs);
    struct.set(ERROR_CODE, error.code());
    struct.setIfExists(ERROR_MESSAGE, errorMessage);

    Struct coordinator = struct.instance(COORDINATOR_KEY_NAME);
    coordinator.set(NODE_ID_KEY_NAME, node.id());
    coordinator.set(HOST_KEY_NAME, node.host());
    coordinator.set(PORT_KEY_NAME, node.port());
    struct.set(COORDINATOR_KEY_NAME, coordinator);
    return struct;
}
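// Hedged sketch of the matching read path (assumed, mirroring the field names above; the
// real constructor may differ): rebuilding the coordinator Node from the serialized struct.
public FindCoordinatorResponse(Struct struct) {
    this.throttleTimeMs = struct.getOrElse(THROTTLE_TIME_MS, DEFAULT_THROTTLE_TIME);
    this.error = Errors.forCode(struct.get(ERROR_CODE));
    this.errorMessage = struct.getOrElse(ERROR_MESSAGE, null);
    Struct coordinator = (Struct) struct.get(COORDINATOR_KEY_NAME);
    this.node = new Node(
            coordinator.getInt(NODE_ID_KEY_NAME),
            coordinator.getString(HOST_KEY_NAME),
            coordinator.getInt(PORT_KEY_NAME));
}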
@Test(expected = NoOffsetForPartitionException.class)
public void testMissingOffsetNoResetPolicy() {
    Time time = new MockTime();
    Metadata metadata = createMetadata();
    MockClient client = new MockClient(time, metadata);
    initMetadata(client, Collections.singletonMap(topic, 1));
    Node node = metadata.fetch().nodes().get(0);

    PartitionAssignor assignor = new RoundRobinAssignor();

    KafkaConsumer<String, String> consumer = newConsumer(time, client, metadata, assignor,
            OffsetResetStrategy.NONE, true, groupId);
    consumer.assign(singletonList(tp0));

    client.prepareResponseFrom(new FindCoordinatorResponse(Errors.NONE, node), node);
    // the consumer connects to the coordinator under a synthetic connection id of
    // Integer.MAX_VALUE minus the broker id
    Node coordinator = new Node(Integer.MAX_VALUE - node.id(), node.host(), node.port());

    // lookup committed offset and find nothing
    client.prepareResponseFrom(offsetResponse(Collections.singletonMap(tp0, -1L), Errors.NONE), coordinator);
    consumer.poll(Duration.ZERO);
}
@Test
public void testResetToCommittedOffset() {
    Time time = new MockTime();
    Metadata metadata = createMetadata();
    MockClient client = new MockClient(time, metadata);
    initMetadata(client, Collections.singletonMap(topic, 1));
    Node node = metadata.fetch().nodes().get(0);

    PartitionAssignor assignor = new RoundRobinAssignor();

    KafkaConsumer<String, String> consumer = newConsumer(time, client, metadata, assignor,
            OffsetResetStrategy.NONE, true, groupId);
    consumer.assign(singletonList(tp0));

    client.prepareResponseFrom(new FindCoordinatorResponse(Errors.NONE, node), node);
    Node coordinator = new Node(Integer.MAX_VALUE - node.id(), node.host(), node.port());

    client.prepareResponseFrom(offsetResponse(Collections.singletonMap(tp0, 539L), Errors.NONE), coordinator);
    consumer.poll(Duration.ZERO);

    assertEquals(539L, consumer.position(tp0));
}
@Test
public void testResetUsingAutoResetPolicy() {
    Time time = new MockTime();
    Metadata metadata = createMetadata();
    MockClient client = new MockClient(time, metadata);
    initMetadata(client, Collections.singletonMap(topic, 1));
    Node node = metadata.fetch().nodes().get(0);

    PartitionAssignor assignor = new RoundRobinAssignor();

    KafkaConsumer<String, String> consumer = newConsumer(time, client, metadata, assignor,
            OffsetResetStrategy.LATEST, true, groupId);
    consumer.assign(singletonList(tp0));

    client.prepareResponseFrom(new FindCoordinatorResponse(Errors.NONE, node), node);
    Node coordinator = new Node(Integer.MAX_VALUE - node.id(), node.host(), node.port());

    client.prepareResponseFrom(offsetResponse(Collections.singletonMap(tp0, -1L), Errors.NONE), coordinator);
    client.prepareResponse(listOffsetsResponse(Collections.singletonMap(tp0, 50L)));

    consumer.poll(Duration.ZERO);

    assertEquals(50L, consumer.position(tp0));
}
private void setupCoordinator(int retryBackoffMs, int rebalanceTimeoutMs) {
    this.mockTime = new MockTime();
    Metadata metadata = new Metadata(retryBackoffMs, 60 * 60 * 1000L, true);

    this.mockClient = new MockClient(mockTime, metadata);
    this.consumerClient = new ConsumerNetworkClient(new LogContext(), mockClient, metadata, mockTime,
            retryBackoffMs, REQUEST_TIMEOUT_MS, HEARTBEAT_INTERVAL_MS);
    Metrics metrics = new Metrics();

    mockClient.updateMetadata(TestUtils.metadataUpdateWith(1, emptyMap()));
    this.node = metadata.fetch().nodes().get(0);
    this.coordinatorNode = new Node(Integer.MAX_VALUE - node.id(), node.host(), node.port());

    this.coordinator = new DummyCoordinator(consumerClient, metrics, mockTime, rebalanceTimeoutMs, retryBackoffMs);
}
private Node prepareRebalance(MockClient client, Node node, PartitionAssignor assignor,
                              List<TopicPartition> partitions, Node coordinator) {
    if (coordinator == null) {
        // lookup coordinator
        client.prepareResponseFrom(new FindCoordinatorResponse(Errors.NONE, node), node);
        coordinator = new Node(Integer.MAX_VALUE - node.id(), node.host(), node.port());
    }

    // join group
    client.prepareResponseFrom(joinGroupFollowerResponse(assignor, 1, "memberId", "leaderId", Errors.NONE), coordinator);

    // sync group
    client.prepareResponseFrom(syncGroupResponse(partitions, Errors.NONE), coordinator);

    return coordinator;
}
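// Hypothetical call-site sketch (assumed test usage, names not from the original file):
// prime the MockClient for a full coordinator-lookup / join / sync round trip, then poll
// so the consumer drives the exchange against the prepared responses.
Node coordinator = prepareRebalance(client, node, assignor, singletonList(tp0), null);
consumer.poll(Duration.ZERO);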