public Node getLeader() {
    if (this.leaderId == -1) {
        return null;
    } else {
        return new Node(leaderId, leaderHost, leaderPort);
    }
}
private List<Node> convertToNodes(Map<Integer, Node> brokers, Object[] brokerIds) {
    List<Node> nodes = new ArrayList<>(brokerIds.length);
    for (Object brokerId : brokerIds) {
        if (brokers.containsKey(brokerId))
            nodes.add(brokers.get(brokerId));
        else
            nodes.add(new Node((int) brokerId, "", -1));
    }
    return nodes;
}
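// Illustrative only (values are assumptions, not from the source): shows the
// fallback behavior of convertToNodes. Ids present in the broker map resolve to
// full nodes; unknown ids produce a placeholder Node with an empty host and
// port -1, so callers can still see which broker id was referenced.
private void convertToNodesExample() {
    Map<Integer, Node> brokers = Collections.singletonMap(1, new Node(1, "broker1", 9092));
    List<Node> nodes = convertToNodes(brokers, new Object[] {1, 2});
    // nodes.get(0) is the real broker; nodes.get(1) is Node(2, "", -1)
}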
/** * Create a "bootstrap" cluster using the given list of host/ports * @param addresses The addresses * @return A cluster for these hosts/ports */ public static Cluster bootstrap(List<InetSocketAddress> addresses) { List<Node> nodes = new ArrayList<>(); int nodeId = -1; for (InetSocketAddress address : addresses) nodes.add(new Node(nodeId--, address.getHostString(), address.getPort())); return new Cluster(null, true, nodes, new ArrayList<>(0), Collections.emptySet(), Collections.emptySet(), Collections.emptySet(), null); }
static MetadataCache bootstrap(List<InetSocketAddress> addresses) {
    List<Node> nodes = new ArrayList<>();
    int nodeId = -1;
    for (InetSocketAddress address : addresses)
        nodes.add(new Node(nodeId--, address.getHostString(), address.getPort()));
    return new MetadataCache(null, nodes, Collections.emptyList(),
        Collections.emptySet(), Collections.emptySet(), Collections.emptySet(),
        null, Cluster.bootstrap(addresses));
}
private FindCoordinatorResponse createFindCoordinatorResponse() {
    return new FindCoordinatorResponse(Errors.NONE, new Node(10, "host1", 2014));
}
private static Cluster mockCluster() {
    HashMap<Integer, Node> nodes = new HashMap<>();
    nodes.put(0, new Node(0, "localhost", 8121));
    nodes.put(1, new Node(1, "localhost", 8122));
    nodes.put(2, new Node(2, "localhost", 8123));
    return new Cluster("mockClusterId", nodes.values(),
        Collections.<PartitionInfo>emptySet(), Collections.<String>emptySet(),
        Collections.<String>emptySet(), nodes.get(0));
}
private static Cluster mockCluster(int controllerIndex) {
    HashMap<Integer, Node> nodes = new HashMap<>();
    nodes.put(0, new Node(0, "localhost", 8121));
    nodes.put(1, new Node(1, "localhost", 8122));
    nodes.put(2, new Node(2, "localhost", 8123));
    return new Cluster("mockClusterId", nodes.values(),
        Collections.emptySet(), Collections.emptySet(), Collections.emptySet(),
        nodes.get(controllerIndex));
}
/**
 * Turn a broker instance into a node instance.
 *
 * @param broker broker instance
 * @return Node representing the given broker
 */
private static Node brokerToNode(Broker broker) {
    return new Node(broker.id(), broker.host(), broker.port());
}
public static Cluster clusterWith(final int nodes, final Map<String, Integer> topicPartitionCounts) {
    final Node[] ns = new Node[nodes];
    for (int i = 0; i < nodes; i++)
        ns[i] = new Node(i, "localhost", 1969);
    final List<PartitionInfo> parts = new ArrayList<>();
    for (final Map.Entry<String, Integer> topicPartition : topicPartitionCounts.entrySet()) {
        final String topic = topicPartition.getKey();
        final int partitions = topicPartition.getValue();
        for (int i = 0; i < partitions; i++)
            parts.add(new PartitionInfo(topic, i, ns[i % ns.length], ns, ns));
    }
    return new Cluster("kafka-cluster", asList(ns), parts, Collections.emptySet(), Topic.INTERNAL_TOPICS);
}
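// Illustrative only (topic name and counts are assumptions, not from the
// source): builds a mock three-node cluster with five partitions of "events".
// Partition leaders rotate round-robin across the nodes via ns[i % ns.length]
// in the helper above, so partition 3 of a five-partition topic lands back on
// node 0 in a three-node cluster.
private static Cluster clusterWithExample() {
    return clusterWith(3, Collections.singletonMap("events", 5));
}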
private LeaderAndIsrRequest createLeaderAndIsrRequest(int version) {
    Map<TopicPartition, LeaderAndIsrRequest.PartitionState> partitionStates = new HashMap<>();
    List<Integer> isr = asList(1, 2);
    List<Integer> replicas = asList(1, 2, 3, 4);
    partitionStates.put(new TopicPartition("topic5", 105),
        new LeaderAndIsrRequest.PartitionState(0, 2, 1, new ArrayList<>(isr), 2, replicas, false));
    partitionStates.put(new TopicPartition("topic5", 1),
        new LeaderAndIsrRequest.PartitionState(1, 1, 1, new ArrayList<>(isr), 2, replicas, false));
    partitionStates.put(new TopicPartition("topic20", 1),
        new LeaderAndIsrRequest.PartitionState(1, 0, 1, new ArrayList<>(isr), 2, replicas, false));

    Set<Node> leaders = Utils.mkSet(
        new Node(0, "test0", 1223),
        new Node(1, "test1", 1223)
    );
    return new LeaderAndIsrRequest.Builder((short) version, 1, 10, 0, partitionStates, leaders).build();
}
@Test
public void testToString() {
    String topic = "sample";
    int partition = 0;
    Node leader = new Node(0, "localhost", 9092);
    Node r1 = new Node(1, "localhost", 9093);
    Node r2 = new Node(2, "localhost", 9094);
    Node[] replicas = new Node[] {leader, r1, r2};
    Node[] inSyncReplicas = new Node[] {leader, r1};
    Node[] offlineReplicas = new Node[] {r2};
    PartitionInfo partitionInfo = new PartitionInfo(topic, partition, leader, replicas, inSyncReplicas, offlineReplicas);

    String expected = String.format("Partition(topic = %s, partition = %d, leader = %s, replicas = %s, isr = %s, offlineReplicas = %s)",
        topic, partition, leader.idString(), "[0,1,2]", "[0,1]", "[2]");
    Assert.assertEquals(expected, partitionInfo.toString());
}
private MetadataResponse createMetadataResponse() {
    Node node = new Node(1, "host1", 1001);
    List<Node> replicas = asList(node);
    List<Node> isr = asList(node);
    List<Node> offlineReplicas = asList();

    List<MetadataResponse.TopicMetadata> allTopicMetadata = new ArrayList<>();
    allTopicMetadata.add(new MetadataResponse.TopicMetadata(Errors.NONE, "__consumer_offsets", true,
        asList(new MetadataResponse.PartitionMetadata(Errors.NONE, 1, node, Optional.of(5),
            replicas, isr, offlineReplicas))));
    allTopicMetadata.add(new MetadataResponse.TopicMetadata(Errors.LEADER_NOT_AVAILABLE, "topic2", false,
        Collections.emptyList()));
    allTopicMetadata.add(new MetadataResponse.TopicMetadata(Errors.NONE, "topic3", false,
        asList(new MetadataResponse.PartitionMetadata(Errors.LEADER_NOT_AVAILABLE, 0, null, Optional.empty(),
            replicas, isr, offlineReplicas))));

    return new MetadataResponse(asList(node), null, MetadataResponse.NO_CONTROLLER_ID, allTopicMetadata);
}
public FindCoordinatorResponse(Struct struct) {
    this.throttleTimeMs = struct.getOrElse(THROTTLE_TIME_MS, DEFAULT_THROTTLE_TIME);
    error = Errors.forCode(struct.get(ERROR_CODE));
    errorMessage = struct.getOrElse(ERROR_MESSAGE, null);
    Struct broker = (Struct) struct.get(COORDINATOR_KEY_NAME);
    int nodeId = broker.getInt(NODE_ID_KEY_NAME);
    String host = broker.getString(HOST_KEY_NAME);
    int port = broker.getInt(PORT_KEY_NAME);
    node = new Node(nodeId, host, port);
}
@Test(expected = NoOffsetForPartitionException.class)
public void testMissingOffsetNoResetPolicy() {
    Time time = new MockTime();
    Metadata metadata = createMetadata();
    MockClient client = new MockClient(time, metadata);
    initMetadata(client, Collections.singletonMap(topic, 1));
    Node node = metadata.fetch().nodes().get(0);

    PartitionAssignor assignor = new RoundRobinAssignor();

    KafkaConsumer<String, String> consumer = newConsumer(time, client, metadata, assignor,
        OffsetResetStrategy.NONE, true, groupId);
    consumer.assign(singletonList(tp0));

    client.prepareResponseFrom(new FindCoordinatorResponse(Errors.NONE, node), node);
    Node coordinator = new Node(Integer.MAX_VALUE - node.id(), node.host(), node.port());

    // lookup committed offset and find nothing
    client.prepareResponseFrom(offsetResponse(Collections.singletonMap(tp0, -1L), Errors.NONE), coordinator);
    consumer.poll(Duration.ZERO);
}
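// Note: this and the following tests derive the coordinator node as
// new Node(Integer.MAX_VALUE - node.id(), ...). This mirrors the convention in
// AbstractCoordinator, which gives the coordinator a distinct connection id so
// the client (and MockClient here) routes coordinator traffic over a separate
// "connection" from ordinary broker traffic, even though both point at the
// same host and port.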
@Before
public void setup() {
    Map<String, String> metricTags = new LinkedHashMap<>();
    metricTags.put("client-id", CLIENT_ID);
    int batchSize = 16 * 1024;
    long deliveryTimeoutMs = 3000L;
    long totalSize = 1024 * 1024;
    String metricGrpName = "producer-metrics";
    MetricConfig metricConfig = new MetricConfig().tags(metricTags);
    this.brokerNode = new Node(0, "localhost", 2211);
    this.transactionManager = new TransactionManager(logContext, transactionalId, transactionTimeoutMs,
        DEFAULT_RETRY_BACKOFF_MS);
    Metrics metrics = new Metrics(metricConfig, time);
    SenderMetricsRegistry senderMetrics = new SenderMetricsRegistry(metrics);

    this.accumulator = new RecordAccumulator(logContext, batchSize, CompressionType.NONE, 0L, 0L,
        deliveryTimeoutMs, metrics, metricGrpName, time, apiVersions, transactionManager,
        new BufferPool(totalSize, batchSize, metrics, time, metricGrpName));
    this.sender = new Sender(logContext, this.client, this.metadata, this.accumulator, true,
        MAX_REQUEST_SIZE, ACKS_ALL, MAX_RETRIES, senderMetrics, this.time, REQUEST_TIMEOUT, 50,
        transactionManager, apiVersions);

    this.client.updateMetadata(TestUtils.metadataUpdateWith(1, singletonMap("test", 2)));
}
@Test
public void testResetToCommittedOffset() {
    Time time = new MockTime();
    Metadata metadata = createMetadata();
    MockClient client = new MockClient(time, metadata);
    initMetadata(client, Collections.singletonMap(topic, 1));
    Node node = metadata.fetch().nodes().get(0);

    PartitionAssignor assignor = new RoundRobinAssignor();

    KafkaConsumer<String, String> consumer = newConsumer(time, client, metadata, assignor,
        OffsetResetStrategy.NONE, true, groupId);
    consumer.assign(singletonList(tp0));

    client.prepareResponseFrom(new FindCoordinatorResponse(Errors.NONE, node), node);
    Node coordinator = new Node(Integer.MAX_VALUE - node.id(), node.host(), node.port());

    client.prepareResponseFrom(offsetResponse(Collections.singletonMap(tp0, 539L), Errors.NONE), coordinator);
    consumer.poll(Duration.ZERO);

    assertEquals(539L, consumer.position(tp0));
}
@Test
public void testResetUsingAutoResetPolicy() {
    Time time = new MockTime();
    Metadata metadata = createMetadata();
    MockClient client = new MockClient(time, metadata);
    initMetadata(client, Collections.singletonMap(topic, 1));
    Node node = metadata.fetch().nodes().get(0);

    PartitionAssignor assignor = new RoundRobinAssignor();

    KafkaConsumer<String, String> consumer = newConsumer(time, client, metadata, assignor,
        OffsetResetStrategy.LATEST, true, groupId);
    consumer.assign(singletonList(tp0));

    client.prepareResponseFrom(new FindCoordinatorResponse(Errors.NONE, node), node);
    Node coordinator = new Node(Integer.MAX_VALUE - node.id(), node.host(), node.port());

    client.prepareResponseFrom(offsetResponse(Collections.singletonMap(tp0, -1L), Errors.NONE), coordinator);
    client.prepareResponse(listOffsetsResponse(Collections.singletonMap(tp0, 50L)));

    consumer.poll(Duration.ZERO);

    assertEquals(50L, consumer.position(tp0));
}
private void setupCoordinator(int retryBackoffMs, int rebalanceTimeoutMs) {
    this.mockTime = new MockTime();
    Metadata metadata = new Metadata(retryBackoffMs, 60 * 60 * 1000L, true);

    this.mockClient = new MockClient(mockTime, metadata);
    this.consumerClient = new ConsumerNetworkClient(new LogContext(), mockClient, metadata, mockTime,
        retryBackoffMs, REQUEST_TIMEOUT_MS, HEARTBEAT_INTERVAL_MS);
    Metrics metrics = new Metrics();

    mockClient.updateMetadata(TestUtils.metadataUpdateWith(1, emptyMap()));
    this.node = metadata.fetch().nodes().get(0);
    this.coordinatorNode = new Node(Integer.MAX_VALUE - node.id(), node.host(), node.port());

    this.coordinator = new DummyCoordinator(consumerClient, metrics, mockTime, rebalanceTimeoutMs, retryBackoffMs);
}
private Node prepareRebalance(MockClient client, Node node, PartitionAssignor assignor,
                              List<TopicPartition> partitions, Node coordinator) {
    if (coordinator == null) {
        // lookup coordinator
        client.prepareResponseFrom(new FindCoordinatorResponse(Errors.NONE, node), node);
        coordinator = new Node(Integer.MAX_VALUE - node.id(), node.host(), node.port());
    }

    // join group
    client.prepareResponseFrom(joinGroupFollowerResponse(assignor, 1, "memberId", "leaderId", Errors.NONE), coordinator);

    // sync group
    client.prepareResponseFrom(syncGroupResponse(partitions, Errors.NONE), coordinator);

    return coordinator;
}
private Node prepareRebalance(MockClient client, Node node, final Set<String> subscribedTopics,
                              PartitionAssignor assignor, List<TopicPartition> partitions, Node coordinator) {
    if (coordinator == null) {
        // lookup coordinator
        client.prepareResponseFrom(new FindCoordinatorResponse(Errors.NONE, node), node);
        coordinator = new Node(Integer.MAX_VALUE - node.id(), node.host(), node.port());
    }

    // join group, verifying that the join request subscribes to the expected topics
    client.prepareResponseFrom(body -> {
        JoinGroupRequest joinGroupRequest = (JoinGroupRequest) body;
        PartitionAssignor.Subscription subscription =
            ConsumerProtocol.deserializeSubscription(joinGroupRequest.groupProtocols().get(0).metadata());
        return subscribedTopics.equals(new HashSet<>(subscription.topics()));
    }, joinGroupFollowerResponse(assignor, 1, "memberId", "leaderId", Errors.NONE), coordinator);

    // sync group
    client.prepareResponseFrom(syncGroupResponse(partitions, Errors.NONE), coordinator);

    return coordinator;
}