private SimpleConsumerThread<T> createAndStartSimpleConsumerThread(
        List<KafkaTopicPartitionState<TopicAndPartition>> seedPartitions,
        Node leader,
        ExceptionProxy errorHandler) throws IOException, ClassNotFoundException {
    // each thread needs its own copy of the deserializer, because the deserializer is
    // not necessarily thread safe
    final KeyedDeserializationSchema<T> clonedDeserializer =
            InstantiationUtil.clone(deserializer, runtimeContext.getUserCodeClassLoader());

    // seed thread with list of fetch partitions (otherwise it would shut down immediately again)
    SimpleConsumerThread<T> brokerThread = new SimpleConsumerThread<>(
            this, errorHandler, kafkaConfig, leader, seedPartitions,
            unassignedPartitionsQueue, clonedDeserializer, invalidOffsetBehavior);

    brokerThread.setName(String.format("SimpleConsumer - %s - broker-%s (%s:%d)",
            runtimeContext.getTaskName(), leader.id(), leader.host(), leader.port()));
    brokerThread.setDaemon(true);
    brokerThread.start();

    LOG.info("Starting thread {}", brokerThread.getName());
    return brokerThread;
}
public Node getLeader() {
    if (this.leaderId == -1) {
        return null;
    } else {
        return new Node(leaderId, leaderHost, leaderPort);
    }
}
/**
 * Initiate a connection to the given node.
 */
private void initiateConnect(Node node, long now) {
    String nodeConnectionId = node.idString();
    try {
        this.connectionStates.connecting(nodeConnectionId, now, node.host(), clientDnsLookup);
        InetAddress address = this.connectionStates.currentAddress(nodeConnectionId);
        log.debug("Initiating connection to node {} using address {}", node, address);
        selector.connect(nodeConnectionId,
                new InetSocketAddress(address, node.port()),
                this.socketSendBuffer,
                this.socketReceiveBuffer);
    } catch (IOException e) {
        /* attempt failed, we'll try again after the backoff */
        connectionStates.disconnected(nodeConnectionId, now);
        /* maybe the problem is our metadata, update it */
        metadataUpdater.requestUpdate();
        log.warn("Error connecting to node {}", node, e);
    }
}
public KafkaTopicPartitionLeader(KafkaTopicPartition topicPartition, Node leader) {
    this.topicPartition = topicPartition;
    if (leader == null) {
        this.leaderId = -1;
        this.leaderHost = null;
        this.leaderPort = -1;
    } else {
        this.leaderId = leader.id();
        this.leaderPort = leader.port();
        this.leaderHost = leader.host();
    }
    int cachedHash = (leader == null) ? 14 : leader.hashCode();
    this.cachedHash = 31 * cachedHash + topicPartition.hashCode();
}
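A minimal sketch of how this constructor's sentinel values pair with the getLeader() method shown earlier; the topic, partition, and broker values here are hypothetical.

KafkaTopicPartition partition = new KafkaTopicPartition("orders", 3); // hypothetical topic-partition

// With a real leader, getLeader() rebuilds an equivalent Node from the cached id/host/port.
KafkaTopicPartitionLeader withLeader =
        new KafkaTopicPartitionLeader(partition, new Node(1, "broker-1.example.com", 9092));
withLeader.getLeader(); // Node(1, "broker-1.example.com", 9092)

// With no leader, the -1 sentinel makes getLeader() return null.
KafkaTopicPartitionLeader withoutLeader = new KafkaTopicPartitionLeader(partition, null);
withoutLeader.getLeader(); // null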
@Override
public void onSuccess(ClientResponse resp, RequestFuture<Void> future) {
    log.debug("Received FindCoordinator response {}", resp);
    clearFindCoordinatorFuture();

    FindCoordinatorResponse findCoordinatorResponse = (FindCoordinatorResponse) resp.responseBody();
    Errors error = findCoordinatorResponse.error();
    if (error == Errors.NONE) {
        synchronized (AbstractCoordinator.this) {
            // use MAX_VALUE - node.id as the coordinator id to allow separate connections
            // for the coordinator in the underlying network client layer
            int coordinatorConnectionId = Integer.MAX_VALUE - findCoordinatorResponse.node().id();

            AbstractCoordinator.this.coordinator = new Node(
                    coordinatorConnectionId,
                    findCoordinatorResponse.node().host(),
                    findCoordinatorResponse.node().port());
            log.info("Discovered group coordinator {}", coordinator);
            client.tryConnect(coordinator);
            heartbeat.resetSessionTimeout();
        }
        future.complete(null);
    } else if (error == Errors.GROUP_AUTHORIZATION_FAILED) {
        future.raise(new GroupAuthorizationException(groupId));
    } else {
        log.debug("Group coordinator lookup failed: {}", error.message());
        future.raise(error);
    }
}
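A small sketch of the connection-id convention used above, assuming only the public Node constructor and accessors; the broker id, host, and port are made up.

Node broker = new Node(2, "broker-2.example.com", 9092); // hypothetical broker returned by FindCoordinator

// The coordinator gets a synthetic id so the network client keeps its connection
// separate from the broker's regular data connection.
Node coordinator = new Node(Integer.MAX_VALUE - broker.id(), broker.host(), broker.port());

int originalBrokerId = Integer.MAX_VALUE - coordinator.id(); // recovers 2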
private Node getControllerNode(int controllerId, Collection<Node> brokers) {
    for (Node broker : brokers) {
        if (broker.id() == controllerId)
            return broker;
    }
    return null;
}
/**
 * @return all the available broker level metrics. Null is returned if nothing is available.
 */
public MetricSampleAggregationResult<String, BrokerEntity> brokerMetrics() {
    List<Node> nodes = _metadataClient.cluster().nodes();
    Set<BrokerEntity> brokerEntities = new HashSet<>(nodes.size());
    for (Node node : nodes) {
        brokerEntities.add(new BrokerEntity(node.host(), node.id()));
    }
    return _brokerMetricSampleAggregator.aggregate(brokerEntities);
}
@Test
public void testRaiseErrorWhenNoPartitionsPendingOnDrain() throws InterruptedException {
    final long pid = 13131L;
    final short epoch = 1;
    doInitTransactions(pid, epoch);
    transactionManager.beginTransaction();

    // Don't execute transactionManager.maybeAddPartitionToTransaction(tp0). This should result in an error on drain.
    accumulator.append(tp0, time.milliseconds(), "key".getBytes(), "value".getBytes(),
            Record.EMPTY_HEADERS, null, MAX_BLOCK_TIMEOUT);

    Node node1 = new Node(0, "localhost", 1111);
    PartitionInfo part1 = new PartitionInfo(topic, 0, node1, null, null);
    Cluster cluster = new Cluster(null, Collections.singletonList(node1), Collections.singletonList(part1),
            Collections.emptySet(), Collections.emptySet());

    Set<Node> nodes = new HashSet<>();
    nodes.add(node1);
    Map<Integer, List<ProducerBatch>> drainedBatches = accumulator.drain(cluster, nodes, Integer.MAX_VALUE,
            time.milliseconds());

    // We shouldn't drain batches which haven't been added to the transaction yet.
    assertTrue(drainedBatches.containsKey(node1.id()));
    assertTrue(drainedBatches.get(node1.id()).isEmpty());
}
for (Node node : brokers) {
    Struct broker = struct.instance(BROKERS);
    broker.set(NODE_ID, node.id());
    broker.set(HOST, node.host());
    broker.set(PORT, node.port());
    broker.setIfExists(RACK, node.rack());
    brokerArray.add(broker);
}
// ...
struct.setIfExists(CONTROLLER_ID, controller == null ? NO_CONTROLLER_ID : controller.id());
// ...
ArrayList<Integer> replicas = new ArrayList<>(partitionMetadata.replicas.size());
for (Node node : partitionMetadata.replicas)
    replicas.add(node.id());
partitionData.set(REPLICAS, replicas.toArray());

ArrayList<Integer> isr = new ArrayList<>(partitionMetadata.isr.size());
for (Node node : partitionMetadata.isr)
    isr.add(node.id());
partitionData.set(ISR, isr.toArray());

if (partitionData.hasField(OFFLINE_REPLICAS)) {
    ArrayList<Integer> offlineReplicas = new ArrayList<>(partitionMetadata.offlineReplicas.size());
    for (Node node : partitionMetadata.offlineReplicas)
        offlineReplicas.add(node.id());
    partitionData.set(OFFLINE_REPLICAS, offlineReplicas.toArray());
}
@Test
public void testLeastLoadedNode() {
    client.ready(node, time.milliseconds());
    awaitReady(client, node);
    client.poll(1, time.milliseconds());
    assertTrue("The client should be ready", client.isReady(node, time.milliseconds()));

    // leastLoadedNode should be our single node
    Node leastNode = client.leastLoadedNode(time.milliseconds());
    assertEquals("There should be one leastloadednode", leastNode.id(), node.id());

    // sleep for longer than reconnect backoff
    time.sleep(reconnectBackoffMsTest);

    // CLOSE node
    selector.serverDisconnect(node.idString());

    client.poll(1, time.milliseconds());
    assertFalse("After we forced the disconnection the client is no longer ready.",
            client.ready(node, time.milliseconds()));
    leastNode = client.leastLoadedNode(time.milliseconds());
    assertNull("There should be NO leastloadednode", leastNode);
}
@Test
public void testBootstrap() {
    String ipAddress = "140.211.11.105";
    String hostName = "www.example.com";
    Cluster cluster = Cluster.bootstrap(Arrays.asList(
            new InetSocketAddress(ipAddress, 9002),
            new InetSocketAddress(hostName, 9002)
    ));
    Set<String> expectedHosts = Utils.mkSet(ipAddress, hostName);
    Set<String> actualHosts = new HashSet<>();
    for (Node node : cluster.nodes())
        actualHosts.add(node.host());
    assertEquals(expectedHosts, actualHosts);
}
private String formatNodeIds(Node[] nodes) {
    StringBuilder b = new StringBuilder("[");
    if (nodes != null) {
        for (int i = 0; i < nodes.length; i++) {
            b.append(nodes[i].idString());
            if (i < nodes.length - 1)
                b.append(',');
        }
    }
    b.append("]");
    return b.toString();
}
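A short usage sketch for the helper above; the nodes are hypothetical, and Node.idString() simply returns the id as a string for ordinary nodes.

Node[] someNodes = new Node[] {
        new Node(0, "localhost", 9092),
        new Node(1, "localhost", 9093)
};
formatNodeIds(someNodes); // "[0,1]"
formatNodeIds(null);      // "[]"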
@Test
public void testToString() {
    String topic = "sample";
    int partition = 0;
    Node leader = new Node(0, "localhost", 9092);
    Node r1 = new Node(1, "localhost", 9093);
    Node r2 = new Node(2, "localhost", 9094);
    Node[] replicas = new Node[] {leader, r1, r2};
    Node[] inSyncReplicas = new Node[] {leader, r1};
    Node[] offlineReplicas = new Node[] {r2};
    PartitionInfo partitionInfo = new PartitionInfo(topic, partition, leader, replicas, inSyncReplicas, offlineReplicas);

    String expected = String.format("Partition(topic = %s, partition = %d, leader = %s, replicas = %s, isr = %s, offlineReplicas = %s)",
            topic, partition, leader.idString(), "[0,1,2]", "[0,1]", "[2]");
    Assert.assertEquals(expected, partitionInfo.toString());
}
@Test
public void testDeleteConsumerGroups() throws Exception {
    final HashMap<Integer, Node> nodes = new HashMap<>();
    nodes.put(0, new Node(0, "localhost", 8121));
    final Cluster cluster = new Cluster(
            "mockClusterId",
            nodes.values(),
            Collections.<PartitionInfo>emptyList(),
            Collections.<String>emptySet(),
            Collections.<String>emptySet(),
            nodes.get(0));
    final List<String> groupIds = singletonList("group-0");

    try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(cluster)) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());

        // Retriable FindCoordinator response errors should be retried
        env.kafkaClient().prepareResponse(new FindCoordinatorResponse(Errors.COORDINATOR_NOT_AVAILABLE, Node.noNode()));
        env.kafkaClient().prepareResponse(new FindCoordinatorResponse(Errors.COORDINATOR_LOAD_IN_PROGRESS, Node.noNode()));

        env.kafkaClient().prepareResponse(new FindCoordinatorResponse(Errors.NONE, env.cluster().controller()));

        final Map<String, Errors> response = new HashMap<>();
        response.put("group-0", Errors.NONE);
        env.kafkaClient().prepareResponse(new DeleteGroupsResponse(response));

        final DeleteConsumerGroupsResult result = env.adminClient().deleteConsumerGroups(groupIds);

        final KafkaFuture<Void> results = result.deletedGroups().get("group-0");
        assertNull(results.get());

        // should throw an error for non-retriable errors
        env.kafkaClient().prepareResponse(new FindCoordinatorResponse(Errors.GROUP_AUTHORIZATION_FAILED, Node.noNode()));
        final DeleteConsumerGroupsResult errorResult = env.adminClient().deleteConsumerGroups(groupIds);
        TestUtils.assertFutureError(errorResult.deletedGroups().get("group-0"), GroupAuthorizationException.class);
    }
}
private Node prepareRebalance(MockClient client, Node node, PartitionAssignor assignor,
                              List<TopicPartition> partitions, Node coordinator) {
    if (coordinator == null) {
        // lookup coordinator
        client.prepareResponseFrom(new FindCoordinatorResponse(Errors.NONE, node), node);
        coordinator = new Node(Integer.MAX_VALUE - node.id(), node.host(), node.port());
    }

    // join group
    client.prepareResponseFrom(joinGroupFollowerResponse(assignor, 1, "memberId", "leaderId", Errors.NONE), coordinator);

    // sync group
    client.prepareResponseFrom(syncGroupResponse(partitions, Errors.NONE), coordinator);

    return coordinator;
}
public int leaderId() {
    return leader == null ? -1 : leader.id();
}
MetricDef brokerMetricDef = KafkaMetricDef.brokerMetricDef();
for (Node node : cluster.nodes()) {
    BrokerLoad brokerLoad = _brokerLoad.get(node.id());
    if (brokerLoad == null) {
        LOG.warn("Skip generating broker metric sample for broker {} because all broker metrics are missing.", node.id());
        continue;
    } else if (!brokerLoad.minRequiredBrokerMetricsAvailable()) {
        if (brokerLoad.missingBrokerMetricsInMinSupportedVersion().size() == 0) {
            LOG.warn("Skip generating broker metric sample for broker {} because there are not enough topic metrics to "
                     + "generate broker metrics.", node.id());
        } else {
            LOG.warn("Skip generating broker metric sample for broker {} because the following required metrics are missing {}.",
                     node.id(), brokerLoad.missingBrokerMetricsInMinSupportedVersion());
        }
        continue;
    }

    boolean validSample = true;
    BrokerMetricSample brokerMetricSample =
            new BrokerMetricSample(node.host(), node.id(), brokerLoad._brokerSampleDeserializationVersion);
    for (Map.Entry<Byte, Set<RawMetricType>> entry : RawMetricType.brokerMetricTypesDiffByVersion().entrySet()) {
        for (RawMetricType rawBrokerMetricType : entry.getValue()) {
            // The availability check that decides between these two branches is elided in this excerpt.
            if (/* required metric of rawBrokerMetricType is missing or inconsistent in brokerLoad */) {
                validSample = false;
                LOG.warn("Skip generating broker metric sample for broker {} because it does not have {} metrics (serde "
                         + "version {}) or the metrics are inconsistent.", node.id(), rawBrokerMetricType, entry.getKey());
                break;
            } else {
                // ... (record the metric value on brokerMetricSample; elided in this excerpt)
            }
        }
    }
    // ... (the excerpt skips the validity check on validSample here)
    brokerMetricSample.close(_maxMetricTimestamp);
    LOG.trace("Added broker metric sample for broker {}", node.id());
    brokerMetricSamples.add(brokerMetricSample);
}