/**
 * Looks up a broker node by its id in the current cluster view.
 * Delegates directly to {@link Cluster#nodeById}; callers elsewhere in this
 * codebase treat a {@code null} return as "no such broker", so presumably
 * this returns {@code null} for an unknown id — verify against Cluster.
 */
public Node nodeById(int nodeId) { return cluster.nodeById(nodeId); }
@Override public void handleDisconnection(String destination) { Cluster cluster = metadata.fetch(); // 'processDisconnection' generates warnings for misconfigured bootstrap server configuration // resulting in 'Connection Refused' and misconfigured security resulting in authentication failures. // The warning below handles the case where connection to a broker was established, but was disconnected // before metadata could be obtained. if (cluster.isBootstrapConfigured()) { int nodeId = Integer.parseInt(destination); Node node = cluster.nodeById(nodeId); if (node != null) log.warn("Bootstrap broker {} disconnected", node); } metadataFetchInProgress = false; }
/**
 * Verifies that every requested broker id is present in the current cluster.
 *
 * @param brokerIds broker ids to validate.
 * @throws IllegalArgumentException if any of the given ids has no
 *         corresponding node in the freshly refreshed cluster view.
 */
private void sanityCheckBrokerPresence(Collection<Integer> brokerIds) {
    Cluster cluster = _loadMonitor.refreshClusterAndGeneration().cluster();
    Set<Integer> missingBrokerIds = brokerIds.stream()
                                             .filter(brokerId -> cluster.nodeById(brokerId) == null)
                                             .collect(Collectors.toSet());
    if (!missingBrokerIds.isEmpty()) {
        throw new IllegalArgumentException(String.format("Broker %s does not exist.", missingBrokerIds));
    }
}
}
// NOTE(review): this fragment is truncated in the visible source — braces do not
// balance, the cases are cut off, and 'broker' is declared outside this view.
// Kept token-for-token; do not treat the nesting below as authoritative.
switch (task.type()) {
    case LEADER_ACTION:
        // Abort the leadership-change task when its proposed new leader is no
        // longer present in the cluster (nodeById returns null for unknown ids).
        if (cluster.nodeById(task.proposal().newLeader()) == null) {
            _executionTaskManager.markTaskDead(task);
            LOG.warn("Killing execution for task {} because the target leader is down", task);
            // Presumably from a different (replica-movement) case in the full file:
            // abort when the destination broker for the new replica is gone.
            if (cluster.nodeById(broker) == null) {
                _executionTaskManager.markTaskDead(task);
                LOG.warn("Killing execution for task {} because the new replica {} is down.", task, broker);
/**
 * Test that we propagate exceptions encountered when fetching metadata.
 */
@Test
public void testPropagatedMetadataFetchException() throws Exception {
    Cluster testCluster = mockCluster(0);
    try (final AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(Time.SYSTEM, testCluster,
            newStrMap(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:8121",
                      AdminClientConfig.REQUEST_TIMEOUT_MS_CONFIG, "10"))) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        // Queue a pending authentication failure on node 0 that far outlasts the request timeout.
        env.kafkaClient().createPendingAuthenticationError(testCluster.nodeById(0), TimeUnit.DAYS.toMillis(1));
        env.kafkaClient().prepareResponse(
            new CreateTopicsResponse(Collections.singletonMap("myTopic", new ApiError(Errors.NONE, ""))));
        NewTopic topicToCreate = new NewTopic("myTopic", Collections.singletonMap(0, asList(0, 1, 2)));
        KafkaFuture<Void> result = env.adminClient()
            .createTopics(Collections.singleton(topicToCreate), new CreateTopicsOptions().timeoutMs(1000))
            .all();
        // The authentication error should surface through the returned future.
        TestUtils.assertFutureError(result, SaslAuthenticationException.class);
    }
}
/**
 * Verifies that createTopics retries after a NOT_CONTROLLER error: the first
 * response (from node 0) reports NOT_CONTROLLER, a metadata response follows,
 * and the retried request against node 1 succeeds.
 */
@Test
public void testCreateTopicsHandleNotControllerException() throws Exception {
    try (AdminClientUnitTestEnv env = mockClientEnv()) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        // First attempt: node 0 answers that it is not the controller.
        env.kafkaClient().prepareResponseFrom(
            new CreateTopicsResponse(Collections.singletonMap("myTopic", new ApiError(Errors.NOT_CONTROLLER, ""))),
            env.cluster().nodeById(0));
        // Metadata refresh — presumably the '1' names node 1 as controller; verify against MetadataResponse.
        env.kafkaClient().prepareResponse(new MetadataResponse(env.cluster().nodes(),
            env.cluster().clusterResource().clusterId(), 1,
            Collections.<MetadataResponse.TopicMetadata>emptyList()));
        // Retried attempt: node 1 reports success.
        env.kafkaClient().prepareResponseFrom(
            new CreateTopicsResponse(Collections.singletonMap("myTopic", new ApiError(Errors.NONE, ""))),
            env.cluster().nodeById(1));
        NewTopic topicToCreate = new NewTopic("myTopic", Collections.singletonMap(0, asList(0, 1, 2)));
        KafkaFuture<Void> creation = env.adminClient()
            .createTopics(Collections.singleton(topicToCreate), new CreateTopicsOptions().timeoutMs(10000))
            .all();
        // Must complete without throwing despite the initial NOT_CONTROLLER error.
        creation.get();
    }
}