/**
 * Returns the broker nodes from the locally cached cluster snapshot.
 *
 * @return the nodes of the cached {@code cluster}; never fetches remotely
 */
@Override
public List<Node> fetchNodes() {
    return this.cluster.nodes();
}
/**
 * Returns the broker nodes of the current metadata snapshot.
 *
 * @return nodes from {@code metadata.fetch()}; reads cached state only
 */
@Override
public List<Node> fetchNodes() {
    return this.metadata.fetch().nodes();
}
/**
 * Supplies the node list held by the cached {@code cluster}.
 *
 * @return the cached cluster's nodes
 */
@Override
public List<Node> fetchNodes() {
    return this.cluster.nodes();
}
/**
 * Reports whether the cached metadata is usable.
 *
 * <p>Guard clauses, checked in order:
 * <ol>
 *   <li>a stored {@code authException} is rethrown to the caller;</li>
 *   <li>an empty node list means bootstrap has not happened yet;</li>
 *   <li>a still-bootstrap-configured cluster means no real metadata
 *       response has been received yet.</li>
 * </ol>
 *
 * @return {@code true} only when real (post-bootstrap) metadata is cached
 * @throws RuntimeException the stored {@code authException}, if any
 */
public boolean isReady() {
    if (authException != null) {
        log.debug("Metadata is not usable: failed to get metadata.", authException);
        throw authException;
    }
    if (cluster.nodes().isEmpty()) {
        log.trace("Metadata is not ready: bootstrap nodes have not been initialized yet.");
        return false;
    }
    if (cluster.isBootstrapConfigured()) {
        log.trace("Metadata is not ready: we have not fetched metadata from the bootstrap nodes yet.");
        return false;
    }
    log.trace("Metadata is ready to use.");
    return true;
}
/**
 * Receive new metadata and transition into the QUIESCENT state.
 *
 * <p>Updates {@code lastMetadataUpdateMs} (only for a genuine, non-bootstrap
 * update), clears any stored {@code authException}, and replaces the cached
 * {@code cluster} when the incoming cluster actually has nodes.
 *
 * @param cluster the newly received cluster metadata
 * @param now     the current time in milliseconds
 */
public void update(Cluster cluster, long now) {
    if (cluster.isBootstrapConfigured()) {
        log.debug("Setting bootstrap cluster metadata {}.", cluster);
    } else {
        log.debug("Updating cluster metadata to {}", cluster);
        // Only a real metadata response (not bootstrap seeding) counts as an update.
        this.lastMetadataUpdateMs = now;
    }
    this.state = State.QUIESCENT;
    this.authException = null;
    // Keep the previously cached cluster if the new one is empty, so callers
    // never regress from a known node set to a node-less one.
    if (!cluster.nodes().isEmpty()) {
        this.cluster = cluster;
    }
}
} // closes the enclosing class (its header is outside this chunk)
/**
 * Drops the cached controller node, if one is known.
 *
 * <p>Rebuilds the cached {@code Cluster} with the same id and nodes but a
 * {@code null} controller and empty partition/topic sets.
 */
public void clearController() {
    // Nothing to do when no controller is cached.
    if (cluster.controller() == null)
        return;
    log.trace("Clearing cached controller node {}.", cluster.controller());
    this.cluster = new Cluster(
        cluster.clusterResource().clusterId(),
        cluster.nodes(),
        Collections.emptySet(),
        Collections.emptySet(),
        Collections.emptySet(),
        null);
}
/**
 * Returns the nodes from the freshest cached metadata snapshot.
 *
 * @return nodes of the cluster returned by {@code metadata.fetch()}
 */
@Override
public List<Node> fetchNodes() {
    return this.metadata.fetch().nodes();
}
/**
 * Bootstrapping from a mixed list of an IP literal and a hostname must
 * yield nodes whose hosts are exactly those two addresses.
 */
@Test
public void testBootstrap() {
    final String ipAddress = "140.211.11.105";
    final String hostName = "www.example.com";
    final Cluster cluster = Cluster.bootstrap(Arrays.asList(
        new InetSocketAddress(ipAddress, 9002),
        new InetSocketAddress(hostName, 9002)));

    final Set<String> actualHosts = new HashSet<>();
    for (final Node node : cluster.nodes())
        actualHosts.add(node.host());

    assertEquals(Utils.mkSet(ipAddress, hostName), actualHosts);
}
private void prepareFindCoordinatorResponse(Errors error) { client.prepareResponse(new FindCoordinatorResponse(error, metadata.fetch().nodes().get(0))); }
/**
 * Builds a consumer whose first request to the (single) broker will fail
 * with a pending authentication error queued on the mock client.
 *
 * @return a consumer wired to the mock client and a single-partition topic
 */
private KafkaConsumer<String, String> consumerWithPendingAuthentication() {
    final Time mockTime = new MockTime();
    final Metadata meta = createMetadata();
    final MockClient mockClient = new MockClient(mockTime, meta);
    initMetadata(mockClient, singletonMap(topic, 1));

    final Node broker = meta.fetch().nodes().get(0);
    // Queue an authentication failure so the consumer's first request to this
    // broker is rejected immediately (delay 0).
    mockClient.createPendingAuthenticationError(broker, 0);

    final PartitionAssignor assignor = new RangeAssignor();
    return newConsumer(mockTime, mockClient, meta, assignor, false);
}
/**
 * Per-test fixture: seed the mock client's metadata, pick the first broker,
 * and build the record batches the fetch tests consume.
 */
@Before
public void setup() {
    client.updateMetadata(initialUpdateResponse);
    node = metadata.fetch().nodes().get(0);
    // Batches: (base offset, record count, first key/value seed).
    records = buildRecords(1L, 3, 1);
    nextRecords = buildRecords(4L, 2, 4);
    emptyRecords = buildRecords(0L, 0, 0);
    partialRecords = buildRecords(4L, 1, 0);
    // Corrupt the size field so the batch claims more bytes than the buffer
    // holds, simulating a partially received record set.
    partialRecords.buffer().putInt(Records.SIZE_OFFSET, 10000);
}
/**
 * poll(Duration.ZERO) must give up while the rebalance/metadata exchange is
 * still in flight rather than sending a fetch: after the zero-duration poll
 * no fetch request may be queued on the mock client.
 */
@Test
public void verifyPollTimesOutDuringMetadataUpdate() {
    final Time time = new MockTime();
    Metadata metadata = createMetadata();
    final MockClient client = new MockClient(time, metadata);
    initMetadata(client, Collections.singletonMap(topic, 1));
    Node node = metadata.fetch().nodes().get(0);
    final PartitionAssignor assignor = new RoundRobinAssignor();

    final KafkaConsumer<String, String> consumer = newConsumer(time, client, metadata, assignor, true);
    consumer.subscribe(singleton(topic), getConsumerRebalanceListener(consumer));
    // Queue the coordinator/join/sync responses for a full rebalance.
    prepareRebalance(client, node, assignor, singletonList(tp0), null);

    consumer.poll(Duration.ZERO);

    // The underlying client should NOT get a fetch request
    final Queue<ClientRequest> requests = client.requests();
    Assert.assertEquals(0, requests.size());
}
/**
 * Counterpart to {@code verifyPollTimesOutDuringMetadataUpdate}: the
 * deprecated poll(long) blocks through the metadata/rebalance exchange, so
 * exactly one request — a FetchRequest — must end up queued on the client.
 */
@SuppressWarnings("deprecation")
@Test
public void verifyDeprecatedPollDoesNotTimeOutDuringMetadataUpdate() {
    final Time time = new MockTime();
    Metadata metadata = createMetadata();
    final MockClient client = new MockClient(time, metadata);
    initMetadata(client, Collections.singletonMap(topic, 1));
    Node node = metadata.fetch().nodes().get(0);
    final PartitionAssignor assignor = new RoundRobinAssignor();

    final KafkaConsumer<String, String> consumer = newConsumer(time, client, metadata, assignor, true);
    consumer.subscribe(singleton(topic), getConsumerRebalanceListener(consumer));
    // Queue the coordinator/join/sync responses for a full rebalance.
    prepareRebalance(client, node, assignor, singletonList(tp0), null);

    consumer.poll(0L);

    // The underlying client SHOULD get a fetch request
    final Queue<ClientRequest> requests = client.requests();
    Assert.assertEquals(1, requests.size());
    final Class<? extends AbstractRequest.Builder> aClass = requests.peek().requestBuilder().getClass();
    Assert.assertEquals(FetchRequest.Builder.class, aClass);
}
/**
 * A long-lived (1 day) pending authentication error on the only bootstrap
 * node must surface as an authentication error from every admin-client API,
 * within the configured 1s request timeout.
 *
 * @throws Exception on unexpected failure of the admin calls
 */
@Test
public void testAdminClientApisAuthenticationFailure() throws Exception {
    Cluster cluster = mockBootstrapCluster();
    try (final AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(Time.SYSTEM, cluster,
        newStrMap(AdminClientConfig.REQUEST_TIMEOUT_MS_CONFIG, "1000"))) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        // Authentication keeps failing for a full day — far beyond the timeout.
        env.kafkaClient().createPendingAuthenticationError(cluster.nodes().get(0),
            TimeUnit.DAYS.toMillis(1));
        callAdminClientApisAndExpectAnAuthenticationError(env);
    }
}
/**
 * With reset policy NONE and no committed offset (the offset fetch returns
 * -1), poll() has no position to fall back to and must throw
 * {@link NoOffsetForPartitionException}.
 */
@Test(expected = NoOffsetForPartitionException.class)
public void testMissingOffsetNoResetPolicy() {
    Time time = new MockTime();
    Metadata metadata = createMetadata();
    MockClient client = new MockClient(time, metadata);
    initMetadata(client, Collections.singletonMap(topic, 1));
    Node node = metadata.fetch().nodes().get(0);
    PartitionAssignor assignor = new RoundRobinAssignor();

    KafkaConsumer<String, String> consumer = newConsumer(time, client, metadata, assignor,
        OffsetResetStrategy.NONE, true, groupId);
    consumer.assign(singletonList(tp0));

    client.prepareResponseFrom(new FindCoordinatorResponse(Errors.NONE, node), node);
    // Coordinator id mirrors the broker id via MAX_VALUE - id, matching the
    // client's coordinator-node numbering convention.
    Node coordinator = new Node(Integer.MAX_VALUE - node.id(), node.host(), node.port());

    // lookup committed offset and find nothing
    client.prepareResponseFrom(offsetResponse(Collections.singletonMap(tp0, -1L), Errors.NONE), coordinator);
    consumer.poll(Duration.ZERO);
}
/**
 * When the group has a committed offset (539), an assigned consumer with
 * reset policy NONE must take its position from that committed offset.
 */
@Test
public void testResetToCommittedOffset() {
    final Time mockTime = new MockTime();
    final Metadata meta = createMetadata();
    final MockClient mockClient = new MockClient(mockTime, meta);
    initMetadata(mockClient, Collections.singletonMap(topic, 1));
    final Node broker = meta.fetch().nodes().get(0);
    final PartitionAssignor assignor = new RoundRobinAssignor();

    final KafkaConsumer<String, String> consumer = newConsumer(
        mockTime, mockClient, meta, assignor, OffsetResetStrategy.NONE, true, groupId);
    consumer.assign(singletonList(tp0));

    // Coordinator discovery, then an offset-fetch answering 539 for tp0.
    mockClient.prepareResponseFrom(new FindCoordinatorResponse(Errors.NONE, broker), broker);
    final Node coordinator = new Node(Integer.MAX_VALUE - broker.id(), broker.host(), broker.port());
    mockClient.prepareResponseFrom(
        offsetResponse(Collections.singletonMap(tp0, 539L), Errors.NONE), coordinator);

    consumer.poll(Duration.ZERO);
    assertEquals(539L, consumer.position(tp0));
}
/**
 * With no committed offset (-1) and reset policy LATEST, the consumer must
 * fall back to a list-offsets lookup and adopt the returned offset (50).
 */
@Test
public void testResetUsingAutoResetPolicy() {
    final Time mockTime = new MockTime();
    final Metadata meta = createMetadata();
    final MockClient mockClient = new MockClient(mockTime, meta);
    initMetadata(mockClient, Collections.singletonMap(topic, 1));
    final Node broker = meta.fetch().nodes().get(0);
    final PartitionAssignor assignor = new RoundRobinAssignor();

    final KafkaConsumer<String, String> consumer = newConsumer(
        mockTime, mockClient, meta, assignor, OffsetResetStrategy.LATEST, true, groupId);
    consumer.assign(singletonList(tp0));

    // Coordinator discovery, an empty offset-fetch (-1), then the
    // list-offsets answer that the LATEST policy resolves to.
    mockClient.prepareResponseFrom(new FindCoordinatorResponse(Errors.NONE, broker), broker);
    final Node coordinator = new Node(Integer.MAX_VALUE - broker.id(), broker.host(), broker.port());
    mockClient.prepareResponseFrom(
        offsetResponse(Collections.singletonMap(tp0, -1L), Errors.NONE), coordinator);
    mockClient.prepareResponse(listOffsetsResponse(Collections.singletonMap(tp0, 50L)));

    consumer.poll(Duration.ZERO);
    assertEquals(50L, consumer.position(tp0));
}
/**
 * Builds the coordinator test fixture: mock time/client, a consumer network
 * client, single-broker metadata, and the dummy coordinator under test.
 *
 * @param rebalanceTimeoutMs rebalance timeout passed to the coordinator
 * @param retryBackoffMs     backoff used by metadata, network client, and coordinator
 */
private void setupCoordinator(int retryBackoffMs, int rebalanceTimeoutMs) {
    this.mockTime = new MockTime();
    // Metadata expiry of 1 hour keeps it fresh for the whole test.
    Metadata metadata = new Metadata(retryBackoffMs, 60 * 60 * 1000L, true);

    this.mockClient = new MockClient(mockTime, metadata);
    this.consumerClient = new ConsumerNetworkClient(new LogContext(),
        mockClient,
        metadata,
        mockTime,
        retryBackoffMs,
        REQUEST_TIMEOUT_MS,
        HEARTBEAT_INTERVAL_MS);
    Metrics metrics = new Metrics();

    // One broker, no topics; that broker doubles as our coordinator
    // (coordinator id = MAX_VALUE - broker id, per client convention).
    mockClient.updateMetadata(TestUtils.metadataUpdateWith(1, emptyMap()));
    this.node = metadata.fetch().nodes().get(0);
    this.coordinatorNode = new Node(Integer.MAX_VALUE - node.id(), node.host(), node.port());

    this.coordinator = new DummyCoordinator(consumerClient, metrics, mockTime,
        rebalanceTimeoutMs, retryBackoffMs);
}
/**
 * A batch that expires while the broker is unreachable (disconnected and
 * blacked out) was never sent, so its delivery failure must NOT leave the
 * partition's sequence unresolved in the transaction manager.
 *
 * @throws Exception on unexpected failure in the mock send path
 */
@Test
public void testExpiryOfUnsentBatchesShouldNotCauseUnresolvedSequences() throws Exception {
    final long producerId = 343434L;
    TransactionManager transactionManager = new TransactionManager();
    setupWithTransactionState(transactionManager);
    prepareAndReceiveInitProducerId(producerId, Errors.NONE);
    assertTrue(transactionManager.hasProducerId());

    assertEquals(0, transactionManager.sequenceNumber(tp0).longValue());

    // Send first ProduceRequest
    // NOTE(review): getBytes() uses the platform default charset; presumably
    // fine for this ASCII test data — confirm against the project's style.
    Future<RecordMetadata> request1 = accumulator.append(tp0, 0L, "key".getBytes(),
        "value".getBytes(), null, null, MAX_BLOCK_TIMEOUT).future;
    Node node = metadata.fetch().nodes().get(0);
    // Advance past the delivery timeout while the node is unreachable, so the
    // batch expires before it can ever be sent.
    time.sleep(10000L);
    client.disconnect(node.idString());
    client.blackout(node, 10);

    sender.run(time.milliseconds());
    assertFutureFailure(request1, TimeoutException.class);
    assertFalse(transactionManager.hasUnresolvedSequence(tp0));
}
/**
 * createTopics must transparently retry on NOT_CONTROLLER: node 0 rejects
 * the request, a metadata refresh names the new controller, and the retried
 * request to node 1 succeeds — so the returned future completes normally.
 *
 * @throws Exception if the createTopics future fails
 */
@Test
public void testCreateTopicsHandleNotControllerException() throws Exception {
    try (AdminClientUnitTestEnv env = mockClientEnv()) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        // 1) First attempt hits node 0, which is no longer the controller.
        env.kafkaClient().prepareResponseFrom(new CreateTopicsResponse(
            Collections.singletonMap("myTopic", new ApiError(Errors.NOT_CONTROLLER, ""))),
            env.cluster().nodeById(0));
        // 2) Metadata refresh reports controllerId = 1.
        env.kafkaClient().prepareResponse(new MetadataResponse(env.cluster().nodes(),
            env.cluster().clusterResource().clusterId(),
            1,
            Collections.<MetadataResponse.TopicMetadata>emptyList()));
        // 3) Retry against node 1 succeeds.
        env.kafkaClient().prepareResponseFrom(new CreateTopicsResponse(
            Collections.singletonMap("myTopic", new ApiError(Errors.NONE, ""))),
            env.cluster().nodeById(1));
        KafkaFuture<Void> future = env.adminClient().createTopics(
            Collections.singleton(new NewTopic("myTopic",
                Collections.singletonMap(0, asList(0, 1, 2)))),
            new CreateTopicsOptions().timeoutMs(10000)).all();
        future.get();
    }
}