/**
 * Create a Kafka topic with the given parameters.
 *
 * @param topic       The name of the topic.
 * @param partitions  The number of partitions for this topic.
 * @param replication The replication factor for (partitions of) this topic.
 * @param topicConfig Additional topic-level configuration settings.
 */
void createTopic(final String topic,
                 final int partitions,
                 final int replication,
                 final Map<String, String> topicConfig) {
    log.debug("Creating topic { name: {}, partitions: {}, replication: {}, config: {} }",
        topic, partitions, replication, topicConfig);

    final ImmutableMap<String, Object> props = ImmutableMap.of(
        AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, brokerList(),
        AdminClientConfig.RETRIES_CONFIG, 5);

    try (AdminClient adminClient = AdminClient.create(props)) {
        final NewTopic newTopic = new NewTopic(topic, partitions, (short) replication);
        newTopic.configs(topicConfig);
        try {
            // Block until the broker acknowledges the creation.
            adminClient.createTopics(ImmutableList.of(newTopic)).all().get();
        } catch (final InterruptedException e) {
            // Restore the interrupt flag so callers can observe the interruption;
            // the previous broad catch (Exception) silently discarded it.
            Thread.currentThread().interrupt();
            throw new RuntimeException("Failed to create topic: " + topic, e);
        } catch (final ExecutionException e) {
            throw new RuntimeException("Failed to create topic: " + topic, e);
        }
    }
}
/**
 * Creates a single-partition topic with replication factor 1 and waits up to
 * 30 seconds for the creation to complete.
 *
 * @param topicName the name of the topic to create
 * @throws Exception if creation fails, times out, or is interrupted
 */
public void createTopic(String topicName) throws Exception {
    NewTopic singlePartitionTopic = new NewTopic(topicName, 1, (short) 1);
    kafkaAdminClient.createTopics(Collections.singleton(singlePartitionTopic))
            .all()
            .get(30, TimeUnit.SECONDS);
}
/**
 * Creates a test topic with the given partition count and replication factor,
 * failing the enclosing test if the creation does not succeed.
 *
 * @param topic              the topic name
 * @param numberOfPartitions the number of partitions
 * @param replicationFactor  the replication factor
 * @param properties         additional settings (currently unused by this implementation)
 */
@Override
public void createTestTopic(String topic, int numberOfPartitions, int replicationFactor, Properties properties) {
    LOG.info("Creating topic {}", topic);
    try (AdminClient adminClient = AdminClient.create(getStandardProperties())) {
        NewTopic topicObj = new NewTopic(topic, numberOfPartitions, (short) replicationFactor);
        adminClient.createTopics(Collections.singleton(topicObj)).all().get();
    } catch (Exception e) {
        // Log the full stack trace through the logger instead of printStackTrace(),
        // so it lands in the test's log output rather than raw stderr.
        LOG.error("Create test topic {} failed", topic, e);
        fail("Create test topic : " + topic + " failed, " + e.getMessage());
    }
}
/**
 * Verifies that a basic createTopics() call completes successfully when the
 * broker responds with no error for the requested topic.
 */
@Test
public void testCreateTopics() throws Exception {
    try (AdminClientUnitTestEnv env = mockClientEnv()) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        env.kafkaClient().prepareResponse(
            body -> body instanceof CreateTopicsRequest,
            new CreateTopicsResponse(Collections.singletonMap("myTopic", new ApiError(Errors.NONE, ""))));
        NewTopic topicRequest = new NewTopic("myTopic", Collections.singletonMap(0, asList(0, 1, 2)));
        KafkaFuture<Void> result = env.adminClient()
            .createTopics(Collections.singleton(topicRequest), new CreateTopicsOptions().timeoutMs(10000))
            .all();
        result.get();
    }
}
/**
 * Test that the client properly times out when we don't receive any metadata.
 */
@Test
public void testTimeoutWithoutMetadata() throws Exception {
    try (final AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(Time.SYSTEM, mockBootstrapCluster(),
            newStrMap(AdminClientConfig.REQUEST_TIMEOUT_MS_CONFIG, "10"))) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        env.kafkaClient().prepareResponse(new CreateTopicsResponse(
            Collections.singletonMap("myTopic", new ApiError(Errors.NONE, ""))));
        // No metadata ever arrives, so the request can never be sent and the
        // future must complete exceptionally with a TimeoutException.
        NewTopic topicRequest = new NewTopic("myTopic", Collections.singletonMap(0, asList(0, 1, 2)));
        KafkaFuture<Void> result = env.adminClient()
            .createTopics(Collections.singleton(topicRequest), new CreateTopicsOptions().timeoutMs(1000))
            .all();
        TestUtils.assertFutureError(result, TimeoutException.class);
    }
}
new CreateTopicsOptions().timeoutMs(10000)).all();
@Test public void testUnreachableBootstrapServer() throws Exception { // This tests the scenario in which the bootstrap server is unreachable for a short while, // which prevents AdminClient from being able to send the initial metadata request Cluster cluster = Cluster.bootstrap(Collections.singletonList(new InetSocketAddress("localhost", 8121))); try (final AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(Time.SYSTEM, cluster)) { Cluster discoveredCluster = mockCluster(0); env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); env.kafkaClient().setUnreachable(cluster.nodes().get(0), 200); env.kafkaClient().prepareResponse(body -> body instanceof MetadataRequest, new MetadataResponse(discoveredCluster.nodes(), discoveredCluster.clusterResource().clusterId(), 1, Collections.emptyList())); env.kafkaClient().prepareResponse(body -> body instanceof CreateTopicsRequest, new CreateTopicsResponse(Collections.singletonMap("myTopic", new ApiError(Errors.NONE, "")))); KafkaFuture<Void> future = env.adminClient().createTopics( Collections.singleton(new NewTopic("myTopic", Collections.singletonMap(0, asList(0, 1, 2)))), new CreateTopicsOptions().timeoutMs(10000)).all(); future.get(); } }
/**
 * Test that we propagate exceptions encountered when fetching metadata.
 */
@Test
public void testPropagatedMetadataFetchException() throws Exception {
    Cluster mockedCluster = mockCluster(0);
    try (final AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(Time.SYSTEM, mockedCluster,
            newStrMap(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:8121",
                AdminClientConfig.REQUEST_TIMEOUT_MS_CONFIG, "10"))) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        // Pin an authentication failure on the broker for a full day so every
        // metadata fetch fails deterministically with a SASL error.
        env.kafkaClient().createPendingAuthenticationError(mockedCluster.nodeById(0), TimeUnit.DAYS.toMillis(1));
        env.kafkaClient().prepareResponse(new CreateTopicsResponse(
            Collections.singletonMap("myTopic", new ApiError(Errors.NONE, ""))));
        NewTopic topicRequest = new NewTopic("myTopic", Collections.singletonMap(0, asList(0, 1, 2)));
        KafkaFuture<Void> result = env.adminClient()
            .createTopics(Collections.singleton(topicRequest), new CreateTopicsOptions().timeoutMs(1000))
            .all();
        TestUtils.assertFutureError(result, SaslAuthenticationException.class);
    }
}
env.adminClient().createTopics( Collections.singleton(new NewTopic("myTopic", Collections.singletonMap(0, asList(0, 1, 2)))), new CreateTopicsOptions().timeoutMs(10000)).all().get(); fail("Expected an authentication error."); } catch (ExecutionException e) {
@Test public void testConnectionFailureOnMetadataUpdate() throws Exception { // This tests the scenario in which we successfully connect to the bootstrap server, but // the server disconnects before sending the full response Cluster cluster = mockBootstrapCluster(); try (final AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(Time.SYSTEM, cluster)) { Cluster discoveredCluster = mockCluster(0); env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); env.kafkaClient().prepareResponse(request -> request instanceof MetadataRequest, null, true); env.kafkaClient().prepareResponse(request -> request instanceof MetadataRequest, new MetadataResponse(discoveredCluster.nodes(), discoveredCluster.clusterResource().clusterId(), 1, Collections.emptyList())); env.kafkaClient().prepareResponse(body -> body instanceof CreateTopicsRequest, new CreateTopicsResponse(Collections.singletonMap("myTopic", new ApiError(Errors.NONE, "")))); KafkaFuture<Void> future = env.adminClient().createTopics( Collections.singleton(new NewTopic("myTopic", Collections.singletonMap(0, asList(0, 1, 2)))), new CreateTopicsOptions().timeoutMs(10000)).all(); future.get(); } }
/**
 * Verifies that a NOT_CONTROLLER error triggers a metadata refresh and a retry
 * of the create request against the newly discovered controller.
 */
@Test
public void testCreateTopicsHandleNotControllerException() throws Exception {
    try (AdminClientUnitTestEnv env = mockClientEnv()) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        // Node 0 rejects the request, claiming it is no longer the controller.
        env.kafkaClient().prepareResponseFrom(
            new CreateTopicsResponse(Collections.singletonMap("myTopic", new ApiError(Errors.NOT_CONTROLLER, ""))),
            env.cluster().nodeById(0));
        // Refreshed metadata points the client at the current controller.
        env.kafkaClient().prepareResponse(new MetadataResponse(env.cluster().nodes(),
            env.cluster().clusterResource().clusterId(), 1,
            Collections.<MetadataResponse.TopicMetadata>emptyList()));
        // The retried request against node 1 succeeds.
        env.kafkaClient().prepareResponseFrom(
            new CreateTopicsResponse(Collections.singletonMap("myTopic", new ApiError(Errors.NONE, ""))),
            env.cluster().nodeById(1));
        NewTopic topicRequest = new NewTopic("myTopic", Collections.singletonMap(0, asList(0, 1, 2)));
        KafkaFuture<Void> result = env.adminClient()
            .createTopics(Collections.singleton(topicRequest), new CreateTopicsOptions().timeoutMs(10000))
            .all();
        result.get();
    }
}
// Creates the given topics via the AdminClient and blocks up to operationTimeout
// seconds for the result.
// Error handling:
// - InterruptedException: interrupt flag is restored and the error logged, but the
//   method then RETURNS NORMALLY — callers cannot distinguish this from success.
//   NOTE(review): presumably intentional best-effort behavior; confirm.
// - TimeoutException: rethrown as KafkaException.
// - ExecutionException: TopicExistsException is tolerated (benign race with another
//   app instance creating the same topics); anything else is rethrown as KafkaException.
private void addTopics(AdminClient adminClient, List<NewTopic> topicsToAdd) {
    CreateTopicsResult topicResults = adminClient.createTopics(topicsToAdd);
    try {
        topicResults.all().get(this.operationTimeout, TimeUnit.SECONDS);
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        logger.error("Interrupted while waiting for topic creation results", e);
    } catch (TimeoutException e) {
        throw new KafkaException("Timed out waiting for create topics results", e);
    } catch (ExecutionException e) {
        if (e.getCause() instanceof TopicExistsException) {
            // Possible race with another app instance
            logger.debug("Failed to create topics", e.getCause());
        } else {
            logger.error("Failed to create topics", e.getCause());
            throw new KafkaException("Failed to create topics", e.getCause()); // NOSONAR
        }
    }
}
/**
 * Creates the given topics and waits up to {@code adminTimeout} seconds for
 * the broker to acknowledge them.
 *
 * @param admin     the AdminClient used to issue the request
 * @param newTopics the topics to create
 * @throws KafkaException if creation fails, times out, or the thread is interrupted
 */
private void createTopics(AdminClient admin, List<NewTopic> newTopics) {
    CreateTopicsResult createTopics = admin.createTopics(newTopics);
    try {
        createTopics.all().get(this.adminTimeout, TimeUnit.SECONDS);
    } catch (InterruptedException e) {
        // Restore the interrupt flag; the previous broad catch (Exception) lost it.
        Thread.currentThread().interrupt();
        throw new KafkaException(e);
    } catch (ExecutionException | TimeoutException e) {
        throw new KafkaException(e);
    }
}
/**
 * Creates the given topics on the embedded broker, each with {@code numBroker}
 * partitions and a matching replication factor, blocking until creation completes.
 *
 * @param topics the names of the topics to create
 * @throws ExecutionException   if topic creation fails on the broker
 * @throws InterruptedException if interrupted while waiting
 */
public void createTopics(String... topics) throws ExecutionException, InterruptedException {
    Map<String, Object> adminConfigs = new HashMap<>();
    adminConfigs.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, connectionString());
    try (AdminClient admin = AdminClient.create(adminConfigs)) {
        List<NewTopic> requests = Stream.of(topics)
                .map(topicName -> new NewTopic(topicName, numBroker, (short) numBroker))
                .collect(Collectors.toList());
        admin.createTopics(requests).all().get();
    }
}
/**
 * Creates one topic per supplied name, using {@code numBroker} for both the
 * partition count and the replication factor, and waits for the result.
 *
 * @param topics topic names to create
 * @throws ExecutionException   if any creation fails
 * @throws InterruptedException if interrupted while waiting
 */
public void createTopics(String... topics) throws ExecutionException, InterruptedException {
    Map<String, Object> adminProps = new HashMap<>();
    adminProps.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, connectionString());
    try (AdminClient adminClient = AdminClient.create(adminProps)) {
        CreateTopicsResult result = adminClient.createTopics(
                Stream.of(topics)
                        .map(topic -> new NewTopic(topic, numBroker, (short) numBroker))
                        .collect(Collectors.toList()));
        result.all().get();
    }
}
/**
 * Asynchronously creates a topic with the given number of partitions, using the
 * configured replication factor and the shared topic-level properties.
 *
 * @param topic      the topic name
 * @param partitions the number of partitions
 * @return a future that completes when the topic has been created, or completes
 *         exceptionally with a RuntimeException wrapping the failure
 */
@SuppressWarnings({ "rawtypes", "unchecked" })
@Override
public CompletableFuture<Void> createTopic(String topic, int partitions) {
    return CompletableFuture.runAsync(() -> {
        try {
            NewTopic newTopic = new NewTopic(topic, partitions, config.replicationFactor);
            newTopic.configs(new HashMap<>((Map) topicProperties));
            admin.createTopics(Arrays.asList(newTopic)).all().get();
        } catch (InterruptedException e) {
            // Restore the interrupt status before surfacing the failure; the
            // original multi-catch silently discarded it.
            Thread.currentThread().interrupt();
            throw new RuntimeException(e);
        } catch (ExecutionException e) {
            throw new RuntimeException(e);
        }
    });
}
/**
 * Creates the supplied topics, blocking up to {@code adminTimeout} seconds.
 *
 * @param admin     the AdminClient to use
 * @param newTopics topics to create
 * @throws KafkaException wrapping the underlying failure, timeout, or interruption
 */
private void createTopics(AdminClient admin, List<NewTopic> newTopics) {
    CreateTopicsResult createTopics = admin.createTopics(newTopics);
    try {
        createTopics.all().get(this.adminTimeout, TimeUnit.SECONDS);
    } catch (InterruptedException e) {
        // Re-interrupt: a broad catch (Exception) must not swallow the interrupt flag.
        Thread.currentThread().interrupt();
        throw new KafkaException(e);
    } catch (ExecutionException | TimeoutException e) {
        throw new KafkaException(e);
    }
}
/**
 * Ensures all required topics exist, creating any that are missing.
 * Best-effort: failures are logged and NOT propagated, preserving the original
 * contract that this method never throws.
 */
void ensureTopics() {
    try {
        Set<String> topics = getAdminClient().listTopics().names().get(1, TimeUnit.SECONDS);
        List<Topic> requiredTopics = Arrays.asList(spansTopic, tracesTopic, servicesTopic, dependenciesTopic);
        Set<NewTopic> newTopics = new HashSet<>();
        for (Topic requiredTopic : requiredTopics) {
            if (!topics.contains(requiredTopic.name)) {
                newTopics.add(requiredTopic.newTopic());
            } else {
                LOG.info("Topic {} already exists.", requiredTopic.name);
            }
        }
        // Skip the createTopics round trip entirely when nothing is missing.
        if (!newTopics.isEmpty()) {
            getAdminClient().createTopics(newTopics).all().get();
        }
    } catch (InterruptedException e) {
        // Keep best-effort semantics, but restore the interrupt flag instead of
        // swallowing it inside the broad catch below.
        Thread.currentThread().interrupt();
        LOG.error("Error ensuring topics are created", e);
    } catch (Exception e) {
        LOG.error("Error ensuring topics are created", e);
    }
}
/**
 * Creates a topic with the given layout, refusing to proceed if it already
 * exists, and blocks up to five minutes for the broker acknowledgement.
 *
 * @param topic             the topic name
 * @param partitions        the number of partitions
 * @param replicationFactor the replication factor
 * @throws IllegalArgumentException if the topic already exists
 * @throws StreamRuntimeException   if creation fails, times out, or is interrupted
 */
public void createTopic(String topic, int partitions, short replicationFactor) {
    log.info("Creating topic: " + topic + ", partitions: " + partitions + ", replications: " + replicationFactor);
    if (topicExists(topic)) {
        throw new IllegalArgumentException("Cannot create Topic already exists: " + topic);
    }
    NewTopic request = new NewTopic(topic, partitions, replicationFactor);
    CreateTopicsResult creation = adminClient.createTopics(Collections.singletonList(request));
    try {
        creation.all().get(5, TimeUnit.MINUTES);
    } catch (InterruptedException e) {
        // Restore the interrupt flag before wrapping.
        Thread.currentThread().interrupt();
        throw new StreamRuntimeException(e);
    } catch (ExecutionException e) {
        throw new StreamRuntimeException(e);
    } catch (TimeoutException e) {
        throw new StreamRuntimeException("Unable to create topics " + topic + " within the timeout", e);
    }
}
/**
 * Builds a DeadLetterQueueReporter for the given sink connector, creating the
 * DLQ topic (with the configured replication factor) if it does not already exist.
 *
 * @param workerConfig        worker configuration supplying admin client settings
 * @param id                  the connector task this reporter belongs to
 * @param sinkConfig          sink configuration (DLQ topic name and replication factor)
 * @param producerProps       properties for the DLQ producer
 * @param errorHandlingMetrics metrics sink for error-handling events
 * @return a configured DeadLetterQueueReporter
 * @throws ConnectException if the DLQ topic cannot be verified or created
 */
public static DeadLetterQueueReporter createAndSetup(WorkerConfig workerConfig,
        ConnectorTaskId id, SinkConnectorConfig sinkConfig, Map<String, Object> producerProps,
        ErrorHandlingMetrics errorHandlingMetrics) {
    String topic = sinkConfig.dlqTopicName();

    try (AdminClient admin = AdminClient.create(workerConfig.originals())) {
        if (!admin.listTopics().names().get().contains(topic)) {
            // A missing DLQ topic is the expected first-run condition, not an
            // error — log at info rather than error.
            log.info("Topic {} doesn't exist. Will attempt to create topic.", topic);
            NewTopic schemaTopicRequest = new NewTopic(topic, DLQ_NUM_DESIRED_PARTITIONS,
                    sinkConfig.dlqTopicReplicationFactor());
            admin.createTopics(singleton(schemaTopicRequest)).all().get();
        }
    } catch (InterruptedException e) {
        // Restore the interrupt flag before converting to a ConnectException.
        Thread.currentThread().interrupt();
        throw new ConnectException("Could not initialize dead letter queue with topic=" + topic, e);
    } catch (ExecutionException e) {
        // A concurrent creation of the same topic is benign; anything else is fatal.
        if (!(e.getCause() instanceof TopicExistsException)) {
            throw new ConnectException("Could not initialize dead letter queue with topic=" + topic, e);
        }
    }

    KafkaProducer<byte[], byte[]> dlqProducer = new KafkaProducer<>(producerProps);
    return new DeadLetterQueueReporter(dlqProducer, sinkConfig, id, errorHandlingMetrics);
}