private List<AclBinding> getAclBindings(Map<AclBindingFilter, KafkaFuture<FilterResults>> futures) {
    List<AclBinding> acls = new ArrayList<>();
    for (KafkaFuture<FilterResults> value : futures.values()) {
        FilterResults results;
        try {
            results = value.get();
        } catch (Throwable e) {
            // This should be unreachable, since the future returned by KafkaFuture#allOf should
            // have failed if any Future failed.
            throw new KafkaException("DeleteAclsResult#all: internal error", e);
        }
        for (FilterResult result : results.values()) {
            if (result.exception() != null)
                throw result.exception();
            acls.add(result.binding());
        }
    }
    return acls;
}
}
@Test
public void testAllOfFuturesHandlesZeroFutures() throws Exception {
    KafkaFuture<Void> allFuture = KafkaFuture.allOf();
    assertTrue(allFuture.isDone());
    assertFalse(allFuture.isCancelled());
    assertFalse(allFuture.isCompletedExceptionally());
    allFuture.get();
}
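// A minimal sketch (not part of the original test class) of the other half of the allOf
// contract: once every input future is complete, an exceptionally-completed input makes
// the combined future complete exceptionally as well. Names here are illustrative only.
KafkaFutureImpl<Integer> ok = new KafkaFutureImpl<>();
KafkaFutureImpl<Integer> bad = new KafkaFutureImpl<>();
KafkaFuture<Void> combined = KafkaFuture.allOf(ok, bad);
ok.complete(1);
bad.completeExceptionally(new RuntimeException("induced failure"));
assertTrue(combined.isDone());
assertTrue(combined.isCompletedExceptionally());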
/**
 * Return a future which succeeds only if all the ACL deletions succeed, and which contains all the deleted ACLs.
 * Note that if the filters don't match any ACLs, this is not considered an error.
 */
public KafkaFuture<Collection<AclBinding>> all() {
    return KafkaFuture.allOf(futures.values().toArray(new KafkaFuture[0])).thenApply(v -> getAclBindings(futures));
}
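// Hypothetical caller for the method above; the "admin" client and the catch-all filter
// are illustrative assumptions. Delete every matching ACL and collect what was removed;
// all() fails if any per-filter deletion failed.
Collection<AclBinding> deleted =
    admin.deleteAcls(Collections.singleton(AclBindingFilter.ANY)).all().get();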
final int numThreads = 5;
final List<KafkaFutureImpl<Integer>> futures = new ArrayList<>();
for (int i = 0; i < numThreads; i++) {
    futures.add(new KafkaFutureImpl<>());
}
KafkaFuture<Void> allFuture = KafkaFuture.allOf(futures.toArray(new KafkaFuture[0]));
final List<CompleterThread> completerThreads = new ArrayList<>();
final List<WaiterThread> waiterThreads = new ArrayList<>();
for (int i = 0; i < numThreads; i++) {
    completerThreads.add(new CompleterThread<>(futures.get(i), i));
    waiterThreads.add(new WaiterThread<>(futures.get(i), i));
}
assertFalse(allFuture.isDone());
for (int i = 0; i < numThreads; i++) {
    waiterThreads.get(i).start();
}
// Start all but the last completer: the combined future must not be done yet.
for (int i = 0; i < numThreads - 1; i++) {
    completerThreads.get(i).start();
}
assertFalse(allFuture.isDone());
completerThreads.get(numThreads - 1).start();
allFuture.get();
assertTrue(allFuture.isDone());
for (int i = 0; i < numThreads; i++) {
    assertEquals(Integer.valueOf(i), futures.get(i).get());
}
@Test
public void testCompleteFutures() throws Exception {
    KafkaFutureImpl<Integer> future123 = new KafkaFutureImpl<>();
    assertTrue(future123.complete(123));
    assertEquals(Integer.valueOf(123), future123.get());
    assertFalse(future123.complete(456));
    assertTrue(future123.isDone());
    assertFalse(future123.isCancelled());
    assertFalse(future123.isCompletedExceptionally());

    KafkaFuture<Integer> future456 = KafkaFuture.completedFuture(456);
    assertEquals(Integer.valueOf(456), future456.get());

    KafkaFutureImpl<Integer> futureFail = new KafkaFutureImpl<>();
    futureFail.completeExceptionally(new RuntimeException("We require more vespene gas"));
    try {
        futureFail.get();
        fail("Expected an exception");
    } catch (ExecutionException e) {
        assertEquals(RuntimeException.class, e.getCause().getClass());
        assertEquals("We require more vespene gas", e.getCause().getMessage());
    }
}
public Set<String> topics() {
    KafkaFuture<Set<String>> names = adminClient.listTopics().names();
    try {
        KafkaFuture.allOf(names).get(10, TimeUnit.SECONDS);
        Set<String> topics = names.get();
        log.debug("Got topics: {}", topics);
        return topics;
    } catch (InterruptedException | ExecutionException | TimeoutException e) {
        throw new IllegalStateException("Exception occurred during topic retrieval.", e);
    }
}
/**
 * Return a future which succeeds only if all the record deletions succeed.
 */
public KafkaFuture<Void> all() {
    return KafkaFuture.allOf(futures.values().toArray(new KafkaFuture[0]));
}
}
@Test
public void testThenApply() throws Exception {
    KafkaFutureImpl<Integer> future = new KafkaFutureImpl<>();
    KafkaFuture<Integer> doubledFuture = future.thenApply(integer -> 2 * integer);
    assertFalse(doubledFuture.isDone());
    KafkaFuture<Integer> tripledFuture = future.thenApply(integer -> 3 * integer);
    assertFalse(tripledFuture.isDone());
    future.complete(21);
    assertEquals(Integer.valueOf(21), future.getNow(-1));
    assertEquals(Integer.valueOf(42), doubledFuture.getNow(-1));
    assertEquals(Integer.valueOf(63), tripledFuture.getNow(-1));
    KafkaFuture<Integer> quadrupledFuture = future.thenApply(integer -> 4 * integer);
    assertEquals(Integer.valueOf(84), quadrupledFuture.getNow(-1));

    KafkaFutureImpl<Integer> futureFail = new KafkaFutureImpl<>();
    KafkaFuture<Integer> futureAppliedFail = futureFail.thenApply(integer -> 2 * integer);
    futureFail.completeExceptionally(new RuntimeException());
    assertTrue(futureFail.isCompletedExceptionally());
    assertTrue(futureAppliedFail.isCompletedExceptionally());
}
void maybePurgeCommitedRecords() {
    // We do not check for exceptions here, since none of them is fatal enough that the
    // application should fail, and we will retry the delete with newer offsets anyway.
    if (deleteRecordsResult == null || deleteRecordsResult.all().isDone()) {
        if (deleteRecordsResult != null && deleteRecordsResult.all().isCompletedExceptionally()) {
            log.debug("Previous delete-records request has failed: {}. Try sending the new request now",
                deleteRecordsResult.lowWatermarks());
        }
        final Map<TopicPartition, RecordsToDelete> recordsToDelete = new HashMap<>();
        for (final Map.Entry<TopicPartition, Long> entry : active.recordsToDelete().entrySet()) {
            recordsToDelete.put(entry.getKey(), RecordsToDelete.beforeOffset(entry.getValue()));
        }
        deleteRecordsResult = adminClient.deleteRecords(recordsToDelete);
        log.trace("Sent delete-records request: {}", recordsToDelete);
    }
}
@Override
public boolean conditionMet() {
    return result.listings().isDone();
}
}, "Timed out waiting for listTopics to complete");
/**
 * Return a future which succeeds if log directory information for all replicas is available.
 */
public KafkaFuture<Map<TopicPartitionReplica, ReplicaLogDirInfo>> all() {
    return KafkaFuture.allOf(futures.values().toArray(new KafkaFuture[0])).
        thenApply(new KafkaFuture.BaseFunction<Void, Map<TopicPartitionReplica, ReplicaLogDirInfo>>() {
            @Override
            public Map<TopicPartitionReplica, ReplicaLogDirInfo> apply(Void v) {
                Map<TopicPartitionReplica, ReplicaLogDirInfo> replicaLogDirInfos = new HashMap<>();
                for (Map.Entry<TopicPartitionReplica, KafkaFuture<ReplicaLogDirInfo>> entry : futures.entrySet()) {
                    try {
                        replicaLogDirInfos.put(entry.getKey(), entry.getValue().get());
                    } catch (InterruptedException | ExecutionException e) {
                        // This should be unreachable, because allOf ensured that all the futures
                        // completed successfully.
                        throw new RuntimeException(e);
                    }
                }
                return replicaLogDirInfos;
            }
        });
}
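// Illustrative usage of the method above; the topic name, partition, and broker id are
// made-up values, and "admin" is an assumed AdminClient instance.
TopicPartitionReplica replica = new TopicPartitionReplica("my-topic", 0, 1);
Map<TopicPartitionReplica, ReplicaLogDirInfo> logDirInfoByReplica =
    admin.describeReplicaLogDirs(Collections.singleton(replica)).all().get();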
/**
 * Return a future which succeeds only if all the consumer group deletions succeed.
 */
public KafkaFuture<Void> all() {
    return KafkaFuture.allOf(futures.values().toArray(new KafkaFuture[0]));
}
}
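// A minimal sketch of a caller, assuming an AdminClient named "admin" and a group id with
// no active members (deleting a group with live consumers fails).
admin.deleteConsumerGroups(Collections.singleton("my-group")).all().get();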
private Config getKafkaBrokerConfig(AdminClient admin) throws Exception {
    final Collection<Node> nodes = admin.describeCluster().nodes()
        .get(KAFKA_QUERY_TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    if (nodes.isEmpty()) {
        throw new ConnectException("No brokers available to obtain default settings");
    }
    String nodeId = nodes.iterator().next().idString();
    Set<ConfigResource> resources = Collections.singleton(new ConfigResource(ConfigResource.Type.BROKER, nodeId));
    final Map<ConfigResource, Config> configs = admin.describeConfigs(resources).all()
        .get(KAFKA_QUERY_TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    if (configs.isEmpty()) {
        throw new ConnectException("No configs have been received");
    }
    return configs.values().iterator().next();
}
}
/**
 * Return a future which succeeds only if all the topic descriptions succeed.
 */
public KafkaFuture<Map<String, TopicDescription>> all() {
    return KafkaFuture.allOf(futures.values().toArray(new KafkaFuture[0])).
        thenApply(new KafkaFuture.BaseFunction<Void, Map<String, TopicDescription>>() {
            @Override
            public Map<String, TopicDescription> apply(Void v) {
                Map<String, TopicDescription> descriptions = new HashMap<>(futures.size());
                for (Map.Entry<String, KafkaFuture<TopicDescription>> entry : futures.entrySet()) {
                    try {
                        descriptions.put(entry.getKey(), entry.getValue().get());
                    } catch (InterruptedException | ExecutionException e) {
                        // This should be unreachable, because allOf ensured that all the futures
                        // completed successfully.
                        throw new RuntimeException(e);
                    }
                }
                return descriptions;
            }
        });
}
}
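// Illustrative caller; the topic names and the "admin" client are assumptions. The
// combined future yields one TopicDescription per requested topic, or fails if any
// individual describe failed.
Map<String, TopicDescription> byName =
    admin.describeTopics(Arrays.asList("orders", "payments")).all().get();
for (Map.Entry<String, TopicDescription> e : byName.entrySet()) {
    System.out.println(e.getKey() + " has " + e.getValue().partitions().size() + " partitions");
}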
/**
 * Return a future which succeeds if all the partition creations succeed.
 */
public KafkaFuture<Void> all() {
    return KafkaFuture.allOf(values.values().toArray(new KafkaFuture[0]));
}
}
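// Hypothetical usage: grow "my-topic" to six partitions. NewPartitions.increaseTo sets
// the desired total partition count; all() fails if any per-topic request failed.
admin.createPartitions(Collections.singletonMap("my-topic", NewPartitions.increaseTo(6)))
    .all().get();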
private void tryDelete(AdminClient adminClient, String topic) throws Exception {
    try {
        adminClient.deleteTopics(Collections.singleton(topic)).all().get(DELETE_TIMEOUT_SECONDS, TimeUnit.SECONDS);
    } catch (TimeoutException e) {
        LOG.info("Did not receive delete topic response within {} seconds. Checking if it succeeded", DELETE_TIMEOUT_SECONDS);
        if (adminClient.listTopics().names().get(DELETE_TIMEOUT_SECONDS, TimeUnit.SECONDS).contains(topic)) {
            throw new Exception("Topic still exists after timeout");
        }
    }
}
/**
 * Return a future which succeeds only if all the brokers have responded without error.
 */
public KafkaFuture<Map<Integer, Map<String, LogDirInfo>>> all() {
    return KafkaFuture.allOf(futures.values().toArray(new KafkaFuture[0])).
        thenApply(new KafkaFuture.BaseFunction<Void, Map<Integer, Map<String, LogDirInfo>>>() {
            @Override
            public Map<Integer, Map<String, LogDirInfo>> apply(Void v) {
                Map<Integer, Map<String, LogDirInfo>> descriptions = new HashMap<>(futures.size());
                for (Map.Entry<Integer, KafkaFuture<Map<String, LogDirInfo>>> entry : futures.entrySet()) {
                    try {
                        descriptions.put(entry.getKey(), entry.getValue().get());
                    } catch (InterruptedException | ExecutionException e) {
                        // This should be unreachable, because allOf ensured that all the futures
                        // completed successfully.
                        throw new RuntimeException(e);
                    }
                }
                return descriptions;
            }
        });
}
}
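// Sketch of a caller, assuming broker ids 0-2 and an AdminClient named "admin"; the
// result maps each broker id to its log directories and their contents.
Map<Integer, Map<String, LogDirInfo>> logDirsByBroker =
    admin.describeLogDirs(Arrays.asList(0, 1, 2)).all().get();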
/**
 * Return a future which succeeds if all the topic creations succeed.
 */
public KafkaFuture<Void> all() {
    return KafkaFuture.allOf(futures.values().toArray(new KafkaFuture[0]));
}
}
/**
 * Create a Kafka topic with the given parameters.
 *
 * @param topic       The name of the topic.
 * @param partitions  The number of partitions for this topic.
 * @param replication The replication factor for (partitions of) this topic.
 * @param topicConfig Additional topic-level configuration settings.
 */
void createTopic(final String topic,
                 final int partitions,
                 final int replication,
                 final Map<String, String> topicConfig) {
    log.debug("Creating topic { name: {}, partitions: {}, replication: {}, config: {} }",
        topic, partitions, replication, topicConfig);
    final ImmutableMap<String, Object> props = ImmutableMap.of(
        AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, brokerList(),
        AdminClientConfig.RETRIES_CONFIG, 5);
    try (AdminClient adminClient = AdminClient.create(props)) {
        final NewTopic newTopic = new NewTopic(topic, partitions, (short) replication);
        newTopic.configs(topicConfig);
        try {
            final CreateTopicsResult result = adminClient.createTopics(ImmutableList.of(newTopic));
            result.all().get();
        } catch (final Exception e) {
            throw new RuntimeException("Failed to create topic: " + topic, e);
        }
    }
}
/**
 * Return a future which yields all ConsumerGroupDescription objects, if all the describes succeed.
 */
public KafkaFuture<Map<String, ConsumerGroupDescription>> all() {
    return KafkaFuture.allOf(futures.values().toArray(new KafkaFuture[0])).thenApply(
        new KafkaFuture.BaseFunction<Void, Map<String, ConsumerGroupDescription>>() {
            @Override
            public Map<String, ConsumerGroupDescription> apply(Void v) {
                try {
                    Map<String, ConsumerGroupDescription> descriptions = new HashMap<>(futures.size());
                    for (Map.Entry<String, KafkaFuture<ConsumerGroupDescription>> entry : futures.entrySet()) {
                        descriptions.put(entry.getKey(), entry.getValue().get());
                    }
                    return descriptions;
                } catch (InterruptedException | ExecutionException e) {
                    // This should be unreachable, since KafkaFuture#allOf already ensured
                    // that all of the futures completed successfully.
                    throw new RuntimeException(e);
                }
            }
        });
}
}
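// Illustrative caller; the group id and the "admin" client are assumptions. all() yields
// the full map, from which a single group's description can be picked out.
ConsumerGroupDescription description =
    admin.describeConsumerGroups(Collections.singleton("my-group")).all().get().get("my-group");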