/**
 * Return a future that completes successfully once every replica movement has succeeded.
 */
public KafkaFuture<Void> all() {
    KafkaFuture<?>[] pending = futures.values().toArray(new KafkaFuture[0]);
    return KafkaFuture.allOf(pending);
}
}
/**
 * Return a future that completes successfully only when every ACL creation has succeeded.
 */
public KafkaFuture<Void> all() {
    KafkaFuture<?>[] pending = futures.values().toArray(new KafkaFuture[0]);
    return KafkaFuture.allOf(pending);
}
}
/**
 * Return a future that completes successfully only when every alter-configs operation has succeeded.
 */
public KafkaFuture<Void> all() {
    KafkaFuture<?>[] pending = futures.values().toArray(new KafkaFuture[0]);
    return KafkaFuture.allOf(pending);
}
/**
 * Return a future that completes successfully only when every records deletion has succeeded.
 */
public KafkaFuture<Void> all() {
    KafkaFuture<?>[] pending = futures.values().toArray(new KafkaFuture[0]);
    return KafkaFuture.allOf(pending);
}
}
/**
 * Return a future that completes successfully only when every consumer group deletion has succeeded.
 */
public KafkaFuture<Void> all() {
    KafkaFuture<?>[] pending = futures.values().toArray(new KafkaFuture[0]);
    return KafkaFuture.allOf(pending);
}
}
/**
 * Return a future that completes successfully once every partition creation has succeeded.
 */
public KafkaFuture<Void> all() {
    KafkaFuture<?>[] pending = values.values().toArray(new KafkaFuture[0]);
    return KafkaFuture.allOf(pending);
}
}
/**
 * Return a future that completes successfully once every topic creation has succeeded.
 */
public KafkaFuture<Void> all() {
    KafkaFuture<?>[] pending = futures.values().toArray(new KafkaFuture[0]);
    return KafkaFuture.allOf(pending);
}
}
/**
 * Return a future that completes successfully only when every topic deletion has succeeded.
 */
public KafkaFuture<Void> all() {
    KafkaFuture<?>[] pending = futures.values().toArray(new KafkaFuture[0]);
    return KafkaFuture.allOf(pending);
}
}
/**
 * Return a future that completes successfully only when every ACL deletion has succeeded,
 * yielding the full collection of deleted ACLs.
 * A filter that matches no ACLs is not treated as an error.
 */
public KafkaFuture<Collection<AclBinding>> all() {
    KafkaFuture<Void> allDone = KafkaFuture.allOf(futures.values().toArray(new KafkaFuture[0]));
    return allDone.thenApply(v -> getAclBindings(futures));
}
/** * Return a future which succeeds if log directory information of all replicas are available */ public KafkaFuture<Map<TopicPartitionReplica, ReplicaLogDirInfo>> all() { return KafkaFuture.allOf(futures.values().toArray(new KafkaFuture[0])). thenApply(new KafkaFuture.BaseFunction<Void, Map<TopicPartitionReplica, ReplicaLogDirInfo>>() { @Override public Map<TopicPartitionReplica, ReplicaLogDirInfo> apply(Void v) { Map<TopicPartitionReplica, ReplicaLogDirInfo> replicaLogDirInfos = new HashMap<>(); for (Map.Entry<TopicPartitionReplica, KafkaFuture<ReplicaLogDirInfo>> entry : futures.entrySet()) { try { replicaLogDirInfos.put(entry.getKey(), entry.getValue().get()); } catch (InterruptedException | ExecutionException e) { // This should be unreachable, because allOf ensured that all the futures completed successfully. throw new RuntimeException(e); } } return replicaLogDirInfos; } }); }
/** * Return a future which succeeds only if all the topic descriptions succeed. */ public KafkaFuture<Map<String, TopicDescription>> all() { return KafkaFuture.allOf(futures.values().toArray(new KafkaFuture[0])). thenApply(new KafkaFuture.BaseFunction<Void, Map<String, TopicDescription>>() { @Override public Map<String, TopicDescription> apply(Void v) { Map<String, TopicDescription> descriptions = new HashMap<>(futures.size()); for (Map.Entry<String, KafkaFuture<TopicDescription>> entry : futures.entrySet()) { try { descriptions.put(entry.getKey(), entry.getValue().get()); } catch (InterruptedException | ExecutionException e) { // This should be unreachable, because allOf ensured that all the futures // completed successfully. throw new RuntimeException(e); } } return descriptions; } }); } }
/** * Return a future which yields all ConsumerGroupDescription objects, if all the describes succeed. */ public KafkaFuture<Map<String, ConsumerGroupDescription>> all() { return KafkaFuture.allOf(futures.values().toArray(new KafkaFuture[0])).thenApply( new KafkaFuture.BaseFunction<Void, Map<String, ConsumerGroupDescription>>() { @Override public Map<String, ConsumerGroupDescription> apply(Void v) { try { Map<String, ConsumerGroupDescription> descriptions = new HashMap<>(futures.size()); for (Map.Entry<String, KafkaFuture<ConsumerGroupDescription>> entry : futures.entrySet()) { descriptions.put(entry.getKey(), entry.getValue().get()); } return descriptions; } catch (InterruptedException | ExecutionException e) { // This should be unreachable, since the KafkaFuture#allOf already ensured // that all of the futures completed successfully. throw new RuntimeException(e); } } }); } }
/** * Return a future which succeeds only if all the brokers have responded without error */ public KafkaFuture<Map<Integer, Map<String, LogDirInfo>>> all() { return KafkaFuture.allOf(futures.values().toArray(new KafkaFuture[0])). thenApply(new KafkaFuture.BaseFunction<Void, Map<Integer, Map<String, LogDirInfo>>>() { @Override public Map<Integer, Map<String, LogDirInfo>> apply(Void v) { Map<Integer, Map<String, LogDirInfo>> descriptions = new HashMap<>(futures.size()); for (Map.Entry<Integer, KafkaFuture<Map<String, LogDirInfo>>> entry : futures.entrySet()) { try { descriptions.put(entry.getKey(), entry.getValue().get()); } catch (InterruptedException | ExecutionException e) { // This should be unreachable, because allOf ensured that all the futures completed successfully. throw new RuntimeException(e); } } return descriptions; } }); } }
/** * Return a future which succeeds only if all the config descriptions succeed. */ public KafkaFuture<Map<ConfigResource, Config>> all() { return KafkaFuture.allOf(futures.values().toArray(new KafkaFuture[0])). thenApply(new KafkaFuture.BaseFunction<Void, Map<ConfigResource, Config>>() { @Override public Map<ConfigResource, Config> apply(Void v) { Map<ConfigResource, Config> configs = new HashMap<>(futures.size()); for (Map.Entry<ConfigResource, KafkaFuture<Config>> entry : futures.entrySet()) { try { configs.put(entry.getKey(), entry.getValue().get()); } catch (InterruptedException | ExecutionException e) { // This should be unreachable, because allOf ensured that all the futures // completed successfully. throw new RuntimeException(e); } } return configs; } }); } }
futures.add(new KafkaFutureImpl<>()); KafkaFuture<Void> allFuture = KafkaFuture.allOf(futures.toArray(new KafkaFuture[0])); final List<CompleterThread> completerThreads = new ArrayList<>(); final List<WaiterThread> waiterThreads = new ArrayList<>();
/**
 * allOf() invoked with no futures must return a future that is already complete,
 * neither cancelled nor exceptional, and whose get() returns immediately.
 */
@Test
public void testAllOfFuturesHandlesZeroFutures() throws Exception {
    KafkaFuture<Void> empty = KafkaFuture.allOf();
    assertTrue(empty.isDone());
    assertFalse(empty.isCancelled());
    assertFalse(empty.isCompletedExceptionally());
    empty.get();
}
/**
 * Fetch the names of all topics currently visible to the admin client,
 * waiting at most 10 seconds for the listing to complete.
 *
 * @return the set of topic names
 * @throws IllegalStateException if the listing fails, times out, or is interrupted
 */
public Set<String> topics() {
    KafkaFuture<Set<String>> names = adminClient.listTopics().names();
    try {
        KafkaFuture.allOf(names).get(10, TimeUnit.SECONDS);
        Set<String> topics = names.get();
        log.debug("Got topics: {}", topics);
        return topics;
    } catch (InterruptedException e) {
        // Restore the interrupt flag so callers can still observe the interruption.
        Thread.currentThread().interrupt();
        // Typo fixed: "occured" -> "occurred".
        throw new IllegalStateException("Exception occurred during topic retrieval.", e);
    } catch (ExecutionException | TimeoutException e) {
        throw new IllegalStateException("Exception occurred during topic retrieval.", e);
    }
}