CompletableFuture<List<TableEntry>> getResultFutures() {
    return Futures.allOfWithResults(this.resultFutures);
}
public CompletableFuture<Map<Long, Long>> getSealedSegmentsSize(String scope, String stream, List<Long> sealedSegments,
                                                                String delegationToken) {
    return Futures.allOfWithResults(sealedSegments.stream()
            .parallel()
            .collect(Collectors.toMap(x -> x, x -> getSegmentOffset(scope, stream, x, delegationToken))));
}
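Both shapes of allOfWithResults appear throughout these snippets: one collapses a List<CompletableFuture<T>> into a CompletableFuture<List<T>> (as in getResultFutures above), the other collapses a Map<K, CompletableFuture<V>> into a CompletableFuture<Map<K, V>> (as in getSealedSegmentsSize). The following is a minimal, self-contained sketch of those semantics, not Pravega's actual implementation; the class name FuturesSketch and the demo values are illustrative only.

import java.util.List;
import java.util.Map;
import java.util.concurrent.CompletableFuture;
import java.util.stream.Collectors;

// Hypothetical stand-in for io.pravega.common.concurrent.Futures.
final class FuturesSketch {

    // Collapses a list of futures into a future of the list of their results, preserving order.
    static <T> CompletableFuture<List<T>> allOfWithResults(List<CompletableFuture<T>> futures) {
        return CompletableFuture.allOf(futures.toArray(new CompletableFuture[0]))
                .thenApply(v -> futures.stream()
                        .map(CompletableFuture::join) // non-blocking: every future is already complete here
                        .collect(Collectors.toList()));
    }

    // Collapses a map of futures into a future of the map of their results, under the same keys.
    static <K, V> CompletableFuture<Map<K, V>> allOfWithResults(Map<K, CompletableFuture<V>> futures) {
        return CompletableFuture.allOf(futures.values().toArray(new CompletableFuture[0]))
                .thenApply(v -> futures.entrySet().stream()
                        .collect(Collectors.toMap(Map.Entry::getKey, e -> e.getValue().join())));
    }

    public static void main(String[] args) {
        List<CompletableFuture<Integer>> list = List.of(
                CompletableFuture.completedFuture(1), CompletableFuture.completedFuture(2));
        System.out.println(allOfWithResults(list).join()); // [1, 2]

        Map<String, CompletableFuture<Integer>> map = Map.of("a", CompletableFuture.completedFuture(1));
        System.out.println(allOfWithResults(map).join()); // {a=1}
    }
}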
private CompletableFuture<Void> clearMarkers(final Set<Long> segments) {
    return Futures.toVoid(Futures.allOfWithResults(segments.stream()
            .parallel()
            .map(this::removeColdMarker)
            .collect(Collectors.toList())));
}
@Override
public CompletableFuture<Map<String, Data>> getCurrentTxns() {
    return store.getChildren(activeTxRoot)
            .thenCompose(children -> Futures.allOfWithResults(children.stream()
                    .map(x -> getTxnInEpoch(Integer.parseInt(x)))
                    .collect(Collectors.toList()))
                    .thenApply(list -> {
                        Map<String, Data> map = new HashMap<>();
                        list.forEach(map::putAll);
                        return map;
                    }));
}
@Override
public CompletableFuture<Integer> getNumberOfOngoingTransactions() {
    return store.getChildren(activeTxRoot)
            .thenCompose(list -> Futures.allOfWithResults(list.stream()
                    .map(epoch -> getNumberOfOngoingTransactions(Integer.parseInt(epoch)))
                    .collect(Collectors.toList())))
            .thenApply(list -> list.stream().reduce(0, Integer::sum));
}
@Override
public CompletableFuture<WriterFlushResult> flush(Duration timeout) {
    return Futures.allOfWithResults(this.processors.stream().map(wsp -> wsp.flush(timeout)).collect(Collectors.toList()))
            .thenApply(results -> {
                WriterFlushResult r = results.get(0);
                for (int i = 1; i < results.size(); i++) {
                    r.withFlushResult(results.get(i));
                }
                return r;
            });
}
/**
 * Fetches the epoch history records for the active epoch and the supplied `epoch` from the store.
 */
private CompletableFuture<List<EpochRecord>> getEpochRecords(String scope, String stream, int epoch, OperationContext context) {
    List<CompletableFuture<EpochRecord>> list = new ArrayList<>();
    list.add(streamMetadataStore.getEpoch(scope, stream, epoch, context, executor));
    list.add(streamMetadataStore.getActiveEpoch(scope, stream, context, true, executor));
    return Futures.allOfWithResults(list);
}
public CompletableFuture<Void> notifyPolicyUpdates(String scope, String stream, List<Segment> activeSegments,
                                                   ScalingPolicy policy, String delegationToken, long requestId) {
    return Futures.toVoid(Futures.allOfWithResults(activeSegments.stream()
            .parallel()
            .map(segment -> notifyPolicyUpdate(scope, stream, policy, segment.segmentId(), delegationToken, requestId))
            .collect(Collectors.toList())));
}
public CompletableFuture<Void> notifyNewSegments(String scope, String stream, StreamConfiguration configuration,
                                                 List<Long> segmentIds, String controllerToken, long requestId) {
    return Futures.toVoid(Futures.allOfWithResults(segmentIds.stream()
            .parallel()
            .map(segment -> notifyNewSegment(scope, stream, segment, configuration.getScalingPolicy(), controllerToken, requestId))
            .collect(Collectors.toList())));
}
@VisibleForTesting
CompletableFuture<List<EpochRecord>> fetchEpochs(int fromEpoch, int toEpoch, boolean ignoreCache) {
    // Fetch the history time series chunk corresponding to `fromEpoch` and read entries until either the
    // last entry or `toEpoch`. If `toEpoch` is not in this chunk, fetch the next chunk and keep reading
    // until all records up to `toEpoch` have been read, computing each history record from the history
    // time series by applying its delta to the previous record.
    return getActiveEpochRecord(ignoreCache)
            .thenApply(currentEpoch -> currentEpoch.getEpoch() / historyChunkSize.get())
            .thenCompose(latestChunkNumber -> Futures.allOfWithResults(
                    IntStream.range(fromEpoch / historyChunkSize.get(), toEpoch / historyChunkSize.get() + 1)
                             .mapToObj(i -> {
                                 int firstEpoch = Math.max(i * historyChunkSize.get(), fromEpoch);
                                 boolean ignoreCached = i >= latestChunkNumber;
                                 return getEpochsFromHistoryChunk(i, firstEpoch, toEpoch, ignoreCached);
                             })
                             .collect(Collectors.toList())))
            .thenApply(c -> c.stream().flatMap(Collection::stream).collect(Collectors.toList()));
}
public static Map<Segment, Long> getSegmentsForStreams(Controller controller, ReaderGroupConfig config) {
    Map<Stream, StreamCut> streamToStreamCuts = config.getStartingStreamCuts();
    final List<CompletableFuture<Map<Segment, Long>>> futures = new ArrayList<>(streamToStreamCuts.size());
    streamToStreamCuts.forEach((stream, streamCut) -> {
        if (streamCut.equals(StreamCut.UNBOUNDED)) {
            futures.add(controller.getSegmentsAtTime(stream, 0L));
        } else {
            futures.add(CompletableFuture.completedFuture(streamCut.asImpl().getPositions()));
        }
    });
    return getAndHandleExceptions(allOfWithResults(futures).thenApply(listOfMaps -> listOfMaps.stream()
            .flatMap(map -> map.entrySet().stream())
            .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue))), InvalidStreamException::new);
}
@Override
public CompletableFuture<Map<String, Data>> getTxnInEpoch(int epoch) {
    return Futures.exceptionallyExpecting(store.getChildren(getEpochPath(epoch)),
            e -> Exceptions.unwrap(e) instanceof StoreException.DataNotFoundException, Collections.emptyList())
            .thenCompose(txIds -> Futures.allOfWithResults(txIds.stream().collect(
                    Collectors.toMap(txId -> txId,
                            txId -> Futures.exceptionallyExpecting(store.getData(getActiveTxPath(epoch, txId)),
                                    e -> Exceptions.unwrap(e) instanceof StoreException.DataNotFoundException, EMPTY_DATA))))
                    .thenApply(txnMap -> txnMap.entrySet().stream()
                            .filter(x -> !x.getValue().equals(EMPTY_DATA))
                            .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue))));
}
@Override
public CompletableFuture<Boolean> isStreamCutValid(Map<Long, Long> streamCut) {
    return Futures.allOfWithResults(streamCut.keySet().stream()
            .map(x -> getSegment(x).thenApply(segment -> new SimpleEntry<>(segment.getKeyStart(), segment.getKeyEnd())))
            .collect(Collectors.toList()))
            .thenAccept(x -> RecordHelper.validateStreamCut(new ArrayList<>(x)))
            .handle((r, e) -> {
                if (e != null) {
                    if (Exceptions.unwrap(e) instanceof IllegalArgumentException) {
                        return false;
                    } else {
                        log.warn("Exception while trying to validate a stream cut for stream {}/{}", scope, name);
                        throw Exceptions.sneakyThrow(e);
                    }
                } else {
                    return true;
                }
            });
}
/**
 * Lists the streams in a scope.
 *
 * @param scopeName Name of the scope.
 * @return A map of the streams in the scope to their configurations.
 */
@Override
public CompletableFuture<Map<String, StreamConfiguration>> listStreamsInScope(final String scopeName) {
    return getScope(scopeName).listStreamsInScope().thenCompose(streams -> {
        HashMap<String, CompletableFuture<Optional<StreamConfiguration>>> result = new HashMap<>();
        for (String s : streams) {
            Stream stream = getStream(scopeName, s, null);
            result.put(stream.getName(),
                    Futures.exceptionallyExpecting(stream.getConfiguration(),
                            e -> e instanceof StoreException.DataNotFoundException, null)
                           .thenApply(Optional::ofNullable));
        }
        return Futures.allOfWithResults(result)
                .thenApply(x -> x.entrySet().stream()
                        .filter(y -> y.getValue().isPresent())
                        .collect(Collectors.toMap(Map.Entry::getKey, entry -> entry.getValue().get())));
    });
}
private Map<String, List<Long>> executeUpdates(HashMap<String, ArrayList<TableEntry>> updates, TableStore tableStore) throws Exception {
    val updateResult = updates.entrySet().stream()
            .collect(Collectors.toMap(Map.Entry::getKey, e -> tableStore.put(e.getKey(), e.getValue(), TIMEOUT)));
    return Futures.allOfWithResults(updateResult).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
}
private CompletableFuture<List<EpochRecord>> getEpochsFromHistoryChunk(int chunk, int firstEpoch, int toEpoch, boolean ignoreCached) {
    return getEpochRecord(firstEpoch)
            .thenCompose(first -> getHistoryTimeSeriesChunk(chunk, ignoreCached)
                    .thenCompose(x -> {
                        List<CompletableFuture<EpochRecord>> identity = new ArrayList<>();
                        identity.add(CompletableFuture.completedFuture(first));
                        return Futures.allOfWithResults(x.getHistoryRecords().stream()
                                .filter(r -> r.getEpoch() > firstEpoch && r.getEpoch() <= toEpoch)
                                .reduce(identity, (r, s) -> {
                                    CompletableFuture<EpochRecord> next = newEpochRecord(r.get(r.size() - 1),
                                            s.getEpoch(), s.getReferenceEpoch(), s.getSegmentsCreated(),
                                            s.getSegmentsSealed().stream()
                                             .map(StreamSegmentRecord::segmentId)
                                             .collect(Collectors.toList()),
                                            s.getScaleTime());
                                    ArrayList<CompletableFuture<EpochRecord>> list = new ArrayList<>(r);
                                    list.add(next);
                                    return list;
                                }, (r, s) -> {
                                    ArrayList<CompletableFuture<EpochRecord>> list = new ArrayList<>(r);
                                    list.addAll(s);
                                    return list;
                                }));
                    }));
}
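The reduce above builds each epoch-record future from the one before it, so the collected futures form a sequential dependency chain rather than independent parallel work; allOfWithResults then simply gathers the chain's results in order. A minimal, self-contained sketch of that pattern follows (the class name ChainSketch and the integer values are illustrative, not from the source):

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CompletableFuture;

public final class ChainSketch {
    public static void main(String[] args) {
        // Seed the reduction with a single already-completed future, mirroring `identity` above.
        List<CompletableFuture<Integer>> identity = new ArrayList<>();
        identity.add(CompletableFuture.completedFuture(0));

        // Each new future is derived from the last element of the accumulator via thenApply,
        // so element i cannot complete before element i - 1.
        List<CompletableFuture<Integer>> chain = List.of(1, 2, 3).stream()
                .reduce(identity, (acc, n) -> {
                    CompletableFuture<Integer> next = acc.get(acc.size() - 1).thenApply(prev -> prev + n);
                    List<CompletableFuture<Integer>> copy = new ArrayList<>(acc);
                    copy.add(next);
                    return copy;
                }, (a, b) -> {
                    List<CompletableFuture<Integer>> merged = new ArrayList<>(a);
                    merged.addAll(b);
                    return merged;
                });

        // Futures.allOfWithResults(chain) would yield [0, 1, 3, 6] here.
        chain.forEach(f -> System.out.println(f.join()));
    }
}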
private void readEventsAndVerify(final String scope, int startInclusive, int endExclusive) {
    log.info("Read and Verify events between [{},{})", startInclusive, endExclusive);
    final List<CompletableFuture<List<EventRead<Integer>>>> readResults = new ArrayList<>();

    // Start reading using the configured number of readers.
    for (int i = 0; i < NUMBER_OF_READERS; i++) {
        readResults.add(asyncReadEvents(scope, "reader-" + i));
    }

    // Collect the results from all readers.
    List<List<EventRead<Integer>>> results = Futures.allOfWithResults(readResults).join();
    List<EventRead<Integer>> eventsRead = results.stream().flatMap(List::stream).collect(Collectors.toList());
    verifyEvents(eventsRead, startInclusive, endExclusive);
}
/**
 * Flushes eligible operations to Storage, if necessary. Does not perform any merges.
 */
private CompletableFuture<Void> flush(Void ignored) {
    checkRunning();
    long traceId = LoggerHelpers.traceEnterWithContext(log, this.traceObjectId, "flush");

    // Flush everything we can flush.
    val flushFutures = this.processors.values().stream()
            .filter(ProcessorCollection::mustFlush)
            .map(a -> a.flush(this.config.getFlushTimeout()))
            .collect(Collectors.toList());

    return Futures.allOfWithResults(flushFutures)
            .thenAcceptAsync(flushResults -> {
                FlushStageResult result = new FlushStageResult();
                flushResults.forEach(result::withFlushResult);
                if (result.getFlushedBytes() + result.getMergedBytes() + result.count > 0) {
                    logStageEvent("Flush", result);
                }
                LoggerHelpers.traceLeave(log, this.traceObjectId, "flush", traceId);
            }, this.executor);
}
protected TreeMap<LogAddress, byte[]> populate(DurableDataLog log, int writeCount) {
    TreeMap<LogAddress, byte[]> writtenData = new TreeMap<>(Comparator.comparingLong(LogAddress::getSequence));
    val data = new ArrayList<byte[]>();
    val futures = new ArrayList<CompletableFuture<LogAddress>>();
    for (int i = 0; i < writeCount; i++) {
        byte[] writeData = getWriteData();
        futures.add(log.append(new ByteArraySegment(writeData), TIMEOUT));
        data.add(writeData);
    }

    val addresses = Futures.allOfWithResults(futures).join();
    for (int i = 0; i < data.size(); i++) {
        writtenData.put(addresses.get(i), data.get(i));
    }

    return writtenData;
}
/**
 * Tests a scenario where all item processors finish immediately (they return a completed future).
 */
@Test
public void testInstantCompletion() {
    final int itemCount = 10000;
    Supplier<Integer> nextIndex = new AtomicInteger()::incrementAndGet;
    Function<Integer, CompletableFuture<Integer>> itemProcessor = i -> CompletableFuture.completedFuture(nextIndex.get());
    @Cleanup
    val p = new TestProcessor(CAPACITY, itemProcessor, executorService());
    val resultFutures = new ArrayList<CompletableFuture<Integer>>(itemCount);
    for (int i = 0; i < itemCount; i++) {
        resultFutures.add(p.process(i));
    }

    // Verify they have been executed in order.
    val results = Futures.allOfWithResults(resultFutures).join();
    for (int i = 0; i < results.size(); i++) {
        Assert.assertEquals("Unexpected result at index " + i, i + 1, (int) results.get(i));
    }
}