/**
 * Attaches the given callback as an exception listener to the given CompletableFuture; the callback is
 * invoked only when the future times out (i.e., completes exceptionally with a TimeoutException).
 * Delegates to {@code exceptionListener(future, TimeoutException.class, callback)}, which filters by
 * exception class, so normal completion and other exception types do not trigger the callback.
 *
 * @param future   The future to attach to.
 * @param callback The callback to invoke with the TimeoutException.
 * @param <T>      The Type of the future's result.
 */
public static <T> void onTimeout(CompletableFuture<T> future, Consumer<TimeoutException> callback) {
    exceptionListener(future, TimeoutException.class, callback);
}
/**
 * Links this instance's result future to the content of the second entry: normal completion of the
 * content future completes the result, and an exceptional completion propagates the failure.
 */
private void setOutcomeAfterSecondEntry() {
    // Forward both outcomes (value and exception) of the second entry's content into our result.
    CompletableFuture<ReadResultEntryContents> content = this.secondEntry.getContent();
    content.thenAccept(this.result::complete);
    Futures.exceptionListener(content, this.result::completeExceptionally);
}
/** * Creates a new instance of the StorageReadManager.Request class. * * @param offset The offset to read at. * @param length The length of the read. * @param successCallback A Consumer that will be invoked in case of successful completion of this request. * @param failureCallback A Consumer that will be invoked in case this request failed to process. * @param timeout Timeout for the request. */ Request(long offset, int length, Consumer<Result> successCallback, Consumer<Throwable> failureCallback, Duration timeout) { Preconditions.checkArgument(offset >= 0, "offset must be a non-negative number."); Preconditions.checkArgument(length > 0, "length must be a positive integer."); this.offset = offset; this.length = length; this.timeout = timeout; this.resultFuture = new CompletableFuture<>(); this.resultFuture.thenAccept(successCallback); Futures.exceptionListener(this.resultFuture, failureCallback); }
/** * Given a Supplier returning a Future, completes another future either with the result of the first future, in case * of normal completion, or exceptionally with the exception of the first future. * * @param futureSupplier A Supplier returning a Future to listen to. * @param toComplete A CompletableFuture that has not yet been completed, which will be completed with the result * of the Future from futureSupplier. * @param <T> Return type of Future. */ public static <T> void completeAfter(Supplier<CompletableFuture<? extends T>> futureSupplier, CompletableFuture<T> toComplete) { Preconditions.checkArgument(!toComplete.isDone(), "toComplete is already completed."); try { CompletableFuture<? extends T> f = futureSupplier.get(); // Async termination. f.thenAccept(toComplete::complete); Futures.exceptionListener(f, toComplete::completeExceptionally); } catch (Throwable ex) { // Synchronous termination. toComplete.completeExceptionally(ex); throw ex; } }
Futures.exceptionListener(f, result::completeExceptionally); } else { result.completeExceptionally(ex);
/**
 * Registers the given request as a dependent of this request. If this Request succeeds, the given Request will
 * be completed as well (with the appropriate result). If this Request fails, the given Request will fail as well.
 *
 * @param request The request to add as a dependent.
 * @throws IllegalArgumentException If the given request is not a sub-request of this one.
 */
void addDependent(Request request) {
    // Fixed the precondition message grammar ("does is not" -> "is not").
    Preconditions.checkArgument(isSubRequest(this, request), "Given Request is not a sub-request of this one.");
    this.resultFuture.thenRun(() -> request.complete(this));
    Futures.exceptionListener(this.resultFuture, request::fail);
}
/**
 * Asynchronously reads events on the dedicated reader executor, logging (but not swallowing into the
 * returned future) any failure that occurs during the read.
 *
 * @param scope    The scope to read from.
 * @param readerId The id of the reader performing the read.
 * @return A future containing the events that were read.
 */
private CompletableFuture<List<EventRead<Integer>>> asyncReadEvents(final String scope, final String readerId) {
    final CompletableFuture<List<EventRead<Integer>>> readTask =
            CompletableFuture.supplyAsync(() -> readEvents(scope, readerId), readerExecutor);
    Futures.exceptionListener(readTask,
            t -> log.error("Error observed while reading events for reader id :{}", readerId, t));
    return readTask;
}
/**
 * Creates a new RawClient for the given segment: resolves the segment's endpoint via the Controller,
 * then establishes a connection to it. If either step fails, the connection is closed with the failure.
 *
 * @param controller        Controller used to look up the segment's endpoint.
 * @param connectionFactory Factory used to establish the connection.
 * @param segmentId         The segment this client talks to.
 */
public RawClient(Controller controller, ConnectionFactory connectionFactory, Segment segmentId) {
    this.segmentId = segmentId;
    this.connection = controller.getEndpointForSegment(segmentId.getScopedName())
                                .thenCompose(uri -> connectionFactory.establishConnection(uri, responseProcessor));
    Futures.exceptionListener(this.connection, this::closeConnection);
}
/**
 * Waits up to SCALE_WAIT_SECONDS for the given scale-operation future to complete and asserts that the
 * operation reported success. Fails the test if the operation reported failure, or if the future did not
 * complete normally within the wait period.
 *
 * @param scaleStatus Future reporting whether the scale operation succeeded.
 */
private void checkScaleStatus(CompletableFuture<Boolean> scaleStatus) {
    Futures.exceptionListener(scaleStatus, t -> log.error("Scale Operation completed with an error", t));
    if (Futures.await(scaleStatus, SCALE_WAIT_SECONDS)) {
        // The future has completed, so join() will not block; read it once instead of three times.
        boolean succeeded = scaleStatus.join();
        log.info("Scale operation has completed: {}", succeeded);
        if (!succeeded) {
            // Bug fix: the original message had no '{}' placeholder, so SLF4J silently dropped the argument.
            log.error("Scale operation did not complete: {}", succeeded);
            Assert.fail("Scale operation did not complete successfully");
        }
    } else {
        Assert.fail("Scale operation threw an exception");
    }
}
/** * Triggers a number of metadata cleanups by repeatedly appending to a random new segment until a cleanup task is detected. * * @param expectedSegmentNames The segments that we are expecting to evict. */ CompletableFuture<Void> triggerMetadataCleanup(Collection<String> expectedSegmentNames) { String tempSegmentName = getSegmentName(Long.hashCode(System.nanoTime())); HashSet<String> remainingSegments = new HashSet<>(expectedSegmentNames); CompletableFuture<Void> cleanupTask = Futures.futureWithTimeout(TIMEOUT, this.executor); // Inject this callback into the MetadataCleaner callback, which was setup for us in createMetadataCleaner(). this.metadataCleanupFinishedCallback = evictedSegmentNames -> { remainingSegments.removeAll(evictedSegmentNames); if (remainingSegments.size() == 0) { cleanupTask.complete(null); } }; CompletableFuture<Void> af = appendRandomly(tempSegmentName, true, () -> !cleanupTask.isDone()); Futures.exceptionListener(af, cleanupTask::completeExceptionally); return cleanupTask; }
/**
 * Tests the exceptionListener() method: the callback must not fire on normal completion and must fire
 * with the exact exception on exceptional completion.
 */
@Test
public void testExceptionListener() {
    AtomicReference<Throwable> observed = new AtomicReference<>();

    // Phase 1: normal completion - the listener must not be invoked.
    CompletableFuture<Void> future = new CompletableFuture<>();
    Futures.exceptionListener(future, observed::set);
    future.complete(null);
    Assert.assertNull("exceptionListener invoked the callback when the future was completed normally.", observed.get());

    // Phase 2: exceptional completion - the listener must receive the exact exception instance.
    observed.set(null);
    future = new CompletableFuture<>();
    Exception expected = new IntentionalException();
    Futures.exceptionListener(future, observed::set);
    future.completeExceptionally(expected);
    Assert.assertNotNull("exceptionListener did not invoke the callback when the future was completed exceptionally.", observed.get());
    Assert.assertEquals("Unexpected exception was passed to the callback from exceptionListener when the future was completed exceptionally.", expected, observed.get());
}
/**
 * Asynchronously creates the requested number of writers for the given stream and starts them writing.
 * Once all writers have been started, writersComplete is wired to complete when every writer future does.
 *
 * @param clientFactory   Factory used to instantiate the writers.
 * @param writers         Number of writers to create.
 * @param scope           Scope the writers write into (logged only).
 * @param stream          Stream the writers write to.
 * @param writersComplete Future completed (or failed) when all writers finish.
 */
private void createWritersInternal(EventStreamClientFactory clientFactory, final int writers, String scope, String stream,
                                   CompletableFuture<Void> writersComplete) {
    testState.writersListComplete.add(writersComplete);
    log.info("Client factory details {}", clientFactory.toString());
    log.info("Creating {} writers", writers);
    final List<CompletableFuture<Void>> writerFutures = new ArrayList<>();
    log.info("Writers writing in the scope {}", scope);
    CompletableFuture.runAsync(() -> {
        for (int writerIndex = 0; writerIndex < writers; writerIndex++) {
            log.info("Starting writer{}", writerIndex);
            EventStreamWriter<String> writer = instantiateWriter(clientFactory, stream);
            CompletableFuture<Void> writeTask = startWriting(writer);
            Futures.exceptionListener(writeTask, t -> log.error("Error while writing events:", t));
            writerFutures.add(writeTask);
        }
    }, executorService).thenRun(() -> {
        // All writers have been started; track them and complete writersComplete when they all finish.
        testState.writers.addAll(writerFutures);
        Futures.completeAfter(() -> Futures.allOf(writerFutures), writersComplete);
        Futures.exceptionListener(writersComplete, t -> log.error("Exception while waiting for writers to complete", t));
    });
}
/** * Triggers at least one attribute cleanup for the given segment by continuously appending data (with no attributes) * for that segment until a cleanup is detected * * @param segmentName The segment we are trying to evict attributes for. */ CompletableFuture<Void> triggerAttributeCleanup(String segmentName) { CompletableFuture<Void> cleanupTask = Futures.futureWithTimeout(TIMEOUT, this.executor); SegmentMetadata sm = super.metadata.getStreamSegmentMetadata(super.metadata.getStreamSegmentId(segmentName, false)); // Inject this callback into the MetadataCleaner callback, which was setup for us in createMetadataCleaner(). this.metadataCleanupFinishedCallback = ignored -> { boolean onlyCoreAttributes = sm.getAttributes().keySet().stream().allMatch(Attributes::isCoreAttribute); if (onlyCoreAttributes) { cleanupTask.complete(null); } }; CompletableFuture<Void> af = appendRandomly(segmentName, false, () -> !cleanupTask.isDone()); Futures.exceptionListener(af, cleanupTask::completeExceptionally); return cleanupTask; }
try { CompletableFuture<Void> init = newAggregator.initialize(this.config.getFlushTimeout()); Futures.exceptionListener(init, ex -> newAggregator.close()); return init.thenApply(ignored -> { this.processors.put(streamSegmentId, pc);
/** * Triggers all the Future Reads in the given collection. * * @param futureReads The Future Reads to trigger. */ private void triggerFutureReads(Collection<FutureReadResultEntry> futureReads) { for (FutureReadResultEntry r : futureReads) { ReadResultEntry entry = getSingleReadResultEntry(r.getStreamSegmentOffset(), r.getRequestedReadLength()); assert entry != null : "Serving a StorageReadResultEntry with a null result"; assert !(entry instanceof FutureReadResultEntry) : "Serving a FutureReadResultEntry with another FutureReadResultEntry."; log.trace("{}: triggerFutureReads (Offset = {}, Type = {}).", this.traceObjectId, r.getStreamSegmentOffset(), entry.getType()); if (entry.getType() == ReadResultEntryType.EndOfStreamSegment) { // We have attempted to read beyond the end of the stream. Fail the read request with the appropriate message. r.fail(new StreamSegmentSealedException(String.format("StreamSegment has been sealed at offset %d. There can be no more reads beyond this offset.", this.metadata.getLength()))); } else { if (!entry.getContent().isDone()) { // Normally, all Future Reads are served from Cache, since they reflect data that has just been appended. // However, it's possible that after recovery, we get a read for some data that we do not have in the // cache (but it's not a tail read) - this data exists in Storage but our StorageLength has not yet been // updated. As such, the only solution we have is to return a FutureRead which will be satisfied when // the Writer updates the StorageLength (and trigger future reads). In that scenario, entry we get // will likely not be auto-fetched, so we need to request the content. entry.requestContent(this.config.getStorageReadDefaultTimeout()); } CompletableFuture<ReadResultEntryContents> entryContent = entry.getContent(); entryContent.thenAccept(r::complete); Futures.exceptionListener(entryContent, r::fail); } } }
/**
 * Attempts to map a Segment to an Id, by first trying to retrieve an existing id, and, should that not exist,
 * assign a new one.
 *
 * @param segmentName The name of the Segment to assign id for.
 * @param timeout     Timeout for the operation.
 */
private void assignSegmentId(String segmentName, Duration timeout) {
    TimeoutTimer timer = new TimeoutTimer(timeout);
    // If any stage of the lookup-then-assign pipeline fails, record the failed assignment for this segment.
    Futures.exceptionListener(
            getSegmentInfoInternal(segmentName, timer.getRemaining())
                    .thenComposeAsync(info -> submitAssignmentWithRetry(SegmentInfo.deserialize(info), timer.getRemaining()), this.executor),
            ex -> failAssignment(segmentName, ex));
}
Futures.exceptionListener(appendFuture, successfulMap::completeExceptionally);
readerList.add(reader); final CompletableFuture<Void> readerFuture = startReading(reader); Futures.exceptionListener(readerFuture, t -> log.error("Error while reading events:", t)); readerFutureList.add(readerFuture); testState.readers.addAll(readerFutureList); Futures.completeAfter(() -> Futures.allOf(readerFutureList), testState.readersComplete); Futures.exceptionListener(testState.readersComplete, t -> log.error("Exception while waiting for all readers to complete", t)); });
/**
 * Generates StreamCuts for the reader group and returns them once generation completes.
 * A short-lived reader is created so that, after the group-refresh sleep, reading the next event updates
 * the reader's latest offset — which the StreamCut generation depends on.
 * NOTE(review): the ordering here is deliberate (generate -> sleep for refresh -> read) and appears
 * timing-sensitive; do not reorder without verifying against ReaderGroup.generateStreamCuts semantics.
 *
 * @param readerGroup The reader group to generate StreamCuts for.
 * @return A map from each Stream in the group to its generated StreamCut.
 */
private Map<Stream, StreamCut> generateStreamCuts(final ReaderGroup readerGroup) {
    log.info("Generate StreamCuts");
    String readerId = "streamCut";
    CompletableFuture<Map<io.pravega.client.stream.Stream, StreamCut>> streamCuts = null;
    try (EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(SCOPE_2, ClientConfig.builder().controllerURI(controllerURI).build());
         EventStreamReader<Integer> reader = clientFactory.createReader(readerId, READER_GROUP_NAME, new JavaSerializer<Integer>(), readerConfig)) {
        streamCuts = readerGroup.generateStreamCuts(executor); //create checkpoint
        Exceptions.handleInterrupted(() -> TimeUnit.MILLISECONDS.sleep(GROUP_REFRESH_TIME_MILLIS)); // sleep for group refresh.
        //read the next event, this causes the reader to update its latest offset.
        EventRead<Integer> event = reader.readNextEvent(READ_TIMEOUT);
        // All events are expected to have been consumed already, so this read should return nothing.
        assertTrue("No events expected as all events are read", (event.getEvent() == null) && (!event.isCheckpoint()));
        Futures.exceptionListener(streamCuts, t -> log.error("StreamCut generation failed", t));
        // Block until the StreamCut generation future completes (within Futures.await's default timeout).
        assertTrue("Stream cut generation should be completed", Futures.await(streamCuts));
    } catch (ReinitializationRequiredException e) {
        log.error("Exception while reading event using readerId: {}", readerId, e);
        fail("Reinitialization Exception is not expected");
    }
    // streamCuts is non-null here: the only path that leaves it null goes through fail(), which throws.
    return streamCuts.join();
}
keyRanges, executorService).getFuture(); Futures.exceptionListener(scaleStatus, t -> log.error("Scale Operation completed with an error", t));