public static RetryWithBackoff withExpBackoff(long initialMillis, int multiplier, int attempts) {
    return withExpBackoff(initialMillis, multiplier, attempts, Long.MAX_VALUE);
}
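// A minimal sketch (not part of the Retry API) of the delay schedule that exponential backoff
// produces, assuming the k-th retry waits initialMillis * multiplier^k milliseconds, capped at
// maxDelay. Useful for reasoning about the parameter choices in the snippets below.
static long[] backoffSchedule(long initialMillis, int multiplier, int attempts, long maxDelay) {
    long[] delays = new long[attempts];
    long current = initialMillis;
    for (int k = 0; k < attempts; k++) {
        delays[k] = Math.min(current, maxDelay);
        // Cap before multiplying so the product cannot overflow when maxDelay is Long.MAX_VALUE.
        current = (current > maxDelay / multiplier) ? maxDelay : current * multiplier;
    }
    return delays;
}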
private Retry.RetryAndThrowBase<? extends Exception> createRetryPolicy(int maxWriteAttempts, int writeTimeout) {
    int initialDelay = writeTimeout / maxWriteAttempts;
    int maxDelay = writeTimeout * maxWriteAttempts;
    return Retry.withExpBackoff(initialDelay, 2, maxWriteAttempts, maxDelay)
                .retryWhen(ex -> true); // Retry on every exception.
}
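// Worked example with illustrative values (not from the source), assuming writeTimeout is in
// milliseconds: writeTimeout = 1000 ms and maxWriteAttempts = 4 give
//   initialDelay = 1000 / 4 = 250 ms
//   maxDelay    = 1000 * 4 = 4000 ms
// so the successive delays are 250, 500, 1000, 2000 ms, each capped at 4000 ms.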
public static <U> U withRetries(Supplier<U> supplier, Predicate<Throwable> predicate, int numOfTries) {
    return Retry.withExpBackoff(100, 2, numOfTries, 1000)
                .retryWhen(predicate)
                .run(supplier::get);
}
public static <U> CompletableFuture<U> withRetriesAsync(Supplier<CompletableFuture<U>> futureSupplier,
                                                        Predicate<Throwable> predicate, int numOfTries,
                                                        ScheduledExecutorService executor) {
    return Retry.withExpBackoff(100, 2, numOfTries, 10000)
                .retryWhen(predicate)
                .runAsync(futureSupplier, executor);
}
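// Hypothetical usage of the async helper above; 'fetchValueAsync' and 'executor' are illustrative
// stand-ins, not names from the source. Retries the lookup up to three times, but only when the
// unwrapped failure is an IOException.
CompletableFuture<String> value = withRetriesAsync(
        () -> fetchValueAsync("key"),                      // illustrative supplier
        t -> Exceptions.unwrap(t) instanceof IOException,  // retry predicate
        3,
        executor);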
private RetryWithBackoff getRetryFromConfig(EventWriterConfig config) {
    return Retry.withExpBackoff(config.getInitalBackoffMillis(), config.getBackoffMultiple(),
                                config.getRetryAttempts(), config.getMaxBackoffMillis());
}
@Test
public void retryPredicateTest() {
    AtomicInteger i = new AtomicInteger(0);
    try {
        // The predicate is consulted once per failure: it returns true on the first failure
        // (i goes 0 -> 1, retry) and false on the second (i goes 1 -> 2, stop), so the
        // exception propagates with i == 2.
        Retry.withExpBackoff(10, 10, 10)
             .retryWhen(e -> i.getAndIncrement() != 1)
             .run(() -> {
                 throw new Exception("test");
             });
    } catch (Exception e) {
        assert i.get() == 2;
    }
}
/**
 * Updates the reader group data at the specified path by applying the updater method to the existing data.
 * It repeatedly invokes a conditional update on the specified path until it succeeds or the max attempts (10)
 * are exhausted.
 *
 * @param path    Reader group node path.
 * @param updater Function to obtain the new data value from the existing data value.
 * @throws Exception Exception thrown from Curator, or from the application of the updater method.
 */
private void updateReaderGroupData(String path, Function<ReaderGroupData, ReaderGroupData> updater) throws Exception {
    final long initialMillis = 100L;
    final int multiplier = 2;
    final int attempts = 10;
    final long maxDelay = 2000;
    Stat stat = new Stat();
    Retry.withExpBackoff(initialMillis, multiplier, attempts, maxDelay)
         .retryingOn(KeeperException.BadVersionException.class)
         .throwingOn(Exception.class)
         .run(() -> {
             byte[] data = client.getData().storingStatIn(stat).forPath(path);
             ReaderGroupData groupData = groupDataSerializer.deserialize(ByteBuffer.wrap(data));
             groupData = updater.apply(groupData);
             byte[] newData = groupDataSerializer.serialize(groupData).array();
             client.setData()
                   .withVersion(stat.getVersion())
                   .forPath(path, newData);
             return null;
         });
}
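// Hypothetical caller of updateReaderGroupData, showing the optimistic read-modify-write in use.
// 'withReaderRemoved' is an illustrative ReaderGroupData transformation, not a real method, and
// the path is a made-up example.
updateReaderGroupData("/readergroups/myGroup", data -> data.withReaderRemoved("reader-1"));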
private CompletableFuture<Integer> retryFuture(final long delay, final int multiplier, final int attempts,
                                               final long maxDelay, final boolean success,
                                               final ScheduledExecutorService executorService) {
    loopCounter.set(0);
    accumulator.set(0);
    return Retry.withExpBackoff(delay, multiplier, attempts, maxDelay)
                .retryingOn(RetryableException.class)
                .throwingOn(NonretryableException.class)
                .runAsync(() -> futureComputation(success, executorService), executorService);
}
private int retry(long delay, int multiplier, int attempts, long maxDelay, boolean success) {
    loopCounter.set(0);
    accumulator.set(0);
    return Retry.withExpBackoff(delay, multiplier, attempts, maxDelay)
                .retryingOn(RetryableException.class)
                .throwingOn(NonretryableException.class)
                .run(() -> {
                    accumulator.getAndAdd(loopCounter.getAndIncrement());
                    int i = loopCounter.get();
                    log.debug("Loop counter = {}", i);
                    if (i % 10 == 0) {
                        if (success) {
                            return accumulator.get();
                        } else {
                            throw new NonretryableException();
                        }
                    } else {
                        throw new RetryableException();
                    }
                });
}
private CompletableFuture<Void> retryFutureInExecutor(final long delay, final int multiplier, final int attempts,
                                                      final long maxDelay, final boolean success,
                                                      final ScheduledExecutorService executorService) {
    loopCounter.set(0);
    accumulator.set(0);
    return Retry.withExpBackoff(delay, multiplier, attempts, maxDelay)
                .retryingOn(RetryableException.class)
                .throwingOn(NonretryableException.class)
                .runInExecutor(() -> {
                    accumulator.getAndAdd(loopCounter.getAndIncrement());
                    int i = loopCounter.get();
                    log.debug("Loop counter = {}", i);
                    if (i % 10 == 0) {
                        if (success) {
                            log.debug("result = {}", accumulator.get());
                            return;
                        } else {
                            throw new NonretryableException();
                        }
                    } else {
                        throw new RetryableException();
                    }
                }, executorService);
}
val wasInvoked = new Semaphore(0);
val waitOn = new CompletableFuture<Void>();
val retry = Retry.withExpBackoff(1, 2, 3)
                 .retryWhen(t -> true);
val error = new AtomicReference<Throwable>();
/**
 * Executes the given request on the given FencingTestContext. We retry all expected exceptions, and when we do,
 * we make sure to re-execute the request on the current (active) Segment Store instance (since the previous one
 * may be unusable).
 */
private CompletableFuture<Void> executeWithFencing(StoreRequest request, int index, FencingTestContext context) {
    log.debug("Initiating Operation #{} on iteration {}.", index, context.getIteration());
    AtomicReference<StreamSegmentStore> requestStore = new AtomicReference<>(context.getActiveStore());
    return Retry.withExpBackoff(50, 2, 10, TIMEOUT.toMillis() / 10)
                .retryWhen(ex -> {
                    requestStore.getAndSet(context.getActiveStore());
                    ex = Exceptions.unwrap(ex);
                    log.info("Operation #{} (Iteration = {}) failed due to {}.", index, context.getIteration(), ex.toString());
                    return isExpectedFencingException(ex);
                })
                .runAsync(() -> request.apply(requestStore.get()), executorService());
}
private void checkReads(HashMap<String, ByteArrayOutputStream> segmentContents, StreamSegmentStore store) {
    for (Map.Entry<String, ByteArrayOutputStream> e : segmentContents.entrySet()) {
        String segmentName = e.getKey();
        byte[] expectedData = e.getValue().toByteArray();
        long segmentLength = store.getStreamSegmentInfo(segmentName, TIMEOUT).join().getLength();
        Assert.assertEquals("Unexpected Read Index length for segment " + segmentName, expectedData.length, segmentLength);
        AtomicLong expectedCurrentOffset = new AtomicLong(0);

        // We retry a number of times on StreamSegmentNotExists. It is possible that waitForSegmentsInStorage
        // returned successfully because it detected the Segment was complete there, but the internal callback to
        // the ReadIndex (completeMerge) may not yet have executed. The ReadIndex has a mechanism to cope with
        // this, but it only retries once, after a fixed time interval, which is more than generous on any system.
        // However, on very slow systems, that callback may take a significant amount of time to even begin
        // executing, hence trying to read data that was merged from a Transaction may result in a spurious
        // StreamSegmentNotExistsException.
        // This is gracefully handled by retries in the AppendProcessor and/or the Client, but in this case we
        // simply have to do the retries ourselves, hoping that the callback eventually executes.
        Retry.withExpBackoff(100, 2, 10, TIMEOUT.toMillis() / 5)
             .retryWhen(ex -> Exceptions.unwrap(ex) instanceof StreamSegmentNotExistsException)
             .run(() -> {
                 checkSegmentReads(segmentName, expectedCurrentOffset, segmentLength, store, expectedData);
                 return null;
             });
    }
}
@Override
public CompletableFuture<Void> processEvent(TestBase event) {
    receivedForProcessing.add(event);
    CompletableFuture<Void> result = new CompletableFuture<>();
    Retry.withExpBackoff(100, 1, 5, 100) // A multiplier of 1 yields a fixed 100 ms delay between the 5 attempts.
         .retryWhen(RetryableException::isRetryable)
         .runAsync(() -> event.process(null), executor)
         .whenCompleteAsync((r, e) -> {
             if (e != null) {
                 Throwable cause = Exceptions.unwrap(e);
                 if (cause instanceof OperationDisallowedException) {
                     // Requeue the event indefinitely, then surface the original failure.
                     Retry.indefinitelyWithExpBackoff("Error writing event back into requeststream")
                          .runAsync(() -> writer.write(event), executor)
                          .thenAccept(v -> result.completeExceptionally(cause));
                 } else {
                     result.completeExceptionally(cause);
                 }
             } else {
                 result.complete(r);
             }
         }, executor);
    return result;
}
/**
 * Invoke the simple scale down test; produce no load into the stream.
 * The test will periodically check if a scale event has occurred by talking to the controller via the
 * controller client.
 *
 * @throws InterruptedException if interrupted
 * @throws URISyntaxException   if the URI is invalid
 */
private CompletableFuture<Void> scaleDownTest() {
    final ControllerImpl controller = getController();

    // Overall wait for the test to complete in about 260 seconds (roughly 4.3 minutes) or scale down,
    // whichever happens first.
    return Retry.withExpBackoff(10, 10, 30, Duration.ofSeconds(10).toMillis())
                .retryingOn(ScaleOperationNotDoneException.class)
                .throwingOn(RuntimeException.class)
                .runAsync(() -> controller.getCurrentSegments(SCOPE, SCALE_DOWN_STREAM_NAME)
                        .thenAccept(x -> {
                            if (x.getSegments().size() == 2) {
                                throw new ScaleOperationNotDoneException();
                            } else {
                                log.info("scale down done successfully");
                            }
                        }), scaleExecutorService);
}
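// Worked check of the bound above (the same Retry parameters are used by the scale-up tests
// below), assuming one delay between consecutive attempts: withExpBackoff(10, 10, 30, 10000)
// waits at most 10 + 100 + 1000 + 26 * 10000 = 261,110 ms, i.e. roughly the 260 seconds cited
// in the comment.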
/**
 * Invoke the simple scale up test; produce traffic from multiple writers in parallel.
 * The test will periodically check if a scale event has occurred by talking to the controller via the
 * controller client.
 *
 * @throws InterruptedException if interrupted
 * @throws URISyntaxException   if the URI is invalid
 */
private CompletableFuture<Void> scaleUpTest() {
    ClientFactoryImpl clientFactory = getClientFactory();
    ControllerImpl controller = getController();
    final AtomicBoolean exit = new AtomicBoolean(false);
    createWriters(clientFactory, 6, SCOPE, SCALE_UP_STREAM_NAME);

    // Overall wait for the test to complete in about 260 seconds (roughly 4.3 minutes) or scale up,
    // whichever happens first.
    return Retry.withExpBackoff(10, 10, 30, Duration.ofSeconds(10).toMillis())
                .retryingOn(ScaleOperationNotDoneException.class)
                .throwingOn(RuntimeException.class)
                .runAsync(() -> controller.getCurrentSegments(SCOPE, SCALE_UP_STREAM_NAME)
                        .thenAccept(x -> {
                            log.debug("size == {}", x.getSegments().size());
                            if (x.getSegments().size() == 1) {
                                throw new ScaleOperationNotDoneException();
                            } else {
                                log.info("scale up done successfully");
                                exit.set(true);
                            }
                        }), scaleExecutorService);
}
private CompletableFuture<Void> processEvent(WriterMock requestEventWriter) throws InterruptedException {
    return Retry.withExpBackoff(100, 10, 5, 1000)
                .retryingOn(TaskExceptions.StartException.class)
                .throwingOn(RuntimeException.class)
                .runAsync(() -> {
                    ControllerEvent event;
                    try {
                        event = requestEventWriter.getEventQueue().take();
                    } catch (InterruptedException e) {
                        throw new RuntimeException(e);
                    }
                    return streamRequestHandler.processEvent(event)
                            .exceptionally(e -> {
                                requestEventWriter.getEventQueue().add(event);
                                throw new CompletionException(e);
                            });
                }, executor);
}
/**
 * Invoke the scale up test with transactional writes; produce traffic from multiple writers in parallel. Each
 * writer writes using transactions. The test will periodically check if a scale event has occurred by talking
 * to the controller via the controller client.
 *
 * @throws InterruptedException if interrupted
 * @throws URISyntaxException   if the URI is invalid
 */
private CompletableFuture<Void> scaleUpTxnTest() {
    ControllerImpl controller = getController();
    final AtomicBoolean exit = new AtomicBoolean(false);
    ClientFactoryImpl clientFactory = getClientFactory();
    startWritingIntoTxn(clientFactory.createTransactionalEventWriter(SCALE_UP_TXN_STREAM_NAME,
            new JavaSerializer<>(), EventWriterConfig.builder().build()), exit);

    // Overall wait for the test to complete in about 260 seconds (roughly 4.3 minutes) or scale up,
    // whichever happens first.
    return Retry.withExpBackoff(10, 10, 30, Duration.ofSeconds(10).toMillis())
                .retryingOn(ScaleOperationNotDoneException.class)
                .throwingOn(RuntimeException.class)
                .runAsync(() -> controller.getCurrentSegments(SCOPE, SCALE_UP_TXN_STREAM_NAME)
                        .thenAccept(x -> {
                            if (x.getSegments().size() == 1) {
                                throw new ScaleOperationNotDoneException();
                            } else {
                                log.info("txn test scale up done successfully");
                                exit.set(true);
                            }
                        }), scaleExecutorService);
}
private boolean createScopeWithSimpleRetry(String scopeName, URI controllerURI)
        throws ExecutionException, InterruptedException {
    // Need to retry since there is a delay for the Mesos DNS name to resolve correctly.
    @Cleanup
    final ControllerImpl controllerClient = new ControllerImpl(ControllerImplConfig.builder()
            .clientConfig(ClientConfig.builder()
                    .controllerURI(controllerURI)
                    .build())
            .build(), executorService);
    CompletableFuture<Boolean> retryResult = Retry.withExpBackoff(500, 2, 10, 5000)
            .retryingOn(Exception.class)
            .throwingOn(IllegalArgumentException.class)
            .runAsync(() -> controllerClient.createScope(scopeName), executorService);
    return retryResult.get();
}
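// Worked schedule for the policy above, assuming delay(k) = 500 * 2^k capped at 5000 ms:
// 500, 1000, 2000, 4000, 5000, 5000, 5000, 5000, 5000 — the nine waits between the ten attempts
// total 32,500 ms, so the retry loop gives the DNS name about half a minute to resolve.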