/**
 * Verifies that {@code retryWhen} consults the predicate after each failure and stops retrying
 * once it returns false: the predicate here allows exactly one retry, so the task must have run
 * exactly twice by the time the final exception surfaces.
 */
@Test
public void retryPredicateTest() {
    AtomicInteger attempts = new AtomicInteger(0);
    try {
        Retry.withExpBackoff(10, 10, 10)
             .retryWhen(e -> attempts.getAndIncrement() != 1)
             .run(() -> {
                 throw new Exception("test");
             });
        // If run() returns normally the retry policy misbehaved; the original bare 'assert'
        // would have silently passed in that case.
        Assert.fail("Expected the failing task's exception to propagate after retries stopped.");
    } catch (Exception e) {
        // Bare 'assert' is disabled unless the JVM runs with -ea; use an Assert call instead.
        // Attempt 1 (counter 0 -> retry) + attempt 2 (counter 1 -> stop) == 2 executions.
        Assert.assertEquals("Unexpected number of task executions.", 2, attempts.get());
    }
}
/**
 * Creates a new Segment Store Instance, with retries.
 * Normally we have the Controller coordinating which instances are the rightful survivors, however in this case
 * we need to simulate some of this behavior ourselves, by being insistent. It is possible that previous instances
 * meddle with the BKLog ZK metadata during the new instance's initialization, causing the new instance to wrongfully
 * assume it's not the rightful survivor. A quick retry solves this problem, as there is no other kind of information
 * available to disambiguate this.
 */
void createNewInstance() {
    this.newInstanceRetry.run(() -> {
        // The new instance's id is one past the current iteration.
        int newInstanceId = getIteration() + 1;
        log.info("Starting Instance {}.", newInstanceId);

        // Build the instance, keep track of it, and make its store the active one.
        ServiceBuilder builder = createBuilder(newInstanceId);
        this.builders.add(builder);
        this.activeStore.set(builder.createStreamSegmentService());

        // Only bump the iteration once the instance is fully up.
        this.iteration.incrementAndGet();
        log.info("Instance {} Started.", newInstanceId);
        return null;
    });
}
}
private void checkReads(HashMap<String, ByteArrayOutputStream> segmentContents, StreamSegmentStore store) { for (Map.Entry<String, ByteArrayOutputStream> e : segmentContents.entrySet()) { String segmentName = e.getKey(); byte[] expectedData = e.getValue().toByteArray(); long segmentLength = store.getStreamSegmentInfo(segmentName, TIMEOUT).join().getLength(); Assert.assertEquals("Unexpected Read Index length for segment " + segmentName, expectedData.length, segmentLength); AtomicLong expectedCurrentOffset = new AtomicLong(0); // We retry a number of times on StreamSegmentNotExists. It is possible that waitForSegmentsInStorage may have // returned successfully because it detected the Segment was complete there, but the internal callback to the // ReadIndex (completeMerge) may not yet have been executed. The ReadIndex has a mechanism to cope with this, // but it only retries once, after a fixed time interval, which is more than generous on any system. // However, on very slow systems, it is possible that that callback may take a significant amount of time to even // begin executing, hence the trying to read data that was merged from a Transaction may result in a spurious // StreamSegmentNotExistsException. // This is gracefully handled by retries in AppendProcessor and/or Client, but in this case, we simply have to // do the retries ourselves, hoping that the callback eventually executes. Retry.withExpBackoff(100, 2, 10, TIMEOUT.toMillis() / 5) .retryWhen(ex -> Exceptions.unwrap(ex) instanceof StreamSegmentNotExistsException) .run(() -> { checkSegmentReads(segmentName, expectedCurrentOffset, segmentLength, store, expectedData); return null; }); } }
// Create the scope, retrying on failure — presumably the arguments are
// (initialDelayMs=500, multiplier=2, attempts=10), matching the Retry API used elsewhere in
// this file; confirm against the Retry builder's signature.
// NOTE(review): retryWhen(ex -> true) retries on EVERY exception, including non-transient ones
// (e.g. authorization failures) — confirm an unconditional retry is intended here.
Retry.withExpBackoff(500, 2, 10) .retryWhen(ex -> true) .run(() -> this.streamManager.get().createScope(SCOPE));
/**
 * Executes the given supplier, retrying whenever a thrown exception satisfies the given
 * predicate, up to {@code numOfTries} attempts with exponential backoff — the constants are
 * presumably (initialDelayMs=100, multiplier=2, maxDelayMs=1000); confirm against the Retry API.
 *
 * @param supplier   the operation to execute.
 * @param predicate  decides whether a thrown exception warrants another attempt.
 * @param numOfTries the maximum number of attempts.
 * @param <U>        the result type of the supplier.
 * @return the supplier's result.
 */
public static <U> U withRetries(Supplier<U> supplier, Predicate<Throwable> predicate, int numOfTries) {
    return Retry
            .withExpBackoff(100, 2, numOfTries, 1000)
            .retryWhen(predicate::test)
            .run(() -> supplier.get());
}