/**
 * Applies {@code updater} to the reader group data stored at {@code path} and writes the
 * result back using a conditional (versioned) update. The read-modify-write cycle is
 * retried with exponential backoff until it succeeds or the maximum number of attempts
 * (10) is exhausted.
 *
 * @param path    Reader group node path.
 * @param updater Function that derives the new data value from the existing one.
 * @throws Exception Propagates failures from Curator or from applying the updater.
 */
private void updateReaderGroupData(String path, Function<ReaderGroupData, ReaderGroupData> updater) throws Exception {
    final long backoffMillis = 100L;
    final int backoffMultiplier = 2;
    final int maxAttempts = 10;
    final long backoffCapMillis = 2000;
    Stat nodeStat = new Stat();
    Retry.withExpBackoff(backoffMillis, backoffMultiplier, maxAttempts, backoffCapMillis)
         .retryingOn(KeeperException.BadVersionException.class)
         .throwingOn(Exception.class)
         .run(() -> {
             // Read current value, remembering the node version for the conditional write below.
             byte[] current = client.getData().storingStatIn(nodeStat).forPath(path);
             ReaderGroupData updated = updater.apply(groupDataSerializer.deserialize(ByteBuffer.wrap(current)));
             // Conditional write: fails with BadVersionException (and therefore retries)
             // if the node changed between the read and this write.
             client.setData()
                   .withVersion(nodeStat.getVersion())
                   .forPath(path, groupDataSerializer.serialize(updated).array());
             return null;
         });
}
.retryingOn(NotDoneException.class) .throwingOn(RuntimeException.class) .runAsync(() -> controller.getCurrentSegments("test", "test")
.retryingOn(NotDoneException.class) .throwingOn(RuntimeException.class) .runAsync(() -> controller.getCurrentSegments("test", "test")
this.retryConfig = Retry.withExpBackoff(config.getInitialBackoffMillis(), config.getBackoffMultiple(), config.getRetryAttempts(), config.getMaxBackoffMillis()) .retryingOn(StatusRuntimeException.class) .throwingOn(Exception.class);
.retryingOn(StoreException.DataNotFoundException.class) .throwingOn(IllegalStateException.class) .run(() -> { .retryingOn(IllegalStateException.class) .throwingOn(RuntimeException.class) .run(() -> {
/**
 * Exercises {@code Retry.runInExecutor}: each invocation adds the current loop counter to the
 * shared accumulator and throws a retryable exception until the counter reaches a multiple of
 * 10; at that point it either completes normally ({@code success}) or throws a non-retryable
 * exception.
 *
 * @param delay           Initial backoff delay in milliseconds.
 * @param multiplier      Backoff multiplier applied per attempt.
 * @param attempts        Maximum number of attempts.
 * @param maxDelay        Upper bound on the backoff delay in milliseconds.
 * @param success         Whether the tenth attempt should succeed or fail non-retryably.
 * @param executorService Executor on which the retried task runs.
 * @return future completing (or failing) with the retry loop's outcome.
 */
private CompletableFuture<Void> retryFutureInExecutor(final long delay, final int multiplier, final int attempts,
                                                      final long maxDelay, final boolean success,
                                                      final ScheduledExecutorService executorService) {
    loopCounter.set(0);
    accumulator.set(0);
    return Retry.withExpBackoff(delay, multiplier, attempts, maxDelay)
                .retryingOn(RetryableException.class)
                .throwingOn(NonretryableException.class)
                .runInExecutor(() -> {
                    accumulator.getAndAdd(loopCounter.getAndIncrement());
                    int i = loopCounter.get();
                    log.debug("Loop counter = {}", i);
                    if (i % 10 == 0) {
                        if (success) {
                            // BUG FIX: the original format string "result = " had no {} placeholder,
                            // so the accumulator value was silently dropped from the log output.
                            log.debug("result = {}", accumulator.get());
                            return;
                        } else {
                            throw new NonretryableException();
                        }
                    } else {
                        throw new RetryableException();
                    }
                }, executorService);
}
/**
 * Conditionally appends {@code data} at {@code expectedOffset}, retrying on connection
 * failures. The whole operation is serialized under {@code lock} so concurrent callers
 * cannot interleave their appends.
 *
 * @param data           Payload to append.
 * @param expectedOffset Offset at which the append must land for it to succeed.
 * @return the transformed result of the DataAppended reply.
 * @throws SegmentSealedException if the segment has been sealed (not retried).
 */
@Override
public boolean write(ByteBuffer data, long expectedOffset) throws SegmentSealedException {
    synchronized (lock) {
        // Used to preserve order of appends across retries.
        long appendSequence = requestIdGenerator.get();
        return retrySchedule.retryingOn(ConnectionFailedException.class)
                            .throwingOn(SegmentSealedException.class)
                            .run(() -> {
                                // (Re)establish the connection and set up the append session if needed.
                                if (client == null || client.isClosed()) {
                                    client = new RawClient(controller, connectionFactory, segmentId);
                                    long requestId = requestIdGenerator.get();
                                    log.debug("Setting up append on segment: {}", segmentId);
                                    SetupAppend setup = new SetupAppend(requestId, writerId, segmentId.getScopedName(), delegationToken);
                                    val reply = client.sendRequest(requestId, setup);
                                    AppendSetup appendSetup = transformAppendSetup(reply.join());
                                    // NOTE(review): presumably lastEventNumber >= appendSequence means a
                                    // previous attempt of this append already landed, so we report success
                                    // without re-sending — confirm against AppendSetup semantics.
                                    if (appendSetup.getLastEventNumber() >= appendSequence) {
                                        return true;
                                    }
                                }
                                // Send the conditional append; blocks until the reply arrives.
                                val request = new ConditionalAppend(writerId, appendSequence, expectedOffset, new Event(Unpooled.wrappedBuffer(data)));
                                val reply = client.sendRequest(appendSequence, request);
                                return transformDataAppended(reply.join());
                            });
    }
}
/** * Invoke the scale up Test with transactional writes. Produce traffic from multiple writers in parallel. Each * writer writes using transactions. The test will periodically check if a scale event has occurred by talking to * controller via controller client. * * @throws InterruptedException if interrupted * @throws URISyntaxException If URI is invalid */ private CompletableFuture<Void> scaleUpTxnTest() { ControllerImpl controller = getController(); final AtomicBoolean exit = new AtomicBoolean(false); ClientFactoryImpl clientFactory = getClientFactory(); startWritingIntoTxn(clientFactory.createTransactionalEventWriter(SCALE_UP_TXN_STREAM_NAME, new JavaSerializer<>(), EventWriterConfig.builder().build()), exit); // overall wait for test to complete in 260 seconds (4.2 minutes) or scale up, whichever happens first. return Retry.withExpBackoff(10, 10, 30, Duration.ofSeconds(10).toMillis()) .retryingOn(ScaleOperationNotDoneException.class) .throwingOn(RuntimeException.class) .runAsync(() -> controller.getCurrentSegments(SCOPE, SCALE_UP_TXN_STREAM_NAME) .thenAccept(x -> { if (x.getSegments().size() == 1) { throw new ScaleOperationNotDoneException(); } else { log.info("txn test scale up done successfully"); exit.set(true); } }), scaleExecutorService); } }
/** * Invoke the simple scale up Test, produce traffic from multiple writers in parallel. * The test will periodically check if a scale event has occurred by talking to controller via * controller client. * * @throws InterruptedException if interrupted * @throws URISyntaxException If URI is invalid */ private CompletableFuture<Void> scaleUpTest() { ClientFactoryImpl clientFactory = getClientFactory(); ControllerImpl controller = getController(); final AtomicBoolean exit = new AtomicBoolean(false); createWriters(clientFactory, 6, SCOPE, SCALE_UP_STREAM_NAME); // overall wait for test to complete in 260 seconds (4.2 minutes) or scale up, whichever happens first. return Retry.withExpBackoff(10, 10, 30, Duration.ofSeconds(10).toMillis()) .retryingOn(ScaleOperationNotDoneException.class) .throwingOn(RuntimeException.class) .runAsync(() -> controller.getCurrentSegments(SCOPE, SCALE_UP_STREAM_NAME) .thenAccept(x -> { log.debug("size ==" + x.getSegments().size()); if (x.getSegments().size() == 1) { throw new ScaleOperationNotDoneException(); } else { log.info("scale up done successfully"); exit.set(true); } }), scaleExecutorService); }
/**
 * Exercises the synchronous {@code Retry.run} path: each attempt adds the current loop
 * counter to the shared accumulator and throws a retryable exception until the counter
 * reaches a multiple of 10; at that point it either returns the accumulated value
 * ({@code success}) or throws a non-retryable exception.
 *
 * @param delay      Initial backoff delay in milliseconds.
 * @param multiplier Backoff multiplier applied per attempt.
 * @param attempts   Maximum number of attempts.
 * @param maxDelay   Upper bound on the backoff delay in milliseconds.
 * @param success    Whether the tenth attempt should succeed or fail non-retryably.
 * @return the accumulated value on success.
 */
private int retry(long delay, int multiplier, int attempts, long maxDelay, boolean success) {
    loopCounter.set(0);
    accumulator.set(0);
    return Retry.withExpBackoff(delay, multiplier, attempts, maxDelay)
                .retryingOn(RetryableException.class)
                .throwingOn(NonretryableException.class)
                .run(() -> {
                    accumulator.getAndAdd(loopCounter.getAndIncrement());
                    int i = loopCounter.get();
                    // Parameterized logging avoids eager string concatenation when debug is disabled.
                    log.debug("Loop counter = {}", i);
                    if (i % 10 == 0) {
                        if (success) {
                            return accumulator.get();
                        } else {
                            throw new NonretryableException();
                        }
                    } else {
                        throw new RetryableException();
                    }
                });
}
/** * Invoke the simple scale down Test, produce no into a stream. * The test will periodically check if a scale event has occurred by talking to controller via * controller client. * * @throws InterruptedException if interrupted * @throws URISyntaxException If URI is invalid */ private CompletableFuture<Void> scaleDownTest() { final ControllerImpl controller = getController(); // overall wait for test to complete in 260 seconds (4.2 minutes) or scale down, whichever happens first. return Retry.withExpBackoff(10, 10, 30, Duration.ofSeconds(10).toMillis()) .retryingOn(ScaleOperationNotDoneException.class) .throwingOn(RuntimeException.class) .runAsync(() -> controller.getCurrentSegments(SCOPE, SCALE_DOWN_STREAM_NAME) .thenAccept(x -> { if (x.getSegments().size() == 2) { throw new ScaleOperationNotDoneException(); } else { log.info("scale down done successfully"); } }), scaleExecutorService); }
/**
 * Takes the next event off the mock writer's queue and hands it to the stream request
 * handler, retrying with exponential backoff while the handler reports it cannot start
 * yet ({@code TaskExceptions.StartException}). If processing fails, the event is put
 * back on the queue so a later retry can pick it up again.
 *
 * @param requestEventWriter Mock writer whose queue supplies the events.
 * @return future completing when an event has been processed successfully.
 * @throws InterruptedException declared for callers; the blocking take inside the task
 *                              wraps interruption in a RuntimeException instead.
 */
private CompletableFuture<Void> processEvent(WriterMock requestEventWriter) throws InterruptedException {
    return Retry.withExpBackoff(100, 10, 5, 1000)
                .retryingOn(TaskExceptions.StartException.class)
                .throwingOn(RuntimeException.class)
                .runAsync(() -> {
                    ControllerEvent event;
                    try {
                        event = requestEventWriter.getEventQueue().take();
                    } catch (InterruptedException e) {
                        // BUG FIX: restore the interrupt status before wrapping, so the
                        // executor thread still observes the interruption.
                        Thread.currentThread().interrupt();
                        throw new RuntimeException(e);
                    }
                    return streamRequestHandler.processEvent(event)
                                               .exceptionally(e -> {
                                                   // Re-queue the event so the retry loop can process it again.
                                                   requestEventWriter.getEventQueue().add(event);
                                                   throw new CompletionException(e);
                                               });
                }, executor);
}
private boolean createScopeWithSimpleRetry(String scopeName, URI controllerURI) throws ExecutionException, InterruptedException { // Need to retry since there is a delay for the mesos DNS name to resolve correctly. @Cleanup final ControllerImpl controllerClient = new ControllerImpl(ControllerImplConfig.builder() .clientConfig(ClientConfig.builder() .controllerURI(controllerURI) .build()) .build(), executorService); CompletableFuture<Boolean> retryResult = Retry.withExpBackoff(500, 2, 10, 5000) .retryingOn(Exception.class) .throwingOn(IllegalArgumentException.class) .runAsync(() -> controllerClient.createScope(scopeName), executorService); return retryResult.get(); }
/**
 * Resets the shared counters, then drives {@code futureComputation} through an
 * asynchronous exponential-backoff retry loop.
 *
 * @param delay           Initial backoff delay in milliseconds.
 * @param multiplier      Backoff multiplier applied per attempt.
 * @param attempts        Maximum number of attempts.
 * @param maxDelay        Upper bound on the backoff delay in milliseconds.
 * @param success         Whether the computation should ultimately succeed.
 * @param executorService Executor on which the retried computation runs.
 * @return future carrying the computation's result.
 */
private CompletableFuture<Integer> retryFuture(final long delay, final int multiplier, final int attempts,
                                               final long maxDelay, final boolean success,
                                               final ScheduledExecutorService executorService) {
    loopCounter.set(0);
    accumulator.set(0);
    return Retry.withExpBackoff(delay, multiplier, attempts, maxDelay)
                .retryingOn(RetryableException.class)
                .throwingOn(NonretryableException.class)
                .runAsync(() -> futureComputation(success, executorService), executorService);
}
/**
 * Fetches the value of the given segment attribute, retrying on transient connection
 * failures. A missing segment is not retried.
 */
@Override
public long fetchProperty(SegmentAttribute attribute) {
    Exceptions.checkNotClosed(closed.get(), this);
    val result = RETRY_SCHEDULE.retryingOn(ConnectionFailedException.class)
                               .throwingOn(NoSuchSegmentException.class)
                               .runAsync(() -> getPropertyAsync(attribute.getValue(), delegationToken),
                                         connectionFactory.getInternalExecutor());
    return Futures.getThrowingException(result).getValue();
}
/**
 * Atomically updates the attribute from {@code expectedValue} to {@code newValue},
 * retrying on transient connection failures. A missing segment is not retried.
 *
 * @return whether the conditional update succeeded.
 */
@Override
public boolean compareAndSetAttribute(SegmentAttribute attribute, long expectedValue, long newValue) {
    Exceptions.checkNotClosed(closed.get(), this);
    val result = RETRY_SCHEDULE.retryingOn(ConnectionFailedException.class)
                               .throwingOn(NoSuchSegmentException.class)
                               .runAsync(() -> updatePropertyAsync(attribute.getValue(), expectedValue, newValue, delegationToken),
                                         connectionFactory.getInternalExecutor());
    return Futures.getThrowingException(result).isSuccess();
}
/**
 * Returns the segment's current write offset, retrying on transient connection
 * failures. A missing segment is not retried.
 */
@Override
public long fetchCurrentSegmentLength() {
    Exceptions.checkNotClosed(closed.get(), this);
    val infoFuture = RETRY_SCHEDULE.retryingOn(ConnectionFailedException.class)
                                   .throwingOn(NoSuchSegmentException.class)
                                   .runAsync(() -> getStreamSegmentInfo(delegationToken),
                                             connectionFactory.getInternalExecutor());
    return Futures.getThrowingException(infoFuture).getWriteOffset();
}
/**
 * Seals the segment, blocking until the operation completes and retrying on transient
 * connection failures. A missing segment is not retried.
 */
@Override
public void sealSegment() {
    RETRY_SCHEDULE.retryingOn(ConnectionFailedException.class)
                  .throwingOn(NoSuchSegmentException.class)
                  .runAsync(() -> sealSegmentAsync(segmentId, delegationToken),
                            connectionFactory.getInternalExecutor())
                  .join();
}
/**
 * Truncates the segment at the given offset, blocking until the operation completes
 * and retrying on transient connection failures. A missing segment is not retried.
 */
@Override
public void truncateSegment(long offset) {
    RETRY_SCHEDULE.retryingOn(ConnectionFailedException.class)
                  .throwingOn(NoSuchSegmentException.class)
                  .runAsync(() -> truncateSegmentAsync(segmentId, offset, delegationToken),
                            connectionFactory.getInternalExecutor())
                  .join();
}
/**
 * Retrieves the segment's metadata (start offset, write offset, sealed flag, last
 * modified time), retrying on transient connection failures. A missing segment is
 * not retried.
 */
@Override
public SegmentInfo getSegmentInfo() {
    val infoFuture = RETRY_SCHEDULE.retryingOn(ConnectionFailedException.class)
                                   .throwingOn(NoSuchSegmentException.class)
                                   .runAsync(() -> getStreamSegmentInfo(delegationToken),
                                             connectionFactory.getInternalExecutor());
    StreamSegmentInfo ssi = Futures.getThrowingException(infoFuture);
    return new SegmentInfo(segmentId, ssi.getStartOffset(), ssi.getWriteOffset(), ssi.isSealed(), ssi.getLastModified());
}