/**
 * Updates the reader group data at the specified path by applying the updater method
 * on the existing data. It repeatedly performs a conditional (versioned) update on the
 * path until it succeeds or the max attempts (10) are exhausted.
 *
 * @param path Reader group node path.
 * @param updater Function to obtain the new data value from the existing data value.
 * @throws Exception Throws exception thrown from Curator, or from application of the updater method.
 */
private void updateReaderGroupData(String path, Function<ReaderGroupData, ReaderGroupData> updater) throws Exception {
    // Exponential backoff parameters for the compare-and-set retry loop.
    final long initialMillis = 100L;
    final int multiplier = 2;
    final int attempts = 10;
    final long maxDelay = 2000;

    // Captures the znode version on read; reused by the conditional write below.
    final Stat stat = new Stat();
    Retry.withExpBackoff(initialMillis, multiplier, attempts, maxDelay)
         .retryingOn(KeeperException.BadVersionException.class)
         .throwingOn(Exception.class)
         .run(() -> {
             // Read the current bytes, remembering the version for the conditional write.
             byte[] existing = client.getData().storingStatIn(stat).forPath(path);
             ReaderGroupData transformed = updater.apply(groupDataSerializer.deserialize(ByteBuffer.wrap(existing)));
             // Versioned write: a concurrent modification surfaces as BadVersionException,
             // which the retry policy above transparently retries.
             client.setData()
                   .withVersion(stat.getVersion())
                   .forPath(path, groupDataSerializer.serialize(transformed).array());
             return null;
         });
}
.retryingOn(NotDoneException.class) .throwingOn(RuntimeException.class) .runAsync(() -> controller.getCurrentSegments("test", "test") .thenAccept(streamSegments -> { if (streamSegments.getSegments().size() > 3) {
/**
 * Configures the exception type that should abort retrying: when the task throws
 * {@code throwType}, it is propagated to the caller instead of being retried.
 *
 * @param throwType Non-null class of the exception to propagate rather than retry.
 * @param <ThrowsT> Type of the exception that aborts the retry loop.
 * @return A retry builder combining the configured retryable type with {@code throwType}.
 */
public <ThrowsT extends Exception> RetryAndThrowExceptionally<RetryT, ThrowsT> throwingOn(Class<ThrowsT> throwType) {
    Preconditions.checkNotNull(throwType);
    RetryAndThrowExceptionally<RetryT, ThrowsT> result = new RetryAndThrowExceptionally<>(retryType, throwType, params);
    return result;
}
}
.retryingOn(NotDoneException.class) .throwingOn(RuntimeException.class) .runAsync(() -> controller.getCurrentSegments("test", "test") .thenAccept(streamSegments -> { if (streamSegments.getSegments().stream().anyMatch(x -> StreamSegmentNameUtils.getEpoch(x.getSegmentId()) > 5)) {
.retryingOn(ScaleOperationNotDoneException.class) .throwingOn(RuntimeException.class) .runAsync(() -> controller.getCurrentSegments(SCOPE, STREAM_NAME) .thenAccept(x -> { int currentNumOfSegments = x.getSegments().size();
.retryingOn(NotDoneException.class) .throwingOn(RuntimeException.class) .runAsync(() -> controller.getCurrentSegments("test", "test") .thenAccept(streamSegments -> { if (streamSegments.getSegments().size() < 3) {
long traceId = LoggerHelpers.traceEnter(log, "createStream", streamConfig, requestId); final CompletableFuture<CreateStreamStatus> result = this.retryConfig.runAsync(() -> { RPCAsyncCallback<CreateStreamStatus> callback = new RPCAsyncCallback<>(requestId, "createStream"); new ControllerClientTagger(client).withTag(requestId, "createStream", scope, streamName)
.retryingOn(StoreException.DataNotFoundException.class) .throwingOn(IllegalStateException.class) .run(() -> { Futures.getAndHandleExceptions(streamStore.getConfiguration(SCOPE, stream1, null, executor), CompletionException::new); .retryingOn(IllegalStateException.class) .throwingOn(RuntimeException.class) .run(() -> { Futures.getAndHandleExceptions( streamStore.getConfiguration(SCOPE, stream1, null, executor)
long traceId = LoggerHelpers.traceEnter(log, "truncateStream", streamCut, requestId); final CompletableFuture<UpdateStreamStatus> result = this.retryConfig.runAsync(() -> { RPCAsyncCallback<UpdateStreamStatus> callback = new RPCAsyncCallback<>(requestId, "truncateStream"); new ControllerClientTagger(client).withTag(requestId, "truncateStream", scope, stream)
long traceId = LoggerHelpers.traceEnter(log, "deleteScope", scopeName, requestId); final CompletableFuture<DeleteScopeStatus> result = this.retryConfig.runAsync(() -> { RPCAsyncCallback<DeleteScopeStatus> callback = new RPCAsyncCallback<>(requestId, "deleteScope"); new ControllerClientTagger(client).withTag(requestId, "deleteScope", scopeName)
long traceId = LoggerHelpers.traceEnter(log, "createScope", scopeName, requestId); final CompletableFuture<CreateScopeStatus> result = this.retryConfig.runAsync(() -> { RPCAsyncCallback<CreateScopeStatus> callback = new RPCAsyncCallback<>(requestId, "createScope"); new ControllerClientTagger(client).withTag(requestId, "createScope", scopeName)
long traceId = LoggerHelpers.traceEnter(log, "deleteStream", scope, streamName, requestId); final CompletableFuture<DeleteStreamStatus> result = this.retryConfig.runAsync(() -> { RPCAsyncCallback<DeleteStreamStatus> callback = new RPCAsyncCallback<>(requestId, "deleteStream"); new ControllerClientTagger(client).withTag(requestId, "deleteStream", scope, streamName)
long traceId = LoggerHelpers.traceEnter(log, "updateStream", streamConfig, requestId); final CompletableFuture<UpdateStreamStatus> result = this.retryConfig.runAsync(() -> { RPCAsyncCallback<UpdateStreamStatus> callback = new RPCAsyncCallback<>(requestId, "updateStream"); new ControllerClientTagger(client).withTag(requestId, "updateStream", scope, streamName)
long traceId = LoggerHelpers.traceEnter(log, "sealStream", scope, streamName, requestId); final CompletableFuture<UpdateStreamStatus> result = this.retryConfig.runAsync(() -> { RPCAsyncCallback<UpdateStreamStatus> callback = new RPCAsyncCallback<>(requestId, "sealStream"); new ControllerClientTagger(client).withTag(requestId, "sealStream", scope, streamName)
final CompletableFuture<ScaleStatusResponse> result = this.retryConfig.runAsync(() -> { RPCAsyncCallback<ScaleStatusResponse> callback = new RPCAsyncCallback<>(traceId, "checkScale"); client.checkScale(ScaleStatusRequest.newBuilder()
/** * Invoke the scale up Test with transactional writes. Produce traffic from multiple writers in parallel. Each * writer writes using transactions. The test will periodically check if a scale event has occurred by talking to * controller via controller client. * * @throws InterruptedException if interrupted * @throws URISyntaxException If URI is invalid */ private CompletableFuture<Void> scaleUpTxnTest() { ControllerImpl controller = getController(); final AtomicBoolean exit = new AtomicBoolean(false); ClientFactoryImpl clientFactory = getClientFactory(); startWritingIntoTxn(clientFactory.createTransactionalEventWriter(SCALE_UP_TXN_STREAM_NAME, new JavaSerializer<>(), EventWriterConfig.builder().build()), exit); // overall wait for test to complete in 260 seconds (4.2 minutes) or scale up, whichever happens first. return Retry.withExpBackoff(10, 10, 30, Duration.ofSeconds(10).toMillis()) .retryingOn(ScaleOperationNotDoneException.class) .throwingOn(RuntimeException.class) .runAsync(() -> controller.getCurrentSegments(SCOPE, SCALE_UP_TXN_STREAM_NAME) .thenAccept(x -> { if (x.getSegments().size() == 1) { throw new ScaleOperationNotDoneException(); } else { log.info("txn test scale up done successfully"); exit.set(true); } }), scaleExecutorService); } }
/**
 * Fetches the current set of open segments for the given stream from the controller,
 * keyed by the upper bound of each segment's key-space range.
 *
 * @param scope  Scope of the stream; must be non-null and non-empty.
 * @param stream Stream name; must be non-null and non-empty.
 * @return A future holding the current {@code StreamSegments}, completed exceptionally on RPC failure.
 */
@Override
public CompletableFuture<StreamSegments> getCurrentSegments(final String scope, final String stream) {
    Exceptions.checkNotClosed(closed.get(), this);
    Exceptions.checkNotNullOrEmpty(scope, "scope");
    Exceptions.checkNotNullOrEmpty(stream, "stream");
    final long traceId = LoggerHelpers.traceEnter(log, "getCurrentSegments", scope, stream);

    // Issue the RPC under the configured retry policy.
    final CompletableFuture<SegmentRanges> rangesFuture = this.retryConfig.runAsync(() -> {
        RPCAsyncCallback<SegmentRanges> callback = new RPCAsyncCallback<>(traceId, "getCurrentSegments");
        client.getCurrentSegments(ModelHelper.createStreamInfo(scope, stream), callback);
        return callback.getFuture();
    }, this.executor);

    return rangesFuture.thenApply(ranges -> {
        log.debug("Received the following data from the controller {}", ranges.getSegmentRangesList());
        // Map each segment by the top of its key range, sorted, for routing-key lookups.
        NavigableMap<Double, SegmentWithRange> byMaxKey = new TreeMap<>();
        ranges.getSegmentRangesList().forEach(range -> {
            Preconditions.checkState(range.getMinKey() <= range.getMaxKey(),
                    "Min keyrange %s was not less than maximum keyRange %s for segment %s",
                    range.getMinKey(), range.getMaxKey(), range.getSegmentId());
            byMaxKey.put(range.getMaxKey(),
                    new SegmentWithRange(ModelHelper.encode(range.getSegmentId()), range.getMinKey(), range.getMaxKey()));
        });
        return new StreamSegments(byMaxKey, ranges.getDelegationToken());
    }).whenComplete((streamSegments, ex) -> {
        if (ex != null) {
            log.warn("getCurrentSegments failed: ", ex);
        }
        LoggerHelpers.traceLeave(log, "getCurrentSegments", traceId);
    });
}
/**
 * Exercises {@code Retry.runInExecutor}: on each attempt it adds the loop counter into the
 * accumulator, retrying (via {@code RetryableException}) until the counter reaches a multiple
 * of 10, then either completes normally ({@code success == true}) or fails with a
 * {@code NonretryableException}.
 *
 * @param delay           Initial retry delay in milliseconds.
 * @param multiplier      Exponential backoff multiplier.
 * @param attempts        Maximum number of attempts.
 * @param maxDelay        Cap on the backoff delay in milliseconds.
 * @param success         Whether the task should ultimately succeed or throw a non-retryable exception.
 * @param executorService Executor on which the retried task runs.
 * @return A future that completes when the task succeeds, or completes exceptionally otherwise.
 */
private CompletableFuture<Void> retryFutureInExecutor(final long delay, final int multiplier, final int attempts,
                                                      final long maxDelay, final boolean success,
                                                      final ScheduledExecutorService executorService) {
    // Reset shared test state before starting the retry loop.
    loopCounter.set(0);
    accumulator.set(0);

    return Retry.withExpBackoff(delay, multiplier, attempts, maxDelay)
                .retryingOn(RetryableException.class)
                .throwingOn(NonretryableException.class)
                .runInExecutor(() -> {
                    accumulator.getAndAdd(loopCounter.getAndIncrement());
                    int i = loopCounter.get();
                    // Parameterized form ({} placeholder) logs the value; concatenation produced the same text.
                    log.debug("Loop counter = {}", i);
                    if (i % 10 == 0) {
                        if (success) {
                            // BUG FIX: the original called log.debug("result = ", accumulator.get()) with no
                            // {} placeholder, so SLF4J silently dropped the accumulator value from the log line.
                            log.debug("result = {}", accumulator.get());
                            return;
                        } else {
                            throw new NonretryableException();
                        }
                    } else {
                        throw new RetryableException();
                    }
                }, executorService);
}
/**
 * Asks the controller which segments (and offsets within them) correspond to the given
 * timestamp in the stream, returning a map from segment to offset.
 *
 * @param stream    Stream to query; must be non-null.
 * @param timestamp Timestamp of interest.
 * @return A future holding the segment-to-offset map, completed exceptionally on RPC failure.
 */
@Override
public CompletableFuture<Map<Segment, Long>> getSegmentsAtTime(final Stream stream, final long timestamp) {
    Exceptions.checkNotClosed(closed.get(), this);
    Preconditions.checkNotNull(stream, "stream");
    final long traceId = LoggerHelpers.traceEnter(log, "getSegmentsAtTime", stream, timestamp);

    // Issue the RPC under the configured retry policy.
    final CompletableFuture<SegmentsAtTime> rpcResult = this.retryConfig.runAsync(() -> {
        RPCAsyncCallback<SegmentsAtTime> callback = new RPCAsyncCallback<>(traceId, "getSegmentsAtTime");
        GetSegmentsRequest request = GetSegmentsRequest.newBuilder()
                .setStreamInfo(ModelHelper.createStreamInfo(stream.getScope(), stream.getStreamName()))
                .setTimestamp(timestamp)
                .build();
        client.getSegments(request, callback);
        return callback.getFuture();
    }, this.executor);

    return rpcResult.thenApply(reply -> {
        log.debug("Received the following data from the controller {}", reply.getSegmentsList());
        // toMap (without a merge function) preserves the original's duplicate-key semantics.
        return reply.getSegmentsList()
                    .stream()
                    .collect(Collectors.toMap(loc -> ModelHelper.encode(loc.getSegmentId()),
                                              loc -> loc.getOffset()));
    }).whenComplete((segmentMap, ex) -> {
        if (ex != null) {
            log.warn("getSegmentsAtTime failed: ", ex);
        }
        LoggerHelpers.traceLeave(log, "getSegmentsAtTime", traceId);
    });
}
/** * Invoke the simple scale up Test, produce traffic from multiple writers in parallel. * The test will periodically check if a scale event has occurred by talking to controller via * controller client. * * @throws InterruptedException if interrupted * @throws URISyntaxException If URI is invalid */ private CompletableFuture<Void> scaleUpTest() { ClientFactoryImpl clientFactory = getClientFactory(); ControllerImpl controller = getController(); final AtomicBoolean exit = new AtomicBoolean(false); createWriters(clientFactory, 6, SCOPE, SCALE_UP_STREAM_NAME); // overall wait for test to complete in 260 seconds (4.2 minutes) or scale up, whichever happens first. return Retry.withExpBackoff(10, 10, 30, Duration.ofSeconds(10).toMillis()) .retryingOn(ScaleOperationNotDoneException.class) .throwingOn(RuntimeException.class) .runAsync(() -> controller.getCurrentSegments(SCOPE, SCALE_UP_STREAM_NAME) .thenAccept(x -> { log.debug("size ==" + x.getSegments().size()); if (x.getSegments().size() == 1) { throw new ScaleOperationNotDoneException(); } else { log.info("scale up done successfully"); exit.set(true); } }), scaleExecutorService); }