private Iterable<Tag> tags(final ExecutionContext context) {
    return singleton(Tag.of("retries", String.valueOf(context.getExecutions())));
}
/**
 * Returns the elapsed time since initial execution began.
 */
public Duration getElapsedTime() {
    return context.getElapsedTime();
}
public ExecutionContext copy() {
    return new ExecutionContext(this);
}
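These accessors are normally read from inside Failsafe listeners, which receive the context for the current execution. A minimal sketch, assuming the Failsafe 1.x API used by most snippets here (the fetch() call and the toMillis() conversion on the context's Duration are assumptions):

import java.util.concurrent.TimeUnit;
import net.jodah.failsafe.Failsafe;
import net.jodah.failsafe.RetryPolicy;

public class ContextDemo {
    public static void main(String[] args) {
        RetryPolicy policy = new RetryPolicy()
                .retryOn(RuntimeException.class)
                .withDelay(100, TimeUnit.MILLISECONDS)
                .withMaxRetries(3);

        String result = Failsafe.with(policy)
                // ctx exposes the same execution-count and elapsed-time
                // accessors shown in the snippets above
                .onComplete((r, failure, ctx) -> System.out.printf(
                        "finished after %d executions in %d ms%n",
                        ctx.getExecutions(), ctx.getElapsedTime().toMillis()))
                .get(ContextDemo::fetch);
        System.out.println(result);
    }

    // fetch() is a hypothetical flaky call, invented for this sketch
    static String fetch() {
        if (Math.random() < 0.5) throw new RuntimeException("transient");
        return "ok";
    }
}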
public SegmentMetaDataResponse checkDataSource(String engineName) {
    // @formatter:off
    // Note: retryOn(Exception.class) already covers ResourceAccessException,
    // so the first retryOn is redundant but harmless.
    RetryPolicy retryPolicy = new RetryPolicy()
        .retryOn(ResourceAccessException.class)
        .retryOn(Exception.class)
        .retryIf(result -> result == null)
        .withBackoff(delay, maxDelay, TimeUnit.SECONDS)
        .withMaxDuration(maxDuration, TimeUnit.SECONDS);
    // @formatter:on

    Callable<SegmentMetaDataResponse> callable = () -> queryService.segmentMetadata(engineName);

    // @formatter:off
    SegmentMetaDataResponse response = Failsafe.with(retryPolicy)
        .onRetriesExceeded((o, throwable) -> {
            throw new DataSourceIngestionException("Retries exceeded for checking datasource: " + engineName);
        })
        .onComplete((o, throwable, ctx) -> {
            if (ctx != null) {
                LOGGER.debug("Completed checking datasource({}): {} tries, took {} seconds.",
                    engineName, ctx.getExecutions(), ctx.getElapsedTime().toSeconds());
            }
        })
        .get(callable);
    // @formatter:on
    return response;
}
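The retryIf(result -> result == null) clause is what makes this loop retry on empty answers as well as exceptions. A minimal, isolated sketch of that pattern (the lookup() call is hypothetical):

RetryPolicy nullRetry = new RetryPolicy()
        .retryIf(result -> result == null) // treat a null response as retryable
        .withMaxRetries(3)
        .withDelay(1, TimeUnit.SECONDS);

// lookup() stands in for any call that may transiently return null
String value = Failsafe.with(nullRetry).get(() -> lookup());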
/**
 * Gets the number of execution attempts so far.
 */
public int getAttemptCount() {
    return context.getAttemptCount();
}
void handleComplete(ExecutionResult result, ExecutionContext context) {
    if (successListener != null && result.getSuccessAll())
        successListener.handle(result, context.copy());
    else if (failureListener != null && !result.getSuccessAll())
        failureListener.handle(result, context.copy());
    if (completeListener != null)
        completeListener.handle(result, context.copy());
}
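handleComplete dispatches to whichever of the three listeners were registered. From the caller's side those hooks are wired up fluently; a minimal sketch, assuming Failsafe 1.x and a hypothetical doWork() task:

Failsafe.with(new RetryPolicy().withMaxRetries(2))
        .onSuccess(result -> log.info("succeeded: {}", result))
        .onFailure(failure -> log.warn("failed", failure))
        .onComplete((result, failure) -> log.info("done either way"))
        .run(() -> doWork()); // doWork() is invented for this sketch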
.onComplete((o, throwable, ctx) -> {
    if (ctx != null) {
        LOGGER.debug("Completed checking task ({}): {} tries, took {} seconds.",
            taskId, ctx.getExecutions(), ctx.getElapsedTime().toSeconds());
    }
})
@Override
public void onRetry(final RequestArguments arguments, @Nullable final ClientHttpResponse result,
        @Nullable final Throwable failure, final ExecutionContext context) {
    final Iterable<Tag> tags = tags(arguments, result, failure, context);
    registry.timer(metricName, tags).record(Duration.ofNanos(context.getElapsedTime().toNanos()));
}
private MetadataLoadingResult fetchTopicMetadata(CachedTopic topic, ExecutionContext context) {
    int attempt = context.getExecutions() + 1;
    if (brokerMessageProducer.isTopicAvailable(topic)) {
        logger.info("Successfully loaded metadata for topic {}, attempt #{}", topic.getQualifiedName(), attempt);
        return MetadataLoadingResult.success(topic.getTopicName());
    }
    logger.warn("Failed to load metadata for topic {}, attempt #{}", topic.getQualifiedName(), attempt);
    return MetadataLoadingResult.failure(topic.getTopicName());
}
private SchemaLoadingResult loadLatestSchema(Topic topic, ExecutionContext context) {
    int attempt = context.getExecutions() + 1;
    try {
        schemaRepository.getLatestAvroSchema(topic);
        logger.info("Successfully loaded schema for topic {}, attempt #{}", topic.getQualifiedName(), attempt);
        return SchemaLoadingResult.success(topic);
    } catch (SchemaNotFoundException e) {
        logger.warn("Failed to load schema for topic {}, attempt #{}. {}", topic.getQualifiedName(), attempt, e.getMessage());
        return SchemaLoadingResult.missing(topic);
    } catch (CouldNotLoadSchemaException e) {
        logger.error("Failed to load schema for topic {}, attempt #{}", topic.getQualifiedName(), attempt, e);
    }
    return SchemaLoadingResult.failure(topic);
}
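Both loader methods above return a result object instead of throwing, so the surrounding retry loop has to inspect that result. A sketch of how such a method can be driven with a result predicate, assuming Failsafe 1.x (the isFailure() accessor and the wiring are assumptions; only loadLatestSchema comes from the snippet):

RetryPolicy policy = new RetryPolicy()
        .retryIf((SchemaLoadingResult r) -> r.isFailure()) // isFailure() is assumed
        .withDelay(500, TimeUnit.MILLISECONDS)
        .withMaxRetries(5);

// The ContextualCallable overload passes the ExecutionContext into the loader,
// which is how loadLatestSchema receives its attempt counter.
SchemaLoadingResult result = Failsafe.with(policy)
        .get(ctx -> loadLatestSchema(topic, ctx));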
/**
 * Obtain an async failsafe retryer instance with the specified policy, metrics, and executor service.
 *
 * @param retryPolicy retry policy
 * @param metrics retry metrics
 * @param retryExec executor service for scheduling async retries
 * @return {@link net.jodah.failsafe.AsyncFailsafe} instance
 */
static AsyncFailsafe<?> failsafe(RetryPolicy retryPolicy, RetryMetrics metrics, ScheduledExecutorService retryExec) {
    // Note: startMs is captured when the retryer is built, so the retry timer
    // measures elapsed time from construction rather than per execution.
    long startMs = System.currentTimeMillis();
    return Failsafe.with(retryPolicy).with(retryExec)
        .onRetry(e -> metrics.retryCount.inc())
        .onRetriesExceeded(e -> {
            metrics.retryTimer.update(System.currentTimeMillis() - startMs);
            metrics.permFailureCount.inc();
        })
        .onSuccess((e, ctx) -> {
            if (ctx.getExecutions() > 1) {
                metrics.retryTimer.update(System.currentTimeMillis() - startMs);
            } else {
                metrics.successCount.inc();
            }
        });
}
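A hedged usage sketch of this factory: the returned AsyncFailsafe schedules the callable on the executor and hands back a future. The policy settings, metrics instance, and readFromTable() call are invented for illustration:

ScheduledExecutorService retryExec = Executors.newSingleThreadScheduledExecutor();
RetryPolicy policy = new RetryPolicy()
        .retryOn(Exception.class)
        .withBackoff(1, 30, TimeUnit.SECONDS);

// get() runs the callable asynchronously and returns a FailsafeFuture
Future<String> future =
        failsafe(policy, metrics, retryExec).get(() -> readFromTable()); // readFromTable() is hypothetical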
/**
 * Calls all BackingChecker instances that accept the current configuration.
 *
 * @throws ConfigurationException if a checker still fails once the retry policy is exhausted
 */
public void verifyInstallation() throws ConfigurationException {
    RetryPolicy retryPolicy = buildRetryPolicy();
    // Run every checker that applies to this configuration
    for (BackingChecker checker : getCheckers()) {
        if (checker.accepts(configurationGenerator)) {
            try {
                Failsafe.with(retryPolicy)
                        .onFailedAttempt(failure -> log.error(failure.getMessage()))
                        .onRetry((c, f, ctx) -> log.warn(String.format("Failure %d. Retrying....", ctx.getExecutions())))
                        .run(() -> checker.check(configurationGenerator));
            } catch (FailsafeException e) {
                // Unwrap the checker's ConfigurationException from Failsafe's wrapper
                if (e.getCause() instanceof ConfigurationException) {
                    throw (ConfigurationException) e.getCause();
                } else {
                    throw e;
                }
            }
        }
    }
}
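The snippet relies on buildRetryPolicy(), whose definition is not shown. A plausible sketch of a fixed-delay, bounded-duration policy for such installation checks might look like the following; this is a hypothetical reconstruction, not the project's actual policy:

// Hypothetical reconstruction; the real settings live in buildRetryPolicy()
protected RetryPolicy buildRetryPolicy() {
    return new RetryPolicy()
            .retryOn(ConfigurationException.class) // retry while checkers keep failing
            .withDelay(5, TimeUnit.SECONDS)
            .withMaxDuration(2, TimeUnit.MINUTES);
}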