/**
 * Shuts down the Druid Kafka ingestion supervisor for the given data source by POSTing to
 * the Overlord's {@code /druid/indexer/v1/supervisor/<name>/shutdown} endpoint, retrying
 * transient IO failures up to {@code getMaxRetryCount()} times.
 *
 * @param overlordAddress host:port of the Druid Overlord (leader resolution handled by
 *                        {@code getResponseFromCurrentLeader})
 * @param dataSourceName  name of the data source whose supervisor is shut down
 * @throws RuntimeException wrapping any failure (non-OK HTTP status or exhausted retries)
 */
private void stopKafkaIngestion(String overlordAddress, String dataSourceName) {
  try {
    FullResponseHolder response = RetryUtils.retry(
        () -> DruidStorageHandlerUtils.getResponseFromCurrentLeader(
            getHttpClient(),
            new Request(HttpMethod.POST,
                new URL(String.format("http://%s/druid/indexer/v1/supervisor/%s/shutdown",
                    overlordAddress, dataSourceName))),
            // Constant charset instead of Charset.forName("UTF-8"): no name lookup, no
            // UnsupportedCharsetException path.
            new FullResponseHandler(java.nio.charset.StandardCharsets.UTF_8)),
        // Only IO failures are treated as transient and retried.
        input -> input instanceof IOException,
        getMaxRetryCount());
    if (response.getStatus().equals(HttpResponseStatus.OK)) {
      CONSOLE.printInfo("Druid Kafka Ingestion shutdown successful.");
    } else {
      throw new IOException(String.format("Unable to stop Kafka Ingestion Druid status [%d] full response [%s]",
          response.getStatus().getCode(), response.getContent()));
    }
  } catch (Exception e) {
    throw new RuntimeException(e);
  }
}
/**
 * Sleeps for the backoff interval computed for the given attempt number, first logging the
 * failure — at DEBUG level while {@code quiet}, otherwise at WARN.
 *
 * @throws InterruptedException if interrupted while sleeping
 */
private static void awaitNextRetry(final Throwable e, final int nTry, final boolean quiet) throws InterruptedException {
  final long backoffMillis = nextRetrySleepMillis(nTry);
  // Same message either way; only the log level differs.
  final String message = "Failed on try %d, retrying in %,dms.";
  if (quiet) {
    log.debug(e, message, nTry, backoffMillis);
  } else {
    log.warn(e, message, nTry, backoffMillis);
  }
  Thread.sleep(backoffMillis);
}
awaitNextRetry(e, nTry, nTry <= quietTries); } else { Throwables.propagateIfInstanceOf(e, Exception.class);
/**
 * Enqueues the next sync attempt: immediately on the current thread when there are no
 * consecutive failures, otherwise scheduled after a backoff delay derived from the
 * consecutive-failure count.
 */
private void addNextSyncToWorkQueue() {
  // Healthy path: no recent failures, run the sync right away.
  if (consecutiveFailedAttemptCount <= 0) {
    addToQueueRunnable.run();
    return;
  }
  try {
    final long sleepMillis = RetryUtils.nextRetrySleepMillis(consecutiveFailedAttemptCount);
    log.info("Scheduling next syncup in [%d] millis from server [%s].", sleepMillis, druidServer.getName());
    executor.schedule(addToQueueRunnable, sleepMillis, TimeUnit.MILLISECONDS);
  } catch (Exception ex) {
    // Scheduling failure means this server stops syncing entirely — raise an alert.
    log.makeAlert(
        ex,
        "WTF! Couldn't schedule next sync. Server[%s] is not being synced any more, restarting Druid process on that server might fix the issue.",
        druidServer.getName()
    ).emit();
  }
}
/**
 * Resets the Druid Kafka ingestion supervisor for the given data source by POSTing to the
 * Overlord's {@code /druid/indexer/v1/supervisor/<name>/reset} endpoint, retrying transient
 * IO failures up to {@code getMaxRetryCount()} times.
 *
 * @param overlordAddress host:port of the Druid Overlord (leader resolution handled by
 *                        {@code getResponseFromCurrentLeader})
 * @param dataSourceName  name of the data source whose supervisor is reset
 * @throws RuntimeException wrapping any failure (non-OK HTTP status or exhausted retries)
 */
private void resetKafkaIngestion(String overlordAddress, String dataSourceName) {
  try {
    FullResponseHolder response = RetryUtils.retry(
        () -> DruidStorageHandlerUtils.getResponseFromCurrentLeader(
            getHttpClient(),
            new Request(HttpMethod.POST,
                new URL(String.format("http://%s/druid/indexer/v1/supervisor/%s/reset",
                    overlordAddress, dataSourceName))),
            // Constant charset instead of Charset.forName("UTF-8"): no name lookup, no
            // UnsupportedCharsetException path.
            new FullResponseHandler(java.nio.charset.StandardCharsets.UTF_8)),
        // Only IO failures are treated as transient and retried.
        input -> input instanceof IOException,
        getMaxRetryCount());
    if (response.getStatus().equals(HttpResponseStatus.OK)) {
      CONSOLE.printInfo("Druid Kafka Ingestion Reset successful.");
    } else {
      throw new IOException(String.format("Unable to reset Kafka Ingestion Druid status [%d] full response [%s]",
          response.getStatus().getCode(), response.getContent()));
    }
  } catch (Exception e) {
    throw new RuntimeException(e);
  }
}
try { coordinatorResponse = RetryUtils.retry(() -> DruidStorageHandlerUtils.getResponseFromCurrentLeader(getHttpClient(), new Request(HttpMethod.GET, new URL(String.format("http://%s/status", coordinatorAddress))), new FullResponseHandler(Charset.forName("UTF-8"))).getContent(),
FullResponseHolder response = RetryUtils.retry(() -> DruidStorageHandlerUtils.getResponseFromCurrentLeader(getHttpClient(), new Request(HttpMethod.GET, new URL(String.format("http://%s/druid/indexer/v1/supervisor/%s", overlordAddress, dataSourceName))),
FullResponseHolder response = RetryUtils.retry(() -> DruidStorageHandlerUtils.getResponseFromCurrentLeader(getHttpClient(), new Request(HttpMethod.GET, new URL(String.format("http://%s/druid/indexer/v1/supervisor/%s/status",
/**
 * Convenience overload of {@link #retry(Callable, Predicate, int, int)} that logs no
 * attempt quietly (quietTries = 0): every failed try is logged at WARN level.
 */
public static <T> T retry(final Callable<T> f, Predicate<Throwable> shouldRetry, final int maxTries) throws Exception {
  final int quietTries = 0;
  return retry(f, shouldRetry, quietTries, maxTries);
}
/**
 * Runs an S3 operation with up to 10 attempts, retrying only failures that {@code S3RETRY}
 * classifies as io-related. Service-level exceptions (access denied, file not found, etc.)
 * are not retried and propagate immediately.
 */
public static <T> T retryS3Operation(Callable<T> f) throws Exception {
  final int maxAttempts = 10;
  return RetryUtils.retry(f, S3RETRY, maxAttempts);
}
/**
 * Executes the given callback inside a DBI transaction, retrying per the configured
 * {@code shouldRetry} predicate.
 *
 * @param callback   transaction body to execute
 * @param quietTries number of initial failed attempts logged quietly rather than at WARN
 * @param maxTries   maximum number of attempts
 * @return the callback's result
 * @throws RuntimeException wrapping any checked exception from the transaction or retries;
 *         unchecked exceptions are rethrown as-is
 */
public <T> T retryTransaction(final TransactionCallback<T> callback, final int quietTries, final int maxTries) {
  // Lambda instead of an anonymous Callable (file already uses lambdas elsewhere).
  final Callable<T> call = () -> getDBI().inTransaction(callback);
  try {
    return RetryUtils.retry(call, shouldRetry, quietTries, maxTries);
  } catch (RuntimeException e) {
    // Matches the deprecated Throwables.propagate(e): unchecked exceptions pass through...
    throw e;
  } catch (Exception e) {
    // ...and checked exceptions are wrapped.
    throw new RuntimeException(e);
  }
}
/**
 * Executes the given callback with a DBI handle, retrying per the supplied predicate up to
 * {@code DEFAULT_MAX_TRIES} attempts.
 *
 * @param callback      handle-scoped work to execute
 * @param myShouldRetry predicate deciding whether a thrown failure is retryable
 * @return the callback's result
 * @throws RuntimeException wrapping any checked exception from the callback or retries;
 *         unchecked exceptions are rethrown as-is
 */
public <T> T retryWithHandle(
    final HandleCallback<T> callback,
    final Predicate<Throwable> myShouldRetry
) {
  // Lambda instead of an anonymous Callable (file already uses lambdas elsewhere).
  final Callable<T> call = () -> getDBI().withHandle(callback);
  try {
    return RetryUtils.retry(call, myShouldRetry, DEFAULT_MAX_TRIES);
  } catch (RuntimeException e) {
    // Matches the deprecated Throwables.propagate(e): unchecked exceptions pass through...
    throw e;
  } catch (Exception e) {
    // ...and checked exceptions are wrapped.
    throw new RuntimeException(e);
  }
}
return RetryUtils.retry( new Callable<Long>()
public String submitTask(final String task) return RetryUtils.retry( new Callable<String>()
return RetryUtils.retry( new Callable<Boolean>()
return RetryUtils.retry( new Callable<URI>()
if (jarFile.getName().endsWith(".jar")) { try { RetryUtils.retry( new Callable<Boolean>()
/**
 * Deletes {@code path} on the given filesystem, retrying transient failures (per
 * {@code shouldRetryPredicate()}) up to {@code NUM_RETRIES} times.
 *
 * @param fs        target filesystem
 * @param path      path to delete
 * @param recursive whether to delete directory contents recursively
 * @return the result of {@code fs.delete}
 * @throws RuntimeException wrapping any checked exception after retries are exhausted;
 *         unchecked exceptions are rethrown as-is
 */
public static boolean deleteWithRetry(final FileSystem fs, final Path path, final boolean recursive) {
  try {
    // Lambda instead of an anonymous Callable<Boolean>.
    return RetryUtils.retry(
        () -> fs.delete(path, recursive),
        shouldRetryPredicate(),
        NUM_RETRIES
    );
  } catch (Exception e) {
    log.error(e, "Failed to cleanup path[%s]", path);
    // Matches the deprecated Throwables.propagate(e): rethrow unchecked as-is, wrap checked.
    if (e instanceof RuntimeException) {
      throw (RuntimeException) e;
    }
    throw new RuntimeException(e);
  }
}
}
return RetryUtils.retry( new Callable<URI>()
/**
 * Deletes a single S3 object, retrying up to 3 attempts for failures that
 * {@code S3Utils.S3RETRY} classifies as retryable. Each failed attempt is logged with the
 * full s3:// URI before being rethrown to the retry loop.
 *
 * @throws Exception if the deletion still fails after all retries
 */
private void deleteWithRetries(final String s3Bucket, final String s3Path) throws Exception {
  // Named Callable variable instead of an inline cast on the lambda.
  final Callable<Void> deletion = () -> {
    try {
      s3Client.deleteObject(s3Bucket, s3Path);
      return null;
    } catch (Exception e) {
      log.info(e, "Error while trying to delete [s3://%s/%s]", s3Bucket, s3Path);
      throw e;
    }
  };
  RetryUtils.retry(deletion, S3Utils.S3RETRY, 3);
}
}