private AsyncLazy(Single<TValue> single) { logger.debug("constructor"); this.single = single .doOnSuccess(v -> this.succeeded = true) .doOnError(e -> this.failed = true) .cache(); }
@Override
public void destroy() throws Exception {
    // Undeploy every verticle and block until each undeploy operation has
    // terminated (success or error) before closing the Vert.x instance.
    Set<String> deploymentIds = vertx.deploymentIDs();
    if (!deploymentIds.isEmpty()) {
        CountDownLatch latch = new CountDownLatch(deploymentIds.size());
        for (String deploymentId : deploymentIds) {
            vertx.rxUndeploy(deploymentId)
                    .doOnSuccess(ignored -> latch.countDown())
                    .doOnError(e -> {
                        log.error("", e);
                        latch.countDown();
                    })
                    .subscribe();
        }
        latch.await();
    }
    vertx.close();
}
}
static Single<DatabaseAccount> getDatabaseAccountFromAnyLocationsAsync( URL defaultEndpoint, List<String> locations, Func1<URL, Single<DatabaseAccount>> getDatabaseAccountFn) { return getDatabaseAccountFn.call(defaultEndpoint).onErrorResumeNext( e -> { logger.error("Fail to reach global gateway [{}], [{}]", defaultEndpoint, e.getMessage()); if (locations.isEmpty()) { return Single.error(e); } rx.Observable<rx.Observable<DatabaseAccount>> obs = rx.Observable.range(0, locations.size()) .map(index -> getDatabaseAccountFn.call(LocationHelper.getLocationEndpoint(defaultEndpoint, locations.get(index))).toObservable()); // iterate and get the database account from the first non failure, otherwise get the last error. rx.Observable<DatabaseAccount> res = rx.Observable.concatDelayError(obs).first().single(); return res.toSingle().doOnError( innerE -> { logger.error("Fail to reach location any of locations", String.join(",", locations), innerE.getMessage()); }); }); }
/**
 * Schedules a job for execution at the time specified by {@code trigger}.
 * <p>
 * Fails fast with a {@code RuntimeException} if the trigger time is already in the past
 * (relative to the injected {@code now} clock). Otherwise a scheduling lock scoped to the
 * trigger time is acquired, the job details are created, and the job is written to the
 * scheduled-jobs index.
 *
 * @param type       job type identifier
 * @param name       human-readable job name (used in logging)
 * @param parameters arbitrary job parameters passed through to the job details
 * @param trigger    defines when the job fires; must be in the future
 * @return a Single emitting the persisted job details, or an error if the trigger time
 *         has passed, the lock cannot be acquired, or the insert fails
 */
@Override public Single<? extends JobDetails> scheduleJob(String type, String name, Map<String, String> parameters, Trigger trigger) {
    if (now.get().getMillis() >= trigger.getTriggerTime()) {
        return Single.error(new RuntimeException("Trigger time has already passed"));
    }
    String lockName = QUEUE_LOCK_PREFIX + trigger.getTriggerTime();
    // The lock we obtain here is basically a shared lock. We need a lock to avoid a possible race condition.
    // Without the lock the job could be written to scheduled_jobs_idx and wind up having the partition to
    // which it was written deleted before the job is ever executed. The job would then never get executed.
    return lockManager.acquireLock(lockName, SCHEDULING_LOCK, SCHEDULING_LOCK_TIMEOUT_IN_SEC, false)
            .map(lock -> {
                if (lock.isLocked()) {
                    UUID jobId = UUID.randomUUID();
                    return jobsService.createJobDetails(jobId, type, name, parameters, trigger,
                            new Date(trigger.getTriggerTime()));
                }
                // Lock not obtained: surface as an error through the Rx chain.
                throw new RuntimeException("Failed to acquire scheduling lock [" + lockName + "]");
            })
            // Persist the details in the partition keyed by the trigger time, then
            // propagate the details (not the insert's ResultSet) to the caller.
            .flatMap(details -> jobsService.insert(new Date(trigger.getTriggerTime()), details)
                    .map(resultSet -> details))
            .toSingle()
            .doOnError(t -> logger.warn("Failed to schedule job " + name, t));
}
private Single<JobExecutionState> releaseJobExecutionLock(JobExecutionState state) {
    // Per-job lock name: scheduler prefix plus the job's id.
    String jobLock = "org.hawkular.metrics.scheduler.job." + state.currentDetails.getJobId();
    return lockManager.releaseLock(jobLock, hostname)
            .map(released -> {
                if (released) {
                    return state;
                }
                // Release failing is logged but non-fatal; the state flows on unchanged.
                logger.warnf("Failed to release job lock for %s", state.currentDetails);
                return state;
            })
            .toSingle()
            .doOnError(t -> logger.warnf(t, "There was an error trying to release job lock [%s] for %s", jobLock,
                    state.currentDetails));
}
/**
 * Marks the job in {@code state} as finished in the finished-jobs index for its time slice.
 *
 * @param state execution state carrying the time slice and current job details
 * @return a Single emitting the same state once the index update completes
 */
private Single<JobExecutionState> setJobFinished(JobExecutionState state) {
    return session.execute(updateJobToFinished.bind(state.timeSlice, state.currentDetails.getJobId()),
            queryScheduler)
            .toSingle()
            .map(resultSet -> state)
            // FIX: message previously read "while updated the finished jobs index".
            .doOnError(t -> logger.warnf(t, "There was an error while updating the finished jobs index for %s",
                    state.currentDetails));
}
/**
 * Runs post-execution cleanup for a job that is NOT rescheduled: releases the job
 * execution lock, marks the job finished, and publishes the finished event in both
 * the success and error paths.
 *
 * @param job        the completed (or completing) job
 * @param jobDetails details of the executed job
 * @param timeSlice  the time slice the job ran in
 * @param activeJobs the set of currently active job ids
 * @return a Completable that finishes when cleanup is done
 */
private Completable doPostJobExecutionWithoutRescheduling(Completable job, JobDetailsImpl jobDetails,
        Date timeSlice, Set<UUID> activeJobs) {
    return job
            .toSingle(() -> new JobExecutionState(jobDetails, activeJobs))
            .flatMap(this::releaseJobExecutionLock)
            .flatMap(this::setJobFinished)
            .doOnError(t -> {
                // FIX: the previous message claimed "the job has already been rescheduled",
                // which contradicts this no-reschedule code path (apparently copied from
                // the rescheduling variant). Still publish the finished event so listeners
                // are notified even when cleanup fails.
                logger.debug("There was an error during post-job execution.", t);
                publishJobFinished(jobDetails);
            })
            .doOnSuccess(states -> publishJobFinished(states.currentDetails))
            .toCompletable();
}
" chunks to layer '" + layer + "' after " + duration + " ms"); }) .doOnError(err -> { long duration = System.currentTimeMillis() - timestamp; log.error("Failed to import [" + correlationId + "] to layer '" +
private void handleMessage(Exchange outputExchange, RabbitPublisher publisher, Message message) { try { //change in logback.xml to DEBUG level to see every message payload logged log.debugWithParams("Received message.", "payload", new String(message.payload), "metadata", message.basicProperties); publisher.call( outputExchange, new RoutingKey(message.envelope.getRoutingKey()), message.basicProperties, new Payload(message.payload) ) .doOnSuccess(ignore -> message.acknowledger.ack()) .doOnError(throwable -> message.acknowledger.reject()) .subscribe(); }catch (Exception e){ message.acknowledger.reject(); } }
.reduce(0, (a, b) -> a + b) .toSingle() .doOnError(onFinish::accept) .doOnSuccess(i -> onFinish.accept(null));
.doOnError(t -> { logger.warn("There was an error during post-job execution", t); publishJobFinished(jobDetails);