/**
 * Asynchronously persists the given snapshot.
 *
 * @param metadata identifies the persistenceId/sequenceNr of the snapshot
 * @param snapshot the snapshot payload to write
 * @return a future completed when the blocking {@code doSave} finishes
 */
@Override
public Future<Void> doSaveAsync(final SnapshotMetadata metadata, final Object snapshot) {
    LOG.debug("In doSaveAsync - metadata: {}, snapshot: {}", metadata, snapshot);
    // Off-load the blocking write to the store's dedicated dispatcher.
    return Futures.future(() -> doSave(metadata, snapshot), executionContext);
}
// Bundles one in-flight KvState query: the target state instance, the
// serialized key/namespace being looked up, and a fresh promise that is
// completed when the lookup's result (or failure) arrives.
private PendingRequest(KvStateID kvStateId, byte[] serializedKeyAndNamespace) { this.kvStateId = kvStateId; this.serializedKeyAndNamespace = serializedKeyAndNamespace; this.promise = Futures.promise(); } }
// Asynchronously loads the newest readable snapshot matching 'criteria'.
// The metadatas deque is ordered newest-first and capped at 'maxLoadAttempts'
// so that doLoad can fall back to an older snapshot if the newest file is
// corrupt (e.g. a JVM crash interrupted the write). Returns an empty
// Optional future when no snapshot matches.
// NOTE(review): 'reverse()' is a project-local collector - presumably it
// reverses the ascending sort to newest-first; confirm its contract.
@Override public Future<Optional<SelectedSnapshot>> doLoadAsync(final String persistenceId, final SnapshotSelectionCriteria criteria) { LOG.debug("In doLoadAsync - persistenceId: {}, criteria: {}", persistenceId, criteria); // Select the youngest 'maxLoadAttempts' snapshots that match the criteria. This may help in situations where // saving of a snapshot could not be completed because of a JVM crash. Hence, an attempt to load that snapshot // will fail but loading an older snapshot may succeed. Deque<SnapshotMetadata> metadatas = getSnapshotMetadatas(persistenceId, criteria).stream() .sorted(LocalSnapshotStore::compare).collect(reverse()).stream().limit(maxLoadAttempts) .collect(Collectors.toCollection(ArrayDeque::new)); if (metadatas.isEmpty()) { return Futures.successful(Optional.empty()); } LOG.debug("doLoadAsync - found: {}", metadatas); return Futures.future(() -> doLoad(metadatas), executionContext); }
/**
 * Returns the set of schema sources this node provides.
 *
 * @return an already-completed future holding {@code providedSources}
 */
@Override
public Future<Set<SourceIdentifier>> getProvidedSources() {
    // The set is known up-front, so no asynchronous work is needed here.
    final Set<SourceIdentifier> sources = providedSources;
    return akka.dispatch.Futures.successful(sources);
}
// Delays completion of 'future' until every outstanding prior read-only
// transaction's promise has completed, preserving the ordering guarantee that
// a Tx must not finish ahead of read-only Txs submitted before it. If 'txId'
// itself is a tracked read-only Tx, or there are no prior promises, the
// original future is returned untouched.
// NOTE(review): the inner isEmpty() re-check after copying the entry set looks
// redundant given the outer guard - presumably defensive against concurrent
// removal between the two reads; confirm the threading model before removing.
// The OnComplete ignores failures of the prior futures by design: it only
// waits for them, then completes the returned promise with the original future.
private <T> Future<T> combineFutureWithPossiblePriorReadOnlyTxFutures(final Future<T> future, final TransactionIdentifier txId) { if(!priorReadOnlyTxPromises.containsKey(txId) && !priorReadOnlyTxPromises.isEmpty()) { Collection<Entry<TransactionIdentifier, Promise<Object>>> priorReadOnlyTxPromiseEntries = new ArrayList<>(priorReadOnlyTxPromises.entrySet()); if(priorReadOnlyTxPromiseEntries.isEmpty()) { return future; } List<Future<Object>> priorReadOnlyTxFutures = new ArrayList<>(priorReadOnlyTxPromiseEntries.size()); for(Entry<TransactionIdentifier, Promise<Object>> entry: priorReadOnlyTxPromiseEntries) { LOG.debug("Tx: {} - waiting on future for prior read-only Tx {}", txId, entry.getKey()); priorReadOnlyTxFutures.add(entry.getValue().future()); } Future<Iterable<Object>> combinedFutures = Futures.sequence(priorReadOnlyTxFutures, getActorContext().getClientDispatcher()); final Promise<T> returnPromise = Futures.promise(); final OnComplete<Iterable<Object>> onComplete = new OnComplete<Iterable<Object>>() { @Override public void onComplete(final Throwable failure, final Iterable<Object> notUsed) { LOG.debug("Tx: {} - prior read-only Tx futures complete", txId); // Complete the returned Promise with the original Future. returnPromise.completeWith(future); } }; combinedFutures.onComplete(onComplete, getActorContext().getClientDispatcher()); return returnPromise.future(); } else { return future; } }
// Error callback: replace the job-manager future with an already-failed one so
// downstream consumers observe the exception.
// NOTE(review): plain field write - confirm the callback and readers run on
// the same thread, otherwise visibility of this assignment is not guaranteed.
@Override public void handleError(Exception exception) { jobManagerFuture = Futures.failed(exception); }
// Fold the collected futures into one aggregate future that completes when
// all of them do (fails fast if any fails), using the actor system dispatcher.
final Future<Iterable<Object>> aggregate = Futures.sequence( futures, getContext().system().dispatcher());
// Resolves an ActorRef for 'key', caching results per partition.
// Flow: (1) return a completed future on cache hit; (2) look up the actor URL
// via the retrieval service - a null URL yields a failed future; (3) otherwise
// resolve the ActorSelection asynchronously and, on success, populate the
// cache under 'cacheLock'. Note there is a benign race: two callers may
// resolve the same partition concurrently; the cache simply gets written twice.
public Future<ActorRef> getActorRefFuture(K key) { final int partitionNumber = retrievalService.getPartitionID(key); synchronized (cacheLock) { ActorRef result = cache.get(partitionNumber); if(result != null) { return Futures.successful(result); } } LOG.debug("Retrieve actor URL from retrieval service."); String actorURL = retrievalService.retrieveActorURL(key); if (actorURL == null) { return Futures.failed(new Exception("Could not retrieve actor.")); } else { ActorSelection selection = getContext().system().actorSelection(actorURL); LOG.debug("Resolve actor URL to ActorRef."); Future<ActorRef> actorRefFuture = selection.resolveOne(lookupTimeout); actorRefFuture.onSuccess(new OnSuccess<ActorRef>() { @Override public void onSuccess(ActorRef result) throws Throwable { synchronized (cacheLock) { cache.put(partitionNumber, result); } } }, executor); return actorRefFuture; } }
/**
 * Reports which schema sources are available from this provider.
 *
 * @return a pre-completed future wrapping the provided-sources set
 */
@Override
public Future<Set<SourceIdentifier>> getProvidedSources() {
    // No lookup required - wrap the already-known set in a successful future.
    return akka.dispatch.Futures.<Set<SourceIdentifier>>successful(providedSources);
}
// Creates one promise per worker so each InternalActor can report its result,
// collecting the promise futures for later aggregation.
// NOTE(review): this fragment appears truncated - the trailing
// "Futures.sequence(...), Duration.apply(10, TimeUnit.MINUTES));" has
// unbalanced parentheses and looks like the tail of an
// "Await.result(Futures.sequence(...), Duration...)" call whose head is
// missing. Recover the full statement from the original file before editing.
ActorRef[] actors = new ActorRef[WORKER_COUNT]; for (int workerIndex = 0; workerIndex < WORKER_COUNT; workerIndex++) { Promise<Integer> promise = Futures.promise(); futures.add(promise.future()); Props actorProps = Props.create(InternalActor.class, workerIndex, promise); Futures.sequence(futures, system.dispatcher()), Duration.apply(10, TimeUnit.MINUTES));
// Failure-recovery callback for a KvState query (tail of a recoverWith(...)
// anonymous class). Location-staleness failures (unknown state id / key-group
// location / location, or a ConnectException) trigger one retry with a forced
// location lookup; every other failure is propagated as a failed future.
@Override public Future<byte[]> recover(Throwable failure) throws Throwable { if (failure instanceof UnknownKvStateID || failure instanceof UnknownKvStateKeyGroupLocation || failure instanceof UnknownKvStateLocation || failure instanceof ConnectException) { // These failures are likely to be caused by out-of-sync // KvStateLocation. Therefore we retry this query and // force look up the location. return getKvState( jobId, queryableStateName, keyHashCode, serializedKeyAndNamespace, true); } else { return Futures.failed(failure); } } }, executionContext);
/**
 * Sends the supplied message to every cohort and aggregates the replies.
 *
 * @param messageSupplier builds the per-cohort message for this transaction
 * @return a future completing when all cohorts have replied (or one fails)
 */
private Future<Iterable<Object>> invokeCohorts(MessageSupplier messageSupplier) {
    final List<Future<Object>> replies = Lists.newArrayListWithCapacity(cohorts.size());
    for (CohortInfo cohortInfo : cohorts) {
        // Each cohort may speak a different version, so build per-cohort messages.
        final Object msg = messageSupplier.newMessage(transactionId, cohortInfo.getActorVersion());
        if (LOG.isDebugEnabled()) {
            LOG.debug("Tx {}: Sending {} to cohort {}", transactionId, msg, cohortInfo);
        }
        replies.add(actorContext.executeOperationAsync(cohortInfo.getResolvedActor(), msg,
                actorContext.getTransactionCommitOperationTimeout()));
    }
    // Collapse the individual reply futures into a single aggregate future.
    return akka.dispatch.Futures.sequence(replies, actorContext.getClientDispatcher());
}
/**
 * Asynchronously deletes all snapshots of {@code persistenceId} matching the
 * selection criteria.
 *
 * @param persistenceId the persistent actor whose snapshots are targeted
 * @param criteria selects which snapshots to remove
 * @return a future completed when the blocking {@code doDelete} finishes
 */
@Override
public Future<Void> doDeleteAsync(final String persistenceId, final SnapshotSelectionCriteria criteria) {
    LOG.debug("In doDeleteAsync - persistenceId: {}, criteria: {}", persistenceId, criteria);
    // Run the blocking delete on the store's dispatcher, not the caller's thread.
    return Futures.future(() -> doDelete(persistenceId, criteria), executionContext);
}
/**
 * Creates a resolver for the given device node within a clustered topology.
 *
 * @param topologyId the topology this device belongs to
 * @param nodeId the device node identifier
 * @param actorSystem the actor system used for cluster communication
 * @param schemaRegistry registry where resolved sources are registered
 * @param sourceRegistrations live registrations managed by this resolver
 */
public ClusteredDeviceSourcesResolverImpl(String topologyId, String nodeId, ActorSystem actorSystem,
        SchemaSourceRegistry schemaRegistry,
        List<SchemaSourceRegistration<? extends SchemaSourceRepresentation>> sourceRegistrations) {
    // Completed later, once the resolved source set becomes known.
    resolvedSourcesPromise = Futures.promise();
    this.topologyId = topologyId;
    this.nodeId = nodeId;
    this.actorSystem = actorSystem;
    this.schemaRegistry = schemaRegistry;
    this.sourceRegistrations = sourceRegistrations;
}
/**
 * Reports whether the bucket currently holds less than one full token.
 *
 * @return a completed future: {@code true} when no whole token remains
 */
@Override
@Nonnull
public Future<Boolean> isEmpty() {
    final boolean depleted = nrTokens < ONE_TOKEN;
    return Futures.successful(depleted);
}
// Retry callback (tail of a recoverWith(...) anonymous class): on failure,
// re-issue the state query while any attempts remain, decrementing the
// budget each time; once 'tries' is exhausted, propagate the failure.
@Override public Future<Object> recover(Throwable failure) throws Throwable { if (tries > 0) { LOG.debug("Query state failed with {}. Try to recover. #{} left.", failure, tries - 1); return queryStateFutureWithFailover(tries - 1, queryState); } else { return Futures.failed(failure); } } }, executor);
// State-machine transition for a readied transaction in the chain proxy.
// Verifies the chain is in Allocated state for exactly this transaction, then
// either returns straight to IDLE (no cohort futures) or records a Submitted
// state holding the combined cohort future. The completion handler uses a CAS
// (STATE_UPDATER) so that it only resets to IDLE if no newer transaction was
// allocated in the meantime - do not reorder these steps.
@Override protected <T> void onTransactionReady(final TransactionIdentifier transaction, final Collection<Future<T>> cohortFutures) { final State localState = currentState; Preconditions.checkState(localState instanceof Allocated, "Readying transaction %s while state is %s", transaction, localState); final TransactionIdentifier currentTx = ((Allocated)localState).getIdentifier(); Preconditions.checkState(transaction.equals(currentTx), "Readying transaction %s while %s is allocated", transaction, currentTx); // Transaction ready and we are not waiting for futures -- go to idle if (cohortFutures.isEmpty()) { currentState = IDLE_STATE; return; } // Combine the ready Futures into 1 final Future<Iterable<T>> combined = Futures.sequence(cohortFutures, getActorContext().getClientDispatcher()); // Record the we have outstanding futures final State newState = new Submitted(transaction, combined); currentState = newState; // Attach a completion reset, but only if we do not allocate a transaction // in-between combined.onComplete(new OnComplete<Iterable<T>>() { @Override public void onComplete(final Throwable arg0, final Iterable<T> arg1) { STATE_UPDATER.compareAndSet(TransactionChainProxy.this, newState, IDLE_STATE); } }, getActorContext().getClientDispatcher()); }
@Override public Future<Void> doDeleteAsync(final SnapshotMetadata metadata) { LOG.debug("In doDeleteAsync - metadata: {}", metadata); // Multiple snapshot files here mean that there were multiple snapshots for this seqNr - we delete all of them. // Usually snapshot-stores would keep one snapshot per sequenceNr however here in the file-based one we // timestamp snapshots and allow multiple to be kept around (for the same seqNr) if desired. return Futures.future(() -> doDelete(metadata), executionContext); }
/**
 * Creates a handler backed by a fresh promise that will be completed with the
 * received body bytes.
 *
 * <p>Note: the original declaration carried an unused generic type parameter
 * {@code <T>} on the constructor; it bound to nothing and has been removed.
 */
public BytesBodyHandler() {
    this(Futures.promise());
}
// Caches the resolved primary-shard info for 'shardName' as an
// already-successful future, so later lookups complete immediately.
public void putSuccessful(@Nonnull String shardName, @Nonnull PrimaryShardInfo info) { primaryShardInfoCache.put(shardName, Futures.successful(info)); }