// Note: the leading scheduler.schedule(initialDelay, interval, ...) portion of each of the
// three calls below was truncated in the source; the prefixes and the delay/interval
// variable names are assumed, reconstructed from the trailing arguments and the
// Cancellable values stored in the per-queue maps.
Cancellable scheduler = getContext().system().scheduler().schedule(
        refreshDelay, refreshInterval,        // assumed delay/interval arguments
        self(), new QueueRefreshRequest(queueName, false),
        getContext().dispatcher(), getSelf());
refreshSchedulersByQueueName.put(queueName, scheduler);

scheduler = getContext().system().scheduler().schedule(
        timeoutDelay, timeoutInterval,        // assumed delay/interval arguments
        self(), new QueueTimeoutRequest(queueName),
        getContext().dispatcher(), getSelf());
timeoutSchedulersByQueueName.put(queueName, scheduler);

scheduler = getContext().system().scheduler().schedule(
        shardCheckDelay, shardCheckInterval,  // assumed delay/interval arguments
        self(), new ShardCheckRequest(queueName),
        getContext().dispatcher(), getSelf());
shardAllocationSchedulersByQueueName.put(queueName, scheduler);
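The three maps hold a Cancellable per queue, which implies a matching teardown path: stopping a queue means cancelling and removing all three schedulers. A minimal sketch of that helper, assuming only the map fields shown above (the method itself is not from the source):

// Hypothetical helper: cancel and forget the schedulers for one queue.
private void cancelSchedulers(String queueName) {
    Cancellable refresh = refreshSchedulersByQueueName.remove(queueName);
    if (refresh != null) {
        refresh.cancel();
    }
    Cancellable timeout = timeoutSchedulersByQueueName.remove(queueName);
    if (timeout != null) {
        timeout.cancel();
    }
    Cancellable shardCheck = shardAllocationSchedulersByQueueName.remove(queueName);
    if (shardCheck != null) {
        shardCheck.cancel();
    }
}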
@Override
public void onReceive(Object message) throws Exception {
    if (message instanceof QueryState) {
        @SuppressWarnings("unchecked")
        QueryState<K> queryState = (QueryState<K>) message;

        LOG.debug("Query state for key {}.", queryState.getKey());

        Future<Object> futureResult = queryStateFutureWithFailover(queryAttempts, queryState);
        Patterns.pipe(futureResult, getContext().dispatcher()).to(getSender());
    } else {
        throw new RuntimeException("Unknown message " + message);
    }
}
public RpcRegistry(final RemoteRpcProviderConfig config, final ActorRef rpcInvoker,
        final ActorRef rpcRegistrar) {
    super(config, config.getRpcRegistryPersistenceId(), new RoutingTable(rpcInvoker, ImmutableSet.of()));
    this.rpcRegistrar = Preconditions.checkNotNull(rpcRegistrar);
    this.mxBean = new RemoteRpcRegistryMXBeanImpl(
            new BucketStoreAccess(self(), getContext().dispatcher(), config.getAskDuration()),
            config.getAskDuration());
}
/**
 * Schedules a message every {@code interval} seconds, after an initial delay.
 *
 * @param initialDelay delay in seconds before the first message
 * @param interval     period in seconds between messages
 * @param message      the message to send to self
 * @return a Cancellable handle for the scheduled task
 */
private Cancellable scheduleCron(int initialDelay, int interval, MessageTypes message) {
    return scheduler.schedule(secs(initialDelay), secs(interval), getSelf(), message,
            getContext().dispatcher(), null);
}
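The snippet relies on a scheduler field and a secs(...) helper that are not shown. A minimal sketch of what they would look like, assuming the field is simply the ActorSystem scheduler (both declarations are assumptions, not from the source):

// Assumed, not from the source: the scheduler field and the secs(...) helper.
private final Scheduler scheduler = getContext().system().scheduler();

private static FiniteDuration secs(int seconds) {
    return Duration.create(seconds, TimeUnit.SECONDS);
}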
private void readAggregateEvents(Messages.RetreiveCorrelationIdEventsAsync retreiveAggregateEvents) {
    // Capture sender/self before the callbacks: they run later on the dispatcher,
    // where sender() may already refer to a different message's sender.
    final ActorRef sender = sender();
    final ActorRef self = self();
    final Future<Messages.EventWrapperBatch> future =
            storage.loadEventWrappersForCorrelationIdAsync(
                    retreiveAggregateEvents.getAggregateType(),
                    retreiveAggregateEvents.getCorrelationId(),
                    retreiveAggregateEvents.getFromJournalId());
    future.onSuccess(new OnSuccess<Messages.EventWrapperBatch>() {
        @Override
        public void onSuccess(Messages.EventWrapperBatch result) throws Throwable {
            sender.tell(result, self);
        }
    }, getContext().dispatcher());
    future.onFailure(new OnFailure() {
        @Override
        public void onFailure(Throwable failure) throws Throwable {
            log.error("failed to read events from journal storage {}", retreiveAggregateEvents, failure);
        }
    }, getContext().dispatcher());
}
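One thing this snippet leaves open: on failure the requester never receives a reply, so a caller using ask would simply time out. A hedged variant of the failure callback, assuming the caller does use ask, would also send an akka.actor.Status.Failure (this addition is not in the source):

// Assumed variant, not from the source: reply with Status.Failure so an
// asking caller fails fast instead of waiting for the ask timeout.
future.onFailure(new OnFailure() {
    @Override
    public void onFailure(Throwable failure) throws Throwable {
        log.error("failed to read events from journal storage {}", retreiveAggregateEvents, failure);
        sender.tell(new Status.Failure(failure), self);
    }
}, getContext().dispatcher());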
private void scheduleKillAll() {
    final Runnable killAll = new Runnable() {
        @Override
        public void run() {
            router.route(PoisonPill.getInstance(), getSelf());
        }
    };
    getContext()
            .system()
            .scheduler()
            .scheduleOnce(Duration.create(SHUTDOWN_GRACE_TIME, TimeUnit.MILLISECONDS), killAll,
                    getContext().dispatcher());
}
private void registerConnectionTimeout() {
    if (connectionTimeout != null) {
        connectionTimeout.cancel();
    }

    connectionTimeoutId = UUID.randomUUID();

    connectionTimeout = getContext().system().scheduler().scheduleOnce(
            timeout,
            getSelf(),
            decorateMessage(new JobClientMessages.ConnectionTimeout(connectionTimeoutId)),
            getContext().dispatcher(),
            ActorRef.noSender());
}
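The UUID carried inside the ConnectionTimeout message is what lets the receive path ignore timeouts that a later registerConnectionTimeout() call has superseded. A sketch of that guard, assuming a handler of this shape (the handler name, the accessor on the message, and the reaction are all assumptions, not from the source):

// Hypothetical guard: act only on the most recently scheduled timeout.
private void handleConnectionTimeout(JobClientMessages.ConnectionTimeout message) {
    if (connectionTimeoutId != null && connectionTimeoutId.equals(message.timeoutId())) {
        // Assumed reaction to a lost connection; stale timeouts fall through and are ignored.
        getContext().stop(getSelf());
    }
}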
void addMessageToRetry(Object message, ActorRef replyTo, String failureMessage) {
    LOG.debug("{}: Adding message {} to retry", shard.persistenceId(), message);

    MessageInfo messageInfo = new MessageInfo(message, replyTo, failureMessage);

    // $times is the Java-mangled name of scala.concurrent.duration's '*' operator:
    // retry after twice the election timeout interval.
    FiniteDuration period = shard.getDatastoreContext().getShardRaftConfig()
            .getElectionTimeOutInterval().$times(2);
    messageInfo.timer = shard.getContext().system().scheduler().scheduleOnce(period, shard.getSelf(),
            messageInfo, shard.getContext().dispatcher(), ActorRef.noSender());

    messagesToRetry.add(messageInfo);
}
public void schedule(UntypedActorContext context) {
    if (repeatedTriggerTime <= 0) {
        LOGGER.info("Scheduling once {}", toString());
        context.system().scheduler().scheduleOnce(
                startTime(),
                context.self(),
                getScheduledMessage(),
                context.dispatcher(),
                ActorRef.noSender());
    } else {
        LOGGER.info("Scheduling repeated {}", toString());
        context.system().scheduler().schedule(
                startTime(),
                Duration.create(getRepeatedTriggerTime(), getTriggerTimeUnit()),
                context.self(),
                getScheduledMessage(),
                context.dispatcher(),
                ActorRef.noSender());
    }
}
private void scheduleInflightCommitRetry(EntityOwnershipShard shard) {
    FiniteDuration duration = shard.getDatastoreContext().getShardRaftConfig().getElectionTimeOutInterval();

    log.debug("Scheduling retry for BatchedModifications commit {} in {}",
            inflightCommit.getTransactionID(), duration);

    retryCommitSchedule = shard.getContext().system().scheduler().scheduleOnce(duration, shard.getSelf(),
            COMMIT_RETRY_MESSAGE, shard.getContext().dispatcher(), ActorRef.noSender());
}
/**
 * Gets the buckets from the bucket store for the given node addresses and sends them to the remote gossiper.
 *
 * @param remote    remote node to send buckets to
 * @param addresses node addresses whose buckets need to be sent
 */
void sendGossipTo(final ActorRef remote, final Set<Address> addresses) {
    Future<Object> futureReply =
            Patterns.ask(getContext().parent(), new GetBucketsByMembers(addresses), config.getAskDuration());
    futureReply.map(getMapperToSendGossip(remote), getContext().dispatcher());
}
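getMapperToSendGossip(...) is not shown in the snippet. A hedged sketch of its likely shape, built on Akka's akka.dispatch.Mapper; the reply type and the exact message sent to the remote gossiper are assumptions, not from the source:

// Assumed sketch: forward the parent's reply (the requested buckets) to the remote gossiper.
private Mapper<Object, Void> getMapperToSendGossip(final ActorRef remote) {
    return new Mapper<Object, Void>() {
        @Override
        public Void apply(Object reply) {
            // The concrete reply/envelope types are assumptions.
            remote.tell(reply, getSelf());
            return null;
        }
    };
}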
@Override
public void preStart() {
    // Execute the bulk update at least once in a while, even if not enough messages have accumulated.
    getContext().system().scheduler().schedule(
            config.getMetaDataUpdateMaxDuration(),
            config.getMetaDataUpdateMaxDuration(),
            getSelf(),
            TICK,
            getContext().dispatcher(),
            null);
}
private void scheduleHealthCheck() {
    final Runnable healthCheckRunnable = new Runnable() {
        @Override
        public void run() {
            getSelf().tell(Signal.CHECK, getSelf());
        }
    };
    healthCheck = getContext()
            .system()
            .scheduler()
            .schedule(Duration.create(1000, TimeUnit.MILLISECONDS),
                    Duration.create(1000, TimeUnit.MILLISECONDS),
                    healthCheckRunnable, getContext().dispatcher());
}
/**
 * Gets bucket versions from the bucket store and sends them to the supplied address.
 *
 * @param remoteActorSystemAddress address of the remote gossiper to send to
 */
void getLocalStatusAndSendTo(Address remoteActorSystemAddress) {
    // Get local status from the bucket store and send it to the remote gossiper.
    Future<Object> futureReply =
            Patterns.ask(getContext().parent(), new GetBucketVersions(), config.getAskDuration());

    // Find the gossiper on the remote system: same actor path, remote address.
    ActorSelection remoteRef = getContext().system().actorSelection(
            remoteActorSystemAddress.toString() + getSelf().path().toStringWithoutAddress());

    if (log.isTraceEnabled()) {
        log.trace("Sending bucket versions to [{}]", remoteRef);
    }

    futureReply.map(getMapperToSendLocalStatus(remoteRef), getContext().dispatcher());
}
public Master(FiniteDuration workTimeout) {
    this.workTimeout = workTimeout;
    ClusterClientReceptionist.get(getContext().system()).registerService(getSelf());
    this.cleanupTask = getContext().system().scheduler().schedule(workTimeout.div(2), workTimeout.div(2),
            getSelf(), CleanupTick, getContext().dispatcher(), getSelf());
}
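A periodic task scheduled in the constructor like this is normally cancelled when the actor stops, otherwise the scheduler keeps sending CleanupTick to dead letters. A minimal sketch, assuming nothing beyond the cleanupTask field above:

// Assumed companion to the constructor: stop the cleanup tick with the actor.
@Override
public void postStop() {
    cleanupTask.cancel();
}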
@Override
protected void onRecoveryComplete() {
    restoreFromSnapshot = null;

    // Notify the shard manager.
    getContext().parent().tell(new ActorInitialized(), getSelf());

    // Being paranoid here - this method should only be called once but just in case...
    if (txCommitTimeoutCheckSchedule == null) {
        // Schedule a message to be periodically sent to check if the current in-progress
        // transaction should be expired and aborted.
        FiniteDuration period = Duration.create(transactionCommitTimeout / 3, TimeUnit.MILLISECONDS);
        txCommitTimeoutCheckSchedule = getContext().system().scheduler().schedule(
                period, period, getSelf(), TX_COMMIT_TIMEOUT_CHECK_MESSAGE,
                getContext().dispatcher(), ActorRef.noSender());
    }
}
public Worker(ActorRef clusterClient, Props workExecutorProps, FiniteDuration registerInterval) {
    this.clusterClient = clusterClient;
    this.workExecutor = getContext().watch(getContext().actorOf(workExecutorProps, "exec"));
    this.registerTask = getContext().system().scheduler().schedule(Duration.Zero(), registerInterval,
            clusterClient, new SendToAll("/user/master/singleton", new RegisterWorker(workerId)),
            getContext().dispatcher(), getSelf());
}