/**
 * Mimics a partial flush between the current instance and the address to flush,
 * by opening and closing the necessary latches on both ends.
 *
 * @param addressToFlush address to flush in addition to the current address
 * @param block          if true, mimics setting a flush; otherwise, mimics un-setting a flush
 * @throws Exception if there are issues
 */
private void mimicPartialFlushViaRPC(Address addressToFlush, boolean block) throws Exception {
   StateTransferControlCommand cmd = commandsFactory.buildStateTransferControlCommand(block);
   Vector<Address> targets = new Vector<Address>();
   targets.add(addressToFlush);
   // Un-setting: open the local latch before notifying the remote end.
   if (!block) {
      rpcManager.getFlushTracker().unblock();
   }
   rpcManager.callRemoteMethods(targets, cmd, true, cfg.getStateRetrievalTimeout(), true);
   // Setting: close the local latch only after the remote end has been notified.
   if (block) {
      rpcManager.getFlushTracker().block();
   }
}
/**
 * Returns the cluster members that are neither the local node nor part of
 * the local buddy group.
 *
 * @return a fresh, mutable list of outside members
 */
public List<Address> getMembersOutsideBuddyGroup() {
   List<Address> outsiders = new ArrayList<Address>(rpcManager.getMembers());
   outsiders.remove(rpcManager.getLocalAddress());
   outsiders.removeAll(getBuddyAddresses());
   return outsiders;
}
/**
 * Cache started: records the local address and marks this node active if it
 * is currently the cluster coordinator, which drives the singleton store
 * cache loader's active status.
 */
@CacheStarted
public void cacheStarted(Event e) {
   localAddress = cache.getLocalAddress();
   active = cache.getRPCManager().isCoordinator();
   if (log.isDebugEnabled()) {
      log.debug("cache started: " + this);
   }
}
/**
 * @return the local cluster address, or {@code null} if no RPC manager is available
 */
public Address getLocalAddress() {
   return rpcManager == null ? null : rpcManager.getLocalAddress();
}
// Broadcasts a gravitate-data command (null recipient list) and waits for all
// responses, filtered for validity against the current member count.
// NOTE(review): enclosing method header is not visible in this chunk; `mbrs`
// is assigned but not used on this line — presumably consumed further down.
List<Address> mbrs = rpcManager.getMembers(); Boolean searchSubtrees = buddyManager.isDataGravitationSearchBackupTrees(); GravitateDataCommand command = commandsFactory.buildGravitateDataCommand(fqn, searchSubtrees); List resps = rpcManager.callRemoteMethods(null, command, GroupRequest.GET_ALL, buddyManager.getBuddyCommunicationTimeout(), new ResponseValidityFilter(rpcManager.getMembers().size()), false);
// Creates the buddy group with this node as data owner; if the local address
// is not yet known, hands the channel to the buddy locator instead.
// NOTE(review): enclosing method header is not visible in this chunk — the
// cast to NextMemberBuddyLocator presumably reflects the only locator type
// that needs a channel; confirm against the full method.
buddyGroup = new BuddyGroup(); buddyGroup.setDataOwner(cache.getLocalAddress()); Address localAddress = rpcManager.getLocalAddress(); if (localAddress == null) ((NextMemberBuddyLocator) buddyLocator).setChannel(rpcManager.getChannel());
private void makeRemoteCall(List<Address> recipients, ReplicableCommand call) throws Exception { // remove non-members from dest list if (recipients != null) { Iterator<Address> recipientsIt = recipients.iterator(); List<Address> members = cache.getMembers(); while (recipientsIt.hasNext()) { if (!members.contains(recipientsIt.next())) { recipientsIt.remove(); } } } rpcManager.callRemoteMethods(recipients == null ? null : new Vector<Address>(recipients), call, true, config.getBuddyCommunicationTimeout(), false); }
/**
 * Toggles the flush tracker according to {@code enabled}: blocks it when
 * enabled, unblocks it otherwise.
 *
 * @return always {@code null}
 */
public Object perform(InvocationContext ctx) throws Throwable {
   if (enabled) {
      rpcManager.getFlushTracker().block();
   } else {
      rpcManager.getFlushTracker().unblock();
   }
   return null;
}
/**
 * @return the current cluster member list, or {@code null} if no RPC manager is available
 */
public List<Address> getMembers() {
   return rpcManager == null ? null : rpcManager.getMembers();
}
// Mimics a partial flush against the last state-transfer source: first sets
// the flush, then un-sets it. NOTE(review): enclosing method is not visible
// in this chunk.
mimicPartialFlushViaRPC(rpcManager.getLastStateTransferSource(), true); mimicPartialFlushViaRPC(rpcManager.getLastStateTransferSource(), false);
// Fetches partial state for the subtree, then (with cache-mode-local cleared
// on the invocation context) fetches again with an explicit integration point.
// NOTE(review): enclosing method is not visible in this chunk; the relation
// between `members` and `sources` cannot be confirmed from here.
rpcManager.fetchPartialState(members, subtreeRoot.getFqn()); cache.getInvocationContext().getOptionOverrides().setCacheModeLocal(false); rpcManager.fetchPartialState(sources, fqn, subtreeRoot.getFqn());
@Override protected PrepareCommand buildPrepareCommand(GlobalTransaction gtx, List modifications, boolean onePhaseCommit) { // optimistic locking NEVER does one-phase prepares. return commandsFactory.buildOptimisticPrepareCommand(gtx, modifications, rpcManager.getLocalAddress(), false); }
/** * Flushes existing method calls. */ public void flush() { List<ReplicableCommand> toReplicate; synchronized (elements) { if (log.isTraceEnabled()) log.trace("flush(): flushing repl queue (num elements=" + elements.size() + ")"); toReplicate = new ArrayList<ReplicableCommand>(elements); elements.clear(); } if (toReplicate.size() > 0) { try { ReplicateCommand replicateCommand = commandsFactory.buildReplicateCommand(toReplicate); // send to all live nodes in the cluster rpcManager.callRemoteMethods(null, replicateCommand, false, configuration.getSyncReplTimeout(), false); } catch (Throwable t) { log.error("failed replicating " + toReplicate.size() + " elements in replication queue", t); } } } }
// NOTE(review): this line appears truncated/garbled by extraction — the method
// body's opening brace and remainder are not visible in this chunk; recover
// the full method before editing.
private void writeTxLog(ObjectOutputStream out) throws Exception FlushTracker flushTracker = rpcManager.getFlushTracker();
/** * If we are within one transaction we won't do any replication as replication would only be performed at commit time. * If the operation didn't originate locally we won't do any replication either. */ private Object handleCrudMethod(InvocationContext ctx, VisitableCommand command, boolean forceAsync) throws Throwable { boolean local = isLocalModeForced(ctx); if (local && ctx.getTransaction() == null) return invokeNextInterceptor(ctx, command); // FIRST pass this call up the chain. Only if it succeeds (no exceptions) locally do we attempt to replicate. Object returnValue = invokeNextInterceptor(ctx, command); if (ctx.getTransaction() == null && ctx.isOriginLocal()) { if (trace) { log.trace("invoking method " + command.getClass().getSimpleName() + ", members=" + rpcManager.getMembers() + ", mode=" + configuration.getCacheMode() + ", exclude_self=" + true + ", timeout=" + configuration.getSyncReplTimeout()); } replicateCall(ctx, command, !forceAsync && isSynchronous(ctx.getOptionOverrides()), ctx.getOptionOverrides()); } else { if (local) ctx.getTransactionContext().addLocalModification((WriteCommand) command); } return returnValue; }
protected void broadcastRollback(GlobalTransaction gtx, InvocationContext ctx) throws Throwable { boolean remoteCallSync = configuration.isSyncRollbackPhase(); if (rpcManager.getMembers() != null && rpcManager.getMembers().size() > 1) { // Broadcast rollback() to all other members (excluding myself) try { broadcastTxs.remove(gtx); RollbackCommand rollbackCommand = commandsFactory.buildRollbackCommand(gtx); if (log.isDebugEnabled()) log.debug("running remote rollback for " + gtx + " and coord=" + rpcManager.getLocalAddress()); replicateCall(ctx, rollbackCommand, remoteCallSync, ctx.getOptionOverrides()); } catch (Exception e) { log.error("Rollback failed", e); throw e; } } }
/**
 * Builds a prepare command for the given transaction and modifications,
 * stamped with this node's address as the originator.
 */
protected PrepareCommand buildPrepareCommand(GlobalTransaction gtx, List modifications, boolean onePhaseCommit) {
   Address origin = rpcManager.getLocalAddress();
   return commandsFactory.buildPrepareCommand(gtx, modifications, origin, onePhaseCommit);
}
protected void replicateCall(Vector<Address> recipients, ReplicableCommand call, boolean sync, boolean wrapCacheCommandInReplicateMethod, boolean useOutOfBandMessage, boolean isBroadcast, long timeout) throws Throwable { if (trace) log.trace("Broadcasting call " + call + " to recipient list " + recipients); if (!sync && replicationQueue != null && !usingBuddyReplication) { if (trace) log.trace("Putting call " + call + " on the replication queue."); replicationQueue.add(commandsFactory.buildReplicateCommand(call)); } else { if (usingBuddyReplication && !isBroadcast) call = buddyManager.transformFqns((VisitableCommand) call); Vector<Address> callRecipients = recipients; if (callRecipients == null) { callRecipients = usingBuddyReplication && !isBroadcast ? buddyManager.getBuddyAddressesAsVector() : null; if (trace) log.trace("Setting call recipients to " + callRecipients + " since the original list of recipients passed in is null."); } ReplicableCommand toCall = wrapCacheCommandInReplicateMethod ? commandsFactory.buildReplicateCommand(call) : call; List rsps = rpcManager.callRemoteMethods(callRecipients, toCall, sync, // is synchronised? timeout, useOutOfBandMessage ); if (trace) log.trace("responses=" + rsps); if (sync) checkResponses(rsps); } }
// NOTE(review): this line appears truncated/garbled by extraction — the method
// body's opening brace and remainder are not visible in this chunk; recover
// the full method before editing.
protected Object executeCommand(ReplicableCommand cmd, Message req) throws Throwable FlushTracker flushTracker = rpcManager.getFlushTracker();
/**
 * The cluster formation changed: determines whether this node gained or lost
 * coordinator status and, if so, propagates the new active status (which may
 * trigger an in-memory-to-cache-loader state push). Push failures are logged.
 */
@ViewChanged
public void viewChange(ViewChangedEvent event) {
   boolean nowCoordinator = isCoordinator(event.getNewView());
   if (active == nowCoordinator) {
      return; // coordinator status unchanged — nothing to do
   }
   try {
      activeStatusChanged(nowCoordinator);
   } catch (PushStateException e) {
      log.error("exception reported changing nodes active status", e);
   }
}
}