/**
 * Completes the transaction in the cache when the originator no longer belongs to the cache topology.
 */
private CompletableFuture<Void> completeWithRemoteCommand(AdvancedCache<?, ?> cache, RpcManager rpcManager,
                                                          TxState state) throws Throwable {
   CommandsFactory commandsFactory = cache.getComponentRegistry().getCommandsFactory();
   CacheRpcCommand command = buildRemoteCommand(cache.getCacheConfiguration(), commandsFactory, state);
   CompletableFuture<Void> remote = rpcManager
         .invokeCommandOnAll(command, validOnly(), rpcManager.getSyncRpcOptions())
         .handle(handler())
         .toCompletableFuture();
   commandsFactory.initializeReplicableCommand(command, false);
   CompletableFuture<Void> local = command.invokeAsync().handle(handler());
   return CompletableFuture.allOf(remote, local);
}
@Override
public <T> CompletionStage<T> invokeCommandOnAll(ReplicableCommand command, ResponseCollector<T> collector,
                                                 RpcOptions rpcOptions) {
   long start = timeService.time();
   CompletionStage<T> request = actual.invokeCommandOnAll(command, collector, rpcOptions);
   return request.thenApply(responseMap -> {
      updateStats(command, true, timeService.timeDuration(start, NANOSECONDS), actual.getTransport().getMembers());
      return responseMap;
   });
}
/**
 * Commits a remote 1PC transaction that is already in MARK_COMMIT state.
 */
public int onePhaseCommitRemoteTransaction(GlobalTransaction gtx, List<WriteCommand> modifications) {
   RpcManager rpcManager = cache.getRpcManager();
   CommandsFactory factory = cache.getComponentRegistry().getCommandsFactory();
   try {
      //only pessimistic transactions are committed in 1PC and they don't use versions.
      PrepareCommand command = factory.buildPrepareCommand(gtx, modifications, true);
      CompletionStage<Void> cs = rpcManager.invokeCommandOnAll(command, validOnly(), rpcManager.getSyncRpcOptions());
      factory.initializeReplicableCommand(command, false);
      command.invokeAsync().join();
      cs.toCompletableFuture().join();
      forgetTransaction(gtx, rpcManager, factory);
      return loggingCompleted(true) == Status.OK ? XAResource.XA_OK : XAException.XAER_RMERR;
   } catch (Throwable throwable) {
      //the transaction should commit, but we can still hit exceptions (timeouts or similar)
      return XAException.XAER_RMERR;
   }
}
@Override
public final <T> CompletionStage<T> invokeCommandOnAll(ReplicableCommand command, ResponseCollector<T> collector,
                                                       RpcOptions rpcOptions) {
   return performRequest(getTransport().getMembers(), command, collector,
                         c -> realOne.invokeCommandOnAll(command, c, rpcOptions), rpcOptions);
}
/**
 * Rolls back a transaction in all the cluster members and removes it from them.
 */
public final void rollbackRemoteTransaction(GlobalTransaction gtx) {
   RpcManager rpcManager = cache.getRpcManager();
   CommandsFactory factory = cache.getComponentRegistry().getCommandsFactory();
   try {
      RollbackCommand rollbackCommand = factory.buildRollbackCommand(gtx);
      rollbackCommand.setTopologyId(rpcManager.getTopologyId());
      CompletionStage<Void> cs = rpcManager
            .invokeCommandOnAll(rollbackCommand, validOnly(), rpcManager.getSyncRpcOptions());
      factory.initializeReplicableCommand(rollbackCommand, false);
      rollbackCommand.invokeAsync().join();
      cs.toCompletableFuture().join();
   } catch (Throwable throwable) {
      throw Util.rewrapAsCacheException(throwable);
   } finally {
      forgetTransaction(gtx, rpcManager, factory);
   }
}
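// A minimal sketch (not taken from the source) of the pattern the transaction-completion snippets above share:
// broadcast a command to every member with invokeCommandOnAll, replay it locally after
// initializeReplicableCommand, and only report success once both parts finish. The method name is
// hypothetical; the fields (rpcManager, commandsFactory) are assumed to be injected as in the surrounding classes.
private CompletionStage<Void> broadcastAndApplyLocally(CacheRpcCommand command) {
   // remote part: send to all current members using the synchronous RPC options
   CompletableFuture<Void> remote = rpcManager
         .invokeCommandOnAll(command, VoidResponseCollector.validOnly(), rpcManager.getSyncRpcOptions())
         .toCompletableFuture();
   // local part: the originator is not in the recipient list, so the command must also run here
   commandsFactory.initializeReplicableCommand(command, false);
   CompletableFuture<?> local = command.invokeAsync();
   return CompletableFuture.allOf(remote, local);
}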
public Object invokeRemotelyAsync(List<Address> finalOwners, InvocationContext rCtx, WriteCommand writeCmd) {
   if (rCtx.isOriginLocal() && writeCmd.isSuccessful()) {
      // This is called with the entry locked. In order to avoid deadlocks we must not wait for RPC while
      // holding the lock, therefore we'll return a future and wait for it in LockingInterceptor after
      // unlocking (and committing) the entry.
      if (isSynchronous(writeCmd)) {
         if (finalOwners != null) {
            return rpcManager.invokeCommand(finalOwners, writeCmd,
                                            MapResponseCollector.ignoreLeavers(finalOwners.size()),
                                            rpcManager.getSyncRpcOptions());
         } else {
            return rpcManager.invokeCommandOnAll(writeCmd, MapResponseCollector.ignoreLeavers(),
                                                 rpcManager.getSyncRpcOptions());
         }
      } else {
         rpcManager.sendToMany(finalOwners, writeCmd, DeliverOrder.NONE);
      }
   }
   return null;
}
private Object broadcastClearIfNotLocal(InvocationContext rCtx, VisitableCommand rCommand, Object rv) {
   FlagAffectedCommand flagCmd = (FlagAffectedCommand) rCommand;
   if ( !isLocalModeForced( flagCmd ) ) {
      // just broadcast the clear command - this is simplest!
      if ( rCtx.isOriginLocal() ) {
         ((TopologyAffectedCommand) rCommand).setTopologyId(rpcManager.getTopologyId());
         if (isSynchronous(flagCmd)) {
            // the result value will be ignored, we don't need to propagate rv
            return asyncValue(rpcManager.invokeCommandOnAll(rCommand, VoidResponseCollector.ignoreLeavers(),
                                                            syncRpcOptions));
         } else {
            rpcManager.sendToAll(rCommand, DeliverOrder.NONE);
         }
      }
   }
   return rv;
}
@Override
public <T> CompletionStage<T> invokeCommandOnAll(ReplicableCommand command, ResponseCollector<T> collector,
                                                 RpcOptions rpcOptions) {
   rpcCollector.addRPC(new RpcDetail(getAddress(), command, cacheName, delegate.getTransport().getMembers()));
   return delegate.invokeCommandOnAll(command, collector, rpcOptions);
}
@Override
public Object visitClearCommand(InvocationContext ctx, ClearCommand command) {
   Object retval = invokeNext(ctx, command);
   if (!isLocalModeForced(command)) {
      // just broadcast the clear command - this is simplest!
      if (ctx.isOriginLocal()) {
         command.setTopologyId(rpcManager.getTopologyId());
         if (isSynchronous(command)) {
            return asyncValue(rpcManager.invokeCommandOnAll(command, VoidResponseCollector.ignoreLeavers(),
                                                            syncRpcOptions));
         } else {
            rpcManager.sendToAll(command, DeliverOrder.NONE);
         }
      }
   }
   return retval;
}
private void rollbackRemote(ComponentRegistry cr, CacheXid cacheXid, TxState state) {
   RollbackCommand rpcCommand = cr.getCommandsFactory().buildRollbackCommand(state.getGlobalTransaction());
   RpcManager rpcManager = cr.getComponent(RpcManager.class);
   rpcCommand.setTopologyId(rpcManager.getTopologyId());
   rpcManager.invokeCommandOnAll(rpcCommand, VoidResponseCollector.validOnly(), rpcManager.getSyncRpcOptions())
             .thenRun(() -> {
                //ignore exception so the rollback can be retried.
                //if a node doesn't find the remote transaction, it returns null.
                TxFunction function = new SetCompletedTransactionFunction(false);
                rwMap.eval(cacheXid, function);
             });
}
@Override
public Object visitLockControlCommand(TxInvocationContext ctx, LockControlCommand command) {
   Object retVal = invokeNext( ctx, command );
   if ( ctx.isOriginLocal() ) {
      //unlock will happen async as it is a best effort
      boolean sync = !command.isUnlock();
      List<Address> members = getMembers();
      ( (LocalTxInvocationContext) ctx ).remoteLocksAcquired(members);
      command.setTopologyId(rpcManager.getTopologyId());
      if (sync) {
         return asyncValue(rpcManager.invokeCommandOnAll(command, VoidResponseCollector.ignoreLeavers(),
                                                         syncRpcOptions));
      } else {
         rpcManager.sendToAll(command, DeliverOrder.NONE);
      }
   }
   return retVal;
}
private <T extends WriteCommand & RemoteLockCommand> CompletableFuture<?> invalidateAcrossCluster(
      T command, boolean isTransactional, Object key, Object keyLockOwner) {
   // increment invalidations counter if statistics maintained
   incrementInvalidations();
   InvalidateCommand invalidateCommand;
   if (!isLocalModeForced(command)) {
      if (isTransactional) {
         invalidateCommand = commandInitializer.buildBeginInvalidationCommand(
               EnumUtil.EMPTY_BIT_SET, new Object[] { key }, keyLockOwner);
      } else {
         invalidateCommand = commandsFactory.buildInvalidateCommand(EnumUtil.EMPTY_BIT_SET, new Object[] { key });
      }
      invalidateCommand.setTopologyId(rpcManager.getTopologyId());
      if (log.isDebugEnabled()) {
         log.debug("Cache [" + rpcManager.getAddress() + "] replicating " + invalidateCommand);
      }
      if (isSynchronous(command)) {
         return rpcManager.invokeCommandOnAll(invalidateCommand, VoidResponseCollector.ignoreLeavers(),
                                              syncRpcOptions)
                          .toCompletableFuture();
      } else {
         rpcManager.sendToAll(invalidateCommand, DeliverOrder.NONE);
      }
   }
   return null;
}
private CompletionStage<Void> invalidateAcrossCluster(boolean synchronous, Object[] keys, InvocationContext ctx) {
   // increment invalidations counter if statistics maintained
   incrementInvalidations();
   final InvalidateCommand invalidateCommand = commandsFactory.buildInvalidateCommand( EnumUtil.EMPTY_BIT_SET, keys );
   if ( log.isDebugEnabled() ) {
      log.debug( "Cache [" + rpcManager.getAddress() + "] replicating " + invalidateCommand );
   }
   TopologyAffectedCommand command = invalidateCommand;
   if ( ctx.isInTxScope() ) {
      TxInvocationContext txCtx = (TxInvocationContext) ctx;
      // A Prepare command containing the invalidation command in its 'modifications' list is sent to the remote nodes
      // so that the invalidation is executed in the same transaction and locks can be acquired and released properly.
      // This is 1PC on purpose, as an optimisation, even if the current TX is 2PC.
      // If the cache uses 2PC it's possible that the remotes will commit the invalidation and the originator rolls back,
      // but this does not impact consistency and the speed benefit is worth it.
      command = commandsFactory.buildPrepareCommand( txCtx.getGlobalTransaction(),
            Collections.<WriteCommand>singletonList( invalidateCommand ), true );
   }
   command.setTopologyId(rpcManager.getTopologyId());
   if (synchronous) {
      return rpcManager.invokeCommandOnAll(command, VoidResponseCollector.ignoreLeavers(), syncRpcOptions);
   } else {
      rpcManager.sendToAll(command, DeliverOrder.NONE);
   }
   return null;
}
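// A minimal sketch (not from the source) of the sync/async broadcast decision that the interceptor snippets
// above all follow: set the topology id first, then either issue a synchronous invokeCommandOnAll whose
// responses are collapsed by a collector that ignores leavers, or fire-and-forget with sendToAll. The method
// name is hypothetical; the fields (rpcManager, syncRpcOptions) mirror the surrounding classes.
private CompletionStage<Void> broadcast(ReplicableCommand command, boolean synchronous) {
   ((TopologyAffectedCommand) command).setTopologyId(rpcManager.getTopologyId());
   if (synchronous) {
      // leavers are ignored so a node that crashes mid-broadcast does not fail the caller
      return rpcManager.invokeCommandOnAll(command, VoidResponseCollector.ignoreLeavers(), syncRpcOptions);
   }
   rpcManager.sendToAll(command, DeliverOrder.NONE);
   return CompletableFuture.completedFuture(null);
}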
public void testInvokeCommandOnAll() throws Exception {
   ClusteredGetCommand command = TestingUtil.extractCommandsFactory(cache(0)).buildClusteredGetCommand("key", 0, 0L);
   RpcManager rpcManager0 = cache(0).getAdvancedCache().getRpcManager();
   Exceptions.expectException(IllegalArgumentException.class,
                              () -> rpcManager0.invokeCommandOnAll(command, SingleResponseCollector.validOnly(),
                                                                   rpcManager0.getSyncRpcOptions()));
   command.setTopologyId(rpcManager0.getTopologyId());
   CompletionStage<Map<Address, Response>> stage1 =
         rpcManager0.invokeCommandOnAll(command, MapResponseCollector.validOnly(), rpcManager0.getSyncRpcOptions());
   assertResponse(makeMap(address(1), SUCCESSFUL_EMPTY_RESPONSE, address(2), SUCCESSFUL_EMPTY_RESPONSE), stage1);
}
rpcManager.invokeCommandOnAll(rpcCommand, VoidResponseCollector.validOnly(), rpcManager.getSyncRpcOptions())
          .handle((aVoid, throwable) -> {
public void testInvokeCommandOnAllSuspect() throws Exception {
   DistributionManager distributionManager = cache(0).getAdvancedCache().getDistributionManager();
   CacheTopology initialTopology = distributionManager.getCacheTopology();
   assertEquals(CacheTopology.Phase.NO_REBALANCE, initialTopology.getPhase());
   try {
      ClusteredGetCommand command = TestingUtil.extractCommandsFactory(cache(0)).buildClusteredGetCommand("key", 0, 0L);
      RpcManager rpcManager0 = cache(0).getAdvancedCache().getRpcManager();

      // Add a node to the cache topology, but not to the JGroups cluster view
      List<Address> newMembers = new ArrayList<>(initialTopology.getMembers());
      newMembers.add(SUSPECT);
      ConsistentHash newCH = new ReplicatedConsistentHashFactory().create(MurmurHash3.getInstance(), 1, 1,
                                                                          newMembers, null);
      CacheTopology suspectTopology = new CacheTopology(initialTopology.getTopologyId(),
                                                        initialTopology.getRebalanceId(), newCH, null, null,
                                                        CacheTopology.Phase.NO_REBALANCE, newCH.getMembers(), null);
      distributionManager.setCacheTopology(suspectTopology);

      command.setTopologyId(rpcManager0.getTopologyId());
      CompletionStage<Map<Address, Response>> stage1 =
            rpcManager0.invokeCommandOnAll(command, MapResponseCollector.validOnly(), rpcManager0.getSyncRpcOptions());
      Exceptions.expectExecutionException(SuspectException.class, stage1.toCompletableFuture());
   } finally {
      distributionManager.setCacheTopology(initialTopology);
   }
}