/** Delegates the transaction-manager lookup to the wrapped cache instance. */
@Override
public TXManagerImpl getTxManager() {
  return delegate.getTxManager();
}
/** Returns the transaction manager owned by the given cache (extracted for test overriding). */
TXManagerImpl getTXManagerImpl(InternalCache cache) {
  return cache.getTxManager();
}
/** Fetches the transaction manager from {@code cache} (extracted for test overriding). */
TXManagerImpl getTXManager(InternalCache cache) {
  return cache.getTxManager();
}
/**
 * @return the node on which a transaction is already in progress, or {@code null} if no
 *         transaction is active on this thread
 */
private DistributedMember getTransactionalNode() {
  // No live TX state on this thread means there is no transactional target to report.
  return cache.getTxManager().getTXState() == null
      ? null
      : cache.getTxManager().getTXState().getTarget();
}
/**
 * Copies the cache's distributed-transaction mode onto this message, when a cache can be
 * reached through the reply processor. Silently leaves the flag untouched if any link in
 * the processor -&gt; distribution manager -&gt; cache chain is unavailable.
 */
private void setIfTransactionDistributed(ReplyProcessor21 processor) {
  if (processor == null) {
    return;
  }
  DistributionManager distributionManager = processor.getDistributionManager();
  if (distributionManager == null) {
    return;
  }
  InternalCache cache = distributionManager.getCache();
  if (cache != null && cache.getTxManager() != null) {
    this.isTransactionDistributed = cache.getTxManager().isDistributed();
  }
}
}
/**
 * When a transaction is in progress on this thread, binds the region to the real
 * transaction state and counts this function execution as a transactional operation.
 */
@Override
public void validateExecution(Function function, Set targetMembers) {
  InternalCache cache = GemFireCacheImpl.getInstance();
  // Nothing to validate unless a cache exists and a transaction is active on this thread.
  if (cache == null || cache.getTxManager().getTXState() == null) {
    return;
  }
  TXStateProxyImpl txProxy = (TXStateProxyImpl) cache.getTxManager().getTXState();
  txProxy.getRealDeal(null, region);
  txProxy.incOperationCount();
}
public TXEntryState createReadEntry(LocalRegion r, Object entryKey, RegionEntry re, Object vId, Object pendingValue) { InternalCache cache = r.getCache(); boolean isDistributed = false; if (cache.getTxManager().getTXState() != null) { isDistributed = cache.getTxManager().getTXState().isDistTx(); } else { // TXCoordinator and datanode are same isDistributed = cache.getTxManager().isDistributed(); } TXEntryState result = cache.getTXEntryStateFactory().createEntry(re, vId, pendingValue, entryKey, this, isDistributed); this.entryMods.put(entryKey, result); return result; }
/**
 * Wires up the mock cache/region/distribution-manager graph and installs a spied
 * TXManagerImpl (with an in-progress TX state, non-distributed mode) as the cache's
 * transaction manager. Statement order matters: the original current instance is captured
 * BEFORE constructing the new manager, because the constructor registers itself.
 */
@Before
public void setup() {
  initMocks(this);
  when(cache.getDistributedSystem()).thenReturn(distributedSystem);
  when(distributedSystem.getDistributionManager()).thenReturn(distributionManager);
  when(region.getCache()).thenReturn(cache);
  when(region.getDistributionManager()).thenReturn(distributionManager);
  when(txStateProxy.isInProgress()).thenReturn(true);
  // Capture the pre-test manager so it can be restored after the test.
  originalTxManager = TXManagerImpl.getCurrentInstanceForTest();
  // The constructor sets the new tx manager as currentInstance
  txManager = spy(new TXManagerImpl(mock(CachePerfStats.class), cache));
  txManager.setTXState(txStateProxy);
  txManager.setDistributed(false);
  when(cache.getTxManager()).thenReturn(txManager);
}
/**
 * Verifies that a successful commit first commits on the server, then releases server
 * affinity, and finally checks for cancellation — in that order.
 */
@Test
public void commitReleasesServerAffinityAfterCommit() {
  TXCommitMessage commitMessage = mock(TXCommitMessage.class);
  TXManagerImpl mockTxManager = mock(TXManagerImpl.class);
  when(cache.getTxManager()).thenReturn(mockTxManager);
  when(serverRegionProxy.commit(anyInt())).thenReturn(commitMessage);
  doNothing().when(cancelCriterion).checkCancelInProgress(null);
  doNothing().when(mockTxManager).setTXState(null);
  ClientTXStateStub stub = spy(new ClientTXStateStub(cache, dm, stateProxy, target, region));
  InOrder callOrder = inOrder(serverRegionProxy, internalPool, cancelCriterion);

  stub.commit();

  callOrder.verify(serverRegionProxy).commit(anyInt());
  callOrder.verify(internalPool).releaseServerAffinity();
  callOrder.verify(cancelCriterion).checkCancelInProgress(null);
}
/**
 * Verifies that server affinity is still released (after the server-side commit attempt)
 * even when the commit throws.
 */
@Test
public void commitReleasesServerAffinity_whenCommitThrowsAnException() {
  TXManagerImpl mockTxManager = mock(TXManagerImpl.class);
  when(cache.getTxManager()).thenReturn(mockTxManager);
  when(serverRegionProxy.commit(anyInt())).thenThrow(new InternalGemFireError());
  doNothing().when(cancelCriterion).checkCancelInProgress(null);
  doNothing().when(mockTxManager).setTXState(null);
  ClientTXStateStub stub = spy(new ClientTXStateStub(cache, dm, stateProxy, target, region));
  InOrder callOrder = inOrder(serverRegionProxy, internalPool);

  assertThatThrownBy(() -> stub.commit()).isInstanceOf(InternalGemFireError.class);

  callOrder.verify(serverRegionProxy).commit(anyInt());
  callOrder.verify(internalPool).releaseServerAffinity();
}
}
/**
 * Sends a PRUpdateEntryVersionMessage to a single recipient and returns the response
 * processor to wait on.
 *
 * @param recipient the member to update the entry version on
 * @param r the partitioned region the entry belongs to
 * @param event the event carrying the key and version information
 * @return the response on which to wait for acknowledgement
 * @throws ForceReattemptException if the message could not be sent to the recipient
 */
public static UpdateEntryVersionResponse send(InternalDistributedMember recipient,
    PartitionedRegion r, EntryEventImpl event) throws ForceReattemptException {
  Set recipients = Collections.singleton(recipient);
  UpdateEntryVersionResponse p =
      new UpdateEntryVersionResponse(r.getSystem(), recipient, event.getKey());
  PRUpdateEntryVersionMessage m = new PRUpdateEntryVersionMessage(recipients, r.getPRId(), p,
      event);
  m.setTransactionDistributed(r.getCache().getTxManager().isDistributed());
  Set failures = r.getDistributionManager().putOutgoing(m);
  // putOutgoing returns the members the message could not be delivered to.
  if (failures != null && !failures.isEmpty()) {
    throw new ForceReattemptException(String.format("Failed sending < %s >.", m));
  }
  return p;
}
}
/** perform local cache modifications using the server's TXCommitMessage */
private void afterServerCommit(TXCommitMessage txcm) {
  // Test hook: runs after the commit has been sent to the server.
  if (this.internalAfterSendCommit != null) {
    this.internalAfterSendCommit.run();
  }
  if (cache == null) {
    // we can probably delete this block because cache is now a final var
    // fixes bug 42933
    return;
  }
  cache.getCancelCriterion().checkCancelInProgress(null);
  // Configure the commit message for local-only application: no acks, no listeners.
  txcm.setDM(dm);
  txcm.setAckRequired(false);
  txcm.setDisableListeners(true);
  // Clear the thread's TX state before applying the server's changes locally.
  cache.getTxManager().setTXState(null);
  txcm.hookupRegions(dm);
  txcm.basicProcess();
}
/**
 * Sends a BucketBackupMessage requesting that another VM backup an existing bucket
 *
 * @param recipients the member that the contains keys/value message is sent to
 * @param r the PartitionedRegion that contains the bucket
 */
public static void send(Set recipients, PartitionedRegion r, int bucketId) {
  Assert.assertTrue(recipients != null, "BucketBackupMessage NULL sender list");
  BucketBackupMessage message = new BucketBackupMessage(recipients, r.getPRId(), bucketId);
  message.setTransactionDistributed(r.getCache().getTxManager().isDistributed());
  r.getDistributionManager().putOutgoing(message);
}
/**
 * Sends a DumpBucketsMessage to the given members and returns the response to wait on.
 *
 * @param recipients members to dump bucket information on
 * @param r the partitioned region whose buckets are dumped
 * @param validateOnly whether to only validate rather than dump
 * @param onlyBuckets whether to restrict the dump to bucket info
 * @return the partition response to wait on
 */
public static PartitionResponse send(Set recipients, PartitionedRegion r,
    final boolean validateOnly, final boolean onlyBuckets) {
  PartitionResponse response = new PartitionResponse(r.getSystem(), recipients);
  DumpBucketsMessage message =
      new DumpBucketsMessage(recipients, r.getPRId(), response, validateOnly, onlyBuckets);
  message.setTransactionDistributed(r.getCache().getTxManager().isDistributed());
  r.getDistributionManager().putOutgoing(message);
  return response;
}
/**
 * Sends a DumpB2NRegion message for one bucket and returns the response to wait on.
 *
 * @param recipients members to query
 * @param r the partitioned region
 * @param bId the bucket id to dump
 * @param justPrimaryInfo whether to return only primary information
 * @return the response to wait on
 */
public static DumpB2NResponse send(Set recipients, PartitionedRegion r, int bId,
    boolean justPrimaryInfo) {
  DumpB2NResponse response = new DumpB2NResponse(r.getSystem(), recipients);
  DumpB2NRegion message = new DumpB2NRegion(recipients, r.getPRId(), response, bId,
      justPrimaryInfo);
  message.setTransactionDistributed(r.getCache().getTxManager().isDistributed());
  r.getDistributionManager().putOutgoing(message);
  return response;
}
/**
 * Sends a DumpAllPRConfigMessage to the given members and returns the response to wait on.
 *
 * @param recipients members to dump PR configuration on
 * @param r the partitioned region whose configuration is dumped
 * @return the partition response to wait on
 */
public static PartitionResponse send(Set recipients, PartitionedRegion r) {
  PartitionResponse response = new PartitionResponse(r.getSystem(), recipients);
  DumpAllPRConfigMessage message = new DumpAllPRConfigMessage(recipients, r.getPRId(), response);
  message.setTransactionDistributed(r.getCache().getTxManager().isDistributed());
  r.getDistributionManager().putOutgoing(message);
  return response;
}
/**
 * Sends an InvalidatePartitionedRegionMessage to the given members and returns the reply
 * processor to wait on.
 *
 * @param recipients members to invalidate the region on
 * @param r the partitioned region being invalidated
 * @param event the region event carrying the callback argument
 * @return the reply processor to wait on
 */
public static ReplyProcessor21 send(Set recipients, PartitionedRegion r,
    RegionEventImpl event) {
  ReplyProcessor21 processor = new ReplyProcessor21(r.getSystem(), recipients);
  InvalidatePartitionedRegionMessage message = new InvalidatePartitionedRegionMessage(
      recipients, event.getCallbackArgument(), r, processor);
  message.setTransactionDistributed(r.getCache().getTxManager().isDistributed());
  r.getSystem().getDistributionManager().putOutgoing(message);
  return processor;
}
/**
 * sends a message to the given recipients asking for the size of either their primary bucket
 * entries or the values sets of their primary buckets
 *
 * @param recipients recipients of the message
 * @param r the local PartitionedRegion instance
 * @param bucketIds the buckets to look for, or null for all buckets
 */
public static SizeResponse send(Set recipients, PartitionedRegion r,
    ArrayList<Integer> bucketIds, boolean estimate) {
  Assert.assertTrue(recipients != null, "SizeMessage NULL recipients set");
  SizeResponse response = new SizeResponse(r.getSystem(), recipients);
  SizeMessage message = new SizeMessage(recipients, r.getPRId(), response, bucketIds, estimate);
  message.setTransactionDistributed(r.getCache().getTxManager().isDistributed());
  r.getDistributionManager().putOutgoing(message);
  return response;
}
/**
 * Sends a message to make the recipient primary for the bucket.
 *
 * @param acceptedMembers the members that acknowledged bucket creation
 * @param newPrimary the member to become primary
 * @param pr the PartitionedRegion of the bucket
 * @param bid the bucket to become primary for
 */
public static void send(Collection<InternalDistributedMember> acceptedMembers,
    InternalDistributedMember newPrimary, PartitionedRegion pr, int bid) {
  Assert.assertTrue(newPrimary != null, "VolunteerPrimaryBucketMessage NULL recipient");
  ReplyProcessor21 processor = new ReplyProcessor21(pr.getSystem(), acceptedMembers);
  EndBucketCreationMessage message = new EndBucketCreationMessage(acceptedMembers, pr.getPRId(),
      processor, bid, newPrimary);
  message.setTransactionDistributed(pr.getCache().getTxManager().isDistributed());
  pr.getDistributionManager().putOutgoing(message);
}
/**
 * Sends a DestroyRegionOnDataStoreMessage requesting that another VM destroy an existing region,
 * and blocks uninterruptibly until the recipient replies.
 *
 * @param recipient the member to destroy the region on
 * @param r the partitioned region to destroy
 * @param callbackArg the callback argument to pass to the destroy operation
 */
public static void send(InternalDistributedMember recipient, PartitionedRegion r,
    Object callbackArg) {
  DistributionManager dm = r.getDistributionManager();
  ReplyProcessor21 rp = new ReplyProcessor21(dm, recipient);
  int procId = rp.getProcessorId();
  DestroyRegionOnDataStoreMessage m =
      new DestroyRegionOnDataStoreMessage(recipient, r.getPRId(), rp, callbackArg);
  m.setTransactionDistributed(r.getCache().getTxManager().isDistributed());
  // Reuse the already-fetched distribution manager instead of looking it up again.
  dm.putOutgoing(m);
  rp.waitForRepliesUninterruptibly();
}