/**
 * Replaces the {@link XSiteStateConsumer} component of {@code cache} with a
 * {@link FailureXSiteConsumer} that delegates to the previous consumer.
 *
 * @param cache the cache whose component registry is rewired
 * @return the newly installed wrapper
 */
static FailureXSiteConsumer replaceOn(Cache<?, ?> cache) {
   return wrapComponent(cache, XSiteStateConsumer.class,
         (wrapOn, delegate) -> new FailureXSiteConsumer(delegate), true);
}
@Override protected void createCacheManagers() throws Throwable { ConfigurationBuilder builder = getDefaultClusteredCacheConfig(CacheMode.SCATTERED_SYNC, false); builder.clustering().biasAcquisition(BiasAcquisition.ON_WRITE).remoteTimeout(1000); createCluster(builder, 3); TestingUtil.wrapComponent(cache(0), RpcManager.class, rpcManager -> rpcManager0 = new FailingRpcManager(rpcManager)); cache(1); // just touch to start it TestingUtil.wrapComponent(cache(2), RpcManager.class, rpcManager -> rpcManager2 = new CountingRpcManager(rpcManager)); }
/**
 * Returns the {@link LoggingRpcManager} installed on cache 0, wrapping the
 * current {@link RpcManager} lazily on first use.
 */
private LoggingRpcManager rpcManager() {
   RpcManager current = extractComponent(cache(0, cacheName()), RpcManager.class);
   if (current instanceof LoggingRpcManager) {
      // Already wrapped by a previous call; reuse it.
      return (LoggingRpcManager) current;
   }
   return wrapComponent(cache(0, cacheName()), RpcManager.class, LoggingRpcManager::new);
}
// NOTE(review): truncated fragment — the performRequest signature is cut mid-parameter-list;
// the full anonymous AbstractDelegatingRpcManager body continues beyond this captured text.
TestingUtil.wrapComponent(c, RpcManager.class, original -> new AbstractDelegatingRpcManager(original) { @Override protected <T> CompletionStage<T> performRequest(Collection<Address> targets, ReplicableCommand command,
@Override
protected void createCacheManagers() throws Throwable {
   // Two-node DIST_SYNC cluster.
   createCluster(getDefaultClusteredCacheConfig(CacheMode.DIST_SYNC), 2);
   // Functional map views over both caches.
   FunctionalMapImpl<Object, Object> functionalMap0 = FunctionalMapImpl.create(cache(0).getAdvancedCache());
   FunctionalMapImpl<Object, Object> functionalMap1 = FunctionalMapImpl.create(cache(1).getAdvancedCache());
   rw0 = ReadWriteMapImpl.create(functionalMap0);
   rw1 = ReadWriteMapImpl.create(functionalMap1);
   wo0 = WriteOnlyMapImpl.create(functionalMap0);
   wo1 = WriteOnlyMapImpl.create(functionalMap1);
   // Key owned by node 0, plus RPC counters on both nodes.
   key = new MagicKey(cache(0));
   rpcManager0 = TestingUtil.wrapComponent(cache(0), RpcManager.class, CountingRpcManager::new);
   rpcManager1 = TestingUtil.wrapComponent(cache(1), RpcManager.class, CountingRpcManager::new);
}
// NOTE(review): garbled/truncated fragment — two XSiteProviderDelegator wrappings are fused
// together with a dangling `@Override } else {`; the anonymous class bodies are missing from
// this capture, so the code below is not syntactically complete as shown.
wrapComponent(cache(LON, testCaches.removeIndex), XSiteStateProvider.class, (WrapFactory<XSiteStateProvider, XSiteStateProvider, Cache<?, ?>>) (wrapOn, current) -> new XSiteProviderDelegator(current) { @Override } else { log.debugf("Block x-site state transfer start command in cache %s", addressOf(cache(LON, 1))); wrapComponent(cache(LON, 1), XSiteStateProvider.class, (WrapFactory<XSiteStateProvider, XSiteStateProvider, Cache<?, ?>>) (wrapOn, current) -> new XSiteProviderDelegator(current) { @Override
// Swap cache 0's StateTransferLock for one that stops blocking once topology advances past
// the current one. Assumes `currentTopology` and `handle` are in scope in the enclosing
// method — TODO confirm against the surrounding code, which is not visible here.
TestingUtil.wrapComponent(cache(0), StateTransferLock.class, stl -> new UnblockingStateTransferLock(stl, currentTopology + 1, handle::stopBlocking));
// NOTE(review): truncated fragment — the performRequest signature is cut mid-parameter-list;
// the anonymous AbstractDelegatingRpcManager body and the closing of the forEach lambda
// continue beyond this captured text.
caches().forEach(c -> TestingUtil.wrapComponent(c, RpcManager.class, original -> new AbstractDelegatingRpcManager(original) { @Override protected <T> CompletionStage<T> performRequest(Collection<Address> targets, ReplicableCommand command,
private void testBeforeTopology(BiFunction<FunctionalMap.ReadWriteMap<String, Integer>, String, Integer> op, int expectedIncrement) throws Exception { cache(0).put("key", 1); // Blocking on receiver side. We cannot block the StateResponseCommand on the server side since // the InternalCacheEntries in its state are the same instances of data stored in DataContainer // - therefore when the command is blocked on sender the command itself would be mutated by applying // the transaction below. BlockingStateConsumer bsc2 = TestingUtil.wrapComponent(cache(2), StateConsumer.class, BlockingStateConsumer::new); tm(2).begin(); FunctionalMap.ReadWriteMap<String, Integer> rw = ReadWriteMapImpl.create( FunctionalMapImpl.create(this.<String, Integer>cache(2).getAdvancedCache())); assertEquals(new Integer(1), op.apply(rw, "key")); Transaction tx = tm(2).suspend(); chf.setOwnerIndexes(0, 2); Future<?> future = fork(() -> { TestResourceTracker.testThreadStarted(this); addClusterEnabledCacheManager(cb).getCache(); }); bsc2.await(); DistributionInfo distributionInfo = cache(2).getAdvancedCache().getDistributionManager().getCacheTopology().getDistribution("key"); assertFalse(distributionInfo.isReadOwner()); assertTrue(distributionInfo.isWriteBackup()); tm(2).resume(tx); tm(2).commit(); bsc2.unblock(); future.get(10, TimeUnit.SECONDS); InternalCacheEntry<Object, Object> ice = cache(2).getAdvancedCache().getDataContainer().get("key"); assertEquals("Current ICE: " + ice, 1 + expectedIncrement, ice.getValue()); }
private void testAfterTopology(BiFunction<FunctionalMap.ReadWriteMap<String, Integer>, String, Integer> op, int expectedIncrement) throws Exception { cache(0).put("key", 1); // Blocking on receiver side. We cannot block the StateResponseCommand on the server side since // the InternalCacheEntries in its state are the same instances of data stored in DataContainer // - therefore when the command is blocked on sender the command itself would be mutated by applying // the transaction below. BlockingStateConsumer bsc2 = TestingUtil.wrapComponent(cache(2), StateConsumer.class, BlockingStateConsumer::new); chf.setOwnerIndexes(0, 2); Future<?> future = fork(() -> { TestResourceTracker.testThreadStarted(this); addClusterEnabledCacheManager(cb).getCache(); }); bsc2.await(); DistributionInfo distributionInfo = cache(2).getAdvancedCache().getDistributionManager().getCacheTopology().getDistribution("key"); assertFalse(distributionInfo.isReadOwner()); assertTrue(distributionInfo.isWriteBackup()); withTx(tm(2), () -> { FunctionalMap.ReadWriteMap<String, Integer> rw = ReadWriteMapImpl.create( FunctionalMapImpl.create(this.<String, Integer>cache(2).getAdvancedCache())); assertEquals(new Integer(1), op.apply(rw, "key")); return null; }); bsc2.unblock(); future.get(10, TimeUnit.SECONDS); InternalCacheEntry<Object, Object> ice = cache(2).getAdvancedCache().getDataContainer().get("key"); assertEquals("Current ICE: " + ice, 1 + expectedIncrement, ice.getValue()); }
/**
 * Reproduces a prepare-replay scenario: tx1 holds the lock, tx2 is pending, a new node joins
 * mid-flight, and tx1's originator receives an unsure response — the final value must be tx2's.
 */
public void testReplay() throws Exception {
   final Object key = new MagicKey("TxReplay3Test", cache(0));
   final StateSequencer sequencer = new StateSequencer();
   // Declare the logical threads and the global ordering of their checkpoints.
   sequencer.logicalThread("tx1", TX1_LOCKED, TX1_UNSURE);
   sequencer.logicalThread("tx2", TX2_PENDING);
   sequencer.logicalThread("join", JOIN_NEW_NODE);
   sequencer.logicalThread("main", MAIN_ADVANCE);
   sequencer.order(TX1_LOCKED, MAIN_ADVANCE, TX2_PENDING, JOIN_NEW_NODE, TX1_UNSURE);
   // Node 1 answers tx1 with an unsure response; node 0's inbound handler tracks tx2's origin.
   wrapComponent(cache(1), RpcManager.class,
         (wrapOn, current) -> new UnsureResponseRpcManager(current, sequencer), true);
   Handler handler = wrapInboundInvocationHandler(cache(0), current -> new Handler(current, sequencer));
   handler.setOrigin(address(cache(2)));
   // tx1: put from node 1.
   Future<Void> tx1 = fork(() -> {
      cache(1).put(key, VALUE_1);
      return null;
   });
   sequencer.advance(MAIN_ADVANCE);
   // tx2: competing put from node 2.
   Future<Void> tx2 = fork(() -> {
      cache(2).put(key, VALUE_2);
      return null;
   });
   // Bring a fourth node into the cluster while both transactions are in flight.
   sequencer.enter(JOIN_NEW_NODE);
   addClusterEnabledCacheManager(config()).getCache();
   waitForClusterToForm();
   sequencer.exit(JOIN_NEW_NODE);
   tx1.get(30, TimeUnit.SECONDS);
   tx2.get(30, TimeUnit.SECONDS);
   // tx2 committed last, so its value must win on the primary owner.
   assertEquals(VALUE_2, cache(0).get(key));
}