Future<Void> f = fork(() -> rw.eval(key, entry -> entry.set(entry.find().orElse(0) + 1)).join());
data.put("key1", "newValue1"); data.put("key2", "newValue2"); Traversable<String> previousValues = readWriteMap.evalMany(data, (v, readWriteView) -> { String prev = readWriteView.find().orElse(null); readWriteView.set(v, new MetaLifespan(Duration.ofHours(1).toMillis()));
/**
 * Seeds three entries through the write-only map, then removes every entry via the
 * read-write map and verifies that the previous values returned by the removal
 * match exactly the values that were stored.
 */
<K> void doReadWriteToRemoveAllAndReturnPrevs(Supplier<K> keySupplier,
      WriteOnlyMap<K, String> map1, ReadWriteMap<K, String> map2) {
   Map<K, String> entries = new HashMap<>();
   entries.put(keySupplier.get(), "one");
   entries.put(keySupplier.get(), "two");
   entries.put(keySupplier.get(), "three");
   // Populate the cache with the seed data.
   await(map1.evalMany(entries, setValueConsumer()));
   // Remove everything, collecting the previous values reported per entry.
   Set<String> removedValues = map2.evalAll(removeReturnPrevOrNull())
         .collect(HashSet::new, HashSet::add, HashSet::addAll);
   assertEquals(new HashSet<>(entries.values()), removedValues);
}
rw.eval(key, view -> view.set(view.get() + "+2")).join(); assertEquals("v1+2", backup.get(key)); rw.evalMany(Util.asSet(keys), view -> view.set(view.get() + "+3")) .forEach(ret -> assertEquals(null, ret)); for (Object key : keys) { rw.evalMany(Util.asSet(keys), view -> view.find().orElse("none")) .forEach(ret -> assertEquals("none", ret)); for (Object key : keys) { TransactionManager tm = cache(site, 0).getAdvancedCache().getTransactionManager(); tm.begin(); rw.eval(keys[0], "v4", MarshallableFunctions.setValueReturnPrevOrNull()).join(); rw.evalMany(Util.asSet(keys[1], keys[2]), view -> view.find().orElse("none")) .forEach(ret -> assertEquals("none", ret)); tm.commit();
CompletableFuture<Void> future = putFromLoadMap.eval(key, new VersionedEntry(value, version, txTimestamp)); assert future.isDone(); // async try-locking should be done immediately return true;
rwMap.eval(cacheXid, function); return null; });
data.put("key1", "newValue1"); data.put("key2", "newValue2"); Traversable<String> previousValues = readWriteMap.evalMany(data, (v, readWriteView) -> { String prev = readWriteView.find().orElse(null); readWriteView.set(v, new MetaLifespan(Duration.ofHours(1).toMillis()));
List<CountDownLatch> latches = new ArrayList<>(); latches.addAll(Arrays.asList(new CountDownLatch(1), new CountDownLatch(1), new CountDownLatch(1))); AutoCloseable onCreate = rwMap.listeners().onCreate(created -> { assertEquals("created", created.get()); latches.get(0).countDown(); }); AutoCloseable onModify = rwMap.listeners().onModify((before, after) -> { assertEquals("created", before.get()); assertEquals("modified", after.get()); latches.get(1).countDown(); }); AutoCloseable onRemove = rwMap.listeners().onRemove(removed -> { assertEquals("modified", removed.get()); latches.get(2).countDown(); awaitEventIfOwner(isOwner, rwMap.eval(key2, "created", setValueReturnPrevOrNull()), latches.get(0)); awaitEventIfOwner(isOwner, rwMap.eval(key2, "modified", setValueReturnPrevOrNull()), latches.get(1)); awaitEventIfOwner(isOwner, rwMap.eval(key2, removeReturnPrevOrNull()), latches.get(2)); awaitEventIfOwner(isOwner, rwMap.eval(key3, new SetConstantOnReadWrite<>("created")), latches.get(0)); awaitEventIfOwner(isOwner, rwMap.eval(key3, new SetConstantOnReadWrite<>("modified")), latches.get(1)); awaitEventIfOwner(isOwner, rwMap.eval(key3, removeReturnPrevOrNull()), latches.get(2)); awaitNoEvent(rwMap.eval(key5, "cuatro", setValueReturnPrevOrNull()), latches.get(0)); awaitNoEvent(rwMap.eval(key5, "four", setValueReturnPrevOrNull()), latches.get(1)); awaitNoEvent(rwMap.eval(key5, removeReturnPrevOrNull()), latches.get(2));
tm.begin(); try { rw1.eval(new UserType<>(1, "key"), view -> { assertFalse(view.find().isPresent()); view.set(new UserType<>(1, "value")); return null; }); rw2.eval(new UserType<>(2, "key"), view -> { UserType<String> value = view.find().orElseThrow(() -> new AssertionError()); assertEquals(2, value.type);
assertEquals("a", rw.eval(KEY, append("b")).join()); assertEquals("ab", cache.get(KEY)); assertEquals("ab", rw.evalMany(Collections.singleton(KEY), append("c")).findAny().get()); assertEquals("abc", cache.put(KEY, "abcd")); tm.commit(); assertEquals("y", rw.eval(KEY, "z", MarshallableFunctions.setValueReturnPrevOrNull()).join()); assertTrue(cache.replace(KEY, "z", "a")); tm.commit(); assertEquals("a", rw.eval(KEY, append("b")).join()); assertEquals("ab", cache.getAll(Collections.singleton(KEY)).get(KEY)); tm.commit();
/**
 * Records the commit/rollback decision for every cache enlisted in the transaction
 * identified by {@code xid}, reporting each per-cache outcome to {@code collector}.
 * Updates are asynchronous; the collector is told the expected result count up front.
 */
private void markTx(XidImpl xid, boolean commit, CacheNameCollector collector) {
   if (trace) {
      log.tracef("[%s] Set Transaction Decision to %s", xid, commit ? "Commit" : "Rollback");
   }
   final List<CacheXid> cacheXids = getKeys(xid);
   if (trace) {
      log.tracef("[%s] Fetched CacheXids=%s", xid, cacheXids);
   }
   final int size = cacheXids.size();
   if (size == 0) {
      // No cache was enlisted under this xid — nothing to decide.
      collector.noTransactionFound();
      return;
   }
   // Must be called before any addCache() so the collector knows when it is complete.
   collector.expectedSize(size);
   SetDecisionFunction function = new SetDecisionFunction(commit);
   for (CacheXid cacheXid : cacheXids) {
      // handle() (not thenApply) maps both success and failure into a Status, so
      // every cache always reports back and the collector can reach its expected size.
      rwMap.eval(cacheXid, function).handle((statusValue, throwable) -> {
         Status status;
         if (throwable == null) {
            status = Status.valueOf(statusValue);
         } else {
            status = Status.ERROR;
         }
         collector.addCache(cacheXid.getCacheName(), status);
         return null;
      });
   }
}
// closes the enclosing class (declaration is outside this view)
}
FunctionalMap.ReadWriteMap<Object, Object> rwMap = ReadWriteMapImpl.create(FunctionalMapImpl.create(c.getAdvancedCache())); try { assertEquals("value" + e.getKey(), rwMap.eval(e.getKey(), view -> { Object prev = view.get(); view.set(prev + "-other");
/**
 * Attempts to cache a value loaded from the database, guarding against stale data:
 * the write is skipped when the loading transaction began before the last region
 * invalidation, or (with {@code minimalPutOverride}) when a fresher entry or a
 * newer tombstone is already present. Returns {@code true} when the async update
 * was submitted, {@code false} when the put was skipped.
 */
@Override
public boolean putFromLoad(Object session, Object key, Object value, long txTimestamp, Object version, boolean minimalPutOverride) throws CacheException {
   long lastRegionInvalidation = region.getLastRegionInvalidation();
   if (txTimestamp < lastRegionInvalidation) {
      // The whole region was invalidated after this tx started; the loaded value may be stale.
      log.tracef("putFromLoad not executed since tx started at %d, before last region invalidation finished = %d", txTimestamp, lastRegionInvalidation);
      return false;
   }
   if (minimalPutOverride) {
      Object prev = cache.get(key);
      if (prev instanceof Tombstone) {
         Tombstone tombstone = (Tombstone) prev;
         long lastTimestamp = tombstone.getLastTimestamp();
         if (txTimestamp <= lastTimestamp) {
            // The entry was removed at or after this tx started — don't resurrect it.
            log.tracef("putFromLoad not executed since tx started at %d, before last invalidation finished = %d", txTimestamp, lastTimestamp);
            return false;
         }
      } else if (prev != null) {
         // Minimal-put semantics: an existing (non-tombstone) entry wins.
         log.tracef("putFromLoad not executed since cache contains %s", prev);
         return false;
      }
   }
   // we can't use putForExternalRead since the PFER flag means that entry is not wrapped into context
   // when it is present in the container. TombstoneCallInterceptor will deal with this.
   CompletableFuture<Void> future = putFromLoadMap.eval(key, new TombstoneUpdate<>(SESSION_ACCESS.getTimestamp(session), value));
   assert future.isDone(); // async try-locking should be done immediately
   return true;
}
/**
 * Verifies that writes carrying an invalid proto schema are rejected with error
 * ISPN028014 — both via {@code Cache.compute} and via the functional read-write
 * map — and that the cache stays empty with no leaked transactions or locks.
 */
public void testUnsupportedCommands() {
   Cache<String, String> cache0 = cache(0);
   assertTrue(cache0.isEmpty());
   try {
      cache0.compute("test.proto", (k, v) -> "import \"missing.proto\";");
      // Fail explicitly if no exception was thrown; previously this path passed
      // silently and was only caught indirectly by the isEmpty() check below.
      // AssertionError is an Error, so the catch (Exception) below won't swallow it.
      throw new AssertionError("Expected CacheException with ISPN028014 from compute()");
   } catch (Exception e) {
      assertTrue(e instanceof CacheException);
      assertTrue(e.getMessage().contains("ISPN028014"));
   }
   assertTrue(cache0.isEmpty());
   try {
      FunctionalMap.ReadWriteMap<String, String> rwMap =
            ReadWriteMapImpl.create(FunctionalMapImpl.create(cache0.getAdvancedCache()));
      rwMap.eval("test.proto", "val", (value, view) -> {
         view.set(value);
         return "ret";
      }).join();
      // Same explicit failure for the functional-map path.
      throw new AssertionError("Expected CompletionException wrapping CacheException with ISPN028014");
   } catch (CompletionException e) {
      assertTrue(e.getCause() instanceof CacheException);
      assertTrue(e.getCause().getMessage().contains("ISPN028014"));
   }
   assertTrue(cache0.isEmpty());
   assertNoTransactionsAndLocks();
}
/**
 * Writes three entries, confirms the read-only view sees them, overwrites all of
 * them through the read-write map (checking the returned previous values), and
 * finally confirms the read-only view reflects the new values.
 */
private <K> void doUpdateSubsetAndReturnPrevs(Supplier<K> keySupplier, ReadOnlyMap<K, String> map1,
      WriteOnlyMap<K, String> map2, ReadWriteMap<K, String> map3) {
   K key1 = keySupplier.get(), key2 = keySupplier.get(), key3 = keySupplier.get();
   Map<K, String> data = new HashMap<>();
   data.put(key1, "one");
   data.put(key2, "two");
   data.put(key3, "three");
   await(map2.evalMany(data, setValueConsumer()));
   // Initial read-back must match the seeded values.
   List<String> dataValues = new ArrayList<>(data.values());
   Collections.sort(dataValues);
   assertEquals(sortedValuesOf(map1.evalMany(data.keySet(), returnReadOnlyFindOrNull())), dataValues);
   // Overwrite every key; the previous values returned must be the seeded ones.
   Map<K, String> newData = new HashMap<>();
   newData.put(key1, "bat");
   newData.put(key2, "bi");
   newData.put(key3, "hiru");
   assertEquals(dataValues, sortedValuesOf(map3.evalMany(newData, setValueReturnPrevOrNull())));
   // Final read-back must match the new values.
   List<String> newDataValues = new ArrayList<>(newData.values());
   Collections.sort(newDataValues);
   assertEquals(newDataValues, sortedValuesOf(map1.evalMany(data.keySet(), returnReadOnlyFindOrNull())));
}

/** Collects a traversable of strings into a sorted list. */
private static List<String> sortedValuesOf(Traversable<String> traversable) {
   List<String> values = traversable.collect(ArrayList::new, ArrayList::add, ArrayList::addAll);
   Collections.sort(values);
   return values;
}
/**
 * Publishes the outcome of a completed transaction to the cache, retrying until
 * the local update succeeds. A {@code null} value in the FutureUpdate clears the
 * pending state instead of installing the new value.
 */
@Override
protected void invoke(boolean success) {
   // If the region was invalidated during this session, we can't know that the value we're inserting is valid
   // so we'll just null the tombstone
   if (sessionTimestamp < region.getLastRegionInvalidation()) {
      success = false;
   }
   // Exceptions in #afterCompletion() are silently ignored, since the transaction
   // is already committed in DB. However we must not return until we update the cache.
   FutureUpdate futureUpdate = new FutureUpdate(uuid, region.nextTimestamp(), success ? this.value : null);
   for (;;) {
      try {
         // We expect that when the transaction completes further reads from cache will return the updated value.
         // UnorderedDistributionInterceptor makes sure that the update is executed on the node first, and here
         // we're waiting for the local update. The remote update does not concern us - the cache is async and
         // we won't wait for that.
         rwMap.eval(key, futureUpdate).join();
         return;
      } catch (Exception e) {
         // Deliberate retry-forever: log and try again until the cache accepts the update.
         log.failureInAfterCompletion(e);
      }
   }
}
// closes the enclosing class (declaration is outside this view)
}
/**
 * Applies {@code function} to the transaction entry identified by {@code key},
 * waiting at most {@code timeoutMillis} for the result. Returns the resulting
 * {@link Status}, or {@code Status.ERROR} on interruption, failure, or timeout.
 */
public Status update(CacheXid key, TxFunction function, long timeoutMillis) {
   if (trace) {
      log.tracef("[%s] Updating with function: %s", key, function);
   }
   CompletableFuture<Byte> pending = rwMap.eval(key, function);
   try {
      Status result = Status.valueOf(pending.get(timeoutMillis, TimeUnit.MILLISECONDS));
      if (trace) {
         log.tracef("[%s] Return value is %s", key, result);
      }
      return result;
   } catch (InterruptedException e) {
      if (trace) {
         log.tracef("[%s] Interrupted!", key);
      }
      // Restore the interrupt flag for callers higher up the stack.
      Thread.currentThread().interrupt();
      return Status.ERROR;
   } catch (ExecutionException | TimeoutException e) {
      if (trace) {
         log.tracef(e, "[%s] Error!", key);
      }
      return Status.ERROR;
   }
}
/**
 * Creates the access delegate for nonstrict-read-write cached regions.
 * Rejects invalidation-mode and transactional caches, and requires a version
 * comparator since this strategy only works with versioned entities/collections.
 *
 * @param region the data region this delegate serves
 * @param versionComparator comparator for entity/collection versions; must not be null
 */
public NonStrictAccessDelegate(InfinispanDataRegion region, Comparator versionComparator) {
   this.region = region;
   this.cache = region.getCache();
   // Parameterized instead of the raw FunctionalMapImpl (consistent with TombstoneAccessDelegate).
   FunctionalMapImpl<Object, Object> fmap = FunctionalMapImpl.create(cache).withParams(Param.PersistenceMode.SKIP_LOAD);
   this.writeMap = ReadWriteMapImpl.create(fmap);
   // Note that correct behaviour of local and async writes depends on LockingInterceptor (see there for details)
   this.putFromLoadMap = ReadWriteMapImpl.create(fmap).withParams(Param.LockingMode.TRY_LOCK, Param.ReplicationMode.ASYNC);
   Configuration configuration = cache.getCacheConfiguration();
   if (configuration.clustering().cacheMode().isInvalidation()) {
      throw new IllegalArgumentException("Nonstrict-read-write mode cannot use invalidation.");
   }
   if (configuration.transaction().transactionMode().isTransactional()) {
      throw new IllegalArgumentException("Currently transactional caches are not supported.");
   }
   this.versionComparator = versionComparator;
   if (versionComparator == null) {
      throw new IllegalArgumentException("This strategy requires versioned entities/collections but region " + region.getName() + " contains non-versioned data!");
   }
}
/**
 * Creates the access delegate for tombstone-based cached regions, setting up the
 * synchronous, async, and putFromLoad (try-lock, async) functional map views.
 * Rejects invalidation-mode and transactional caches.
 */
public TombstoneAccessDelegate(InfinispanDataRegion region) {
   this.region = region;
   this.cache = region.getCache();
   // SKIP_LOAD: tombstone handling never needs values loaded from a persistent store.
   FunctionalMapImpl<Object, Object> fmap = FunctionalMapImpl.create(cache).withParams(Param.PersistenceMode.SKIP_LOAD);
   writeMap = ReadWriteMapImpl.create(fmap);
   // Note that correct behaviour of local and async writes depends on LockingInterceptor (see there for details)
   asyncWriteMap = ReadWriteMapImpl.create(fmap).withParams(Param.ReplicationMode.ASYNC);
   putFromLoadMap = ReadWriteMapImpl.create(fmap).withParams(Param.LockingMode.TRY_LOCK, Param.ReplicationMode.ASYNC);
   Configuration configuration = this.cache.getCacheConfiguration();
   if (configuration.clustering().cacheMode().isInvalidation()) {
      throw new IllegalArgumentException("For tombstone-based caching, invalidation cache is not allowed.");
   }
   if (configuration.transaction().transactionMode().isTransactional()) {
      throw new IllegalArgumentException("Currently transactional caches are not supported.");
   }
   // NOTE(review): transactional mode was rejected just above, so isTransactional() is
   // always false here and requiresTransaction can never be true — confirm whether this
   // is leftover from a time when transactional caches were supported.
   requiresTransaction = configuration.transaction().transactionMode().isTransactional() && !configuration.transaction().autoCommit();
}
/**
 * Releases the clustered lock on behalf of {@code originator}. The returned
 * future completes normally once the unlock function has been applied, or
 * exceptionally (via {@code handleException}) if the evaluation failed.
 */
@Override
public CompletableFuture<Void> unlock() {
   if (trace) {
      log.tracef("LOCK[%s] unlock called from %s", getName(), originator);
   }
   CompletableFuture<Void> result = new CompletableFuture<>();
   readWriteMap.eval(lockKey, new UnlockFunction(originator)).whenComplete((value, throwable) -> {
      if (throwable != null) {
         // Normalize the cause before surfacing it to the caller.
         result.completeExceptionally(handleException(throwable));
         return;
      }
      if (trace) {
         log.tracef("LOCK[%s] Unlock result for %s is %b", getName(), originator, value);
      }
      result.complete(null);
   });
   return result;
}