Collection<GridCacheMvccCandidate> locs; GridCacheEntryEx cached = entry.cached(); cached = entry.context().cache().entryEx(entry.key()); StringBuilder b = new StringBuilder(); b.append("TxEntry [cacheId=").append(bad.cacheId()). append(", key=").append(bad.key()). append(", queue=").append(qSize). append(", op=").append(bad.op()). append(", val=").append(bad.value()). append(", tx=").append(CU.txString(tx)). append(", other=["); continue; b.append(entry.toString()).append('\n');
if (old != null && old.filtersSet()) filter = old.filters(); old.addEntryProcessor(entryProcessor, invokeArgs); assert old.op() != TRANSFORM; old.op(op); old.value(val, op == CREATE || op == UPDATE || op == DELETE, op == READ); old.cached(entry); old.filters(filter); old.skipStore(skipStore); old.keepBinary(keepBinary); txEntry = new IgniteTxEntry(entry.context(), this, op, txEntry.conflictExpireTime(drExpireTime); txEntry.expiry(expiryPlc); txEntry.filtersSet(filtersSet); entry = entryEx(entry.context(), txEntry.txKey(), topologyVersion()); txEntry.cached(entry);
/** {@inheritDoc} */ @Override @Nullable protected IgniteInternalFuture<Boolean> addReader(long msgId, GridDhtCacheEntry cached, IgniteTxEntry entry, AffinityTopologyVersion topVer) { // Don't add local node as reader. if (entry.addReader() && !cctx.localNodeId().equals(nearNodeId)) { GridCacheContext cacheCtx = cached.context(); while (true) { try { return cached.addReader(nearNodeId, msgId, topVer); } catch (GridCacheEntryRemovedException ignore) { if (log.isDebugEnabled()) log.debug("Got removed entry when adding to DHT local transaction: " + cached); cached = cacheCtx.dht().entryExx(entry.key(), topVer); } } } return null; }
/**
 * Applies conflict (DR) TTL and expire time to the tx entry for the given key, if one exists.
 *
 * @param key Key.
 * @param ttl TTL.
 * @param expireTime Expire time.
 * @return {@code true} if tx entry exists for this key, {@code false} otherwise.
 */
boolean entryTtlDr(IgniteTxKey key, long ttl, long expireTime) {
    assert key != null;
    assert ttl >= 0;

    IgniteTxEntry txEntry = entry(key);

    if (txEntry == null)
        return false;

    txEntry.ttl(ttl);
    txEntry.conflictExpireTime(expireTime);

    // Conflict-provided values win, so any explicit expiry policy is dropped.
    txEntry.expiry(null);

    return true;
}
/**
 * Sets an expiry policy on the tx entry for the given key, if one exists.
 *
 * @param key Key.
 * @param expiryPlc Expiry policy.
 */
void entryExpiry(IgniteTxKey key, @Nullable ExpiryPolicy expiryPlc) {
    assert key != null;

    IgniteTxEntry txEntry = entry(key);

    if (txEntry == null)
        return;

    txEntry.expiry(expiryPlc);

    // Signal that expire time must be recalculated from the policy.
    txEntry.conflictExpireTime(CU.EXPIRE_TIME_CALCULATE);
}
/**
 * Checks if this transaction needs previous value for the given tx entry. Will use passed in map to store
 * required key or will create new map if passed in map is {@code null}.
 *
 * @param e TX entry.
 * @param map Map with needed preload keys.
 * @return Map if it was created.
 */
private Map<Integer, Collection<KeyCacheObject>> checkNeedRebalanceKeys(
    IgniteTxEntry e,
    Map<Integer, Collection<KeyCacheObject>> map
) {
    // Previous value is required when the caller wants it back, or when entry
    // processors / filters / serializable-read version checks must observe it.
    boolean needPrevVal = retVal
        || !F.isEmpty(e.entryProcessors())
        || !F.isEmpty(e.filters())
        || e.entryReadVersion() != null;

    if (!needPrevVal)
        return map;

    if (map == null)
        map = new HashMap<>();

    Collection<KeyCacheObject> cacheKeys = map.get(e.cacheId());

    if (cacheKeys == null) {
        cacheKeys = new ArrayList<>();

        map.put(e.cacheId(), cacheKeys);
    }

    cacheKeys.add(e.key());

    return map;
}
GridCacheContext cacheCtx = txEntry.context(); UUID nodeId = txEntry.nodeId() == null ? this.nodeId : txEntry.nodeId(); GridCacheEntryEx cached = txEntry.cached(); boolean updateNearCache = updateNearCache(cacheCtx, txEntry.key(), topVer); if (!updateNearCache && cacheCtx.isNear() && txEntry.locallyMapped()) metrics = false; if (!F.isEmpty(txEntry.entryProcessors()) || !F.isEmpty(txEntry.filters())) txEntry.cached().unswap(false); if (txEntry.op() == CREATE || txEntry.op() == UPDATE || txEntry.op() == DELETE || txEntry.op() == TRANSFORM) dhtVer = txEntry.dhtVersion(); if ((txEntry.op() == CREATE || txEntry.op() == UPDATE) && txEntry.conflictExpireTime() == CU.EXPIRE_TIME_CALCULATE) { ExpiryPolicy expiry = cacheCtx.expiryForTxEntry(txEntry); txEntry.cached().unswap(false); txEntry.ttl(CU.toTtl(duration)); GridCacheVersion explicitVer = txEntry.conflictVersion() != null ? txEntry.conflictVersion() : writeVersion();
e.unmarshal(cctx, false, cctx.deploy().globalLoader()); checkInternal(e.txKey()); GridCacheContext cacheCtx = e.context(); IgniteTxEntry existing = entry(e.txKey()); existing.op(e.op()); // Absolutely must set operation, as default is DELETE. existing.value(e.value(), e.hasWriteValue(), e.hasReadValue()); existing.entryProcessors(e.entryProcessors()); existing.ttl(e.ttl()); existing.filters(e.filters()); existing.expiry(e.expiry()); existing.conflictExpireTime(e.conflictExpireTime()); existing.conflictVersion(e.conflictVersion()); GridDhtCacheEntry cached = dhtCache.entryExx(existing.key(), topologyVersion()); existing.cached(cached); GridCacheVersion explicit = existing.explicitVersion(); existing.explicitVersion(dhtVer); return addReader(msgId, dhtCache.entryExx(existing.key()), existing, topologyVersion());
IgniteTxEntry txEntry = tx.entry(writeEntry.txKey()); GridCacheContext cacheCtx = txEntry.context(); GridCacheEntryEx cached = txEntry.cached(); if ((txEntry.op() == CREATE || txEntry.op() == UPDATE) && txEntry.conflictExpireTime() == CU.EXPIRE_TIME_CALCULATE) { if (expiry != null) { cached.unswap(true); txEntry.ttl(CU.toTtl(duration)); boolean hasFilters = !F.isEmptyOrNulls(txEntry.filters()) && !F.isAlwaysTrue(txEntry.filters()); boolean readOld = hasFilters || retVal || txEntry.op() == DELETE || txEntry.op() == TRANSFORM || tx.nearOnOriginatingNode() || tx.hasInterceptor(); boolean readThrough = !txEntry.skipStore() && (txEntry.op() == TRANSFORM || ((retVal || hasFilters) && cacheCtx.config().isLoadPreviousValue())); boolean evt = retVal || txEntry.op() == TRANSFORM; if (evt && txEntry.op() == TRANSFORM) entryProc = F.first(txEntry.entryProcessors()).get1(); final boolean keepBinary = txEntry.keepBinary(); if (retVal || txEntry.op() == TRANSFORM) {
/** * @param entry Write entry. * @param ldr Class loader. * @throws IgniteCheckedException If failed. */ public void addWrite(IgniteTxEntry entry, ClassLoader ldr) throws IgniteCheckedException { entry.unmarshal(cctx, false, ldr); GridCacheContext cacheCtx = entry.context(); try { GridDhtCacheEntry cached = cacheCtx.dht().entryExx(entry.key(), topologyVersion()); checkInternal(entry.txKey()); // Initialize cache entry. entry.cached(cached); txState.addWriteEntry(entry.txKey(), entry); addExplicit(entry); } catch (GridDhtInvalidPartitionException e) { addInvalidPartition(cacheCtx, e.partition()); } }
GridCacheEntryEx cached = txEntry.cached(); CacheObject v = txEntry.previousValue(); boolean hasPrevVal = txEntry.hasPreviousValue(); filter = txEntry.filters(); boolean invoke = txEntry.op() == TRANSFORM; !txEntry.skipStore(); resolveTaskName(), null, txEntry.keepBinary()); if (txEntry.op() == TRANSFORM) { if (computeInvoke) { txEntry.readValue(v); ret.value(cacheCtx, v, txEntry.keepBinary()); txEntry.filtersPassed(pass); txEntry.markValid(); txEntry.setAndMarkValid(txEntry.previousOperation(), cacheCtx.toCacheObject(ret.value())); txEntry.filters(CU.empty0()); txEntry.filtersSet(false);
GridCacheEntryEx entry = txEntry.cached(); GridCacheVersion ver = txEntry.explicitVersion() != null ? txEntry.explicitVersion() : xidVer; txEntry.cached(txEntry.context().cache().entryEx(txEntry.key(), topologyVersion())); GridCacheContext cacheCtx = txEntry.context(); GridCacheEntryEx cached = txEntry.cached(); txEntry.cached(cached = cacheCtx.cache().entryEx(txEntry.key(), topologyVersion())); if (updateNearCache(cacheCtx, txEntry.key(), topVer)) nearCached = cacheCtx.dht().near().peekExx(txEntry.key()); if (!F.isEmpty(txEntry.entryProcessors())) txEntry.cached().unswap(false); GridCacheVersion explicitVer = txEntry.conflictVersion(); if (txEntry.ttl() == CU.TTL_ZERO) op = DELETE; op = NOOP; else if (conflictCtx.isUseNew()) { txEntry.ttl(conflictCtx.ttl()); txEntry.conflictExpireTime(conflictCtx.expireTime()); val = txEntry.context().toCacheObject(conflictCtx.mergeValue());
boolean metrics, @Nullable GridCacheReturn ret) throws GridCacheEntryRemovedException, IgniteCheckedException { assert txEntry.op() != TRANSFORM || !F.isEmpty(txEntry.entryProcessors()) : txEntry; GridCacheContext cacheCtx = txEntry.context(); if (F.isEmpty(txEntry.entryProcessors())) { if (ret != null) ret.value(cacheCtx, txEntry.value(), txEntry.keepBinary()); return F.t(txEntry.op(), txEntry.value()); T2<GridCacheOperation, CacheObject> calcVal = txEntry.entryProcessorCalculatedValue(); final boolean keepBinary = txEntry.keepBinary(); if (txEntry.hasValue()) cacheVal = txEntry.value(); else if (txEntry.hasOldValue()) cacheVal = txEntry.oldValue(); else { cacheVal = txEntry.cached().innerGet( null, this, /*closure name */recordEvt ? F.first(txEntry.entryProcessors()).get1() : null, resolveTaskName(), null,
CacheObject val = txEntry.value(); if (txEntry.hasValue()) { if (!F.isEmpty(txEntry.entryProcessors())) val = txEntry.applyEntryProcessors(val); if (txEntry.op() != READ) ver = IgniteTxEntry.GET_ENTRY_INVALID_VER_UPDATED; else { ver = txEntry.entryReadVersion(); GridCacheEntryEx cached = txEntry.cached(); txEntry.cached(entryEx(cacheCtx, txEntry.txKey(), topVer)); assert txEntry.op() == TRANSFORM; (txEntry.op() == TRANSFORM && cctx.gridEvents().isRecordable(EVT_CACHE_OBJECT_READ)) ? F.first(txEntry.entryProcessors()) : null; getRes = txEntry.cached().innerGetVersioned( null, this, resolveTaskName(), null, txEntry.keepBinary(), null);
GridCacheContext cacheCtx = entry.context(); int part = cacheCtx.affinity().partition(entry.key()); invalidateNearEntry(cacheCtx, entry.key(), req.version()); GridCacheEntryEx cached = entry.cached(); cached = cacheCtx.cache().entryEx(entry.key(), req.topologyVersion()); !entry.skipStore() && entry.op() == TRANSFORM && entry.oldValueOnPrimary() && !entry.hasValue()) { while (true) { try { GridCacheEntryEx cached = entry.cached(); cached = cacheCtx.cache().entryEx(entry.key(), req.topologyVersion()); entry.cached(cached); val = cacheCtx.toCacheObject(cacheCtx.store().load(null, entry.key())); entry.readValue(val); log.debug("Got entry removed exception, will retry: " + entry.txKey()); entry.cached(cacheCtx.cache().entryEx(entry.key(), req.topologyVersion()));
if (!txEntry1.markPrepared() || txEntry1.explicitVersion() != null) continue; GridCacheContext cacheCtx = txEntry1.context(); GridCacheEntryEx entry1 = txEntry1.cached(); "[locNodeId=" + cctx.localNodeId() + ", entry=" + entry1 + ']'; GridCacheVersion serReadVer = txEntry1.entryReadVersion(); boolean read = serOrder != null && txEntry1.op() == READ; txEntry1.cached(cacheCtx.cache().entryEx(txEntry1.key(), tx.topologyVersion()));
txEntry.readValue(cacheVal); if (!F.isEmpty(txEntry.entryProcessors())) visibleVal = txEntry.applyEntryProcessors(visibleVal); GridCacheEntryEx e = txEntry == null ? entryEx(cacheCtx, txKey, topVer) : txEntry.cached(); assert txEntry != null; txEntry.setAndMarkValid(cacheVal); txEntry.entryReadVersion(loadVer);
GridCacheEntryEx cached = txEntry.cached(); (!F.isEmpty(txEntry.entryProcessors()) && cctx.gridEvents().isRecordable(EVT_CACHE_OBJECT_READ)) ? F.first(txEntry.entryProcessors()) : null; resolveTaskName(), null, txEntry.keepBinary(), null); resolveTaskName(), null, txEntry.keepBinary()); missed.remove(cacheKey); txEntry.setAndMarkValid(val); if (!F.isEmpty(txEntry.entryProcessors())) val = txEntry.applyEntryProcessors(val); txEntry.entryReadVersion(readVer); cached); txEntry.cached(entryEx(cacheCtx, txKey, topologyVersion()));
assert loadVer != null; e.entryReadVersion(singleRmv && val != null ? SER_READ_NOT_EMPTY_VER : loadVer); CacheObject cacheVal = cacheCtx.toCacheObject(val); if (e.op() == TRANSFORM) { GridCacheVersion ver; e.readValue(cacheVal); ver = e.cached().version(); success = isAll(e.context(), key, cacheVal, filter); e.value(cacheVal, false, false); e.op(READ);