/**
 * Tells whether WAL logging is currently disabled for the given cache group.
 *
 * @param grpId Group id.
 * @return {@code True} if the group exists and its WAL is disabled, {@code false} otherwise.
 */
public boolean isDisabled(int grpId) {
    CacheGroupContext grp = cctx.cache().cacheGroup(grpId);

    // Unknown group: treat WAL as not disabled.
    if (grp == null)
        return false;

    return !grp.walEnabled();
}
/**
 * Forces partition state directly, bypassing normal state-transition flow.
 * For testing purposes only.
 *
 * @param toState State to set.
 */
public void setState(GridDhtPartitionState toState) {
    if (grp.persistenceEnabled() && grp.walEnabled()) {
        // Serialize state mutation and WAL logging so the logged record matches the stored state.
        synchronized (this) {
            long state0 = state.get();

            // NOTE(review): CAS result is ignored; under 'synchronized' it is expected to succeed — confirm no concurrent unsynchronized writers.
            this.state.compareAndSet(state0, setPartState(state0, toState));

            try {
                // Record the transition for crash recovery.
                ctx.wal().log(new PartitionMetaStateRecord(grp.groupId(), id, toState, updateCounter()));
            }
            catch (IgniteCheckedException e) {
                // Best-effort logging: a WAL failure here is reported but does not fail the test path.
                U.error(log, "Error while writing to log", e);
            }
        }
    }
    else
        // Non-persistent or WAL-disabled group: restore state without WAL involvement.
        restoreState(toState);
}
/**
 * Invalidates page memory for given partition. Destroys partition store.
 * <b>NOTE:</b> This method can be invoked only within checkpoint lock or checkpointer thread.
 *
 * @param grpId Group ID.
 * @param partId Partition ID.
 * @throws IgniteCheckedException If destroy has failed.
 */
public void destroyPartitionStore(int grpId, int partId) throws IgniteCheckedException {
    PageMemoryEx memEx = (PageMemoryEx)grp.dataRegion().pageMemory();

    // Invalidate in-memory pages first; the returned tag versions the destroy.
    int invalidateTag = memEx.invalidate(grp.groupId(), partId);

    // Record the destroy only when WAL logging is active for the group.
    if (grp.walEnabled())
        ctx.wal().log(new PartitionDestroyRecord(grp.groupId(), partId));

    ctx.pageStore().onPartitionDestroyed(grpId, partId, invalidateTag);
}
// Diagnostic trace of the inputs driving the WAL state change decision for this group.
log.debug("Prepare change WAL state, grp=" + grp.cacheOrGroupName() + ", grpId=" + grp.groupId() + ", hasOwning=" + hasOwning + ", hasMoving=" + hasMoving + ", WALState=" + grp.walEnabled() + ", parts=" + parts);
/**
 * Generates the next partition update counter value and, on the first update,
 * logs partition state to the WAL for crash recovery.
 *
 * @param cacheId ID of cache initiated counter update.
 * @param topVer Topology version for current operation.
 * @param primary {@code True} if the update is initiated on the primary node.
 * @param primaryCntr Counter value assigned on the primary, or {@code null} if not available.
 * @return Next update index.
 */
public long nextUpdateCounter(int cacheId, AffinityTopologyVersion topVer, boolean primary, @Nullable Long primaryCntr) {
    long nextCntr = store.nextUpdateCounter();

    // Shared groups track per-cache counters; prefer the primary-assigned value when present.
    if (grp.sharedGroup())
        grp.onPartitionCounterUpdate(cacheId, id, primaryCntr != null ? primaryCntr : nextCntr, topVer, primary);

    // This is first update in partition, we should log partition state information for further crash recovery.
    if (nextCntr == 1) {
        if (grp.persistenceEnabled() && grp.walEnabled())
            try {
                ctx.wal().log(new PartitionMetaStateRecord(grp.groupId(), id, state(), 0));
            }
            catch (IgniteCheckedException e) {
                U.error(log, "Failed to log partition state snapshot to WAL.", e);

                // Losing recovery info is unrecoverable for this node: escalate to the failure handler.
                ctx.kernalContext().failure().process(new FailureContext(FailureType.CRITICAL_ERROR, e));
            }
    }

    return nextCntr;
}
// Persistent + WAL-enabled groups must serialize state reads/transitions with WAL logging.
// NOTE(review): snippet is truncated — the synchronized block continues past this view.
if (grp.persistenceEnabled() && grp.walEnabled()) { synchronized (this) { GridDhtPartitionState prevState = state();
/** * @param op Update operation. * @param val Write value. * @param writeVer Write version. * @param expireTime Expire time. * @param updCntr Update counter. */ protected void logUpdate(GridCacheOperation op, CacheObject val, GridCacheVersion writeVer, long expireTime, long updCntr) throws IgniteCheckedException { // We log individual updates only in ATOMIC cache. assert cctx.atomic(); try { if (cctx.group().persistenceEnabled() && cctx.group().walEnabled()) cctx.shared().wal().log(new DataRecord(new DataEntry( cctx.cacheId(), key, val, op, null, writeVer, expireTime, partition(), updCntr))); } catch (StorageException e) { throw new IgniteCheckedException("Failed to log ATOMIC cache update [key=" + key + ", op=" + op + ", val=" + val + ']', e); } }
/** * @throws Exception If failed. */ @Test public void testLocalAndGlobalWalStateInterdependence() throws Exception { Assume.assumeFalse("https://issues.apache.org/jira/browse/IGNITE-10421", MvccFeatureChecker.forcedMvcc()); Ignite ignite = startGrids(3); ignite.cluster().active(true); IgniteCache<Integer, Integer> cache = ignite.cache(DEFAULT_CACHE_NAME); for (int k = 0; k < getKeysCount(); k++) cache.put(k, k); IgniteEx newIgnite = startGrid(3); newIgnite.cluster().setBaselineTopology(ignite.cluster().nodes()); awaitExchange(newIgnite); CacheGroupContext grpCtx = newIgnite.cachex(DEFAULT_CACHE_NAME).context().group(); assertFalse(grpCtx.walEnabled()); ignite.cluster().disableWal(DEFAULT_CACHE_NAME); for (Ignite g : G.allGrids()) g.cache(DEFAULT_CACHE_NAME).rebalance(); awaitPartitionMapExchange(); assertFalse(grpCtx.walEnabled()); // WAL is globally disabled ignite.cluster().enableWal(DEFAULT_CACHE_NAME); assertTrue(grpCtx.walEnabled()); }
/** * @throws Exception If failed. */ @Test public void testWalNotDisabledAfterShrinkingBaselineTopology() throws Exception { Ignite ignite = startGrids(4); ignite.cluster().active(true); IgniteCache<Integer, Integer> cache = ignite.cache(DEFAULT_CACHE_NAME); int keysCnt = getKeysCount(); for (int k = 0; k < keysCnt; k++) cache.put(k, k); for (Ignite g : G.allGrids()) { CacheGroupContext grpCtx = ((IgniteEx)g).cachex(DEFAULT_CACHE_NAME).context().group(); assertTrue(grpCtx.walEnabled()); } stopGrid(2); ignite.cluster().setBaselineTopology(5); // Await fully exchange complete. awaitExchange((IgniteEx)ignite); for (Ignite g : G.allGrids()) { CacheGroupContext grpCtx = ((IgniteEx)g).cachex(DEFAULT_CACHE_NAME).context().group(); assertTrue(grpCtx.walEnabled()); g.cache(DEFAULT_CACHE_NAME).rebalance(); } awaitPartitionMapExchange(); for (Ignite g : G.allGrids()) { CacheGroupContext grpCtx = ((IgniteEx)g).cachex(DEFAULT_CACHE_NAME).context().group(); assertTrue(grpCtx.walEnabled()); } }
// MVCC path: log the data entry only for persistent, WAL-enabled groups.
// NOTE(review): statement is truncated — the MvccDataEntry constructor arguments continue past this view.
if (cctx.group().persistenceEnabled() && cctx.group().walEnabled()) logPtr = cctx.shared().wal().log(new MvccDataRecord(new MvccDataEntry( cctx.cacheId(),
// NOTE(review): 'grpCtx' is declared twice here; if these statements share one scope the second
// declaration will not compile — presumably two separate test phases (WAL disabled, then enabled).
// Verify against the enclosing test; the second declaration may need to become a plain assignment.
CacheGroupContext grpCtx = grid(nodeIdx).cachex(REPL_CACHE).context().group(); assertFalse(grpCtx.walEnabled()); CacheGroupContext grpCtx = grid(nodeIdx).cachex(REPL_CACHE).context().group(); assertTrue(grpCtx.walEnabled());
// Transactional update: log to WAL only inside a tx, for persistent WAL-enabled groups.
updateCntr0 = updateCntr; if (tx != null && cctx.group().persistenceEnabled() && cctx.group().walEnabled()) logPtr = logTxUpdate(tx, val, expireTime, updateCntr0);
// Transactional removal: null value and zero expire time mark the entry as removed in the WAL.
updateCntr0 = updateCntr; if (tx != null && cctx.group().persistenceEnabled() && cctx.group().walEnabled()) logPtr = logTxUpdate(tx, null, 0, updateCntr0);
// Skip WAL logging for near caches, non-persistent groups, disabled WAL, NOOP/RELOAD ops,
// and plain reads unless snapshot read-logging is explicitly required.
// NOTE(review): truncated — the body following 'if (dataEntries == null)' is outside this view.
if (!near() && cacheCtx.group().persistenceEnabled() && cacheCtx.group().walEnabled() && op != NOOP && op != RELOAD && (op != READ || cctx.snapshot().needTxReadLogging())) { if (dataEntries == null)
// MVCC removal: zero value/expire arguments denote a remove; logged only when WAL is active.
if (cctx.group().persistenceEnabled() && cctx.group().walEnabled()) logPtr = logMvccUpdate(tx, null, 0, 0L, mvccVer);
// Delegate MVCC update logging to the entry; guarded by persistence + WAL state.
if (cctx.group().persistenceEnabled() && cctx.group().walEnabled()) entry.logMvccUpdate(tx, null, 0, 0, mvccVer);
// Compute once whether this entry's updates must go to the WAL: near-cache entries never log.
checkObsolete(); boolean walEnabled = !cctx.isNear() && cctx.group().persistenceEnabled() && cctx.group().walEnabled();
// MVCC data record is written only for persistent, WAL-enabled groups.
// NOTE(review): truncated — the MvccDataEntry constructor arguments continue past this view.
if (cctx.group().persistenceEnabled() && cctx.group().walEnabled()) { logPtr = cctx.shared().wal().log(new MvccDataRecord(new MvccDataEntry( cctx.cacheId(),
// Same WAL guard as other MVCC paths: persistence + WAL enabled for the group.
// NOTE(review): truncated — the MvccDataEntry constructor arguments continue past this view.
if (cctx.group().persistenceEnabled() && cctx.group().walEnabled()) logPtr = cctx.shared().wal().log(new MvccDataRecord(new MvccDataEntry( cctx.cacheId(),
// Tx read logging: only for persistent WAL-enabled groups, and only when snapshots require it.
// NOTE(review): truncated — the DataEntry constructor arguments continue past this view.
CacheGroupContext grp = cacheCtx.group(); if (grp.persistenceEnabled() && grp.walEnabled() && cctx.snapshot().needTxReadLogging()) { ptr = cctx.wal().log(new DataRecord(new DataEntry(