/**
 * Wait for in progress clears that were initiated by this member.
 */
private void waitForInProgressClear() {
  RegionVersionVector rvv = getVersionVector();
  if (rvv != null) {
    synchronized (clearLock) {
      // do nothing;
      // DAN - I'm a little scared that the compiler might optimize
      // away this synchronization if we really do nothing. Hence
      // my fine log message below. This might not be necessary.
      if (logger.isDebugEnabled()) {
        logger.debug("Done waiting for clear");
      }
    }
  }
}
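// A minimal sketch (not the GemFire implementation) of why the empty-looking
// synchronized block above works as a wait: the thread doing the clear holds
// clearLock for the whole operation, so merely acquiring and releasing the
// monitor blocks the caller until the clear finishes. A monitor on a shared
// field cannot be elided by the JIT (escape analysis only removes locks that
// are provably thread-local), so the body really can be empty.
class ClearBarrierSketch {
  private final Object clearLock = new Object();

  void clear() throws InterruptedException {
    synchronized (clearLock) {
      Thread.sleep(100); // stand-in for applying the clear under the lock
    }
  }

  void waitForInProgressClear() {
    synchronized (clearLock) {
      // empty by design: returning from here proves no clear is in progress
    }
  }
}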
public void run() {
  DistributedRegion rr = (DistributedRegion) getCache().getRegion(REGION_NAME);
  rr.clear();
  long region_version = getRegionVersionForMember(rr.getVersionVector(), memberID, false);
  long region_gc_version = getRegionVersionForMember(rr.getVersionVector(), memberID, true);
  assertEquals(regionVersionForThisOp, region_version);
  assertEquals(region_version, region_gc_version);
}
};
private DistributedTombstoneOperation(RegionEventImpl rev) {
  super(rev);
  // this.regionVersion = ((DistributedRegion)rev.getRegion()).getVersionVector().getMaxTombstoneGCVersion();
  this.regionGCVersions = ((DistributedRegion) rev.getRegion()).getVersionVector().getTombstoneGCVector();
}
/**
 * Wait for in progress clears that were initiated by this member.
 */
private void waitForInProgressClear() {
  RegionVersionVector rvv = getVersionVector();
  if (rvv != null) {
    synchronized (clearLock) {
      // do nothing;
      // DAN - I'm a little scared that the compiler might optimize
      // away this synchronization if we really do nothing. Hence
      // my fine log message below. This might not be necessary.
      getLogWriterI18n().fine("Done waiting for clear");
    }
  }
}
/**
 * Releases the locks obtained in obtainWriteLocksForClear.
 * @param participants
 */
private void releaseWriteLocksForClear(RegionEventImpl regionEvent,
    Set<InternalDistributedMember> participants) {
  getVersionVector().unlockForClear(getMyId());
  DistributedClearOperation.releaseLocks(regionEvent, participants);
}
public void run() {
  GiiCallback cb = (GiiCallback) InitialImageOperation.getGIITestHookForCheckingPurpose(
      InitialImageOperation.GIITestHookType.BeforeGetInitialImage);
  synchronized (cb.lockObject) {
    cb.lockObject.notify();
  }
  WaitCriterion wc = new WaitCriterion() {
    public boolean done() {
      return getCache().getRegion(regionName) != null;
    }
    public String description() {
      return "waiting for region " + regionName + " to initialize";
    }
  };
  waitForCriterion(wc, 20000, 1000, true);
  // ensure that the RVV has recorded the event
  DistributedRegion r = (DistributedRegion) getCache().getRegion(regionName);
  if (!r.getVersionVector().contains(Xid, 1)) {
    getLogWriter().info("r's version vector is " + r.getVersionVector().fullToString());
    ((LocalRegion) r).dumpBackingMap();
  }
  assertTrue(r.containsKey("keyFromX"));
  // if the test fails here then the op received from X was not correctly
  // picked up and recorded in the RVV
  assertTrue(r.getVersionVector().contains(Xid, 1));
}
});
public boolean goWithFullGII(DistributedRegion rgn, RegionVersionVector requesterRVV) {
  if (getSender().getVersionObject().compareTo(Version.GFE_80) < 0) {
    // pre-8.0 could not handle a delta-GII
    return true;
  }
  if (!rgn.getDataPolicy().withPersistence()) {
    // non-persistent regions always do full GII
    if (logger.isDebugEnabled()) {
      logger.debug("Region {} is not a persistent region, do full GII", rgn.getFullPath());
    }
    return true;
  }
  if (!rgn.getVersionVector().isRVVGCDominatedBy(requesterRVV)) {
    if (logger.isDebugEnabled()) {
      logger.debug("Region {}'s local RVVGC is not dominated by remote RVV={}, do full GII",
          rgn.getFullPath(), requesterRVV);
    }
    return true;
  }
  // TODO GGG: verify GII after UpgradeDiskStore
  return false;
}
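// Hypothetical sketch of the dominance test behind the delta-GII decision
// (the class and method names here are illustrative, not Geode's API).
// Vector A is dominated by vector B when B has seen at least as high a
// version as A for every member. If the provider's tombstone-GC vector is
// NOT dominated by the requester's RVV, the requester may still need
// operations the provider has already garbage-collected, so only a full
// GII is safe.
import java.util.HashMap;
import java.util.Map;

class VersionVectorSketch {
  final Map<String, Long> versions = new HashMap<>();

  boolean isDominatedBy(VersionVectorSketch other) {
    for (Map.Entry<String, Long> e : versions.entrySet()) {
      if (other.versions.getOrDefault(e.getKey(), 0L) < e.getValue()) {
        return false; // this vector has seen an operation the other has not
      }
    }
    return true;
  }
}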
protected void saveReceivedRVV(RegionVersionVector rvv) {
  assert rvv != null;
  // Make sure the RVV is at least as current as
  // the provider's was when the GII began. This ensures that a
  // concurrent clear() doesn't prevent the new region's RVV from being
  // initialized and that any vector entries that are no longer represented
  // by stamps in the region are not lost
  if (logger.isTraceEnabled(LogMarker.GII)) {
    logger.trace(LogMarker.GII, "Applying received version vector {} to {}",
        rvv.fullToString(), region.getName());
  }
  // TODO - RVV - Our current RVV might reflect some operations
  // that are concurrent updates. We want to keep those updates. However
  // it might also reflect things that we recovered from disk that we are going
  // to remove. We'll need to remove those from the RVV somehow.
  region.getVersionVector().recordVersions(rvv);
  if (region.getDataPolicy().withPersistence()) {
    region.getDiskRegion().writeRVV(region, false);
    region.getDiskRegion().writeRVVGC(region);
  }
  if (logger.isTraceEnabled(LogMarker.GII)) {
    logger.trace(LogMarker.GII, "version vector is now {}",
        region.getVersionVector().fullToString());
  }
}
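// Conceptually, recordVersions above amounts to a per-member max-merge:
// afterwards the local vector is at least as current as the provider's was
// when the GII began. A minimal sketch, assuming String member ids rather
// than Geode's VersionSource type:
import java.util.Map;

class RvvMergeSketch {
  // merge every member's received version into the local vector, keeping the max
  static void recordVersions(Map<String, Long> local, Map<String, Long> received) {
    for (Map.Entry<String, Long> e : received.entrySet()) {
      local.merge(e.getKey(), e.getValue(), Math::max);
    }
  }
}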
int fid = flowControl.getId();
Map<VersionSource, Long> gcVersions = null;
if (this.last && rgn.getVersionVector() != null) {
  gcVersions = rgn.getVersionVector().getMemberToGCVersion();
@Override
protected void process(DistributionManager dm) {
  ReplyException exception = null;
  try {
    DistributedRegion region = DistributedClearOperation.regionUnlocked(getSender(), regionPath);
    if (region != null && region.getVersionVector() != null) {
      region.getVersionVector().unlockForClear(getSender());
    }
  } catch (VirtualMachineError e) {
    SystemFailure.initiateFailure(e);
    throw e;
  } catch (Throwable t) {
    SystemFailure.checkFailure();
    exception = new ReplyException(t);
  } finally {
    ReplyMessage replyMsg = new ReplyMessage();
    replyMsg.setProcessorId(processorId);
    replyMsg.setRecipient(getSender());
    if (exception != null) {
      replyMsg.setException(exception);
    }
    if (logger.isDebugEnabled()) {
      logger.debug("Received {}, replying with {}", this, replyMsg);
    }
    dm.putOutgoing(replyMsg);
  }
}
/**
 * Distribute tombstone garbage-collection information to all peers with storage.
 */
protected EventID distributeTombstoneGC(Set<Object> keysRemoved) {
  this.getCachePerfStats().incTombstoneGCCount();
  EventID eventId = new EventID(getSystem());
  DistributedTombstoneOperation gc = DistributedTombstoneOperation.gc(this, eventId);
  gc.distribute();
  notifyClientsOfTombstoneGC(getVersionVector().getTombstoneGCVector(), keysRemoved, eventId, null);
  return eventId;
}
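// Illustrative sketch (hypothetical types, not Geode's) of what a peer can do
// with the distributed tombstone-GC vector: a tombstone may be reaped once its
// version is at or below the collected version recorded for the member that
// created it, since every member is then known to have collected past it.
import java.util.Collection;
import java.util.Iterator;
import java.util.Map;

class TombstoneSweepSketch {
  static final class Tombstone {
    final String memberId;
    final long version;
    Tombstone(String memberId, long version) {
      this.memberId = memberId;
      this.version = version;
    }
  }

  static void sweep(Collection<Tombstone> tombstones, Map<String, Long> gcVector) {
    for (Iterator<Tombstone> it = tombstones.iterator(); it.hasNext();) {
      Tombstone t = it.next();
      Long collected = gcVector.get(t.memberId);
      if (collected != null && t.version <= collected) {
        it.remove(); // collected up to this version on all members
      }
    }
  }
}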
/** * Does the "put" of one entry for a "putall" operation. Note it calls back * to AbstractUpdateOperation.UpdateMessage#basicOperationOnRegion * * @param entry * the entry being put * @param rgn * the region the entry is put in */ public void doEntryPut(PutAllEntryData entry, DistributedRegion rgn, boolean requiresRegionContext, boolean fetchFromHDFS, boolean isPutDML) { EntryEventImpl ev = PutAllMessage.createEntryEvent(entry, getSender(), this.context, rgn, requiresRegionContext, this.possibleDuplicate, this.needsRouting, this.callbackArg, true, skipCallbacks); ev.setFetchFromHDFS(fetchFromHDFS); ev.setPutDML(isPutDML); // we don't need to set old value here, because the msg is from remote. local old value will get from next step try { super.basicOperateOnRegion(ev, rgn); } finally { if (ev.getVersionTag() != null && !ev.getVersionTag().isRecorded()) { if (rgn.getVersionVector() != null) { rgn.getVersionVector().recordVersion(getSender(), ev.getVersionTag()); } } ev.release(); } }
/**
 * Pause local operations so that a clear() can be performed and flush
 * comm channels to the given member.
 */
public void lockLocallyForClear(DM dm, InternalDistributedMember locker) {
  RegionVersionVector rvv = getVersionVector();
  if (rvv != null) {
    // block new operations from being applied to the region map
    rvv.lockForClear(getFullPath(), dm, locker);
    // Check for region destroyed after we have locked, to make sure
    // we don't continue a clear if the region has been destroyed.
    checkReadiness();
    // wait for current operations to reach the other members
    if (!locker.equals(dm.getDistributionManagerId())) {
      Set<InternalDistributedMember> mbrs = getDistributionAdvisor().adviseCacheOp();
      StateFlushOperation.flushTo(mbrs, this);
    }
  }
}
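// Sketch of the locking discipline lockForClear appears to rely on (an
// assumption about the pattern, not Geode source): ordinary cache operations
// hold a shared lock around their region-map update, so a clear taking the
// exclusive lock both blocks new operations and waits out in-flight ones.
import java.util.concurrent.locks.ReentrantReadWriteLock;

class ClearLockSketch {
  private final ReentrantReadWriteLock rwLock = new ReentrantReadWriteLock();

  void applyOperation(Runnable op) {
    rwLock.readLock().lock(); // many operations may proceed concurrently
    try {
      op.run();
    } finally {
      rwLock.readLock().unlock();
    }
  }

  void lockForClear() {
    rwLock.writeLock().lock(); // blocks until in-flight operations drain
  }

  void unlockForClear() {
    rwLock.writeLock().unlock();
  }
}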
protected void saveReceivedRVV(RegionVersionVector rvv) {
  assert rvv != null;
  // Make sure the RVV is at least as current as
  // the provider's was when the GII began. This ensures that a
  // concurrent clear() doesn't prevent the new region's RVV from being
  // initialized and that any vector entries that are no longer represented
  // by stamps in the region are not lost
  if (TRACE_GII) {
    region.getLogWriterI18n().info(LocalizedStrings.DEBUG,
        "Applying received version vector " + rvv.fullToString() + " to " + region.getName());
  }
  // TODO - RVV - Our current RVV might reflect some operations
  // that are concurrent updates. We want to keep those updates. However
  // it might also reflect things that we recovered from disk that we are going
  // to remove. We'll need to remove those from the RVV somehow.
  region.getVersionVector().recordVersions(rvv, null);
  if (region.getDataPolicy().withPersistence()) {
    region.getDiskRegion().writeRVV(region, false);
    region.getDiskRegion().writeRVVGC(region);
  }
  if (TRACE_GII) {
    region.getLogWriterI18n().info(LocalizedStrings.DEBUG,
        "version vector is now " + region.getVersionVector().fullToString());
  }
}
public boolean goWithFullGII(DistributedRegion rgn, RegionVersionVector requesterRVV) {
  LogWriterI18n logger = rgn.getLogWriterI18n();
  if (!rgn.getDataPolicy().withPersistence()) {
    // non-persistent regions always do full GII
    if (logger.fineEnabled()) {
      logger.fine("Region " + rgn.getFullPath() + " is not a persistent region, do full GII");
    }
    return true;
  }
  if (!rgn.getVersionVector().isRVVGCDominatedBy(requesterRVV)) {
    if (logger.fineEnabled()) {
      logger.fine("Region " + rgn.getFullPath()
          + "'s local RVVGC is not dominated by remote RVV=" + requesterRVV + ", do full GII");
    }
    return true;
  }
  // TODO GGG: verify GII after UpgradeDiskStore
  return false;
}
getLogWriter().info("version vector is now " + dr.getVersionVector().fullToString()); assertTrue("should hold entry Object3 now", dr.containsKey("Object3")); return true;