@Override
protected void basicDestroy(final EntryEventImpl event,
    final boolean cacheWrite, Object expectedOldValue)
    throws EntryNotFoundException, CacheWriterException, TimeoutException {
  super.basicDestroy(event, cacheWrite, expectedOldValue);
  // release any retained reference held for a gateway sender event
  GatewaySenderEventImpl.release(event.getRawOldValue());
}
/**
 * Returns true if no node other than this one hosts an initialized
 * replicate of this region.
 */
public final boolean noInitializedReplicate() {
  return getCacheDistributionAdvisor().accept(hasReplicate, null);
}
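// The following is a minimal, self-contained sketch (not GemFire code) of
// the advisor idiom used above: evaluate a predicate over the profiles of
// the other members to answer a cluster-wide question. PeerProfile and its
// fields are hypothetical stand-ins for the real advisor profiles.
import java.util.List;
import java.util.function.Predicate;

final class ReplicateCheckSketch {
  static final class PeerProfile {
    final boolean initialized;
    final boolean replicated;
    PeerProfile(boolean initialized, boolean replicated) {
      this.initialized = initialized;
      this.replicated = replicated;
    }
  }

  // mirrors the spirit of advisor.accept(hasReplicate, null): scan remote
  // profiles and report whether any initialized replicate exists
  static boolean noInitializedReplicate(List<PeerProfile> peers) {
    Predicate<PeerProfile> hasReplicate = p -> p.initialized && p.replicated;
    return peers.stream().noneMatch(hasReplicate);
  }

  public static void main(String[] args) {
    List<PeerProfile> peers = List.of(
        new PeerProfile(true, false),   // initialized, but not a replicate
        new PeerProfile(false, true));  // a replicate, but still initializing
    System.out.println(noInitializedReplicate(peers)); // true
  }
}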
/**
 * {@inheritDoc}
 */
@Override
public DistributionAdvisor getDistributionAdvisor() {
  return this.pbr != null ? this.pbr.getDistributionAdvisor()
      : this.dr.getDistributionAdvisor();
}
/**
 * Test hook for bug 48578. Returns true if this region has a net loader,
 * false otherwise.
 */
public boolean hasNetLoader() {
  return this.hasNetLoader(getCacheDistributionAdvisor());
}
}
@Override
public void initialCriticalMembers(boolean localMemoryIsCritical,
    Set<InternalDistributedMember> criticalMembers) {
  Set<InternalDistributedMember> others =
      getCacheDistributionAdvisor().adviseGeneric();
  for (InternalDistributedMember idm : criticalMembers) {
    if (others.contains(idm)) {
      setMemoryThresholdReachedCounterTrue(idm);
    }
  }
}
@Override
protected void checkForDataStoreAvailability(DistributedRegion region,
    Set<InternalDistributedMember> recipients) {
  if (region.getCache().isGFXDSystem()) {
    if (recipients.isEmpty() && !region.isUsedForMetaRegion()
        && !region.isUsedForPartitionedRegionAdmin()
        && !region.isUsedForPartitionedRegionBucket()
        && !region.getDataPolicy().withStorage()) {
      throw new NoDataStoreAvailableException(LocalizedStrings
          .DistributedRegion_NO_DATA_STORE_FOUND_FOR_DISTRIBUTION
          .toLocalizedString(region));
    }
  }
}
final long beginMapTime = this.timeStatsEnabled
    ? XPLAINUtil.recordTiming(-1) : 0;
final DataPolicy policy = this.dr.getDataPolicy();
final CacheDistributionAdvisor advisor = this.dr
    .getCacheDistributionAdvisor();
Set<InternalDistributedMember> replicates = null;
InternalDistributedMember member = null;
if (policy.withReplication() || policy.withPreloaded()) {
  // this member hosts the data; pick a random replicate as the peer
  member = this.dr.getRandomReplicate();
}
if (internalAfterReceivedRequestImage != null
    && internalAfterReceivedRequestImage.getRegionName().equals(rgn.getName())) {
  internalAfterReceivedRequestImage.run();
}
if (testLatch != null && rgn.getFullPath().equals(TEST_REGION_NAME)) {
  testLatchWaiting = true;
  try {
    testLatch.await();
  } catch (InterruptedException ie) {
    Thread.currentThread().interrupt();
  } finally {
    testLatchWaiting = false;
  }
}
if (this.versionVector != null) {
  if (this.versionVector.isForSynchronization()
      && !rgn.getConcurrencyChecksEnabled()) {
    if (TRACE_GII) {
      logger.info(LocalizedStrings.DEBUG,
          "ignoring synchronization request as this region has no version vector");
    }
    replyNoData(dm, true, rgn.getFailedEvents(sender));
    sendFailureMessage = false;
    return;
  }
  if (TRACE_GII) {
    logger.info(LocalizedStrings.DEBUG,
        "checking requester's version vector against region's ("
            + rgn.getVersionVector().fullToString() + ")");
  }
  if (!rgn.getVersionVector().isNewerThanOrCanFillExceptionsFor(this.versionVector)) {
    // the requester already has everything this member has seen
    replyNoData(dm, true, rgn.getFailedEvents(sender));
    sendFailureMessage = false;
    return;
  }
}
if (TRACE_GII) {
  logger.info(LocalizedStrings.DEBUG,
      "supplying initial image for " + rgn.getFullPath());
}
final ImageState imgState = rgn.getImageState();
boolean markedOngoingGII = false;
try {
  RegionVersionHolder rvh = rgn.getVersionVector()
      .getHolderForMember(this.lostMemberVersionID);
  if (rvh != null) {
    holderToSync = rvh.clone();
  }
} finally {
  if (markedOngoingGII) {
    imgState.setInRecovery(false);
  }
}
private final InternalDistributedMember pickTheTargetForGet(
    final LocalRegion r, final String opStr) {
  final InternalDistributedMember target;
  if (r.getScope().isLocal()
      || (r.getDataPolicy().withReplication()
          && (r.basicGetLoader() != null || !r.hasNetLoader(
              ((DistributedRegion)r).getCacheDistributionAdvisor())))) {
    target = this.self;
  }
  else {
    // SH locks for READ_COMMITTED will be zero-duration locks so no need
    // for a TXState
    if (getLockingPolicy().readCanStartTX()) {
      addAffectedRegion(r);
    }
    target = ((DistributedRegion)r).getRandomReplicate();
  }
  if (LOG_FINEST) {
    final LogWriterI18n logger = r.getLogWriterI18n();
    logger.info(LocalizedStrings.DEBUG, "Selected member for TX " + opStr
        + " operation in region " + r.getFullPath() + " on " + target
        + " for " + this.txId.shortToString());
  }
  return target;
}
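// Hedged sketch (illustration only, simplified stand-in types) of the
// read-target selection above: serve the get locally when the region is
// local-scoped or is a replicate that can load the value itself;
// otherwise route to a randomly chosen replicate.
import java.util.List;
import java.util.concurrent.ThreadLocalRandom;

final class GetTargetSketch {
  static String pickTarget(boolean scopeIsLocal, boolean replicated,
      boolean canLoadLocally, String self, List<String> replicates) {
    if (scopeIsLocal || (replicated && canLoadLocally)) {
      return self; // the local member can satisfy the read
    }
    // route to a random replicate, as getRandomReplicate() does
    return replicates.isEmpty() ? null
        : replicates.get(ThreadLocalRandom.current().nextInt(replicates.size()));
  }

  public static void main(String[] args) {
    System.out.println(pickTarget(false, false, false, "self",
        List.of("memberA", "memberB")));
  }
}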
public ResultCollector executeFunction(
    final DistributedRegionFunctionExecutor execution, final Function function,
    final Object args, final ResultCollector rc, final Set filter,
    final ServerToClientFunctionResultSender sender) {
  final InternalDistributedMember target;
  final TXStateInterface tx = getTXState();
  if (tx != null) {
    // flush any pending transactional ops before routing the function
    tx.flushPendingOps(getDistributionManager());
  }
  if (this.getAttributes().getDataPolicy().withReplication()
      || this.getAttributes().getDataPolicy().withPreloaded()) {
    // this member has the data; execute locally
    final Set<InternalDistributedMember> singleMember = Collections
        .singleton(getMyId());
    execution.validateExecution(function, singleMember);
    execution.setExecutionNodes(singleMember);
    return executeLocally(execution, function, args, 0, rc, filter, sender, tx);
  }
  // otherwise route to a random replicate
  target = getRandomReplicate();
  if (target == null) {
    throw new NoMemberFoundException(LocalizedStrings
        .DistributedRegion_NO_REPLICATED_REGION_FOUND_FOR_EXECUTING_FUNCTION_0
        .toLocalizedString(function.getId()));
  }
  final LocalResultCollector<?, ?> localRC = execution
      .getLocalResultCollector(function, rc);
  return executeOnReplicate(execution, function, args, localRC, filter,
      target, tx);
}
dlock = this.getDistributedLockIfGlobal(event.getKey());
if (getLogWriterI18n().finerEnabled()) {
  getLogWriterI18n().finer("virtualPut invoked for event " + event);
}
if (!hasSeenEvent(event)) {
  if (this.requiresOneHopForMissingEntry(event)) {
    // there is no local entry (or no version tag can be generated here),
    // so a one-hop message may be needed to obtain a version tag
    RegionEntry re = getRegionEntry(event.getKey());
    if (re == null /*|| re.isTombstone()*/ || !this.generateVersionTag) {
      if (event.getPutAllOperation() == null || this.dataPolicy.withStorage()) {
        // putAll sends a single one-hop for empty regions; for other
        // missing entries distribute a remote put to obtain a version tag
        boolean didDistribute = RemotePutMessage.distribute(event,
            lastModified, false, false, expectedOldValue, requireOldValue,
            !this.generateVersionTag);
        if (didDistribute) {
          if (getLogWriterI18n().fineEnabled()) {
            getLogWriterI18n().fine("Event after remotePut operation: " + event);
          }
        }
        else if (getLogWriterI18n().finerEnabled()) {
          getLogWriterI18n().finer("Unable to perform one-hop messaging");
        }
      }
    }
  }
  return super.virtualPut(event, ifNew, ifOld, expectedOldValue,
      requireOldValue, lastModified, overwriteDestroyed);
}
else {
  // this cache has already applied the event; distribute the update and
  // invoke callbacks locally
  if (getCache().getLoggerI18n().fineEnabled()) {
    getCache().getLoggerI18n().fine(
        "DR.virtualPut: this cache has already seen this event " + event);
  }
  distributeUpdate(event, lastModified);
  event.invokeCallbacks(this, true, true);
  return true;
}
@Override
protected boolean operateOnRegion(CacheEvent event, DistributionManager dm)
    throws EntryNotFoundException {
  DistributedRegion region = (DistributedRegion)event.getRegion();
  switch (this.clearOp) {
    case OP_CLEAR:
      region.clearRegionLocally((RegionEventImpl)event, false, this.rvv);
      region.notifyBridgeClients(event);
      this.appliedOperation = true;
      break;
    case OP_LOCK_FOR_CLEAR:
      if (region.getDataPolicy().withStorage()) {
        DistributedClearOperation.regionLocked(this.getSender(),
            region.getFullPath(), region);
        region.lockLocallyForClear(dm, this.getSender());
      }
      this.appliedOperation = true;
      break;
  }
  return true;
}
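// A simplified sketch of the two-phase clear dispatch above: peers first
// receive a LOCK_FOR_CLEAR message that blocks local operations, then a
// CLEAR message that empties the map and releases the block. All types
// below are illustrative stand-ins, not the real messaging classes.
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Semaphore;

final class ClearOpSketch {
  enum ClearOp { OP_LOCK_FOR_CLEAR, OP_CLEAR }

  static final Map<String, String> regionData = new ConcurrentHashMap<>();
  // a Semaphore (unlike ReentrantLock) may be released by a different
  // thread than the acquirer, which suits lock/clear messages that can
  // arrive on different processor threads
  static final Semaphore clearPermit = new Semaphore(1);

  static void operateOnRegion(ClearOp op) {
    switch (op) {
      case OP_LOCK_FOR_CLEAR:
        clearPermit.acquireUninterruptibly(); // phase 1: block local ops
        break;
      case OP_CLEAR:
        regionData.clear();    // phase 2: apply the clear itself
        clearPermit.release(); // and let operations resume
        break;
    }
  }
}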
protected void saveReceivedRVV(RegionVersionVector rvv) {
  assert rvv != null;
  // Make sure the RVV is at least as current as the provider's was when
  // the GII began. This ensures that a concurrent clear() doesn't prevent
  // the new region's RVV from being initialized, and that any vector
  // entries that are no longer represented by stamps in the region are
  // not lost.
  if (TRACE_GII) {
    region.getLogWriterI18n().info(LocalizedStrings.DEBUG,
        "Applying received version vector " + rvv.fullToString() + " to "
            + region.getName());
  }
  // TODO - RVV - Our current RVV might reflect some operations that are
  // concurrent updates. We want to keep those updates. However, it might
  // also reflect things that we recovered from disk that we are going to
  // remove. We'll need to remove those from the RVV somehow.
  region.getVersionVector().recordVersions(rvv, null);
  if (region.getDataPolicy().withPersistence()) {
    region.getDiskRegion().writeRVV(region, false);
    region.getDiskRegion().writeRVVGC(region);
  }
  if (TRACE_GII) {
    region.getLogWriterI18n().info(LocalizedStrings.DEBUG,
        "version vector is now " + region.getVersionVector().fullToString());
  }
}
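// Minimal sketch of the "record the provider's version vector into ours"
// step: per member, keep the maximum version seen on either side. A real
// RegionVersionVector also tracks exception ranges (gaps); this
// illustration keeps only the per-member maximum.
import java.util.HashMap;
import java.util.Map;

final class RvvMergeSketch {
  static void recordVersions(Map<String, Long> local,
      Map<String, Long> received) {
    for (Map.Entry<String, Long> e : received.entrySet()) {
      // never regress a member's recorded version
      local.merge(e.getKey(), e.getValue(), Math::max);
    }
  }

  public static void main(String[] args) {
    Map<String, Long> local = new HashMap<>(Map.of("m1", 5L));
    recordVersions(local, Map.of("m1", 9L, "m2", 3L));
    System.out.println(local); // {m1=9, m2=3}
  }
}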
public ResultCollector executeFunction(
    final DistributedRegionFunctionExecutor execution, final Function function,
    final Object args, final ResultCollector rc, final Set filter,
    final ServerToClientFunctionResultSender sender) {
  DistributedMember target = getTransactionalNode();
  if (target != null) {
    if (target.equals(getMyId())) {
      return executeLocally(execution, function, args, 0, rc, filter, sender);
    }
    return executeOnReplicate(execution, function, args, rc, filter, target);
  } else if (this.getAttributes().getDataPolicy().withReplication()
      || this.getAttributes().getDataPolicy().withPreloaded()) {
    // this member has the data; execute locally
    final Set<InternalDistributedMember> singleMember = Collections
        .singleton(getMyId());
    execution.validateExecution(function, singleMember);
    execution.setExecutionNodes(singleMember);
    return executeLocally(execution, function, args, 0, rc, filter, sender);
  } else {
    // route to a random replicate
    target = getRandomReplicate();
    if (target == null) {
      throw new FunctionException(LocalizedStrings
          .DistributedRegion_NO_REPLICATED_REGION_FOUND_FOR_EXECUTING_FUNCTION_0
          .toLocalizedString(function.getId()));
    }
  }
  final LocalResultCollector<?, ?> localRC = execution
      .getLocalResultCollector(function, rc);
  return executeOnReplicate(execution, function, args, localRC, filter, target);
}
/**
 * Pause local operations so that a clear() can be performed, and flush
 * communication channels to the given member.
 */
public void lockLocallyForClear(DM dm, InternalDistributedMember locker) {
  RegionVersionVector rvv = getVersionVector();
  if (rvv != null) {
    // block new operations from being applied to the region map
    rvv.lockForClear(getFullPath(), dm, locker);
    // Check for region destroyed after we have locked, to make sure
    // we don't continue a clear if the region has been destroyed.
    checkReadiness();
    // wait for current operations to reach the other members that hold
    // this region before the clear proceeds
    if (!locker.equals(dm.getDistributionManagerId())) {
      Set<InternalDistributedMember> mbrs =
          getDistributionAdvisor().adviseCacheOp();
      StateFlushOperation.flushTo(mbrs, this);
    }
  }
}
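// Sketch of the "block new ops, then wait out in-flight ones" pattern that
// lockForClear plus StateFlushOperation implement, expressed with a plain
// ReentrantReadWriteLock: ordinary operations share the read lock; clear
// takes the write lock, which both blocks new operations and waits for
// current ones to finish. Illustration only, not the GemFire mechanism.
import java.util.concurrent.locks.ReentrantReadWriteLock;

final class ClearLockSketch {
  static final ReentrantReadWriteLock opLock = new ReentrantReadWriteLock();

  static void applyOperation(Runnable op) {
    opLock.readLock().lock(); // normal operations share the lock
    try {
      op.run();
    } finally {
      opLock.readLock().unlock();
    }
  }

  static void clearRegion(Runnable clear) {
    opLock.writeLock().lock(); // blocks new ops, drains in-flight ones
    try {
      clear.run();
    } finally {
      opLock.writeLock().unlock();
    }
  }
}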
dlock = this.getDistributedLockIfGlobal(event.getKey());
if (!hasSeenEvent(event)) {
  if (this.requiresOneHopForMissingEntry(event)) {
    // there is no local entry (or no version tag can be generated here),
    // so a one-hop message may be needed to obtain a version tag
    RegionEntry re = getRegionEntry(event.getKey());
    if (re == null /*|| re.isTombstone()*/ || !this.generateVersionTag) {
      if (!event.isBulkOpInProgress() || this.dataPolicy.withStorage()) {
        // bulk ops send a single one-hop for empty regions; for other
        // missing entries distribute a remote put to obtain a version tag
        RemotePutMessage.distribute(event, lastModified, false, false,
            expectedOldValue, requireOldValue, !this.generateVersionTag);
      }
    }
  }
  return super.virtualPut(event, ifNew, ifOld, expectedOldValue,
      requireOldValue, lastModified, overwriteDestroyed);
}
// this cache has already applied the event; distribute the update and
// invoke callbacks locally
distributeUpdate(event, lastModified, ifNew, ifOld, expectedOldValue,
    requireOldValue);
event.invokeCallbacks(this, true, true);
return true;
/**
 * @see LocalRegion#basicInvalidateRegion(RegionEventImpl)
 */
@Override
void basicInvalidateRegion(RegionEventImpl event) {
  // disallow local invalidation for replicated regions
  if (!event.isDistributed() && getScope().isDistributed()
      && getDataPolicy().withReplication()) {
    throw new IllegalStateException(LocalizedStrings
        .DistributedRegion_NOT_ALLOWED_TO_DO_A_LOCAL_INVALIDATION_ON_A_REPLICATED_REGION
        .toLocalizedString());
  }
  if (shouldDistributeInvalidateRegion(event)) {
    distributeInvalidateRegion(event);
  }
  super.basicInvalidateRegion(event);
}
((AbstractRegionMap)region.getRegionMap()).putEntryIfAbsentForTest(entry);
cache.getLogger().info("entry inserted into cache: " + entry);
tag.setDistributedSystemId(1);
ev.setVersionTag(tag);
cache.getLogger().info("destroyThread is trying to destroy the entry: "
    + region.getRegionEntry(key));
region.basicDestroy(ev, false, null); // expectedOldValue

entry = (VersionedThinRegionEntryHeap)region.getRegionEntry(key);
region.dumpBackingMap();
Assert.assertTrue(entry != null, "expected to find a region entry for " + key);
Assert.assertTrue(entry.isTombstone(),
    "expected entry to be found and be a tombstone but it is " + entry);

// remove the tombstone, re-insert the entry and repeat the destroy, this
// time expecting it to fail
RegionMap map = region.getRegionMap();
tag = entry.asVersionTag();
map.removeTombstone(entry, tag, false, true);

((AbstractRegionMap)region.getRegionMap()).putEntryIfAbsentForTest(entry);
cache.getLogger().info("entry inserted into cache: " + entry);
tag.setDistributedSystemId(1);
ev.setVersionTag(tag);
cache.getLogger().info("destroyThread is trying to destroy the entry: "
    + region.getRegionEntry(key));
boolean caught = false;
try {
  region.basicDestroy(ev, false, null); // expectedOldValue
} catch (EntryNotFoundException e) {
  caught = true;
}
/**
 * Used to bootstrap txState.
 *
 * @param key the key whose transaction owner is to be found
 * @return this member for distributed regions; the member hosting the
 *         primary bucket for partitioned regions
 */
@Override
public DistributedMember getOwnerForKey(KeyInfo key) {
  // Asif: fix for sqlfabric bug 42266
  assert !this.isInternalRegion() || this.isMetaRegionWithTransactions();
  if (!this.getAttributes().getDataPolicy().withStorage()
      || (this.concurrencyChecksEnabled
          && this.getAttributes().getDataPolicy() == DataPolicy.NORMAL)) {
    // execute on a random replicate
    return getRandomReplicate();
  }
  // if we are non-persistent, forward transactions to a persistent member
  if (this.concurrencyChecksEnabled && !generateVersionTag) {
    return getRandomPersistentReplicate();
  }
  return super.getOwnerForKey(key);
}
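// Hedged sketch of the owner-selection chain above, with simplified
// stand-in parameters: a member without storage (or a NORMAL-policy
// region with concurrency checks) forwards to some replicate; a member
// that cannot generate version tags prefers a persistent replicate;
// otherwise this member owns the transaction.
import java.util.List;

final class TxOwnerSketch {
  static String ownerForKey(boolean hasStorage, boolean concurrencyChecks,
      boolean canGenerateVersionTag, String self,
      List<String> replicates, List<String> persistentReplicates) {
    if (!hasStorage) {
      // no local storage: bootstrap the transaction on a replicate
      return replicates.isEmpty() ? null : replicates.get(0);
    }
    if (concurrencyChecks && !canGenerateVersionTag) {
      // cannot version operations here: prefer a persistent replicate
      return persistentReplicates.isEmpty() ? null
          : persistentReplicates.get(0);
    }
    return self;
  }
}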
/**
 * Distribute tombstone garbage-collection information to all peers with
 * storage.
 */
protected EventID distributeTombstoneGC(Set<Object> keysRemoved) {
  this.getCachePerfStats().incTombstoneGCCount();
  EventID eventId = new EventID(getSystem());
  DistributedTombstoneOperation gc =
      DistributedTombstoneOperation.gc(this, eventId);
  gc.distribute();
  notifyClientsOfTombstoneGC(getVersionVector().getTombstoneGCVector(),
      keysRemoved, eventId, null);
  return eventId;
}
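// Simplified sketch of the kind of tombstone sweep this method announces
// to peers: remove tombstones older than a timeout and report the
// collected keys. The real collection is driven by the region version
// vector, not wall-clock age alone; this is an illustration only.
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

final class TombstoneGcSketch {
  static Set<Object> collectExpired(Map<Object, Long> tombstoneTimes,
      long now, long timeoutMillis) {
    Set<Object> removed = new HashSet<>();
    // iterate over a copy so we can remove from the live map
    for (Map.Entry<Object, Long> e : new HashMap<>(tombstoneTimes).entrySet()) {
      if (now - e.getValue() >= timeoutMillis) {
        tombstoneTimes.remove(e.getKey());
        removed.add(e.getKey());
      }
    }
    return removed; // peers and clients are then notified of these keys
  }
}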