// NOTE(review): fragment — the enclosing visitBuckets(...) call and anonymous class header
// start before this excerpt (the trailing "} });" closes them). For each visited bucket
// region it increments numPrimaries when that bucket's advisor reports it as primary.
// Restore surrounding context before editing.
@Override public void visit(Integer bucketId, Region r) { BucketRegion br = (BucketRegion) r; if (br.getBucketAdvisor().isPrimary()) { numPrimaries.incrementAndGet(); } } });
/**
 * Renders this bucket region for logging: its full path, serial number, and
 * whether the proxy bucket region currently reports this member as primary.
 */
@Override
public String toString() {
  return "BucketRegion" + "[path='" + getFullPath()
      + ";serial=" + getSerialNumber()
      + ";primary=" + getBucketAdvisor().getProxyBucketRegion().isPrimary()
      + "]";
}
/**
 * Collects the ids of all locally hosted buckets for which this member is
 * primary and whose id lies in the half-open range [low, high).
 *
 * @param low inclusive lower bound of the bucket id range
 * @param high exclusive upper bound of the bucket id range
 * @return the matching bucket ids; possibly empty, never null
 */
public Set<Integer> getAllLocalPrimaryBucketIdsBetweenProvidedIds(int low, int high) {
  Set<Integer> primaryIds = new HashSet<Integer>();
  for (Map.Entry<Integer, BucketRegion> entry : getAllLocalBuckets()) {
    BucketRegion candidate = entry.getValue();
    int id = candidate.getId();
    boolean inRange = id >= low && id < high;
    if (inRange && candidate.getBucketAdvisor().isPrimary()) {
      primaryIds.add(id);
    }
  }
  return primaryIds;
}
/**
 * Invalidates an entry by delegating to the three-argument overload, invoking
 * callbacks only when this region has finished initializing.
 *
 * @throws EntryNotFoundException if the entry does not exist
 */
@Override
public void basicInvalidate(EntryEventImpl event) throws EntryNotFoundException {
  final boolean invokeCallbacks = isInitialized();
  basicInvalidate(event, invokeCallbacks, false);
}
/**
 * Tears down state for a bucket whose initialization failed: runs the
 * pre-destroy hook for this bucket's id, then defers to the superclass cleanup.
 */
@Override
public void cleanupFailedInitialization() {
  preDestroyBucket(getId());
  super.cleanupFailedInitialization();
}
/**
 * A fast estimate of the total size of the locally hosted buckets.
 *
 * @param primaryOnly when true, only buckets this member is primary for are counted
 * @return the summed estimated local size of the matching buckets
 */
public long getEstimatedLocalBucketSize(boolean primaryOnly) {
  long total = 0;
  for (BucketRegion bucket : localBucket2RegionMap.values()) {
    boolean counted = !primaryOnly || bucket.getBucketAdvisor().isPrimary();
    if (counted) {
      total += bucket.getEstimatedLocalSize();
    }
  }
  return total;
}
@Override protected DistributedRegion createAndDefineRegion(boolean isConcurrencyChecksEnabled, RegionAttributes ra, InternalRegionArguments ira, GemFireCacheImpl cache) { BucketRegion br = new BucketRegion("testRegion", ra, null, cache, ira); // it is necessary to set the event tracker to initialized, since initialize() in not being // called on the instantiated region br.getEventTracker().setInitialized(); // since br is a real bucket region object, we need to tell mockito to monitor it br = spy(br); // doNothing().when(dm).addMembershipListener(any()); doNothing().when(br).distributeUpdateOperation(any(), anyLong()); doNothing().when(br).distributeDestroyOperation(any()); doNothing().when(br).distributeInvalidateOperation(any()); doNothing().when(br).distributeUpdateEntryVersionOperation(any()); doNothing().when(br).checkForPrimary(); doNothing().when(br).handleWANEvent(any()); doReturn(false).when(br).needWriteLock(any()); return br; }
// NOTE(review): mangled excerpt of a destroy path (signature starts before this view).
// Lost line breaks make the "// bug 39905" comment swallow subsequent statements;
// `newID` is used without its visible EventID declaration, the mapDestroy(...) call is
// truncated at "// isEviction", a "//merge44610" merge marker remains, and several
// closing braces are missing. Restore this method from upstream rather than editing here.
throws EntryNotFoundException, CacheWriterException, TimeoutException { Assert.assertTrue(!isTX()); Assert.assertTrue(event.getOperation().isDistributed()); boolean locked = lockKeysAndPrimary(event); try { handleWANEvent(event); if (!hasSeenEvent(event)) { if (event.getOperation().isExpiration()) { // bug 39905 - invoke listeners for expiration DistributedSystem sys = cache.getDistributedSystem(); event.setEventId(newID); event.setInvokePRCallbacks(getBucketAdvisor().isPrimary()); boolean done = mapDestroy(event, cacheWrite, false, // isEviction //merge44610: In cheetah if (done && !getBucketAdvisor().isPrimary() && isEntryExpiryPossible()) { synchronized (pendingSecondaryExpires) { pendingSecondaryExpires.remove(event.getKey()); if (!getConcurrencyChecksEnabled() || event.hasValidVersionTag()) { distributeDestroyOperation(event); releaseLockForKeysAndPrimary(event);
// NOTE(review): mangled excerpt — fragments of destroy-lock acquisition/release and of a
// key-iteration/serialization loop (apparently from a fetch-keys reply path) are interleaved:
// releaseDestroyLock() appears before acquireDestroyLock(), a `continue` sits mid-block, and
// braces are unbalanced. Not safe to rewrite from this view; restore from upstream.
if (map != null && lockAcquired) { try { map.releaseDestroyLock(); if (map.isBucketDestroyed()) { failedBuckets.add(map.getId()); continue; map.acquireDestroyLock(); lockAcquired = true; } catch (CancelException e) { if (logger.isDebugEnabled()) { logger.debug("sendReply: acquireDestroyLock failed due to cache closure, region = {}", map.getFullPath()); it = bucketKeys.get(map.getId()).iterator(); } else { // bucketIds != null if (regex == null) { it = new HashSet(map.keySet(allowTombstones)).iterator(); } else { it = map.getKeysWithInterest(InterestType.REGULAR_EXPRESSION, regex, allowTombstones) .iterator(); Object key = it.next(); VersionTagHolder clientEvent = new VersionTagHolder(); Object value = map.get(key, null, true, true, true, null, clientEvent, allowTombstones); DataSerializer.writePrimitiveInt(map.getId(), mos); needToWriteBucketInfo = false;
// NOTE(review): mangled excerpt of bucket cleanup — it clears the hosting flag, then either
// localDestroyRegion() or close() depending on removeFromDisk, with shadow-bucket flagging
// for colocated shadow PRs and an error-format fragment at the end. Braces/conditions are
// incomplete (a dangling "&& buk.getPartitionedRegion().isShadowPR()"); restore from upstream.
buk.getBucketAdvisor().getProxyBucketRegion().setHosting(false); if (removeFromDisk) { buk.localDestroyRegion(); } else { buk.close(); logger.debug("cleanup: Locally destroyed bucket {}", buk.getFullPath()); && buk.getPartitionedRegion().isShadowPR()) { if (buk.getPartitionedRegion().getColocatedWithRegion() != null) { buk.getPartitionedRegion().getColocatedWithRegion().getRegionAdvisor() .getBucketAdvisor(bucketId).setShadowBucketDestroyed(true); String.format("PartitionedRegion %s: cleanUp problem destroying bucket %s", new Object[] {this.partitionedRegion.getFullPath(), Integer.valueOf(buk.getId())}), ex);
// NOTE(review): mangled excerpt of an invalidate path (signature starts before this view).
// Lost line breaks make the "// bug 39905" comment swallow statements, a stray log-message
// string ("LR.basicInvalidate: ...") floats without its logger call, and the try begun after
// lockKeysAndPrimary(event) is never closed here. Restore from upstream before editing.
Assert.assertTrue(!isTX()); Assert.assertTrue(event.getOperation().isDistributed()); boolean locked = lockKeysAndPrimary(event); try { if (!hasSeenEvent(event)) { if (event.getOperation().isExpiration()) { // bug 39905 - invoke listeners for expiration DistributedSystem sys = cache.getDistributedSystem(); EventID newID = new EventID(sys); event.setEventId(newID); event.setInvokePRCallbacks(getBucketAdvisor().isPrimary()); boolean forceCallbacks = isEntryEvictDestroyEnabled(); boolean done = this.entries.invalidate(event, invokeCallbacks, forceNewEntry, forceCallbacks); ExpirationAction expirationAction = getEntryExpirationAction(); if (done && !getBucketAdvisor().isPrimary() && expirationAction != null && expirationAction.isInvalidate()) { synchronized (pendingSecondaryExpires) { "LR.basicInvalidate: this cache has already seen this event {}", event); if (!getConcurrencyChecksEnabled() || event.hasValidVersionTag()) { distributeInvalidateOperation(event); releaseLockForKeysAndPrimary(event);
/**
 * Stubs lockKeysAndPrimary's collaborators so the key locks are obtained and
 * doLockForPrimary(false) reports success, then verifies the held key locks
 * are not removed (removeAndNotifyKeys is never invoked).
 */
@Test
public void lockKeysAndPrimaryReleaseLockHeldIfDoesNotLockForPrimary() {
  BucketRegion region = spy(
      new BucketRegion(regionName, regionAttributes, partitionedRegion, cache,
          internalRegionArgs));
  doReturn(keys).when(region).getKeysToBeLocked(event);
  doReturn(true).when(region).waitUntilLocked(keys);
  doReturn(true).when(region).doLockForPrimary(false);

  region.lockKeysAndPrimary(event);

  verify(region, never()).removeAndNotifyKeys(keys);
}
// NOTE(review): mangled excerpt of basicUpdateEntryVersion — the outer try opened after
// lockWhenRegionIsInitializing() is never closed here, the inner if/else bodies are
// truncated, and the finally block is cut off mid-statement. Note it guards the release
// with keysAndPrimaryLocked; restore the full method from upstream before editing.
@Override void basicUpdateEntryVersion(EntryEventImpl event) throws EntryNotFoundException { Assert.assertTrue(!isTX()); Assert.assertTrue(event.getOperation().isDistributed()); final boolean locked = internalRegion.lockWhenRegionIsInitializing(); try { boolean keysAndPrimaryLocked = lockKeysAndPrimary(event); try { if (!hasSeenEvent(event)) { this.entries.updateEntryVersion(event); } else { if (!event.isOriginRemote() && getBucketAdvisor().isPrimary()) { if (!getConcurrencyChecksEnabled() || event.hasValidVersionTag()) { distributeUpdateEntryVersionOperation(event); } finally { if (keysAndPrimaryLocked) { releaseLockForKeysAndPrimary(event);
/**
 * When lockKeysAndPrimary throws RegionDestroyedException, basicUpdateEntryVersion
 * must propagate it and never release locks that were never acquired.
 */
@Test(expected = RegionDestroyedException.class)
public void basicUpdateEntryVersionDoesNotReleaseLockIfKeysAndPrimaryNotLocked() {
  BucketRegion region = spy(
      new BucketRegion(regionName, regionAttributes, partitionedRegion, cache,
          internalRegionArgs));
  doThrow(regionDestroyedException).when(region).lockKeysAndPrimary(event);
  when(event.getRegion()).thenReturn(region);
  doReturn(true).when(region).hasSeenEvent(event);
  doReturn(mock(AbstractRegionMap.class)).when(region).getRegionMap();

  region.basicUpdateEntryVersion(event);

  verify(region, never()).releaseLockForKeysAndPrimary(eq(event));
}
protected BucketRegion setUpMockBucket(int id) throws BucketNotFoundException { BucketRegion mockBucket = Mockito.mock(BucketRegion.class); BucketRegion fileAndChunkBucket = Mockito.mock(BucketRegion.class); // Allowing the fileAndChunkBucket to behave like a map so that the IndexWriter operations don't // fail Fakes.addMapBehavior(fileAndChunkBucket); when(fileAndChunkBucket.getFullPath()).thenReturn("File" + id); when(mockBucket.getId()).thenReturn(id); when(userRegion.getBucketRegion(eq(id), eq(null))).thenReturn(mockBucket); when(userDataStore.getLocalBucketById(eq(id))).thenReturn(mockBucket); when(userRegion.getBucketRegion(eq(id + 113), eq(null))).thenReturn(mockBucket); when(userDataStore.getLocalBucketById(eq(id + 113))).thenReturn(mockBucket); when(fileDataStore.getLocalBucketById(eq(id))).thenReturn(fileAndChunkBucket); fileAndChunkBuckets.put(id, fileAndChunkBucket); dataBuckets.put(id, mockBucket); BucketAdvisor mockBucketAdvisor = Mockito.mock(BucketAdvisor.class); when(fileAndChunkBucket.getBucketAdvisor()).thenReturn(mockBucketAdvisor); when(mockBucketAdvisor.isPrimary()).thenReturn(true); return mockBucket; } }
// NOTE(review): mangled excerpt of WAN tail-key/sequence-number handling — on a primary
// bucket it advances eventSeqNum by totalNumBuckets and sanity-checks key % buckets == id;
// the secondary-bucket debug line floats without its enclosing else branch, and braces are
// unbalanced. Restore this block from upstream before editing.
if (getBucketAdvisor().isPrimary()) { long key = this.eventSeqNum.addAndGet(this.partitionedRegion.getTotalNumberOfBuckets()); if (key < 0 || key % getPartitionedRegion().getTotalNumberOfBuckets() != getId()) { logger.error("ERROR! The sequence number {} generated for the bucket {} is incorrect.", new Object[] {key, getId()}); logger.debug("WAN: On primary bucket {}, setting the seq number as {}", getId(), this.eventSeqNum.get()); logger.debug("WAN: On secondary bucket {}, setting the seq number as {}", getId(), event.getTailKey());
/**
 * basicDestroy must release the key-and-primary locks it acquired, even when the
 * event has already been seen by this region.
 */
@Test
public void basicDestroyReleaseLockIfKeysAndPrimaryLocked() {
  BucketRegion region = spy(
      new BucketRegion(regionName, regionAttributes, partitionedRegion, cache,
          internalRegionArgs));
  doReturn(true).when(region).lockKeysAndPrimary(event);
  doReturn(true).when(region).hasSeenEvent(event);

  region.basicDestroy(event, false, null);

  verify(region).releaseLockForKeysAndPrimary(eq(event));
}
/**
 * basicInvalidate must release the key-and-primary locks it acquired, even when
 * the event has already been seen by this region.
 */
@Test
public void basicInvalidateReleaseLockIfKeysAndPrimaryLocked() {
  BucketRegion region = spy(
      new BucketRegion(regionName, regionAttributes, partitionedRegion, cache,
          internalRegionArgs));
  doReturn(true).when(region).lockKeysAndPrimary(event);
  doReturn(true).when(region).hasSeenEvent(event);

  region.basicInvalidate(event, false, false);

  verify(region).releaseLockForKeysAndPrimary(eq(event));
}
/**
 * Notifies every configured {@code PartitionListener} of the parent partitioned
 * region that this bucket has been created, passing the bucket id and the
 * bucket's current key set. A no-op when no listeners are configured.
 */
protected void invokePartitionListenerAfterBucketCreated() {
  PartitionListener[] partitionListeners = getPartitionedRegion().getPartitionListeners();
  if (partitionListeners == null || partitionListeners.length == 0) {
    return;
  }
  // Enhanced-for replaces the original indexed loop (the index was unused).
  for (PartitionListener listener : partitionListeners) {
    // Individual array slots may be null; skip them as the original did.
    if (listener != null) {
      listener.afterBucketCreated(getId(), keySet());
    }
  }
}
/**
 * virtualPut must release the key-and-primary locks it acquired, even when the
 * event has already been seen by this region.
 */
@Test
public void virtualPutReleaseLockIfKeysAndPrimaryLocked() {
  BucketRegion region = spy(
      new BucketRegion(regionName, regionAttributes, partitionedRegion, cache,
          internalRegionArgs));
  doReturn(true).when(region).lockKeysAndPrimary(event);
  doReturn(true).when(region).hasSeenEvent(event);

  region.virtualPut(event, false, true, null, false, 1, true);

  verify(region).releaseLockForKeysAndPrimary(eq(event));
}