/**
 * Returns the size information for every bucket whose primary is hosted on this member.
 *
 * @return a map from bucket id to its {@link SizeEntry}
 */
public Map<Integer, SizeEntry> getSizeForLocalPrimaryBuckets() {
  final Set<Integer> primaryBucketIds = getAllLocalPrimaryBucketIds();
  return getSizeLocallyForBuckets(primaryBucketIds);
}
/**
 * Returns the local BucketRegion that would host the given key, or {@code null} when this
 * member has no data store or hosts no bucket for the key's hash.
 *
 * @param key the entry key whose bucket is resolved
 * @since GemFire 6.1.2.9
 */
public BucketRegion getBucketRegion(Object key) {
  if (this.dataStore == null) {
    return null;
  }
  final Integer bucketId = PartitionedRegionHelper.getHashKey(this, null, key, null, null);
  return this.dataStore.getLocalBucketById(bucketId);
}
@Test
public void initializedPartitionedRegionWithoutColocationReturnsRegionReady() {
  PartitionedRegionDataStore dataStore = spy(new PartitionedRegionDataStore());
  // With no colocated children, readiness reduces to the region's own initialization state.
  doReturn(new ArrayList<PartitionedRegion>()).when(dataStore)
      .getColocatedChildRegions(partitionedRegion);

  assertThat(dataStore.isPartitionedRegionReady(partitionedRegion, bucketId)).isTrue();
}
/**
 * Returns the estimated size information for every bucket whose primary is hosted on this
 * member.
 *
 * @return a map from bucket id to its estimated {@link SizeEntry}
 */
public Map<Integer, SizeEntry> getSizeEstimateForLocalPrimaryBuckets() {
  final Set<Integer> primaryBucketIds = getAllLocalPrimaryBucketIds();
  return getSizeEstimateLocallyForBuckets(primaryBucketIds);
}
/**
 * Returns whether the given partitioned region is ready to host the given bucket:
 * either it has no colocated child regions and is itself initialized, or every
 * colocated region in the chain is ready for the bucket.
 *
 * @param partitionedRegion the region being checked
 * @param bucketId the bucket whose readiness is evaluated
 * @return {@code true} when the region (and any colocated children) is ready
 */
public boolean isPartitionedRegionReady(PartitionedRegion partitionedRegion, final int bucketId) {
  List<PartitionedRegion> colocatedWithList = getColocatedChildRegions(partitionedRegion);
  // isEmpty() is the idiomatic (and potentially cheaper) form of size() == 0.
  if (colocatedWithList.isEmpty()) {
    return partitionedRegion.isInitialized();
  }
  return areAllColocatedPartitionedRegionsReady(bucketId, colocatedWithList);
}
@Test
public void createLuceneIndexOnExistingRegionShouldNotThrowNPEIfBucketMovedDuringReindexing() {
  LuceneIndexImpl index = mock(LuceneIndexImpl.class);
  PartitionedRegionDataStore dataStore = mock(PartitionedRegionDataStore.class);
  when(region.getDataStore()).thenReturn(dataStore);
  // Java-style array declaration instead of C-style, and a parameterized HashSet
  // instead of the raw type the original used.
  Integer[] bucketIds = {1, 2, 3, 4, 5};
  Set<Integer> primaryBucketIds = new HashSet<>(Arrays.asList(bucketIds));
  when(dataStore.getAllLocalPrimaryBucketIds()).thenReturn(primaryBucketIds);
  // Bucket 3 "moves" mid-reindex: it is listed as primary but its region lookup yields null.
  when(dataStore.getLocalBucketById(3)).thenReturn(null);

  boolean result = service.createLuceneIndexOnDataRegion(region, index);

  assertTrue(result);
}
// NOTE(review): fragment — the enclosing method signature and the tail of this catch
// block lie outside this view; code intentionally left byte-identical.
// Resolves the initialized bucket for the key, runs the test read-hook, then checks
// containsValueForKey and verifies the bucket did not move. A RegionDestroyedException
// is rethrown as PRLocallyDestroyedException when this PR itself is locally destroyed
// or closed; otherwise readiness is checked and the bucket's own destruction probed.
final BucketRegion bucketRegion = getInitializedBucketForId(key, bucketId); invokeBucketReadHook(); boolean ret = false; try { ret = bucketRegion.containsValueForKey(key); checkIfBucketMoved(bucketRegion); } catch (RegionDestroyedException rde) { if (this.partitionedRegion.isLocallyDestroyed || this.partitionedRegion.isClosed) { throw new PRLocallyDestroyedException(rde); } else { this.getPartitionedRegion().checkReadiness(); if (bucketRegion.isBucketDestroyed()) {
// Updates only the version stamp of a locally hosted entry (no value change): resolves the
// initialized bucket for the event's key, binds the event to that bucket region, and applies
// basicUpdateEntryVersion. checkReadiness() afterwards ensures no reply is sent if the bucket
// was destroyed during the operation (bug 34361); a RegionDestroyedException is translated
// via checkRegionDestroyedOnBucket. NOTE(review): the trailing extra '}' on this line appears
// to close the enclosing class — left untouched.
public void updateEntryVersionLocally(Integer bucketId, EntryEventImpl event) throws ForceReattemptException { if (logger.isDebugEnabled()) { logger.debug("updateEntryVersionLocally: bucketId={}{}{} for key={}", this.partitionedRegion.getPRId(), PartitionedRegion.BUCKET_ID_SEPARATOR, bucketId, event.getKey()); } final BucketRegion bucketRegion = getInitializedBucketForId(event.getKey(), bucketId); try { event.setRegion(bucketRegion); bucketRegion.basicUpdateEntryVersion(event); // bug 34361: don't send a reply if bucket was destroyed during the op bucketRegion.checkReadiness(); } catch (RegionDestroyedException rde) { checkRegionDestroyedOnBucket(bucketRegion, event.isOriginRemote(), rde); } } }
/**
 * Returns the ids of all buckets for which this member is primary in the named region.
 *
 * @param regionName path of the partitioned region to inspect
 * @return the set of locally-primary bucket ids
 */
public static Set<Integer> getAllPrimaryBucketsOnTheNode(String regionName) {
  final PartitionedRegion partitionedRegion = (PartitionedRegion) cache.getRegion(regionName);
  return partitionedRegion.getDataStore().getAllLocalPrimaryBucketIds();
}
@Test
public void sendAcceptanceReplyIfDataStoreGrabbedBucket() {
  ManageBackupBucketMessage spyMessage = spy(new ManageBackupBucketMessage(recipent, regionId,
      processor, bucketId, isReblance, replaceOfflineDate, source, forecCreation));
  doReturn(recipent).when(spyMessage).getSender();
  // The region is ready and the data store succeeds in grabbing the bucket.
  when(partitionedRegionDataStore.isPartitionedRegionReady(partitionedRegion, bucketId))
      .thenReturn(true);
  when(partitionedRegionDataStore.grabBucket(bucketId, source, forecCreation, replaceOfflineDate,
      isReblance, null, false))
          .thenReturn(PartitionedRegionDataStore.CreateBucketResult.CREATED);

  spyMessage.operateOnPartitionedRegion(distributionManager, partitionedRegion, 1);

  // Exactly one reply must go out, accepting the bucket and reporting it initialized.
  verify(distributionManager, times(1)).putOutgoing(replyMessage.capture());
  assertThat(replyMessage.getValue().isAcceptedBucket()).isTrue();
  assertThat(replyMessage.getValue().isNotYetInitialized()).isFalse();
}
@Test
public void returnRegionNotReadyIfColocationNotCompletedForAColocatedRegion() {
  PartitionedRegionDataStore dataStore = spy(new PartitionedRegionDataStore());
  setupColocatedRegions(dataStore);
  // One grandchild has not finished colocation; the whole chain must report not ready.
  when(grandChildRegionDateStore2_3.isColocationComplete(bucketId)).thenReturn(false);

  assertThat(dataStore.isPartitionedRegionReady(partitionedRegion, bucketId)).isFalse();
}
/**
 * Returns whether a colocated region is fully initialized for the bucket: the region itself
 * is initialized, its data store has completed colocation for the bucket, and all of its
 * own colocated children are recursively ready.
 */
private boolean isColocatedPartitionedRegionInitialized(PartitionedRegion partitionedRegion,
    final int bucketId) {
  // Both guards short-circuit exactly as the two separate ifs did.
  if (!partitionedRegion.isInitialized()
      || !partitionedRegion.getDataStore().isColocationComplete(bucketId)) {
    return false;
  }
  return areAllColocatedPartitionedRegionsReady(bucketId,
      getColocatedChildRegions(partitionedRegion));
}
/** * Fetch the entries for the given bucket * * @param bucketId the id of the bucket * @return a Map containing all the entries */ public BucketRegion handleRemoteGetEntries(int bucketId) throws ForceReattemptException { if (logger.isDebugEnabled()) { logger.debug("handleRemoteGetEntries: bucketId: {}{}{}", this.partitionedRegion.getPRId(), PartitionedRegion.BUCKET_ID_SEPARATOR, bucketId); } BucketRegion br = getInitializedBucketForId(null, Integer.valueOf(bucketId)); // NOTE: this is a test method that does not take a snapshot so it does not // give a stable set of entries if the bucket is moved during a rebalance return br; }
@Test
public void returnRegionReadyIfAllColocatedRegionsAreReady() {
  PartitionedRegionDataStore dataStore = spy(new PartitionedRegionDataStore());
  setupColocatedRegions(dataStore);

  boolean ready = dataStore.isPartitionedRegionReady(partitionedRegion, bucketId);

  assertThat(ready).isTrue();
}
// NOTE(review): this span appears garbled — statements are interleaved out of order
// (a logger.debug( call is cut off mid-argument, braces are unbalanced) and the
// enclosing method signature is not visible. Code left byte-identical; it seems to
// belong to a handleManageBucketRequest-style flow that refuses bucket creation when
// capacity checks fail and otherwise attempts grabBucket — TODO confirm against the
// original file before relying on this reading.
if (!forceCreation && !canAccommodateMoreBytesSafely(size)) { if (logger.isDebugEnabled()) { logger.debug( if (!forceCreation && !canAccommodateAnotherBucket()) { return false; if (grabBucket(bucketId, null, forceCreation, false, true, sender, false).nowExists()) { this.partitionedRegion.checkReadiness(); if (logger.isDebugEnabled()) { "handleManageBucketRequest: successful, returning:{} bucketId={}{}{} for PR = {}", this.partitionedRegion.getMyId(), this.partitionedRegion.getPRId(), PartitionedRegion.BUCKET_ID_SEPARATOR, bucketId, this.getName());
// NOTE(review): fragment — begins mid-parameter-list and is cut elsewhere (a dangling
// argument list and an orphan catch header are visible). Code left byte-identical.
// Visible behavior: resolves the initialized bucket from the KeyInfo, runs the test
// read-hook, verifies the bucket has not moved, and returns the result; presumably a
// local read path — TODO confirm against the full method.
boolean returnTombstones) throws PrimaryBucketException, ForceReattemptException { final BucketRegion bucketRegion = getInitializedBucketForId(keyInfo.getKey(), keyInfo.getBucketId()); keyInfo.getBucketId(), bucketRegion.getName()); invokeBucketReadHook(); checkIfBucketMoved(bucketRegion); return result; } catch (RegionDestroyedException rde) {
// NOTE(review): fragment — the 'if' branch matching this '} else {' and the tail of the
// try block are outside this view; code left byte-identical. Visible behavior: fetch
// the initialized bucket for the event's key, then delegate to either createLocally or
// putLocally on the data store depending on a condition not visible here.
try { final BucketRegion br = this.dataStore.getInitializedBucketForId(event.getKey(), bucketId); result = this.dataStore.createLocally(br, event, ifNew, ifOld, requireOldValue, lastModified); } else { result = this.dataStore.putLocally(br, event, ifNew, ifOld, expectedOldValue, requireOldValue, lastModified);
@Test
public void removeAndNotifyKeysIsNotInvokedIfKeysNotLocked() throws Exception {
  PutAllPRMessage spyMessage = spy(new PutAllPRMessage(bucketId, 1, false, false, false, null));
  spyMessage.addEntry(entryData);
  doReturn(keys).when(spyMessage).getKeysToBeLocked();
  // Locking fails with RegionDestroyedException before any key is actually held.
  RegionDestroyedException regionDestroyedException = new RegionDestroyedException("", "");
  when(bucketRegion.waitUntilLocked(keys)).thenThrow(regionDestroyedException);

  spyMessage.doLocalPutAll(partitionedRegion, mock(InternalDistributedMember.class), 1);

  // No keys were locked, so no unlock/notify may happen; the destroy must be checked instead.
  verify(bucketRegion, never()).removeAndNotifyKeys(eq(keys));
  verify(dataStore).checkRegionDestroyedOnBucket(eq(bucketRegion), eq(true),
      eq(regionDestroyedException));
}
/**
 * Fetches all keys hosted locally for the given bucket, optionally filtered by a regular
 * expression. This is best-effort: if the bucket cannot be read (it moved or was locally
 * destroyed) the bucket id is recorded in {@code failures} instead of propagating the
 * exception.
 *
 * @param id the bucket id
 * @param failures collector for bucket ids whose keys could not be fetched
 * @param regex regular-expression filter, or {@code null} to fetch all keys
 * @return the set of matching local keys; possibly empty, never {@code null}
 */
public Set fetchAllLocalKeys(Integer id, Set<Integer> failures, String regex) {
  Set result = new HashSet();
  try {
    final Set keys;
    if (regex != null) {
      keys = this.dataStore.handleRemoteGetKeys(id, InterestType.REGULAR_EXPRESSION, regex, true);
    } else {
      keys = this.dataStore.getKeysLocally(id, true);
    }
    result.addAll(keys);
  } catch (ForceReattemptException | PRLocallyDestroyedException ignore) {
    // Multi-catch replaces two identical catch blocks; deliberately swallowed —
    // the caller consults 'failures' to retry these buckets.
    failures.add(id);
  }
  return result;
}
/**
 * Puts the object with the given key locally. <br>
 * Resolves the bucket region for the bucket id (a null region means the bucket was
 * re-mapped) and delegates the actual put to the BucketRegion-based overload; bucket
 * listeners or explicit size updates handle size bookkeeping.
 *
 * @param bucketId the bucket id of the key
 * @param event the operation event
 * @param ifNew whether a create must be performed
 * @param ifOld whether an existing entry must be updated
 * @param lastModified time stamp for update operations
 * @throws ForceReattemptException if bucket region is null
 * @throws PrimaryBucketException if the bucket in this data store is not the primary bucket
 * @return true if put happened
 */
public boolean putLocally(final Integer bucketId, final EntryEventImpl event, boolean ifNew,
    boolean ifOld, Object expectedOldValue, boolean requireOldValue, final long lastModified)
    throws PrimaryBucketException, ForceReattemptException {
  final BucketRegion bucketRegion = getInitializedBucketForId(event.getKey(), bucketId);
  return putLocally(bucketRegion, event, ifNew, ifOld, expectedOldValue, requireOldValue,
      lastModified);
}