private boolean areLocalBucketQueueRegionsPresent() {
  for (PartitionedRegion prQ : this.userRegionNameToshadowPRMap.values()) {
    if (prQ.getDataStore().getAllLocalBucketRegions().size() > 0) {
      return true;
    }
  }
  return false;
}
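The presence check above dereferences getDataStore() without a guard, while the getRegionLiveChunks snippets further down null-check it first: getDataStore() can return null on members configured without local storage. A minimal null-safe sketch of the same check (illustration only, reusing the names from the snippet above):

private boolean areLocalBucketQueueRegionsPresent() {
  for (PartitionedRegion prQ : this.userRegionNameToshadowPRMap.values()) {
    // Accessor members host no buckets; guard the data store before use.
    PartitionedRegionDataStore ds = prQ.getDataStore();
    if (ds != null && !ds.getAllLocalBucketRegions().isEmpty()) {
      return true;
    }
  }
  return false;
}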
/**
 * This will be the case when the sender is started again after a stop operation.
 */
private void handleShadowPRExistsScenario(Cache cache, PartitionedRegion prQ) {
  // Note: the region will not be null if the sender is started again after a stop operation
  if (logger.isDebugEnabled()) {
    logger.debug("{}: No need to create the region as the region has been retrieved: {}", this, prQ);
  }
  // Now clean up the shadow PR's buckets on this node (primary as well as
  // secondary) for a fresh start.
  Set<BucketRegion> localBucketRegions = prQ.getDataStore().getAllLocalBucketRegions();
  for (BucketRegion bucketRegion : localBucketRegions) {
    bucketRegion.clear();
  }
}

protected boolean isUsedForHDFS()
public void run() {
  try {
    List<BucketRegion> bucketList = new ArrayList<BucketRegion>();
    Set<BucketRegion> buckets = dataStore.getAllLocalBucketRegions();
    for (BucketRegion br : buckets) {
      if (HeapEvictor.MINIMUM_ENTRIES_PER_BUCKET < br.getSizeForEviction()) {
@Override
public void run2() throws CacheException {
  final PartitionedRegion pr = (PartitionedRegion) cache.getRegion(regionName);
  Set<BucketRegion> buckets = pr.getDataStore().getAllLocalBucketRegions();
  for (BucketRegion br : buckets) {
    getLogWriter().info("Print " + br.size());
  }
}
});
PartitionedRegion pr = (PartitionedRegion) listener;
if (includePartitionedRegion(pr)) {
  allRegionList.addAll(pr.getDataStore().getAllLocalBucketRegions());
@Override
public Object peek() throws InterruptedException, CacheException {
  Object object = null;
  int bucketId = -1;
  PartitionedRegion prQ = getRandomShadowPR();
  if (prQ != null && prQ.getDataStore().getAllLocalBucketRegions().size() > 0
      && ((bucketId = getRandomPrimaryBucket(prQ)) != -1)) {
    BucketRegionQueue brq;
    try {
      brq = (BucketRegionQueue) prQ.getDataStore().getInitializedBucketForId(null, bucketId);
      object = brq.peek();
    } catch (BucketRegionQueueUnavailableException e) {
      return object; // since this is not set, it would be null
    } catch (ForceReattemptException e) {
      if (logger.isDebugEnabled()) {
        logger.debug("Remove: Got ForceReattemptException for {} for bucket = {}", this, bucketId);
      }
    }
  }
  return object; // OFFHEAP: ok since only callers use it to check for empty queue
}
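As the catch blocks show, this peek() deliberately returns null both when the queue is empty and when the target bucket is unavailable or being moved, so a caller cannot distinguish the two cases. A hedged caller sketch under that contract (the queue field and process() handler are illustrative, not from the source):

// Illustrative drain loop: a null peek() may mean "empty" or a transient
// bucket move, so the loop simply stops and can be re-driven later.
Object next;
while ((next = queue.peek()) != null) {
  process(next); // hypothetical handler
  queue.remove(); // discard the element just peeked
}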
/**
 * This will be the case when the sender is started again after a stop operation.
 */
private void handleShadowPRExistsScenario(Cache cache, PartitionedRegion prQ) {
  // Note: the region will not be null if the sender is started again after a stop operation
  if (logger.fineEnabled()) {
    logger.fine(this + ": No need to create the region as the region has been retrieved: " + prQ);
  }
  // Now clean up the shadow PR's buckets on this node (primary as well as
  // secondary) for a fresh start.
  Set<BucketRegion> localBucketRegions = prQ.getDataStore().getAllLocalBucketRegions();
  for (BucketRegion bucketRegion : localBucketRegions) {
    bucketRegion.clear();
  }
}

protected boolean isUsedForHDFS()
Set<BucketRegion> buckets = shadowPR.getDataStore().getAllLocalBucketRegions();
@Override
protected void clearBackingCHM(Region<Integer, String> r) {
  PartitionedRegion pr = (PartitionedRegion) r;
  for (BucketRegion br : pr.getDataStore().getAllLocalBucketRegions()) {
    assertTrue(br.getRegionMap() instanceof HDFSRegionMap);
    CustomEntryConcurrentHashMap chm = ((AbstractRegionMap) br.getRegionMap())._getMap();
    Iterator it = chm.keySet().iterator();
    while (it.hasNext()) {
      Object key = it.next();
      OffHeapRegionEntry re = (OffHeapRegionEntry) chm.remove(key);
      assert re != null;
      re.release();
    }
  }
  // wait here to make sure that the queue has been flushed
  sleep(pr.getFullPath());
}
public static void FIX_PREVIOUS_OPS_COUNT(String tableName) throws SQLException {
  Region region = Misc.getRegionForTable(tableName, true);
  if (region != null) {
    if (region instanceof PartitionedRegion) {
      PartitionedRegion pr = (PartitionedRegion) region;
      for (BucketRegion br : pr.getDataStore().getAllLocalBucketRegions()) {
        br.getBucketAdvisor().resetPrevOpCount();
      }
    } else if (region instanceof DistributedRegion) {
      ((DistributedRegion) region).getDistributionAdvisor().resetPrevOpCount();
    }
  }
}

/**
pr.concurrencyChecksEnabled) {
  pr.waitForData();
  for (BucketRegion br : pr.getDataStore().getAllLocalBucketRegions()) {
private void getRegionLiveChunks(Region<?, ?> r, List<Chunk> result) {
  if (r.getAttributes().getOffHeap()) {
    if (r instanceof PartitionedRegion) {
      PartitionedRegionDataStore prs = ((PartitionedRegion) r).getDataStore();
      if (prs != null) {
        Set<BucketRegion> brs = prs.getAllLocalBucketRegions();
        if (brs != null) {
          for (BucketRegion br : brs) {
            if (br != null && !br.isDestroyed()) {
              this.basicGetRegionLiveChunks(br, result);
            }
          }
        }
      }
    } else {
      this.basicGetRegionLiveChunks((LocalRegion) r, result);
    }
  }
}
protected void clearBackingCHM(Region<Integer, String> r) {
  PartitionedRegion pr = (PartitionedRegion) r;
  for (BucketRegion br : pr.getDataStore().getAllLocalBucketRegions()) {
    assertTrue(br.getRegionMap() instanceof HDFSRegionMap);
    ((AbstractRegionMap) br.getRegionMap())._getMap().clear();
  }
  // wait here to make sure that the queue has been flushed
  sleep(pr.getFullPath());
}
@Override
public Object peek() throws InterruptedException, CacheException {
  Object object = null;
  int bucketId = -1;
  PartitionedRegion prQ = getRandomShadowPR();
  if (prQ != null && prQ.getDataStore().getAllLocalBucketRegions().size() > 0
      && ((bucketId = getRandomPrimaryBucket(prQ)) != -1)) {
    BucketRegionQueue brq;
    try {
      brq = (BucketRegionQueue) prQ.getDataStore().getInitializedBucketForId(null, bucketId);
      object = brq.peek();
    } catch (BucketRegionQueueUnavailableException e) {
      return object; // since this is not set, it would be null
    } catch (ForceReattemptException e) {
      if (logger.fineEnabled()) {
        logger.fine("remove: Got ForceReattemptException for " + this + " for bucket = " + bucketId);
      }
    }
  }
  return object; // OFFHEAP: ok since only callers use it to check for empty queue
}
private void getRegionLiveChunks(Region r, List<Chunk> result) {
  if (r.getAttributes().getEnableOffHeapMemory() || isShadowBucket(r)) {
    if (r instanceof PartitionedRegion) {
      PartitionedRegion pr = (PartitionedRegion) r;
      boolean includeHDFSResults = pr.includeHDFSResults();
      try {
        pr.setQueryHDFS(false); // fixes #49675
        PartitionedRegionDataStore prs = pr.getDataStore();
        if (prs != null) {
          Set<BucketRegion> brs = prs.getAllLocalBucketRegions();
          if (brs != null) {
            for (BucketRegion br : brs) {
              if (br != null) {
                this.basicGetRegionLiveChunks(br, result);
              }
            }
          }
        }
      } finally {
        pr.setQueryHDFS(includeHDFSResults);
      }
    } else {
      this.basicGetRegionLiveChunks((LocalRegion) r, result);
    }
  }
}
continue;
for (BucketRegion br : pr.getDataStore().getAllLocalBucketRegions()) {
  assertNotNull(br.getAttributes().getEvictionAttributes());
  assertEquals(EvictionAlgorithm.LRU_HEAP,
      br.getAttributes().getEvictionAttributes().getAlgorithm());
Set<BucketRegion> s = prds.getAllLocalBucketRegions();
assertTrue(s.size() > 0);
for (BucketRegion br : s) {
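Taken together, the snippets above share one recurring pattern: fetch the member-local bucket set once via getAllLocalBucketRegions(), then iterate it defensively. A condensed sketch of that pattern (countLocalEntries is a hypothetical helper, not part of the API):

// Hypothetical helper summarizing the pattern used throughout this page.
static int countLocalEntries(PartitionedRegion pr) {
  PartitionedRegionDataStore ds = pr.getDataStore(); // null on accessor members
  if (ds == null) {
    return 0;
  }
  int total = 0;
  for (BucketRegion br : ds.getAllLocalBucketRegions()) {
    if (br != null && !br.isDestroyed()) { // buckets can be destroyed concurrently (e.g. rebalance)
      total += br.size();
    }
  }
  return total;
}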