/**
 * Returns the per-bucket size entries for every bucket this member
 * currently hosts as primary. Delegates to
 * {@link #getSizeLocallyForBuckets} with the local primary bucket ids.
 */
public Map<Integer, SizeEntry> getSizeForLocalPrimaryBuckets() {
  final Set<Integer> localPrimaries = getAllLocalPrimaryBucketIds();
  return getSizeLocallyForBuckets(localPrimaries);
}
/**
 * Returns estimated per-bucket sizes for every bucket this member
 * currently hosts as primary. Delegates to
 * {@link #getSizeEstimateLocallyForBuckets} with the local primary bucket ids.
 */
public Map<Integer, SizeEntry> getSizeEstimateForLocalPrimaryBuckets() {
  final Set<Integer> localPrimaries = getAllLocalPrimaryBucketIds();
  return getSizeEstimateLocallyForBuckets(localPrimaries);
}
/**
 * Size-estimate variant of the primary-bucket size query: collects the ids
 * of all locally hosted primary buckets and asks for their estimated sizes.
 */
public Map<Integer, SizeEntry> getSizeEstimateForLocalPrimaryBuckets() {
  final Set<Integer> primaryIds = getAllLocalPrimaryBucketIds();
  return getSizeEstimateLocallyForBuckets(primaryIds);
}
/**
 * Exact-size variant of the primary-bucket size query: collects the ids of
 * all locally hosted primary buckets and asks for their actual sizes.
 */
public Map<Integer, SizeEntry> getSizeForLocalPrimaryBuckets() {
  final Set<Integer> primaryIds = getAllLocalPrimaryBucketIds();
  return getSizeLocallyForBuckets(primaryIds);
}
/**
 * After a node goes down, asserts that the number of primary buckets hosted
 * by this member is an exact multiple of 3.
 */
public static void checkStartingBucketIDs_Nodedown() {
  // assertEquals takes (expected, actual); the original had them reversed,
  // which produces misleading "expected N but was 0" failure messages.
  assertEquals(0, region_FPR.getDataStore().getAllLocalPrimaryBucketIds().size() % 3);
}
/**
 * After a node comes back up, asserts that the number of primary buckets
 * hosted by this member is an exact multiple of 3.
 */
public static void checkStartingBucketIDs_Nodeup() {
  // assertEquals takes (expected, actual); the original had them reversed,
  // which produces misleading "expected N but was 0" failure messages.
  assertEquals(0, region_FPR.getDataStore().getAllLocalPrimaryBucketIds().size() % 3);
}
/**
 * Asserts that the number of primary buckets hosted by this member is an
 * exact multiple of 3.
 */
public static void checkStartingBucketIDs() {
  // assertEquals takes (expected, actual); the original had them reversed,
  // which produces misleading "expected N but was 0" failure messages.
  assertEquals(0, region_FPR.getDataStore().getAllLocalPrimaryBucketIds().size() % 3);
}
/**
 * Looks up the named partitioned region in the cache and returns the ids of
 * all buckets this node currently hosts as primary.
 *
 * @param regionName name of the partitioned region to inspect
 * @return ids of locally hosted primary buckets
 */
public static Set<Integer> getAllPrimaryBucketsOnTheNode(String regionName) {
  final PartitionedRegion pr = (PartitionedRegion) cache.getRegion(regionName);
  return pr.getDataStore().getAllLocalPrimaryBucketIds();
}
/**
 * Returns this member's primary bucket ids as a sorted set so results can be
 * compared deterministically across members.
 */
public Object call() throws Exception {
  final Cache c = getCache();
  final PartitionedRegion pr = (PartitionedRegion) c.getRegion(regionName);
  // TreeSet gives a stable, ordered view of the (unordered) id set.
  return new TreeSet<Integer>(pr.getDataStore().getAllLocalPrimaryBucketIds());
} };
/**
 * Waits for rebalancing of the order region to finish, then asserts this
 * member hosts exactly 2 buckets, both of them primaries.
 */
public void run() {
  final PartitionedRegion region =
      (PartitionedRegion) basicGetCache().getRegion(OrderPartitionedRegionName);
  final MyResourceObserver observer =
      (MyResourceObserver) InternalResourceManager.getResourceObserver();
  try {
    observer.waitForRegion(region, 60 * 1000);
  } catch (InterruptedException e) {
    // Restore the interrupt status before converting to unchecked,
    // so callers up the stack can still observe the interruption.
    Thread.currentThread().interrupt();
    throw new RuntimeException(e);
  }
  assertEquals(2, region.getDataStore().getAllLocalBucketIds().size());
  assertEquals(2, region.getDataStore().getAllLocalPrimaryBucketIds().size());
} };
/**
 * Waits for rebalancing of the customer region to finish, then asserts this
 * member hosts no buckets (and therefore no primaries) for it.
 */
public void run() {
  final PartitionedRegion region1 =
      (PartitionedRegion) basicGetCache().getRegion(CustomerPartitionedRegionName);
  final MyResourceObserver observer =
      (MyResourceObserver) InternalResourceManager.getResourceObserver();
  try {
    observer.waitForRegion(region1, 60 * 1000);
  } catch (InterruptedException e) {
    // Restore the interrupt status before converting to unchecked,
    // so callers up the stack can still observe the interruption.
    Thread.currentThread().interrupt();
    throw new RuntimeException(e);
  }
  assertEquals(0, region1.getDataStore().getAllLocalBucketIds().size());
  assertEquals(0, region1.getDataStore().getAllLocalPrimaryBucketIds().size());
} };
/**
 * TODO: Optimization needed. We are creating 1 array list for each peek!!
 *
 * Picks a uniformly random primary bucket of a random shadow PR and returns
 * it if it has data ready to peek; otherwise returns null.
 *
 * @return BucketRegionQueue
 */
private final BucketRegionQueue getRandomBucketRegionQueue() {
  final PartitionedRegion shadowPR = getRandomShadowPR();
  if (shadowPR == null) {
    return null;
  }
  final PartitionedRegionDataStore dataStore = shadowPR.getDataStore();
  // Copy into a list because the id set has no positional access.
  final List<Integer> primaryIds =
      new ArrayList<Integer>(dataStore.getAllLocalPrimaryBucketIds());
  if (primaryIds.isEmpty()) {
    return null;
  }
  final int chosenId = primaryIds.get(new Random().nextInt(primaryIds.size()));
  final BucketRegionQueue candidate =
      (BucketRegionQueue) dataStore.getLocalBucketById(chosenId);
  return candidate.isReadyForPeek() ? candidate : null;
}
/**
 * TODO: Optimization needed. We are creating 1 array list for each peek!!
 *
 * Selects one of this member's primary buckets at random from a random
 * shadow PR; returns it only when it is ready for a peek, else null.
 *
 * @return BucketRegionQueue
 */
private final BucketRegionQueue getRandomBucketRegionQueue() {
  final PartitionedRegion queuePR = getRandomShadowPR();
  if (queuePR == null) {
    return null;
  }
  final PartitionedRegionDataStore store = queuePR.getDataStore();
  // The id set has no indexed access, hence the per-call list copy.
  final List<Integer> candidates =
      new ArrayList<Integer>(store.getAllLocalPrimaryBucketIds());
  if (candidates.isEmpty()) {
    return null;
  }
  final int pickedId = candidates.get(new Random().nextInt(candidates.size()));
  final BucketRegionQueue picked =
      (BucketRegionQueue) store.getLocalBucketById(pickedId);
  if (picked.isReadyForPeek()) {
    return picked;
  }
  return null;
}
/**
 * Waits for rebalancing of the customer region to finish, then asserts this
 * member hosts exactly 2 buckets, both of them primaries.
 */
public void run() {
  final PartitionedRegion region1 =
      (PartitionedRegion) basicGetCache().getRegion(CustomerPartitionedRegionName);
  final MyResourceObserver observer =
      (MyResourceObserver) InternalResourceManager.getResourceObserver();
  try {
    observer.waitForRegion(region1, 60 * 1000);
  } catch (InterruptedException e) {
    // Restore the interrupt status before converting to unchecked,
    // so callers up the stack can still observe the interruption.
    Thread.currentThread().interrupt();
    throw new RuntimeException(e);
  }
  assertEquals(2, region1.getDataStore().getAllLocalBucketIds().size());
  assertEquals(2, region1.getDataStore().getAllLocalPrimaryBucketIds().size());
} };
/**
 * Waits for rebalancing of the order region to finish, then asserts this
 * member hosts 50 buckets, of which 25 are primaries.
 */
public void run() {
  final PartitionedRegion region =
      (PartitionedRegion) basicGetCache().getRegion(OrderPartitionedRegionName);
  final MyResourceObserver observer =
      (MyResourceObserver) InternalResourceManager.getResourceObserver();
  try {
    observer.waitForRegion(region, 60 * 1000);
  } catch (InterruptedException e) {
    // Restore the interrupt status before converting to unchecked,
    // so callers up the stack can still observe the interruption.
    Thread.currentThread().interrupt();
    throw new RuntimeException(e);
  }
  assertEquals(50, region.getDataStore().getAllLocalBucketIds().size());
  assertEquals(25, region.getDataStore().getAllLocalPrimaryBucketIds().size());
} };
protected int getRandomPrimaryBucket(PartitionedRegion prQ) { if (prQ != null) { List<Integer> buckets = new ArrayList<Integer>(prQ.getDataStore() .getAllLocalPrimaryBucketIds()); List<Integer> thisProcessorBuckets = new ArrayList<Integer>(); for(Integer bId : buckets) { if(bId % this.nDispatcher == this.index) { thisProcessorBuckets.add(bId); } } if (logger.fineEnabled()) { logger .fine("getRandomPrimaryBucket: total " + buckets.size() + " for this processor:" + thisProcessorBuckets.size() ); } // TODO:REF: instead of shuffle use random number, in this method we are // returning id instead we should return BRQ itself Collections.shuffle(thisProcessorBuckets); for (Integer bucketId : thisProcessorBuckets) { BucketRegionQueue br = (BucketRegionQueue)prQ.getDataStore() .getLocalBucketById(bucketId); if (br != null && br.isReadyForPeek()) { return br.getId(); } } } return -1; }
/** * This function ensures that if any of the buckets has lists that are beyond * its size, they gets rolled over into new skip lists. */ @Override public void run2() { Set<PartitionedRegion> prQs = getRegions(); for (PartitionedRegion prQ : prQs) { ArrayList<Integer> buckets = new ArrayList<Integer>(prQ .getDataStore().getAllLocalPrimaryBucketIds()); for (Integer bId : buckets) { HDFSBucketRegionQueue hrq = ((HDFSBucketRegionQueue)prQ .getDataStore().getLocalBucketById(bId)); if (hrq == null) { // bucket moved to another node after getAllLocalPrimaryBucketIds // was called. continue fixing the next bucket. continue; } if (logger.isDebugEnabled()) { logger.debug("Rolling over the list for bucket id: " + bId); } hrq.rolloverSkipList(); } } } }
/**
 * Counts entries across all buckets this member hosts as primary, evaluated
 * within the current transactional view of the region.
 */
public Object call() throws Exception {
  // Removed a try/finally whose finally block was empty — it added
  // nesting with no effect on behavior.
  final PartitionedRegion pr = (PartitionedRegion) getRootRegion(name);
  assertNotNull(pr);
  return pr.entryCount(pr.getTXState(),
      pr.getDataStore().getAllLocalPrimaryBucketIds());
} };
/**
 * Verifies this member hosts exactly {@code numBuckets} buckets of the
 * fixed-partition region and exactly {@code primaryBuckets} primaries,
 * logging the hosted and primary bucket sets along the way.
 *
 * @param numBuckets expected number of locally hosted buckets
 * @param primaryBuckets expected number of locally hosted primaries
 */
public static void checkPrimaryBucketsForQuarter(Integer numBuckets,
    Integer primaryBuckets) {
  final HashMap bucketSizes = (HashMap) region_FPR.getDataStore().getSizeLocally();
  getLogWriter().info(
      "Size of the " + region_FPR + " in this VM :- " + bucketSizes.size()
          + "List of buckets : " + bucketSizes.keySet());
  assertEquals(numBuckets.intValue(), bucketSizes.size());
  getLogWriter().info(
      "Size of primary buckets the " + region_FPR + " in this VM :- "
          + region_FPR.getDataStore().getNumberOfPrimaryBucketsManaged());
  getLogWriter().info(
      "List of Primaries in this VM :- "
          + region_FPR.getDataStore().getAllLocalPrimaryBucketIds());
  assertEquals(primaryBuckets.intValue(),
      region_FPR.getDataStore().getNumberOfPrimaryBucketsManaged());
}
/**
 * After a cache close, verifies this member still hosts exactly
 * {@code numBuckets} buckets and that the number of locally managed
 * primaries is an exact multiple of {@code primaryBuckets}, logging the
 * hosted and primary bucket sets along the way.
 *
 * @param numBuckets expected number of locally hosted buckets
 * @param primaryBuckets divisor the local primary count must be a multiple of
 */
public static void checkPrimaryBucketsForQuarterAfterCacheClosed(
    Integer numBuckets, Integer primaryBuckets) {
  HashMap localBucket2RegionMap = (HashMap) region_FPR.getDataStore()
      .getSizeLocally();
  getLogWriter().info(
      "Size of the " + region_FPR + " in this VM :- "
          + localBucket2RegionMap.size() + "List of buckets : "
          + localBucket2RegionMap.keySet());
  assertEquals(numBuckets.intValue(), localBucket2RegionMap.size());
  getLogWriter().info(
      "Size of primary buckets the " + region_FPR + " in this VM :- "
          + region_FPR.getDataStore().getNumberOfPrimaryBucketsManaged());
  getLogWriter().info(
      "List of Primaries in this VM :- "
          + region_FPR.getDataStore().getAllLocalPrimaryBucketIds());
  // assertEquals takes (expected, actual); the original had them reversed,
  // which produces misleading "expected N but was 0" failure messages.
  assertEquals(0, region_FPR.getDataStore().getNumberOfPrimaryBucketsManaged()
      % primaryBuckets.intValue());
}