@Override
protected StringBuilder getStringBuilder() {
  // Extend the superclass description with this partitioned region's state,
  // one appended field per line for readability.
  StringBuilder sb = super.getStringBuilder();
  sb.append("; prId=").append(this.partitionedRegionId);
  sb.append("; isDestroyed=").append(this.isDestroyed);
  sb.append("; isClosed=").append(this.isClosed);
  sb.append("; retryTimeout=").append(this.retryTimeout);
  sb.append("; serialNumber=").append(getSerialNumber());
  sb.append("; hdfsStoreName=").append(getHDFSStoreName());
  sb.append("; hdfsWriteOnly=").append(getHDFSWriteOnly());
  sb.append("; partition attributes=").append(getPartitionAttributes());
  sb.append("; on VM ").append(getMyId());
  return sb;
}
public void execute(Region region, Integer key) { PartitionedRegion r = (PartitionedRegion) region; //get a key that doesn't exist, but is in the same bucket as the given key region.get(key + r.getPartitionAttributes().getTotalNumBuckets()); } });
protected void populateSequentiallyAndIncrementally() { int totalNumBuckets = aRegion.getPartitionAttributes().getTotalNumBuckets(); // int numEntriesPerBucket = MAX_KEYS_TO_BE_POPULATED/totalNumBuckets; for (int bucketId = 0; bucketId < totalNumBuckets; bucketId++) { populateSpecificBucketSpecificSize(bucketId, bucketId + 10); } }
/**
 * Returns true if any bucket's actual redundancy differs from the
 * configured redundancy target.
 */
public boolean isRedundancyImpaired() {
  final int bucketCount = this.prRegion.getPartitionAttributes().getTotalNumBuckets();
  final int target = this.prRegion.getPartitionAttributes().getRedundantCopies();
  for (int bucketId = 0; bucketId < bucketCount; bucketId++) {
    final int actual = this.prRegion.getRegionAdvisor().getBucketRedundancy(bucketId);
    if (actual > target) {
      return true;
    }
    // -1 is deliberately excluded from the under-redundancy check
    // (presumably "bucket has no copies yet" — confirm against advisor docs).
    if (actual != -1 && actual < target) {
      return true;
    }
  }
  return false;
}
/**
 * Populates every bucket of {@code aRegion} with an equal share of
 * MAX_KEYS_TO_BE_POPULATED entries.
 */
protected void populateSequentiallyAndEvenly() {
  final int bucketCount = aRegion.getPartitionAttributes().getTotalNumBuckets();
  // Integer division: any remainder of MAX_KEYS_TO_BE_POPULATED is dropped.
  final int entriesPerBucket = MAX_KEYS_TO_BE_POPULATED / bucketCount;
  for (int b = 0; b < bucketCount; b++) {
    populateSpecificBucketSpecificSize(b, entriesPerBucket);
  }
}
/**
 * Reports whether any bucket is over- or under-redundant relative to the
 * configured number of redundant copies.
 */
public boolean isRedundancyImpaired() {
  final int totalBuckets = this.prRegion.getPartitionAttributes().getTotalNumBuckets();
  final int configured = this.prRegion.getPartitionAttributes().getRedundantCopies();
  for (int b = 0; b < totalBuckets; b++) {
    final int current = this.prRegion.getRegionAdvisor().getBucketRedundancy(b);
    // -1 does not count as under-redundancy (presumably an uncreated
    // bucket — confirm); over-redundancy always counts.
    final boolean under = current != -1 && current < configured;
    final boolean over = current > configured;
    if (under || over) {
      return true;
    }
  }
  return false;
}
/**
 * Prepares the function execution targeting all members of the distributed
 * system and builds the routing key set with one key per bucket id.
 */
private void init() {
  execution = FunctionService.onMembers(region.getCache().getDistributedSystem());
  int totalNumBuckets = region.getPartitionAttributes().getTotalNumBuckets();
  // Fix: use the parameterized type instead of a raw CopyOnWriteArraySet
  // (raw generic types defeat compile-time checking; see Effective Java).
  routingKeySet = new CopyOnWriteArraySet<Integer>();
  for (int i = 0; i < totalNumBuckets; i++) {
    routingKeySet.add(i);
  }
}
/**
 * Returns the sum of the sizes of every bucket hosted by the given
 * data store.
 */
private long getSize(PartitionedRegionDataStore ds) {
  final int bucketCount =
      ds.getPartitionedRegion().getPartitionAttributes().getTotalNumBuckets();
  long total = 0L;
  for (int bucketId = 0; bucketId < bucketCount; bucketId++) {
    total += ds.getBucketSize(bucketId);
  }
  return total;
}
/**
 * This task is done once the regions are populated till the eviction
 * threshold is reached. In this task only one region is populated to trigger
 * the eviction of both PRs.
 */
public synchronized static void HydraTask_putExtraKeysWithSleep() {
  hydra.MasterController.sleepForMs(20000);
  // Extra keys all go into the last bucket of regionB.
  final int lastBucketId = regionB.getPartitionAttributes().getTotalNumBuckets() - 1;
  testInstance.populateSpecificBucketofRegionWithSleep(regionB, lastBucketId,
      extraKeysToBePopulatedInRegionBLastBucket);
}
public void run() { final PartitionedRegion pr = (PartitionedRegion)getRootRegion(name); assertNotNull(pr); // Create three buckets, add enough stuff in each to force two // overflow // ops in each for (int i = 0; i < bucketsToCreate; i++) { // assume mod-based hashing for bucket creation pr.put(new Integer(i), "value0"); pr.put(new Integer(i + pr.getPartitionAttributes().getTotalNumBuckets()), "value1"); pr.put(new Integer(i + (pr.getPartitionAttributes().getTotalNumBuckets()) * 2), "value2"); } } };
public void run() { setEvictionPercentage(heapPercentage); final PartitionedRegion pr = (PartitionedRegion)getRootRegion(name); assertNotNull(pr); // Create three buckets, add enough stuff in each to force two // overflow // ops in each for (int i = 0; i < bucketsToCreate; i++) { // assume mod-based hashing for bucket creation pr.put(new Integer(i), "value0"); pr.put(new Integer(i + pr.getPartitionAttributes().getTotalNumBuckets()), "value1"); pr.put(new Integer(i + (pr.getPartitionAttributes().getTotalNumBuckets()) * 2), "value2"); } } };
public Object call() {
  Cache cache = getCache();
  LocalRegion region = (LocalRegion) cache.getRegion("region");
  // Non-partitioned regions report a size of zero.
  if (!(region instanceof PartitionedRegion)) {
    return 0L;
  }
  PartitionedRegion pr = (PartitionedRegion) region;
  long total = 0;
  final int buckets = pr.getPartitionAttributes().getTotalNumBuckets();
  for (int b = 0; b < buckets; b++) {
    total += pr.getDataStore().getBucketSize(b);
  }
  return total;
} });
protected void populateSpecificBucketofRegionWithSleep( PartitionedRegion aRegion, int bucketId, int numEntries) { hydra.Log.getLogWriter().info( "Populating the bucketId " + bucketId + " with " + numEntries + "entries"); int entryKey = bucketId; // First entry will be 0 for bucket id 0, 1 for // bucket id 1 and 112 for bucketid 112. for (int numEntriesPerBucket = 0; numEntriesPerBucket < numEntries; numEntriesPerBucket++) { hydra.MasterController.sleepForMs(500); regionB.put(entryKey, new byte[ENTRY_SIZE_IN_BYTES]); entryKey = entryKey + regionB.getPartitionAttributes().getTotalNumBuckets(); } }
/**
 * Records {@code bucketId} against {@code member}. When the map carries
 * values, bucket ids accumulate in a per-member BitSetSet; otherwise only
 * the member's presence is recorded.
 */
static void addBucketIdForMember(HashMapOrSet membersToBucketIds,
    DistributedMember member, int bucketId, PartitionedRegion region) {
  if (member == null) {
    return;
  }
  if (!membersToBucketIds.hasValues) {
    // Set mode: only membership matters, not the bucket ids.
    membersToBucketIds.put(member, null);
    return;
  }
  BitSetSet bucketSet = (BitSetSet) membersToBucketIds.get(member);
  if (bucketSet == null) {
    // Lazily create a bit set sized to the region's bucket count.
    bucketSet = new BitSetSet(region.getPartitionAttributes().getTotalNumBuckets());
    membersToBucketIds.put(member, bucketSet);
  }
  bucketSet.addInt(bucketId);
}
/**
 * Adds a bucket id to the tracking structure for a member; with no values
 * in the map, just marks the member as present.
 */
static void addBucketIdForMember(HashMapOrSet membersToBucketIds,
    DistributedMember member, int bucketId, PartitionedRegion region) {
  if (member == null) {
    return; // nothing to record for an unknown member
  }
  if (membersToBucketIds.hasValues) {
    BitSetSet ids = (BitSetSet) membersToBucketIds.get(member);
    if (ids == null) {
      // First bucket for this member: size the set to the bucket count.
      ids = new BitSetSet(region.getPartitionAttributes().getTotalNumBuckets());
      membersToBucketIds.put(member, ids);
    }
    ids.addInt(bucketId);
  } else {
    membersToBucketIds.put(member, null);
  }
}
/**
 * Associates {@code bucketId} with {@code member} in the given structure.
 * In value-carrying mode the ids accumulate in a BitSetSet; otherwise the
 * member is simply registered with a null value.
 */
static void addBucketIdForMember(HashMapOrSet membersToBucketIds,
    DistributedMember member, int bucketId, PartitionedRegion region) {
  if (member == null) {
    return;
  }
  if (membersToBucketIds.hasValues) {
    BitSetSet existing = (BitSetSet) membersToBucketIds.get(member);
    if (existing != null) {
      existing.addInt(bucketId);
    } else {
      // No set yet for this member — create one sized to the bucket count.
      final int buckets = region.getPartitionAttributes().getTotalNumBuckets();
      BitSetSet fresh = new BitSetSet(buckets);
      fresh.addInt(bucketId);
      membersToBucketIds.put(member, fresh);
    }
  } else {
    membersToBucketIds.put(member, null);
  }
}
/**
 * Returns the HDFS event queue name for this region, or null when the
 * region's data policy does not include HDFS. Colocated regions share the
 * event queue of their colocation leader.
 */
protected String getHDFSEventQueueName() {
  if (!this.getDataPolicy().withHDFS()) {
    return null;
  }
  final boolean colocated = this.getPartitionAttributes().getColocatedWith() != null;
  final String path;
  if (colocated) {
    // Use the leader region's path so all colocated regions share a queue.
    path = ColocationHelper.getLeaderRegionName(this).getFullPath();
  } else {
    path = getFullPath();
  }
  return HDFSStoreFactoryImpl.getEventQueueName(path);
}
/**
 * Derives this region's HDFS event queue name from its full path — or from
 * the colocation leader's path when colocated. Returns null for regions
 * whose data policy has no HDFS component.
 */
protected String getHDFSEventQueueName() {
  if (!this.getDataPolicy().withHDFS()) {
    return null;
  }
  String basePath = getFullPath();
  if (this.getPartitionAttributes().getColocatedWith() != null) {
    // Colocated regions all key their queue off the leader region.
    basePath = ColocationHelper.getLeaderRegionName(this).getFullPath();
  }
  return HDFSStoreFactoryImpl.getEventQueueName(basePath);
}
/**
 * Builds the MBean bridge for a partitioned region: captures its partition
 * attributes and redundancy configuration, and attaches a stats monitor to
 * the region's PR statistics.
 */
protected PartitionedRegionBridge(Region<K, V> region) {
  super(region);
  this.parRegion = (PartitionedRegion)region;
  this.prStats = parRegion.getPrStats();
  PartitionAttributes<K, V> partAttrs = parRegion.getPartitionAttributes();
  this.parRegionMonitor = new MBeanStatsMonitor(PAR_REGION_MONITOR);
  // Presumably registers the PR metric definitions with the monitor before
  // the statistics are attached below — confirm against MBeanStatsMonitor.
  this.configurePartitionRegionMetrics();
  this.configuredRedundancy = partAttrs.getRedundantCopies();
  this.partitionAttributesData = RegionMBeanCompositeDataFactory.getPartitionAttributesData(partAttrs);
  // The fixed-partition table is only built when fixed partitions are configured.
  if (partAttrs.getFixedPartitionAttributes() != null) {
    this.fixedPartitionAttributesTable = RegionMBeanCompositeDataFactory.getFixedPartitionAttributesData(partAttrs);
  }
  parRegionMonitor.addStatisticsToMonitor(prStats.getStats());
}
/**
 * Constructs the JMX bridge for a partitioned region: snapshots partition
 * attributes (including any fixed-partition configuration) and wires the
 * region's PR statistics into a stats monitor.
 */
protected PartitionedRegionBridge(Region<K, V> region) {
  super(region);
  this.parRegion = (PartitionedRegion)region;
  this.prStats = parRegion.getPrStats();
  PartitionAttributes<K, V> partAttrs = parRegion.getPartitionAttributes();
  this.parRegionMonitor = new MBeanStatsMonitor(PAR_REGION_MONITOR);
  // NOTE(review): appears to set up the PR metric definitions before the
  // statistics are attached at the end — verify ordering requirement.
  this.configurePartitionRegionMetrics();
  this.configuredRedundancy = partAttrs.getRedundantCopies();
  this.partitionAttributesData = RegionMBeanCompositeDataFactory.getPartitionAttributesData(partAttrs);
  // Fixed-partition data is optional; only present when explicitly configured.
  if (partAttrs.getFixedPartitionAttributes() != null) {
    this.fixedPartitionAttributesTable = RegionMBeanCompositeDataFactory.getFixedPartitionAttributesData(partAttrs);
  }
  parRegionMonitor.addStatisticsToMonitor(prStats.getStats());
}