/**
 * Resolves the partitioned region registered under the given name and
 * reports its current entry count.
 *
 * @param string full path/name of the region to look up
 * @return number of entries in the resolved {@link PartitionedRegion}
 */
public static int returnPRSize(String string) {
  final PartitionedRegion partitionedRegion =
      (PartitionedRegion) cache.getRegion(string);
  return partitionedRegion.size();
}
/**
 * Returns the total number of queued events across all shadow
 * partitioned-region queues, plus any events still held in the sender's
 * temporary queue.
 *
 * @return combined size of every shadow PR queue and the tmp event queue
 */
@Override
public int size() {
  int size = 0;
  for (PartitionedRegion prQ : this.userRegionNameToshadowPRMap.values()) {
    // Read size() once per queue instead of twice (once for logging, once
    // for the sum) -- PartitionedRegion.size() is not a trivial field read.
    final int queueSize = prQ.size();
    if (logger.isDebugEnabled()) {
      logger.debug("The name of the queue region is {} and the size is {}. keyset size is {}",
          prQ.getName(), queueSize, prQ.keys().size());
    }
    size += queueSize;
  }
  return size + sender.getTmpQueuedEventSize();
}
protected void logRegionSize() { // if (aRegion.size() != MAX_KEYS_TO_BE_POPULATED){ // throw new TestException("Expected the region size to be // "+MAX_KEYS_TO_BE_POPULATED+" but found "+aRegion.size()); // }else { hydra.Log.getLogWriter().info("Region has expected size " + aRegion.size()); // } }
/**
 * Returns the total number of queued events across all shadow
 * partitioned-region queues, plus any events still held in the sender's
 * temporary queue.
 *
 * @return combined size of every shadow PR queue and the tmp event queue
 */
@Override
public int size() {
  int size = 0;
  for (PartitionedRegion prQ : this.userRegionNameToshadowPRMap.values()) {
    // Read size() once per queue instead of twice (once for logging, once
    // for the sum) -- PartitionedRegion.size() is not a trivial field read.
    final int queueSize = prQ.size();
    if (logger.finerEnabled()) {
      logger.finer("The name of the queue region is " + prQ.getName()
          + " and the size is " + queueSize
          + " keyset size is " + prQ.keys().size());
    }
    size += queueSize;
  }
  return size + sender.getTmpQueuedEventSize();
}
/** * Task to populate both the regions incrementally till it reaches the * threshold */ public synchronized static void HydraTask_putUnEvenlyTillThreshold() { testInstance.populateSequentiallyAndIncrementally(); hydra.Log.getLogWriter().info( "After putting till threshold the region sizes are " + regionA.size() + " and " + regionB.size()); //Verify no eviction happened testInstance.verifyNoEviction(regionA); testInstance.verifyNoEviction(regionB); }
/**
 * Logs the name and current size of the given partitioned region.
 *
 * @param aRegion region whose size is reported
 */
protected void logRegionSize(PartitionedRegion aRegion) {
  final String message = "Region " + aRegion.getName()
      + " has expected size " + aRegion.size();
  hydra.Log.getLogWriter().info(message);
}
// Populates an empty PR with one entry per key 0..2*totalNumBuckets
// (inclusive, counting down), then asserts the resulting size and that a
// bucket exists for every configured bucket id. Re-lined from the
// collapsed single-line form, in which the inline // comments swallowed
// the rest of the statement; only comments/whitespace changed.
public void run2() throws CacheException {
  Cache cache = getCache();
  PartitionedRegion pr = (PartitionedRegion) cache.getRegion(regionName);
  assertTrue(pr.isEmpty());
  Integer k;
  // Create keys such that all buckets are created, Integer works well
  // assuming buckets are allocated on the mod of the key hashCode, x 2 just to be safe
  final int numEntries = pr.getTotalNumberOfBuckets() * 2;
  for (int i = numEntries; i >= 0; --i) {
    k = new Integer(i);
    pr.put(k, value);
  }
  // Loop ran from numEntries down to 0 inclusive => numEntries+1 puts.
  assertEquals(numEntries + 1, pr.size());
  assertEquals(pr.getRegionAdvisor().getBucketSet().size(), pr.getTotalNumberOfBuckets());
}
} // closes the enclosing (unseen) anonymous class
// NOTE(review): fragment -- the receiver of createPartitionedRegion, the
// enclosing method header, and the definitions of pr/key/val/num/count/
// removeCnt are all outside this view. Comments describe only what is
// visible; re-lined from the collapsed single-line form (tokens unchanged).
    .createPartitionedRegion("testSize", String.valueOf(200), 0);
int size = 0;
size = pr.size();
pr.put(new Integer(key), val);
Object tmpVal = pr.get(new Integer(key));
int tmpSize = pr.size();
// Size after the put is expected to equal num.
assertEquals(num, tmpSize);
size = pr.size();
assertEquals(size, count);
// After destroying one entry the size should drop by removeCnt.
pr.destroy(new Integer(num));
size = pr.size();
assertEquals(size, (count - removeCnt));
pr.size();
// NOTE(review): this method is truncated in this view -- the expression
// assigned to averageEvictionPercentage (and the rest of the body,
// presumably dividing evicted entries by a per-bucket expectation --
// TODO confirm) continues past the last visible line. Comments only.
protected void verifyUniformBucketEviction() {
  int regionSize = aRegion.size();
  int totalNumBuckets = aRegion.getPartitionAttributes().getTotalNumBuckets();
  float averageEvictionPercentage = ((float)totalEvictedEntries * 100)
// Creates one partitioned region per redundancy level
// (0..regions.length-1), all sharing the same bucket count, and asserts
// each region starts empty. Re-lined from the collapsed single-line form;
// only comments/whitespace changed.
public void run2() throws CacheException {
  Cache cache = getCache();
  AttributesFactory attr = new AttributesFactory();
  PartitionAttributesFactory paf = new PartitionAttributesFactory()
      .setTotalNumBuckets(numBuckets);
  for (int redundancy = 0; redundancy < regions.length; redundancy++) {
    // The same factory is reused; only the redundancy level changes.
    paf.setRedundantCopies(redundancy);
    attr.setPartitionAttributes(paf.create());
    PartitionedRegion p = (PartitionedRegion) cache.createRegion(regions[redundancy], attr.create());
    assertNotNull(p);
    // A freshly created PR must report zero entries.
    assertEquals(0, p.size());
  }
}
}; // closes the enclosing (unseen) anonymous class
private void initialize(PartitionedRegion region) { setNumberOfEntries(region.size()); // If there is a data store (meaning that the PR has storage // in this VM), get the number of entries and heap size. Else, // set these to 0. PartitionedRegionDataStore ds = region.getDataStore(); int numLocalEntries = 0; long heapSize = 0; if (ds != null) { CachePerfStats cpStats = ds.getCachePerfStats(); numLocalEntries = (int) cpStats.getEntries(); heapSize = ds.currentAllocatedMemory(); } setNumberOfLocalEntries(numLocalEntries); setHeapSize(heapSize); }
private void initialize(PartitionedRegion region) { setNumberOfEntries(region.size()); // If there is a data store (meaning that the PR has storage // in this VM), get the number of entries and heap size. Else, // set these to 0. PartitionedRegionDataStore ds = region.getDataStore(); int numLocalEntries = 0; long heapSize = 0; if (ds != null) { CachePerfStats cpStats = ds.getCachePerfStats(); numLocalEntries = (int) cpStats.getEntries(); heapSize = ds.currentAllocatedMemory(); } setNumberOfLocalEntries(numLocalEntries); setHeapSize(heapSize); }
// Creates one partitioned region per redundancy level with
// localMaxMemory(0) -- presumably accessor-only regions with no local
// storage, TODO confirm -- and asserts each region starts empty.
// Re-lined from the collapsed single-line form; only comments/whitespace
// changed.
public void run2() throws CacheException {
  Cache cache = getCache();
  AttributesFactory attr = new AttributesFactory();
  PartitionAttributesFactory paf = new PartitionAttributesFactory()
      .setTotalNumBuckets(numBuckets)
      .setLocalMaxMemory(0);
  for (int redundancy = 0; redundancy < regions.length; redundancy++) {
    // The same factory is reused; only the redundancy level changes.
    paf.setRedundantCopies(redundancy);
    attr.setPartitionAttributes(paf.create());
    PartitionedRegion p = (PartitionedRegion) cache.createRegion(regions[redundancy], attr.create());
    assertNotNull(p);
    // A freshly created PR must report zero entries.
    assertEquals(0, p.size());
  }
}
}); // closes the enclosing (unseen) anonymous class and its call
// NOTE(review): fragment -- key/pr/val/num are declared in the enclosing
// (unseen) method. Puts one more entry under the next key and asserts the
// region reached the expected size num.
key++;
pr.put(new Integer(key), val);
assertEquals(num, pr.size());
// NOTE(review): fragment -- the enclosing loop and this if's closing brace
// are outside this view. Presumably keeps polling while the parallel
// queue region still holds events and the wait flag is set -- TODO confirm
// keepWaiting/cacheWrite semantics against the enclosing method.
if (cacheWrite && parallelQueueRegion.size() != 0 && keepWaiting) {
  continue;
public static void validateRefConstraintCheckability(TableDescriptor refTd, IndexRowGenerator refIndexDescriptor, TableDescriptor fkTd, LanguageConnectionContext lcc, String[] fkColumnNames) throws StandardException { //If the FK table is partitioned & Referential table is also partitioned // with referential index constraint type is local, and the FK table is // not colocated with the Referential table & FK table region size is > 0 // then we throw exception for now. //Ref : Bug 47289 boolean checkFurther = refTd.getDistributionDescriptor().isPartitioned() && fkTd.getDistributionDescriptor().isPartitioned(); if(checkFurther) { String prFKPath = Misc.getRegionPath(fkTd.getSchemaName(), fkTd.getName(), lcc); String prRefPath = Misc.getRegionPath(refTd.getSchemaName(), refTd.getName(), lcc); PartitionedRegion prFK = (PartitionedRegion)Misc.getRegionByPath(prFKPath, true); PartitionedRegion prRef = (PartitionedRegion)Misc.getRegionByPath(prRefPath, true); checkFurther = refIndexDescriptor.indexType().equals(GfxdConstants.LOCAL_SORTEDMAP_INDEX_TYPE) && prFK.size() > 0; if(checkFurther) { //check if the two tables are colocated && the FK columns contain the complete partitioning columns GfxdPartitionResolver resolverFK = (GfxdPartitionResolver)prFK.getPartitionResolver(); String partitioningCols [] = resolverFK.getColumnNames(); boolean fkColsContainPartitioningCols = doFKColsContainAllPartitioningCols(fkColumnNames, partitioningCols); GfxdPartitionResolver resolverRef = (GfxdPartitionResolver)prRef.getPartitionResolver(); if( !(fkColsContainPartitioningCols && resolverFK != null && resolverRef != null && resolverFK.getMasterTable(true).equals(resolverRef.getMasterTable(true)))) { throw StandardException.newException(SQLState.LANG_ALTER_TABLE_ADD_FK_CONSTRAINT_CHECK_FAIL,fkTd.tableName, refTd.tableName); } } } }
public static void validateRefConstraintCheckability(TableDescriptor refTd, IndexRowGenerator refIndexDescriptor, TableDescriptor fkTd, LanguageConnectionContext lcc, String[] fkColumnNames) throws StandardException { //If the FK table is partitioned & Referential table is also partitioned // with referential index constraint type is local, and the FK table is // not colocated with the Referential table & FK table region size is > 0 // then we throw exception for now. //Ref : Bug 47289 boolean checkFurther = refTd.getDistributionDescriptor().isPartitioned() && fkTd.getDistributionDescriptor().isPartitioned(); if(checkFurther) { String prFKPath = Misc.getRegionPath(fkTd.getSchemaName(), fkTd.getName(), lcc); String prRefPath = Misc.getRegionPath(refTd.getSchemaName(), refTd.getName(), lcc); PartitionedRegion prFK = (PartitionedRegion)Misc.getRegionByPath(prFKPath, true); PartitionedRegion prRef = (PartitionedRegion)Misc.getRegionByPath(prRefPath, true); checkFurther = refIndexDescriptor.indexType().equals(GfxdConstants.LOCAL_SORTEDMAP_INDEX_TYPE) && prFK.size() > 0; if(checkFurther) { //check if the two tables are colocated && the FK columns contain the complete partitioning columns GfxdPartitionResolver resolverFK = (GfxdPartitionResolver)prFK.getPartitionResolver(); String partitioningCols [] = resolverFK.getColumnNames(); boolean fkColsContainPartitioningCols = doFKColsContainAllPartitioningCols(fkColumnNames, partitioningCols); GfxdPartitionResolver resolverRef = (GfxdPartitionResolver)prRef.getPartitionResolver(); if( !(fkColsContainPartitioningCols && resolverFK != null && resolverRef != null && resolverFK.getMasterTable(true).equals(resolverRef.getMasterTable(true)))) { throw StandardException.newException(SQLState.LANG_ALTER_TABLE_ADD_FK_CONSTRAINT_CHECK_FAIL,fkTd.tableName, refTd.tableName); } } } }
public static void validateRefConstraintCheckability(TableDescriptor refTd, IndexRowGenerator refIndexDescriptor, TableDescriptor fkTd, LanguageConnectionContext lcc, String[] fkColumnNames) throws StandardException { //If the FK table is partitioned & Referential table is also partitioned // with referential index constraint type is local, and the FK table is // not colocated with the Referential table & FK table region size is > 0 // then we throw exception for now. //Ref : Bug 47289 boolean checkFurther = refTd.getDistributionDescriptor().isPartitioned() && fkTd.getDistributionDescriptor().isPartitioned(); if(checkFurther) { String prFKPath = Misc.getRegionPath(fkTd.getSchemaName(), fkTd.getName(), lcc); String prRefPath = Misc.getRegionPath(refTd.getSchemaName(), refTd.getName(), lcc); PartitionedRegion prFK = (PartitionedRegion)Misc.getRegionByPath(prFKPath, true); PartitionedRegion prRef = (PartitionedRegion)Misc.getRegionByPath(prRefPath, true); checkFurther = refIndexDescriptor.indexType().equals(GfxdConstants.LOCAL_SORTEDMAP_INDEX_TYPE) && prFK.size() > 0; if(checkFurther) { //check if the two tables are colocated && the FK columns contain the complete partitioning columns GfxdPartitionResolver resolverFK = (GfxdPartitionResolver)prFK.getPartitionResolver(); String partitioningCols [] = resolverFK.getColumnNames(); boolean fkColsContainPartitioningCols = doFKColsContainAllPartitioningCols(fkColumnNames, partitioningCols); GfxdPartitionResolver resolverRef = (GfxdPartitionResolver)prRef.getPartitionResolver(); if( !(fkColsContainPartitioningCols && resolverFK != null && resolverRef != null && resolverFK.getMasterTable(true).equals(resolverRef.getMasterTable(true)))) { throw StandardException.newException(SQLState.LANG_ALTER_TABLE_ADD_FK_CONSTRAINT_CHECK_FAIL,fkTd.tableName, refTd.tableName); } } } }
// NOTE(review): fragment -- the enclosing method and this if's body/
// closing brace are outside this view. Fires only once the region has
// grown past evictionThresholdKeys AND the test config enables
// pauseAfterEvictionThreshold (default false).
if (aRegion.size() > evictionThresholdKeys
    && TestConfig.tab().booleanAt(EvictionPrms.pauseAfterEvictionThreshold, false)) {
// NOTE(review): fragment -- regionAck/key/num come from the enclosing
// (unseen) test method. After destroying the entry the region must be
// empty, and the data store must accept num-1 more bytes safely but
// refuse num.
regionAck.destroy(new Integer(key));
assertEquals(0, regionAck.size());
assertTrue(regionAck.getDataStore().canAccommodateMoreBytesSafely(num-1));
assertFalse(regionAck.getDataStore().canAccommodateMoreBytesSafely(num));