public void run2() {
  final Cache cache = getCache();

  // Buckets of the zero-redundancy PR must use distributed-ack scope.
  PartitionedRegion zeroRedundancyPr = (PartitionedRegion) cache
      .getRegion(Region.SEPARATOR + PR_ZeroRedundancy);
  for (java.util.Iterator it = zeroRedundancyPr.getDataStore().localBucket2RegionMap
      .values().iterator(); it.hasNext();) {
    BucketRegion bucketRegion = (BucketRegion) it.next();
    assertTrue(bucketRegion.getAttributes().getScope().isDistributedAck());
  }

  // Buckets of the single-redundancy PR must use the REPLICATE data policy.
  PartitionedRegion singleRedundancyPr = (PartitionedRegion) cache
      .getRegion(Region.SEPARATOR + PR_SingleRedundancy);
  for (java.util.Iterator it = singleRedundancyPr.getDataStore().localBucket2RegionMap
      .values().iterator(); it.hasNext();) {
    Region bucketRegion = (Region) it.next();
    assertEquals(DataPolicy.REPLICATE, bucketRegion.getAttributes().getDataPolicy());
  }
}
public Object call() throws Exception { final PartitionedRegion pr = (PartitionedRegion)getRootRegion(name); assertNotNull(pr); // assert over-flow behavior in local buckets and number of // entries // overflowed int entriesEvicted = 0; for (final Iterator i = pr.getDataStore().getAllLocalBuckets() .iterator(); i.hasNext();) { final Map.Entry entry = (Map.Entry)i.next(); final BucketRegion bucketRegion = (BucketRegion)entry.getValue(); if (bucketRegion == null) { continue; } assertTrue(bucketRegion.getAttributes().getEvictionAttributes() .getAlgorithm().isLRUEntry()); assertTrue(bucketRegion.getAttributes().getEvictionAttributes() .getAction().isOverflowToDisk()); } entriesEvicted += pr.getDiskRegionStats().getNumOverflowOnDisk(); return new Integer(entriesEvicted); } };
public Object call() throws Exception {
  // NOTE(review): the original wrapped this body in a try { ... } finally { }
  // with an empty finally block — a no-op, removed.
  final PartitionedRegion pr = (PartitionedRegion) getRootRegion(name);
  assertNotNull(pr);
  // Every non-null local bucket must be configured with memory-LRU eviction
  // whose action is local-destroy.
  for (final Iterator i = pr.getDataStore().getAllLocalBuckets().iterator(); i.hasNext();) {
    final Map.Entry entry = (Map.Entry) i.next();
    final BucketRegion bucketRegion = (BucketRegion) entry.getValue();
    if (bucketRegion == null) {
      // Bucket not hosted locally (or not yet created) — nothing to check.
      continue;
    }
    assertTrue(bucketRegion.getAttributes().getEvictionAttributes()
        .getAlgorithm().isLRUMemory());
    assertTrue(bucketRegion.getAttributes().getEvictionAttributes()
        .getAction().isLocalDestroy());
  }
  // Report the eviction count recorded by the PR's LRU statistics.
  long entriesEvicted = ((AbstractLRURegionMap) pr.entries)._getLruList().stats()
      .getEvictions();
  // Long.valueOf instead of the deprecated new Long(...) constructor.
  return Long.valueOf(entriesEvicted);
}
};
public Object call() throws Exception {
  // NOTE(review): the original wrapped this body in a try { ... } finally { }
  // with an empty finally block — a no-op, removed.
  final PartitionedRegion pr = (PartitionedRegion) getRootRegion(name);
  assertNotNull(pr);
  // Every non-null local bucket must be configured with entry-LRU eviction
  // whose action is local-destroy.
  for (final Iterator i = pr.getDataStore().getAllLocalBuckets().iterator(); i.hasNext();) {
    final Map.Entry entry = (Map.Entry) i.next();
    final BucketRegion bucketRegion = (BucketRegion) entry.getValue();
    if (bucketRegion == null) {
      // Bucket not hosted locally (or not yet created) — nothing to check.
      continue;
    }
    assertTrue(bucketRegion.getAttributes().getEvictionAttributes()
        .getAlgorithm().isLRUEntry());
    assertTrue(bucketRegion.getAttributes().getEvictionAttributes()
        .getAction().isLocalDestroy());
  }
  // Report the eviction count recorded by the PR's LRU statistics.
  long entriesEvicted = ((AbstractLRURegionMap) pr.entries)._getLruList().stats()
      .getEvictions();
  // Long.valueOf instead of the deprecated new Long(...) constructor.
  return Long.valueOf(entriesEvicted);
}
};
public Object call() throws Exception { final PartitionedRegion pr = (PartitionedRegion)getRootRegion(name); assertNotNull(pr); assertNull(pr.getDiskRegion()); assertNotNull(pr.getEvictionController()); // assert over-flow behavior in local buckets and number of // entries // overflowed long entriesEvicted = 0; for (final Iterator i = pr.getDataStore().getAllLocalBuckets() .iterator(); i.hasNext();) { final Map.Entry entry = (Map.Entry)i.next(); final BucketRegion bucketRegion = (BucketRegion)entry.getValue(); if (bucketRegion == null) { continue; } assertTrue(bucketRegion.getAttributes().getEvictionAttributes() .getAlgorithm().isLRUMemory()); assertTrue(bucketRegion.getAttributes().getEvictionAttributes() .getAction().isOverflowToDisk()); } entriesEvicted += pr.getDiskRegionStats().getNumOverflowOnDisk(); return new Long(entriesEvicted); } };
continue; assertTrue(bucketRegion.getAttributes().getEvictionAttributes() .getAlgorithm().isLRUHeap()); assertTrue(bucketRegion.getAttributes().getEvictionAttributes() .getAction().isLocalDestroy());
continue; assertTrue(bucketRegion.getAttributes().getEvictionAttributes() .getAlgorithm().isLRUHeap()); assertTrue(bucketRegion.getAttributes().getEvictionAttributes() .getAction().isOverflowToDisk());
public void _test040NoAutoEviction() throws Exception {
  // If a cache is already up, recycle it with auto-eviction disabled via the
  // system property so the region is created without a heap-LRU controller.
  boolean recycled = false;
  if (!cache.isClosed()) {
    tearDown();
    cache.close();
    System.setProperty("gemfire.disableAutoEviction", "true");
    recycled = true;
  }
  Region<Integer, String> r;
  try {
    if (recycled) {
      setUp();
    }
    r = createRegion(getName());
  } finally {
    // Always restore the property so a failure here cannot leak the
    // "disabled" setting into subsequent tests (the original only reset it
    // on the success path).
    System.setProperty("gemfire.disableAutoEviction", "false");
  }
  for (int i = 0; i < 5; i++) {
    r.put(i, "value" + i);
  }
  PartitionedRegion pr = (PartitionedRegion) r;
  BucketRegion br = pr.getBucketRegion(1);
  // With auto-eviction disabled, the bucket must have NO eviction algorithm
  // and the resource manager must use a zero eviction-heap percentage.
  assertNotNull(br.getAttributes().getEvictionAttributes());
  assertEquals(EvictionAlgorithm.NONE,
      br.getAttributes().getEvictionAttributes().getAlgorithm());
  // Renamed from "cache" to avoid shadowing the field used above.
  GemFireCacheImpl gemFireCache = (GemFireCacheImpl) r.getCache();
  assertEquals(0.0f, gemFireCache.getResourceManager().getEvictionHeapPercentage());
}
public int getSizeForEviction() {
  // Only regions configured for heap-LRU eviction contribute a size;
  // everything else reports 0.
  final EvictionAttributes evictionAttrs = this.getAttributes().getEvictionAttributes();
  if (evictionAttrs == null) {
    return 0;
  }
  if (!evictionAttrs.getAlgorithm().isLRUHeap()) {
    return 0;
  }
  // Local-destroy counts map entries still in the VM; otherwise use the
  // region's own in-VM entry count.
  if (evictionAttrs.getAction().isLocalDestroy()) {
    return this.getRegionMap().sizeInVM();
  }
  return (int) this.getNumEntriesInVM();
}

@Override
public int getSizeForEviction() {
  // Non-heap-LRU (or unconfigured) regions report an eviction size of 0.
  final EvictionAttributes attrs = this.getAttributes().getEvictionAttributes();
  final boolean heapLru = attrs != null && attrs.getAlgorithm().isLRUHeap();
  if (!heapLru) {
    return 0;
  }
  // Choose the counter that matches the configured eviction action.
  return attrs.getAction().isLocalDestroy()
      ? this.getRegionMap().sizeInVM()
      : (int) this.getNumEntriesInVM();
}

@Override
assertNotNull(br.getAttributes().getEvictionAttributes()); assertEquals(EvictionAlgorithm.LRU_HEAP, br.getAttributes().getEvictionAttributes().getAlgorithm()); assertEquals(EvictionAction.OVERFLOW_TO_DISK, br.getAttributes().getEvictionAttributes().getAction());
this.versions = new VersionedObjectList(this.removeAllPRDataSize, true, bucketRegion.getAttributes().getConcurrencyChecksEnabled());
this.versions = new VersionedObjectList(this.putAllPRDataSize, true, bucketRegion.getAttributes().getConcurrencyChecksEnabled());
assertTrue(s.size() > 0); for (BucketRegion br: s) { assertEquals(true, br.getAttributes().isDiskSynchronous()); assertTrue(s.size() > 0); for (BucketRegion br: s) { assertEquals(false, br.getAttributes().isDiskSynchronous()); assertTrue(s.size() > 0); for (BucketRegion br: s) { assertEquals(true, br.getAttributes().isDiskSynchronous()); assertTrue(s.size() > 0); for (BucketRegion br: s) { assertEquals(false, br.getAttributes().isDiskSynchronous());
.getAttributes().getConcurrencyChecksEnabled());