/**
 * Test hook to return the per-entry overhead for a bucket region.
 * PRECONDITION: a bucket must exist and be using LRU.
 * @since 6.1.2.9
 */
public int getPerEntryLRUOverhead() {
  BucketRegion br = localBucket2RegionMap.values().iterator().next();
  AbstractLRURegionMap map = (AbstractLRURegionMap) br.getRegionMap();
  return map.getEntryOverHead();
}
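
/*
 * A minimal sketch of how the hook above might be exercised from a test,
 * assuming the hook lives on PartitionedRegionDataStore (as the
 * localBucket2RegionMap field suggests) and the region is configured with
 * heap-LRU eviction. createLRUPartitionedRegion() is a hypothetical helper,
 * not part of these snippets; the put() satisfies the PRECONDITION by
 * forcing a bucket to be created.
 */
public void testPerEntryLRUOverhead() {
  Region<String, String> r = createLRUPartitionedRegion(); // hypothetical helper
  r.put("key", "value"); // ensure at least one bucket exists
  PartitionedRegion pr = (PartitionedRegion) r;
  int overhead = pr.getDataStore().getPerEntryLRUOverhead();
  assertTrue("expected a positive per-entry LRU overhead", overhead > 0);
}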

protected void clearBackingCHM(Region<Integer, String> r) {
  PartitionedRegion pr = (PartitionedRegion) r;
  for (BucketRegion br : pr.getDataStore().getAllLocalBucketRegions()) {
    assertTrue(br.getRegionMap() instanceof HDFSRegionMap);
    ((AbstractRegionMap) br.getRegionMap())._getMap().clear();
    // wait here to make sure that the queue has been flushed
  }
  sleep(pr.getFullPath());
}

@Override
protected void clearBackingCHM(Region<Integer, String> r) {
  PartitionedRegion pr = (PartitionedRegion) r;
  for (BucketRegion br : pr.getDataStore().getAllLocalBucketRegions()) {
    assertTrue(br.getRegionMap() instanceof HDFSRegionMap);
    CustomEntryConcurrentHashMap chm = ((AbstractRegionMap) br.getRegionMap())._getMap();
    Iterator it = chm.keySet().iterator();
    while (it.hasNext()) {
      Object key = it.next();
      OffHeapRegionEntry re = (OffHeapRegionEntry) chm.remove(key);
      assert re != null;
      re.release();
    }
    // wait here to make sure that the queue has been flushed
  }
  sleep(pr.getFullPath());
}
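
/*
 * Unlike the heap variant above, which simply clear()s the backing map, this
 * off-heap override removes entries one at a time and release()s each
 * OffHeapRegionEntry. A plain clear() would drop the map entries but,
 * presumably, leak the off-heap memory they reference, since nothing would
 * release the entries' off-heap references.
 */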

/**
 * The partitioned region entry count is computed from the entry counts of
 * all local primary buckets. Ideally it should come from stats; to be done
 * in 8.0.
 * @return long
 */
@Override
public long getEntryCount() {
  if (parRegion.isDataStore()) {
    int numLocalEntries = 0;
    Set<BucketRegion> localPrimaryBucketRegions =
        parRegion.getDataStore().getAllLocalPrimaryBucketRegions();
    if (localPrimaryBucketRegions != null && localPrimaryBucketRegions.size() > 0) {
      for (BucketRegion br : localPrimaryBucketRegions) {
        // TODO soplog, fix this for griddb regions
        numLocalEntries += br.getRegionMap().sizeInVM() - br.getTombstoneCount();
      }
    }
    return numLocalEntries;
  } else {
    return ManagementConstants.ZERO;
  }
}
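
/*
 * Worked example with hypothetical numbers: if this member hosts two local
 * primary buckets where sizeInVM() returns 10 and 5, and getTombstoneCount()
 * returns 2 and 0 respectively, getEntryCount() reports
 * (10 - 2) + (5 - 0) = 13. Tombstones (destroyed entries retained for
 * version consistency) are excluded so only live entries are counted.
 */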

public int getSizeForEviction() {
  EvictionAttributes ea = this.getAttributes().getEvictionAttributes();
  if (ea == null) return 0;
  EvictionAlgorithm algo = ea.getAlgorithm();
  if (!algo.isLRUHeap()) return 0;
  EvictionAction action = ea.getAction();
  int size = action.isLocalDestroy()
      ? this.getRegionMap().sizeInVM()
      : (int) this.getNumEntriesInVM();
  return size;
}
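
/*
 * Note on the ternary above: the two counters differ once eviction has run.
 * With EvictionAction.LOCAL_DESTROY an evicted entry leaves the map entirely,
 * so sizeInVM() (all entries in the map) is the relevant size; with
 * overflow-to-disk the entry stays in the map while its value moves to disk,
 * so getNumEntriesInVM() (entries whose value is still in the VM) is used
 * instead. This reading follows the method names; the exact semantics are an
 * assumption.
 */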

public Object call() throws Exception {
  Region r = getGemfireCache().createRegionFactory(RegionShortcut.PARTITION).create(regionName);
  r.put("key", "value");
  r.put("key2", "value2");
  r.put("key3", "value3");
  PartitionedRegion pr = (PartitionedRegion) r;
  BucketRegion br = pr.getBucketRegion("key");
  assertNotNull(br);
  // simulate a removed key
  br.getRegionMap().getEntry("key").setValue(pr, Token.REMOVED_PHASE1);
  return null;
}
});

private Object getFromQueue() {
  HDFSGatewayEventImpl evt = queue.current();
  if (type == null) {
    return evt;
  }
  switch (type) {
    case KEYS:
      byte[] key = evt.getSerializedKey();
      return deserialize ? EntryEventImpl.deserialize(key) : key;
    case VALUES:
      return evt.getValue();
    default:
      Object keyObj = EntryEventImpl.deserialize(evt.getSerializedKey());
      if (keyObj instanceof KeyWithRegionContext) {
        ((KeyWithRegionContext) keyObj).setRegionContext(region.getPartitionedRegion());
      }
      return ((HDFSRegionMap) region.getRegionMap()).getDelegate()
          .getEntryFromEvent(keyObj, evt, true, forUpdate);
  }
}

private Object getFromHdfs() {
  if (type == null) {
    return hdfs.getValue();
  }
  switch (type) {
    case KEYS:
      byte[] key = this.currentHdfsKey;
      return deserialize ? EntryEventImpl.deserialize(key) : key;
    case VALUES:
      PersistedEventImpl evt = hdfs.getValue();
      return evt.getValue();
    default:
      Object keyObj = EntryEventImpl.deserialize(this.currentHdfsKey);
      if (keyObj instanceof KeyWithRegionContext) {
        ((KeyWithRegionContext) keyObj).setRegionContext(region.getPartitionedRegion());
      }
      return ((HDFSRegionMap) region.getRegionMap()).getDelegate()
          .getEntryFromEvent(keyObj, hdfs.getValue(), true, forUpdate);
  }
}
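
/*
 * A sketch of how a combined iterator might use the two methods above,
 * preferring the in-memory queue (events not yet flushed to HDFS) over the
 * persisted copy. hasCurrentQueueEvent() and the surrounding class shape are
 * hypothetical; getFromQueue() and getFromHdfs() are the methods shown above.
 */
public Object next() {
  // a queued event is newer than anything persisted for the same key
  return hasCurrentQueueEvent() ? getFromQueue() : getFromHdfs();
}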

event.setNewValue(new VMCachedDeserializable("value3", 12));
event.setVersionTag(tag);
bucket.getRegionMap().basicPut(event, System.currentTimeMillis(), true, false, null, false, false);
event.release();

event.setVersionTag(tag);
getLogWriter().info("applying this event to the cache: " + event);
bucket.getRegionMap().basicPut(event, System.currentTimeMillis(), true, false, null, false, false);
event.release();
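
/*
 * Applying the same event, with the same VersionTag, through basicPut() twice
 * presumably exercises the region map's concurrency-control path: the second
 * apply should be recognized via version-tag comparison rather than installing
 * the value a second time. Each release() frees any off-heap resources the
 * event holds; the tag is re-set before the second apply, which suggests (an
 * assumption) that release() clears some of the event's state.
 */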