public final void removeEntry(Object key, RegionEntry re, boolean updateStat) { if (re.isTombstone() && _getMap().get(key) == re && !re.isMarkedForEviction()){ logger.fatal(LocalizedMessage.create(LocalizedStrings.AbstractRegionMap_ATTEMPT_TO_REMOVE_TOMBSTONE), new Exception("stack trace")); return; // can't remove tombstones except from the tombstone sweeper } if (_getMap().remove(key, re)) { re.removePhase2(); if (updateStat) { incEntryCount(-1); } } }
public final void removeEntry(Object key, RegionEntry re, boolean updateStat) { final LocalRegion owner = _getOwner(); if (re.isTombstone() && _getMap().get(key) == re && !re.isMarkedForEviction()) { owner.getLogWriterI18n().severe(LocalizedStrings.AbstractRegionMap_ATTEMPT_TO_REMOVE_TOMBSTONE, key, new Exception("stack trace")); return; // can't remove tombstones except from the tombstone sweeper } // _getOwner().getLogWriterI18n().info(LocalizedStrings.DEBUG, "DEBUG: removing entry " + re, new Exception("stack trace")); if (_getMap().remove(key, re)) { re.removePhase2(owner); if (updateStat) { incEntryCount(-1); } } }
/**
 * Adjusts the owner's size accounting for the event: a plain update of a live
 * entry adjusts by delta, anything else (create, or tombstone resurrection) is
 * accounted as a create. The entry count is bumped only for true creates, since
 * a tombstone was already counted as an entry.
 */
private void updateSize(EntryEventImpl event, int oldSize, boolean isUpdate, boolean wasTombstone) {
  final boolean plainUpdate = isUpdate && !wasTombstone;
  if (plainUpdate) {
    _getOwner().updateSizeOnPut(event.getKey(), oldSize, event.getNewValueBucketSize());
  } else {
    _getOwner().updateSizeOnCreate(event.getKey(), event.getNewValueBucketSize());
    if (!wasTombstone) {
      incEntryCount(1); // tombstones are already counted; only bump for brand-new entries
    }
  }
}
/**
 * Updates the owner's size bookkeeping for this event. Creates (including a
 * value replacing a tombstone) go through updateSizeOnCreate; in-place updates
 * of live entries go through updateSizeOnPut. Only true creates increment the
 * entry count — a tombstone already counted as an entry.
 */
private void updateSize(EntryEventImpl event, int oldSize, boolean isUpdate, boolean wasTombstone) {
  final Object key = event.getKey();
  final int newBucketSize = event.getNewValueBucketSize();
  if (!isUpdate || wasTombstone) {
    // Create path (covers resurrection of a tombstone).
    _getOwner().updateSizeOnCreate(key, newBucketSize);
    if (!wasTombstone) {
      incEntryCount(1);
    }
  } else {
    // Update of a live entry: adjust by the old-vs-new delta.
    _getOwner().updateSizeOnPut(key, oldSize, newBucketSize);
  }
}
public RegionEntry getEntry(Object key, EntryEventImpl event) { RegionEntry re = getEntry(key, event, true); // get from tx should put the entry back in map // it should be evicted once tx completes if (re != null && getTXState(event) != null) { // put the region entry in backing CHM of AbstractRegionMap so that // it can be locked in basicPut/destroy RegionEntry oldRe = backingRM.putEntryIfAbsent(key, re); if (oldRe != null) { if (re.isOffHeap() && !oldRe.equals(re)) { ((OffHeapRegionEntry)re).release(); } return oldRe; } re.setMarkedForEviction(); owner.updateSizeOnCreate(key, owner.calculateRegionEntryValueSize(re)); ((AbstractRegionMap)backingRM).incEntryCount(1); ((AbstractRegionMap)backingRM).lruEntryCreate(re); } return re; }
protected RegionEntry getEntry(EntryEventImpl event) { RegionEntry re = getEntry(event.getKey(), event, false); if (re != null && event.isLoadedFromHDFS()) { // put the region entry in backing CHM of AbstractRegionMap so that // it can be locked in basicPut/destroy RegionEntry oldRe = backingRM.putEntryIfAbsent(event.getKey(), re); if (oldRe != null) { if (re instanceof OffHeapRegionEntry && !oldRe.equals(re)) { ((OffHeapRegionEntry) re).release(); } return oldRe; } // since the entry is faulted in from HDFS, it must have // satisfied the eviction criteria in the past, so mark it for eviction re.setMarkedForEviction(); owner.updateSizeOnCreate(event.getKey(), owner.calculateRegionEntryValueSize(re)); ((AbstractRegionMap) backingRM).incEntryCount(1); ((AbstractRegionMap) backingRM).lruEntryCreate(re); } return re; }
// NOTE(review): fragment — begins and ends mid-method; the enclosing definition is not visible here.
incEntryCount(1);
lruEntryUpdate(newRe);
} finally {
  // Resynchronize the entry-count stat with the actual map size, then reap any
  // un-versioned tombstones left behind by a versioned -> non-versioned recovery.
  incEntryCount(size());
  for (Iterator<RegionEntry> iter = regionEntries().iterator(); iter.hasNext(); ) {
    RegionEntry re = iter.next();
    if (re.isTombstone()) {
      if (re.getVersionStamp() == null) { // bug #50992 - recovery from versioned to non-versioned
        incEntryCount(-1);
        iter.remove();
        continue;
protected RegionEntry getEntry(EntryEventImpl event) { RegionEntry re = getEntry(event.getKey(), event, false); if (re != null && event.isLoadedFromHDFS()) { // put the region entry in backing CHM of AbstractRegionMap so that // it can be locked in basicPut/destroy RegionEntry oldRe = backingRM.putEntryIfAbsent(event.getKey(), re); if (oldRe != null) { if (re.isOffHeap() && !oldRe.equals(re)) { ((OffHeapRegionEntry) re).release(); } return oldRe; } // since the entry is faulted in from HDFS, it must have // satisfied the eviction criteria in the past, so mark it for eviction re.setMarkedForEviction(); owner.updateSizeOnCreate(event.getKey(), owner.calculateRegionEntryValueSize(re)); ((AbstractRegionMap) backingRM).incEntryCount(1); ((AbstractRegionMap) backingRM).lruEntryCreate(re); } return re; }
// NOTE(review): fragment — begins and ends mid-method; the enclosing definition is not visible here.
incEntryCount(1);
lruEntryUpdate(oldRe);
lruUpdateCallback();
incEntryCount(1);
lruEntryUpdate(newRe);
} finally {
  // Resynchronize the entry-count stat with the actual map size, then reap any
  // un-versioned tombstones left behind by a versioned -> non-versioned recovery.
  incEntryCount(size());
  for (Iterator<RegionEntry> iter = regionEntries().iterator(); iter.hasNext(); ) {
    RegionEntry re = iter.next();
    if (re.isTombstone()) {
      if (re.getVersionStamp() == null) { // bug #50992 - recovery from versioned to non-versioned
        incEntryCount(-1);
        iter.remove();
        continue;
// NOTE(review): fragment — mid-method; enclosing definition not visible.
// Removal succeeded; decrement the entry-count stat only when the caller asked for it.
success = true;
if (updateStat) {
  incEntryCount(-1);
// NOTE(review): fragment — mid-method; enclosing definition not visible.
// NOTE(review): incEntryCount(-delta) appears twice back-to-back below; this looks
// like a merge/duplication artifact — verify against upstream before trusting these counts.
_getOwner().incTombstoneCount(-tombstones);
if (delta != 0) {
  incEntryCount(-delta);
  incEntryCount(-delta);
  incEntryCount(-tombstones);
  if (logger.isDebugEnabled()) {
    logger.debug("Size after clearing = {}", _getMap().size());
// NOTE(review): fragment — mid-method; enclosing definition not visible.
// Account for the newly installed entry: owner size, entry count, and LRU bookkeeping.
owner.updateSizeOnCreate(key, owner.calculateRegionEntryValueSize(re));
((AbstractRegionMap)backingRM).incEntryCount(1);
((AbstractRegionMap)backingRM).lruEntryCreate(re);
// NOTE(review): fragment — mid-method; enclosing definition not visible.
// Account for the newly installed entry: owner size, entry count, and LRU bookkeeping.
owner.updateSizeOnCreate(key, owner.calculateRegionEntryValueSize(re));
((AbstractRegionMap)backingRM).incEntryCount(1);
((AbstractRegionMap)backingRM).lruEntryCreate(re);
// NOTE(review): fragment — mid-method; enclosing definition not visible.
// Decrement the entry count only when the tombstone was actually removed.
if (removeTombstone(re)) {
  result = true;
  incEntryCount(-1);
// NOTE(review): fragment — mid-method; enclosing definition not visible.
// Account for the newly installed entry: owner size, entry count, and LRU bookkeeping.
owner.updateSizeOnCreate(key, owner.calculateRegionEntryValueSize(re));
((AbstractRegionMap)backingRM).incEntryCount(1);
((AbstractRegionMap)backingRM).lruEntryCreate(re);
// NOTE(review): fragment — mid-method; enclosing definition not visible.
// Account for the newly installed entry: owner size, entry count, and LRU bookkeeping.
owner.updateSizeOnCreate(key, owner.calculateRegionEntryValueSize(re));
((AbstractRegionMap)backingRM).incEntryCount(1);
((AbstractRegionMap)backingRM).lruEntryCreate(re);
// NOTE(review): fragment — mid-method; enclosing definition not visible.
// Removal succeeded; decrement the entry-count stat only when the caller asked for it.
success = true;
if (updateStat) {
  incEntryCount(-1);
// NOTE(review): fragment — mid-method; enclosing definition not visible.
// Decrement the entry count only when the tombstone was actually removed.
if (removeTombstone(re)) {
  result = true;
  incEntryCount(-1);
// NOTE(review): fragment — mid-method; enclosing definition not visible.
incEntryCount(1); // we are creating an entry that was recovered from disk including tombstone
// NOTE(review): fragment — mid-method; enclosing definition not visible.
incEntryCount(1); // we are creating an entry that was recovered from disk including tombstone