/** Exposes this region's disk region through its {@link DiskRegionView} interface. */
@Override
public DiskRegionView getDiskRegionView() {
  final DiskRegion diskRegion = getDiskRegion();
  return diskRegion;
}
/**
 * Lets the customer do an explicit evict of a value to disk and removes the value from memory.
 * A no-op when this region is not backed by a disk region.
 */
public void evictValue(Object key) {
  final boolean hasDiskRegion = getDiskRegion() != null;
  if (hasDiskRegion) {
    this.entries.evictValue(key);
  }
}
/** Drops the clear-count (htree) reference on {@code rgn}'s disk region, if it has one. */
protected static void removeClearCountReference(LocalRegion rgn) {
  final DiskRegion diskRegion = rgn.getDiskRegion();
  if (diskRegion == null) {
    return;
  }
  diskRegion.removeClearCountReference();
}
/**
 * Sets the htree clear-count reference on {@code rgn}'s disk region (Bug 40299).
 * A no-op when the region has no disk region.
 */
protected static void setClearCountReference(LocalRegion rgn) {
  final DiskRegion diskRegion = rgn.getDiskRegion();
  if (diskRegion == null) {
    return;
  }
  diskRegion.setClearCountReference();
}
/**
 * Seeds this region's disk statistics with recovered counts.
 *
 * @param numEntriesInVM number of recovered entries whose values are in memory
 * @param numOverflowOnDisk number of recovered entries whose values are only on disk
 * @param numOverflowBytesOnDisk number of bytes occupied on disk by overflowed values
 */
@Override
public void initializeStats(long numEntriesInVM, long numOverflowOnDisk,
    long numOverflowBytesOnDisk) {
  getDiskRegion().getStats().incNumEntriesInVM(numEntriesInVM);
  getDiskRegion().getStats().incNumOverflowOnDisk(numOverflowOnDisk);
  // Fix: numOverflowBytesOnDisk was accepted but silently dropped, leaving the
  // overflow-bytes statistic stale after recovery.
  getDiskRegion().getStats().incNumOverflowBytesOnDisk(numOverflowBytesOnDisk);
}
@Override public void setRVVTrusted(boolean rvvTrusted) { if (this.getConcurrencyChecksEnabled()) { DiskRegion region = this.getDiskRegion(); // Update whether or not the RVV we have recovered is trusted (accurately represents what we // have on disk). if (region != null) { region.setRVVTrusted(rvvTrusted); } } }
@Override public void recordRecoveredGCVersion(VersionSource member, long gcVersion) { // TODO - RVV - I'm not sure about this recordGCVersion method. It seems like it's not doing the // right thing if the current member is the member we just recovered. We need to update the RVV // in memory this.versionVector.recordGCVersion(member, gcVersion); // We also need to update the RVV that represents what we have persisted on disk DiskRegion region = this.getDiskRegion(); if (region != null) { region.recordRecoveredGCVersion(member, gcVersion); } }
/** * get val from disk */ protected void validatePut(Region region) { // flush data to disk ((LocalRegion) region).getDiskRegion().flushForTesting(); try { ((LocalRegion) region).getValueOnDisk("testKey"); } catch (Exception ex) { ex.printStackTrace(); fail("Failed to get the value on disk"); } }
/**
 * Asks this region's disk region to compact its oplogs.
 *
 * NOTE(review): the previous javadoc described an int array of rolled oplog IDs, which does
 * not match this boolean-returning method — it appears copied from another API.
 *
 * @return true if compaction was performed/scheduled by the disk region, false if this
 *         region has no disk region (not persistent or overflowing to disk)
 * @throws IllegalStateException if the disk region exists but was not configured to allow
 *         forced compaction (allow-force-compaction=true)
 * @since GemFire prPersistSprint1
 */
@Override public boolean forceCompaction() { DiskRegion region = getDiskRegion(); if (region != null) { if (region.isCompactionPossible()) { return region.forceCompaction(); } else { throw new IllegalStateException( "To call notifyToCompact you must configure the region with <disk-write-attributes allow-force-compaction=true/>"); } } else { return false; } }
@Override public void recordRecoveredVersonHolder(VersionSource member, RegionVersionHolder versionHolder, boolean latestOplog) { if (this.getConcurrencyChecksEnabled()) { // We need to update the RVV in memory this.versionVector.initRecoveredVersion(member, versionHolder, latestOplog); DiskRegion region = this.getDiskRegion(); // We also need to update the RVV that represents what we have persisted on disk if (region != null) { region.recordRecoveredVersonHolder(member, versionHolder, latestOplog); } } }
@Test
public void whenBasicUpdateButNotBackupAndEntrySet() throws Exception {
  // A non-backup disk region must not trigger a release of the stored object.
  final DiskRegion nonBackupDiskRegion = mock(DiskRegion.class);
  when(nonBackupDiskRegion.isBackup()).thenReturn(false);

  final LocalRegion region = mock(LocalRegion.class);
  when(region.getDiskRegion()).thenReturn(nonBackupDiskRegion);

  final DiskEntry entry = mock(DiskEntry.class);
  when(entry.getDiskId()).thenReturn(mock(DiskId.class));

  final EntryEventImpl event = mock(EntryEventImpl.class);
  final StoredObject storedObject = mock(StoredObject.class);

  DiskEntry.Helper.basicUpdateForTesting(entry, region, storedObject, event);

  verify(storedObject, times(0)).release();
}
@Test
public void whenBasicUpdateButNotBackupAndDiskIdIsNullAndEntrySet() throws Exception {
  // Like the non-backup case above, but the entry has no DiskId stubbed (returns null).
  final DiskRegion nonBackupDiskRegion = mock(DiskRegion.class);
  when(nonBackupDiskRegion.isBackup()).thenReturn(false);

  final LocalRegion region = mock(LocalRegion.class);
  when(region.getDiskRegion()).thenReturn(nonBackupDiskRegion);

  final DiskEntry entry = mock(DiskEntry.class);
  final EntryEventImpl event = mock(EntryEventImpl.class);
  final StoredObject storedObject = mock(StoredObject.class);

  DiskEntry.Helper.basicUpdateForTesting(entry, region, storedObject, event);

  verify(storedObject, times(0)).release();
}
}
@Override public void recordRecoveredVersionTag(VersionTag tag) { if (this.getConcurrencyChecksEnabled()) { this.versionVector.recordVersion(tag.getMemberID(), tag.getRegionVersion()); DiskRegion region = this.getDiskRegion(); // We also need to update the RVV that represents what we have persisted on disk if (region != null) { region.recordRecoveredVersionTag(tag); } } }
/**
 * This code needs to be evaluated. It was added quickly to help PR persistence not to
 * consume as much memory.
 *
 * Nulls out the in-memory value of {@code key}'s entry (if present and non-null) and
 * adjusts the in-VM / overflow-on-disk counters accordingly.
 */
@Override
public void evictValue(Object key) {
  final LocalRegion owner = _getOwner();
  final RegionEntry entry = getEntry(key);
  if (entry == null) {
    return;
  }
  synchronized (entry) {
    if (entry.isValueNull()) {
      return; // already evicted; nothing to account for
    }
    entry.setValueToNull();
    owner.getDiskRegion().incNumEntriesInVM(-1L);
    owner.getDiskRegion().incNumOverflowOnDisk(1L);
    // Bucket regions keep their own copies of these counters.
    if (owner instanceof BucketRegion) {
      final BucketRegion bucket = (BucketRegion) owner;
      bucket.incNumEntriesInVM(-1L);
      bucket.incNumOverflowOnDisk(1L);
    }
  }
}
@Test
public void whenHelperUpdateCalledAndDiskRegionAcquireReadLockThrowsRegionDestroyedExceptionThenStoredObjectShouldBeReleased()
    throws Exception {
  // Arrange a disk region whose read lock acquisition fails with RegionDestroyedException.
  final DiskRegion destroyedDiskRegion = mock(DiskRegion.class);
  Mockito.doThrow(new RegionDestroyedException("Region Destroyed", "mocked region"))
      .when(destroyedDiskRegion).acquireReadLock();

  final LocalRegion region = mock(LocalRegion.class);
  when(region.getDiskRegion()).thenReturn(destroyedDiskRegion);

  final DiskEntry entry = mock(DiskEntry.class);
  when(entry.getDiskId()).thenReturn(mock(DiskId.class));

  final EntryEventImpl event = mock(EntryEventImpl.class);
  final StoredObject storedObject = mock(StoredObject.class);

  // Act + assert: the exception must propagate AND the stored object must be released.
  try {
    DiskEntry.Helper.update(entry, region, storedObject, event);
    fail();
  } catch (RegionDestroyedException rde) {
    verify(storedObject, times(1)).release();
  }
}
@Test
public void whenBasicUpdateWithDiskRegionBackupAndEntryNotSetThenReleaseOnStoredObjectShouldBeCalled()
    throws Exception {
  // Arrange a backup disk region whose put() fails with RegionDestroyedException.
  final DiskEntry entry = mock(DiskEntry.class);
  when(entry.getDiskId()).thenReturn(mock(DiskId.class));

  final LocalRegion region = mock(LocalRegion.class);
  final DiskRegion backupDiskRegion = mock(DiskRegion.class);
  when(backupDiskRegion.isBackup()).thenReturn(true);
  doThrow(new RegionDestroyedException("", "")).when(backupDiskRegion).put(eq(entry), eq(region),
      ArgumentMatchers.any(DiskEntry.Helper.ValueWrapper.class), anyBoolean());
  when(region.getDiskRegion()).thenReturn(backupDiskRegion);

  final EntryEventImpl event = mock(EntryEventImpl.class);
  final StoredObject storedObject = mock(StoredObject.class);

  // Act + assert: the exception must propagate AND the stored object must be released.
  try {
    DiskEntry.Helper.basicUpdateForTesting(entry, region, storedObject, event);
    fail();
  } catch (RegionDestroyedException rde) {
    verify(storedObject, times(1)).release();
  }
}
@Override public void removeIfDestroyed(Object key) { LocalRegion owner = _getOwner(); // boolean makeTombstones = owner.concurrencyChecksEnabled; DiskRegion dr = owner.getDiskRegion(); RegionEntry re = getEntry(key); if (re != null) { if (re.isDestroyed()) { synchronized (re) { if (re.isDestroyed()) { // [bruce] destroyed entries aren't in the LRU clock, so they can't be retained here // if (makeTombstones) { // re.makeTombstone(owner, re.getVersionStamp().asVersionTag()); // } else { re.removePhase2(); removeEntry(key, re, true); } } } } // } }
/**
 * Transfers recovered state from {@code oldRe} into its replacement {@code newRe} and
 * installs the replacement in the entry map.
 */
private void copyRecoveredEntry(RegionEntry oldRe, RegionEntry newRe) {
  // Carry the recovered versioning information over to the replacement entry.
  if (newRe.getVersionStamp() != null) {
    newRe.getVersionStamp().setMemberID(oldRe.getVersionStamp().getMemberID());
    newRe.getVersionStamp().setVersions(oldRe.getVersionStamp().asVersionTag());
  }
  // Disk-backed entries also take over the old entry's disk id and are swapped on disk.
  if (newRe instanceof AbstractOplogDiskRegionEntry) {
    final AbstractOplogDiskRegionEntry diskEntry = (AbstractOplogDiskRegionEntry) newRe;
    diskEntry.setDiskId(oldRe);
    _getOwner().getDiskRegion().replaceIncompatibleEntry((DiskEntry) oldRe, (DiskEntry) newRe);
  }
  getEntryMap().put(newRe.getKey(), newRe);
}
/** Unregisters all statistics artifacts (PR, LRU, disk-directory) held for {@code region}. */
public void removeRegion(Region region) {
  if (region.getAttributes().getPartitionAttributes() != null) {
    removePartionRegionStats(((PartitionedRegion) region).getPrStats());
  }
  final LocalRegion localRegion = (LocalRegion) region;
  removeLRUStats(localRegion.getEvictionStatistics());
  final DiskRegion diskRegion = localRegion.getDiskRegion();
  if (diskRegion == null) {
    return; // not persistent / overflowing — no directory stats to remove
  }
  for (DirectoryHolder holder : diskRegion.getDirectories()) {
    removeDirectoryStats(holder.getDiskDirectoryStats());
  }
}
/**
 * Adds a newly created entry to the eviction (LRU) list, unless a concurrent region clear
 * may have removed it from the map — in that case the entry is only appended if the map
 * still holds exactly this entry for its key.
 */
@Override
public void lruEntryCreate(RegionEntry re) {
  EvictableEntry e = (EvictableEntry) re;
  if (logger.isTraceEnabled(LogMarker.LRU_VERBOSE)) {
    // Fix: the "list size" and "actual size" placeholders were paired with swapped
    // arguments (total entry size vs. eviction-list length) — NOTE(review): confirm
    // against the meanings of getTotalEntrySize()/getEvictionList().
    logger.trace(LogMarker.LRU_VERBOSE,
        "lruEntryCreate for key={}; list size is: {}; actual size is: {}; map size is: {}; entry size: {}; in lru clock: {}",
        re.getKey(), this.getEvictionList().size(), getTotalEntrySize(), size(),
        e.getEntrySize(), !e.isEvicted());
  }
  e.unsetEvicted();
  EvictionList lruList = getEvictionList();
  DiskRegion disk = _getOwner().getDiskRegion();
  // A clear-count change signals a possible concurrent clear of the region.
  boolean possibleClear = disk != null && disk.didClearCountChange();
  if (!possibleClear || this._getOwner().basicGetEntry(re.getKey()) == re) {
    lruList.appendEntry(e);
    lruEntryUpdate(e);
  }
}