/**
 * Asynchronously flushes a version tag for the given region to persistent storage.
 * A no-op when the region is shutting down or has no backup (disk) storage.
 * Must only be reached for async regions (asserted).
 */
public static void doAsyncFlush(VersionTag tag, InternalRegion region) {
  if (region.isThisRegionBeingClosedOrDestroyed()) {
    return;
  }
  final DiskRegion dr = region.getDiskRegion();
  if (!dr.isBackup()) {
    // nothing persisted for this region; tag never reaches disk
    return;
  }
  assert !dr.isSync();
  dr.acquireReadLock();
  try {
    dr.getDiskStore().putVersionTagOnly(region, tag, true);
  } finally {
    dr.releaseReadLock();
  }
}
@Test
public void finishInitializeOwnerOnAsyncPersistentRegionWithTrustedRvvNeverCallsWriteRVV() {
  // With a trusted RVV, finishing initialization must not rewrite the RVV to disk.
  diskRegion.setRVVTrusted(true);

  LocalRegion owner = mock(LocalRegion.class);
  diskRegion.finishInitializeOwner(owner, GOTIMAGE_BY_FULLGII);

  verify(diskRegion, never()).writeRVV(any(), any());
}
/**
 * Used by tests to force everything out to disk.
 *
 * <p>Snapshots {@code this.diskRegion} into a local before use so a concurrent
 * close that nulls the field between the null-check and the call cannot cause
 * a {@link NullPointerException}.
 */
public void forceFlush() {
  DiskRegion dr = this.diskRegion;
  if (dr != null) {
    dr.flushForTesting();
  }
}
/**
 * Finishes destroying the data storage for {@code dr}: clears the region's
 * entries, resets the region version vector, and persists an empty RVV with
 * trust=false so recovery will not believe stale version data. Finally
 * notifies the init file (for backup regions) that the data storage is gone.
 *
 * <p>NOTE(review): statement order here appears deliberate (clear before RVV
 * reset/write) — do not reorder without confirming recovery semantics.
 */
public void endDestroyDataStorage(LocalRegion region, DiskRegion dr) { try { clear(region, dr, null); dr.resetRVV(); dr.setRVVTrusted(false); dr.writeRVV(null, null); // just persist the empty rvv with trust=false } catch (RegionDestroyedException ignore) { // ignore a RegionDestroyedException at this stage } if (this.initFile != null && dr.isBackup()) { this.initFile.endDestroyDataStorage(dr); } }
/**
 * Replaces {@code old} with {@code repl} in the current (child) oplog while
 * holding this region's read lock, so the swap cannot race with a clear.
 *
 * @param old the entry being replaced
 * @param repl the replacement entry
 */
public void replaceIncompatibleEntry(DiskEntry old, DiskEntry repl) { acquireReadLock(); try { getOplogSet().getChild().replaceIncompatibleEntry(this, old, repl); } finally { releaseReadLock(); } }
/** * Called before LocalRegion clears the contents of its entries map */ void prepareForClose(LocalRegion region, DiskRegion dr) { if (dr.isBackup()) { // Need to flush any async ops done on dr. // The easiest way to do this is to flush the entire async queue. forceFlush(); } }
DiskRegion dr = region.getDiskRegion(); if (!asyncQueueWasFull) { dr.setClearCountReference(); dr.acquireReadLock(); try { DiskId did = entry.getDiskId(); if (region.isThisRegionBeingClosedOrDestroyed()) return; dr.remove(region, entry, true, false); if (dr.isBackup()) { && !dr.isBackup()) { assert !dr.isSync(); dr.releaseReadLock(); dr.removeClearCountReference();
if (dr.isRegionClosed()) { throw new RegionDestroyedException( "The DiskRegion has been closed or destroyed", dr.getName()); if (!dr.didClearCountChange()) { long start = getStats().startRemove(); OplogSet oplogSet = getOplogSet(dr); oplogSet.remove(region, entry, async, isClear); dr.getStats().endRemove(start, getStats().endRemove(start)); } else { throw new RegionClearedException(
if (this.diskRegion != null) { try { this.diskRegion.initializeOwner(this); this.diskRegion.finishInitializeOwner(this, GIIStatus.NO_GII); PersistentMemberID oldId = this.diskRegion.getMyInitializingID(); if (oldId == null) { oldId = this.diskRegion.getMyPersistentID(); PersistentMemberID newId = this.diskRegion.generatePersistentID(); this.diskRegion.setInitializing(newId); this.diskRegion.setInitialized();
/**
 * Get the serialized value directly from disk. Returned object may be a
 * {@link CachedDeserializable}. Goes straight to disk without faulting into memory. Only looks
 * at the disk storage, not at heap storage.
 *
 * @param entry the entry used to identify the value to fetch
 * @param dr the persistent storage from which to fetch the value
 * @return either null, byte array, or CacheDeserializable
 * @since GemFire 57_hotfix
 */
public static Object getSerializedValueOnDisk(DiskEntry entry, DiskRegion dr) {
  DiskId did = entry.getDiskId();
  if (did == null) {
    return null;
  }
  dr.acquireReadLock();
  try {
    synchronized (did) {
      // Nothing on disk yet for a backup region with an invalid key id.
      boolean neverOnDisk = dr.isBackup() && did.getKeyId() == DiskRegion.INVALID_ID;
      if (neverOnDisk) {
        return null;
      }
      // In-memory value still awaiting its write; disk copy is stale/absent.
      boolean writePending = !entry.isValueNull() && did.needsToBeWritten()
          && !EntryBits.isRecoveredFromDisk(did.getUserBits()); /* fix for bug 41942 */
      if (writePending) {
        return null;
      }
      return dr.getSerializedData(did);
    }
  } finally {
    dr.releaseReadLock();
  }
}
dr.getRegionVersionVector().recordVersions(sourceRVV); } else { if (dr.getRVVTrusted()) { return; dr.setRVVTrusted(isRVVTrusted); Collections.<Long, AbstractDiskRegion>singletonMap(dr.getId(), dr), false); } catch (IOException ex) { dr.getCancelCriterion().checkCancelInProgress(ex); throw new DiskAccessException(String.format( "Failed in persisting the garbage collection of entries because of: %s", this.diskFile.getPath()), ex, dr.getName());
/**
 * Testing purpose only Get the value of an entry that is on disk without faulting it in and
 * without looking in the io buffer.
 *
 * @since GemFire 3.2.1
 */
public static Object getValueOnDisk(DiskEntry entry, DiskRegion dr) {
  DiskId id = entry.getDiskId();
  if (id == null) {
    return null;
  }
  dr.acquireReadLock();
  try {
    synchronized (id) {
      // Backup region with an invalid key id: nothing was ever written to disk.
      if (dr.isBackup() && id.getKeyId() == DiskRegion.INVALID_ID) {
        return null;
      }
      // Live value whose write is still pending: disk copy is stale/absent.
      if (!entry.isValueNull() && id.needsToBeWritten()
          && !EntryBits.isRecoveredFromDisk(id.getUserBits())) { /* fix for bug 41942 */
        return null;
      }
      return dr.getNoBuffer(id);
    }
  } finally {
    dr.releaseReadLock();
  }
}
/**
 * Records the completion of a disk read by delegating to the store's stats.
 *
 * @param start timestamp when the read began
 * @param end timestamp when the read finished
 * @param bytesRead number of bytes read from disk
 */
@Override public void endRead(long start, long end, long bytesRead) { getStats().endRead(start, end, bytesRead); }
@Test
public void whenBasicUpdateWithDiskRegionBackupAndAsyncWritesAndEntryNotSetThenReleaseOnStoredObjectShouldBeCalled()
    throws Exception {
  // Arrange: mocks first, then stubbing.
  StoredObject storedObject = mock(StoredObject.class);
  LocalRegion lr = mock(LocalRegion.class);
  DiskEntry diskEntry = mock(DiskEntry.class);
  EntryEventImpl entryEvent = mock(EntryEventImpl.class);
  DiskRegion diskRegion = mock(DiskRegion.class);

  when(diskEntry.getDiskId()).thenReturn(mock(DiskId.class));
  when(diskRegion.isBackup()).thenReturn(true);
  when(diskRegion.isSync()).thenReturn(false);
  doThrow(new RegionDestroyedException("", "")).when(diskRegion).put(eq(diskEntry), eq(lr),
      ArgumentMatchers.any(DiskEntry.Helper.ValueWrapper.class), anyBoolean());
  when(lr.getDiskRegion()).thenReturn(diskRegion);
  when(lr.isInitialized()).thenReturn(true);
  when(lr.getConcurrencyChecksEnabled()).thenThrow(new RegionDestroyedException("", ""));

  // Act + assert: the update must fail, and the off-heap value must be released.
  try {
    DiskEntry.Helper.basicUpdateForTesting(diskEntry, lr, storedObject, entryEvent);
    fail();
  } catch (RegionDestroyedException expected) {
    verify(storedObject, times(1)).release();
  }
}
dr.acquireReadLock(); dr.releaseReadLock();
if (recoverFromDisk && dskRgn != null && dskRgn.isBackup()) { dskRgn.resetRecoveredEntries(this); return; closeEntries(); if (getDiskRegion() != null) { getDiskRegion().clear(this, null);
@Test
public void cleanUpAfterFailedInitialImageDoesNotCloseEntriesIfIsPersistentRegionAndRecoveredFromDisk() {
  // Arrange: a persistent (backup) disk region attached to the distributed region.
  DiskRegion diskRegion = mock(DiskRegion.class);
  when(diskRegion.isBackup()).thenReturn(true);

  DistributedRegion distributedRegion = mock(DistributedRegion.class);
  when(distributedRegion.getDiskRegion()).thenReturn(diskRegion);
  doCallRealMethod().when(distributedRegion).cleanUpAfterFailedGII(true);

  // Act.
  distributedRegion.cleanUpAfterFailedGII(true);

  // Assert: recovered entries are reset, not discarded.
  verify(diskRegion).resetRecoveredEntries(eq(distributedRegion));
  verify(distributedRegion, never()).closeEntries();
}
@Test
public void whenBasicUpdateWithDiskRegionBackupAndEntryNotSetThenReleaseOnStoredObjectShouldBeCalled()
    throws Exception {
  // Arrange: mocks first, then stubbing.
  StoredObject storedObject = mock(StoredObject.class);
  LocalRegion lr = mock(LocalRegion.class);
  DiskEntry diskEntry = mock(DiskEntry.class);
  EntryEventImpl entryEvent = mock(EntryEventImpl.class);
  DiskRegion diskRegion = mock(DiskRegion.class);

  when(diskEntry.getDiskId()).thenReturn(mock(DiskId.class));
  when(diskRegion.isBackup()).thenReturn(true);
  doThrow(new RegionDestroyedException("", "")).when(diskRegion).put(eq(diskEntry), eq(lr),
      ArgumentMatchers.any(DiskEntry.Helper.ValueWrapper.class), anyBoolean());
  when(lr.getDiskRegion()).thenReturn(diskRegion);

  // Act + assert: the update must fail, and the off-heap value must be released.
  try {
    DiskEntry.Helper.basicUpdateForTesting(diskEntry, lr, storedObject, entryEvent);
    fail();
  } catch (RegionDestroyedException expected) {
    verify(storedObject, times(1)).release();
  }
}
if (dskRgn != null && dskRgn.isBackup()) { dskRgn.finishInitializeOwner(this, giiStatus);