/**
 * Begins destroying this region's persisted data, but only when the region
 * is actually backed up on disk and an init file is present.
 */
public void beginDestroyDataStorage(DiskRegion dr) {
  // Only persistent (backup) regions are recorded in the init file;
  // checking isBackup() here fixes bug 41389.
  boolean persistedToDisk = this.initFile != null && dr.isBackup();
  if (persistedToDisk) {
    this.initFile.beginDestroyDataStorage(dr);
  }
}
public void endRead(long start, long end, long bytesRead) { getStats().endRead(start, end, bytesRead); } /**
/** * This function is having a default visiblity as it is used in the * OplogJUnitTest for a bug verification of Bug # 35012 * * All callers must have {@link #releaseWriteLock(DiskRegion)} in a matching * finally block. * * Note that this is no longer implemented by getting a write lock but instead * locks the same lock that acquireReadLock does. * * @since 5.1 */ private void acquireWriteLock(DiskRegion dr) { // @todo darrel: this is no longer a write lock need to change method name dr.acquireWriteLock(); }
/**
 * Cleans up after a failed region initialization.
 * If this disk region was recreated from pre-existing disk data and no
 * destroy was already in progress, the region is simply closed (data is
 * retained for buckets); otherwise the in-flight destroy is completed.
 */
void cleanupFailedInitialization(LocalRegion region) {
  if (isRecreated() && !this.wasAboutToDestroy() && !this.wasAboutToDestroyDataStorage()) {
    close(region, isBucket());
  } else {
    if(this.isBucket() && !this.wasAboutToDestroy()) {
      //Fix for 48642
      //If this is a bucket, only destroy the data, if required.
      // NOTE(review): no-arg overload of beginDestroyDataStorage —
      // presumably declared elsewhere in this class; confirm.
      beginDestroyDataStorage();
    }
    endDestroy(region);
  }
}

void prepareForClose(LocalRegion region) {
/**
 * Closes this disk region. The compactor is stopped outside the write lock
 * before the current and old oplogs are closed by the disk store. The
 * region's statistics are closed even when the disk store close throws.
 *
 * @param region        the local region being closed
 * @param closeDataOnly whether only the data storage should be closed
 */
void close(LocalRegion region, boolean closeDataOnly) {
  try {
    getDiskStore().close(region, this, closeDataOnly);
  } finally {
    // Always tear down the stats, even on failure above.
    statsClose(region);
  }
}
/**
 * Finishes destroying the persisted data for the given disk region: clears
 * the region's entries, resets its region version vector (RVV), and
 * persists an empty RVV marked untrusted. Finally, for backup regions the
 * init file records that the data-storage destroy has ended.
 */
public void endDestroyDataStorage(LocalRegion region, DiskRegion dr) {
  try {
    clear(region, dr, null);
    dr.resetRVV();
    dr.setRVVTrusted(false);
    dr.writeRVV(null, null); // just persist the empty rvv with trust=false
  } catch (RegionDestroyedException rde) {
    // ignore a RegionDestroyedException at this stage
  }
  if (this.initFile != null && dr.isBackup()) {
    this.initFile.endDestroyDataStorage(dr);
  }
}
String compressorClassName, boolean enableOffHeapMemory) { super(ds, name, uuid); if(this.getPartitionName() != null){ if(this.getStartingBucketId() != startingBucketId || !this.getPartitionName().equals(partitionName)){ partitionName = this.getPartitionName(); startingBucketId = this.getStartingBucketId(); if (isRecreated() && isBackup() && !isPersistBackup) { + getName() + "\" has been persisted to disk so it can not be recreated on the same disk store without persistence. Either destroy the persistent region, recreate it as overflow and persistent, or create the overflow only region on a different disk store."); if (isRecreated() && isBucket != isBucket()) { if (isBucket()) { throw new IllegalStateException("The region \"" + getName() + "\" has been persisted to disk as a partition region bucket but is not being recreated as a bucket. This should not be possible."); } else { throw new IllegalStateException("The region \"" + getName() + "\" has not been persisted to disk as a partition region bucket but is now being recreated as a bucket. This should not be possible."); setBackup(isPersistBackup); if (isRecreated()) { if (raLruAlgorithm != getLruAlgorithm() || raLruAction != getLruAction()
PersistentMemberID myOffId = createNewPMID(); PersistentMemberID myEqualId = createNewPMID(); dr.setInitializing(myId); dr.setInitialized(); dr.memberOnline(myOnId); dr.memberOffline(myOffId); dr.memberOfflineAndEqual(myEqualId); assertEquals(Collections.singleton(myOnId), dr.getOnlineMembers()); assertEquals(Collections.singleton(myOffId), dr.getOfflineMembers()); assertEquals(Collections.singleton(myEqualId), dr.getOfflineAndEqualMembers()); assertEquals(null, dr.getMyInitializingID()); assertEquals(myId, dr.getMyPersistentID()); assertEquals(false, dr.wasAboutToDestroy()); dr.beginDestroy(lr); assertEquals(Collections.singleton(myOnId), dr.getOnlineMembers()); assertEquals(Collections.singleton(myOffId), dr.getOfflineMembers()); assertEquals(Collections.singleton(myEqualId), dr.getOfflineAndEqualMembers()); assertEquals(myId, dr.getMyPersistentID()); assertEquals(true, dr.wasAboutToDestroy()); close(lr); assertEquals(Collections.singleton(myOnId), dr.getOnlineMembers()); assertEquals(Collections.singleton(myOffId), dr.getOfflineMembers()); assertEquals(Collections.singleton(myEqualId), dr.getOfflineAndEqualMembers()); assertEquals(myId, dr.getMyPersistentID()); assertEquals(true, dr.wasAboutToDestroy()); dr.forceIFCompaction();
/**
 * Asynchronously flushes a version tag for the given region to disk.
 * Does nothing when the region is closing/destroyed or is not persisted
 * (backup). Intended only for async (non-sync) disk regions.
 *
 * @param tag    the version tag to persist
 * @param region the region the tag belongs to
 */
public static void doAsyncFlush(VersionTag tag, LocalRegion region) {
  if (region.isThisRegionBeingClosedOrDestroyed()) {
    return;
  }
  final DiskRegion diskRegion = region.getDiskRegion();
  if (!diskRegion.isBackup()) {
    // Overflow-only regions persist no version information.
    return;
  }
  assert !diskRegion.isSync();
  diskRegion.acquireReadLock();
  try {
    diskRegion.getDiskStore().putVersionTagOnly(region, tag, true);
  } finally {
    diskRegion.releaseReadLock();
  }
}
void statsClear(LocalRegion region) { if (region instanceof BucketRegion) { BucketRegion owner=(BucketRegion)region; long curInVM = owner.getNumEntriesInVM()*-1; long curOnDisk = owner.getNumOverflowOnDisk()*-1; long curBytesOnDisk = owner.getNumOverflowBytesOnDisk()*-1; incNumEntriesInVM(curInVM); incNumOverflowOnDisk(curOnDisk); incNumOverflowBytesOnDisk(curBytesOnDisk); owner.incNumEntriesInVM(curInVM); owner.incNumOverflowOnDisk(curOnDisk); owner.incNumOverflowBytesOnDisk(curBytesOnDisk); } else { // set them both to zero incNumEntriesInVM(getNumEntriesInVM()*-1); incNumOverflowOnDisk(getNumOverflowOnDisk()*-1); incNumOverflowBytesOnDisk(getNumOverflowBytesOnDisk()*-1); } }
/**
 * Reads the serialized (on-disk) form of the value identified by the given
 * disk id from this region's disk store.
 *
 * @param id the disk id locating the value on disk
 * @return the serialized form of the stored data
 * @since gemfire5.7_hotfix
 */
public Object getSerializedData(DiskId id) {
  return getDiskStore().getSerializedData(this, id);
}
/**
 * Test helper: installs a CacheObserver that performs a get() the first
 * time bytes are written, then does a put while the flusher is paused,
 * flushes, and verifies the value can be read back from disk.
 */
void getAfterFlush(final Region region) {
  alreadyComeHere = false;
  CacheObserverHolder.setInstance(new CacheObserverAdapter() {
    public void afterWritingBytes() {
      if (!alreadyComeHere) {
        // NOTE(review): gets "key" while the put below uses "Key" —
        // presumably intentional so this get misses; confirm.
        region.get("key");
      }
      alreadyComeHere = true;
    }
  });
  ((LocalRegion)region).getDiskRegion().pauseFlusherForTesting();
  region.put("Key", "Value1");
  ((LocalRegion)region).getDiskRegion().flushForTesting();
  try {
    Assert.assertEquals("Value1", getValueOnDisk(region));
  } catch (EntryNotFoundException e) {
    logWriter.error("Exception occured", e);
    fail("Entry not found although was supposed to be there");
  }
}
/**
 * Test runnable: takes the disk region's write lock, starts a putThread,
 * wakes it via the shared mutex, then clears the region while still
 * holding the lock. The lock is released in the finally block. Timing is
 * driven by fixed sleeps, so this is inherently racy test scaffolding.
 */
public void run() {
  Cache cache = CacheFactory.getAnyInstance();
  region.getDiskRegion().acquireWriteLock();
  try {
    Thread putThread = new Thread(new putThread(region));
    putThread.start();
    Thread.sleep(2000); // give the put thread time to block on the mutex
    synchronized (mutex) {
      mutex.notify();
    }
    Thread.sleep(5000); // let the put proceed before clearing
    region.clear();
  } catch (InterruptedException e) {
    if (cache.getLogger().fineEnabled()) {
      cache.getLogger().fine(
          "TestLRUClockHand#clearThread Got an interrupted Exception");
    }
    fail("interrupted");
  } catch (Exception ie) {
    fail("TestLRUClockHand#clearThread Got an Exception");
  } finally {
    region.getDiskRegion().releaseWriteLock();
  }
}
}
/** Get the number of entries in the region whose value resides in the VM * * @returns int The number of entries in this region whose value resides in the VM */ public static long getNumEntriesInVM(Region aRegion) { DiskRegion diskReg = ((LocalRegion)aRegion).getDiskRegion(); if (diskReg == null) // no disk is being used return aRegion.keys().size(); if (diskReg.getStats() == null) throw new TestException("stats is NULL"); // Added this check in response to BUG 41725 (as requested by darrel) long diskRegionCount = diskReg.getNumEntriesInVM(); long statsCount = diskReg.getStats().getNumEntriesInVM(); if (diskRegionCount != statsCount) { throw new TestException("BUG 41725 detected: DiskRegion.getEntriesInVM(" + diskRegionCount + ") != diskReg.getStats().getNumEntriesInVM(" + statsCount + ")" + util.TestHelper.getStackTrace()); } return diskReg.getStats().getNumEntriesInVM(); }
((LocalRegion)region).getDiskRegion().acquireWriteLock(); try { th.start(); Thread.yield(); DiskRegion dr = ((LocalRegion)region).getDiskRegion(); dr.testHook_getChild().forceRolling(dr, false); ((LocalRegion)region).getDiskRegion().releaseWriteLock();
/**
 * Test hook: forces all pending region data out to disk, if this region is
 * backed by a disk region.
 */
public void forceFlush() {
  if (this.diskRegion == null) {
    return; // no disk backing; nothing to flush
  }
  this.diskRegion.flushForTesting();
}
dr.flushForTesting(); long size1 =0; for(DirectoryHolder dh:dr.getDirectories()) { size1 += dh.getDirStatsDiskSpaceUsage(); dr = ((LocalRegion)region).getDiskRegion(); long size2 =0; for(DirectoryHolder dh:dr.getDirectories()) { size2 += dh.getDirStatsDiskSpaceUsage();
DiskRegion diskRegion = pbr.getDiskRegion(); if(diskRegion != null) { diskRegion.beginDestroy(null); diskRegion.endDestroy(null);
/**
 * Persists the current region version vector (RVV) to disk and updates the
 * disk region's copy, but only for regions flagged as versioned.
 *
 * @param region       the local region whose RVV is written
 * @param isRVVTrusted whether the recorded RVV should be marked trusted
 */
public void writeRVV(LocalRegion region, Boolean isRVVTrusted) {
  boolean versioned = this.getFlags().contains(DiskRegionFlag.IS_WITH_VERSIONING);
  if (versioned) {
    getDiskStore().writeRVV(this, region, isRVVTrusted);
  }
}
assertEquals(1, dr.getNumEntriesInVM()); assertEquals(0, dr.getNumOverflowOnDisk()); } else { assertEquals(0, dr.getNumEntriesInVM()); assertEquals(1, dr.getNumOverflowOnDisk()); assertEquals(0, dr.getNumEntriesInVM()); assertEquals(0, dr.getNumOverflowOnDisk()); dr = ((LocalRegion)region).getDiskRegion(); if (recovValues) { assertEquals(1, dr.getNumEntriesInVM()); assertEquals(0, dr.getNumOverflowOnDisk()); } else { assertEquals(1, dr.getNumEntriesInVM()); assertEquals(0, dr.getNumOverflowOnDisk()); assertEquals(0, dr.getNumEntriesInVM()); assertEquals(0, dr.getNumOverflowOnDisk()); dr = ((LocalRegion)region).getDiskRegion(); if (recovValues) { assertEquals(1, dr.getNumEntriesInVM()); assertEquals(0, dr.getNumOverflowOnDisk()); } else { assertEquals(0, dr.getNumEntriesInVM()); assertEquals(1, dr.getNumOverflowOnDisk()); assertEquals(0, dr.getNumEntriesInVM());