/**
 * Returns the persistent oplog set backing this disk store.
 *
 * @return the {@code PersistentOplogSet} used for persisted (backup) data
 */
public PersistentOplogSet getPersistentOplogSet() {
  final PersistentOplogSet persistentSet = getPersistentOplogs();
  return persistentSet;
}
/**
 * Selects the oplog set that owns the given region view: the persistent
 * oplogs when the region is a backup (persisted) region, otherwise the
 * overflow oplogs.
 *
 * @param drv the region view whose oplog set is wanted
 * @return the owning {@code OplogSet}
 */
private OplogSet getOplogSet(DiskRegionView drv) {
  return drv.isBackup() ? getPersistentOplogs() : overflowOplogs;
}
/**
 * Reads the oplog files and loads them into regions that are ready to be
 * recovered. Delegates the whole pass to the persistent oplog set.
 */
public void recoverRegionsThatAreReady() {
  final PersistentOplogSet persistentSet = getPersistentOplogs();
  persistentSet.recoverRegionsThatAreReady();
}
/**
 * Returns every oplog in the persistent oplog set, for use when backing up
 * the disk store.
 *
 * @return all persistent oplogs
 */
public Oplog[] getAllOplogsForBackup() {
  final PersistentOplogSet persistentSet = getPersistentOplogs();
  return persistentSet.getAllOplogs();
}
/**
 * Kicks off region recovery unless the persistent oplogs report that
 * recovery has already run once.
 */
void initializeIfNeeded() {
  if (getPersistentOplogs().alreadyRecoveredOnce.get()) {
    // Recovery already happened; nothing further to do.
    return;
  }
  recoverRegionsThatAreReady();
}
/**
 * Returns the persistent oplog set for the given region view.
 *
 * @param drv the region view; must be a backup (persistent) region
 * @return the persistent oplog set
 */
PersistentOplogSet getPersistentOplogSet(DiskRegionView drv) {
  assert drv.isBackup();
  final PersistentOplogSet backupSet = getPersistentOplogs();
  return backupSet;
}
/**
 * Forces the persistent oplog set to roll its active oplog.
 * Passes {@code null} for the region argument — presumably meaning the roll
 * is not tied to any particular disk region (NOTE(review): confirm against
 * {@code PersistentOplogSet.forceRoll}).
 */
@Override
public void forceRoll() {
  final PersistentOplogSet persistentSet = getPersistentOplogs();
  persistentSet.forceRoll(null);
}
/**
 * Flushes the active child oplog of the disk store's persistent oplog set.
 */
private void flushChild() {
  final PersistentOplogSet persistentSet = diskStore.getPersistentOplogs();
  persistentSet.flushChild();
}
/**
 * Reports whether the persistent oplog set currently has an active child
 * oplog; a non-null child is taken to mean persisted data exists.
 *
 * @return {@code true} if an active persistent oplog is present
 */
public boolean hasPersistedData() {
  final Oplog activeChild = getPersistentOplogs().getChild();
  return activeChild != null;
}
private CompactableOplog[] getOplogsToBeCompacted(boolean all) { ArrayList<CompactableOplog> l = new ArrayList<CompactableOplog>(); int max = Integer.MAX_VALUE; if (!all && max > MAX_OPLOGS_PER_COMPACTION && MAX_OPLOGS_PER_COMPACTION > 0) { max = MAX_OPLOGS_PER_COMPACTION; } getPersistentOplogs().getCompactableOplogs(l, max); // Note this always puts overflow oplogs on the end of the list. // They may get starved. overflowOplogs.getCompactableOplogs(l, max); if (l.isEmpty()) { return null; } return l.toArray(new CompactableOplog[l.size()]); }
/**
 * Test hook: returns the active child oplog of this disk store's
 * persistent oplog set.
 */
Oplog testHook_getChild() {
  final PersistentOplogSet persistentSet = getDiskStore().getPersistentOplogs();
  return persistentSet.getChild();
}
/**
 * Prepares this disk store for shutdown: forces a flush, readies the
 * persistent oplogs for close, then closes the compactor.
 * NOTE(review): the call order appears deliberate (flush before close) —
 * preserve it.
 */
public void prepareForClose() {
  forceFlush();
  final PersistentOplogSet persistentSet = getPersistentOplogs();
  persistentSet.prepareForClose();
  closeCompactor(true);
}
/**
 * Builds a mocked {@code DiskStoreImpl} wired so that the flusher thread
 * under test drains once ({@code fillDrainList} returns a non-zero size,
 * then 0) and stops after a single loop iteration
 * ({@code isStopFlusher} returns false, then true).
 */
@Before
public void setup() {
  diskStoreImpl = mock(DiskStoreImpl.class);
  diskStoreStats = mock(DiskStoreStats.class);
  // Consistent "Oplog" capitalization (was "persistentOpLogSet").
  PersistentOplogSet persistentOplogSet = mock(PersistentOplogSet.class);
  when(diskStoreImpl.getAsyncMonitor()).thenReturn(new Object());
  when(diskStoreImpl.getForceFlushCount()).thenReturn(new AtomicInteger(1));
  when(diskStoreImpl.fillDrainList()).thenReturn(DRAIN_LIST_SIZE).thenReturn(0);
  // Diamond operator instead of a raw ArrayList (removes the unchecked warning).
  when(diskStoreImpl.getDrainList()).thenReturn(new ArrayList<>());
  when(diskStoreImpl.getPersistentOplogs()).thenReturn(persistentOplogSet);
  when(diskStoreImpl.getStats()).thenReturn(diskStoreStats);
  when(diskStoreImpl.checkAndClearForceFlush()).thenReturn(true);
  when(diskStoreImpl.isStopFlusher()).thenReturn(false).thenReturn(true);
  flusherThread = new DiskStoreImpl.FlusherThread(diskStoreImpl);
}
/**
 * Runs an offline compaction pass over this disk store: schedules every
 * known region for recovery via an offline-compaction region wrapper,
 * recovers and compacts the persistent oplogs, forces compaction of the
 * disk-init file, and reports the result on stdout.
 */
private void offlineCompact() {
  assert isOfflineCompacting();
  // NOTE(review): presumably values need not be recovered for compaction
  // bookkeeping (only live/dead record accounting) — confirm.
  this.RECOVER_VALUES = false;
  this.deadRecordCount = 0;
  for (DiskRegionView drv : getKnown()) {
    scheduleForRecovery(OfflineCompactionDiskRegion.create(this, drv));
  }
  getPersistentOplogs().recoverRegionsThatAreReady();
  getPersistentOplogs().offlineCompact();
  // TODO soplogs - we need to do offline compaction for
  // the soplog regions, but that is not currently implemented
  getDiskInitFile().forceCompaction();
  if (this.upgradeVersionOnly) {
    System.out.println("Upgrade disk store " + this.name + " to version "
        + getRecoveredGFVersionName() + " finished.");
  } else {
    if (getDeadRecordCount() == 0) {
      System.out.println("Offline compaction did not find anything to compact.");
    } else {
      System.out.println("Offline compaction removed " + getDeadRecordCount() + " records.");
    }
    // If we have more than one oplog then the liveEntryCount may not be the
    // total
    // number of live entries in the disk store. So do not log the live entry
    // count
  }
}
/** * Destroy all the oplogs * */ private void destroyAllOplogs() { getPersistentOplogs().destroyAllOplogs(); // Need to also remove all oplogs that logically belong to this DiskStore // even if we were not using them. { // delete all overflow oplog files FilenameFilter overflowFileFilter = new DiskStoreFilter(OplogType.OVERFLOW, true, getName()); deleteFiles(overflowFileFilter); } { // delete all backup oplog files FilenameFilter backupFileFilter = new DiskStoreFilter(OplogType.BACKUP, true, getName()); deleteFiles(backupFileFilter); } }
/**
 * Forces a compaction pass: rolls the active oplog if it is compactable,
 * gathers every compactable oplog, schedules them on the compactor, and
 * waits for the run to complete.
 *
 * @param dr the disk region driving the forced roll
 * @return {@code true} if oplogs were found, scheduled, and compacted;
 *         {@code false} if there was nothing to compact or the compactor
 *         declined to schedule
 */
boolean basicForceCompaction(DiskRegion dr) {
  PersistentOplogSet oplogSet = getPersistentOplogs();
  // see if the current active oplog is compactable; if so
  {
    Oplog active = oplogSet.getChild();
    if (active != null) {
      if (active.hadLiveEntries() && active.needsCompaction()) {
        active.forceRolling(dr);
      }
    }
  }
  // Compact the oplogs
  CompactableOplog[] oplogs = getOplogsToBeCompacted(true/* fixes 41143 */);
  // schedule a compaction if at this point there are oplogs to be compacted
  if (oplogs != null) {
    if (this.oplogCompactor != null) {
      if (this.oplogCompactor.scheduleIfNeeded(oplogs)) {
        this.oplogCompactor.waitForRunToComplete();
      } else {
        // Compactor declined the work; null out the array so the return
        // value reports that no compaction happened.
        oplogs = null;
        // @todo darrel: still need to schedule oplogs and wait for them to
        // compact.
      }
    }
  }
  return oplogs != null;
}
// NOTE(review): fragment — the enclosing method (and the try's catch/finally)
// is not visible in this view.
try {
  // Locate existing persistent backup oplog files matching the partial file
  // name, then create/open the oplogs the store needs from them.
  Map<File, DirectoryHolder> persistentBackupFiles = getPersistentOplogs().findFiles(partialFileName);
  getPersistentOplogs().createOplogs(needsOplogs, persistentBackupFiles);
  finished = true;
// NOTE(review): fragment — the enclosing method is not visible in this view.
// Close the persistent oplog set, capturing any failure it reports.
RuntimeException exception = getPersistentOplogs().close();
// NOTE(review): this condition looks inverted — as written, the close
// exception is only recorded when an earlier exception (rte) already exists,
// overwriting it; the usual keep-first-failure pattern would be
// `rte == null`. Confirm intent before changing.
if (exception != null && rte != null) {
  rte = exception;