/**
 * Returns the array of all oplogs in this set so that each one can be
 * included in a backup.
 *
 * @return every oplog currently tracked by {@code persistentOplogs}
 */
private Oplog[] getAllOplogsForBackup() {
  return persistentOplogs.getAllOplogs();
}
/**
 * Reports whether any data has been persisted, i.e. whether this store's
 * oplog set already has a current child oplog.
 *
 * @return {@code true} when a child oplog exists
 */
public boolean hasPersistedData() {
  return null != persistentOplogs.getChild();
}
/**
 * Delegates to the parent disk store's notion of whether compaction may run.
 *
 * @return {@code true} when the parent reports compaction is possible
 */
public boolean isCompactionPossible() {
  final boolean possible = getParent().isCompactionPossible();
  return possible;
} }
/**
 * Lazily creates the first child oplog. A no-op when a child already exists;
 * otherwise forces creation via {@code setFirstChild}.
 */
public void initChild() {
  if (getChild() != null) {
    return; // already initialized
  }
  setFirstChild(getSortedOplogs(), true);
}
public void destroyAllOplogs() { // get a snapshot to prevent CME for (Oplog oplog : getAllOplogs()) { if (oplog != null) { oplog.destroy(); removeOplog(oplog.getOplogId()); } } }
// Fragment of a recovery-time file-cleanup routine (the enclosing method's
// header and closing braces are outside this view). Intent, as far as the
// visible tokens show: for files whose oplog id is no longer present in the
// crf/drf/irf id sets, delete the file (a krf deletion failure is ignored);
// otherwise find-or-create the Oplog for that id, register it as recovered,
// and finally verify the recovered crf/drf sets.
// NOTE(review): this line is not valid Java as written — "try
// deleteFileOnRecovery(krfFile); }catch" is missing its opening brace and
// several closing braces/continues were lost when the file was collapsed onto
// one line. Restore the original formatting from version control rather than
// hand-repairing the braces here.
if(!isCrfOplogIdPresent(oplogId)) { deleteFileOnRecovery(file); try deleteFileOnRecovery(krfFile); }catch(Exception ex) {//ignore if(!isDrfOplogIdPresent(oplogId)) { deleteFileOnRecovery(file); continue; //this file we unable to delete earlier if(!isIrfOplogIdPresent(oplogId)) { deleteFileOnRecovery(file); continue; Oplog oplog = getChild(oplogId); if (oplog == null) { oplog = new Oplog(oplogId, this); addRecoveredOplog(oplog); verifyOplogs(foundCrfs, foundDrfs);
// Fragment of the oplog recovery driver (method header not visible). Visible
// intent: build the sorted oplog set; on first recovery, drop an unused
// current child from the set; fold each oplog's max recovered entry id into
// the global entry-id counter via updateOplogEntryId; let non-child oplogs
// run initAfterRecovery; create a first child if none exists; and decide
// whether values need asynchronous recovery (recoverValues() without
// recoverValuesSync()).
// NOTE(review): the run "recoverValues(), recoverValuesSync(),
// this.alreadyRecoveredOnce.get(), oplogsNeedingValueRecovery, latestOplog =
// false;" is not valid Java — expressions and braces were mangled when the
// file was flattened. Recover the original from version control; do not
// attempt to re-brace this line by eye.
TreeSet<Oplog> oplogSet = getSortedOplogs(); Set<Oplog> oplogsNeedingValueRecovery = new HashSet<Oplog>(); if (!this.alreadyRecoveredOnce.get()) { if (getChild() != null && !getChild().hasBeenUsed()) { oplogSet.remove(getChild()); latestOplog = false; if (!this.alreadyRecoveredOnce.get()) { updateOplogEntryId(oplog.getMaxRecoveredOplogEntryId()); recoverValues(), recoverValuesSync(), this.alreadyRecoveredOnce.get(), oplogsNeedingValueRecovery, latestOplog = false; if (!this.alreadyRecoveredOnce.get()) { updateOplogEntryId(oplog.getMaxRecoveredOplogEntryId()); if (oplog != getChild()) { oplog.initAfterRecovery(parent.isOffline()); if (getChild() == null) { setFirstChild(getSortedOplogs(), false); if(recoverValues() && !recoverValuesSync()) {
// Fragment of the top-level recovery entry point (method header and the
// matching finally/close of the try block are outside this view). Visible
// steps: seed the oplog entry-id counter, raise it to at least the max
// recovered clear-entry id from the disk init file, tag EntryLogger output
// with this disk store's id and the source label "recovery", then recover all
// oplogs (accumulating a byte count) and iterate the non-null results.
// NOTE(review): the try block and the for-loop body continue past this line —
// do not treat this as a complete statement sequence.
initOplogEntryId(); updateOplogEntryId(parent.getDiskInitFile().getMaxRecoveredClearEntryId()); EntryLogger.setSource(parent.getDiskStoreID(), "recovery"); try { byteCount = recoverOplogs(byteCount); for (Oplog oplog: getAllOplogs()) { if (oplog != null) {
// Fragment of an oplog modify-record reader's offline-compaction path (the
// enclosing method header and the if-branch that pairs with this else are not
// visible). Phase 2 of offline compaction: asserts that phase 1 produced a
// compaction record (p2cr) for this key, then copies the modify record
// forward into the current child oplog, with a trace log when
// persist-recovery debugging is on.
// NOTE(review): because the file was flattened, the inline "// phase2"
// comment now comments out the remainder of this physical line — the code
// after it is dead as written. Restore original line breaks from version
// control.
DiskRecoveryStore drs = getOplogSet().getCurrentlyRecovering(drId); } else { // phase2 Assert.assertTrue(p2cr != null, "First pass did not find create a compaction record"); getOplogSet().getChild().copyForwardForOfflineCompact(oplogKeyId, p2cr.getKeyBytes(), objValue, userBits, drId, tag); if (isPersistRecoveryDebugEnabled) { logger.trace(LogMarker.PERSIST_RECOVERY, "readModifyEntry copyForward oplogKeyId=<{}>", oplogKeyId);
// Fragment of the oplog roll-over/switch path (method header not visible).
// Visible intent: mark this oplog for compaction or hand it to the inactive
// set; pick the next directory sized for the operation that forced the
// switch; create the successor Oplog with id+1, flag it as holding its first
// record, install it as the set's child; and, on the failure path, unregister
// this oplog and rethrow the DiskAccessException (dae).
// NOTE(review): the if/else and try/catch structure around these statements
// was lost in line-collapsing — the braces here are unbalanced by design of
// the extraction, not of the original code.
addToBeCompacted(); } else { getOplogSet().addInactive(this); DirectoryHolder nextDirHolder = getOplogSet().getNextDir(lengthOfOperationCausingSwitch, true); Oplog newOplog = new Oplog(this.oplogId + 1, nextDirHolder, this); newOplog.firstRecord = true; getOplogSet().setChild(newOplog); getOplogSet().removeOplog(this.oplogId); throw dae;
// Fragment of oplog file deletion/close bookkeeping (enclosing method not
// visible). Visible intent: build an oplog file name from the set's prefix
// and this oplog id; unregister the oplog from the set (passing this oplog as
// the "deleted" argument only when it has deletes); when there are no
// deletes, also delete the drf file and record the drf deletion; on another
// path, unregister while telling the set deletion is in progress (so
// undeletedOplogSize is not incremented) and move the oplog to the inactive
// set.
// NOTE(review): statement grouping (try blocks, if bodies) was destroyed by
// line-collapsing; consult version control for the real structure.
oplogSet.getPrefix() + n + "_" + this.oplogId); try { getOplogSet().removeOplog(getOplogId(), true, getHasDeletes() ? this : null); if (!getHasDeletes()) { getOplogSet().drfDelete(this.oplogId); deleteFile(this.drf); getOplogSet().removeOplog(getOplogId(), true/* say we are deleting so that undeletedOplogSize is not inced */, null); getOplogSet().addInactive(this);
private void readVersionTagOnlyEntry(CountingDataInputStream dis, byte opcode) throws IOException { long oplogOffset = -1; long drId = DiskInitFile.readDiskRegionID(dis); DiskRecoveryStore drs = getOplogSet().getCurrentlyRecovering(drId); // read versions VersionTag tag = readVersionsFromOplog(dis); if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) { logger.trace(LogMarker.PERSIST_RECOVERY, "readVersionTagOnlyEntry drId={} tag={}", drId, tag); } readEndOfRecord(dis); // Update the RVV with the new entry if (drs != null) { drs.recordRecoveredVersionTag(tag); } }
private void setFirstChild(TreeSet<Oplog> oplogSet, boolean force) { if (parent.isOffline() && !parent.isOfflineCompacting()) return; if (!oplogSet.isEmpty()) { Oplog first = oplogSet.first(); DirectoryHolder dh = first.getDirectoryHolder(); dirCounter = dh.getArrayIndex(); dirCounter = (++dirCounter) % parent.dirLength; // we want the first child to go in the directory after the directory // used by the existing oplog with the max id. // This fixes bug 41822. } if (force || maxRecoveredOplogId > 0) { setChild(new Oplog(maxRecoveredOplogId + 1, this, getNextDir())); } }
// jMock Expectations fragment (the enclosing test method and Expectations
// block are outside this view): stubs parent.getAllDiskRegions() to return
// map, oplogSet.getCurrentlyRecovering(5L) to return drs,
// oplogSet.getParent() to return parent, and ignores any other calls on
// oplogSet.
allowing(parent).getAllDiskRegions(); will(returnValue(map)); allowing(oplogSet).getCurrentlyRecovering(5L); will(returnValue(drs)); allowing(oplogSet).getParent(); will(returnValue(parent)); ignoring(oplogSet);
// setChild: delegates installation of the new child oplog to the owning
// oplog set. NOTE(review): this physical line also begins getInfoFileDir(),
// whose body continues beyond this view — it cannot be edited from here.
void setChild(Oplog oplog) { getOplogSet().setChild(oplog); } DirectoryHolder getInfoFileDir() {
private void recreateAllLocalIndexes(final LogWriter logger) { Collection<DiskStoreImpl> diskStores = Misc.getGemFireCache().listDiskStores(); for (DiskStoreImpl ds : diskStores) { PersistentOplogSet oplogSet = ds.getPersistentOplogSet(null); ds.resetIndexRecoveryState(); // delete all idx file of all oplogs, so second arg as true below ds.scheduleIndexRecovery(oplogSet.getSortedOplogs(), true); logger.info("FabricDatabase: recreateAllLocalIndexes " + "waiting for index re-creation for disk store: " + ds.getName()); ds.waitForIndexRecoveryEnd(-1); logger.info("FabricDatabase: recreateAllLocalIndexes " + "index re-creation for disk store: " + ds.getName() + " ended"); } }
/**
 * Re-registers an oplog that was inactive and has been reopened.
 * The {@code true} flag is forwarded to {@code addInactive} — presumably it
 * marks the oplog as reopened; confirm against addInactive's signature.
 */
void inactiveReopened(Oplog oplog) {
  addInactive(oplog, true);
}
// Routes this oplog into the drf-only bookkeeping (statement fragment; the
// enclosing method is outside this view). NOTE(review): the variable name
// "olgToAddToDrfOnly" looks like a typo for "oplogToAddToDrfOnly" — check its
// declaration (not visible here) and rename consistently if so.
addDrf(olgToAddToDrfOnly);
// Fragment of a recovery-time file-cleanup routine, near-duplicate of the
// earlier crf/drf cleanup fragment in this file (enclosing method header and
// braces not visible). Visible intent: delete files whose oplog ids are no
// longer in the crf/drf id sets (krf deletion failures ignored), otherwise
// find-or-create and register the recovered Oplog, then verify the recovered
// crf/drf sets.
// NOTE(review): not valid Java as written — "try deleteFileOnRecovery(
// krfFile); }catch" lacks its opening brace and closing braces were lost in
// extraction. Restore formatting from version control; also consider
// deduplicating with the sibling fragment once both are restored.
if(!isCrfOplogIdPresent(oplogId)) { deleteFileOnRecovery(file); try deleteFileOnRecovery(krfFile); }catch(Exception ex) {//ignore if(!isDrfOplogIdPresent(oplogId)) { deleteFileOnRecovery(file); continue; //this file we unable to delete earlier Oplog oplog = getChild(oplogId); if (oplog == null) { oplog = new Oplog(oplogId, this); addRecoveredOplog(oplog); verifyOplogs(foundCrfs, foundDrfs);