/**
 * Delegates to the parent to report whether compaction is currently possible.
 *
 * @return {@code true} if the parent allows compaction
 */
public boolean isCompactionPossible() {
  return getParent().isCompactionPossible();
}
}
/**
 * Adds the given oplog to the inactive set; this overload is for oplogs that are
 * not being reopened (delegates with {@code reopen = false}).
 */
void addInactive(Oplog oplog) {
  addInactive(oplog, false);
}
/**
 * Registers this oplog with its owning oplog set as a candidate for compaction.
 * Idempotent: the {@code added} flag ensures only the first call has any effect;
 * the method is synchronized so the check-and-set is atomic.
 */
private synchronized void addToBeCompacted() {
  if (this.added) {
    // already queued for compaction; nothing to do
    return;
  }
  this.added = true;
  getOplogSet().addToBeCompacted(this);
  if (logger.isDebugEnabled()) {
    logger.debug("Oplog::switchOpLog: Added the Oplog = {} for compacting.", this.oplogId);
  }
}
if (!isCrfOplogIdPresent(oplogId)) { deleteFileOnRecovery(file); try { String krfFileName = Oplog.getKRFFilenameFromCRFFilename(file.getAbsolutePath()); File krfFile = new File(krfFileName); deleteFileOnRecovery(krfFile); } catch (Exception ex) {// ignore if (!isDrfOplogIdPresent(oplogId)) { deleteFileOnRecovery(file); continue; // this file we unable to delete earlier Oplog oplog = getChild(oplogId); if (oplog == null) { oplog = new Oplog(oplogId, this); addRecoveredOplog(oplog); verifyOplogs(foundCrfs, foundDrfs);
new File(this.drf.f.getParentFile(), oplogSet.getPrefix() + n + "_" + this.oplogId); try { getOplogSet().removeOplog(getOplogId(), true, getHasDeletes() ? this : null); if (!getHasDeletes()) { getOplogSet().drfDelete(this.oplogId); deleteFile(this.drf); getOplogSet().removeOplog(getOplogId(), true/* getOplogSet().addInactive(this);
addToBeCompacted(); } else { getOplogSet().addInactive(this); getOplogSet().getNextDir(lengthOfOperationCausingSwitch, true); Oplog newOplog = new Oplog(this.oplogId + 1, nextDirHolder, this); newOplog.firstRecord = true; getOplogSet().setChild(newOplog); getOplogSet().removeOplog(this.oplogId); throw dae;
void addRecoveredOplog(Oplog oplog) { basicAddToBeCompacted(oplog); // don't schedule a compaction here. Wait for recovery to complete }
addDrf(olgToAddToDrfOnly);
/**
 * Destroys the on-disk data for the given region.
 *
 * <p>For a backup (persistent) region the in-memory entries are closed first and the
 * destroy is delegated to the region's persistent oplog set. For an overflow-only
 * region the entries still held on disk are freed before the in-memory entries are
 * closed.
 *
 * @param region the region being destroyed; may be {@code null}
 * @param dr the disk region describing the on-disk state
 */
private void basicDestroy(LocalRegion region, DiskRegion dr) {
  if (!dr.isBackup()) {
    // overflow-only: release disk-resident entries, then close the in-memory ones
    dr.freeAllEntriesOnDisk(region);
    if (region != null) {
      region.closeEntries();
    }
    return;
  }
  // backup region: close entries, then let the oplog set remove the persisted data
  if (region != null) {
    region.closeEntries();
  }
  getPersistentOplogSet(dr).basicDestroy(dr);
}
private void basicClose(LocalRegion region, DiskRegion dr, boolean closeDataOnly) { if (dr.isBackup()) { if (region != null) { region.closeEntries(); } if (!closeDataOnly) { getDiskInitFile().closeRegion(dr); } // call close(dr) on each oplog PersistentOplogSet oplogSet = getPersistentOplogSet(dr); oplogSet.basicClose(dr); } else { if (region != null) { // OVERFLOW ONLY clearAsyncQueue(region, true, null); // no need to try to write these to // disk any longer dr.freeAllEntriesOnDisk(region); region.closeEntries(); this.overflowMap.remove(dr); } } }
oplogSet.clear(dr, rvv); } else if (rvv == null) {
RuntimeException exception = getPersistentOplogs().close(); if (exception != null && rte != null) { rte = exception;
/**
 * Adds the given oplog to the to-be-compacted set and schedules a compaction.
 *
 * <p>Adding an oplog to the {@code oplogIdToOplog} LinkedHashMap and notifying the
 * compactor thread must happen atomically, which is why callers hold a lock on that
 * map. If the compactor thread is active and recovery is not in progress, the
 * compactor is notified of the addition.
 *
 * @param oplog the oplog to queue for compaction
 */
void addToBeCompacted(Oplog oplog) {
  basicAddToBeCompacted(oplog);
  parent.scheduleCompaction();
}
/**
 * Delegates to the parent to report whether a KRF (key file) could exist.
 *
 * @return {@code true} if the parent reports a KRF could exist
 */
public boolean couldHaveKrf() {
  return getParent().couldHaveKrf();
}
/** Forwards the oplog to the owning oplog set's to-be-compacted queue. */
void addToBeCompacted(Oplog oplog) {
  getOplogSet().addToBeCompacted(oplog);
}
/**
 * Adds the given oplog to the inactive set as a reopened oplog
 * (delegates with {@code reopen = true}).
 */
void inactiveReopened(Oplog oplog) {
  addInactive(oplog, true);
}
/** Records creation of the CRF for the given oplog id in the parent's disk init file. */
public void crfCreate(long oplogId) {
  getParent().getDiskInitFile().crfCreate(oplogId);
}
/** Records creation of the DRF for the given oplog id in the parent's disk init file. */
public void drfCreate(long oplogId) {
  getParent().getDiskInitFile().drfCreate(oplogId);
}
/** Records deletion of the DRF for the given oplog id in the parent's disk init file. */
public void drfDelete(long oplogId) {
  getParent().getDiskInitFile().drfDelete(oplogId);
}