this.dirHolder = dirHolder; this.opState = new OpState(); long maxOplogSizeParam = getParent().getMaxOplogSizeInBytes(); long availableSpace = this.dirHolder.getAvailableSpace(); if (prevOplog.compactOplogs) { setMaxCrfDrfSize(); this.stats = prevOplog.stats; this.compactOplogs = prevOplog.compactOplogs; this.dataVersion = prevOplog.getDataVersionIfOld(); String n = getParent().getName(); this.diskFile = new File(this.dirHolder.getDir(), oplogSet.getPrefix() + n + "_" + oplogId); try { createDrf(prevOplog.drf); createCrf(prevOplog.crf); if (getParent().isOfflineCompacting()) { krfFileCreate(); close(); getParent().getCancelCriterion().checkCancelInProgress(ex); if (ex instanceof DiskAccessException) { throw (DiskAccessException) ex; getParent());
if (oplog.needsKrf()) { numEntries += oplog.recoverIndexes(singleIndex); else if (oplog.getIndexFileIfValid() != null) { Collection<DiskRegionInfo> regions = oplog .getTargetRegionsForIndexes(singleIndex.keySet()); numEntries += oplog.writeIRF(oplog.getSortedLiveEntries(regions), null, singleIndex.keySet(), singleIndex); .getInternalProductCallbacks().getAllLocalIndexes(dsImpl); Collection<DiskRegionInfo> regions = oplog .getTargetRegionsForIndexes(allIndexes); numEntries += oplog.writeIRF(oplog.getSortedLiveEntries(regions), null, allIndexes, singleIndex);
drName = dr.getName(); } else { drName = getParent().getName(); flushAll(); // needed in case of async lengthOfOperationCausingSwitch += 20; // for worstcase overhead of writing if (lengthOfOperationCausingSwitch > getParent().getMaxDirSize()) { throw new DiskAccessException( LocalizedStrings.Oplog_OPERATION_SIZE_CANNOT_EXCEED_THE_MAXIMUM_DIRECTORY_SIZE_SWITCHING_PROBLEM_FOR_ENTRY_HAVING_DISKID_0 if (needsCompaction()) { addToBeCompacted(); } else { getOplogSet().addInactive(this); DirectoryHolder nextDirHolder = getOplogSet().getNextDir(lengthOfOperationCausingSwitch, true); Oplog newOplog = new Oplog(this.oplogId + 1, nextDirHolder, this); newOplog.firstRecord = true; getOplogSet().setChild(newOplog); finishedAppending(); getParent().executeDelayedExpensiveWrite(new Runnable() { public void run() { if (getParent().isOfflineCompacting()) { krfClose(); } else {
private void writeGemfireVersionRecord(OplogFile olf) throws IOException { if (this.gfversion == null) { this.gfversion = Version.CURRENT; } Version dataVersion = getDataVersionIfOld(); if (dataVersion == null) { dataVersion = Version.CURRENT; } // if gfversion and dataVersion are not same, then write a special token // version and then write both, else write gfversion as before // this is for backward compatibility with 7.0 this.opState = new OpState(); if (this.gfversion == dataVersion) { writeProductVersionRecord(this.gfversion, olf); } else { writeProductVersionRecord(Version.TOKEN, olf); clearOpState(); writeProductVersionRecord(this.gfversion, olf); clearOpState(); writeProductVersionRecord(dataVersion, olf); } }
return; if (!couldHaveKrf()) { return; getParent().acquireCompactorReadLock(); try { if (!getParent().allowKrfCreation()) { return; lockCompactor(); List<KRFEntry> sortedLiveEntries = getSortedLiveEntries(regions); if (sortedLiveEntries == null) { krfFileCreate(); writeOneKeyEntryForKRF(ke); krfClose(); krfCreateSuccess = true; for (DiskRegionInfo dri : regions) { closeAndDeleteKrf(); unlockCompactor(); getParent().releaseCompactorReadLock();
if(getParent().getDiskInitFile().hasKrf(this.oplogId)) { File krfFile = new File(getKrfFilePath()); if(krfFile.exists()) { files.add(IOUtils.tryGetCanonicalFileElseGetAbsoluteFile(krfFile)); File idxFile = getIndexFileIfValid(false); if(idxFile != null && idxFile.exists()) { files.add(IOUtils.tryGetCanonicalFileElseGetAbsoluteFile(idxFile));
if (getOplogSet().getChild() != this) { useNextOplog = true; } else { initOpState(OPLOG_MOD_ENTRY_1ID, dr, entry, value, userBits, false); adjustment = getOpStateSize(); assert adjustment > 0; long temp = (this.crf.currSize + adjustment); if (temp > getMaxCrfSize() && !isFirstRecord()) { switchOpLog(dr, adjustment, entry); startPosForSynchOp = writeOpLogBytes(this.crf, async, true); this.crf.currSize = temp; startPosForSynchOp += getOpStateValueOffset(); if (logger.isTraceEnabled(LogMarker.PERSIST_WRITES)) { VersionTag tag = null; abs(id.getKeyId()), entry.getKey(), startPosForSynchOp, userBits, value.getLength(), value.getBytesAsString(), dr.getId(), tag, getOplogId()); oldOplogId = id.setOplogId(getOplogId()); if (EntryBits.isAnyInvalid(userBits) || EntryBits.isTombstone(userBits)) { id.setOffsetInOplog(-1); this.incTotalCount(); if (oldOplogId != getOplogId()) { Oplog oldOplog = getOplogSet().getChild(oldOplogId); if (oldOplog != null) { oldOplog.rmLive(dr, entry);
if (getOplogSet().getChild() != this) { useNextOplog = true; } else if ((this.drf.currSize + MAX_DELETE_ENTRY_RECORD_BYTES) > getMaxDrfSize() && !isFirstRecord()) { switchOpLog(dr, MAX_DELETE_ENTRY_RECORD_BYTES, entry); useNextOplog = true; } else { long oldOplogId = id.setOplogId(getOplogId()); if (!isClear) { this.firstRecord = false; initOpState(OPLOG_DEL_ENTRY_1ID, dr, entry, null, (byte) 0, true); int adjustment = getOpStateSize(); logger.trace( "Oplog::basicRemove: Recording the Deletion of entry in the Oplog with id = {} The Oplog Disk ID for the entry being deleted = {} Mode is Synch", getOplogId(), id); startPosForSynchOp = writeOpLogBytes(this.drf, async, true); setHasDeletes(true); if (logger.isTraceEnabled(LogMarker.PERSIST_WRITES)) { logger.debug("basicRemove: id=<{}> key=<{}> drId={} oplog#{}", abs(id.getKeyId()), entry.getKey(), dr.getId(), getOplogId()); if (oldOplogId == getOplogId()) { rmOplog = this; } else { rmOplog = getOplogSet().getChild(oldOplogId);
initOpState(OPLOG_NEW_ENTRY_0ID, dr, entry, value, userBits, false); long temp = (getOpStateSize() + this.crf.currSize); if (!this.wroteNewEntryBase) { temp += OPLOG_NEW_ENTRY_BASE_REC_SIZE; if (this != getOplogSet().getChild()) { useNextOplog = true; } else if (temp > getMaxCrfSize() && !isFirstRecord()) { switchOpLog(dr, getOpStateSize(), entry); useNextOplog = true; } else { writeNewEntryBaseRecord(async); long createOplogEntryId = getOplogSet().newOplogEntryId(); long old_kid = id.getKeyId(); id.setKeyId(createOplogEntryId); int dataLength = getOpStateSize(); id.setOplogId(getOplogId()); startPosForSynchOp = writeOpLogBytes(this.crf, async, true); startPosForSynchOp += getOpStateValueOffset(); if (logger.isTraceEnabled(LogMarker.PERSIST_WRITES)) { VersionTag tag = null;
File indexFile = oplog.getIndexFileIfValid(this.recreateIndexFile); boolean hasKrf = !oplog.needsKrf(); if (!hasKrf || !newIndexes.isEmpty() || indexFile == null) { .getTargetRegionsForIndexes(indexes); List<KRFEntry> sortedLiveEntries = oplog .getSortedLiveEntries(targetRegions); if (indexFile == null) { oplog.writeIRF(sortedLiveEntries, null, indexes, allIndexes); oplog.writeIRF(sortedLiveEntries, null, newIndexes, allIndexes); oplog.recoverIndexes(allIndexes); oplog.getOplogIndex().recoverIndexes(allIndexes); } finally { for (Oplog oplog : this.allOplogs) { oplog.clearInitRecoveryMap(); oplog.clearInitRecoveryMap();
Set<Oplog> oplogsNeedingValueRecovery = new HashSet<Oplog>(); if (!this.alreadyRecoveredOnce.get()) { if (getChild() != null && !getChild().hasBeenUsed()) { byteCount += oplog.recoverDrf(deletedIds, this.alreadyRecoveredOnce.get(), latestOplog); latestOplog = false; if (!this.alreadyRecoveredOnce.get()) { updateOplogEntryId(oplog.getMaxRecoveredOplogEntryId()); for (Oplog oplog: oplogSet) { long startOpLogRead = parent.getStats().startOplogRead(); long bytesRead = oplog.recoverCrf(deletedIds, latestOplog = false; if (!this.alreadyRecoveredOnce.get()) { updateOplogEntryId(oplog.getMaxRecoveredOplogEntryId()); for (Oplog oplog: oplogSet) { if (oplog != getChild()) { oplog.initAfterRecovery(parent.isOffline()); if(oplog.needsKrf()) { oplog.createKrfAsync();
DiskRegion dr = ((LocalRegion)region).getDiskRegion(); StatisticsFactory factory = region.getCache().getDistributedSystem(); Oplog oplog1 = new Oplog(11, dr.getOplogSet(), new DirectoryHolder(factory,dirs[1], 1000, 0)); Oplog oplog2 = new Oplog(12, dr.getOplogSet(), new DirectoryHolder(factory,dirs[2], 1000, 1)); Oplog oplog3 = new Oplog(13, dr.getOplogSet(), new DirectoryHolder(factory,dirs[3], 1000, 2)); oplog1.incTotalCount(); oplog1.incLiveCount(); oplog2.incTotalCount(); oplog2.incLiveCount(); oplog3.incTotalCount(); oplog3.incLiveCount(); dr.addToBeCompacted(oplog3); if (oplog1 != dr.removeOplog(oplog1.getOplogId())) { fail(" expected oplog1 to be the first oplog but not the case !"); if (oplog2 != dr.removeOplog(oplog2.getOplogId())) { fail(" expected oplog2 to be the first oplog but not the case !"); if (oplog3 != dr.removeOplog(oplog3.getOplogId())) { fail(" expected oplog3 to be the first oplog but not the case !"); oplog1.destroy(); oplog2.destroy(); oplog3.destroy();
if(Oplog.isCRFFile(file.getName())) { if(!isCrfOplogIdPresent(oplogId)) { deleteFileOnRecovery(file); try String krfFileName = Oplog.getKRFFilenameFromCRFFilename(file.getAbsolutePath()); File krfFile = new File(krfFileName); deleteFileOnRecovery(krfFile); } else if(Oplog.isDRFFile(file.getName())) { if(!isDrfOplogIdPresent(oplogId)) { deleteFileOnRecovery(file); continue; //this file we unable to delete earlier } else if (Oplog.isIRFFile(file.getName())) { if(!isIrfOplogIdPresent(oplogId)) { deleteFileOnRecovery(file); oplog = new Oplog(oplogId, this); oplog.addRecoveredFile(file, entry.getValue(), foundCrfs, foundDrfs);
Oplog oplog = new Oplog(1, oplogSet, dirHolder); oplog.close(); }}); oplog = new Oplog(1, oplogSet); File drfFile = FileUtil.find(testDirectory, ".*.drf"); File crfFile = FileUtil.find(testDirectory, ".*.crf"); oplog.addRecoveredFile(drfFile, dirHolder, new TLongHashSet(), new TLongHashSet()); oplog.addRecoveredFile(crfFile, dirHolder, new TLongHashSet(), new TLongHashSet()); OplogEntryIdSet deletedIds = new OplogEntryIdSet(); oplog.recoverDrf(deletedIds, false, true); oplog.recoverCrf(deletedIds, true, true, false, Collections.singleton(oplog), true, false); context.assertIsSatisfied();
Map<Long, DiskRecoveryStore> deferredRegions, Object sync) { if (getParent().isClosing() || diskRecoveryStores.isEmpty() || this.regionMap.isEmpty()) { return true; sortedLiveEntries = getSortedLiveEntries(targetRegions.values()); if(sortedLiveEntries == null) { "Oplog::recoverValuesIfNeeded: recovering values from " + toString()); if(getParent().isClosing() || diskRecoveryStores.isEmpty()) { return true; DiskEntry.Helper.recoverValue(diskEntry, getOplogId(), diskRecoveryStore); } catch(RegionDestroyedException e) { this.logger.info(LocalizedStrings.ONE_ARG, "Oplog::recoverValuesIfNeeded: got low memory exception." + "Stopping the recovery " + toString()); diskRecoveryStores.remove(diskRegionId);
synchronized (id) { long opId = id.getOplogId(); if (opId != getOplogId()) { retryOplog = getOplogSet().getChild(opId); } else { return retryOplog.getBytesAndBits(dr, id, faultingIn, bitOnly); "TRACE_READS getBytesAndBits: id=<" + abs(id.getKeyId()) + " valueOffset=" + offset + " userBits=" + id.getUserBits() + " valueLen=" + id.getValueLength() + " drId=" + dr.getId() + " oplog#" + getOplogId()); bb = basicGet(dr, offset, bitOnly, id.getValueLength(), id.getUserBits());
public void krfFileCreate() throws IOException { assert (getParent().isValidating() == false); this.krf.f = new File(getKrfFilePath()); if (this.krf.f.exists()) { throw new IllegalStateException("krf file " + this.krf.f + " already exists."); this.krf.dos = new DataOutputStream(this.krf.bos); this.krf.dos.writeLong(getParent().getDiskStoreID().getLeastSignificantBits()); this.krf.dos.writeLong(getParent().getDiskStoreID().getMostSignificantBits()); this.krf.dos.writeByte(END_OF_RECORD_ID); Version dataVersion = getDataVersionIfOld(); if (dataVersion == null) { dataVersion = Version.CURRENT; Map<Long, AbstractDiskRegion> drMap = getParent().getAllDiskRegions(); byte[] rvvBytes = serializeRVVs(drMap, false); this.krf.dos.write(rvvBytes); this.krf.dos.writeByte(END_OF_RECORD_ID);
/**
 * Verifies that the disk store's current child oplog can be swapped via
 * {@code setChild} and that {@code testHook_getChild} reflects whichever
 * oplog was most recently installed.
 */
public void testGetChild() {
  deleteFiles();
  region = DiskRegionHelperFactory.getAsyncPersistOnlyRegion(cache, diskProps);
  DiskRegion dr = ((LocalRegion) region).getDiskRegion();
  // Capture the existing child oplog so it can be restored at the end.
  Oplog originalChild = dr.testHook_getChild();
  long originalId = originalChild.getOplogId();
  StatisticsFactory statsFactory = region.getCache().getDistributedSystem();
  // Install a replacement oplog (same id, fresh directory holder) and
  // confirm the test hook now reports the replacement.
  Oplog replacement =
      new Oplog(originalId, dr.getOplogSet(),
          new DirectoryHolder(statsFactory, dirs[0], 1000000, 0));
  dr.getDiskStore().persistentOplogs.setChild(replacement);
  assertEquals(replacement, dr.testHook_getChild());
  // Restore the original child and confirm the hook reports it again.
  dr.setChild(originalChild);
  assertEquals(originalChild, dr.testHook_getChild());
  replacement.close();
  replacement = null;
  closeDown();
}
if (getParent().isClosing()) { return; sortedLiveEntries = getSortedLiveEntries(targetRegions.values()); if (sortedLiveEntries == null) { for (KRFEntry entry : sortedLiveEntries) { if (getParent().isClosing()) { return; DiskEntry.Helper.recoverValue(diskEntry, getOplogId(), diskRecoveryStore, in); } catch (RegionDestroyedException e) {
public void krfFileCreate() throws IOException { assert (getParent().isValidating() == false); this.krf.dos.writeLong(getParent().getDiskStoreID().getLeastSignificantBits()); this.krf.dos.writeLong(getParent().getDiskStoreID().getMostSignificantBits()); this.krf.dos.writeByte(END_OF_RECORD_ID); Version dataVersion = getDataVersionIfOld(); if (dataVersion == null) { dataVersion = Version.CURRENT; Map<Long, AbstractDiskRegion> drMap = getParent().getAllDiskRegions(); byte[] rvvBytes = serializeRVVs(drMap, false); this.krf.dos.write(rvvBytes); this.krf.dos.writeByte(END_OF_RECORD_ID);