private boolean waitUntilFlushIsReady() throws InterruptedException { if (diskStore.maxAsyncItems > 0) { final long time = diskStore.getTimeInterval(); synchronized (diskStore.getAsyncMonitor()) { if (time > 0) { long nanosRemaining = TimeUnit.MILLISECONDS.toNanos(time); final long endTime = System.nanoTime() + nanosRemaining; boolean done = diskStore.checkAndClearForceFlush() || diskStore.checkAsyncItemLimit(); while (!done && nanosRemaining > 0) { TimeUnit.NANOSECONDS.timedWait(diskStore.getAsyncMonitor(), nanosRemaining); done = diskStore.checkAndClearForceFlush() || diskStore.checkAsyncItemLimit(); if (!done) { nanosRemaining = endTime - System.nanoTime(); boolean done = diskStore.checkAndClearForceFlush() || diskStore.checkAsyncItemLimit(); while (!done) { diskStore.getAsyncMonitor().wait(); done = diskStore.checkAndClearForceFlush() || diskStore.checkAsyncItemLimit(); long time = diskStore.getTimeInterval(); if (time > 0) { long nanosRemaining = TimeUnit.MILLISECONDS.toNanos(time); final long endTime = System.nanoTime() + nanosRemaining; synchronized (diskStore.getAsyncMonitor()) { boolean done = diskStore.checkAndClearForceFlush(); while (!done && nanosRemaining > 0) { TimeUnit.NANOSECONDS.timedWait(diskStore.getAsyncMonitor(), nanosRemaining); done = diskStore.checkAndClearForceFlush(); if (!done) { nanosRemaining = endTime - System.nanoTime();
/** Cancellation is owned by the disk store; delegate the check there. */
@Override
public CancelCriterion getCancelCriterion() {
  final CancelCriterion criterion = getDiskStore().getCancelCriterion();
  return criterion;
}
private void offlineCompact() { assert isOfflineCompacting(); this.RECOVER_VALUES = false; this.deadRecordCount = 0; for (DiskRegionView drv : getKnown()) { scheduleForRecovery(OfflineCompactionDiskRegion.create(this, drv)); } getPersistentOplogs().recoverRegionsThatAreReady(); getPersistentOplogs().offlineCompact(); // TODO soplogs - we need to do offline compaction for // the soplog regions, but that is not currently implemented getDiskInitFile().forceCompaction(); if (this.upgradeVersionOnly) { System.out.println("Upgrade disk store " + this.name + " to version " + getRecoveredGFVersionName() + " finished."); } else { if (getDeadRecordCount() == 0) { System.out.println("Offline compaction did not find anything to compact."); } else { System.out.println("Offline compaction removed " + getDeadRecordCount() + " records."); } // If we have more than one oplog then the liveEntryCount may not be the // total // number of live entries in the disk store. So do not log the live entry // count } }
/**
 * Returns the patterns of persistent members whose data has been revoked for this
 * disk store. Delegates directly to the disk store.
 *
 * <p>Fix: the previous version fetched the {@code DiskInitFile} into a local that was
 * never used — dead code removed. (If the lookup was meant to synchronize on the init
 * file, as {@code getMyInitializingID} does, restore it with the lock.)
 */
@Override
public Set<PersistentMemberPattern> getRevokedMembers() {
  return ds.getRevokedMembers();
}
/** Kicks off region recovery, unless the oplog set reports it already ran once. */
void initializeIfNeeded() {
  final boolean alreadyRecovered = getPersistentOplogs().alreadyRecoveredOnce.get();
  if (alreadyRecovered) {
    return;
  }
  recoverRegionsThatAreReady();
}
/**
 * Snapshots the owning disk store's configuration into this object's fields,
 * converting the disk directories to their absolute path strings.
 */
private void initDiskData() {
  this.name = diskStore.getName();
  this.compactionThreshold = diskStore.getCompactionThreshold();
  this.timeInterval = diskStore.getTimeInterval();
  this.writeBufferSize = diskStore.getWriteBufferSize();
  this.maxOpLogSize = diskStore.getMaxOplogSize();
  this.queueSize = diskStore.getQueueSize();
  this.isAutoCompact = diskStore.getAutoCompact();
  this.isForceCompactionAllowed = diskStore.getAllowForceCompaction();
  this.directoryHolders = diskStore.getDirectoryHolders();

  // Capture each configured directory as an absolute path string.
  final File[] dirs = diskStore.getDiskDirs();
  final String[] dirPaths = new String[dirs.length];
  int index = 0;
  for (File dir : dirs) {
    dirPaths[index++] = dir.getAbsolutePath();
  }
  this.diskDirectories = dirPaths;
}
// NOTE(review): this span is a truncated/garbled fragment of a larger method — no
// enclosing signature is visible, several opened braces never close, and the trailing
// deleteFiles(overflowFileFilter) call belongs to a different method. Code is left
// byte-identical; comments below annotate apparent intent. Recover the original from VCS.
String partialFileName = getName(); // the store name is the filename prefix for its oplogs
boolean foundIfFile = false;
createLockFile(partialFileName); // guard the directories against concurrent disk stores
boolean finished = false;
try {
  // Locate existing backup oplog files that belong to this disk store.
  Map<File, DirectoryHolder> persistentBackupFiles =
      getPersistentOplogs().findFiles(partialFileName);
  boolean ifRequired = backupFilesExist || isOffline();
  // Version gate: refuse to run against a store already at (or beyond) this version.
  // NOTE(review): the <= comparison vs. the "already at version" message should be
  // double-checked against the original source.
  if (Version.CURRENT.compareTo(getRecoveredGFVersion()) <= 0) {
    if (getCache() != null) {
      getCache().close(); // shut the cache down before failing out
      throw new IllegalStateException("Recovered version = " + getRecoveredGFVersion() + ": "
          + String.format("This disk store is already at version %s.",
              getRecoveredGFVersionName()));
      // NOTE(review): fragment discontinuity — the next check is unreachable as written.
      if (Version.GFE_70.compareTo(getRecoveredGFVersion()) > 0) {
        if (getCache() != null) {
          getCache().close();
          throw new IllegalStateException("Recovered version = " + getRecoveredGFVersion() + ": "
              + String.format("This disk store is still at version %s.",
                  getRecoveredGFVersionName()));
          // NOTE(review): stray trailing statement; overflowFileFilter is not declared here.
          deleteFiles(overflowFileFilter);
/**
 * Wires a {@code FlusherThread} over a fully mocked {@code DiskStoreImpl}.
 *
 * <p>Stubbing shape: the first {@code fillDrainList()} reports work
 * ({@code DRAIN_LIST_SIZE}) and the second reports an empty drain;
 * {@code isStopFlusher()} is false once then true, so the flusher performs exactly
 * one pass and exits. {@code checkAndClearForceFlush()} returning {@code true} lets
 * the flusher proceed without waiting on the monitor.
 *
 * <p>Fix: {@code getDrainList()} was stubbed with a raw {@code new ArrayList()} —
 * replaced with the diamond form to avoid the raw-type warning.
 */
@Before
public void setup() {
  diskStoreImpl = mock(DiskStoreImpl.class);
  diskStoreStats = mock(DiskStoreStats.class);
  PersistentOplogSet persistentOpLogSet = mock(PersistentOplogSet.class);
  when(diskStoreImpl.getAsyncMonitor()).thenReturn(new Object());
  when(diskStoreImpl.getForceFlushCount()).thenReturn(new AtomicInteger(1));
  when(diskStoreImpl.fillDrainList()).thenReturn(DRAIN_LIST_SIZE).thenReturn(0);
  when(diskStoreImpl.getDrainList()).thenReturn(new ArrayList<>());
  when(diskStoreImpl.getPersistentOplogs()).thenReturn(persistentOpLogSet);
  when(diskStoreImpl.getStats()).thenReturn(diskStoreStats);
  when(diskStoreImpl.checkAndClearForceFlush()).thenReturn(true);
  when(diskStoreImpl.isStopFlusher()).thenReturn(false).thenReturn(true);
  flusherThread = new DiskStoreImpl.FlusherThread(diskStoreImpl);
}
// NOTE(review): this method body appears truncated/garbled — a `catch` follows a closed
// block with no matching `try`, `rte` is used but never declared here, and many braces
// never close. Code is left byte-identical; comments annotate the apparent shutdown
// sequence. Recover the original from VCS before relying on this.
void close(boolean destroy) {
  this.closing = true; // reject new work before tearing anything down
  getCache().getDiskStoreMonitor().removeDiskStore(this); // stop disk-usage monitoring
  closeCompactor(false);
  } catch (RuntimeException e) { // NOTE(review): no matching `try {` is visible above
  rte = e; // remember the first failure so close can continue best-effort
  if (!isOffline()) {
  try {
  stopAsyncFlusher(); // online stores must halt the async writer thread
  } catch (RuntimeException e) {
  if (rte != null) { // NOTE(review): likely intended `rte == null` (keep FIRST failure) — confirm
  waitForBackgroundTasks();
  // Live regions (or validation mode) mean the oplogs must be closed, not destroyed.
  if ((!destroy && getDiskInitFile().hasLiveRegions()) || isValidating()) {
  RuntimeException exception = getPersistentOplogs().close();
  if (exception != null && rte != null) { // NOTE(review): likely `rte == null` — confirm
  rte = exception;
  getDiskInitFile().close();
  } else {
  try {
  destroyAllOplogs(); // destroy path: remove every oplog this store owns
  } catch (RuntimeException e) {
  if (rte != null) { // NOTE(review): likely `rte == null` — confirm
  getDiskInitFile().close();
// NOTE(review): truncated/garbled fragment of the flusher thread's main loop — the
// drain-iterator body is cut short, an InterruptedException handler appears with no
// catch header (`ie` is never declared), `fatalDae` is never declared, and many braces
// never close. Code left byte-identical; comments annotate apparent intent.
try {
  // Keep flushing while waitUntilFlushIsReady() reports the flusher should run.
  while (waitUntilFlushIsReady()) {
    int drainCount = diskStore.fillDrainList(); // move queued async items to the drain list
    if (drainCount > 0) {
      Iterator it = diskStore.getDrainList().iterator(); // NOTE(review): raw Iterator
      while (it.hasNext()) {
        Object o = it.next();
        diskStore.getStats().incQueueSize(-drainCount); // NOTE(review): presumably belongs after the drain loop, not inside it — confirm
        flushChild();
        // --- fragment discontinuity: the following looks like an InterruptedException
        // handler whose `catch (InterruptedException ie)` header is missing ---
        Thread.currentThread().interrupt(); // re-assert interrupt status per convention
        diskStore.getCache().getCancelCriterion().checkCancelInProgress(ie);
        throw new IllegalStateException("Async writer thread stopping due to unexpected interrupt");
        } catch (DiskAccessException dae) {
        // A DAE caused by our own interrupt during shutdown is expected and ignorable.
        boolean okToIgnore = dae.getCause() instanceof ClosedByInterruptException;
        if (!okToIgnore || !diskStore.isStopFlusher()) {
        fatalDae = dae; // NOTE(review): `fatalDae` is not declared in this fragment
        if (logger.isDebugEnabled()) {
        logger.debug("Async writer thread stopped. Pending opcount={}",
            diskStore.getAsyncQueue().size());
        diskStore.handleDiskAccessException(fatalDae);
// NOTE(review): truncated fragment, apparently from an Oplog constructor — no signature
// is visible, the `if (availableSpace < maxOplogSizeParam)` block never closes, and the
// tail (`checkCancelInProgress(ex)` etc.) looks like a catch body whose `catch` header
// is missing. Code left byte-identical; comments annotate apparent intent.
long maxOplogSizeParam = getParent().getMaxOplogSizeInBytes();
long availableSpace = this.dirHolder.getAvailableSpace();
if (availableSpace < maxOplogSizeParam) {
  // NOTE(review): the statements below look unrelated to the space check — the warning
  // branch that presumably belongs here appears to have been cut.
  this.stats = getParent().getStats();
  this.compactOplogs = getParent().getAutoCompact();
  String n = getParent().getName();
  // Oplog file name: <prefix><storeName>_<oplogId> inside this directory holder.
  this.diskFile = new File(this.dirHolder.getDir(), oplogSet.getPrefix() + n + "_" + oplogId);
  try {
    createCrf(null); // create a fresh create-record file (no previous crf to chain from)
    if (getParent().isOfflineCompacting()) {
      krfFileCreate(); // offline compaction also materializes the key file
      close();
      // --- apparent catch body with missing `catch (Exception ex)` header ---
      getParent().getCancelCriterion().checkCancelInProgress(ex);
      if (ex instanceof DiskAccessException) {
        throw (DiskAccessException) ex;
// NOTE(review): truncated fragment of an async-queue enqueue path — no enclosing
// signature is visible and the opened braces never close. Code left byte-identical;
// comments annotate apparent intent.
checkForFlusherThreadTermination(); // fail fast if the flusher already died
if (forceAsync) {
  getAsyncQueue().forcePut(item); // blocking put: caller insists the item be queued
} else {
  if (!getAsyncQueue().offer(item)) {
    // Queue full on the non-blocking path: hand off to the overflow handling.
    handleFullAsyncQueue(item);
    // NOTE(review): the statements below are probably meant to run unconditionally
    // after the enqueue, not inside this branch — braces appear truncated; confirm.
    getStats().incQueueSize(1);
    if (checkAsyncItemLimit()) {
      // Item limit reached: wake the flusher waiting on the async monitor.
      synchronized (getAsyncMonitor()) {
        getAsyncMonitor().notifyAll();
/**
 * Returns the persistent member ID used while this member is still initializing,
 * or the current field value directly when no init file exists yet.
 */
@Override
public PersistentMemberID getMyInitializingID() {
  DiskInitFile dif = this.ds.getDiskInitFile();
  // Before the init file exists there is nothing to lock on; return the field as-is.
  if (dif == null)
    return this.myInitializingId;
  // NOTE(review): both branches return the same field — the lock presumably exists so
  // this read is ordered with writers that update myInitializingId while holding the
  // init-file monitor; confirm against the corresponding setters before removing it.
  synchronized (dif) {
    return this.myInitializingId;
  }
}
/** The unique store ID lives on the disk store; delegate the lookup. */
@Override
public DiskStoreID getDiskStoreID() {
  final DiskStoreID storeId = getDiskStore().getDiskStoreID();
  return storeId;
}
/** Stops monitoring the given disk store, tracing the removal when verbose logging is on. */
public void removeDiskStore(DiskStoreImpl ds) {
  final boolean verbose = logger.isTraceEnabled(LogMarker.DISK_STORE_MONITOR_VERBOSE);
  if (verbose) {
    logger.trace(LogMarker.DISK_STORE_MONITOR_VERBOSE, "No longer monitoring disk store {}",
        ds.getName());
  }
  disks.remove(ds);
}
// NOTE(review): truncated fragment, apparently from an Oplog constructor that chains
// from a previous oplog — no signature is visible, `availableSpace`/`maxOplogSizeParam`
// are computed but never used here, the `if (prevOplog.compactOplogs)` block never
// closes, and the tail looks like a catch body with its `catch` header missing.
// Code left byte-identical; comments annotate apparent intent.
this.dirHolder = dirHolder;
this.opState = new OpState();
long maxOplogSizeParam = getParent().getMaxOplogSizeInBytes();
long availableSpace = this.dirHolder.getAvailableSpace();
// Inherit the compaction setting from the oplog we are rolling over from.
if (prevOplog.compactOplogs) {
  String n = getParent().getName();
  // Oplog file name: <prefix><storeName>_<oplogId> inside this directory holder.
  this.diskFile = new File(this.dirHolder.getDir(), oplogSet.getPrefix() + n + "_" + oplogId);
  try {
    createCrf(prevOplog.crf); // chain the create-record file from the previous oplog
    if (getParent().isOfflineCompacting()) {
      krfFileCreate(); // offline compaction also materializes the key file
      close();
      // --- apparent catch body with missing `catch (Exception ex)` header ---
      getParent().getCancelCriterion().checkCancelInProgress(ex);
      if (ex instanceof DiskAccessException) {
        throw (DiskAccessException) ex;
/** Public accessor for the persistent oplog set backing this disk store. */
public PersistentOplogSet getPersistentOplogSet() {
  final PersistentOplogSet oplogSet = getPersistentOplogs();
  return oplogSet;
}
/** * Destroy all the oplogs * */ private void destroyAllOplogs() { getPersistentOplogs().destroyAllOplogs(); // Need to also remove all oplogs that logically belong to this DiskStore // even if we were not using them. { // delete all overflow oplog files FilenameFilter overflowFileFilter = new DiskStoreFilter(OplogType.OVERFLOW, true, getName()); deleteFiles(overflowFileFilter); } { // delete all backup oplog files FilenameFilter backupFileFilter = new DiskStoreFilter(OplogType.BACKUP, true, getName()); deleteFiles(backupFileFilter); } }
/**
 * Builds the directory name this disk store's directories are backed up under:
 * the store name (falling back to the default store name when unnamed), then
 * {@code "_"}, then the store's unique ID.
 */
private String getBackupDirName(DiskStoreImpl diskStore) {
  final String storeName =
      (diskStore.getName() != null) ? diskStore.getName()
          : GemFireCacheImpl.getDefaultDiskStoreName();
  // Explicit toString() preserves the original's NPE if the ID were ever null.
  return storeName + "_" + diskStore.getDiskStoreID().toString();
}
/**
 * Copies an entry's value forward into this oplog on behalf of the overflow
 * compactor, wrapping the raw bytes so basicModify can write them.
 *
 * @return whatever basicModify reports for the write
 * @throws DiskAccessException wrapping an IOException from the write, or wrapping an
 *         InterruptedException if the write lock could not be acquired (the interrupt
 *         status is restored and cancellation is checked first)
 */
public boolean copyForwardForOverflowCompact(DiskEntry entry, byte[] value, int length,
    byte userBits) {
  final ValueWrapper wrapper = new DiskEntry.Helper.CompactorValueWrapper(value, length);
  try {
    return basicModify(entry, wrapper, userBits, true);
  } catch (IOException ex) {
    throw new DiskAccessException(
        String.format("Failed writing key to %s", this.diskFile.getPath()), ex,
        getParent().getName());
  } catch (InterruptedException ie) {
    // Restore interrupt status and let an in-progress cancel win before wrapping.
    Thread.currentThread().interrupt();
    getParent().getCancelCriterion().checkCancelInProgress(ie);
    throw new DiskAccessException(
        String.format(
            "Failed writing key to %s due to failure in acquiring read lock for asynch writing",
            this.diskFile.getPath()),
        ie, getParent().getName());
  }
}