private void initDiskData() {
  this.name = diskStore.getName();
  this.compactionThreshold = diskStore.getCompactionThreshold();
  this.timeInterval = diskStore.getTimeInterval();
  this.writeBufferSize = diskStore.getWriteBufferSize();
  this.maxOpLogSize = diskStore.getMaxOplogSize();
  this.queueSize = diskStore.getQueueSize();
  this.isAutoCompact = diskStore.getAutoCompact();
  this.isForceCompactionAllowed = diskStore.getAllowForceCompaction();
  this.directoryHolders = diskStore.getDirectoryHolders();
  File[] diskDirs = diskStore.getDiskDirs();
  String[] diskDirStr = new String[diskDirs.length];
  for (int i = 0; i < diskDirs.length; i++) {
    diskDirStr[i] = diskDirs[i].getName();
  }
  this.diskDirectories = diskDirStr;
}
/**
 * @since 5.1
 */
public void forceRolling(DiskRegion dr) {
  if (!dr.isBackup()) return;
  if (!dr.isSync() && this.maxAsyncItems == 0 && getTimeInterval() == 0) {
    forceFlush();
  }
  acquireReadLock(dr);
  try {
    PersistentOplogSet oplogSet = getPersistentOplogSet(dr);
    oplogSet.forceRoll(dr);
  } finally {
    releaseReadLock(dr);
  }
}
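// Usage sketch, not taken from the source above: forcing an oplog roll for a persistent
// region, assuming a DiskStoreImpl named "diskStore" and a DiskRegion named "dr" are already
// available (both names are hypothetical). For a non-backup region the call simply returns.
void forceRollExample(DiskStoreImpl diskStore, DiskRegion dr) {
  // When the region writes asynchronously and no queue size or time interval is configured,
  // forceRolling flushes pending async writes first; it then rolls the current oplog under
  // the disk store's read lock.
  diskStore.forceRolling(dr);
}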
void decBackgroundTasks() {
  int v = this.backgroundTasks.decrementAndGet();
  if (v == 0) {
    synchronized (this.backgroundTasks) {
      this.backgroundTasks.notifyAll();
    }
  }
  getCache().getCachePerfStats().decDiskTasksWaiting();
}
public void prepareForClose() {
  forceFlush();
  persistentOplogs.prepareForClose();
  closeCompactor(true);
}
/**
 * Execute a task which must be performed asynchronously, but has no requirement
 * for timely execution. This task pool is used for compactions, creating KRFs, etc.,
 * so some of the queued tasks may take a while.
 */
public boolean executeDiskStoreTask(final Runnable runnable) {
  return executeDiskStoreTask(runnable, getCache().getDiskStoreTaskPool(), true) != null;
}
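// Usage sketch, not from the source: submitting non-urgent background work through
// executeDiskStoreTask, assuming a DiskStoreImpl "diskStore" (hypothetical name). The boolean
// return indicates whether the task pool accepted the work; it may refuse, e.g. during shutdown.
Runnable housekeeping = () -> {
  // long-running, non-time-critical work, similar in spirit to compaction or KRF creation
};
if (!diskStore.executeDiskStoreTask(housekeeping)) {
  // the pool rejected the task; the caller must decide whether to run it inline, retry, or drop it
}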
public boolean sameAs(DiskStoreAttributes props) {
  if (getAllowForceCompaction() != props.getAllowForceCompaction()) {
    this.logger.info(LocalizedStrings.DEBUG, "DEBUG allowForceCompaction "
        + getAllowForceCompaction() + "!=" + props.getAllowForceCompaction());
    return false;
  }
  if (getAutoCompact() != props.getAutoCompact()) {
    this.logger.info(LocalizedStrings.DEBUG, "DEBUG AutoCompact "
        + getAutoCompact() + "!=" + props.getAutoCompact());
    return false;
  }
  if (getCompactionThreshold() != props.getCompactionThreshold()) {
    this.logger.info(LocalizedStrings.DEBUG, "DEBUG CompactionThreshold "
        + getCompactionThreshold() + "!=" + props.getCompactionThreshold());
    return false;
  }
  if (getMaxOplogSizeInBytes() != props.getMaxOplogSizeInBytes()) {
    this.logger.info(LocalizedStrings.DEBUG, "DEBUG MaxOplogSizeInBytes "
        + getMaxOplogSizeInBytes() + "!=" + props.getMaxOplogSizeInBytes());
    return false;
  }
  if (!getName().equals(props.getName())) {
    this.logger.info(LocalizedStrings.DEBUG, "DEBUG Name "
        + getName() + "!=" + props.getName());
    return false;
  }
  if (getQueueSize() != props.getQueueSize()) {
    this.logger.info(LocalizedStrings.DEBUG, "DEBUG QueueSize "
        + getQueueSize() + "!=" + props.getQueueSize());
    return false;
  }
  if (getTimeInterval() != props.getTimeInterval()) {
    this.logger.info(LocalizedStrings.DEBUG, "DEBUG TimeInterval "
        + getTimeInterval() + "!=" + props.getTimeInterval());
    return false;
  }
  if (getWriteBufferSize() != props.getWriteBufferSize()) {
    this.logger.info(LocalizedStrings.DEBUG, "DEBUG WriteBufferSize "
        + getWriteBufferSize() + "!=" + props.getWriteBufferSize());
    return false;
  }
  return true;
}
@Override
public final void setDelayedDiskId(LocalRegion r) {
  DiskStoreImpl ds = r.getDiskStore();
  long maxOplogSize = ds.getMaxOplogSize();
  this.id = DiskId.createDiskId(maxOplogSize, false, ds.needsLinkedList());
}

public final synchronized int updateEntrySize(EnableLRU capacityController) {
private void recreateAllLocalIndexes(final LogWriter logger) {
  Collection<DiskStoreImpl> diskStores = Misc.getGemFireCache().listDiskStores();
  for (DiskStoreImpl ds : diskStores) {
    PersistentOplogSet oplogSet = ds.getPersistentOplogSet(null);
    ds.resetIndexRecoveryState();
    // delete all idx files of all oplogs, so second arg is true below
    ds.scheduleIndexRecovery(oplogSet.getSortedOplogs(), true);
    logger.info("FabricDatabase: recreateAllLocalIndexes "
        + "waiting for index re-creation for disk store: " + ds.getName());
    ds.waitForIndexRecoveryEnd(-1);
    logger.info("FabricDatabase: recreateAllLocalIndexes "
        + "index re-creation for disk store: " + ds.getName() + " ended");
  }
}
/**
 * Flush all async queue data, and fsync all oplogs to disk.
 */
public final void flushAndSync() {
  forceFlush();
  acquireCompactorWriteLock();
  try {
    for (Oplog oplog : getPersistentOplogSet(null).getAllOplogs()) {
      oplog.flushAllAndSync();
    }
  } finally {
    releaseCompactorWriteLock();
  }
}
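// Sketch of the difference between flush() and flushAndSync(), assuming a DiskStoreImpl
// "diskStore" (hypothetical name); not taken from the source above.
diskStore.flush();         // drains the async queue to the oplogs, with no fsync guarantee
diskStore.flushAndSync();  // drains the queue and fsyncs every oplog under the compactor write lock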
try {
  try {
    acquireWriteLock(dr);
    gotLock = true;
  } catch (CancelException e) {
    // cache is closing; continue the destroy without the write lock
  }
  basicDestroy(region, dr);
} finally {
  if (gotLock) {
    releaseWriteLock(dr);
    this.initFile.endDestroyRegion(dr);
  } else {
    rmById(dr.getId());
    this.overflowMap.remove(dr);
    if (getOwnedByRegion()) {
      if (this.ownCount.decrementAndGet() <= 0) {
        destroy();
      }
    }
  }
}
public PersistentMemberID generatePersistentID(DiskRegionView dr) {
  File firstDir = getInfoFileDir().getDir();
  InternalDistributedSystem ids = getCache().getDistributedSystem();
  InternalDistributedMember memberId = ids.getDistributionManager()
      .getDistributionManagerId();

  // NOTE - do NOT use DM.cacheTimeMillis here. See bug #49920
  long timestamp = System.currentTimeMillis();
  PersistentMemberID id = new PersistentMemberID(getDiskStoreID(),
      memberId.getInetAddress(), firstDir.getAbsolutePath(),
      memberId.getName(), timestamp, (short) 0);
  return id;
}
@Override
public void execute(FunctionContext context) {
  final GemFireCacheImpl cache = GemFireCacheImpl.getInstance();
  if (cache != null && !cache.isClosed()) {
    Collection<DiskStoreImpl> diskStores = cache.listDiskStoresIncludingRegionOwned();
    ArrayList<DiskStoreInformation> diskStoreInfos = new ArrayList<>(diskStores.size());
    for (DiskStoreImpl ds : diskStores) {
      StringBuilder dirs = new StringBuilder();
      for (File dir : ds.getDiskDirs()) {
        if (dirs.length() > 0) {
          dirs.append(',');
        }
        String dirPath;
        try {
          dirPath = dir.getCanonicalPath();
        } catch (IOException ioe) {
          dirPath = dir.getAbsolutePath();
        }
        dirs.append(dirPath);
      }
      diskStoreInfos.add(new DiskStoreInformation(cache.getMyId().getId(),
          ds.getName(), ds.getDiskStoreUUID().toString(), dirs.toString()));
    }
    context.getResultSender().lastResult(diskStoreInfos);
  } else {
    context.getResultSender().lastResult(new ArrayList<>(0));
  }
}
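// Caller-side sketch, assuming the class containing execute() above implements Function and is
// named DiskStoreInfoFunction (a hypothetical name here), and that an InternalDistributedSystem
// "system" is at hand. Each member replies with its ArrayList<DiskStoreInformation>.
ResultCollector<?, ?> rc = FunctionService.onMembers(system).execute(new DiskStoreInfoFunction());
List<?> perMemberResults = (List<?>) rc.getResult();
for (Object memberResult : perMemberResults) {
  // memberResult is one member's list of disk store descriptions (name, UUID, directories)
}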
public void removeDiskStore(DiskStoreImpl ds) {
  if (logger.isTraceEnabled(LogMarker.DISK_STORE_MONITOR)) {
    logger.trace(LogMarker.DISK_STORE_MONITOR,
        "No longer monitoring disk store {}", ds.getName());
  }
  disks.remove(ds);
}
public final boolean copyForwardForOverflowCompact(DiskEntry entry,
    DiskEntry.Helper.ValueWrapper value, byte userBits) {
  try {
    return basicModify(entry, value, userBits, true);
  } catch (IOException ex) {
    throw new DiskAccessException(LocalizedStrings.Oplog_FAILED_WRITING_KEY_TO_0
        .toLocalizedString(this.diskFile.getPath()), ex, getParent().getName());
  } catch (InterruptedException ie) {
    Thread.currentThread().interrupt();
    getParent().getCancelCriterion().checkCancelInProgress(ie);
    throw new DiskAccessException(
        LocalizedStrings.Oplog_FAILED_WRITING_KEY_TO_0_DUE_TO_FAILURE_IN_ACQUIRING_READ_LOCK_FOR_ASYNCH_WRITING
            .toLocalizedString(this.diskFile.getPath()), ie, getParent().getName());
  }
}
public void flush() {
  forceFlush();
}
private void addInternalDiskStore(DiskStoreImpl ds, UUIDFactory factory)
    throws StandardException {
  if (ds != null) {
    UUID id = factory.recreateUUID(ds.getName());
    GfxdDiskStoreDescriptor dsd = new GfxdDiskStoreDescriptor(dd, id, ds,
        ds.getDiskDirs()[0].getAbsolutePath());
    dd.addDescriptor(dsd, null, DataDictionary.SYSDISKSTORES_CATALOG_NUM,
        false, dd.getTransactionExecute());
  }
}
private static void cleanDiskStore(DiskStoreImpl diskStore,
    boolean deleteDiskStoreFiles) {
  GFXDDiskStoreImpl.closeDiskStoreFiles(diskStore);
  if (deleteDiskStoreFiles) {
    File[] diskDirs = diskStore.getDiskDirs();
    for (File diskDir : diskDirs) {
      if (diskDir.exists()) {
        cleanFiles(diskDir, extensionFilter);
      }
    }
  }
}
public void updateDiskRegion(AbstractDiskRegion dr) {
  PersistentOplogSet oplogSet = getPersistentOplogSet(dr);
  oplogSet.updateDiskRegion(dr);
}
/**
 * Variant of {@link #flushAndSync()} that does not acquire the compactor write lock.
 */
public final void flushAndSync(boolean noCompactorLock) {
  forceFlush();
  for (Oplog oplog : getPersistentOplogSet(null).getAllOplogs()) {
    oplog.flushAllAndSync(true);
  }
}