/**
 * Allows a disk compaction to be forced on this disk store. The compaction is
 * done even if automatic compaction is not configured. If the current active
 * oplog has had data written to it and it is compactable then an implicit
 * call to forceRoll will be made so that the active oplog can be compacted.
 * This method will block until the compaction completes.
 *
 * @return true if one or more oplogs were compacted; false indicates that no
 *         oplogs were ready to be compacted or that a compaction was already
 *         in progress.
 */
public boolean forceCompaction() {
  return diskStore.forceCompaction();
}
/**
 * Allows a disk compaction to be forced on this disk store. The compaction is
 * done even if automatic compaction is not configured. If the current active
 * oplog has had data written to it and it is compactable then an implicit
 * call to forceRoll will be made so that the active oplog can be compacted.
 * This method will block until the compaction completes.
 *
 * @return true if one or more oplogs were compacted; false indicates that no
 *         oplogs were ready to be compacted or that a compaction was already
 *         in progress.
 */
public boolean forceCompaction() {
  return diskStore.forceCompaction();
}
/**
 * Forces a compaction of the backing disk store, passing this object so the
 * store knows who initiated the request. Delegates to
 * {@code DiskStore.forceCompaction(...)}.
 *
 * @return true if one or more oplogs were compacted; false otherwise
 */
public boolean forceCompaction() {
  return getDiskStore().forceCompaction(this);
}
/**
 * Forces a compaction of the backing disk store, passing this object so the
 * store knows who initiated the request. Delegates to
 * {@code DiskStore.forceCompaction(...)}.
 *
 * @return true if one or more oplogs were compacted; false otherwise
 */
public boolean forceCompaction() {
  return getDiskStore().forceCompaction(this);
}
/**
 * An instruction to members with cache that they should compact their disk
 * stores.
 *
 * @return the directories of the disk stores that were actually compacted;
 *         empty when the cache is null/closed or no store had compactable
 *         oplogs
 */
public String[] compactAllDiskStores() {
  List<String> compactedStores = new ArrayList<>();
  if (cache != null && !cache.isClosed()) {
    // Only cast once we know the cache is usable.
    GemFireCacheImpl cacheImpl = (GemFireCacheImpl) cache;
    for (DiskStoreImpl store : cacheImpl.listDiskStoresIncludingRegionOwned()) {
      // forceCompaction() blocks until compaction completes and reports
      // whether any oplogs were actually compacted.
      if (store.forceCompaction()) {
        compactedStores.add(store.getPersistentID().getDirectory());
      }
    }
  }
  // toArray(new String[0]) is the idiomatic list-to-array conversion.
  return compactedStores.toArray(new String[0]);
}
/**
 * An instruction to members with cache that they should compact their disk
 * stores.
 *
 * @return the directories of the disk stores that were actually compacted;
 *         empty when the cache is null/closed or no store had compactable
 *         oplogs
 */
public String[] compactAllDiskStores() {
  List<String> compactedStores = new ArrayList<>();
  if (cache != null && !cache.isClosed()) {
    // Only cast once we know the cache is usable.
    GemFireCacheImpl cacheImpl = (GemFireCacheImpl) cache;
    for (DiskStoreImpl store : cacheImpl.listDiskStoresIncludingRegionOwned()) {
      // forceCompaction() blocks until compaction completes and reports
      // whether any oplogs were actually compacted.
      if (store.forceCompaction()) {
        compactedStores.add(store.getPersistentID().getDirectory());
      }
    }
  }
  // toArray(new String[0]) is the idiomatic list-to-array conversion.
  return compactedStores.toArray(new String[0]);
}
/**
 * Forces compaction of every disk store in this member's cache and replies
 * with the persistent IDs of the stores that actually had oplogs compacted.
 *
 * @param dm the distribution manager servicing this admin request
 * @return a CompactResponse naming the compacted disk stores (empty set when
 *         the cache is absent or closed)
 */
@Override
protected AdminResponse createResponse(DistributionManager dm) {
  GemFireCacheImpl cache = GemFireCacheImpl.getInstance();
  HashSet<PersistentID> compactedStores = new HashSet<>();
  if (cache != null && !cache.isClosed()) {
    for (DiskStoreImpl store : cache.listDiskStoresIncludingRegionOwned()) {
      // forceCompaction() blocks until the compaction completes.
      if (store.forceCompaction()) {
        compactedStores.add(store.getPersistentID());
      }
    }
  }
  return new CompactResponse(this.getSender(), compactedStores);
}
/**
 * Forces compaction of every disk store in this member's cache and replies
 * with the persistent IDs of the stores that actually had oplogs compacted.
 *
 * @param dm the distribution manager servicing this admin request
 * @return a CompactResponse naming the compacted disk stores (empty set when
 *         the cache is absent or closed)
 */
@Override
protected AdminResponse createResponse(DistributionManager dm) {
  GemFireCacheImpl cache = GemFireCacheImpl.getInstance();
  HashSet<PersistentID> compactedStores = new HashSet<>();
  if (cache != null && !cache.isClosed()) {
    for (DiskStoreImpl store : cache.listDiskStoresIncludingRegionOwned()) {
      // forceCompaction() blocks until the compaction completes.
      if (store.forceCompaction()) {
        compactedStores.add(store.getPersistentID());
      }
    }
  }
  return new CompactResponse(this.getSender(), compactedStores);
}
/**
 * Forces compaction of the named disk store in this member's cache.
 *
 * @param diskStoreName name of the disk store to compact
 * @return the persistent ID of the store when compaction actually occurred,
 *         otherwise null (no cache, cache closed, unknown store, or nothing
 *         to compact)
 */
public static PersistentID compactDiskStore(String diskStoreName) {
  GemFireCacheImpl cache = GemFireCacheImpl.getInstance();
  if (cache == null || cache.isClosed()) {
    return null;
  }
  DiskStoreImpl diskStore = (DiskStoreImpl) cache.findDiskStore(diskStoreName);
  if (diskStore == null || !diskStore.forceCompaction()) {
    return null;
  }
  return diskStore.getPersistentID();
}
/**
 * Forces compaction of the named disk store in this member's cache.
 *
 * @param diskStoreName name of the disk store to compact
 * @return the persistent ID of the store when compaction actually occurred,
 *         otherwise null (no cache, cache closed, unknown store, or nothing
 *         to compact)
 */
public static PersistentID compactDiskStore(String diskStoreName) {
  GemFireCacheImpl cache = GemFireCacheImpl.getInstance();
  if (cache == null || cache.isClosed()) {
    return null;
  }
  DiskStoreImpl diskStore = (DiskStoreImpl) cache.findDiskStore(diskStoreName);
  if (diskStore == null || !diskStore.forceCompaction()) {
    return null;
  }
  return diskStore.getPersistentID();
}
// Force compaction twice -- NOTE(review): presumably the second call verifies
// there is nothing left to compact after the first; confirm against the
// enclosing test's intent.
region.getDiskStore().forceCompaction();
region.getDiskStore().forceCompaction();
// After compaction, no oplogs should remain eligible for compaction.
assertEquals(0, region.getDiskStore().numCompactableOplogs());
/**
 * Verifies that forceCompaction() is a no-op (returns false) while the active
 * oplog has nothing compactable, and that once entries have been removed it
 * implicitly rolls the active oplog and compacts it, returning true.
 */
public void testForceCompactionDoesRoll() {
  DiskRegionProperties props = new DiskRegionProperties();
  props.setRegionName("testForceCompactionDoesRoll");
  props.setRolling(false); // automatic compaction disabled on purpose
  props.setDiskDirs(dirs);
  props.setAllowForceCompaction(true);
  props.setPersistBackup(true);
  region = DiskRegionHelperFactory.getSyncPersistOnlyRegion(cache, props, Scope.LOCAL);
  DiskRegion dr = ((LocalRegion)region).getDiskRegion();
  // Empty store: nothing to compact yet.
  logWriter.info("calling noop forceCompaction");
  assertEquals(false, ((LocalRegion)region).getDiskStore().forceCompaction());
  logWriter.info("putting key1");
  region.put("key1", "value1");
  logWriter.info("putting key2");
  region.put("key2", "value2");
  // Live entries alone are not compactable garbage.
  logWriter.info("calling noop forceCompaction");
  assertEquals(false, ((LocalRegion)region).getDiskStore().forceCompaction());
  // Removing the entries turns their oplog records into garbage.
  logWriter.info("removing key1");
  region.remove("key1");
  logWriter.info("removing key2");
  region.remove("key2");
  // now that it is compactable the following forceCompaction should
  // go ahead and do a roll and compact it.
  boolean compacted = ((LocalRegion)region).getDiskStore().forceCompaction();
  assertEquals(true, compacted);
}
/** For those regions that are persistent, force compaction. * */ private synchronized void doCompactionStep() { int coord = compactionCoordinator.incrementAndGet(); if (coord == 1) { // this is the one thread in this vm to do compaction for (Region aRegion: allRegions) { DataPolicy dataPolicy = aRegion.getAttributes().getDataPolicy(); if (dataPolicy.withPersistence() && dataPolicy.withReplication()) { Log.getLogWriter().info("Causing compaction of disk files for region " + aRegion.getFullPath()); long startTime = System.currentTimeMillis(); boolean compactionResult = ((LocalRegion)aRegion).getDiskStore().forceCompaction(); long duration = System.currentTimeMillis() - startTime; Log.getLogWriter().info("Done with compaction of disk files for region " + aRegion.getFullPath() +", compaction took " + duration + " ms"); if (!compactionResult) { throw new TestException("forceCompaction returned " + compactionResult + " for " + aRegion.getFullPath()); } } } RecoveryBB.getBB().getSharedCounters().increment(RecoveryBB.doneCompactingCounter); } else { TestHelper.waitForCounter(RecoveryBB.getBB(), "RecoverBB.doneCompactingCounter", RecoveryBB.doneCompactingCounter, TestHelper.getNumVMs() - 1, true, -1, 10000); } }
// Force a blocking compaction of the region's backing disk store.
((LocalRegion)diskRegion).getDiskStore().forceCompaction();
// Nothing is compactable yet, so the forced compaction reports false.
oplogsIDsNotifiedToRoll = ((LocalRegion)region).getDiskStore().forceCompaction();
assertEquals(false, oplogsIDsNotifiedToRoll);
// Removing entries turns their oplog records into reclaimable garbage.
region.remove("7");
region.remove("9");
// Now the forced compaction should actually compact something...
oplogsIDsNotifiedToRoll = ((LocalRegion)region).getDiskStore().forceCompaction();
assertEquals(true, oplogsIDsNotifiedToRoll);
// ...and the roll is expected to have completed (rollingDone is set
// elsewhere in the enclosing test -- not visible in this fragment).
assertEquals(true, rollingDone);
/** * Confirm that forceCompaction waits for the compaction to finish */ public void testForceCompactionIsSync() { DiskRegionProperties props = new DiskRegionProperties(); props.setRegionName("testForceCompactionDoesRoll"); props.setRolling(false); props.setDiskDirs(dirs); props.setAllowForceCompaction(true); props.setPersistBackup(true); region = DiskRegionHelperFactory.getSyncPersistOnlyRegion(cache, props, Scope.LOCAL); DiskRegion dr = ((LocalRegion)region).getDiskRegion(); logWriter.info("putting key1"); region.put("key1", "value1"); logWriter.info("putting key2"); region.put("key2", "value2"); logWriter.info("removing key1"); region.remove("key1"); logWriter.info("removing key2"); region.remove("key2"); // now that it is compactable the following forceCompaction should // go ahead and do a roll and compact it. Oplog oplog = dr.testHook_getChild(); boolean compacted = ((LocalRegion)region).getDiskStore().forceCompaction(); assertEquals(true, oplog.testConfirmCompacted()); assertEquals(true, compacted); }
addEntries(1 /* oplogNumber*/, 50 /* byte array size*/);
// Kick off a forced compaction, then wait for the compactor thread to
// finish the roll before the test continues.
((LocalRegion)region).getDiskStore().forceCompaction();
waitForCompactor(3000/*wait for forceRolling to finish */);
logWriter.info("testMultipleRolling after waitForCompactor");
// Two forced compactions back to back -- NOTE(review): presumably some oplogs
// remain uncompactable until tombstones expire; confirm against the test's
// setup, which is not visible in this fragment.
region.getDiskStore().forceCompaction();
region.getDiskStore().forceCompaction();
// Expire half of the tombstones so their oplog space becomes reclaimable.
assertTrue(tombstoneService.forceBatchExpirationForTests(entryCount/2));
region.getDiskStore().forceCompaction();
// Everything reclaimable should now have been compacted away.
assertEquals(0, region.getDiskStore().numCompactableOplogs());
// destroy() returns the removed value; non-null means the entry existed.
assertTrue(region.destroy(i) != null);
// With entries destroyed there is garbage on disk, so the forced
// compaction should report that it compacted something.
assertTrue(ds.forceCompaction());
// Run the gfsh "compact disk-store" command on a remote CLI, then invoke
// forceCompaction directly on the same disk store so the two results can be
// cross-checked. (Fragment is truncated here -- the bodies of the nested ifs
// are outside this view.)
String command = "compact disk-store --name=" + diskStoreName;
String result = CliHelper.execCommandOnRemoteCli(command, true)[1];
boolean compactionResult = ((LocalRegion)aRegion).getDiskStore().forceCompaction();
if (result.contains("Compaction was attempted but nothing to compact")) {
if (compactionResult) {