// Serialize selected cache-configuration values into the JSON view.
// NOTE(review): fragment of a larger method — the enclosing declaration is not visible here.
// NOTE(review): "Maximum Elements on Disk" is populated from getMaxBytesLocalDisk() (bytes,
// not an element count) — looks like a key/value mismatch; confirm which metric is intended
// before changing, since consumers may depend on this JSON key.
// NOTE(review): "Clean or Flush" presumably means "Clear on Flush" (isClearOnFlush()) — verify.
json.put("Maximum Elements on Disk", config.getMaxBytesLocalDisk()); json.put("Memory Store Eviction Policy", config.getMemoryStoreEvictionPolicy().toString()); json.put("Clean or Flush", config.isClearOnFlush()); json.put("Eternal", config.isEternal()); json.put("Time To Idle Seconds", config.getTimeToIdleSeconds());
@Override public void flush() { // should be emptied if clearOnFlush is true if (cache.getCacheConfiguration().isClearOnFlush()) { backend.clear(); if (keyLookupCache != null) { keyLookupCache.clear(); } } }
/**
 * Shuts this store down. Idempotent: once the status has reached
 * STATUS_SHUTDOWN further calls are no-ops. The caching tier is
 * emptied first when the configuration asks for clearOnFlush, then
 * the authoritative tier is disposed.
 */
@Override
public synchronized void dispose() {
    if (status != Status.STATUS_SHUTDOWN) {
        final boolean emptyCachingTier = cacheConfiguration != null && cacheConfiguration.isClearOnFlush();
        if (emptyCachingTier) {
            cachingTier.clear();
        }
        authoritativeTier.dispose();
        status = Status.STATUS_SHUTDOWN;
    }
}
/**
 * Clears this store when the cache is configured with clearOnFlush=true;
 * otherwise does nothing.
 * <p>
 * NOTE(review): the previous Javadoc ("Flush to disk only if the cache is
 * diskPersistent") did not match the code, which checks isClearOnFlush()
 * and removes all entries rather than spooling to disk.
 */
public void flush() { if (cache.getCacheConfiguration().isClearOnFlush()) { removeAll(); } }
/**
 * Flushes this store. When the authoritative tier is a DiskStore and the
 * configuration requests clearOnFlush, the caching tier is emptied and the
 * disk tier's faulted bit is reset under the write lock; in every other
 * case the flush is simply delegated to the authoritative tier.
 *
 * @throws IOException if the delegated flush fails
 */
@Override
public void flush() throws IOException {
    final boolean clearTiers = authoritativeTier instanceof DiskStore
            && cacheConfiguration != null
            && cacheConfiguration.isClearOnFlush();
    if (!clearTiers) {
        authoritativeTier.flush();
        return;
    }
    final Lock writeLock = daLock.writeLock();
    writeLock.lock();
    try {
        cachingTier.clear();
        ((DiskStore) authoritativeTier).clearFaultedBit();
    } finally {
        writeLock.unlock();
    }
}
/** * Flush to disk only if the cache is diskPersistent. */ public final void flush() { if (cache.getCacheConfiguration().isDiskPersistent()) { if (LOG.isDebugEnabled()) { LOG.debug(cache.getName() + " is persistent. Spooling " + map.size() + " elements to the disk store."); } spoolAllToDisk(); } //should be emptied if clearOnFlush is true if (cache.getCacheConfiguration().isClearOnFlush()) { clear(); } }
/** * Spools all elements to disk, in preparation for shutdown. * <p> * This revised implementation is a little slower but avoids using increased memory during the method. */ protected final void spoolAllToDisk() { boolean clearOnFlush = cache.getCacheConfiguration().isClearOnFlush(); for (Object key : getKeys()) { Element element = (Element) map.get(key); if (element != null) { if (!element.isSerializable()) { if (LOG.isWarnEnabled()) { LOG.warn("Object with key " + element.getObjectKey() + " is not Serializable and is not being overflowed to disk."); } } else { spoolToDisk(element); //Don't notify listeners. They are not being removed from the cache, only a store //Leave it in the memory store for performance if do not want to clear on flush if (clearOnFlush) { remove(key); } } } } }
// Schedule a recurring disk-expiry sweep and create the index flush task.
// NOTE(review): fragment of a larger method — diskWriter, flushTask, indexFile and
// expiryInterval are declared outside this view. The IndexWriteTask is handed the
// cache's clearOnFlush flag; presumably it alters how the index is written on
// flush — confirm against the IndexWriteTask constructor.
diskWriter.scheduleWithFixedDelay(new DiskExpiryTask(), expiryInterval, expiryInterval, TimeUnit.SECONDS); flushTask = new IndexWriteTask(indexFile, cache.getCacheConfiguration().isClearOnFlush());
public void flush() { // should be emptied if clearOnFlush is true if (cache.getCacheConfiguration().isClearOnFlush()) { clear(); } }
/**
 * Clears this store when the cache is configured with clearOnFlush=true;
 * otherwise does nothing.
 * <p>
 * NOTE(review): the previous Javadoc ("Flush to disk only if the cache is
 * diskPersistent") did not match the code, which checks isClearOnFlush()
 * and removes all entries rather than spooling to disk.
 */
public void flush() { if (cache.getCacheConfiguration().isClearOnFlush()) { removeAll(); } }
/**
 * Shuts this store down. Idempotent: once the status has reached
 * STATUS_SHUTDOWN further calls are no-ops. The caching tier is
 * emptied first when the configuration asks for clearOnFlush, then
 * the authoritative tier is disposed.
 */
@Override
public synchronized void dispose() {
    if (status != Status.STATUS_SHUTDOWN) {
        final boolean emptyCachingTier = cacheConfiguration != null && cacheConfiguration.isClearOnFlush();
        if (emptyCachingTier) {
            cachingTier.clear();
        }
        authoritativeTier.dispose();
        status = Status.STATUS_SHUTDOWN;
    }
}
public void flush() { // should be emptied if clearOnFlush is true if (cache.getCacheConfiguration().isClearOnFlush()) { clear(); } }
/**
 * Clears this store when the cache is configured with clearOnFlush=true;
 * otherwise does nothing.
 * <p>
 * NOTE(review): the previous Javadoc ("Flush to disk only if the cache is
 * diskPersistent") did not match the code, which checks isClearOnFlush()
 * and removes all entries rather than spooling to disk.
 */
public void flush() { if (cache.getCacheConfiguration().isClearOnFlush()) { removeAll(); } }
/**
 * Shuts this store down. Idempotent: once the status has reached
 * STATUS_SHUTDOWN further calls are no-ops. The caching tier is
 * emptied first when the configuration asks for clearOnFlush, then
 * the authoritative tier is disposed.
 */
@Override
public synchronized void dispose() {
    if (status != Status.STATUS_SHUTDOWN) {
        final boolean emptyCachingTier = cacheConfiguration != null && cacheConfiguration.isClearOnFlush();
        if (emptyCachingTier) {
            cachingTier.clear();
        }
        authoritativeTier.dispose();
        status = Status.STATUS_SHUTDOWN;
    }
}
/**
 * Clears this store when the cache is configured with clearOnFlush=true;
 * otherwise does nothing.
 * <p>
 * NOTE(review): the previous Javadoc ("Flush to disk only if the cache is
 * diskPersistent") did not match the code, which checks isClearOnFlush()
 * and removes all entries rather than spooling to disk.
 */
public void flush() { if (cache.getCacheConfiguration().isClearOnFlush()) { removeAll(); } }
/**
 * Flushes this store. When the authoritative tier is a DiskStore and the
 * configuration requests clearOnFlush, the caching tier is emptied and the
 * disk tier's faulted bit is reset under the write lock; in every other
 * case the flush is simply delegated to the authoritative tier.
 *
 * @throws IOException if the delegated flush fails
 */
@Override
public void flush() throws IOException {
    final boolean clearTiers = authoritativeTier instanceof DiskStore
            && cacheConfiguration != null
            && cacheConfiguration.isClearOnFlush();
    if (!clearTiers) {
        authoritativeTier.flush();
        return;
    }
    final Lock writeLock = daLock.writeLock();
    writeLock.lock();
    try {
        cachingTier.clear();
        ((DiskStore) authoritativeTier).clearFaultedBit();
    } finally {
        writeLock.unlock();
    }
}
/**
 * Flushes this store. When the authoritative tier is a DiskStore and the
 * configuration requests clearOnFlush, the caching tier is emptied and the
 * disk tier's faulted bit is reset under the write lock; in every other
 * case the flush is simply delegated to the authoritative tier.
 *
 * @throws IOException if the delegated flush fails
 */
@Override
public void flush() throws IOException {
    final boolean clearTiers = authoritativeTier instanceof DiskStore
            && cacheConfiguration != null
            && cacheConfiguration.isClearOnFlush();
    if (!clearTiers) {
        authoritativeTier.flush();
        return;
    }
    final Lock writeLock = daLock.writeLock();
    writeLock.lock();
    try {
        cachingTier.clear();
        ((DiskStore) authoritativeTier).clearFaultedBit();
    } finally {
        writeLock.unlock();
    }
}
/** * Flush to disk only if the cache is diskPersistent. */ public final void flush() { if (cache.getCacheConfiguration().isDiskPersistent()) { if (LOG.isDebugEnabled()) { LOG.debug(cache.getName() + " is persistent. Spooling " + map.size() + " elements to the disk store."); } spoolAllToDisk(); } //should be emptied if clearOnFlush is true if (cache.getCacheConfiguration().isClearOnFlush()) { clear(); } }
/** * Flush to disk only if the cache is diskPersistent. */ public final void flush() { if (cache.getCacheConfiguration().isDiskPersistent()) { if (LOG.isDebugEnabled()) { LOG.debug(cache.getName() + " is persistent. Spooling " + map.size() + " elements to the disk store."); } spoolAllToDisk(); } //should be emptied if clearOnFlush is true if (cache.getCacheConfiguration().isClearOnFlush()) { clear(); } }
// Emit cache-configuration settings as XML attributes on the generated element.
// NOTE(review): fragment of a larger builder method — the final addAttribute chain is
// cut off mid-expression in this view; the enclosing declaration is not visible.
// clearOnFlush is optional and defaults to CacheConfiguration.DEFAULT_CLEAR_ON_FLUSH,
// so it is only serialized when it differs from that default.
element.addAttribute(new SimpleNodeAttribute("maxEntriesLocalHeap", cacheConfiguration.getMaxEntriesLocalHeap()).optional(false)); element.addAttribute(new SimpleNodeAttribute("clearOnFlush", cacheConfiguration.isClearOnFlush()).optional(true).defaultValue( String.valueOf(CacheConfiguration.DEFAULT_CLEAR_ON_FLUSH))); element.addAttribute(new SimpleNodeAttribute("diskAccessStripes", cacheConfiguration.getDiskAccessStripes()).optional(true)