/**
 * Enable file caching; call before any acquireFile() call.
 * Call NetcdfDataset.shutdown() when the application terminates.
 *
 * @param minElementsInMemory always keep at least this many entries in the cache
 * @param maxElementsInMemory trigger a cleanup once the cache grows past this size
 * @param hardLimit if > 0, never allow more than this many elements; exceeding it forces a
 *        cleanup on the calling thread
 * @param period run a periodic cleanup every this many seconds
 */
static public synchronized void initNetcdfFileCache(int minElementsInMemory, int maxElementsInMemory, int hardLimit,
    int period) {
  // NOTE(review): the cache name literal carries a trailing space ("NetcdfFileCache ") — confirm intentional.
  netcdfFileCache = new ucar.nc2.util.cache.FileCache("NetcdfFileCache ", minElementsInMemory, maxElementsInMemory,
      hardLimit, period);
}
/**
 * Acquire a FileCacheable, and lock it so no one else can use it.
 * Call FileCacheable.close() when done.
 *
 * @param factory use this factory to open the file; may not be null
 * @param location file location, also used as the cache key; passed to the factory on a cache miss
 * @param cancelTask user can cancel, ok to be null
 * @return the FileCacheable corresponding to location (javadoc previously said NetcdfFile; the
 *         declared return type is FileCacheable)
 * @throws IOException on error
 */
public FileCacheable acquire(FileFactory factory, String location, ucar.nc2.util.CancelTask cancelTask)
    throws IOException {
  // Delegate: location doubles as the hash key; -1 = no buffer-size override, no extra spec object.
  return acquire(factory, location, location, -1, cancelTask, null);
}
// Scheduled task body: trims the cache down to softLimit on the background executor thread.
public void run() {
  cleanup(softLimit);
}
} // closes the enclosing class — its declaration is outside this view
/**
 * Shut down the file cache's background threads so the process can exit cleanly.
 * Call at application exit if initNetcdfFileCache was previously called.
 */
static public void shutdown() {
  FileCache.shutdown();
}
// Fast path: the file is already in the cache and unlocked.
FileCacheable ncfile = acquireCacheOnly(hashKey);
if (ncfile != null) {
  hits.incrementAndGet();
// NOTE(review): fragment appears elided — the hard-limit cleanup and the soft-limit scheduling
// below belong to different branches of acquire(); confirm against the full file.
if (debugCleanup)
  System.out.println("CleanupTask due to hard limit time=" + new Date().getTime()); // +" Thread="+Thread.currentThread().hashCode()
cleanup(hardLimit);
getExec().schedule(new CleanupTask(), 100, TimeUnit.MILLISECONDS); // immediate cleanup in 100 msec
if (debugCleanup)
  System.out.println("CleanupTask scheduled due to soft limit time=" + new Date());
// Fast path: the file is already in the cache and unlocked.
FileCacheable ncfile = acquireCacheOnly(hashKey);
if (ncfile != null) {
  hits.incrementAndGet();
// NOTE(review): elided fragment — lines below come from a hard-limit branch of acquire().
if (debugCleanup)
  System.out.println("CleanupTask due to hard limit time=" + new Date().getTime()); // +" Thread="+Thread.currentThread().hashCode()
cleanup(hardLimit);
schedule(new CleanupTask(), 100); // immediate cleanup in 100 msec
// NOTE(review): elided fragment — intervening lines are missing (the executor/task setup between
// the two keySet() loops is not visible). Comments describe the visible intent only.
@Test
public void testConcurrentAccess() throws InterruptedException {
  loadFilesIntoCache(new File(TestDir.cdmLocalTestDataDir), cache);
  Map<Object, FileCache.CacheElement> map = cache.getCache();
  List<String> files = new ArrayList<>();
  for (Object key : map.keySet()) {
    cache.showStats(format);
cache.showStats(format);
exec.awaitTermination(10, TimeUnit.SECONDS); // wait for the concurrent worker tasks to finish
format.format("done qsize= %4d%n", q.size());
cache.showStats(format);
map = cache.getCache();
for (Object key : map.keySet()) {
  assert !checkUnique.contains(key); // every cache key must appear at most once
cache.clearCache(false); // evict unlocked entries only
format.format("after cleanup qsize= %4d%n", q.size());
cache.showStats(format);
cache.clearCache(true); // force-evict everything, locked or not
// Fast path: the file is already in the cache and unlocked.
FileCacheable ncfile = acquireCacheOnly(hashKey);
if (ncfile != null) {
  hits.incrementAndGet();
// NOTE(review): elided fragment — the hard-limit cleanup below is from a different branch of acquire().
if (debugCleanup)
  System.out.println("CleanupTask due to hard limit time=" + new Date().getTime()); // +" Thread="+Thread.currentThread().hashCode()
cleanup(hardLimit);
private void testPeriodicCleanup(FileCache cache) throws IOException { loadFilesIntoCache(new File(TestDir.cdmLocalTestDataDir), cache); System.out.println(" loaded " + count); // close all Map<Object, FileCache.CacheElement> map = cache.getCache(); List<FileCacheable> files = new ArrayList<>(); for (Object key : map.keySet()) { FileCache.CacheElement elem = map.get(key); assert elem.list.size() == 1; for (FileCache.CacheElement.CacheFile file : elem.list) { files.add(file.ncfile); } } System.out.println(" close " + files.size()); for (FileCacheable ncfile : files) { ncfile.close(); } cache.showCache(new Formatter(System.out)); cache.cleanup(10); }
// NOTE(review): elided fragment — the loads/locks between these checks (and the origin of
// count/saveCount) are not visible; comments describe the visible assertions only.
cache.showCache(new Formatter(System.out));
Map<Object, FileCache.CacheElement> map = cache.getCache();
Assert.assertEquals(count, map.values().size()); // every loaded file is cached
map = cache.getCache();
cache.showCache(new Formatter(System.out));
cache.clearCache(true); // force-clear: evicts even locked entries
map = cache.getCache();
Assert.assertEquals(0, map.values().size());
map = cache.getCache();
Assert.assertEquals(saveCount, map.values().size());
cache.clearCache(false); // clear unlocked entries only
map = cache.getCache();
Assert.assertEquals(0, map.values().size());
map = cache.getCache();
Assert.assertEquals(saveCount, map.values().size());
map = cache.getCache();
Assert.assertEquals(saveCount, map.values().size());
cache.clearCache(false);
map = cache.getCache();
Assert.assertEquals(saveCount, map.values().size());
/**
 * Checks that cleanup() honors minElements: with minElements = 0 a cleanup may evict
 * everything; with minElements = 5 it must keep 5 entries resident.
 */
@Test
public void testPeriodicClear() throws IOException {
  FileCache cache = new FileCache(0, 10, 60 * 60);
  testPeriodicCleanup(cache);
  Map<Object, FileCache.CacheElement> map = cache.getCache();
  // Fix: the original used the bare `assert` keyword, which is silently skipped unless the
  // JVM runs with -ea; use Assert.assertEquals (already used elsewhere in this test file)
  // so the checks always execute and report the actual size on failure.
  Assert.assertEquals(0, map.values().size());

  cache = new FileCache(5, 10, 60 * 60);
  testPeriodicCleanup(cache);
  map = cache.getCache();
  Assert.assertEquals(5, map.values().size());
}
/**
 * Turn the cache off and immediately force-release every cached file.
 * shutdown() must still be called before the application exits.
 */
public void disable() {
  this.disabled.set(true); // reject new entries from here on
  clearCache(true); // then force-release everything currently held
}
/** Disable the shared NetcdfFile cache (releasing its files) and discard it. */
static public void disableNetcdfFileCache() {
  if (fileCache != null) {
    fileCache.disable();
  }
  fileCache = null;
}
/** * Show individual cache entries, add to formatter. * * @param format add to this */ @Override public void showCache(Formatter format) { ArrayList<CacheElement.CacheFile> allFiles = new ArrayList<>(files.size()); for (CacheElement elem : cache.values()) { synchronized (elem) { allFiles.addAll(elem.list); } } Collections.sort(allFiles); // sort so oldest are on top format.format("%nFileCache %s (min=%d softLimit=%d hardLimit=%d scour=%d):%n", name, minElements, softLimit, hardLimit, period); format.format(" isLocked accesses lastAccess location %n"); for (CacheElement.CacheFile file : allFiles) { String loc = file.ncfile != null ? file.ncfile.getLocation() : "null"; format.format("%8s %9d %s == %s %n", file.isLocked, file.countAccessed, CalendarDateFormatter.toDateTimeStringISO(file.lastAccessed), loc); } showStats(format); }
/**
 * Clean-shutdown hook: stops the cache's background threads.
 * Invoke at application exit whenever initNetcdfFileCache has been called.
 */
static public void shutdown() {
  FileCache.shutdown();
}
/**
 * Switch the cache off and force-release all files it holds.
 * The application must still invoke shutdown() before exiting.
 */
@Override
public void disable() {
  // Set the flag first so no new entries are admitted while we clear.
  this.disabled.set(true);
  clearCache(true);
}
/** Disable and drop the shared NetcdfFile cache, releasing any files it holds. */
static public void disableNetcdfFileCache() {
  if (fileCache != null)
    fileCache.disable(); // releases all cached files
  fileCache = null;
}
/** * Show individual cache entries, add to formatter. * * @param format add to this */ @Override public void showCache(Formatter format) { ArrayList<CacheElement.CacheFile> allFiles = new ArrayList<>(files.size()); for (CacheElement elem : cache.values()) { synchronized (elem) { allFiles.addAll(elem.list); } } Collections.sort(allFiles); // sort so oldest are on top format.format("%nFileCache %s (min=%d softLimit=%d hardLimit=%d scour=%d secs):%n", name, minElements, softLimit, hardLimit, period/1000); format.format(" isLocked accesses lastAccess location %n"); for (CacheElement.CacheFile file : allFiles) { String loc = file.ncfile != null ? file.ncfile.getLocation() : "null"; format.format("%8s %9d %s == %s %n", file.isLocked, file.countAccessed, CalendarDateFormatter.toDateTimeStringISO(file.lastAccessed), loc); } showStats(format); }
/**
 * Acquire (and lock) the FileCacheable for the given dataset URL.
 * Uses durl.trueurl as the cache key; no byte-buffer size override, no cancel task, no spec object.
 *
 * @param factory opens the file on a cache miss; may not be null
 * @param durl the dataset URL to acquire
 * @return the FileCacheable corresponding to durl
 * @throws IOException on error
 */
@Override
public FileCacheable acquire(FileFactory factory, DatasetUrl durl) throws IOException {
  return acquire(factory, durl.trueurl, durl, -1, null, null);
}
/**
 * Call at application exit if initNetcdfFileCache was previously called.
 * Disables the cache, then stops its background threads for a clean process shutdown.
 */
static public synchronized void shutdown() {
  disableNetcdfFileCache(); // release cached files first
  FileCache.shutdown(); // then stop the background scour thread
}