m = m.cacheLRUEnable().cacheSize(storageConfiguration.getCacheSize()) ;
// Anonymous configurator: turn on MapDB's LRU instance cache and open
// the store in read-only mode.
@Override
public DBMaker<?> configure(DBMaker<?> maker) {
    DBMaker<?> withLruCache = maker.cacheLRUEnable();
    return withLruCache.readOnly();
}
};
// Anonymous configurator: enables MapDB's LRU instance cache and opens the
// store read-only. The closing "};" terminates the enclosing anonymous
// class, whose declaration lies outside this view.
@Override public DBMaker<?> configure(DBMaker<?> maker) { return maker.cacheLRUEnable().readOnly(); } };
// Anonymous configurator: enables MapDB's LRU instance cache and opens the
// store read-only. The closing "};" terminates the enclosing anonymous
// class, whose declaration lies outside this view.
@Override public DBMaker<?> configure(DBMaker<?> maker) { return maker.cacheLRUEnable().readOnly(); } };
/**
 * Builds a DBMaker with "safe" defaults: transactions remain ENABLED
 * (unlike newDefaultDb, which disables them), moderate free-space
 * reclamation (freeSpaceReclaimQ=3), partial mmap, LRU instance cache,
 * and close-on-JVM-shutdown.
 *
 * @param loc  directory in which the backing "mapdb" file is created
 * @param name logical database name, used only for the log message
 * @return a configured DBMaker; call make() to open the database
 */
public static synchronized DBMaker<?> SafeDefaultDb(String loc,String name){
    File dbFile = new File(loc);
    // NOTE(review): mkdirs()' return value is ignored here — a failed
    // mkdir only surfaces later as an obscure error from newFileDB().
    if (!dbFile.exists())
        dbFile.mkdirs();
    dbFile = new File(dbFile, "mapdb");
    logger.info("Opening DB "+name+" at " + dbFile);
    // Storing gigantic maps in this temporary DB
    return DBMaker.newFileDB(dbFile).freeSpaceReclaimQ(3)
        .mmapFileEnablePartial()
        .cacheLRUEnable()
        .closeOnJvmShutdown();
}

// Continues past this view: bulk-creation helper (signature cut off here).
public static <K extends Comparable<K>, V> BTreeMap<K, V> batchCreate(final Map<K, V> map,
/**
 * Builds a DBMaker with "safe" defaults: transactions remain ENABLED
 * (unlike newDefaultDb, which disables them), moderate free-space
 * reclamation (freeSpaceReclaimQ=3), partial mmap, LRU instance cache,
 * and close-on-JVM-shutdown.
 *
 * @param loc  directory in which the backing "mapdb" file is created
 * @param name logical database name, used only for the log message
 * @return a configured DBMaker; call make() to open the database
 */
public static synchronized DBMaker<?> SafeDefaultDb(String loc,String name){
    File dbFile = new File(loc);
    // NOTE(review): mkdirs()' return value is ignored here — a failed
    // mkdir only surfaces later as an obscure error from newFileDB().
    if (!dbFile.exists())
        dbFile.mkdirs();
    dbFile = new File(dbFile, "mapdb");
    logger.info("Opening DB "+name+" at " + dbFile);
    // Storing gigantic maps in this temporary DB
    return DBMaker.newFileDB(dbFile).freeSpaceReclaimQ(3)
        .mmapFileEnablePartial()
        .cacheLRUEnable()
        .closeOnJvmShutdown();
}

// Continues past this view: bulk-creation helper (signature cut off here).
public static <K extends Comparable<K>, V> BTreeMap<K, V> batchCreate(final Map<K, V> map,
/**
 * Builds a DBMaker with "safe" defaults: transactions remain ENABLED
 * (unlike newDefaultDb, which disables them), moderate free-space
 * reclamation (freeSpaceReclaimQ=3), partial mmap, LRU instance cache,
 * and close-on-JVM-shutdown.
 *
 * @param loc  directory in which the backing "mapdb" file is created
 * @param name logical database name, used only for the log message
 * @return a configured DBMaker; call make() to open the database
 */
public static synchronized DBMaker<?> SafeDefaultDb(String loc,String name){
    File dbFile = new File(loc);
    // NOTE(review): mkdirs()' return value is ignored here — a failed
    // mkdir only surfaces later as an obscure error from newFileDB().
    if (!dbFile.exists())
        dbFile.mkdirs();
    dbFile = new File(dbFile, "mapdb");
    logger.info("Opening DB "+name+" at " + dbFile);
    // Storing gigantic maps in this temporary DB
    return DBMaker.newFileDB(dbFile).freeSpaceReclaimQ(3)
        .mmapFileEnablePartial()
        .cacheLRUEnable()
        .closeOnJvmShutdown();
}

// Continues past this view: bulk-creation helper (signature cut off here).
public static <K extends Comparable<K>, V> BTreeMap<K, V> batchCreate(final Map<K, V> map,
/** * Moderate space saving; * mmap file; * LRU instance cache; * close on shutdown; * write-through; * @param loc * @param name * @return */ public static synchronized DBMaker<?> newDefaultDb(String loc,String name){ File dbFile = new File(loc); if (!dbFile.exists()) dbFile.mkdirs(); dbFile = new File(dbFile, "mapdb"); logger.info("Opening DB "+name+" at " + dbFile); // Storing gigantic maps in this temporary DB return DBMaker.newFileDB(dbFile).freeSpaceReclaimQ(3) .mmapFileEnablePartial() .transactionDisable() .cacheLRUEnable() .closeOnJvmShutdown(); }
/**
 * Default DB factory. Configuration applied:
 * moderate space saving (freeSpaceReclaimQ=3);
 * partial mmap file;
 * transactions disabled (write-through);
 * LRU instance cache;
 * close on JVM shutdown.
 *
 * @param loc  directory in which the backing "mapdb" file is created
 * @param name logical database name (used only for logging)
 * @return a configured DBMaker; call make() to open the database
 */
public static synchronized DBMaker<?> newDefaultDb(String loc,String name){
    File dbFile = new File(loc);
    // NOTE(review): mkdirs()' return value is ignored — a failed mkdir
    // only surfaces later as an obscure error from newFileDB().
    if (!dbFile.exists())
        dbFile.mkdirs();
    dbFile = new File(dbFile, "mapdb");
    logger.info("Opening DB "+name+" at " + dbFile);
    // Storing gigantic maps in this temporary DB
    return DBMaker.newFileDB(dbFile).freeSpaceReclaimQ(3)
        .mmapFileEnablePartial()
        .transactionDisable()
        .cacheLRUEnable()
        .closeOnJvmShutdown();
}
/**
 * Default DB factory. Configuration applied:
 * moderate space saving (freeSpaceReclaimQ=3);
 * partial mmap file;
 * transactions disabled (write-through);
 * LRU instance cache;
 * close on JVM shutdown.
 *
 * @param loc  directory in which the backing "mapdb" file is created
 * @param name logical database name (used only for logging)
 * @return a configured DBMaker; call make() to open the database
 */
public static synchronized DBMaker<?> newDefaultDb(String loc,String name){
    File dbFile = new File(loc);
    // NOTE(review): mkdirs()' return value is ignored — a failed mkdir
    // only surfaces later as an obscure error from newFileDB().
    if (!dbFile.exists())
        dbFile.mkdirs();
    dbFile = new File(dbFile, "mapdb");
    logger.info("Opening DB "+name+" at " + dbFile);
    // Storing gigantic maps in this temporary DB
    return DBMaker.newFileDB(dbFile).freeSpaceReclaimQ(3)
        .mmapFileEnablePartial()
        .transactionDisable()
        .cacheLRUEnable()
        .closeOnJvmShutdown();
}
/**
 * Initialise a new cache with an identifier name and the maximum amount
 * of objects allowed. The cache is backed by a MapDB file under
 * /tmp/caches/luzzu_&lt;name&gt; with an LRU instance cache.
 *
 * @param name - Cache Identifier (used in the backing file and map names)
 * @param maxItems - Number of object allowed
 */
protected Cache(String name, int maxItems){
    this.name = name;
    File tempFolder = new File("/tmp/caches/");
    // Fix: the original ignored mkdirs()' return value; report failure
    // immediately rather than failing obscurely in newFileDB() below.
    if (!tempFolder.exists() && !tempFolder.mkdirs()) {
        System.err.println("Could not create cache directory " + tempFolder);
    }
    File tempFile = new File("/tmp/caches/luzzu_"+name);
    if (!tempFile.exists()){
        try {
            tempFile.createNewFile();
        } catch (IOException e) {
            // Best-effort: MapDB will fail loudly below if the file is unusable.
            e.printStackTrace();
        }
    }
    // Fix: the original chained cacheSize() twice — first with the parsed
    // CACHE_SIZE_IN_GB property, then with maxItems — so the property value
    // was always overridden, and parsing it could throw
    // NumberFormatException for nothing. Only the documented maxItems limit
    // is applied now.
    this.db = DBMaker.newFileDB(tempFile)
        .cacheLRUEnable()
        .cacheSize(maxItems)
        .make();
    this.cache = db.getHashMap("cache_"+name);
}
/**
 * Create a new DataStore.
 * @param directory Where should it be created?
 * @param dataFile What should it be called?
 */
public SpatialDataStore(File directory, String dataFile, Serializer serializer, Integer cacheSize) {
    this.dataFile = dataFile;
    if (!directory.exists()) {
        directory.mkdirs();
    }
    spatialId = new IdStore(directory, dataFile);

    // Open the backing MapDB file with an LRU cache of the requested size.
    File backingFile = new File(directory, dataFile + ".db");
    DBMaker builder = DBMaker.newFileDB(backingFile);
    builder = builder.mmapFileEnableIfSupported();
    builder = builder.cacheLRUEnable();
    builder = builder.cacheSize(cacheSize);
    builder = builder.closeOnJvmShutdown();
    db = builder.make();

    // Primary map, keyed by non-negative longs.
    BTreeMapMaker treeBuilder = db.createTreeMap(dataFile)
            .valueSerializer(serializer)
            .keySerializer(BTreeKeySerializer.ZERO_OR_POSITIVE_LONG);
    map = treeBuilder.makeOrGet();

    // Secondary index over tiles, stored as 3-tuples.
    tileIndex = db.createTreeSet(dataFile + "_tileIndex")
            .serializer(BTreeKeySerializer.TUPLE3)
            .makeOrGet();
}
/**
 * Create a new DataStore.
 * @param directory Where should it be created?
 */
public JumperDataStore(File directory) {
    if (!directory.exists()) {
        directory.mkdirs();
    }

    // Backing MapDB file: LRU-cached, async writes flushed once per second.
    File backingFile = new File(directory, "jumpers.db");
    DBMaker builder = DBMaker.newFileDB(backingFile)
            .mmapFileEnableIfSupported()
            .cacheLRUEnable()
            .cacheSize(100000)
            .asyncWriteEnable()
            .asyncWriteFlushDelay(1000)
            .closeOnJvmShutdown();
    db = builder.make();

    // Main store of jumpers.
    jumperMap = db.createTreeMap("jumperMap")
            .valueSerializer(new JumperSerializer())
            .makeOrGet();

    // Both endpoint indexes are tuple2-keyed sorted sets.
    jumperStartIndex = db.createTreeSet("startIndex")
            .serializer(BTreeKeySerializer.TUPLE2)
            .makeOrGet();
    jumperEndIndex = db.createTreeSet("endIndex")
            .serializer(BTreeKeySerializer.TUPLE2)
            .makeOrGet();
}
dbMaker.cacheLRUEnable(); logger().debug("MapDB cacheLRU enabled for index provider {0}", getName());
dbMaker.cacheLRUEnable(); logger().debug("MapDB cacheLRU enabled for index provider {0}", getName());