.mmapFileEnable() .commitFileSyncDisable() .cacheSize(config.getNumBufferedEntries()) .make(); try {
m = m.cacheLRUEnable().cacheSize(storageConfiguration.getCacheSize()) ;
/**
 * Configures and opens the MapDB database, then obtains (creating it on
 * first use) the "idmap" hash map that stores records.
 *
 * If {@code file} is null an in-memory database is used; otherwise a file
 * database is opened, optionally wiping the existing file first and applying
 * the configured async/mmap/compression/snapshot/transaction options.
 */
private void init() {
    DBMaker maker;
    if (file == null) {
        maker = DBMaker.newMemoryDB();
    } else {
        if (overwrite) {
            wipe(file); // start from a clean file when overwrite is requested
        }
        maker = DBMaker.newFileDB(new File(file)).cacheSize(cache_size);
        if (async) {
            maker = maker.asyncWriteEnable().asyncWriteFlushDelay(10000);
        }
        if (mmap) {
            maker = maker.mmapFileEnableIfSupported();
        }
        if (compression) {
            maker = maker.compressionEnable();
        }
        if (snapshot) {
            maker = maker.snapshotEnable();
        }
        if (notxn) {
            maker = maker.transactionDisable();
        }
    }
    db = maker.make();
    if (db.exists("idmap")) {
        idmap = db.getHashMap("idmap");
    } else {
        idmap = db.createHashMap("idmap")
                .valueSerializer(new RecordSerializer())
                .make();
    }
}
/**
 * Initialise a new cache with an identifier name and the maximum amount of objects allowed.
 *
 * The cache is backed by a MapDB file database stored under /tmp/caches/.
 *
 * @param name - Cache Identifier
 * @param maxItems - Number of objects allowed (used as the LRU cache size)
 */
protected Cache(String name, int maxItems) {
    this.name = name;
    // Make sure the backing directory and file exist before opening the database.
    File tempFolder = new File("/tmp/caches/");
    if (!tempFolder.exists()) {
        tempFolder.mkdirs();
    }
    File tempFile = new File("/tmp/caches/luzzu_" + name);
    if (!tempFile.exists()) {
        try {
            tempFile.createNewFile();
        } catch (IOException e) {
            // Best-effort: if the file really cannot be created, DBMaker.make()
            // below will fail with a clearer error.
            e.printStackTrace();
        }
    }
    // BUG FIX: the original chained .cacheSize(...) twice. The first value,
    // parsed from the CACHE_SIZE_IN_GB entry of cache.properties, was silently
    // overridden by the second call (.cacheSize(maxItems)), so the property
    // read was dead code whose only possible effect was throwing
    // NumberFormatException. Only maxItems — the documented limit — is used.
    this.db = DBMaker.newFileDB(tempFile)
            .cacheLRUEnable()
            .cacheSize(maxItems)
            .make();
    this.cache = db.getHashMap("cache_" + name);
}
private MapDb() { try { File file = File.createTempFile("geo-analyzer", ".db"); db = DBMaker.newFileDB(file).asyncWriteEnable().cacheSize(100000) .closeOnJvmShutdown().transactionDisable().make(); } catch (IOException e) { throw new RuntimeException(e); } }
public MapDbCachingIndexProvider(LuceneBatchInserterIndexProvider luceneIndex) { this.luceneIndex = luceneIndex; db = DBMaker.newTempFileDB(). asyncFlushDelay(1000). cacheSize(1024 * 1024). closeOnJvmShutdown(). deleteFilesAfterClose(). syncOnCommitDisable(). writeAheadLogDisable(). make(); }
/**
 * Create a new DataStore.
 *
 * @param directory  Where should it be created?
 * @param dataFile   What should it be called?
 * @param serializer serializer used for the values stored in the main map
 * @param cacheSize  size passed to MapDB's LRU cache
 */
public SpatialDataStore(File directory, String dataFile, Serializer serializer, Integer cacheSize) {
    this.dataFile = dataFile;
    if (!directory.exists()) {
        directory.mkdirs();
    }
    spatialId = new IdStore(directory, dataFile);
    // Open (or create) the backing database file for this data store.
    db = DBMaker.newFileDB(new File(directory, dataFile + ".db"))
            .mmapFileEnableIfSupported()
            .cacheLRUEnable()
            .cacheSize(cacheSize)
            .closeOnJvmShutdown()
            .make();
    // Main map: non-negative long keys with the caller-supplied value serializer.
    map = db.createTreeMap(dataFile)
            .valueSerializer(serializer)
            .keySerializer(BTreeKeySerializer.ZERO_OR_POSITIVE_LONG)
            .makeOrGet();
    // Secondary index keyed by 3-tuples.
    tileIndex = db.createTreeSet(dataFile + "_tileIndex")
            .serializer(BTreeKeySerializer.TUPLE3)
            .makeOrGet();
}
/**
 * Create a new DataStore.
 *
 * @param directory Where should it be created?
 */
public JumperDataStore(File directory) {
    if (!directory.exists()) {
        directory.mkdirs();
    }
    // Memory-mapped (where supported), LRU-cached database file with
    // asynchronous writes flushed every second.
    db = DBMaker.newFileDB(new File(directory, "jumpers.db"))
            .mmapFileEnableIfSupported()
            .cacheLRUEnable()
            .cacheSize(100000)
            .asyncWriteEnable()
            .asyncWriteFlushDelay(1000)
            .closeOnJvmShutdown()
            .make();
    // Main map of jumpers, serialized with the project's JumperSerializer.
    jumperMap = db.createTreeMap("jumperMap")
            .valueSerializer(new JumperSerializer())
            .makeOrGet();
    // Tuple2-keyed index sets over jumper start and end points.
    jumperStartIndex = db.createTreeSet("startIndex")
            .serializer(BTreeKeySerializer.TUPLE2)
            .makeOrGet();
    jumperEndIndex = db.createTreeSet("endIndex")
            .serializer(BTreeKeySerializer.TUPLE2)
            .makeOrGet();
}
dbMaker.cacheSize(cacheSize); logger().debug("MapDB cache size set to {0} for index provider {1}", cacheSize, getName());
dbMaker.cacheSize(cacheSize); logger().debug("MapDB cache size set to {0} for index provider {1}", cacheSize, getName());
wipe(file); maker = DBMaker.newFileDB(new File(file)); maker = maker.cacheSize(cache_size); if (async) { maker = maker.asyncWriteEnable();