public FulgoraVertexMemory(int numVertices, final IDManager idManager, final VertexProgram<M> vertexProgram) {
    Preconditions.checkArgument(numVertices >= 0 && vertexProgram != null && idManager != null);
    vertexStates = new NonBlockingHashMapLong<>(numVertices);
    partitionVertices = new NonBlockingHashMapLong<>(64);
    this.idManager = idManager;
    this.combiner = FulgoraUtil.getMessageCombiner(vertexProgram);
    this.elementKeyMap = getIdMap(vertexProgram.getElementComputeKeys());
    this.previousScopes = ImmutableMap.of();
}
public GuavaVertexCache(final long maxCacheSize, final int concurrencyLevel, final int initialDirtySize) {
    volatileVertices = new NonBlockingHashMapLong<InternalVertex>(initialDirtySize);
    log.debug("Created dirty vertex map with initial size {}", initialDirtySize);
    cache = CacheBuilder.newBuilder().maximumSize(maxCacheSize).concurrencyLevel(concurrencyLevel)
            .removalListener(new RemovalListener<Long, InternalVertex>() {
                @Override
                public void onRemoval(RemovalNotification<Long, InternalVertex> notification) {
                    if (notification.getCause() == RemovalCause.EXPLICIT) { //Due to invalidation at the end
                        assert volatileVertices.isEmpty();
                        return;
                    }
                    //Should only get evicted based on size constraint or replaced through add
                    assert (notification.getCause() == RemovalCause.SIZE
                            || notification.getCause() == RemovalCause.REPLACED) : "Cause: " + notification.getCause();
                    InternalVertex v = notification.getValue();
                    if (v.isModified()) {
                        volatileVertices.putIfAbsent(notification.getKey(), v);
                    }
                }
            })
            .build();
    log.debug("Created vertex cache with max size {}", maxCacheSize);
}
public FulgoraVertexMemory(int numVertices, final IDManager idManager, final VertexProgram<M> vertexProgram) {
    Preconditions.checkArgument(numVertices >= 0 && vertexProgram != null && idManager != null);
    vertexStates = new NonBlockingHashMapLong<>(numVertices);
    partitionVertices = new NonBlockingHashMapLong<>(64);
    this.idManager = idManager;
    this.combiner = FulgoraUtil.getMessageCombiner(vertexProgram);
    this.computeKeys = vertexProgram.getVertexComputeKeys();
    this.elementKeyMap = getIdMap(vertexProgram.getVertexComputeKeys().stream()
            .map(VertexComputeKey::getKey)
            .collect(Collectors.toCollection(HashSet::new)));
    this.previousScopes = ImmutableMap.of();
}
public StandardSchemaCache(final int size, final StoreRetrieval retriever) {
    Preconditions.checkArgument(size > 0, "Size must be positive");
    Preconditions.checkNotNull(retriever);
    maxCachedTypes = size;
    maxCachedRelations = maxCachedTypes * CACHE_RELATION_MULTIPLIER;
    this.retriever = retriever;

    typeNamesBackup = CacheBuilder.newBuilder()
            .concurrencyLevel(CONCURRENCY_LEVEL).initialCapacity(INITIAL_CACHE_SIZE)
            .maximumSize(maxCachedTypes).build();
    typeNames = new ConcurrentHashMap<String, Long>(INITIAL_CAPACITY, 0.75f, CONCURRENCY_LEVEL);

    schemaRelationsBackup = CacheBuilder.newBuilder()
            .concurrencyLevel(CONCURRENCY_LEVEL).initialCapacity(INITIAL_CACHE_SIZE * CACHE_RELATION_MULTIPLIER)
            .maximumSize(maxCachedRelations).build();
    // typeRelations = new ConcurrentHashMap<Long, EntryList>(INITIAL_CAPACITY*CACHE_RELATION_MULTIPLIER,0.75f,CONCURRENCY_LEVEL);
    schemaRelations = new NonBlockingHashMapLong<EntryList>(INITIAL_CAPACITY * CACHE_RELATION_MULTIPLIER);
    //TODO: Is this data structure safe or should we go with ConcurrentHashMap (line above)?
}
public ConcurrentLRUCache(int upperWaterMark, final int lowerWaterMark, int acceptableWatermark,
                          int initialSize, boolean runCleanupThread, boolean runNewThreadForCleanup,
                          EvictionListener<V> evictionListener) {
    if (upperWaterMark < 1) throw new IllegalArgumentException("upperWaterMark must be > 0");
    if (lowerWaterMark >= upperWaterMark) throw new IllegalArgumentException("lowerWaterMark must be < upperWaterMark");
    map = new NonBlockingHashMapLong<CacheEntry<Long, V>>(initialSize);
    newThreadForCleanup = runNewThreadForCleanup;
    this.upperWaterMark = upperWaterMark;
    this.lowerWaterMark = lowerWaterMark;
    this.acceptableWaterMark = acceptableWatermark;
    this.evictionListener = evictionListener;
    if (runCleanupThread) {
        cleanupThread = new CleanupThread(this);
        cleanupThread.start();
    }
}
public StandardSchemaCache(final int size, final StoreRetrieval retriever) {
    Preconditions.checkArgument(size > 0, "Size must be positive");
    Preconditions.checkNotNull(retriever);
    maxCachedTypes = size;
    maxCachedRelations = maxCachedTypes * CACHE_RELATION_MULTIPLIER;
    this.retriever = retriever;

    typeNamesBackup = CacheBuilder.newBuilder()
            .concurrencyLevel(CONCURRENCY_LEVEL).initialCapacity(INITIAL_CACHE_SIZE)
            .maximumSize(maxCachedTypes).build();
    typeNames = new ConcurrentHashMap<>(INITIAL_CAPACITY, 0.75f, CONCURRENCY_LEVEL);

    schemaRelationsBackup = CacheBuilder.newBuilder()
            .concurrencyLevel(CONCURRENCY_LEVEL).initialCapacity(INITIAL_CACHE_SIZE * CACHE_RELATION_MULTIPLIER)
            .maximumSize(maxCachedRelations).build();
    // typeRelations = new ConcurrentHashMap<Long, EntryList>(INITIAL_CAPACITY*CACHE_RELATION_MULTIPLIER,0.75f,CONCURRENCY_LEVEL);
    schemaRelations = new NonBlockingHashMapLong<>(INITIAL_CAPACITY * CACHE_RELATION_MULTIPLIER);
    //TODO: Is this data structure safe or should we go with ConcurrentHashMap (line above)?
}
public ConcurrentLRUCache(int upperWaterMark, final int lowerWaterMark, int acceptableWatermark,
                          int initialSize, boolean runCleanupThread, boolean runNewThreadForCleanup,
                          EvictionListener<V> evictionListener) {
    if (upperWaterMark < 1) throw new IllegalArgumentException("upperWaterMark must be > 0");
    if (lowerWaterMark >= upperWaterMark) throw new IllegalArgumentException("lowerWaterMark must be < upperWaterMark");
    map = new NonBlockingHashMapLong<>(initialSize);
    newThreadForCleanup = runNewThreadForCleanup;
    this.upperWaterMark = upperWaterMark;
    this.lowerWaterMark = lowerWaterMark;
    this.acceptableWaterMark = acceptableWatermark;
    this.evictionListener = evictionListener;
    if (runCleanupThread) {
        cleanupThread = new CleanupThread(this);
        cleanupThread.start();
    }
}
public LRUVertexCache(int capacity) {
    volatileVertices = new NonBlockingHashMapLong<InternalVertex>();
    cache = new ConcurrentLRUCache<InternalVertex>(
            capacity * 2,            // upper is double capacity
            capacity + capacity / 3, // lower is capacity + 1/3
            capacity,                // acceptable watermark is capacity
            100, true, false,        // 100 items initial size + use only one thread for items cleanup
            new ConcurrentLRUCache.EvictionListener<InternalVertex>() {
                @Override
                public void evictedEntry(Long vertexId, InternalVertex vertex) {
                    if (vertexId == null || vertex == null) return;
                    if (vertex.isModified()) {
                        volatileVertices.putIfAbsent(vertexId, vertex);
                    }
                }
            });
    cache.setAlive(true); //need counters so it's actually LRU
}
NonBlockingHashMapLong<String> id1 = new NonBlockingHashMapLong<String>(128);
ConcurrentHashMap<Long, String> id2 = new ConcurrentHashMap<Long, String>(128, 0.75f, 2);
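The pair above contrasts the two concurrent maps. A minimal usage sketch follows; the import package is an assumption since the snippets omit imports (the class ships as org.cliffc.high_scale_lib.NonBlockingHashMapLong in the original high-scale-lib and as org.jctools.maps.NonBlockingHashMapLong in JCTools). The long-keyed map exposes primitive put/get overloads, so keys are never boxed, whereas ConcurrentHashMap autoboxes every key.

NonBlockingHashMapLong<String> id1 = new NonBlockingHashMapLong<>(128);
id1.put(42L, "answer");           // primitive-long overload, no key boxing
String fromId1 = id1.get(42L);    // lock-free, unboxed lookup

ConcurrentHashMap<Long, String> id2 = new ConcurrentHashMap<>(128, 0.75f, 2);
id2.put(42L, "answer");           // 42L is autoboxed to Long here
String fromId2 = id2.get(42L);    // and boxed again on lookup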
public GuavaVertexCache(final long maxCacheSize, final int concurrencyLevel, final int initialDirtySize) {
    volatileVertices = new NonBlockingHashMapLong<>(initialDirtySize);
    log.debug("Created dirty vertex map with initial size {}", initialDirtySize);
    cache = CacheBuilder.newBuilder().maximumSize(maxCacheSize).concurrencyLevel(concurrencyLevel)
            .removalListener((RemovalListener<Long, InternalVertex>) notification -> {
                if (notification.getCause() == RemovalCause.EXPLICIT) { //Due to invalidation at the end
                    assert volatileVertices.isEmpty();
                    return;
                }
                //Should only get evicted based on size constraint or replaced through add
                assert (notification.getCause() == RemovalCause.SIZE
                        || notification.getCause() == RemovalCause.REPLACED) : "Cause: " + notification.getCause();
                final InternalVertex v = notification.getValue();
                if (((AbstractVertex) v).isTxOpen() && v.isModified()) {
                    volatileVertices.putIfAbsent(notification.getKey(), v);
                }
            })
            .build();
    log.debug("Created vertex cache with max size {}", maxCacheSize);
}
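Both GuavaVertexCache variants rely on the same contract from Guava's CacheBuilder: explicit invalidation reports RemovalCause.EXPLICIT, overwriting a live key reports REPLACED, and capacity-driven eviction reports SIZE. A small standalone sketch of that contract, with an illustrative String cache and println that are not from the source:

Cache<Long, String> demo = CacheBuilder.newBuilder()
        .maximumSize(2)
        .removalListener((RemovalListener<Long, String>) n ->
                System.out.println(n.getKey() + " removed, cause=" + n.getCause()))
        .build();

demo.put(1L, "a");
demo.put(1L, "b");       // notifies the listener with RemovalCause.REPLACED
demo.invalidateAll();    // notifies with RemovalCause.EXPLICIT, the case asserted above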
public LRUVertexCache(int capacity) {
    volatileVertices = new NonBlockingHashMapLong<>();
    cache = new ConcurrentLRUCache<>(
            capacity * 2,            // upper is double capacity
            capacity + capacity / 3, // lower is capacity + 1/3
            capacity,                // acceptable watermark is capacity
            100, true, false,        // 100 items initial size + use only one thread for items cleanup
            (vertexId, vertex) -> {
                if (vertexId == null || vertex == null) {
                    return;
                }
                if (vertex.isModified()) {
                    volatileVertices.putIfAbsent(vertexId, vertex);
                }
            });
    cache.setAlive(true); //need counters so it's actually LRU
}
/** Make a new empty {@link NonBlockingHashSetLong}. */
public NonBlockingHashSetLong() {
    super();
    _map = new NonBlockingHashMapLong<Object>();
}
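The constructor above suggests the set is a thin wrapper over a long-keyed map. A plausible sketch of that delegation, using a shared sentinel value to mark presence; this is illustrative and not necessarily the library's exact code:

final class LongSetSketch {
    private static final Object PRESENT = new Object();
    private final NonBlockingHashMapLong<Object> map = new NonBlockingHashMapLong<>();

    boolean add(long i)      { return map.putIfAbsent(i, PRESENT) == null; }
    boolean contains(long i) { return map.containsKey(i); }
    boolean remove(long i)   { return map.remove(i) != null; }
}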
public DefaultAllocatedMemoryStorage() {
    // 1024 initial capacity; the boolean is the opt_for_space flag (false favors speed over footprint)
    this.allocatedMemories = new NonBlockingHashMapLong<Long>(1024, false);
}
/**
 * Default constructor.
 */
public Leaf() {
    map = new NonBlockingHashMapLong<>();
    referenceQueue = new ReferenceQueue<>();
}
public FulgoraResult(int numVertices, final IDManager idManager) {
    Preconditions.checkArgument(numVertices >= 0);
    vertexStates = new NonBlockingHashMapLong<S>(numVertices);
    this.idManager = idManager;
}
private ConcurrentMap<Long, ConcurrentMap<BreadcrumbKey, HitsPerInterval>> initBucketId2hitCountsMap(final Duration preAllocatedInterval) {
    final long bucketCount = getBucketCount(preAllocatedInterval);
    final NonBlockingHashMapLong<ConcurrentMap<BreadcrumbKey, HitsPerInterval>> bucketIds2hitCounts =
            new NonBlockingHashMapLong<>((int) bucketCount);
    for (long bucketId = 0; bucketId < bucketCount; bucketId++) {
        bucketIds2hitCounts.put(bucketId, new ConcurrentHashMap<>());
    }
    return bucketIds2hitCounts;
}
private ConcurrentMap<Long, HitsPerInterval> initBucketId2hitCountsMap(final Duration preAllocatedInterval) {
    final long bucketCount = getBucketCount(preAllocatedInterval);
    final NonBlockingHashMapLong<HitsPerInterval> bucketIds2hitCounts = new NonBlockingHashMapLong<>((int) bucketCount);
    for (long bucketId = 0; bucketId < bucketCount; bucketId++) {
        bucketIds2hitCounts.put(bucketId, HitsPerInterval.EMPTY);
    }
    return bucketIds2hitCounts;
}
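Both initBucketId2hitCountsMap variants pre-fill every bucket, presumably so later readers find an entry for each bucket id without racing an insertion. A hedged sketch of how such a bucket might then be addressed; the method name bucketFor and the bucket-id derivation are assumptions, and only the unboxed NonBlockingHashMapLong lookup mirrors the pre-fill loop above:

private HitsPerInterval bucketFor(long timestampMillis, Duration bucketDuration) {
    long bucketId = timestampMillis / bucketDuration.toMillis();  // hypothetical derivation
    // non-null as long as bucketId stays below the pre-allocated bucket count
    return bucketIds2hitCounts.get(bucketId);                     // primitive-long lookup, no boxing
}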