/**
 * Evicts one resident entry: takes the first node found by scanning the
 * queues in {@code headQ} order (a head whose {@code next} points back to
 * itself marks an empty queue), unlinks it, drops it from {@code data}, and
 * parks it in the {@code out} (ghost) map, which is capped at {@code maxOut}
 * entries by discarding its smallest key. No-op when every queue is empty.
 */
private void evict() {
    for (Node head : headQ) {
        Node candidate = head.next;
        if (candidate == head) {
            continue; // empty queue — try the next priority level
        }
        // Unlink the victim and remove it from the resident map.
        candidate.remove();
        data.remove(candidate.key);
        // Remember the victim in the bounded ghost map.
        out.put(candidate.key, candidate);
        if (out.size() > maxOut) {
            out.remove(out.firstLongKey());
        }
        return;
    }
}
// Flushes the posting list still being accumulated for the last term (if it
// holds any documents) into the per-term map, then publishes that map for the
// current field and adds its estimated footprint to memoryUsageEstimate.
// NOTE(review): the trailing "};" closes the enclosing anonymous class, whose
// declaration is outside this view.
@Override public void close() throws IOException { if (currentDocList != null && currentDocList.size() > 0) terms.put(term, currentDocList); intFields.put(field, terms); memoryUsageEstimate += usage(field, terms); } };
/**
 * Associates {@code value} with {@code key}, stamping it with {@code timeStamp},
 * and keeps the reverse index {@code timeStampToKey} consistent with
 * {@code keyToTimeStamp}.
 *
 * <p>Fix: the original removed {@code key} from the timestamp index whenever an
 * old timestamp existed — even when it equaled the new one — so re-putting a key
 * with the same timestamp silently dropped it from the index while
 * {@code keyToTimeStamp} still pointed at that timestamp. The removal now runs
 * only when the timestamp actually changed, and an index bucket left empty is
 * discarded so the map does not accumulate dead entries.
 *
 * @param timeStamp the timestamp to associate with {@code key}.
 * @param key the key; must not be {@code null}.
 * @param value the value; must not be {@code null}.
 * @return the value previously associated with {@code key}, or {@code null}.
 */
public VALUE put(long timeStamp, KEY key, VALUE value) {
    Preconditions.checkNotNull(key);
    Preconditions.checkNotNull(value);
    changed.add(key);
    Long oldTimeStamp = keyToTimeStamp.put(key, timeStamp);
    // Short-circuit protects the unboxing comparison from an NPE.
    if (oldTimeStamp == null || oldTimeStamp != timeStamp) {
        // Index the key under its new timestamp, creating the bucket on demand.
        Set<KEY> keys = timeStampToKey.get(timeStamp);
        if (keys == null) {
            keys = Sets.newHashSet();
            timeStampToKey.put(timeStamp, keys);
        }
        keys.add(key);
        if (oldTimeStamp != null) {
            // The timestamp changed: unindex the key from its previous bucket
            // and drop the bucket entirely once it is empty.
            Set<KEY> oldKeys = timeStampToKey.get(oldTimeStamp);
            if (oldKeys != null) {
                oldKeys.remove(key);
                if (oldKeys.isEmpty()) {
                    timeStampToKey.remove(oldTimeStamp);
                }
            }
        }
    }
    return keyToValue.put(key, value);
}
/**
 * Switches accumulation to a new term: the posting list gathered for the
 * previous term is flushed into {@code terms} (only if it holds at least one
 * document), and a fresh empty list is started for {@code term}.
 *
 * @param term the term whose documents will be accumulated next.
 * @throws IOException declared by the interface; not thrown here.
 */
@Override
public void nextTerm(long term) throws IOException {
    IntArrayList pending = currentDocList;
    if (pending != null && !pending.isEmpty()) {
        // Publish the completed posting list under the term it was built for.
        terms.put(this.term, pending);
    }
    this.term = term;
    currentDocList = new IntArrayList();
}
/**
 * Removes a bucket.
 *
 * <p>Implementation notes (review): the replica points of {@code bucket} are
 * regenerated by reseeding the random generator with {@code bucket.hashCode()},
 * exactly as at insertion time, so the same {@code size * REPLICAE_PER_BUCKET}
 * points are visited. Each point maps either directly to a bucket or — on a
 * point collision — to an {@code ObjectAVLTreeSet} of conflicting buckets.
 * Invariant relied upon below: a conflict set always holds at least two
 * buckets (a singleton is stored unwrapped), so after removing {@code bucket}
 * the call to {@code first()} is safe. A directly-stored object belonging to a
 * different bucket (colliding point) is put back untouched.
 *
 * @param bucket the bucket to be removed.
 * @return false if the bucket was not present.
 */
@SuppressWarnings("unchecked") public boolean remove( final T bucket ) { if ( !sizes.containsKey( bucket ) ) return false; final XoRoShiRo128PlusRandomGenerator random = new XoRoShiRo128PlusRandomGenerator( bucket.hashCode() ); final int size = sizes.removeInt( bucket ); for ( int i = 0; i < size * REPLICAE_PER_BUCKET; i++ ) { final long point = random.nextLong(); final Object o = replicae.remove( point ); if ( o instanceof ObjectAVLTreeSet ) { if ( DEBUG ) System.err.println( "Removing from " + point + " conflict set..." ); final ObjectAVLTreeSet<T> conflictSet = (ObjectAVLTreeSet<T>)o; conflictSet.remove( bucket ); if ( conflictSet.size() > 1 ) replicae.put( point, conflictSet ); else replicae.put( point, conflictSet.first() ); } else if ( o != null && ( (T)o ).compareTo( bucket ) != 0 ) replicae.put( point, o ); } return true; }
/**
 * Evicts a single entry. The queues in {@code headQ} are inspected in order;
 * the first non-empty one (its sentinel's {@code next} differs from the
 * sentinel itself) supplies the victim. The victim is unlinked, removed from
 * {@code data}, and recorded in the {@code out} ghost map, whose size is kept
 * at or below {@code maxOut} by evicting its smallest key. Does nothing when
 * all queues are empty.
 */
private void evict() {
    Node victim = null;
    for (Node sentinel : headQ) {
        if (sentinel.next != sentinel) {
            victim = sentinel.next;
            break;
        }
    }
    if (victim != null) {
        victim.remove();
        data.remove(victim.key);
        out.put(victim.key, victim);
        // Bound the ghost map by dropping its lowest key when over capacity.
        if (out.size() > maxOut) {
            out.remove(out.firstLongKey());
        }
    }
}
// NOTE(review): fragment from the interior of a method whose declaration is
// outside this view. Registers a fresh posting list for `term` and charges its
// footprint to the running estimate: one red-black tree-map entry, the
// IntArrayList wrapper, and the backing int[] at 4 bytes per allocated slot.
myIntTerms.put(term, docList = new IntArrayList()); memoryUsageEstimate += INT_2_OBJECT_RB_TREE_MAP_ENTRY_USAGE + INT_ARRAY_LIST_USAGE + (4 * docList.elements().length);
/**
 * Returns a copy of this consistent hash function.
 *
 * <p>Note that for this method to work properly, the skip strategy
 * {@linkplain #ConsistentHashFunction(SkipStrategy) provided at construction
 * time}, if any, must be stateless, as it will be shared by the copy.
 *
 * @return a copy of this consistent hash function.
 */
@SuppressWarnings("unchecked")
public ConsistentHashFunction<T> copy() {
    final Long2ObjectSortedMap<Object> clonedReplicae = new Long2ObjectAVLTreeMap<Object>();
    for ( final Long2ObjectMap.Entry<Object> e : replicae.long2ObjectEntrySet() ) {
        Object v = e.getValue();
        // Conflict sets are mutable, so they must be cloned; plain bucket
        // references can be shared between the two instances.
        if ( v instanceof ObjectAVLTreeSet ) v = ((ObjectAVLTreeSet<T>)v).clone();
        clonedReplicae.put( e.getLongKey(), v );
    }
    return new ConsistentHashFunction<T>( clonedReplicae, new Object2IntOpenHashMap<T>( sizes ), skipStrategy );
}