/**
 * Make room for {@code toAdd} by evicting one expired entry, if any.
 * @return true if an old entry was removed, or if the bucket still has space
 */
public boolean trim(KBucket<NID> kbucket, NID toAdd) {
    final long now = _ctx.clock().now();
    // Leave recently-modified buckets alone.
    if (kbucket.getLastChanged() > now - MIN_BUCKET_AGE)
        return false;
    Set<NID> entries = kbucket.getEntries();
    final long staleBefore = now - MAX_NODE_AGE;
    // Evict the first stale node we find; a single removal is enough.
    for (NID nid : entries) {
        if (nid.lastSeen() >= staleBefore)
            continue;
        if (kbucket.remove(nid))
            return true;
    }
    // Nothing evicted; accept the add only if there is room.
    return entries.size() < _max;
}
}
/**
 * Order buckets by their (disjoint) range positions.
 * Any overlap between the two ranges counts as equal, which is what
 * lets a single-range dummy bucket locate its containing bucket.
 */
public int compare(KBucket<T> l, KBucket<T> r) {
    if (l.getRangeEnd() < r.getRangeBegin())
        return -1;
    return (l.getRangeBegin() > r.getRangeEnd()) ? 1 : 0;
}
}
/**
 * Should this bucket be split into two?
 * No lock required.
 * FIXME will split the closest buckets too far if B > 1 and K < 2**B
 * Won't ever really happen and if it does it still works.
 */
private boolean shouldSplit(KBucket<T> b) {
    // A bucket covering a single range value cannot be split further.
    boolean splittable = b.getRangeBegin() != b.getRangeEnd();
    boolean overfull = b.getKeyCount() > BUCKET_SIZE;
    return splittable && overfull;
}
public boolean trim(KBucket<T> kbucket, T toAdd) { List<T> e = new ArrayList<T>(kbucket.getEntries()); int sz = e.size(); // concurrency if (sz < _max) return true; T toRemove = e.get(_ctx.random().nextInt(sz)); kbucket.remove(toRemove); return true; } }
s1 = b0.getRangeBegin(); e2 = b0.getRangeEnd(); if (B_VALUE == 1 || ((s1 & (B_FACTOR - 1)) == 0 && KBucket<T> b1 = createBucket(s1, e1); KBucket<T> b2 = createBucket(s2, e2); for (T key : b0.getEntries()) { if (getRange(key) < s2) b1.add(key); else b2.add(key); if (b2.getKeyCount() > BUCKET_SIZE) {
/**
 * Verify that every stored hash falls within the range of the
 * bucket that holds it.
 * @since 0.9.10
 */
public void testAudit() {
    int misplaced = 0;
    for (KBucket<Hash> b : set.getBuckets()) {
        int lo = b.getRangeBegin();
        int hi = b.getRangeEnd();
        for (Hash sds : b.getEntries()) {
            int range = set.getRange(sds);
            if (range >= lo && range <= hi)
                continue;
            log.error("Hash " + sds + " with range " + range + " does not belong in " + b);
            misplaced++;
        }
    }
    assertTrue(misplaced == 0);
}
/**
 * Snapshot of every entry across all buckets.
 * @return a copy in a new set
 */
public Set<T> getAll() {
    Set<T> result = new HashSet<T>(256);
    getReadLock();
    try {
        for (KBucket<T> bucket : _buckets)
            result.addAll(bucket.getEntries());
    } finally {
        releaseReadLock();
    }
    return result;
}
/** * For every bucket that hasn't been updated in this long, * or isn't close to full, * generate a random key that would be a member of that bucket. * The returned keys may be searched for to "refresh" the buckets. * @return non-null, closest first */ public List<T> getExploreKeys(long age) { List<T> rv = new ArrayList<T>(_buckets.size()); long old = _context.clock().now() - age; getReadLock(); try { for (KBucket<T> b : _buckets) { int curSize = b.getKeyCount(); // Always explore the closest bucket if ((b.getRangeBegin() == 0) || (b.getLastChanged() < old || curSize < BUCKET_SIZE * 3 / 4)) rv.add(generateRandomKey(b)); } } finally { releaseReadLock(); } return rv; }
} finally { releaseReadLock(); } if (bucket != null) { if (bucket.add(peer)) { if (_log.shouldLog(Log.DEBUG)) _log.debug("Peer " + peer + " added to bucket " + bucket); if (_log.shouldLog(Log.DEBUG)) _log.debug("Splitting bucket " + bucket); split(bucket.getRangeBegin());
public boolean remove(T entry) { KBucket<T> kbucket; getReadLock(); try { kbucket = getBucket(entry); } finally { releaseReadLock(); } if (kbucket == null) // us return false; boolean removed = kbucket.remove(entry); return removed; }
/**
 * Like the parent trimmer, but refuse to touch any bucket that
 * was modified within the last five minutes.
 */
public boolean trim(KBucket<T> kbucket, T toAdd) {
    long cutoff = _ctx.clock().now() - 5*60*1000;
    if (kbucket.getLastChanged() > cutoff)
        return false;
    return super.trim(kbucket, toAdd);
}
}
/**
 * The current number of entries across all buckets.
 */
public int size() {
    int total = 0;
    getReadLock();
    try {
        for (KBucket<T> bucket : _buckets)
            total += bucket.getKeyCount();
    } finally {
        releaseReadLock();
    }
    return total;
}
/**
 * Empty every bucket and reset the range calculator's cached state.
 * NOTE(review): the buckets are mutated while holding only the READ
 * lock — presumably KBucket.clear() is internally thread-safe and
 * the bucket list structure itself is not modified; confirm before
 * changing this to the write lock.
 * @since 0.8.8
 */
public void clear() {
    getReadLock();
    try {
        for (KBucket<T> b : _buckets) {
            b.clear();
        }
    } finally {
        releaseReadLock();
    }
    _rangeCalc.clear();
}
/**
 * Feed every entry in every bucket to the collector.
 */
public void getAll(SelectionCollector<T> collector) {
    getReadLock();
    try {
        for (KBucket<T> bucket : _buckets)
            bucket.getEntries(collector);
    } finally {
        releaseReadLock();
    }
}
/**
 * The bucket number that contains this range number
 * Caller must hold read lock or write lock
 * NOTE(review): the javadoc promises -1 for "us", but the binary-search
 * branch returns Collections.binarySearch's negative insertion point
 * (-(insertion point) - 1), which is not necessarily -1 — presumably
 * callers only test for < 0; confirm before relying on exactly -1.
 * @return 0 to max-1 or -1 for us
 */
private int pickBucket(int range) {
    // If B is small, a linear search from back to front
    // is most efficient since most of the keys are at the end...
    // If B is larger, there's a lot of sub-buckets
    // of equal size to be checked so a binary search is better
    if (B_VALUE <= 3) {
        // Linear scan, highest bucket first.
        for (int i = _buckets.size() - 1; i >= 0; i--) {
            KBucket<T> b = _buckets.get(i);
            if (range >= b.getRangeBegin() && range <= b.getRangeEnd())
                return i;
        }
        return -1;
    } else {
        // Binary search using a single-value dummy bucket; the
        // comparator treats range overlap as equality, so the search
        // lands on the bucket whose range contains this value.
        KBucket<T> dummy = new DummyBucket<T>(range);
        return Collections.binarySearch(_buckets, dummy, new BucketComparator<T>());
    }
}
Set<T> entries = _buckets.get(i).getEntries();
int begin = bucket.getRangeBegin(); int end = bucket.getRangeEnd();
Set<T> entries = _buckets.get(i).getEntries(); for (T e : entries) { if (!toIgnore.contains(e)) { Set<T> entries = _buckets.get(i).getEntries(); for (T e : entries) { if (!toIgnore.contains(e)) {
/**
 * Verify that the buckets are contiguous, in ascending order,
 * and together cover the full range exactly.
 * @since 0.9.10
 */
public void testOrder() {
    int bits = Hash.HASH_LENGTH * 8;
    int errors = 0;
    // Each bucket must begin exactly one past the previous bucket's end.
    int lastEnd = -1;
    for (KBucket<Hash> b : set.getBuckets()) {
        if (b.getRangeBegin() != lastEnd + 1) {
            log.error("Out of order: " + b);
            errors++;
        }
        lastEnd = b.getRangeEnd();
    }
    // The final bucket must end at the top of the keyspace.
    if (lastEnd != (bits * (1 << (B-1))) - 1) {
        log.error("Out of order: last=" + lastEnd);
        errors++;
    }
    assertTrue(errors == 0);
}
/**
 * Verify that randomly generated keys for a bucket always map back
 * into that bucket's range.
 * @since 0.9.10
 */
public void testGenRandom() {
    int errors = 0;
    for (KBucket<Hash> b : set.getBuckets()) {
        int lo = b.getRangeBegin();
        int hi = b.getRangeEnd();
        for (int j = 0; j < 4000; j++) {
            Hash rand = set.generateRandomKey(b);
            int range = set.getRange(rand);
            if (lo <= range && range <= hi)
                continue;
            log.error("Generate random key failed range=" + range + " for " + rand + " meant for bucket " + b);
            errors++;
        }
    }
    assertTrue(errors == 0);
}