/** Returns the {@link HashConfig} this factory was created with. */
@Nonnull
@Override
public final HashConfig hashConfig() {
    HashConfig config = configWrapper.config();
    return config;
}
/**
 * Creates a factory backed by the given hash configuration.
 *
 * @param hashConf            the hash configuration to use
 * @param defaultExpectedSize the expected size applied when the caller supplies none
 */
AbstractHashFactory(HashConfig hashConf, int defaultExpectedSize) {
    this.hashConf = hashConf;
    this.configWrapper = new HashConfigWrapper(hashConf);
    this.defaultExpectedSize = defaultExpectedSize;
}
private int maxSize(int capacity) { // No sense in trying to rehash after each insertion // if the capacity is already reached the limit. return !isMaxCapacity(capacity) ? configWrapper.maxSize(capacity) : capacity - 1; }
/**
 * Same as {@link #chooseBetter(HashConfigWrapper, int, int, int, int, int)}.
 *
 * @see #chooseBetter(HashConfigWrapper, int, int, int, int, int)
 */
static long chooseBetter(HashConfigWrapper conf, long size, long desiredCapacity,
        long lesserCapacity, long greaterCapacity, long onFail) {
    assert 0L <= size;
    assert size < lesserCapacity && lesserCapacity < desiredCapacity;
    assert desiredCapacity < greaterCapacity;
    long distanceUp = greaterCapacity - desiredCapacity;
    long distanceDown = desiredCapacity - lesserCapacity;
    // Prefer the greater capacity when it is at least as close and within the max bound.
    if (distanceUp <= distanceDown && greaterCapacity <= conf.maxCapacity(size)) {
        return greaterCapacity;
    }
    // Otherwise fall back to the lesser capacity, if it still satisfies the min bound.
    if (lesserCapacity >= conf.minCapacity(size)) {
        return lesserCapacity;
    }
    return onFail;
}
/**
 * Attempts to rehash the table to a larger capacity derived from the grow policy.
 *
 * @return {@code true} if a rehash to a strictly larger capacity was performed
 */
private boolean tryRehashForExpansion() {
    int grownCapacity = nearestGreaterCapacity(config.grow(capacity()), currentOrdinal, false);
    // Expansion only makes sense when the computed capacity actually exceeds the current one.
    if (grownCapacity <= capacity()) {
        return false;
    }
    rehashTimer.start();
    rehash(grownCapacity);
    rehashCount++;
    rehashTimer.stop();
    return true;
}
/** For initial hash table construction and rehash to target load (shrink, tombstones purge). */
public static int capacity(HashConfigWrapper conf, int size, boolean doubleSizedArrays) {
    assert size >= 0 : "size must be non-negative";
    int targetCapacity = conf.targetCapacity(size);
    return capacity(conf, size, targetCapacity, doubleSizedArrays);
}
/**
 * Reports whether the configuration admits some valid capacity for an immutable hash
 * holding {@code size} elements.
 */
static boolean configIsSuitableForImmutableHash(HashConfigWrapper conf, int size) {
    assert size >= 0;
    int desiredCapacity = conf.targetCapacity(size);
    if (desiredCapacity <= MIN_CAPACITY) {
        return MIN_CAPACITY <= conf.maxCapacity(size);
    }
    if (desiredCapacity >= MAX_INT_CAPACITY) {
        return false;
    }
    if (isPowerOf2(desiredCapacity)) {
        return true;
    }
    // Try the surrounding powers of two; a negative result means neither fits the config.
    int floorPow2 = highestOneBit(desiredCapacity);
    int chosen = chooseBetter(conf, size, desiredCapacity, floorPow2, floorPow2 << 1, -1);
    return chosen > 0;
}
/**
 * Same as {@link #chooseBetter(HashConfigWrapper, int, int, int, int, int)}.
 *
 * @see #chooseBetter(HashConfigWrapper, int, int, int, int, int)
 */
static long chooseBetter(HashConfigWrapper conf, long size, long desiredCapacity,
        long lesserCapacity, long greaterCapacity, long onFail) {
    assert 0L <= size;
    assert size < lesserCapacity && lesserCapacity < desiredCapacity;
    assert desiredCapacity < greaterCapacity;
    boolean greaterIsCloser =
            greaterCapacity - desiredCapacity <= desiredCapacity - lesserCapacity;
    if (greaterIsCloser && greaterCapacity <= conf.maxCapacity(size)) {
        return greaterCapacity;
    }
    // The lesser capacity is used only if it still meets the configured minimum.
    return lesserCapacity >= conf.minCapacity(size) ? lesserCapacity : onFail;
}
/** Returns the capacity the table should be rehashed to on expansion. */
private int grownCapacity() {
    int grownTarget = configWrapper.grow(capacity());
    return nearestGreaterCapacity(grownTarget, size, doubleSizedArrays());
}
}
/**
 * Computes a capacity for initial hash table construction and for rehash back to the
 * target load (shrink, tombstones purge).
 */
public static int capacity(HashConfigWrapper conf, int size, boolean doubleSizedArrays) {
    assert size >= 0 : "size must be non-negative";
    int desired = conf.targetCapacity(size);
    return capacity(conf, size, desired, doubleSizedArrays);
}
/**
 * Checks that some capacity acceptable to {@code conf} exists for an immutable hash
 * of {@code size} elements.
 */
static boolean configIsSuitableForImmutableHash(HashConfigWrapper conf, int size) {
    assert size >= 0;
    int wanted = conf.targetCapacity(size);
    if (wanted <= MIN_CAPACITY) {
        // The smallest table suffices; it just has to be allowed by the max bound.
        return MIN_CAPACITY <= conf.maxCapacity(size);
    }
    if (wanted < MAX_INT_CAPACITY) {
        if (isPowerOf2(wanted)) {
            return true;
        }
        int below = highestOneBit(wanted);
        int above = below << 1;
        return chooseBetter(conf, size, wanted, below, above, -1) > 0;
    }
    return false;
}
/** Exposes the hash configuration backing this instance. */
@Nonnull
@Override
public final HashConfig hashConfig() {
    return this.configWrapper.config();
}
private int maxSize(int capacity) { // No sense in trying to rehash after each insertion // if the capacity is already reached the limit. return !isMaxCapacity(capacity) ? configWrapper.maxSize(capacity) : capacity - 1; }
/**
 * Constructor.
 *
 * @param hashConf            hash configuration backing this factory
 * @param defaultExpectedSize fallback expected size used when callers give none
 */
AbstractHashFactory(HashConfig hashConf, int defaultExpectedSize) {
    this.defaultExpectedSize = defaultExpectedSize;
    this.hashConf = hashConf;
    configWrapper = new HashConfigWrapper(hashConf);
}
assert desiredCapacity < greaterCapacity;
// Prefer the greater capacity when it is at least as close to the desired one
// and does not exceed the configured maximum.
if (greaterCapacity - desiredCapacity <= desiredCapacity - lesserCapacity
        && greaterCapacity <= conf.maxCapacity(size)) {
    return greaterCapacity;
}
// Otherwise take the lesser capacity if it meets the configured minimum, else fail.
return lesserCapacity >= conf.minCapacity(size) ? lesserCapacity : onFail;
/** Computes the next capacity for the table once it must expand. */
private int grownCapacity() {
    int currentCapacity = capacity();
    int grownSize = configWrapper.grow(currentCapacity);
    return nearestGreaterCapacity(grownSize, size, doubleSizedArrays());
}
}
// NOTE(review): this definition appears truncated in this chunk — the body declares
// locals (lesserCapacity, greaterCapacity, simpleArrays) but the remainder of the
// method is not visible here; confirm against the full source before editing.
// Presumably the full variant of the capacity computation (see the delegating
// overload elsewhere in this file) — TODO confirm.
public static int capacity(HashConfigWrapper conf, int size, boolean doubleSizedArrays) { assert size >= 0 : "size must be non-negative"; int desiredCapacity = conf.targetCapacity(size); int lesserCapacity, greaterCapacity; boolean simpleArrays;
@Override public boolean equals(Object obj) { if (obj == null) return false; if (obj == this) return true; assert obj.getClass() != HashConfig.class; // dangerous confusion return obj instanceof HashConfigWrapper && config.equals(((HashConfigWrapper) obj).config()); }
private int maxSize(int capacity) { // No sense in trying to rehash after each insertion // if the capacity is already reached the limit. return !isMaxCapacity(capacity) ? configWrapper.maxSize(capacity) : capacity - 1; }
/**
 * Creates a map initialized with the given hash configuration and expected size.
 */
KolobokeLongEntityMap(HashConfig hashConfig, int expectedSize) {
    HashConfigWrapper wrapper = new HashConfigWrapper(hashConfig);
    this.init(wrapper, expectedSize);
}