/**
 * Returns the index of the next value in the data structure or a negative value if the iterator
 * is exhausted.
 *
 * @return an <code>int</code> value — the index of the next occupied slot, or -1 when exhausted
 * @throws ConcurrentModificationException if the underlying collection's size has changed
 *         since the iterator was created.
 */
protected final int nextIndex() {
    if (_expectedSize != _hash.size()) {
        throw new ConcurrentModificationException();
    }

    // Scan backward from the current position, skipping empty (null/FREE) and
    // tombstoned (REMOVED) slots; the loop body is intentionally empty.
    Object[] set = _map._set;
    int i = _index;
    while (i-- > 0 && (set[i] == null || set[i] == TObjectHash.REMOVED || set[i] == TObjectHash.FREE)) ;
    return i;
}
/**
 * Returns the index of the next value in the data structure or a negative value if the iterator
 * is exhausted.
 *
 * @return an <code>int</code> value — the index of the next occupied slot, or -1 when exhausted
 * @throws ConcurrentModificationException if the underlying collection's size has changed
 *         since the iterator was created.
 */
protected final int nextIndex() {
    if (_expectedSize != _hash.size()) {
        throw new ConcurrentModificationException();
    }

    // Scan backward from the current position, skipping empty (null/FREE) and
    // tombstoned (REMOVED) slots; the loop body is intentionally empty.
    Object[] set = _map._set;
    int i = _index;
    while (i-- > 0 && (set[i] == null || set[i] == TObjectHash.REMOVED || set[i] == TObjectHash.FREE)) ;
    return i;
}
/**
 * Scans backward from the current position for the next occupied slot in the
 * underlying open-addressed table.
 *
 * @return an <code>int</code> value — the index of the next value, or a negative
 *         value (-1) if the iterator is exhausted
 * @throws ConcurrentModificationException if the underlying collection's size has
 *         changed since the iterator was created.
 */
protected final int nextIndex() {
    if (_expectedSize != _hash.size()) {
        throw new ConcurrentModificationException();
    }

    Object[] set = _map._set;
    // Walk down from just below the current index; a slot is occupied only when
    // it holds neither null nor one of the FREE/REMOVED sentinels.
    for (int slot = _index - 1; slot >= 0; slot--) {
        Object entry = set[slot];
        if (entry != null && entry != TObjectHash.REMOVED && entry != TObjectHash.FREE) {
            return slot;
        }
    }
    return -1;
}
/**
 * Scans backward from the current position for the next occupied slot in the
 * underlying open-addressed table.
 *
 * @return an <code>int</code> value — the index of the next value, or a negative
 *         value (-1) if the iterator is exhausted
 * @throws ConcurrentModificationException if the underlying collection's size has been modified
 *         since the iterator was created.
 */
protected final int nextIndex() {
    if (_expectedSize != _hash.size()) {
        throw new ConcurrentModificationException();
    }

    Object[] set = _object_hash._set;
    // Walk down from just below the current index; a slot is occupied unless it
    // holds one of the FREE/REMOVED sentinel objects.
    for (int slot = _index - 1; slot >= 0; slot--) {
        Object entry = set[slot];
        if (entry != TObjectHash.FREE && entry != TObjectHash.REMOVED) {
            return slot;
        }
    }
    return -1;
}
/**
 * Creates a TIterator over the given THash, positioned past the last slot and
 * recording the hash's current size so later structural modifications can be
 * detected by the iteration methods.
 *
 * @param hash the THash to iterate over
 */
public TIterator(THash hash) {
    _hash = hash;
    _expectedSize = hash.size();
    _index = hash.capacity();
}
/**
 * Ensures that this hashtable has sufficient capacity to hold <tt>desiredCapacity</tt>
 * <b>additional</b> elements without requiring a rehash. This is a tuning method you can
 * call before doing a large insert.
 *
 * @param desiredCapacity an <code>int</code> value — the number of extra elements expected
 */
public void ensureCapacity(int desiredCapacity) {
    int headroom = _maxSize - size();
    if (desiredCapacity <= headroom) {
        return; // already enough room; nothing to do
    }

    // Grow to the next prime large enough for the combined element count at the
    // configured load factor; +1 keeps at least one slot free for open addressing.
    int needed = (int) Math.ceil((desiredCapacity + size()) / _loadFactor) + 1;
    rehash(PrimeFinder.nextPrime(needed));
    computeMaxSize(capacity());
}
/**
 * Removes the last entry returned by the iterator. Invoking this method more than once for a
 * single entry will leave the underlying data structure in a confused state.
 *
 * @throws ConcurrentModificationException if the underlying collection's size has changed
 *         since the iterator was created.
 */
public void remove() {
    if (_expectedSize != _hash.size()) {
        throw new ConcurrentModificationException();
    }

    // Disable auto compaction during the remove. This is a workaround for bug 1642768.
    // The finally block guarantees compaction is re-enabled even if removeAt throws.
    try {
        _hash.tempDisableAutoCompaction();
        _hash.removeAt(_index);
    } finally {
        // NOTE(review): the false argument presumably skips an immediate compaction
        // pass on re-enable — confirm against THash.reenableAutoCompaction.
        _hash.reenableAutoCompaction(false);
    }

    // Track the removal locally so this iterator does not trip its own
    // concurrent-modification check on the next call.
    _expectedSize--;
}
/** * Compresses the hashtable to the minimum prime size (as defined by PrimeFinder) that will hold * all of the elements currently in the table. If you have done a lot of <tt>remove</tt> * operations and plan to do a lot of queries or insertions or iteration, it is a good idea to * invoke this method. Doing so will accomplish two things: * * <ol> <li> You'll free memory allocated to the table but no longer needed because of the * remove()s.</li> * * <li> You'll get better query/insert/iterator performance because there won't be any * <tt>REMOVED</tt> slots to skip over when probing for indices in the table.</li> </ol> */ public void compact() { // need at least one free spot for open addressing rehash(PrimeFinder.nextPrime((int) Math.ceil(size() / _loadFactor) + 1)); computeMaxSize(capacity()); // If auto-compaction is enabled, re-determine the compaction interval if (_autoCompactionFactor != 0) { computeNextAutoCompactionAmount(size()); } }