/**
 * Schedules a batch of dependency types for propagation to {@code consumer}.
 *
 * Empty batches are dropped, single-element batches are delegated to the
 * single-type overload, and larger batches are either consumed immediately
 * (when recursion depth and fan-out limits allow) or buffered on the
 * consumer's pending set for later processing.
 *
 * @param consumer the transition that should receive the types
 * @param types    the dependency types to deliver (may be empty)
 */
void schedulePropagation(Transition consumer, DependencyType[] types) {
    int count = types.length;
    if (count == 0) {
        return;
    }
    if (count == 1) {
        schedulePropagation(consumer, types[0]);
        return;
    }

    // Eager delivery is only safe while the recursion stack is shallow and the
    // destination has not already been propagated to too many times.
    boolean consumeEagerly = consumer.pendingTypes == null
            && propagationDepth < PROPAGATION_STACK_THRESHOLD
            && consumer.pointsToDomainOrigin()
            && consumer.destination.propagateCount < 20;

    if (consumeEagerly) {
        ++propagationDepth;
        consumer.consume(types);
        --propagationDepth;
        return;
    }

    // Defer: register the consumer once, then accumulate the type indices.
    if (consumer.pendingTypes == null) {
        pendingTransitions.add(consumer);
        consumer.pendingTypes = new IntHashSet(Math.max(50, count));
    }
    consumer.pendingTypes.ensureCapacity(count + consumer.pendingTypes.size());
    for (DependencyType type : types) {
        consumer.pendingTypes.add(type.index);
    }
}
/**
 * {@inheritDoc}
 */
@Override
public boolean isEmpty() {
    // Empty exactly when no keys are stored.
    return 0 == size();
}
/**
 * {@inheritDoc}
 */
@Override
public int[] toArray() {
    final int[] result = new int[size()];
    int count = 0;

    // The reserved "empty" key (0) is tracked out-of-band by a flag.
    if (hasEmptyKey) {
        result[count++] = 0;
    }

    // Copy every occupied slot (non-zero key) from the backing buffer.
    final int[] buffer = this.keys;
    for (int i = 0; i <= mask; i++) {
        final int key = buffer[i];
        if (key != 0) {
            result[count++] = key;
        }
    }
    return result;
}
/**
 * Return true if all keys of some other container exist in this container
 * (and the containers hold the same number of keys).
 */
private boolean sameKeys(IntSet other) {
    if (size() != other.size()) {
        return false;
    }
    // Equal sizes plus full containment implies equal key sets.
    for (IntCursor cursor : other) {
        if (!contains(cursor.value)) {
            return false;
        }
    }
    return true;
}
/** * {@inheritDoc} */ @Override public int removeAll(IntPredicate predicate) { int before = size(); if (hasEmptyKey) { if (predicate.apply(0)) { hasEmptyKey = false; } } final int[] keys = this.keys; for (int slot = 0, max = this.mask; slot <= max;) { int existing; if (!((existing = keys[slot]) == 0)) { if (predicate.apply(existing)) { shiftConflictingKeys(slot); continue; // Repeat the check for the same slot i (shifted). } } slot++; } return before - size(); }
/**
 * Allocate new internal buffers. This method attempts to allocate
 * and assign internal buffers atomically (either allocations succeed or not).
 */
protected void allocateBuffers(int arraySize) {
    // The buffer size must be a power of two so (hash & mask) indexing works.
    assert Integer.bitCount(arraySize) == 1;

    // Compute new hash mixer candidate before expanding.
    final int newKeyMixer = this.orderMixer.newKeyMixer(arraySize);

    // Ensure no change is done if we hit an OOM.
    int[] prevKeys = this.keys;
    try {
        // One extra slot is reserved beyond the indexable range; presumably for
        // the special "empty" key handling — see the rest of the class.
        int emptyElementSlot = 1;
        this.keys = (new int [arraySize + emptyElementSlot]);
    } catch (OutOfMemoryError e) {
        // Roll back to the previous buffer so the set remains usable.
        this.keys = prevKeys;
        throw new BufferAllocationException(
            "Not enough memory to allocate buffers for rehashing: %,d -> %,d", e,
            this.keys == null ? 0 : size(),
            arraySize);
    }

    // Only mutate the remaining state after the allocation has succeeded.
    this.resizeAt = expandAtCount(arraySize, loadFactor);
    this.keyMixer = newKeyMixer;
    this.mask = arraySize - 1;
}
/**
 * This method is invoked when there is a new key to be inserted into
 * the buffer but there is not enough empty slots to do so.
 *
 * New buffers are allocated. If this succeeds, we know we can proceed
 * with rehashing so we assign the pending element to the previous buffer
 * (possibly violating the invariant of having at least one empty slot)
 * and rehash all keys, substituting new buffers at the end.
 */
protected void allocateThenInsertThenRehash(int slot, int pendingKey) {
    // Preconditions: we are exactly at the resize threshold, the target slot
    // is free, and the pending key is not the reserved "empty" sentinel (0).
    assert assigned == resizeAt && (( keys[slot]) == 0) && !((pendingKey) == 0);

    // Try to allocate new buffers first. If we OOM, we leave in a consistent state.
    final int[] prevKeys = this.keys;
    allocateBuffers(nextBufferSize(mask + 1, size(), loadFactor));
    assert this.keys.length > prevKeys.length;

    // We have succeeded at allocating new data so insert the pending key/value at
    // the free slot in the old arrays before rehashing.
    prevKeys[slot] = pendingKey;

    // Rehash old keys, including the pending key.
    rehash(prevKeys);
}
/**
 * {@inheritDoc}
 */
@Override
public boolean isEmpty() {
    // The set holds no keys exactly when its size is zero.
    final int currentSize = size();
    return currentSize == 0;
}
@Override
public int size() {
    // Delegate directly to the backing set.
    return set.size();
}
/**
 * {@inheritDoc}
 */
@Override
public int[] toArray() {
    int written = 0;
    final int[] out = new int[size()];

    // Emit the out-of-band "empty" key (0) first, if present.
    if (hasEmptyKey) {
        out[written++] = 0;
    }

    // Then every non-zero key stored in the open-addressing buffer.
    final int[] localKeys = this.keys;
    final int lastSlot = mask;
    for (int slot = 0; slot <= lastSlot; slot++) {
        final int candidate = localKeys[slot];
        if (candidate != 0) {
            out[written++] = candidate;
        }
    }
    return out;
}
/**
 * Micro-benchmark: fills a set with ten million ints, then copies it twice
 * (once presized, once not) and prints the elapsed seconds and final size.
 */
public static void main(String[] args) {
    final long startMillis = System.currentTimeMillis();

    final IntHashSet source = new com.carrotsearch.hppc.IntHashSet();
    for (int i = 10000000; i-- != 0;) {
        source.add(i);
    }

    // Copy with a size hint, then again without one.
    IntHashSet copy = new com.carrotsearch.hppc.IntHashSet(source.size());
    copy.addAll(source);
    copy = new com.carrotsearch.hppc.IntHashSet();
    copy.addAll(source);

    final long elapsedMillis = System.currentTimeMillis() - startMillis;
    System.out.println(elapsedMillis / 1000.0);
    System.out.println(copy.size());
}
}
/**
 * Return true if all keys of some other container exist in this container.
 * Containers of different sizes can never match.
 */
private boolean sameKeys(IntSet other) {
    final boolean sizesMatch = other.size() == size();
    if (!sizesMatch) {
        return false;
    }
    for (IntCursor it : other) {
        if (!contains(it.value)) {
            return false;
        }
    }
    return true;
}
/** * {@inheritDoc} */ @Override public int removeAll(IntPredicate predicate) { int before = size(); if (hasEmptyKey) { if (predicate.apply(0)) { hasEmptyKey = false; } } final int[] keys = this.keys; for (int slot = 0, max = this.mask; slot <= max;) { int existing; if (!((existing = keys[slot]) == 0)) { if (predicate.apply(existing)) { shiftConflictingKeys(slot); continue; // Repeat the check for the same slot i (shifted). } } slot++; } return before - size(); }
/**
 * Pops the connected component rooted at {@code nodeId} off the traversal
 * stack and records it.
 *
 * Nodes are popped (and their on-stack flags cleared) until {@code nodeId}
 * itself is reached, inclusive; the collected set is appended to
 * {@code connectedComponents} and the min/max component-size statistics are
 * updated.
 *
 * @param nodeId the root node terminating the component on the stack
 */
private void relax(int nodeId) {
    IntHashSet component = new IntHashSet();
    int member;
    do {
        member = stack.pop();
        onStack.clear(member);
        component.add(member);
    } while (member != nodeId);
    connectedComponents.add(component);

    // Track the smallest and largest component seen so far.
    final int size = component.size();
    minSetSize = Math.min(minSetSize, size);
    maxSetSize = Math.max(maxSetSize, size);
}
/**
 * Allocate new internal buffers. This method attempts to allocate
 * and assign internal buffers atomically (either allocations succeed or not).
 */
protected void allocateBuffers(int arraySize) {
    // Power-of-two size is required for (hash & mask) slot indexing.
    assert Integer.bitCount(arraySize) == 1;

    // Compute new hash mixer candidate before expanding.
    final int newKeyMixer = this.orderMixer.newKeyMixer(arraySize);

    // Ensure no change is done if we hit an OOM.
    int[] prevKeys = this.keys;
    try {
        // One extra slot past the indexable range; presumably reserved for the
        // special "empty" key handling — see the rest of the class.
        int emptyElementSlot = 1;
        this.keys = (new int [arraySize + emptyElementSlot]);
    } catch (OutOfMemoryError e) {
        // Restore the old buffer so the set stays consistent and usable.
        this.keys = prevKeys;
        throw new BufferAllocationException(
            "Not enough memory to allocate buffers for rehashing: %,d -> %,d", e,
            this.keys == null ? 0 : size(),
            arraySize);
    }

    // State below is only touched once the allocation has succeeded.
    this.resizeAt = expandAtCount(arraySize, loadFactor);
    this.keyMixer = newKeyMixer;
    this.mask = arraySize - 1;
}
/**
 * Pops the connected component rooted at {@code nodeId} off the traversal
 * stack and records it.
 *
 * Nodes are popped (and their on-stack flags cleared) until {@code nodeId}
 * itself is reached, inclusive; the collected set is appended to
 * {@code connectedComponents} and the min/max component-size statistics are
 * updated.
 *
 * @param nodeId the root node terminating the component on the stack
 */
private void relax(int nodeId) {
    IntHashSet component = new IntHashSet();
    int member;
    do {
        member = stack.pop();
        onStack.clear(member);
        component.add(member);
    } while (member != nodeId);
    connectedComponents.add(component);

    // Track the smallest and largest component seen so far.
    final int size = component.size();
    minSetSize = Math.min(minSetSize, size);
    maxSetSize = Math.max(maxSetSize, size);
}
/**
 * This method is invoked when there is a new key to be inserted into
 * the buffer but there is not enough empty slots to do so.
 *
 * New buffers are allocated. If this succeeds, we know we can proceed
 * with rehashing so we assign the pending element to the previous buffer
 * (possibly violating the invariant of having at least one empty slot)
 * and rehash all keys, substituting new buffers at the end.
 */
protected void allocateThenInsertThenRehash(int slot, int pendingKey) {
    // Preconditions: at the resize threshold, target slot free, and pending
    // key distinct from the reserved "empty" sentinel (0).
    assert assigned == resizeAt && (( keys[slot]) == 0) && !((pendingKey) == 0);

    // Try to allocate new buffers first. If we OOM, we leave in a consistent state.
    final int[] prevKeys = this.keys;
    allocateBuffers(nextBufferSize(mask + 1, size(), loadFactor));
    assert this.keys.length > prevKeys.length;

    // We have succeeded at allocating new data so insert the pending key/value at
    // the free slot in the old arrays before rehashing.
    prevKeys[slot] = pendingKey;

    // Rehash old keys, including the pending key.
    rehash(prevKeys);
}
/**
 * Serializes this term list into a freshly allocated {@link BytesRef}:
 * encoding ordinal, pruned flag, element count, then each term as a vint.
 */
@Override
public BytesRef writeToBytes() {
    long start = System.nanoTime();
    int size = set.size();
    // Worst-case allocation: header plus up to 5 bytes per term — assumes
    // writeVInt emits at most 5 bytes per int and that HEADER_SIZE covers the
    // encoding int, flag byte and size int below — TODO confirm.
    BytesRef bytesRef = new BytesRef(new byte[HEADER_SIZE + size * 5]);
    // Encode encoding type
    Bytes.writeInt(bytesRef, this.getEncoding().ordinal());
    // Encode flag
    bytesRef.bytes[bytesRef.offset++] = (byte) (this.isPruned() ? 1 : 0);
    // Encode size of list
    Bytes.writeInt(bytesRef, size);
    // Encode ints
    for (IntCursor i : set) {
        Bytes.writeVInt(bytesRef, i.value);
    }
    logger.debug("Serialized {} terms - took {} ms", this.size(), (System.nanoTime() - start) / 1000000);
    // The writers above advanced offset; expose [0, offset) as the valid range.
    bytesRef.length = bytesRef.offset;
    bytesRef.offset = 0;
    return bytesRef;
}
/**
 * Serialize the list of terms to the {@link StreamOutput}.
 * <br>
 * Given the low performance of {@link org.elasticsearch.common.io.stream.BytesStreamOutput} when writing a large number
 * of longs (5 to 10 times slower than writing directly to a byte[]), we use a small buffer of 8kb
 * to optimise the throughput. 8kb seems to be the optimal buffer size, larger buffer size did not improve
 * the throughput.
 *
 * @param out the output
 * @throws IOException if writing to the stream fails
 */
@Override
public void writeTo(StreamOutput out) throws IOException {
    // Encode flag
    out.writeBoolean(this.isPruned());
    // Encode size of list
    out.writeInt(set.size());
    // Encode ints
    BytesRef buffer = new BytesRef(new byte[1024 * 8]);
    Iterator<IntCursor> it = set.iterator();
    while (it.hasNext()) {
        Bytes.writeVInt(buffer, it.next().value);
        // Flush once fewer than 5 bytes remain — assumes writeVInt emits at
        // most 5 bytes per int (standard vint encoding) — TODO confirm.
        if (buffer.offset > buffer.bytes.length - 5) {
            out.write(buffer.bytes, 0, buffer.offset);
            buffer.offset = 0;
        }
    }
    // flush the remaining bytes from the buffer
    out.write(buffer.bytes, 0, buffer.offset);
}