/**
 * Sanity-checks a block of IDs, collecting them into a set for duplicate detection.
 */
private void checkBlock(IDBlock block) {
    // Guard against pathologically large blocks before materialising the id set.
    assertTrue(blockSize < 10000);
    final LongSet seen = new LongHashSet((int) blockSize);
    checkBlock(block, seen);
}
/**
 * Adds every element produced by the given iterable to this set.
 *
 * @return Returns the number of elements actually added as a result of this
 *         call (not previously present in the set).
 */
public int addAll(Iterable<? extends LongCursor> iterable) {
    int inserted = 0;
    for (final LongCursor cursor : iterable) {
        // add() reports whether the value was newly inserted.
        if (add(cursor.value)) {
            inserted++;
        }
    }
    return inserted;
}
/**
 * Create a set from a variable number of arguments or an array of
 * <code>long</code>. The elements are copied from the argument to the
 * internal buffer.
 */
public static LongHashSet from(long... elements) {
    // Pre-size so the bulk add never triggers a rehash.
    final LongHashSet result = new LongHashSet(elements.length);
    result.addAll(elements);
    return result;
}
/**
 * Adds all elements from the given list (vararg) to this set.
 *
 * @return Returns the number of elements actually added as a result of this
 *         call (not previously present in the set).
 */
public final int addAll(long... elements) {
    // Reserve space up front so at most one resize occurs.
    ensureCapacity(elements.length);
    int inserted = 0;
    for (final long element : elements) {
        inserted += add(element) ? 1 : 0;
    }
    return inserted;
}
/**
 * Adds all elements from the given {@link LongContainer} to this set.
 *
 * @return Returns the number of elements actually added as a result of this
 *         call (not previously present in the set).
 */
public int addAll(LongContainer container) {
    // Size the buffers once, then delegate to the iterable overload.
    final int expected = container.size();
    ensureCapacity(expected);
    return addAll((Iterable<? extends LongCursor>) container);
}
/**
 * Counts the elements common to both sets. Neither argument is modified;
 * the work is done on a copy of the first set.
 */
public static long intersection(LongHashSet targets1, LongHashSet targets2) {
    final LongHashSet common = new LongHashSet(targets1);
    common.retainAll(targets2);
    return common.size();
}
/**
 * This method is invoked when there is a new key to be inserted into
 * the buffer but there is not enough empty slots to do so.
 *
 * New buffers are allocated. If this succeeds, we know we can proceed
 * with rehashing so we assign the pending element to the previous buffer
 * (possibly violating the invariant of having at least one empty slot)
 * and rehash all keys, substituting new buffers at the end.
 *
 * @param slot       the free slot in the OLD key array where the pending key goes
 * @param pendingKey the key awaiting insertion; must not be the empty sentinel (0)
 */
protected void allocateThenInsertThenRehash(int slot, long pendingKey) {
    // Preconditions: we are exactly at the resize threshold, the target slot
    // is free, and the pending key is not the reserved empty marker.
    assert assigned == resizeAt && (( keys[slot]) == 0) && !((pendingKey) == 0);

    // Try to allocate new buffers first. If we OOM, we leave in a consistent state.
    final long[] prevKeys = this.keys;
    allocateBuffers(nextBufferSize(mask + 1, size(), loadFactor));
    assert this.keys.length > prevKeys.length;

    // We have succeeded at allocating new data so insert the pending key/value at
    // the free slot in the old arrays before rehashing.
    prevKeys[slot] = pendingKey;

    // Rehash old keys, including the pending key.
    rehash(prevKeys);
}
/**
 * {@inheritDoc}
 */
@Override
public boolean isEmpty() {
    return 0 == size();
}
/**
 * Ensure this container can hold at least the
 * given number of elements without resizing its buffers.
 *
 * @param expectedElements The total number of elements, inclusive.
 */
@Override
public void ensureCapacity(int expectedElements) {
    // Resize only when the request exceeds the current threshold or the
    // buffers were never allocated (lazy initialization).
    if (expectedElements > resizeAt || keys == null) {
        final long[] prevKeys = this.keys;
        // allocateBuffers replaces this.keys; prevKeys retains the old array
        // so existing entries can be re-inserted below.
        allocateBuffers(minBufferSize(expectedElements, loadFactor));
        if (prevKeys != null && !isEmpty()) {
            rehash(prevKeys);
        }
    }
}
/**
 * {@inheritDoc}
 */
@Override
public int removeAll(LongPredicate predicate) {
    int before = size();

    // The empty key (0) is tracked by a flag, not stored in the array,
    // so it must be tested separately.
    if (hasEmptyKey) {
        if (predicate.apply(0L)) {
            hasEmptyKey = false;
        }
    }

    final long[] keys = this.keys;
    for (int slot = 0, max = this.mask; slot <= max;) {
        long existing;
        if (!((existing = keys[slot]) == 0)) {
            if (predicate.apply(existing)) {
                // shiftConflictingKeys may move a later entry into this slot,
                // so do NOT advance — re-examine the same slot.
                shiftConflictingKeys(slot);
                continue; // Repeat the check for the same slot i (shifted).
            }
        }
        slot++;
    }

    // size() reflects the removals performed above.
    return before - size();
}
/**
 * Serialize the list of terms to the {@link StreamOutput}.
 * <br>
 * Given the low performance of {@link org.elasticsearch.common.io.stream.BytesStreamOutput} when writing a large number
 * of longs (5 to 10 times slower than writing directly to a byte[]), we use a small buffer of 8kb
 * to optimise the throughput. 8kb seems to be the optimal buffer size, larger buffer size did not improve
 * the throughput.
 *
 * @param out the output
 * @throws IOException if writing to the underlying stream fails
 */
@Override
public void writeTo(StreamOutput out) throws IOException {
    // Encode flag
    out.writeBoolean(this.isPruned());

    // Encode size of list
    out.writeInt(set.size());

    // Encode longs
    // NOTE: BytesRef.offset is repurposed here as the write cursor, and
    // BytesRef.length holds the capacity (the backing array's size).
    BytesRef buffer = new BytesRef(new byte[1024 * 8]);
    Iterator<LongCursor> it = set.iterator();
    while (it.hasNext()) {
        Bytes.writeLong(buffer, it.next().value);
        // Flush when the cursor reaches capacity, then rewind.
        if (buffer.offset == buffer.length) {
            out.write(buffer.bytes, 0, buffer.offset);
            buffer.offset = 0;
        }
    }

    // flush the remaining bytes from the buffer
    out.write(buffer.bytes, 0, buffer.offset);
}
/**
 * Membership test returning an int flag: 1 when {@code value} is present in
 * the set, 0 otherwise. A zero {@code isSet} short-circuits to 0.
 */
public int isContained(int isSet, long value) {
    if (isSet != 0 && longSet.contains(value)) {
        return 1;
    }
    return 0;
}
}
/**
 * New instance copying elements from another {@link LongContainer}.
 */
public LongHashSet(LongContainer container) {
    // Pre-size for the source's element count so the copy avoids rehashing.
    this(container.size());
    addAll(container);
}
@Override protected void allocateBuffers(int arraySize) { long newMemSize = (arraySize + 1) * 8l; // array size + emptyElementSlot long oldMemSize = keys == null ? 0 : keys.length * 8l; // Adjust the breaker with the new memory size breaker.addEstimateBytesAndMaybeBreak(newMemSize, "<terms_set>"); try { // Allocate the new buffer super.allocateBuffers(arraySize); // Adjust the breaker by removing old memory size breaker.addWithoutBreaking(-oldMemSize); } catch (BufferAllocationException e) { // If the allocation failed, remove breaker.addWithoutBreaking(-newMemSize); throw e; } }
/**
 * Returns the number of elements present in both input sets. The inputs are
 * left untouched; a temporary copy of the first set is intersected in place.
 */
public static long intersection(LongHashSet targets1, LongHashSet targets2) {
    final LongHashSet overlap = new LongHashSet(targets1);
    overlap.retainAll(targets2);
    final int matches = overlap.size();
    return matches;
}
/**
 * Create a set from a variable number of arguments or an array of
 * <code>long</code>. The elements are copied from the argument to the
 * internal buffer.
 */
public static LongHashSet from(long... elements) {
    // Sized to the input so the copy completes without resizing.
    final LongHashSet created = new LongHashSet(elements.length);
    created.addAll(elements);
    return created;
}
/**
 * This method is invoked when there is a new key to be inserted into
 * the buffer but there is not enough empty slots to do so.
 *
 * New buffers are allocated. If this succeeds, we know we can proceed
 * with rehashing so we assign the pending element to the previous buffer
 * (possibly violating the invariant of having at least one empty slot)
 * and rehash all keys, substituting new buffers at the end.
 *
 * @param slot       free slot in the OLD key array that receives the pending key
 * @param pendingKey key waiting to be inserted; never the empty sentinel (0)
 */
protected void allocateThenInsertThenRehash(int slot, long pendingKey) {
    // Preconditions: resize threshold reached, target slot free, and the
    // pending key is not the reserved empty marker.
    assert assigned == resizeAt && (( keys[slot]) == 0) && !((pendingKey) == 0);

    // Try to allocate new buffers first. If we OOM, we leave in a consistent state.
    final long[] prevKeys = this.keys;
    allocateBuffers(nextBufferSize(mask + 1, size(), loadFactor));
    assert this.keys.length > prevKeys.length;

    // We have succeeded at allocating new data so insert the pending key/value at
    // the free slot in the old arrays before rehashing.
    prevKeys[slot] = pendingKey;

    // Rehash old keys, including the pending key.
    rehash(prevKeys);
}
/**
 * {@inheritDoc}
 */
@Override
public long [] toArray() {
    final long[] result = new long[size()];
    int cursor = 0;

    // The empty key (0) is kept as a flag rather than stored in the array.
    if (hasEmptyKey) {
        result[cursor++] = 0L;
    }

    final long[] buffer = this.keys;
    for (int slot = 0, last = mask; slot <= last; slot++) {
        final long value = buffer[slot];
        if (value != 0) {
            result[cursor++] = value;
        }
    }
    return result;
}