@Override public void copyNormalizedKey(MemorySegment target, int offset, int len) { // see IntValue for an explanation of the logic if (len == 8) { // default case, full normalized key target.putLongBigEndian(offset, value - Long.MIN_VALUE); } else if (len <= 0) { } else if (len < 8) { long value = this.value - Long.MIN_VALUE; for (int i = 0; len > 0; len--, i++) { target.put(offset + i, (byte) (value >>> ((7-i)<<3))); } } else { target.putLongBigEndian(offset, value - Long.MIN_VALUE); for (int i = 8; i < len; i++) { target.put(offset + i, (byte) 0); } } }
/**
 * Creates the global pool and eagerly allocates all of its buffers.
 *
 * @param numBuffers The number of memory segments to pre-allocate.
 * @param bufferSize The size, in bytes, of each segment's backing array.
 */
public GlobalBufferPool(int numBuffers, int bufferSize) {
	this.numBuffers = numBuffers;
	this.bufferSize = bufferSize;
	// allocate every segment up front so later requests never hit the GC
	this.buffers = new ArrayBlockingQueue<MemorySegment>(numBuffers);
	for (int remaining = numBuffers; remaining > 0; remaining--) {
		this.buffers.add(new MemorySegment(new byte[bufferSize]));
	}
	LOG.info(String.format("Initialized global buffer pool with %d buffers (%d bytes each).", this.numBuffers, this.bufferSize));
}
/**
 * Compares two logical records by their normalized key bytes. Records are laid
 * out back-to-back in fixed-size slots across the sort buffer segments.
 */
@Override
public int compare(int i, int j) {
	// locate each record: segment index and byte offset within that segment
	final MemorySegment segI = this.sortBuffer.get(i / this.recordsPerSegment);
	final MemorySegment segJ = this.sortBuffer.get(j / this.recordsPerSegment);
	final int offI = (i % this.recordsPerSegment) * this.recordSize;
	final int offJ = (j % this.recordsPerSegment) * this.recordSize;
	final int cmp = MemorySegment.compare(segI, segJ, offI, offJ, this.numKeyBytes);
	// invert the result when the normalized key order is the reverse of the logical order
	return this.useNormKeyUninverted ? cmp : -cmp;
}
/**
 * Writes the given segment as one block to the underlying channel writer,
 * stamping the block header first: magic number, flags (marking whether this
 * is the last block), and the number of bytes used in the segment.
 *
 * @param segment The segment to write out.
 * @param writePosition The write position within the segment, i.e. the end of the used bytes (header included).
 * @param lastSegment True, if this is the final block of the view.
 * @throws IOException Thrown, if the block writer encounters an I/O problem.
 */
private final void writeSegment(MemorySegment segment, int writePosition, boolean lastSegment) throws IOException {
	segment.putShort(0, HEADER_MAGIC_NUMBER);
	segment.putShort(HEADER_FLAGS_OFFSET, lastSegment ? FLAG_LAST_BLOCK : 0);
	segment.putInt(HEAD_BLOCK_LENGTH_OFFSET, writePosition);
	this.writer.writeBlock(segment);
	// running total of payload bytes flushed before the current segment (header excluded)
	this.bytesBeforeSegment += writePosition - HEADER_LENGTH;
}
// NOTE(review): this brace appears to close the enclosing class, which lies outside this excerpt
}
@Override public void putNormalizedKey(Integer iValue, MemorySegment target, int offset, int numBytes) { int value = iValue.intValue() - Integer.MIN_VALUE; // see IntValue for an explanation of the logic if (numBytes == 4) { // default case, full normalized key target.putIntBigEndian(offset, value); } else if (numBytes <= 0) { } else if (numBytes < 4) { for (int i = 0; numBytes > 0; numBytes--, i++) { target.put(offset + i, (byte) (value >>> ((3-i)<<3))); } } else { target.putLongBigEndian(offset, value); for (int i = 4; i < numBytes; i++) { target.put(offset + i, (byte) 0); } } }
/**
 * This type contributes no distinguishing key bytes; the requested range is
 * simply filled with zeros so all instances compare as equal.
 */
@Override
public void copyNormalizedKey(MemorySegment target, int offset, int len) {
	final int end = offset + len;
	for (int pos = offset; pos < end; pos++) {
		target.put(pos, (byte) 0);
	}
}
private void initTable(int numBuckets, byte numPartitions) { final int bucketsPerSegment = this.bucketsPerSegmentMask + 1; final int numSegs = (numBuckets >>> this.bucketsPerSegmentBits) + ( (numBuckets & this.bucketsPerSegmentMask) == 0 ? 0 : 1); final MemorySegment[] table = new MemorySegment[numSegs]; // go over all segments that are part of the table for (int i = 0, bucket = 0; i < numSegs && bucket < numBuckets; i++) { final MemorySegment seg = getNextBuffer(); // go over all buckets in the segment for (int k = 0; k < bucketsPerSegment && bucket < numBuckets; k++, bucket++) { final int bucketOffset = k * HASH_BUCKET_SIZE; // compute the partition that the bucket corresponds to final byte partition = assignPartition(bucket, numPartitions); // initialize the header fields seg.put(bucketOffset + HEADER_PARTITION_OFFSET, partition); seg.putInt(bucketOffset + HEADER_COUNT_OFFSET, 0); seg.putLong(bucketOffset + HEADER_FORWARD_OFFSET, BUCKET_FORWARD_POINTER_NOT_SET); } table[i] = seg; } this.buckets = table; this.numBuckets = numBuckets; }
/**
 * Bulk get method. Copies {@code dst.length} bytes from the specified position into
 * the destination array, filling it completely.
 *
 * @param index The position at which the first byte will be read.
 * @param dst The array into which the memory will be copied.
 *
 * @throws IndexOutOfBoundsException Thrown, if the index is negative, or so large that the data between the
 *                                   index and the memory segment end is not enough to fill the destination array.
 */
public final void get(int index, byte[] dst) {
	// delegate to the ranged variant, covering the whole destination array
	get(index, dst, 0, dst.length);
}
/**
 * Determines how many bytes of the given segment hold valid data, by reading the
 * block-length field from the header written by the channel writer output view.
 *
 * @param segment The segment whose header is inspected.
 * @return The number of valid bytes in the segment, as recorded in its header.
 */
@Override
protected int getLimitForSegment(MemorySegment segment) {
	return segment.getInt(ChannelWriterOutputView.HEAD_BLOCK_LENGTH_OFFSET);
}
/**
 * Reads the record pointer stored in the index entry at the given logical position.
 *
 * @param logicalPosition The zero-based position of the record in the sort index.
 * @return The pointer stored at the head of that index entry.
 * @throws IndexOutOfBoundsException Thrown, if the position is negative or not less
 *                                   than the number of records.
 */
private final long readPointer(int logicalPosition) {
	// FIX: was the non-short-circuit bitwise `|`; `||` is the idiomatic boolean
	// operator and skips the second comparison when the first already holds.
	if (logicalPosition < 0 || logicalPosition >= this.numRecords) {
		throw new IndexOutOfBoundsException();
	}
	// map the logical position to (segment, offset) in the paged sort index
	final int bufferNum = logicalPosition / this.indexEntriesPerSegment;
	final int segmentOffset = logicalPosition % this.indexEntriesPerSegment;
	return this.sortIndex.get(bufferNum).getLong(segmentOffset * this.indexEntrySize);
}
// NOTE(review): mid-method excerpt of a hash-bucket iterator advance — the enclosing
// method signature, the declaration of `overflowSegNum`, and at least one closing
// brace lie outside this view; the braces below are unbalanced as shown.
// read the hash code of the current entry and advance within the bucket
final int thisCode = this.bucket.getInt(this.posInSegment);
this.posInSegment += HASH_CODE_LEN;
// read the record pointer belonging to the current entry
final long pointer = this.bucket.getLong(this.bucketInSegmentOffset + BUCKET_POINTER_START_OFFSET + (this.numInSegment * POINTER_LEN));
this.numInSegment++;
// follow the overflow chain: the forward pointer links to the next overflow bucket
final long forwardPointer = this.bucket.getLong(this.bucketInSegmentOffset + HEADER_FORWARD_OFFSET);
if (forwardPointer == BUCKET_FORWARD_POINTER_NOT_SET) {
	// end of chain — no further bucket to scan
	return null;
// presumably `overflowSegNum` is decoded from the forward pointer's upper 32 bits
// just above/below this excerpt (offset uses the lower 32 bits) — TODO confirm
this.bucket = this.overflowSegments[overflowSegNum];
this.bucketInSegmentOffset = (int) (forwardPointer & 0xffffffff);
// reset the per-bucket cursors for the newly entered overflow bucket
this.countInSegment = this.bucket.getShort(this.bucketInSegmentOffset + HEADER_COUNT_OFFSET);
this.posInSegment = this.bucketInSegmentOffset + BUCKET_HEADER_LENGTH;
this.numInSegment = 0;
/**
 * Writes the given double-precision floating-point value (64bit, 8 bytes) to the given position in big endian
 * byte order. This method's speed depends on the system's native byte order, and it
 * is possibly slower than {@link #putDouble(int, double)}. For most cases (such as
 * transient storage in memory or serialization for I/O and network),
 * it suffices to know that the byte order in which the value is written is the same as the
 * one in which it is read, and {@link #putDouble(int, double)} is the preferable choice.
 *
 * @param index The position at which the value will be written.
 * @param value The double value to be written.
 *
 * @throws IndexOutOfBoundsException Thrown, if the index is negative, or larger than the segment
 *                                   size minus 8.
 */
public final void putDoubleBigEndian(int index, double value) {
	// reinterpret the double's raw IEEE-754 bits and delegate to the long writer
	putLongBigEndian(index, Double.doubleToRawLongBits(value));
}
/**
 * Compares two records by their normalized keys stored in the sort index; when the
 * keys tie and are only a prefix, falls back to comparing the full records via the
 * pointers stored at the head of each index entry.
 */
@Override
public int compare(int i, int j) {
	// locate both index entries: segment plus byte offset within the segment
	final MemorySegment segI = this.sortIndex.get(i / this.indexEntriesPerSegment);
	final MemorySegment segJ = this.sortIndex.get(j / this.indexEntriesPerSegment);
	final int offI = (i % this.indexEntriesPerSegment) * this.indexEntrySize;
	final int offJ = (j % this.indexEntriesPerSegment) * this.indexEntrySize;
	// the normalized key bytes follow the record pointer in each entry
	final int cmp = MemorySegment.compare(segI, segJ, offI + OFFSET_LEN, offJ + OFFSET_LEN, this.numKeyBytes);
	if (cmp != 0 || this.normalizedKeyFullyDetermines) {
		// key prefix decides the order (inverted if the key sorts descending)
		return this.useNormKeyUninverted ? cmp : -cmp;
	}
	// tie on a non-deciding prefix: deserialize and compare the actual records
	final long pointerI = segI.getLong(offI);
	final long pointerJ = segJ.getLong(offJ);
	return compareRecords(pointerI, pointerJ);
}
/**
 * Writes the given int value (32bit, 4 bytes) to the given position in little endian
 * byte order. This method's speed depends on the system's native byte order, and it
 * is possibly slower than {@link #putInt(int, int)}. For most cases (such as
 * transient storage in memory or serialization for I/O and network),
 * it suffices to know that the byte order in which the value is written is the same as the
 * one in which it is read, and {@link #putInt(int, int)} is the preferable choice.
 *
 * @param index The position at which the value will be written.
 * @param value The int value to be written.
 *
 * @throws IndexOutOfBoundsException Thrown, if the index is negative, or larger than the segment
 *                                   size minus 4.
 */
public final void putIntLittleEndian(int index, int value) {
	if (LITTLE_ENDIAN) {
		// native order already matches the requested order — write directly
		putInt(index, value);
	} else {
		// big-endian platform: swap the bytes before the native-order write
		putInt(index, Integer.reverseBytes(value));
	}
}
/**
 * Inserts a (hash code, pointer) entry into the bucket located during a preceding
 * search: directly into the current bucket if it has room, otherwise into a new
 * overflow bucket that is spliced into the bucket's forward chain.
 *
 * NOTE(review): truncated excerpt — the declarations of {@code overflowSeg} and
 * {@code overflowBucketNum} (presumably the allocation of the overflow bucket in an
 * else-branch) and the closing braces lie outside this view; braces are unbalanced
 * as shown. TODO confirm against the full source.
 */
private final void insertBucketEntryFromSearch(InMemoryPartition<T> partition, MemorySegment originalBucket, MemorySegment currentBucket, int originalBucketOffset, int currentBucketOffset, int countInCurrentBucket, long currentForwardPointer, int hashCode, long pointer) {
	if (countInCurrentBucket < NUM_ENTRIES_PER_BUCKET) {
		// room in the current bucket: append hash code and pointer, bump the count
		currentBucket.putInt(currentBucketOffset + BUCKET_HEADER_LENGTH + (countInCurrentBucket * HASH_CODE_LEN), hashCode); // hash code
		currentBucket.putLong(currentBucketOffset + BUCKET_POINTER_START_OFFSET + (countInCurrentBucket * POINTER_LEN), pointer); // pointer
		currentBucket.putInt(currentBucketOffset + HEADER_COUNT_OFFSET, countInCurrentBucket + 1); // update count
	// new overflow bucket inherits the old forward pointer, and the original
	// bucket's forward pointer is redirected to it: (segment number << 32) | offset
	overflowSeg.putLong(overflowBucketOffset + HEADER_FORWARD_OFFSET, currentForwardPointer);
	final long pointerToNewBucket = (((long) overflowBucketNum) << 32) | ((long) overflowBucketOffset);
	originalBucket.putLong(originalBucketOffset + HEADER_FORWARD_OFFSET, pointerToNewBucket);
	// first entry of the fresh overflow bucket
	overflowSeg.putInt(overflowBucketOffset + BUCKET_HEADER_LENGTH, hashCode); // hash code
	overflowSeg.putLong(overflowBucketOffset + BUCKET_POINTER_START_OFFSET, pointer); // pointer
	overflowSeg.putInt(overflowBucketOffset + HEADER_COUNT_OFFSET, 1);
/**
 * Reads the next 8 bytes at the current position as a big-endian long and
 * advances the position past them.
 */
@Override
public final long readLong() throws IOException {
	final long result = this.segment.getLongBigEndian(this.position);
	this.position += 8;
	return result;
}
/**
 * Reads the next 4 bytes at the current position as a big-endian int and
 * advances the position past them.
 */
@Override
public final int readInt() throws IOException {
	final int result = this.segment.getIntBigEndian(this.position);
	this.position += 4;
	return result;
}
@Override public void putNormalizedKey(Integer iValue, MemorySegment target, int offset, int numBytes) { int value = iValue.intValue() - Integer.MIN_VALUE; // see IntValue for an explanation of the logic if (numBytes == 4) { // default case, full normalized key target.putIntBigEndian(offset, value); } else if (numBytes <= 0) { } else if (numBytes < 4) { for (int i = 0; numBytes > 0; numBytes--, i++) { target.put(offset + i, (byte) (value >>> ((3-i)<<3))); } } else { target.putLongBigEndian(offset, value); for (int i = 4; i < numBytes; i++) { target.put(offset + i, (byte) 0); } } }
/**
 * Lays out the hash table's bucket area: acquires enough memory segments to hold
 * {@code numBuckets} buckets and initializes every bucket header (owning partition,
 * zero entry count, no overflow chain).
 *
 * @param numBuckets The total number of buckets in the table.
 * @param numPartitions The number of partitions buckets are assigned to.
 */
private void initTable(int numBuckets, byte numPartitions) {
	final int bucketsPerSegment = this.bucketsPerSegmentMask + 1;
	// number of segments = ceil(numBuckets / bucketsPerSegment)
	final int numSegs = (numBuckets >>> this.bucketsPerSegmentBits) + ( (numBuckets & this.bucketsPerSegmentMask) == 0 ? 0 : 1);
	final MemorySegment[] table = new MemorySegment[numSegs];
	// go over all segments that are part of the table
	for (int i = 0, bucket = 0; i < numSegs && bucket < numBuckets; i++) {
		final MemorySegment seg = getNextBuffer();
		// go over all buckets in the segment
		for (int k = 0; k < bucketsPerSegment && bucket < numBuckets; k++, bucket++) {
			final int bucketOffset = k * HASH_BUCKET_SIZE;
			// compute the partition that the bucket corresponds to
			final byte partition = assignPartition(bucket, numPartitions);
			// initialize the header fields
			seg.put(bucketOffset + HEADER_PARTITION_OFFSET, partition);
			seg.putInt(bucketOffset + HEADER_COUNT_OFFSET, 0);
			seg.putLong(bucketOffset + HEADER_FORWARD_OFFSET, BUCKET_FORWARD_POINTER_NOT_SET);
		}
		table[i] = seg;
	}
	this.buckets = table;
	this.numBuckets = numBuckets;
}