/**
 * Checks whether the fast path applies: the buffer is one of our well-known buffers that
 * was already allocated with alignment.
 *
 * @param srcBuf buffer to check.
 * @return {@code true} if this buffer was allocated with alignment and may be used directly.
 */
private boolean isKnownAligned(ByteBuffer srcBuf) {
    if (!srcBuf.isDirect())
        return false;

    if (managedAlignedBuffers == null)
        return false;

    return managedAlignedBuffers.containsKey(GridUnsafe.bufferAddress(srcBuf));
}
/** {@inheritDoc} */
@Override public void setBuffer(ByteBuffer buf) {
    assert buf != null;

    // Same buffer is already installed, nothing to recompute.
    if (this.buf == buf)
        return;

    this.buf = buf;

    if (buf.isDirect()) {
        heapArr = null;
        baseOff = GridUnsafe.bufferAddress(buf);
    }
    else {
        heapArr = buf.array();
        baseOff = GridUnsafe.BYTE_ARR_OFF;
    }
}
/** {@inheritDoc} */
@Override public void setBuffer(ByteBuffer buf) {
    assert buf != null;

    // Skip recomputation when the same buffer is installed again.
    if (this.buf == buf)
        return;

    this.buf = buf;

    if (buf.isDirect()) {
        heapArr = null;
        baseOff = GridUnsafe.bufferAddress(buf);
    }
    else {
        heapArr = buf.array();
        baseOff = BYTE_ARR_OFF;
    }
}
/**
 * Releases native memory backing the given direct buffer.
 *
 * @param buf Direct buffer allocated by {@link #allocateBuffer(int)}.
 */
public static void freeBuffer(ByteBuffer buf) {
    freeMemory(bufferAddress(buf));
}
/**
 * Frees the memory space used by direct buffer, which must have been returned by a previous call
 * {@link #allocate(int, int)}.
 *
 * @param buf direct buffer to free.
 */
public static void free(ByteBuffer buf) {
    long addr = GridUnsafe.bufferAddress(buf);

    free(addr);
}
/** {@inheritDoc} */
@Override public void writePage(FullPageId fullPageId, ByteBuffer byteBuf, int tag) {
    tracker.lock(fullPageId);

    // Snapshot the page content into the thread-local buffer so the caller's buffer can be reused.
    ByteBuffer threadLocBuf = byteBufThreadLoc.get();

    threadLocBuf.rewind();

    long dstAddr = GridUnsafe.bufferAddress(threadLocBuf);
    long srcAddr = GridUnsafe.bufferAddress(byteBuf);

    GridUnsafe.copyMemory(srcAddr, dstAddr, pageSize);

    this.fullPageId = fullPageId;
    this.byteBuf = threadLocBuf;
    this.tag = tag;
}
/**
 * <b>Note: </b> Use only if {@link #isDirectIoAvailable()}.
 *
 * @param size buffer size to allocate.
 * @return new byte buffer.
 */
@NotNull ByteBuffer createManagedBuffer(int size) {
    assert !useBackupFactory : "Direct IO is disabled, aligned managed buffer creation is disabled now";
    assert managedAlignedBuffers != null : "Direct buffers not available";

    ByteBuffer buf = AlignedBuffers.allocate(ioBlockSize, size).order(ByteOrder.nativeOrder());

    // Register the buffer address with its owning thread for tracking of managed buffers.
    managedAlignedBuffers.put(GridUnsafe.bufferAddress(buf), Thread.currentThread());

    return buf;
}
/** * Will zero memory in buf * @param buf Buffer. * @param off Offset. * @param len Length. */ public static void zeroMemory(ByteBuffer buf, int off, int len) { if (buf.isDirect()) GridUnsafe.setMemory(GridUnsafe.bufferAddress(buf) + off, len, (byte)0); else { for (int i = off; i < off + len; i++) buf.put(i, (byte)0); //TODO Optimize! } }
/**
 * @param src Source.
 * @param srcOff Source offset in bytes.
 * @param dst Destination.
 * @param dstOff Destination offset in bytes.
 * @param cnt Bytes count to copy.
 */
public static void copyMemory(ByteBuffer src, long srcOff, ByteBuffer dst, long dstOff, long cnt) {
    // For an array-backed buffer the base is BYTE_ARR_OFF plus the array offset (with the array
    // reference passed along); for a direct buffer the base is its native address with a null array.
    byte[] srcArr;
    long srcBase;

    if (src.hasArray()) {
        srcArr = src.array();
        srcBase = GridUnsafe.BYTE_ARR_OFF + src.arrayOffset();
    }
    else {
        srcArr = null;
        srcBase = src.isDirect() ? GridUnsafe.bufferAddress(src) : 0;
    }

    byte[] dstArr;
    long dstBase;

    if (dst.hasArray()) {
        dstArr = dst.array();
        dstBase = GridUnsafe.BYTE_ARR_OFF + dst.arrayOffset();
    }
    else {
        dstArr = null;
        dstBase = dst.isDirect() ? GridUnsafe.bufferAddress(dst) : 0;
    }

    GridUnsafe.copyMemory(srcArr, srcBase + srcOff, dstArr, dstBase + dstOff, cnt);
}
/** {@inheritDoc} */ @Override public void compactPage(ByteBuffer page, ByteBuffer out, int pageSize) { copyPage(page, out, pageSize); long pageAddr = GridUnsafe.bufferAddress(out); // Just drop all the extra garbage at the end. out.limit(getItemsEnd(pageAddr)); }
/**
 * Resizes the native memory backing the buffer and wraps the new region.
 *
 * @param buf Buffer.
 * @param len New length.
 * @return Reallocated direct buffer.
 */
public static ByteBuffer reallocateBuffer(ByteBuffer buf, int len) {
    long newPtr = reallocateMemory(bufferAddress(buf), len);

    return wrapPointer(newPtr, len);
}
/** {@inheritDoc} */
@Override public void restorePage(ByteBuffer page, int pageSize) {
    assert page.isDirect();
    assert page.position() == 0;
    assert page.limit() <= pageSize;

    long pageAddr = bufferAddress(page);

    int freeSpace = getRealFreeSpace(pageAddr);

    // Non-zero free space means the page was compacted: data entries were shifted towards the
    // header to close the gap, so they must be moved back to the page tail.
    if (freeSpace != 0) {
        int firstOff = getFirstEntryOffset(pageAddr);

        // Size of the data block between the first entry and the page end in the restored layout.
        int cnt = pageSize - firstOff;

        if (cnt != 0) {
            // Current (compacted) start of the data block: it ends at the truncated limit.
            int off = page.limit() - cnt;

            assert off > PageIO.COMMON_HEADER_END: off;
            assert cnt > 0 : cnt;

            // Shift the data block forward by freeSpace bytes back to its original position.
            moveBytes(pageAddr, off, cnt, freeSpace, pageSize);
        }
    }

    page.limit(pageSize);
}
/**
 * Gets address in memory for direct aligned buffer taking into account its current {@code position()} as offset.
 * Produces warnings if data or offset seems to be not aligned.
 *
 * @param buf Direct aligned buffer.
 * @param pos position, used as offset for resulting pointer.
 * @return Buffer memory address.
 */
@NotNull private Pointer bufferPtrAtPosition(ByteBuffer buf, int pos) {
    if (pos < 0)
        throw new IllegalArgumentException();

    if (pos > buf.capacity())
        throw new BufferOverflowException();

    long baseAddr = GridUnsafe.bufferAddress(buf);
    long resAddr = baseAddr + pos;

    // Warn when the resulting pointer is not a multiple of the IO block size.
    if (resAddr % ioBlockSize != 0) {
        U.warn(log, String.format("IO Buffer Pointer [%d] and/or offset [%d] seems to be not aligned " +
            "for IO block size [%d]. Direct IO may fail.", baseAddr, buf.position(), ioBlockSize));
    }

    return new Pointer(resAddr);
}
/**
 * @param record page snapshot record.
 * @return string identifier of page (IO) type.
 */
private static String getPageType(PageSnapshot record) {
    byte[] pageData = record.pageData();

    ByteBuffer buf = ByteBuffer.allocateDirect(pageData.length);

    try {
        buf.order(ByteOrder.nativeOrder());
        buf.put(pageData);

        long addr = GridUnsafe.bufferAddress(buf);

        return PageIO.getPageIO(PageIO.getType(addr), PageIO.getVersion(addr)).getClass().getSimpleName();
    }
    catch (IgniteCheckedException ignored) {
        // Unresolvable page IO: fall through and report an empty type identifier.
    }
    finally {
        GridUnsafe.cleanDirectBuffer(buf);
    }

    return "";
}
/**
 * @param page Page.
 * @return Page header.
 */
private static byte[] getPageCommonHeader(ByteBuffer page) {
    long pageAddr = GridUnsafe.bufferAddress(page);

    return PageUtils.getBytes(pageAddr, 0, PageIO.COMMON_HEADER_END);
}
/** {@inheritDoc} */
@Override public String toString() {
    // Copy page data into a temporary direct buffer so it can be printed by address.
    ByteBuffer pageBuf = ByteBuffer.allocateDirect(pageData.length);

    pageBuf.order(ByteOrder.nativeOrder());
    pageBuf.put(pageData);

    long pageAddr = GridUnsafe.bufferAddress(pageBuf);

    try {
        return "PageSnapshot [fullPageId = " + fullPageId() + ", page = [\n"
            + PageIO.printPage(pageAddr, realPageSize)
            + "],\nsuper = [" + super.toString() + "]]";
    }
    catch (IgniteCheckedException ignored) {
        // Fall back to raw page bytes when the page cannot be printed.
        return "Error during call'toString' of PageSnapshot [fullPageId=" + fullPageId() +
            ", pageData = " + Arrays.toString(pageData) + ", super=" + super.toString() + "]";
    }
    finally {
        GridUnsafe.cleanDirectBuffer(pageBuf);
    }
}
/** * @param absPtr Absolute ptr. * @param buf Tmp buffer. */ private void copyInBuffer(long absPtr, ByteBuffer buf) { if (buf.isDirect()) { long tmpPtr = GridUnsafe.bufferAddress(buf); GridUnsafe.copyMemory(absPtr + PAGE_OVERHEAD, tmpPtr, pageSize()); assert PageIO.getCrc(absPtr + PAGE_OVERHEAD) == 0; //TODO GG-11480 assert PageIO.getCrc(tmpPtr) == 0; //TODO GG-11480 } else { byte[] arr = buf.array(); assert arr != null; assert arr.length == pageSize(); GridUnsafe.copyMemory(null, absPtr + PAGE_OVERHEAD, arr, GridUnsafe.BYTE_ARR_OFF, pageSize()); } }
/** {@inheritDoc} */
@Override public void compactPage(ByteBuffer page, ByteBuffer out, int pageSize) {
    // TODO May we compactDataEntries in-place and then copy compacted data to out?
    copyPage(page, out, pageSize);

    long pageAddr = bufferAddress(out);

    int freeSpace = getRealFreeSpace(pageAddr);

    if (freeSpace == 0)
        return; // No garbage: nothing to compact here.

    int directCnt = getDirectCount(pageAddr);

    if (directCnt != 0) {
        int firstOff = getFirstEntryOffset(pageAddr);

        // If the space before the first entry minus the free space does not match the header size,
        // there are gaps between individual entries: defragment them towards the page end first.
        if (firstOff - freeSpace != getHeaderSizeWithItems(pageAddr, directCnt)) {
            firstOff = compactDataEntries(pageAddr, directCnt, pageSize);
            setFirstEntryOffset(pageAddr, firstOff, pageSize);
        }

        // Move all the data entries from page end to the page header to close the gap.
        moveBytes(pageAddr, firstOff, pageSize - firstOff, -freeSpace, pageSize);
    }

    out.limit(pageSize - freeSpace); // Here we have only meaningful data of this page.
}
/**
 * Verifies that page IO resolution by address and by buffer both yield the expected IO.
 *
 * @param io Expected page IO.
 * @param page Page buffer.
 * @throws IgniteCheckedException If IO resolution fails.
 */
private void checkIo(PageIO io, ByteBuffer page) throws IgniteCheckedException {
    PageIO byAddr = PageIO.getPageIO(bufferAddress(page));
    PageIO byBuf = PageIO.getPageIO(page);

    assertSame(io, byAddr);
    assertSame(io, byBuf);
}
/**
 * Checks that per-snapshot change counters and change flags stay correct when several
 * consecutive markings are merged into the same tracking page.
 */
@Test public void testMerging() throws Exception {
    ByteBuffer buf = createBuffer();

    ThreadLocalRandom rand = ThreadLocalRandom.current();

    // Number of pages covered by a single tracking page.
    int track = io.countOfPageToTrack(PAGE_SIZE);

    // Random non-negative base page id for the tracking page.
    long basePageId = io.trackingPageFor(Math.max(rand.nextLong(Integer.MAX_VALUE - track), 0), PAGE_SIZE);

    assert basePageId >= 0;

    PageIO.setPageId(GridUnsafe.bufferAddress(buf), basePageId);

    TreeSet<Long> setIdx = new TreeSet<>();

    // Four rounds of markings under snapshot ids 0..3; all marked ids accumulate in setIdx.
    for (int i = 0; i < 4; i++)
        generateMarking(buf, track, basePageId, basePageId + rand.nextInt(1, track), setIdx, i, -1);

    TreeSet<Long> setIdx2 = new TreeSet<>();

    // One more round under snapshot id 4, tracked separately in setIdx2.
    generateMarking(buf, track, basePageId, basePageId + rand.nextInt(1, track), setIdx2, 4, -1);

    assertEquals(setIdx2.size(), io.countOfChangedPage(buf, 4, PAGE_SIZE));
    assertEquals(setIdx.size(), io.countOfChangedPage(buf, 3, PAGE_SIZE));

    // Snapshot 3 (since -1) must report exactly the pages marked during rounds 0..3.
    for (long i = basePageId; i < basePageId + track; i++)
        assertEquals("pageId = " + i, setIdx.contains(i), io.wasChanged(buf, i, 3, -1, PAGE_SIZE));

    // Snapshot 4 relative to 3 must report exactly the pages marked in the last round.
    for (long i = basePageId; i < basePageId + track; i++)
        assertEquals("pageId = " + i, setIdx2.contains(i), io.wasChanged(buf, i, 4, 3, PAGE_SIZE));

    // Nothing was marked after snapshot 4, so no page may report a change for 5 since 4.
    for (long i = basePageId; i < basePageId + track; i++)
        assertFalse(io.wasChanged(buf, i, 5, 4, PAGE_SIZE));
}