private void init() throws HyracksDataException {
    final int numOfPages = bufferCache.getNumPagesOfFile(fileId);
    // Maximum number of entries in a page
    final int numOfEntriesPerPage = bufferCache.getPageSize() / ENTRY_LENGTH;
    // Get the last page, which may contain fewer entries than numOfEntriesPerPage
    final long dpid = getDiskPageId(numOfPages - 1);
    final ICachedPage page = bufferCache.pin(dpid, false);
    try {
        final ByteBuffer buf = page.getBuffer();
        // Start at 1 since it is impossible to have EOF at the first entry of a page
        int i = 1;
        // Seek EOF and count the number of entries
        while (i < numOfEntriesPerPage && buf.getLong(i * ENTRY_LENGTH) != EOF) {
            i++;
        }
        totalNumOfPages = (numOfPages - 1) * numOfEntriesPerPage + i;
    } finally {
        bufferCache.unpin(page);
    }
}
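For concreteness, a minimal sketch of the same page-count arithmetic, assuming a hypothetical 16-byte ENTRY_LENGTH and 128 KB pages (both values are illustrative, not taken from the source):

// Illustrative only: entry and page sizes below are assumed values.
final int entryLength = 16;                        // assumed ENTRY_LENGTH in bytes
final int pageSize = 128 * 1024;                   // assumed buffer-cache page size
final int entriesPerPage = pageSize / entryLength; // 8192 entries per LAF page
// If the file has 3 pages and EOF is found at entry index 100 of the last page,
// the LAF describes (3 - 1) * 8192 + 100 compressed pages:
final int numOfPages = 3;
final int eofIndex = 100;
final int totalNumOfPages = (numOfPages - 1) * entriesPerPage + eofIndex; // 16484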
@Override
public ICachedPage confiscatePage(long dpid) throws HyracksDataException {
    return bufferCache.confiscatePage(dpid);
}
@Override
public synchronized void clear() throws HyracksDataException {
    if (!isOpen) {
        throw new HyracksDataException("Failed to clear since index is not open.");
    }
    btree.clear();
    bufferCache.closeFile(fileId);
    bufferCache.deleteFile(fileId);
    fileId = bufferCache.createFile(invListsFile);
    bufferCache.openFile(fileId);
}
@Override
public long getMemoryAllocationSize() {
    IBufferCache virtualBufferCache = btree.getBufferCache();
    return (long) virtualBufferCache.getPageBudget() * virtualBufferCache.getPageSize();
}
private ICachedPage pinAndGetPage(int compressedPageId) throws HyracksDataException {
    final int pageId = compressedPageId * ENTRY_LENGTH / bufferCache.getPageSize();
    return bufferCache.pin(getDiskPageId(pageId), false);
}
@Override
public boolean doHasNext() throws HyracksDataException {
    if (numTuples <= currentTupleIndex) {
        return false;
    }
    // We don't latch pages since this code is only used by flush() before
    // bulk-loading the r-tree to disk, and flush is not concurrent.
    ICachedPage node1 =
            bufferCache.pin(BufferedFileHandle.getDiskPageId(fileId, tPointers[currentTupleIndex * 2]), false);
    try {
        leafFrame1.setPage(node1);
        frameTuple1.resetByTupleOffset(leafFrame1.getBuffer().array(), tPointers[currentTupleIndex * 2 + 1]);
    } finally {
        bufferCache.unpin(node1);
    }
    return true;
}
public void warmup(ITreeIndexFrame frame, ITreeIndexMetadataFrame metaFrame, int[] warmupTreeLevels,
        int[] warmupRepeats) throws HyracksDataException {
    int fileId = bufferCache.openFile(fileRef);
    try {
        // First round trip pulls the page into the buffer cache.
        ICachedPage page = bufferCache.pin(BufferedFileHandle.getDiskPageId(fileId, pageId), false);
        page.acquireReadLatch();
        page.releaseReadLatch();
        bufferCache.unpin(page);
        // Second round trip should hit the now-cached copy.
        page = bufferCache.pin(BufferedFileHandle.getDiskPageId(fileId, pageId), false);
        page.acquireReadLatch();
        page.releaseReadLatch();
        bufferCache.unpin(page);
    } finally {
        bufferCache.closeFile(fileId);
    }
}
public synchronized void unpinAllPages() throws HyracksDataException {
    if (pinCount == 1) {
        for (int i = 0; i < numPages; i++) {
            bufferCache.unpin(pages[i]);
        }
        pagesPinned = false;
    }
    pinCount--;
}
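The matching pin side is not shown in this excerpt; a minimal sketch of what it presumably looks like, reusing the same fields and assuming the filter's pages are laid out contiguously (the starting page id is an assumption):

// Hypothetical counterpart to unpinAllPages above: pin the physical pages only
// for the first caller; later callers just bump the reference count.
public synchronized void pinAllPages() throws HyracksDataException {
    if (pinCount == 0) {
        for (int i = 0; i < numPages; i++) {
            // Assumption: pages are laid out contiguously starting at disk page 0.
            pages[i] = bufferCache.pin(BufferedFileHandle.getDiskPageId(fileId, i), false);
        }
        pagesPinned = true;
    }
    pinCount++;
}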
private void createNewRoot(BTreeOpContext ctx) throws HyracksDataException {
    ICachedPage leftNode =
            bufferCache.pin(BufferedFileHandle.getDiskPageId(getFileId(), ctx.getSplitKey().getLeftPage()), false);
    leftNode.acquireWriteLatch();
    try {
        int newLeftId = freePageManager.takePage(ctx.getMetaFrame());
        ICachedPage newLeftNode =
                bufferCache.pin(BufferedFileHandle.getDiskPageId(getFileId(), newLeftId), true);
        newLeftNode.acquireWriteLatch();
        try {
            boolean largePage = false;
            if (leftNode.getBuffer().capacity() > newLeftNode.getBuffer().capacity()) {
                bufferCache.resizePage(newLeftNode, leftNode.getBuffer().capacity() / bufferCache.getPageSize(),
                        ctx);
                largePage = true;
            }
            // (Copying the old root's contents into the new left child is elided in this excerpt.)
            if (largePage) {
                // Shrink the old root back to a single page and re-initialize it as an interior node.
                bufferCache.resizePage(leftNode, 1, ctx);
                ctx.getInteriorFrame().setPage(leftNode);
                ctx.getInteriorFrame().setLargeFlag(false);
            }
        } finally {
            newLeftNode.releaseWriteLatch(true);
            bufferCache.unpin(newLeftNode);
        }
    } finally {
        leftNode.releaseWriteLatch(true);
        bufferCache.unpin(leftNode);
    }
}
@Override
public ICachedPage pin(long dpid, boolean newPage) throws HyracksDataException {
    ICachedPage page = bufferCache.pin(dpid, newPage);
    pinCount.addAndGet(1);
    return page;
}
@Override
public long getFileOffset(ITreeIndexMetadataFrame frame, IValueReference key) throws HyracksDataException {
    int metadataPageNum = getMetadataPageId();
    if (metadataPageNum != IBufferCache.INVALID_PAGEID) {
        ICachedPage metaNode =
                bufferCache.pin(BufferedFileHandle.getDiskPageId(fileId, metadataPageNum), false);
        metaNode.acquireReadLatch();
        try {
            frame.setPage(metaNode);
            return ((long) metadataPageNum * bufferCache.getPageSizeWithHeader()) + frame.getOffset(key);
        } finally {
            metaNode.releaseReadLatch();
            bufferCache.unpin(metaNode);
        }
    }
    return -1;
}
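The returned value is plain page arithmetic: the metadata page's byte position in the file plus the key's offset inside that page. A quick worked example with assumed sizes (all numbers illustrative):

// Illustrative only: the sizes and offsets below are assumed values.
final long pageSizeWithHeader = 131_200L; // assumed page size including the page header
final long metadataPageNum = 2L;          // assumed metadata page id
final long inPageOffset = 64L;            // assumed offset of the key within the page
final long fileOffset = metadataPageNum * pageSizeWithHeader + inPageOffset; // 262_464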
@Override
public void close(IPageWriteFailureCallback callback) throws HyracksDataException {
    if (ready) {
        ICachedPage metaNode =
                bufferCache.pin(BufferedFileHandle.getDiskPageId(fileId, getMetadataPageId()), false);
        ITreeIndexMetadataFrame metaFrame = frameFactory.createFrame();
        metaNode.acquireWriteLatch();
        try {
            metaFrame.setPage(metaNode);
            metaFrame.setValid(true);
        } finally {
            metaNode.releaseWriteLatch(true);
            bufferCache.flush(metaNode);
            bufferCache.unpin(metaNode);
        }
        ready = false;
    }
}
public BloomFilter(IBufferCache bufferCache, FileReference file, int[] keyFields) throws HyracksDataException {
    this.bufferCache = bufferCache;
    this.file = file;
    this.keyFields = keyFields;
    this.numBitsPerPage = bufferCache.getPageSize() * Byte.SIZE;
    this.numBlocksPerPage = this.numBitsPerPage / NUM_BITS_PER_BLOCK;
}
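A quick sanity check of that arithmetic, assuming an illustrative 128 KB page and a 512-bit block (neither constant is taken from the source):

// Illustrative only: page and block sizes below are assumed values.
final int pageSize = 128 * 1024;                               // assumed buffer-cache page size
final int numBitsPerPage = pageSize * Byte.SIZE;               // 1,048,576 bits per page
final int numBitsPerBlock = 512;                               // assumed NUM_BITS_PER_BLOCK
final int numBlocksPerPage = numBitsPerPage / numBitsPerBlock; // 2048 blocks per page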
if (leafFrame.getTupleCount() == 0) {
    bufferCache.returnPage(leafFrontier.page, false);
} else {
    leafFrontier.lastTuple.resetByTupleIndex(leafFrame, leafFrame.getTupleCount() - 1);
    // (Split-key extraction and queueing of the full leaf page are elided in this excerpt.)
}
if (tupleSize > maxTupleSize) {
    final long dpid = BufferedFileHandle.getDiskPageId(getFileId(), leafFrontier.pageId);
    // A tuple larger than a single page needs a block of contiguous pages.
    final int multiplier =
            (int) Math.ceil((double) tupleSize / (bufferCache.getPageSize() - headerSize));
    if (multiplier > 1) {
        leafFrontier.page = bufferCache.confiscateLargePage(dpid, multiplier,
                freePageManager.takeBlock(metaFrame, multiplier - 1));
    } else {
        leafFrontier.page = bufferCache.confiscatePage(dpid);
    }
} else {
    final long dpid = BufferedFileHandle.getDiskPageId(getFileId(), leafFrontier.pageId);
    leafFrontier.page = bufferCache.confiscatePage(dpid);
}
leafFrame.setPage(leafFrontier.page);
leafFrame.initBuffer((byte) 0);
if (!isActivated) {
    throw HyracksDataException.create(ErrorCode.CANNOT_CREATE_BLOOM_FILTER_BUILDER_FOR_INACTIVE_FILTER);
}
queue = bufferCache.createFIFOQueue();
this.estimatedNumElements = estimatedNumElemenets;
this.numHashes = numHashes;
// Confiscate and zero-initialize one cached page per bloom-filter page.
int currentPageId = 1;
while (currentPageId <= numPages) {
    ICachedPage page = bufferCache.confiscatePage(BufferedFileHandle.getDiskPageId(fileId, currentPageId));
    initPage(page.getBuffer().array());
    pages[currentPageId - 1] = page;
    currentPageId++;
}
// Elsewhere (on abort), each confiscated page is handed back to the buffer cache:
// bufferCache.returnPage(pages[i]);
private void setCompressedPageInfo(int compressedPageId, ICachedPageInternal compressedPage)
        throws HyracksDataException {
    ensureState(READABLE);
    if (totalNumOfPages == 0) {
        /*
         * It seems it is legal to pin an empty file.
         * Return the page information as if the page were not compressed.
         */
        compressedPage.setCompressedPageOffset(0);
        compressedPage.setCompressedPageSize(bufferCache.getPageSize());
        return;
    }
    final ICachedPage page = pinAndGetPage(compressedPageId);
    try {
        // No need for read latches as pages are immutable.
        final ByteBuffer buf = page.getBuffer();
        final int entryOffset = compressedPageId * ENTRY_LENGTH % bufferCache.getPageSize();
        compressedPage.setCompressedPageOffset(buf.getLong(entryOffset));
        compressedPage.setCompressedPageSize((int) buf.getLong(entryOffset + SIZE_ENTRY_OFFSET));
    } finally {
        bufferCache.unpin(page);
    }
}
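Together with pinAndGetPage above, each compressed page id maps to a (LAF page, in-page entry offset) pair via one division and one modulo. A minimal sketch of the mapping, again assuming an illustrative 16-byte ENTRY_LENGTH and 128 KB pages:

// Illustrative only: entry and page sizes below are assumed values.
final int entryLength = 16;
final int pageSize = 128 * 1024;
final int compressedPageId = 10_000;
final int lafPageId = compressedPageId * entryLength / pageSize;   // 1
final int entryOffset = compressedPageId * entryLength % pageSize; // 28_928
// The entry at (lafPageId, entryOffset) holds the compressed page's offset and size.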
@Override
public void close(IPageWriteFailureCallback callback) throws HyracksDataException {
    if (ready) {
        IFIFOPageQueue queue = bufferCache.createFIFOQueue();
        ITreeIndexMetadataFrame metaFrame = frameFactory.createFrame();
        confiscatedPage.acquireWriteLatch();
        try {
            metaFrame.setPage(confiscatedPage);
            metaFrame.setValid(true);
        } finally {
            confiscatedPage.releaseWriteLatch(false);
        }
        int finalMetaPage = getMaxPageId(metaFrame) + 1;
        confiscatedPage.setDiskPageId(BufferedFileHandle.getDiskPageId(fileId, finalMetaPage));
        final ICompressedPageWriter compressedPageWriter = bufferCache.getCompressedPageWriter(fileId);
        compressedPageWriter.prepareWrite(confiscatedPage);
        // WARNING: flushing the metadata page should be done after releasing the write latch; otherwise, the
        // page won't be flushed to disk because it won't be dirty until the write latch has been released.
        queue.put(confiscatedPage, callback);
        bufferCache.finishQueue();
        compressedPageWriter.endWriting();
        metadataPage = getMetadataPageId();
        ready = false;
    } else if (confiscatedPage != null) {
        bufferCache.returnPage(confiscatedPage, false);
    }
    confiscatedPage = null;
}
@Override
public void deleteFile(FileReference file) throws HyracksDataException {
    deleteFileCount.incrementAndGet();
    bufferCache.deleteFile(file);
}
int tupleBytes = tupleWriter.bytesRequired(tuple, 0, cmp.getKeyFieldCount());
int spaceNeeded = tupleBytes + slotSize + 4;
if (tupleBytes > interiorFrame.getMaxTupleSize(BTree.this.bufferCache.getPageSize())) {
    throw HyracksDataException.create(ErrorCode.RECORD_IS_TOO_LARGE, tupleBytes,
            interiorFrame.getMaxTupleSize(BTree.this.bufferCache.getPageSize()));
}
// A fresh interior frontier page is confiscated without a disk page id assigned yet.
frontier.page = bufferCache.confiscatePage(BufferCache.INVALID_DPID);
interiorFrame.setPage(frontier.page);
interiorFrame.initBuffer((byte) level);
public LAFWriter(CompressedFileManager compressedFileManager, IBufferCache bufferCache) {
    this.compressedFileManager = compressedFileManager;
    this.bufferCache = bufferCache;
    queue = bufferCache.createFIFOQueue();
    cachedFrames = new HashMap<>();
    recycledFrames = new ArrayDeque<>();
    this.fileId = compressedFileManager.getFileId();
    callBack = new PageWriteFailureCallback();
    maxNumOfEntries = bufferCache.getPageSize() / ENTRY_LENGTH;
    lastOffset = 0;
    totalNumOfPages = 0;
    maxPageId = -1;
    currentPageId = -1;
}