/**
 * Drains the buffer cache's FIFO page-write queue, blocking until all
 * queued pages have been handed off. Pure delegation to the buffer cache.
 *
 * @throws HyracksDataException if draining the queue fails
 */
@Override
public void finishQueue() throws HyracksDataException {
    bufferCache.finishQueue();
}
/**
 * Completes the operation: first surfaces any write failure recorded so far,
 * then drains the page-write queue, and finally records the new root page id
 * with the free page manager.
 *
 * @throws HyracksDataException if a page write has failed
 */
@Override
public void end() throws HyracksDataException {
    // Fail fast before draining the queue or touching metadata.
    if (hasFailed()) {
        throw HyracksDataException.create(getFailure());
    }
    bufferCache.finishQueue();
    freePageManager.setRootPageId(rootPage);
}
@Override public void endWriting() throws HyracksDataException { if (callBack.hasFailed()) { //if write failed, return confiscated pages abort(); throw HyracksDataException.create(callBack.getFailure()); } synchronized (cachedFrames) { final LAFFrame lastPage = cachedFrames.get(maxPageId); if (lastPage != null && !lastPage.isFull()) { /* * The last page may or may not be filled. In case it is not filled (i.e do not have * the max number of entries). Then, write an indicator after the last entry. * If it has been written (i.e lastPage = null), that means it has been filled. */ lastPage.setEOF(); } for (Entry<Integer, LAFFrame> entry : cachedFrames.entrySet()) { queue.put(entry.getValue().cPage, callBack); } bufferCache.finishQueue(); //Signal the compressedFileManager to change its state compressedFileManager.endWriting(totalNumOfPages); } }
/**
 * Finalizes the bloom-filter build: allocates and queues the metadata page,
 * queues every data page, drains the write queue, surfaces any write
 * failure, and only then publishes the builder's results to the enclosing
 * {@code BloomFilter} instance.
 *
 * @throws HyracksDataException if a page write failed
 */
@Override
public void end() throws HyracksDataException {
    // Metadata page goes out first, followed by all data pages.
    allocateAndInitMetaDataPage();
    queue.put(metaDataPage, this);
    for (ICachedPage page : pages) {
        queue.put(page, this);
    }
    bufferCache.finishQueue();
    // Failures are only surfaced after the queue has been fully drained.
    if (hasFailed()) {
        throw HyracksDataException.create(getFailure());
    }
    // Publish the finished filter's parameters to the outer instance.
    BloomFilter.this.numBits = numBits;
    BloomFilter.this.numHashes = numHashes;
    BloomFilter.this.numElements = actualNumElements;
    BloomFilter.this.numPages = numPages;
    BloomFilter.this.version = BLOCKED_BLOOM_FILTER_VERSION;
}
/**
 * Finishes the bulk load: flushes the last partially built BTree tuple,
 * ends the BTree bulk loader, queues the in-flight inverted-list page,
 * records the maximum inverted-list page id, drains the write queue, and
 * finally surfaces any write failure.
 *
 * @throws HyracksDataException if a page write failed
 */
@Override
public void end() throws HyracksDataException {
    // Flush the last, partially assembled BTree tuple (if any).
    if (btreeTupleBuilder.getSize() != 0) {
        insertBTreeTuple();
    }
    btreeBulkloader.end();
    // Queue the inverted-list page still being filled, if one exists.
    if (currentPage != null) {
        queue.put(currentPage, this);
    }
    invListsMaxPageId = currentPageId;
    bufferCache.finishQueue();
    // Check for failures only after the queue has been fully drained.
    if (hasFailed()) {
        throw HyracksDataException.create(getFailure());
    }
}
@Override public void close(IPageWriteFailureCallback callback) throws HyracksDataException { if (ready) { IFIFOPageQueue queue = bufferCache.createFIFOQueue(); ITreeIndexMetadataFrame metaFrame = frameFactory.createFrame(); confiscatedPage.acquireWriteLatch(); try { metaFrame.setPage(confiscatedPage); metaFrame.setValid(true); } finally { confiscatedPage.releaseWriteLatch(false); } int finalMetaPage = getMaxPageId(metaFrame) + 1; confiscatedPage.setDiskPageId(BufferedFileHandle.getDiskPageId(fileId, finalMetaPage)); final ICompressedPageWriter compressedPageWriter = bufferCache.getCompressedPageWriter(fileId); compressedPageWriter.prepareWrite(confiscatedPage); // WARNING: flushing the metadata page should be done after releasing the write latch; otherwise, the page // won't be flushed to disk because it won't be dirty until the write latch has been released. queue.put(confiscatedPage, callback); bufferCache.finishQueue(); compressedPageWriter.endWriting(); metadataPage = getMetadataPageId(); ready = false; } else if (confiscatedPage != null) { bufferCache.returnPage(confiscatedPage, false); } confiscatedPage = null; }