/**
 * Get the estimated memory size.
 *
 * @return number of double words (4 bytes)
 */
@Override
public int getMemory() {
    // fixed per-page overhead plus the raw page size, in bytes
    int bytes = Constants.MEMORY_PAGE_DATA_OVERFLOW + store.getPageSize();
    // convert bytes to 4-byte words
    return bytes >> 2;
}
/**
 * Get the estimated memory size.
 *
 * @return number of double words (4 bytes)
 */
@Override
public int getMemory() {
    // the estimate is simply one page, expressed in 4-byte words
    final int pageSizeBytes = store.getPageSize();
    return pageSizeBytes >> 2;
}
/**
 * Get the estimated memory size.
 *
 * @return number of double words (4 bytes)
 */
@Override
public int getMemory() {
    int sizeInBytes = store.getPageSize();
    // shift by two: four bytes per word
    return sizeInBytes >> 2;
}
/**
 * Get the estimated memory size.
 *
 * @return number of double words (4 bytes)
 */
@Override
public int getMemory() {
    // estimate: exactly one store page, counted in 4-byte words
    return (store.getPageSize()) >> 2;
}
/**
 * Get the file size in bytes.
 * <p>
 * The page count is widened to {@code long} before the multiplication:
 * with an {@code int} page count, {@code pageCount * pageSize} overflows
 * {@code int} once the file exceeds 2 GB, producing a wrong (possibly
 * negative) size even though the method returns {@code long}.
 *
 * @return the size in bytes
 */
long getSize() {
    return (long) pageCount * store.getPageSize();
}
/**
 * Get the disk space used by this index.
 *
 * @return the size in bytes (one page of the underlying store)
 */
@Override
long getDiskSpaceUsed() {
    PageStore pageStore = index.getPageStore();
    return pageStore.getPageSize();
}
/**
 * Create a page log for the given store.
 *
 * @param store the page store this log belongs to
 */
PageLog(PageStore store) {
    this.store = store;
    trace = store.getTrace();
    dataBuffer = store.createData();
    compress = new CompressLZF();
    // buffer sized at twice the page size
    // NOTE(review): presumably headroom for incompressible data - confirm
    compressBuffer = new byte[store.getPageSize() * 2];
}
/**
 * Track a change in the estimated memory used by the rows of this page
 * and propagate the new total to the index.
 *
 * @param add true when a row was added, false when removed
 * @param r the affected row, or null for no size change
 */
private void memoryChange(boolean add, Row r) {
    int delta;
    if (r == null) {
        delta = 0;
    } else {
        // per-row bookkeeping overhead plus the row's own estimate
        delta = 4 + 8 + Constants.MEMORY_POINTER + r.getMemory();
    }
    if (add) {
        memoryData += delta;
    } else {
        memoryData -= delta;
    }
    int estimateBytes = Constants.MEMORY_PAGE_DATA + memoryData
            + index.getPageStore().getPageSize();
    // report in 4-byte words
    index.memoryChange(estimateBytes >> 2);
}
private PageFreeList(PageStore store, int pageId) { // kept in cache, and array list in page store setPos(pageId); this.store = store; pageCount = (store.getPageSize() - DATA_START) * 8; used = new BitField(pageCount); used.set(0); }
/**
 * Add a row. If it is possible this method returns -1, otherwise
 * the split point. It is always possible to add two rows.
 *
 * @param row the row to add
 * @return the split point of this page, or -1 if no split is required
 */
private int addChildTry(SearchRow row) {
    // never split very small pages
    if (entryCount < 4) {
        return -1;
    }
    int startData;
    if (onlyPosition) {
        // if we only store the position, we may at most store as many
        // entries as there is space for keys, because the current data area
        // might get larger when _removing_ a child (if the new key needs
        // more space) - and removing a child can't split this page
        // NOTE(review): "1 * MAX_KEY_LENGTH" binds tighter than "+", so
        // this evaluates to entryCount + MAX_KEY_LENGTH rather than
        // (entryCount + 1) * MAX_KEY_LENGTH - confirm which is intended
        startData = entryCount + 1 * MAX_KEY_LENGTH;
    } else {
        int rowLength = index.getRowSize(data, row, onlyPosition);
        int pageSize = index.getPageStore().getPageSize();
        // rows are stored back-to-front: the previous offset (or the page
        // size when empty) marks where the next row's data would end
        int last = entryCount == 0 ? pageSize : offsets[entryCount - 1];
        startData = last - rowLength;
    }
    // split when the data area would collide with the offset/child array
    if (startData < start + CHILD_OFFSET_PAIR_LENGTH) {
        return entryCount / 2;
    }
    return -1;
}
/** * Allocate the required pages so that no pages need to be allocated while * writing. * * @param minBuffer the number of bytes to allocate */ void reserve(int minBuffer) { if (reserved < minBuffer) { int pageSize = store.getPageSize(); int capacityPerPage = PageStreamData.getCapacity(pageSize); int pages = PageStreamTrunk.getPagesAddressed(pageSize); int pagesToAllocate = 0, totalCapacity = 0; do { // allocate x data pages plus one trunk page pagesToAllocate += pages + 1; totalCapacity += pages * capacityPerPage; } while (totalCapacity < minBuffer); int firstPageToUse = atEnd ? trunkPageId : 0; store.allocatePages(reservedPages, pagesToAllocate, exclude, firstPageToUse); reserved += totalCapacity; if (data == null) { initNextData(); } } }
/**
 * Serialize this page and write it to the store, then trim the buffer
 * back to one page.
 */
@Override
public void write() {
    writeData();
    PageStore pageStore = index.getPageStore();
    pageStore.writePage(getPos(), data);
    data.truncate(pageStore.getPageSize());
}
/**
 * Write the header data.
 */
void initWrite() {
    data = store.createData();
    // header: page type, checksum placeholder, trunk page, log key
    data.writeByte((byte) Page.TYPE_STREAM_DATA);
    data.writeShortInt(0);
    data.writeInt(trunk);
    data.writeInt(logKey);
    // everything the header did not consume is available for payload
    remaining = store.getPageSize() - data.length();
}
/**
 * Recompute this page's estimated memory usage and report it (in 4-byte
 * words) to the index. Does nothing when memory accounting is disabled.
 */
@Override
protected void memoryChange() {
    if (!PageBtreeIndex.isMemoryChangeRequired()) {
        return;
    }
    int estimate = Constants.MEMORY_PAGE_BTREE + index.getPageStore().getPageSize();
    if (rows != null) {
        // per-entry pointer overhead plus each cached row's own estimate
        estimate += getEntryCount() * (4 + Constants.MEMORY_POINTER);
        for (int idx = 0; idx < entryCount; idx++) {
            SearchRow cached = rows[idx];
            if (cached != null) {
                estimate += cached.getMemory();
            }
        }
    }
    index.memoryChange(estimate >> 2);
}
/**
 * Get the database file size.
 *
 * @return 0 for in-memory databases, otherwise the size reported by the
 *         page store (in KB) or by the MVStore file store
 */
@Override
public long getFileSize() {
    if (!database.isPersistent()) {
        return 0;
    }
    PageStore p = database.getPageStore();
    if (p != null) {
        // widen to long before multiplying: pageCount * pageSize is int
        // arithmetic and overflows for files larger than 2 GB
        // NOTE(review): this branch returns KB while the MVStore branch
        // appears to return bytes - confirm the units are intended
        return (long) p.getPageCount() * p.getPageSize() / 1024;
    }
    return database.getMvStore().getStore().getFileStore().size();
}
/**
 * Remove the row at the given index and compact the data area.
 *
 * @param at the index of the row to remove
 */
private void removeRow(int at) {
    if (!optimizeUpdate) {
        readAllRows();
    }
    index.getPageStore().logUndo(this, data);
    entryCount--;
    written = false;
    changeCount = index.getPageStore().getChangeCount();
    // NOTE(review): the node-level removeChild checks entryCount < 0;
    // confirm <= 0 is intended here (it rejects a page becoming empty)
    if (entryCount <= 0) {
        DbException.throwInternalError("" + entryCount);
    }
    // rows are stored back-to-front: the previous offset (or the page
    // size for the first row) marks where this row's data started
    int startNext = at > 0 ? offsets[at - 1] : index.getPageStore().getPageSize();
    int rowLength = startNext - offsets[at];
    start -= OFFSET_LENGTH;
    if (optimizeUpdate) {
        if (writtenData) {
            // compact in place: slide the data stored below the removed
            // row up by its length, then zero the vacated bytes
            byte[] d = data.getBytes();
            int dataStart = offsets[entryCount];
            System.arraycopy(d, dataStart, d, dataStart + rowLength, offsets[at] - dataStart);
            Arrays.fill(d, dataStart, dataStart + rowLength, (byte) 0);
        }
    }
    // drop the removed slot, then shift the remaining offsets
    offsets = remove(offsets, entryCount + 1, at);
    add(offsets, at, entryCount, rowLength);
    rows = remove(rows, entryCount + 1, at);
}
/**
 * Read the page.
 */
private void read() {
    data.reset();
    type = data.readByte();
    data.readShortInt();
    parentPageId = data.readInt();
    boolean lastOverflow = type == (Page.TYPE_DATA_OVERFLOW | Page.FLAG_LAST);
    if (!lastOverflow && type != Page.TYPE_DATA_OVERFLOW) {
        throw DbException.get(ErrorCode.FILE_CORRUPTED_1, "page:" + getPos() + " type:" + type);
    }
    if (lastOverflow) {
        // final page in the chain: the payload length is stored explicitly
        size = data.readShortInt();
        nextPage = 0;
    } else {
        // intermediate page: filled up to the page end, chained onward
        nextPage = data.readInt();
        size = store.getPageSize() - data.length();
    }
    start = data.length();
}
/**
 * Remove the child entry at the given index and update the offset array.
 *
 * @param i the index of the child to remove
 */
private void removeChild(int i) {
    readAllRows();
    entryCount--;
    if (pageStoreInternalCount) {
        // keep the cached row count in sync with the removed subtree
        updateRowCount(-index.getPage(childPageIds[i]).getRowCount());
    }
    written = false;
    changeCount = index.getPageStore().getChangeCount();
    if (entryCount < 0) {
        DbException.throwInternalError("" + entryCount);
    }
    if (entryCount > i) {
        // rows are stored back-to-front: the previous offset (or the page
        // size for the first entry) marks where this entry's data started;
        // shift the following offsets by the removed entry's length
        int startNext = i > 0 ? offsets[i - 1] : index.getPageStore().getPageSize();
        int rowLength = startNext - offsets[i];
        add(offsets, i, entryCount + 1, rowLength);
    }
    // childPageIds has one more element than rows/offsets
    rows = remove(rows, entryCount + 1, i);
    offsets = remove(offsets, entryCount + 1, i);
    childPageIds = remove(childPageIds, entryCount + 2, i);
    start -= CHILD_OFFSET_PAIR_LENGTH;
}
/**
 * Prepare the next data page for writing, creating a new trunk page from
 * the reserved pages when the current trunk is exhausted (or missing).
 */
private void initNextData() {
    int nextData = trunk == null ? -1 : trunk.getPageData(trunkIndex++);
    if (nextData == -1) {
        // no data page left in the current trunk: build a new trunk page
        // from the pages reserved earlier
        int parent = trunkPageId;
        if (trunkNext != 0) {
            trunkPageId = trunkNext;
        }
        int len = PageStreamTrunk.getPagesAddressed(store.getPageSize());
        int[] pageIds = new int[len];
        for (int i = 0; i < len; i++) {
            pageIds[i] = reservedPages.get(i);
        }
        // the reserved page after the addressed ones becomes the next trunk
        trunkNext = reservedPages.get(len);
        logKey++;
        trunk = PageStreamTrunk.create(store, parent, trunkPageId, trunkNext, logKey, pageIds);
        trunkIndex = 0;
        pageCount++;
        trunk.write();
        // drop all reservations consumed here (data pages + next trunk)
        reservedPages.removeRange(0, len + 1);
        nextData = trunk.getPageData(trunkIndex++);
    }
    data = PageStreamData.create(store, nextData, trunk.getPos(), logKey);
    pageCount++;
    data.initWrite();
}
/**
 * Try to add a row by descending into the matching child page, splitting
 * full children as needed.
 *
 * @param row the row to add
 * @return -1 if the row was added, otherwise the split point of this node
 */
@Override
int addRowTry(Row row) {
    index.getPageStore().logUndo(this, data);
    // size of one key/child entry if a child split forces an insert here
    int keyOffsetPairLen = 4 + Data.getVarLongLen(row.getKey());
    while (true) {
        int x = find(row.getKey());
        PageData page = index.getPage(childPageIds[x], getPos());
        int splitPoint = page.addRowTry(row);
        if (splitPoint == -1) {
            // the child accepted the row
            break;
        }
        // the child is full: make sure this node can take another entry,
        // otherwise ask the caller to split this node first
        if (length + keyOffsetPairLen > index.getPageStore().getPageSize()) {
            return entryCount / 2;
        }
        long pivot = splitPoint == 0 ? row.getKey() : page.getKey(splitPoint - 1);
        PageData page2 = page.split(splitPoint);
        index.getPageStore().update(page);
        index.getPageStore().update(page2);
        addChild(x, page2.getPos(), pivot);
        index.getPageStore().update(this);
        // retry: the next iteration descends into one of the two halves
    }
    updateRowCount(1);
    return -1;
}