/**
 * Return the page store backing this database, creating and opening it on
 * first use. When the MVStore engine is configured instead, the MVStore is
 * lazily initialized and {@code null} is returned, because no page store
 * exists in that mode.
 *
 * @return the (possibly newly opened) page store, or null when MVStore is used
 */
public PageStore getPageStore() {
    if (dbSettings.mvStore) {
        // MVStore mode: make sure the MVStore is initialized,
        // but there is no PageStore to hand out
        if (mvStore == null) {
            mvStore = MVTableEngine.init(this);
        }
        return null;
    }
    if (pageStore != null) {
        return pageStore;
    }
    // first access: create, configure and open the store
    pageStore = new PageStore(this,
            databaseName + Constants.SUFFIX_PAGE_FILE,
            accessModeData, cacheSize);
    if (pageSize != Constants.DEFAULT_PAGE_SIZE) {
        pageStore.setPageSize(pageSize);
    }
    // with the FS lock method the page store itself maintains the lock file
    if (!readOnly && fileLockMethod == FileLockMethod.FS) {
        pageStore.setLockFile(true);
    }
    pageStore.setLogMode(logMode);
    pageStore.open();
    return pageStore;
}
// Fragment of what appears to be the PageBtreeIndex constructor: registers
// the index with the page store and either creates a new root leaf page or
// re-opens the stored root. NOTE(review): braces in this fragment do not
// balance; the method continues outside this view.
store.addIndex(this);
if (create) {
    // new index: allocate and persist a fresh root leaf page
    rootPageId = store.allocatePage();
    store.addMeta(this, session);
    PageBtreeLeaf root = PageBtreeLeaf.create(this, rootPageId, PageBtree.ROOT);
    store.logUndo(root, null);
    store.update(root);
} else {
    // existing index: look up the stored root page and read its row count
    rootPageId = store.getRootPageId(id);
    PageBtree root = getPage(rootPageId);
    rowCount = root.getRowCount();
    // NOTE(review): inside this else branch create is always false, so the
    // "create ||" term is redundant as written — verify against full source
    this.needRebuild = create || (rowCount == 0 && store.isRecoveryRunning());
    if (trace.isDebugEnabled()) {
        trace.debug("opened {0} rows: {1}", getName(), rowCount);
        // rough per-page memory estimate, scaled down by 4
        memoryPerPage = (Constants.MEMORY_PAGE_BTREE + store.getPageSize()) >> 2;
/** * Allocate the required pages so that no pages need to be allocated while * writing. * * @param minBuffer the number of bytes to allocate */ void reserve(int minBuffer) { if (reserved < minBuffer) { int pageSize = store.getPageSize(); int capacityPerPage = PageStreamData.getCapacity(pageSize); int pages = PageStreamTrunk.getPagesAddressed(pageSize); int pagesToAllocate = 0, totalCapacity = 0; do { // allocate x data pages plus one trunk page pagesToAllocate += pages + 1; totalCapacity += pages * capacityPerPage; } while (totalCapacity < minBuffer); int firstPageToUse = atEnd ? trunkPageId : 0; store.allocatePages(reservedPages, pagesToAllocate, exclude, firstPageToUse); reserved += totalCapacity; if (data == null) { initNextData(); } } }
/**
 * The root page has changed.
 *
 * @param session the session
 * @param newPos the new position
 */
void setRootPageId(Session session, int newPos) {
    // the root page id is part of this index's metadata record, so the old
    // record must be removed and a new one added with the updated id
    store.removeMeta(this, session);
    this.rootPageId = newPos;
    store.addMeta(this, session);
    // re-register the index with the store after the metadata change
    store.addIndex(this);
}
/**
 * Allocate a page.
 *
 * @return the page id
 */
public synchronized int allocatePage() {
    openForWriting();
    int pageId = allocatePage(null, 0);
    // outside of recovery, record an undo entry for the (still empty)
    // page — unless the transaction log is switched off entirely
    boolean mustLog = !recoveryRunning && logMode != LOG_MODE_OFF;
    if (mustLog) {
        log.addUndo(pageId, emptyPage);
    }
    return pageId;
}
// NOTE(review): this fragment interleaves several pieces of PageStore
// maintenance (free-list scanning, transaction-log re-initialization and a
// page compaction loop); braces do not balance and at least one loop header
// is truncated, so the surrounding method(s) continue outside this view.
openForWriting();
// scan the free lists from the highest id downwards for the last used page
for (int i = getFreeListId(pageCount); i >= 0; i--) {
    lastUsed = getFreeList(i).getLastUsed();
    if (lastUsed != -1) {
        break;
writeBack();
log.free();
// re-create the transaction log just after the last used page;
// undo logging is suppressed while recoveryRunning is set
recoveryRunning = true;
try {
    logFirstTrunkPage = lastUsed + 1;
    allocatePage(logFirstTrunkPage);
    log.openForWriting(logFirstTrunkPage, true);
// compaction: move used pages from the end of the file into free slots
// nearer the start (the for-header is cut off in this fragment)
j < maxMove; x -= blockSize) {
    for (int full = x - blockSize + 1; full <= x; full++) {
        if (full > MIN_PAGE_COUNT && isUsed(full)) {
            synchronized (this) {
                firstFree = getFirstFree(firstFree);
                if (firstFree == -1 || firstFree >= full) {
                    // no free page below this one: stop compacting
                    j = maxMove;
                    break;
if (compact(full, firstFree)) {
    j++;
long now = System.nanoTime();
writeBack();
// Fragment of the page-log recovery loop: reads one logged page image
// (stored raw, all-zero, or compressed), writes it back to the store and
// records it in the undo bitmaps. Braces do not balance; the method
// continues outside this view.
DataReader in = new DataReader(pageIn);
int logId = 0;
Data data = store.createData();
boolean isEmpty = true;
try {
    int size = in.readVarInt();
    if (size == 0) {
        // size 0: the page image was stored uncompressed
        in.readFully(data.getBytes(), store.getPageSize());
    } else if (size == 1) {
        // size 1: the page was all zeroes, no payload was stored
        Arrays.fill(data.getBytes(), 0, store.getPageSize(), (byte) 0);
    } else {
        // otherwise: a compressed image of the given size
        in.readFully(compressBuffer, size);
        try {
            compress.expand(compressBuffer, 0, size, data.getBytes(), 0,
                    store.getPageSize());
        } catch (ArrayIndexOutOfBoundsException e) {
            // NOTE(review): the converted exception is discarded rather than
            // thrown — the failure is silently swallowed here; verify intent
            DbException.convertToIOException(e);
store.writePage(pageId, data);
// remember that an undo image for this page has been applied
undo.set(pageId);
undoAll.set(pageId);
int sessionId = in.readVarInt();
int tableId = in.readVarInt();
Row row = readRow(store.getDatabase().getRowFactory(), in, data);
if (stage == RECOVERY_STAGE_UNDO) {
    store.allocateIfIndexRoot(pos, tableId, row);
/**
 * Create and initialize a brand-new database file: write the file headers,
 * set up the transaction log and the metadata index, and pre-extend the
 * file. The statement order here is significant.
 */
private void openNew() {
    setPageSize(pageSize);
    freeListPagesPerList = PageFreeList.getPagesAddressed(pageSize);
    file = database.openFile(fileName, accessMode, false);
    lockFile();
    // while initializing, suppress undo logging
    recoveryRunning = true;
    writeStaticHeader();
    writeVariableHeader();
    log = new PageLog(this);
    increaseFileSize(MIN_PAGE_COUNT);
    openMetaIndex();
    // the log starts at a freshly allocated trunk page
    logFirstTrunkPage = allocatePage();
    log.openForWriting(logFirstTrunkPage, false);
    isNew = true;
    recoveryRunning = false;
    increaseFileSize();
}
// Fragment of what appears to be the PageDataIndex constructor: registers
// the index and either creates a fresh root leaf page or re-opens the
// stored root. NOTE(review): braces do not balance; the method continues
// outside this view.
store.addIndex(this);
if (!database.isPersistent()) {
    // a page-based index only makes sense for a persistent database
    throw DbException.throwInternalError(table.getName());
rootPageId = store.allocatePage();
store.addMeta(this, session);
PageDataLeaf root = PageDataLeaf.create(this, rootPageId, PageData.ROOT);
store.update(root);
} else {
    // existing index: read the stored root and its highest key
    rootPageId = store.getRootPageId(id);
    PageData root = getPage(rootPageId, 0);
    lastKey = root.getLastKey();
// rough per-page memory estimate, scaled down by 4
memoryPerPage = (Constants.MEMORY_PAGE_DATA + store.getPageSize()) >> 2;
// Fragment of PageStore recovery: re-allocates reserved pages, re-opens the
// metadata index, runs the redo stage of log recovery and truncates the log
// up to the first uncommitted section. Braces do not balance in this view.
trace.debug("reserve " + r);
allocatePage(r);
openMetaIndex();
readMetaData();
isEmpty &= log.recover(PageLog.RECOVERY_STAGE_REDO);
boolean setReadOnly = false;
if (log.getInDoubtTransactions().isEmpty()) {
    // no in-doubt transactions: finish recovery and drop committed sections
    log.recoverEnd();
    int firstUncommittedSection = getFirstUncommittedSection();
    log.removeUntil(firstUncommittedSection);
} else {
// NOTE(review): the in-doubt-transaction branch is cut off in this fragment
allocatePage(PAGE_ID_META_ROOT);
writeIndexRowCounts();
recoveryRunning = false;
reservedPages = null;
writeBack();
// Fragment of PageDataLeaf.addRowTry: computes the space needed for the new
// row, inserts it into the offsets/rows arrays and, when the row does not
// fit, starts an overflow page chain. NOTE(review): statements between these
// lines are missing and braces do not balance in this view.
@Override
int addRowTry(Row row) {
    index.getPageStore().logUndo(this, data);
    int rowLength = getRowLength(row);
    int pageSize = index.getPageStore().getPageSize();
    // rows are written from the end of the page towards the start
    int last = entryCount == 0 ? pageSize : offsets[entryCount - 1];
    int keyOffsetPairLen = 2 + Data.getVarLongLen(row.getKey());
    index.getPageStore().logUndo(this, data);
    int x;
    if (entryCount == 0) {
        changeCount = index.getPageStore().getChangeCount();
last = x == 0 ? pageSize : offsets[x - 1];
int offset = last - rowLength;
rows = insert(rows, entryCount, x, row);
entryCount++;
index.getPageStore().update(this);
if (optimizeUpdate) {
    if (writtenData && offset >= start) {
int previous = getPos();
int dataOffset = pageSize;
// the row overflows this page: allocate the first overflow page
int page = index.getPageStore().allocatePage();
firstOverflowPageId = page;
this.overflowRowSize = pageSize + rowLength;
rowRef = new SoftReference<>(r);
rows[0] = null;
/** * Add a page to the free list. * * @param pageId the page id * @param undo if the undo record must have been written */ void free(int pageId, boolean undo) { if (trace.isDebugEnabled()) { // trace.debug("free " + pageId + " " + undo); } cache.remove(pageId); if (SysProperties.CHECK && !recoveryRunning && undo) { // ensure the undo entry is already written if (logMode != LOG_MODE_OFF) { log.addUndo(pageId, null); } } freePage(pageId); if (recoveryRunning) { writePage(pageId, createData()); if (reservedPages != null && reservedPages.containsKey(pageId)) { // re-allocate the page if it is used later on again int latestPos = reservedPages.get(pageId); if (latestPos > log.getLogPos()) { allocatePage(pageId); } } } }
// Fragment of a page-reading routine: reads a page, validates its checksum
// and updates per-index read statistics. NOTE(review): the repeated
// statisticsIncrement lines suggest this fragment is garbled or duplicated;
// braces do not balance in this view — verify against full source.
Data data = createData();
readPage(pageId, data);
int type = data.readByte();
if (type == Page.TYPE_EMPTY) {
if (!checksumTest(data.getBytes(), pageId, pageSize)) {
    throw DbException.get(ErrorCode.FILE_CORRUPTED_1, "wrong checksum");
statisticsIncrement(index.getTable().getName() + "." + index.getName() + " read");
statisticsIncrement(index.getTable().getName() + "." + index.getName() + " read");
p = PageDataOverflow.read(this, data, pageId);
if (statistics != null) {
    statisticsIncrement("overflow read");
statisticsIncrement(index.getTable().getName() + "." + index.getName() + " read");
statisticsIncrement(index.getTable().getName() + "." + index.getName() + " read");
/**
 * Write an undo log entry if required.
 *
 * @param page the page
 * @param old the old data (if known) or null
 */
public synchronized void logUndo(Page page, Data old) {
    if (logMode == LOG_MODE_OFF) {
        // undo logging disabled entirely
        return;
    }
    checkOpen();
    database.checkWritingAllowed();
    if (recoveryRunning) {
        // no undo entries are written while recovering
        return;
    }
    int pos = page.getPos();
    if (log.getUndo(pos)) {
        // an undo entry for this page already exists
        return;
    }
    // read the old page content if the caller did not supply it
    Data previous = old != null ? old : readPage(pos);
    openForWriting();
    log.addUndo(pos, previous);
}
// Fragment of PageBtreeIndex root splitting: the old root becomes "page1"
// under a newly allocated page id, a sibling "page2" takes the upper half,
// and a fresh PageBtreeNode is installed at the fixed root page id.
// NOTE(review): this fragment starts and ends mid-method.
store.logUndo(root, root.data);
PageBtree page1 = root;
PageBtree page2 = root.split(splitPoint);
store.logUndo(page2, null);
// move the old root content to a newly allocated page
int id = store.allocatePage();
page1.setPageId(id);
page1.setParentPageId(rootPageId);
PageBtreeNode newRoot = PageBtreeNode.create(
        this, rootPageId, PageBtree.ROOT);
store.logUndo(newRoot, null);
newRoot.init(page1, pivot, page2);
store.update(page1);
store.update(page2);
store.update(newRoot);
root = newRoot;
// Fragment of a commit path: commits the session's log entries and, once
// the log has grown past half the maximum size, triggers a checkpoint.
// NOTE(review): braces do not balance; the method continues outside this view.
checkOpen();
openForWriting();
log.commit(session.getId());
long size = log.getSize();
if (size - logSizeBase > maxLogSize / 2) {
    int firstSection = log.getLogFirstSectionId();
    checkpoint();
    int newSection = log.getLogSectionId();
    // NOTE(review): branch for when the checkpoint advanced fewer than 3
    // log sections — its body is cut off in this fragment
    if (newSection - firstSection <= 2) {
// Fragment of PageDataIndex root splitting, parallel to the b-tree variant:
// the old root moves to a new page id and a PageDataNode is installed at
// the fixed root page id. NOTE(review): starts and ends mid-method; the
// final logAddOrRemoveRow call likely belongs to surrounding code.
PageData page1 = root;
PageData page2 = root.split(splitPoint);
// move the old root content to a newly allocated page
int id = store.allocatePage();
page1.setPageId(id);
page1.setParentPageId(rootPageId);
PageDataNode newRoot = PageDataNode.create(this, rootPageId, PageData.ROOT);
newRoot.init(page1, pivot, page2);
store.update(page1);
store.update(page2);
store.update(newRoot);
root = newRoot;
store.logAddOrRemoveRow(session, tableData.getId(), row, true);
// Fragment of a flush/compact path: checkpoints the page store and, unless
// the database is read-only, compacts it while holding the meta lock.
// NOTE(review): braces do not balance; the try has no visible catch/finally
// in this fragment — the method continues outside this view.
if (flush) {
    try {
        pageStore.checkpoint();
        if (!readOnly) {
            lockMeta(pageStore.getPageStoreSession());
            pageStore.compact(compactMode);
            unlockMeta(pageStore.getPageStoreSession());
// Fragment: overwrites the root page with empty data and re-allocates it —
// presumably (re)initializing an index root during recovery; verify against
// the full source, as the enclosing method is not visible here.
writePage(rootPageId, createData());
allocatePage(rootPageId);
/**
 * Allocate a number of pages.
 *
 * @param list the list where to add the allocated pages
 * @param pagesToAllocate the number of pages to allocate
 * @param exclude the exclude list
 * @param after all allocated pages are higher than this page
 */
void allocatePages(IntArray list, int pagesToAllocate, BitField exclude,
        int after) {
    list.ensureCapacity(list.size() + pagesToAllocate);
    // each allocation continues strictly above the previously allocated
    // page, so the resulting ids are in ascending order
    int previous = after;
    int remaining = pagesToAllocate;
    while (remaining-- > 0) {
        int page = allocatePage(exclude, previous);
        previous = page;
        list.add(page);
    }
}