/**
 * Read the trunk page fields from the page buffer.
 */
private void read() {
    data.reset();
    // skip the page type byte and the following short (header fields
    // not needed here)
    data.readByte();
    data.readShortInt();
    parent = data.readInt();
    logKey = data.readInt();
    nextTrunk = data.readInt();
    pageCount = data.readShortInt();
    pageIds = new int[pageCount];
    for (int idx = 0; idx < pageCount; idx++) {
        pageIds[idx] = data.readInt();
    }
}
/**
 * Dump a data node page: read the child pointers and keys, validate each
 * child's parent link, and (when tracing) print one line per entry.
 *
 * @param writer the output writer
 * @param pageId the page id being dumped
 * @param entryCount the number of entries on the page
 */
private void dumpPageDataNode(PrintWriter writer, Data s, long pageId, int entryCount) {
    int[] children = new int[entryCount + 1];
    long[] keys = new long[entryCount];
    // the rightmost child pointer is stored first
    children[entryCount] = s.readInt();
    checkParent(writer, pageId, children, entryCount);
    for (int i = 0; i < entryCount; i++) {
        children[i] = s.readInt();
        checkParent(writer, pageId, children, i);
        keys[i] = s.readVarLong();
    }
    if (trace) {
        for (int i = 0; i < entryCount; i++) {
            writer.println("-- [" + i + "] child: " + children[i] + " key: " + keys[i]);
        }
        writer.println("-- [" + entryCount + "] child: " + children[entryCount]);
    }
}
/**
 * Read the variable header. Two copies of the header are kept on pages 1
 * and 2; the first copy with a valid CRC32 checksum wins. If neither copy
 * checks out, the file is corrupted.
 */
private void readVariableHeader() {
    Data page = createData();
    for (int pageNo = 1; pageNo < 3; pageNo++) {
        page.reset();
        readPage(pageNo, page);
        CRC32 crc = new CRC32();
        // checksum covers everything after the stored CRC (first 4 bytes)
        crc.update(page.getBytes(), 4, pageSize - 4);
        int expected = (int) crc.getValue();
        int stored = page.readInt();
        if (expected == stored) {
            writeCountBase = page.readLong();
            logKey = page.readInt();
            logFirstTrunkPage = page.readInt();
            logFirstDataPage = page.readInt();
            return;
        }
    }
    throw DbException.get(ErrorCode.FILE_CORRUPTED_1, fileName);
}
/**
 * Load this undo log record from the given buffer.
 *
 * @param buff the buffer to read from
 * @param log the undo log (used to resolve the table id)
 */
private void load(Data buff, UndoLog log) {
    operation = (short) buff.readInt();
    boolean wasDeleted = buff.readByte() == 1;
    table = log.getTable(buff.readInt());
    long rowKey = buff.readLong();
    int session = buff.readInt();
    int count = buff.readInt();
    Value[] vals = new Value[count];
    for (int col = 0; col < count; col++) {
        vals[col] = buff.readValue();
    }
    row = getTable().getDatabase().createRow(vals, Row.MEMORY_CALCULATE);
    row.setKey(rowKey);
    row.setDeleted(wasDeleted);
    row.setSessionId(session);
    state = IN_MEMORY_INVALID;
}
return null; int mem = buff.readInt(); int columnCount = buff.readInt(); long key = buff.readLong(); int version = buff.readInt(); if (readUncached) { key = 0; boolean deleted = buff.readInt() == 1; int sessionId = buff.readInt(); Value[] values = new Value[columnCount]; for (int i = 0; i < columnCount; i++) {
/**
 * Load an undo log record row using a buffer.
 *
 * @param buff the buffer
 * @param log the log
 * @return the undo log record
 */
static UndoLogRecord loadFromBuffer(Data buff, UndoLog log) {
    UndoLogRecord record = new UndoLogRecord(null, (short) 0, null);
    int start = buff.length();
    // the record length is stored in file blocks
    int storedLength = buff.readInt() * Constants.FILE_BLOCK_SIZE;
    record.load(buff, log);
    // skip any padding up to the stored block-aligned length
    buff.setPos(start + storedLength);
    return record;
}
int min = Constants.FILE_BLOCK_SIZE; file.readFully(buff.getBytes(), 0, min); int len = buff.readInt() * Constants.FILE_BLOCK_SIZE; buff.checkCapacity(len); if (len - min > 0) {
private void dumpPageBtreeNode(PrintWriter writer, Data s, long pageId, boolean positionOnly) { int rowCount = s.readInt(); int entryCount = s.readShortInt(); int[] children = new int[entryCount + 1]; int[] offsets = new int[entryCount]; children[entryCount] = s.readInt(); checkParent(writer, pageId, children, entryCount); int empty = Integer.MAX_VALUE; for (int i = 0; i < entryCount; i++) { children[i] = s.readInt(); checkParent(writer, pageId, children, i); int off = s.readShortInt();
/**
 * Read the b-tree node page fields from the page buffer.
 */
private void read() {
    data.reset();
    int type = data.readByte();
    data.readShortInt();
    this.parentPageId = data.readInt();
    // pages without FLAG_LAST store only row positions, not full rows
    onlyPosition = (type & Page.FLAG_LAST) == 0;
    int indexId = data.readVarInt();
    if (indexId != index.getId()) {
        // fixed: added missing space before "got:" so the message reads
        // "expected index:<a> got:<b>" (consistent with the table check)
        throw DbException.get(ErrorCode.FILE_CORRUPTED_1,
                "page:" + getPos() + " expected index:" + index.getId() +
                " got:" + indexId);
    }
    rowCount = rowCountStored = data.readInt();
    entryCount = data.readShortInt();
    childPageIds = new int[entryCount + 1];
    // the rightmost child pointer is stored first
    childPageIds[entryCount] = data.readInt();
    rows = entryCount == 0 ? SearchRow.EMPTY_ARRAY : new SearchRow[entryCount];
    offsets = Utils.newIntArray(entryCount);
    for (int i = 0; i < entryCount; i++) {
        childPageIds[i] = data.readInt();
        offsets[i] = data.readShortInt();
    }
    check();
    start = data.length();
    written = true;
}
/**
 * Read the data node page fields from the page buffer.
 */
private void read() {
    data.reset();
    // skip the page type byte and the following short
    data.readByte();
    data.readShortInt();
    this.parentPageId = data.readInt();
    int indexId = data.readVarInt();
    if (indexId != index.getId()) {
        // fixed: added missing space before "got:" so the message reads
        // "expected index:<a> got:<b>" (consistent with the table check)
        throw DbException.get(ErrorCode.FILE_CORRUPTED_1,
                "page:" + getPos() + " expected index:" + index.getId() +
                " got:" + indexId);
    }
    rowCount = rowCountStored = data.readInt();
    entryCount = data.readShortInt();
    childPageIds = new int[entryCount + 1];
    // the rightmost child pointer is stored first
    childPageIds[entryCount] = data.readInt();
    keys = Utils.newLongArray(entryCount);
    for (int i = 0; i < entryCount; i++) {
        childPageIds[i] = data.readInt();
        keys[i] = data.readVarLong();
    }
    length = data.length();
    check();
    written = true;
}
/**
 * Read the overflow page fields from the page buffer.
 */
private void read() {
    data.reset();
    type = data.readByte();
    data.readShortInt();
    parentPageId = data.readInt();
    boolean lastPage = type == (Page.TYPE_DATA_OVERFLOW | Page.FLAG_LAST);
    if (lastPage) {
        // the last overflow page stores its own payload size
        size = data.readShortInt();
        nextPage = 0;
    } else if (type == Page.TYPE_DATA_OVERFLOW) {
        // intermediate pages are full and chain to the next one
        nextPage = data.readInt();
        size = store.getPageSize() - data.length();
    } else {
        throw DbException.get(ErrorCode.FILE_CORRUPTED_1,
                "page:" + getPos() + " type:" + type);
    }
    start = data.length();
}
return; page.readInt(); int key = page.readInt(); logKey++; if (key != logKey) { " type: " + t + " expected key: " + logKey + " got: " + key); nextTrunkPage = page.readInt(); writer.println("-- log " + key + ":" + trunkPage + " next: " + nextTrunkPage); int pageCount = page.readShortInt(); for (int i = 0; i < pageCount; i++) { int d = page.readInt(); if (dataPage != 0) { if (d == dataPage) { int p = page.readInt(); int k = page.readInt(); writer.println("-- log " + k + ":" + trunkPage + "/" + nextPage); if (t != Page.TYPE_STREAM_DATA) {
int parentPageId = s.readInt(); setStorage(s.readVarInt()); int columnCount = s.readVarInt(); int parentPageId = s.readInt(); setStorage(s.readVarInt()); int rowCount = s.readInt(); int entries = s.readShortInt(); writer.println("-- page " + page + ": data node " + int parentPageId = s.readInt(); setStorage(s.readVarInt()); int entries = s.readShortInt(); int parentPageId = s.readInt(); setStorage(s.readVarInt()); writer.println("-- page " + page + ": b-tree node " +
long next = 0; if (!last) { next = s.readInt(); writer.println("-- next: " + next); int type = s2.readByte(); s2.readShortInt(); s2.readInt(); if (type == (Page.TYPE_DATA_OVERFLOW | Page.FLAG_LAST)) { int size = s2.readShortInt(); break; } else if (type == Page.TYPE_DATA_OVERFLOW) { next = s2.readInt(); if (next == 0) { writeDataError(writer, "next:0", s2.getBytes());
remainingInBuffer = page.readInt(); if (remainingInBuffer < 0) { close(); page.readInt(); int len = page.length() - Constants.FILE_BLOCK_SIZE; page.reset(); page.readInt(); store.readFully(page.getBytes(), Constants.FILE_BLOCK_SIZE, len); page.reset(); page.readInt(); if (compress != null) { int uncompressed = page.readInt(); byte[] buff = Utils.newBytes(remainingInBuffer); page.read(buff, 0, remainingInBuffer);
/**
 * Read the data leaf page fields from the page buffer.
 */
private void read() {
    data.reset();
    int type = data.readByte();
    // skip the following short (not needed here)
    data.readShortInt();
    this.parentPageId = data.readInt();
    int tableId = data.readVarInt();
    if (tableId != index.getId()) {
        throw DbException.get(ErrorCode.FILE_CORRUPTED_1,
            "page:" + getPos() + " expected table:" + index.getId() +
            " got:" + tableId + " type:" + type);
    }
    columnCount = data.readVarInt();
    entryCount = data.readShortInt();
    offsets = new int[entryCount];
    keys = new long[entryCount];
    rows = new Row[entryCount];
    // NOTE(review): type == TYPE_DATA_LEAF without FLAG_LAST appears to mean
    // the row overflows to another page; in that case exactly one entry is
    // expected on this page — confirm against the writer side
    if (type == Page.TYPE_DATA_LEAF) {
        if (entryCount != 1) {
            DbException.throwInternalError("entries: " + entryCount);
        }
        firstOverflowPageId = data.readInt();
    }
    for (int i = 0; i < entryCount; i++) {
        keys[i] = data.readVarLong();
        offsets[i] = data.readShortInt();
    }
    start = data.length();
    written = true;
    writtenData = true;
}
/**
 * Load an undo log record row using a buffer.
 *
 * @param buff the buffer
 * @param file the source file
 * @param log the log
 */
void load(Data buff, FileStore file, UndoLog log) {
    int headerLen = Constants.FILE_BLOCK_SIZE;
    log.seek(filePos);
    buff.reset();
    // read the first block, which contains the total length
    file.readFully(buff.getBytes(), 0, headerLen);
    int total = buff.readInt() * Constants.FILE_BLOCK_SIZE;
    buff.checkCapacity(total);
    int remaining = total - headerLen;
    if (remaining > 0) {
        file.readFully(buff.getBytes(), headerLen, remaining);
    }
    int previousOp = operation;
    load(buff, log);
    // sanity check: re-loading must not change the operation code
    if (SysProperties.CHECK && operation != previousOp) {
        DbException.throwInternalError("operation=" + operation + " op=" + previousOp);
    }
}
/**
 * Read the static file header: page size and the read/write format
 * versions. A too-new read version is fatal; a too-new write version
 * re-opens the file in read-only mode.
 */
private void readStaticHeader() {
    file.seek(FileStore.HEADER_LENGTH);
    int headerLen = PAGE_SIZE_MIN - FileStore.HEADER_LENGTH;
    Data page = Data.create(database, new byte[headerLen]);
    file.readFully(page.getBytes(), 0, headerLen);
    readCount++;
    setPageSize(page.readInt());
    int writeVersion = page.readByte();
    int readVersion = page.readByte();
    if (readVersion > READ_VERSION) {
        // file was written by a newer version and cannot be read at all
        throw DbException.get(ErrorCode.FILE_VERSION_ERROR_1, fileName);
    }
    if (writeVersion > WRITE_VERSION) {
        // readable but not writable: reopen read-only
        close();
        database.setReadOnly(true);
        accessMode = "r";
        file = database.openFile(fileName, accessMode, true);
    }
}
/**
 * Read the b-tree leaf page fields from the page buffer.
 */
private void read() {
    data.reset();
    int type = data.readByte();
    data.readShortInt();
    this.parentPageId = data.readInt();
    // pages without FLAG_LAST store only row positions, not full rows
    onlyPosition = (type & Page.FLAG_LAST) == 0;
    int indexId = data.readVarInt();
    if (indexId != index.getId()) {
        // fixed: added missing space before "got:" so the message reads
        // "expected index:<a> got:<b>" (consistent with the table check)
        throw DbException.get(ErrorCode.FILE_CORRUPTED_1,
                "page:" + getPos() + " expected index:" + index.getId() +
                " got:" + indexId);
    }
    entryCount = data.readShortInt();
    offsets = new int[entryCount];
    rows = new SearchRow[entryCount];
    for (int i = 0; i < entryCount; i++) {
        offsets[i] = data.readShortInt();
    }
    start = data.length();
    written = true;
    writtenData = true;
}