/**
 * Write the static file header (page size plus the write/read format
 * versions) into the region immediately after the file store header.
 */
private void writeStaticHeader() {
    int len = pageSize - FileStore.HEADER_LENGTH;
    Data header = Data.create(database, new byte[len]);
    header.writeInt(pageSize);
    header.writeByte((byte) WRITE_VERSION);
    header.writeByte((byte) READ_VERSION);
    file.seek(FileStore.HEADER_LENGTH);
    file.write(header.getBytes(), 0, len);
    writeCount++;
}
/**
 * Append the row to the buffer.
 *
 * @param buff the buffer
 * @param log the undo log
 */
void append(Data buff, UndoLog log) {
    // Remember where this record starts; the leading int is a
    // placeholder that is patched below with the record length.
    int start = buff.length();
    buff.writeInt(0);
    buff.writeInt(operation);
    buff.writeByte(row.isDeleted() ? (byte) 1 : (byte) 0);
    buff.writeInt(log.getTableId(table));
    buff.writeLong(row.getKey());
    buff.writeInt(row.getSessionId());
    int columnCount = row.getColumnCount();
    buff.writeInt(columnCount);
    for (int i = 0; i < columnCount; i++) {
        Value value = row.getValue(i);
        buff.checkCapacity(buff.getValueLen(value));
        buff.writeValue(value);
    }
    buff.fillAligned();
    // Patch the placeholder with the record size in file blocks.
    buff.setInt(start, (buff.length() - start) / Constants.FILE_BLOCK_SIZE);
}
/**
 * Dump a b-tree leaf page: report the amount of unused space, then for
 * each entry print its key and (unless positionOnly) its value.
 */
private void dumpPageBtreeLeaf(PrintWriter writer, Data s, int entryCount, boolean positionOnly) {
    int[] offsets = new int[entryCount];
    int smallestOffset = Integer.MAX_VALUE;
    for (int i = 0; i < entryCount; i++) {
        int off = s.readShortInt();
        smallestOffset = Math.min(off, smallestOffset);
        offsets[i] = off;
    }
    // Free space lies between the end of the offset table and the
    // lowest data offset on the page.
    int empty = smallestOffset - s.length();
    writer.println("-- empty: " + empty);
    for (int i = 0; i < entryCount; i++) {
        s.setPos(offsets[i]);
        long key = s.readVarLong();
        Value data;
        if (positionOnly) {
            data = ValueLong.get(key);
        } else {
            try {
                data = s.readValue();
            } catch (Throwable e) {
                // Keep dumping the remaining entries even if one is corrupt.
                writeDataError(writer, "exception " + e, s.getBytes());
                continue;
            }
        }
        writer.println("-- [" + i + "] key: " + key + " data: " + data);
    }
}
/**
 * Read the data into a target buffer.
 *
 * @param target the target data page
 * @return the next page, or 0 if no next page
 */
int readInto(Data target) {
    target.checkCapacity(size);
    boolean last = type == (Page.TYPE_DATA_OVERFLOW | Page.FLAG_LAST);
    if (last) {
        // Final chunk of the overflow chain: nothing follows.
        target.write(data.getBytes(), START_LAST, size);
        return 0;
    }
    target.write(data.getBytes(), START_MORE, size);
    return nextPage;
}
/**
 * Terminate the buffer with a zero byte, pad it to the file block size,
 * store the total length (in blocks) at offset 0, and write it out.
 */
private void flushBuffer(Data buff) {
    buff.checkCapacity(1);
    buff.writeByte((byte) 0);
    buff.fillAligned();
    int blocks = buff.length() / Constants.FILE_BLOCK_SIZE;
    buff.setInt(0, blocks);
    file.write(buff.getBytes(), 0, buff.length());
}
/**
 * Write the fixed page header: page type, checksum placeholder, parent
 * page id, index id, stored row count and entry count.
 */
private void writeHead() {
    data.reset();
    data.writeByte((byte) Page.TYPE_DATA_NODE);
    // Checksum placeholder, filled in when the page is written out.
    data.writeShortInt(0);
    if (SysProperties.CHECK2 && data.length() != START_PARENT) {
        DbException.throwInternalError();
    }
    data.writeInt(parentPageId);
    data.writeVarInt(index.getId());
    data.writeInt(rowCountStored);
    data.writeShortInt(entryCount);
}
page.reset(); store.openFile(); if (store.length() == store.getFilePointer()) { store.readFully(page.getBytes(), 0, Constants.FILE_BLOCK_SIZE); page.reset(); remainingInBuffer = page.readInt(); if (remainingInBuffer < 0) { close(); return; page.checkCapacity(remainingInBuffer); page.checkCapacity(Data.LENGTH_INT); page.readInt(); page.setPos(page.length() + remainingInBuffer); page.fillAligned(); int len = page.length() - Constants.FILE_BLOCK_SIZE; page.reset(); page.readInt(); store.readFully(page.getBytes(), Constants.FILE_BLOCK_SIZE, len); page.reset(); page.readInt(); if (compress != null) { int uncompressed = page.readInt(); byte[] buff = Utils.newBytes(remainingInBuffer); page.read(buff, 0, remainingInBuffer); page.reset();
long next = 0; if (!last) { next = s.readInt(); writer.println("-- next: " + next); keys[i] = s.readVarLong(); int off = s.readShortInt(); empty = Math.min(off, empty); offsets[i] = off; empty = empty - s.length(); stat.pageDataHead += s.length(); stat.pageDataEmpty += empty; if (trace) { Data s2 = Data.create(this, pageSize); s.setPos(pageSize); long parent = pageId; while (true) { parent = next; seek(next); store.readFully(s2.getBytes(), 0, pageSize); s2.reset(); int type = s2.readByte(); s2.readShortInt(); s2.readInt(); if (type == (Page.TYPE_DATA_OVERFLOW | Page.FLAG_LAST)) { int size = s2.readShortInt();
/**
 * Write a chunk to the underlying store, optionally compressing it.
 * Each chunk is framed with its (compressed) length, and — when
 * compression is enabled — the uncompressed length as well.
 */
@Override
public void write(byte[] buff, int off, int len) {
    if (len <= 0) {
        return;
    }
    page.reset();
    if (compress == null) {
        page.checkCapacity(Data.LENGTH_INT + len);
        page.writeInt(len);
        page.write(buff, off, len);
    } else {
        // The compressor needs the chunk as a standalone array.
        if (off != 0 || len != buff.length) {
            buff = Arrays.copyOfRange(buff, off, off + len);
            off = 0;
        }
        int uncompressed = len;
        buff = compress.compress(buff, compressionAlgorithm);
        len = buff.length;
        page.checkCapacity(2 * Data.LENGTH_INT + len);
        page.writeInt(len);
        page.writeInt(uncompressed);
        page.write(buff, off, len);
    }
    page.fillAligned();
    store.write(page.getBytes(), 0, page.length());
}
logPos++; Data data = dataBuffer; data.reset(); int columns = row.getColumnCount(); data.writeVarInt(columns); data.checkCapacity(row.getByteCount(data)); if (session.isRedoLogBinaryEnabled()) { for (int i = 0; i < columns; i++) { data.writeValue(row.getValue(i)); Value v = row.getValue(i); if (v.getType() == Value.BYTES) { data.writeValue(ValueNull.INSTANCE); } else { data.writeValue(v); buffer.writeByte((byte) (add ? ADD : REMOVE)); buffer.writeVarInt(session.getId()); buffer.writeVarInt(tableId); buffer.writeVarLong(row.getKey()); if (add) { buffer.writeVarInt(data.length()); buffer.checkCapacity(data.length()); buffer.write(data.getBytes(), 0, data.length());
/**
 * Serialize this page into the data buffer: header, optional overflow
 * pointer, the key/offset table, and finally the row values (skipped
 * when they are already present and the update is optimized).
 */
private void writeData() {
    if (written) {
        return;
    }
    if (!optimizeUpdate) {
        readAllRows();
    }
    writeHead();
    if (firstOverflowPageId != 0) {
        data.writeInt(firstOverflowPageId);
        data.checkCapacity(overflowRowSize);
    }
    // Key / offset pairs for every entry on the page.
    for (int entry = 0; entry < entryCount; entry++) {
        data.writeVarLong(keys[entry]);
        data.writeShortInt(offsets[entry]);
    }
    if (!writtenData || !optimizeUpdate) {
        // Write each row's column values at its recorded offset.
        for (int entry = 0; entry < entryCount; entry++) {
            data.setPos(offsets[entry]);
            Row r = getRowAt(entry);
            for (int col = 0; col < columnCount; col++) {
                data.writeValue(r.getValue(col));
            }
        }
        writtenData = true;
    }
    written = true;
}
/**
 * Read a row from an input stream.
 *
 * @param rowFactory the row factory
 * @param in the input stream
 * @param data a temporary buffer
 * @return the row
 * @throws IOException if reading from the stream fails
 */
public static Row readRow(RowFactory rowFactory, DataReader in, Data data) throws IOException {
    long key = in.readVarLong();
    int len = in.readVarInt();
    // Stage the serialized row in the temporary buffer, then decode it.
    data.reset();
    data.checkCapacity(len);
    in.readFully(data.getBytes(), len);
    int columnCount = data.readVarInt();
    Value[] values = new Value[columnCount];
    for (int column = 0; column < columnCount; column++) {
        values[column] = data.readValue();
    }
    Row row = rowFactory.createRow(values, Row.MEMORY_CALCULATE);
    row.setKey(key);
    return row;
}
Data s = Data.create(this, 128); seek(0); store.readFully(s.getBytes(), 0, 128); s.setPos(48); pageSize = s.readInt(); int writeVersion = s.readByte(); int readVersion = s.readByte(); writer.println("-- pageSize: " + pageSize + " writeVersion: " + writeVersion + s = Data.create(this, pageSize); for (long i = 3; i < pageCount; i++) { s.reset(); seek(i); store.readFully(s.getBytes(), 0, 32); s.readByte(); s.readShortInt(); parents[(int) i] = s.readInt(); s = Data.create(this, pageSize); for (long i = 1;; i++) { if (i == 3) { break; s.reset(); seek(i); store.readFully(s.getBytes(), 0, pageSize); CRC32 crc = new CRC32(); crc.update(s.getBytes(), 4, pageSize - 4);
int pageSize = index.getPageStore().getPageSize(); int last = entryCount == 0 ? pageSize : offsets[entryCount - 1]; int keyOffsetPairLen = 2 + Data.getVarLongLen(row.getKey()); if (entryCount > 0 && last - rowLength < start + keyOffsetPairLen) { int x = findInsertionPoint(row.getKey()); if (optimizeUpdate) { if (writtenData && offset >= start) { byte[] d = data.getBytes(); int dataStart = offsets[entryCount - 1] + rowLength; int dataEnd = offsets[x]; System.arraycopy(d, dataStart, d, dataStart - rowLength, dataEnd - dataStart + rowLength); data.setPos(dataEnd); for (int j = 0; j < columnCount; j++) { data.writeValue(row.getValue(j)); rows[0] = null; Data all = index.getPageStore().createData(); all.checkCapacity(data.length()); all.write(data.getBytes(), 0, data.length()); data.truncate(index.getPageStore().getPageSize()); do { int type, size, next;
/**
 * Deserialize an undo log record from the buffer and rebuild the row.
 * The read order must mirror the write order used by append.
 */
private void load(Data buff, UndoLog log) {
    operation = (short) buff.readInt();
    boolean deleted = buff.readByte() == 1;
    table = log.getTable(buff.readInt());
    long key = buff.readLong();
    int sessionId = buff.readInt();
    int count = buff.readInt();
    Value[] values = new Value[count];
    for (int i = 0; i < count; i++) {
        values[i] = buff.readValue();
    }
    row = getTable().getDatabase().createRow(values, Row.MEMORY_CALCULATE);
    row.setKey(key);
    row.setDeleted(deleted);
    row.setSessionId(sessionId);
    state = IN_MEMORY_INVALID;
}
private void writeRow(Data buff, Row r) { buff.checkCapacity(1 + Data.LENGTH_INT * 8); buff.writeByte((byte) 1); buff.writeInt(r.getMemory()); int columnCount = r.getColumnCount(); buff.writeInt(columnCount); buff.writeLong(r.getKey()); buff.writeInt(r.getVersion()); buff.writeInt(r.isDeleted() ? 1 : 0); buff.writeInt(r.getSessionId()); for (int i = 0; i < columnCount; i++) { Value v = r.getValue(i); buff.checkCapacity(1); if (v == null) { buff.writeByte((byte) 0); } else { buff.writeByte((byte) 1); if (v.getType() == Value.CLOB || v.getType() == Value.BLOB) { buff.checkCapacity(buff.getValueLen(v)); buff.writeValue(v);
/**
 * Read the page.
 */
private void read() {
    data.reset();
    type = data.readByte();
    data.readShortInt();
    parentPageId = data.readInt();
    int lastType = Page.TYPE_DATA_OVERFLOW | Page.FLAG_LAST;
    if (type == lastType) {
        // Last page of the overflow chain stores its payload size explicitly.
        size = data.readShortInt();
        nextPage = 0;
    } else if (type == Page.TYPE_DATA_OVERFLOW) {
        // Intermediate page: the payload fills the rest of the page.
        nextPage = data.readInt();
        size = store.getPageSize() - data.length();
    } else {
        throw DbException.get(ErrorCode.FILE_CORRUPTED_1,
                "page:" + getPos() + " type:" + type);
    }
    start = data.length();
}
assert val != null; Data data = Data.create(null, new byte[SIZE_CALCULATOR.getValueLen(key)]); data.writeValue(key); int keySize = data.length(); mem.writeLong(p + OFFSET_EXPIRATION, expirationTime); mem.writeInt(p + OFFSET_KEY_SIZE, keySize); mem.writeBytes(p + OFFSET_KEY, data.getBytes(), 0, keySize); data = Data.create(null, new byte[SIZE_CALCULATOR.getValueLen(val)]); data.writeValue(val); int valSize = data.length(); mem.writeBytes(valPtr + OFFSET_VALUE, data.getBytes(), 0, valSize);
file.setLength(FileStore.HEADER_LENGTH); Data buff = Data.create(database, Constants.DEFAULT_PAGE_SIZE); for (int i = 0; i < records.size(); i++) { UndoLogRecord r = records.get(i); buff.checkCapacity(Constants.DEFAULT_PAGE_SIZE); r.append(buff, this); if (i == records.size() - 1 || buff.length() > Constants.UNDO_BLOCK_SIZE) { storedEntriesPos.add(file.getFilePointer()); file.write(buff.getBytes(), 0, buff.length()); buff.reset(); file.setCheckedWriting(false); file.seek(FileStore.HEADER_LENGTH); rowBuff = Data.create(database, Constants.DEFAULT_PAGE_SIZE); Data buff = rowBuff; for (UndoLogRecord r : records) {
undoAll.set(pageId); Data buffer = getBuffer(); buffer.writeByte((byte) UNDO); buffer.writeVarInt(pageId); if (page.getBytes()[0] == 0) { buffer.writeVarInt(1); } else { int pageSize = store.getPageSize(); if (COMPRESS_UNDO) { int size = compress.compress(page.getBytes(), pageSize, compressBuffer, 0); if (size < pageSize) { buffer.writeVarInt(size); buffer.checkCapacity(size); buffer.write(compressBuffer, 0, size); } else { buffer.writeVarInt(0); buffer.checkCapacity(pageSize); buffer.write(page.getBytes(), 0, pageSize); buffer.writeVarInt(0); buffer.checkCapacity(pageSize); buffer.write(page.getBytes(), 0, pageSize);