/**
 * Converts a page offset within a chunk into an absolute file position
 * by adding the chunk header size.
 *
 * @param offset the offset within the chunk
 * @return the absolute file position
 * @throws IllegalStateException if the computed position is negative
 *         (which indicates int overflow, i.e. a corrupted file)
 */
static long getFilePos(int offset) {
    // Int addition on purpose: an overflow wraps negative and is caught below.
    long position = offset + CHUNK_HEADER_SIZE;
    if (position < 0) {
        throw DataUtils.newIllegalStateException(DataUtils.ERROR_FILE_CORRUPT,
                "Negative position {0}", position);
    }
    return position;
}
/**
 * Verifies this transaction is still open.
 *
 * @throws IllegalStateException if the transaction status is STATUS_CLOSED
 */
protected void checkNotClosed() {
    boolean closed = (status == STATUS_CLOSED);
    if (closed) {
        throw DataUtils.newIllegalStateException(DataUtils.ERROR_CLOSED,
                "Transaction is closed");
    }
}
/**
 * Validates a page length read from disk: it must be at least 4 bytes
 * and no larger than the given maximum.
 *
 * @param chunkId    the id of the chunk the page belongs to (for the error message)
 * @param pageLength the page length read from the file
 * @param maxLength  the largest length that is still plausible
 * @throws IllegalStateException if the length is out of range (file corruption)
 */
static void checkPageLength(int chunkId, int pageLength, int maxLength) {
    // Reordered bounds check — same result, no side effects in either operand.
    if (pageLength < 4 || pageLength > maxLength) {
        throw DataUtils.newIllegalStateException(DataUtils.ERROR_FILE_CORRUPT,
                "File corrupted in chunk {0}, expected page length 4..{1}, got {2}",
                chunkId, maxLength, pageLength);
    }
}
/**
 * Ensures the storage has not been closed; the stored panic exception
 * (if any) is attached as the cause so the original failure is visible.
 *
 * @throws IllegalStateException if the storage is closed
 */
private void checkOpen() {
    if (!closed) {
        return;
    }
    throw DataUtils.newIllegalStateException(DataUtils.ERROR_CLOSED,
            "This storage is closed", panicException);
}
/**
 * Returns the cached total entry count of this node's subtree.
 * When assertions are enabled ({@code ASSERT}), the cached value is
 * cross-checked against the sum of the children's counts.
 *
 * @return the total number of entries in this subtree
 * @throws IllegalStateException if the assertion check finds a mismatch
 */
@Override
public long getTotalCount() {
    if (ASSERT) {
        long sum = 0;
        for (int i = 0; i < children.length; i++) {
            sum += children[i].count;
        }
        if (sum != totalCount) {
            throw DataUtils.newIllegalStateException(DataUtils.ERROR_INTERNAL,
                    "Expected: {0} got: {1}", sum, totalCount);
        }
    }
    return totalCount;
}
/**
 * Builds (but does not throw) an "Internal error" exception for
 * conditions that should be impossible.
 *
 * @return a new internal-error IllegalStateException
 */
protected IllegalStateException newInternalError() {
    return DataUtils.newIllegalStateException(
            DataUtils.ERROR_INTERNAL, "Internal error");
}
public void free(long pos, int length) { ByteBuffer buff = memory.remove(pos); if (buff == null) { // nothing was written (just allocated) } else if (buff.remaining() != length) { throw DataUtils.newIllegalStateException(DataUtils.ERROR_READING_FAILED, "Partial remove is not supported at position {0}", pos); } }
/**
 * Truncate the file.
 *
 * @param size the new file size
 * @throws IllegalStateException if the underlying channel fails
 */
public void truncate(long size) {
    writeCount++;
    try {
        file.truncate(size);
        // The cached size only shrinks: truncate never grows a file here.
        fileSize = Math.min(fileSize, size);
    } catch (IOException e) {
        throw DataUtils.newIllegalStateException(DataUtils.ERROR_WRITING_FAILED,
                "Could not truncate file {0} to size {1}", fileName, size, e);
    }
}
/**
 * Flush all changes.
 *
 * <p>On failure the file is closed silently first so no further writes
 * can land on a channel whose durability is unknown.</p>
 *
 * @throws IllegalStateException if the force/fsync fails
 */
public void sync() {
    try {
        file.force(true);
    } catch (IOException e) {
        closeFileSilently();
        throw DataUtils.newIllegalStateException(DataUtils.ERROR_WRITING_FAILED,
                "Could not sync file {0}", fileName, e);
    }
}
/**
 * Get the block.
 *
 * @param key the key
 * @return the block
 * @throws IllegalStateException if no block is stored under the key
 */
byte[] getBlock(long key) {
    byte[] block = map.get(key);
    if (block != null) {
        return block;
    }
    throw DataUtils.newIllegalStateException(DataUtils.ERROR_BLOCK_NOT_FOUND,
            "Block {0} not found", key);
}
/**
 * Reads the set of removed page positions from the chunk metadata file.
 * The count is stored as an int at offset 4, followed by that many longs.
 * A file shorter than 9 bytes yields an empty set.
 *
 * @return the removed page positions, sorted
 * @throws IllegalStateException (after panicking the store) on I/O failure
 */
private synchronized TreeSet<Long> readRemovedPages() {
    TreeSet<Long> result = new TreeSet<>();
    try {
        if (chunkMetaData.length() > 8) {
            chunkMetaData.seek(4);
            int count = chunkMetaData.readInt();
            for (int i = 0; i < count; i++) {
                result.add(chunkMetaData.readLong());
            }
        }
        return result;
    } catch (IOException e) {
        throw panic(DataUtils.newIllegalStateException(DataUtils.ERROR_READING_FAILED,
                "Failed to readRemovedPages", e));
    }
}
/**
 * Get the chunk for the given position.
 *
 * <p>Looks in the in-memory chunk map first; on a miss, attempts to read
 * the chunk header from disk.</p>
 *
 * @param pos the position
 * @return the chunk
 * @throws IllegalStateException if the chunk cannot be found anywhere
 */
BTreeChunk getChunk(long pos) {
    int chunkId = PageUtils.getPageChunkId(pos);
    BTreeChunk chunk = chunks.get(chunkId);
    if (chunk == null) {
        // Not cached yet — fall back to reading the header from the file.
        chunk = readChunkHeader(chunkId);
    }
    if (chunk == null) {
        throw DataUtils.newIllegalStateException(DataUtils.ERROR_FILE_CORRUPT,
                "Chunk {0} not found", chunkId);
    }
    return chunk;
}
/**
 * Assigns this page a position inside the given chunk, records its
 * bookkeeping data on the chunk, and places it in the page cache.
 *
 * @param chunk      the chunk the page was written into
 * @param start      the start offset of the page within the chunk
 * @param pageLength the encoded page length in bytes
 * @param type       the page type
 * @throws IllegalStateException if the page already has a position,
 *         or if the chunk grows beyond {@code BTreeChunk.MAX_SIZE}
 */
void updateChunkAndCachePage(BTreeChunk chunk, int start, int pageLength, int type) {
    if (pos != 0) {
        // A page may be stored exactly once.
        throw DataUtils.newIllegalStateException(DataUtils.ERROR_INTERNAL,
                "Page already stored");
    }
    pos = PageUtils.getPagePos(chunk.id, start, pageLength, type);
    chunk.pagePositions.add(pos);
    chunk.pageLengths.add(pageLength);
    chunk.sumOfPageLength += pageLength;
    chunk.pageCount++;
    map.getBTreeStorage().cachePage(pos, this, getMemory());
    if (chunk.sumOfPageLength > BTreeChunk.MAX_SIZE) {
        throw DataUtils.newIllegalStateException(DataUtils.ERROR_WRITING_FAILED,
                "Chunk too large, max size: {0}, current size: {1}",
                BTreeChunk.MAX_SIZE, chunk.sumOfPageLength);
    }
}
}
/**
 * Conditionally replaces the value for a key: succeeds only when the
 * current committed value equals {@code oldValue}.
 *
 * @param key           the key to update
 * @param oldValue      the expected current value
 * @param newValue      the replacement value
 * @param columnIndexes the columns affected by this update
 * @return the old value on success
 * @throws IllegalStateException if the entry is locked by another transaction
 */
@Override
public V put(K key, V oldValue, V newValue, int[] columnIndexes) {
    TransactionalValue expected = TransactionalValue.createCommitted(oldValue);
    boolean updated = trySet(key, newValue, expected, columnIndexes);
    if (!updated) {
        throw DataUtils.newIllegalStateException(DataUtils.ERROR_TRANSACTION_LOCKED,
                "Entry is locked");
    }
    return oldValue;
}
/**
 * Starts a new MVCC transaction and registers it as current.
 *
 * @param autoCommit     whether the transaction auto-commits
 * @param isShardingMode whether sharding mode is active
 * @return the newly created transaction
 * @throws IllegalStateException if the engine has not been initialized
 */
@Override
public MVCCTransaction beginTransaction(boolean autoCommit, boolean isShardingMode) {
    if (!init.get()) {
        throw DataUtils.newIllegalStateException(DataUtils.ERROR_TRANSACTION_ILLEGAL_STATE,
                "Not initialized");
    }
    long transactionId = getTransactionId(autoCommit, isShardingMode);
    MVCCTransaction transaction = createTransaction(transactionId);
    transaction.setAutoCommit(autoCommit);
    currentTransactions.put(transactionId, transaction);
    return transaction;
}
/**
 * Reads a page by position. Position 0 is invalid; a negative position
 * with a non-null reference denotes a remote page, anything else is read
 * from local storage.
 *
 * @param ref the page reference (may be null)
 * @param pos the page position
 * @return the page
 * @throws IllegalStateException if pos is 0 (corruption)
 */
BTreePage readPage(PageReference ref, long pos) {
    if (pos == 0) {
        throw DataUtils.newIllegalStateException(DataUtils.ERROR_FILE_CORRUPT,
                "Position 0");
    }
    boolean remote = (ref != null && pos < 0);
    return remote ? ref.readRemotePage(map) : readLocalPageAsync(pos);
}
/**
 * This method is called before writing to the map.
 * The default implementation checks whether writing is allowed.
 *
 * @throws IllegalStateException if the backing storage is closed
 * @throws UnsupportedOperationException if the map is read-only.
 */
protected void beforeWrite() {
    // A closed storage is a harder failure than read-only, so check it first.
    if (btreeStorage.isClosed()) {
        throw DataUtils.newIllegalStateException(DataUtils.ERROR_CLOSED,
                "This map is closed");
    }
    if (readOnly) {
        throw DataUtils.newUnsupportedOperationException("This map is read-only");
    }
}
/**
 * Reads and verifies the page check value: the XOR of the check values of
 * the chunk id, the offset, and the page length must match the stored short.
 *
 * @param buff         the buffer positioned at the check value
 * @param chunkId      the chunk id
 * @param offset       the page offset
 * @param pageLength   the page length
 * @param disableCheck when true, the value is consumed but not verified
 * @throws IllegalStateException on a mismatch (file corruption)
 */
static void readCheckValue(ByteBuffer buff, int chunkId, int offset, int pageLength,
        boolean disableCheck) {
    short stored = buff.getShort();
    int expected = DataUtils.getCheckValue(chunkId)
            ^ DataUtils.getCheckValue(offset)
            ^ DataUtils.getCheckValue(pageLength);
    if (!disableCheck && stored != (short) expected) {
        throw DataUtils.newIllegalStateException(DataUtils.ERROR_FILE_CORRUPT,
                "File corrupted in chunk {0}, expected check value {1}, got {2}",
                chunkId, expected, stored);
    }
}
/**
 * Writes a value for the key within this transaction and returns the
 * previous (visible) value, or null if there was none.
 *
 * @param key   the key
 * @param value the new value
 * @return the previous visible value, or null
 * @throws IllegalStateException if the entry is locked by another transaction
 */
@SuppressWarnings("unchecked")
private V set(K key, V value) {
    transaction.checkNotClosed();
    TransactionalValue current = map.get(key);
    if (!trySet(key, value, current)) {
        throw DataUtils.newIllegalStateException(DataUtils.ERROR_TRANSACTION_LOCKED,
                "Entry is locked");
    }
    // Resolve the value that was visible to this transaction before the write.
    TransactionalValue visible = getValue(key, current);
    return visible == null ? null : (V) visible.value;
}
/**
 * Finalizes the write of this node: for every in-memory child, verifies
 * it has been assigned a position, recursively finalizes it, and replaces
 * the reference with a position-only one (dropping the page object while
 * keeping its replication host ids). Finally refreshes the page keys.
 *
 * @throws IllegalStateException if a child page was never written
 */
@Override
void writeEnd() {
    for (int idx = 0; idx < children.length; idx++) {
        PageReference child = children[idx];
        if (child.page == null) {
            continue; // already position-only, nothing to finalize
        }
        if (child.page.getPos() == 0) {
            throw DataUtils.newIllegalStateException(DataUtils.ERROR_INTERNAL,
                    "Page not written");
        }
        child.page.writeEnd();
        PageReference slim = new PageReference(null, child.pos, child.count);
        slim.replicationHostIds = child.page.getReplicationHostIds();
        children[idx] = slim;
    }
    setChildrenPageKeys();
}