private void loadDatabase(File dbPath, ChunkCache cache) throws IndexException {
    this.fPath = dbPath;
    clearCaches();
    this.db = new Database(this.fPath, cache, getDefaultVersion(), isPermanentlyReadOnly());

    this.db.setExclusiveLock();
    if (!isSupportedVersion()) {
        Package.logInfo("Index database uses the unsupported version " + this.db.getVersion() //$NON-NLS-1$
                + ". Deleting and recreating."); //$NON-NLS-1$
        this.db.close();
        this.fPath.delete();
        this.db = new Database(this.fPath, cache, getDefaultVersion(), isPermanentlyReadOnly());
        this.db.setExclusiveLock();
    }
    this.db.giveUpExclusiveLock();
    this.fWriteNumber = this.db.getLong(Database.WRITE_NUMBER_OFFSET);
    this.db.setLocked(this.lockCount != 0);
}
public synchronized void clear() {
    for (int i = 0; i < this.fPageTable.length; i++) {
        Chunk chunk = this.fPageTable[i];
        if (chunk == null) {
            continue;
        }
        // Detach the chunk from the cache and let its database decide
        // whether it can now be released.
        chunk.fCacheIndex = -1;
        chunk.fDatabase.checkIfChunkReleased(chunk);
        this.fPageTable[i] = null;
    }
    this.fTableIsFull = false;
    this.fPointer = 0;
}
public void makeDirty() {
    if (this.fSequenceNumber >= Database.NUM_HEADER_CHUNKS) {
        Chunk chunk = this.fDatabase.fChunks[this.fSequenceNumber];
        if (chunk != this) {
            throw new IllegalStateException("CHUNK " + this.fSequenceNumber + ": found two copies. Copy 1: " //$NON-NLS-1$ //$NON-NLS-2$
                    + System.identityHashCode(this) + ", Copy 2: " + System.identityHashCode(chunk)); //$NON-NLS-1$
        }
    }
    if (!this.fDirty) {
        if (Database.DEBUG_PAGE_CACHE) {
            System.out.println(
                    "CHUNK " + this.fSequenceNumber + ": dirtied - instance " + System.identityHashCode(this)); //$NON-NLS-1$ //$NON-NLS-2$
        }
        if (this.fSequenceNumber >= Database.NUM_HEADER_CHUNKS
                && this.fDatabase.fMostRecentlyFetchedChunk != this) {
            throw new IllegalStateException("CHUNK " + this.fSequenceNumber //$NON-NLS-1$
                    + " dirtied out of order: Only the most-recently-fetched chunk is allowed to be dirtied"); //$NON-NLS-1$
        }
        this.fDirty = true;
        this.fDatabase.chunkDirtied(this);
    }
}
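The "dirtied out of order" check encodes a usage contract: a writer must fetch a chunk immediately before modifying it. A minimal sketch of that contract, assuming Database.getChunk() updates fMostRecentlyFetchedChunk (as in the getChunk() excerpt further down) and that Chunk.putInt() calls makeDirty():

Chunk chunk = database.getChunk(recordAddress); // becomes fMostRecentlyFetchedChunk
chunk.putInt(recordAddress, 42);                // dirtying the most-recently-fetched chunk: allowed
// Fetching a different chunk between these two lines would make putInt()
// trip the "dirtied out of order" IllegalStateException above.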
try {
    if (datasize >= MAX_SINGLE_BLOCK_MALLOC_SIZE) {
        // Large allocation: take a whole run of chunks.
        int newChunkNum = createLargeBlock(datasize);
        usedSize = Math.abs(getBlockHeaderForChunkNum(newChunkNum)) * CHUNK_SIZE;
        result = (long) newChunkNum * CHUNK_SIZE + LargeBlock.HEADER_SIZE;
        clearRange(result, usedSize - LargeBlock.HEADER_SIZE - LargeBlock.FOOTER_SIZE);
        result = result + BLOCK_HEADER_SIZE;
    } else {
        long freeBlock = 0;
        int needDeltas = divideRoundingUp(datasize + BLOCK_HEADER_SIZE, BLOCK_SIZE_DELTA);
        if (needDeltas < MIN_BLOCK_DELTAS) {
            needDeltas = MIN_BLOCK_DELTAS;
        }

        // Which block size? Scan the free lists for the smallest size class
        // that has a free block available.
        int useDeltas;
        for (useDeltas = needDeltas; useDeltas <= MAX_BLOCK_DELTAS; useDeltas++) {
            freeBlock = getFirstBlock(useDeltas * BLOCK_SIZE_DELTA);
            if (freeBlock != 0)
                break;
        }

        Chunk chunk;
        if (freeBlock == 0) {
            // No free block of a suitable size: allocate a fresh chunk.
            freeBlock = (long) (createLargeBlock(datasize)) * (long) CHUNK_SIZE + LargeBlock.HEADER_SIZE;
            useDeltas = MAX_BLOCK_DELTAS;
            chunk = getChunk(freeBlock);
        } else {
            chunk = getChunk(freeBlock);
            chunk.makeDirty();
            int blockReportedSize = chunk.getShort(freeBlock);
            if (blockReportedSize != useDeltas * BLOCK_SIZE_DELTA) {
                throw describeProblem()
                    .addProblemAddress("block size", freeBlock, SHORT_SIZE) //$NON-NLS-1$
                    .build("Heap corruption detected in free space list. Block " + freeBlock //$NON-NLS-1$
                            + " reports size " + blockReportedSize + " but was in the list for size " //$NON-NLS-1$//$NON-NLS-2$
                            + useDeltas * BLOCK_SIZE_DELTA);
            }
            // ... (excerpt continues: the block is removed from the free list and returned)
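A worked example of the size-class arithmetic in the small-allocation path above, as a self-contained sketch. The constant values are assumptions chosen for illustration; the real ones are defined in Database.

class SizeClassSketch {
    static final int BLOCK_HEADER_SIZE = 2; // hypothetical values
    static final int BLOCK_SIZE_DELTA = 8;
    static final int MIN_BLOCK_DELTAS = 2;

    static int divideRoundingUp(int num, int den) {
        return (num + den - 1) / den;
    }

    public static void main(String[] args) {
        int datasize = 21;
        // ceil((21 + 2) / 8) = 3 deltas, so a 21-byte record is served by a 24-byte block.
        int needDeltas = Math.max(divideRoundingUp(datasize + BLOCK_HEADER_SIZE, BLOCK_SIZE_DELTA),
                MIN_BLOCK_DELTAS);
        System.out.println(needDeltas * BLOCK_SIZE_DELTA); // 24
    }
}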
public void free(long address, short poolId) throws IndexException {
    getLog().start(this.freeTag);
    try {
        assert this.fExclusiveLock;
        long block = address - BLOCK_HEADER_SIZE;
        Chunk chunk = getChunk(block);
        long blockSize = -chunk.getShort(block);
        // A stored size of 0 marks a large block spanning whole chunks.
        if (blockSize == 0) {
            int chunkNum = (int) (address / CHUNK_SIZE);
            int numChunks = -getBlockHeaderForChunkNum(chunkNum);
            if (numChunks < 0) {
                IndexExceptionBuilder builder = describeProblem();
                if (chunkNum < this.fChunksUsed) {
                    builder.addProblemAddress("block header", (long) chunkNum * CHUNK_SIZE, INT_SIZE); //$NON-NLS-1$
                }
                throw builder.build("Already freed large block " + address); //$NON-NLS-1$
            }
            freeLargeChunk(chunkNum, numChunks);
        } else {
            if (blockSize < 0) {
                throw describeProblem()
                    .addProblemAddress("block size", block, SHORT_SIZE) //$NON-NLS-1$
                    .build("Already freed record " + address); //$NON-NLS-1$
            }
            int offset = Chunk.recPtrToIndex(address);
            if (offset + blockSize > CHUNK_SIZE) {
                throw describeProblem()
                    .addProblemAddress("block size", block, SHORT_SIZE) //$NON-NLS-1$
                    .build("Attempting to free chunk of impossible size. The block at address " //$NON-NLS-1$
                            + address + " reports a size of " + blockSize); //$NON-NLS-1$
            }
            addBlock(chunk, (int) blockSize, block);
        }
    } finally {
        getLog().end(this.freeTag);
    }
    periodicValidateFreeSpace();
}
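Both "already freed" checks rest on a sign convention: a live block stores its size negated, so negating the stored header yields a positive size, while a negative result means the block is already on a free list. A standalone sketch of that convention (the values are illustrative):

class BlockHeaderSignSketch {
    public static void main(String[] args) {
        short storedHeader = -24;           // a live 24-byte block stores -24 in its header
        long blockSize = -storedHeader;     // 24: positive, safe to free
        System.out.println(blockSize > 0);  // true

        storedHeader = 24;                  // once freed, the header holds the positive size
        blockSize = -storedHeader;          // -24: negative, so freeing again is a double free
        System.out.println(blockSize < 0);  // true -> "Already freed record"
    }
}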
private void validateFreeSpaceNode(Set<Integer> visited, int chunkNum, int parent) {
    if (visited.contains(chunkNum)) {
        throw describeProblem().build("Chunk " + chunkNum + " (parent = " + parent //$NON-NLS-1$//$NON-NLS-2$
                + ") appeared twice in the free space tree"); //$NON-NLS-1$
    }
    visited.add(chunkNum);

    long chunkStart = (long) chunkNum * CHUNK_SIZE;
    int parentChunk = getInt(chunkStart + LargeBlock.PARENT_OFFSET);
    if (parentChunk != parent) {
        throw describeProblem()
            .addProblemAddress("parent pointer", chunkStart + LargeBlock.PARENT_OFFSET, Database.INT_SIZE) //$NON-NLS-1$
            .build("Chunk " + chunkNum + " has the wrong parent. Expected " + parent //$NON-NLS-1$//$NON-NLS-2$
                    + " but found " + parentChunk); //$NON-NLS-1$
    }

    int numChunks = getBlockHeaderForChunkNum(chunkNum);
    for (int testPosition = 0; testPosition < LargeBlock.ENTRIES_IN_CHILD_TABLE; testPosition++) {
        long nextChildChunkNumAddress = chunkStart + LargeBlock.CHILD_TABLE_OFFSET + (testPosition * INT_SIZE);
        int nextChildChunkNum = getInt(nextChildChunkNumAddress);
        if (nextChildChunkNum == 0) {
            continue;
        }
        int nextSize = getBlockHeaderForChunkNum(nextChildChunkNum);
        // A child is filed under the index of the most significant bit where
        // its size differs from its parent's size.
        int sizeDifference = nextSize ^ numChunks;
        int firstDifference = LargeBlock.SIZE_OF_SIZE_FIELD * 8 - Integer.numberOfLeadingZeros(sizeDifference);
        if (firstDifference != testPosition) {
            IndexExceptionBuilder descriptor = describeProblem();
            attachBlockHeaderForChunkNum(descriptor, chunkNum);
            attachBlockHeaderForChunkNum(descriptor, nextChildChunkNum);
            throw descriptor.build("Chunk " + nextChildChunkNum + " contained an incorrect size of " //$NON-NLS-1$//$NON-NLS-2$
                    + nextSize + ". It was at position " + testPosition + " in parent " + chunkNum //$NON-NLS-1$ //$NON-NLS-2$
                    + " which has size " + numChunks); //$NON-NLS-1$
        }
        try {
            validateFreeSpaceNode(visited, nextChildChunkNum, chunkNum);
        } catch (IndexException e) {
            describeProblem()
                .addProblemAddress("child pointer from parent " + chunkNum, nextChildChunkNumAddress, //$NON-NLS-1$
                        INT_SIZE)
                .attachTo(e);
            throw e;
        }
    }
}
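The XOR in the loop above locates the child-table slot: XOR-ing two sizes and counting leading zeros gives the one-based position of the most significant bit where they differ. A verifiable sketch, using a plain 32 where the excerpt uses LargeBlock.SIZE_OF_SIZE_FIELD * 8:

class FirstDifferenceSketch {
    public static void main(String[] args) {
        int numChunks = 0b1010;                     // parent's size: 10 chunks
        int nextSize = 0b1110;                      // child's size: 14 chunks
        int sizeDifference = nextSize ^ numChunks;  // 0b0100
        int firstDifference = 32 - Integer.numberOfLeadingZeros(sizeDifference);
        System.out.println(firstDifference);        // 3: bit 2 differs (one-based index 3)
    }
}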
@Override
public void destruct(Nd nd, long address) {
    Database db = nd.getDB();
    db.getLog().start(this.destructTag);
    try {
        short poolId = getMemoryPoolId(nd);
        long headerStartAddress = address + this.offset;
        long firstBlockAddress = FIRST_BLOCK.get(nd, headerStartAddress);
        long nextBlockAddress = firstBlockAddress;
        while (nextBlockAddress != 0) {
            long currentBlockAddress = nextBlockAddress;
            nextBlockAddress = BlockHeader.NEXT_BLOCK.get(nd, currentBlockAddress);
            int elementsInBlock = BlockHeader.ELEMENTS_IN_USE.get(nd, currentBlockAddress);
            destructElements(nd, currentBlockAddress + BlockHeader.BLOCK_HEADER_BYTES, elementsInBlock);
            db.free(currentBlockAddress, poolId);
        }
        db.clearRange(headerStartAddress, getRecordSize());
    } finally {
        db.getLog().end(this.destructTag);
    }
}
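The loop above deliberately reads NEXT_BLOCK before calling db.free(), since the block's memory may be recycled the moment it is freed. The same ordering, isolated in a runnable sketch over a fake long[] heap (all names here are invented for illustration):

class IntrusiveListFreeSketch {
    static long[] heap = new long[16];          // fake memory: heap[addr] holds the next pointer

    static void free(long addr) {
        heap[(int) addr] = -1;                  // poison freed slots to make reuse-after-free visible
    }

    public static void main(String[] args) {
        heap[1] = 4; heap[4] = 9; heap[9] = 0;  // three blocks chained 1 -> 4 -> 9
        long current = 1;
        while (current != 0) {
            long next = heap[(int) current];    // capture the next pointer first
            free(current);                      // after this, heap[current] is garbage
            current = next;
        }
    }
}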
// Excerpt from the flush path; elided passages are marked with "// ...".
final boolean haveDirtyChunks = !dirtyChunks.isEmpty();
if (haveDirtyChunks || this.fHeaderChunk.fDirty) {
    wasInterrupted = markFileIncomplete() || wasInterrupted;
    // ...
    synchronized (this.fCache) {
        if (this.cacheMisses > 100) {
            double measuredReadBytesPerMs = getAverageReadBytesPerMs();
            if (measuredReadBytesPerMs > 0) {
                // Pace writes at half of the measured read bandwidth.
                desiredWriteBytesPerMs = measuredReadBytesPerMs / 2;
            }
        }
    }
    // ...
    nextBytes = chunk.getBytes();
    chunk.fDirty = false;
    chunkCleaned(chunk);
    // ...
}
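A small sketch of the pacing arithmetic implied by the division above: if writes should consume half the measured read bandwidth, then flushing N bytes should take roughly N / desiredWriteBytesPerMs milliseconds. Every name here is an assumption for illustration, not a real Database field:

class WriteThrottleSketch {
    public static void main(String[] args) {
        double measuredReadBytesPerMs = 2048.0;                      // hypothetical measurement
        double desiredWriteBytesPerMs = measuredReadBytesPerMs / 2;  // pace writes at half read speed
        int chunkBytes = 4096;
        long targetMs = (long) Math.ceil(chunkBytes / desiredWriteBytesPerMs);
        System.out.println(targetMs);                                // 4ms per 4 KiB chunk
    }
}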
/**
 * Uninterruptible. Returns true iff an attempt was made to interrupt the flush with
 * {@link Thread#interrupt()}.
 */
boolean flush() throws IndexException {
    if (Database.DEBUG_PAGE_CACHE) {
        System.out.println(
                "CHUNK " + this.fSequenceNumber + ": flushing - instance " + System.identityHashCode(this)); //$NON-NLS-1$//$NON-NLS-2$
    }
    boolean wasCanceled = false;
    try {
        final ByteBuffer buf = ByteBuffer.wrap(this.fBuffer);
        wasCanceled = this.fDatabase.write(buf, (long) this.fSequenceNumber * Database.CHUNK_SIZE);
    } catch (IOException e) {
        throw new IndexException(new DBStatus(e));
    }
    this.fDirty = false;
    this.fDatabase.chunkCleaned(this);
    return wasCanceled;
}
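A minimal, hedged sketch of what a positional chunk write like fDatabase.write() plausibly boils down to: wrapping the chunk's byte array and writing it at its fixed file offset through a FileChannel. The real Database also handles locking, retries, and the interrupt bookkeeping behind the boolean result; none of that is shown here.

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;

class ChunkWriteSketch {
    static final int CHUNK_SIZE = 4096; // hypothetical; the real constant is Database.CHUNK_SIZE

    public static void main(String[] args) throws IOException {
        byte[] chunkBuffer = new byte[CHUNK_SIZE];
        int sequenceNumber = 3;
        try (FileChannel channel = FileChannel.open(Paths.get("index.db"),
                StandardOpenOption.CREATE, StandardOpenOption.WRITE)) {
            ByteBuffer buf = ByteBuffer.wrap(chunkBuffer);
            long position = (long) sequenceNumber * CHUNK_SIZE;
            while (buf.hasRemaining()) {
                // write(buf, pos) may write fewer bytes than requested; loop until done.
                position += channel.write(buf, position);
            }
        }
    }
}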
public Chunk getChunk(long offset) throws IndexException {
    assertLocked();
    if (offset < CHUNK_SIZE) {
        // Offsets inside the first chunk belong to the header chunk.
        this.fMostRecentlyFetchedChunk = this.fHeaderChunk;
        return this.fHeaderChunk;
    }
    synchronized (this.fCache) {
        assert this.fLocked;
        final int index = (int) (offset / CHUNK_SIZE);
        if (index < 0 || index >= this.fChunks.length) {
            databaseCorruptionDetected();
        }
        // ... (cache lookup, miss handling, and return of the fetched chunk elided)
    }
}
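Record pointers are absolute file offsets, so the owning chunk is just offset / CHUNK_SIZE, with chunk 0 reserved for the header (the first branch above). A tiny sketch of that arithmetic, with a hypothetical chunk size:

class ChunkIndexSketch {
    static final int CHUNK_SIZE = 4096; // hypothetical; the real value is Database.CHUNK_SIZE

    public static void main(String[] args) {
        long offset = 10_000L;                  // an absolute database pointer
        long chunkIndex = offset / CHUNK_SIZE;  // 2: third chunk in the file
        long withinChunk = offset % CHUNK_SIZE; // 1808: byte offset inside that chunk
        System.out.println(chunkIndex + " " + withinChunk);
    }
}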
public void close() throws IndexException {
    this.db.close();
    clearCaches();
}
public void clear(IProgressMonitor monitor) {
    this.pendingDeletions.clear();
    getDB().clear(getDefaultVersion());
}