/**
 * Builds and configures the main index {@link PageFile} ("db") under the index
 * directory, creating that directory if needed and applying the configured
 * index tuning options (write thread, batch size, cache, eviction, syncs,
 * recovery file, page caching).
 *
 * @return a fully configured (but not yet loaded) PageFile
 * @throws IOException if the index directory cannot be created
 */
private PageFile createPageFile() throws IOException {
    if (indexDirectory == null) {
        // Fall back to the main data directory when no explicit index directory is set.
        indexDirectory = directory;
    }
    IOHelper.mkdirs(indexDirectory);

    final PageFile indexPageFile = new PageFile(indexDirectory, "db");
    indexPageFile.setEnableWriteThread(isEnableIndexWriteAsync());
    indexPageFile.setWriteBatchSize(getIndexWriteBatchSize());
    indexPageFile.setPageCacheSize(indexCacheSize);
    indexPageFile.setUseLFRUEviction(isUseIndexLFRUEviction());
    indexPageFile.setLFUEvictionFactor(getIndexLFUEvictionFactor());
    indexPageFile.setEnableDiskSyncs(isEnableIndexDiskSyncs());
    indexPageFile.setEnableRecoveryFile(isEnableIndexRecoveryFile());
    indexPageFile.setEnablePageCaching(isEnableIndexPageCaching());
    return indexPageFile;
}
// NOTE(review): fragment — this opens an anonymous DataByteArrayOutputStream subclass
// whose body continues beyond this excerpt; it is not compilable on its own.
pageFile.assertLoaded();        // fail fast if the page file is not loaded
pageFile.addToCache(copy);      // make the copied page visible via the page cache
// Output stream sized at twice the page size; 'current' tracks the page being written.
DataByteArrayOutputStream out = new DataByteArrayOutputStream(pageFile.getPageSize() * 2) {
    Page current = copy;
/**
 * Forwards the IO-resumption request to the underlying page file, if one exists.
 */
@Override
public void allowIOResumption() {
    if (pageFile == null) {
        return; // nothing to resume before the page file has been created
    }
    pageFile.allowIOResumption();
}
/**
 * Deletes the on-disk files backing this PageFile: the main page file, the free
 * list file and the recovery file. This method can only be used when this object
 * is not loaded.
 *
 * @throws IOException if the files cannot be deleted
 * @throws IllegalStateException if this PageFile is loaded
 */
public void delete() throws IOException {
    if (loaded.get()) {
        throw new IllegalStateException("Cannot delete page file data when the page file is loaded");
    }
    // Remove each backing file in turn.
    for (File backingFile : new File[] { getMainPageFile(), getFreeFile(), getRecoveryFile() }) {
        delete(backingFile);
    }
}
/**
 * Archives the on-disk files backing this PageFile (main page file, free list
 * and recovery file) using a shared timestamp suffix, instead of deleting them.
 * This method can only be used when this object is not loaded.
 *
 * @throws IOException if the files cannot be archived
 * @throws IllegalStateException if this PageFile is loaded
 */
public void archive() throws IOException {
    if (loaded.get()) {
        // Fixed message: it previously said "Cannot delete ..." — copy/paste from delete().
        throw new IllegalStateException("Cannot archive page file data when the page file is loaded");
    }
    // Use one timestamp for all three files so the archived set is identifiable.
    String timestamp = String.valueOf(System.currentTimeMillis());
    archive(getMainPageFile(), timestamp);
    archive(getFreeFile(), timestamp);
    archive(getRecoveryFile(), timestamp);
}
/**
 * Creates the temporary index {@link PageFile} ("temp-db"). Durability features
 * (disk syncs, recovery file) are switched off since this store is transient.
 */
private PageFile createPageFile() {
    final PageFile tempDb = new PageFile(directory, "temp-db");
    tempDb.setEnableWriteThread(isEnableIndexWriteAsync());
    tempDb.setWriteBatchSize(getIndexWriteBatchSize());
    // Transient data — skip the durability machinery entirely.
    tempDb.setEnableDiskSyncs(false);
    tempDb.setEnableRecoveryFile(false);
    return tempDb;
}
// NOTE(review): garbled excerpt of the PageFile load/initialization path — the brace
// structure is inconsistent (two 'else' branches follow one 'if') because non-adjacent
// pieces of the original method appear to have been spliced together. Tokens are
// reproduced verbatim; do not treat this excerpt as compilable.
if (isUseLFRUEviction()) {
    // LFU page cache with the configured eviction factor.
    pageCache = Collections.synchronizedMap(new LFUCache<Long, Page>(pageCacheSize, getLFUEvictionFactor()));
} else {
    // Access-ordered LRU page cache.
    pageCache = Collections.synchronizedMap(new LRUCache<Long, Page>(pageCacheSize, pageCacheSize, 0.75f, true));
    File file = getMainPageFile();
    IOHelper.mkdirs(file.getParentFile());
    writeFile = new RecoverableRandomAccessFile(file, "rw", false);
    loadMetaData();
    // Existing file: adopt its recorded page size rather than the configured one.
    pageSize = metaData.getPageSize();
} else {
    // Fresh file: initialize and persist the metadata record.
    metaData.setFileType(PageFile.class.getName());
    metaData.setFileTypeVersion("1");
    metaData.setPageSize(getPageSize());
    metaData.setCleanShutdown(true);
    metaData.setFreePages(-1);
    metaData.setLastTxId(0);
    storeMetaData();
    recoveryFile = new RecoverableRandomAccessFile(getRecoveryFile(), "rw");
    // Next transaction id continues from the last persisted one.
    nextTxid.set(metaData.getLastTxId() + 1);
    if (metaData.getFreePages() > 0) {
        loadFreeList();
        LOG.debug(toString() + ", Recovering page file...");
        nextTxid.set(redoRecoveryUpdates());
// NOTE(review): fragment of a startup routine — the anonymous Transaction.Closure is not
// closed in this excerpt, and 'this.pageFile.flush()' inside it would not resolve as
// written (the inner 'this' is the Closure instance) — TODO confirm against full source.
this.journal.setWriteBatchSize(getJournalMaxWriteBatchSize());
this.journal.start();
// Temporary index page file backed by the index directory.
this.pageFile = new PageFile(getIndexDirectory(), "tmpDB");
this.pageFile.setEnablePageCaching(getIndexEnablePageCaching());
this.pageFile.setPageSize(getIndexPageSize());
this.pageFile.setWriteBatchSize(getIndexWriteBatchSize());
this.pageFile.setPageCacheSize(getIndexCacheSize());
this.pageFile.load();
this.pageFile.tx().execute(new Transaction.Closure<IOException>() {
    @Override
    public void execute(Transaction tx) throws IOException {
        this.pageFile.flush();
public void open() throws IOException { if( opened.compareAndSet(false, true) ) { getJournal().start(); try { loadPageFile(); } catch (Throwable t) { LOG.warn("Index corrupted. Recovering the index through journal replay. Cause:" + t); if (LOG.isDebugEnabled()) { LOG.debug("Index load failure", t); } // try to recover index try { pageFile.unload(); } catch (Exception ignore) {} if (archiveCorruptedIndex) { pageFile.archive(); } else { pageFile.delete(); } metadata = createMetadata(); //The metadata was recreated after a detect corruption so we need to //reconfigure anything that was configured on the old metadata on startup configureMetadata(); pageFile = null; loadPageFile(); } recover(); startCheckpoint(); } }
// NOTE(review): fragment — both the anonymous Closure body and the enclosing method are
// truncated here, and the trailing LOG.info line belongs to a later point in the full
// source. 'this.pageFile.flush()' inside the Closure presumably targets the outer
// instance in the complete code — TODO confirm.
this.journal.setSizeAccumulator(this.journalSize);
this.journal.start();
// Scheduler store page file; batch size 1 keeps each write individually flushed.
this.pageFile = new PageFile(directory, "scheduleDB");
this.pageFile.setWriteBatchSize(1);
this.pageFile.load();
this.pageFile.tx().execute(new Transaction.Closure<IOException>() {
    @Override
    public void execute(Transaction tx) throws IOException {
        this.pageFile.flush();
LOG.info(this + " started");
// NOTE(review): fragment of a page-load routine; braces are unbalanced in this excerpt.
pageFile.assertLoaded();    // precondition: page file must be loaded
// Try the page cache first; on a hit, copy the cached page's state into 'page'.
Page<T> t = pageFile.getFromCache(pageId);
if (t != null) {
    page.copy(t);
    // NOTE(review): the lines below look like the cache-miss branch (read raw bytes,
    // deserialize, then cache); the excerpt omits the intervening braces — TODO confirm.
    pageFile.readPage(pageId, in.getRawData());
    page.read(in);
    page.set(null);
    pageFile.addToCache(page);
// NOTE(review): fragment — allocation-count validation spliced together with page
// write-out code; braces are unbalanced and the lines after the throw are unreachable
// as excerpted. In the full source they presumably follow the validation — TODO confirm.
assertLoaded();     // precondition: page file must be loaded
if (count <= 0) {
    throw new IllegalArgumentException("The allocation count must be larger than zero");
    addToCache(page);
    // Serialize the page into a page-sized buffer, then persist it.
    DataByteArrayOutputStream out = new DataByteArrayOutputStream(pageSize);
    page.write(out);
    write(page, out.getData());
private void recoverFreePages(final long lastRecoveryPage) throws Exception { LOG.info(toString() + ". Recovering pageFile free list due to prior unclean shutdown.."); SequenceSet newFreePages = new SequenceSet(); // need new pageFile instance to get unshared readFile PageFile recoveryPageFile = new PageFile(directory, name); recoveryPageFile.loadForRecovery(nextFreePageId.get()); try { for (Iterator<Page> i = new Transaction(recoveryPageFile).iterator(true); i.hasNext(); ) { Page page = i.next(); if (page.getPageId() >= lastRecoveryPage) { break; } if (page.getType() == Page.PAGE_FREE_TYPE) { newFreePages.add(page.getPageId()); } } } finally { recoveryPageFile.readFile.close(); } LOG.info(toString() + ". Recovered pageFile free list of size: " + newFreePages.rangeSize()); if (!newFreePages.isEmpty()) { // allow flush (with index lock held) to merge eventually recoveredFreeList.lazySet(newFreePages); } }
// NOTE(review): fragment — flush the queued writes when a long transaction forces it or
// the batch threshold is reached; the enclosing method is not visible in this excerpt.
if (longTx || canStartWriteBatch()) {
    writeBatch();
/**
 * Sets the page size used by the page file; the default is 4k. Once a page file
 * has been created on disk, subsequent loads of that file keep the original
 * page size, and after the PageFile has been loaded this setting can no longer
 * be changed.
 *
 * @param pageSize the page size to use
 * @throws IllegalStateException once the page file is loaded
 */
public void setPageSize(int pageSize) throws IllegalStateException {
    // The page size is immutable after load — reject late changes.
    assertNotLoaded();
    this.pageSize = pageSize;
}
// Precondition guard: fail fast if the page file has not been loaded yet — presumably
// the entry check of a transaction operation; TODO confirm against the full source.
pageFile.assertLoaded();
// NOTE(review): fragment — when the write position has reached the page boundary, the
// current page is added to the cache; the continuation of this block is not visible.
final int pageSize = pageFile.getPageSize();
if (pos >= pageSize) {
    pageFile.addToCache(current);
/**
 * Allocates a block of free pages that you can write data to.
 *
 * @param count the number of sequential pages to allocate
 * @return the first page of the sequential set
 * @throws IOException if a disk error occurred
 * @throws IllegalStateException if the PageFile is not loaded
 */
public <T> Page<T> allocate(int count) throws IOException {
    final Page<T> firstPage = pageFile.allocate(count);
    final long firstId = firstPage.getPageId();
    // Record the inclusive id range [firstId, firstId + count - 1] of this allocation.
    allocateList.add(new Sequence(firstId, firstId + count - 1));
    return firstPage;
}
/**
 * Finishes the overflow-page stream: frees any unused remainder of the page
 * chain, marks the current page as the end page, caches it and writes its
 * header plus the buffered data back through the enclosing Transaction.
 */
@Override
public void close() throws IOException {
    super.close();

    // We need to free up the rest of the page chain..
    if (current.getType() == Page.PAGE_PART_TYPE) {
        free(current.getNext());
    }
    current.makePageEnd(pos, getWriteTransactionId()); // make visible as end page

    pageFile.addToCache(current);

    // Write the header..
    pos = 0;
    current.write(this);

    // Persist the page contents via the outer Transaction instance.
    Transaction.this.write(current, buf);
}
};