// Debug helper: dump the journal contents.
// NOTE(review): the 'out' parameter is unused — JournalControl.print(journal)
// chooses its own output destination; confirm whether dump should honour 'out'.
private void dump(PrintStream out, Journal journal) { JournalControl.print(journal); }
recoverNodeDat(sConf.location, fileRef) ; try { x = scanForCommit(jrnl, posn) ; } catch (TDBException ex) { x = -1 ; } recoverSegment(jrnl, posn, x, sConf) ; posn = x ; syncAll(sConf) ;
// Run journal recovery explicitly on the command's dataset.
@Override protected void exec() {
    DatasetGraphTDB dsg = super.getDatasetGraphTDB() ;
    // Just creating the DSG does a recovery so this is not (currently) necessary:
    // This may change (not immediately recovering on start up).
    JournalControl.recovery(dsg) ;
}
}
/** Replay the given journal onto a dataset's base storage. */
public static void replay(Journal journal, DatasetGraphTDB dsg) {
    // Delegate to the StorageConfig-based form of replay.
    StorageConfig config = dsg.getConfig() ;
    replay(journal, config) ;
}
/** Recover a base storage DatasetGraph */ public static void recovery(DatasetGraphTDB dsg) { if ( dsg.getLocation().isMem() ) return ; // Do we need to recover? Journal journal = findJournal(dsg) ; if ( journal == null || journal.isEmpty() ) return ; recoverFromJournal(dsg.getConfig(), journal) ; journal.close(); // Recovery complete. Tidy up. Node journal files have already been handled. if ( journal.getFilename() != null ) { if ( FileOps.exists(journal.getFilename()) ) FileOps.delete(journal.getFilename()) ; } }
/** Flush the journal regardless - use with great care - do not use when transactions may be active. */
public void forceRecoverFromJournal() {
    // Unconditionally replay the transaction manager's journal onto the base dataset.
    JournalControl.recoverFromJournal(getBaseDataset().getConfig(), transactionManager.getJournal());
}
@Override public void run() { for ( ;; ) { // Wait until the reader count goes to zero. // This wakes up for every transation but maybe // able to play several transactions at once (later). try { Transaction txn = queue.take() ; // This takes a Write lock on the DSG - this is where it blocks. JournalControl.replay(txn) ; synchronized (TransactionManager.this) { commitedAwaitingFlush.remove(txn) ; } } catch (InterruptedException ex) { Log.error(this, "Interruped!", ex) ; } } } }
// Debug helper: dump the journal contents.
// NOTE(review): the 'out' parameter is unused — JournalControl.print(journal)
// chooses its own output destination; confirm whether dump should honour 'out'.
private void dump(PrintStream out, Journal journal) { JournalControl.print(journal); }
public static void replay(Transaction transaction) { if ( syslog.isDebugEnabled()) syslog.debug("Replay "+transaction.getLabel()) ; Journal journal = transaction.getJournal() ; DatasetGraphTDB dsg = transaction.getBaseDataset() ; // Currently, we (crudely) replay the whole journal. replay(journal, dsg.getConfig()) ; }
// Run journal recovery explicitly on the command's dataset.
@Override protected void exec() {
    DatasetGraphTDB dsg = super.getDatasetGraphTDB() ;
    // Just creating the DSG does a recovery so this is not (currently) necessary:
    // This may change (not immediately recovering on start up).
    JournalControl.recovery(dsg) ;
}
}
/** Highly risky! Print the transaction manager's live journal for debugging.
 *  NOTE(review): reads the journal while it may be in use — presumably unsafe
 *  when transactions are active; confirm before using outside diagnostics. */
public void printJournal() { JournalControl.print(transactionManager.getJournal()); }
replay(e, sConf) ;
/** Dump a journal - debug support function - opens the journal specially - inconsistent views possible.
 * @param filename path of the journal file to print
 */
public static void print(String filename) {
    BufferChannelFile chan = BufferChannelFile.createUnmanaged(filename, "r") ;
    try {
        Journal journal = new Journal(chan) ;
        JournalControl.print(journal) ;
    } finally {
        // Always release the channel; the original skipped close() if printing threw.
        chan.close() ;
    }
}
// Decide how a committing writer's changes reach the base dataset: replay the
// journal immediately when no readers are active and the journal can be flushed,
// otherwise park the transaction on the pending-commit queue for a later flush.
private void writerCommitsWorker(Transaction txn) {
    if ( activeReaders.get() == 0 && checkForJournalFlush() ) {
        // Can commit immediately.
        // Ensure the queue is empty though.
        // Could simply add txn to the commit queue and do it that way.
        if ( log() )
            log("Commit immediately", txn) ;
        // Currently, all we need is
        //    JournalControl.replay(txn) ;
        // because that plays queued transactions.
        // But for long term generality, at the cost of one check of the journal size,
        // we do this sequence.
        processDelayedReplayQueue(txn) ;
        enactTransaction(txn) ;
        JournalControl.replay(txn) ;
    } else {
        // Can't write back to the base database at the moment.
        commitedAwaitingFlush.add(txn) ;
        // maxQueue records the high-water mark of the pending-commit queue.
        maxQueue = Math.max(commitedAwaitingFlush.size(), maxQueue) ;
        if ( log() )
            log("Add to pending queue", txn) ;
        queue.add(txn) ;
    }
}
/** Recover one transaction from the start position given. * Scan to see if there is a commit; if found, play the * journal from the start point to the commit. * Return true is a commit was found. * Leave journal positioned just after commit or at end if none found. */ private static void recoverSegment(Journal jrnl, long startPosn, long endPosn, StorageConfig sConf) { //System.out.printf("Segment: %d %d\n", startPosn, endPosn); Iterator<JournalEntry> iter = jrnl.entries(startPosn) ; iter = jrnl.entries(startPosn) ; try { for ( ; iter.hasNext() ; ) { JournalEntry e = iter.next() ; if ( e.getType() == JournalEntryType.Commit ) { if ( e.getEndPosition() != endPosn ) log.warn(format("Inconsistent: end at %d; expected %d", e.getEndPosition(), endPosn)) ; return ; } replay(e, sConf) ; } } finally { Iter.close(iter) ; } }
JournalControl.replay(journal, baseDataset) ;