/**
 * Serialises the given transaction to the supplied channel through a freshly
 * created {@link TransactionLogWriter}. The transaction id written is fixed at 2.
 */
private void writeToChannel( TransactionRepresentation transaction, FlushableChannel channel ) throws IOException
{
    LogEntryWriter entryWriter = new LogEntryWriter( channel );
    new TransactionLogWriter( entryWriter ).append( transaction, 2 );
}
/**
 * Writes a check point entry pointing at {@code logPosition} into the transaction log
 * and then forces the channel (via {@code forceAfterAppend}) so the entry is durable.
 *
 * @param logPosition position the check point entry will reference.
 * @param logCheckPointEvent tracing event passed to the force step.
 * @throws IOException if writing or forcing the log fails.
 */
@Override
public void checkPoint( LogPosition logPosition, LogCheckPointEvent logCheckPointEvent ) throws IOException
{
    try
    {
        // Synchronized with logFile to get absolute control over concurrent rotations happening
        synchronized ( logFile )
        {
            transactionLogWriter.checkPoint( logPosition );
        }
    }
    catch ( Throwable cause )
    {
        // Any failure while writing a check point makes the log untrustworthy: panic the
        // database first, then rethrow so the caller also sees the original failure.
        databaseHealth.panic( cause );
        throw cause;
    }
    // Force outside the synchronized block; only the append itself needs rotation exclusion.
    forceAfterAppend( logCheckPointEvent );
}
// NOTE(review): fragment of an enclosing method — the surrounding scope is not visible here.
// Append the transaction under its assigned id, then immediately capture the log position
// after the commit entry so callers can refer to the end of this transaction in the log.
transactionLogWriter.append( transaction, transactionId );
LogPosition logPositionAfterCommit = writer.getCurrentPosition( positionMarker ).newPosition();
// NOTE(review): fragment of an enclosing method — the surrounding scope is not visible here.
final TransactionLogWriter writer = new TransactionLogWriter( new LogEntryWriter( channel ) );
// Tracks the first transaction id received; stays at BASE_TX_ID until a real tx arrives.
final AtomicLong firstTxId = new AtomicLong( BASE_TX_ID );
// Start the log with a check point entry pointing just past the header of the current log file.
writer.checkPoint( new LogPosition( currentLogVersion, LOG_HEADER_SIZE ) );
/**
 * Life-cycle start hook: acquires the log file's writer channel and builds the
 * index command detector plus the transaction log writer that operate on it.
 */
@Override
public void start()
{
    writer = logFile.getWriter();
    indexCommandDetector = new IndexCommandDetector();
    transactionLogWriter = new TransactionLogWriter( new LogEntryWriter( writer ) );
}
// NOTE(review): fragment — the trailing braces/parenthesis close constructs that begin
// outside this view (an anonymous class and a method call).
/**
 * Returns a visitor that appends each received committed transaction to the log,
 * notifying the monitor once when the first transaction arrives.
 */
@Override
public Visitor<CommittedTransactionRepresentation,Exception> transactions()
{
    return transaction ->
    {
        long txId = transaction.getCommitEntry().getTxId();
        // First transaction observed: swap out the BASE_TX_ID sentinel exactly once
        // and tell the monitor which id the stream starts at.
        if ( firstTxId.compareAndSet( BASE_TX_ID, txId ) )
        {
            monitor.startReceivingTransactions( txId );
        }
        writer.append( transaction.getTransactionRepresentation(), txId );
        // false => keep visiting; the stream is not terminated by this visitor.
        return false;
    };
} } );
/**
 * Life-cycle start hook: acquires the writer channel from the log file and constructs
 * the helpers (index command detector, transaction log writer) that write through it.
 */
@Override
public void start()
{
    this.writer = logFile.getWriter();
    this.indexCommandDetector = new IndexCommandDetector();
    // The log writer wraps the same channel obtained above, so all appends go to logFile.
    this.transactionLogWriter = new TransactionLogWriter( new LogEntryWriter( writer ) );
}
/**
 * Appends one deliberately corrupted transaction (written through
 * {@link CorruptedLogEntryWriter}) of random size to the log, advancing {@code txId}.
 */
private void appendCorruptedTransaction() throws IOException
{
    FlushablePositionAwareChannel logChannel = logFile.getWriter();
    CorruptedLogEntryWriter corruptedEntryWriter = new CorruptedLogEntryWriter( logChannel );
    new TransactionLogWriter( corruptedEntryWriter ).append( tx( random.intBetween( 100, 1000 ) ), ++txId );
}
/**
 * Appends a pulled transaction to the local transaction log, remembering its commit id.
 * An append failure is logged and deliberately not propagated.
 */
@Override
public void onTxReceived( TxPullResponse txPullResponse )
{
    CommittedTransactionRepresentation pulled = txPullResponse.tx();
    try
    {
        txId = pulled.getCommitEntry().getTxId();
        writer.append( pulled.getTransactionRepresentation(), txId );
    }
    catch ( IOException e )
    {
        log.error( "Failed when appending to transaction log", e );
    }
}
/**
 * Builds a catch-up writer over a non-rotating physical log file in {@code storeDir}.
 *
 * @param storeDir directory containing the store and its transaction logs.
 * @param fs file system abstraction used for the log files.
 * @param pageCache page cache used by the read-only version/txid repositories.
 * @param logProvider source of this class's logger.
 * @throws IOException if the underlying log infrastructure cannot be initialised.
 */
TransactionLogCatchUpWriter( File storeDir, FileSystemAbstraction fs, PageCache pageCache, LogProvider logProvider ) throws IOException
{
    this.neoStoreFile = new File( storeDir, MetaDataStore.DEFAULT_NAME );
    this.pageCache = pageCache;
    this.log = logProvider.getLog( getClass() );
    PhysicalLogFiles logFiles = new PhysicalLogFiles( storeDir, fs );
    ReadOnlyLogVersionRepository logVersionRepository = new ReadOnlyLogVersionRepository( pageCache, storeDir );
    ReadOnlyTransactionIdStore readOnlyTransactionIdStore = new ReadOnlyTransactionIdStore( pageCache, storeDir );
    // Long.MAX_VALUE rotation threshold means the log never rotates during catch-up.
    // The last-committed supplier is offset by -1; presumably so the first appended tx
    // lines up with the store's last committed id — TODO confirm against callers.
    LogFile logFile = lifespan.add( new PhysicalLogFile( fs, logFiles, Long.MAX_VALUE /*don't rotate*/,
            () -> readOnlyTransactionIdStore.getLastCommittedTransactionId() - 1, logVersionRepository,
            new Monitors().newMonitor( PhysicalLogFile.Monitor.class ), new LogHeaderCache( 10 ) ) );
    // The log file is owned by lifespan (added above); this writer wraps its channel.
    this.writer = new TransactionLogWriter( new LogEntryWriter( logFile.getWriter() ) );
}
/**
 * Appends a check point entry referencing {@code logPosition}, then forces the channel
 * so the entry reaches disk.
 *
 * @param logPosition position recorded in the check point entry.
 * @param logCheckPointEvent tracing event handed to the force step.
 * @throws IOException if the append or the force fails.
 */
@Override
public void checkPoint( LogPosition logPosition, LogCheckPointEvent logCheckPointEvent ) throws IOException
{
    try
    {
        // Synchronized with logFile to get absolute control over concurrent rotations happening
        synchronized ( logFile )
        {
            transactionLogWriter.checkPoint( logPosition );
        }
    }
    catch ( Throwable cause )
    {
        // A failed check point write compromises the log: panic first, then rethrow the
        // original cause unchanged for the caller.
        databaseHealth.panic( cause );
        throw cause;
    }
    // The force happens outside the lock — only the append needs rotation exclusion.
    forceAfterAppend( logCheckPointEvent );
}
private void writeTransactions( int transactionCount, int minTransactionSize, int maxTransactionSize ) throws IOException { FlushablePositionAwareChannel channel = logFile.getWriter(); TransactionLogWriter writer = new TransactionLogWriter( new LogEntryWriter( channel ) ); for ( int i = 0; i < transactionCount; i++ ) { writer.append( tx( random.intBetween( minTransactionSize, maxTransactionSize ) ), ++txId ); } channel.prepareForFlush().flush(); // Don't close the channel, LogFile owns it }
@Override public synchronized void onTxReceived( TxPullResponse txPullResponse ) { CommittedTransactionRepresentation tx = txPullResponse.tx(); long receivedTxId = tx.getCommitEntry().getTxId(); // neo4j admin backup clients pull transactions indefinitely and have no monitoring mechanism for tx log rotation // Other cases, ex. Read Replicas have an external mechanism that rotates independently of this process and don't need to // manually rotate while pulling if ( rotateTransactionsManually && logFiles.getLogFile().rotationNeeded() ) { rotateTransactionLogs( logFiles ); } if ( receivedTxId != expectedTxId ) { throw new RuntimeException( format( "Expected txId: %d but got: %d", expectedTxId, receivedTxId ) ); } lastTxId = receivedTxId; expectedTxId++; try { writer.append( tx.getTransactionRepresentation(), lastTxId ); } catch ( IOException e ) { log.error( "Failed when appending to transaction log", e ); } }
/**
 * Builds a catch-up writer over {@code storeDir}'s transaction logs, starting appends
 * at {@code fromTxId}.
 *
 * @param storeDir store directory whose logs will be written.
 * @param fs file system abstraction for log files and id generators.
 * @param pageCache page cache shared with the meta data store.
 * @param config base configuration, further customised via {@code customisedConfig}.
 * @param logProvider source of this class's logger.
 * @param fromTxId first transaction id this writer expects to receive.
 * @param asPartOfStoreCopy whether this catch-up runs as part of a store copy.
 * @param keepTxLogsInStoreDir whether tx logs stay in the store directory.
 * @param forceTransactionRotations whether this writer rotates logs manually while pulling.
 * @throws IOException if stores or log files cannot be opened.
 */
TransactionLogCatchUpWriter( File storeDir, FileSystemAbstraction fs, PageCache pageCache, Config config,
        LogProvider logProvider, long fromTxId, boolean asPartOfStoreCopy, boolean keepTxLogsInStoreDir,
        boolean forceTransactionRotations ) throws IOException
{
    this.pageCache = pageCache;
    this.log = logProvider.getLog( getClass() );
    this.asPartOfStoreCopy = asPartOfStoreCopy;
    this.rotateTransactionsManually = forceTransactionRotations;
    // Record format is detected from the existing store (falling back to defaults), so the
    // meta data store opened below matches what is on disk.
    RecordFormats recordFormats = RecordFormatSelector.selectForStoreOrConfig( Config.defaults(), storeDir, pageCache, logProvider );
    this.stores = new StoreFactory( storeDir, config, new DefaultIdGeneratorFactory( fs ), pageCache, fs, recordFormats, logProvider, EMPTY )
            .openNeoStores( META_DATA );
    Dependencies dependencies = new Dependencies();
    dependencies.satisfyDependency( stores.getMetaDataStore() );
    // The last-committed supplier is fromTxId - 1 so the first appended transaction is fromTxId.
    LogFilesBuilder logFilesBuilder = LogFilesBuilder
            .builder( storeDir, fs )
            .withDependencies( dependencies )
            .withLastCommittedTransactionIdSupplier( () -> fromTxId - 1 )
            .withConfig( customisedConfig( config, keepTxLogsInStoreDir, forceTransactionRotations ) )
            .withLogVersionRepository( stores.getMetaDataStore() );
    this.logFiles = logFilesBuilder.build();
    // logFiles' lifecycle is owned by lifespan; the writer wraps its active channel.
    this.lifespan.add( logFiles );
    this.writer = new TransactionLogWriter( new LogEntryWriter( logFiles.getLogFile().getWriter() ) );
    this.storeDir = storeDir;
    this.expectedTxId = fromTxId;
}
// NOTE(review): fragment of an enclosing method — writes a check point entry referencing
// checkPointPosition through the surrounding scope's writer.
writer.checkPoint( checkPointPosition );
/**
 * Appends a transaction containing property and node commands to the last log file,
 * writing it through {@link CorruptedLogEntryWriter} so the resulting entries are corrupted.
 */
private void addCorruptedCommandsToLastLogFile() throws IOException
{
    PositiveLogFilesBasedLogVersionRepository versionRepository = new PositiveLogFilesBasedLogVersionRepository( logFiles );
    LogFiles internalLogFiles = LogFilesBuilder.builder( directory.databaseLayout(), fileSystemRule )
            .withLogVersionRepository( versionRepository )
            .withTransactionIdStore( new SimpleTransactionIdStore() )
            .build();
    try ( Lifespan lifespan = new Lifespan( internalLogFiles ) )
    {
        FlushablePositionAwareChannel logChannel = internalLogFiles.getLogFile().getWriter();
        TransactionLogWriter corruptedWriter = new TransactionLogWriter( new CorruptedLogEntryWriter( logChannel ) );

        Collection<StorageCommand> txCommands = new ArrayList<>();
        txCommands.add( new Command.PropertyCommand( new PropertyRecord( 1 ), new PropertyRecord( 2 ) ) );
        txCommands.add( new Command.NodeCommand( new NodeRecord( 2 ), new NodeRecord( 3 ) ) );

        corruptedWriter.append( new PhysicalTransactionRepresentation( txCommands ), 1000 );
    }
}
// NOTE(review): fragment of an enclosing method — the surrounding scope is not visible here.
// Append the transaction under its id, then record the position right after the commit
// entry so the end of this transaction in the log can be referenced by the caller.
transactionLogWriter.append( transaction, transactionId );
LogPosition logPositionAfterCommit = writer.getCurrentPosition( positionMarker ).newPosition();
/**
 * Appends a dummy (empty) transaction whose header encodes raft log index -1, then records
 * the dummy's id as the store's last committed transaction id in the meta data store.
 */
private void appendNullTransactionLogEntryToSetRaftIndexToMinusOne() throws IOException
{
    ReadOnlyTransactionIdStore txIdStore = new ReadOnlyTransactionIdStore( pageCache, storeDir );
    LogFiles logFiles = LogFilesBuilder.activeFilesBuilder( storeDir, fs, pageCache )
            .withConfig( config )
            .withLastCommittedTransactionIdSupplier( () -> txIdStore.getLastClosedTransactionId() - 1 )
            .build();

    long dummyTransactionId;
    try ( Lifespan ignored = new Lifespan( logFiles ) )
    {
        FlushableChannel logChannel = logFiles.getLogFile().getWriter();
        TransactionLogWriter logWriter = new TransactionLogWriter( new LogEntryWriter( logChannel ) );

        long lastCommittedTransactionId = txIdStore.getLastCommittedTransactionId();
        PhysicalTransactionRepresentation emptyTx = new PhysicalTransactionRepresentation( Collections.emptyList() );
        byte[] txHeaderBytes = LogIndexTxHeaderEncoding.encodeLogIndexAsTxHeader( -1 );
        emptyTx.setHeader( txHeaderBytes, -1, -1, -1, lastCommittedTransactionId, -1, -1 );

        dummyTransactionId = lastCommittedTransactionId + 1;
        logWriter.append( emptyTx, dummyTransactionId );
        logChannel.prepareForFlush().flush();
    }

    // Only after the log files are closed is the meta data store updated to point at the dummy.
    File neoStoreFile = new File( storeDir, MetaDataStore.DEFAULT_NAME );
    MetaDataStore.setRecord( pageCache, neoStoreFile, LAST_TRANSACTION_ID, dummyTransactionId );
}
/**
 * Appends a dummy (empty) transaction whose header encodes raft log index -1 to a
 * non-rotating physical log file, then records the dummy's id as the store's last
 * committed transaction id. Legacy {@code PhysicalLogFile}-based variant.
 *
 * @throws IOException if writing the log or updating the meta data store fails.
 */
private void appendNullTransactionLogEntryToSetRaftIndexToMinusOne() throws IOException
{
    PhysicalLogFiles logFiles = new PhysicalLogFiles( storeDir, fs );
    ReadOnlyLogVersionRepository logVersionRepository = new ReadOnlyLogVersionRepository( pageCache, storeDir );
    ReadOnlyTransactionIdStore readOnlyTransactionIdStore = new ReadOnlyTransactionIdStore( pageCache, storeDir );
    // Long.MAX_VALUE rotation threshold => the log never rotates while we append the dummy tx.
    PhysicalLogFile logFile = new PhysicalLogFile( fs, logFiles, Long.MAX_VALUE /*don't rotate*/,
            () -> readOnlyTransactionIdStore.getLastClosedTransactionId() - 1, logVersionRepository,
            new Monitors().newMonitor( PhysicalLogFile.Monitor.class ), new LogHeaderCache( 10 ) );

    long dummyTransactionId;
    try ( Lifespan lifespan = new Lifespan( logFile ) )
    {
        FlushableChannel channel = logFile.getWriter();
        TransactionLogWriter writer = new TransactionLogWriter( new LogEntryWriter( channel ) );

        long lastCommittedTransactionId = readOnlyTransactionIdStore.getLastCommittedTransactionId();
        // An empty command list: the transaction exists only to carry the -1 raft index header.
        PhysicalTransactionRepresentation tx = new PhysicalTransactionRepresentation( Collections.emptyList() );
        byte[] txHeaderBytes = LogIndexTxHeaderEncoding.encodeLogIndexAsTxHeader( -1 );
        tx.setHeader( txHeaderBytes, -1, -1, -1, lastCommittedTransactionId, -1, -1 );

        dummyTransactionId = lastCommittedTransactionId + 1;
        writer.append( tx, dummyTransactionId );
        // Flush before the Lifespan closes the log file so the entry is on disk.
        channel.prepareForFlush().flush();
    }

    // Update the meta data store only after the log file has been closed by the Lifespan.
    File neoStoreFile = new File( storeDir, MetaDataStore.DEFAULT_NAME );
    MetaDataStore.setRecord( pageCache, neoStoreFile, LAST_TRANSACTION_ID, dummyTransactionId );
}