/**
 * Builds the packet that announces the start of journal synchronization to a replica.
 * Records the IDs of every data file that will be replicated and maps the journal
 * content type onto the wire-level {@code SyncDataType}.
 *
 * @param contentType        which journal is being synchronized (MESSAGES or BINDINGS)
 * @param nodeID             the node identifier of the sending server
 * @param allowsAutoFailBack whether automatic fail-back is permitted after sync
 * @throws IllegalArgumentException if {@code contentType} is not MESSAGES or BINDINGS
 */
public ReplicationStartSyncMessage(JournalFile[] datafiles,
                                   AbstractJournalStorageManager.JournalContent contentType,
                                   String nodeID,
                                   boolean allowsAutoFailBack) {
   this();
   this.nodeID = nodeID;
   this.allowsAutoFailBack = allowsAutoFailBack;
   // This message marks the *start* of sync; a separate message signals completion.
   synchronizationIsFinished = false;
   ids = new long[datafiles.length];
   for (int i = 0; i < datafiles.length; i++) {
      ids[i] = datafiles[i].getFileID();
   }
   switch (contentType) {
      case MESSAGES:
         dataType = SyncDataType.JournalMessages;
         break;
      case BINDINGS:
         dataType = SyncDataType.JournalBindings;
         break;
      default:
         // Name the unexpected value instead of throwing a bare, message-less exception.
         throw new IllegalArgumentException("Unsupported journal content type: " + contentType);
   }
}
public void calculateNextfileID(final List<JournalFile> files) { for (JournalFile file : files) { final long fileIdFromFile = file.getFileID(); final long fileIdFromName = getFileNameID(file.getFile().getFileName()); // The compactor could create a fileName but use a previously assigned ID. // Because of that we need to take both parts into account setNextFileID(Math.max(fileIdFromName, fileIdFromFile)); } }
public void calculateNextfileID(final List<JournalFile> files) { for (JournalFile file : files) { final long fileIdFromFile = file.getFileID(); final long fileIdFromName = getFileNameID(file.getFile().getFileName()); // The compactor could create a fileName but use a previously assigned ID. // Because of that we need to take both parts into account setNextFileID(Math.max(fileIdFromName, fileIdFromFile)); } }
// NOTE(review): this method is visibly truncated in this chunk — the nested
// `if` blocks are never closed and the braces do not balance, so only the
// opening portion is shown here. What is visible: it iterates dataFiles and,
// on each suspicious condition (file ID not strictly increasing vs. `seq`,
// file ID at/behind the journal's current file, file ID at/behind
// `lastFreeId`), logs diagnostics via checkFiles()/debugFiles(). Confirm the
// full control flow against the complete source before modifying.
public synchronized void checkDataFiles() { long seq = -1; for (JournalFile file : dataFiles) { if (file.getFileID() <= seq) { ActiveMQJournalLogger.LOGGER.checkFiles(); ActiveMQJournalLogger.LOGGER.info(debugFiles()); if (journal.getCurrentFile() != null && journal.getCurrentFile().getFileID() <= file.getFileID()) { ActiveMQJournalLogger.LOGGER.checkFiles(); ActiveMQJournalLogger.LOGGER.info(debugFiles()); ActiveMQJournalLogger.LOGGER.currentFile(file.getFileID(), journal.getCurrentFile().getFileID(), file.getFileID(), (journal.getCurrentFile() == file)); seq = file.getFileID(); if (file.getFileID() <= lastFreeId) { ActiveMQJournalLogger.LOGGER.checkFiles(); ActiveMQJournalLogger.LOGGER.info(debugFiles()); lastFreeId = file.getFileID(); if (file.getFileID() < seq) { ActiveMQJournalLogger.LOGGER.checkFiles(); ActiveMQJournalLogger.LOGGER.info(debugFiles());
// NOTE(review): incomplete excerpt — the brace structure below does not close,
// so this is only the head of the method. The visible logic walks `dataFiles`
// tracking the last seen file ID in `seq` and emits warning/diagnostic output
// (checkFiles() + debugFiles()) whenever an ID is out of order, not ahead of
// the journal's current file, or not ahead of `lastFreeId`; it also appears to
// update `seq` and `lastFreeId` as it goes. Do not edit without the full body.
public synchronized void checkDataFiles() { long seq = -1; for (JournalFile file : dataFiles) { if (file.getFileID() <= seq) { ActiveMQJournalLogger.LOGGER.checkFiles(); ActiveMQJournalLogger.LOGGER.info(debugFiles()); if (journal.getCurrentFile() != null && journal.getCurrentFile().getFileID() <= file.getFileID()) { ActiveMQJournalLogger.LOGGER.checkFiles(); ActiveMQJournalLogger.LOGGER.info(debugFiles()); ActiveMQJournalLogger.LOGGER.currentFile(file.getFileID(), journal.getCurrentFile().getFileID(), file.getFileID(), (journal.getCurrentFile() == file)); seq = file.getFileID(); if (file.getFileID() <= lastFreeId) { ActiveMQJournalLogger.LOGGER.checkFiles(); ActiveMQJournalLogger.LOGGER.info(debugFiles()); lastFreeId = file.getFileID(); if (file.getFileID() < seq) { ActiveMQJournalLogger.LOGGER.checkFiles(); ActiveMQJournalLogger.LOGGER.info(debugFiles());
/**
 * Snapshots the given journal file as an (ID, position-count) pair.
 *
 * @param jf the journal file to summarize
 * @return a pair of the file's ID and its current position count
 */
private Pair<Long, Integer> getPair(JournalFile jf) {
   final long id = jf.getFileID();
   final int positions = jf.getPosCount();
   return new Pair<>(id, positions);
}
/**
 * Sends the whole content of the given journal file to be duplicated on the
 * replica. A clone of the underlying sequential file is streamed so the live
 * file is left untouched; the clone is closed when the transfer finishes,
 * whether or not it succeeds. No-op while replication is not enabled.
 *
 * @param jf      the journal file whose content is replicated
 * @param content which journal (messages/bindings) the file belongs to
 * @throws Exception if reading or sending the file fails
 */
public void syncJournalFile(JournalFile jf, AbstractJournalStorageManager.JournalContent content) throws Exception {
   if (!enabled) {
      return;
   }
   final SequentialFile clone = jf.getFile().cloneFile();
   try {
      ActiveMQServerLogger.LOGGER.replicaSyncFile(clone, clone.size());
      sendLargeFile(content, null, jf.getFileID(), clone, Long.MAX_VALUE);
   } finally {
      if (clone.isOpen()) {
         clone.close();
      }
   }
}
/**
 * Rolls the compactor over to a fresh target file: flushes pending output,
 * opens the next CMP file, (re)sizes the direct write buffer to the journal's
 * file size if needed, and writes the journal header into the new file.
 *
 * @throws Exception if flushing, opening, or writing the header fails
 */
protected void openFile() throws Exception {
   // Push any pending writes out before switching target files.
   flush(false);

   currentFile = filesRepository.openFileCMP();
   sequentialFile = currentFile.getFile();
   sequentialFile.open(1, false);
   // Re-wrap with a compaction-local ordering ID so output files stay sequenced.
   currentFile = new JournalFileImpl(sequentialFile, nextOrderingID++, JournalImpl.FORMAT_VERSION);

   final int fileSize = journal.getFileSize();
   // A previously allocated buffer that is now too small must be released and rebuilt.
   if (bufferWrite != null && bufferWrite.capacity() < fileSize) {
      fileFactory.releaseDirectBuffer(bufferWrite);
      bufferWrite = null;
      writingChannel = null;
   }
   if (bufferWrite == null) {
      // FIX: the original local was also named `bufferWrite`, shadowing the field
      // of the same name; renamed to keep field vs. local unambiguous.
      final ByteBuffer allocated = fileFactory.allocateDirectBuffer(fileSize);
      this.bufferWrite = allocated;
      writingChannel = ActiveMQBuffers.wrappedBuffer(allocated);
   } else {
      // Buffer is large enough — just reset positions for reuse.
      writingChannel.clear();
      bufferWrite.clear();
   }
   JournalImpl.writeHeader(writingChannel, journal.getUserVersion(), currentFile.getFileID());
}
// Create the compactor seeded with the currently known live record IDs and the
// ID of the first data file selected for processing — presumably the lower
// bound of the compaction run (NOTE(review): confirm the constructor's
// contract against JournalCompactor).
compactor = new JournalCompactor(fileFactory, this, filesRepository, records.keysLongHashSet(), dataFilesToProcess.get(0).getFileID());
// Instantiate the compactor with the snapshot of live record IDs plus the file
// ID of the first file to process; assumes dataFilesToProcess is non-empty at
// this point — TODO confirm the caller guarantees that.
compactor = new JournalCompactor(fileFactory, this, filesRepository, records.keysLongHashSet(), dataFilesToProcess.get(0).getFileID());
/**
 * Switches writing to the next compaction (CMP) file. Any buffered output is
 * flushed first; the direct write buffer is reallocated when its capacity is
 * below the journal's configured file size, otherwise cleared for reuse; and
 * finally the journal header is written to the fresh file.
 *
 * @throws Exception if the flush, file open, or header write fails
 */
protected void openFile() throws Exception {
   flush(false);

   currentFile = filesRepository.openFileCMP();
   sequentialFile = currentFile.getFile();
   sequentialFile.open(1, false);
   currentFile = new JournalFileImpl(sequentialFile, nextOrderingID++, JournalImpl.FORMAT_VERSION);

   final int requiredCapacity = journal.getFileSize();
   final boolean tooSmall = bufferWrite != null && bufferWrite.capacity() < requiredCapacity;
   if (tooSmall) {
      // Undersized buffer: release it so a bigger one is allocated below.
      fileFactory.releaseDirectBuffer(bufferWrite);
      bufferWrite = null;
      writingChannel = null;
   }
   if (bufferWrite == null) {
      final ByteBuffer freshBuffer = fileFactory.allocateDirectBuffer(requiredCapacity);
      this.bufferWrite = freshBuffer;
      writingChannel = ActiveMQBuffers.wrappedBuffer(freshBuffer);
   } else {
      writingChannel.clear();
      bufferWrite.clear();
   }

   JournalImpl.writeHeader(writingChannel, journal.getUserVersion(), currentFile.getFileID());
}