/**
 * Returns the current length in bytes of the wrapped sequential file.
 *
 * @return the file size in bytes, as reported by the underlying {@code sequentialFile}
 * @throws Exception if the underlying file cannot report its size
 */
@Override public long size() throws Exception { return this.sequentialFile.size(); }
/**
 * Returns the current length in bytes of the wrapped sequential file.
 *
 * @return the file size in bytes, as reported by the underlying {@code sequentialFile}
 * @throws Exception if the underlying file cannot report its size
 */
@Override public long size() throws Exception { return this.sequentialFile.size(); }
/**
 * Computes how many bytes remain between the file's current read position and its end.
 * <p>
 * When the remainder exceeds what an {@code int} can hold, the result is clamped to
 * {@link Integer#MAX_VALUE}.
 *
 * @return the remaining byte count, clamped to {@code Integer.MAX_VALUE}
 * @throws IllegalStateException if querying the file position or size fails
 */
@Override
public int getRemainingBytes() {
   try {
      final long position = sequentialFile.position();
      final long size = sequentialFile.size();
      // clamp to int range; values that fit pass through unchanged
      return (int) Math.min(size - position, Integer.MAX_VALUE);
   } catch (Exception e) {
      throw new IllegalStateException(e);
   }
}
/**
 * Computes how many bytes remain between the file's current read position and its end,
 * clamping the result to {@link Integer#MAX_VALUE} when it does not fit in an {@code int}.
 *
 * @return the remaining byte count, clamped to {@code Integer.MAX_VALUE}
 * @throws IllegalStateException if querying the file position or size fails
 */
@Override public int getRemainingBytes() { try { final long position = sequentialFile.position(); final long size = sequentialFile.size(); final long remaining = size - position; if (remaining > Integer.MAX_VALUE) { return Integer.MAX_VALUE; } else { return (int) remaining; } } catch (Exception e) { throw new IllegalStateException(e); } }
public synchronized void validateFile() throws ActiveMQException { try { if (file == null) { if (messageID <= 0) { throw new RuntimeException("MessageID not set on LargeMessage"); } file = createFile(); openFile(); bodySize = file.size(); } } catch (Exception e) { // TODO: There is an IO_ERROR on trunk now, this should be used here instead throw new ActiveMQInternalErrorException(e.getMessage(), e); } }
/**
 * Ensures the file is open, records its current size, and rewinds the position to the start.
 *
 * @throws Exception if opening, sizing, or repositioning the file fails
 */
public void open() throws Exception {
   final boolean alreadyOpen = file.isOpen();
   if (!alreadyOpen) {
      file.open();
   }
   // NOTE(review): the size counter is an int — assumes page files stay under 2GB
   size.set((int) file.size());
   file.position(0);
}
/**
 * Appends any pending synced data (held in {@code appendFile}) onto the end of the main
 * large-message file, then deletes the append file and marks the sync as done.
 * <p>
 * No-op when this message has already been deleted. Failures while copying are logged
 * (best-effort) rather than rethrown, and {@code syncDone} is set regardless so the
 * sync state machine can advance.
 *
 * @param buffer scratch buffer used by {@code FileIOUtil.copyData} for the transfer
 * @throws Exception if opening the main file fails (copy errors are only logged)
 */
public synchronized void joinSyncedData(ByteBuffer buffer) throws Exception { if (deleted) return; SequentialFile mainSeqFile = mainLM.getFile(); if (!mainSeqFile.isOpen()) { mainSeqFile.open(); } try { if (appendFile != null) { if (logger.isTraceEnabled()) { logger.trace("joinSyncedData on " + mainLM + ", currentSize on mainMessage=" + mainSeqFile.size() + ", appendFile size = " + appendFile.size()); } FileIOUtil.copyData(appendFile, mainSeqFile, buffer); deleteAppendFile(); } else { if (logger.isTraceEnabled()) { logger.trace("joinSyncedData, appendFile is null, ignoring joinSyncedData on " + mainLM); } } } catch (Throwable e) { ActiveMQServerLogger.LOGGER.errorWhileSyncingData(mainLM.toString(), e); } if (logger.isTraceEnabled()) { logger.trace("joinedSyncData on " + mainLM + " finished with " + mainSeqFile.size()); } syncDone = true; }
/**
 * Returns the cached body size, computing it on first use.
 * <p>
 * When no file is attached yet, a temporary {@code SequentialFile} is created just to
 * read the size and is always closed again, even if sizing fails.
 *
 * @return the body size in bytes
 * @throws ActiveMQIOErrorException wrapping any I/O failure while sizing the file
 */
private long getBodySize() throws ActiveMQException {
   try {
      if (bodySize < 0) {
         if (file != null) {
            bodySize = file.size();
         } else {
            SequentialFile tmpFile = createFile();
            try {
               bodySize = tmpFile.size();
            } finally {
               // fix: previously the temp file leaked when size() threw
               tmpFile.close();
            }
         }
      }
      return bodySize;
   } catch (Exception e) {
      ActiveMQIOErrorException errorException = new ActiveMQIOErrorException();
      errorException.initCause(e);
      throw errorException;
   }
}
/**
 * Reads the whole message body from disk into a read-only buffer.
 * <p>
 * The file is opened for the read and closed again in all cases (close failures are
 * intentionally ignored as best-effort cleanup).
 *
 * @return a wrapper over a heap buffer containing the full body
 * @throws RuntimeException wrapping any validation or I/O failure, or if the body is
 *         too large to fit in a single {@code int}-sized buffer
 */
@Override
public ActiveMQBuffer getReadOnlyBodyBuffer() {
   try {
      validateFile();
      file.open();
      final long size = file.size();
      // fix: a plain (int) cast silently truncated bodies >= 2GB, producing a
      // negative allocation size and an unhelpful failure downstream
      if (size > Integer.MAX_VALUE) {
         throw new IllegalStateException("Large message body of " + size + " bytes is too large for a single buffer");
      }
      ByteBuffer buffer = ByteBuffer.allocate((int) size);
      file.read(buffer);
      return new ChannelBufferWrapper(Unpooled.wrappedBuffer(buffer));
   } catch (Exception e) {
      throw new RuntimeException(e);
   } finally {
      try {
         file.close();
      } catch (Exception ignored) {
         // best-effort close; original error (if any) takes precedence
      }
   }
}
/**
 * Reads every message stored in this page file.
 * <p>
 * Chooses between a memory-mapped read ({@code canBeMapped}) and a plain sequential
 * read. The cached size and message count are published with {@code lazySet}
 * (ordered write; readers may observe it slightly late — acceptable here).
 *
 * @param storage storage manager used to decode the persisted messages
 * @return the messages contained in the page, in file order
 * @throws Exception if the file is not open or reading fails
 */
public synchronized List<PagedMessage> read(StorageManager storage) throws Exception { if (logger.isDebugEnabled()) { logger.debug("reading page " + this.pageId + " on address = " + storeName); } if (!file.isOpen()) { throw ActiveMQMessageBundle.BUNDLE.invalidPageIO(); } final List<PagedMessage> messages = new ArrayList<>(); size.lazySet((int) file.size()); if (this.canBeMapped) { readFromMapped(storage, messages); // if the file is open to be written // it needs to updated the position file.position(file.size()); } else { readFromSequentialFile(storage, messages); } numberOfMessages.lazySet(messages.size()); return messages; }
/**
 * Reads the entire page file into a factory-provided buffer and decodes the messages
 * out of it, appending them to {@code messages}.
 * <p>
 * The buffer is always returned to the factory, even on failure. Assumes the page file
 * fits in an {@code int}-sized buffer.
 *
 * @param storage  storage manager used to decode the persisted messages
 * @param messages output list the decoded messages are appended to
 * @throws Exception if positioning or reading the file fails
 */
private void readFromSequentialFile(StorageManager storage, List<PagedMessage> messages) throws Exception { final int fileSize = (int) file.size(); //doesn't need to be a direct buffer: that case is covered using the MMAP read final ByteBuffer buffer = this.fileFactory.newBuffer(fileSize); try { file.position(0); file.read(buffer); buffer.rewind(); assert (buffer.limit() == fileSize) : "buffer doesn't contains the whole file"; ChannelBufferWrapper activeMQBuffer = wrapBuffer(fileSize, buffer); read(storage, activeMQBuffer, messages); } finally { this.fileFactory.releaseBuffer(buffer); } }
/**
 * Collects the existing large-message files and their current sizes, so the
 * replication manager knows how much of each message to synchronize with the backup.
 * Further data appended to the messages will be replicated normally.
 * <p>
 * Messages already scheduled for deletion are skipped.
 *
 * @return a map from large-message ID to a (filename, current size) pair
 * @throws Exception if listing the files or sizing any of them fails
 */ private Map<Long, Pair<String, Long>> recoverPendingLargeMessages() throws Exception { Map<Long, Pair<String, Long>> largeMessages = new HashMap<>(); // only send durable messages... // listFiles append a "." to anything... List<String> filenames = largeMessagesFactory.listFiles("msg"); for (String filename : filenames) { long id = getLargeMessageIdFromFilename(filename); if (!largeMessagesToDelete.containsKey(id)) { SequentialFile seqFile = largeMessagesFactory.createSequentialFile(filename); long size = seqFile.size(); largeMessages.put(id, new Pair<>(filename, size)); } } return largeMessages; }
/** * this method is used internally only however tools may use it to maintenance. * It won't be part of the interface as the tools should be specific to the implementation */ public List<JournalFile> orderFiles() throws Exception { List<String> fileNames = fileFactory.listFiles(filesRepository.getFileExtension()); List<JournalFile> orderedFiles = new ArrayList<>(fileNames.size()); for (String fileName : fileNames) { SequentialFile file = fileFactory.createSequentialFile(fileName); if (file.size() >= SIZE_HEADER) { file.open(); try { JournalFileImpl jrnFile = readFileHeader(file); orderedFiles.add(jrnFile); } finally { file.close(); } } else { ActiveMQJournalLogger.LOGGER.ignoringShortFile(fileName); file.delete(); } } // Now order them by ordering id - we can't use the file name for ordering // since we can re-use dataFiles Collections.sort(orderedFiles, JOURNAL_FILE_COMPARATOR); return orderedFiles; }
/**
 * Returns the body size as an {@code int} suitable for sizing a buffer.
 * <p>
 * If the file size overflows an {@code int} (the cast goes negative), a warning is
 * logged and {@link Integer#MAX_VALUE} is returned instead. The file is opened for
 * the query and closed again only if it was not open when we started.
 *
 * @return the body size in bytes, clamped to {@code Integer.MAX_VALUE}
 * @throws RuntimeException wrapping any failure to open or size the file
 */
@Override public int getBodyBufferSize() { final boolean closeFile = file == null || !file.isOpen(); try { openFile(); final long fileSize = file.size(); int fileSizeAsInt = (int) fileSize; if (fileSizeAsInt < 0) { logger.warnf("suspicious large message file size of %d bytes for %s, will use %d instead.", fileSize, file.getFileName(), Integer.MAX_VALUE); fileSizeAsInt = Integer.MAX_VALUE; } return fileSizeAsInt; } catch (Exception e) { throw new RuntimeException(e); } finally { if (closeFile) { try { file.close(); } catch (Exception ignored) { } } } }
/**
 * Appends raw bytes at the end of a large-message file, replicating the write to the
 * backup when replication is active. Runs under the storage read lock.
 *
 * @param file      the large-message file to append to
 * @param messageId ID of the large message (used for replication)
 * @param bytes     the payload to append
 * @throws Exception if positioning or writing the file fails
 */
@Override
public final void addBytesToLargeMessage(final SequentialFile file,
                                         final long messageId,
                                         final byte[] bytes) throws Exception {
   readLock();
   try {
      // always append at the current end of the file
      final long endOfFile = file.size();
      file.position(endOfFile);
      file.writeDirect(ByteBuffer.wrap(bytes), false);
      if (isReplicated()) {
         replicator.largeMessageWrite(messageId, bytes);
      }
   } finally {
      readUnLock();
   }
}
/**
 * Lists the journal files on disk, reads each header, and returns them ordered by
 * file ID (names cannot be used for ordering because data files are re-used).
 * Files too short to contain a header are logged and deleted.
 * <p>
 * This method is used internally, but tools may use it for maintenance; it is
 * deliberately not part of the interface since tools should be implementation-specific.
 *
 * @return the journal files sorted by {@code JOURNAL_FILE_COMPARATOR}
 * @throws Exception if listing, opening, or reading any file fails
 */ public List<JournalFile> orderFiles() throws Exception { List<String> fileNames = fileFactory.listFiles(filesRepository.getFileExtension()); List<JournalFile> orderedFiles = new ArrayList<>(fileNames.size()); for (String fileName : fileNames) { SequentialFile file = fileFactory.createSequentialFile(fileName); if (file.size() >= SIZE_HEADER) { file.open(); try { JournalFileImpl jrnFile = readFileHeader(file); orderedFiles.add(jrnFile); } finally { file.close(); } } else { ActiveMQJournalLogger.LOGGER.ignoringShortFile(fileName); file.delete(); } } // Now order them by ordering id - we can't use the file name for ordering // since we can re-use dataFiles Collections.sort(orderedFiles, JOURNAL_FILE_COMPARATOR); return orderedFiles; }
/**
 * Sends the listed page files of this store to the replica, skipping IDs whose
 * files no longer exist on disk.
 *
 * @param replicator replication manager used to push the page data
 * @param pageIds    IDs of the pages to synchronize
 * @throws Exception if sizing or sending any page file fails
 */
@Override
public void sendPages(ReplicationManager replicator, Collection<Integer> pageIds) throws Exception {
   for (Integer pageId : pageIds) {
      final SequentialFile pageFile = fileFactory.createSequentialFile(createFileName(pageId));
      if (pageFile.exists()) {
         ActiveMQServerLogger.LOGGER.replicaSyncFile(pageFile, pageFile.size());
         replicator.syncPages(pageFile, pageId, getAddress());
      }
   }
}
/**
 * Sends the whole content of a journal file to the replica for duplication.
 * <p>
 * A clone of the file handle is used so the journal's own handle is untouched; the
 * clone is closed afterwards if the send left it open. No-op when replication is
 * disabled.
 *
 * @param jf      the journal file to duplicate
 * @param content which journal (messages/bindings) the file belongs to
 * @throws Exception if sizing or sending the file fails
 */
public void syncJournalFile(JournalFile jf, AbstractJournalStorageManager.JournalContent content) throws Exception {
   if (!enabled) {
      return;
   }
   final SequentialFile clonedFile = jf.getFile().cloneFile();
   try {
      ActiveMQServerLogger.LOGGER.replicaSyncFile(clonedFile, clonedFile.size());
      sendLargeFile(content, null, jf.getFileID(), clonedFile, Long.MAX_VALUE);
   } finally {
      if (clonedFile.isOpen()) {
         clonedFile.close();
      }
   }
}
@Override @After public void tearDown() throws Exception { //stop journal first to let it manage its files stopComponent(journal); List<String> files = fileFactory.listFiles(fileExtension); for (String file : files) { SequentialFile seqFile = fileFactory.createSequentialFile(file); Assert.assertEquals(fileSize, seqFile.size()); } super.tearDown(); }
/**
 * Appends the readable bytes of an {@code ActiveMQBuffer} to the end of a large-message
 * file, replicating the write when replication is active. Runs under the storage read lock.
 * <p>
 * Fast path: when the buffer is backed by a single NIO buffer, it is written directly
 * without copying; a defensive copy is made only for the replication call (which may
 * retain the array). Otherwise the bytes are copied out once and the {@code byte[]}
 * overload is delegated to.
 *
 * @param file      the large-message file to append to
 * @param messageId ID of the large message (used for replication)
 * @param bytes     buffer whose readable bytes are appended
 * @throws Exception if positioning or writing the file fails
 */
public final void addBytesToLargeMessage(final SequentialFile file, final long messageId, final ActiveMQBuffer bytes) throws Exception { readLock(); try { file.position(file.size()); if (bytes.byteBuf() != null && bytes.byteBuf().nioBufferCount() == 1) { final ByteBuffer nioBytes = bytes.byteBuf().internalNioBuffer(bytes.readerIndex(), bytes.readableBytes()); file.writeDirect(nioBytes, false); if (isReplicated()) { //copy defensively bytes final byte[] bytesCopy = new byte[bytes.readableBytes()]; bytes.getBytes(bytes.readerIndex(), bytesCopy); replicator.largeMessageWrite(messageId, bytesCopy); } } else { final byte[] bytesCopy = new byte[bytes.readableBytes()]; bytes.readBytes(bytesCopy); addBytesToLargeMessage(file, messageId, bytesCopy); } } finally { readUnLock(); } }