/**
 * Returns the next unique record ID, delegating to the batching ID generator.
 *
 * @return the newly generated ID
 */
@Override
public long generateID() {
   return idGenerator.generateID();
}
/**
 * Returns the last ID handed out by the batching ID generator, without advancing it.
 *
 * @return the current (most recently generated) ID
 */
@Override
public long getCurrentID() {
   return idGenerator.getCurrentID();
}
@Override public synchronized void persistIdGenerator() { if (journalLoaded && idGenerator != null) { // Must call close to make sure last id is persisted idGenerator.persistCurrentID(); } }
// Exercise BatchingIDGenerator persistence across simulated restarts.
// NOTE(review): this appears to be the interior of a test method — the enclosing
// signature is outside this view.
BatchingIDGenerator batch = new BatchingIDGenerator(0, 1000, getJournalStorageManager(journal));
long id1 = batch.generateID();
long id2 = batch.generateID();
// Restart WITHOUT an explicit persist: generateID() checkpoints in batches,
// so reloading must still yield IDs beyond those already handed out.
batch = new BatchingIDGenerator(0, 1000, getJournalStorageManager(journal));
loadIDs(journal, batch);
long id3 = batch.generateID();
long id4 = batch.generateID();
batch.persistCurrentID();
// Restart after an explicit persist: the next ID must be greater than the
// last one issued but still inside the current 1000-wide batch window.
batch = new BatchingIDGenerator(0, 1000, getJournalStorageManager(journal));
loadIDs(journal, batch);
long id5 = batch.generateID();
Assert.assertTrue(id5 > id4 && id5 < 2000);
batch.persistCurrentID();
// One more persist/reload cycle, then stop the journal entirely before
// building a final generator (restart-from-cold scenario).
batch = new BatchingIDGenerator(0, 1000, getJournalStorageManager(journal));
loadIDs(journal, batch);
long id = batch.generateID();
batch.persistCurrentID();
journal.stop();
batch = new BatchingIDGenerator(0, 1000, getJournalStorageManager(journal));
// NOTE(review): fragment of a journal-record replay switch; enclosing loop and
// braces are outside this view, so the structure below is intentionally partial.
mapBindings.put(bindingEncoding.getId(), bindingEncoding);
} else if (rec == JournalRecordIds.ID_COUNTER_RECORD) {
   // Replay a persisted ID-counter checkpoint into the in-memory generator.
   idGenerator.loadState(record.id, buffer);
} else if (rec == JournalRecordIds.ADDRESS_BINDING_RECORD) {
   PersistentAddressBindingEncoding bindingEncoding = newAddressBindingEncoding(id, buffer);
// Purge superseded ID-counter records once replay is complete
// (presumably runs after the record loop — confirm placement in full file).
idGenerator.cleanup();
/**
 * Durably appends an ID-counter checkpoint record to the bindings journal.
 * The read lock guards the journal against concurrent shutdown while writing.
 *
 * @param journalID the journal record ID to append under
 * @param id        the counter value being checkpointed
 * @throws Exception if the journal append fails
 */
@Override
public void storeID(final long journalID, final long id) throws Exception {
   readLock();
   try {
      final Persister encoded = null; // (not used — encoding supplied inline below)
      bindingsJournal.appendAddRecord(journalID,
                                      JournalRecordIds.ID_COUNTER_RECORD,
                                      BatchingIDGenerator.createIDEncodingSupport(id),
                                      true);
   } finally {
      readUnLock();
   }
}
/**
 * Builds the journal storage manager: wires executors, configuration and the
 * I/O error listener, reads the sync flags from the configuration, runs
 * subclass initialization, and creates the batching ID generator backed by
 * this storage manager.
 *
 * @param config                   broker configuration (journal sync flags, paths, …)
 * @param analyzer                 critical-path analyzer passed to the superclass
 * @param executorFactory          factory for the manager's general-purpose executor
 * @param scheduledExecutorService scheduler used by the storage layer
 * @param ioExecutorFactory        factory for I/O-bound executors
 * @param criticalErrorListener    callback invoked on unrecoverable I/O errors
 */
public AbstractJournalStorageManager(Configuration config,
                                     CriticalAnalyzer analyzer,
                                     ExecutorFactory executorFactory,
                                     ScheduledExecutorService scheduledExecutorService,
                                     ExecutorFactory ioExecutorFactory,
                                     IOCriticalErrorListener criticalErrorListener) {
   super(analyzer, CRITICAL_PATHS);
   // Plain field wiring — order among these assignments is not significant.
   this.config = config;
   this.executorFactory = executorFactory;
   this.ioExecutorFactory = ioExecutorFactory;
   this.scheduledExecutorService = scheduledExecutorService;
   this.ioCriticalErrorListener = criticalErrorListener;
   executor = executorFactory.getExecutor();
   syncNonTransactional = config.isJournalSyncNonTransactional();
   syncTransactional = config.isJournalSyncTransactional();
   // Subclass hook: sets up the concrete journals before the ID generator
   // starts checkpointing through this manager.
   init(config, criticalErrorListener);
   idGenerator = new BatchingIDGenerator(0, CHECKPOINT_BATCH_SIZE, this);
}
/**
 * Starts and loads the given journal, asserts it contains committed records
 * and no prepared transactions, then replays every ID_COUNTER_RECORD into
 * the supplied generator.
 *
 * @param journal the journal to start and load
 * @param batch   the generator that receives each persisted ID checkpoint
 * @throws Exception on journal start/load failure
 */
protected void loadIDs(final Journal journal, final BatchingIDGenerator batch) throws Exception {
   final ArrayList<RecordInfo> committed = new ArrayList<>();
   final ArrayList<PreparedTransactionInfo> prepared = new ArrayList<>();
   journal.start();
   journal.load(committed, prepared, null);
   Assert.assertEquals(0, prepared.size());
   Assert.assertTrue("Contains " + committed.size(), committed.size() > 0);
   for (final RecordInfo info : committed) {
      if (info.userRecordType != JournalRecordIds.ID_COUNTER_RECORD) {
         continue;
      }
      final ActiveMQBuffer wrapped = ActiveMQBuffers.wrappedBuffer(info.data);
      batch.loadState(info.id, wrapped);
   }
}
/** * A method to cleanup old records after started */ public void cleanup() { if (cleanupRecords != null) { Iterator<Long> iterRecord = cleanupRecords.iterator(); while (iterRecord.hasNext()) { Long record = iterRecord.next(); if (iterRecord.hasNext()) { // we don't want to remove the last record deleteID(record.longValue()); } } cleanupRecords.clear(); // help GC cleanupRecords = null; } }
public void loadState(final long journalID, final ActiveMQBuffer buffer) { addCleanupRecord(journalID); IDCounterEncoding encoding = new IDCounterEncoding(); encoding.decode(buffer); // Keep nextID and counter the same, the next generateID will update the checkpoint nextID = encoding.id + 1; counter.set(nextID); }
/**
 * When {@code forceLongs} is set, pushes the server's ID generator just past
 * {@code Integer.MAX_VALUE} so subsequent IDs exercise 64-bit code paths.
 */
protected void checkForLongs() {
   if (!forceLongs) {
      return;
   }
   final JournalStorageManager manager = (JournalStorageManager) server.getStorageManager();
   final BatchingIDGenerator idGenerator = (BatchingIDGenerator) manager.getIDGenerator();
   // The long literal keeps the addition from overflowing into negative int range.
   idGenerator.forceNextID(Integer.MAX_VALUE + 1L);
}
/**
 * Durably (sync = true) appends an ID-counter checkpoint to the bindings journal.
 * NOTE(review): the trailing "};" closes an anonymous class whose declaration is
 * outside this view.
 */
@Override
public synchronized void storeID(long journalID, long id) throws Exception {
   bindingsJournal.appendAddRecord(journalID,
                                   JournalRecordIds.ID_COUNTER_RECORD,
                                   BatchingIDGenerator.createIDEncodingSupport(id),
                                   true);
}
};
/**
 * Records, inside transaction {@code txID}, that a page cursor completed
 * {@code position} for the given queue. The freshly generated record ID is
 * written back into the position so callers can later reference/delete it.
 *
 * @param txID     enclosing journal transaction
 * @param queueID  queue the cursor belongs to
 * @param position page position being marked complete (mutated: record ID set)
 * @throws Exception if the journal append fails
 */
@Override
public void storePageCompleteTransactional(long txID, long queueID, PagePosition position) throws Exception {
   final long recordID = idGenerator.generateID();
   position.setRecordID(recordID);
   final CursorAckRecordEncoding encoding = new CursorAckRecordEncoding(queueID, position);
   messageJournal.appendAddRecordTransactional(txID, recordID, JournalRecordIds.PAGE_CURSOR_COMPLETE, encoding);
}
/**
 * Stops the storage manager. Unless stopping due to a critical I/O error,
 * pending large-message deletes are flushed and the ID generator checkpoints
 * its last value. An executor drain barrier then runs before the hook
 * {@code beforeStop()} and the journals are stopped.
 *
 * @param ioCriticalError true when shutting down after an unrecoverable I/O error;
 *                        skips work that would itself touch the failing journal
 * @param sendFailover    accepted but not used in this body — presumably consumed
 *                        by an override or the superclass; confirm in full file
 */
@Override
public synchronized void stop(boolean ioCriticalError, boolean sendFailover) throws Exception {
   if (!started) {
      return;
   }
   if (!ioCriticalError) {
      performCachedLargeMessageDeletes();
      // Must call close to make sure last id is persisted
      if (journalLoaded && idGenerator != null)
         idGenerator.persistCurrentID();
   }
   // Drain barrier: wait (bounded) until all previously submitted executor
   // tasks have run before tearing the journals down.
   final CountDownLatch latch = new CountDownLatch(1);
   executor.execute(new Runnable() {
      @Override
      public void run() {
         latch.countDown();
      }
   });
   // NOTE(review): the await() return value is ignored — a 30s timeout proceeds
   // with shutdown silently; consider logging the timeout.
   latch.await(30, TimeUnit.SECONDS);
   beforeStop();
   bindingsJournal.stop();
   messageJournal.stop();
   journalLoaded = false;
   started = false;
}
/**
 * Appends, inside transaction {@code txID}, a page-counter increment record
 * for the given queue.
 *
 * @param txID           enclosing journal transaction
 * @param queueID        queue whose counter is incremented
 * @param value          increment amount
 * @param persistentSize persistent-size delta associated with the increment
 * @return the journal record ID assigned to the increment
 * @throws Exception if the journal append fails
 */
@Override
public long storePageCounterInc(long txID, long queueID, int value, long persistentSize) throws Exception {
   readLock();
   try {
      final long recordID = idGenerator.generateID();
      final PageCountRecordInc encoding = new PageCountRecordInc(queueID, value, persistentSize);
      messageJournal.appendAddRecordTransactional(txID, recordID, JournalRecordIds.PAGE_CURSOR_COUNTER_INC, encoding);
      return recordID;
   } finally {
      readUnLock();
   }
}
/**
 * Stops the storage manager, including the large-messages factory. Unless
 * stopping due to a critical I/O error, pending large-message deletes are
 * flushed and the ID generator checkpoints its last value before shutdown.
 *
 * @param ioCriticalError true when shutting down after an unrecoverable I/O error;
 *                        skips work that would itself touch the failing journal
 * @param sendFailover    accepted but not used in this body — presumably consumed
 *                        by an override or the superclass; confirm in full file
 */
@Override
public synchronized void stop(boolean ioCriticalError, boolean sendFailover) throws Exception {
   if (!started) {
      return;
   }
   if (!ioCriticalError) {
      performCachedLargeMessageDeletes();
      // Must call close to make sure last id is persisted
      if (journalLoaded && idGenerator != null)
         idGenerator.persistCurrentID();
   }
   // Drain barrier: wait (bounded) until all previously submitted executor
   // tasks have run before tearing the journals down.
   final CountDownLatch latch = new CountDownLatch(1);
   executor.execute(new Runnable() {
      @Override
      public void run() {
         latch.countDown();
      }
   });
   // NOTE(review): the await() return value is ignored — a 30s timeout proceeds
   // with shutdown silently; consider logging the timeout.
   latch.await(30, TimeUnit.SECONDS);
   beforeStop();
   bindingsJournal.stop();
   messageJournal.stop();
   largeMessagesFactory.stop();
   journalLoaded = false;
   started = false;
}
/**
 * Appends, inside transaction {@code txID}, an absolute page-counter value
 * record for the given queue.
 *
 * @param txID           enclosing journal transaction
 * @param queueID        queue whose counter is being snapshotted
 * @param value          the counter value to record
 * @param persistentSize persistent size recorded alongside the counter
 * @return the journal record ID assigned to the snapshot
 * @throws Exception if the journal append fails
 */
@Override
public long storePageCounter(long txID, long queueID, long value, long persistentSize) throws Exception {
   readLock();
   try {
      final long recordID = idGenerator.generateID();
      final PageCountRecord encoding = new PageCountRecord(queueID, value, persistentSize);
      messageJournal.appendAddRecordTransactional(txID, recordID, JournalRecordIds.PAGE_CURSOR_COUNTER_VALUE, encoding);
      return recordID;
   } finally {
      readUnLock();
   }
}
// Checkpoint the generator's latest ID to durable storage.
// NOTE(review): lone statement — enclosing method is outside this view.
idGenerator.persistCurrentID();
/**
 * Durably (sync = true) appends a queue-status record to the bindings journal.
 *
 * @param queueID the queue whose status is recorded
 * @param status  the status value to persist
 * @return the journal record ID assigned to the status record
 * @throws Exception if the journal append fails
 */
@Override
public long storeQueueStatus(long queueID, QueueStatus status) throws Exception {
   readLock();
   try {
      // Generate the ID inside the lock, consistent with the other store*
      // methods; this also avoids consuming an ID if readLock() fails.
      final long recordID = idGenerator.generateID();
      bindingsJournal.appendAddRecord(recordID,
                                      JournalRecordIds.QUEUE_STATUS_RECORD,
                                      new QueueStatusEncoding(queueID, status),
                                      true);
      return recordID;
   } finally {
      readUnLock();
   }
}
@Override public long storePendingCounter(final long queueID, final long pageID) throws Exception { readLock(); try { final long recordID = idGenerator.generateID(); PageCountPendingImpl pendingInc = new PageCountPendingImpl(queueID, pageID); // We must guarantee the record sync before we actually write on the page otherwise we may get out of sync // on the counter messageJournal.appendAddRecord(recordID, JournalRecordIds.PAGE_CURSOR_PENDING_COUNTER, pendingInc, true); return recordID; } finally { readUnLock(); } }