lastCheckpointLSN = logManager.getReadableSmallestLSN(); currentLogLSN = logManager.getAppendLSN();
/**
 * Appends the (pre-built) log record via the log manager.
 * NOTE(review): presumably logRecord is a reusable WAIT record that causes the
 * caller to block until preceding records are flushed — confirm against the
 * field's initialization, which is not visible here.
 *
 * @throws ACIDException if appending the log record fails
 */
private void logWait() throws ACIDException {
    logManager.log(logRecord);
}
/**
 * Closes this log file's channel exactly once; repeated calls are no-ops.
 *
 * @throws IOException if closing the underlying file channel fails
 */
public synchronized void close() throws IOException {
    if (!open) {
        // already closed — close() is idempotent
        return;
    }
    logManager.closeLogFile(this, fileChannel);
    open = false;
}
public static void checkAndSetFirstLSN(AbstractLSMIndex lsmIndex, ILogManager logManager) throws HyracksDataException { // If the index has an empty memory component, we need to set its first LSN (For soft checkpoint) if (lsmIndex.isCurrentMutableComponentEmpty()) { //prevent transactions from incorrectly setting the first LSN on a modified component by checking the index is still empty synchronized (lsmIndex.getOperationTracker()) { if (lsmIndex.isCurrentMutableComponentEmpty()) { LSMIOOperationCallback ioOpCallback = (LSMIOOperationCallback) lsmIndex.getIOOperationCallback(); ioOpCallback.setFirstLsnForCurrentMemoryComponent(logManager.getAppendLSN()); } } } }
Assert.assertTrue(forgedCheckpoint.getMinMCTFirstLsn() < minFirstLSN); final long readableSmallestLSN = txnSubsystem.getLogManager().getReadableSmallestLSN(); Assert.assertTrue(forgedCheckpoint.getMinMCTFirstLsn() <= readableSmallestLSN); } finally {
/**
 * Opens the log file that contains {@code readLSN} and caches that file's begin
 * LSN, closing any previously opened log file first.
 *
 * @throws ACIDException wrapping any I/O failure while switching log files
 */
private void getLogFile() {
    try {
        // close existing file (if any) before opening another one
        close();
        logFile = logMgr.getLogFile(readLSN);
        fileBeginLSN = logFile.getFileBeginLSN();
    } catch (IOException e) {
        throw new ACIDException(e);
    }
}
/*** * Attempts to perform a soft checkpoint at the specified {@code checkpointTargetLSN}. * If a checkpoint cannot be captured due to datasets having LSN < {@code checkpointTargetLSN}, * an asynchronous flush is triggered on them. When a checkpoint is successful, all transaction * log files that end with LSN < {@code checkpointTargetLSN} are deleted. */ @Override public synchronized long tryCheckpoint(long checkpointTargetLSN) throws HyracksDataException { LOGGER.info("Attemping soft checkpoint..."); final long minSecuredLSN = getMinSecuredLSN(); if (minSecuredLSN != NO_SECURED_LSN && checkpointTargetLSN >= minSecuredLSN) { return minSecuredLSN; } final long minFirstLSN = txnSubsystem.getRecoveryManager().getMinFirstLSN(); boolean checkpointSucceeded = minFirstLSN >= checkpointTargetLSN; if (!checkpointSucceeded) { // Flush datasets with indexes behind target checkpoint LSN final IDatasetLifecycleManager dlcm = txnSubsystem.getApplicationContext().getDatasetLifecycleManager(); dlcm.asyncFlushMatchingIndexes(newLaggingDatasetPredicate(checkpointTargetLSN)); } capture(minFirstLSN, false); if (checkpointSucceeded) { txnSubsystem.getLogManager().deleteOldLogFiles(minFirstLSN); LOGGER.info(String.format("soft checkpoint succeeded at LSN(%s)", minFirstLSN)); } return minFirstLSN; }
@Override public void perform(INcApplicationContext appCtx, IReplicationWorker worker) { final ReplicationChannel replicationChannel = (ReplicationChannel) appCtx.getReplicationChannel(); final RemoteLogsProcessor logsProcessor = replicationChannel.getRemoteLogsProcessor(); final ILogManager logManager = appCtx.getTransactionSubsystem().getLogManager(); final RemoteLogRecord reusableLog = new RemoteLogRecord(); final ISocketChannel channel = worker.getChannel(); ByteBuffer logsBuffer = ByteBuffer.allocate(logManager.getLogPageSize()); try { while (true) { // read a batch of logs logsBuffer = ReplicationProtocol.readRequest(channel, logsBuffer); // check if it is end of handshake if (logsBuffer.remaining() == END_REPLICATION_LOG_SIZE) { break; } logsProcessor.process(logsBuffer, reusableLog, worker); } } catch (IOException e) { throw new ReplicationException(e); } }
/**
 * Re-initializes the index checkpoint for {@code file}'s index: any existing
 * checkpoint is deleted and a fresh one is written with the current log append
 * LSN as its low watermark.
 *
 * @throws HyracksDataException on checkpoint manager failures
 */
private void initIndexCheckpoint(INcApplicationContext appCtx) throws HyracksDataException {
    final ResourceReference indexRef = ResourceReference.of(file);
    final IIndexCheckpointManager indexCheckpointManager =
            appCtx.getIndexCheckpointManagerProvider().get(indexRef);
    final long currentLSN = appCtx.getTransactionSubsystem().getLogManager().getAppendLSN();
    indexCheckpointManager.delete();
    // Long.MIN_VALUE: no valid component sequence exists yet for this index.
    indexCheckpointManager.init(Long.MIN_VALUE, currentLSN,
            LSMComponentId.EMPTY_INDEX_LAST_COMPONENT_ID.getMaxId());
    LOGGER.info(() -> "Checkpoint index: " + indexRef);
}
/**
 * Logs a WAIT record and then blocks until all active IO operations on this
 * dataset complete. Throws if the active-IO counter ever becomes negative,
 * which would indicate a bookkeeping bug.
 *
 * NOTE(review): presumably the WAIT log forces prior log records to be
 * processed before waiting — confirm waitLog semantics with the log manager.
 *
 * @throws HyracksDataException if the waiting thread is interrupted
 */
public void waitForIO() throws HyracksDataException {
    logManager.log(waitLog);
    synchronized (this) {
        while (numActiveIOOps > 0) {
            try {
                /**
                 * Will be Notified by {@link DatasetInfo#undeclareActiveIOOperation()}
                 */
                wait();
            } catch (InterruptedException e) {
                // restore the interrupt flag before propagating
                Thread.currentThread().interrupt();
                throw HyracksDataException.create(e);
            }
        }
        // A negative counter means declare/undeclare calls are unbalanced.
        if (numActiveIOOps < 0) {
            if (LOGGER.isErrorEnabled()) {
                LOGGER.error("Number of IO operations cannot be negative for dataset: " + this);
            }
            throw new IllegalStateException("Number of IO operations cannot be negative");
        }
    }
}
}
/**
 * Builds a checkpoint object from the current transaction-subsystem state
 * (next checkpoint id, current append LSN, max txn id), persists it, and then
 * cleans up superseded checkpoints.
 *
 * @param minMCTFirstLSN the minimum first LSN across memory components
 * @param sharp whether this is a sharp checkpoint
 * @throws HyracksDataException on persistence failures
 */
protected void capture(long minMCTFirstLSN, boolean sharp) throws HyracksDataException {
    final ILogManager logMgr = txnSubsystem.getLogManager();
    final ITransactionManager txnMgr = txnSubsystem.getTransactionManager();
    final Checkpoint checkpoint = new Checkpoint(getNextCheckpointId(), logMgr.getAppendLSN(), minMCTFirstLSN,
            txnMgr.getMaxTxnId(), sharp, StorageConstants.VERSION);
    persist(checkpoint);
    cleanup();
}
private void logWait() throws ACIDException { indexRecord.setLogType(LogType.WAIT); indexRecord.computeAndSetLogSize(); txnSubsystem.getLogManager().log(indexRecord); // set the log type back to UPDATE for normal updates indexRecord.setLogType(LogType.UPDATE); } }
/**
 * Re-initializes the index checkpoints for every local resource of the given
 * partition: each checkpoint is recreated with the highest component sequence
 * found on disk and the current log append LSN, then an ACK is sent back to
 * the requesting worker.
 *
 * @throws HyracksDataException if an index directory cannot be listed or a
 *         checkpoint operation fails
 */
@Override
public void perform(INcApplicationContext appCtx, IReplicationWorker worker) throws HyracksDataException {
    final IIndexCheckpointManagerProvider indexCheckpointManagerProvider = appCtx.getIndexCheckpointManagerProvider();
    PersistentLocalResourceRepository resRepo = (PersistentLocalResourceRepository) appCtx.getLocalResourceRepository();
    final IIOManager ioManager = appCtx.getIoManager();
    final Collection<LocalResource> partitionResources = resRepo.getPartitionResources(partition).values();
    // Single append-LSN snapshot used as the low watermark for all checkpoints below.
    final long currentLSN = appCtx.getTransactionSubsystem().getLogManager().getAppendLSN();
    for (LocalResource ls : partitionResources) {
        DatasetResourceReference ref = DatasetResourceReference.of(ls);
        final IIndexCheckpointManager indexCheckpointManager = indexCheckpointManagerProvider.get(ref);
        indexCheckpointManager.delete();
        // Get most recent sequence of existing files to avoid deletion
        Path indexPath = StoragePathUtil.getIndexPath(ioManager, ref);
        String[] files = indexPath.toFile().list(AbstractLSMIndexFileManager.COMPONENT_FILES_FILTER);
        if (files == null) {
            // File.list returns null when the path is not a directory or on IO error.
            throw HyracksDataException
                    .create(new IOException(indexPath + " is not a directory or an IO Error occurred"));
        }
        // Track the maximum component sequence across all component files.
        long maxComponentSequence = Long.MIN_VALUE;
        for (String file : files) {
            maxComponentSequence =
                    Math.max(maxComponentSequence, IndexComponentFileReference.of(file).getSequenceEnd());
        }
        indexCheckpointManager.init(maxComponentSequence, currentLSN, maxComponentId);
    }
    ReplicationProtocol.sendAck(worker.getChannel(), worker.getReusableBuffer());
}
/**
 * Schedules flushes of the memory components of all open primary-index
 * partitions of a dataset and, unless {@code asyncFlush}, waits for them to
 * complete. The two WAIT log records bracket the scheduling: the first ensures
 * in-flight flushes are already scheduled, the second ensures the flushes
 * requested here are scheduled before we gather and wait on them.
 *
 * @param dsr the dataset resource; must be open and must have no active operations
 * @param asyncFlush when true, return after scheduling without waiting
 * @throws HyracksDataException on logging or flush failures
 * @throws IllegalStateException if the dataset is closed or has active operations
 */
private void flushDatasetOpenIndexes(DatasetResource dsr, boolean asyncFlush) throws HyracksDataException {
    DatasetInfo dsInfo = dsr.getDatasetInfo();
    if (!dsInfo.isOpen()) {
        throw new IllegalStateException("flushDatasetOpenIndexes is called on a dataset that is closed");
    }
    if (dsInfo.isExternal()) {
        // no memory components for external dataset
        return;
    }
    // ensure all in-flight flushes gets scheduled
    logManager.log(waitLog);
    for (PrimaryIndexOperationTracker primaryOpTracker : dsr.getOpTrackers()) {
        // flush each partition one by one
        if (primaryOpTracker.getNumActiveOperations() > 0) {
            throw new IllegalStateException(
                    "flushDatasetOpenIndexes is called on a dataset with currently active operations");
        }
        primaryOpTracker.setFlushOnExit(true);
        primaryOpTracker.flushIfNeeded();
    }
    // ensure requested flushes were scheduled
    logManager.log(waitLog);
    if (!asyncFlush) {
        // synchronous mode: collect every scheduled flush and block until done
        List<FlushOperation> flushes = new ArrayList<>();
        for (PrimaryIndexOperationTracker primaryOpTracker : dsr.getOpTrackers()) {
            flushes.addAll(primaryOpTracker.getScheduledFlushes());
        }
        LSMIndexUtil.waitFor(flushes);
    }
}
/**
 * Test task: refreshes the component id generator, attaches the flush LSN and
 * next component id as operation parameters, and deletes the components
 * matched by {@code predicate}. Any failure is recorded in {@code failure}
 * instead of being thrown (Runnable cannot throw checked exceptions).
 */
@Override
public void run() {
    ILSMIndexAccessor lsmAccessor = lsmBtree.createAccessor(NoOpIndexAccessParameters.INSTANCE);
    try {
        // Advance to a fresh component id before deleting components.
        dsLifecycleMgr.getComponentIdGenerator(StorageTestUtils.DATASET_ID, PARTITION, indexPath)
                .refresh();
        ILSMComponentId next = dsLifecycleMgr
                .getComponentIdGenerator(StorageTestUtils.DATASET_ID, PARTITION, indexPath).getId();
        long flushLsn = nc.getTransactionSubsystem().getLogManager().getAppendLSN();
        Map<String, Object> flushMap = new HashMap<>();
        flushMap.put(LSMIOOperationCallback.KEY_FLUSH_LOG_LSN, flushLsn);
        flushMap.put(LSMIOOperationCallback.KEY_NEXT_COMPONENT_ID, next);
        lsmAccessor.getOpContext().setParameters(flushMap);
        lsmAccessor.deleteComponents(predicate);
    } catch (HyracksDataException e) {
        // surface the failure to the test thread via the shared field
        failure = e;
    }
}
};
/**
 * Logs the post-image of a filter update. Does nothing when {@code newValue}
 * is null.
 *
 * @param newValue the new filter tuple to log, or null to skip logging
 * @throws HyracksDataException if appending the log record fails
 */
public void after(ITupleReference newValue) throws HyracksDataException {
    if (newValue == null) {
        return; // nothing to log
    }
    filterRecord.setNewValueSize(SimpleTupleWriter.INSTANCE.bytesRequired(newValue));
    filterRecord.setNewValue(newValue);
    filterRecord.computeAndSetLogSize();
    txnSubsystem.getLogManager().log(filterRecord);
}
ILSMComponentId next = dsLifecycleMgr.getComponentIdGenerator(StorageTestUtils.DATASET_ID, PARTITION, indexPath).getId(); long flushLsn = nc.getTransactionSubsystem().getLogManager().getAppendLSN(); Map<String, Object> flushMap = new HashMap<>(); flushMap.put(LSMIOOperationCallback.KEY_FLUSH_LOG_LSN, flushLsn); dsLifecycleMgr.getComponentIdGenerator(StorageTestUtils.DATASET_ID, PARTITION, indexPath).refresh(); next = dsLifecycleMgr.getComponentIdGenerator(StorageTestUtils.DATASET_ID, PARTITION, indexPath).getId(); flushLsn = nc.getTransactionSubsystem().getLogManager().getAppendLSN(); flushMap = new HashMap<>(); flushMap.put(LSMIOOperationCallback.KEY_FLUSH_LOG_LSN, flushLsn);
/**
 * Logs every tuple of the incoming frame (forwarding each to the next operator
 * unless this is a sink), then, if a marker message is attached to the task
 * context, logs a marker record and resets the message to a null feed message.
 *
 * @param buffer the incoming frame
 * @throws HyracksDataException wrapping any logging failure
 */
@Override
public void nextFrame(ByteBuffer buffer) throws HyracksDataException {
    tAccess.reset(buffer);
    final int tupleCount = tAccess.getTupleCount();
    for (int i = 0; i < tupleCount; i++) {
        tRef.reset(tAccess, i);
        try {
            formLogRecord(buffer, i);
            logMgr.log(logRecord);
            if (!isSink) {
                appendTupleToFrame(i);
            }
        } catch (ACIDException e) {
            throw HyracksDataException.create(e);
        }
    }
    final IFrame message = TaskUtil.get(HyracksConstants.KEY_MESSAGE, ctx);
    if (message == null
            || MessagingFrameTupleAppender.getMessageType(message) != MessagingFrameTupleAppender.MARKER_MESSAGE) {
        return;
    }
    try {
        formMarkerLogRecords(message.getBuffer());
        logMgr.log(logRecord);
    } catch (ACIDException e) {
        throw HyracksDataException.create(e);
    }
    // Replace the consumed marker with a null feed message for downstream.
    message.reset();
    message.getBuffer().put(MessagingFrameTupleAppender.NULL_FEED_MESSAGE);
    message.getBuffer().flip();
}
ILSMComponentId next = dsLifecycleMgr.getComponentIdGenerator(StorageTestUtils.DATASET_ID, PARTITION, indexPath).getId(); long flushLsn = nc.getTransactionSubsystem().getLogManager().getAppendLSN(); Map<String, Object> flushMap = new HashMap<>(); flushMap.put(LSMIOOperationCallback.KEY_FLUSH_LOG_LSN, flushLsn); dsLifecycleMgr.getComponentIdGenerator(StorageTestUtils.DATASET_ID, PARTITION, indexPath).refresh(); next = dsLifecycleMgr.getComponentIdGenerator(StorageTestUtils.DATASET_ID, PARTITION, indexPath).getId(); flushLsn = nc.getTransactionSubsystem().getLogManager().getAppendLSN(); flushMap = new HashMap<>(); flushMap.put(LSMIOOperationCallback.KEY_FLUSH_LOG_LSN, flushLsn);
/**
 * Commits the transaction identified by {@code txnId}. Write transactions get a
 * job-terminate (commit) log record and are marked COMMITTED; read-only
 * transactions skip logging. Locks are released and the context is removed
 * from the repository regardless of outcome.
 *
 * @param txnId id of the transaction to commit
 * @throws ACIDException if logging the commit record fails
 */
@Override
public void commitTransaction(TxnId txnId) throws ACIDException {
    final ITransactionContext txnCtx = getTransactionContext(txnId);
    try {
        if (txnCtx.isWriteTxn()) {
            // Only write transactions need a terminate (commit) log record.
            LogRecord logRecord = new LogRecord();
            TransactionUtil.formJobTerminateLogRecord(txnCtx, logRecord, true);
            txnSubsystem.getLogManager().log(logRecord);
            txnCtx.setTxnState(ITransactionManager.COMMITTED);
        }
    } catch (Exception e) {
        if (LOGGER.isErrorEnabled()) {
            // Pass the exception so its stack trace is preserved in the log.
            LOGGER.error(" caused exception in commit !" + txnCtx.getTxnId(), e);
        }
        throw e;
    } finally {
        // Always complete the context, release locks, and unregister it,
        // even when the commit log could not be written.
        txnCtx.complete();
        txnSubsystem.getLockManager().releaseLocks(txnCtx);
        txnCtxRepository.remove(txnCtx.getTxnId());
    }
}