/**
 * Creates a tail reader over the supplied log buffer, holding a single
 * reusable {@link LogRecord} instance for deserializing entries.
 */
public LogBufferTailReader(ByteBuffer buffer) {
    logRecord = new LogRecord();
    this.buffer = buffer;
}
/**
 * Populates {@code logRecord} as an entity-level commit record for the given
 * transaction: dataset id, primary-key hash/value/fields and the owning
 * resource partition, then computes the serialized sizes.
 *
 * @param entityCommitType caller-supplied log type byte written verbatim into the record
 */
public static void formEntityCommitLogRecord(LogRecord logRecord, ITransactionContext txnCtx, int datasetId,
        int PKHashValue, ITupleReference PKValue, int[] PKFields, int resourcePartition, byte entityCommitType) {
    logRecord.setTxnCtx(txnCtx);
    logRecord.setLogType(entityCommitType);
    logRecord.setTxnId(txnCtx.getTxnId().getId());
    logRecord.setDatasetId(datasetId);
    logRecord.setPKHashValue(PKHashValue);
    logRecord.setPKFieldCnt(PKFields.length);
    logRecord.setPKValue(PKValue);
    logRecord.setPKFields(PKFields);
    logRecord.setResourcePartition(resourcePartition);
    // PK value size computed first -- presumably the total log size below
    // includes it; keep this ordering (TODO confirm in LogRecord)
    logRecord.computeAndSetPKValueSize();
    logRecord.computeAndSetLogSize();
}
public void notifyReplicationTermination() { LogRecord logRecord = null; while (logRecord == null) { try { logRecord = (LogRecord) remoteJobsQ.take(); } catch (InterruptedException e) { //NOSONAR LogFlusher should survive interrupts //ignore } } logRecord.isFlushed(true); final ILogRequester logRequester = logRecord.getRequester(); if (logRequester != null) { logRequester.notifyFlushed(logRecord); } }
/**
 * Populates {@code logRecord} as a FLUSH record describing the component id
 * range being flushed for one dataset partition. Flush records are not tied
 * to a transaction, hence the -1 txn id.
 */
public static void formFlushLogRecord(LogRecord logRecord, int datasetId, int resourcePartition,
        long flushingComponentMinId, long flushingComponentMaxId, PrimaryIndexOperationTracker opTracker) {
    logRecord.setLogType(LogType.FLUSH);
    // FLUSH is a system record; no owning transaction
    logRecord.setTxnId(-1);
    logRecord.setDatasetId(datasetId);
    logRecord.setResourcePartition(resourcePartition);
    logRecord.setFlushingComponentMinId(flushingComponentMinId);
    logRecord.setFlushingComponentMaxId(flushingComponentMaxId);
    // Tracker is carried so the flusher can schedule the flush after the record is durable
    logRecord.setOpTracker(opTracker);
    logRecord.computeAndSetLogSize();
}
/**
 * Populates {@code logRecord} as a job-termination record: JOB_COMMIT when
 * {@code isCommit} is true, otherwise ABORT.
 */
public static void formJobTerminateLogRecord(LogRecord logRecord, long txnId, boolean isCommit) {
    logRecord.setLogType(isCommit ? LogType.JOB_COMMIT : LogType.ABORT);
    // Job-level termination is not scoped to any dataset/entity
    logRecord.setDatasetId(-1);
    logRecord.setPKHashValue(-1);
    logRecord.setTxnId(txnId);
    logRecord.computeAndSetLogSize();
}
/**
 * Creates the lifecycle manager and pre-builds the reusable WAIT_FOR_FLUSHES
 * log record used whenever pending component flushes must be awaited.
 */
public DatasetLifecycleManager(StorageProperties storageProperties, ILocalResourceRepository resourceRepository,
        ILogManager logManager, IDatasetMemoryManager memoryManager,
        IIndexCheckpointManagerProvider indexCheckpointManagerProvider, int numPartitions) {
    this.storageProperties = storageProperties;
    this.resourceRepository = resourceRepository;
    this.logManager = logManager;
    this.memoryManager = memoryManager;
    this.indexCheckpointManagerProvider = indexCheckpointManagerProvider;
    this.numPartitions = numPartitions;
    // The wait record's type and size never change, so it is built once here.
    waitLog = new LogRecord();
    waitLog.setLogType(LogType.WAIT_FOR_FLUSHES);
    waitLog.computeAndSetLogSize();
}
/**
 * Populates {@code logRecord} as a MARKER record carrying the given payload
 * buffer for the specified dataset and resource partition.
 * NOTE(review): the leading byte of {@code marker} is consumed before the
 * buffer is attached -- the comment below says it is not part of the marker
 * payload; confirm against the producer of the buffer.
 */
public static void formMarkerLogRecord(LogRecord logRecord, ITransactionContext txnCtx, int datasetId,
        int resourcePartition, ByteBuffer marker) {
    logRecord.setTxnCtx(txnCtx);
    logRecord.setLogSource(LogSource.LOCAL);
    logRecord.setLogType(LogType.MARKER);
    logRecord.setTxnId(txnCtx.getTxnId().getId());
    logRecord.setDatasetId(datasetId);
    logRecord.setResourcePartition(resourcePartition);
    marker.get(); // read the first byte since it is not part of the marker object
    logRecord.setMarker(marker);
    logRecord.computeAndSetLogSize();
}
// closes the enclosing class (class header is outside this view)
}
/**
 * Verifies that logging a WAIT record blocks the logging thread until the
 * record is flushed: a transactor thread issues the log call, the test joins
 * it with a timeout, and then asserts the record was marked flushed.
 */
@Test
public void waitLogTest() throws Exception {
    final INcApplicationContext ncAppCtx = (INcApplicationContext) integrationUtil.ncs[0].getApplicationContext();
    LogRecord logRecord = new LogRecord();
    final long txnId = 1;
    logRecord.setTxnCtx(TransactionContextFactory.create(new TxnId(txnId),
            new TransactionOptions(ITransactionManager.AtomicityLevel.ENTITY_LEVEL)));
    logRecord.setLogSource(LogSource.LOCAL);
    logRecord.setLogType(LogType.WAIT);
    logRecord.setTxnId(txnId);
    logRecord.isFlushed(false);
    logRecord.computeAndSetLogSize();
    // log(...) on a WAIT record is expected to block this thread until the record is flushed
    Thread transactor = new Thread(() -> {
        final LogManager logManager = (LogManager) ncAppCtx.getTransactionSubsystem().getLogManager();
        logManager.log(logRecord);
    });
    transactor.start();
    // Bounded join: a hung flush fails the assertion below instead of deadlocking the suite
    transactor.join(TimeUnit.SECONDS.toMillis(30));
    Assert.assertTrue(logRecord.isFlushed());
}
// NOTE(review): fragment of a per-LogType switch inside a log-record
// deserialization method; the method header, switch statement and earlier
// cases are outside this view. The text also appears garbled: after the first
// `return RecordReadStatus.TRUNCATED;` the closing braces and the case label
// that should introduce the next if-block (presumably an UPDATE/ABORT case
// that reads update info) are missing -- verify against the full source
// before editing.
flushingComponentMaxId = buffer.getLong();
resourceId = 0l;
computeAndSetLogSize();
break;
case LogType.WAIT:
case LogType.WAIT_FOR_FLUSHES:
    // WAIT records carry no payload beyond the common fields
    computeAndSetLogSize();
    break;
case LogType.JOB_COMMIT:
    // Job-level commit: no dataset/entity scope
    datasetId = -1;
    PKHashValue = -1;
    computeAndSetLogSize();
    break;
case LogType.ENTITY_COMMIT:
    if (readEntityResource(buffer) && readEntityValue(buffer)) {
        computeAndSetLogSize();
    } else {
        return RecordReadStatus.TRUNCATED;
        // NOTE(review): a closing `}` plus the next case label appear to be
        // missing here in this excerpt
        if (readEntityResource(buffer) && readEntityValue(buffer)) {
            return readUpdateInfo(buffer);
        } else {
            return RecordReadStatus.TRUNCATED;
            break;
        case LogType.FILTER:
            if (readEntityResource(buffer)) {
                return readUpdateInfo(buffer);
/**
 * Tracks the runtime state of a single dataset: its indexes, registration,
 * memory-allocation status and last access time.
 */
public DatasetInfo(int datasetID, ILogManager logManager) {
    this.datasetID = datasetID;
    this.logManager = logManager;
    this.partitionIndexes = new HashMap<>();
    this.indexes = new HashMap<>();
    this.setRegistered(false);
    this.setMemoryAllocated(false);
    this.setLastAccess(-1);
    // waitLog is reused for every WAIT_FOR_FLUSHES request on this dataset,
    // so its type and size are fixed once here.
    waitLog.setLogType(LogType.WAIT_FOR_FLUSHES);
    waitLog.computeAndSetLogSize();
}
public void notifyFlushTermination() throws ACIDException { LogRecord logRecord = null; while (logRecord == null) { try { logRecord = (LogRecord) flushQ.take(); } catch (InterruptedException e) { //NOSONAR LogFlusher should survive interrupts //ignore } } synchronized (logRecord) { logRecord.isFlushed(true); logRecord.notifyAll(); } PrimaryIndexOperationTracker opTracker = logRecord.getOpTracker(); if (opTracker != null) { try { opTracker.triggerScheduleFlush(logRecord); } catch (HyracksDataException e) { throw new ACIDException(e); } } }
/**
 * Reads one complete log record (common fields followed by a trailing
 * checksum) from the buffer. On a short read the buffer position is restored
 * to the start of the record so the caller can retry with more data.
 * NOTE(review): on BAD_CHKSUM the position is NOT rewound to beginOffset,
 * unlike the other failure paths -- confirm callers rely on this asymmetry.
 */
@Override
public RecordReadStatus readLogRecord(ByteBuffer buffer) {
    int beginOffset = buffer.position();
    //read common fields
    RecordReadStatus status = doReadLogRecord(buffer);
    if (status != RecordReadStatus.OK) {
        buffer.position(beginOffset);
        return status;
    }
    // attempt to read checksum
    if (buffer.remaining() < CHKSUM_LEN) {
        buffer.position(beginOffset);
        return RecordReadStatus.TRUNCATED;
    }
    checksum = buffer.getLong();
    // checksum covers logSize - CHKSUM_LEN bytes starting at beginOffset
    if (checksum != generateChecksum(buffer, beginOffset, logSize - CHKSUM_LEN)) {
        return RecordReadStatus.BAD_CHKSUM;
    }
    return RecordReadStatus.OK;
}
/**
 * Convenience overload: attaches the transaction context to the record, then
 * delegates to the txn-id-based variant to fill in the remaining fields.
 */
public static void formJobTerminateLogRecord(ITransactionContext txnCtx, LogRecord logRecord, boolean isCommit) {
    logRecord.setTxnCtx(txnCtx);
    final long txnId = txnCtx.getTxnId().getId();
    TransactionUtil.formJobTerminateLogRecord(logRecord, txnId, isCommit);
}
/**
 * Deserializes a log record received from a remote replica; for FLUSH records
 * the originating LSN follows the common fields and is read as well.
 * NOTE(review): the status returned by doReadLogRecord is ignored here --
 * presumably remote buffers always contain whole records; confirm with the
 * replication path before relying on this.
 */
@Override
public void readRemoteLog(ByteBuffer buffer) {
    //read common fields
    doReadLogRecord(buffer);
    if (logType == LogType.FLUSH) {
        LSN = buffer.getLong();
    }
}
/**
 * Creates a reader over the transaction log with a page-sized read buffer and
 * a reusable record instance for deserialization.
 */
public LogReader(ILogManager logMgr, long logFileSize, int logPageSize, MutableLong flushLSN,
        boolean isRecoveryMode) {
    this.logMgr = logMgr;
    this.flushLSN = flushLSN;
    this.logFileSize = logFileSize;
    this.logPageSize = logPageSize;
    this.isRecoveryMode = isRecoveryMode;
    // One log page worth of buffer is read at a time
    this.readBuffer = ByteBuffer.allocate(logPageSize);
    this.logRecord = new LogRecord();
}
/**
 * Binds the operator to its transaction context, creates the log record with
 * the task's marker callback (if any), and -- unless this operator is a pure
 * sink -- initializes the accessor/appender and opens the downstream writer.
 */
@Override
public void open() throws HyracksDataException {
    try {
        transactionContext = transactionManager.getTransactionContext(txnId);
        transactionContext.setWriteTxn(isWriteTransaction);
        final ILogMarkerCallback markerCallback = TaskUtil.get(ILogMarkerCallback.KEY_MARKER_CALLBACK, ctx);
        logRecord = new LogRecord(markerCallback);
        if (!isSink) {
            initAccessAppend(ctx);
            super.open();
        }
    } catch (ACIDException e) {
        throw HyracksDataException.create(e);
    }
}
/**
 * Commits the transaction identified by {@code txnId}. Write transactions
 * append a JOB_COMMIT record and transition to COMMITTED; read-only
 * transactions leave no log trail. Locks are released and the context is
 * removed from the repository whether or not the commit succeeds.
 */
@Override
public void commitTransaction(TxnId txnId) throws ACIDException {
    final ITransactionContext txnCtx = getTransactionContext(txnId);
    try {
        if (txnCtx.isWriteTxn()) {
            final LogRecord commitRecord = new LogRecord();
            TransactionUtil.formJobTerminateLogRecord(txnCtx, commitRecord, true);
            txnSubsystem.getLogManager().log(commitRecord);
            txnCtx.setTxnState(ITransactionManager.COMMITTED);
        }
    } catch (Exception e) {
        if (LOGGER.isErrorEnabled()) {
            LOGGER.error(" caused exception in commit !" + txnCtx.getTxnId());
        }
        throw e;
    } finally {
        // Cleanup runs on both the success and failure paths
        txnCtx.complete();
        txnSubsystem.getLockManager().releaseLocks(txnCtx);
        txnCtxRepository.remove(txnCtx.getTxnId());
    }
}
/**
 * Search callback that acquires locks before searching; pre-builds a reusable
 * WAIT log record bound to the transaction, for use when the callback must
 * block on a lock.
 */
public LockThenSearchOperationCallback(DatasetId datasetId, long resourceId, int[] entityIdFields,
        ITransactionSubsystem txnSubsystem, ITransactionContext txnCtx,
        IOperatorNodePushable operatorNodePushable) {
    super(datasetId, resourceId, entityIdFields, txnCtx, txnSubsystem.getLockManager());
    this.operatorNodePushable = (LSMIndexInsertUpdateDeleteOperatorNodePushable) operatorNodePushable;
    this.logManager = txnSubsystem.getLogManager();
    // WAIT record: type and size never change, so both are fixed here once
    this.logRecord = new LogRecord();
    logRecord.setTxnCtx(txnCtx);
    logRecord.setLogSource(LogSource.LOCAL);
    logRecord.setLogType(LogType.WAIT);
    logRecord.setTxnId(txnCtx.getTxnId().getId());
    logRecord.computeAndSetLogSize();
}
// NOTE(review): fragment -- the enclosing method and the start of the
// statement on the first line (the head of an exception/log message
// concatenation) are outside this view; verify against the full source.
        + " and partition " + partition + " and is modified but its component id is null");
// Record presumably used below (outside this view) to log the flush when the dataset is durable
LogRecord logRecord = new LogRecord();
if (dsInfo.isDurable()) {
protected AbstractIndexModificationOperationCallback(DatasetId datasetId, int[] primaryKeyFields, ITransactionContext txnCtx, ILockManager lockManager, ITransactionSubsystem txnSubsystem, long resourceId, int resourcePartition, byte resourceType, Operation indexOp) { super(datasetId, resourceId, primaryKeyFields, txnCtx, lockManager); this.resourceType = resourceType; this.indexOp = indexOp; this.txnSubsystem = txnSubsystem; indexRecord = new LogRecord(); indexRecord.setTxnCtx(txnCtx); indexRecord.setLogType(LogType.UPDATE); indexRecord.setTxnId(txnCtx.getTxnId().getId()); indexRecord.setDatasetId(datasetId.getId()); indexRecord.setResourceId(resourceId); indexRecord.setResourcePartition(resourcePartition); indexRecord.setNewOp(indexOp.value()); filterRecord = new LogRecord(); filterRecord.setTxnCtx(txnCtx); filterRecord.setLogType(LogType.FILTER); filterRecord.setDatasetId(datasetId.getId()); filterRecord.setTxnId(txnCtx.getTxnId().getId()); filterRecord.setResourceId(resourceId); filterRecord.setResourcePartition(resourcePartition); filterRecord.setNewOp(Operation.FILTER_MOD.value()); }