/**
 * Starts a new transaction with job-level (ATOMIC) atomicity for the given id.
 *
 * @param transactionId id of the transaction to begin
 * @throws RemoteException if the remote invocation fails
 */
@Override
public void beginTransaction(TxnId transactionId) throws RemoteException {
    final TransactionOptions atomicOptions = new TransactionOptions(AtomicityLevel.ATOMIC);
    transactionSubsystem.getTransactionManager().beginTransaction(transactionId, atomicOptions);
}
/**
 * Creates a callback that acquires an entity lock before a primary-index search
 * and pre-builds a reusable WAIT log record for lock-conflict situations.
 *
 * @param datasetId            dataset being operated on
 * @param resourceId           id of the resource the lock is taken on
 * @param entityIdFields       fields forming the entity (primary) key
 * @param txnSubsystem         supplies the lock manager and the log manager
 * @param txnCtx               transaction this operation runs in
 * @param operatorNodePushable must be an LSMIndexInsertUpdateDeleteOperatorNodePushable
 */
public LockThenSearchOperationCallback(DatasetId datasetId, long resourceId, int[] entityIdFields,
        ITransactionSubsystem txnSubsystem, ITransactionContext txnCtx, IOperatorNodePushable operatorNodePushable) {
    super(datasetId, resourceId, entityIdFields, txnCtx, txnSubsystem.getLockManager());
    // NOTE(review): unchecked downcast — assumes callers always pass an
    // LSMIndexInsertUpdateDeleteOperatorNodePushable; confirm at call sites.
    this.operatorNodePushable = (LSMIndexInsertUpdateDeleteOperatorNodePushable) operatorNodePushable;
    this.logManager = txnSubsystem.getLogManager();
    // Build the WAIT record once up front so conflict paths do not allocate.
    this.logRecord = new LogRecord();
    logRecord.setTxnCtx(txnCtx);
    logRecord.setLogSource(LogSource.LOCAL);
    logRecord.setLogType(LogType.WAIT);
    logRecord.setTxnId(txnCtx.getTxnId().getId());
    logRecord.computeAndSetLogSize();
}
private void logWait() throws ACIDException { indexRecord.setLogType(LogType.WAIT); indexRecord.computeAndSetLogSize(); txnSubsystem.getLogManager().log(indexRecord); // set the log type back to UPDATE for normal updates indexRecord.setLogType(LogType.UPDATE); } }
/**
 * Populates {@code logRecord} as an entity-commit record for one primary key.
 *
 * @param logRecord         record to fill in place
 * @param txnCtx            owning transaction context
 * @param datasetId         dataset the entity belongs to
 * @param PKHashValue       hash of the primary-key value
 * @param PKValue           primary-key tuple
 * @param PKFields          indexes of the primary-key fields
 * @param resourcePartition partition the resource lives in
 * @param entityCommitType  concrete entity-commit log type to stamp
 */
public static void formEntityCommitLogRecord(LogRecord logRecord, ITransactionContext txnCtx, int datasetId,
        int PKHashValue, ITupleReference PKValue, int[] PKFields, int resourcePartition, byte entityCommitType) {
    logRecord.setTxnCtx(txnCtx);
    logRecord.setTxnId(txnCtx.getTxnId().getId());
    logRecord.setLogType(entityCommitType);
    logRecord.setDatasetId(datasetId);
    logRecord.setResourcePartition(resourcePartition);
    // Primary-key payload.
    logRecord.setPKHashValue(PKHashValue);
    logRecord.setPKFields(PKFields);
    logRecord.setPKFieldCnt(PKFields.length);
    logRecord.setPKValue(PKValue);
    // Sizes are derived from the fields above, so they are computed last.
    logRecord.computeAndSetPKValueSize();
    logRecord.computeAndSetLogSize();
}
public static void formMarkerLogRecord(LogRecord logRecord, ITransactionContext txnCtx, int datasetId, int resourcePartition, ByteBuffer marker) { logRecord.setTxnCtx(txnCtx); logRecord.setLogSource(LogSource.LOCAL); logRecord.setLogType(LogType.MARKER); logRecord.setTxnId(txnCtx.getTxnId().getId()); logRecord.setDatasetId(datasetId); logRecord.setResourcePartition(resourcePartition); marker.get(); // read the first byte since it is not part of the marker object logRecord.setMarker(marker); logRecord.computeAndSetLogSize(); } }
/**
 * Builds a checkpoint from the current log/transaction state, persists it, and
 * cleans up older checkpoint artifacts.
 *
 * @param minMCTFirstLSN low watermark LSN recorded in the checkpoint
 * @param sharp          whether this is a sharp checkpoint
 * @throws HyracksDataException if persisting the checkpoint fails
 */
protected void capture(long minMCTFirstLSN, boolean sharp) throws HyracksDataException {
    final ILogManager logManager = txnSubsystem.getLogManager();
    final ITransactionManager transactionManager = txnSubsystem.getTransactionManager();
    final long checkpointId = getNextCheckpointId();
    final Checkpoint checkpoint = new Checkpoint(checkpointId, logManager.getAppendLSN(), minMCTFirstLSN,
            transactionManager.getMaxTxnId(), sharp, StorageConstants.VERSION);
    persist(checkpoint);
    cleanup();
}
/**
 * Starts an entity-level transaction with a fixed id and registers the given
 * index with it for the given resource.
 *
 * NOTE(review): the method name "beingTransaction" looks like a typo for
 * "beginTransaction"; renaming would touch callers outside this view, so it is
 * only flagged here.
 * NOTE(review): the hard-coded TxnId(1) presumably assumes this is the only
 * transaction in this context — confirm before reusing elsewhere.
 */
private static ITransactionContext beingTransaction(INcApplicationContext ncAppCtx, ILSMIndex index, long resourceId) {
    final TxnId txnId = new TxnId(1);
    final TransactionOptions options = new TransactionOptions(ITransactionManager.AtomicityLevel.ENTITY_LEVEL);
    final ITransactionManager transactionManager = ncAppCtx.getTransactionSubsystem().getTransactionManager();
    final ITransactionContext txnCtx = transactionManager.beginTransaction(txnId, options);
    // NOTE(review): semantics of the trailing 'true' flag are not visible from
    // this chunk — verify against ITransactionContext.register's signature.
    txnCtx.register(resourceId, 0, index, NoOpOperationCallback.INSTANCE, true);
    return txnCtx;
}
/**
 * Logs the new tuple value after an update; does nothing when the update
 * produced no new value.
 *
 * @param newValue tuple written by the update, or null if there is none
 * @throws HyracksDataException if appending the record to the log fails
 */
public void after(ITupleReference newValue) throws HyracksDataException {
    if (newValue == null) {
        return;
    }
    filterRecord.setNewValueSize(SimpleTupleWriter.INSTANCE.bytesRequired(newValue));
    filterRecord.setNewValue(newValue);
    filterRecord.computeAndSetLogSize();
    txnSubsystem.getLogManager().log(filterRecord);
}
/**
 * Populates {@code logRecord} as a FLUSH record for a component-flush event.
 * FLUSH records are not tied to any transaction, hence the -1 txn id.
 *
 * @param logRecord              record to fill in place
 * @param datasetId              dataset whose component is being flushed
 * @param resourcePartition      partition the resource lives in
 * @param flushingComponentMinId min id of the flushing component
 * @param flushingComponentMaxId max id of the flushing component
 * @param opTracker              operation tracker notified for this flush
 */
public static void formFlushLogRecord(LogRecord logRecord, int datasetId, int resourcePartition,
        long flushingComponentMinId, long flushingComponentMaxId, PrimaryIndexOperationTracker opTracker) {
    logRecord.setLogType(LogType.FLUSH);
    logRecord.setTxnId(-1);
    logRecord.setDatasetId(datasetId);
    logRecord.setResourcePartition(resourcePartition);
    // Component id range identifies exactly which component is flushing.
    logRecord.setFlushingComponentMinId(flushingComponentMinId);
    logRecord.setFlushingComponentMaxId(flushingComponentMaxId);
    logRecord.setOpTracker(opTracker);
    logRecord.computeAndSetLogSize();
}
/**
 * Releases the lock held by the given transaction on the given entity.
 *
 * @param datasetId       dataset the entity belongs to
 * @param entityHashValue hash identifying the entity
 * @param lockMode        mode the lock was taken in
 * @param txnContext      transaction releasing the lock
 * @throws ACIDException if the release fails
 */
@Override
public void unlock(DatasetId datasetId, int entityHashValue, byte lockMode, ITransactionContext txnContext)
        throws ACIDException {
    log("unlock", datasetId.getId(), entityHashValue, lockMode, txnContext);
    final long transactionId = txnContext.getTxnId().getId();
    // NOTE(review): assumes the txn already has a slot in txnId2TxnSlotMap;
    // a missing entry would NPE on unboxing — confirm callers guarantee this.
    final long slot = txnId2TxnSlotMap.get(transactionId);
    unlock(datasetId.getId(), entityHashValue, lockMode, slot);
}
/**
 * Populates {@code logRecord} as a job-termination record: JOB_COMMIT when the
 * job committed, ABORT otherwise.
 *
 * @param logRecord record to fill in place
 * @param txnId     id of the terminating transaction
 * @param isCommit  true for commit, false for abort
 */
public static void formJobTerminateLogRecord(LogRecord logRecord, long txnId, boolean isCommit) {
    if (isCommit) {
        logRecord.setLogType(LogType.JOB_COMMIT);
    } else {
        logRecord.setLogType(LogType.ABORT);
    }
    // Job-level records carry no dataset or primary-key information.
    logRecord.setDatasetId(-1);
    logRecord.setPKHashValue(-1);
    logRecord.setTxnId(txnId);
    logRecord.computeAndSetLogSize();
}
/**
 * Creates the lifecycle manager and pre-builds the reusable WAIT_FOR_FLUSHES
 * log record used to wait for in-flight flushes.
 *
 * @param storageProperties              storage configuration
 * @param resourceRepository             local resource repository
 * @param logManager                     transaction log manager
 * @param memoryManager                  dataset memory budget manager
 * @param indexCheckpointManagerProvider provider of index checkpoint managers
 * @param numPartitions                  number of partitions on this node
 */
public DatasetLifecycleManager(StorageProperties storageProperties, ILocalResourceRepository resourceRepository,
        ILogManager logManager, IDatasetMemoryManager memoryManager,
        IIndexCheckpointManagerProvider indexCheckpointManagerProvider, int numPartitions) {
    this.storageProperties = storageProperties;
    this.resourceRepository = resourceRepository;
    this.logManager = logManager;
    this.memoryManager = memoryManager;
    this.indexCheckpointManagerProvider = indexCheckpointManagerProvider;
    this.numPartitions = numPartitions;
    // Built once so flush-wait paths never allocate a record.
    waitLog = new LogRecord();
    waitLog.setLogType(LogType.WAIT_FOR_FLUSHES);
    waitLog.computeAndSetLogSize();
}
/**
 * Creates one task context per partition under a fresh job id and begins an
 * entity-level transaction for that job.
 *
 * @throws Exception if job or transaction setup fails
 */
private void initializeTestCtx() throws Exception {
    final JobId jobId = nc.newJobId();
    testCtxs = new IHyracksTaskContext[NUM_PARTITIONS];
    for (int partition = 0; partition < NUM_PARTITIONS; partition++) {
        testCtxs[partition] = nc.createTestContext(jobId, partition, false);
    }
    final TransactionOptions options = new TransactionOptions(ITransactionManager.AtomicityLevel.ENTITY_LEVEL);
    txnCtx = nc.getTransactionManager().beginTransaction(nc.getTxnJobId(jobId), options);
}
/**
 * Tells whether this non-flush record is one of the types whose append must be
 * waited on (job termination and the two wait record types).
 *
 * @param logRecord record to classify
 * @return true for JOB_COMMIT, ABORT, WAIT, and WAIT_FOR_FLUSHES records
 */
private boolean syncPendingNonFlushLog(ILogRecord logRecord) {
    final byte logType = logRecord.getLogType();
    return logType == LogType.JOB_COMMIT
            || logType == LogType.ABORT
            || logType == LogType.WAIT
            || logType == LogType.WAIT_FOR_FLUSHES;
}
/**
 * Builds a synthetic checkpoint that forces recovery to replay from the first
 * available log record.
 *
 * NOTE(review): capture() builds its Checkpoint as (id, lsn, minMCTFirstLsn,
 * maxTxnId, sharp, version), while here FIRST_CHECKPOINT_ID appears as the
 * fourth argument — confirm this ordering against the Checkpoint constructor.
 */
private Checkpoint forgeForceRecoveryCheckpoint() {
    /*
     * By setting the checkpoint first LSN (low watermark) to Long.MIN_VALUE, the recovery manager will start from
     * the first available log.
     * We set the storage version to the current version. If there is a version mismatch, it will be detected
     * during recovery.
     */
    return new Checkpoint(Long.MIN_VALUE, Long.MIN_VALUE, Integer.MIN_VALUE, FIRST_CHECKPOINT_ID, false,
            StorageConstants.VERSION);
}
/**
 * Creates a flush-dataset operator bound to one transaction and one dataset.
 *
 * @param spec      job spec this operator belongs to
 * @param txnId     transaction the flush runs under
 * @param datasetId id of the dataset to flush
 */
public FlushDatasetOperatorDescriptor(IOperatorDescriptorRegistry spec, TxnId txnId, int datasetId) {
    // One input arity, zero output arity: this operator only consumes frames.
    super(spec, 1, 0);
    this.datasetId = new ImmutableDatasetId(datasetId);
    this.txnId = txnId;
}
/**
 * Closes the underlying log file exactly once; later calls are no-ops.
 * Synchronized so concurrent closers see a consistent {@code open} flag.
 *
 * @throws IOException if closing the log file fails
 */
public synchronized void close() throws IOException {
    if (!open) {
        return;
    }
    logManager.closeLogFile(this, fileChannel);
    open = false;
}
/**
 * Computes the serialized size of an UPDATE log record, including the optional
 * old-value section when an old value is present.
 */
private void setUpdateLogSize() {
    final int base = getUpdateLogSizeWithoutOldValue();
    // Old-value section = its size field + its field count + the tuple bytes.
    final int oldValueSection = oldValueSize > 0 ? Integer.BYTES + Integer.BYTES + oldValueSize : 0;
    logSize = base + oldValueSection;
}
/**
 * Returns the largest transaction id handed out so far, as tracked by the
 * transaction-id factory.
 */
@Override
public long getMaxTxnId() {
    return txnIdFactory.getMaxTxnId();
}
}