@Override
public void reconcile(ITupleReference tuple) throws HyracksDataException {
    // Blocking acquisition of a shared lock at dataset granularity (entity hash -1).
    try {
        lockManager.lock(datasetId, -1, LockMode.S, txnCtx);
    } catch (ACIDException e) {
        // Surface lock-manager failures through the Hyracks exception channel.
        throw HyracksDataException.create(e);
    }
}
@Override
public boolean proceed(ITupleReference tuple) throws HyracksDataException {
    // Non-blocking instant-lock probe on the tuple's entity (keyed by PK hash);
    // returns whether a shared lock could be granted right now.
    final int entityHash = computePrimaryKeyHashValue(tuple, primaryKeyFields);
    try {
        return lockManager.instantTryLock(datasetId, entityHash, LockMode.S, txnCtx);
    } catch (ACIDException e) {
        throw HyracksDataException.create(e);
    }
}
/**
 * Commits the transaction identified by {@code txnId}. For write transactions a
 * job-terminate (commit) log record is appended before the state is moved to
 * COMMITTED; read-only transactions skip logging. In all cases the context is
 * completed, its locks are released, and it is removed from the repository.
 *
 * @param txnId id of the transaction to commit
 * @throws ACIDException if the transaction context cannot be resolved or logging fails
 */
@Override
public void commitTransaction(TxnId txnId) throws ACIDException {
    final ITransactionContext txnCtx = getTransactionContext(txnId);
    try {
        if (txnCtx.isWriteTxn()) {
            LogRecord logRecord = new LogRecord();
            // 'true' -> commit-flavored job-terminate record (vs. abort).
            TransactionUtil.formJobTerminateLogRecord(txnCtx, logRecord, true);
            txnSubsystem.getLogManager().log(logRecord);
            txnCtx.setTxnState(ITransactionManager.COMMITTED);
        }
    } catch (Exception e) {
        if (LOGGER.isErrorEnabled()) {
            // Fix: pass the exception to the logger so the stack trace is recorded
            // (the original logged only the concatenated message and dropped 'e').
            LOGGER.error(" caused exception in commit !" + txnCtx.getTxnId(), e);
        }
        throw e;
    } finally {
        // Cleanup runs whether or not the commit record was written.
        txnCtx.complete();
        txnSubsystem.getLockManager().releaseLocks(txnCtx);
        txnCtxRepository.remove(txnCtx.getTxnId());
    }
}
/**
 * Releases the exclusive (X) entity lock held for {@code pkHash} on this dataset.
 * NOTE(review): sibling callbacks in this file lock/unlock with LockMode.S; this one
 * uses LockMode.X — presumably the matching acquisition (not visible in this chunk)
 * took an X lock; verify against the acquiring code path.
 */
public void release() throws ACIDException {
    lockManager.unlock(datasetId, pkHash, LockMode.X, txnCtx);
}
// Closing brace of the enclosing class (its header is outside this chunk).
}
@Override
public boolean proceed(ITupleReference tuple) throws HyracksDataException {
    // Non-blocking attempt to take a shared lock at dataset granularity (-1);
    // the result tells the caller whether the lock was granted.
    try {
        return lockManager.tryLock(datasetId, -1, LockMode.S, txnCtx);
    } catch (ACIDException e) {
        throw HyracksDataException.create(e);
    }
}
/**
 * Aborts the transaction identified by {@code txnId}. For write transactions an
 * abort (job-terminate, commit=false) log record is written, the transaction is
 * rolled back via the recovery manager, and the state is set to ABORTED. Cleanup
 * (complete, lock release, repository removal, checkpoint completed) always runs.
 */
@Override
public void abortTransaction(TxnId txnId) throws ACIDException {
    final ITransactionContext txnCtx = getTransactionContext(txnId);
    try {
        if (txnCtx.isWriteTxn()) {
            LogRecord logRecord = new LogRecord();
            // Durably record the abort decision before undoing any effects.
            TransactionUtil.formJobTerminateLogRecord(txnCtx, logRecord, false);
            txnSubsystem.getLogManager().log(logRecord);
            // NOTE(review): secure(txnId) is paired with completed(txnId) in the finally
            // block — presumably it holds off checkpointing while rollback is in flight;
            // confirm against ICheckpointManager.
            txnSubsystem.getCheckpointManager().secure(txnId);
            txnSubsystem.getRecoveryManager().rollbackTransaction(txnCtx);
            txnCtx.setTxnState(ITransactionManager.ABORTED);
        }
    } catch (HyracksDataException e) {
        // A failed rollback leaves partially-undone effects behind.
        String msg = "Could not complete rollback! System is in an inconsistent state";
        if (LOGGER.isErrorEnabled()) {
            LOGGER.log(Level.ERROR, msg, e);
        }
        throw new ACIDException(msg, e);
    } finally {
        // Always release locks and unregister the context, even if rollback failed.
        txnCtx.complete();
        txnSubsystem.getLockManager().releaseLocks(txnCtx);
        txnCtxRepository.remove(txnCtx.getTxnId());
        txnSubsystem.getCheckpointManager().completed(txnId);
    }
}
@Override
public void complete(ITupleReference tuple) throws HyracksDataException {
    // Drop the shared entity-level lock taken for this tuple's primary key.
    final int entityHashValue = computePrimaryKeyHashValue(tuple, primaryKeyFields);
    try {
        lockManager.unlock(datasetId, entityHashValue, LockMode.S, txnCtx);
    } catch (ACIDException e) {
        throw HyracksDataException.create(e);
    }
}
@Override
public void reconcile(ITupleReference tuple) throws HyracksDataException {
    // Block until a shared lock on this tuple's entity (keyed by its PK hash) is granted.
    final int keyHash = computePrimaryKeyHashValue(tuple, primaryKeyFields);
    try {
        lockManager.lock(datasetId, keyHash, LockMode.S, txnCtx);
    } catch (ACIDException e) {
        throw HyracksDataException.create(e);
    }
}
/**
 * Test helper: performs the abort protocol for {@code txnCtx}, but pauses between
 * writing the abort log record and performing the rollback. At the pause point it
 * notifies waiters on {@code t} and then waits on {@code t} until signalled, letting
 * a test interleave other work mid-abort.
 *
 * @throws InterruptedException if the stalled wait is interrupted
 */
private void stallAbortTxn(Thread t, ITransactionContext txnCtx, ITransactionSubsystem txnSubsystem, TxnId txnId)
        throws InterruptedException, HyracksDataException {
    try {
        if (txnCtx.isWriteTxn()) {
            LogRecord logRecord = new LogRecord();
            TransactionUtil.formJobTerminateLogRecord(txnCtx, logRecord, false);
            txnSubsystem.getLogManager().log(logRecord);
            txnSubsystem.getCheckpointManager().secure(txnId);
            // Handshake: wake the coordinating thread, then stall until it signals back.
            synchronized (t) {
                t.notifyAll();
                t.wait();
            }
            txnSubsystem.getRecoveryManager().rollbackTransaction(txnCtx);
            txnCtx.setTxnState(ITransactionManager.ABORTED);
        }
        // NOTE(review): ACIDException is thrown below without being declared, so in this
        // codebase it is evidently an unchecked exception.
    } catch (ACIDException | HyracksDataException e) {
        String msg = "Could not complete rollback! System is in an inconsistent state";
        throw new ACIDException(msg, e);
    } finally {
        // Mirror of the production abort cleanup: complete, release locks, unpin checkpoint.
        txnCtx.complete();
        txnSubsystem.getLockManager().releaseLocks(txnCtx);
        txnSubsystem.getCheckpointManager().completed(txnId);
    }
}
/**
 * Cancels the reconcile() operation. Since reconcile() acquires a shared entity
 * lock, cancelling must unlock it to reverse reconcile()'s effect.
 */
@Override
public void cancel(ITupleReference tuple) throws HyracksDataException {
    final int hash = computePrimaryKeyHashValue(tuple, primaryKeyFields);
    try {
        lockManager.unlock(datasetId, hash, LockMode.S, txnCtx);
    } catch (ACIDException e) {
        throw HyracksDataException.create(e);
    }
}
@Override
public boolean proceed(ITupleReference tuple) throws HyracksDataException {
    // Instant (non-blocking) shared-lock probe for the tuple's entity.
    final int primaryKeyHash = computePrimaryKeyHashValue(tuple, primaryKeyFields);
    boolean granted;
    try {
        granted = lockManager.instantTryLock(datasetId, primaryKeyHash, LockMode.S, txnCtx);
    } catch (ACIDException e) {
        throw HyracksDataException.create(e);
    }
    return granted;
}
try { for (int i = 0; i < 10000; i++) { lockManager.lock(datasetId, i, LockManagerConstants.LockMode.S, txnCtx); LogRecord logRecord = new LogRecord(); TransactionUtil.formEntityCommitLogRecord(logRecord, txnCtx, datasetId.getId(), i, tuple, pkFields, try { for (int i = 0; i < 10000; i++) { lockManager.lock(datasetId, i, LockManagerConstants.LockMode.S, txnCtx); LogRecord logRecord = new LogRecord(); TransactionUtil.formEntityCommitLogRecord(logRecord, txnCtx, datasetId.getId(), i, tuple, pkFields,
reusableDatasetId.setId(logRecord.getDatasetId()); txnCtx = txnSubsystem.getTransactionManager().getTransactionContext(reusableTxnId); txnSubsystem.getLockManager().unlock(reusableDatasetId, logRecord.getPKHashValue(), LockMode.ANY, txnCtx); txnCtx.notifyEntityCommitted(logRecord.getResourcePartition());
@Override public void close() throws HyracksDataException { try { INcApplicationContext appCtx = (INcApplicationContext) ctx.getJobletContext().getServiceContext().getApplicationContext(); IDatasetLifecycleManager datasetLifeCycleManager = appCtx.getDatasetLifecycleManager(); ILockManager lockManager = appCtx.getTransactionSubsystem().getLockManager(); ITransactionManager txnManager = appCtx.getTransactionSubsystem().getTransactionManager(); // get the local transaction ITransactionContext txnCtx = txnManager.getTransactionContext(txnId); // lock the dataset granule lockManager.lock(datasetId, -1, LockMode.S, txnCtx); // flush the dataset synchronously DatasetInfo datasetInfo = datasetLifeCycleManager.getDatasetInfo(datasetId.getId()); // TODO: Remove the isOpen check and let it fail if flush is requested for a dataset that is closed synchronized (datasetLifeCycleManager) { if (datasetInfo.isOpen()) { datasetLifeCycleManager.flushDataset(datasetId.getId(), false); } } datasetInfo.waitForIO(); } catch (ACIDException e) { throw HyracksDataException.create(e); } }