// Always treats the condition as invalid: unconditionally throws a retriable failure.
// NOTE(review): the timestamp argument is ignored — presumably intentional for this
// implementation; confirm against the interface this overrides.
@Override public void throwIfConditionInvalid(long timestamp) { throw new TransactionFailedRetriableException("Condition failed"); }
// Always treats the condition as invalid: unconditionally throws a retriable failure.
// NOTE(review): the timestamp argument is ignored — presumably intentional for this
// implementation; confirm against the interface this overrides.
@Override public void throwIfConditionInvalid(long timestamp) { throw new TransactionFailedRetriableException("Condition failed"); }
/**
 * Validates that a read ("get") may proceed against the given table, recording the
 * table as involved in this transaction as a side effect.
 *
 * @param tableRef table about to be read
 * @throws TransactionFailedRetriableException if a read timeout is configured and has elapsed
 * @throws IllegalArgumentException if the table is hidden and hidden access is not allowed
 * @throws CommittedTransactionException if the transaction is no longer in flight
 */
protected void checkGetPreconditions(TableReference tableRef) {
    markTableAsInvolvedInThisTransaction(tableRef);
    // Fail fast (retriably) if this transaction has outlived its configured read timeout.
    if (transactionReadTimeoutMillis != null
            && System.currentTimeMillis() - timeCreated > transactionReadTimeoutMillis) {
        throw new TransactionFailedRetriableException("Transaction timed out.");
    }
    // Hidden (internal) tables may only be read when explicitly allowed.
    Preconditions.checkArgument(allowHiddenTableAccess || !AtlasDbConstants.HIDDEN_TABLES.contains(tableRef));
    // Reads are only legal while the transaction is UNCOMMITTED or COMMITTING.
    if (!(state.get() == State.UNCOMMITTED || state.get() == State.COMMITTING)) {
        throw new CommittedTransactionException();
    }
}
/**
 * Rolls back someone else's transaction by writing the failed-commit marker for its
 * start timestamp, unless a commit record already exists.
 *
 * @param startTs start timestamp of the other transaction
 * @param transactionService service used to record the rollback
 * @return true if the other transaction was rolled back
 */
private boolean rollbackOtherTransaction(long startTs, TransactionService transactionService) {
    try {
        transactionService.putUnlessExists(startTs, TransactionConstants.FAILED_COMMIT_TS);
        transactionOutcomeMetrics.markRollbackOtherTransaction();
        return true;
    } catch (KeyAlreadyExistsException e) {
        // Benign race: another actor rolled this transaction back (or it committed) first.
        // Logged with a synthetic exception to capture a stack trace for diagnostics.
        log.info("This isn't a bug but it should be very infrequent. Two transactions tried to roll back someone"
                + " else's request with start: {}",
                SafeArg.of("startTs", startTs),
                new TransactionFailedRetriableException(
                        "Two transactions tried to roll back someone else's request with start: " + startTs, e));
        return false;
    }
}
@Override protected void putMetadataAndHashIndexTask(Transaction t, Map<Long, StreamMetadata> streamIdsToMetadata) { ValueStreamMetadataTable mdTable = tables.getValueStreamMetadataTable(t); Map<Long, StreamMetadata> prevMetadatas = getMetadata(t, streamIdsToMetadata.keySet()); Map<ValueStreamMetadataTable.ValueStreamMetadataRow, StreamMetadata> rowsToStoredMetadata = Maps.newHashMap(); Map<ValueStreamMetadataTable.ValueStreamMetadataRow, StreamMetadata> rowsToUnstoredMetadata = Maps.newHashMap(); for (Entry<Long, StreamMetadata> e : streamIdsToMetadata.entrySet()) { long streamId = e.getKey(); StreamMetadata metadata = e.getValue(); StreamMetadata prevMetadata = prevMetadatas.get(streamId); if (metadata.getStatus() == Status.STORED) { if (prevMetadata == null || prevMetadata.getStatus() != Status.STORING) { // This can happen if we cleanup old streams. throw new TransactionFailedRetriableException("Cannot mark a stream as stored that isn't currently storing: " + prevMetadata); } rowsToStoredMetadata.put(ValueStreamMetadataTable.ValueStreamMetadataRow.of(streamId), metadata); } else if (metadata.getStatus() == Status.STORING) { // This will prevent two users trying to store the same id. if (prevMetadata != null) { throw new TransactionFailedRetriableException("Cannot reuse the same stream id: " + streamId); } rowsToUnstoredMetadata.put(ValueStreamMetadataTable.ValueStreamMetadataRow.of(streamId), metadata); } } putHashIndexTask(t, rowsToStoredMetadata); Map<ValueStreamMetadataTable.ValueStreamMetadataRow, StreamMetadata> rowsToMetadata = Maps.newHashMap(); rowsToMetadata.putAll(rowsToStoredMetadata); rowsToMetadata.putAll(rowsToUnstoredMetadata); mdTable.putMetadata(rowsToMetadata); }
@Override protected void putMetadataAndHashIndexTask(Transaction t, Map<Long, StreamMetadata> streamIdsToMetadata) { HotspottyDataStreamMetadataTable mdTable = tables.getHotspottyDataStreamMetadataTable(t); Map<Long, StreamMetadata> prevMetadatas = getMetadata(t, streamIdsToMetadata.keySet()); Map<HotspottyDataStreamMetadataTable.HotspottyDataStreamMetadataRow, StreamMetadata> rowsToStoredMetadata = Maps.newHashMap(); Map<HotspottyDataStreamMetadataTable.HotspottyDataStreamMetadataRow, StreamMetadata> rowsToUnstoredMetadata = Maps.newHashMap(); for (Entry<Long, StreamMetadata> e : streamIdsToMetadata.entrySet()) { long streamId = e.getKey(); StreamMetadata metadata = e.getValue(); StreamMetadata prevMetadata = prevMetadatas.get(streamId); if (metadata.getStatus() == Status.STORED) { if (prevMetadata == null || prevMetadata.getStatus() != Status.STORING) { // This can happen if we cleanup old streams. throw new TransactionFailedRetriableException("Cannot mark a stream as stored that isn't currently storing: " + prevMetadata); } rowsToStoredMetadata.put(HotspottyDataStreamMetadataTable.HotspottyDataStreamMetadataRow.of(streamId), metadata); } else if (metadata.getStatus() == Status.STORING) { // This will prevent two users trying to store the same id. if (prevMetadata != null) { throw new TransactionFailedRetriableException("Cannot reuse the same stream id: " + streamId); } rowsToUnstoredMetadata.put(HotspottyDataStreamMetadataTable.HotspottyDataStreamMetadataRow.of(streamId), metadata); } } putHashIndexTask(t, rowsToStoredMetadata); Map<HotspottyDataStreamMetadataTable.HotspottyDataStreamMetadataRow, StreamMetadata> rowsToMetadata = Maps.newHashMap(); rowsToMetadata.putAll(rowsToStoredMetadata); rowsToMetadata.putAll(rowsToUnstoredMetadata); mdTable.putMetadata(rowsToMetadata); }
@Override protected void putMetadataAndHashIndexTask(Transaction t, Map<Long, StreamMetadata> streamIdsToMetadata) { SnapshotsStreamMetadataTable mdTable = tables.getSnapshotsStreamMetadataTable(t); Map<Long, StreamMetadata> prevMetadatas = getMetadata(t, streamIdsToMetadata.keySet()); Map<SnapshotsStreamMetadataTable.SnapshotsStreamMetadataRow, StreamMetadata> rowsToStoredMetadata = Maps.newHashMap(); Map<SnapshotsStreamMetadataTable.SnapshotsStreamMetadataRow, StreamMetadata> rowsToUnstoredMetadata = Maps.newHashMap(); for (Entry<Long, StreamMetadata> e : streamIdsToMetadata.entrySet()) { long streamId = e.getKey(); StreamMetadata metadata = e.getValue(); StreamMetadata prevMetadata = prevMetadatas.get(streamId); if (metadata.getStatus() == Status.STORED) { if (prevMetadata == null || prevMetadata.getStatus() != Status.STORING) { // This can happen if we cleanup old streams. throw new TransactionFailedRetriableException("Cannot mark a stream as stored that isn't currently storing: " + prevMetadata); } rowsToStoredMetadata.put(SnapshotsStreamMetadataTable.SnapshotsStreamMetadataRow.of(streamId), metadata); } else if (metadata.getStatus() == Status.STORING) { // This will prevent two users trying to store the same id. if (prevMetadata != null) { throw new TransactionFailedRetriableException("Cannot reuse the same stream id: " + streamId); } rowsToUnstoredMetadata.put(SnapshotsStreamMetadataTable.SnapshotsStreamMetadataRow.of(streamId), metadata); } } putHashIndexTask(t, rowsToStoredMetadata); Map<SnapshotsStreamMetadataTable.SnapshotsStreamMetadataRow, StreamMetadata> rowsToMetadata = Maps.newHashMap(); rowsToMetadata.putAll(rowsToStoredMetadata); rowsToMetadata.putAll(rowsToUnstoredMetadata); mdTable.putMetadata(rowsToMetadata); }
@Override protected void putMetadataAndHashIndexTask(Transaction t, Map<Long, StreamMetadata> streamIdsToMetadata) { DataStreamMetadataTable mdTable = tables.getDataStreamMetadataTable(t); Map<Long, StreamMetadata> prevMetadatas = getMetadata(t, streamIdsToMetadata.keySet()); Map<DataStreamMetadataTable.DataStreamMetadataRow, StreamMetadata> rowsToStoredMetadata = Maps.newHashMap(); Map<DataStreamMetadataTable.DataStreamMetadataRow, StreamMetadata> rowsToUnstoredMetadata = Maps.newHashMap(); for (Entry<Long, StreamMetadata> e : streamIdsToMetadata.entrySet()) { long streamId = e.getKey(); StreamMetadata metadata = e.getValue(); StreamMetadata prevMetadata = prevMetadatas.get(streamId); if (metadata.getStatus() == Status.STORED) { if (prevMetadata == null || prevMetadata.getStatus() != Status.STORING) { // This can happen if we cleanup old streams. throw new TransactionFailedRetriableException("Cannot mark a stream as stored that isn't currently storing: " + prevMetadata); } rowsToStoredMetadata.put(DataStreamMetadataTable.DataStreamMetadataRow.of(streamId), metadata); } else if (metadata.getStatus() == Status.STORING) { // This will prevent two users trying to store the same id. if (prevMetadata != null) { throw new TransactionFailedRetriableException("Cannot reuse the same stream id: " + streamId); } rowsToUnstoredMetadata.put(DataStreamMetadataTable.DataStreamMetadataRow.of(streamId), metadata); } } putHashIndexTask(t, rowsToStoredMetadata); Map<DataStreamMetadataTable.DataStreamMetadataRow, StreamMetadata> rowsToMetadata = Maps.newHashMap(); rowsToMetadata.putAll(rowsToStoredMetadata); rowsToMetadata.putAll(rowsToUnstoredMetadata); mdTable.putMetadata(rowsToMetadata); }
@Override protected void putMetadataAndHashIndexTask(Transaction t, Map<Long, StreamMetadata> streamIdsToMetadata) { UserPhotosStreamMetadataTable mdTable = tables.getUserPhotosStreamMetadataTable(t); Map<Long, StreamMetadata> prevMetadatas = getMetadata(t, streamIdsToMetadata.keySet()); Map<UserPhotosStreamMetadataTable.UserPhotosStreamMetadataRow, StreamMetadata> rowsToStoredMetadata = Maps.newHashMap(); Map<UserPhotosStreamMetadataTable.UserPhotosStreamMetadataRow, StreamMetadata> rowsToUnstoredMetadata = Maps.newHashMap(); for (Entry<Long, StreamMetadata> e : streamIdsToMetadata.entrySet()) { long streamId = e.getKey(); StreamMetadata metadata = e.getValue(); StreamMetadata prevMetadata = prevMetadatas.get(streamId); if (metadata.getStatus() == Status.STORED) { if (prevMetadata == null || prevMetadata.getStatus() != Status.STORING) { // This can happen if we cleanup old streams. throw new TransactionFailedRetriableException("Cannot mark a stream as stored that isn't currently storing: " + prevMetadata); } rowsToStoredMetadata.put(UserPhotosStreamMetadataTable.UserPhotosStreamMetadataRow.of(streamId), metadata); } else if (metadata.getStatus() == Status.STORING) { // This will prevent two users trying to store the same id. if (prevMetadata != null) { throw new TransactionFailedRetriableException("Cannot reuse the same stream id: " + streamId); } rowsToUnstoredMetadata.put(UserPhotosStreamMetadataTable.UserPhotosStreamMetadataRow.of(streamId), metadata); } } putHashIndexTask(t, rowsToStoredMetadata); Map<UserPhotosStreamMetadataTable.UserPhotosStreamMetadataRow, StreamMetadata> rowsToMetadata = Maps.newHashMap(); rowsToMetadata.putAll(rowsToStoredMetadata); rowsToMetadata.putAll(rowsToUnstoredMetadata); mdTable.putMetadata(rowsToMetadata); }
/**
 * Returns the commit timestamp for {@code startTimestamp}, first rolling the
 * transaction back (recording FAILED_COMMIT_TS) if it has not committed yet.
 * If the transaction ends up rolled back, the cells it wrote are deleted from
 * the key-value service.
 *
 * @param startTimestamp start timestamp of the transaction in question
 * @param tableNameToCell cells written by that transaction, keyed by table
 * @return the (possibly failed) commit timestamp recorded for the transaction
 */
private long getCommitTimestampRollBackIfNecessary(long startTimestamp, Multimap<TableReference, Cell> tableNameToCell) {
    Long commitTimestamp = transactionService.get(startTimestamp);
    if (commitTimestamp == null) {
        // Roll back this transaction (note that rolling back arbitrary transactions
        // can never cause correctness issues, only liveness issues)
        try {
            transactionService.putUnlessExists(startTimestamp, TransactionConstants.FAILED_COMMIT_TS);
        } catch (KeyAlreadyExistsException e) {
            // Benign race: another transaction rolled it back, or it committed, first.
            String msg = "Could not roll back transaction with start timestamp " + startTimestamp + "; either"
                    + " it was already rolled back (by a different transaction), or it committed successfully"
                    + " before we could roll it back.";
            log.error("This isn't a bug but it should be very infrequent. {}", msg,
                    new TransactionFailedRetriableException(msg, e));
        }
        // Re-read: either our rollback or the racing commit/rollback is now visible.
        commitTimestamp = transactionService.get(startTimestamp);
    }
    if (commitTimestamp == null) {
        throw new RuntimeException("expected commit timestamp to be non-null for startTs: " + startTimestamp);
    }
    // NOTE(review): commitTimestamp is a boxed Long; this '==' relies on FAILED_COMMIT_TS
    // being a primitive long so the comparison unboxes. Confirm — if the constant were a
    // Long, this would silently become a reference comparison.
    if (commitTimestamp == TransactionConstants.FAILED_COMMIT_TS) {
        // The transaction failed: remove everything it wrote.
        for (TableReference table : tableNameToCell.keySet()) {
            Map<Cell, Long> toDelete = Maps2.createConstantValueMap(tableNameToCell.get(table), startTimestamp);
            keyValueService.delete(table, Multimaps.forMap(toDelete));
        }
    }
    return commitTimestamp;
}
case THROW_EXCEPTION: if (!orphanedSentinels.contains(key)) { throw new TransactionFailedRetriableException("Tried to read a value that has been " + "deleted. This can be caused by hard delete transactions using the type " + TransactionType.AGGRESSIVE_HARD_DELETE
@Override protected void putMetadataAndHashIndexTask(Transaction t, Map<Long, StreamMetadata> streamIdsToMetadata) { TestHashComponentsStreamMetadataTable mdTable = tables.getTestHashComponentsStreamMetadataTable(t); Map<Long, StreamMetadata> prevMetadatas = getMetadata(t, streamIdsToMetadata.keySet()); Map<TestHashComponentsStreamMetadataTable.TestHashComponentsStreamMetadataRow, StreamMetadata> rowsToStoredMetadata = Maps.newHashMap(); Map<TestHashComponentsStreamMetadataTable.TestHashComponentsStreamMetadataRow, StreamMetadata> rowsToUnstoredMetadata = Maps.newHashMap(); for (Entry<Long, StreamMetadata> e : streamIdsToMetadata.entrySet()) { long streamId = e.getKey(); StreamMetadata metadata = e.getValue(); StreamMetadata prevMetadata = prevMetadatas.get(streamId); if (metadata.getStatus() == Status.STORED) { if (prevMetadata == null || prevMetadata.getStatus() != Status.STORING) { // This can happen if we cleanup old streams. throw new TransactionFailedRetriableException("Cannot mark a stream as stored that isn't currently storing: " + prevMetadata); } rowsToStoredMetadata.put(TestHashComponentsStreamMetadataTable.TestHashComponentsStreamMetadataRow.of(streamId), metadata); } else if (metadata.getStatus() == Status.STORING) { // This will prevent two users trying to store the same id. if (prevMetadata != null) { throw new TransactionFailedRetriableException("Cannot reuse the same stream id: " + streamId); } rowsToUnstoredMetadata.put(TestHashComponentsStreamMetadataTable.TestHashComponentsStreamMetadataRow.of(streamId), metadata); } } putHashIndexTask(t, rowsToStoredMetadata); Map<TestHashComponentsStreamMetadataTable.TestHashComponentsStreamMetadataRow, StreamMetadata> rowsToMetadata = Maps.newHashMap(); rowsToMetadata.putAll(rowsToStoredMetadata); rowsToMetadata.putAll(rowsToUnstoredMetadata); mdTable.putMetadata(rowsToMetadata); }
@Override protected void putMetadataAndHashIndexTask(Transaction t, Map<Long, StreamMetadata> streamIdsToMetadata) { StreamTestMaxMemStreamMetadataTable mdTable = tables.getStreamTestMaxMemStreamMetadataTable(t); Map<Long, StreamMetadata> prevMetadatas = getMetadata(t, streamIdsToMetadata.keySet()); Map<StreamTestMaxMemStreamMetadataTable.StreamTestMaxMemStreamMetadataRow, StreamMetadata> rowsToStoredMetadata = Maps.newHashMap(); Map<StreamTestMaxMemStreamMetadataTable.StreamTestMaxMemStreamMetadataRow, StreamMetadata> rowsToUnstoredMetadata = Maps.newHashMap(); for (Entry<Long, StreamMetadata> e : streamIdsToMetadata.entrySet()) { long streamId = e.getKey(); StreamMetadata metadata = e.getValue(); StreamMetadata prevMetadata = prevMetadatas.get(streamId); if (metadata.getStatus() == Status.STORED) { if (prevMetadata == null || prevMetadata.getStatus() != Status.STORING) { // This can happen if we cleanup old streams. throw new TransactionFailedRetriableException("Cannot mark a stream as stored that isn't currently storing: " + prevMetadata); } rowsToStoredMetadata.put(StreamTestMaxMemStreamMetadataTable.StreamTestMaxMemStreamMetadataRow.of(streamId), metadata); } else if (metadata.getStatus() == Status.STORING) { // This will prevent two users trying to store the same id. if (prevMetadata != null) { throw new TransactionFailedRetriableException("Cannot reuse the same stream id: " + streamId); } rowsToUnstoredMetadata.put(StreamTestMaxMemStreamMetadataTable.StreamTestMaxMemStreamMetadataRow.of(streamId), metadata); } } putHashIndexTask(t, rowsToStoredMetadata); Map<StreamTestMaxMemStreamMetadataTable.StreamTestMaxMemStreamMetadataRow, StreamMetadata> rowsToMetadata = Maps.newHashMap(); rowsToMetadata.putAll(rowsToStoredMetadata); rowsToMetadata.putAll(rowsToUnstoredMetadata); mdTable.putMetadata(rowsToMetadata); }
@Override protected void putMetadataAndHashIndexTask(Transaction t, Map<Long, StreamMetadata> streamIdsToMetadata) { StreamTestStreamMetadataTable mdTable = tables.getStreamTestStreamMetadataTable(t); Map<Long, StreamMetadata> prevMetadatas = getMetadata(t, streamIdsToMetadata.keySet()); Map<StreamTestStreamMetadataTable.StreamTestStreamMetadataRow, StreamMetadata> rowsToStoredMetadata = Maps.newHashMap(); Map<StreamTestStreamMetadataTable.StreamTestStreamMetadataRow, StreamMetadata> rowsToUnstoredMetadata = Maps.newHashMap(); for (Entry<Long, StreamMetadata> e : streamIdsToMetadata.entrySet()) { long streamId = e.getKey(); StreamMetadata metadata = e.getValue(); StreamMetadata prevMetadata = prevMetadatas.get(streamId); if (metadata.getStatus() == Status.STORED) { if (prevMetadata == null || prevMetadata.getStatus() != Status.STORING) { // This can happen if we cleanup old streams. throw new TransactionFailedRetriableException("Cannot mark a stream as stored that isn't currently storing: " + prevMetadata); } rowsToStoredMetadata.put(StreamTestStreamMetadataTable.StreamTestStreamMetadataRow.of(streamId), metadata); } else if (metadata.getStatus() == Status.STORING) { // This will prevent two users trying to store the same id. if (prevMetadata != null) { throw new TransactionFailedRetriableException("Cannot reuse the same stream id: " + streamId); } rowsToUnstoredMetadata.put(StreamTestStreamMetadataTable.StreamTestStreamMetadataRow.of(streamId), metadata); } } putHashIndexTask(t, rowsToStoredMetadata); Map<StreamTestStreamMetadataTable.StreamTestStreamMetadataRow, StreamMetadata> rowsToMetadata = Maps.newHashMap(); rowsToMetadata.putAll(rowsToStoredMetadata); rowsToMetadata.putAll(rowsToUnstoredMetadata); mdTable.putMetadata(rowsToMetadata); }
@Override protected void putMetadataAndHashIndexTask(Transaction t, Map<Long, StreamMetadata> streamIdsToMetadata) { StreamTestWithHashStreamMetadataTable mdTable = tables.getStreamTestWithHashStreamMetadataTable(t); Map<Long, StreamMetadata> prevMetadatas = getMetadata(t, streamIdsToMetadata.keySet()); Map<StreamTestWithHashStreamMetadataTable.StreamTestWithHashStreamMetadataRow, StreamMetadata> rowsToStoredMetadata = Maps.newHashMap(); Map<StreamTestWithHashStreamMetadataTable.StreamTestWithHashStreamMetadataRow, StreamMetadata> rowsToUnstoredMetadata = Maps.newHashMap(); for (Entry<Long, StreamMetadata> e : streamIdsToMetadata.entrySet()) { long streamId = e.getKey(); StreamMetadata metadata = e.getValue(); StreamMetadata prevMetadata = prevMetadatas.get(streamId); if (metadata.getStatus() == Status.STORED) { if (prevMetadata == null || prevMetadata.getStatus() != Status.STORING) { // This can happen if we cleanup old streams. throw new TransactionFailedRetriableException("Cannot mark a stream as stored that isn't currently storing: " + prevMetadata); } rowsToStoredMetadata.put(StreamTestWithHashStreamMetadataTable.StreamTestWithHashStreamMetadataRow.of(streamId), metadata); } else if (metadata.getStatus() == Status.STORING) { // This will prevent two users trying to store the same id. if (prevMetadata != null) { throw new TransactionFailedRetriableException("Cannot reuse the same stream id: " + streamId); } rowsToUnstoredMetadata.put(StreamTestWithHashStreamMetadataTable.StreamTestWithHashStreamMetadataRow.of(streamId), metadata); } } putHashIndexTask(t, rowsToStoredMetadata); Map<StreamTestWithHashStreamMetadataTable.StreamTestWithHashStreamMetadataRow, StreamMetadata> rowsToMetadata = Maps.newHashMap(); rowsToMetadata.putAll(rowsToStoredMetadata); rowsToMetadata.putAll(rowsToUnstoredMetadata); mdTable.putMetadata(rowsToMetadata); }
/**
 * Validates that a read ("get") may proceed against the given table, recording the
 * table as involved in this transaction as a side effect.
 *
 * @param tableRef table about to be read
 * @throws TransactionFailedRetriableException if a read timeout is configured and has elapsed
 * @throws IllegalArgumentException if the table is hidden and hidden access is not allowed
 * @throws CommittedTransactionException if the transaction is no longer in flight
 */
protected void checkGetPreconditions(TableReference tableRef) {
    markTableAsInvolvedInThisTransaction(tableRef);
    // Fail fast (retriably) if this transaction has outlived its configured read timeout.
    if (transactionReadTimeoutMillis != null
            && System.currentTimeMillis() - timeCreated > transactionReadTimeoutMillis) {
        throw new TransactionFailedRetriableException("Transaction timed out.");
    }
    // Hidden (internal) tables may only be read when explicitly allowed.
    Preconditions.checkArgument(allowHiddenTableAccess || !AtlasDbConstants.HIDDEN_TABLES.contains(tableRef));
    // Reads are only legal while the transaction is UNCOMMITTED or COMMITTING.
    if (!(state.get() == State.UNCOMMITTED || state.get() == State.COMMITTING)) {
        throw new CommittedTransactionException();
    }
}
/**
 * Rolls back someone else's transaction by writing the failed-commit marker for its
 * start timestamp, unless a commit record already exists.
 *
 * @param startTs start timestamp of the other transaction
 * @param transactionService service used to record the rollback
 * @return true if the other transaction was rolled back
 */
private boolean rollbackOtherTransaction(long startTs, TransactionService transactionService) {
    try {
        transactionService.putUnlessExists(startTs, TransactionConstants.FAILED_COMMIT_TS);
        transactionOutcomeMetrics.markRollbackOtherTransaction();
        return true;
    } catch (KeyAlreadyExistsException e) {
        // Benign race: another actor rolled this transaction back (or it committed) first.
        // Logged with a synthetic exception to capture a stack trace for diagnostics.
        log.info("This isn't a bug but it should be very infrequent. Two transactions tried to roll back someone"
                + " else's request with start: {}",
                SafeArg.of("startTs", startTs),
                new TransactionFailedRetriableException(
                        "Two transactions tried to roll back someone else's request with start: " + startTs, e));
        return false;
    }
}
@Override protected void putMetadataAndHashIndexTask(Transaction t, Map<Long, StreamMetadata> streamIdsToMetadata) { SnapshotsStreamMetadataTable mdTable = tables.getSnapshotsStreamMetadataTable(t); Map<Long, StreamMetadata> prevMetadatas = getMetadata(t, streamIdsToMetadata.keySet()); Map<SnapshotsStreamMetadataTable.SnapshotsStreamMetadataRow, StreamMetadata> rowsToStoredMetadata = Maps.newHashMap(); Map<SnapshotsStreamMetadataTable.SnapshotsStreamMetadataRow, StreamMetadata> rowsToUnstoredMetadata = Maps.newHashMap(); for (Entry<Long, StreamMetadata> e : streamIdsToMetadata.entrySet()) { long streamId = e.getKey(); StreamMetadata metadata = e.getValue(); StreamMetadata prevMetadata = prevMetadatas.get(streamId); if (metadata.getStatus() == Status.STORED) { if (prevMetadata == null || prevMetadata.getStatus() != Status.STORING) { // This can happen if we cleanup old streams. throw new TransactionFailedRetriableException("Cannot mark a stream as stored that isn't currently storing: " + prevMetadata); } rowsToStoredMetadata.put(SnapshotsStreamMetadataTable.SnapshotsStreamMetadataRow.of(streamId), metadata); } else if (metadata.getStatus() == Status.STORING) { // This will prevent two users trying to store the same id. if (prevMetadata != null) { throw new TransactionFailedRetriableException("Cannot reuse the same stream id: " + streamId); } rowsToUnstoredMetadata.put(SnapshotsStreamMetadataTable.SnapshotsStreamMetadataRow.of(streamId), metadata); } } putHashIndexTask(t, rowsToStoredMetadata); Map<SnapshotsStreamMetadataTable.SnapshotsStreamMetadataRow, StreamMetadata> rowsToMetadata = Maps.newHashMap(); rowsToMetadata.putAll(rowsToStoredMetadata); rowsToMetadata.putAll(rowsToUnstoredMetadata); mdTable.putMetadata(rowsToMetadata); }
/**
 * Returns the commit timestamp for {@code startTimestamp}, first rolling the
 * transaction back (recording FAILED_COMMIT_TS) if it has not committed yet.
 * If the transaction ends up rolled back, the cells it wrote are deleted from
 * the key-value service.
 *
 * @param startTimestamp start timestamp of the transaction in question
 * @param tableNameToCell cells written by that transaction, keyed by table
 * @return the (possibly failed) commit timestamp recorded for the transaction
 */
private long getCommitTimestampRollBackIfNecessary(long startTimestamp, Multimap<TableReference, Cell> tableNameToCell) {
    Long commitTimestamp = transactionService.get(startTimestamp);
    if (commitTimestamp == null) {
        // Roll back this transaction (note that rolling back arbitrary transactions
        // can never cause correctness issues, only liveness issues)
        try {
            transactionService.putUnlessExists(startTimestamp, TransactionConstants.FAILED_COMMIT_TS);
        } catch (KeyAlreadyExistsException e) {
            // Benign race: another transaction rolled it back, or it committed, first.
            String msg = "Could not roll back transaction with start timestamp " + startTimestamp + "; either"
                    + " it was already rolled back (by a different transaction), or it committed successfully"
                    + " before we could roll it back.";
            log.error("This isn't a bug but it should be very infrequent. {}", msg,
                    new TransactionFailedRetriableException(msg, e));
        }
        // Re-read: either our rollback or the racing commit/rollback is now visible.
        commitTimestamp = transactionService.get(startTimestamp);
    }
    if (commitTimestamp == null) {
        throw new RuntimeException("expected commit timestamp to be non-null for startTs: " + startTimestamp);
    }
    // NOTE(review): commitTimestamp is a boxed Long; this '==' relies on FAILED_COMMIT_TS
    // being a primitive long so the comparison unboxes. Confirm — if the constant were a
    // Long, this would silently become a reference comparison.
    if (commitTimestamp == TransactionConstants.FAILED_COMMIT_TS) {
        // The transaction failed: remove everything it wrote.
        for (TableReference table : tableNameToCell.keySet()) {
            Map<Cell, Long> toDelete = Maps2.createConstantValueMap(tableNameToCell.get(table), startTimestamp);
            keyValueService.delete(table, Multimaps.forMap(toDelete));
        }
    }
    return commitTimestamp;
}
case THROW_EXCEPTION: if (!orphanedSentinels.contains(key)) { throw new TransactionFailedRetriableException("Tried to read a value that has been " + "deleted. This can be caused by hard delete transactions using the type " + TransactionType.AGGRESSIVE_HARD_DELETE