/**
 * Creates a factory producing chunked result iterators for the given table.
 *
 * @param delegateFactory factory this instance delegates iterator creation to
 * @param mutationState   the connection's mutation state; cloned here (see below)
 * @param tableRef        the table the produced iterators will scan
 */
public ChunkedResultIteratorFactory(ParallelIteratorFactory delegateFactory,
        MutationState mutationState, TableRef tableRef) {
    this.delegateFactory = delegateFactory;
    this.tableRef = tableRef;
    // Clone MutationState, as the one on the connection may change if auto commit is on
    // while we need a handle to the original one (for its transaction state).
    this.mutationState = new MutationState(mutationState);
}
/**
 * Drains all uncommitted mutations from the given MutationState and writes them
 * out through {@code writer} in size-bounded sub-batches, updating the job's
 * output-record counter, then rolls back the connection to discard the drained
 * mutations.
 *
 * @param mutationState source of the pending mutations
 * @param context       MapReduce task context used for counters
 */
private void writeBatch(MutationState mutationState, Context context)
        throws IOException, SQLException, InterruptedException {
    final Iterator<Pair<byte[], List<Mutation>>> pending = mutationState.toMutations(true, null);
    while (pending.hasNext()) {
        final Pair<byte[], List<Mutation>> tableMutations = pending.next();
        final List<Mutation> allMutations = tableMutations.getSecond();
        // Split into sub-batches bounded by batchSize / batchSizeBytes before writing.
        for (final List<Mutation> subBatch
                : MutationState.getMutationBatchList(batchSize, batchSizeBytes, allMutations)) {
            writer.write(subBatch);
        }
        context.getCounter(PhoenixJobCounters.OUTPUT_RECORDS).increment(allMutations.size());
    }
    // The mutations were written externally; discard them from the connection.
    connection.rollback();
    currentBatchCount = 0;
}
/**
 * Folds the statement indexes of each supplied uncommitted row into the running
 * {@code uncommittedStatementIndexes} array via {@code joinSortedIntArrays}.
 *
 * @param rowMutations per-row mutation state carrying statement indexes
 */
private void addUncommittedStatementIndexes(Collection<RowMutationState> rowMutations) {
    for (final RowMutationState rowState : rowMutations) {
        uncommittedStatementIndexes =
                joinSortedIntArrays(uncommittedStatementIndexes, rowState.getStatementIndexes());
    }
}
/**
 * Returns the transactional table timestamp in milliseconds, or {@code null} for
 * non-transactional tables.
 *
 * <p>Side effect: if no transaction is in progress on the connection's
 * MutationState, one is started with the given provider so an initial write
 * pointer is available.
 *
 * @param connection    the Phoenix connection
 * @param transactional whether the table is transactional
 * @param provider      transaction provider used if a transaction must be started
 * @return the timestamp in milliseconds, or {@code null} if not transactional
 * @throws SQLException if starting the transaction fails
 */
public static Long getTableTimestamp(PhoenixConnection connection, boolean transactional,
        TransactionFactory.Provider provider) throws SQLException {
    if (!transactional) {
        // Non-transactional tables have no transactional timestamp.
        return null;
    }
    final MutationState state = connection.getMutationState();
    if (!state.isTransactionStarted()) {
        state.startTransaction(provider);
    }
    return convertToMilliseconds(state.getInitialWritePointer());
}
public void setTransactionContext(PhoenixTransactionContext txContext) throws SQLException { if (!this.services.getProps().getBoolean( QueryServices.TRANSACTIONS_ENABLED, QueryServicesOptions.DEFAULT_TRANSACTIONS_ENABLED)) { throw new SQLExceptionInfo.Builder( SQLExceptionCode.TX_MUST_BE_ENABLED_TO_SET_TX_CONTEXT) .build().buildException(); } this.mutationState.rollback(); this.mutationState = new MutationState(this.mutationState.getMaxSize(), this.mutationState.getMaxSizeBytes(), this, txContext); // Write data to HBase after each statement execution as the commit may // not // come through Phoenix APIs. setAutoFlush(true); }
boolean sendAll = false; if (tableRefIterator == null) { serverTimeStamps = validateAll(); tableRefIterator = mutations.keySet().iterator(); sendAll = true; long serverTimestamp = serverTimeStamps == null ? validateAndGetServerTimestamp(tableRef, multiRowMutationState) : serverTimeStamps[i++]; Long scn = connection.getSCN(); long mutationTimestamp = scn == null ? HConstants.LATEST_TIMESTAMP : scn; final PTable table = tableRef.getTable(); Iterator<Pair<PName, List<Mutation>>> mutationsIterator = addRowMutations(tableRef, multiRowMutationState, mutationTimestamp, serverTimestamp, false, sendAll); addUncommittedStatementIndexes(multiRowMutationState.values()); if (txMutations.isEmpty()) { txMutations = Maps.newHashMapWithExpectedSize(mutations.size()); joinMutationState(new TableRef(tableRef), multiRowMutationState, txMutations); mutationSizeBytes = calculateMutationSize(mutationList); List<List<Mutation>> mutationBatchList = getMutationBatchList(batchSize, batchSizeBytes, mutationList); for (final List<Mutation> mutationBatch : mutationBatchList) { int[] uncommittedStatementIndexes = getUncommittedStatementIndexes(); sqlE = new CommitException(e, uncommittedStatementIndexes, serverTimestamp); numFailedMutations = uncommittedStatementIndexes.length;
SQLException sqlE = null; try { send(); txMutations = this.txMutations; sendSuccessful = true; logger.info(e.getClass().getName() + " at timestamp " + getInitialWritePointer() + " with retry count of " + retryCount); retryCommit = (e.getErrorCode() == SQLExceptionCode.TRANSACTION_CONFLICT_EXCEPTION TransactionFactory.Provider provider = phoenixTransactionContext.getProvider(); try { resetState(); } finally { if (retryCommit) { startTransaction(provider); retryCommit = shouldResubmitTransaction(txTableRefs); } catch (SQLException e) { retryCommit = false;
addFunctionArgMutation(function.getFunctionName(), arg, argUpsert, i); functionData.addAll(connection.getMutationState().toMutations().next().getSecond()); connection.rollback(); functionUpsert.setString(6, function.getReturnType()); functionUpsert.execute(); functionData.addAll(connection.getMutationState().toMutations(null).next().getSecond()); connection.rollback(); connection.setAutoCommit(wasAutoCommit); return new MutationState(1, 1000, connection);
/**
 * Get an iterator over the (unsorted) HBase mutations for the tables with
 * uncommitted data. Delegates to the two-argument {@code toMutations} overload
 * with its boolean flag set to {@code false}.
 *
 * @param timestamp timestamp to use for the mutations; may be {@code null}
 * @return iterator of pairs of HBase mutations for uncommitted data
 */
public Iterator<Pair<byte[], List<Mutation>>> toMutations(Long timestamp) {
    return toMutations(false, timestamp);
}
} catch (ColumnNotFoundException e) { if (statement.ifExists()) { return new MutationState(0, 0, connection); long indexTableSeqNum = incrementTableSeqNum(index, index.getType(), -indexColumnsToDrop.size(), null, null); dropColumnMutations(index, indexColumnsToDrop); long clientTimestamp = MutationState.getTableTimestamp(timeStamp, connection.getSCN()); connection.removeColumn(tenantId, index.getName().getString(), indexColumnsToDrop, clientTimestamp, indexTableSeqNum, tableMetaData.addAll(connection.getMutationState().toMutations(timeStamp).next().getSecond()); connection.rollback(); tableMetaData.addAll(connection.getMutationState().toMutations(timeStamp).next().getSecond()); connection.rollback(); throw new ColumnNotFoundException(schemaName, tableName, Bytes.toString(result.getFamilyName()), Bytes.toString(result.getColumnName())); return new MutationState(0, 0, connection); return new MutationState(0, 0, connection); } catch (ConcurrentTableMutationException e) { if (retried) {
MutationState state = new MutationState(tableRef, mutations, 0, maxSize, maxSizeBytes, connection); connection.getMutationState().join(state); for (int i = 0; i < otherTableRefs.size(); i++) { MutationState indexState = new MutationState(otherTableRefs.get(i), otherMutations.get(i), 0, maxSize, maxSizeBytes, connection); connection.getMutationState().join(indexState); connection.getMutationState().send(); mutations.clear(); if (otherMutations != null) { MutationState state = new MutationState(tableRef, mutations, nCommittedRows, maxSize, maxSizeBytes, connection); for (int i = 0; i < otherTableRefs.size(); i++) { MutationState indexState = new MutationState(otherTableRefs.get(i), otherMutations.get(i), 0, maxSize, maxSizeBytes, connection); state.join(indexState);
@Override public MutationState execute() throws SQLException { connection.getMutationState().commitDDLFence(dataTable); Tuple tuple = plan.iterator().next(); long rowCount = 0; if (tuple != null) { Cell kv = tuple.getValue(0); ImmutableBytesWritable tmpPtr = new ImmutableBytesWritable(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength()); // A single Cell will be returned with the count(*) - we decode that here rowCount = PLong.INSTANCE.getCodec().decodeLong(tmpPtr, SortOrder.getDefault()); } // The contract is to return a MutationState that contains the number of rows modified. In this // case, it's the number of rows in the data table which corresponds to the number of index // rows that were added. return new MutationState(0, 0, connection, rowCount); }
startTransaction(tableRef.getTable().getTransactionProvider()); send(strippedAliases.iterator()); return true;
MutationState mutationState = pconn.getMutationState(); if (table.isTransactional()) { mutationState.startTransaction(table.getTransactionProvider()); try (Table htable = mutationState.getHTable(table)) { byte[] markerRowKey = Bytes.toBytes("TO_DELETE"); htable.close(); if (table.isTransactional()) { mutationState.commit();
MutationState state = new MutationState(maxSize, maxSizeBytes, connection, totalRowCount/totalTablesUpdateClientSide); state.setReadMetricQueue(context.getReadMetricsQueue());
/**
 * Creates an iterator over the results of scanning the plan's target table.
 *
 * @param mutationState       used to resolve the HBase table handle
 * @param scan                the HBase scan to execute
 * @param scanMetricsHolder   receiver for scan metrics
 * @param renewLeaseThreshold threshold for scanner lease renewal
 * @param plan                the query plan being executed
 * @param scanGrouper         groups scans for parallel execution
 * @param caches              server-side caches keyed by cache id
 * @throws SQLException if the HBase table handle cannot be obtained
 */
public TableResultIterator(MutationState mutationState, Scan scan,
        ScanMetricsHolder scanMetricsHolder, long renewLeaseThreshold, QueryPlan plan,
        ParallelScanGrouper scanGrouper, Map<ImmutableBytesPtr, ServerCache> caches)
        throws SQLException {
    this.scan = scan;
    this.scanMetricsHolder = scanMetricsHolder;
    this.plan = plan;
    this.renewLeaseThreshold = renewLeaseThreshold;
    this.scanGrouper = scanGrouper;
    this.caches = caches;
    // Resolve the HBase table for the plan's target through the MutationState.
    final PTable targetTable = plan.getTableRef().getTable();
    htable = mutationState.getHTable(targetTable);
    this.scanIterator = UNINITIALIZED_SCANNER;
    this.hashCacheClient = new HashCacheClient(plan.getContext().getConnection());
    this.retry = plan.getContext().getConnection().getQueryServices().getProps()
            .getInt(QueryConstants.HASH_JOIN_CACHE_RETRIES,
                    QueryConstants.DEFAULT_HASH_JOIN_CACHE_RETRIES);
}
connection.getMutationState().startTransaction(table.getTransactionProvider());
List<List<Mutation>> batchLists = MutationState.getMutationBatchList(2, 10, list); assertTrue(batchLists.size() == 2); assertEquals(batchLists.get(0).size(), 1); List<List<Mutation>> batchLists = MutationState.getMutationBatchList(2, 10, list); assertTrue(batchLists.size() == 2); assertEquals(batchLists.get(0).size(), 2); List<List<Mutation>> batchLists = MutationState.getMutationBatchList(2, 10, list); assertTrue(batchLists.size() == 4); assertEquals(batchLists.get(0).size(), 1);
/**
 * Issues a DDL fence on the data table before delegating to the superclass's
 * execution logic.
 *
 * @return the MutationState produced by the superclass's {@code execute()}
 * @throws SQLException if the fence or the delegated execution fails
 */
@Override
public MutationState execute() throws SQLException {
    // Fence the data table so this DDL is ordered with respect to in-flight
    // transactions before running the statement itself.
    connection.getMutationState().commitDDLFence(dataTable);
    return super.execute();
}
};
/**
 * Gathers the statement indexes of every uncommitted row across all tables into
 * the accumulated {@code uncommittedStatementIndexes} array and returns it.
 *
 * @return the accumulated array of uncommitted statement indexes
 */
private int[] getUncommittedStatementIndexes() {
    for (final MultiRowMutationState tableRows : mutations.values()) {
        addUncommittedStatementIndexes(tableRows.values());
    }
    return uncommittedStatementIndexes;
}