/**
 * Returns a fresh transaction context derived from the context currently held
 * by this connection's mutation state.
 */
@Override
public PhoenixTransactionContext getTransactionContext() {
    final PhoenixTransactionContext current =
            connection.getMutationState().getPhoenixTransactionContext();
    return current.newTransactionContext(current, true);
}
@Override
public MutationState execute() throws SQLException {
    // Establish a DDL fence on the data table before delegating, so the
    // wrapped plan runs against a consistent view of the table's metadata.
    connection.getMutationState().commitDDLFence(dataTable);
    return super.execute();
} }; // NOTE(review): closes an enclosing anonymous class not visible in this chunk
// Snapshot the connection's pending mutations (optionally including those for
// mutable indexes) and adapt them to an Iterator of Cell lists.
// NOTE(review): fragment — the anonymous Iterator opened here continues beyond this chunk.
final Iterator<Pair<byte[],List<Mutation>>> iterator = pconn.getMutationState().toMutations(includeMutableIndexes); return new Iterator<Pair<byte[],List<Cell>>>() {
/**
 * Closes this iterator. First folds the final child mutation state back into the
 * parent connection, then closes the cloned connection. This runs single-threaded
 * after the parallel results have been processed, so the join is safe here.
 */
@Override
public void close() throws SQLException {
    try {
        // Even when auto-commit is on for the cloned child connection (so
        // finalState carries no mutations), joining still transfers the mutation
        // metrics gathered by the mutating iterator to the parent connection.
        MutatingParallelIteratorFactory.this.connection.getMutationState().join(finalState);
    } finally {
        clonedConnection.close();
    }
}
/**
 * Executes every plan in order, joining each resulting state into the
 * connection's mutation state; the state of the first plan is returned.
 */
@Override
public MutationState execute() throws SQLException {
    final MutationState firstState = firstPlan.execute();
    statement.getConnection().getMutationState().join(firstState);
    // Remaining plans: execute and fold their states in, one by one.
    for (int i = 1; i < plans.size(); i++) {
        statement.getConnection().getMutationState().join(plans.get(i).execute());
    }
    return firstState;
}
/**
 * Copy constructor that reuses the source connection's services, metadata and
 * mutation state while overriding the upgrade-related flags.
 *
 * @param connection the connection to copy from
 * @param isDescRowKeyOrderUpgrade whether a DESC row-key order upgrade is in progress
 * @param isRunningUpgrade whether a system upgrade is in progress
 * @throws SQLException if the delegated constructor fails
 */
public PhoenixConnection(PhoenixConnection connection, boolean isDescRowKeyOrderUpgrade, boolean isRunningUpgrade) throws SQLException {
    this(connection.getQueryServices(), connection.getURL(), connection
            .getClientInfo(), connection.metaData, connection
            .getMutationState(), isDescRowKeyOrderUpgrade, isRunningUpgrade,
            connection.buildingIndex);
    // Flags and counters not covered by the delegated constructor are copied here.
    this.isAutoCommit = connection.isAutoCommit;
    this.isAutoFlush = connection.isAutoFlush;
    this.sampler = connection.sampler;
    this.statementExecutionCounter = connection.statementExecutionCounter;
}
/**
 * Copy constructor that reuses the source connection's services, metadata,
 * mutation state and upgrade flags while substituting the given properties.
 *
 * @param connection the connection to copy from
 * @param props connection properties to use instead of the source's client info
 * @throws SQLException if the delegated constructor fails
 */
public PhoenixConnection(PhoenixConnection connection, Properties props) throws SQLException {
    this(connection.getQueryServices(), connection.getURL(), props,
            connection.metaData, connection.getMutationState(),
            connection.isDescVarLengthRowKeyUpgrade(),
            connection.isRunningUpgrade(), connection.buildingIndex);
    // Flags and counters not covered by the delegated constructor are copied here.
    this.isAutoCommit = connection.isAutoCommit;
    this.isAutoFlush = connection.isAutoFlush;
    this.sampler = connection.sampler;
    this.statementExecutionCounter = connection.statementExecutionCounter;
}
/**
 * Resolves the read timestamp for a connection: the SCN if one is set,
 * otherwise the transaction's initial write pointer (in milliseconds) when a
 * transaction is in progress, otherwise the supplied default.
 */
public static long getResolvedTimestamp(PhoenixConnection connection, boolean isTransactional, long defaultResolvedTimestamp) {
    MutationState mutationState = connection.getMutationState();
    Long scn = connection.getSCN();
    if (scn != null) {
        return scn;
    }
    if (isTransactional && mutationState.isTransactionStarted()) {
        return convertToMilliseconds(mutationState.getInitialWritePointer());
    }
    return defaultResolvedTimestamp;
}
/**
 * Returns the table timestamp for a transactional table, starting a transaction
 * on the connection's mutation state if none is in progress; returns null for
 * non-transactional tables.
 */
public static Long getTableTimestamp(PhoenixConnection connection, boolean transactional, TransactionFactory.Provider provider) throws SQLException {
    if (!transactional) {
        return null;
    }
    MutationState mutationState = connection.getMutationState();
    if (!mutationState.isTransactionStarted()) {
        // Lazily begin the transaction so a write pointer exists to read.
        mutationState.startTransaction(provider);
    }
    return convertToMilliseconds(mutationState.getInitialWritePointer());
}
@Override protected void map(NullWritable key, PhoenixIndexDBWritable record, Context context) throws IOException, InterruptedException { try { currentBatchCount++; final List<Object> values = record.getValues(); indxWritable.setValues(values); indxWritable.write(this.pStatement); this.pStatement.execute(); final PhoenixConnection pconn = connection.unwrap(PhoenixConnection.class); MutationState currentMutationState = pconn.getMutationState(); if (mutationState == null) { mutationState = currentMutationState; } // Keep accumulating Mutations till batch size mutationState.join(currentMutationState); // Write Mutation Batch if (currentBatchCount % batchSize == 0) { writeBatch(mutationState, context); mutationState = null; } // Make sure progress is reported to Application Master. context.progress(); } catch (SQLException e) { LOG.error(" Error {} while read/write of a record ", e.getMessage()); context.getCounter(PhoenixJobCounters.FAILED_RECORDS).increment(currentBatchCount); throw new RuntimeException(e); } context.getCounter(PhoenixJobCounters.INPUT_RECORDS).increment(1); }
/**
 * Resolves the timestamp for a metadata result: the transaction's initial
 * write pointer (in milliseconds) when the table is transactional and a
 * transaction is in progress, otherwise the result's mutation time.
 */
public static long getResolvedTimestamp(PhoenixConnection connection, MetaDataMutationResult result) {
    PTable table = result.getTable();
    MutationState mutationState = connection.getMutationState();
    if (table != null && table.isTransactional() && mutationState.isTransactionStarted()) {
        return convertToMilliseconds(mutationState.getInitialWritePointer());
    }
    return result.getMutationTime();
}
/**
 * Asserts that every pending mutation on the connection carries the expected
 * durability, and that there is at least one pending mutation.
 */
private void assertDurability(Connection conn, Durability durability) throws SQLException {
    PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
    Iterator<Pair<byte[], List<Mutation>>> it = pconn.getMutationState().toMutations();
    assertTrue(it.hasNext());
    while (it.hasNext()) {
        List<Mutation> tableMutations = it.next().getSecond();
        assertFalse(tableMutations.isEmpty());
        for (Mutation mutation : tableMutations) {
            assertEquals(durability, mutation.getDurability());
        }
    }
}
/**
 * Verifies that MUTATE_BATCH_SIZE_BYTES_ATTRIB propagates both to the
 * connection and to its MutationState.
 */
@Test
public void testMaxMutationSizeInBytesSetCorrectly() throws Exception {
    Properties connectionProperties = new Properties();
    connectionProperties.setProperty(QueryServices.MUTATE_BATCH_SIZE_BYTES_ATTRIB, "100");
    // try-with-resources: the original never closed the connection, leaking it
    // (and leaking on assertion failure as well).
    try (PhoenixConnection connection =
            (PhoenixConnection) DriverManager.getConnection(getUrl(), connectionProperties)) {
        assertEquals(100L, connection.getMutateBatchSizeBytes());
        assertEquals(100L, connection.getMutationState().getBatchSizeBytes());
    }
}
// NOTE(review): fragment — part of a larger constructor/method not fully visible here.
// Copy the connection's current mutation state so this plan operates on its own
// MutationState instance, then resolve the plan's target table.
this.mutationState = new MutationState(context.getConnection().getMutationState());
TableRef tableRef = plan.getTableRef();
PTable table = tableRef.getTable();
/**
 * If the statement's current table is a LOCAL index of {@code dataTable},
 * serializes its IndexMaintainer into the scan as the local-index build
 * attribute; for transactional tables the encoded transaction state is
 * attached as well.
 */
private void serializeIndexMaintainerIntoScan(Scan scan, PTable dataTable) throws SQLException {
    PName currentName = context.getCurrentTable().getTable().getName();
    List<PTable> matched = Lists.newArrayListWithExpectedSize(1);
    // At most one index can match: the current table itself, when it is local.
    for (PTable candidate : dataTable.getIndexes()) {
        if (candidate.getName().equals(currentName)
                && candidate.getIndexType() == IndexType.LOCAL) {
            matched.add(candidate);
            break;
        }
    }
    ImmutableBytesWritable ptr = new ImmutableBytesWritable();
    IndexMaintainer.serialize(dataTable, ptr, matched, context.getConnection());
    scan.setAttribute(BaseScannerRegionObserver.LOCAL_INDEX_BUILD_PROTO,
            ByteUtil.copyKeyBytesIfNecessary(ptr));
    if (dataTable.isTransactional()) {
        scan.setAttribute(BaseScannerRegionObserver.TX_STATE,
                context.getConnection().getMutationState().encodeTransaction());
    }
}
// NOTE(review): method is cut off — its body continues beyond this chunk.
private void upsertRows(Connection conn, String fullTableName) throws SQLException { ResultSet rs;
    MutationState state = conn.unwrap(PhoenixConnection.class)
            .getMutationState();
    // Running a query appears intended to start a transaction so a write
    // pointer exists — TODO confirm against the rest of the method.
    conn.createStatement().executeQuery("select 1 from " + fullTableName + " LIMIT 1").next();
    long wp = state.getWritePointer();
@SuppressWarnings("deprecation") private static ParallelIteratorFactory buildResultIteratorFactory(StatementContext context, FilterableStatement statement, TableRef tableRef, OrderBy orderBy, Integer limit,Integer offset, boolean allowPageFilter) throws SQLException { if ((isSerial(context, statement, tableRef, orderBy, getEstimateOfDataSizeToScanIfWithinThreshold(context, tableRef.getTable(), QueryUtil.getOffsetLimit(limit, offset)) != null) || isRoundRobinPossible(orderBy, context) || isPacingScannersPossible(context))) { return ParallelIteratorFactory.NOOP_FACTORY; } ParallelIteratorFactory spoolingResultIteratorFactory = new SpoolingResultIterator.SpoolingResultIteratorFactory( context.getConnection().getQueryServices()); // If we're doing an order by then we need the full result before we can do anything, // so we don't bother chunking it. If we're just doing a simple scan then we chunk // the scan to have a quicker initial response. if (!orderBy.getOrderByExpressions().isEmpty()) { return spoolingResultIteratorFactory; } else { return new ChunkedResultIterator.ChunkedResultIteratorFactory( spoolingResultIteratorFactory, context.getConnection().getMutationState(), tableRef); } }
@Override public MutationState execute() throws SQLException { connection.getMutationState().commitDDLFence(dataTable); Tuple tuple = plan.iterator().next(); long rowCount = 0; if (tuple != null) { Cell kv = tuple.getValue(0); ImmutableBytesWritable tmpPtr = new ImmutableBytesWritable(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength()); // A single Cell will be returned with the count(*) - we decode that here rowCount = PLong.INSTANCE.getCodec().decodeLong(tmpPtr, SortOrder.getDefault()); } // The contract is to return a MutationState that contains the number of rows modified. In this // case, it's the number of rows in the data table which corresponds to the number of index // rows that were added. return new MutationState(0, 0, connection, rowCount); }
// NOTE(review): fragment — part of a larger method not fully visible here.
// Build the iterator that drives the scan for this MapReduce split, wired to
// the plan's connection mutation state and a parallel scan grouper.
final TableResultIterator tableResultIterator = new TableResultIterator(
        queryPlan.getContext().getConnection().getMutationState(), scan, scanMetricsHolder,
        renewScannerLeaseThreshold, queryPlan, MapReduceParallelScanGrouper.getInstance());
// NOTE(review): fragment — the try block opened at the end continues beyond this chunk.
PTable table = dataPlan.getTableRef().getTable();
table.getIndexMaintainers(ptr, context.getConnection());
// Transactional tables carry their encoded transaction state to the server;
// non-transactional tables pass an empty byte array.
byte[] txState = table.isTransactional() ? connection.getMutationState().encodeTransaction() : ByteUtil.EMPTY_BYTE_ARRAY;
ServerCache cache = null;
try {