// Factory wrapping a delegate ParallelIteratorFactory for chunked iteration over tableRef,
// holding a private snapshot of the connection's MutationState.
public ChunkedResultIteratorFactory(ParallelIteratorFactory delegateFactory,
        MutationState mutationState, TableRef tableRef) {
    this.delegateFactory = delegateFactory;
    this.tableRef = tableRef;
    // Clone MutationState, as the one on the connection may change if auto commit is on
    // while we need a handle to the original one (for its transaction state).
    this.mutationState = new MutationState(mutationState);
}
/**
 * Creates a fresh, empty MutationState bound to this connection.
 *
 * @param maxSize maximum number of rows the state may accumulate
 * @param maxSizeBytes maximum byte size the state may accumulate
 * @return a new MutationState with the given limits
 */
protected MutationState newMutationState(int maxSize, int maxSizeBytes) {
    final MutationState freshState = new MutationState(maxSize, maxSizeBytes, this);
    return freshState;
}
/**
 * Builds an empty MutationState for the given connection: no table mutations,
 * and a size offset explicitly reset to zero.
 *
 * @param maxSize maximum number of rows the state may accumulate
 * @param maxSizeBytes maximum byte size the state may accumulate
 * @param connection the connection the state is bound to
 * @return an empty MutationState
 */
public static MutationState emptyMutationState(long maxSize, long maxSizeBytes,
        PhoenixConnection connection) {
    final MutationState empty = new MutationState(maxSize, maxSizeBytes, connection,
            Collections.<TableRef, MultiRowMutationState> emptyMap(), false, null);
    empty.sizeOffset = 0;
    return empty;
}
// Overrides the parent's factory method so the returned MutationState is seeded with the
// accumulated mutations of this anonymous subclass rather than starting empty.
@Override
protected MutationState newMutationState(int maxSize, int maxSizeBytes) {
    return new MutationState(maxSize, maxSizeBytes, this, mutations, false, null);
}; // NOTE(review): trailing braces close an enclosing anonymous class not fully visible here
};
/**
 * Registers a DECLARE CURSOR statement with its query plan.
 *
 * @param statement the cursor declaration
 * @param queryPlan the plan the cursor will iterate
 * @return an empty MutationState — cursor declaration mutates no rows
 * @throws SQLException if the cursor cannot be declared
 */
public MutationState declareCursor(DeclareCursorStatement statement, QueryPlan queryPlan)
        throws SQLException {
    CursorUtil.declareCursor(statement, queryPlan);
    final MutationState noRows = new MutationState(0, 0, connection);
    return noRows;
}
/**
 * Opens a previously declared cursor on this connection.
 *
 * @param statement the OPEN statement naming the cursor
 * @return an empty MutationState — opening a cursor mutates no rows
 * @throws SQLException if the cursor cannot be opened
 */
public MutationState open(OpenStatement statement) throws SQLException {
    CursorUtil.openCursor(statement, connection);
    final MutationState noRows = new MutationState(0, 0, connection);
    return noRows;
}
/**
 * Closes a previously opened cursor.
 *
 * @param statement the CLOSE statement naming the cursor
 * @return an empty MutationState — closing a cursor mutates no rows
 * @throws SQLException if the cursor cannot be closed
 */
public MutationState close(CloseStatement statement) throws SQLException {
    CursorUtil.closeCursor(statement);
    final MutationState noRows = new MutationState(0, 0, connection);
    return noRows;
}
/**
 * Creates a sequence through the query services, honoring IF NOT EXISTS semantics.
 *
 * @param ifNotExists when true, an already-existing sequence is tolerated and
 *        reported as zero rows mutated instead of an error
 * @return a MutationState reporting one mutated row on success, or an empty one
 *         when the sequence already existed and ifNotExists was set
 * @throws SQLException if creation fails, including SequenceAlreadyExistsException
 *         when the sequence exists and ifNotExists is false
 */
private MutationState createSequence(String tenantId, String schemaName, String sequenceName,
        boolean ifNotExists, long startWith, long incrementBy, long cacheSize, boolean cycle,
        long minValue, long maxValue, long timestamp) throws SQLException {
    try {
        connection.getQueryServices().createSequence(tenantId, schemaName, sequenceName,
                startWith, incrementBy, cacheSize, minValue, maxValue, cycle, timestamp);
    } catch (SequenceAlreadyExistsException e) {
        if (!ifNotExists) {
            throw e;
        }
        // Sequence already present and IF NOT EXISTS specified: treat as a no-op.
        return new MutationState(0, 0, connection);
    }
    return new MutationState(1, 1000, connection);
}
// No-op data update: reports zero mutated rows on the plan's own connection.
@Override
public MutationState updateData(MutationPlan plan) throws SQLException {
    return new MutationState(0, 0, plan.getContext().getConnection());
}
// Deletes a previously uploaded jar from the configured dynamic jars directory on the
// cluster filesystem. Returns an empty MutationState since no table rows are mutated.
@Override
public MutationState execute() throws SQLException {
    String dynamicJarsDir = stmt.getConnection().getQueryServices().getProps()
            .get(QueryServices.DYNAMIC_JARS_DIR_KEY);
    if (dynamicJarsDir == null) {
        throw new SQLException(QueryServices.DYNAMIC_JARS_DIR_KEY + " is not configured.");
    }
    // Normalize to a trailing slash so any later path concatenation stays well-formed.
    dynamicJarsDir = dynamicJarsDir.endsWith("/") ? dynamicJarsDir : dynamicJarsDir + '/';
    Configuration conf = HBaseFactoryProvider.getConfigurationFactory().getConfiguration();
    Path dynamicJarsDirPath = new Path(dynamicJarsDir);
    try {
        FileSystem fs = dynamicJarsDirPath.getFileSystem(conf);
        String jarPathStr = (String)getJarPath().getValue();
        // Only .jar paths are accepted; anything else is rejected before touching the FS.
        if(!jarPathStr.endsWith(".jar")) {
            throw new SQLException(jarPathStr + " is not a valid jar file path.");
        }
        Path p = new Path(jarPathStr);
        // Non-recursive delete; silently a no-op when the jar is already absent.
        if(fs.exists(p)) {
            fs.delete(p, false);
        }
    } catch(IOException e) {
        // Wrap filesystem failures in the SQLException contract of execute().
        throw new SQLException(e);
    }
    return new MutationState(0, 0, context.getConnection());
} // NOTE(review): trailing braces close an enclosing anonymous class not fully visible here
};
// Uploads one or more local jar files into the configured dynamic jars directory on the
// cluster filesystem. All paths are validated up front so a bad entry fails the whole
// statement before any file is copied. Returns an empty MutationState.
@Override
public MutationState execute() throws SQLException {
    String dynamicJarsDir = stmt.getConnection().getQueryServices().getProps()
            .get(QueryServices.DYNAMIC_JARS_DIR_KEY);
    if(dynamicJarsDir == null) {
        throw new SQLException(QueryServices.DYNAMIC_JARS_DIR_KEY
                +" is not configured for placing the jars.");
    }
    // Normalize to a trailing slash so the destination paths built below are well-formed.
    dynamicJarsDir = dynamicJarsDir.endsWith("/") ? dynamicJarsDir : dynamicJarsDir + '/';
    Configuration conf = HBaseFactoryProvider.getConfigurationFactory().getConfiguration();
    Path dynamicJarsDirPath = new Path(dynamicJarsDir);
    // Validation pass: every argument must name a .jar file.
    for (LiteralParseNode jarPath : getJarPaths()) {
        String jarPathStr = (String)jarPath.getValue();
        if(!jarPathStr.endsWith(".jar")) {
            throw new SQLException(jarPathStr + " is not a valid jar file path.");
        }
    }
    try {
        FileSystem fs = dynamicJarsDirPath.getFileSystem(conf);
        List<LiteralParseNode> jarPaths = getJarPaths();
        // Copy pass: each local jar is uploaded under its own file name.
        for (LiteralParseNode jarPath : jarPaths) {
            File f = new File((String) jarPath.getValue());
            fs.copyFromLocalFile(new Path(f.getAbsolutePath()), new Path(
                    dynamicJarsDir + f.getName()));
        }
    } catch(IOException e) {
        // Wrap filesystem failures in the SQLException contract of execute().
        throw new SQLException(e);
    }
    return new MutationState(0, 0, context.getConnection());
} // NOTE(review): trailing braces close an enclosing anonymous class not fully visible here
};
// Applies the CONSISTENCY statement property to the connection: TIMELINE when requested,
// any other non-null value falls back to STRONG. Returns an empty MutationState.
@Override
public MutationState execute() throws SQLException {
    Object consistency = getProps().get(PhoenixRuntime.CONSISTENCY_ATTRIB.toUpperCase());
    if(consistency != null) {
        if (((String)consistency).equalsIgnoreCase(Consistency.TIMELINE.toString())){
            getContext().getConnection().setConsistency(Consistency.TIMELINE);
        } else {
            // Unrecognized values are treated as STRONG rather than rejected.
            getContext().getConnection().setConsistency(Consistency.STRONG);
        }
    }
    return new MutationState(0, 0, context.getConnection());
} // NOTE(review): trailing braces close an enclosing anonymous class not fully visible here
};
public MutationState useSchema(UseSchemaStatement useSchemaStatement) throws SQLException { // As we allow default namespace mapped to empty schema, so this is to reset schema in connection if (useSchemaStatement.getSchemaName().equals(StringUtil.EMPTY_STRING)) { connection.setSchema(null); } else { FromCompiler.getResolverForSchema(useSchemaStatement, connection) .resolveSchema(useSchemaStatement.getSchemaName()); connection.setSchema(useSchemaStatement.getSchemaName()); } return new MutationState(0, 0, connection); }
// Builds the delete mutations for a point-lookup DELETE: one DELETE_MARKER row
// mutation per fully-qualified key in the scan ranges.
@Override
public MutationState execute() throws SQLException {
    // We have a point lookup, so we know we have a simple set of fully qualified
    // keys for our ranges
    ScanRanges ranges = context.getScanRanges();
    Iterator<KeyRange> iterator = ranges.getPointLookupKeyIterator();
    // Pre-sized to the exact number of point-lookup keys.
    MultiRowMutationState mutation = new MultiRowMutationState(ranges.getPointLookupCount());
    while (iterator.hasNext()) {
        // For a point lookup the lower range IS the row key; mark that row for deletion.
        mutation.put(new ImmutableBytesPtr(iterator.next().getLowerRange()),
                new RowMutationState(PRow.DELETE_MARKER, 0,
                        statement.getConnection().getStatementExecutionCounter(),
                        NULL_ROWTIMESTAMP_INFO, null));
    }
    return new MutationState(dataPlan.getTableRef(), mutation, 0, maxSize, maxSizeBytes,
            connection);
}
// NOTE(review): fragment of an enclosing method whose start is not visible here.
// Snapshot the connection's current MutationState so later changes on the connection
// do not affect this copy.
this.mutationState = new MutationState(context.getConnection().getMutationState());
TableRef tableRef = plan.getTableRef();
PTable table = tableRef.getTable();
/**
 * Drops a sequence through the query services, honoring IF EXISTS semantics.
 *
 * @param statement the DROP SEQUENCE statement; its ifExists() flag tolerates
 *        a missing sequence and reports zero mutated rows instead of an error
 * @return a MutationState reporting one mutated row on success, or an empty one
 *         when the sequence was absent and IF EXISTS was specified
 * @throws SQLException if the drop fails, including SequenceNotFoundException
 *         when the sequence is absent and ifExists() is false
 */
public MutationState dropSequence(DropSequenceStatement statement) throws SQLException {
    // Use the connection's SCN as the operation timestamp when one is set.
    Long scn = connection.getSCN();
    long timestamp = (scn == null) ? HConstants.LATEST_TIMESTAMP : scn;
    // Fall back to the connection's default schema when the statement omits one.
    String schemaName = statement.getSequenceName().getSchemaName();
    if (schemaName == null && connection.getSchema() != null) {
        schemaName = connection.getSchema();
    }
    String sequenceName = statement.getSequenceName().getTableName();
    String tenantId =
            (connection.getTenantId() == null) ? null : connection.getTenantId().getString();
    try {
        connection.getQueryServices().dropSequence(tenantId, schemaName, sequenceName,
                timestamp);
    } catch (SequenceNotFoundException e) {
        if (!statement.ifExists()) {
            throw e;
        }
        // Sequence absent and IF EXISTS specified: treat as a no-op.
        return new MutationState(0, 0, connection);
    }
    return new MutationState(1, 1000, connection);
}
// Installs an externally managed transaction context on this connection, replacing
// the current MutationState with one bound to that context.
public void setTransactionContext(PhoenixTransactionContext txContext) throws SQLException {
    // Transactions must be enabled in configuration before a context may be attached.
    if (!this.services.getProps().getBoolean(
            QueryServices.TRANSACTIONS_ENABLED,
            QueryServicesOptions.DEFAULT_TRANSACTIONS_ENABLED)) {
        throw new SQLExceptionInfo.Builder(
                SQLExceptionCode.TX_MUST_BE_ENABLED_TO_SET_TX_CONTEXT)
                .build().buildException();
    }
    // Discard any pending mutations BEFORE swapping in the new state; the old state's
    // limits are carried over to the replacement.
    this.mutationState.rollback();
    this.mutationState = new MutationState(this.mutationState.getMaxSize(),
            this.mutationState.getMaxSizeBytes(), this, txContext);

    // Write data to HBase after each statement execution as the commit may
    // not
    // come through Phoenix APIs.
    setAutoFlush(true);
}
// Runs the plan after a DDL fence and reports the data table's row count as the
// number of "mutated" rows (one index row was added per data row).
@Override
public MutationState execute() throws SQLException {
    // Fence ensures prior DDL is visible before the count is taken.
    connection.getMutationState().commitDDLFence(dataTable);
    Tuple tuple = plan.iterator().next();
    long rowCount = 0;
    if (tuple != null) {
        Cell kv = tuple.getValue(0);
        ImmutableBytesWritable tmpPtr = new ImmutableBytesWritable(kv.getValueArray(),
                kv.getValueOffset(), kv.getValueLength());
        // A single Cell will be returned with the count(*) - we decode that here
        rowCount = PLong.INSTANCE.getCodec().decodeLong(tmpPtr, SortOrder.getDefault());
    }
    // The contract is to return a MutationState that contains the number of rows modified. In this
    // case, it's the number of rows in the data table which corresponds to the number of index
    // rows that were added.
    return new MutationState(0, 0, connection, rowCount);
}
// NOTE(review): fragment of an enclosing method not fully visible here; the break
// belongs to a switch or loop whose start is out of view.
break;
return new MutationState(0, 0, connection);
} finally {
    // Restore the auto-commit setting captured before this statement ran.
    connection.setAutoCommit(wasAutoCommit);
// NOTE(review): fragment — builds the MutationState that reports totalRowCount
// mutated rows back to the caller on the statement's connection.
MutationState mutationState = new MutationState(maxSize, maxSizeBytes,
        statement.getConnection(), totalRowCount);