// Copy constructor: clones an existing connection but substitutes the given
// MutationState, while sharing the source connection's query services, URL,
// client info, metadata cache, and upgrade/index-build flags.
public PhoenixConnection(PhoenixConnection connection, MutationState mutationState) throws SQLException {
    this(connection.getQueryServices(), connection.getURL(),
            connection.getClientInfo(), connection.getMetaDataCache(),
            mutationState, connection.isDescVarLengthRowKeyUpgrade(),
            connection.isRunningUpgrade(), connection.buildingIndex);
}
/**
 * Creates a logger for a single query: assigns it a fresh random id,
 * captures the connection's log level and query disruptor, then records
 * the query id and start time as the first two log events.
 */
private QueryLogger(PhoenixConnection connection) {
    // Capture logging configuration before emitting any events.
    logLevel = connection.getLogLevel();
    this.queryDisruptor = connection.getQueryServices().getQueryDisruptor();
    this.queryId = UUID.randomUUID().toString();
    // First two events identify the query and when it started.
    log(QueryLogInfo.QUERY_ID_I, this.queryId);
    log(QueryLogInfo.START_TIME_I, EnvironmentEdgeManager.currentTimeMillis());
}
/**
 * Copy constructor that reuses everything from the source connection except
 * the client-info properties, which are replaced by {@code props}.
 */
public PhoenixConnection(PhoenixConnection connection, Properties props) throws SQLException {
    // Delegate to the main constructor with the substituted properties.
    this(connection.getQueryServices(), connection.getURL(), props,
            connection.metaData, connection.getMutationState(),
            connection.isDescVarLengthRowKeyUpgrade(),
            connection.isRunningUpgrade(), connection.buildingIndex);
    // Carry over per-connection state the delegate does not copy.
    this.statementExecutionCounter = connection.statementExecutionCounter;
    this.sampler = connection.sampler;
    this.isAutoFlush = connection.isAutoFlush;
    this.isAutoCommit = connection.isAutoCommit;
}
/**
 * Copy constructor that reuses everything from the source connection but
 * overrides the DESC-row-key-order-upgrade and running-upgrade flags.
 */
public PhoenixConnection(PhoenixConnection connection,
        boolean isDescRowKeyOrderUpgrade, boolean isRunningUpgrade) throws SQLException {
    // Delegate to the main constructor, substituting only the upgrade flags.
    this(connection.getQueryServices(), connection.getURL(),
            connection.getClientInfo(), connection.metaData,
            connection.getMutationState(), isDescRowKeyOrderUpgrade,
            isRunningUpgrade, connection.buildingIndex);
    // Carry over per-connection state the delegate does not copy.
    this.statementExecutionCounter = connection.statementExecutionCounter;
    this.sampler = connection.sampler;
    this.isAutoFlush = connection.isAutoFlush;
    this.isAutoCommit = connection.isAutoCommit;
}
// NOTE(review): non-contiguous excerpt of an ALTER TABLE "add column" routine;
// the signature begins and the body continues outside this view.
boolean removeTableProps, NamedTableNode namedTableNode, PTableType tableType) throws SQLException {
    // Discard any uncommitted client-side mutations before mutating metadata.
    connection.rollback();
    boolean wasAutoCommit = connection.getAutoCommit();
    List<PColumn> columns = Lists.newArrayListWithExpectedSize(origColumnDefs != null ? origColumnDefs.size() : 0);
    PName tenantId = connection.getTenantId();
    String schemaName = table.getSchemaName().getString();
    String tableName = table.getTableName().getString();
    boolean acquiredMutex = false;
    try {
        // Batch catalog mutations; they are committed explicitly elsewhere.
        connection.setAutoCommit(false);
        if (numCols > 0 ) {
            StatementContext context = new StatementContext(new PhoenixStatement(connection), resolver);
            // While upgrading SYSTEM.CATALOG itself, a dedicated ALTER statement
            // is used instead of the generic column-insert upsert.
            String addColumnSqlToUse = connection.isRunningUpgrade()
                    && tableName.equals(PhoenixDatabaseMetaData.SYSTEM_CATALOG_TABLE)
                    && schemaName.equals(PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA)
                    ? ALTER_SYSCATALOG_TABLE_UPGRADE : INSERT_COLUMN_ALTER_TABLE;
            try (PreparedStatement colUpsert = connection.prepareStatement(addColumnSqlToUse)) {
                short nextKeySeq = SchemaUtil.getMaxKeySeq(table);
                for( ColumnDef colDef : columnDefs) {
                    // NOTE(review): lines between the loop header and this point
                    // are not visible in this excerpt.
                    columnMetaData.addAll(connection.getMutationState().toMutations(timeStamp).next().getSecond());
                    connection.rollback();
                    // Mutable secondary indexes require a minimum cluster HBase version.
                    int hbaseVersion = connection.getQueryServices().getLowestClusterHBaseVersion();
                    if (hbaseVersion < MetaDataProtocol.MUTABLE_SI_VERSION_THRESHOLD) {
                        throw new SQLExceptionInfo.Builder(SQLExceptionCode.NO_MUTABLE_INDEXES)
// NOTE(review): heavily jumbled excerpt (unbalanced parentheses/braces) of
// table-resolution logic; treat the structure below as indicative only.
long tableResolvedTimestamp = HConstants.LATEST_TIMESTAMP;
try {
    // Try the client-side metadata cache first.
    tableRef = connection.getTableRef(new PTableKey(tenantId, fullTableName));
    table = tableRef.getTable();
    tableTimestamp = table.getTimeStamp();
    connection.getMutationState().startTransaction(table.getTransactionProvider());
    // Cached entry is considered fresh while younger than its update-cache frequency.
    (table.getRowTimestampColPos() == -1 && connection.getMetaDataCache().getAge(tableRef) < table.getUpdateCacheFrequency() ))) {
    return new MetaDataMutationResult(MutationCode.TABLE_ALREADY_EXISTS, QueryConstants.UNSET_TIMESTAMP, table);
    if (result.getTable() != null) {
        try {
            tableRef = connection.getTableRef(new PTableKey(tenantId, fullTableName));
            table = tableRef.getTable();
            return new MetaDataMutationResult(MutationCode.TABLE_ALREADY_EXISTS,
    // Fall back to fetching the table definition from the server.
    final byte[] schemaBytes = PVarchar.INSTANCE.toBytes(schemaName);
    final byte[] tableBytes = PVarchar.INSTANCE.toBytes(tableName);
    ConnectionQueryServices queryServices = connection.getQueryServices();
    result = queryServices.getTable(tenantId, schemaBytes, tableBytes, tableTimestamp,
    connection.addTable(result.getTable(), resolvedTime);
    } else {
        // Table unchanged: only refresh its resolved timestamp.
        connection.updateResolvedTimestamp(table, resolvedTime);
    .removeTable(origTenantId, fullTableName,
// NOTE(review): tail of a CREATE TABLE routine's signature plus the start of
// its body; the leading parameters and the rest of the method are outside
// this view.
Map<String,Object> commonFamilyProps) throws SQLException {
    final PTableType tableType = statement.getTableType();
    boolean wasAutoCommit = connection.getAutoCommit();
    // Whether rollback of a splittable SYSTEM.CATALOG is permitted.
    boolean allowSystemCatalogRollback = connection.getQueryServices().getProps().getBoolean(
            QueryServices.ALLOW_SPLITTABLE_SYSTEM_CATALOG_ROLLBACK,
            QueryServicesOptions.DEFAULT_ALLOW_SPLITTABLE_SYSTEM_CATALOG_ROLLBACK);
    // Discard pending client-side mutations before issuing DDL.
    connection.rollback();
    try {
        connection.setAutoCommit(false);
        List<Mutation> tableMetaData = Lists.newArrayListWithExpectedSize(statement.getColumnDefs().size() + 3);
        // Default to the connection's schema when the table node has none.
        final String schemaName = connection.getSchema() != null && tableNameNode.getSchemaName() == null
                ? connection.getSchema() : tableNameNode.getSchemaName();
        final String tableName = tableNameNode.getTableName();
        String parentTableName = null;
        PName tenantId = connection.getTenantId();
        String tenantIdStr = tenantId == null ? null : tenantId.getString();
        // An SCN, if set, pins the timestamp used for the DDL.
        Long scn = connection.getSCN();
        long clientTimeStamp = scn == null ? HConstants.LATEST_TIMESTAMP : scn;
        boolean multiTenant = false;
        Long timestamp = null;
        // Namespace mapping is inherited from the parent when one exists.
        boolean isNamespaceMapped = parent == null
                ? SchemaUtil.isNamespaceMappingEnabled(tableType, connection.getQueryServices().getProps())
                : parent.isNamespaceMapped();
        boolean isLocalIndex = indexType == IndexType.LOCAL;
        PreparedStatement incrementStatement = connection.prepareStatement(INCREMENT_SEQ_NUM);
        incrementStatement.setString(1, tenantIdStr);
// NOTE(review): excerpt of scan setup; the enclosing method is not visible.
final int smallScanThreshold = connection.getQueryServices().getProps().getInt(QueryServices.SMALL_SCAN_THRESHOLD_ATTRIB,
        QueryServicesOptions.DEFAULT_SMALL_SCAN_THRESHOLD);
scan.setConsistency(connection.getConsistency());
Long scn = connection.getSCN();
if (scn == null) {
    // No SCN: derive tenant-id bytes from the row-key schema
    // (accounts for salting and view-index tables).
    tenantIdBytes = connection.getTenantId() == null ? null :
            ScanUtil.getTenantIdBytes(
                    table.getRowKeySchema(),
                    table.getBucketNum() != null,
                    connection.getTenantId(),
                    table.getViewIndexId() != null);
} else {
    // With an SCN, the raw tenant-id bytes are used directly.
    tenantIdBytes = connection.getTenantId() == null ? null : connection.getTenantId().getBytes();
// NOTE(review): truncated/jumbled excerpt — the while-loop body references
// variables (tenantId, schemaName) whose declarations are not visible here,
// and the method's tail is cut off.
public static void addViewIndexToParentLinks(PhoenixConnection oldMetaConnection) throws SQLException {
    // Separate query and upsert connections, both at the latest timestamp.
    try (PhoenixConnection queryConn = new PhoenixConnection(oldMetaConnection, HConstants.LATEST_TIMESTAMP);
            PhoenixConnection upsertConn = new PhoenixConnection(oldMetaConnection, HConstants.LATEST_TIMESTAMP)) {
        logger.info("Upgrading metadata to add parent links for indexes on views");
        // Find all index-table link rows in SYSTEM.CATALOG.
        String indexQuery = "SELECT TENANT_ID, TABLE_SCHEM, TABLE_NAME, COLUMN_FAMILY FROM SYSTEM.CATALOG WHERE LINK_TYPE = "
                + LinkType.INDEX_TABLE.getSerializedValue();
        String createViewIndexLink = "UPSERT INTO SYSTEM.CATALOG (TENANT_ID, TABLE_SCHEM, TABLE_NAME, COLUMN_FAMILY, LINK_TYPE) VALUES (?,?,?,?,?) ";
        ResultSet rs = queryConn.createStatement().executeQuery(indexQuery);
        String prevTenantId = null;
        PhoenixConnection metaConn = queryConn;
        Properties props = new Properties(queryConn.getClientInfo());
        props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(HConstants.LATEST_TIMESTAMP));
        while (rs.next()) {
            prevTenantId = tenantId;
            // Re-open the metadata connection under the row's tenant.
            props.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, tenantId);
            metaConn = new PhoenixConnection(oldMetaConnection, props);
            // Write the view-index -> parent-table link for this row.
            PreparedStatement prepareStatement = upsertConn.prepareStatement(createViewIndexLink);
            prepareStatement.setString(1, tenantId);
            prepareStatement.setString(2, schemaName);
            prepareStatement.setByte(5, LinkType.VIEW_INDEX_PARENT_TABLE.getSerializedValue());
            prepareStatement.execute();
            upsertConn.commit();
    queryConn.getQueryServices().clearCache();
/** * Move child links form SYSTEM.CATALOG to SYSTEM.CHILD_LINK * @param oldMetaConnection caller should take care of closing the passed connection appropriately * @throws SQLException */ public static void moveChildLinks(PhoenixConnection oldMetaConnection) throws SQLException { PhoenixConnection metaConnection = null; try { // Need to use own connection with max time stamp to be able to read all data from SYSTEM.CATALOG metaConnection = new PhoenixConnection(oldMetaConnection, HConstants.LATEST_TIMESTAMP); logger.info("Upgrading metadata to add parent to child links for views"); metaConnection.commit(); String createChildLink = "UPSERT INTO SYSTEM.CHILD_LINK(TENANT_ID, TABLE_SCHEM, TABLE_NAME, COLUMN_NAME, COLUMN_FAMILY, LINK_TYPE) " + "SELECT TENANT_ID, TABLE_SCHEM, TABLE_NAME, COLUMN_NAME, COLUMN_FAMILY, LINK_TYPE " + "FROM SYSTEM.CATALOG " + "WHERE LINK_TYPE = 4"; metaConnection.createStatement().execute(createChildLink); metaConnection.commit(); String deleteChildLink = "DELETE FROM SYSTEM.CATALOG WHERE LINK_TYPE = 4 "; metaConnection.createStatement().execute(deleteChildLink); metaConnection.commit(); metaConnection.getQueryServices().clearCache(); } finally { if (metaConnection != null) { metaConnection.close(); } } }
// NOTE(review): truncated excerpt — code between the resolver setup and the
// finally block, and the finally block's tail, are not visible here.
private MutationState buildIndexAtTimeStamp(PTable index, NamedTableNode dataTableNode) throws SQLException {
    // Run the index build on a fresh connection pinned to SCN + 1.
    // NOTE(review): connection.getSCN()+1 would NPE if no SCN is set —
    // presumably callers guarantee an SCN here; confirm.
    Properties props = new Properties(connection.getClientInfo());
    props.setProperty(PhoenixRuntime.BUILD_INDEX_AT_ATTRIB, Long.toString(connection.getSCN()+1));
    PhoenixConnection conn = new PhoenixConnection(connection, connection.getQueryServices(), props);
    MetaDataClient newClientAtNextTimeStamp = new MetaDataClient(conn);
    conn.setAutoCommit(true);
    // Resolve the data table through the new connection.
    ColumnResolver resolver = FromCompiler.getResolver(dataTableNode, conn);
    TableRef tableRef = resolver.getTables().get(0);
    } finally {
        try {
            conn.close();
        } catch (SQLException e) {
            // Preserve the first exception; close failures are secondary.
            if (sqlException == null) {
// NOTE(review): truncated excerpt — the DELETE statement is cut mid-expression
// and the finally block's tail is not visible.
private PhoenixConnection dropStatsTable(PhoenixConnection oldMetaConnection, long timestamp)
        throws SQLException, IOException {
    // Clone the caller's properties and pin the SCN to the requested timestamp.
    Properties props = PropertiesUtil.deepCopy(oldMetaConnection.getClientInfo());
    props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(timestamp));
    PhoenixConnection metaConnection = new PhoenixConnection(oldMetaConnection, this, props);
    SQLException sqlE = null;
    boolean wasCommit = metaConnection.getAutoCommit();
    try {
        metaConnection.setAutoCommit(true);
        // Remove the SYSTEM.STATS rows from the catalog.
        metaConnection.createStatement()
                .executeUpdate("DELETE FROM " + PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME + " WHERE "
                        + PhoenixDatabaseMetaData.TABLE_NAME + "='" + PhoenixDatabaseMetaData.SYSTEM_STATS_TABLE
    } finally {
        try {
            // Restore auto-commit before handing back control.
            metaConnection.setAutoCommit(wasCommit);
            oldMetaConnection.close();
        } catch (SQLException e) {
            if (sqlE != null) {
/**
 * Returns the user name associated with the connection's query services,
 * normalizing a missing name to the empty string rather than {@code null}.
 */
@Override
public String getUserName() throws SQLException {
    final String userName = connection.getQueryServices().getUserName();
    if (userName == null) {
        return StringUtil.EMPTY_STRING;
    }
    return userName;
}
// NOTE(review): jumbled test excerpt starting mid string concatenation; the
// DDL prefix, parts of the anonymous class, and the try/catch structure are
// not fully visible.
+ tableName + " (PK1 VARCHAR NOT NULL, PK2 VARCHAR, KV1 VARCHAR, KV2 VARCHAR CONSTRAINT PK PRIMARY KEY(PK1, PK2))");
final ConnectionQueryServices delegate = conn.unwrap(PhoenixConnection.class).getQueryServices();
// Wrap the services so upgrade behavior can be overridden by the test.
ConnectionQueryServices servicesWithUpgrade = new DelegateConnectionQueryServices(delegate) {
    @Override
try (PhoenixConnection phxConn = new PhoenixConnection(conn.unwrap(PhoenixConnection.class), servicesWithUpgrade, conn.getClientInfo())) {
    // Queries must fail until the upgrade flag is set on the connection.
    phxConn.createStatement().executeQuery("SELECT * FROM " + tableName);
    fail("SELECT should have failed with UpgradeRequiredException");
} catch (UpgradeRequiredException expected) {
    // Once flagged as running the upgrade, DML should succeed.
    phxConn.setRunningUpgrade(true);
    phxConn.createStatement().execute("UPSERT INTO " + tableName + " VALUES ('PK1', 'PK2', 'KV1', 'KV2')" );
    phxConn.commit();
    try (ResultSet rs = phxConn.createStatement().executeQuery("SELECT * FROM " + tableName)) {
        assertTrue(rs.next());
        assertFalse(rs.next());
// NOTE(review): excerpt of parallel-scan iterator setup; enclosing method is
// not visible.
ConnectionQueryServices services = queryPlan.getContext().getConnection().getQueryServices();
// Invalidate the cached region boundaries for this table before scanning.
services.clearTableRegionCache(TableName.valueOf(tableNameBytes));
long renewScannerLeaseThreshold = queryPlan.getContext().getConnection().getQueryServices().getRenewLeaseThresholdMilliSeconds();
for (Scan scan : scans) {
    ScanMetricsHolder scanMetricsHolder = ScanMetricsHolder.getInstance(readMetrics, tableName, scan,
            queryPlan.getContext().getConnection().getLogLevel());
    // One TableResultIterator per scan, grouped via the map-reduce grouper.
    final TableResultIterator tableResultIterator = new TableResultIterator(
            queryPlan.getContext().getConnection().getMutationState(), scan, scanMetricsHolder,
            renewScannerLeaseThreshold, queryPlan, MapReduceParallelScanGrouper.getInstance());
// NOTE(review): test excerpt — the UPSERTs reference column B which the
// visible CREATE TABLE does not declare; presumably B is added elsewhere
// (lines not visible). Confirm against the full test.
props.put("phoenix.mutate.batchSize", "2");
try (PhoenixConnection conn = DriverManager.getConnection(getUrl(), props).unwrap(PhoenixConnection.class)) {
    conn.setAutoCommit(false);
    conn.createStatement().executeUpdate(
            "CREATE TABLE " + tableName + " (" + "A VARCHAR NOT NULL PRIMARY KEY," + "C VARCHAR," + "D VARCHAR) COLUMN_ENCODED_BYTES = 0");
    conn.createStatement().executeUpdate("CREATE INDEX " + indexName + " on " + tableName + " (C) INCLUDE(D)");
    conn.createStatement().executeUpdate("UPSERT INTO " + tableName + "(A,B,C,D) VALUES ('A2','B2','C2','D2')");
    conn.createStatement().executeUpdate("UPSERT INTO " + tableName + "(A,B,C,D) VALUES ('A3','B3', 'C3', null)");
    conn.commit();
    // Inspect the raw cells (including deletes) through a raw HBase scan.
    Table htable = conn.getQueryServices().getTable(Bytes.toBytes(tableName));
    Scan scan = new Scan();
    scan.setRaw(true);
// NOTE(review): truncated excerpt — the beginning of the DML string (and the
// variable `dml` it builds) and the finally block's tail are not visible.
private PhoenixConnection removeNotNullConstraint(PhoenixConnection oldMetaConnection, String schemaName,
        String tableName, long timestamp, String columnName) throws SQLException {
    // Pin the SCN so the catalog row is rewritten at the requested timestamp.
    Properties props = PropertiesUtil.deepCopy(oldMetaConnection.getClientInfo());
    props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(timestamp));
    PhoenixConnection metaConnection = new PhoenixConnection(oldMetaConnection, this, props);
    SQLException sqlE = null;
    try {
        + PhoenixDatabaseMetaData.COLUMN_NAME + ","
        + PhoenixDatabaseMetaData.NULLABLE + ") VALUES (null, ?, ?, ?, ?)";
        PreparedStatement stmt = metaConnection.prepareStatement(dml);
        stmt.setString(1, schemaName);
        stmt.setString(2, tableName);
        // Mark the column as nullable in the catalog row.
        stmt.setInt(4, ResultSetMetaData.columnNullable);
        stmt.executeUpdate();
        metaConnection.commit();
    } catch (NewerTableAlreadyExistsException e) {
        // A newer catalog row means the change was already applied.
        logger.warn("Table already modified at this timestamp, so assuming column already nullable: " + columnName);
    } finally {
        try {
            oldMetaConnection.close();
        } catch (SQLException e) {
            if (sqlE != null) {
// NOTE(review): jumbled test excerpt — pConn, guidePosts, and info are
// declared outside this view, and the for-loop body is cut.
final PTable table = conn.unwrap(PhoenixConnection.class).getTable(new PTableKey(null, "PERF.BIG_OLAP_DOC"));
GuidePostsInfoBuilder gpWriter = new GuidePostsInfoBuilder();
for (byte[] gp : guidePosts) {
// Register the table and its synthetic guidepost stats with the
// connectionless query services for planning tests.
pConn.addTable(table, System.currentTimeMillis());
((ConnectionlessQueryServicesImpl) pConn.getQueryServices())
        .addTableStats(new GuidePostsKey(table.getName().getBytes(), QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES), info);
@Override
public MutationState execute() throws SQLException {
    // Issue a DDL fence via the mutation state for dataTable before
    // delegating to the normal execution path.
    // NOTE(review): exact fence semantics live in MutationState.commitDDLFence;
    // confirm there.
    connection.getMutationState().commitDDLFence(dataTable);
    return super.execute();
}
// NOTE(review): the trailing "};" closes an enclosing anonymous class whose
// declaration is outside this view.
};
// NOTE(review): truncated excerpt of an "add columns" upgrade helper; the
// method signature and the finally block's tail are outside this view.
Properties props = PropertiesUtil.deepCopy(oldMetaConnection.getClientInfo());
// Pin the SCN so the DDL is applied at the requested timestamp.
props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(timestamp));
PhoenixConnection metaConnection = new PhoenixConnection(oldMetaConnection, this, props);
SQLException sqlE = null;
try {
    // Issue the ALTER TABLE ... ADD, optionally guarded with IF NOT EXISTS.
    metaConnection.createStatement().executeUpdate("ALTER TABLE " + tableName + " ADD "
            + (addIfNotExists ? " IF NOT EXISTS " : "") + columns );
} catch (NewerTableAlreadyExistsException e) {
    // A newer catalog row means the columns were already added; safe to skip.
    logger.warn("Table already modified at this timestamp, so assuming add of these columns already done: " + columns);
} finally {
    try {
        oldMetaConnection.close();
    } catch (SQLException e) {
        if (sqlE != null) {