@Override public Object getPTableValue(PTable table) { return table.getBucketNum(); } },
// Number of salt buckets for the table, or null when the table is not salted;
// straight pass-through to the wrapped PTable delegate.
@Override public Integer getBucketNum() { return delegate.getBucketNum(); }
/**
 * Extracts the salt bucket count from the table carried by a
 * {@link TableAlreadyExistsException}, defaulting to 0 when the exception
 * carries no table or the table is not salted.
 */
private static int getSaltBuckets(TableAlreadyExistsException e) {
    PTable existing = e.getTable();
    if (existing == null) {
        return 0;
    }
    Integer buckets = existing.getBucketNum();
    return buckets != null ? buckets : 0;
}
public static short getMaxKeySeq(PTable table) { int offset = 0; if (table.getBucketNum() != null) { offset++; } // TODO: for tenant-specific table on tenant-specific connection, // we should subtract one for tenant column and another one for // index ID return (short)(table.getPKColumns().size() - offset); }
/**
 * Position of the auto-partition column in the parent table's PK: it follows
 * the salt byte (when salted) and the tenant id column (when multi-tenant).
 */
public static int getAutoPartitionColIndex(PTable parentTable) {
    int index = 0;
    if (parentTable.isMultiTenant()) {
        index++; // tenant id column
    }
    if (parentTable.getBucketNum() != null) {
        index++; // salt byte column
    }
    return index;
}
private static int getMinPKOffset(PTable table, PName tenantId) { // In SELECT *, don't include tenant column or index ID column for tenant connection int posOffset = table.getBucketNum() == null ? 0 : 1; if (table.isMultiTenant() && tenantId != null) { posOffset++; } if (table.getViewIndexId() != null) { posOffset++; } return posOffset; }
/**
 * Returns the columns to copy when cloning a table, dropping the synthetic
 * salt column (always at position 0) when the table is salted.
 */
public static List<PColumn> getColumnsToClone(PTable table) {
    List<PColumn> columns = table.getColumns();
    if (table.getBucketNum() == null) {
        return columns;
    }
    return columns.subList(1, columns.size());
}
/**
 * Returns whether the query may be executed serially.
 *
 * If ordering by columns not on the PK axis, we can't execute a query
 * serially because we need to do a merge sort across all the scans, which
 * isn't possible with SerialIterators. Similar reasoning follows for salted
 * and local index tables when rows must come back in row key order. Serial
 * execution is OK in other cases since SerialIterators will execute scans in
 * the correct order.
 */
public static final boolean canQueryBeExecutedSerially(PTable table, OrderBy orderBy, StatementContext context) {
    if (!orderBy.getOrderByExpressions().isEmpty()) {
        return false;
    }
    boolean saltedOrLocalIndex =
            table.getBucketNum() != null || table.getIndexType() == IndexType.LOCAL;
    return !(saltedOrLocalIndex && shouldRowsBeInRowKeyOrder(orderBy, context));
}
/**
 * Collects the view-constant values declared on the PK columns of a VIEW or
 * PROJECTED table. Returns null when the table type carries no view constants
 * or none are declared.
 *
 * @throws IllegalStateException if a declared view constant cannot be evaluated
 */
public static byte[][] getViewConstants(PTable dataTable) {
    PTableType type = dataTable.getType();
    if (type != PTableType.VIEW && type != PTableType.PROJECTED) {
        return null;
    }
    // Salt byte and tenant id columns can never hold view constants.
    int pos = (dataTable.getBucketNum() != null ? 1 : 0)
            + (dataTable.isMultiTenant() ? 1 : 0);
    ImmutableBytesWritable ptr = new ImmutableBytesWritable();
    List<byte[]> constants = new ArrayList<byte[]>();
    List<PColumn> pkColumns = dataTable.getPKColumns();
    while (pos < pkColumns.size()) {
        PColumn pkColumn = pkColumns.get(pos++);
        if (pkColumn.getViewConstant() != null) {
            if (!IndexUtil.getViewConstantValue(pkColumn, ptr)) {
                // A declared view constant must always be evaluable.
                throw new IllegalStateException();
            }
            constants.add(ByteUtil.copyKeyBytesIfNecessary(ptr));
        }
    }
    return constants.isEmpty() ? null : constants.toArray(new byte[constants.size()][]);
}
private static Expression getFirstPKColumnExpression(PTable table) throws SQLException { if (table.getIndexType() == IndexType.LOCAL) { /* * With some hackery, we could deduce the tenant ID from a multi-tenant local index, * however it's not clear that we'd want to maintain the same prefixing of the region * start key, as the region boundaries may end up being different on a cluster being * replicated/backed-up to (which is the use case driving the method). */ throw new SQLFeatureNotSupportedException(); } // skip salt and viewIndexId columns. int pkPosition = (table.getBucketNum() == null ? 0 : 1) + (table.getViewIndexId() == null ? 0 : 1); List<PColumn> pkColumns = table.getPKColumns(); return new RowKeyColumnExpression(pkColumns.get(pkPosition), new RowKeyValueAccessor(pkColumns, pkPosition)); }
@Deprecated private static List<PColumn> getPkColumns(PTable ptable, Connection conn, boolean forDataTable) throws SQLException { PhoenixConnection pConn = conn.unwrap(PhoenixConnection.class); List<PColumn> pkColumns = ptable.getPKColumns(); // Skip the salting column and the view index id column if present. // Skip the tenant id column too if the connection is tenant specific and the table used by the query plan is multi-tenant int offset = (ptable.getBucketNum() == null ? 0 : 1) + (ptable.isMultiTenant() && pConn.getTenantId() != null ? 1 : 0) + (ptable.getViewIndexId() == null ? 0 : 1); // get a sublist of pkColumns by skipping the offset columns. pkColumns = pkColumns.subList(offset, pkColumns.size()); if (ptable.getType() == PTableType.INDEX && forDataTable) { // index tables have the same schema name as their parent/data tables. String fullDataTableName = ptable.getParentName().getString(); // Get the corresponding columns of the data table. List<PColumn> dataColumns = IndexUtil.getDataColumns(fullDataTableName, pkColumns, pConn); pkColumns = dataColumns; } return pkColumns; }
/**
 * Decides whether the next scan must begin a new scan group. Plans that are
 * not row-key ordered always split. Otherwise a split only happens when a
 * region boundary was crossed AND either the table is a local index, or it is
 * salted and the new start key falls under a different salt-byte prefix than
 * the previously added scan.
 */
@Override
public boolean shouldStartNewScan(QueryPlan plan, List<Scan> scans, byte[] startKey,
        boolean crossedRegionBoundary) {
    if (!plan.isRowKeyOrdered()) {
        return true;
    }
    if (!crossedRegionBoundary) {
        return false;
    }
    PTable table = plan.getTableRef().getTable();
    if (table.getIndexType() == IndexType.LOCAL) {
        return true;
    }
    if (table.getBucketNum() == null) {
        return false;
    }
    // Salted table: split when there is no previous scan to compare against,
    // or when the salt-byte prefix changes between scans.
    if (scans.isEmpty()) {
        return true;
    }
    byte[] previousPrefix = ScanUtil.getPrefix(
            scans.get(scans.size() - 1).getStartRow(), SaltingUtil.NUM_SALTING_BYTES);
    return ScanUtil.crossesPrefixBoundary(startKey, previousPrefix,
            SaltingUtil.NUM_SALTING_BYTES);
}
private byte[][] getViewConstants(PTable dataTable) { int dataPosOffset = (dataTable.getBucketNum() != null ? 1 : 0) + (dataTable.isMultiTenant() ? 1 : 0); byte[][] viewConstants = null; int nViewConstants = 0;
private static List<PColumn> getPkColumns(PTable ptable, Connection conn) throws SQLException { PhoenixConnection pConn = conn.unwrap(PhoenixConnection.class); List<PColumn> pkColumns = ptable.getPKColumns(); // Skip the salting column and the view index id column if present. // Skip the tenant id column too if the connection is tenant specific and the table used by the query plan is multi-tenant int offset = (ptable.getBucketNum() == null ? 0 : 1) + (ptable.isMultiTenant() && pConn.getTenantId() != null ? 1 : 0) + (ptable.getViewIndexId() == null ? 0 : 1); // get a sublist of pkColumns by skipping the offset columns. pkColumns = pkColumns.subList(offset, pkColumns.size()); if (ptable.getType() == PTableType.INDEX) { // index tables have the same schema name as their parent/data tables. String fullDataTableName = ptable.getParentName().getString(); // Get the corresponding columns of the data table. List<PColumn> dataColumns = IndexUtil.getDataColumns(fullDataTableName, pkColumns, pConn); pkColumns = dataColumns; } return pkColumns; }
/**
 * If the data table is a VIEW, gathers the view-constant values from its PK
 * columns and, when any exist, serializes them into the scan's attributes via
 * the two-argument overload. No-op for other table types.
 *
 * @throws IllegalStateException if a declared view constant cannot be evaluated
 */
private void serializeViewConstantsIntoScan(Scan scan, PTable dataTable) {
    if (dataTable.getType() != PTableType.VIEW) {
        return;
    }
    // Salt byte and tenant id columns can never carry view constants.
    int pos = (dataTable.getBucketNum() != null ? 1 : 0)
            + (dataTable.isMultiTenant() ? 1 : 0);
    ImmutableBytesWritable ptr = new ImmutableBytesWritable();
    List<PColumn> dataPkColumns = dataTable.getPKColumns();
    List<byte[]> constants = new ArrayList<byte[]>();
    for (int i = pos; i < dataPkColumns.size(); i++) {
        PColumn dataPkColumn = dataPkColumns.get(i);
        if (dataPkColumn.getViewConstant() != null) {
            if (!IndexUtil.getViewConstantValue(dataPkColumn, ptr)) {
                // A declared view constant must always be evaluable.
                throw new IllegalStateException();
            }
            constants.add(ByteUtil.copyKeyBytesIfNecessary(ptr));
        }
    }
    if (!constants.isEmpty()) {
        serializeViewConstantsIntoScan(constants.toArray(new byte[constants.size()][]), scan);
    }
}
/**
 * Builds a resolver over a PROJECTED table: registers each source table (and
 * its alias, if any) referenced by the projected columns, and maps every
 * (source table, source column position) pair to the projected column's
 * position. The table ref for the projected table itself is created with the
 * minimum lower-bound timestamp seen across all source tables.
 */
private ProjectedTableColumnResolver(PTable projectedTable, PhoenixConnection conn,
        Map<String, UDFParseNode> udfParseNodes) throws SQLException {
    super(conn, 0, udfParseNodes, null);
    Preconditions.checkArgument(projectedTable.getType() == PTableType.PROJECTED);
    this.isLocalIndex = projectedTable.getIndexType() == IndexType.LOCAL;
    this.columnRefMap = new HashMap<ColumnRef, Integer>();
    long ts = Long.MAX_VALUE;
    // Skip the salt column (position 0) when the projected table is salted.
    for (int i = projectedTable.getBucketNum() == null ? 0 : 1;
            i < projectedTable.getColumns().size(); i++) {
        PColumn column = projectedTable.getColumns().get(i);
        ColumnRef colRef = ((ProjectedColumn) column).getSourceColumnRef();
        TableRef tableRef = colRef.getTableRef();
        if (!tables.contains(tableRef)) {
            // First time seeing this source table: register it under its alias
            // and/or its real name (avoiding a duplicate entry when they match).
            String alias = tableRef.getTableAlias();
            if (alias != null) {
                this.tableMap.put(alias, tableRef);
            }
            String name = tableRef.getTable().getName().getString();
            if (alias == null || !alias.equals(name)) {
                tableMap.put(name, tableRef);
            }
            tables.add(tableRef);
            // Track the earliest lower-bound timestamp across all source tables.
            if (tableRef.getLowerBoundTimeStamp() < ts) {
                ts = tableRef.getLowerBoundTimeStamp();
            }
        }
        this.columnRefMap.put(new ColumnRef(tableRef, colRef.getColumnPosition()),
                column.getPosition());
    }
    this.theTableRefs = ImmutableList.of(
            new TableRef(ParseNodeFactory.createTempAlias(), projectedTable, ts, false));
}
protected PTable addDynamicColumns(List<ColumnDef> dynColumns, PTable theTable) throws SQLException { if (!dynColumns.isEmpty()) { List<PColumn> allcolumns = new ArrayList<PColumn>(); List<PColumn> existingColumns = theTable.getColumns(); // Need to skip the salting column, as it's handled in the PTable builder call below allcolumns.addAll(theTable.getBucketNum() == null ? existingColumns : existingColumns.subList(1, existingColumns.size())); // Position still based on with the salting columns int position = existingColumns.size(); PName defaultFamilyName = PNameFactory.newName(SchemaUtil.getEmptyColumnFamily(theTable)); for (ColumnDef dynColumn : dynColumns) { PName familyName = defaultFamilyName; PName name = PNameFactory.newName(dynColumn.getColumnDefName().getColumnName()); String family = dynColumn.getColumnDefName().getFamilyName(); if (family != null) { theTable.getColumnFamily(family); // Verifies that column family exists familyName = PNameFactory.newName(family); } allcolumns.add(new PColumnImpl(name, familyName, dynColumn.getDataType(), dynColumn.getMaxLength(), dynColumn.getScale(), dynColumn.isNull(), position, dynColumn.getSortOrder(), dynColumn.getArraySize(), null, false, dynColumn.getExpression(), false, true, Bytes.toBytes(dynColumn.getColumnDefName().getColumnName()), HConstants.LATEST_TIMESTAMP)); position++; } theTable = PTableImpl.builderWithColumns(theTable, allcolumns) .build(); } return theTable; }
/**
 * Tracks whether an ordering/grouping preserves the underlying row key order.
 * For the ungrouped case, order preservation starts from the table's
 * rowKeyOrderOptimizable flag and the PK position offset accounts for the
 * salt byte, the tenant id (tenant-specific connection on a multi-tenant
 * table), and the view index id column. With a GROUP BY, order is assumed
 * preserving and no offset applies.
 */
public OrderPreservingTracker(StatementContext context, GroupBy groupBy, Ordering ordering,
        int nNodes, TupleProjector projector, Expression whereExpression) {
    this.context = context;
    if (groupBy.isEmpty()) {
        PTable table = context.getResolver().getTables().get(0).getTable();
        this.isOrderPreserving = table.rowKeyOrderOptimizable();
        // TODO: util for this offset, as it's computed in numerous places
        int offset = 0;
        if (table.getBucketNum() != null) {
            offset++; // salt byte
        }
        if (context.getConnection().getTenantId() != null && table.isMultiTenant()) {
            offset++; // tenant id
        }
        if (table.getViewIndexId() != null) {
            offset++; // view index id
        }
        this.pkPositionOffset = offset;
    } else {
        this.isOrderPreserving = true;
        this.pkPositionOffset = 0;
    }
    this.groupBy = groupBy;
    this.visitor = new TrackOrderPreservingExpressionVisitor(projector);
    this.orderPreservingInfos = Lists.newArrayListWithExpectedSize(nNodes);
    this.ordering = ordering;
    this.whereExpression = whereExpression;
}
/**
 * Verifies that CREATE SEQUENCE salts the SYSTEM.SEQUENCE table with the test
 * default bucket count and records the sequence's start/increment metadata.
 */
@Test
public void testCreateSequence() throws Exception {
    String sequenceName = generateSequenceNameWithSchema();
    String sequenceNameWithoutSchema = getNameWithoutSchema(sequenceName);
    String schemaName = getSchemaName(sequenceName);
    conn.createStatement().execute(
            "CREATE SEQUENCE " + sequenceName + " START WITH 2 INCREMENT BY 4");
    int bucketNum = PhoenixRuntime.getTableNoCache(conn,
            SYSTEM_CATALOG_SCHEMA + "." + TYPE_SEQUENCE).getBucketNum();
    // assertEquals takes (message, expected, actual): the configured default is
    // the expected value; the observed bucket count is the actual. The original
    // call had them swapped, which produces misleading failure messages.
    assertEquals("Salt bucket for SYSTEM.SEQUENCE should be test default",
            QueryServicesTestImpl.DEFAULT_SEQUENCE_TABLE_SALT_BUCKETS, bucketNum);
    String query = "SELECT sequence_schema, sequence_name, current_value, increment_by FROM \"SYSTEM\".\"SEQUENCE\" WHERE sequence_name='"
            + sequenceNameWithoutSchema + "'";
    ResultSet rs = conn.prepareStatement(query).executeQuery();
    assertTrue(rs.next());
    assertEquals(schemaName, rs.getString("sequence_schema"));
    assertEquals(sequenceNameWithoutSchema, rs.getString("sequence_name"));
    assertEquals(2, rs.getInt("current_value"));
    assertEquals(4, rs.getInt("increment_by"));
    assertFalse(rs.next());
}
/**
 * Builds a rewriter that maps parse nodes matching an indexed expression to a
 * column parse node over the corresponding index column. For each
 * expression-indexed PK column of the index, the data-table expression string
 * is parsed and compiled against the parent table's resolver, rewritten into
 * its indexed form, and mapped to a column reference (cast back to the data
 * expression's type when the index stores a different type).
 */
public IndexExpressionParseNodeRewriter(PTable index, String alias, PhoenixConnection connection,
        Map<String, UDFParseNode> udfParseNodes) throws SQLException {
    indexedParseNodeToColumnParseNodeMap =
            Maps.newHashMapWithExpectedSize(index.getColumns().size());
    // Resolve against the parent/data table so the indexed expressions compile
    // in terms of the data table's columns.
    NamedTableNode tableNode = NamedTableNode.create(alias,
            TableName.create(index.getParentSchemaName().getString(),
                    index.getParentTableName().getString()),
            Collections.<ColumnDef> emptyList());
    ColumnResolver dataResolver = FromCompiler.getResolver(tableNode, connection, udfParseNodes);
    StatementContext context = new StatementContext(new PhoenixStatement(connection), dataResolver);
    IndexStatementRewriter rewriter = new IndexStatementRewriter(dataResolver, null, true);
    ExpressionCompiler expressionCompiler = new ExpressionCompiler(context);
    // Skip the salt byte, tenant id, and view index id columns: they are not
    // user-defined indexed expressions.
    int indexPosOffset = (index.getBucketNum() == null ? 0 : 1)
            + (index.isMultiTenant() ? 1 : 0)
            + (index.getViewIndexId() == null ? 0 : 1);
    List<PColumn> pkColumns = index.getPKColumns();
    for (int i=indexPosOffset; i<pkColumns.size(); ++i) {
        PColumn column = pkColumns.get(i);
        String expressionStr = IndexUtil.getIndexColumnExpressionStr(column);
        ParseNode expressionParseNode = SQLParser.parseCondition(expressionStr);
        // Quote the index column name so it resolves case-sensitively.
        String colName = "\"" + column.getName().getString() + "\"";
        Expression dataExpression = expressionParseNode.accept(expressionCompiler);
        PDataType expressionDataType = dataExpression.getDataType();
        ParseNode indexedParseNode = expressionParseNode.accept(rewriter);
        PDataType indexColType =
                IndexUtil.getIndexColumnDataType(dataExpression.isNullable(), expressionDataType);
        ParseNode columnParseNode = new ColumnParseNode(
                alias!=null ? TableName.create(null, alias) : null, colName, null);
        if ( indexColType != expressionDataType) {
            // The index column stores a different (nullability-adjusted) type;
            // cast back to the data expression's type so semantics match.
            columnParseNode = NODE_FACTORY.cast(columnParseNode, expressionDataType, null, null);
        }
        indexedParseNodeToColumnParseNodeMap.put(indexedParseNode, columnParseNode);
    }
}