/**
 * Collects the sort-direction codes for the destination table's SORTED BY columns.
 *
 * <p>For each entry in the table's sort specification, scans the table schema for a
 * column with a matching name and, when found, records that entry's order code
 * (ascending/descending as stored in the metastore {@code Order} object). Sort
 * columns that do not match any schema column are silently skipped.
 *
 * @param dest  destination clause name (unused here; kept for signature parity)
 * @param qb    current query block (unused here; kept for signature parity)
 * @param tab   destination table whose sort specification is read
 * @param input current operator (unused here; kept for signature parity)
 * @return order codes, one per matched sort column, in declaration order
 */
private ArrayList<Integer> getSortOrders(String dest, QB qb, Table tab, Operator input)
    throws SemanticException {
  List<Order> sortSpec = tab.getSortCols();
  List<FieldSchema> schemaCols = tab.getCols();
  ArrayList<Integer> sortOrders = new ArrayList<Integer>();
  for (Order entry : sortSpec) {
    for (FieldSchema schemaCol : schemaCols) {
      if (entry.getCol().equals(schemaCol.getName())) {
        sortOrders.add(entry.getOrder());
        break;
      }
    }
  }
  return sortOrders;
}
private ArrayList<ExprNodeDesc> getSortCols(String dest, QB qb, Table tab, TableDesc table_desc, Operator input, boolean convert) throws SemanticException { List<Order> tabSortCols = tab.getSortCols(); List<FieldSchema> tabCols = tab.getCols(); // Partition by the bucketing column List<Integer> posns = new ArrayList<Integer>(); for (Order sortCol : tabSortCols) { int pos = 0; for (FieldSchema tabCol : tabCols) { if (sortCol.getCol().equals(tabCol.getName())) { posns.add(pos); break; } pos++; } } return genConvertCol(dest, qb, tab, table_desc, input, posns, convert); }
/**
 * Returns the sort order codes (asc/desc, as stored in the metastore) for the
 * table's SORTED BY columns, in the order they are declared.
 *
 * <p>A sort column whose name matches no schema column contributes nothing to
 * the result. The extra parameters mirror sibling helpers and are not used.
 *
 * @param tab destination table whose sort specification and schema are read
 * @return one order code per matched sort column
 */
private ArrayList<Integer> getSortOrders(String dest, QB qb, Table tab, Operator input)
    throws SemanticException {
  List<Order> declaredSortCols = tab.getSortCols();
  List<FieldSchema> allColumns = tab.getCols();
  ArrayList<Integer> result = new ArrayList<Integer>();
  for (Order sortEntry : declaredSortCols) {
    // Record the order code only when the column exists in the schema.
    for (FieldSchema column : allColumns) {
      if (sortEntry.getCol().equals(column.getName())) {
        result.add(sortEntry.getOrder());
        break;
      }
    }
  }
  return result;
}
private ArrayList<ExprNodeDesc> getSortCols(String dest, QB qb, Table tab, TableDesc table_desc, Operator input, boolean convert) throws SemanticException { List<Order> tabSortCols = tab.getSortCols(); List<FieldSchema> tabCols = tab.getCols(); // Partition by the bucketing column List<Integer> posns = new ArrayList<Integer>(); for (Order sortCol : tabSortCols) { int pos = 0; for (FieldSchema tabCol : tabCols) { if (sortCol.getCol().equals(tabCol.getName())) { posns.add(pos); break; } pos++; } } return genConvertCol(dest, qb, tab, table_desc, input, posns, convert); }
private boolean checkTable(Table table, List<Integer> bucketPositionsDest, List<Integer> sortPositionsDest, List<Integer> sortOrderDest, int numBucketsDest) { // The bucketing and sorting positions should exactly match int numBuckets = table.getNumBuckets(); if (numBucketsDest != numBuckets) { return false; } List<Integer> tableBucketPositions = getBucketPositions(table.getBucketCols(), table.getCols()); List<Integer> sortPositions = getSortPositions(table.getSortCols(), table.getCols()); List<Integer> sortOrder = getSortOrder(table.getSortCols(), table.getCols()); return bucketPositionsDest.equals(tableBucketPositions) && sortPositionsDest.equals(sortPositions) && sortOrderDest.equals(sortOrder); }
/**
 * Exposes the Hive table's SORTED BY specification as a single Calcite
 * {@link RelCollation}.
 *
 * <p>Each sort column is resolved to its ordinal position in the storage
 * descriptor's column list; ascending columns map to
 * {@code Direction.ASCENDING}/{@code NullDirection.FIRST}, everything else to
 * {@code Direction.DESCENDING}/{@code NullDirection.LAST}. Sort columns absent
 * from the storage descriptor are skipped.
 *
 * @return a one-element list holding the canonized collation
 */
@Override
public List<RelCollation> getCollationList() {
  ImmutableList.Builder<RelFieldCollation> fieldCollations =
      new ImmutableList.Builder<RelFieldCollation>();
  List<FieldSchema> storageCols = this.hiveTblMetadata.getSd().getCols();
  for (Order sortColumn : this.hiveTblMetadata.getSortCols()) {
    for (int pos = 0; pos < storageCols.size(); pos++) {
      if (!storageCols.get(pos).getName().equals(sortColumn.getCol())) {
        continue;
      }
      boolean ascending =
          sortColumn.getOrder() == BaseSemanticAnalyzer.HIVE_COLUMN_ORDER_ASC;
      Direction direction = ascending ? Direction.ASCENDING : Direction.DESCENDING;
      NullDirection nullDirection = ascending ? NullDirection.FIRST : NullDirection.LAST;
      fieldCollations.add(new RelFieldCollation(pos, direction, nullDirection));
      break;
    }
  }
  return new ImmutableList.Builder<RelCollation>()
      .add(RelCollationTraitDef.INSTANCE.canonize(
          new HiveRelCollation(fieldCollations.build())))
      .build();
}
private boolean checkTable(Table table, List<Integer> bucketPositionsDest, List<Integer> sortPositionsDest, List<Integer> sortOrderDest, int numBucketsDest) { // The bucketing and sorting positions should exactly match int numBuckets = table.getNumBuckets(); if (numBucketsDest != numBuckets) { return false; } List<Integer> tableBucketPositions = getBucketPositions(table.getBucketCols(), table.getCols()); List<Integer> sortPositions = getSortPositions(table.getSortCols(), table.getCols()); List<Integer> sortOrder = getSortOrder(table.getSortCols(), table.getCols()); return bucketPositionsDest.equals(tableBucketPositions) && sortPositionsDest.equals(sortPositions) && sortOrderDest.equals(sortOrder); }
/**
 * Computes the reduce-sink partition columns for an insert into a bucketed
 * and/or sorted destination table, and records them on {@code ctx}.
 *
 * <p>Bucketed tables partition on the bucketing columns (or on the ROW__ID
 * derived columns for UPDATE/DELETE). A table with SORTED BY columns but no
 * buckets is rejected, since Hive requires sorted tables to be bucketed.
 *
 * @param dest       destination clause name
 * @param input      operator feeding the file sink
 * @param qb         current query block
 * @param table_desc descriptor of the destination table
 * @param dest_tab   metastore view of the destination table
 * @param ctx        receives the resolved partition columns when bucketing is enforced
 * @throws SemanticException if the table is sorted but not bucketed
 */
private void genPartnCols(String dest, Operator input, QB qb, TableDesc table_desc,
    Table dest_tab, SortBucketRSCtx ctx) throws SemanticException {
  boolean enforceBucketing = false;
  ArrayList<ExprNodeDesc> partnColsNoConvert = new ArrayList<ExprNodeDesc>();

  if (dest_tab.getNumBuckets() > 0) {
    enforceBucketing = true;
    if (updating(dest) || deleting(dest)) {
      // UPDATE/DELETE partition on columns derived from the bucket columns of
      // the ROW__ID, not on the declared bucketing expressions.
      partnColsNoConvert = getPartitionColsFromBucketColsForUpdateDelete(input, false);
    } else {
      partnColsNoConvert =
          getPartitionColsFromBucketCols(dest, qb, dest_tab, table_desc, input, false);
    }
  }

  if ((dest_tab.getSortCols() != null) && (dest_tab.getSortCols().size() > 0)) {
    // Sorted tables must also be bucketed; enforceBucketing is already true
    // whenever the table has buckets, so reaching here without it is an error.
    if (!enforceBucketing) {
      throw new SemanticException(
          ErrorMsg.TBL_SORTED_NOT_BUCKETED.getErrorCodedMsg(dest_tab.getCompleteName()));
    }
    // NOTE(review): the previous "else { if (!enforceBucketing) {...} }" branch
    // and the trailing "enforceBucketing = true;" were unreachable/redundant
    // here (enforceBucketing is always true past the throw) and were removed.
  }

  if (enforceBucketing) {
    ctx.setPartnCols(partnColsNoConvert);
  }
}
/**
 * Validates that an ACID write targets a legal destination and flags the query
 * as an ACID operation in the configuration.
 *
 * <p>Rules enforced, in order: the statement must be INSERT INTO (not INSERT
 * OVERWRITE), the table must be bucketed, and the table must not be sorted.
 *
 * @throws SemanticException if any ACID constraint is violated
 */
private void checkAcidConstraints(QB qb, TableDesc tableDesc, Table table)
    throws SemanticException {
  String tableName = tableDesc.getTableName();
  // ACID only supports INSERT INTO; an absent insertIntoTable entry means
  // this is an overwrite, which is rejected.
  if (!qb.getParseInfo().isInsertIntoTable(tableName)) {
    LOG.debug("Couldn't find table " + tableName + " in insertIntoTable");
    throw new SemanticException(ErrorMsg.NO_INSERT_OVERWRITE_WITH_ACID.getMsg());
  }
  // hive.optimize.reducededuplication(.min.reducer) used to be forced on here
  // for ACID writes; they are now enabled elsewhere. Throwing when a user has
  // disabled them would be safer but backwards-incompatible, so we don't.
  conf.set(AcidUtils.CONF_ACID_KEY, "true");
  // ACID tables must be bucketed ...
  if (table.getNumBuckets() < 1) {
    throw new SemanticException(ErrorMsg.ACID_OP_ON_NONACID_TABLE, table.getTableName());
  }
  // ... and must not declare SORTED BY columns.
  List<Order> sortCols = table.getSortCols();
  if (sortCols != null && !sortCols.isEmpty()) {
    throw new SemanticException(ErrorMsg.ACID_NO_SORTED_BUCKETS, table.getTableName());
  }
}
sortColumnsFirstTable.addAll(tbl.getSortCols()); return checkSortColsAndJoinCols(tbl.getSortCols(), joinCols, sortColumnsFirstTable);
/**
 * Computes the reduce-sink partition columns for an insert into a bucketed
 * and/or sorted destination table and records them on {@code ctx}.
 *
 * <p>Bucketed tables partition on the bucketing columns (ROW__ID-derived
 * columns for UPDATE/DELETE). A sorted, non-bucketed table is rejected unless
 * it is an index table, in which case the sort columns themselves become the
 * partition columns.
 *
 * @throws SemanticException if the table is sorted but neither bucketed nor an
 *         index table
 */
private void genPartnCols(String dest, Operator input, QB qb, TableDesc table_desc,
    Table dest_tab, SortBucketRSCtx ctx) throws SemanticException {
  boolean enforceBucketing = false;
  ArrayList<ExprNodeDesc> partnColsNoConvert = new ArrayList<ExprNodeDesc>();

  if (dest_tab.getNumBuckets() > 0) {
    enforceBucketing = true;
    if (updating(dest) || deleting(dest)) {
      // UPDATE/DELETE partition on ROW__ID-derived bucket columns.
      partnColsNoConvert = getPartitionColsFromBucketColsForUpdateDelete(input, false);
    } else {
      partnColsNoConvert =
          getPartitionColsFromBucketCols(dest, qb, dest_tab, table_desc, input, false);
    }
  }

  if ((dest_tab.getSortCols() != null) && (dest_tab.getSortCols().size() > 0)) {
    if (!enforceBucketing) {
      if (!dest_tab.isIndexTable()) {
        // Ordinary sorted tables must also be bucketed.
        throw new SemanticException(
            ErrorMsg.TBL_SORTED_NOT_BUCKETED.getErrorCodedMsg(dest_tab.getCompleteName()));
      }
      // Index tables may be sorted without buckets; partition on sort columns.
      partnColsNoConvert = getSortCols(dest, qb, dest_tab, table_desc, input, false);
    }
    enforceBucketing = true;
  }

  if (enforceBucketing) {
    ctx.setPartnCols(partnColsNoConvert);
  }
}
numBuckets = table.getNumBuckets(); List<String> sortCols = new ArrayList<String>(); for (Order colSortOrder : table.getSortCols()) { sortCols.add(colSortOrder.getCol());
numBuckets = table.getNumBuckets(); List<String> sortCols = new ArrayList<String>(); for (Order colSortOrder : table.getSortCols()) { sortCols.add(colSortOrder.getCol());
List<String> sortCols = Utilities.getColumnNamesFromSortCols(table.getSortCols()); List<String> bucketCols = table.getBucketCols(); return matchBucketSortCols(groupByCols, bucketCols, sortCols);
if ((dest_tab.getSortCols() != null) && (dest_tab.getSortCols().size() > 0)) { sortCols = getSortCols(dest, qb, dest_tab, table_desc, input, true); sortOrders = getSortOrders(dest, qb, dest_tab, input);
if ((dest_tab.getSortCols() != null) && (dest_tab.getSortCols().size() > 0)) { sortCols = getSortCols(dest, qb, dest_tab, table_desc, input, true); sortOrders = getSortOrders(dest, qb, dest_tab, input);
if (!destTable.getSortCols().isEmpty()) { sortPositions = getSortPositions(destTable.getSortCols(), destTable.getCols()); sortOrder = getSortOrders(destTable.getSortCols(), destTable.getCols()); } else {
/**
 * Maps the Hive table's SORTED BY columns onto a single Calcite
 * {@link RelCollation}.
 *
 * <p>Each sort column is located by name in the storage descriptor's column
 * list and contributes a {@link RelFieldCollation} at that ordinal. Ascending
 * columns get NULLS FIRST, descending columns NULLS LAST; names that match no
 * storage column are skipped.
 */
@Override
public List<RelCollation> getCollationList() {
  ImmutableList.Builder<RelFieldCollation> fields =
      new ImmutableList.Builder<RelFieldCollation>();
  for (Order order : this.hiveTblMetadata.getSortCols()) {
    List<FieldSchema> sdColumns = this.hiveTblMetadata.getSd().getCols();
    for (int idx = 0; idx < sdColumns.size(); idx++) {
      if (sdColumns.get(idx).getName().equals(order.getCol())) {
        Direction dir;
        NullDirection nullDir;
        if (order.getOrder() == BaseSemanticAnalyzer.HIVE_COLUMN_ORDER_ASC) {
          dir = Direction.ASCENDING;
          nullDir = NullDirection.FIRST;
        } else {
          dir = Direction.DESCENDING;
          nullDir = NullDirection.LAST;
        }
        fields.add(new RelFieldCollation(idx, dir, nullDir));
        break;
      }
    }
  }
  ImmutableList.Builder<RelCollation> collations =
      new ImmutableList.Builder<RelCollation>();
  collations.add(RelCollationTraitDef.INSTANCE.canonize(
      new HiveRelCollation(fields.build())));
  return collations.build();
}
/**
 * Updates an in-memory partition object so it reflects its table's storage
 * settings and the supplied partition location.
 *
 * <p>When {@code inheritTableSpecs} is set, the partition's input/output
 * formats, serde (lib and parameters), bucketing columns, bucket count and
 * sort columns are copied from the table. The location is always set.
 *
 * @throws HiveException if {@code partPath} is null or blank
 */
private void alterPartitionSpecInMemory(Table tbl, Map<String, String> partSpec,
    org.apache.hadoop.hive.metastore.api.Partition tpart, boolean inheritTableSpecs,
    String partPath) throws HiveException, InvalidOperationException {
  LOG.debug("altering partition for table " + tbl.getTableName()
      + " with partition spec : " + partSpec);
  if (inheritTableSpecs) {
    // Keep the partition's physical layout consistent with its table.
    tpart.getSd().setInputFormat(tbl.getTTable().getSd().getInputFormat());
    tpart.getSd().setOutputFormat(tbl.getTTable().getSd().getOutputFormat());
    tpart.getSd().getSerdeInfo().setSerializationLib(tbl.getSerializationLib());
    tpart.getSd().getSerdeInfo().setParameters(
        tbl.getTTable().getSd().getSerdeInfo().getParameters());
    tpart.getSd().setBucketCols(tbl.getBucketCols());
    tpart.getSd().setNumBuckets(tbl.getNumBuckets());
    tpart.getSd().setSortCols(tbl.getSortCols());
  }
  if (partPath == null || partPath.trim().isEmpty()) {
    throw new HiveException("new partition path should not be null or empty.");
  }
  tpart.getSd().setLocation(partPath);
}
/**
 * Rewrites an in-memory partition's storage descriptor from its owning table
 * and assigns the new partition location.
 *
 * <p>Table-level settings (formats, serde, bucketing, sorting) are copied only
 * when {@code inheritTableSpecs} is true; the location is set unconditionally
 * after validating it.
 *
 * @throws HiveException if {@code partPath} is null or blank
 */
private void alterPartitionSpecInMemory(Table tbl, Map<String, String> partSpec,
    org.apache.hadoop.hive.metastore.api.Partition tpart, boolean inheritTableSpecs,
    String partPath) throws HiveException, InvalidOperationException {
  LOG.debug("altering partition for table " + tbl.getTableName()
      + " with partition spec : " + partSpec);
  if (inheritTableSpecs) {
    // The partition inherits the table's I/O formats, serde and clustering.
    tpart.getSd().setOutputFormat(tbl.getTTable().getSd().getOutputFormat());
    tpart.getSd().setInputFormat(tbl.getTTable().getSd().getInputFormat());
    tpart.getSd().getSerdeInfo().setSerializationLib(tbl.getSerializationLib());
    tpart.getSd().getSerdeInfo().setParameters(
        tbl.getTTable().getSd().getSerdeInfo().getParameters());
    tpart.getSd().setBucketCols(tbl.getBucketCols());
    tpart.getSd().setNumBuckets(tbl.getNumBuckets());
    tpart.getSd().setSortCols(tbl.getSortCols());
  }
  boolean missingPath = partPath == null || partPath.trim().length() == 0;
  if (missingPath) {
    throw new HiveException("new partition path should not be null or empty.");
  }
  tpart.getSd().setLocation(partPath);
}