public synchronized void increSd(StorageDescriptor sd, byte[] sdHash) {
  ByteArrayWrapper byteArray = new ByteArrayWrapper(sdHash);
  if (sdCache.containsKey(byteArray)) {
    sdCache.get(byteArray).refCount++;
  } else {
    // Cache a copy with location and parameters stripped: those fields vary
    // per table/partition, while the rest of the descriptor can be shared.
    StorageDescriptor sdToCache = sd.deepCopy();
    sdToCache.setLocation(null);
    sdToCache.setParameters(null);
    sdCache.put(byteArray, new StorageDescriptorWrapper(sdToCache, 1));
  }
}
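// For symmetry with increSd, a reference-counted cache also needs a decrement
// path that evicts the descriptor once nothing references it. A minimal
// sketch, assuming the method name decrSd and evict-on-zero semantics
// (neither appears in the snippet above):
public synchronized void decrSd(byte[] sdHash) {
  ByteArrayWrapper byteArray = new ByteArrayWrapper(sdHash);
  StorageDescriptorWrapper sdWrapper = sdCache.get(byteArray);
  if (sdWrapper != null && --sdWrapper.refCount == 0) {
    sdCache.remove(byteArray);
  }
}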
Partition build() {
  Partition partition = new Partition();
  List<String> partitionNames = table.getPartitionKeys()
      .stream()
      .map(FieldSchema::getName)
      .collect(Collectors.toList());
  if (partitionNames.size() != values.size()) {
    throw new RuntimeException("Partition values do not match table schema");
  }
  List<String> spec = IntStream.range(0, values.size())
      .mapToObj(i -> partitionNames.get(i) + "=" + values.get(i))
      .collect(Collectors.toList());
  partition.setDbName(table.getDbName());
  partition.setTableName(table.getTableName());
  partition.setParameters(parameters);
  partition.setValues(values);
  partition.setSd(table.getSd().deepCopy());
  if (this.location == null) {
    partition.getSd().setLocation(table.getSd().getLocation() + "/" + Joiner.on("/").join(spec));
  } else {
    partition.getSd().setLocation(location);
  }
  return partition;
}
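// The builder above defaults the partition location by joining "key=value"
// pairs under the table location. A self-contained sketch of that derivation
// (the table path, keys, and values are made up for illustration, and
// String.join stands in for Guava's Joiner):
import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.IntStream;

public class PartitionPathDemo {
  public static void main(String[] args) {
    List<String> partitionNames = Arrays.asList("ds", "hr");
    List<String> values = Arrays.asList("2024-01-01", "08");
    List<String> spec = IntStream.range(0, values.size())
        .mapToObj(i -> partitionNames.get(i) + "=" + values.get(i))
        .collect(Collectors.toList());
    // Prints: warehouse/t/ds=2024-01-01/hr=08
    System.out.println("warehouse/t/" + String.join("/", spec));
  }
}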
@Override
public void analyzeIndexDefinition(Table baseTable, Index index,
    Table indexTable) throws HiveException {
  StorageDescriptor storageDesc = index.getSd();
  if (this.usesIndexTable() && indexTable != null) {
    StorageDescriptor indexTableSd = storageDesc.deepCopy();
    List<FieldSchema> indexTblCols = indexTableSd.getCols();
    FieldSchema bucketFileName = new FieldSchema("_bucketname", "string", "");
    indexTblCols.add(bucketFileName);
    FieldSchema offSets = new FieldSchema("_offsets", "array<bigint>", "");
    indexTblCols.add(offSets);
    indexTable.setSd(indexTableSd);
  }
}
@Override
public void analyzeIndexDefinition(Table baseTable, Index index,
    Table indexTable) throws HiveException {
  StorageDescriptor storageDesc = index.getSd();
  if (this.usesIndexTable() && indexTable != null) {
    StorageDescriptor indexTableSd = storageDesc.deepCopy();
    List<FieldSchema> indexTblCols = indexTableSd.getCols();
    FieldSchema bucketFileName = new FieldSchema("_bucketname", "string", "");
    indexTblCols.add(bucketFileName);
    FieldSchema offSets = new FieldSchema("_offset", "bigint", "");
    indexTblCols.add(offSets);
    FieldSchema bitmaps = new FieldSchema("_bitmaps", "array<bigint>", "");
    indexTblCols.add(bitmaps);
    indexTable.setSd(indexTableSd);
  }
}
@Override
public void analyzeIndexDefinition(Table baseTable, Index index,
    Table indexTable) throws HiveException {
  StorageDescriptor storageDesc = index.getSd();
  if (this.usesIndexTable() && indexTable != null) {
    StorageDescriptor indexTableSd = storageDesc.deepCopy();
    List<FieldSchema> indexTblCols = indexTableSd.getCols();
    FieldSchema bucketFileName = new FieldSchema("_bucketname", "string", "");
    indexTblCols.add(bucketFileName);
    FieldSchema offSets = new FieldSchema("_offsets", "array<bigint>", "");
    indexTblCols.add(offSets);
    Map<String, String> paraList = index.getParameters();
    if (paraList != null && paraList.containsKey("AGGREGATES")) {
      String propValue = paraList.get("AGGREGATES");
      if (propValue.contains(",")) {
        String[] aggFuncs = propValue.split(",");
        for (String aggFunc : aggFuncs) {
          createAggregationFunction(indexTblCols, aggFunc);
        }
      } else {
        createAggregationFunction(indexTblCols, propValue);
      }
    }
    indexTable.setSd(indexTableSd);
  }
}
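// The aggregate handler above calls createAggregationFunction, which is not
// shown. A plausible sketch, assuming only the signature implied by the call
// site and an invented column-naming scheme (e.g. "count(salary)" becomes a
// bigint column named _count_of_salary):
private static void createAggregationFunction(List<FieldSchema> indexTblCols, String property) {
  String[] parts = property.trim().split("\\(");
  String functionName = parts[0];
  String columnName = parts[1].substring(0, parts[1].length() - 1); // strip trailing ")"
  indexTblCols.add(new FieldSchema("_" + functionName + "_of_" + columnName, "bigint", ""));
}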
/**
 * Create an empty partition.
 * SemanticAnalyzer code requires an empty partition when the table is not partitioned.
 */
public Partition(Table tbl) throws HiveException {
  org.apache.hadoop.hive.metastore.api.Partition tPart =
      new org.apache.hadoop.hive.metastore.api.Partition();
  if (!tbl.isView()) {
    tPart.setSd(tbl.getTTable().getSd().deepCopy());
  }
  initialize(tbl, tPart);
}
public static org.apache.hadoop.hive.metastore.api.Partition createMetaPartitionObject(
    Table tbl, Map<String, String> partSpec, Path location) throws HiveException {
  List<String> pvals = new ArrayList<String>();
  for (FieldSchema field : tbl.getPartCols()) {
    String val = partSpec.get(field.getName());
    if (val == null || val.isEmpty()) {
      throw new HiveException("partition spec is invalid; field "
          + field.getName() + " does not exist or is empty");
    }
    pvals.add(val);
  }

  org.apache.hadoop.hive.metastore.api.Partition tpart =
      new org.apache.hadoop.hive.metastore.api.Partition();
  tpart.setDbName(tbl.getDbName());
  tpart.setTableName(tbl.getTableName());
  tpart.setValues(pvals);

  if (!tbl.isView()) {
    tpart.setSd(tbl.getSd().deepCopy());
    tpart.getSd().setLocation((location != null) ? location.toString() : null);
  }
  return tpart;
}
static Table assemble(TableWrapper wrapper, SharedCache sharedCache) {
  Table t = wrapper.getTable().deepCopy();
  if (wrapper.getSdHash() != null) {
    StorageDescriptor sdCopy = sharedCache.getSdFromCache(wrapper.getSdHash()).deepCopy();
    if (sdCopy.getBucketCols() == null) {
      sdCopy.setBucketCols(Collections.emptyList());
    }
    if (sdCopy.getSortCols() == null) {
      sdCopy.setSortCols(Collections.emptyList());
    }
    if (sdCopy.getSkewedInfo() == null) {
      sdCopy.setSkewedInfo(new SkewedInfo(Collections.emptyList(),
          Collections.emptyList(), Collections.emptyMap()));
    }
    sdCopy.setLocation(wrapper.getLocation());
    sdCopy.setParameters(wrapper.getParameters());
    t.setSd(sdCopy);
  }
  return t;
}
static Partition assemble(PartitionWrapper wrapper, SharedCache sharedCache) {
  Partition p = wrapper.getPartition().deepCopy();
  if (wrapper.getSdHash() != null) {
    StorageDescriptor sdCopy = sharedCache.getSdFromCache(wrapper.getSdHash()).deepCopy();
    if (sdCopy.getBucketCols() == null) {
      sdCopy.setBucketCols(Collections.emptyList());
    }
    if (sdCopy.getSortCols() == null) {
      sdCopy.setSortCols(Collections.emptyList());
    }
    if (sdCopy.getSkewedInfo() == null) {
      sdCopy.setSkewedInfo(new SkewedInfo(Collections.emptyList(),
          Collections.emptyList(), Collections.emptyMap()));
    }
    sdCopy.setLocation(wrapper.getLocation());
    sdCopy.setParameters(wrapper.getParameters());
    p.setSd(sdCopy);
  }
  return p;
}
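// The two assemble() overloads above duplicate the null-field normalization
// of the cached descriptor. A minimal sketch of a shared helper (the name
// normalizeSd and its placement are assumptions, not part of the original):
private static void normalizeSd(StorageDescriptor sdCopy) {
  if (sdCopy.getBucketCols() == null) {
    sdCopy.setBucketCols(Collections.emptyList());
  }
  if (sdCopy.getSortCols() == null) {
    sdCopy.setSortCols(Collections.emptyList());
  }
  if (sdCopy.getSkewedInfo() == null) {
    sdCopy.setSkewedInfo(new SkewedInfo(Collections.emptyList(),
        Collections.emptyList(), Collections.emptyMap()));
  }
}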
public static Partition createMetaPartitionObject(Table tbl, Map<String, String> partSpec,
    Path location) throws MetastoreException {
  List<String> pvals = new ArrayList<String>();
  for (FieldSchema field : getPartCols(tbl)) {
    String val = partSpec.get(field.getName());
    if (val == null || val.isEmpty()) {
      throw new MetastoreException("partition spec is invalid; field "
          + field.getName() + " does not exist or is empty");
    }
    pvals.add(val);
  }

  Partition tpart = new Partition();
  tpart.setCatName(tbl.getCatName());
  tpart.setDbName(tbl.getDbName());
  tpart.setTableName(tbl.getTableName());
  tpart.setValues(pvals);

  if (!MetaStoreUtils.isView(tbl)) {
    tpart.setSd(tbl.getSd().deepCopy());
    tpart.getSd().setLocation((location != null) ? location.toString() : null);
  }
  return tpart;
}
public PartitionWrapper(org.apache.hadoop.hive.metastore.api.Partition mapiPart,
    PreEventContext context) throws HiveException, NoSuchObjectException, MetaException {
  org.apache.hadoop.hive.metastore.api.Partition wrapperApiPart = mapiPart.deepCopy();
  org.apache.hadoop.hive.metastore.api.Table t = context.getHandler().get_table_core(
      mapiPart.getDbName(), mapiPart.getTableName());
  if (wrapperApiPart.getSd() == null) {
    // In the case of create partition, by the time this event fires, the partition
    // object has not yet come into existence, and thus will not yet have a
    // location or an SD, but these are needed to create a ql.metadata.Partition,
    // so we use the table's SD. The only place this is used is by the
    // authorization hooks, so we will not affect code flow in the metastore itself.
    wrapperApiPart.setSd(t.getSd().deepCopy());
  }
  initialize(new TableWrapper(t), wrapperApiPart);
}
part.setValues(vals);
part.setParameters(new HashMap<>());
part.setSd(tbl.getSd().deepCopy());
part.getSd().setSerdeInfo(tbl.getSd().getSerdeInfo());
part.getSd().setLocation(tbl.getSd().getLocation() + "/partCol=1");
public PartitionWrapper(org.apache.hadoop.hive.metastore.api.Partition mapiPart,
    PreEventContext context) throws HiveException, NoSuchObjectException, MetaException {
  org.apache.hadoop.hive.metastore.api.Partition wrapperApiPart = mapiPart.deepCopy();
  String catName = mapiPart.isSetCatName() ? mapiPart.getCatName() :
      MetaStoreUtils.getDefaultCatalog(context.getHandler().getConf());
  org.apache.hadoop.hive.metastore.api.Table t = context.getHandler().get_table_core(
      catName, mapiPart.getDbName(), mapiPart.getTableName());
  if (wrapperApiPart.getSd() == null) {
    // In the case of create partition, by the time this event fires, the partition
    // object has not yet come into existence, and thus will not yet have a
    // location or an SD, but these are needed to create a ql.metadata.Partition,
    // so we use the table's SD. The only place this is used is by the
    // authorization hooks, so we will not affect code flow in the metastore itself.
    wrapperApiPart.setSd(t.getSd().deepCopy());
  }
  initialize(new TableWrapper(t), wrapperApiPart);
}
part.setSd(tbl.getSd().deepCopy());
partLocation = new Path(tbl.getSd().getLocation(),
    Warehouse.makePartName(tbl.getPartitionKeys(), part_vals));
private void add_partition(HiveMetaStoreClient client, Table table,
    List<String> vals, String location) throws TException {
  Partition part = new Partition();
  part.setDbName(table.getDbName());
  part.setTableName(table.getTableName());
  part.setValues(vals);
  part.setParameters(new HashMap<>());
  part.setSd(table.getSd().deepCopy());
  part.getSd().setSerdeInfo(table.getSd().getSerdeInfo());
  part.getSd().setLocation(table.getSd().getLocation() + location);
  client.add_partition(part);
}
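// A hypothetical invocation of the helper above from a test (the connected
// client, the table, and the partition column name "ds" are all assumed):
add_partition(client, table, Collections.singletonList("2024-01-01"), "/ds=2024-01-01");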
private static Partition makePartitionObject(String dbName, String tblName,
    List<String> ptnVals, Table tbl, String ptnLocationSuffix) throws MetaException {
  Partition part4 = new Partition();
  part4.setDbName(dbName);
  part4.setTableName(tblName);
  part4.setValues(ptnVals);
  part4.setParameters(new HashMap<>());
  part4.setSd(tbl.getSd().deepCopy());
  part4.getSd().setSerdeInfo(tbl.getSd().getSerdeInfo().deepCopy());
  part4.getSd().setLocation(tbl.getSd().getLocation() + ptnLocationSuffix);
  MetaStoreServerUtils.updatePartitionStatsFast(part4, tbl, warehouse, false, false, null, true);
  return part4;
}
private void addPartition(HiveMetaStoreClient client, Table table,
    List<String> vals, String location) throws TException {
  Partition part = new Partition();
  part.setDbName(table.getDbName());
  part.setTableName(table.getTableName());
  part.setValues(vals);
  part.setParameters(new HashMap<String, String>());
  part.setSd(table.getSd().deepCopy());
  part.getSd().setSerdeInfo(table.getSd().getSerdeInfo());
  part.getSd().setLocation(table.getSd().getLocation() + location);
  client.add_partition(part);
}