/**
 * Creates an empty partition backed by the given table.
 *
 * SemanticAnalyzer code requires an empty partition when the table is not
 * partitioned; this constructor supplies one. The partition's storage
 * descriptor is a deep copy of the table's, except for views, which have no
 * storage descriptor, so it is left unset for them.
 *
 * @param tbl the table this pseudo-partition belongs to
 * @throws HiveException if initializing the partition against the table fails
 */
public Partition(Table tbl) throws HiveException {
  org.apache.hadoop.hive.metastore.api.Partition tPart =
      new org.apache.hadoop.hive.metastore.api.Partition();
  if (!tbl.isView()) {
    // Deep copy so later mutations of this partition's SD do not leak into the table.
    tPart.setSd(tbl.getTTable().getSd().deepCopy());
  }
  initialize(tbl, tPart);
}
/**
 * Batch add_partitions must reject a partition whose storage descriptor is
 * null by throwing a MetaException.
 */
@Test(expected = MetaException.class)
public void testAddPartitionsNullSd() throws Exception {
  createTable();
  Partition sdlessPartition = buildPartition(DB_NAME, TABLE_NAME, DEFAULT_YEAR_VALUE);
  sdlessPartition.setSd(null);
  List<Partition> batch = new ArrayList<>();
  batch.add(sdlessPartition);
  client.add_partitions(batch);
}
/** * create an empty partition. * SemanticAnalyzer code requires that an empty partition when the table is not partitioned. */ public Partition(Table tbl) throws HiveException { org.apache.hadoop.hive.metastore.api.Partition tPart = new org.apache.hadoop.hive.metastore.api.Partition(); if (!tbl.isView()) { tPart.setSd(tbl.getTTable().getSd().deepCopy()); } initialize(tbl, tPart); }
/**
 * Reconstructs a full Partition from its cached wrapper: takes the partition
 * skeleton, re-attaches a copy of the shared storage descriptor (looked up by
 * hash), restores the wrapper's per-partition location and parameters, and
 * normalizes null collection fields on the SD to empty ones.
 */
static Partition assemble(PartitionWrapper wrapper, SharedCache sharedCache) {
  Partition assembled = wrapper.getPartition().deepCopy();
  byte[] sdHash = wrapper.getSdHash();
  if (sdHash != null) {
    // Copy the shared SD so per-partition mutations never touch the cache.
    StorageDescriptor sd = sharedCache.getSdFromCache(sdHash).deepCopy();
    if (sd.getBucketCols() == null) {
      sd.setBucketCols(Collections.emptyList());
    }
    if (sd.getSortCols() == null) {
      sd.setSortCols(Collections.emptyList());
    }
    if (sd.getSkewedInfo() == null) {
      sd.setSkewedInfo(new SkewedInfo(
          Collections.emptyList(), Collections.emptyList(), Collections.emptyMap()));
    }
    // Location and parameters are stored per-partition on the wrapper, not in the shared SD.
    sd.setLocation(wrapper.getLocation());
    sd.setParameters(wrapper.getParameters());
    assembled.setSd(sd);
  }
  return assembled;
}
/**
 * Builds a single-key test partition ("partition_key" = "1") whose storage
 * descriptor uses the Avro serde and one int column named "foo".
 */
private Partition getTestPartition(Table table) throws HiveException {
  Partition testPartition = new Partition(table, ImmutableMap.of("partition_key", "1"), null);
  StorageDescriptor descriptor = new StorageDescriptor();
  descriptor.setSerdeInfo(new SerDeInfo("avro", AvroSerDe.class.getName(), null));
  descriptor.setCols(Lists.newArrayList(new FieldSchema("foo", "int", null)));
  testPartition.getTPartition().setSd(descriptor);
  return testPartition;
} }
/**
 * add_partitions_pspec must reject a partition spec containing a partition
 * whose storage descriptor is null by throwing a MetaException.
 */
@Test(expected = MetaException.class)
public void testAddPartitionSpecNullSd() throws Exception {
  createTable();
  Partition sdlessPartition = buildPartition(DB_NAME, TABLE_NAME, DEFAULT_YEAR_VALUE);
  sdlessPartition.setSd(null);
  PartitionSpecProxy spec =
      buildPartitionSpec(DB_NAME, TABLE_NAME, null, Lists.newArrayList(sdlessPartition));
  client.add_partitions_pspec(spec);
}
/**
 * Converts this HCat partition description into a thrift metastore Partition.
 *
 * If no explicit location was set, a default one is derived from the table's
 * location plus the standard partition-name path segment.
 *
 * @return a populated metastore Partition (create time set to now, last
 *         access time explicitly unset)
 * @throws HCatException if the default partition path cannot be constructed
 */
Partition toHivePartition() throws HCatException {
  Partition hivePtn = new Partition();
  hivePtn.setDbName(dbName);
  hivePtn.setTableName(tableName);
  hivePtn.setValues(values);
  hivePtn.setParameters(parameters);
  if (sd.getLocation() == null) {
    LOG.warn("Partition location is not set! Attempting to construct default partition location.");
    try {
      String partName = Warehouse.makePartName(
          HCatSchemaUtils.getFieldSchemas(hcatTable.getPartCols()), values);
      sd.setLocation(new Path(hcatTable.getSd().getLocation(), partName).toString());
    } catch (MetaException exception) {
      // Chain the underlying MetaException instead of dropping it, so callers
      // can see why the default partition path could not be built.
      throw new HCatException("Could not construct default partition-path for "
          + hcatTable.getDbName() + "." + hcatTable.getTableName() + "[" + values + "]",
          exception);
    }
  }
  hivePtn.setSd(sd);
  hivePtn.setCreateTime((int) (System.currentTimeMillis() / 1000));
  hivePtn.setLastAccessTimeIsSet(false);
  return hivePtn;
}
/**
 * add_partition must reject a partition whose storage descriptor is null by
 * throwing a MetaException.
 */
@Test(expected = MetaException.class)
public void testAddPartitionNullSd() throws Exception {
  createTable();
  Partition sdlessPartition = buildPartition(DB_NAME, TABLE_NAME, DEFAULT_YEAR_VALUE);
  sdlessPartition.setSd(null);
  client.add_partition(sdlessPartition);
}
/**
 * Converts an internal Partition representation into a thrift metastore
 * Partition, building its storage descriptor from the partition's columns
 * and storage settings.
 */
public static org.apache.hadoop.hive.metastore.api.Partition toMetastoreApiPartition(
    Partition partition) {
  org.apache.hadoop.hive.metastore.api.Partition apiPartition =
      new org.apache.hadoop.hive.metastore.api.Partition();
  apiPartition.setDbName(partition.getDatabaseName());
  apiPartition.setTableName(partition.getTableName());
  apiPartition.setValues(partition.getValues());
  apiPartition.setSd(makeStorageDescriptor(
      partition.getTableName(), partition.getColumns(), partition.getStorage()));
  apiPartition.setParameters(partition.getParameters());
  return apiPartition;
}
/**
 * For views, add_partitions must accept a partition with a null storage
 * descriptor, and the stored partition must come back without an SD.
 */
@Test
public void testAddPartitionsForViewNullPartSd() throws Exception {
  String viewName = "test_add_partition_view";
  createView(viewName);
  Partition viewPartition = buildPartition(DB_NAME, viewName, DEFAULT_YEAR_VALUE);
  viewPartition.setSd(null);
  client.add_partitions(Lists.newArrayList(viewPartition));
  Partition stored = client.getPartition(DB_NAME, viewName, "year=2017");
  Assert.assertNull(stored.getSd());
}
/**
 * Wraps a partition for caching. When the partition has a storage descriptor,
 * the SD is registered in the shared cache (reference-counted by hash) and
 * stripped from the cached partition copy; its location and parameters are
 * kept on the wrapper instead.
 */
private PartitionWrapper makePartitionWrapper(Partition part, SharedCache sharedCache) {
  Partition strippedCopy = part.deepCopy();
  StorageDescriptor sd = part.getSd();
  if (sd == null) {
    return new PartitionWrapper(strippedCopy, null, null, null);
  }
  byte[] sdHash = MetaStoreServerUtils.hashStorageDescriptor(sd, md);
  sharedCache.increSd(sd, sdHash);
  // The copy stored in the cache must not carry its own SD; it is shared by hash.
  strippedCopy.setSd(null);
  return new PartitionWrapper(strippedCopy, sdHash, sd.getLocation(), sd.getParameters());
} }
/**
 * Convert a {@link HivePartition} into a {@link Partition}.
 *
 * Create time is taken from the HivePartition itself when present, otherwise
 * from its props; last access time comes from the props when present.
 */
public static Partition getPartition(HivePartition hivePartition) {
  State props = hivePartition.getProps();
  Partition converted = new Partition();
  converted.setDbName(hivePartition.getDbName());
  converted.setTableName(hivePartition.getTableName());
  converted.setValues(hivePartition.getValues());
  converted.setParameters(getParameters(props));
  if (hivePartition.getCreateTime().isPresent()) {
    // checkedCast guards against a create time that does not fit in an int.
    converted.setCreateTime(Ints.checkedCast(hivePartition.getCreateTime().get()));
  } else if (props.contains(HiveConstants.CREATE_TIME)) {
    converted.setCreateTime(props.getPropAsInt(HiveConstants.CREATE_TIME));
  }
  if (props.contains(HiveConstants.LAST_ACCESS_TIME)) {
    converted.setLastAccessTime(props.getPropAsInt(HiveConstants.LAST_ACCESS_TIME));
  }
  converted.setSd(getStorageDescriptor(hivePartition));
  return converted;
}
/**
 * For views, add_partitions_pspec must accept a partition with a null
 * storage descriptor, and the stored partition must come back without an SD.
 */
@Test
public void testAddPartitionsForViewNullPartSd() throws Exception {
  String viewName = "test_add_partition_view";
  createView(viewName);
  Partition viewPartition = buildPartition(DB_NAME, viewName, DEFAULT_YEAR_VALUE);
  viewPartition.setSd(null);
  PartitionSpecProxy spec =
      buildPartitionSpec(DB_NAME, viewName, null, Lists.newArrayList(viewPartition));
  client.add_partitions_pspec(spec);
  Partition stored = client.getPartition(DB_NAME, viewName, "year=2017");
  Assert.assertNull(stored.getSd());
}
/**
 * Wraps a thrift metastore partition into a ql.metadata partition for the
 * pre-event authorization path, filling in the table's SD when the partition
 * has none yet.
 */
public PartitionWrapper(org.apache.hadoop.hive.metastore.api.Partition mapiPart,
    PreEventContext context) throws HiveException, NoSuchObjectException, MetaException {
  org.apache.hadoop.hive.metastore.api.Partition apiPartCopy = mapiPart.deepCopy();
  org.apache.hadoop.hive.metastore.api.Table table =
      context.getHandler().get_table_core(mapiPart.getDbName(), mapiPart.getTableName());
  if (apiPartCopy.getSd() == null) {
    // In the cases of create partition, by the time this event fires, the partition
    // object has not yet come into existence, and thus will not yet have a
    // location or an SD, but these are needed to create a ql.metadata.Partition,
    // so we use the table's SD. The only place this is used is by the
    // authorization hooks, so we will not affect code flow in the metastore itself.
    apiPartCopy.setSd(table.getSd().deepCopy());
  }
  initialize(new TableWrapper(table), apiPartCopy);
} }
/**
 * Creates and registers a single-value partition for the given table, with a
 * fresh storage descriptor built from the computed location and sort columns.
 *
 * @return the partition that was added to the metastore
 */
protected Partition newPartition(Table t, String value, List<Order> sortCols) throws Exception {
  Partition created = new Partition();
  created.setDbName(t.getDbName());
  created.setTableName(t.getTableName());
  created.addToValues(value);
  created.setParameters(new HashMap<String, String>());
  created.setSd(newStorageDescriptor(getLocation(t.getTableName(), value), sortCols));
  ms.add_partition(created);
  return created;
}
/**
 * Wraps a thrift metastore partition into a ql.metadata partition for the
 * pre-event authorization path, resolving the owning table through its
 * catalog (falling back to the default catalog) and filling in the table's
 * SD when the partition has none yet.
 */
public PartitionWrapper(org.apache.hadoop.hive.metastore.api.Partition mapiPart,
    PreEventContext context) throws HiveException, NoSuchObjectException, MetaException {
  org.apache.hadoop.hive.metastore.api.Partition apiPartCopy = mapiPart.deepCopy();
  String catName = mapiPart.isSetCatName()
      ? mapiPart.getCatName()
      : MetaStoreUtils.getDefaultCatalog(context.getHandler().getConf());
  org.apache.hadoop.hive.metastore.api.Table table = context.getHandler()
      .get_table_core(catName, mapiPart.getDbName(), mapiPart.getTableName());
  if (apiPartCopy.getSd() == null) {
    // In the cases of create partition, by the time this event fires, the partition
    // object has not yet come into existence, and thus will not yet have a
    // location or an SD, but these are needed to create a ql.metadata.Partition,
    // so we use the table's SD. The only place this is used is by the
    // authorization hooks, so we will not affect code flow in the metastore itself.
    apiPartCopy.setSd(table.getSd().deepCopy());
  }
  initialize(new TableWrapper(table), apiPartCopy);
} }
/**
 * Adds a partition for the given values to the table, placing it under the
 * table's location at the standard partition path.
 */
private static void addPartition(IMetaStoreClient client, Table tbl, List<String> partValues)
    throws IOException, TException {
  Partition newPart = new Partition();
  newPart.setDbName(tbl.getDbName());
  newPart.setTableName(tbl.getTableName());
  newPart.setValues(partValues);
  // Copy the table's SD and point it at the partition's own subdirectory.
  StorageDescriptor partSd = new StorageDescriptor(tbl.getSd());
  partSd.setLocation(partSd.getLocation() + Path.SEPARATOR
      + makePartPath(tbl.getPartitionKeys(), partValues));
  newPart.setSd(partSd);
  client.add_partition(newPart);
}
/**
 * Registers a partition for the given values, reusing the table's storage
 * descriptor (and its serde) with the location suffix appended.
 */
private void add_partition(HiveMetaStoreClient client, Table table, List<String> vals,
    String location) throws TException {
  Partition newPart = new Partition();
  newPart.setDbName(table.getDbName());
  newPart.setTableName(table.getTableName());
  newPart.setValues(vals);
  newPart.setParameters(new HashMap<>());
  StorageDescriptor sdCopy = table.getSd().deepCopy();
  // Share the table's serde instance, as the original code did.
  sdCopy.setSerdeInfo(table.getSd().getSerdeInfo());
  sdCopy.setLocation(table.getSd().getLocation() + location);
  newPart.setSd(sdCopy);
  client.add_partition(newPart);
}
/**
 * Builds (but does not register) a partition object for the given values,
 * cloning the table's SD and serde, appending the location suffix, and
 * running a fast partition-stats update against the warehouse.
 */
private static Partition makePartitionObject(String dbName, String tblName,
    List<String> ptnVals, Table tbl, String ptnLocationSuffix) throws MetaException {
  Partition built = new Partition();
  built.setDbName(dbName);
  built.setTableName(tblName);
  built.setValues(ptnVals);
  built.setParameters(new HashMap<>());
  StorageDescriptor sdClone = tbl.getSd().deepCopy();
  sdClone.setSerdeInfo(tbl.getSd().getSerdeInfo().deepCopy());
  sdClone.setLocation(tbl.getSd().getLocation() + ptnLocationSuffix);
  built.setSd(sdClone);
  MetaStoreServerUtils.updatePartitionStatsFast(built, tbl, warehouse, false, false, null, true);
  return built;
}
/**
 * Registers a partition for the given values, reusing the table's storage
 * descriptor (and its serde) with the location suffix appended.
 *
 * @param client   metastore client used to add the partition
 * @param table    table the new partition belongs to
 * @param vals     partition key values
 * @param location suffix appended to the table location for this partition
 */
private void addPartition(HiveMetaStoreClient client, Table table, List<String> vals,
    String location) throws TException {
  Partition part = new Partition();
  part.setDbName(table.getDbName());
  part.setTableName(table.getTableName());
  part.setValues(vals);
  // Diamond operator, consistent with the sibling add_partition helper.
  part.setParameters(new HashMap<>());
  part.setSd(table.getSd().deepCopy());
  // The serde is intentionally shared with the table's SD (not deep-copied).
  part.getSd().setSerdeInfo(table.getSd().getSerdeInfo());
  part.getSd().setLocation(table.getSd().getLocation() + location);
  client.add_partition(part);
} }