/**
 * Sets the table name on the wrapped spec and pushes it down to every
 * partition the spec contains, keeping them consistent.
 */
@Override
public void setTableName(String tableName) {
  partitionSpec.setTableName(tableName);
  partitionSpec.getPartitionList().getPartitions()
      .forEach(partition -> partition.setTableName(tableName));
}
/**
 * alter_partitions must reject a partition whose embedded table name
 * differs from the table being altered.
 */
@Test(expected = MetaException.class)
public void testAlterPartitionsChangeTableName() throws Exception {
  createTable4PartColsParts(client);
  List<Partition> fetched = client.listPartitions(DB_NAME, TABLE_NAME, (short) -1);
  Partition mismatched = fetched.get(3);
  mismatched.setTableName(TABLE_NAME + "_changed");
  client.alter_partitions(DB_NAME, TABLE_NAME, Lists.newArrayList(mismatched));
}
/**
 * renamePartition must reject a new-partition object whose table name is null.
 */
@Test(expected = MetaException.class)
public void testRenamePartitionNullTableInPartition() throws Exception {
  // The returned partition values are not needed: the rename below targets
  // a hard-coded value list, so the result is deliberately discarded.
  createTable4PartColsParts(client);
  List<Partition> oldParts = client.listPartitions(DB_NAME, TABLE_NAME, (short) -1);
  Partition partToRename = oldParts.get(3);
  partToRename.setValues(Lists.newArrayList("2018", "01", "16"));
  partToRename.setTableName(null);
  client.renamePartition(DB_NAME, TABLE_NAME, Lists.newArrayList("2017", "11", "27"), partToRename);
}
/**
 * renamePartition must reject a new-partition object pointing at a
 * different table than the one being operated on.
 */
@Test(expected = MetaException.class)
public void testRenamePartitionChangeTblName() throws Exception {
  List<List<String>> originalValues = createTable4PartColsParts(client);
  List<Partition> existing = client.listPartitions(DB_NAME, TABLE_NAME, (short) -1);
  Partition renamed = existing.get(3);
  renamed.setValues(Lists.newArrayList("2018", "01", "16"));
  renamed.setTableName(TABLE_NAME + "_2");
  client.renamePartition(DB_NAME, TABLE_NAME, originalValues.get(3), renamed);
}
/**
 * add_partitions must fail when any partition in the batch has a null
 * table name, even if the others are valid.
 */
@Test(expected = MetaException.class)
public void testAddPartitionsNullTable() throws Exception {
  createTable();
  Partition valid = buildPartition(DB_NAME, TABLE_NAME, "2016");
  Partition missingTable = buildPartition(DB_NAME, TABLE_NAME, "2017");
  missingTable.setTableName(null);
  client.add_partitions(Lists.newArrayList(valid, missingTable));
}
/**
 * alter_partition (single-partition form) must reject a partition whose
 * embedded table name differs from the target table.
 */
@Test(expected = MetaException.class)
public void testAlterPartitionChangeTableName() throws Exception {
  createTable4PartColsParts(client);
  List<Partition> fetched = client.listPartitions(DB_NAME, TABLE_NAME, (short) -1);
  Partition mismatched = fetched.get(3);
  mismatched.setTableName(TABLE_NAME + "_changed");
  client.alter_partition(DB_NAME, TABLE_NAME, mismatched);
}
/**
 * Builds a metastore-API Partition for {@code tbl} from a partition spec map.
 *
 * @param tbl      table the partition belongs to
 * @param partSpec map from partition-column name to value; every partition
 *                 column of {@code tbl} must be present with a non-empty value
 * @param location partition location, or null to leave it unset
 * @return a new thrift Partition (no storage descriptor for views)
 * @throws HiveException if a partition column is missing or empty in the spec
 */
public static org.apache.hadoop.hive.metastore.api.Partition createMetaPartitionObject(
    Table tbl, Map<String, String> partSpec, Path location) throws HiveException {
  // Resolve values in the table's partition-column order; all must be supplied.
  List<String> partValues = new ArrayList<String>();
  for (FieldSchema col : tbl.getPartCols()) {
    String value = partSpec.get(col.getName());
    if (value == null || value.isEmpty()) {
      throw new HiveException("partition spec is invalid; field "
          + col.getName() + " does not exist or is empty");
    }
    partValues.add(value);
  }

  org.apache.hadoop.hive.metastore.api.Partition metaPart =
      new org.apache.hadoop.hive.metastore.api.Partition();
  metaPart.setDbName(tbl.getDbName());
  metaPart.setTableName(tbl.getTableName());
  metaPart.setValues(partValues);
  // Views carry no storage; tables get a copy of the table SD with the location set.
  if (!tbl.isView()) {
    metaPart.setSd(tbl.getSd().deepCopy());
    metaPart.getSd().setLocation(location == null ? null : location.toString());
  }
  return metaPart;
}
/**
 * alter_partitions with an EnvironmentContext must still reject a partition
 * whose embedded table name differs from the target table.
 */
@Test(expected = MetaException.class)
public void testAlterPartitionsWithEnvironmentCtxChangeTableName() throws Exception {
  createTable4PartColsParts(client);
  List<Partition> fetched = client.listPartitions(DB_NAME, TABLE_NAME, (short) -1);
  Partition mismatched = fetched.get(3);
  mismatched.setTableName(TABLE_NAME + "_changed");
  client.alter_partitions(DB_NAME, TABLE_NAME, Lists.newArrayList(mismatched),
      new EnvironmentContext());
}
/**
 * alter_partition with an EnvironmentContext must still reject a partition
 * whose embedded table name differs from the target table.
 */
@Test(expected = MetaException.class)
public void testAlterPartitionWithEnvironmentCtxChangeTableName() throws Exception {
  createTable4PartColsParts(client);
  List<Partition> fetched = client.listPartitions(DB_NAME, TABLE_NAME, (short) -1);
  Partition mismatched = fetched.get(3);
  mismatched.setTableName(TABLE_NAME + "_changed");
  client.alter_partition(DB_NAME, TABLE_NAME, mismatched, new EnvironmentContext());
}
/**
 * Converts this HCat partition description into a metastore-API Partition.
 *
 * If no location was set on the storage descriptor, a default location is
 * derived from the parent table's location plus the standard partition path.
 *
 * @return a new Partition populated from this object's fields
 * @throws HCatException if the default partition path cannot be constructed
 */
Partition toHivePartition() throws HCatException {
  Partition hivePtn = new Partition();
  hivePtn.setDbName(dbName);
  hivePtn.setTableName(tableName);
  hivePtn.setValues(values);
  hivePtn.setParameters(parameters);
  if (sd.getLocation() == null) {
    LOG.warn("Partition location is not set! Attempting to construct default partition location.");
    try {
      String partName = Warehouse.makePartName(
          HCatSchemaUtils.getFieldSchemas(hcatTable.getPartCols()), values);
      sd.setLocation(new Path(hcatTable.getSd().getLocation(), partName).toString());
    } catch (MetaException exception) {
      // Chain the cause so the original metastore failure is not lost.
      throw new HCatException("Could not construct default partition-path for "
          + hcatTable.getDbName() + "." + hcatTable.getTableName()
          + "[" + values + "]", exception);
    }
  }
  hivePtn.setSd(sd);
  // Record creation time now (seconds); last-access time is intentionally unset.
  hivePtn.setCreateTime((int) (System.currentTimeMillis() / 1000));
  hivePtn.setLastAccessTimeIsSet(false);
  return hivePtn;
}
/**
 * add_partitions_pspec must fail when the contained partition carries
 * neither a database nor a table name.
 */
@Test(expected = MetaException.class)
public void testAddPartitionSpecNoDBAndTableInPartition() throws Exception {
  createTable();
  Partition anonymous = buildPartition(DB_NAME, TABLE_NAME, DEFAULT_YEAR_VALUE);
  anonymous.setDbName(null);
  anonymous.setTableName(null);
  PartitionSpecProxy specProxy =
      buildPartitionSpec(DB_NAME, TABLE_NAME, null, Lists.newArrayList(anonymous));
  client.add_partitions_pspec(specProxy);
}
/**
 * Translates an internal Partition representation into the metastore thrift
 * Partition, building the storage descriptor from the partition's columns
 * and storage settings.
 */
public static org.apache.hadoop.hive.metastore.api.Partition toMetastoreApiPartition(Partition partition) {
  org.apache.hadoop.hive.metastore.api.Partition apiPartition =
      new org.apache.hadoop.hive.metastore.api.Partition();
  apiPartition.setDbName(partition.getDatabaseName());
  apiPartition.setTableName(partition.getTableName());
  apiPartition.setValues(partition.getValues());
  apiPartition.setParameters(partition.getParameters());
  apiPartition.setSd(makeStorageDescriptor(
      partition.getTableName(), partition.getColumns(), partition.getStorage()));
  return apiPartition;
}
/**
 * Creates a minimal Partition for table {@code t} with the given values;
 * no storage descriptor or parameters are set.
 */
private static Partition createPtn(Table t, List<String> pvals) {
  Partition partition = new Partition();
  partition.setDbName(t.getDbName());
  partition.setTableName(t.getTableName());
  partition.setValues(pvals);
  return partition;
}
/**
 * Convert a {@link HivePartition} into a {@link Partition}.
 *
 * Create time is taken from the HivePartition itself when present, falling
 * back to the {@code CREATE_TIME} property; last-access time comes only from
 * the {@code LAST_ACCESS_TIME} property.
 */
public static Partition getPartition(HivePartition hivePartition) {
  State props = hivePartition.getProps();

  Partition converted = new Partition();
  converted.setDbName(hivePartition.getDbName());
  converted.setTableName(hivePartition.getTableName());
  converted.setValues(hivePartition.getValues());
  converted.setParameters(getParameters(props));

  // Explicit create time wins over the property-based fallback.
  if (hivePartition.getCreateTime().isPresent()) {
    converted.setCreateTime(Ints.checkedCast(hivePartition.getCreateTime().get()));
  } else if (props.contains(HiveConstants.CREATE_TIME)) {
    converted.setCreateTime(props.getPropAsInt(HiveConstants.CREATE_TIME));
  }
  if (props.contains(HiveConstants.LAST_ACCESS_TIME)) {
    converted.setLastAccessTime(props.getPropAsInt(HiveConstants.LAST_ACCESS_TIME));
  }

  converted.setSd(getStorageDescriptor(hivePartition));
  return converted;
}
/**
 * When the partition itself carries no db/table name, the names set on the
 * PartitionSpecProxy must be applied and the add must succeed.
 */
@Test
public void testAddPartitionSpecDBAndTableSetFromSpecProxy() throws Exception {
  createTable();
  Partition anonymous = buildPartition(DB_NAME, TABLE_NAME, DEFAULT_YEAR_VALUE);
  anonymous.setDbName(null);
  anonymous.setTableName(null);

  PartitionSpecProxy specProxy =
      buildPartitionSpec(null, null, null, Lists.newArrayList(anonymous));
  specProxy.setDbName(DB_NAME);
  specProxy.setTableName(TABLE_NAME);
  client.add_partitions_pspec(specProxy);

  Partition stored =
      client.getPartition(DB_NAME, TABLE_NAME, Lists.newArrayList(DEFAULT_YEAR_VALUE));
  Assert.assertNotNull(stored);
}
/**
 * Builds a single-value partition for table {@code t}, registers it in the
 * metastore, and returns it.
 *
 * @param t        owning table
 * @param value    the single partition value
 * @param sortCols sort order to place in the storage descriptor
 */
protected Partition newPartition(Table t, String value, List<Order> sortCols) throws Exception {
  Partition partition = new Partition();
  partition.addToValues(value);
  partition.setDbName(t.getDbName());
  partition.setTableName(t.getTableName());
  partition.setSd(newStorageDescriptor(getLocation(t.getTableName(), value), sortCols));
  partition.setParameters(new HashMap<String, String>());
  ms.add_partition(partition);
  return partition;
}
/**
 * Adds a partition for {@code tbl} with the given values; its location is
 * the table location extended with the standard partition path.
 */
private static void addPartition(IMetaStoreClient client, Table tbl,
    List<String> partValues) throws IOException, TException {
  StorageDescriptor partSd = new StorageDescriptor(tbl.getSd());
  partSd.setLocation(partSd.getLocation() + Path.SEPARATOR
      + makePartPath(tbl.getPartitionKeys(), partValues));

  Partition partition = new Partition();
  partition.setDbName(tbl.getDbName());
  partition.setTableName(tbl.getTableName());
  partition.setSd(partSd);
  partition.setValues(partValues);
  client.add_partition(partition);
}
/**
 * Registers a partition of {@code table} with the given values, locating it
 * at the table location plus {@code location}.
 */
private void add_partition(HiveMetaStoreClient client, Table table,
    List<String> vals, String location) throws TException {
  Partition partition = new Partition();
  partition.setDbName(table.getDbName());
  partition.setTableName(table.getTableName());
  partition.setValues(vals);
  partition.setParameters(new HashMap<>());
  partition.setSd(table.getSd().deepCopy());
  // NOTE(review): the serde is re-pointed at the table's own instance (not the
  // deep copy), so the table and partition share one SerDeInfo — presumably
  // intentional for this test helper.
  partition.getSd().setSerdeInfo(table.getSd().getSerdeInfo());
  partition.getSd().setLocation(table.getSd().getLocation() + location);
  client.add_partition(partition);
}
/**
 * Registers a partition of {@code table} with the given values, locating it
 * at the table location plus {@code location}.
 */
private void addPartition(HiveMetaStoreClient client, Table table,
    List<String> vals, String location) throws TException {
  Partition partition = new Partition();
  partition.setDbName(table.getDbName());
  partition.setTableName(table.getTableName());
  partition.setValues(vals);
  partition.setParameters(new HashMap<String, String>());
  partition.setSd(table.getSd().deepCopy());
  // NOTE(review): the serde is re-pointed at the table's own instance (not the
  // deep copy), so the table and partition share one SerDeInfo — presumably
  // intentional for this test helper.
  partition.getSd().setSerdeInfo(table.getSd().getSerdeInfo());
  partition.getSd().setLocation(table.getSd().getLocation() + location);
  client.add_partition(partition);
}
}
/**
 * Builds a Partition object for {@code tbl} at the table location extended
 * by {@code ptnLocationSuffix}, then fast-updates its basic stats.
 *
 * @throws MetaException if stats computation fails
 */
private static Partition makePartitionObject(String dbName, String tblName,
    List<String> ptnVals, Table tbl, String ptnLocationSuffix) throws MetaException {
  Partition partition = new Partition();
  partition.setDbName(dbName);
  partition.setTableName(tblName);
  partition.setValues(ptnVals);
  partition.setParameters(new HashMap<>());
  partition.setSd(tbl.getSd().deepCopy());
  partition.getSd().setSerdeInfo(tbl.getSd().getSerdeInfo().deepCopy());
  partition.getSd().setLocation(tbl.getSd().getLocation() + ptnLocationSuffix);
  // Populate numFiles/totalSize etc. from the filesystem before returning.
  MetaStoreServerUtils.updatePartitionStatsFast(partition, tbl, warehouse, false, false, null, true);
  return partition;
}