/**
 * Sets the database name on the underlying partition spec and propagates it
 * to every partition the spec currently contains, keeping the two in sync.
 */
@Override
public void setDbName(String dbName) {
  partitionSpec.setDbName(dbName);
  partitionSpec.getPartitionList()
      .getPartitions()
      .forEach(p -> p.setDbName(dbName));
}
/**
 * Adding a batch of partitions where one entry has a {@code null} database
 * name must be rejected by the metastore with a {@link MetaException}.
 */
@Test(expected = MetaException.class)
public void testAddPartitionsNullDb() throws Exception {
  createTable();
  Partition validPartition = buildPartition(DB_NAME, TABLE_NAME, "2016");
  Partition nullDbPartition = buildPartition(DB_NAME, TABLE_NAME, "2017");
  nullDbPartition.setDbName(null);
  List<Partition> toAdd = new ArrayList<>();
  toAdd.add(validPartition);
  toAdd.add(nullDbPartition);
  client.add_partitions(toAdd);
}
/**
 * Altering a partition whose db name no longer matches the table's database
 * must fail with a {@link MetaException}.
 */
@Test(expected = MetaException.class)
public void testAlterPartitionsChangeDbName() throws Exception {
  createTable4PartColsParts(client);
  List<Partition> allParts = client.listPartitions(DB_NAME, TABLE_NAME, (short) -1);
  Partition tampered = allParts.get(3);
  tampered.setDbName(DB_NAME + "_changed");
  client.alter_partitions(DB_NAME, TABLE_NAME, Lists.newArrayList(tampered));
}
/**
 * Renaming a partition whose db name has been set to {@code null} must fail
 * with a {@link MetaException}.
 */
@Test(expected = MetaException.class)
public void testRenamePartitionNullDbInPartition() throws Exception {
  // The returned partition values are not needed here (the old values are
  // spelled out literally below); the call is made only for its side effect
  // of creating the table and its partitions.
  createTable4PartColsParts(client);
  List<Partition> oldParts = client.listPartitions(DB_NAME, TABLE_NAME, (short) -1);
  Partition partToRename = oldParts.get(3);
  partToRename.setValues(Lists.newArrayList("2018", "01", "16"));
  partToRename.setDbName(null);
  client.renamePartition(DB_NAME, TABLE_NAME, Lists.newArrayList("2017", "11", "27"), partToRename);
}
/**
 * Renaming a partition while pointing it at a different database must fail
 * with a {@link MetaException}.
 */
@Test(expected = MetaException.class)
public void testRenamePartitionChangeDbName() throws Exception {
  List<List<String>> oldValues = createTable4PartColsParts(client);
  List<Partition> existing = client.listPartitions(DB_NAME, TABLE_NAME, (short) -1);
  Partition renamed = existing.get(3);
  renamed.setValues(Lists.newArrayList("2018", "01", "16"));
  renamed.setDbName(DB_NAME + "_2");
  client.renamePartition(DB_NAME, TABLE_NAME, oldValues.get(3), renamed);
}
/**
 * Altering a single partition whose db name has been changed must fail with
 * a {@link MetaException}.
 */
@Test(expected = MetaException.class)
public void testAlterPartitionChangeDbName() throws Exception {
  createTable4PartColsParts(client);
  List<Partition> allParts = client.listPartitions(DB_NAME, TABLE_NAME, (short) -1);
  Partition tampered = allParts.get(3);
  tampered.setDbName(DB_NAME + "_changed");
  client.alter_partition(DB_NAME, TABLE_NAME, tampered);
}
/**
 * The environment-context variant of alter_partitions must likewise reject a
 * partition whose db name was changed, with a {@link MetaException}.
 */
@Test(expected = MetaException.class)
public void testAlterPartitionsWithEnvironmentCtxChangeDbName() throws Exception {
  createTable4PartColsParts(client);
  List<Partition> allParts = client.listPartitions(DB_NAME, TABLE_NAME, (short) -1);
  Partition tampered = allParts.get(3);
  tampered.setDbName(DB_NAME + "_changed");
  client.alter_partitions(DB_NAME, TABLE_NAME, Lists.newArrayList(tampered), new EnvironmentContext());
}
/**
 * The environment-context variant of alter_partition must reject a partition
 * whose db name was changed, with a {@link MetaException}.
 */
@Test(expected = MetaException.class)
public void testAlterPartitionWithEnvironmentCtxChangeDbName() throws Exception {
  createTable4PartColsParts(client);
  List<Partition> allParts = client.listPartitions(DB_NAME, TABLE_NAME, (short) -1);
  Partition tampered = allParts.get(3);
  tampered.setDbName(DB_NAME + "_changed");
  client.alter_partition(DB_NAME, TABLE_NAME, tampered, new EnvironmentContext());
}
/**
 * Converts this HCatalog partition description into a Hive metastore
 * {@link Partition}.
 *
 * <p>If the storage descriptor has no location, a default partition path is
 * derived from the parent table's location and this partition's values.
 *
 * @return a populated metastore Partition
 * @throws HCatException if the default partition path cannot be constructed
 */
Partition toHivePartition() throws HCatException {
  Partition hivePtn = new Partition();
  hivePtn.setDbName(dbName);
  hivePtn.setTableName(tableName);
  hivePtn.setValues(values);
  hivePtn.setParameters(parameters);
  if (sd.getLocation() == null) {
    LOG.warn("Partition location is not set! Attempting to construct default partition location.");
    try {
      String partName = Warehouse.makePartName(
          HCatSchemaUtils.getFieldSchemas(hcatTable.getPartCols()), values);
      sd.setLocation(new Path(hcatTable.getSd().getLocation(), partName).toString());
    } catch (MetaException exception) {
      // Preserve the underlying MetaException as the cause instead of
      // silently dropping it (the original stack trace is essential for
      // diagnosing why the partition name could not be built).
      throw new HCatException("Could not construct default partition-path for "
          + hcatTable.getDbName() + "." + hcatTable.getTableName() + "[" + values + "]",
          exception);
    }
  }
  hivePtn.setSd(sd);
  // Stamp creation time in seconds since epoch, as the metastore expects.
  hivePtn.setCreateTime((int) (System.currentTimeMillis() / 1000));
  hivePtn.setLastAccessTimeIsSet(false);
  return hivePtn;
}
/**
 * Builds the target-side counterpart of {@code originPartition}: a deep copy
 * retargeted at the destination database/table/location, tagged with distcp
 * registration metadata, and stripped of its create time.
 *
 * @param originPartition source partition to copy
 * @param targetLocation  filesystem location for the copied partition
 * @return the partition object to register on the target
 * @throws IOException if the underlying Hive call fails
 */
private Partition getTargetPartition(Partition originPartition, Path targetLocation) throws IOException {
  try {
    Partition copy = new Partition(
        this.hiveCopyEntityHelper.getTargetTable(),
        originPartition.getTPartition().deepCopy());
    String targetDb = this.hiveCopyEntityHelper.getTargetDatabase();
    // Retarget both the table handle and the Thrift partition at the destination db.
    copy.getTable().setDbName(targetDb);
    copy.getTPartition().setDbName(targetDb);
    // Mark who registered this partition and when the copy run started.
    copy.getTPartition().putToParameters(HiveDataset.REGISTERER, HiveCopyEntityHelper.GOBBLIN_DISTCP);
    copy.getTPartition().putToParameters(
        HiveDataset.REGISTRATION_GENERATION_TIME_MILLIS,
        Long.toString(this.hiveCopyEntityHelper.getStartTime()));
    copy.setLocation(targetLocation.toString());
    // Let the metastore assign a fresh create time on registration.
    copy.getTPartition().unsetCreateTime();
    return copy;
  } catch (HiveException he) {
    throw new IOException(he);
  }
}
/**
 * A partition spec whose partition carries neither a db nor a table name,
 * and whose proxy also provides none, must be rejected with a
 * {@link MetaException}.
 */
@Test(expected = MetaException.class)
public void testAddPartitionSpecNoDBAndTableInPartition() throws Exception {
  createTable();
  Partition anonymous = buildPartition(DB_NAME, TABLE_NAME, DEFAULT_YEAR_VALUE);
  anonymous.setDbName(null);
  anonymous.setTableName(null);
  PartitionSpecProxy proxy = buildPartitionSpec(DB_NAME, TABLE_NAME, null, Lists.newArrayList(anonymous));
  client.add_partitions_pspec(proxy);
}
/**
 * Translates an internal {@code Partition} representation into the Hive
 * metastore Thrift {@code Partition} type.
 *
 * @param partition internal partition model to convert
 * @return the equivalent Thrift partition object
 */
public static org.apache.hadoop.hive.metastore.api.Partition toMetastoreApiPartition(Partition partition) {
  org.apache.hadoop.hive.metastore.api.Partition apiPartition =
      new org.apache.hadoop.hive.metastore.api.Partition();
  apiPartition.setDbName(partition.getDatabaseName());
  apiPartition.setTableName(partition.getTableName());
  apiPartition.setValues(partition.getValues());
  apiPartition.setSd(makeStorageDescriptor(
      partition.getTableName(), partition.getColumns(), partition.getStorage()));
  apiPartition.setParameters(partition.getParameters());
  return apiPartition;
}
/**
 * Creates a minimal in-memory partition for table {@code t} with the given
 * partition values (no storage descriptor or parameters are set).
 */
private static Partition createPtn(Table t, List<String> pvals) {
  Partition partition = new Partition();
  partition.setDbName(t.getDbName());
  partition.setTableName(t.getTableName());
  partition.setValues(pvals);
  return partition;
}
/**
 * Convert a {@link HivePartition} into a {@link Partition}.
 *
 * <p>Identity fields, parameters, timestamps (create time preferred from the
 * {@code HivePartition} itself, falling back to its props), and the storage
 * descriptor are all copied over.
 */
public static Partition getPartition(HivePartition hivePartition) {
  State props = hivePartition.getProps();

  Partition result = new Partition();
  result.setDbName(hivePartition.getDbName());
  result.setTableName(hivePartition.getTableName());
  result.setValues(hivePartition.getValues());
  result.setParameters(getParameters(props));

  // The explicitly-set create time wins over the one carried in the props.
  if (hivePartition.getCreateTime().isPresent()) {
    result.setCreateTime(Ints.checkedCast(hivePartition.getCreateTime().get()));
  } else if (props.contains(HiveConstants.CREATE_TIME)) {
    result.setCreateTime(props.getPropAsInt(HiveConstants.CREATE_TIME));
  }
  if (props.contains(HiveConstants.LAST_ACCESS_TIME)) {
    result.setLastAccessTime(props.getPropAsInt(HiveConstants.LAST_ACCESS_TIME));
  }

  result.setSd(getStorageDescriptor(hivePartition));
  return result;
}
/**
 * When a partition carries no db/table name, the names set on the spec proxy
 * itself must be applied, and the partition must be retrievable afterwards.
 */
@Test
public void testAddPartitionSpecDBAndTableSetFromSpecProxy() throws Exception {
  createTable();
  Partition anonymous = buildPartition(DB_NAME, TABLE_NAME, DEFAULT_YEAR_VALUE);
  anonymous.setDbName(null);
  anonymous.setTableName(null);
  PartitionSpecProxy proxy = buildPartitionSpec(null, null, null, Lists.newArrayList(anonymous));
  // Supply the identity through the proxy rather than the partition itself.
  proxy.setDbName(DB_NAME);
  proxy.setTableName(TABLE_NAME);
  client.add_partitions_pspec(proxy);
  Partition stored = client.getPartition(DB_NAME, TABLE_NAME, Lists.newArrayList(DEFAULT_YEAR_VALUE));
  Assert.assertNotNull(stored);
}
/**
 * Creates a single-valued partition for table {@code t}, registers it with
 * the metastore, and returns it.
 *
 * @param value    the partition-column value
 * @param sortCols sort order to record in the storage descriptor
 */
protected Partition newPartition(Table t, String value, List<Order> sortCols) throws Exception {
  Partition partition = new Partition();
  partition.addToValues(value);
  partition.setDbName(t.getDbName());
  partition.setTableName(t.getTableName());
  partition.setSd(newStorageDescriptor(getLocation(t.getTableName(), value), sortCols));
  partition.setParameters(new HashMap<String, String>());
  ms.add_partition(partition);
  return partition;
}
/**
 * Registers a new partition of {@code tbl} with the metastore, placing its
 * data directory under the table's location using the standard
 * key=value path layout.
 */
private static void addPartition(IMetaStoreClient client, Table tbl, List<String> partValues)
    throws IOException, TException {
  Partition partition = new Partition();
  partition.setDbName(tbl.getDbName());
  partition.setTableName(tbl.getTableName());
  // Clone the table's storage descriptor and point it at the partition subdir.
  StorageDescriptor descriptor = new StorageDescriptor(tbl.getSd());
  descriptor.setLocation(
      descriptor.getLocation() + Path.SEPARATOR + makePartPath(tbl.getPartitionKeys(), partValues));
  partition.setSd(descriptor);
  partition.setValues(partValues);
  client.add_partition(partition);
}
/**
 * Adds a partition of {@code table} to the metastore, storing it at the
 * table location plus the given {@code location} suffix.
 */
private void add_partition(HiveMetaStoreClient client, Table table, List<String> vals, String location)
    throws TException {
  Partition partition = new Partition();
  partition.setDbName(table.getDbName());
  partition.setTableName(table.getTableName());
  partition.setValues(vals);
  partition.setParameters(new HashMap<>());
  // Start from a copy of the table's storage descriptor, then share the
  // table's serde and append the partition-specific location suffix.
  partition.setSd(table.getSd().deepCopy());
  partition.getSd().setSerdeInfo(table.getSd().getSerdeInfo());
  partition.getSd().setLocation(table.getSd().getLocation() + location);
  client.add_partition(partition);
}
/**
 * Builds a partition object for {@code tbl} with fast-updated statistics,
 * located at the table location plus {@code ptnLocationSuffix}.
 *
 * @return the fully-populated partition (not yet added to the metastore)
 * @throws MetaException if the stats update fails
 */
private static Partition makePartitionObject(String dbName, String tblName, List<String> ptnVals,
    Table tbl, String ptnLocationSuffix) throws MetaException {
  Partition partition = new Partition();
  partition.setDbName(dbName);
  partition.setTableName(tblName);
  partition.setValues(ptnVals);
  partition.setParameters(new HashMap<>());
  // Deep-copy the table's storage descriptor (including serde) so edits here
  // cannot leak back into the table object.
  partition.setSd(tbl.getSd().deepCopy());
  partition.getSd().setSerdeInfo(tbl.getSd().getSerdeInfo().deepCopy());
  partition.getSd().setLocation(tbl.getSd().getLocation() + ptnLocationSuffix);
  MetaStoreServerUtils.updatePartitionStatsFast(partition, tbl, warehouse, false, false, null, true);
  return partition;
}
/**
 * Adds a partition of {@code table} to the metastore, storing it at the
 * table location plus the given {@code location} suffix.
 */
private void addPartition(HiveMetaStoreClient client, Table table, List<String> vals, String location)
    throws TException {
  Partition partition = new Partition();
  partition.setDbName(table.getDbName());
  partition.setTableName(table.getTableName());
  partition.setValues(vals);
  partition.setParameters(new HashMap<String, String>());
  // Copy the table's storage descriptor, share its serde, and append the
  // partition-specific location suffix.
  partition.setSd(table.getSd().deepCopy());
  partition.getSd().setSerdeInfo(table.getSd().getSerdeInfo());
  partition.getSd().setLocation(table.getSd().getLocation() + location);
  client.add_partition(partition);
}
}