public static Map<String, String> getPartitionSpec(Table table, Partition partition) {
  return Warehouse.makeSpecFromValues(getPartCols(table), partition.getValues());
}
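For orientation: Warehouse.makeSpecFromValues simply zips the table's partition columns with the supplied values into an ordered column-name-to-value map, which helpers like makePartName and makePartPath then render as a partition name or path. A minimal self-contained sketch of the call; the class name, table schema, and values here are hypothetical:

import java.util.Arrays;
import java.util.List;
import java.util.Map;

import org.apache.hadoop.hive.metastore.Warehouse;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.MetaException;

public class MakeSpecFromValuesDemo {
  public static void main(String[] args) throws MetaException {
    // Hypothetical partition schema: (year string, month string).
    List<FieldSchema> partCols = Arrays.asList(
        new FieldSchema("year", "string", null),
        new FieldSchema("month", "string", null));
    List<String> values = Arrays.asList("2023", "07");

    // Pairs each partition column name with the value at the same index,
    // preserving column order: {year=2023, month=07}.
    Map<String, String> spec = Warehouse.makeSpecFromValues(partCols, values);
    System.out.println(spec);

    // The same spec renders as a relative partition path: year=2023/month=07
    System.out.println(Warehouse.makePartPath(spec));
  }
}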
private static void createPartitionIfNotExists(HiveEndPoint ep, IMetaStoreClient msClient,
    HiveConf conf) throws PartitionCreationFailed {
  if (ep.partitionVals.isEmpty()) {
    return;
  }
  try {
    org.apache.hadoop.hive.ql.metadata.Table tableObject =
        new org.apache.hadoop.hive.ql.metadata.Table(msClient.getTable(ep.database, ep.table));
    Map<String, String> partSpec =
        Warehouse.makeSpecFromValues(tableObject.getPartitionKeys(), ep.partitionVals);
    AddPartitionDesc addPartitionDesc = new AddPartitionDesc(ep.database, ep.table, true);
    String partLocation =
        new Path(tableObject.getDataLocation(), Warehouse.makePartPath(partSpec)).toString();
    addPartitionDesc.addPartition(partSpec, partLocation);
    Partition partition =
        Hive.convertAddSpecToMetaPartition(tableObject, addPartitionDesc.getPartition(0), conf);
    msClient.add_partition(partition);
  } catch (AlreadyExistsException e) {
    // Ignore this - multiple clients may be trying to create the same partition.
    // AddPartitionDesc has an ifExists flag, but it's not propagated to
    // HMSHandler.add_partitions_core() and so it throws...
  } catch (HiveException | TException e) {
    LOG.error("Failed to create partition : " + ep, e);
    throw new PartitionCreationFailed(ep, e);
  }
}
@Override
public PartitionInfo createPartitionIfNotExists(final List<String> partitionValues)
    throws StreamingException {
  String partLocation = null;
  String partName = null;
  boolean exists = false;
  try {
    Map<String, String> partSpec =
        Warehouse.makeSpecFromValues(tableObject.getPartitionKeys(), partitionValues);
    AddPartitionDesc addPartitionDesc = new AddPartitionDesc(database, table, true);
    partName = Warehouse.makePartName(tableObject.getPartitionKeys(), partitionValues);
    partLocation =
        new Path(tableObject.getDataLocation(), Warehouse.makePartPath(partSpec)).toString();
    addPartitionDesc.addPartition(partSpec, partLocation);
    Partition partition =
        Hive.convertAddSpecToMetaPartition(tableObject, addPartitionDesc.getPartition(0), conf);
    if (getMSC() == null) {
      // We assume it doesn't exist if we can't check it, so the driver will decide.
      return new PartitionInfo(partName, partLocation, false);
    }
    getMSC().add_partition(partition);
    if (LOG.isDebugEnabled()) {
      LOG.debug("Created partition {} for table {}", partName,
          tableObject.getFullyQualifiedName());
    }
  } catch (AlreadyExistsException e) {
    exists = true;
  } catch (HiveException | TException e) {
    throw new StreamingException("Unable to create partition for values: " + partitionValues
        + " connection: " + toConnectionInfoString(), e);
  }
  return new PartitionInfo(partName, partLocation, exists);
}
Partition partObj = hms.getPartition(dbName, tableName, partName);
Map<String, String> partSpec =
    Warehouse.makeSpecFromValues(tableObj.getPartitionKeys(), partObj.getValues());
if (shouldModifyPartitionLocation(dbObj, tableObj, partObj, partSpec)) {
  destLocation = new Path(table.getSd().getLocation());
} else {
  Map<String, String> partSpec = Warehouse.makeSpecFromValues(
      table.getPartitionKeys(), partitionValues);
  try {
Warehouse.makeSpecFromValues(tableObj.getPartitionKeys(), privObject.getPartKeys());
Partition partObj = hive.getPartition(tableObj, partSpec, false).getTPartition();
partValues = partObj.getValues();
public static List<ObjectPair<Integer, byte[]>> toObjectPairs(Table table,
    List<Partition> partitions) {
  List<ObjectPair<Integer, byte[]>> pairs = new ArrayList<>(partitions.size());
  for (Partition partition : partitions) {
    Map<String, String> partitionSpec =
        Warehouse.makeSpecFromValues(table.getPartitionKeys(), partition.getValues());
    ExprNodeGenericFuncDesc partitionExpression;
    try {
      partitionExpression = new ExpressionBuilder(table, partitionSpec).build();
    } catch (SemanticException e) {
      throw new RuntimeException("Unable to build expression", e);
    }
    ObjectPair<Integer, byte[]> serializedPartitionExpression = new ObjectPair<>(
        partitionSpec.size(),
        SerializationUtilities.serializeExpressionToKryo(partitionExpression));
    pairs.add(serializedPartitionExpression);
  }
  return pairs;
}
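The ExpressionBuilder above is a helper local to that codebase, not a Hive API. For comparison, a minimal sketch of building and Kryo-serializing one equality predicate over a partition column directly from Hive's ExprNodeDesc classes; the class name, column name, and value are hypothetical:

import java.util.Arrays;

import org.apache.hadoop.hive.ql.exec.SerializationUtilities;
import org.apache.hadoop.hive.ql.exec.UDFArgumentException;
import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc;
import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;
import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqual;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;

public class PartitionPredicateSketch {
  public static void main(String[] args) throws UDFArgumentException {
    // Hypothetical predicate: year = '2023', where "year" is a partition column.
    ExprNodeDesc column =
        new ExprNodeColumnDesc(TypeInfoFactory.stringTypeInfo, "year", null, true);
    ExprNodeDesc constant =
        new ExprNodeConstantDesc(TypeInfoFactory.stringTypeInfo, "2023");
    ExprNodeGenericFuncDesc expr = ExprNodeGenericFuncDesc.newInstance(
        new GenericUDFOPEqual(), Arrays.asList(column, constant));

    // Kryo-serialized form, paired with the spec size in the snippet above.
    byte[] serialized = SerializationUtilities.serializeExpressionToKryo(expr);
    System.out.println(serialized.length + " bytes");
  }
}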
@Override
public PartitionInfo createPartitionIfNotExists(final List<String> partitionValues)
    throws StreamingException {
  String partLocation = null;
  String partName = null;
  boolean exists = false;
  try {
    Map<String, String> partSpec =
        Warehouse.makeSpecFromValues(tableObject.getPartitionKeys(), partitionValues);
    AddPartitionDesc addPartitionDesc = new AddPartitionDesc(database, table, true);
    partName = Warehouse.makePartName(tableObject.getPartitionKeys(), partitionValues);
    partLocation =
        new Path(tableObject.getDataLocation(), Warehouse.makePartPath(partSpec)).toString();
    addPartitionDesc.addPartition(partSpec, partLocation);
    Partition partition =
        Hive.convertAddSpecToMetaPartition(tableObject, addPartitionDesc.getPartition(0), conf);
    getMSC().add_partition(partition);
  } catch (AlreadyExistsException e) {
    exists = true;
  } catch (HiveException | TException e) {
    throw new StreamingException("Unable to create partition for values: " + partitionValues
        + " connection: " + toConnectionInfoString(), e);
  }
  return new PartitionInfo(partName, partLocation, exists);
}