// Convenience overload: delegates to the three-argument version with null params.
public void addPartition(Map<String, String> partSpec, String location) {
  addPartition(partSpec, location, null);
}
/**
 * Legacy single-partition constructor for ImportSemanticAnalyzer.
 *
 * @param dbName    database to add to.
 * @param tableName table to add to.
 * @param partSpec  partition specification.
 * @param location  partition location, relative to table location.
 * @param params    partition parameters.
 */
@Deprecated
public AddPartitionDesc(String dbName, String tableName,
    Map<String, String> partSpec, String location, Map<String, String> params) {
  super();
  this.dbName = dbName;
  this.tableName = tableName;
  this.ifNotExists = true;
  addPartition(partSpec, location, params);
}
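// A minimal sketch of the non-deprecated path that replaces the legacy
// single-partition constructor above: build an AddPartitionDesc with the
// (dbName, tableName, ifNotExists) constructor seen in the other snippets,
// then call addPartition(). The database name, table name, spec values,
// location, and params map here are illustrative assumptions, not code
// from the original sources.
Map<String, String> partSpec = new HashMap<String, String>();
partSpec.put("ds", "2014-01-01");
Map<String, String> params = new HashMap<String, String>();
AddPartitionDesc desc = new AddPartitionDesc("mydb", "mytable", true); // ifNotExists = true
desc.addPartition(partSpec, "/warehouse/mytable/ds=2014-01-01", params);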
private static void createPartitionIfNotExists(HiveEndPoint ep,
    IMetaStoreClient msClient, HiveConf conf) throws PartitionCreationFailed {
  if (ep.partitionVals.isEmpty()) {
    return;
  }

  try {
    org.apache.hadoop.hive.ql.metadata.Table tableObject =
        new org.apache.hadoop.hive.ql.metadata.Table(msClient.getTable(ep.database, ep.table));
    Map<String, String> partSpec =
        Warehouse.makeSpecFromValues(tableObject.getPartitionKeys(), ep.partitionVals);
    AddPartitionDesc addPartitionDesc = new AddPartitionDesc(ep.database, ep.table, true);
    String partLocation =
        new Path(tableObject.getDataLocation(), Warehouse.makePartPath(partSpec)).toString();
    addPartitionDesc.addPartition(partSpec, partLocation);
    Partition partition =
        Hive.convertAddSpecToMetaPartition(tableObject, addPartitionDesc.getPartition(0), conf);
    msClient.add_partition(partition);
  } catch (AlreadyExistsException e) {
    // Ignore this - multiple clients may be trying to create the same partition.
    // AddPartitionDesc has an ifNotExists flag, but it is not propagated to
    // HMSHandler.add_partitions_core() and so it throws...
  } catch (HiveException | TException e) {
    LOG.error("Failed to create partition : " + ep, e);
    throw new PartitionCreationFailed(ep, e);
  }
}
@Override
public PartitionInfo createPartitionIfNotExists(final List<String> partitionValues)
    throws StreamingException {
  String partLocation = null;
  String partName = null;
  boolean exists = false;
  try {
    Map<String, String> partSpec =
        Warehouse.makeSpecFromValues(tableObject.getPartitionKeys(), partitionValues);
    AddPartitionDesc addPartitionDesc = new AddPartitionDesc(database, table, true);
    partName = Warehouse.makePartName(tableObject.getPartitionKeys(), partitionValues);
    partLocation =
        new Path(tableObject.getDataLocation(), Warehouse.makePartPath(partSpec)).toString();
    addPartitionDesc.addPartition(partSpec, partLocation);
    Partition partition =
        Hive.convertAddSpecToMetaPartition(tableObject, addPartitionDesc.getPartition(0), conf);

    if (getMSC() == null) {
      // We assume the partition doesn't exist if we can't check it,
      // so the driver will decide.
      return new PartitionInfo(partName, partLocation, false);
    }

    getMSC().add_partition(partition);
    if (LOG.isDebugEnabled()) {
      LOG.debug("Created partition {} for table {}", partName,
          tableObject.getFullyQualifiedName());
    }
  } catch (AlreadyExistsException e) {
    exists = true;
  } catch (HiveException | TException e) {
    throw new StreamingException("Unable to create partition for values: " + partitionValues
        + " connection: " + toConnectionInfoString(), e);
  }
  return new PartitionInfo(partName, partLocation, exists);
}
for (CheckResult.PartitionResult part : partsNotInMs) {
  counter++;
  apd.addPartition(Warehouse.makeSpecFromName(part.getPartitionName()), null);
  repairOutput.add("Repair: Added partition to metastore "
      + msckDesc.getTableName() + ':' + part.getPartitionName());
}
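// Hedged aside on the repair loop above: Warehouse.makeSpecFromName() (from
// org.apache.hadoop.hive.metastore.Warehouse) parses a partition name such as
// "ds=2014-01-01/hr=12" back into an ordered spec map, and passing null as the
// location lets Hive derive the default path under the table directory.
// The partition name below is an illustrative assumption.
Map<String, String> spec = Warehouse.makeSpecFromName("ds=2014-01-01/hr=12");
// spec now maps ds -> 2014-01-01 and hr -> 12
apd.addPartition(spec, null);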
case HiveParser.TOK_PARTSPEC:
  if (currentPart != null) {
    addPartitionDesc.addPartition(currentPart, currentLocation);
    currentLocation = null;
  }
Map<String, String> partSpec = new HashMap<String, String>();
partSpec.put("dummy_partition_col", "dummy_val");
partitionDesc.addPartition(partSpec, partDir.toUri().toString());
Hive.get(conf).createPartitions(partitionDesc);
log.info("{}: Added partition {}", tableName, partDir.toUri().toString());
Map<String, String> partSpec = new HashMap<String, String>();
partSpec.put(TEMP_TABLE_PART_COL, TEMP_TABLE_PART_VAL);
partitionDesc.addPartition(partSpec, dataLocation);
hiveClient.createPartitions(partitionDesc);
log.info("Created partition in {} for data in {}", tableName, dataLocation);
addParts.addPartition(addPartitionDesc.getStoragePartSpec(), location);
int curIndex = addParts.getPartitionCount() - 1;
addParts.getPartition(curIndex).setPartParams(partParams);
AddPartitionDesc.OnePartitionDesc latestPartWithFullTimestamp = addParts.getPartition(
    latestPartIndexForPartCols.get(nonTimeParts).get(latestPartCol));
addParts.addPartition(
    StorageConstants.getLatestPartSpec(latestPartWithFullTimestamp.getPartSpec(), latestPartCol),
    latestPartWithFullTimestamp.getLocation());
if (latest != null && latest.part != null) {
  AddPartitionDesc latestPart = new AddPartitionDesc(dbName, storageTableName, true);
  latestPart.addPartition(
      StorageConstants.getLatestPartSpec(latest.part.getSpec(), latestPartCol),
      latest.part.getLocation());
  latestPart.getPartition(0).setPartParams(