/**
 * Given a partition specification, return the path corresponding to the
 * partition spec. By default, the specification does not include dynamic partitions.
 * @param spec partition specification, mapping partition column name to value
 * @return string representation of the partition specification, usable as a relative path
 * @throws MetaException if a partition name cannot be built from {@code spec}
 */
public static String makePartPath(Map<String, String> spec) throws MetaException {
  return makePartName(spec, true);
}
/**
 * Builds a partition name from the given partition key columns and their values.
 * Delegates to the three-argument overload with a {@code null} third argument.
 * @param partCols partition key columns of the table
 * @param vals partition values, positionally matching {@code partCols}
 * @return the constructed partition name
 * @throws MetaException if the name cannot be constructed
 */
public static String makePartName(List<FieldSchema> partCols, List<String> vals)
    throws MetaException {
  return makePartName(partCols, vals, null);
}
@Override
public boolean doesPartitionExist(String catName, String dbName, String tableName,
    List<FieldSchema> partKeys, List<String> partVals) throws MetaException {
  // Build the canonical partition name and probe the backing store for it.
  final String partitionName = Warehouse.makePartName(partKeys, partVals);
  return getMPartition(catName, dbName, tableName, partitionName) != null;
}
/**
 * Generates the name for this prefix partial partition specification.
 * @return the partial partition name built from {@code fields} and {@code values}
 * @throws HiveException if the underlying name construction fails
 */
public String getName() throws HiveException {
  final String partialName;
  try {
    partialName = Warehouse.makePartName(fields, values);
  } catch (MetaException e) {
    throw new HiveException("Unable to create partial name", e);
  }
  return partialName;
}
}
/**
 * Generates the name for this prefix partial partition specification.
 * @return the partial partition name built from {@code fields} and {@code values}
 * @throws HiveException if the underlying name construction fails
 */
public String getName() throws HiveException {
  final String partialName;
  try {
    partialName = Warehouse.makePartName(fields, values);
  } catch (MetaException e) {
    throw new HiveException("Unable to create partial name", e);
  }
  return partialName;
}
}
private void addPreviousPartitions(Table t, List<String> allPartNames, int currentBatchStart, List<Partition> currentBatch, int currentIxInBatch, List<String> cols, Map<String, List<String>> partsToAnalyze) throws MetaException { // Add all the names for previous batches. for (int i = 0; i < currentBatchStart; ++i) { LOG.trace("Adding previous {}, {}", allPartNames.get(i), cols); partsToAnalyze.put(allPartNames.get(i), cols); } // Current match may be out of order w.r.t. the global name list, so add specific parts. for (int i = 0; i < currentIxInBatch; ++i) { String name = Warehouse.makePartName(t.getPartitionKeys(), currentBatch.get(i).getValues()); LOG.trace("Adding previous {}, {}", name, cols); partsToAnalyze.put(name, cols); } }
/** Returns this partition's name, built from the table's partition columns and this partition's values. */
public String getName() {
  final String partName;
  try {
    partName = Warehouse.makePartName(table.getPartCols(), tPartition.getValues());
  } catch (MetaException e) {
    throw new RuntimeException(e);
  }
  return partName;
}
/**
 * Builds the canonical name for the given partition of a table.
 * @param table the table owning the partition
 * @param partition the partition whose name is wanted
 * @return the partition name derived from the table's partition columns and the partition's values
 */
public static String getPartitionName(Table table, Partition partition) {
  final String name;
  try {
    name = Warehouse.makePartName(getPartCols(table), partition.getValues());
  } catch (MetaException e) {
    // Callers require a name; surface the failure as unchecked.
    throw new RuntimeException(e);
  }
  return name;
}
/** Returns this partition's name, built from the table's partition columns and this partition's values. */
public String getName() {
  final String partName;
  try {
    partName = Warehouse.makePartName(table.getPartCols(), tPartition.getValues());
  } catch (MetaException e) {
    throw new RuntimeException(e);
  }
  return partName;
}
@SuppressWarnings("nls") @Override public String toString() { String pn = "Invalid Partition"; try { pn = Warehouse.makePartName(getSpec(), false); } catch (MetaException e) { // ignore as we most probably in an exception path already otherwise this // error wouldn't occur } return table.toString() + "(" + pn + ")"; }
/**
 * Registers a partition in the in-memory map, failing if one with the same name
 * is already present.
 * @throws AlreadyExistsException if a partition with the same name exists
 * @throws MetaException if the partition name cannot be constructed
 */
private void addPartition(Partition p) throws AlreadyExistsException, MetaException {
  String partName = Warehouse.makePartName(tTable.getPartitionKeys(), p.getValues());
  Partition previous = parts.putIfAbsent(partName, p);
  if (previous != null) {
    throw new AlreadyExistsException("Partition " + partName + " already exists");
  }
}
/**
@SuppressWarnings("nls") @Override public String toString() { String pn = "Invalid Partition"; try { pn = Warehouse.makePartName(getSpec(), false); } catch (MetaException e) { // ignore as we most probably in an exception path already otherwise this // error wouldn't occur } return table.toString() + "(" + pn + ")"; }
/** Returns the names of all partitions in the metadata, using the given table descriptor's partition columns. */
@Override
public List<String> partitions(ImportTableDesc tblDesc) throws SemanticException {
  List<String> names = new ArrayList<>();
  try {
    for (Partition p : metadata.getPartitions()) {
      names.add(Warehouse.makePartName(tblDesc.getPartCols(), p.getValues()));
    }
  } catch (MetaException e) {
    throw new SemanticException(e);
  }
  return names;
}
/**
 * Creates the path where partitions matching this prefix should lie in the filesystem.
 * @param tbl table in which the partition is
 * @return expected location of partitions matching the prefix in the filesystem
 * @throws HiveException if the prefix cannot be built or the table has no location
 */
public Path createPath(Table tbl) throws HiveException {
  final String prefixDir;
  try {
    prefixDir = Warehouse.makePartName(fields, values);
  } catch (MetaException e) {
    throw new HiveException("Unable to get partitions directories prefix", e);
  }
  final Path tableDir = tbl.getDataLocation();
  if (tableDir == null) {
    throw new HiveException("Table has no location set");
  }
  return new Path(tableDir, prefixDir);
}
/**
private void addTxnWriteNotificationLog(Table tableObj, Partition ptnObj, WriteNotificationLogRequest rqst) throws MetaException { String partition = ""; //Empty string is an invalid partition name. Can be used for non partitioned table. if (ptnObj != null) { partition = Warehouse.makePartName(tableObj.getPartitionKeys(), rqst.getPartitionVals()); } AcidWriteEvent event = new AcidWriteEvent(partition, tableObj, ptnObj, rqst); getTxnHandler().addWriteNotificationLog(event); if (listeners != null && !listeners.isEmpty()) { MetaStoreListenerNotifier.notifyEvent(listeners, EventType.ACID_WRITE, event); } }
private static void scheduleCompaction(Table t, Partition p, Hive db, CompactionMetaInfo compactionMetaInfo) throws HiveException, MetaException { String partName = p == null ? null : Warehouse.makePartName(t.getPartitionKeys(), p.getValues()); CompactionResponse resp = //this gives an easy way to get at compaction ID so we can only wait for those this //utility started db.compact2(t.getDbName(), t.getTableName(), partName, "major", null); if(!resp.isAccepted()) { LOG.info(Warehouse.getQualifiedName(t) + (p == null ? "" : "/" + partName) + " is already being compacted with id=" + resp.getId()); } else { LOG.info("Scheduled compaction for " + Warehouse.getQualifiedName(t) + (p == null ? "" : "/" + partName) + " with id=" + resp.getId()); } compactionMetaInfo.compactionIds.add(resp.getId()); }
/**
 * Collects cached partition names for the table into {@code result} and filters
 * them by the given partition-pruning expression.
 */
private boolean getPartitionNamesPrunedByExprNoTxn(Table table, byte[] expr,
    String defaultPartName, short maxParts, List<String> result, SharedCache sharedCache)
    throws MetaException, NoSuchObjectException {
  String catName = StringUtils.normalizeIdentifier(table.getCatName());
  String dbName = StringUtils.normalizeIdentifier(table.getDbName());
  String tblName = StringUtils.normalizeIdentifier(table.getTableName());
  for (Partition part : sharedCache.listCachedPartitions(catName, dbName, tblName, maxParts)) {
    result.add(Warehouse.makePartName(table.getPartitionKeys(), part.getValues()));
  }
  if (defaultPartName == null || defaultPartName.isEmpty()) {
    defaultPartName = MetastoreConf.getVar(getConf(), ConfVars.DEFAULTPARTITIONNAME);
  }
  return expressionProxy.filterPartitionsByExpr(table.getPartitionKeys(), expr, defaultPartName,
      result);
}
/**
 * Resolves the partition name for the given object reference, or {@code null}
 * when the reference carries no partition values.
 */
private String getPartName(HiveObjectRef hiveObject) throws MetaException {
  List<String> partValues = hiveObject.getPartValues();
  if (partValues == null || partValues.isEmpty()) {
    return null;
  }
  try {
    String catName = hiveObject.isSetCatName() ? hiveObject.getCatName() : getDefaultCatalog(conf);
    Table table = get_table_core(catName, hiveObject.getDbName(), hiveObject.getObjectName());
    return Warehouse.makePartName(table.getPartitionKeys(), partValues);
  } catch (NoSuchObjectException e) {
    throw new MetaException(e.getMessage());
  }
}
@Override
public void onAlterPartition(AlterPartitionEvent partitionEvent) throws MetaException {
  // Only transactional tables are tracked by the txn handler.
  if (!TxnUtils.isTransactionalTable(partitionEvent.getTable())) {
    return;
  }
  Table t = partitionEvent.getTable();
  String before =
      Warehouse.makePartName(t.getPartitionKeys(), partitionEvent.getOldPartition().getValues());
  String after =
      Warehouse.makePartName(t.getPartitionKeys(), partitionEvent.getNewPartition().getValues());
  if (before.equals(after)) {
    return;
  }
  txnHandler = getTxnHandler();
  txnHandler.onRename(t.getCatName(), t.getDbName(), t.getTableName(), before,
      t.getCatName(), t.getDbName(), t.getTableName(), after);
}
@Override
/** Exchanging one partition moves it to the destination table and leaves the rest in the source. */
@Test
public void testExchangePartitions() throws Exception {
  Map<String, String> partitionSpecs = getPartitionSpec(partitions[1]);
  List<Partition> exchanged = client.exchange_partitions(partitionSpecs,
      sourceTable.getDbName(), sourceTable.getTableName(),
      destTable.getDbName(), destTable.getTableName());
  Assert.assertEquals(1, exchanged.size());
  String expectedName =
      Warehouse.makePartName(sourceTable.getPartitionKeys(), partitions[1].getValues());
  String actualName =
      Warehouse.makePartName(sourceTable.getPartitionKeys(), exchanged.get(0).getValues());
  Assert.assertEquals(expectedName, actualName);
  checkExchangedPartitions(sourceTable, destTable, Lists.newArrayList(partitions[1]));
  checkRemainingPartitions(sourceTable, destTable,
      Lists.newArrayList(partitions[0], partitions[2], partitions[3], partitions[4]));
}