Refine search
/**
 * Gets all partitions of the table that match the given partial specification.
 * Partition columns whose value may be anything should map to an empty string.
 *
 * @param tbl table for which partitions are needed; must be partitioned
 * @param partialPartSpec partial partition specification (some subpartitions can be empty)
 * @return list of partition objects
 * @throws HiveException if the table is not partitioned
 */
public List<Partition> getPartitionsByNames(Table tbl, Map<String, String> partialPartSpec)
    throws HiveException {
  if (!tbl.isPartitioned()) {
    throw new HiveException(ErrorMsg.TABLE_NOT_PARTITIONED, tbl.getTableName());
  }
  // Resolve the partial spec to concrete partition names, then fetch those partitions.
  List<String> partNames =
      getPartitionNames(tbl.getDbName(), tbl.getTableName(), partialPartSpec, (short) -1);
  return getPartitionsByNames(tbl, partNames);
}
/**
 * Get all the partitions; unlike {@link #getPartitions(Table)}, does not include auth.
 *
 * @param tbl table for which partitions are needed
 * @return set of partition objects
 * @throws HiveException if the metastore listing fails
 */
public Set<Partition> getAllPartitionsOf(Table tbl) throws HiveException {
  if (!tbl.isPartitioned()) {
    // An unpartitioned table is represented by a single pseudo-partition.
    return Sets.newHashSet(new Partition(tbl));
  }
  List<org.apache.hadoop.hive.metastore.api.Partition> metaParts;
  try {
    metaParts = getMSC().listPartitions(tbl.getDbName(), tbl.getTableName(), (short) -1);
  } catch (Exception e) {
    LOG.error(StringUtils.stringifyException(e));
    throw new HiveException(e);
  }
  // LinkedHashSet preserves the metastore's ordering while deduplicating.
  Set<Partition> result = new LinkedHashSet<Partition>(metaParts.size());
  for (org.apache.hadoop.hive.metastore.api.Partition mp : metaParts) {
    result.add(new Partition(tbl, mp));
  }
  return result;
}
/**
 * Get a list of Partitions by filter.
 *
 * @param tbl The table containing the partitions.
 * @param filter A string representing partition predicates.
 * @return a list of partitions satisfying the partition predicates.
 * @throws HiveException if the table is not partitioned
 * @throws MetaException
 * @throws NoSuchObjectException
 * @throws TException
 */
public List<Partition> getPartitionsByFilter(Table tbl, String filter)
    throws HiveException, MetaException, NoSuchObjectException, TException {
  if (!tbl.isPartitioned()) {
    throw new HiveException(ErrorMsg.TABLE_NOT_PARTITIONED, tbl.getTableName());
  }
  List<org.apache.hadoop.hive.metastore.api.Partition> metaParts =
      getMSC().listPartitionsByFilter(tbl.getDbName(), tbl.getTableName(), filter, (short) -1);
  return convertFromMetastore(tbl, metaParts);
}
/**
 * Get a number of Partitions by filter.
 *
 * @param tbl The table containing the partitions.
 * @param filter A string representing partition predicates.
 * @return the number of partitions satisfying the partition predicates.
 * @throws HiveException if the table is not partitioned
 * @throws MetaException
 * @throws NoSuchObjectException
 * @throws TException
 */
public int getNumPartitionsByFilter(Table tbl, String filter)
    throws HiveException, MetaException, NoSuchObjectException, TException {
  if (!tbl.isPartitioned()) {
    // Use the shared TABLE_NOT_PARTITIONED error for consistency with
    // getPartitionsByFilter/getPartitionsByNames; the previous ad-hoc message
    // referred to a "partition spec", which this method does not take.
    throw new HiveException(ErrorMsg.TABLE_NOT_PARTITIONED, tbl.getTableName());
  }
  return getMSC().getNumPartitionsByFilter(tbl.getDbName(), tbl.getTableName(), filter);
}
/**
 * Retrieves every partition of the table matching the supplied partial
 * specification. A partition column that may take any value should be mapped
 * to an empty string in the spec.
 *
 * @param tbl object for which partitions are needed; must be partitioned
 * @param partialPartSpec partial partition specification (some subpartitions can be empty)
 * @return list of partition objects
 * @throws HiveException if the table is not partitioned
 */
public List<Partition> getPartitionsByNames(Table tbl, Map<String, String> partialPartSpec)
    throws HiveException {
  if (!tbl.isPartitioned()) {
    throw new HiveException(ErrorMsg.TABLE_NOT_PARTITIONED, tbl.getTableName());
  }
  // First expand the partial spec into the full set of matching partition
  // names, then delegate to the name-based lookup.
  List<String> matchingNames = getPartitionNames(
      tbl.getDbName(), tbl.getTableName(), partialPartSpec, (short) -1);
  List<Partition> matched = getPartitionsByNames(tbl, matchingNames);
  return matched;
}
if (tbl.isPartitioned() && Boolean.TRUE.equals(tableUsePartLevelAuth.get(tbl.getTableName()))) { String alias_id = topOpMap.getKey();
/**
 * Get all the partitions that the table has.
 *
 * @param tbl object for which partitions are needed
 * @return list of partition objects
 * @throws HiveException if the metastore listing fails
 */
public List<Partition> getPartitions(Table tbl) throws HiveException {
  if (!tbl.isPartitioned()) {
    // An unpartitioned table is modeled as a single pseudo-partition.
    ArrayList<Partition> single = new ArrayList<Partition>(1);
    single.add(new Partition(tbl));
    return single;
  }
  List<org.apache.hadoop.hive.metastore.api.Partition> metaParts;
  try {
    metaParts = getMSC().listPartitionsWithAuthInfo(tbl.getDbName(), tbl.getTableName(),
        (short) -1, getUserName(), getGroupNames());
  } catch (Exception e) {
    LOG.error(StringUtils.stringifyException(e));
    throw new HiveException(e);
  }
  List<Partition> result = new ArrayList<Partition>(metaParts.size());
  for (org.apache.hadoop.hive.metastore.api.Partition mp : metaParts) {
    result.add(new Partition(tbl, mp));
  }
  return result;
}
/**
 * Fetches the partitions of a table that satisfy a filter predicate.
 *
 * @param tbl The table containing the partitions.
 * @param filter A string representing partition predicates.
 * @return a list of partitions satisfying the partition predicates.
 * @throws HiveException if the table is not partitioned
 * @throws MetaException
 * @throws NoSuchObjectException
 * @throws TException
 */
public List<Partition> getPartitionsByFilter(Table tbl, String filter)
    throws HiveException, MetaException, NoSuchObjectException, TException {
  if (!tbl.isPartitioned()) {
    throw new HiveException(ErrorMsg.TABLE_NOT_PARTITIONED, tbl.getTableName());
  }
  // Push the filter down to the metastore, then wrap the thrift partitions.
  List<org.apache.hadoop.hive.metastore.api.Partition> thriftParts =
      getMSC().listPartitionsByFilter(
          tbl.getDbName(), tbl.getTableName(), filter, (short) -1);
  return convertFromMetastore(tbl, thriftParts);
}
if (isValuesTempTable(part.getTable().getTableName())) { continue; if (part.getTable().isPartitioned()) { newInput = new ReadEntity(part, parentViewInfo, isDirectRead); } else {
/**
 * Get a number of Partitions by filter.
 *
 * @param tbl The table containing the partitions.
 * @param filter A string representing partition predicates.
 * @return the number of partitions satisfying the partition predicates.
 * @throws HiveException if the table is not partitioned
 * @throws MetaException
 * @throws NoSuchObjectException
 * @throws TException
 */
public int getNumPartitionsByFilter(Table tbl, String filter)
    throws HiveException, MetaException, NoSuchObjectException, TException {
  if (!tbl.isPartitioned()) {
    // Align with the sibling filter/name lookups, which report
    // ErrorMsg.TABLE_NOT_PARTITIONED with the table name; the old free-form
    // message mentioned a "partition spec" although this method takes a filter.
    throw new HiveException(ErrorMsg.TABLE_NOT_PARTITIONED, tbl.getTableName());
  }
  return getMSC().getNumPartitionsByFilter(tbl.getDbName(), tbl.getTableName(), filter);
}
/**
 * Get all the partitions; unlike {@link #getPartitions(Table)}, does not include auth.
 *
 * @param tbl table for which partitions are needed
 * @return set of partition objects
 * @throws HiveException if the metastore listing fails
 */
public Set<Partition> getAllPartitionsOf(Table tbl) throws HiveException {
  if (!tbl.isPartitioned()) {
    // Represent an unpartitioned table as a set holding one pseudo-partition.
    return Sets.newHashSet(new Partition(tbl));
  }
  List<org.apache.hadoop.hive.metastore.api.Partition> thriftParts;
  try {
    thriftParts = getMSC().listPartitions(tbl.getDbName(), tbl.getTableName(), (short) -1);
  } catch (Exception e) {
    LOG.error(StringUtils.stringifyException(e));
    throw new HiveException(e);
  }
  // Wrap each thrift partition, keeping the listing order via LinkedHashSet.
  Set<Partition> wrapped = new LinkedHashSet<Partition>(thriftParts.size());
  for (org.apache.hadoop.hive.metastore.api.Partition tp : thriftParts) {
    wrapped.add(new Partition(tbl, tp));
  }
  return wrapped;
}
if (!baseTbl.isPartitioned()) { new PartitionDesc(desc, null), indexTbl.getTableName(), new PartitionDesc(Utilities.getTableDesc(baseTbl), null), baseTbl.getTableName(), indexTbl.getDbName()); indexBuilderTasks.add(indexBuilder); } else { new PartitionDesc(indexPart), indexTbl.getTableName(), new PartitionDesc(basePart), baseTbl.getTableName(), indexTbl.getDbName()); indexBuilderTasks.add(indexBuilder);
/**
 * Returns every partition the table has, with auth info for the current user.
 *
 * @param tbl object for which partitions are needed
 * @return list of partition objects
 * @throws HiveException if the metastore listing fails
 */
public List<Partition> getPartitions(Table tbl) throws HiveException {
  if (tbl.isPartitioned()) {
    List<org.apache.hadoop.hive.metastore.api.Partition> thriftParts;
    try {
      thriftParts = getMSC().listPartitionsWithAuthInfo(tbl.getDbName(),
          tbl.getTableName(), (short) -1, getUserName(), getGroupNames());
    } catch (Exception e) {
      LOG.error(StringUtils.stringifyException(e));
      throw new HiveException(e);
    }
    List<Partition> wrapped = new ArrayList<Partition>(thriftParts.size());
    for (org.apache.hadoop.hive.metastore.api.Partition tp : thriftParts) {
      wrapped.add(new Partition(tbl, tp));
    }
    return wrapped;
  }
  // Unpartitioned: hand back a single pseudo-partition for the whole table.
  ArrayList<Partition> pseudo = new ArrayList<Partition>(1);
  pseudo.add(new Partition(tbl));
  return pseudo;
}
if (isValuesTempTable(part.getTable().getTableName())) { continue; if (part.getTable().isPartitioned()) { newInput = new ReadEntity(part, parentViewInfo, isDirectRead); } else {
throws HiveException { if (!tbl.isPartitioned()) { throw new HiveException(ErrorMsg.TABLE_NOT_PARTITIONED, tbl.getTableName()); for (int i = 0; i < nBatches; ++i) { List<org.apache.hadoop.hive.metastore.api.Partition> tParts = getMSC().getPartitionsByNames(tbl.getDbName(), tbl.getTableName(), partNames.subList(i*batchSize, (i+1)*batchSize), getColStats); if (tParts != null) { getMSC().getPartitionsByNames(tbl.getDbName(), tbl.getTableName(), partNames.subList(nBatches*batchSize, nParts), getColStats); if (tParts != null) {
throws HiveException { if (!tbl.isPartitioned()) { throw new HiveException(ErrorMsg.TABLE_NOT_PARTITIONED, tbl.getTableName()); for (int i = 0; i < nBatches; ++i) { List<org.apache.hadoop.hive.metastore.api.Partition> tParts = getMSC().getPartitionsByNames(tbl.getDbName(), tbl.getTableName(), partNames.subList(i*batchSize, (i+1)*batchSize)); if (tParts != null) { getMSC().getPartitionsByNames(tbl.getDbName(), tbl.getTableName(), partNames.subList(nBatches*batchSize, nParts)); if (tParts != null) {
private Long getRowCnt( ParseContext pCtx, TableScanOperator tsOp, Table tbl) throws HiveException { Long rowCnt = 0L; if (tbl.isPartitioned()) { for (Partition part : pctx.getPrunedPartitions( tsOp.getConf().getAlias(), tsOp).getPartitions()) { Logger.debug("Table doesn't have up to date stats " + tbl.getTableName()); rowCnt = null;
private Long getRowCnt( ParseContext pCtx, TableScanOperator tsOp, Table tbl) throws HiveException { Long rowCnt = 0L; if (tbl.isPartitioned()) { for (Partition part : pctx.getPrunedPartitions( tsOp.getConf().getAlias(), tsOp).getPartitions()) { Logger.debug("Table doesn't have up to date stats " + tbl.getTableName()); rowCnt = null;
private void analyzeCacheMetadata(ASTNode ast) throws SemanticException { Table tbl = AnalyzeCommandUtils.getTable(ast, this); Map<String,String> partSpec = null; CacheMetadataDesc desc; // In 2 cases out of 3, we could pass the path and type directly to metastore... if (AnalyzeCommandUtils.isPartitionLevelStats(ast)) { partSpec = AnalyzeCommandUtils.getPartKeyValuePairsFromAST(tbl, ast, conf); Partition part = getPartition(tbl, partSpec, true); desc = new CacheMetadataDesc(tbl.getDbName(), tbl.getTableName(), part.getName()); inputs.add(new ReadEntity(part)); } else { // Should we get all partitions for a partitioned table? desc = new CacheMetadataDesc(tbl.getDbName(), tbl.getTableName(), tbl.isPartitioned()); inputs.add(new ReadEntity(tbl)); } rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc))); }
short limit) throws HiveException { if (!tbl.isPartitioned()) { throw new HiveException(ErrorMsg.TABLE_NOT_PARTITIONED, tbl.getTableName()); partitions = getMSC().listPartitionsWithAuthInfo(tbl.getDbName(), tbl.getTableName(), partialPvals, limit, getUserName(), getGroupNames()); } catch (Exception e) {