// Session-local wrapper around a metastore API Table. A PartitionTree is only
// materialized for partitioned tables (getPartitionKeysSize() > 0); for
// unpartitioned tables pTree stays null. addPartition() below registers a new
// partition in that tree (body continues past this excerpt).
TempTable(org.apache.hadoop.hive.metastore.api.Table t) { assert t != null; this.tTable = t; pTree = t.getPartitionKeysSize() > 0 ? new PartitionTree(tTable) : null; } private void addPartition(Partition p) throws AlreadyExistsException, MetaException {
/**
 * Serializes a partition-name map into a comma-separated value string,
 * ordered by the table's partition keys.
 *
 * @param tbl the table whose partition keys define the expected columns and order
 * @param partName map of partition column name to partition value
 * @return the partition values joined with ',' in partition-key order
 * @throws InvalidPartitionException if the map's size does not match the table's
 *         partition key count, or any partition column has no value in the map
 */
private String getPartitionStr(Table tbl, Map<String,String> partName) throws InvalidPartitionException{
  if(tbl.getPartitionKeysSize() != partName.size()){
    throw new InvalidPartitionException("Number of partition columns in table: "+ tbl.getPartitionKeysSize() +
        " doesn't match with number of supplied partition values: "+partName.size());
  }
  final List<String> storedVals = new ArrayList<>(tbl.getPartitionKeysSize());
  // Walk keys in table order so the output ordering is deterministic.
  for(FieldSchema partKey : tbl.getPartitionKeys()){
    String partVal = partName.get(partKey.getName());
    if(null == partVal) {
      throw new InvalidPartitionException("No value found for partition column: "+partKey.getName());
    }
    storedVals.add(partVal);
  }
  // String.join replaces the external join(storedVals, ',') helper; output is identical.
  return String.join(",", storedVals);
}
/**
 * Pairs each of the table's partition key names with the corresponding value
 * from the partition, preserving partition-key order (LinkedHashMap keeps
 * insertion order).
 *
 * @param table the table supplying the partition key names
 * @param partition the partition supplying the values, positionally aligned with the keys
 * @return an ordered map of partition key name to partition value
 */
public static Map<String, String> getPartitionKeyValues(Table table, Partition partition) {
  Map<String, String> keyValues = new LinkedHashMap<>();
  int keyCount = table.getPartitionKeysSize();
  for (int i = 0; i < keyCount; ++i) {
    String keyName = table.getPartitionKeys().get(i).getName();
    keyValues.put(keyName, partition.getValues().get(i));
  }
  return keyValues;
}
/**
 * Builds a comma-separated list of the table's partition column names.
 *
 * @param table the table whose partition columns are listed
 * @return partition column names joined with "," (empty string for an
 *         unpartitioned table)
 */
private String buildPartColStr(Table table) {
  // StringBuilder avoids the O(n^2) cost of repeated String += in a loop.
  StringBuilder partColStr = new StringBuilder();
  for (int i = 0; i < table.getPartitionKeysSize(); ++i) {
    if (i != 0) {
      partColStr.append(',');
    }
    partColStr.append(table.getPartitionKeys().get(i).getName());
  }
  return partColStr.toString();
}
private static String getCompactionCommand(Table t, Partition p) { StringBuilder sb = new StringBuilder("ALTER TABLE ").append(Warehouse.getQualifiedName(t)); if(t.getPartitionKeysSize() > 0) { assert p != null : "must supply partition for partitioned table " + Warehouse.getQualifiedName(t); sb.append(" PARTITION("); for (int i = 0; i < t.getPartitionKeysSize(); i++) { sb.append(t.getPartitionKeys().get(i).getName()).append('=').append( genPartValueString(t.getPartitionKeys().get(i).getType(), p.getValues().get(i))). append(","); } sb.setCharAt(sb.length() - 1, ')');//replace trailing ',' } return sb.append(" COMPACT 'major'").toString(); }
/**
 * Guards operations that only make sense on a partitioned table.
 *
 * @throws MetaException if this table has no partition keys
 */
private void assertPartitioned() throws MetaException {
  if (tTable.getPartitionKeysSize() > 0) {
    return;
  }
  throw new MetaException(Warehouse.getQualifiedName(tTable) + " is not partitioned");
}
private int getSerDeOverheadFactor() { final int projectedColumnCount; if (Utilities.isStarQuery(columns)) { Table hiveTable = hiveReadEntry.getTable(); projectedColumnCount = hiveTable.getSd().getColsSize() + hiveTable.getPartitionKeysSize(); } else { // In cost estimation, # of project columns should be >= 1, even for skipAll query. projectedColumnCount = Math.max(columns.size(), 1); } return projectedColumnCount * HIVE_SERDE_SCAN_OVERHEAD_FACTOR_PER_COLUMN; } }
private static void createTempTable(org.apache.hadoop.hive.metastore.api.Table t) { if(t.getPartitionKeysSize() <= 0) { //do nothing as it's not a partitioned table return; } String qualifiedTableName = Warehouse. getQualifiedName(t.getDbName().toLowerCase(), t.getTableName().toLowerCase()); SessionState ss = SessionState.get(); if (ss == null) { LOG.warn("No current SessionState, skipping temp partitions for " + qualifiedTableName); return; } TempTable tt = new TempTable(t); if(ss.getTempPartitions().putIfAbsent(qualifiedTableName, tt) != null) { throw new IllegalStateException("TempTable for " + qualifiedTableName + " already exists"); } } }
/**
 * Collects the filesystem locations that a truncate should clear.
 *
 * @param ms        metastore backing store
 * @param catName   catalog name
 * @param dbName    database name
 * @param tableName table name
 * @param table     the table being truncated
 * @param partNames explicit partition names to truncate, or null for the whole table
 * @return storage locations to truncate
 * @throws Exception propagated from the metastore lookups
 */
private List<Path> getLocationsForTruncate(final RawStore ms, final String catName, final String dbName, final String tableName, final Table table, final List<String> partNames) throws Exception {
  List<Path> locations = new ArrayList<>();
  if (partNames != null) {
    // Explicit partition list: only those partitions' locations.
    for (Partition part : ms.getPartitionsByNames(catName, dbName, tableName, partNames)) {
      locations.add(new Path(part.getSd().getLocation()));
    }
  } else if (table.getPartitionKeysSize() != 0) {
    // Partitioned table, no names supplied: every partition location.
    for (Partition part : ms.getPartitions(catName, dbName, tableName, Integer.MAX_VALUE)) {
      locations.add(new Path(part.getSd().getLocation()));
    }
  } else {
    // Unpartitioned table: its single storage location.
    locations.add(new Path(table.getSd().getLocation()));
  }
  return locations;
}
if (!updateStats || newDir || tbl.getPartitionKeysSize() != 0) { return;
if (table.getPartitionKeysSize() == 0) { Map<String, String> params = table.getParameters(); List<String> colsToUpdate = null;
if (!customDynamicLocationUsed) { src = new Path(getPartitionRootLocation(jobInfo.getLocation(), jobInfo.getTableInfo().getTable() .getPartitionKeysSize())); } else { src = new Path(getCustomPartitionRootLocation(jobInfo, jobContext.getConfiguration()));
/**
 * Builds a partition-key-name to partition-value map for the given partition,
 * asserting that the value count matches the table's partition key count.
 *
 * @param t table supplying the partition keys
 * @param p partition supplying the values, positionally aligned with the keys
 * @return map of partition key name to value (unordered HashMap)
 */
private static Map<String, String> getPtnDesc(Table t, Partition p) {
  assertEquals(t.getPartitionKeysSize(), p.getValuesSize());
  Map<String, String> desc = new HashMap<>();
  Iterator<String> values = p.getValuesIterator();
  for (FieldSchema key : t.getPartitionKeys()) {
    desc.put(key.getName(), values.next());
  }
  return desc;
}
return Collections.emptyList(); if(t.getPartitionKeysSize() <= 0) {
// Test fixture setup: creates a fresh working directory, stubs a metastore
// Table mock as an unpartitioned (getPartitionKeysSize() == 0), unbucketed
// table with external.table.purge=TRUE, and wires a DruidStorageHandler to the
// Derby test connector. hive.druid.maxTries=0 so failures surface immediately.
@Before public void before() throws Throwable { tableWorkingPath = temporaryFolder.newFolder().getAbsolutePath(); segmentsTable = derbyConnectorRule.metadataTablesConfigSupplier().get().getSegmentsTable(); Map<String, String> params = new HashMap<>(); params.put("external.table.purge", "TRUE"); Mockito.when(tableMock.getParameters()).thenReturn(params); Mockito.when(tableMock.getPartitionKeysSize()).thenReturn(0); StorageDescriptor storageDes = Mockito.mock(StorageDescriptor.class); Mockito.when(storageDes.getBucketColsSize()).thenReturn(0); Mockito.when(tableMock.getSd()).thenReturn(storageDes); Mockito.when(tableMock.getDbName()).thenReturn(DB_NAME); Mockito.when(tableMock.getTableName()).thenReturn(TABLE_NAME); config = new Configuration(); config.set(String.valueOf(HiveConf.ConfVars.HIVEQUERYID), "hive-" + UUID.randomUUID().toString()); config.set(String.valueOf(HiveConf.ConfVars.DRUID_WORKING_DIR), tableWorkingPath); config.set(String.valueOf(HiveConf.ConfVars.DRUID_SEGMENT_DIRECTORY), new Path(tableWorkingPath, "finalSegmentDir").toString()); config.set("hive.druid.maxTries", "0"); druidStorageHandler = new DruidStorageHandler(derbyConnectorRule.getConnector(), derbyConnectorRule.metadataTablesConfigSupplier().get()); druidStorageHandler.setConf(config); }
throw new MetaException("LOCATION may not be specified for Druid"); if (table.getPartitionKeysSize() != 0) { throw new MetaException("PARTITIONED BY may not be specified for Druid");
Partition nativePartition = HiveMetaStoreUtils.getPartition(partition); Preconditions.checkArgument(table.getPartitionKeysSize() == nativePartition.getValues().size(), String.format("Partition key size is %s but partition value size is %s", table.getPartitionKeys().size(), nativePartition.getValues().size()));
String validWriteIds, long writeId) throws Exception { if (partNames == null) { if (0 != table.getPartitionKeysSize()) { for (Partition partition : ms.getPartitions(catName, dbName, tableName, Integer.MAX_VALUE)) { alterPartitionForTruncate(ms, catName, dbName, tableName, table, partition,
StatsSetupConst.setBasicStatsState(tbl.getParameters(), StatsSetupConst.FALSE); LOG.info("Removed COLUMN_STATS_ACCURATE from Table's parameters."); } else if (isTxn && tbl.getPartitionKeysSize() == 0) { if (isCurrentStatsValidForTheQuery(mtable, writeIdList, false)) { tbl.setIsStatsCompliant(true);
partInfo.getTableName()); if (tbl.getPartitionKeysSize() == 0) { throw new HCatException("The table " + partInfo.getTableName() + " is not partitioned.");