/**
 * Returns whether the given table declares at least one partition column.
 *
 * @param tableObj the metastore table to inspect (never modified)
 * @return true if the table has a non-empty partition-key list
 */
static boolean isPartitionedTable(Table tableObj) {
    List<FieldSchema> partKeys = tableObj.getPartitionKeys();
    return partKeys != null && !partKeys.isEmpty();
}
/**
 * Builds a comma-separated list of the table's partition column names, in
 * declaration order.
 *
 * @param table the metastore table whose partition columns are listed
 * @return e.g. {@code "year,month,day"}, or {@code ""} for an unpartitioned table
 */
private String buildPartColStr(Table table) {
    // StringBuilder avoids the O(n^2) cost of repeated String concatenation
    // that the previous += loop incurred.
    StringBuilder partColStr = new StringBuilder();
    for (int i = 0; i < table.getPartitionKeysSize(); ++i) {
        if (i != 0) {
            partColStr.append(',');
        }
        partColStr.append(table.getPartitionKeys().get(i).getName());
    }
    return partColStr.toString();
}
/**
 * Returns this table's partition columns, lazily initializing the underlying
 * Thrift list to an empty one when it has not been set yet.
 *
 * @return the (possibly empty) live list of partition key schemas
 */
public List<FieldSchema> getPartCols() {
    List<FieldSchema> keys = tTable.getPartitionKeys();
    if (keys != null) {
        return keys;
    }
    // Install an empty list so callers can add keys and later reads see them.
    keys = new ArrayList<FieldSchema>();
    tTable.setPartitionKeys(keys);
    return keys;
}
/**
 * Pairs each partition column name of {@code table} with the corresponding
 * value from {@code partition}, preserving the declaration order of the keys.
 *
 * @param table the table defining the partition columns
 * @param partition the partition supplying one value per column, in the same order
 * @return an insertion-ordered map from partition column name to value
 */
public static Map<String, String> getPartitionKeyValues(Table table, Partition partition) {
    Map<String, String> partitionKeys = new LinkedHashMap<>();
    // Hoist the loop-invariant getters instead of re-fetching both lists
    // on every iteration. LinkedHashMap keeps the key declaration order.
    List<FieldSchema> keys = table.getPartitionKeys();
    List<String> values = partition.getValues();
    for (int i = 0; i < table.getPartitionKeysSize(); ++i) {
        partitionKeys.put(keys.get(i).getName(), values.get(i));
    }
    return partitionKeys;
}
/**
 * Returns the table's partition columns, installing an empty list on the
 * table first when none has been set.
 *
 * @param table the metastore table to read (and possibly initialize)
 * @return the (possibly empty) live list of partition key schemas
 */
public static List<FieldSchema> getPartCols(Table table) {
    if (table.getPartitionKeys() == null) {
        // Initialize so the returned list is always non-null and live.
        table.setPartitionKeys(new ArrayList<>());
    }
    return table.getPartitionKeys();
}
/**
 * Builds a partition-column-name to partition-value map for the given
 * partition, asserting that the key and value counts line up.
 *
 * @param t the table defining the partition columns
 * @param p the partition supplying one value per column
 * @return a map from partition column name to the partition's value for it
 */
private static Map<String, String> getPtnDesc(Table t, Partition p) {
    assertEquals(t.getPartitionKeysSize(), p.getValuesSize());
    Map<String, String> desc = new HashMap<String, String>();
    for (int i = 0; i < t.getPartitionKeysSize(); ++i) {
        desc.put(t.getPartitionKeys().get(i).getName(), p.getValues().get(i));
    }
    return desc;
}
/**
 * Looks up the partition identified by {@code partitionVals} when the table
 * is partitioned; returns null for unpartitioned tables.
 *
 * @param db database name
 * @param table table name
 * @param partitionVals partition values identifying the partition
 * @param tableObj the already-fetched table object, used to detect partitioning
 * @return the partition, or null if the table has no partition keys
 * @throws MetaException on metastore errors
 * @throws NoSuchObjectException if the partition does not exist
 */
private Partition getPartitionObj(String db, String table, List<String> partitionVals, Table tableObj)
    throws MetaException, NoSuchObjectException {
    boolean partitioned = tableObj.isSetPartitionKeys() && !tableObj.getPartitionKeys().isEmpty();
    return partitioned ? get_partition(db, table, partitionVals) : null;
}
/**
 * Creates a builder for the given table's partition specification.
 * Records each partition column's primitive type info, keyed by the
 * lower-cased column name.
 *
 * @param table the table whose partition columns define the expression domain
 * @param partSpecs partition column name to value pairs to build against
 */
public ExpressionBuilder(Table table, Map<String, String> partSpecs) {
    this.partSpecs = partSpecs;
    for (FieldSchema col : table.getPartitionKeys()) {
        String key = col.getName().toLowerCase();
        partColumnTypesMap.put(key, TypeInfoFactory.getPrimitiveTypeInfo(col.getType()));
    }
}
/**
 * Registers a partition in the in-memory partition map, keyed by its
 * canonical partition name; duplicate names are rejected.
 *
 * @param p the partition to register
 * @throws AlreadyExistsException if a partition with the same name is already registered
 * @throws MetaException if the partition name cannot be constructed
 */
private void addPartition(Partition p) throws AlreadyExistsException, MetaException {
    String partName = Warehouse.makePartName(tTable.getPartitionKeys(), p.getValues());
    Partition previous = parts.putIfAbsent(partName, p);
    if (previous != null) {
        throw new AlreadyExistsException("Partition " + partName + " already exists");
    }
}
/**
/**
 * Creates a warehouse-layout partition helper for this worker's table,
 * rooted at the table's storage location and keyed by its partition
 * column names (in declaration order).
 *
 * @return a helper that resolves partition paths under the table location
 * @throws MetaException on metastore errors
 * @throws WorkerException on worker initialization errors
 */
private PartitionHelper newWarehousePartitionHelper() throws MetaException, WorkerException {
    Path tablePath = new Path(table.getTable().getSd().getLocation());
    List<FieldSchema> partitionFields = table.getTable().getPartitionKeys();
    List<String> partitionColumns = new ArrayList<>(partitionFields.size());
    for (FieldSchema field : partitionFields) {
        partitionColumns.add(field.getName());
    }
    return new WarehousePartitionHelper(configuration, tablePath, partitionColumns);
}
/**
 * Converts a Thrift metastore {@code Table} into the internal {@code Table}
 * representation, using the caller-supplied {@code schema} for the data
 * columns and the Thrift table's own partition keys for partition columns.
 *
 * @param table the Thrift metastore table; must carry a storage descriptor
 * @param schema field schemas to use as the table's data columns
 * @return the converted table
 * @throws PrestoException with HIVE_INVALID_METADATA when the storage descriptor is absent
 */
public static Table fromMetastoreApiTable(org.apache.hadoop.hive.metastore.api.Table table, List<FieldSchema> schema) {
    StorageDescriptor storageDescriptor = table.getSd();
    if (storageDescriptor == null) {
        throw new PrestoException(HIVE_INVALID_METADATA, "Table is missing storage descriptor");
    }
    Table.Builder tableBuilder = Table.builder()
        .setDatabaseName(table.getDbName())
        .setTableName(table.getTableName())
        // Owner may be absent on the Thrift object; normalize null to "".
        .setOwner(nullToEmpty(table.getOwner()))
        .setTableType(table.getTableType())
        .setDataColumns(schema.stream()
            .map(ThriftMetastoreUtil::fromMetastoreApiFieldSchema)
            .collect(toList()))
        .setPartitionColumns(table.getPartitionKeys().stream()
            .map(ThriftMetastoreUtil::fromMetastoreApiFieldSchema)
            .collect(toList()))
        // Thrift may return null parameters; substitute an empty map.
        .setParameters(table.getParameters() == null ? ImmutableMap.of() : table.getParameters())
        // View texts are optional: empty string is treated the same as absent.
        .setViewOriginalText(Optional.ofNullable(emptyToNull(table.getViewOriginalText())))
        .setViewExpandedText(Optional.ofNullable(emptyToNull(table.getViewExpandedText())));
    // Populates the builder's storage section in place from the Thrift descriptor.
    fromMetastoreApiStorageDescriptor(storageDescriptor, tableBuilder.getStorageBuilder(), table.getTableName());
    return tableBuilder.build();
}
/**
 * Renames a data column on the given table. Renaming a partition column is
 * rejected, since partition columns are part of the table's physical layout.
 *
 * @param databaseName database containing the table
 * @param tableName table whose column is renamed
 * @param oldColumnName current column name
 * @param newColumnName replacement column name
 * @throws TableNotFoundException if the table does not exist
 * @throws PrestoException with NOT_SUPPORTED if {@code oldColumnName} is a partition column
 */
@Override
public void renameColumn(String databaseName, String tableName, String oldColumnName, String newColumnName) {
    org.apache.hadoop.hive.metastore.api.Table table = delegate.getTable(databaseName, tableName)
        .orElseThrow(() -> new TableNotFoundException(new SchemaTableName(databaseName, tableName)));
    for (FieldSchema partitionKey : table.getPartitionKeys()) {
        if (partitionKey.getName().equals(oldColumnName)) {
            throw new PrestoException(NOT_SUPPORTED, "Renaming partition columns is not supported");
        }
    }
    for (FieldSchema column : table.getSd().getCols()) {
        if (column.getName().equals(oldColumnName)) {
            column.setName(newColumnName);
        }
    }
    alterTable(databaseName, tableName, table);
}
private void addTxnWriteNotificationLog(Table tableObj, Partition ptnObj, WriteNotificationLogRequest rqst) throws MetaException { String partition = ""; //Empty string is an invalid partition name. Can be used for non partitioned table. if (ptnObj != null) { partition = Warehouse.makePartName(tableObj.getPartitionKeys(), rqst.getPartitionVals()); } AcidWriteEvent event = new AcidWriteEvent(partition, tableObj, ptnObj, rqst); getTxnHandler().addWriteNotificationLog(event); if (listeners != null && !listeners.isEmpty()) { MetaStoreListenerNotifier.notifyEvent(listeners, EventType.ACID_WRITE, event); } }
/**
 * Builds the schema properties for a partition, combining the partition's
 * storage descriptor with the table's descriptor, parameters, identity and
 * partition keys.
 *
 * @param part the partition supplying the primary storage descriptor
 * @param table the owning table supplying defaults and partition keys
 * @return the combined schema properties
 */
public static Properties getSchema(
        org.apache.hadoop.hive.metastore.api.Partition part,
        org.apache.hadoop.hive.metastore.api.Table table) {
    return MetaStoreUtils.getSchema(
        part.getSd(),
        table.getSd(),
        table.getParameters(),
        table.getDbName(),
        table.getTableName(),
        table.getPartitionKeys());
}
/**
 * Builds the schema properties for a table, using the table's own storage
 * descriptor for both the primary and fallback descriptor slots.
 *
 * @param table the table to describe
 * @return the table's schema properties
 */
public static Properties getTableMetadata(
        org.apache.hadoop.hive.metastore.api.Table table) {
    return MetaStoreUtils.getSchema(
        table.getSd(),
        table.getSd(),
        table.getParameters(),
        table.getDbName(),
        table.getTableName(),
        table.getPartitionKeys());
}
/**
 * Builds the schema properties for a partition using the partition's own
 * storage descriptor and parameters, with table-level identity and
 * partition keys.
 *
 * @param partition the partition to describe
 * @param table the owning table supplying name, database and partition keys
 * @return the partition's schema properties
 */
public static Properties getPartitionMetadata(
        org.apache.hadoop.hive.metastore.api.Partition partition,
        org.apache.hadoop.hive.metastore.api.Table table) {
    return MetaStoreUtils.getSchema(
        partition.getSd(),
        partition.getSd(),
        partition.getParameters(),
        table.getDbName(),
        table.getTableName(),
        table.getPartitionKeys());
}
/**
 * Altering a table so that a partition column disappears must be rejected
 * with InvalidOperationException.
 */
@Test(expected = InvalidOperationException.class)
public void testAlterTableInvalidStorageDescriptorRemovePartitionColumn() throws Exception {
    Table alteredTable = partitionedTable.deepCopy();
    alteredTable.getPartitionKeys().remove(0);
    client.alter_table(partitionedTable.getDbName(), partitionedTable.getTableName(), alteredTable);
}
/**
 * Altering a table so that a partition column is renamed must be rejected
 * with InvalidOperationException.
 */
@Test(expected = InvalidOperationException.class)
public void testAlterTableInvalidStorageDescriptorAlterPartitionColumnName() throws Exception {
    Table alteredTable = partitionedTable.deepCopy();
    alteredTable.getPartitionKeys().get(0).setName("altered_name");
    client.alter_table(partitionedTable.getDbName(), partitionedTable.getTableName(), alteredTable);
}
/**
 * Verifies that a partition column declared with an upper-case name
 * ("B") is stored lower-cased ("b") in the metastore.
 */
@Test
public void testCreateTblWithLowerCasePartNames() throws Exception {
    driver.run("drop table junit_sem_analysis");
    CommandProcessorResponse resp = driver.run("create table junit_sem_analysis (a int) partitioned by (B string) stored as TEXTFILE");
    // JUnit's assertEquals takes (expected, actual); the original call had
    // them swapped, which yields misleading failure messages.
    assertEquals(0, resp.getResponseCode());
    assertEquals(null, resp.getErrorMessage());
    Table tbl = client.getTable(Warehouse.DEFAULT_DATABASE_NAME, TBL_NAME);
    assertEquals("Partition key name case problem", "b", tbl.getPartitionKeys().get(0).getName());
    driver.run("drop table junit_sem_analysis");
}
/**
 * Adds a partition with the given values to the table, deriving the
 * partition's storage location from the table location plus the standard
 * partition path built from the table's partition keys.
 *
 * @param client metastore client used to register the partition
 * @param tbl the table to add the partition to
 * @param partValues one value per partition column, in declaration order
 * @throws IOException if the partition path cannot be built
 * @throws TException on metastore communication errors
 */
private static void addPartition(IMetaStoreClient client, Table tbl, List<String> partValues)
    throws IOException, TException {
    // Copy the table's descriptor and point it at the partition directory.
    StorageDescriptor sd = new StorageDescriptor(tbl.getSd());
    sd.setLocation(sd.getLocation() + Path.SEPARATOR + makePartPath(tbl.getPartitionKeys(), partValues));
    Partition part = new Partition();
    part.setDbName(tbl.getDbName());
    part.setTableName(tbl.getTableName());
    part.setSd(sd);
    part.setValues(partValues);
    client.add_partition(part);
}