/**
 * Builds the single-column result schema for EXPLAIN output.
 *
 * @return a one-element list holding the explain column, typed as string
 */
public List<FieldSchema> getResultSchema() {
  List<FieldSchema> columns = new ArrayList<FieldSchema>();
  FieldSchema explainColumn = new FieldSchema();
  explainColumn.setName(ExplainTask.EXPL_COLUMN_NAME);
  explainColumn.setType(STRING_TYPE_NAME);
  columns.add(explainColumn);
  return columns;
}
/**
 * Builds the single-column result schema for EXPLAIN output.
 *
 * @return a one-element list holding the explain column, typed as string
 */
public static List<FieldSchema> getResultSchema() {
  FieldSchema explainColumn = new FieldSchema();
  explainColumn.setName(EXPL_COLUMN_NAME);
  explainColumn.setType(STRING_TYPE_NAME);
  List<FieldSchema> columns = new ArrayList<FieldSchema>();
  columns.add(explainColumn);
  return columns;
}
/**
 * Builds the single-column result schema for EXPLAIN output.
 *
 * @return a one-element list holding the explain column, typed as string
 */
public List<FieldSchema> getResultSchema() {
  FieldSchema explainColumn = new FieldSchema();
  explainColumn.setName(ExplainTask.EXPL_COLUMN_NAME);
  explainColumn.setType(STRING_TYPE_NAME);
  List<FieldSchema> columns = new ArrayList<FieldSchema>();
  columns.add(explainColumn);
  return columns;
}
}
/**
 * Builds the single-column result schema for EXPLAIN output.
 *
 * @return a one-element list holding the explain column, typed as string
 */
public List<FieldSchema> getResultSchema() {
  List<FieldSchema> columns = new ArrayList<FieldSchema>();
  FieldSchema explainColumn = new FieldSchema();
  explainColumn.setName(EXPL_COLUMN_NAME);
  explainColumn.setType(STRING_TYPE_NAME);
  columns.add(explainColumn);
  return columns;
}
}
// NOTE(review): tail of an anonymous-class callback — the enclosing method and the
// call this anonymous class is passed to begin outside this view, so only the
// override itself is documented. Maps one raw query result row (fields) onto a
// FieldSchema and appends it to the accumulator list t. finalCounter appears to be
// the number of projected columns (presumably 1..3); fields[0] is skipped — likely
// a key/id column — TODO confirm against the query issued by the caller.
@Override public void apply(List<FieldSchema> t, Object[] fields) { FieldSchema fieldSchema = new FieldSchema(); if (finalCounter > 0) { fieldSchema.setName((String) fields[1]); } if (finalCounter > 1) { fieldSchema.setType(extractSqlClob(fields[2])); } if (finalCounter > 2) { fieldSchema.setComment((String) fields[3]); } t.add(fieldSchema); }}); }
/** Builds the one-string-column schema used for operation log rows. */
private Schema getLogSchema() {
  FieldSchema logColumn = new FieldSchema();
  logColumn.setName("operation_log");
  logColumn.setType("string");
  Schema logSchema = new Schema();
  logSchema.addToFieldSchemas(logColumn);
  return logSchema;
}
/**
 * Renames a data column on an existing table.
 *
 * Renaming a partition column is rejected, since partition data layout is
 * keyed by the partition column names.
 *
 * @throws TableNotFoundException if the table does not exist
 * @throws PrestoException (NOT_SUPPORTED) if the column is a partition key
 */
@Override
public void renameColumn(String databaseName, String tableName, String oldColumnName, String newColumnName) {
  org.apache.hadoop.hive.metastore.api.Table table = delegate.getTable(databaseName, tableName)
      .orElseThrow(() -> new TableNotFoundException(new SchemaTableName(databaseName, tableName)));
  // Partition columns cannot be renamed.
  for (FieldSchema partitionKey : table.getPartitionKeys()) {
    if (partitionKey.getName().equals(oldColumnName)) {
      throw new PrestoException(NOT_SUPPORTED, "Renaming partition columns is not supported");
    }
  }
  // Rename every matching column in the storage descriptor.
  for (FieldSchema column : table.getSd().getCols()) {
    if (column.getName().equals(oldColumnName)) {
      column.setName(newColumnName);
    }
  }
  alterTable(databaseName, tableName, table);
}
/**
 * Builds the single string-column schema for SET command output, recording the
 * configured null-format string in the schema properties.
 */
private Schema getSchema() {
  FieldSchema resultColumn = new FieldSchema();
  resultColumn.setName(SET_COLUMN_NAME);
  resultColumn.setType(STRING_TYPE_NAME);
  Schema schema = new Schema();
  schema.putToProperties(SERIALIZATION_NULL_FORMAT, defaultNullString);
  schema.addToFieldSchemas(resultColumn);
  return schema;
}
/**
 * Builds the single string-column result schema, with the configured
 * null-format string stored in the schema properties.
 */
private Schema getSchema() {
  Schema schema = new Schema();
  schema.putToProperties(SERIALIZATION_NULL_FORMAT, defaultNullString);
  FieldSchema column = new FieldSchema();
  column.setName(SET_COLUMN_NAME);
  column.setType(STRING_TYPE_NAME);
  schema.addToFieldSchemas(column);
  return schema;
}
/**
 * Creates a fresh test table named "tableN" (N auto-incremented) in the
 * default database; when isPartitioned, attaches a single partition column
 * named "version".
 */
private Table newTable(boolean isPartitioned) {
  Table table = new Table("default", "table" + Integer.toString(nextInput++));
  if (isPartitioned) {
    FieldSchema versionColumn = new FieldSchema();
    versionColumn.setName("version");
    versionColumn.setType("String");
    List<FieldSchema> partitionColumns = new ArrayList<FieldSchema>(1);
    partitionColumns.add(versionColumn);
    table.setPartCols(partitionColumns);
  }
  return table;
}
}
/**
 * Creates a fresh test table named "tableN" (N auto-incremented) in the
 * default database, marked transactional; when isPartitioned, attaches a
 * single partition column named "version".
 */
private Table newTable(boolean isPartitioned) {
  Table table = new Table("default", "table" + Integer.toString(nextInput++));
  if (isPartitioned) {
    FieldSchema versionColumn = new FieldSchema();
    versionColumn.setName("version");
    versionColumn.setType("String");
    List<FieldSchema> partitionColumns = new ArrayList<FieldSchema>(1);
    partitionColumns.add(versionColumn);
    table.setPartCols(partitionColumns);
  }
  // Mark the table transactional, creating the parameter map if absent.
  Map<String, String> parameters = table.getParameters();
  if (parameters == null) {
    parameters = new HashMap<>();
  }
  parameters.put(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, "true");
  table.setParameters(parameters);
  return table;
}
/**
 * Thrift-style generic setter: assigns value to the slot named by field;
 * a null value clears (unsets) that slot instead.
 *
 * Note: a null field argument throws NullPointerException (switch on null enum),
 * matching the generated-code contract.
 */
public void setFieldValue(_Fields field, Object value) {
  switch (field) {
    case NAME:
      if (value == null) {
        unsetName();
      }
      else {
        setName((String) value);
      }
      break;
    case TYPE:
      if (value == null) {
        unsetType();
      }
      else {
        setType((String) value);
      }
      break;
    case COMMENT:
      if (value == null) {
        unsetComment();
      }
      else {
        setComment((String) value);
      }
      break;
  }
}
/** add_partitions must reject a partition whose SD column has a null name. */
@Test(expected = MetaException.class)
public void testAddPartitionsNullColNameInSd() throws Exception {
  createTable();
  Partition partition = buildPartition(DB_NAME, TABLE_NAME, DEFAULT_YEAR_VALUE);
  FieldSchema firstColumn = partition.getSd().getCols().get(0);
  firstColumn.setName(null);
  client.add_partitions(Lists.newArrayList(partition));
}
// NOTE(review): fragment — the loop body continues past this view, so only the
// visible statements are documented. For each partition column name, start
// building a FieldSchema carrying that name (type/comment presumably set in the
// lines that follow — confirm in the full file).
for (String partCol : partCols) { FieldSchema part = new FieldSchema(); part.setName(partCol);
/** alter_table must reject renaming an existing partition column. */
@Test(expected = InvalidOperationException.class)
public void testAlterTableInvalidStorageDescriptorAlterPartitionColumnName() throws Exception {
  Table original = partitionedTable;
  Table altered = original.deepCopy();
  altered.getPartitionKeys().get(0).setName("altered_name");
  client.alter_table(original.getDbName(), original.getTableName(), altered);
}
/** add_partition must reject a partition whose SD column has a null name. */
@Test(expected = MetaException.class)
public void testAddPartitionNullColNameInSd() throws Exception {
  createTable();
  Partition partition = buildPartition(DB_NAME, TABLE_NAME, DEFAULT_YEAR_VALUE);
  FieldSchema firstColumn = partition.getSd().getCols().get(0);
  firstColumn.setName(null);
  client.add_partition(partition);
}
// NOTE(review): single-statement fragment — surrounding context is outside this
// view. Sets the column's name from the raw identifier after unescaping
// (presumably stripping quote characters such as backticks — confirm against
// unescapeIdentifier's implementation).
newCol.setName(unescapeIdentifier(name));
@Test public void testAlterTableChangeCols() throws Exception { Table originalTable = partitionedTable; Table newTable = originalTable.deepCopy(); List<FieldSchema> cols = newTable.getSd().getCols(); // Change a column cols.get(0).setName("modified_col"); // Remove a column cols.remove(1); // Add a new column cols.add(new FieldSchema("new_col", "int", null)); // Store the changes client.alter_table(originalTable.getDbName(), originalTable.getTableName(), newTable); Table alteredTable = client.getTable(newTable.getDbName(), newTable.getTableName()); Assert.assertTrue("Original table directory should be kept", metaStore.isPathExists(new Path(originalTable.getSd().getLocation()))); // The following data might be changed alteredTable.setParameters(newTable.getParameters()); Assert.assertEquals("The table data should be the same", newTable, alteredTable); // Modify partition column type, and comment newTable.getPartitionKeys().get(0).setType("string"); newTable.getPartitionKeys().get(0).setComment("changed comment"); client.alter_table(originalTable.getDbName(), originalTable.getTableName(), newTable); alteredTable = client.getTable(newTable.getDbName(), newTable.getTableName()); // The following data might be changed alteredTable.setParameters(newTable.getParameters()); Assert.assertEquals("The table data should be the same", newTable, alteredTable); }
/**
 * Wraps a table for the shared cache: normalizes catalog/db/table and
 * partition-key identifiers on a deep copy, detaches the storage descriptor
 * (stored once per hash via increSd), and records the SD's location and
 * parameters on the wrapper. A table without an SD is wrapped with null SD
 * metadata.
 */
private TableWrapper createTableWrapper(String catName, String dbName, String tblName, Table tbl) {
  Table copy = tbl.deepCopy();
  copy.setCatName(normalizeIdentifier(catName));
  copy.setDbName(normalizeIdentifier(dbName));
  copy.setTableName(normalizeIdentifier(tblName));
  List<FieldSchema> partitionKeys = copy.getPartitionKeys();
  if (partitionKeys != null) {
    for (FieldSchema partitionKey : partitionKeys) {
      partitionKey.setName(normalizeIdentifier(partitionKey.getName()));
    }
  }
  if (tbl.getSd() == null) {
    return new TableWrapper(copy, null, null, null);
  }
  // Refcount the SD by content hash and strip it off the cached copy.
  StorageDescriptor sd = tbl.getSd();
  byte[] sdHash = MetaStoreServerUtils.hashStorageDescriptor(sd, md);
  increSd(sd, sdHash);
  copy.setSd(null);
  return new TableWrapper(copy, sdHash, sd.getLocation(), sd.getParameters());
}
private void updateTableObj(Table newTable, SharedCache sharedCache) { byte[] sdHash = getSdHash(); // Remove old table object's sd hash if (sdHash != null) { sharedCache.decrSd(sdHash); } Table tblCopy = newTable.deepCopy(); if (tblCopy.getPartitionKeys() != null) { for (FieldSchema fs : tblCopy.getPartitionKeys()) { fs.setName(StringUtils.normalizeIdentifier(fs.getName())); } } setTable(tblCopy); if (tblCopy.getSd() != null) { sdHash = MetaStoreServerUtils.hashStorageDescriptor(tblCopy.getSd(), md); StorageDescriptor sd = tblCopy.getSd(); sharedCache.increSd(sd, sdHash); tblCopy.setSd(null); setSdHash(sdHash); setLocation(sd.getLocation()); setParameters(sd.getParameters()); } else { setSdHash(null); setLocation(null); setParameters(null); } }