/**
 * Builds the one-column result schema used for EXPLAIN-style output: a single
 * string-typed column named {@code EXPL_COLUMN_NAME}.
 *
 * @return a freshly allocated, single-element list of {@link FieldSchema}
 */
public static List<FieldSchema> getResultSchema() {
  List<FieldSchema> columns = new ArrayList<FieldSchema>();
  FieldSchema explainColumn = new FieldSchema();
  explainColumn.setName(EXPL_COLUMN_NAME);
  explainColumn.setType(STRING_TYPE_NAME);
  columns.add(explainColumn);
  return columns;
}
/**
 * Returns the result schema for this task's output: one string-typed column
 * named by {@link ExplainTask#EXPL_COLUMN_NAME}.
 *
 * @return a new single-element list of {@link FieldSchema}
 */
public List<FieldSchema> getResultSchema() {
  FieldSchema explainColumn = new FieldSchema();
  explainColumn.setName(ExplainTask.EXPL_COLUMN_NAME);
  explainColumn.setType(STRING_TYPE_NAME);
  List<FieldSchema> resultColumns = new ArrayList<FieldSchema>();
  resultColumns.add(explainColumn);
  return resultColumns;
}
/**
 * Returns the schema of this task's result set: a single string column named
 * {@code EXPL_COLUMN_NAME}.
 *
 * @return a new one-element list describing the output column
 */
public List<FieldSchema> getResultSchema() {
  List<FieldSchema> schemaColumns = new ArrayList<FieldSchema>();
  FieldSchema outputColumn = new FieldSchema();
  outputColumn.setName(EXPL_COLUMN_NAME);
  outputColumn.setType(STRING_TYPE_NAME);
  schemaColumns.add(outputColumn);
  return schemaColumns;
}
}
/**
 * Describes this task's output as a one-column schema; the column is string
 * typed and named by {@link ExplainTask#EXPL_COLUMN_NAME}.
 *
 * @return a fresh single-element column list
 */
public List<FieldSchema> getResultSchema() {
  FieldSchema col = new FieldSchema();
  col.setName(ExplainTask.EXPL_COLUMN_NAME);
  col.setType(STRING_TYPE_NAME);
  List<FieldSchema> schema = new ArrayList<FieldSchema>();
  schema.add(col);
  return schema;
}
}
/**
 * Fix the type name of a column of type decimal w/o precision/scale specified.
 * This makes the describe table show "decimal(10,0)" instead of "decimal" even
 * if the type stored in metastore is "decimal", which is possible with previous
 * hive.
 *
 * @param cols columns that to be fixed as such
 */
private static void fixDecimalColumnTypeName(List<FieldSchema> cols) {
  for (FieldSchema column : cols) {
    // Only bare "decimal" types are rewritten; qualified ones are untouched.
    if (!serdeConstants.DECIMAL_TYPE_NAME.equals(column.getType())) {
      continue;
    }
    String qualifiedName = DecimalTypeInfo.getQualifiedName(
        HiveDecimal.USER_DEFAULT_PRECISION, HiveDecimal.USER_DEFAULT_SCALE);
    column.setType(qualifiedName);
  }
}
/**
 * Derives the table's field schemas from its deserializer, then overwrites each
 * column's type with {@link MetaStoreUtils#TYPE_FROM_DESERIALIZER} so the
 * metastore records that the types originate from the deserializer.
 *
 * @param tbl table whose name is passed to the deserializer-based lookup
 * @param deserializer deserializer to derive the fields from
 * @return the derived field list with every type replaced by the marker value
 * @throws SerDeException if the deserializer cannot produce the fields
 * @throws MetaException on metastore-level failure
 */
public static List<FieldSchema> getFieldsFromDeserializerForMsStorage(
    Table tbl, Deserializer deserializer) throws SerDeException, MetaException {
  final List<FieldSchema> fields =
      HiveMetaStoreUtils.getFieldsFromDeserializer(tbl.getTableName(), deserializer);
  for (FieldSchema f : fields) {
    f.setType(MetaStoreUtils.TYPE_FROM_DESERIALIZER);
  }
  return fields;
}
/**
 * Gets the field schemas from the given deserializer and stamps every column's
 * type with {@link MetaStoreUtils#TYPE_FROM_DESERIALIZER}, marking the types as
 * deserializer-provided for metastore storage.
 *
 * @param tbl table supplying the name for the field lookup
 * @param deserializer source of the field definitions
 * @return the field list, each type replaced by the marker value
 * @throws SerDeException if field extraction fails
 * @throws MetaException on metastore-level failure
 */
public static List<FieldSchema> getFieldsFromDeserializerForMsStorage(
    Table tbl, Deserializer deserializer) throws SerDeException, MetaException {
  List<FieldSchema> derivedFields =
      MetaStoreUtils.getFieldsFromDeserializer(tbl.getTableName(), deserializer);
  for (FieldSchema derivedField : derivedFields) {
    derivedField.setType(MetaStoreUtils.TYPE_FROM_DESERIALIZER);
  }
  return derivedFields;
}
// Callback body of an anonymous class: maps one raw result row (Object[]) to a
// FieldSchema and appends it to the accumulator list. finalCounter is captured
// from the enclosing scope — presumably the number of projected columns; each
// guard skips fields the query did not fetch. NOTE(review): fields[0] is never
// read here — assumed to be a key/id column consumed elsewhere; confirm against
// the enclosing query. The trailing "}}); }" closes the anonymous class, the
// enclosing call, and the enclosing method, all outside this view.
@Override public void apply(List<FieldSchema> t, Object[] fields) {
  FieldSchema fieldSchema = new FieldSchema();
  if (finalCounter > 0) {
    fieldSchema.setName((String) fields[1]);
  }
  if (finalCounter > 1) {
    // CLOB column is converted to String via the helper before storing.
    fieldSchema.setType(extractSqlClob(fields[2]));
  }
  if (finalCounter > 2) {
    fieldSchema.setComment((String) fields[3]);
  }
  t.add(fieldSchema);
}}); }
/**
 * Builds the schema used for operation-log rows: a single string column named
 * "operation_log".
 *
 * @return a new {@link Schema} with exactly one field
 */
private Schema getLogSchema() {
  FieldSchema logColumn = new FieldSchema();
  logColumn.setName("operation_log");
  logColumn.setType("string");
  Schema logSchema = new Schema();
  logSchema.addToFieldSchemas(logColumn);
  return logSchema;
}
/**
 * Builds the one-column schema for SET-command output: a string column named
 * {@code SET_COLUMN_NAME}, with the serialization null format set to
 * {@code defaultNullString} in the schema properties.
 *
 * @return the populated {@link Schema}
 */
private Schema getSchema() {
  Schema schema = new Schema();
  FieldSchema setColumn = new FieldSchema();
  setColumn.setName(SET_COLUMN_NAME);
  setColumn.setType(STRING_TYPE_NAME);
  schema.addToFieldSchemas(setColumn);
  schema.putToProperties(SERIALIZATION_NULL_FORMAT, defaultNullString);
  return schema;
}
/**
 * Test fixture: creates a table named "tableN" (N auto-incremented via
 * {@code nextInput}) in the "default" database, optionally with a single
 * partition column named "version".
 *
 * @param isPartitioned whether to attach the "version" partition column
 * @return the constructed table
 */
private Table newTable(boolean isPartitioned) {
  Table table = new Table("default", "table" + Integer.toString(nextInput++));
  if (isPartitioned) {
    FieldSchema versionColumn = new FieldSchema();
    versionColumn.setName("version");
    // NOTE(review): "String" (capitalized) is not the canonical lowercase Hive
    // type name; preserved byte-for-byte since this fixture may rely on it not
    // being validated — confirm before normalizing.
    versionColumn.setType("String");
    List<FieldSchema> partitionColumns = new ArrayList<FieldSchema>(1);
    partitionColumns.add(versionColumn);
    table.setPartCols(partitionColumns);
  }
  return table;
}
}
/**
 * Returns the SET-command output schema: one string column named
 * {@code SET_COLUMN_NAME}; the schema's properties carry the serialization
 * null format {@code defaultNullString}.
 *
 * @return a newly built {@link Schema}
 */
private Schema getSchema() {
  FieldSchema outputColumn = new FieldSchema();
  outputColumn.setName(SET_COLUMN_NAME);
  outputColumn.setType(STRING_TYPE_NAME);
  Schema result = new Schema();
  result.putToProperties(SERIALIZATION_NULL_FORMAT, defaultNullString);
  result.addToFieldSchemas(outputColumn);
  return result;
}
/**
 * add_partitions must reject (MetaException) a partition whose storage
 * descriptor contains a column with a null type.
 */
@Test(expected = MetaException.class)
public void testAddPartitionsNullColTypeInSd() throws Exception {
  createTable();
  Partition brokenPartition = buildPartition(DB_NAME, TABLE_NAME, DEFAULT_YEAR_VALUE);
  FieldSchema firstColumn = brokenPartition.getSd().getCols().get(0);
  firstColumn.setType(null);
  client.add_partitions(Lists.newArrayList(brokenPartition));
}
/**
 * createTable must reject (MetaException) a table whose storage descriptor
 * contains a column with a null type.
 */
@Test(expected = MetaException.class)
public void testCreateTableInvalidStorageDescriptorNullColumnType() throws Exception {
  Table brokenTable = getNewTable();
  FieldSchema firstColumn = brokenTable.getSd().getCols().get(0);
  firstColumn.setType(null);
  client.createTable(brokenTable);
}
/**
 * createTable must reject (InvalidObjectException) a table whose storage
 * descriptor contains a column with an unrecognized type name.
 */
@Test(expected = InvalidObjectException.class)
public void testCreateTableInvalidStorageDescriptorInvalidColumnType() throws Exception {
  Table brokenTable = getNewTable();
  FieldSchema firstColumn = brokenTable.getSd().getCols().get(0);
  firstColumn.setType("xyz");
  client.createTable(brokenTable);
}
/**
 * add_partition must reject (MetaException) a partition whose storage
 * descriptor contains a column with a null type.
 */
@Test(expected = MetaException.class)
public void testAddPartitionNullColTypeInSd() throws Exception {
  createTable();
  Partition brokenPartition = buildPartition(DB_NAME, TABLE_NAME, DEFAULT_YEAR_VALUE);
  FieldSchema firstColumn = brokenPartition.getSd().getCols().get(0);
  firstColumn.setType(null);
  client.add_partition(brokenPartition);
}
/**
 * alter_table must reject (MetaException) a replacement table whose storage
 * descriptor contains a column with a null type.
 */
@Test(expected = MetaException.class)
public void testAlterTableInvalidStorageDescriptorNullColumnType() throws Exception {
  Table originalTable = testTables[0];
  Table brokenCopy = originalTable.deepCopy();
  FieldSchema firstColumn = brokenCopy.getSd().getCols().get(0);
  firstColumn.setType(null);
  client.alter_table(originalTable.getDbName(), originalTable.getTableName(), brokenCopy);
}
@Test public void testAddPartitionsInvalidColTypeInSd() throws Exception { createTable(); Partition partition = buildPartition(DB_NAME, TABLE_NAME, DEFAULT_YEAR_VALUE); partition.getSd().getCols().get(0).setType("xyz"); client.add_partitions(Lists.newArrayList(partition)); // TODO: Not sure that this is the correct behavior. It doesn't make sense to create the // partition with column with invalid type. This should be investigated later. Partition part = client.getPartition(DB_NAME, TABLE_NAME, Lists.newArrayList(DEFAULT_YEAR_VALUE)); Assert.assertNotNull(part); Assert.assertEquals("xyz", part.getSd().getCols().get(0).getType()); }
/**
 * alter_table must reject (InvalidOperationException) a replacement table
 * whose storage descriptor contains a column with an unrecognized type name.
 */
@Test(expected = InvalidOperationException.class)
public void testAlterTableInvalidStorageDescriptorInvalidColumnType() throws Exception {
  Table originalTable = testTables[0];
  Table brokenCopy = originalTable.deepCopy();
  FieldSchema firstColumn = brokenCopy.getSd().getCols().get(0);
  firstColumn.setType("xyz");
  client.alter_table(originalTable.getDbName(), originalTable.getTableName(), brokenCopy);
}
@Test public void testAddPartitionInvalidColTypeInSd() throws Exception { createTable(); Partition partition = buildPartition(DB_NAME, TABLE_NAME, DEFAULT_YEAR_VALUE); partition.getSd().getCols().get(0).setType("xyz"); client.add_partition(partition); // TODO: Not sure that this is the correct behavior. It doesn't make sense to create the // partition with column with invalid type. This should be investigated later. Partition part = client.getPartition(DB_NAME, TABLE_NAME, Lists.newArrayList(DEFAULT_YEAR_VALUE)); Assert.assertNotNull(part); Assert.assertEquals("xyz", part.getSd().getCols().get(0).getType()); }