/**
 * Records the given table type on the wrapped metastore table.
 *
 * @param tableType the {@link TableType} to store; persisted as its enum string form
 */
public void setTableType(TableType tableType) {
  String typeName = tableType.toString();
  tTable.setTableType(typeName);
}
/**
 * Updates the underlying thrift table's type field.
 *
 * @param tableType enum value whose string representation is written through to {@code tTable}
 */
public void setTableType(TableType tableType) {
  tTable.setTableType(
      tableType.toString());
}
private void validateTableType(Table tbl) { // If the table has property EXTERNAL set, update table type // accordingly String tableType = tbl.getTableType(); boolean isExternal = Boolean.parseBoolean(tbl.getParameters().get("EXTERNAL")); if (TableType.MANAGED_TABLE.toString().equals(tableType)) { if (isExternal) { tableType = TableType.EXTERNAL_TABLE.toString(); } } if (TableType.EXTERNAL_TABLE.toString().equals(tableType)) { if (!isExternal) { tableType = TableType.MANAGED_TABLE.toString(); } } tbl.setTableType(tableType); }
/**
 * Wraps a metastore API table, inferring a table type when none is set.
 *
 * <p>The input is deep-copied so the caller's object is never mutated. When the
 * copy carries no type, one is inferred in priority order: external flag, then
 * materialized view, then location-less tables (treated as virtual views),
 * otherwise managed.
 *
 * @param apiTable thrift table to wrap; copied, never modified
 */
public TableWrapper(org.apache.hadoop.hive.metastore.api.Table apiTable) {
  org.apache.hadoop.hive.metastore.api.Table copy = apiTable.deepCopy();
  if (copy.getTableType() == null) {
    // TableType was null, so figure out what kind of table this is.
    TableType inferred;
    if (MetaStoreUtils.isExternalTable(copy)) {
      inferred = TableType.EXTERNAL_TABLE;
    } else if (MetaStoreUtils.isMaterializedViewTable(copy)) {
      inferred = TableType.MATERIALIZED_VIEW;
    } else if (copy.getSd() == null || copy.getSd().getLocation() == null) {
      // No storage location at all: must be a view.
      inferred = TableType.VIRTUAL_VIEW;
    } else {
      inferred = TableType.MANAGED_TABLE;
    }
    copy.setTableType(inferred.toString());
  }
  initialize(copy);
}
}
/**
 * Wraps a metastore API table, deriving a table type when the input has none.
 *
 * <p>Works on a deep copy so the caller's table is untouched. Inference order:
 * external flag, index table, materialized view, missing storage location
 * (virtual view), otherwise managed.
 *
 * @param apiTable thrift table to wrap; copied, never modified
 */
public TableWrapper(org.apache.hadoop.hive.metastore.api.Table apiTable) {
  org.apache.hadoop.hive.metastore.api.Table copy = apiTable.deepCopy();
  if (copy.getTableType() == null) {
    // TableType specified was null; classify the table from its metadata.
    TableType derived;
    if (MetaStoreUtils.isExternalTable(copy)) {
      derived = TableType.EXTERNAL_TABLE;
    } else if (MetaStoreUtils.isIndexTable(copy)) {
      derived = TableType.INDEX_TABLE;
    } else if (MetaStoreUtils.isMaterializedViewTable(copy)) {
      derived = TableType.MATERIALIZED_VIEW;
    } else if (copy.getSd() == null || copy.getSd().getLocation() == null) {
      // No storage descriptor / location: treat as a view.
      derived = TableType.VIRTUAL_VIEW;
    } else {
      derived = TableType.MANAGED_TABLE;
    }
    copy.setTableType(derived.toString());
  }
  initialize(copy);
}
}
static boolean migrateToExternalTable(Table tableObj, TableType tableType, boolean dryRun, HiveUpdater hiveUpdater) throws HiveException { String msg; switch (tableType) { case MANAGED_TABLE: if (AcidUtils.isTransactionalTable(tableObj)) { msg = createExternalConversionExcuse(tableObj, "Table is a transactional table"); LOG.debug(msg); return false; } LOG.info("Converting {} to external table ...", getQualifiedName(tableObj)); if (!dryRun) { tableObj.setTableType(TableType.EXTERNAL_TABLE.toString()); hiveUpdater.updateTableProperties(tableObj, convertToExternalTableProps); } return true; case EXTERNAL_TABLE: msg = createExternalConversionExcuse(tableObj, "Table is already an external table"); LOG.debug(msg); break; default: // VIEW/MATERIALIZED_VIEW msg = createExternalConversionExcuse(tableObj, "Table type " + tableType + " cannot be converted"); LOG.debug(msg); break; } return false; }
/**
 * Converts an internal {@code Table} representation into a metastore thrift table.
 *
 * @param table      source table model
 * @param privileges owner privileges to attach to the thrift table
 * @return a fully populated {@code org.apache.hadoop.hive.metastore.api.Table}
 */
public static org.apache.hadoop.hive.metastore.api.Table toMetastoreApiTable(Table table, PrincipalPrivileges privileges) {
  org.apache.hadoop.hive.metastore.api.Table apiTable = new org.apache.hadoop.hive.metastore.api.Table();
  apiTable.setDbName(table.getDatabaseName());
  apiTable.setTableName(table.getTableName());
  apiTable.setOwner(table.getOwner());
  apiTable.setTableType(table.getTableType());
  apiTable.setParameters(table.getParameters());
  List<FieldSchema> partitionKeys = table.getPartitionColumns().stream()
      .map(ThriftMetastoreUtil::toMetastoreApiFieldSchema)
      .collect(toList());
  apiTable.setPartitionKeys(partitionKeys);
  apiTable.setSd(makeStorageDescriptor(table.getTableName(), table.getDataColumns(), table.getStorage()));
  apiTable.setPrivileges(toMetastoreApiPrincipalPrivilegeSet(table.getOwner(), privileges));
  // Optional view texts map to null when absent, matching thrift's unset semantics.
  apiTable.setViewOriginalText(table.getViewOriginalText().orElse(null));
  apiTable.setViewExpandedText(table.getViewExpandedText().orElse(null));
  return apiTable;
}
// NOTE(review): fragment — the governing if-condition lives outside this chunk;
// it presumably tests whether the table should be external (TODO confirm in the
// enclosing method). External vs. managed type is recorded accordingly.
newTable.setTableType(TableType.EXTERNAL_TABLE.toString()); } else { newTable.setTableType(TableType.MANAGED_TABLE.toString());
public TableBuilder(Database database) { this.database = database; partitions = new ArrayList<>(); columnNames = new ArrayList<>(); columnTypes = new ArrayList<>(); partitionKeys = Collections.emptyList(); table = new Table(); table.setDbName(database.getName()); table.setTableType(TableType.MANAGED_TABLE.toString()); Map<String, String> tableParams = new HashMap<String, String>(); tableParams.put("transactional", Boolean.TRUE.toString()); table.setParameters(tableParams); sd = new StorageDescriptor(); sd.setInputFormat(OrcInputFormat.class.getName()); sd.setOutputFormat(OrcOutputFormat.class.getName()); sd.setNumBuckets(1); table.setSd(sd); serDeInfo = new SerDeInfo(); serDeInfo.setParameters(new HashMap<String, String>()); serDeInfo.getParameters().put(serdeConstants.SERIALIZATION_FORMAT, "1"); serDeInfo.setSerializationLib(OrcSerde.class.getName()); sd.setSerdeInfo(serDeInfo); }
// Verifies that getDropPartitionsDDLInfo expands the "gobblin.replaced.partitions"
// parameter ("year,month|year,month|...") into per-partition key/value maps, and
// that a partition never lists ITSELF for dropping even when its own values
// ("2016","02") appear in the replaced-partitions parameter.
// NOTE(review): table is a VIRTUAL_VIEW with year/month partition keys; the
// schema arguments to SchemaAwareHiveTable/Partition are deliberately null here.
@Test public void dropReplacedPartitionsTest() throws Exception { Table table = ConvertibleHiveDatasetTest.getTestTable("dbName", "tableName"); table.setTableType("VIRTUAL_VIEW"); table.setPartitionKeys(ImmutableList.of(new FieldSchema("year", "string", ""), new FieldSchema("month", "string", ""))); Partition part = new Partition(); part.setParameters(ImmutableMap.of("gobblin.replaced.partitions", "2015,12|2016,01")); SchemaAwareHiveTable hiveTable = new SchemaAwareHiveTable(table, null); SchemaAwareHivePartition partition = new SchemaAwareHivePartition(table, part, null); QueryBasedHiveConversionEntity conversionEntity = new QueryBasedHiveConversionEntity(null, hiveTable, Optional.of(partition)); List<ImmutableMap<String, String>> expected = ImmutableList.of(ImmutableMap.of("year", "2015", "month", "12"), ImmutableMap.of("year", "2016", "month", "01")); Assert.assertEquals(AbstractAvroToOrcConverter.getDropPartitionsDDLInfo(conversionEntity), expected); // Make sure that a partition itself is not dropped Partition replacedSelf = new Partition(); replacedSelf.setParameters(ImmutableMap.of("gobblin.replaced.partitions", "2015,12|2016,01|2016,02")); replacedSelf.setValues(ImmutableList.of("2016", "02")); conversionEntity = new QueryBasedHiveConversionEntity(null, hiveTable, Optional.of(new SchemaAwareHivePartition(table, replacedSelf, null))); Assert.assertEquals(AbstractAvroToOrcConverter.getDropPartitionsDDLInfo(conversionEntity), expected); }
// NOTE(review): fragment of Kafka storage-handler test setup — Kafka-backed
// tables must be EXTERNAL, with topic and bootstrap servers set as parameters.
preCreateTable.setTableType(TableType.EXTERNAL_TABLE.toString()); preCreateTable.putToParameters(KafkaTableProperties.HIVE_KAFKA_TOPIC.getName(), TEST_TOPIC); preCreateTable.putToParameters(KafkaTableProperties.HIVE_KAFKA_BOOTSTRAP_SERVERS.getName(), LOCALHOST_9291);
// Verifies that KafkaStorageHandler propagates the mandatory topic/bootstrap
// properties into the job configuration and that every non-mandatory
// KafkaTableProperties key is filled in with its documented default value.
// Flow: build an external Kafka table, run preCreateTable to populate defaults,
// copy its parameters into a mocked TableDesc, then configure input and output
// job properties and assert on the resulting map.
@Test public void configureJobPropertiesWithDefaultValues() throws MetaException { KafkaStorageHandler kafkaStorageHandler = new KafkaStorageHandler(); TableDesc tableDesc = Mockito.mock(TableDesc.class); Properties properties = new Properties(); Table preCreateTable = new Table(); preCreateTable.putToParameters(KafkaTableProperties.HIVE_KAFKA_TOPIC.getName(), TEST_TOPIC); preCreateTable.putToParameters(KafkaTableProperties.HIVE_KAFKA_BOOTSTRAP_SERVERS.getName(), LOCALHOST_9291); preCreateTable.setTableType(TableType.EXTERNAL_TABLE.toString()); kafkaStorageHandler.preCreateTable(preCreateTable); preCreateTable.getParameters().forEach(properties::setProperty); Mockito.when(tableDesc.getProperties()).thenReturn(properties); Map<String, String> jobProperties = new HashMap<>(); kafkaStorageHandler.configureInputJobProperties(tableDesc, jobProperties); kafkaStorageHandler.configureOutputJobProperties(tableDesc, jobProperties); Assert.assertEquals(jobProperties.get(KafkaTableProperties.HIVE_KAFKA_TOPIC.getName()), TEST_TOPIC); Assert.assertEquals(jobProperties.get(KafkaTableProperties.HIVE_KAFKA_BOOTSTRAP_SERVERS.getName()), LOCALHOST_9291); Arrays.stream(KafkaTableProperties.values()) .filter(key -> !key.isMandatory()) .forEach((key) -> Assert.assertEquals("Wrong match for key " + key.getName(), key.getDefaultValue(), jobProperties.get(key.getName()))); }
// NOTE(review): fragment — sets up an external Kafka table (topic + bootstrap
// servers), runs the storage handler's preCreateTable hook to fill in default
// parameters, then copies the resulting table parameters into a Properties
// object for the surrounding test.
preCreateTable.putToParameters(KafkaTableProperties.HIVE_KAFKA_TOPIC.getName(), TEST_TOPIC); preCreateTable.putToParameters(KafkaTableProperties.HIVE_KAFKA_BOOTSTRAP_SERVERS.getName(), LOCALHOST_9291); preCreateTable.setTableType(TableType.EXTERNAL_TABLE.toString()); kafkaStorageHandler.preCreateTable(preCreateTable); preCreateTable.getParameters().forEach(properties::setProperty);
// NOTE(review): fragment — the method signature precedes this chunk. Builds a
// managed thrift Table; TableType.name() and toString() are interchangeable
// here since TableType does not override toString.
throws TException { Table table = new Table(); table.setTableType(TableType.MANAGED_TABLE.name()); table.setTableName(tableName); table.setDbName(dbName);
/**
 * Creates a minimal external test table with a fixed storage location.
 *
 * @param dbName    database name to set on the table
 * @param tableName table name to set on the table
 * @return an EXTERNAL_TABLE thrift Table located at {@code /tmp/test}
 */
public static Table getTestTable(String dbName, String tableName) {
  StorageDescriptor storage = new StorageDescriptor();
  storage.setLocation("/tmp/test");

  Table testTable = new Table();
  testTable.setDbName(dbName);
  testTable.setTableName(tableName);
  testTable.setTableType(TableType.EXTERNAL_TABLE.name());
  testTable.setSd(storage);
  return testTable;
}
// NOTE(review): fragment — tail of a table-building method; `tableType` is
// presumably already a String here (no .toString() call). Confirm upstream.
tbl.setTableType(tableType); return tbl;
/**
 * Assembles a thrift Table from the builder's accumulated state.
 *
 * <p>Null column lists become an empty list; null location and partition keys
 * are simply left unset on the resulting objects.
 *
 * @return the fully constructed thrift Table
 */
Table build() {
  SerDeInfo serdeInfo = new SerDeInfo();
  serdeInfo.setSerializationLib(serde);
  serdeInfo.setName(tableName);

  StorageDescriptor sd = new StorageDescriptor();
  sd.setCols(columns == null ? Collections.emptyList() : columns);
  sd.setSerdeInfo(serdeInfo);
  sd.setInputFormat(inputFormat);
  sd.setOutputFormat(outputFormat);
  if (location != null) {
    sd.setLocation(location);
  }

  Table table = new Table();
  table.setDbName(dbName);
  table.setTableName(tableName);
  table.setSd(sd);
  table.setParameters(parameters);
  table.setOwner(owner);
  if (partitionKeys != null) {
    table.setPartitionKeys(partitionKeys);
  }
  table.setTableType(tableType.toString());
  return table;
}
}
private void createTable(String dbName, String tableName) throws Exception { String databaseName = (dbName == null) ? Warehouse.DEFAULT_DATABASE_NAME : dbName; try { msc.dropTable(databaseName, tableName); } catch (Exception e) { } // can fail with NoSuchObjectException Table tbl = new Table(); tbl.setDbName(databaseName); tbl.setTableName(tableName); tbl.setTableType("MANAGED_TABLE"); StorageDescriptor sd = new StorageDescriptor(); sd.setCols(getTableColumns()); tbl.setPartitionKeys(getPartitionKeys()); tbl.setSd(sd); sd.setBucketCols(new ArrayList<String>(2)); sd.setSerdeInfo(new SerDeInfo()); sd.getSerdeInfo().setName(tbl.getTableName()); sd.getSerdeInfo().setParameters(new HashMap<String, String>()); sd.getSerdeInfo().getParameters().put(serdeConstants.SERIALIZATION_FORMAT, "1"); sd.getSerdeInfo().setSerializationLib(ColumnarSerDe.class.getName()); sd.setInputFormat(RCFileInputFormat.class.getName()); sd.setOutputFormat(RCFileOutputFormat.class.getName()); Map<String, String> tableParams = new HashMap<String, String>(); tbl.setParameters(tableParams); msc.createTable(tbl); }
@Test public void testCreateTableDefaultValuesView() throws Exception { Table table = new Table(); StorageDescriptor sd = new StorageDescriptor(); List<FieldSchema> cols = new ArrayList<>(); table.setDbName(DEFAULT_DATABASE); table.setTableName("test_table_2"); table.setTableType("VIRTUAL_VIEW"); cols.add(new FieldSchema("column_name", "int", null)); sd.setCols(cols); sd.setSerdeInfo(new SerDeInfo()); table.setSd(sd); client.createTable(table); Table createdTable = client.getTable(table.getDbName(), table.getTableName()); // No location should be created for views Assert.assertNull("Storage descriptor location should be null", createdTable.getSd().getLocation()); }
// NOTE(review): fragment — start of a managed-table construction; the enclosing
// method and the rest of the storage-descriptor setup are outside this view.
tbl.setDbName(databaseName); tbl.setTableName(tableName); tbl.setTableType(TableType.MANAGED_TABLE.toString()); StorageDescriptor sd = new StorageDescriptor(); sd.setCols(getTableColumns(colNames, colTypes));