/**
 * Drops the database {@code name} from the default catalog.
 *
 * @param name database to drop
 * @param deleteData whether the database's data directory is removed as well
 * @param ignoreUnknownDb when true, a missing database is not an error
 */
@Override
public void dropDatabase(String name, boolean deleteData, boolean ignoreUnknownDb)
    throws NoSuchObjectException, InvalidOperationException, MetaException, TException {
  // Delegate to the catalog-aware overload; this variant never cascades.
  String catalogName = getDefaultCatalog(conf);
  dropDatabase(catalogName, name, deleteData, ignoreUnknownDb, false);
}
/**
 * Drops the database {@code name} from the default catalog.
 *
 * @param name database to drop
 * @param deleteData whether the database's data directory is removed as well
 * @param ignoreUnknownDb when true, a missing database is not an error
 * @param cascade when true, any tables in the database are dropped first
 */
@Override
public void dropDatabase(String name, boolean deleteData, boolean ignoreUnknownDb, boolean cascade)
    throws NoSuchObjectException, InvalidOperationException, MetaException, TException {
  // Delegate to the catalog-aware overload, forwarding all flags unchanged.
  String catalogName = getDefaultCatalog(conf);
  dropDatabase(catalogName, name, deleteData, ignoreUnknownDb, cascade);
}
/**
 * Drops the database {@code name} from the default catalog, deleting its data
 * and failing if the database does not exist (no cascade).
 *
 * @param name database to drop
 * @throws NoSuchObjectException if the database does not exist
 * @throws InvalidOperationException if the database cannot be dropped
 * @throws MetaException on a metastore-side failure
 * @throws TException on a Thrift transport failure
 * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#drop_database(java.lang.String, boolean, boolean)
 */
@Override
public void dropDatabase(String name)
    throws NoSuchObjectException, InvalidOperationException, MetaException, TException {
  // deleteData=true, ignoreUnknownDb=false, cascade=false.
  String catalogName = getDefaultCatalog(conf);
  dropDatabase(catalogName, name, true, false, false);
}
/**
 * Tears down the test fixture: drops the test table and database, then closes
 * the metastore client. Any failure is logged and rethrown.
 */
@Override
protected void tearDown() throws Exception {
  try {
    super.tearDown();
    client.dropTable(dbName, tblName);
    client.dropDatabase(dbName);
    client.close();
  } catch (Throwable e) {
    LOG.error("Unable to close metastore", e);
    // Include a descriptive message rather than wrapping the bare cause
    // (new Exception(e) would surface only the cause's toString()).
    throw new Exception("Unable to close metastore", e);
  }
}
/**
 * Shuts down the test metastore state: drops the test database (data deleted,
 * unknown DB ignored, cascade).
 */
@AfterClass
public static void tearDown() throws Exception {
  LOG.info("Shutting down metastore.");
  // try-with-resources: the previous code leaked the client (it was never
  // closed). HiveMetaStoreClient is AutoCloseable.
  try (HiveMetaStoreClient hmsc = new HiveMetaStoreClient(conf)) {
    hmsc.dropDatabase(dbName, true, true, true);
  }
}
private static void silentDropDatabase(String dbName) throws TException { try { for (String tableName : client.getTables(dbName, "*")) { client.dropTable(dbName, tableName); } client.dropDatabase(dbName); } catch (NoSuchObjectException|InvalidOperationException e) { // NOP } }
/**
 * Verifies that normal create/drop operations complete within a short (250 ms)
 * server-side test timeout, i.e. no spurious timeout is raised.
 */
@Test
public void testNoTimeout() throws Exception {
  HiveMetaStore.TEST_TIMEOUT_VALUE = 250;
  try {
    String dbName = "db";
    client.dropDatabase(dbName, true, true);
    new DatabaseBuilder()
        .setName(dbName)
        .create(client, conf);
    client.dropDatabase(dbName, true, true);
  } finally {
    // Restore the default so the shortened timeout does not leak into other
    // tests in this class (the original code left 250 ms in place).
    HiveMetaStore.TEST_TIMEOUT_VALUE = 1;
  }
}
/**
 * Class-level cleanup: drops the test database (delete data, ignore unknown,
 * cascade) through a short-lived client that is closed automatically.
 */
@AfterClass
public static void tearDown() throws Exception {
  try (HiveMetaStoreClient cleanupClient = createClient()) {
    // deleteData=true, ignoreUnknownDb=true, cascade=true.
    cleanupClient.dropDatabase(dbName, true, true, true);
  }
}
/**
 * Best-effort cleanup: drops every table in {@code dbName} and then the
 * database itself, ignoring the case where the database is already absent.
 */
private static void silentDropDatabase(String dbName) throws TException {
  try {
    for (String tableName : client.getTables(dbName, "*")) {
      client.dropTable(dbName, tableName);
    }
    client.dropDatabase(dbName);
  } catch (NoSuchObjectException | InvalidOperationException ignored) {
    // Multi-catch replaces the two duplicated empty catch blocks; the database
    // was already gone, so there is nothing to clean up.
  }
}
private static void clearAndRecreateDB(HiveMetaStoreClient hmsc) throws Exception { hmsc.dropDatabase(dbName, true, // Delete data. true, // Ignore unknownDB. true // Cascade. ); new DatabaseBuilder() .setName(dbName) .create(hmsc, conf); }
/**
 * Starts a fresh metastore with dummy event listeners installed and a clean
 * test database. Statement order matters: the listener system properties must
 * be set before the metastore is started, and the client is created only after
 * startup succeeds.
 */
@Before
public void setUp() throws Exception {
  // Install the dummy listeners before the metastore reads its configuration.
  System.setProperty("hive.metastore.event.listeners",
      DummyListener.class.getName());
  System.setProperty("hive.metastore.pre.event.listeners",
      DummyPreListener.class.getName());
  conf = MetastoreConf.newMetastoreConf();
  MetastoreConf.setVar(conf, ConfVars.PARTITION_NAME_WHITELIST_PATTERN, metaConfVal);
  MetastoreConf.setLongVar(conf, ConfVars.THRIFT_CONNECTION_RETRIES, 3);
  MetastoreConf.setBoolVar(conf, ConfVars.HIVE_SUPPORT_CONCURRENCY, false);
  MetaStoreTestUtils.setConfForStandloneMode(conf);
  MetaStoreTestUtils.startMetaStoreWithRetry(HadoopThriftAuthBridge.getBridge(), conf);
  msc = new HiveMetaStoreClient(conf);
  // Ensure a clean slate: deleteData=true, ignoreUnknownDb=true, cascade=true.
  msc.dropDatabase(dbName, true, true, true);
  // Clear events captured by any previous test so assertions see only this run.
  DummyListener.notifyList.clear();
  DummyPreListener.notifyList.clear();
}
/** Verifies a database created through the client can be dropped by name. */
@Test
public void testIpAddress() throws Exception {
  Database created = new DatabaseBuilder()
      .setName("testIpAddressIp")
      .create(msc, conf);
  msc.dropDatabase(created.getName());
}
}
/** * Test table objects can be retrieved in batches */ @Test public void testGetTableObjects() throws Exception { String dbName = "db"; List<String> tableNames = Arrays.asList("table1", "table2", "table3", "table4", "table5"); // Setup silentDropDatabase(dbName); new DatabaseBuilder() .setName(dbName) .create(client, conf); for (String tableName : tableNames) { createTable(dbName, tableName); } createMaterializedView(dbName, "mv1", Sets.newHashSet("db.table1", "db.table2")); // Test List<Table> tableObjs = client.getTableObjectsByName(dbName, tableNames); // Verify assertEquals(tableNames.size(), tableObjs.size()); for(Table table : tableObjs) { assertTrue(tableNames.contains(table.getTableName().toLowerCase())); } // Cleanup client.dropDatabase(dbName, true, true, true); }
client.dropDatabase(dbName1, true, true, true); } catch (Exception e) { exceptionFound = true; client.dropDatabase(dbName2, true, true, true); client.dropDatabase(dbName1, true, true, true);
client.dropDatabase(dbName, true, true); Database db = new DatabaseBuilder() .setName(dbName) Assert.fail("should not throw timeout exception: " + e.getMessage()); client.dropDatabase(dbName, true, true); client.dropDatabase(dbName, true, true); client.setMetaConf(ConfVars.CLIENT_SOCKET_TIMEOUT.getVarname(), "10s");
@Test public void testTimeout() throws Exception { HiveMetaStore.TEST_TIMEOUT_VALUE = 2 * 1000; String dbName = "db"; client.dropDatabase(dbName, true, true); Database db = new DatabaseBuilder() .setName(dbName) .build(conf); try { client.createDatabase(db); Assert.fail("should throw timeout exception."); } catch (MetaException e) { Assert.assertTrue("unexpected MetaException", e.getMessage().contains("Timeout when " + "executing method: create_database")); } // restore HiveMetaStore.TEST_TIMEOUT_VALUE = 1; }
/**
 * Verifies that a database's location URI can be changed via alterDatabase:
 * creates a DB at a default location, checks it, alters the location, and
 * checks the new value before dropping the DB.
 */
@Test
public void testDBLocationChange() throws IOException, TException {
  final String dbName = "alterDbLocation";
  String defaultUri = MetastoreConf.getVar(conf, ConfVars.WAREHOUSE) + "/default_location.db";
  String newUri = MetastoreConf.getVar(conf, ConfVars.WAREHOUSE) + "/new_location.db";

  new DatabaseBuilder()
      .setName(dbName)
      .setLocation(defaultUri)
      .create(client, conf);
  Database db = client.getDatabase(dbName);

  assertEquals("Incorrect default location of the database",
      warehouse.getDnsPath(new Path(defaultUri)).toString(),
      db.getLocationUri());

  db.setLocationUri(newUri);
  client.alterDatabase(dbName, db);

  db = client.getDatabase(dbName);
  assertEquals("Incorrect new location of the database",
      warehouse.getDnsPath(new Path(newUri)).toString(),
      db.getLocationUri());

  // Cleanup. The original additionally called silentDropDatabase(dbName)
  // right after this explicit drop, which was a redundant no-op.
  client.dropDatabase(dbName);
}
/**
 * Starts a metastore with the dummy event listener installed and prepares the
 * shared table/partition fixtures. Statement order matters: the listener
 * system property must be set before the metastore is started, and the client
 * is created only after startup succeeds.
 */
@Before
public void setUp() throws Exception {
  // Install the dummy listener before the metastore reads its configuration.
  System.setProperty("hive.metastore.event.listeners",
      DummyListener.class.getName());
  conf = MetastoreConf.newMetastoreConf();
  MetastoreConf.setLongVar(conf, ConfVars.THRIFT_CONNECTION_RETRIES, 3);
  MetastoreConf.setBoolVar(conf, ConfVars.HIVE_SUPPORT_CONCURRENCY, false);
  MetaStoreTestUtils.setConfForStandloneMode(conf);
  MetaStoreTestUtils.startMetaStoreWithRetry(HadoopThriftAuthBridge.getBridge(), conf);
  msc = new HiveMetaStoreClient(conf);
  // Clean slate: deleteData=true, ignoreUnknownDb=true (no cascade here).
  msc.dropDatabase(dbName, true, true);
  // Environment context carrying a fake submitting user for listener checks.
  Map<String, String> envProperties = new HashMap<>();
  envProperties.put("hadoop.job.ugi", "test_user");
  envContext = new EnvironmentContext(envProperties);
  db.setName(dbName);
  db.setCatalogName(DEFAULT_CATALOG_NAME);
  // Test table: one partition column "b"; note column "b" appears both as a
  // partition column and a data column here -- NOTE(review): looks intentional
  // for this fixture, but worth confirming against the assertions that use it.
  table = new TableBuilder()
      .setDbName(dbName)
      .setTableName(tblName)
      .addTableParam("a", "string")
      .addPartCol("b", "string")
      .addCol("a", "string")
      .addCol("b", "string")
      .build(conf);
  // Single partition with value "2011" for partition column "b".
  partition = new PartitionBuilder()
      .inTable(table)
      .addValue("2011")
      .build(conf);
  // Clear events captured by previous tests so assertions see only this run.
  DummyListener.notifyList.clear();
}
assertTrue("second database is not " + TEST_DB2_NAME, dbs.contains(TEST_DB2_NAME)); client.dropDatabase(TEST_DB1_NAME); client.dropDatabase(TEST_DB2_NAME); silentDropDatabase(TEST_DB1_NAME); silentDropDatabase(TEST_DB2_NAME);
client = createClient(conf); client.dropDatabase(DBNAME1, true, true, true); client.dropDatabase(DBNAME2, true, true, true); Database db1 = new DatabaseBuilder() .setName(DBNAME1)