public Object getFieldValue(_Fields field) {
  switch (field) {
  case MESSAGE:
    return getMessage();
  }
  throw new IllegalStateException();
}
/**
 * Find the table being compacted.
 * @param ci compaction info returned from the compaction queue
 * @return metastore table
 * @throws org.apache.hadoop.hive.metastore.api.MetaException if the table cannot be found.
 */
protected Table resolveTable(CompactionInfo ci) throws MetaException {
  try {
    return rs.getTable(ci.dbname, ci.tableName);
  } catch (MetaException e) {
    LOG.error("Unable to find table " + ci.getFullTableName() + ", " + e.getMessage());
    throw e;
  }
}
@Override
Table resolveTable(CompactionInfo ci) throws MetaException {
  try {
    return rs.getTable(getDefaultCatalog(conf), ci.dbname, ci.tableName);
  } catch (MetaException e) {
    LOG.error("Unable to find table " + ci.getFullTableName() + ", " + e.getMessage());
    throw e;
  }
}
@Override
public Database getDatabase(String catalogName, String name) throws NoSuchObjectException {
  MetaException ex = null;
  Database db = null;
  try {
    db = getDatabaseInternal(catalogName, name);
  } catch (MetaException e) {
    // Signature restriction to NSOE, and NSOE being a flat exception, prevents us from
    // setting the cause of the NSOE as the MetaException. We should not lose the info
    // we got here, but it's very likely that the MetaException is irrelevant and is
    // actually an NSOE message, so we should log it and throw an NSOE with the msg.
    ex = e;
  }
  if (db == null) {
    LOG.warn("Failed to get database {}.{}, returning NoSuchObjectException", catalogName, name, ex);
    throw new NoSuchObjectException(name + (ex == null ? "" : (": " + ex.getMessage())));
  }
  return db;
}
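The net effect for callers is that any underlying MetaException surfaces only through the NoSuchObjectException message. A minimal sketch of a call site, assuming a RawStore-like object named store (the names here are illustrative, not from the codebase):

// Hypothetical caller, assuming a store object exposing the method above:
try {
  Database db = store.getDatabase("hive", "no_such_db");
} catch (NoSuchObjectException e) {
  // e.getMessage() is "no_such_db", plus ": <MetaException message>"
  // appended if a MetaException occurred underneath.
}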
/**
 * Creates the proxy used to evaluate expressions. This is here to prevent circular
 * dependency - ql -> metastore client <-> metastore server -> ql. If server and
 * client are split, this can be removed.
 * @param conf Configuration.
 * @return The partition expression proxy.
 */
private static PartitionExpressionProxy createExpressionProxy(Configuration conf) {
  String className = MetastoreConf.getVar(conf, ConfVars.EXPRESSION_PROXY_CLASS);
  try {
    Class<? extends PartitionExpressionProxy> clazz =
        JavaUtils.getClass(className, PartitionExpressionProxy.class);
    return JavaUtils.newInstance(clazz, new Class<?>[0], new Object[0]);
  } catch (MetaException e) {
    LOG.error("Error loading PartitionExpressionProxy", e);
    throw new RuntimeException("Error loading PartitionExpressionProxy: " + e.getMessage());
  }
}
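Since the proxy class is resolved purely from configuration, a custom implementation can be swapped in through the same key. A minimal sketch, assuming a hypothetical implementation named com.example.MyExpressionProxy (not part of the codebase):

// Sketch: wire in a custom PartitionExpressionProxy before construction.
Configuration conf = MetastoreConf.newMetastoreConf();
MetastoreConf.setVar(conf, MetastoreConf.ConfVars.EXPRESSION_PROXY_CLASS,
    "com.example.MyExpressionProxy"); // assumed class name
PartitionExpressionProxy proxy = createExpressionProxy(conf);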
/**
 * Returns the archiving level, i.e. how many fields were set in the partial
 * specification that ARCHIVE was run for.
 */
public static int getArchivingLevel(Partition p) throws HiveException {
  try {
    return MetaStoreUtils.getArchivingLevel(p.getTPartition());
  } catch (MetaException ex) {
    throw new HiveException(ex.getMessage(), ex);
  }
}
private static IMetaStoreClient getHMS(HiveConf conf) {
  UserGroupInformation loggedInUser = null;
  try {
    loggedInUser = UserGroupInformation.getLoginUser();
  } catch (IOException e) {
    LOG.warn("Unable to get logged in user via UGI. err: {}", e.getMessage());
  }
  boolean secureMode = loggedInUser != null && loggedInUser.hasKerberosCredentials();
  if (secureMode) {
    MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.USE_THRIFT_SASL, true);
  }
  try {
    LOG.info("Creating metastore client for {}", "PreUpgradeTool");
    return RetryingMetaStoreClient.getProxy(conf, true);
  } catch (MetaException e) {
    throw new RuntimeException("Error connecting to Hive Metastore URI: "
        + conf.getVar(HiveConf.ConfVars.METASTOREURIS) + ". " + e.getMessage(), e);
  }
}
/**
 * Makes expression tree out of expr.
 * @param filter Filter.
 * @return Expression tree. Null if there was an error.
 */
private static ExpressionTree makeExpressionTree(String filter) throws MetaException {
  // TODO: ExprNodeDesc is an expression tree, we could just use that and be rid of Filter.g.
  if (filter == null || filter.isEmpty()) {
    return ExpressionTree.EMPTY_TREE;
  }
  LOG.debug("Filter specified is " + filter);
  ExpressionTree tree = null;
  try {
    tree = getFilterParser(filter).tree;
  } catch (MetaException ex) {
    LOG.info("Unable to make the expression tree from expression string ["
        + filter + "]: " + ex.getMessage()); // Don't log the stack, this is normal.
  }
  if (tree == null) {
    return null;
  }
  // We suspect that LIKE pushdown into JDO is invalid; see HIVE-5134. Check for like here.
  LikeChecker lc = new LikeChecker();
  tree.accept(lc);
  return lc.hasLike() ? null : tree;
}
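A null return is the signal to skip pushdown entirely. A minimal sketch of the contract from the caller's side (the filter string is an assumed example):

// Hypothetical call site illustrating the null-means-no-pushdown contract:
ExpressionTree tree = makeExpressionTree("part_col = \"2024-01-01\"");
if (tree == null) {
  // Parse failed or the filter uses LIKE: fall back to fetching all
  // partitions and filtering on the client instead of pushing down to JDO.
}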
private static IMetaStoreClient getHMS(HiveConf conf) {
  UserGroupInformation loggedInUser = null;
  try {
    loggedInUser = UserGroupInformation.getLoginUser();
  } catch (IOException e) {
    LOG.warn("Unable to get logged in user via UGI. err: {}", e.getMessage());
  }
  boolean secureMode = loggedInUser != null && loggedInUser.hasKerberosCredentials();
  if (secureMode) {
    conf.setBoolVar(HiveConf.ConfVars.METASTORE_USE_THRIFT_SASL, true);
  }
  try {
    LOG.info("Creating metastore client for {}", "PreUpgradeTool");
    /* I'd rather call RetryingMetaStoreClient.getProxy(conf, true), which calls
       HiveMetaStoreClient(HiveConf, Boolean). That constructor exists in (at least)
       2.1.0.2.6.5.0-292 and later, but not in 2.1.0.2.6.0.3-8 (the HDP 2.6 release),
       i.e. RetryingMetaStoreClient.getProxy(conf, true) is broken in 2.6.0. */
    return RetryingMetaStoreClient.getProxy(conf,
        new Class[]{HiveConf.class, HiveMetaHookLoader.class, Boolean.class},
        new Object[]{conf, getHookLoader(), Boolean.TRUE},
        null, HiveMetaStoreClient.class.getName());
  } catch (MetaException e) {
    throw new RuntimeException("Error connecting to Hive Metastore URI: "
        + conf.getVar(HiveConf.ConfVars.METASTOREURIS) + ". " + e.getMessage(), e);
  }
}
public static List<FieldSchema> getFieldsFromDeserializer(String name, Deserializer serde)
    throws HiveException {
  try {
    return HiveMetaStoreUtils.getFieldsFromDeserializer(name, serde);
  } catch (SerDeException e) {
    throw new HiveException("Error in getting fields from serde. " + e.getMessage(), e);
  } catch (MetaException e) {
    throw new HiveException("Error in getting fields from serde. " + e.getMessage(), e);
  }
}
@Override
public List<String> listDatabaseNamesByPattern(String pattern) throws HCatException {
  List<String> dbNames = null;
  try {
    dbNames = hmsClient.getDatabases(pattern);
  } catch (MetaException exp) {
    throw new HCatException("MetaException while listing db names. " + exp.getMessage(), exp);
  } catch (TException e) {
    throw new HCatException("Transport Exception while listing db names. " + e.getMessage(), e);
  }
  return dbNames;
}
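A minimal usage sketch through the public HCatClient API, assuming a configured HiveConf is available (the pattern is an illustrative example):

// Hypothetical usage of the pattern-based listing above:
HCatClient client = HCatClient.create(new Configuration(hiveConf));
List<String> matching = client.listDatabaseNamesByPattern("sales_*");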
public static List<FieldSchema> getFieldsFromDeserializer(String name, Deserializer serde)
    throws HiveException {
  try {
    return MetaStoreUtils.getFieldsFromDeserializer(name, serde);
  } catch (SerDeException e) {
    throw new HiveException("Error in getting fields from serde. " + e.getMessage(), e);
  } catch (MetaException e) {
    throw new HiveException("Error in getting fields from serde. " + e.getMessage(), e);
  }
}
private static IMetaStoreClient getMetaStoreClient(HiveEndPoint endPoint, HiveConf conf,
    boolean secureMode) throws ConnectionError {
  if (endPoint.metaStoreUri != null) {
    conf.setVar(HiveConf.ConfVars.METASTOREURIS, endPoint.metaStoreUri);
  }
  if (secureMode) {
    conf.setBoolVar(HiveConf.ConfVars.METASTORE_USE_THRIFT_SASL, true);
  }
  try {
    return HCatUtil.getHiveMetastoreClient(conf);
  } catch (MetaException | IOException e) {
    throw new ConnectionError("Error connecting to Hive Metastore URI: "
        + endPoint.metaStoreUri + ". " + e.getMessage(), e);
  }
}
} // class ConnectionImpl
public static ExpressionTree makeExpressionTree(PartitionExpressionProxy expressionProxy,
    byte[] expr, String defaultPartitionName) throws MetaException {
  // We will try pushdown first, so make the filter. This will also validate the expression;
  // if serialization fails we will throw an incompatible metastore error to the client.
  String filter = null;
  try {
    filter = expressionProxy.convertExprToFilter(expr, defaultPartitionName);
  } catch (MetaException ex) {
    // TODO MS-SPLIT - for now we have to construct this by reflection because
    // IMetaStoreClient can't be moved until after HiveMetaStore is moved, which
    // can't be moved until this is moved.
    Class<? extends MetaException> exClass = JavaUtils.getClass(
        "org.apache.hadoop.hive.metastore.IMetaStoreClient$IncompatibleMetastoreException",
        MetaException.class);
    throw JavaUtils.newInstance(exClass, new Class<?>[]{String.class},
        new Object[]{ex.getMessage()});
  }
  // Make a tree out of the filter.
  // TODO: this is all pretty ugly. The only reason we need all these transformations
  // is to maintain support for simple filters for HCat users that query the metastore.
  // If forcing everyone to use the thick client is out of the question, maybe we could
  // parse the filter into standard Hive expressions rather than all this separate
  // Filter.g stuff. That way this method and ...ByFilter would just be merged.
  return PartFilterExprUtil.makeExpressionTree(filter);
}
public void alterDatabase(String dbName, Database db) throws HiveException {
  try {
    getMSC().alterDatabase(dbName, db);
  } catch (MetaException e) {
    throw new HiveException("Unable to alter database " + dbName + ". " + e.getMessage(), e);
  } catch (NoSuchObjectException e) {
    throw new HiveException("Database " + dbName + " does not exist.", e);
  } catch (TException e) {
    throw new HiveException("Unable to alter database " + dbName + ". " + e.getMessage(), e);
  }
}
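A minimal sketch of a call site, assuming a Hive session object named hive and an existing database (names and the property key are illustrative):

// Hypothetical fetch-modify-alter round trip:
Database db = hive.getDatabase("analytics");   // fetch the current definition
db.putToParameters("owner.team", "data-eng");  // tweak a property
hive.alterDatabase("analytics", db);           // push the change to the metastore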
@Override
public List<String> listTableNamesByPattern(String dbName, String tablePattern)
    throws HCatException {
  List<String> tableNames = null;
  try {
    tableNames = hmsClient.getTables(checkDB(dbName), tablePattern);
  } catch (MetaException e) {
    throw new HCatException("MetaException while fetching table names. " + e.getMessage(), e);
  } catch (UnknownDBException e) {
    throw new HCatException("UnknownDB " + dbName + " while fetching table names.", e);
  } catch (TException e) {
    throw new HCatException("Transport exception while fetching table names. "
        + e.getMessage(), e);
  }
  return tableNames;
}
@Test
public void testTimeout() throws Exception {
  HiveMetaStore.TEST_TIMEOUT_VALUE = 2 * 1000;
  String dbName = "db";
  client.dropDatabase(dbName, true, true);
  Database db = new DatabaseBuilder()
      .setName(dbName)
      .build(conf);
  try {
    client.createDatabase(db);
    Assert.fail("should throw timeout exception.");
  } catch (MetaException e) {
    Assert.assertTrue("unexpected MetaException",
        e.getMessage().contains("Timeout when executing method: create_database"));
  }
  // restore
  HiveMetaStore.TEST_TIMEOUT_VALUE = 1;
}
@Test
public void doubleAddPrimaryKey() throws TException {
  Table table = testTables[0];
  // Make sure get on a table with no key returns empty list
  PrimaryKeysRequest rqst = new PrimaryKeysRequest(table.getDbName(), table.getTableName());
  rqst.setCatName(table.getCatName());
  List<SQLPrimaryKey> fetched = client.getPrimaryKeys(rqst);
  Assert.assertTrue(fetched.isEmpty());

  // Single column unnamed primary key in default catalog and database
  List<SQLPrimaryKey> pk = new SQLPrimaryKeyBuilder()
      .onTable(table)
      .addColumn("col1")
      .build(metaStore.getConf());
  client.addPrimaryKey(pk);

  try {
    pk = new SQLPrimaryKeyBuilder()
        .onTable(table)
        .addColumn("col2")
        .build(metaStore.getConf());
    client.addPrimaryKey(pk);
    Assert.fail();
  } catch (MetaException e) {
    Assert.assertTrue(e.getMessage().contains("Primary key already exists for"));
  }
}