private MetaException metaException(HiveException e) {
  MetaException ex = new MetaException(e.getMessage());
  ex.initCause(e);
  return ex;
}
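// A hedged usage sketch (listPartitionNames and doListPartitionNames are
// hypothetical, not from the original source): the wrapper lets
// metastore-facing methods declare only MetaException while the full
// HiveException chain stays reachable through getCause().
private List<String> listPartitionNames(String dbName, String tableName) throws MetaException {
  try {
    return doListPartitionNames(dbName, tableName); // assumed to throw HiveException
  } catch (HiveException e) {
    throw metaException(e);
  }
}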
private static HiveException handlePoolException(ExecutorService pool, Exception e) {
  HiveException he = null;
  if (e instanceof HiveException) {
    he = (HiveException) e;
    if (he.getCanonicalErrorMsg() != ErrorMsg.GENERIC_ERROR) {
      if (he.getCanonicalErrorMsg() == ErrorMsg.UNRESOLVED_RT_EXCEPTION) {
        LOG.error("Failed to move: {}", he.getMessage());
      } else {
        LOG.error("Failed to move: {}", he.getRemoteErrorMsg());
      }
    }
  } else {
    LOG.error("Failed to move: {}", e.getMessage());
    he = new HiveException(e.getCause());
  }
  pool.shutdownNow();
  return he;
}
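// A hedged call-site sketch (waitForMoves and the Future<Void> payload are
// illustrative assumptions, not the actual Hive API): handlePoolException is
// meant to be invoked while draining futures of work submitted to the pool.
private void waitForMoves(ExecutorService pool, List<Future<Void>> futures)
    throws HiveException {
  for (Future<Void> future : futures) {
    try {
      future.get(); // surfaces failures from the worker threads
    } catch (Exception e) {
      // logs, shuts the pool down, and wraps everything as one HiveException
      throw handlePoolException(pool, e);
    }
  }
  pool.shutdown();
}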
@Override
public boolean filterPartitionsByExpr(List<String> partColumnNames,
    List<PrimitiveTypeInfo> partColumnTypeInfos, byte[] exprBytes,
    String defaultPartitionName, List<String> partitionNames) throws MetaException {
  ExprNodeGenericFuncDesc expr = deserializeExpr(exprBytes);
  try {
    long startTime = System.nanoTime();
    long len = partitionNames.size();
    boolean result = PartitionPruner.prunePartitionNames(
        partColumnNames, partColumnTypeInfos, expr, defaultPartitionName, partitionNames);
    double timeMs = (System.nanoTime() - startTime) / 1000000.0;
    LOG.debug("Pruning " + len + " partition names took " + timeMs + "ms");
    return result;
  } catch (HiveException ex) {
    LOG.error("Failed to apply the expression", ex);
    throw new MetaException(ex.getMessage());
  }
}
@Nullable
private HiveStorageHandler createStorageHandler(org.apache.hadoop.hive.metastore.api.Table tbl)
    throws MetaException {
  try {
    if (tbl == null) {
      return null;
    }
    return HiveUtils.getStorageHandler(conf, tbl.getParameters().get(META_TABLE_STORAGE));
  } catch (HiveException ex) {
    LOG.error(StringUtils.stringifyException(ex));
    throw new MetaException("Failed to load storage handler: " + ex.getMessage());
  }
}
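// A hedged usage sketch (isNonNative is a hypothetical helper): the result is
// null when the table carries no META_TABLE_STORAGE property, so callers must
// be prepared for the native-table case.
private boolean isNonNative(org.apache.hadoop.hive.metastore.api.Table tbl)
    throws MetaException {
  return createStorageHandler(tbl) != null;
}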
public void getMaterializationMetadata(QB qb) throws SemanticException {
  try {
    gatherCTEReferences(qb, rootClause);
    int threshold = HiveConf.getIntVar(conf, HiveConf.ConfVars.HIVE_CTE_MATERIALIZE_THRESHOLD);
    for (CTEClause cte : Sets.newHashSet(aliasToCTEs.values())) {
      if (threshold >= 0 && cte.reference >= threshold) {
        cte.materialize = true;
      }
    }
  } catch (HiveException e) {
    // Has to use the fully qualified name to avoid a conflict with
    // org.apache.commons.lang.StringUtils
    LOG.error(org.apache.hadoop.util.StringUtils.stringifyException(e));
    if (e instanceof SemanticException) {
      throw (SemanticException) e;
    }
    throw new SemanticException(e.getMessage(), e);
  }
}
public void getMetaData(QB qb, boolean enableMaterialization) throws SemanticException {
  try {
    if (enableMaterialization) {
      getMaterializationMetadata(qb);
    }
    getMetaData(qb, null);
  } catch (HiveException e) {
    if (e instanceof SemanticException) {
      throw (SemanticException) e;
    }
    throw new SemanticException(e.getMessage(), e);
  }
}
public void getMetaData(QB qb, boolean enableMaterialization) throws SemanticException {
  try {
    if (enableMaterialization) {
      getMaterializationMetadata(qb);
    }
    getMetaData(qb, null);
  } catch (HiveException e) {
    // Has to use the fully qualified name to avoid a conflict with
    // org.apache.commons.lang.StringUtils
    LOG.error(org.apache.hadoop.util.StringUtils.stringifyException(e));
    if (e instanceof SemanticException) {
      throw (SemanticException) e;
    }
    throw new SemanticException(e.getMessage(), e);
  }
}
private void genAutoColumnStatsGatheringPipeline(QB qb, TableDesc table_desc,
    Map<String, String> partSpec, Operator curr, boolean isInsertInto) throws SemanticException {
  String tableName = table_desc.getTableName();
  Table table = null;
  try {
    table = db.getTable(tableName);
  } catch (HiveException e) {
    throw new SemanticException(e.getMessage(), e);
  }
  LOG.info("Generate an operator pipeline to autogather column stats for table " + tableName
      + " in query " + ctx.getCmd());
  ColumnStatsAutoGatherContext columnStatsAutoGatherContext =
      new ColumnStatsAutoGatherContext(this, conf, curr, table, partSpec, isInsertInto, ctx);
  columnStatsAutoGatherContext.insertAnalyzePipeline();
  columnStatsAutoGatherContexts.add(columnStatsAutoGatherContext);
}
@Override
protected void generatePredicate(NodeProcessorCtx procCtx, FilterOperator fop,
    TableScanOperator top) throws SemanticException, UDFArgumentException {
  LBOpPartitionWalkerCtx owc = (LBOpPartitionWalkerCtx) procCtx;
  Table tbl = top.getConf().getTableMetadata();
  if (tbl.isPartitioned()) {
    // Run the partition pruner to get the list of partitions.
    ParseContext parseCtx = owc.getParseContext();
    PrunedPartitionList prunedPartList;
    try {
      String alias = (String) parseCtx.getTopOps().keySet().toArray()[0];
      prunedPartList = PartitionPruner.prune(top, parseCtx, alias);
    } catch (HiveException e) {
      throw new SemanticException(e.getMessage(), e);
    }
    if (prunedPartList != null) {
      owc.setPartitions(prunedPartList);
    }
  }
}
@Override
public ParseContext transform(ParseContext pctx) throws SemanticException {
  parseContext = pctx;
  hiveConf = parseContext.getConf();
  try {
    hiveDb = Hive.get(hiveConf);
  } catch (HiveException e) {
    LOG.error(org.apache.hadoop.util.StringUtils.stringifyException(e));
    throw new SemanticException(e.getMessage(), e);
  }
  // Don't try to index-optimize the query that builds the index itself.
  HiveConf.setBoolVar(hiveConf, HiveConf.ConfVars.HIVEOPTINDEXFILTER, false);
  // Check whether the input query passes all the tests to be eligible for a
  // rewrite. If yes, rewrite the original query; else return the current
  // parseContext unchanged.
  if (shouldApplyOptimization()) {
    LOG.info("Rewriting Original Query using " + getName() + " optimization.");
    rewriteOriginalQuery();
  }
  return parseContext;
}
private void checkHiveException(SparkSessionImpl ss, Throwable e, ErrorMsg expectedErrMsg,
    String expectedMatchedStr) {
  HiveException he = ss.getHiveException(e);
  assertEquals(expectedErrMsg, he.getCanonicalErrorMsg());
  if (expectedMatchedStr != null) {
    assertTrue(he.getMessage().contains(expectedMatchedStr));
  }
}
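// A hedged usage sketch: the failure scenario and the expected
// RuntimeException-to-GENERIC_ERROR mapping are illustrative assumptions, not
// facts taken from SparkSessionImpl's actual tests.
@Test
public void testGenericExceptionMapping() {
  // ss is assumed to be a SparkSessionImpl field initialized by the fixture
  checkHiveException(ss, new RuntimeException("spark client died"),
      ErrorMsg.GENERIC_ERROR, "spark client died");
}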
/**
 * @param throwException if false, return null if the table doesn't exist, else throw
 */
protected static Table getTable(ASTNode tabRef, Hive db, boolean throwException)
    throws SemanticException {
  String[] tableName;
  switch (tabRef.getType()) {
  case HiveParser.TOK_TABREF:
    tableName = getQualifiedTableName((ASTNode) tabRef.getChild(0));
    break;
  case HiveParser.TOK_TABNAME:
    tableName = getQualifiedTableName(tabRef);
    break;
  default:
    throw raiseWrongType("TOK_TABREF|TOK_TABNAME", tabRef);
  }
  Table mTable;
  try {
    mTable = db.getTable(tableName[0], tableName[1], throwException);
  } catch (InvalidTableException e) {
    LOG.error("Failed to find table " + getDotName(tableName)
        + " got exception " + e.getMessage());
    throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(getDotName(tableName)), e);
  } catch (HiveException e) {
    LOG.error("Failed to find table " + getDotName(tableName)
        + " got exception " + e.getMessage());
    throw new SemanticException(e.getMessage(), e);
  }
  return mTable;
}
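// A hedged call-site sketch (resolveTargetTable and the AST shape are
// illustrative): resolve the statement's target table, failing fast with a
// SemanticException when it does not exist because throwException is true.
private Table resolveTargetTable(ASTNode ast, Hive db) throws SemanticException {
  return getTable((ASTNode) ast.getChild(0), db, true);
}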
public void testInvalidArgumentsLength() throws HiveException {
  try {
    GenericUDFEnforceConstraint udf = new GenericUDFEnforceConstraint();
    ObjectInspector valueOI1 = PrimitiveObjectInspectorFactory.writableBooleanObjectInspector;
    ObjectInspector valueOI2 = PrimitiveObjectInspectorFactory.writableBooleanObjectInspector;
    ObjectInspector[] arguments = {valueOI1, valueOI2};
    udf.initialize(arguments);
    fail("Unreachable line");
  } catch (HiveException e) {
    // a HiveException is expected
    assertTrue(e.getMessage().contains("Invalid number of arguments"));
  }
}
private CommandProcessorResponse handleHiveException(HiveException e, int ret, String rootMsg)
    throws CommandProcessorResponse {
  errorMessage = "FAILED: Hive Internal Error: " + Utilities.getNameMessage(e);
  if (rootMsg != null) {
    errorMessage += "\n" + rootMsg;
  }
  SQLState = e.getCanonicalErrorMsg() != null ?
      e.getCanonicalErrorMsg().getSQLState() : ErrorMsg.findSQLState(e.getMessage());
  downstreamError = e;
  console.printError(errorMessage + "\n"
      + org.apache.hadoop.util.StringUtils.stringifyException(e));
  throw createProcessorResponse(ret);
}

private boolean requiresLock() {
private CommandProcessorResponse handleHiveException(HiveException e, int ret, String rootMsg) {
  errorMessage = "FAILED: Hive Internal Error: " + Utilities.getNameMessage(e);
  if (rootMsg != null) {
    errorMessage += "\n" + rootMsg;
  }
  SQLState = e.getCanonicalErrorMsg() != null ?
      e.getCanonicalErrorMsg().getSQLState() : ErrorMsg.findSQLState(e.getMessage());
  downstreamError = e;
  console.printError(errorMessage + "\n"
      + org.apache.hadoop.util.StringUtils.stringifyException(e));
  return createProcessorResponse(ret);
}

private boolean requiresLock() {
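// A hedged call-site sketch contrasting the two variants above (runInternal
// and compileAndRun are hypothetical, and 12 is an illustrative error code):
// the first variant can throw its result because CommandProcessorResponse
// extends Exception here; the second leaves propagation to the caller.
private void runInternal(String command) throws CommandProcessorResponse {
  try {
    compileAndRun(command); // stand-in for the real compile/execute steps
  } catch (HiveException e) {
    throw handleHiveException(e, 12, null);
  }
}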
protected void authorizeMetaGets(HiveOperationType opType, List<HivePrivilegeObject> inpObjs,
    String cmdString) throws HiveSQLException {
  SessionState ss = SessionState.get();
  HiveAuthzContext.Builder ctxBuilder = new HiveAuthzContext.Builder();
  ctxBuilder.setUserIpAddress(ss.getUserIpAddress());
  ctxBuilder.setForwardedAddresses(ss.getForwardedAddresses());
  ctxBuilder.setCommandString(cmdString);
  try {
    ss.getAuthorizerV2().checkPrivileges(opType, inpObjs, null, ctxBuilder.build());
  } catch (HiveAuthzPluginException | HiveAccessControlException e) {
    throw new HiveSQLException(e.getMessage(), e);
  }
}
/**
 * Validate the noscan command.
 *
 * @param tree the AST of the ANALYZE ... NOSCAN statement
 * @throws SemanticException
 */
private void validateAnalyzeNoscan(ASTNode tree) throws SemanticException {
  // since it is noscan, the command carries the true table name
  String tableName = getUnescapedName((ASTNode) tree.getChild(0).getChild(0));
  Table tbl;
  try {
    tbl = this.getTableObjectByName(tableName);
  } catch (InvalidTableException e) {
    throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(tableName), e);
  } catch (HiveException e) {
    throw new SemanticException(e.getMessage(), e);
  }
  // noscan uses HDFS APIs to retrieve this information from the Namenode, which
  // is specific to HDFS. Through the storage-handler mechanism, the table could
  // live on any storage system (HBase, Cassandra, etc.), so give the user a
  // clear error message in that case.
  if (tbl.isNonNative()) {
    throw new SemanticException(
        ErrorMsg.ANALYZE_TABLE_NOSCAN_NON_NATIVE.getMsg(tbl.getTableName()));
  }
}
@Test
public void testgetDbTableName() throws HiveException {
  String tablename;
  String[] dbtab;
  SessionState.start(new HiveConf(this.getClass()));
  String curDefaultdb = SessionState.get().getCurrentDatabase();
  // test a table name without the db portion
  tablename = "tab1";
  dbtab = Utilities.getDbTableName(tablename);
  assertEquals("db name", curDefaultdb, dbtab[0]);
  assertEquals("table name", tablename, dbtab[1]);
  // test a table name with the db portion
  tablename = "dab1.tab1";
  dbtab = Utilities.getDbTableName(tablename);
  assertEquals("db name", "dab1", dbtab[0]);
  assertEquals("table name", "tab1", dbtab[1]);
  // test an invalid table name
  tablename = "dab1.tab1.x1";
  try {
    dbtab = Utilities.getDbTableName(tablename);
    fail("exception was expected for invalid table name");
  } catch (HiveException ex) {
    assertEquals("Invalid table name " + tablename, ex.getMessage());
  }
}