public static boolean isPartitionLevelStats(ASTNode tree) {
  boolean isPartitioned = false;
  ASTNode child = (ASTNode) tree.getChild(0);
  if (child.getChildCount() > 1) {
    child = (ASTNode) child.getChild(1);
    if (child.getToken().getType() == HiveParser.TOK_PARTSPEC) {
      isPartitioned = true;
    }
  }
  return isPartitioned;
}
protected ASTNode findLHSofAssignment(ASTNode assignment) {
  assert assignment.getToken().getType() == HiveParser.EQUAL :
      "Expected set assignments to use equals operator but found " + assignment.getName();
  ASTNode tableOrColTok = (ASTNode) assignment.getChildren().get(0);
  assert tableOrColTok.getToken().getType() == HiveParser.TOK_TABLE_OR_COL :
      "Expected left side of assignment to be table or column";
  ASTNode colName = (ASTNode) tableOrColTok.getChildren().get(0);
  assert colName.getToken().getType() == HiveParser.Identifier : "Expected column name";
  return colName;
}
public String getTokenErrorDisplay(Token t) {
  if (!verboseErrors) {
    String s = t.getText();
    if (s == null) {
      if (t.getType() == Token.EOF) {
        s = "<EOF>";
      } else {
        s = "<" + tokenNames[t.getType()] + ">";
      }
    }
    return s;
  }
  // Verbose form: full token dump with index, character range, escaped text, type, channel and position.
  CommonToken ct = (CommonToken) t;
  String channelStr = ",channel=" + t.getChannel();
  String txt = t.getText();
  if (txt != null) {
    txt = txt.replaceAll("\n", "\\\\n");
  } else {
    txt = "<no text>";
  }
  return "[@" + t.getTokenIndex() + "," + ct.getStartIndex() + ":" + ct.getStopIndex()
      + "='" + txt + "',<" + tokenNames[t.getType()] + ">" + channelStr + ","
      + t.getLine() + ":" + t.getCharPositionInLine() + "]";
}
private String getQueryStringFromAst(ASTNode ast) {
  StringBuilder sb = new StringBuilder();
  int startIdx = ast.getTokenStartIndex();
  int endIdx = ast.getTokenStopIndex();
  boolean queryNeedsQuotes = true;
  if (conf.getVar(ConfVars.HIVE_QUOTEDID_SUPPORT).equals("none")) {
    queryNeedsQuotes = false;
  }
  for (int idx = startIdx; idx <= endIdx; idx++) {
    Token curTok = ctx.getTokenRewriteStream().get(idx);
    if (curTok.getType() == Token.EOF) {
      continue;
    } else if (queryNeedsQuotes && curTok.getType() == HiveLexer.Identifier) {
      // The tokens have no distinction between Identifiers and QuotedIdentifiers.
      // Ugly solution is just to surround all identifiers with quotes.
      sb.append('`');
      // Re-escape any backtick (`) characters in the identifier.
      sb.append(curTok.getText().replaceAll("`", "``"));
      sb.append('`');
    } else {
      sb.append(curTok.getText());
    }
  }
  return sb.toString();
}
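// A minimal, hypothetical illustration of the re-quoting rule used above: an identifier
// is wrapped in backticks and any embedded backtick is doubled, so the rewritten query
// still lexes as a single quoted identifier. The identifier value is made up.
String ident = "weird`name";
String quoted = "`" + ident.replaceAll("`", "``") + "`";
System.out.println(quoted); // prints: `weird``name`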
if (ast.getChildCount() > 0 && HiveParser.TOK_CTE == ((ASTNode) ast.getChild(0)).getToken().getType()) {
  ASTNode cte = (ASTNode) ast.getChild(0);
  for (int index = cte.getChildCount() - 1; index >= 0; index--) {
    ASTNode subq = (ASTNode) cte.getChild(index);
    String alias = unescapeIdentifier(subq.getChild(1).getText());
    if (cteAlias.contains(alias)) {
      throw new SemanticException("Duplicate definition of " + alias);
    } else {
      cteAlias.add(alias);
      walkASTAndQualifyNames(subq, cteAlias, ctx, db, ignoredTokens, unparseTranslator);
    }
  }
  // Walk the rest of the query, skipping the CTE node itself.
  for (int index = 1; index < ast.getChildCount(); index++) {
    walkASTAndQualifyNames((ASTNode) ast.getChild(index), cteAlias, ctx, db, ignoredTokens,
        unparseTranslator);
  }
}
String str = null;
switch (expr.getToken().getType()) {
case HiveParser.StringLiteral:
  str = BaseSemanticAnalyzer.unescapeSQLString(expr.getText());
  break;
case HiveParser.TOK_CHARSETLITERAL:
  str = BaseSemanticAnalyzer.charSetString(expr.getChild(0).getText(), expr.getChild(1).getText());
  break;
default:
  // Other token types leave str as null.
  break;
}
private void analyzeAlterTableAddConstraint(ASTNode ast, String tableName) throws SemanticException {
  ASTNode parent = (ASTNode) ast.getParent();
  ASTNode child = (ASTNode) ast.getChild(0);
  List<SQLPrimaryKey> primaryKeys = new ArrayList<SQLPrimaryKey>();
  List<SQLForeignKey> foreignKeys = new ArrayList<SQLForeignKey>();
  if (child.getToken().getType() == HiveParser.TOK_PRIMARY_KEY) {
    BaseSemanticAnalyzer.processPrimaryKeys(parent, child, primaryKeys);
  } else if (child.getToken().getType() == HiveParser.TOK_FOREIGN_KEY) {
    BaseSemanticAnalyzer.processForeignKeys(parent, child, foreignKeys);
  }
  AlterTableDesc alterTblDesc = new AlterTableDesc(tableName, primaryKeys, foreignKeys);
  rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), alterTblDesc), conf));
}
/**
 * Process the STORED AS DIRECTORIES clause.
 *
 * @param child the AST node for the clause
 * @return true if the table is stored as directories
 */
protected boolean analyzeStoredAdDirs(ASTNode child) {
  boolean storedAsDirs = false;
  if ((child.getChildCount() == 3)
      && (((ASTNode) child.getChild(2)).getToken().getType() == HiveParser.TOK_STOREDASDIRS)) {
    storedAsDirs = true;
  }
  return storedAsDirs;
}
if (ast.getChildCount() > 0 && HiveParser.TOK_CTE == ((ASTNode) ast.getChild(0)).getToken().getType()) {
  ASTNode cte = (ASTNode) ast.getChild(0);
  for (int index = cte.getChildCount() - 1; index >= 0; index--) {
    ASTNode subq = (ASTNode) cte.getChild(index);
    String alias = unescapeIdentifier(subq.getChild(1).getText());
    if (cteAlias.contains(alias)) {
      throw new SemanticException("Duplicate definition of " + alias);
    } else {
      cteAlias.add(alias);
      walkASTMarkTABREF(tableMask, subq, cteAlias, ctx, db, tabNameToTabObject, ignoredTokens);
    }
  }
  // Walk the rest of the query, skipping the CTE node itself.
  for (int index = 1; index < ast.getChildCount(); index++) {
    walkASTMarkTABREF(tableMask, (ASTNode) ast.getChild(index), cteAlias, ctx, db,
        tabNameToTabObject, ignoredTokens);
  }
}

// Re-parse the rewritten query once the masking translations have been applied.
try {
  rewrittenTree = ParseUtils.parse(rewrittenQuery);
} catch (ParseException e) {
  throw new SemanticException(e);
}
private void addSetRCols(ASTNode node, Set<String> setRCols) {
  // See if this node is a TOK_TABLE_OR_COL. If so, find the value and put it in the list.
  // If not, recurse on any children.
  if (node.getToken().getType() == HiveParser.TOK_TABLE_OR_COL) {
    ASTNode colName = (ASTNode) node.getChildren().get(0);
    assert colName.getToken().getType() == HiveParser.Identifier : "Expected column name";
    setRCols.add(normalizeColName(colName.getText()));
  } else if (node.getChildren() != null) {
    for (Node n : node.getChildren()) {
      addSetRCols((ASTNode) n, setRCols);
    }
  }
}
private boolean shouldRewrite(ASTNode tree) {
  boolean rwt = false;
  if (tree.getChildCount() > 1) {
    ASTNode child0 = (ASTNode) tree.getChild(0);
    ASTNode child1;
    if (child0.getToken().getType() == HiveParser.TOK_TAB) {
      child0 = (ASTNode) child0.getChild(0);
      if (child0.getToken().getType() == HiveParser.TOK_TABNAME) {
        child1 = (ASTNode) tree.getChild(1);
        if (child1.getToken().getType() == HiveParser.KW_COLUMNS) {
          rwt = true;
        }
      }
    }
  }
  return rwt;
}
private static List<TokenLocation> buildApexDocTokenLocations(String source) {
    ANTLRStringStream stream = new ANTLRStringStream(source);
    ApexLexer lexer = new ApexLexer(stream);

    ArrayList<TokenLocation> tokenLocations = new ArrayList<>();
    int startIndex = 0;
    Token token = lexer.nextToken();
    int endIndex = lexer.getCharIndex();
    while (token.getType() != Token.EOF) {
        if (token.getType() == ApexLexer.BLOCK_COMMENT) {
            // Filter only block comments starting with "/**".
            if (token.getText().startsWith("/**")) {
                tokenLocations.add(new TokenLocation(startIndex, token.getText()));
            }
        }
        // TODO: Check other non-doc comments and tokens of type ApexLexer.EOL_COMMENT
        // for "NOPMD" suppressions.
        startIndex = endIndex;
        token = lexer.nextToken();
        endIndex = lexer.getCharIndex();
    }
    return tokenLocations;
}
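// For context, a minimal sketch of a TokenLocation holder compatible with the call above.
// This is an assumption about its shape, not the actual class: all the method needs is the
// character offset where the comment starts and the raw comment text.
final class TokenLocation {
    final int index;
    final String text;

    TokenLocation(int index, String text) {
        this.index = index;
        this.text = text;
    }
}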
try {
  Token t = lexer.nextToken();
  while (t.getType() != EOF) {
    switch (t.getType()) {
    case COMMENT:
    case LITERAL:
    case QUOTED_TEXT:
    case DOUBLE_QUOTED_TEXT:
      b.append(t.getText());
      break;
    case DEFINE:
      // Strip the delimiters around the key and substitute its value from the context
      // (the append is assumed; the original snippet is truncated here).
      String text = t.getText();
      String key = text.substring(1, text.length() - 1);
      Object value = ctx.getAttribute(key);
      if (value != null) {
        b.append(value);
      }
      break;
    case ESCAPED_TEXT:
      b.append(t.getText().substring(1));
      break;
    default:
      break;
    }
    t = lexer.nextToken();
  }
} catch (Exception e) {
  // The original catch clause is not part of this fragment; rethrowing is an assumption.
  throw new RuntimeException(e);
}
/**
 * Check whether this is an ANALYZE ... COMPUTE STATISTICS NOSCAN command.
 *
 * @param tree the AST of the command
 */
private void checkNoScan(ASTNode tree) {
  if (tree.getChildCount() > 1) {
    ASTNode child0 = (ASTNode) tree.getChild(0);
    ASTNode child1;
    if (child0.getToken().getType() == HiveParser.TOK_TAB) {
      child0 = (ASTNode) child0.getChild(0);
      if (child0.getToken().getType() == HiveParser.TOK_TABNAME) {
        child1 = (ASTNode) tree.getChild(1);
        if (child1.getToken().getType() == HiveParser.KW_NOSCAN) {
          this.noscan = true;
        }
      }
    }
  }
}