@Override
public OperationHandle getPrimaryKeys(String catalog, String schema, String table)
    throws HiveSQLException {
  acquire(true, true);
  OperationManager operationManager = getOperationManager();
  GetPrimaryKeysOperation operation =
      operationManager.newGetPrimaryKeysOperation(getSession(), catalog, schema, table);
  OperationHandle opHandle = operation.getHandle();
  try {
    addOpHandle(opHandle);
    operation.run();
    return opHandle;
  } catch (HiveSQLException e) {
    removeOpHandle(opHandle);
    operationManager.closeOperation(opHandle);
    throw e;
  } finally {
    release(true, true);
  }
}
@Override
public void close() throws HiveSQLException {
  try {
    acquire(true, false);
    // Cleanup session log directory and flush any Hive history output.
    cleanupSessionLogDir();
    HiveHistory hiveHist = sessionState.getHiveHistory();
    if (null != hiveHist) {
      hiveHist.closeStream();
    }
    sessionState.close();
  } catch (IOException ioe) {
    throw new HiveSQLException("Failure to close", ioe);
  } finally {
    if (sessionHive != null) {
      try {
        Hive.closeCurrent();
      } catch (Throwable t) {
        LOG.warn("Error closing thread local Hive", t);
      }
      sessionHive = null;
    }
    release(true, false);
  }
}
@Override
public void closeExpiredOperations() {
  OperationHandle[] handles;
  synchronized (opHandleSet) {
    handles = opHandleSet.toArray(new OperationHandle[opHandleSet.size()]);
  }
  if (handles.length > 0) {
    List<Operation> operations = operationManager.removeExpiredOperations(handles);
    if (!operations.isEmpty()) {
      closeTimedOutOperations(operations);
    }
  }
}
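// closeExpiredOperations() above copies opHandleSet under its monitor and then works on the
// snapshot, so the potentially slow close calls never run while the lock is held. A generic
// sketch of that copy-then-release pattern (illustrative names, not Hive code):
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.function.Consumer;

class HandleRegistry<T> {
  private final Set<T> live = new HashSet<>();

  void add(T handle) {
    synchronized (live) {
      live.add(handle);
    }
  }

  void drainTo(Consumer<T> closer) {
    List<T> snapshot;
    synchronized (live) { // hold the lock only long enough to copy and clear
      snapshot = new ArrayList<>(live);
      live.clear();
    }
    for (T handle : snapshot) { // slow close() calls run without the lock
      closer.accept(handle);
    }
  }
}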
@Override
public void cancelDelegationToken(HiveAuthFactory authFactory, String tokenStr)
    throws HiveSQLException {
  HiveAuthFactory.verifyProxyAccess(getUserName(), getUserFromToken(authFactory, tokenStr),
      getIpAddress(), getHiveConf());
  authFactory.cancelDelegationToken(tokenStr);
}
@Override
public String getDelegationToken(HiveAuthFactory authFactory, String owner, String renewer)
    throws HiveSQLException {
  HiveAuthFactory.verifyProxyAccess(getUserName(), owner, getIpAddress(), getHiveConf());
  return authFactory.getDelegationToken(owner, renewer, getIpAddress());
}
@Override
public void cancelDelegationToken(HiveAuthFactory authFactory, String tokenStr)
    throws HiveSQLException {
  HiveAuthFactory.verifyProxyAccess(getUsername(), getUserFromToken(authFactory, tokenStr),
      getIpAddress(), getHiveConf());
  authFactory.cancelDelegationToken(tokenStr);
}
@Override
public String getDelegationToken(HiveAuthFactory authFactory, String owner, String renewer)
    throws HiveSQLException {
  HiveAuthFactory.verifyProxyAccess(getUsername(), owner, getIpAddress(), getHiveConf());
  return authFactory.getDelegationToken(owner, renewer);
}
@Override
public void close() throws HiveSQLException {
  try {
    acquire(true);
    // Cleanup session log directory and pipeout file, and flush any Hive history output.
    cleanupSessionLogDir();
    cleanupPipeoutFile();
    HiveHistory hiveHist = sessionState.getHiveHistory();
    if (null != hiveHist) {
      hiveHist.closeStream();
    }
    sessionState.close();
  } catch (IOException ioe) {
    throw new HiveSQLException("Failure to close", ioe);
  } finally {
    release(true);
  }
}
@Override
public OperationHandle executeStatement(String statement, Map<String, String> confOverlay,
    long queryTimeout) throws HiveSQLException {
  return executeStatementInternal(statement, confOverlay, false, queryTimeout);
}
/**
 * Opens a new HiveServer2 session for the client connection. Creates a new SessionState object
 * that will be associated with this HiveServer2 session. When the server executes multiple
 * queries in the same session, this SessionState object is reused across those queries.
 * Note that if doAs is true, this call goes through a proxy object, which wraps the method
 * logic in a UserGroupInformation#doAs. That is why it is important to create the SessionState
 * here rather than in the constructor.
 */
@Override
public void open(Map<String, String> sessionConfMap) throws HiveSQLException {
  sessionState = new SessionState(hiveConf, username);
  sessionState.setUserIpAddress(ipAddress);
  sessionState.setIsHiveServerQuery(true);
  SessionState.start(sessionState);
  try {
    sessionState.reloadAuxJars();
  } catch (IOException e) {
    String msg = "Failed to load reloadable jar file path: " + e;
    LOG.error(msg, e);
    throw new HiveSQLException(msg, e);
  }
  // Process global init file: .hiverc
  processGlobalInitFile();
  if (sessionConfMap != null) {
    configureSession(sessionConfMap);
  }
  lastAccessTime = System.currentTimeMillis();
  lastIdleTime = lastAccessTime;
}
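// The doAs proxy mentioned in the Javadoc above is provided in HiveServer2 by HiveSessionProxy.
// The sketch below is a simplified, hypothetical reconstruction of that pattern (names and
// details are illustrative, not Hive's exact code): every interface call is re-dispatched
// inside UserGroupInformation#doAs so it executes with the connecting user's identity, which
// is why SessionState must be created inside open() rather than in the constructor.
import java.lang.reflect.InvocationHandler;
import java.lang.reflect.Method;
import java.lang.reflect.Proxy;
import java.security.PrivilegedExceptionAction;
import org.apache.hadoop.security.UserGroupInformation;

public class DoAsInvocationHandler implements InvocationHandler {
  private final Object target;
  private final UserGroupInformation ugi;

  public DoAsInvocationHandler(Object target, UserGroupInformation ugi) {
    this.target = target;
    this.ugi = ugi;
  }

  @Override
  public Object invoke(Object proxy, Method method, Object[] args) throws Throwable {
    // Run the real method as the proxied user; a SessionState created inside the call
    // therefore picks up that user's UGI. (Hive's real proxy additionally unwraps
    // InvocationTargetException to rethrow the original cause.)
    return ugi.doAs((PrivilegedExceptionAction<Object>) () -> method.invoke(target, args));
  }

  @SuppressWarnings("unchecked")
  public static <T> T wrap(Class<T> iface, T target, UserGroupInformation ugi) {
    return (T) Proxy.newProxyInstance(iface.getClassLoader(), new Class<?>[] { iface },
        new DoAsInvocationHandler(target, ugi));
  }
}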
private void closeTimedOutOperations(List<Operation> operations) {
  acquire(false, false);
  try {
    for (Operation operation : operations) {
      removeOpHandle(operation.getHandle());
      try {
        operation.close();
      } catch (Exception e) {
        LOG.warn("Exception thrown while closing timed-out operation; reported open_operations "
            + "metrics may be incorrect: " + operation.getHandle(), e);
      }
    }
  } finally {
    release(false, false);
  }
}
sessionState.setIsHiveServerQuery(true);
sessionState.setForwardedAddresses(SessionManager.getForwardedAddresses());
sessionState.setIsUsingThriftJDBCBinarySerDe(updateIsUsingThriftJDBCBinarySerDe());
try {
  if (sessionManager != null) {
    sessionHive = Hive.get(getHiveConf());
  }
} catch (HiveException e) {
  throw new HiveSQLException("Failed to get metastore connection", e);
}
processGlobalInitFile();
sessionConfMap = setFetchSize(sessionConfMap);
configureSession(sessionConfMap);
/**
 * Close the file systems for the session and remove them from the FileSystem cache.
 * Cancel the session's delegation token and close the metastore connection.
 */
@Override
public void close() throws HiveSQLException {
  try {
    acquire(true, false);
    cancelDelegationToken();
  } finally {
    release(true, false);
    try {
      super.close();
    } finally {
      try {
        FileSystem.closeAllForUGI(sessionUgi);
      } catch (IOException ioe) {
        throw new HiveSQLException("Could not clean up file-system handles for UGI: "
            + sessionUgi, ioe);
      }
    }
  }
}
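// FileSystem instances obtained inside ugi.doAs(...) are cached per UGI, so closing the
// session must purge that cache or handles leak. A minimal sketch of the lifecycle, assuming
// a kerberized, proxy-user setup; the host, user, and path names are purely illustrative:
import java.security.PrivilegedExceptionAction;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.UserGroupInformation;

public class UgiFsLifecycle {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    UserGroupInformation proxyUgi =
        UserGroupInformation.createProxyUser("alice", UserGroupInformation.getLoginUser());
    proxyUgi.doAs((PrivilegedExceptionAction<Void>) () -> {
      FileSystem fs = FileSystem.get(conf); // cached under proxyUgi's cache key
      fs.exists(new Path("/tmp"));          // any I/O performed on behalf of the proxied user
      return null;
    });
    // Session close: purge every FileSystem instance cached for this UGI in one call.
    FileSystem.closeAllForUGI(proxyUgi);
  }
}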
sessionState.setIsHiveServerQuery(true);
sessionState.setForwardedAddresses(SessionManager.getForwardedAddresses());
sessionState.setIsUsingThriftJDBCBinarySerDe(updateIsUsingThriftJDBCBinarySerDe());
if (sessionManager != null) {
  setSessionHive();
}
processGlobalInitFile();
sessionConfMap = setFetchSize(sessionConfMap);
configureSession(sessionConfMap);
} else {
  if (sessionImplclassName == null) {
    session = new HiveSessionImpl(sessionHandle, protocol, username, password, hiveConf,
        ipAddress, forwardedAddresses);
  } else {
    try {
      // Instantiate the configured session implementation class reflectively.
      Class<?> clazz = Class.forName(sessionImplclassName);
      Constructor<?> constructor = clazz.getConstructor(SessionHandle.class,
          TProtocolVersion.class, String.class, String.class, HiveConf.class, String.class,
          List.class);
      session = (HiveSession) constructor.newInstance(sessionHandle, protocol, username,
          password, hiveConf, ipAddress, forwardedAddresses);
    } catch (Exception e) {
      throw new HiveSQLException("Cannot initialize session class: " + sessionImplclassName, e);
    }
  }
}
/**
 * Sets the sessionHive object created based on sessionConf.
 * @throws HiveSQLException
 */
private void setSessionHive() throws HiveSQLException {
  Hive newSessionHive;
  try {
    newSessionHive = Hive.get(getHiveConf());
    // HMS connections from sessionHive shouldn't be closed by any query execution thread when
    // it recreates the Hive object. They may be closed only when the session is closed/released.
    newSessionHive.setAllowClose(false);
  } catch (HiveException e) {
    throw new HiveSQLException("Failed to get metastore connection", e);
  }
  // The previous sessionHive object might still be referenced by an async query execution
  // thread, so it shouldn't be closed here explicitly. The Hive object auto-closes its HMS
  // connection when it is garbage collected, so it is safe to just overwrite sessionHive here.
  sessionHive = newSessionHive;
}
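// setSessionHive() above relies on the replaced Hive object releasing its HMS connection once
// it is eventually garbage collected. A minimal, generic sketch of that "overwrite the field
// and let GC clean up" pattern using java.lang.ref.Cleaner (Java 9+; illustrative only, Hive's
// actual auto-close mechanism is its own):
import java.lang.ref.Cleaner;

class GcClosedConnection {
  private static final Cleaner CLEANER = Cleaner.create();

  GcClosedConnection() {
    // The cleanup action must not capture 'this', or the object would never become
    // unreachable. It runs once the last strong reference is gone, e.g. after a
    // sessionHive-style field was overwritten and in-flight queries finished.
    CLEANER.register(this, () -> System.out.println("connection released"));
  }
}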
@Override
protected synchronized void acquire(boolean userAccess) {
  super.acquire(userAccess);
  // If we have a metastore connection with impersonation, then set it first.
  if (sessionHive != null) {
    Hive.set(sessionHive);
  }
}
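// Hive.set(sessionHive) binds the session's metastore client to the current worker thread
// through a thread-local, and acquire() must refresh that binding because HiveServer2 serves
// each request on an arbitrary pooled thread. A condensed sketch of the pattern (hypothetical
// names, not Hive's implementation):
final class ThreadBoundClient {
  private static final ThreadLocal<ThreadBoundClient> CURRENT = new ThreadLocal<>();

  static void set(ThreadBoundClient client) {
    CURRENT.set(client);
  }

  static ThreadBoundClient get() {
    return CURRENT.get();
  }

  static void clear() {
    CURRENT.remove(); // avoid leaking the binding back into the thread pool
  }
}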
@Override
public void renewDelegationToken(HiveAuthFactory authFactory, String tokenStr)
    throws HiveSQLException {
  HiveAuthFactory.verifyProxyAccess(getUsername(), getUserFromToken(authFactory, tokenStr),
      getIpAddress(), getHiveConf());
  authFactory.renewDelegationToken(tokenStr);
}
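// Client side, the delegation-token methods above are reachable through the Hive JDBC driver.
// A hedged usage sketch: it assumes a kerberized HiveServer2 and HiveConnection's token
// methods (getDelegationToken/renewDelegationToken/cancelDelegationToken); the URL, principal,
// and user names are purely illustrative:
import java.sql.Connection;
import java.sql.DriverManager;
import org.apache.hive.jdbc.HiveConnection;

public class DelegationTokenRoundTrip {
  public static void main(String[] args) throws Exception {
    String url = "jdbc:hive2://hs2-host:10000/default;principal=hive/_HOST@EXAMPLE.COM";
    try (Connection conn = DriverManager.getConnection(url)) {
      HiveConnection hiveConn = (HiveConnection) conn;
      // Each call lands in the corresponding session method after verifyProxyAccess passes.
      String token = hiveConn.getDelegationToken("alice", "hive");
      hiveConn.renewDelegationToken(token);
      hiveConn.cancelDelegationToken(token);
    }
  }
}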