for (Map.Entry<String, String> e : req.getConfiguration().entrySet()) { if (sb == null) { sh = new SessionHandle(req.getSessionHandle()); sb = new StringBuilder("Client information for ").append(sh).append(": "); } else {
/** Renders this handle as {@code SessionHandle [<identifier>]} for logs and error messages. */
@Override
public String toString() {
  StringBuilder text = new StringBuilder("SessionHandle [");
  text.append(getHandleIdentifier());
  return text.append("]").toString();
}
}
/** Exposes the underlying Hive session's handle in its string form. */
@Override public String getSessionHandle() { return hiveSession.getSessionHandle().toString(); } }
/**
 * Fetches type-info metadata for the given session via the thrift CLI service.
 *
 * @param sessionHandle handle of the session to query
 * @return an {@link OperationHandle} tracking the server-side metadata operation
 * @throws HiveSQLException if the server reports a failure or the transport fails
 */
@Override
public OperationHandle getTypeInfo(SessionHandle sessionHandle) throws HiveSQLException {
  try {
    TGetTypeInfoReq request = new TGetTypeInfoReq(sessionHandle.toTSessionHandle());
    TGetTypeInfoResp response = cliService.GetTypeInfo(request);
    // Surface any server-side error carried in the response status.
    checkStatus(response.getStatus());
    TProtocolVersion version = sessionHandle.getProtocolVersion();
    return new OperationHandle(response.getOperationHandle(), version);
  } catch (HiveSQLException e) {
    // Already the caller-facing type; rethrow untouched to keep code/SQLState.
    throw e;
  } catch (Exception e) {
    throw new HiveSQLException(e);
  }
}
/**
 * Creates a server-side Hive session.
 *
 * @param sessionHandle      pre-existing handle to reuse, or {@code null} to mint a new one
 *                           from {@code protocol}
 * @param protocol           thrift protocol version negotiated with the client
 * @param username           user the session runs as
 * @param password           credential supplied at open time
 * @param serverConf         server HiveConf; copied so per-session changes stay local
 * @param ipAddress          client address the session was opened from
 * @param forwardedAddresses proxy chain addresses, if the connection was forwarded
 */
public HiveSessionImpl(SessionHandle sessionHandle, TProtocolVersion protocol, String username,
    String password, HiveConf serverConf, String ipAddress, final List<String> forwardedAddresses) {
  this.username = username;
  this.password = password;
  creationTime = System.currentTimeMillis();
  // Reuse the caller's handle if given; otherwise create a fresh one for this protocol.
  this.sessionHandle = sessionHandle != null ? sessionHandle : new SessionHandle(protocol);
  // Per-session copy of the server config so session-level overrides don't leak.
  this.sessionConf = new HiveConf(serverConf);
  this.ipAddress = ipAddress;
  this.forwardedAddresses = forwardedAddresses;
  // With parallel ops enabled there is no lock; otherwise serialize operations per session.
  this.operationLock = serverConf.getBoolVar(
      ConfVars.HIVE_SERVER2_PARALLEL_OPS_IN_SESSION) ? null : new Semaphore(1);
  try {
    // In non-impersonation mode, map scheduler queue to current user
    // if fair scheduler is configured.
    if (! sessionConf.getBoolVar(ConfVars.HIVE_SERVER2_ENABLE_DOAS) &&
        sessionConf.getBoolVar(ConfVars.HIVE_SERVER2_MAP_FAIR_SCHEDULER_QUEUE)) {
      ShimLoader.getHadoopShims().refreshDefaultQueue(sessionConf, username);
    }
  } catch (IOException e) {
    // Best effort only: a queue-mapping failure must not prevent session creation.
    LOG.warn("Error setting scheduler queue: " + e, e);
  }
  // Set an explicit session name to control the download directory name
  sessionConf.set(ConfVars.HIVESESSIONID.varname,
      this.sessionHandle.getHandleIdentifier().toString());
  // Use thrift transportable formatter
  sessionConf.set(SerDeUtils.LIST_SINK_OUTPUT_FORMATTER, ThriftFormatter.class.getName());
  sessionConf.setInt(SerDeUtils.LIST_SINK_OUTPUT_PROTOCOL, protocol.getValue());
}
/**
 * Closes the remote session identified by the handle over the thrift CLI service.
 *
 * @param sessionHandle handle of the session to close
 * @throws HiveSQLException if the server reports a failure or the transport fails
 */
@Override
public void closeSession(SessionHandle sessionHandle) throws HiveSQLException {
  try {
    TCloseSessionResp response =
        cliService.CloseSession(new TCloseSessionReq(sessionHandle.toTSessionHandle()));
    // Turn a non-OK response status into a HiveSQLException.
    checkStatus(response.getStatus());
  } catch (HiveSQLException e) {
    // Preserve the original error code/SQLState for callers.
    throw e;
  } catch (Exception e) {
    throw new HiveSQLException(e);
  }
}
lensToHiveSession.put(sessionDbKey, hiveSession); log.info("New hive session for user: {} , lens session: {} , hive session handle: {} , driver : {}", ctx.getClusterUser(), sessionDbKey, hiveSession.getHandleIdentifier(), getFullyQualifiedName()); for (LensEventListener<DriverEvent> eventListener : driverListeners) { try { eventListener.onEvent(new DriverSessionStarted(System.currentTimeMillis(), this, lensSession, hiveSession .getSessionId().toString())); } catch (Exception exc) { log.error("Error sending driver {} start event to listener {}", getFullyQualifiedName(), eventListener,
/** Returns the thrift protocol version carried by this session's handle. */
@Override
public TProtocolVersion getProtocolVersion() {
  TProtocolVersion negotiated = sessionHandle.getProtocolVersion();
  return negotiated;
}
/**
 * Closes this Lens session: delegates to the parent close, then releases every
 * session-scoped classloader down to (but not including) the shared per-database
 * classloaders, and finally resets the thread context classloader.
 *
 * @throws HiveSQLException if the parent session close fails
 */
@Override public void close() throws HiveSQLException {
  // Capture the non-DB classloader before super.close() tears session state down.
  ClassLoader nonDBClassLoader = getSessionState().getConf().getClassLoader();
  super.close();
  // Release class loader resources
  JavaUtils.closeClassLoadersTo(nonDBClassLoader, getClass().getClassLoader());
  synchronized (sessionDbClassLoaders) {
    for (Map.Entry<String, SessionClassLoader> entry : sessionDbClassLoaders.entrySet()) {
      try {
        // Closing session level classloaders up until the db class loader if present, or null.
        // When db class loader is null, the class loader in the session is a single class loader
        // which stays as it is on database switch -- provided the new db doesn't have db jars.
        // The following line will close class loaders made on top of db class loaders and will close
        // only one classloader without closing the parents. In case of no db class loader, the session
        // classloader will already have been closed by either super.close() or before this for loop.
        JavaUtils.closeClassLoadersTo(entry.getValue(), getDbResService().getClassLoader(entry.getKey()));
      } catch (Exception e) {
        // Best effort: keep closing the remaining loaders even if one fails.
        log.error("Error closing session classloader for session: {}", getSessionHandle().getSessionId(), e);
      }
    }
    sessionDbClassLoaders.clear();
  }
  // reset classloader in close
  Thread.currentThread().setContextClassLoader(LensSessionImpl.class.getClassLoader());
}
/**
 * Starts a catalog-listing operation in the given session via the thrift CLI service.
 *
 * @param sessionHandle handle of the session to query
 * @return an {@link OperationHandle} for the server-side catalogs operation
 * @throws HiveSQLException if the server reports a failure or the transport fails
 */
@Override
public OperationHandle getCatalogs(SessionHandle sessionHandle) throws HiveSQLException {
  try {
    TGetCatalogsReq request = new TGetCatalogsReq(sessionHandle.toTSessionHandle());
    TGetCatalogsResp response = cliService.GetCatalogs(request);
    // Fail fast on a non-OK response status.
    checkStatus(response.getStatus());
    TProtocolVersion version = sessionHandle.getProtocolVersion();
    return new OperationHandle(response.getOperationHandle(), version);
  } catch (HiveSQLException e) {
    // Rethrow as-is so the server-reported error details survive.
    throw e;
  } catch (Exception e) {
    throw new HiveSQLException(e);
  }
}
/**
 * Cancels a previously issued delegation token for the given session.
 *
 * @param sessionHandle session the token belongs to
 * @param authFactory   unused here; part of the shared interface signature
 * @param tokenStr      encoded delegation token to cancel
 * @throws HiveSQLException if the server rejects the request or the transport fails
 */
@Override
public void cancelDelegationToken(SessionHandle sessionHandle, HiveAuthFactory authFactory,
    String tokenStr) throws HiveSQLException {
  final TCancelDelegationTokenReq request =
      new TCancelDelegationTokenReq(sessionHandle.toTSessionHandle(), tokenStr);
  try {
    TCancelDelegationTokenResp response = cliService.CancelDelegationToken(request);
    // A non-OK status becomes a HiveSQLException and propagates to the caller.
    checkStatus(response.getStatus());
  } catch (TException e) {
    throw new HiveSQLException(e);
  }
}
/**
 * Creates a server-side Hive session with a freshly minted handle.
 *
 * @param protocol       thrift protocol version negotiated with the client
 * @param username       user the session runs as
 * @param password       credential supplied at open time
 * @param serverhiveConf server HiveConf; copied so per-session changes stay local
 * @param ipAddress      client address the session was opened from
 */
public HiveSessionImpl(TProtocolVersion protocol, String username, String password,
    HiveConf serverhiveConf, String ipAddress) {
  this.username = username;
  this.password = password;
  // Every session gets its own handle tied to the negotiated protocol.
  this.sessionHandle = new SessionHandle(protocol);
  // Per-session copy of the server config so session-level overrides don't leak.
  this.hiveConf = new HiveConf(serverhiveConf);
  this.ipAddress = ipAddress;
  try {
    // In non-impersonation mode, map scheduler queue to current user
    // if fair scheduler is configured.
    if (! hiveConf.getBoolVar(ConfVars.HIVE_SERVER2_ENABLE_DOAS) &&
        hiveConf.getBoolVar(ConfVars.HIVE_SERVER2_MAP_FAIR_SCHEDULER_QUEUE)) {
      ShimLoader.getHadoopShims().refreshDefaultQueue(hiveConf, username);
    }
  } catch (IOException e) {
    // Best effort only: a queue-mapping failure must not prevent session creation.
    LOG.warn("Error setting scheduler queue: " + e, e);
  }
  // Set an explicit session name to control the download directory name
  hiveConf.set(ConfVars.HIVESESSIONID.varname,
      sessionHandle.getHandleIdentifier().toString());
  // Use thrift transportable formatter
  hiveConf.set(ListSinkOperator.OUTPUT_FORMATTER,
      FetchFormatter.ThriftFormatter.class.getName());
  hiveConf.setInt(ListSinkOperator.OUTPUT_PROTOCOL, protocol.getValue());
}
ctx.getClusterUser(), sessionDbKey, hiveSession.getHandleIdentifier(), getFullyQualifiedName()); for (LensEventListener<DriverEvent> eventListener : driverListeners) { try { eventListener.onEvent(new DriverSessionStarted(System.currentTimeMillis(), this, lensSession, hiveSession .getSessionId().toString())); } catch (Exception exc) { log.error("Error sending driver {} start event to listener {}", getFullyQualifiedName(), eventListener,
/** @return the thrift protocol version recorded in this session's handle */
@Override public TProtocolVersion getProtocolVersion() { return sessionHandle.getProtocolVersion(); }
/**
 * Starts a table-types metadata operation in the given session via the thrift CLI service.
 *
 * @param sessionHandle handle of the session to query
 * @return an {@link OperationHandle} for the server-side table-types operation
 * @throws HiveSQLException if the server reports a failure or the transport fails
 */
@Override
public OperationHandle getTableTypes(SessionHandle sessionHandle) throws HiveSQLException {
  try {
    TGetTableTypesReq request = new TGetTableTypesReq(sessionHandle.toTSessionHandle());
    TGetTableTypesResp response = cliService.GetTableTypes(request);
    // Translate a non-OK response status into a HiveSQLException.
    checkStatus(response.getStatus());
    TProtocolVersion version = sessionHandle.getProtocolVersion();
    return new OperationHandle(response.getOperationHandle(), version);
  } catch (HiveSQLException e) {
    // Keep the server-reported error intact.
    throw e;
  } catch (Exception e) {
    throw new HiveSQLException(e);
  }
}
/**
 * Thrift endpoint that closes the session named in the request.
 *
 * <p>On success the per-connection server context (if any) is detached from the
 * handle; on failure the exception is logged and encoded into the response status
 * rather than thrown across the thrift boundary.
 */
@Override
public TCloseSessionResp CloseSession(TCloseSessionReq req) throws TException {
  TCloseSessionResp resp = new TCloseSessionResp();
  try {
    cliService.closeSession(new SessionHandle(req.getSessionHandle()));
    resp.setStatus(OK_STATUS);
    // Forget the handle on this connection so later calls can't reuse a closed session.
    ThriftCLIServerContext context = (ThriftCLIServerContext) currentServerContext.get();
    if (context != null) {
      context.setSessionHandle(null);
    }
  } catch (Exception e) {
    LOG.warn("Error closing session: ", e);
    resp.setStatus(HiveSQLException.toTStatus(e));
  }
  return resp;
}
/** @return the public UUID component of this handle's identifier */
public UUID getSessionId() { return getHandleIdentifier().getPublicId(); }
/**
 * Renews a delegation token for the given session.
 *
 * @param sessionHandle session the token belongs to
 * @param authFactory   unused here; part of the shared interface signature
 * @param tokenStr      encoded delegation token to renew
 * @throws HiveSQLException if the server rejects the renewal or the transport fails
 */
@Override
public void renewDelegationToken(SessionHandle sessionHandle, HiveAuthFactory authFactory,
    String tokenStr) throws HiveSQLException {
  // Fixed: local was misleadingly named 'cancelReq' for a renew request.
  TRenewDelegationTokenReq renewReq = new TRenewDelegationTokenReq(
      sessionHandle.toTSessionHandle(), tokenStr);
  try {
    TRenewDelegationTokenResp renewResp = cliService.RenewDelegationToken(renewReq);
    checkStatus(renewResp.getStatus());
  } catch (HiveSQLException e) {
    // Fixed: previously re-wrapped by the broad catch below, which buried the
    // server-reported error code/SQLState from checkStatus; rethrow as-is to
    // match the sibling methods (getTypeInfo, closeSession, getCatalogs, ...).
    throw e;
  } catch (Exception e) {
    throw new HiveSQLException(e);
  }
}
/** @return the wrapped Hive session's handle rendered as a string */
@Override public String getSessionHandle() { return hiveSession.getSessionHandle().toString(); } }
/** @return the thrift protocol version this session's handle was created with */
@Override public TProtocolVersion getProtocolVersion() { return sessionHandle.getProtocolVersion(); }