private boolean useBatchingSerializer(String serdeClassName) {
  return SessionState.get().isHiveServerQuery() &&
      hasSetBatchSerializer(serdeClassName);
}
public Map<Path, BaseWork> get(Configuration conf) {
  if (LlapProxy.isDaemon()
      || (SessionState.get() != null && SessionState.get().isHiveServerQuery())
      || HiveConf.getVar(conf, ConfVars.HIVE_EXECUTION_ENGINE).equals("spark")) {
    if (threadLocalWorkMap == null) {
      threadLocalWorkMap = new ThreadLocal<Map<Path, BaseWork>>() {
        @Override
        protected Map<Path, BaseWork> initialValue() {
          return new HashMap<Path, BaseWork>();
        }
      };
    }
    return threadLocalWorkMap.get();
  }
  if (gWorkMap == null) {
    gWorkMap = new HashMap<Path, BaseWork>();
  }
  return gWorkMap;
}
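A minimal usage sketch, not taken from the Hive source: it shows how a caller might consult the map returned by get(conf). The helper name lookupWork is an illustrative assumption.

// Hypothetical caller: resolves the cached BaseWork for a plan path, if any.
// Assumes it lives next to the get(Configuration) accessor above.
private BaseWork lookupWork(Configuration conf, Path planPath) {
  Map<Path, BaseWork> workMap = get(conf);
  // Thread-local in HS2/LLAP/Spark mode, process-global otherwise; either way a plain lookup.
  return workMap.get(planPath);
}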
private RenderStrategy.UpdateFunction updateFunction() {
  return inPlaceUpdate && !SessionState.get().isHiveServerQuery()
      ? new RenderStrategy.InPlaceUpdateFunction(this)
      : new RenderStrategy.LogToFileFunction(this);
}
private boolean isResultsCacheEnabled() {
  return conf.getBoolVar(HiveConf.ConfVars.HIVE_QUERY_RESULTS_CACHE_ENABLED) &&
      !(SessionState.get().isHiveServerQuery() &&
          conf.getBoolVar(HiveConf.ConfVars.HIVE_SERVER2_ENABLE_DOAS));
}
private static Token<LlapTokenIdentifier> getLlapToken(
    String user, final Configuration conf) throws IOException {
  // TODO: parts of this should be moved out of TezSession to reuse the clients, but there's
  //       no good place for that right now (HIVE-13698).
  // TODO: De-link from SessionState. A TezSession can be linked to different Hive Sessions via the pool.
  SessionState session = SessionState.get();
  boolean isInHs2 = session != null && session.isHiveServerQuery();
  Token<LlapTokenIdentifier> token = null;
  // For Tez, we don't use appId to distinguish the tokens.
  LlapCoordinator coordinator = null;
  if (isInHs2) {
    // We are in HS2, get the token locally.
    // TODO: coordinator should be passed in; HIVE-13698. Must be initialized for now.
    coordinator = LlapCoordinator.getInstance();
    if (coordinator == null) {
      throw new IOException("LLAP coordinator not initialized; cannot get LLAP tokens");
    }
    // Signing is not required for Tez.
    token = coordinator.getLocalTokenClient(conf, user).createToken(null, null, false);
  } else {
    // We are not in HS2; always create a new client for now.
    token = new LlapTokenClient(conf).getDelegationToken(null);
  }
  if (LOG.isInfoEnabled()) {
    LOG.info("Obtained a LLAP token: " + token);
  }
  return token;
}
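A hedged follow-up sketch, not part of the original method: once obtained, the token would typically be attached to the submitting user's credentials so downstream Tez/LLAP clients can authenticate. The call site shown is an assumption; UserGroupInformation is the standard Hadoop security API.

// Illustrative call site (assumed, not from the original snippet).
Token<LlapTokenIdentifier> llapToken = getLlapToken(user, conf);
if (llapToken != null) {
  // Attach the token to the current user's credentials for later RPC use.
  UserGroupInformation.getCurrentUser().addToken(llapToken);
}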
private String getHS2Host() throws SemanticException {
  if (SessionState.get().isHiveServerQuery()) {
    return SessionState.get().getHiveServer2Host();
  }
  if (conf.getBoolVar(ConfVars.HIVE_TEST_AUTHORIZATION_SQLSTD_HS2_MODE)) {
    // dummy value for use in tests
    return "dummyHostnameForTest";
  }
  throw new SemanticException("Kill query is only supported in HiveServer2 (not hive cli)");
}
private CommandProcessorResponse llapCacheCommandHandler(final SessionState ss,
    final String[] params) throws ParseException {
  CommandLine args = parseCommandArgs(CACHE_OPTIONS, params);
  boolean purge = args.hasOption("purge");
  String hs2Host = null;
  if (ss.isHiveServerQuery()) {
    hs2Host = ss.getHiveServer2Host();
  }
  if (purge) {
    List<String> fullCommand = Lists.newArrayList("llap", "cache");
    fullCommand.addAll(Arrays.asList(params));
    CommandProcessorResponse authErrResp =
        CommandUtil.authorizeCommandAndServiceObject(ss, HiveOperationType.LLAP_CACHE_PURGE,
            fullCommand, hs2Host);
    if (authErrResp != null) {
      // there was an authorization issue
      return authErrResp;
    }
    try {
      LlapRegistryService llapRegistryService = LlapRegistryService.getClient(ss.getConf());
      llapCachePurge(ss, llapRegistryService);
      return createProcessorSuccessResponse();
    } catch (Exception e) {
      LOG.error("Error while purging LLAP IO Cache. err: ", e);
      return returnErrorResponse("Error while purging LLAP IO Cache. err: " + e.getMessage());
    }
  } else {
    String usage = getUsageAsString();
    return returnErrorResponse("Unsupported sub-command option. " + usage);
  }
}
private RenderStrategy.UpdateFunction updateFunction() {
  return InPlaceUpdate.canRenderInPlace(hiveConf)
          && !SessionState.getConsole().getIsSilent()
          && !SessionState.get().isHiveServerQuery()
      ? new RenderStrategy.InPlaceUpdateFunction(this)
      : new RenderStrategy.LogToFileFunction(this);
}
  out.write(mdt.renderTable(!SessionState.get().isHiveServerQuery()).getBytes("UTF-8"));
  out.write(terminator);
} catch (IOException e) {
public int showColumns(Hive db, ShowColumnsDesc showCols) throws HiveException {
  Table table = db.getTable(showCols.getTableName());
  // write the results in the file
  DataOutputStream outStream = getOutputStream(showCols.getResFile());
  try {
    List<FieldSchema> cols = table.getCols();
    cols.addAll(table.getPartCols());
    // In case the query is served by HiveServer2, don't pad it with spaces,
    // as HiveServer2 output is consumed by JDBC/ODBC clients.
    boolean isOutputPadded = !SessionState.get().isHiveServerQuery();
    outStream.writeBytes(MetaDataFormatUtils.getAllColumnsInformation(
        cols, false, isOutputPadded, null));
  } catch (IOException e) {
    throw new HiveException(e, ErrorMsg.GENERIC_ERROR);
  } finally {
    IOUtils.closeStream(outStream);
  }
  return 0;
}
CommandLine args = parseCommandArgs(CLUSTER_OPTIONS, params);
String hs2Host = null;
if (ss.isHiveServerQuery()) {
  hs2Host = ss.getHiveServer2Host();
LOG.error(function.getClassName() + " is not a valid UDF class and was not registered.");
if (SessionState.get().isHiveServerQuery()) {
  SessionState.getRegistryForWrite().addToUDFLoaders(loader);
authzContextBuilder.setClientType(isHiveServerQuery()
    ? CLIENT_TYPE.HIVESERVER2 : CLIENT_TYPE.HIVECLI);
authzContextBuilder.setSessionString(getSessionId());
if (SessionState.get().isHiveServerQuery() &&
    conf.getBoolVar(HiveConf.ConfVars.HIVE_SERVER2_THRIFT_RESULTSET_SERIALIZE_IN_TASKS)) {
  resFileFormat = "SequenceFile";
  resultTab =
boolean isOutputPadded = !SessionState.get().isHiveServerQuery();
TextMetaDataTable tmd = new TextMetaDataTable();
for (FieldSchema fieldSchema : cols) {