void displayBrokenPipeInfo() {
  if (LOG.isInfoEnabled()) {
    LOG.info("The script did not consume all input data. This is considered an error.");
    LOG.info("set " + HiveConf.ConfVars.ALLOWPARTIALCONSUMP.toString()
        + "=true; to ignore it.");
  }
}
/**
 * Checks whether a given configuration name is blacklisted and should not be converted
 * to an environment variable.
 */
boolean blackListed(Configuration conf, String name) {
  if (blackListedConfEntries == null) {
    blackListedConfEntries = new HashSet<String>();
    if (conf != null) {
      String bl = conf.get(HiveConf.ConfVars.HIVESCRIPT_ENV_BLACKLIST.toString(),
          HiveConf.ConfVars.HIVESCRIPT_ENV_BLACKLIST.getDefaultValue());
      if (bl != null && !bl.isEmpty()) {
        String[] bls = bl.split(",");
        Collections.addAll(blackListedConfEntries, bls);
      }
    }
  }
  return blackListedConfEntries.contains(name);
}
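/*
 * For reference, the blacklist above is just a comma-separated conf value parsed into a
 * set. A minimal, self-contained sketch of that lookup; the blacklist string below is
 * illustrative, not necessarily the real default of HIVESCRIPT_ENV_BLACKLIST.
 */
import java.util.Collections;
import java.util.HashSet;
import java.util.Set;

public class BlacklistSketch {
  public static void main(String[] args) {
    String bl = "hive.txn.valid.txns,hive.script.operator.env.blacklist"; // illustrative
    Set<String> entries = new HashSet<>();
    if (bl != null && !bl.isEmpty()) {
      Collections.addAll(entries, bl.split(","));
    }
    System.out.println(entries.contains("hive.txn.valid.txns")); // true -> skipped
    System.out.println(entries.contains("mapreduce.job.name"));  // false -> exported
  }
}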
String defaultVal = getConfVar(varname).getDefaultValue();
nonErrorMessage = SetProcessor.setConf(varname, varname, defaultVal, true);
if (varname.equals(HiveConf.ConfVars.HIVE_SESSION_HISTORY_ENABLED.toString())) {
  SessionState.get().updateHistory(Boolean.parseBoolean(defaultVal), ss);
hive_l4j = LogUtils.class.getClassLoader().getResource(HIVE_L4J);
System.setProperty(HiveConf.ConfVars.HIVEQUERYID.toString(),
    HiveConf.getVar(conf, HiveConf.ConfVars.HIVEQUERYID));
break;
queryId = "unknown-" + System.currentTimeMillis(); System.setProperty(HiveConf.ConfVars.HIVEQUERYID.toString(), queryId);
Preconditions.checkNotNull(hosts,
    ConfVars.LLAP_DAEMON_SERVICE_HOSTS.toString() + " must be defined");
LlapRegistryService registry;
if (hosts.startsWith("@")) {
@Test
public void testAvailableCommands() throws Exception {
  enableTestOnlyCmd(conf);
  SessionState.start(conf);
  for (HiveCommand command : HiveCommand.values()) {
    String[] cmd = command.name().toLowerCase().split("_");
    Assert.assertNotNull("Cmd " + cmd[0] + " should not return null",
        CommandProcessorFactory.getForHiveCommandInternal(cmd, conf, command.isOnlyForTesting()));
  }
  conf.set(HiveConf.ConfVars.HIVE_SECURITY_COMMAND_WHITELIST.toString(), "");
  for (HiveCommand command : HiveCommand.values()) {
    String[] cmd = command.name().toLowerCase().split("_");
    try {
      CommandProcessorFactory.getForHiveCommandInternal(cmd, conf, command.isOnlyForTesting());
      Assert.fail("Expected SQLException for " + cmd[0] + " as available commands is empty");
    } catch (SQLException e) {
      Assert.assertEquals("Insufficient privileges to execute " + cmd[0], e.getMessage());
      Assert.assertEquals("42000", e.getSQLState());
    }
  }
}
/**
 * addJobConfToEnvironment is mostly shamelessly copied from hadoop streaming. Added an
 * additional check on environment variable length.
 */
void addJobConfToEnvironment(Configuration conf, Map<String, String> env) {
  Iterator<Map.Entry<String, String>> it = conf.iterator();
  while (it.hasNext()) {
    Map.Entry<String, String> en = it.next();
    String name = en.getKey();
    if (!blackListed(conf, name)) {
      // String value = (String) en.getValue(); // does not apply variable expansion
      String value = conf.get(name); // does variable expansion
      name = safeEnvVarName(name);
      boolean truncate =
          conf.getBoolean(HiveConf.ConfVars.HIVESCRIPTTRUNCATEENV.toString(), false);
      value = safeEnvVarValue(value, name, truncate);
      env.put(name, value);
    }
  }
}
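/*
 * safeEnvVarName and safeEnvVarValue are Hive internals not shown here. The sketch below
 * is an assumption for illustration, not Hive's actual implementation: environment
 * variable names cannot contain '.', so non-alphanumeric characters are mapped to '_',
 * and overly long values may be truncated when HIVESCRIPTTRUNCATEENV is enabled.
 */
public class EnvVarSketch {
  static String sketchSafeEnvVarName(String name) {
    StringBuilder sb = new StringBuilder(name.length());
    for (char c : name.toCharArray()) {
      boolean ok = (c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z') || (c >= '0' && c <= '9');
      sb.append(ok ? c : '_');
    }
    return sb.toString();
  }

  static String sketchSafeEnvVarValue(String value, boolean truncate) {
    final int maxLength = 20 * 1024; // hypothetical cap, for illustration only
    return (truncate && value.length() > maxLength) ? value.substring(0, maxLength) : value;
  }

  public static void main(String[] args) {
    System.out.println(sketchSafeEnvVarName("mapreduce.job.name")); // mapreduce_job_name
  }
}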
@Override
public ErrorAndSolution getErrorAndSolution() {
  ErrorAndSolution es = null;
  if (getQueryMatches() && configMatches) {
    List<String> matchingLines = getRegexToLogLines().get(OUT_OF_MEMORY_REGEX);
    if (!matchingLines.isEmpty()) {
      String confName = HiveConf.ConfVars.HIVEMAPAGGRHASHMEMORY.toString();
      float confValue = HiveConf.getFloatVar(getConf(), HiveConf.ConfVars.HIVEMAPAGGRHASHMEMORY);
      es = new ErrorAndSolution(
          "Out of memory due to hash maps used in map-side aggregation.",
          "Currently " + confName + " is set to " + confValue + ". "
              + "Try setting it to a lower value, e.g. "
              + "'set " + confName + " = " + confValue / 2 + ";'");
    }
  }
  reset();
  return es;
}
@Test
public void testMapAggrMemErrorHeuristic() throws Exception {
  JobConf jobConf = new JobConf();
  HiveConf.setQueryString(jobConf, "select * from foo group by moo;");
  final TaskLogProcessor taskLogProcessor = new TaskLogProcessor(jobConf);
  Throwable oome = new OutOfMemoryError("java heap space");
  File log1File = writeTestLog("1", toString(oome));
  taskLogProcessor.addTaskAttemptLogUrl(log1File.toURI().toURL().toString());
  List<ErrorAndSolution> errList = taskLogProcessor.getErrors();
  assertEquals(1, errList.size());
  final ErrorAndSolution eas = errList.get(0);
  String error = eas.getError();
  assertNotNull(error);
  // check that the error description mentions memory:
  assertTrue(error.contains("memory"));
  String solution = eas.getSolution();
  assertNotNull(solution);
  assertTrue(solution.length() > 0);
  String confName = HiveConf.ConfVars.HIVEMAPAGGRHASHMEMORY.toString();
  assertTrue(solution.contains(confName));
}
@Override
public void configureJobConf(TableDesc tableDesc, JobConf jobConf) {
  if (UserGroupInformation.isSecurityEnabled()) {
    // The AM cannot do Kerberos auth, so do the input split generation in HS2.
    LOG.debug("Setting {} to {} to enable split generation on HS2",
        HiveConf.ConfVars.HIVE_AM_SPLIT_GENERATION.toString(), Boolean.FALSE.toString());
    jobConf.set(HiveConf.ConfVars.HIVE_AM_SPLIT_GENERATION.toString(), Boolean.FALSE.toString());
  }
  try {
    DruidStorageHandlerUtils.addDependencyJars(jobConf, DruidRecordWriter.class);
  } catch (IOException e) {
    Throwables.propagate(e);
  }
}
private void enableTestOnlyCmd(HiveConf conf) {
  StringBuilder securityCMDs =
      new StringBuilder(conf.getVar(HiveConf.ConfVars.HIVE_SECURITY_COMMAND_WHITELIST));
  for (String c : testOnlyCommands) {
    securityCMDs.append(",");
    securityCMDs.append(c);
  }
  conf.set(HiveConf.ConfVars.HIVE_SECURITY_COMMAND_WHITELIST.toString(), securityCMDs.toString());
}
private void validateFileMetadata() throws IOException {
  if (fileMetadata.getCompressionKind() == CompressionKind.NONE) {
    return;
  }
  int bufferSize = fileMetadata.getCompressionBufferSize();
  long minAllocSize = HiveConf.getSizeVar(daemonConf, ConfVars.LLAP_ALLOCATOR_MIN_ALLOC);
  if (bufferSize < minAllocSize) {
    LOG.warn("ORC compression buffer size (" + bufferSize + ") is smaller than LLAP low-level "
        + "cache minimum allocation size (" + minAllocSize + "). Decrease the value for "
        + HiveConf.ConfVars.LLAP_ALLOCATOR_MIN_ALLOC.toString() + " to avoid wasting memory");
  }
}
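/*
 * A quick illustration of the check above with made-up sizes: a 64 KB ORC compression
 * buffer against a 256 KB LLAP minimum allocation trips the warning, presumably because
 * each cached buffer is rounded up to the minimum allocation size.
 */
public class BufferSizeCheckSketch {
  public static void main(String[] args) {
    long bufferSize = 64 * 1024;    // hypothetical ORC compression buffer size
    long minAllocSize = 256 * 1024; // hypothetical LLAP_ALLOCATOR_MIN_ALLOC value
    if (bufferSize < minAllocSize) {
      System.out.println("warning: buffer " + bufferSize + " < min alloc " + minAllocSize);
    }
  }
}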
public static long getHeartbeatInterval(Configuration conf) throws LockException {
  // Retrieve HIVE_TXN_TIMEOUT in MILLISECONDS (it's defined as SECONDS),
  // then divide it by 2 to give us a safety factor.
  long interval =
      HiveConf.getTimeVar(conf, HiveConf.ConfVars.HIVE_TXN_TIMEOUT, TimeUnit.MILLISECONDS) / 2;
  if (interval == 0) {
    throw new LockException(HiveConf.ConfVars.HIVE_TXN_TIMEOUT.toString() + " not set,"
        + " heartbeats won't be sent");
  }
  return interval;
}
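/*
 * A minimal sketch of the interval arithmetic above, assuming a transaction timeout
 * configured as 300 seconds (the value is illustrative; the real one comes from
 * HIVE_TXN_TIMEOUT):
 */
import java.util.concurrent.TimeUnit;

public class HeartbeatIntervalSketch {
  public static void main(String[] args) {
    long txnTimeoutSeconds = 300; // illustrative
    long timeoutMillis = TimeUnit.SECONDS.toMillis(txnTimeoutSeconds);
    long interval = timeoutMillis / 2; // heartbeat twice per timeout as a safety factor
    System.out.println(interval + " ms"); // 150000 ms
  }
}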