public String getQueryId() {
  return queryState.getQueryId();
}
@Override
public void run() {
  try {
    String queryId = queryState.getQueryId();
    LOG.info("Query timed out after: " + queryTimeout + " seconds. Cancelling the execution now: " + queryId);
    SQLOperation.this.cancel(OperationState.TIMEDOUT);
  } catch (HiveSQLException e) {
    LOG.error("Error cancelling the query after timeout: " + queryTimeout + " seconds", e);
  } finally {
    // Stop the one-shot timeout executor so its helper thread can exit.
    timeoutExecutor.shutdown();
  }
}
};
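For context, a timeout task like the one above has to be armed once, shortly after execution starts. Below is a minimal, self-contained sketch of that wiring, assuming the timeout executor is a single-thread ScheduledExecutorService; armQueryTimeout and its parameters are hypothetical names introduced only for illustration, not Hive's actual code.

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

// Hypothetical helper (illustration only): arm a one-shot timeout that runs cancelTask
// after timeoutSeconds. The task itself is expected to shut the executor down in its
// finally block, as the Runnable above does, so the helper thread always exits.
static ScheduledExecutorService armQueryTimeout(Runnable cancelTask, long timeoutSeconds) {
  ScheduledExecutorService timeoutExecutor = Executors.newSingleThreadScheduledExecutor();
  timeoutExecutor.schedule(cancelTask, timeoutSeconds, TimeUnit.SECONDS);
  return timeoutExecutor;
}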
@Override
public void cancel(OperationState stateAfterCancel) throws HiveSQLException {
  String queryId = null;
  if (stateAfterCancel == OperationState.CANCELED) {
    queryId = queryState.getQueryId();
    LOG.info("Cancelling the query execution: " + queryId);
  }
  cleanup(stateAfterCancel);
  cleanupOperationLog(operationLogCleanupDelayMs);
  if (stateAfterCancel == OperationState.CANCELED) {
    LOG.info("Successfully cancelled the query: " + queryId);
  }
}
public static BaseSemanticAnalyzer get(QueryState queryState, ASTNode tree) throws SemanticException {
  BaseSemanticAnalyzer sem = getInternal(queryState, tree);
  if (queryState.getHiveOperation() == null) {
    String query = queryState.getQueryString();
    if (query != null && query.length() > 30) {
      query = query.substring(0, 30);
    }
    String msg = "Unknown HiveOperation for query='" + query + "' queryId=" + queryState.getQueryId();
    //throw new IllegalStateException(msg);
    LOG.debug(msg);
  }
  return sem;
}
public void lockAndRespond() throws CommandProcessorResponse {
  // Assumes the query has already been compiled
  if (plan == null) {
    throw new IllegalStateException(
        "No previously compiled query for driver - queryId=" + queryState.getQueryId());
  }
  if (requiresLock()) {
    try {
      acquireLocks();
    } catch (CommandProcessorResponse cpr) {
      rollback(cpr);
      throw cpr;
    }
  }
}
protected void createOperationLog() {
  if (parentSession.isOperationLogEnabled()) {
    File operationLogFile = new File(parentSession.getOperationLogSessionDir(), queryState.getQueryId());
    isOperationLogEnabled = true;
    // create OperationLog object with above log file
    operationLog = new OperationLog(opHandle.toString(), operationLogFile, parentSession.getHiveConf());
  }
}
protected synchronized void cleanupOperationLog(final long operationLogCleanupDelayMs) {
  // stop the appenders for the operation log
  String queryId = queryState.getQueryId();
  LogUtils.stopQueryAppender(LogDivertAppender.QUERY_ROUTING_APPENDER, queryId);
  LogUtils.stopQueryAppender(LogDivertAppenderForTest.TEST_QUERY_ROUTING_APPENDER, queryId);
  if (isOperationLogEnabled) {
    if (opHandle == null) {
      LOG.warn("Operation seems to be in invalid state, opHandle is null");
      return;
    }
    if (operationLog == null) {
      LOG.warn("Operation [ " + opHandle.getHandleIdentifier() + " ] logging is enabled, "
          + "but its OperationLog object cannot be found. "
          + "Perhaps the operation has already terminated.");
    } else {
      if (operationLogCleanupDelayMs > 0) {
        scheduledExecutorService.schedule(new OperationLogCleaner(operationLog),
            operationLogCleanupDelayMs, TimeUnit.MILLISECONDS);
      } else {
        LOG.info("Closing operation log {} without delay", operationLog);
        operationLog.close();
      }
    }
  }
}
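OperationLogCleaner above is the Runnable handed to the scheduler when a cleanup delay is configured. A minimal sketch of such a cleaner follows, assuming its only responsibility is to close the OperationLog once the delay elapses; the actual Hive class may log or do more, so treat this as illustrative.

// Hedged sketch (assumption): a delayed-cleanup Runnable that simply closes the
// operation log once the scheduled delay has elapsed. It reuses the OperationLog
// type already imported by the enclosing class; the real OperationLogCleaner may do more.
private static final class OperationLogCleaner implements Runnable {
  private final OperationLog operationLog;

  OperationLogCleaner(OperationLog operationLog) {
    this.operationLog = operationLog;
  }

  @Override
  public void run() {
    if (operationLog != null) {
      operationLog.close();
    }
  }
}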
private void addToHistory(Keys key, String value) {
  if (SessionState.get() != null) {
    SessionState.get().getHiveHistory().setQueryProperty(queryState.getQueryId(), key, value);
  }
}
/**
 * Invoked before runInternal().
 * Sets up preconditions and configuration for the run.
 */
protected void beforeRun() {
  ShimLoader.getHadoopShims().setHadoopQueryContext(queryState.getQueryId());
  createOperationLog();
  LogUtils.registerLoggingContext(queryState.getConf());
}
if (backgroundHandle != null) {
  boolean success = backgroundHandle.cancel(true);
  String queryId = queryState.getQueryId();
  if (success) {
    LOG.info("The running operation has been successfully interrupted: " + queryId);
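backgroundHandle.cancel(true) above asks the JDK to interrupt the thread running the background task if it has already started, and returns false when the task has already completed. A standalone, pure-JDK illustration of that behaviour (hypothetical class name, not Hive code):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

// Standalone JDK illustration (not Hive code) of Future.cancel(true):
// the submitted task is interrupted if it is still running.
public class CancelDemo {
  public static void main(String[] args) {
    ExecutorService pool = Executors.newSingleThreadExecutor();
    Future<?> backgroundHandle = pool.submit(() -> {
      try {
        Thread.sleep(60_000);               // stands in for long-running query work
      } catch (InterruptedException ie) {
        Thread.currentThread().interrupt(); // restore the interrupt flag and exit
      }
    });
    boolean success = backgroundHandle.cancel(true); // true => interrupt if running
    System.out.println("interrupted: " + success);
    pool.shutdown();
  }
}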
final String queryId = queryState.getQueryId();
private void addToHistory(SparkJobRef jobRef) {
  console.printInfo("Starting Spark Job = " + jobRef.getJobId());
  if (SessionState.get() != null) {
    SessionState.get().getHiveHistory()
        .setQueryProperty(queryState.getQueryId(), Keys.SPARK_JOB_ID, jobRef.getJobId());
  }
}
final String queryId = queryState.getQueryId();
  throw toSQLException("Error while compiling statement", response);
}
if (queryState.getQueryTag() != null && queryState.getQueryId() != null) {
  parentSession.updateQueryTag(queryState.getQueryId(), queryState.getQueryTag());
ShimLoader.getHadoopShims().setHadoopQueryContext(qp.getQueryState().getQueryId());
ret = qp.run(cmd).getResponseCode();
@Override
public Object run() throws HiveSQLException {
  assert (!parentHive.allowClose());
  Hive.set(parentHive);
  // TODO: can this result in cross-thread reuse of session state?
  SessionState.setCurrentSessionState(parentSessionState);
  PerfLogger.setPerfLogger(SessionState.getPerfLogger());
  LogUtils.registerLoggingContext(queryState.getConf());
  ShimLoader.getHadoopShims().setHadoopQueryContext(queryState.getQueryId());
  try {
    if (asyncPrepare) {
      prepare(queryState);
    }
    runQuery();
  } catch (HiveSQLException e) {
    // TODO: why do we invent our own error path on top of the one from Future.get?
    setOperationException(e);
    LOG.error("Error running hive query: ", e);
  } finally {
    LogUtils.unregisterLoggingContext();
    // If the child thread created a new Hive object, it may have opened an HMS connection.
    // Close it here via Hive.closeCurrent(); skipping this would leak the HMS connection.
    Hive.closeCurrent();
  }
  return null;
}
};
Task<?> task;
if (conf.getVar(ConfVars.HIVE_EXECUTION_ENGINE).equals("tez")) {
  TezWork tezWork = new TezWork(queryState.getQueryId(), conf);
  mergeWork.setName("File Merge");
  tezWork.add(mergeWork);
final String queryId = Strings.isNullOrEmpty(queryState.getQueryId())
    ? QueryPlan.makeQueryId() : queryState.getQueryId();
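Read in isolation, the fallback above is the following small idiom; Strings.isNullOrEmpty is Guava's helper, while resolveQueryId and idGenerator are hypothetical names introduced only to illustrate it (in the snippet, the generator would be QueryPlan::makeQueryId).

import com.google.common.base.Strings;
import java.util.function.Supplier;

// Hypothetical helper (illustration only): use the id already carried by the query
// state if present, otherwise fall back to a freshly generated one.
static String resolveQueryId(String existingId, Supplier<String> idGenerator) {
  return Strings.isNullOrEmpty(existingId) ? idGenerator.get() : existingId;
}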