public void run() throws HiveSQLException {
  beforeRun();
  try {
    Metrics metrics = MetricsFactory.getInstance();
    if (metrics != null) {
      metrics.incrementCounter(MetricsConstant.OPEN_OPERATIONS);
    }
    runInternal();
  } finally {
    afterRun();
  }
}
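// Illustrative sketch only (plain JDK, hypothetical names - not the Hive source): run() above
// follows a template-method pattern: fixed setup/teardown around an abstract runInternal(),
// with an open-operations counter bumped once the work starts and teardown guaranteed by finally.
import java.util.concurrent.atomic.AtomicLong;

abstract class LifecycleTask {
  static final AtomicLong OPEN_OPERATIONS = new AtomicLong();

  final void run() throws Exception {
    beforeRun();                          // setup (logging context, operation log, ...)
    try {
      OPEN_OPERATIONS.incrementAndGet();  // count the operation as "open"
      runInternal();                      // the actual work, supplied by subclasses
    } finally {
      afterRun();                         // teardown always executes, even on failure
    }
  }

  protected void beforeRun() { }
  protected void afterRun() { }
  protected abstract void runInternal() throws Exception;
}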
/**
 * Cancel the running operation unless it is already in a terminal state.
 * @param opHandle operation handle
 * @param errMsg error message
 * @throws HiveSQLException
 */
public void cancelOperation(OperationHandle opHandle, String errMsg) throws HiveSQLException {
  Operation operation = getOperation(opHandle);
  OperationState opState = operation.getStatus().getState();
  if (opState.isTerminal()) {
    // Cancel should be a no-op in either case
    LOG.debug(opHandle + ": Operation is already aborted in state - " + opState);
  } else {
    LOG.debug(opHandle + ": Attempting to cancel from state - " + opState);
    OperationState operationState = OperationState.CANCELED;
    operationState.setErrorMessage(errMsg);
    operation.cancel(operationState);
    if (operation instanceof SQLOperation) {
      removeSafeQueryInfo(opHandle);
    }
  }
}
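// Illustrative sketch only (hypothetical names - not the Hive OperationState enum): cancellation
// is a no-op once an operation has reached a terminal state, so a cancel request only
// transitions operations that are still pending or running.
enum TaskState {
  INITIALIZED, RUNNING, FINISHED, CANCELED, ERROR;

  boolean isTerminal() {
    return this == FINISHED || this == CANCELED || this == ERROR;
  }
}

final class CancellableTask {
  private TaskState state = TaskState.RUNNING;

  synchronized boolean cancel() {
    if (state.isTerminal()) {
      return false;           // already finished/canceled/failed: nothing to do
    }
    state = TaskState.CANCELED;
    return true;              // caller knows the cancel actually took effect
  }
}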
private void closeTimedOutOperations(List<Operation> operations) {
  acquire(false, false);
  try {
    for (Operation operation : operations) {
      removeOpHandle(operation.getHandle());
      try {
        operation.close();
      } catch (Exception e) {
        LOG.warn("Exception thrown while closing timed-out operation, reported open_operations"
            + " metrics may be incorrect: " + operation.getHandle(), e);
      }
    }
  } finally {
    release(false, false);
  }
}
private void addOperation(Operation operation) {
  LOG.info("Adding operation: {} {}", operation.getHandle(),
      operation.getParentSession().getSessionHandle());
  queryIdOperation.put(getQueryId(operation), operation);
  handleToOperation.put(operation.getHandle(), operation);
  if (operation instanceof SQLOperation) {
    synchronized (webuiLock) {
      liveQueryInfos.put(operation.getHandle().getHandleIdentifier().toString(),
          ((SQLOperation) operation).getQueryInfo());
    }
  }
}
HiveConf conf = operation.getParentSession().getHiveConf();
if (operation.shouldRunAsync()) {
  long maxTimeout = HiveConf.getTimeVar(conf,
      HiveConf.ConfVars.HIVE_SERVER2_LONG_POLLING_TIMEOUT, TimeUnit.MILLISECONDS);
  final long elapsed = System.currentTimeMillis() - operation.getBeginTime();
  // Ramp the polling timeout up as the operation runs longer, capped at the configured maximum
  final long timeout = Math.min(maxTimeout, (elapsed / TimeUnit.SECONDS.toMillis(10) + 1) * 500);
  try {
    operation.getBackgroundHandle().get(timeout, TimeUnit.MILLISECONDS);
  } catch (TimeoutException e) {
    // No-op: the long-polling timeout expired, fall through and report the current status
  }
}
OperationStatus opStatus = operation.getStatus();
LOG.debug(opHandle + ": getOperationStatus()");
long numModifiedRows = operation.getNumModifiedRows();
opStatus.setNumModifiedRows(numModifiedRows);
opStatus.setJobProgressUpdate(progressUpdateLog(getProgressUpdate, operation, conf));
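// Illustrative sketch only (plain JDK, hypothetical names): the long-polling pattern above
// blocks the status call on the operation's background Future for a bounded time, then returns
// whatever state the operation is in, whether or not it has finished.
import java.util.concurrent.*;

final class LongPollDemo {
  public static void main(String[] args) throws Exception {
    ExecutorService pool = Executors.newSingleThreadExecutor();
    Future<?> background = pool.submit(() -> {
      try { Thread.sleep(5_000); } catch (InterruptedException ignored) { }
    });

    long pollTimeoutMs = 1_000;            // analogous to HIVE_SERVER2_LONG_POLLING_TIMEOUT
    try {
      background.get(pollTimeoutMs, TimeUnit.MILLISECONDS);
    } catch (TimeoutException e) {
      // No-op: timeout expired, report the current (still running) status to the caller
    }
    System.out.println("operation done? " + background.isDone());
    pool.shutdownNow();
  }
}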
private String getQueryId(Operation operation) {
  return operation.getParentSession().getHiveConf().getVar(ConfVars.HIVEQUERYID);
}
public RowSet getOperationLogRowSet(OperationHandle opHandle, FetchOrientation orientation,
    long maxRows, HiveConf hConf) throws HiveSQLException {
  TableSchema tableSchema = new TableSchema(getLogSchema());
  RowSet rowSet =
      RowSetFactory.create(tableSchema, getOperation(opHandle).getProtocolVersion(), false);
  if (!hConf.getBoolVar(ConfVars.HIVE_SERVER2_LOGGING_OPERATION_ENABLED)) {
    LOG.warn("Operation log requested while hive.server2.logging.operation.enabled is false;"
        + " no log will be returned.");
    return rowSet;
  }
  // get the OperationLog object from the operation
  OperationLog operationLog = getOperation(opHandle).getOperationLog();
  if (operationLog == null) {
    throw new HiveSQLException("Couldn't find log associated with operation handle: " + opHandle);
  }
  // read logs
  List<String> logs;
  try {
    logs = operationLog.readOperationLog(isFetchFirst(orientation), maxRows);
  } catch (SQLException e) {
    throw new HiveSQLException(e.getMessage(), e.getCause());
  }
  // convert logs to RowSet
  for (String log : logs) {
    rowSet.addRow(new String[] { log });
  }
  return rowSet;
}
public RowSet getOperationNextRowSet(OperationHandle opHandle, FetchOrientation orientation,
    long maxRows) throws HiveSQLException {
  return getOperation(opHandle).getNextRowSet(orientation, maxRows);
}
private Operation removeTimedOutOperation(OperationHandle operationHandle) {
  Operation operation = handleToOperation.get(operationHandle);
  if (operation != null && operation.isTimedOut(System.currentTimeMillis())) {
    LOG.info("Operation is timed out, operation=" + operation.getHandle() + ", state="
        + operation.getState().toString());
    Metrics metrics = MetricsFactory.getInstance();
    if (metrics != null) {
      try {
        metrics.decrementCounter(MetricsConstant.OPEN_OPERATIONS);
      } catch (Exception e) {
        LOG.warn("Error decrementing open_operations metric, reported values may be incorrect", e);
      }
    }
    return removeOperation(operationHandle);
  }
  return null;
}
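// Illustrative sketch only (plain JDK, hypothetical names): a periodic sweep that removes
// timed-out entries from the handle map first (so no new callers can see them), keeps the
// open-operations gauge in sync, and then closes each one while tolerating per-entry failures -
// mirroring removeTimedOutOperation / closeTimedOutOperations above.
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;

final class TimedOutSweep {
  static final class Task {
    final String handle;
    final long deadlineMs;
    Task(String handle, long deadlineMs) { this.handle = handle; this.deadlineMs = deadlineMs; }
    boolean isTimedOut(long nowMs) { return nowMs > deadlineMs; }
    void close() { /* release resources */ }
  }

  private final Map<String, Task> handleToTask = new ConcurrentHashMap<>();
  private final AtomicLong openOperations = new AtomicLong();

  void sweep() {
    long now = System.currentTimeMillis();
    List<Task> timedOut = new ArrayList<>();
    for (Task task : handleToTask.values()) {
      if (task.isTimedOut(now) && handleToTask.remove(task.handle) != null) {
        openOperations.decrementAndGet();  // keep the gauge consistent with the map
        timedOut.add(task);
      }
    }
    for (Task task : timedOut) {
      try {
        task.close();
      } catch (Exception e) {
        // Log and continue: one bad operation must not block closing the rest
      }
    }
  }
}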
/**
 * Invoked before runInternal().
 * Set up some preconditions, or configurations.
 */
protected void beforeRun() {
  ShimLoader.getHadoopShims().setHadoopQueryContext(queryState.getQueryId());
  createOperationLog();
  LogUtils.registerLoggingContext(queryState.getConf());
}
public void closeOperation(OperationHandle opHandle) throws HiveSQLException {
  LOG.info("Closing operation: " + opHandle);
  Operation operation = removeOperation(opHandle);
  Metrics metrics = MetricsFactory.getInstance();
  if (metrics != null) {
    try {
      metrics.decrementCounter(MetricsConstant.OPEN_OPERATIONS);
    } catch (Exception e) {
      LOG.warn("Error reporting close operation to the metrics system", e);
    }
  }
  operation.close();
}
private synchronized void addOperation(Operation operation) {
  handleToOperation.put(operation.getHandle(), operation);
}
private JobProgressUpdate progressUpdateLog(boolean isProgressLogRequested, Operation operation,
    HiveConf conf) {
  if (!isProgressLogRequested || !ServiceUtils.canProvideProgressLog(conf)
      || !OperationType.EXECUTE_STATEMENT.equals(operation.getType())) {
    return new JobProgressUpdate(ProgressMonitor.NULL);
  }
  SessionState sessionState = operation.getParentSession().getSessionState();
  long startTime = System.nanoTime();
  int timeOutMs = 8;
  boolean terminated = operation.isDone();
  try {
    while ((sessionState.getProgressMonitor() == null) && !terminated) {
      long remainingMs = (PROGRESS_MAX_WAIT_NS - (System.nanoTime() - startTime)) / 1000000L;
      if (remainingMs <= 0) {
        LOG.debug("timed out and hence returning progress log as NULL");
        return new JobProgressUpdate(ProgressMonitor.NULL);
      }
      terminated = operation.waitToTerminate(Math.min(remainingMs, timeOutMs));
      timeOutMs <<= 1;
    }
  } catch (InterruptedException e) {
    LOG.warn("Error while getting progress update", e);
  }
  ProgressMonitor pm = sessionState.getProgressMonitor();
  return new JobProgressUpdate(pm != null ? pm : ProgressMonitor.NULL);
}
private JobProgressUpdate progressUpdateLog(boolean isProgressLogRequested, Operation operation,
    HiveConf conf) {
  if (!isProgressLogRequested || !ServiceUtils.canProvideProgressLog(conf)
      || !OperationType.EXECUTE_STATEMENT.equals(operation.getType())) {
    return new JobProgressUpdate(ProgressMonitor.NULL);
  }
  SessionState sessionState = operation.getParentSession().getSessionState();
  long startTime = System.nanoTime();
  int timeOutMs = 8;
  try {
    while (sessionState.getProgressMonitor() == null && !operation.isDone()) {
      long remainingMs = (PROGRESS_MAX_WAIT_NS - (System.nanoTime() - startTime)) / 1000000L;
      if (remainingMs <= 0) {
        LOG.debug("timed out and hence returning progress log as NULL");
        return new JobProgressUpdate(ProgressMonitor.NULL);
      }
      Thread.sleep(Math.min(remainingMs, timeOutMs));
      timeOutMs <<= 1;
    }
  } catch (InterruptedException e) {
    LOG.warn("Error while getting progress update", e);
  }
  ProgressMonitor pm = sessionState.getProgressMonitor();
  return new JobProgressUpdate(pm != null ? pm : ProgressMonitor.NULL);
}
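// Illustrative sketch only (plain JDK, hypothetical names): both progressUpdateLog variants
// above wait for a progress monitor using a capped exponential backoff - the wait interval
// starts small and doubles on each attempt, bounded by an overall deadline.
import java.util.concurrent.TimeUnit;
import java.util.function.Supplier;

final class BackoffWait {
  /** Polls {@code ready} until it returns true or {@code maxWaitMs} elapses. */
  static boolean waitWithBackoff(Supplier<Boolean> ready, long maxWaitMs)
      throws InterruptedException {
    long deadlineNs = System.nanoTime() + TimeUnit.MILLISECONDS.toNanos(maxWaitMs);
    long sleepMs = 8;                                // initial interval, doubled every round
    while (!ready.get()) {
      long remainingMs = TimeUnit.NANOSECONDS.toMillis(deadlineNs - System.nanoTime());
      if (remainingMs <= 0) {
        return false;                                // deadline exceeded, give up
      }
      Thread.sleep(Math.min(remainingMs, sleepMs));
      sleepMs <<= 1;
    }
    return true;
  }
}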
if (operation.shouldRunAsync()) {
  HiveConf conf = operation.getParentSession().getHiveConf();
  long timeout = HiveConf.getTimeVar(conf,
      HiveConf.ConfVars.HIVE_SERVER2_LONG_POLLING_TIMEOUT, TimeUnit.MILLISECONDS);
  try {
    operation.getBackgroundHandle().get(timeout, TimeUnit.MILLISECONDS);
  } catch (TimeoutException e) {
    // Long polling timed out; return the current status to the caller
    OperationStatus opStatus = operation.getStatus();
    LOG.debug(opHandle + ": getOperationStatus()");
    return opStatus;
  }
}
@Override
public void cancelOperation(OperationHandle opHandle) throws HiveSQLException {
  sessionManager.getOperationManager().getOperation(opHandle)
      .getParentSession().cancelOperation(opHandle);
  LOG.debug(opHandle + ": cancelOperation()");
}
public RowSet getOperationLogRowSet(OperationHandle opHandle, FetchOrientation orientation,
    long maxRows) throws HiveSQLException {
  // get the OperationLog object from the operation
  OperationLog operationLog = getOperation(opHandle).getOperationLog();
  if (operationLog == null) {
    throw new HiveSQLException("Couldn't find log associated with operation handle: " + opHandle);
  }
  // read logs
  List<String> logs;
  try {
    logs = operationLog.readOperationLog(isFetchFirst(orientation), maxRows);
  } catch (SQLException e) {
    throw new HiveSQLException(e.getMessage(), e.getCause());
  }
  // convert logs to RowSet
  TableSchema tableSchema = new TableSchema(getLogSchema());
  RowSet rowSet = RowSetFactory.create(tableSchema, getOperation(opHandle).getProtocolVersion());
  for (String log : logs) {
    rowSet.addRow(new String[] { log });
  }
  return rowSet;
}
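// Illustrative sketch only (plain JDK, hypothetical names): readOperationLog(isFetchFirst, maxRows)
// above behaves like a cursor over accumulated log lines - a fetch-first request rewinds to the
// beginning, while fetch-next continues from where the previous read stopped.
import java.util.ArrayList;
import java.util.List;

final class PagedLogReader {
  private final List<String> lines = new ArrayList<>();
  private int cursor = 0;

  synchronized void append(String line) {
    lines.add(line);
  }

  /** Returns up to {@code maxRows} lines; rewinds first when {@code fetchFirst} is true. */
  synchronized List<String> read(boolean fetchFirst, int maxRows) {
    if (fetchFirst) {
      cursor = 0;                                    // restart from the top of the log
    }
    int end = Math.min(lines.size(), cursor + maxRows);
    List<String> page = new ArrayList<>(lines.subList(cursor, end));
    cursor = end;                                    // remember position for the next read
    return page;
  }
}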
public RowSet getNextRowSet() throws HiveSQLException {
  return getNextRowSet(FetchOrientation.FETCH_NEXT, DEFAULT_FETCH_MAX_ROWS);
}
private boolean cancelOperation(Operation operation, boolean isAdmin, String errMsg)
    throws HiveSQLException {
  if (isAdmin || operation.getParentSession().getUserName()
      .equals(SessionState.get().getAuthenticator().getUserName())) {
    OperationHandle handle = operation.getHandle();
    operationManager.cancelOperation(handle, errMsg);
    return true;
  } else {
    return false;
  }
}