@PluginFactory public static NameFilter createFilter( @PluginAttribute("loggingLevel") final String loggingLevel) { // Name required for routing. Error out if it is not set. Preconditions.checkNotNull(loggingLevel, "loggingLevel must be specified for " + NameFilter.class.getName()); return new NameFilter(OperationLog.getLoggingLevel(loggingLevel)); } }
private void logTypeWarning(String colName, String colType) { String warning = "Only primitive type arguments are accepted but " + colType + " is passed for " + colName + "."; warning = "WARNING: " + warning; console.printInfo(warning); // Propagate warning to beeline via operation log. OperationLog ol = OperationLog.getCurrentOperationLog(); if (ol != null) { ol.writeOperationLog(LoggingLevel.EXECUTION, warning + "\n"); } }
@Override public void run() { runner = Thread.currentThread(); try { OperationLog.setCurrentOperationLog(operationLog); SessionState.start(ss); runSequential(); } finally { try { // Call Hive.closeCurrent() that closes the HMS connection, causes // HMS connection leaks otherwise. Hive.closeCurrent(); } catch (Exception e) { LOG.warn("Exception closing Metastore connection:" + e.getMessage()); } runner = null; result.setRunning(false); } }
OperationLog operationLog = OperationLog.getCurrentOperationLog(); if (operationLog != null) { outPrinter = new StreamPrinter(executor.getInputStream(), null, System.out, operationLog.getPrintStream()); errPrinter = new StreamPrinter(executor.getErrorStream(), null, errPrintStream, operationLog.getPrintStream()); } else { outPrinter = new StreamPrinter(executor.getInputStream(), null, System.out);
operationLog = new OperationLog(opHandle.toString(), operationLogFile, parentSession.getHiveConf()); } catch (FileNotFoundException e) { LOG.warn("Unable to instantiate OperationLog object for operation: " + OperationLog.setCurrentOperationLog(operationLog);
LOG.info("Starting task [" + tsk + "] in parallel"); tskRun.setOperationLog(OperationLog.getCurrentOperationLog()); tskRun.start(); } else {
/** Closes the captured operation log, if any; a no-op when none was captured. */
@Override
public void run() {
  if (operationLog == null) {
    return;
  }
  LOG.info("Closing operation log {}", operationLog);
  operationLog.close();
}
}
public RowSet getOperationLogRowSet(OperationHandle opHandle, FetchOrientation orientation, long maxRows, HiveConf hConf) throws HiveSQLException { TableSchema tableSchema = new TableSchema(getLogSchema()); RowSet rowSet = RowSetFactory.create(tableSchema, getOperation(opHandle).getProtocolVersion(), false); if (hConf.getBoolVar(ConfVars.HIVE_SERVER2_LOGGING_OPERATION_ENABLED) == false) { LOG.warn("Try to get operation log when hive.server2.logging.operation.enabled is false, no log will be returned. "); return rowSet; } // get the OperationLog object from the operation OperationLog operationLog = getOperation(opHandle).getOperationLog(); if (operationLog == null) { throw new HiveSQLException("Couldn't find log associated with operation handle: " + opHandle); } // read logs List<String> logs; try { logs = operationLog.readOperationLog(isFetchFirst(orientation), maxRows); } catch (SQLException e) { throw new HiveSQLException(e.getMessage(), e.getCause()); } // convert logs to RowSet for (String log : logs) { rowSet.addRow(new String[] { log }); } return rowSet; }
@Override public int decide(LoggingEvent ev) { OperationLog log = operationManager.getOperationLogByThread(); boolean excludeMatches = (loggingMode == OperationLog.LoggingLevel.VERBOSE); if (log == null) { return Filter.DENY; } OperationLog.LoggingLevel currentLoggingMode = log.getOpLoggingLevel(); // If logging is disabled, deny everything. if (currentLoggingMode == OperationLog.LoggingLevel.NONE) { return Filter.DENY; } // Look at the current session's setting // and set the pattern and excludeMatches accordingly. if (currentLoggingMode != loggingMode) { loggingMode = currentLoggingMode; setCurrentNamePattern(loggingMode); } boolean isMatch = namePattern.matcher(ev.getLoggerName()).matches(); if (excludeMatches == isMatch) { // Deny if this is black-list filter (excludeMatches = true) and it // matched // or if this is whitelist filter and it didn't match return Filter.DENY; } return Filter.NEUTRAL; } }
/** * Overrides WriterAppender.subAppend(), which does the real logging. No need * to worry about concurrency since log4j calls this synchronously. */ @Override protected void subAppend(LoggingEvent event) { super.subAppend(event); // That should've gone into our writer. Notify the LogContext. String logOutput = writer.toString(); writer.reset(); OperationLog log = operationManager.getOperationLogByThread(); if (log == null) { LOG.debug(" ---+++=== Dropped log event from thread " + event.getThreadName()); return; } log.writeOperationLog(logOutput); } }
/** Detaches the current thread's operation log when operation logging is enabled. */
protected void unregisterOperationLog() {
  if (!isOperationLogEnabled) {
    return;
  }
  OperationLog.removeCurrentOperationLog();
}
protected void createOperationLog() { if (parentSession.isOperationLogEnabled()) { File operationLogFile = new File(parentSession.getOperationLogSessionDir(), queryState.getQueryId()); isOperationLogEnabled = true; // create OperationLog object with above log file operationLog = new OperationLog(opHandle.toString(), operationLogFile, parentSession.getHiveConf()); } }
operationLog = new OperationLog(opHandle.toString(), operationLogFile, parentSession.getHiveConf()); } catch (FileNotFoundException e) { LOG.warn("Unable to instantiate OperationLog object for operation: " + OperationLog.setCurrentOperationLog(operationLog);
/** @return the OperationLog registered for the calling thread, or null when none is set. */
public OperationLog getOperationLogByThread() {
  final OperationLog threadLog = OperationLog.getCurrentOperationLog();
  return threadLog;
}
protected synchronized void cleanupOperationLog(final long operationLogCleanupDelayMs) { // stop the appenders for the operation log String queryId = queryState.getQueryId(); LogUtils.stopQueryAppender(LogDivertAppender.QUERY_ROUTING_APPENDER, queryId); LogUtils.stopQueryAppender(LogDivertAppenderForTest.TEST_QUERY_ROUTING_APPENDER, queryId); if (isOperationLogEnabled) { if (opHandle == null) { LOG.warn("Operation seems to be in invalid state, opHandle is null"); return; } if (operationLog == null) { LOG.warn("Operation [ " + opHandle.getHandleIdentifier() + " ] " + "logging is enabled, " + "but its OperationLog object cannot be found. " + "Perhaps the operation has already terminated."); } else { if (operationLogCleanupDelayMs > 0) { scheduledExecutorService.schedule(new OperationLogCleaner(operationLog), operationLogCleanupDelayMs, TimeUnit.MILLISECONDS); } else { LOG.info("Closing operation log {} without delay", operationLog); operationLog.close(); } } } }
public RowSet getOperationLogRowSet(OperationHandle opHandle, FetchOrientation orientation, long maxRows, HiveConf hConf) throws HiveSQLException { TableSchema tableSchema = new TableSchema(getLogSchema()); RowSet rowSet = RowSetFactory.create(tableSchema, getOperation(opHandle).getProtocolVersion(), false); if (hConf.getBoolVar(ConfVars.HIVE_SERVER2_LOGGING_OPERATION_ENABLED) == false) { LOG.warn("Try to get operation log when hive.server2.logging.operation.enabled is false, no log will be returned. "); return rowSet; } // get the OperationLog object from the operation OperationLog operationLog = getOperation(opHandle).getOperationLog(); if (operationLog == null) { throw new HiveSQLException("Couldn't find log associated with operation handle: " + opHandle); } // read logs List<String> logs; try { logs = operationLog.readOperationLog(isFetchFirst(orientation), maxRows); } catch (SQLException e) { throw new HiveSQLException(e.getMessage(), e.getCause()); } // convert logs to RowSet for (String log : logs) { rowSet.addRow(new String[] { log }); } return rowSet; }
@Override public int decide(LoggingEvent ev) { OperationLog log = operationManager.getOperationLogByThread(); boolean excludeMatches = (loggingMode == OperationLog.LoggingLevel.VERBOSE); if (log == null) { return Filter.DENY; } OperationLog.LoggingLevel currentLoggingMode = log.getOpLoggingLevel(); // If logging is disabled, deny everything. if (currentLoggingMode == OperationLog.LoggingLevel.NONE) { return Filter.DENY; } // Look at the current session's setting // and set the pattern and excludeMatches accordingly. if (currentLoggingMode != loggingMode) { loggingMode = currentLoggingMode; setCurrentNamePattern(loggingMode); } boolean isMatch = namePattern.matcher(ev.getLoggerName()).matches(); if (excludeMatches == isMatch) { // Deny if this is black-list filter (excludeMatches = true) and it // matched // or if this is whitelist filter and it didn't match return Filter.DENY; } return Filter.NEUTRAL; } }
/** * Overrides WriterAppender.subAppend(), which does the real logging. No need * to worry about concurrency since log4j calls this synchronously. */ @Override protected void subAppend(LoggingEvent event) { super.subAppend(event); // That should've gone into our writer. Notify the LogContext. String logOutput = writer.toString(); writer.reset(); OperationLog log = operationManager.getOperationLogByThread(); if (log == null) { LOG.debug(" ---+++=== Dropped log event from thread " + event.getThreadName()); return; } log.writeOperationLog(logOutput); } }
/** Detaches the current thread's operation log when operation logging is enabled. */
protected void unregisterOperationLog() {
  if (!isOperationLogEnabled) {
    return;
  }
  OperationLog.removeCurrentOperationLog();
}
protected void createOperationLog() { if (parentSession.isOperationLogEnabled()) { File operationLogFile = new File(parentSession.getOperationLogSessionDir(), queryState.getQueryId()); isOperationLogEnabled = true; // create OperationLog object with above log file operationLog = new OperationLog(opHandle.toString(), operationLogFile, parentSession.getHiveConf()); } }