Refine search
/**
 * Creates the per-session Spark scratch directory under the HDFS scratch
 * root and registers it for deletion when the JVM exits.
 *
 * @return the created scratch directory path
 * @throws IOException if the directory cannot be created
 */
private Path createScratchDir() throws IOException {
  Path sparkRoot = new Path(SessionState.get().getHdfsScratchDirURIString(), SPARK_DIR);
  Path scratchDir = new Path(sparkRoot, sessionId);
  FileSystem fs = scratchDir.getFileSystem(conf);
  FsPermission permission =
      new FsPermission(HiveConf.getVar(conf, HiveConf.ConfVars.SCRATCHDIRPERMISSION));
  fs.mkdirs(scratchDir, permission);
  // Best-effort cleanup: the filesystem removes the directory on JVM shutdown.
  fs.deleteOnExit(scratchDir);
  return scratchDir;
}
/**
 * Entry point for running a Hive command line from an Oozie action: loads
 * the action configuration, installs the HCat semantic analyzer hook,
 * enables SASL for the metastore, and executes the supplied command.
 *
 * @param args args[0] must contain the Hive command line to run
 * @throws Exception if configuration loading or command execution fails
 */
public static void main(String[] args) throws Exception {
  // Fail fast with clear messages instead of an opaque NPE / AIOOBE.
  if (args.length < 1) {
    throw new IllegalArgumentException("Expected the Hive command line as the first argument");
  }
  String actionConfXml = System.getProperty("oozie.action.conf.xml");
  if (actionConfXml == null) {
    throw new IllegalStateException("System property oozie.action.conf.xml is not set");
  }
  HiveConf conf = new HiveConf();
  conf.addResource(new Path("file:///", actionConfXml));
  conf.setVar(ConfVars.SEMANTIC_ANALYZER_HOOK, HCatSemanticAnalyzer.class.getName());
  conf.setBoolVar(ConfVars.METASTORE_USE_THRIFT_SASL, true);
  SessionState.start(new CliSessionState(conf));
  new CliDriver().processLine(args[0]);
}
private void applyAuthorizationConfigPolicy(HiveConf newHiveConf) throws HiveException, MetaException { // authorization setup using SessionState should be revisited eventually, as // authorization and authentication are not session specific settings SessionState ss = new SessionState(newHiveConf); ss.setIsHiveServerQuery(true); SessionState.start(ss); ss.applyAuthorizationPolicy(); }
/**
 * Starts a new session for the given configuration and makes it the
 * current session.
 */
public static SessionState start(HiveConf conf) {
  return start(new SessionState(conf));
}
/**
 * Reports whether V2 authorization is both in effect for the current
 * session and enabled in its configuration.
 */
// NOTE(review): assumes a current SessionState exists; SessionState.get()
// returning null would cause an NPE here — confirm callers guarantee this.
protected boolean isAuthV2Enabled() {
  SessionState session = SessionState.get();
  return session.isAuthorizationModeV2()
      && HiveConf.getBoolVar(session.getConf(), HiveConf.ConfVars.HIVE_AUTHORIZATION_ENABLED);
}
conf.set("hive.metastore.filter.hook", "org.apache.hadoop.hive.metastore.DefaultMetaStoreFilterHookImpl"); db = Hive.get(conf); SessionState.get().setCurrentDatabase(dbName); for (String tblName : db.getAllTables()) { Table tblObj = null; if (!tblObj.isMaterializedView()) { continue; SessionState.get().setCurrentDatabase(dbName); for (String tblName : db.getAllTables()) { if (!DEFAULT_DATABASE_NAME.equals(dbName) || !srcTables.contains(tblName)) { if (!tblObj.isMaterializedView()) { continue; Path p = new Path(testWarehouse); FileSystem fileSystem = p.getFileSystem(conf); if (fileSystem.exists(p)) { for (FileStatus status : fileSystem.listStatus(p)) { if (status.isDirectory() && !srcTables.contains(status.getPath().getName())) { fileSystem.delete(status.getPath(), true); SessionState.get().setCurrentDatabase(DEFAULT_DATABASE_NAME);
/** * Uploads a local file to HDFS * This method is not thread safe * * @param source * @param conf * @return * @throws IOException */ public static URI uploadToHDFS(URI source, HiveConf conf) throws IOException { Path localFile = new Path(source.getPath()); Path remoteFile = new Path(SessionState.get().getSparkSession().getHDFSSessionDir(), getFileName(source)); FileSystem fileSystem = FileSystem.get(remoteFile.toUri(), conf); // Overwrite if the remote file already exists. Whether the file can be added // on executor is up to spark, i.e. spark.files.overwrite fileSystem.copyFromLocalFile(false, true, localFile, remoteFile); Path fullPath = fileSystem.getFileStatus(remoteFile).getPath(); return fullPath.toUri(); }
/**
 * One-time test fixture: starts the metastore server, configures SQL
 * standard authorization, creates a unique warehouse directory, and opens
 * the driver, session, and HCat client used by the tests.
 */
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  TestHCatClient.startMetaStoreServer();
  hconf = TestHCatClient.getConf();
  hconf.set(HiveConf.ConfVars.SEMANTIC_ANALYZER_HOOK.varname, "");
  hconf.setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER,
      "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory");

  // Timestamp suffix keeps repeated test runs from colliding on the same dir.
  TEST_PATH = System.getProperty("test.warehouse.dir", "/tmp") + Path.SEPARATOR
      + TestCommands.class.getCanonicalName() + "-" + System.currentTimeMillis();
  Path warehousePath = new Path(TEST_PATH);
  FileSystem fs = FileSystem.get(warehousePath.toUri(), hconf);
  fs.mkdirs(warehousePath);

  driver = DriverFactory.newDriver(hconf);
  SessionState.start(new CliSessionState(hconf));
  client = HCatClient.create(hconf);
}
private static class ThreadLocalHive extends ThreadLocal<Hive> { @Override protected Hive initialValue() { return null; } @Override public synchronized void set(Hive hiveObj) { Hive currentHive = this.get(); if (currentHive != hiveObj) { // Remove/close current thread-local Hive object before overwriting with new Hive object. remove(); super.set(hiveObj); } } @Override public synchronized void remove() { Hive currentHive = this.get(); if (currentHive != null) { // Close the metastore connections before removing it from thread local hiveDB. currentHive.close(false); super.remove(); } } }
MetaException, NoSuchObjectException, TException { SessionState ss = SessionState.get(); if (ss == null) { throw new MetaException("No current SessionState, cannot create temporary table" Path tblPath = wh.getDnsPath(new Path(tbl.getSd().getLocation())); if (tblPath == null) { throw new MetaException("Temp table path not set for " + tbl.getTableName()); tbl.getSd().setLocation(tblPath.toString()); Table tTable = new Table(tbl); if (tables == null) { tables = new HashMap<String, Table>(); ss.getTempTables().put(dbName, tables);
/**
 * If the script program referenced by the command is not a local file URI,
 * registers it as a session FILE resource (localizing it) and rewrites the
 * command to refer to the localized file by bare name.
 *
 * @param cmd the original script command
 * @return the rewritten command, or {@code cmd} unchanged when the program
 *         is already a local file
 */
private String fetchFilesNotInLocalFilesystem(String cmd) {
  SessionState session = SessionState.get();
  String progName = getScriptProgName(cmd);
  if (ResourceDownloader.isFileUri(progName)) {
    return cmd;
  }
  // Localize the resource, then reference it by its file name plus the
  // original script arguments.
  String localizedPath = session.add_resource(ResourceType.FILE, progName, true);
  String baseName = new Path(localizedPath).getName();
  return baseName + getScriptArgs(cmd);
}
String onClauseAsString, Table targetTable, boolean onlyHaveWhenNotMatchedClause) throws SemanticException { if (!conf.getBoolVar(HiveConf.ConfVars.MERGE_CARDINALITY_VIOLATION_CHECK)) { LOG.info("Merge statement cardinality violation check is disabled: " + HiveConf.ConfVars.MERGE_CARDINALITY_VIOLATION_CHECK.varname); .append("\n SELECT cardinality_violation(") .append(getSimpleTableName(target)).append(".ROW__ID"); addPartitionColsToSelect(targetTable.getPartCols(), rewrittenQueryStr, target); addPartitionColsToSelect(targetTable.getPartCols(), rewrittenQueryStr, target); format.processStorageFormat("TextFile"); Table table = db.newTable(tableName); table.setSerializationLib(format.getSerde()); List<FieldSchema> fields = new ArrayList<FieldSchema>(); fields.add(new FieldSchema("val", "int", null)); table.setFields(fields); table.setDataLocation(Warehouse.getDnsPath(new Path(SessionState.get().getTempTableSpace(), tableName), conf)); table.getTTable().setTemporary(true);
/**
 * Verifies that getInputSummaryWithPool submits one task per path to the
 * supplied executor and shuts the pool down afterwards.
 */
@Test
public void testGetInputSummaryPoolAndFailure()
    throws ExecutionException, InterruptedException, IOException {
  ExecutorService pool = mock(ExecutorService.class);
  when(pool.submit(any(Runnable.class))).thenReturn(mock(Future.class));

  // Three distinct paths -> three expected submissions to the pool.
  Set<Path> pathsToProcess = new HashSet<>();
  for (String name : new String[] {"dummy-path1", "dummy-path2", "dummy-path3"}) {
    pathsToProcess.add(new Path(name));
  }

  SessionState.start(new HiveConf());
  JobConf jobConf = new JobConf();
  Context context = new Context(jobConf);

  Utilities.getInputSummaryWithPool(
      context, pathsToProcess, mock(MapWork.class), new long[3], pool);

  verify(pool, times(3)).submit(any(Runnable.class));
  verify(pool).shutdown();
  verify(pool).shutdownNow();
}
/**
 * Returns the local session path, taking it from the active SessionState
 * when one exists and otherwise from the configuration entry.
 */
public static Path getLocalSessionPath(Configuration conf) {
  SessionState session = SessionState.get();
  if (session != null) {
    Preconditions.checkNotNull(session.localSessionPath,
        "Local session path expected to be non-null");
    return session.localSessionPath;
  }
  // No active session: the path must have been published into the conf.
  String fromConf = conf.get(LOCAL_SESSION_PATH_KEY);
  Preconditions.checkNotNull(fromConf, "Conf local session path expected to be non-null");
  return new Path(fromConf);
}
/**
 * Returns the temp-table space path, taking it from the active SessionState
 * when one exists and otherwise from the configuration entry.
 */
public static Path getTempTableSpace(Configuration conf) {
  SessionState session = SessionState.get();
  if (session != null) {
    return session.getTempTableSpace();
  }
  // No active session: fall back to the path recorded in the configuration.
  String fromConf = conf.get(TMP_TABLE_SPACE_KEY);
  Preconditions.checkNotNull(fromConf, "Conf temp table path expected to be non-null");
  return new Path(fromConf);
}
oldview.setViewOriginalText(crtView.getViewOriginalText()); oldview.setViewExpandedText(crtView.getViewExpandedText()); oldview.setFields(crtView.getSchema()); if (crtView.getComment() != null) { oldview.setProperty("comment", crtView.getComment()); db.alterTable(crtView.getViewName(), oldview, null); } catch (InvalidOperationException e) { throw new HiveException(e); tbl.setDataLocation(new Path(crtView.getLocation())); DataContainer dc = new DataContainer(tbl.getTTable()); SessionState.get().getLineageState().setLineage(new Path(crtView.getViewName()), dc, tbl.getCols());
/** * Checks if a given path has read-only access permissions. * * @param path The path to check for read-only permissions. * @return True if the path is read-only; False otherwise. * @throws HiveException If an error occurs while checking file permissions. */ private boolean isPathReadOnly(Path path) throws HiveException { HiveConf conf = SessionState.get().getConf(); try { FileSystem fs = path.getFileSystem(conf); UserGroupInformation ugi = Utils.getUGI(); FileStatus status = fs.getFileStatus(path); // We just check for writing permissions. If it fails with AccessControException, then it // means the location may be read-only. FileUtils.checkFileAccessWithImpersonation(fs, status, FsAction.WRITE, ugi.getUserName()); // Path has writing permissions return false; } catch (AccessControlException e) { // An AccessControlException may be caused for other different errors, // but we take it as if our path is read-only return true; } catch (Exception e) { throw new HiveException("Unable to determine if " + path + " is read only: " + e, e); } }
String destTableFullName = dest_tab.getCompleteName().replace('@', '.'); Map<String, ASTNode> iowMap = qb.getParseInfo().getInsertOverwriteTables(); if (iowMap.containsKey(destTableFullName) && conf.getBoolVar(HiveConf.ConfVars.COMPRESSRESULT), currentTableId, rsCtx.isMultiFileSpray(), canBeMerged, rsCtx.getNumFiles(), rsCtx.getTotalFiles(), rsCtx.getPartnCols(), dpCtx, dest_path, mmWriteId, isMmCtas, isInsertOverwrite); boolean isHiveServerQuery = SessionState.get().isHiveServerQuery(); fileSinkDesc.setHiveServerQuery(isHiveServerQuery); AcidUtils.isInsertOnlyTable(dest_part.getTable().getParameters())) || (table_desc != null && AcidUtils.isInsertOnlyTable(table_desc.getProperties())); fileSinkDesc.setStatsAggPrefix(fileSinkDesc.getDirName().toString()); if (!destTableIsMaterialization && HiveConf.getVar(conf, HIVESTATSDBCLASS).equalsIgnoreCase(StatDB.fs.name())) { String statsTmpLoc = ctx.getTempDirForInterimJobPath(dest_path).toString(); fileSinkDesc.setStatsTmpDir(statsTmpLoc); LOG.debug("Set stats collection dir : " + statsTmpLoc);
private void addWriteNotificationLog(Table tbl, Map<String, String> partitionSpec, List<Path> newFiles, Long writeId) throws HiveException { if (!conf.getBoolVar(ConfVars.FIRE_EVENTS_FOR_DML)) { LOG.debug("write notification log is ignored as dml event logging is disabled"); return; if (tbl.isTemporary()) { LOG.debug("write notification log is ignored as " + tbl.getTableName() + " is temporary : " + writeId); return; LOG.debug("adding write notification log for operation " + writeId + " table " + tbl.getCompleteName() + "partition " + partitionSpec + " list of files " + newFiles); FileSystem fileSystem = tbl.getDataLocation().getFileSystem(conf); Long txnId = SessionState.get().getTxnMgr().getCurrentTxnId(); throw new HiveException(e);