public HiveTxnManager getTxnMgr() {
  return driverContext.getCtx().getHiveTxnManager();
}
/**
 * Unlock the table/partition specified.
 *
 * @param db
 *          the Hive session's database handle
 * @param unlockTbl
 *          the table/partition to be unlocked
 * @return 0 when execution succeeds, a value greater than 0 if it fails.
 * @throws HiveException
 *           if an unexpected error occurs.
 */
private int unlockTable(Hive db, UnlockTableDesc unlockTbl) throws HiveException {
  Context ctx = driverContext.getCtx();
  HiveTxnManager txnManager = ctx.getHiveTxnManager();
  return txnManager.unlockTable(db, unlockTbl);
}
/**
 * Lock the table/partition specified.
 *
 * @param db
 *          the Hive session's database handle
 * @param lockTbl
 *          the table/partition to be locked, along with the lock mode
 * @return 0 when execution succeeds, a value greater than 0 if it fails.
 * @throws HiveException
 *           if an unexpected error occurs.
 */
private int lockTable(Hive db, LockTableDesc lockTbl) throws HiveException {
  Context ctx = driverContext.getCtx();
  HiveTxnManager txnManager = ctx.getHiveTxnManager();
  return txnManager.lockTable(db, lockTbl);
}
/**
 * Lock the database specified.
 *
 * @param db
 *          the Hive session's database handle
 * @param lockDb
 *          the database to be locked, along with the lock mode
 * @return 0 when execution succeeds, a value greater than 0 if it fails.
 * @throws HiveException
 *           if an unexpected error occurs.
 */
private int lockDatabase(Hive db, LockDatabaseDesc lockDb) throws HiveException {
  Context ctx = driverContext.getCtx();
  HiveTxnManager txnManager = ctx.getHiveTxnManager();
  return txnManager.lockDatabase(db, lockDb);
}
/**
 * Unlock the database specified.
 *
 * @param db
 *          the Hive session's database handle
 * @param unlockDb
 *          the database to be unlocked
 * @return 0 when execution succeeds, a value greater than 0 if it fails.
 * @throws HiveException
 *           if an unexpected error occurs.
 */
private int unlockDatabase(Hive db, UnlockDatabaseDesc unlockDb) throws HiveException {
  Context ctx = driverContext.getCtx();
  HiveTxnManager txnManager = ctx.getHiveTxnManager();
  return txnManager.unlockDatabase(db, unlockDb);
}
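// All four helpers above share one shape: resolve the session's HiveTxnManager
// from the driver Context, delegate, and pass the 0 / non-zero return code
// through. A hypothetical refactoring sketch of that shared shape (LockOp and
// withTxnManager are illustrative names, not part of Hive):

/** Illustrative only: one lock/unlock call on the transaction manager. */
@FunctionalInterface
interface LockOp<D> {
  int apply(HiveTxnManager txnManager, Hive db, D desc) throws HiveException;
}

/** Resolve the session's transaction manager once, then delegate to the given call. */
private <D> int withTxnManager(Hive db, D desc, LockOp<D> op) throws HiveException {
  HiveTxnManager txnManager = driverContext.getCtx().getHiveTxnManager();
  return op.apply(txnManager, db, desc);
}

// Usage, e.g. the body of lockTable(db, lockTbl) would become:
//   return withTxnManager(db, lockTbl, HiveTxnManager::lockTable);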
@Override
public int execute(DriverContext driverContext) {
  if (driverContext.getCtx().getExplainAnalyze() == AnalyzeState.RUNNING) {
    return 0;
  }
  try {
    Hive db = getHive();
    return persistColumnStats(db);
  } catch (Exception e) {
    LOG.error("Failed to run column stats task", e);
  }
  return 1;
}
Context ctx = driverContext.getCtx();
if (ctx.getHiveTxnManager().supportsAcid()) {
private SparkJobRef submit(final DriverContext driverContext, final SparkWork sparkWork) throws Exception {
  final Context ctx = driverContext.getCtx();
  final HiveConf hiveConf = (HiveConf) ctx.getConf();
  refreshLocalResources(sparkWork, hiveConf);
  final JobConf jobConf = new JobConf(hiveConf);

  // update the credential provider location in the jobConf
  HiveConfUtil.updateJobCredentialProviders(jobConf);

  // Create temporary scratch dir
  final Path emptyScratchDir = ctx.getMRTmpPath();
  FileSystem fs = emptyScratchDir.getFileSystem(jobConf);
  fs.mkdirs(emptyScratchDir);

  // make sure NullScanFileSystem can be loaded - HIVE-18442
  jobConf.set("fs." + NullScanFileSystem.getBaseScheme() + ".impl",
      NullScanFileSystem.class.getCanonicalName());

  byte[] jobConfBytes = KryoSerializer.serializeJobConf(jobConf);
  byte[] scratchDirBytes = KryoSerializer.serialize(emptyScratchDir);
  byte[] sparkWorkBytes = KryoSerializer.serialize(sparkWork);

  JobStatusJob job = new JobStatusJob(jobConfBytes, scratchDirBytes, sparkWorkBytes);
  if (driverContext.isShutdown()) {
    throw new HiveException("Operation is cancelled.");
  }

  JobHandle<Serializable> jobHandle = remoteClient.submit(job);
  RemoteSparkJobStatus sparkJobStatus =
      new RemoteSparkJobStatus(remoteClient, jobHandle, sparkClientTimtout);
  return new RemoteSparkJobRef(hiveConf, jobHandle, sparkJobStatus);
}
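// The submit path above ships the three job pieces to the remote driver as Kryo
// byte arrays; the remote side rebuilds them before running the SparkWork. A
// minimal sketch of that counterpart, assuming KryoSerializer exposes matching
// deserializeJobConf(byte[]) and deserialize(byte[], Class) methods as in Hive's
// Spark client code (rebuildSparkWork is an illustrative name):
private static SparkWork rebuildSparkWork(byte[] jobConfBytes, byte[] scratchDirBytes,
    byte[] sparkWorkBytes) {
  JobConf jobConf = KryoSerializer.deserializeJobConf(jobConfBytes);
  Path scratchDir = KryoSerializer.deserialize(scratchDirBytes, Path.class);
  // jobConf and scratchDir would be wired into the remote execution environment here.
  return KryoSerializer.deserialize(sparkWorkBytes, SparkWork.class);
}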
@Override
public void run(HookContext hookContext) throws Exception {
  assert (hookContext.getHookType() == HookContext.HookType.POST_EXEC_HOOK
      || hookContext.getHookType() == HookContext.HookType.ON_FAILURE_HOOK);
  HiveConf conf = hookContext.getConf();
  if (!"tez".equals(HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_EXECUTION_ENGINE))) {
    return;
  }

  LOG.info("Executing post execution hook to print workload manager events summary..");
  SessionState.LogHelper console = SessionState.getConsole();
  QueryPlan plan = hookContext.getQueryPlan();
  if (plan == null) {
    return;
  }

  List<TezTask> rootTasks = Utilities.getTezTasks(plan.getRootTasks());
  for (TezTask tezTask : rootTasks) {
    WmContext wmContext = tezTask.getDriverContext().getCtx().getWmContext();
    if (wmContext != null) {
      wmContext.printJson(console);
      wmContext.shortPrint(console);
    }
  }
}
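// A hook like the one above is registered through hive.exec.post.hooks (a
// comma-separated list of fully qualified class names implementing
// ExecuteWithHookContext). A minimal sketch of a custom hook following the same
// contract; the class name is illustrative, not from Hive:
import org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext;
import org.apache.hadoop.hive.ql.hooks.HookContext;

/** Illustrative only: logs the query id after every query. */
public class QueryIdLoggingHook implements ExecuteWithHookContext {
  @Override
  public void run(HookContext hookContext) throws Exception {
    if (hookContext.getQueryPlan() != null) {
      System.out.println("completed query: " + hookContext.getQueryPlan().getQueryId());
    }
  }
}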
@Override
public SparkJobRef execute(DriverContext driverContext, SparkWork sparkWork) throws Exception {
  Context ctx = driverContext.getCtx();
  HiveConf hiveConf = (HiveConf) ctx.getConf();
  refreshLocalResources(sparkWork, hiveConf);
@Override
public int execute(DriverContext driverContext) {
  if (driverContext.getCtx().getExplainAnalyze() == AnalyzeState.RUNNING) {
    return 0;
  }
  if (work.isAggregating() && work.isFooterScan()) {
    throw new RuntimeException("Can not have both basic stats work and stats no job work!");
  }

  int ret = 0;
  try {
    if (work.isFooterScan()) {
      work.getBasicStatsNoJobWork().setPartitions(work.getPartitions());
    }

    Hive db = getHive();
    Table tbl = getTable(db);

    for (IStatsProcessor task : processors) {
      task.setDpPartSpecs(dpPartSpecs);
      ret = task.process(db, tbl);
      if (ret != 0) {
        return ret;
      }
    }
  } catch (Exception e) {
    LOG.error("Failed to run stats task", e);
    setException(e);
    return 1;
  }
  return 0;
}
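// Each processor in the loop above is driven through the same two calls:
// setDpPartSpecs(...) and then process(db, tbl), where any non-zero return makes
// execute() stop and propagate that code. A hypothetical processor mirroring that
// contract (illustrative only; the real IStatsProcessor interface may declare
// further methods, so this sketch does not claim to implement it):
import java.util.Collection;

import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.apache.hadoop.hive.ql.metadata.Table;

/** Illustrative only: mirrors the setDpPartSpecs/process contract used by execute(). */
public class LoggingStatsProcessor {
  private Collection<Partition> dpPartSpecs;

  public void setDpPartSpecs(Collection<Partition> dpPartSpecs) {
    this.dpPartSpecs = dpPartSpecs;
  }

  public int process(Hive db, Table tbl) throws Exception {
    int parts = (dpPartSpecs == null) ? 0 : dpPartSpecs.size();
    System.out.println("stats pass over " + tbl.getTableName()
        + " with " + parts + " dynamic partition specs");
    return 0; // a non-zero return here would abort the processor loop in execute()
  }
}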
} else {
  if (inputSummary == null) {
    inputSummary = Utilities.getInputSummary(driverContext.getCtx(), work.getMapWork(), null);