private void executePlan() throws Exception {
  String testName = new Exception().getStackTrace()[1].getMethodName();
  MapRedTask mrtask = new MapRedTask();
  DriverContext dctx = new DriverContext();
  mrtask.setWork(mr);
  mrtask.initialize(queryState, null, dctx, null);
  int exitVal = mrtask.execute(dctx);
  if (exitVal != 0) {
    LOG.error(testName + " execution failed with exit status: " + exitVal);
    fail(testName + " execution failed with exit status: " + exitVal);
  }
  LOG.info(testName + " execution completed successfully");
}
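// A minimal usage sketch: a test populates the MapredWork field 'mr' and then
// calls executePlan(). The test and plan-builder names below are hypothetical,
// not the actual methods of the test class.
public void testMapPlan() throws Exception {
  populateMapPlan(db.getTable("src")); // assumed helper that fills in 'mr'
  executePlan();
}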
private void resolveTask(DriverContext driverContext) throws HiveException {
  for (Task<? extends Serializable> tsk : getListTasks()) {
    if (!resTasks.contains(tsk)) {
      driverContext.remove(tsk);
      console.printInfo(tsk.getId() + " is filtered out by condition resolver.");
      if (tsk.isMapRedTask()) {
        driverContext.incCurJobNo(1);
      }
      // recursively remove this task from its children's parent tasks
      tsk.removeFromChildrenTasks();
    } else {
      if (getParentTasks() != null) {
        // This makes it so that we can go back up the tree later
        for (Task<? extends Serializable> task : getParentTasks()) {
          task.addDependentTask(tsk);
        }
      }
      // resolved task
      if (driverContext.addToRunnable(tsk)) {
        console.printInfo(tsk.getId() + " is selected by condition resolver.");
      }
    }
  }
}
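// 'resTasks' is produced by a ConditionalResolver. A minimal sketch of a
// custom resolver follows; the getTasks(HiveConf, Object) shape matches how
// ConditionalTask invokes its resolver, but the Ctx payload type here is an
// illustrative assumption (compare ConditionalResolverMergeFiles).
public class FirstTaskResolver implements ConditionalResolver, Serializable {
  private static final long serialVersionUID = 1L;

  // Illustrative resolver context: real resolvers define their own ctx class.
  public static class Ctx implements Serializable {
    List<Task<? extends Serializable>> candidates;
  }

  @Override
  public List<Task<? extends Serializable>> getTasks(HiveConf conf, Object objCtx) {
    Ctx ctx = (Ctx) objCtx;
    // Pick the first candidate; resolveTask() above then prunes the rest
    // from the DAG and queues the selection on the DriverContext.
    return ctx.candidates.subList(0, 1);
  }
}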
public HiveTxnManager getTxnMgr() {
  return driverContext.getCtx().getHiveTxnManager();
}
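// Hypothetical caller, assuming the standard HiveTxnManager surface
// (supportsExplicitLock, unlockTable); 'driver', 'db', and 'unlockTbl' are
// assumptions for this sketch.
HiveTxnManager txnMgr = driver.getTxnMgr();
if (txnMgr.supportsExplicitLock()) {
  txnMgr.unlockTable(db, unlockTbl); // same path as the unlockTable() snippet below
}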
private SparkJobRef submit(final DriverContext driverContext, final SparkWork sparkWork)
    throws Exception {
  final Context ctx = driverContext.getCtx();
  final HiveConf hiveConf = (HiveConf) ctx.getConf();
  refreshLocalResources(sparkWork, hiveConf);
  final JobConf jobConf = new JobConf(hiveConf);

  // update the credential provider location in the jobConf
  HiveConfUtil.updateJobCredentialProviders(jobConf);

  // Create temporary scratch dir
  final Path emptyScratchDir = ctx.getMRTmpPath();
  FileSystem fs = emptyScratchDir.getFileSystem(jobConf);
  fs.mkdirs(emptyScratchDir);

  // make sure NullScanFileSystem can be loaded - HIVE-18442
  jobConf.set("fs." + NullScanFileSystem.getBaseScheme() + ".impl",
      NullScanFileSystem.class.getCanonicalName());

  byte[] jobConfBytes = KryoSerializer.serializeJobConf(jobConf);
  byte[] scratchDirBytes = KryoSerializer.serialize(emptyScratchDir);
  byte[] sparkWorkBytes = KryoSerializer.serialize(sparkWork);

  JobStatusJob job = new JobStatusJob(jobConfBytes, scratchDirBytes, sparkWorkBytes);
  if (driverContext.isShutdown()) {
    throw new HiveException("Operation is cancelled.");
  }

  JobHandle<Serializable> jobHandle = remoteClient.submit(job);
  RemoteSparkJobStatus sparkJobStatus =
      new RemoteSparkJobStatus(remoteClient, jobHandle, sparkClientTimtout);
  return new RemoteSparkJobRef(hiveConf, jobHandle, sparkJobStatus);
}
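// On the calling side, the returned SparkJobRef drives monitoring and
// cancellation. A hedged sketch, assuming monitorJob()/cancelJob() on the
// SparkJobRef interface as in the upstream client:
SparkJobRef jobRef = submit(driverContext, sparkWork);
int rc = jobRef.monitorJob(); // polls RemoteSparkJobStatus until the job completes
if (rc != 0 && driverContext.isShutdown()) {
  jobRef.cancelJob(); // best-effort cancellation of the remote job
}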
@Override
public int execute(DriverContext driverContext) {
  resTasks = resolver.getTasks(conf, resolverCtx);
  resolved = true;
  for (Task<? extends Serializable> tsk : getListTasks()) {
    if (!resTasks.contains(tsk)) {
      driverContext.getRunnable().remove(tsk);
      console.printInfo(ExecDriver.getJobEndMsg("" + Utilities.randGen.nextInt())
          + ", job is filtered out (removed at runtime).");
      if (tsk.isMapRedTask()) {
        driverContext.incCurJobNo(1);
      }
      // recursively remove this task from its children's parent tasks
      tsk.removeFromChildrenTasks();
    } else {
      // resolved task
      if (!driverContext.getRunnable().contains(tsk)) {
        driverContext.addToRunnable(tsk);
      }
    }
  }
  return 0;
}
// Variant where the caller creates the runnable queue and hands it to the
// DriverContext; intervening scheduling logic elided.
Map<TaskResult, TaskRunner> running = new HashMap<TaskResult, TaskRunner>();
DriverContext driverCxt = new DriverContext(runnable, ctx);
driverCxt.addToRunnable(tsk);
// ...
if (DriverContext.isLaunchable(backupTask)) {
  driverCxt.addToRunnable(backupTask);
}
// ...
if (DriverContext.isLaunchable(child)) {
  driverCxt.addToRunnable(child);
}
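// For reference, DriverContext.isLaunchable gates both paths above. From
// memory of the same class it is essentially the check below; treat this as
// a sketch rather than a verbatim quote.
public static boolean isLaunchable(Task<? extends Serializable> tsk) {
  // Launchable = not yet queued, not yet initialized, and all parents done.
  return !tsk.getQueued() && !tsk.getInitialized() && tsk.isRunnable();
}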
fmd.setOutputPath(mergeFilesDesc.getOutputDir());
CompilationOpContext opContext = driverContext.getCtx().getOpContext();
Operator<? extends OperatorDesc> mergeOp = OperatorFactory.get(opContext, fmd);
aliasToWork.put(mergeFilesDesc.getInputDir().toString(), mergeOp);
mergeWork.setAliasToWork(aliasToWork);
DriverContext driverCxt = new DriverContext();
Task<?> task;
if (conf.getVar(ConfVars.HIVE_EXECUTION_ENGINE).equals("tez")) {
  // ... engine-specific task construction ...
}
public synchronized void launching(TaskRunner runner) throws HiveException {
  checkShutdown();
  running.add(runner);
}
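// The dequeue-side counterpart bounds parallelism against maxthreads. From
// memory of the same class, getRunnable reads roughly as below (a sketch,
// not a verbatim quote):
public synchronized Task<? extends Serializable> getRunnable(int maxthreads)
    throws HiveException {
  checkShutdown();
  // Hand out a queued task only while fewer than maxthreads runners are live.
  if (runnable.peek() != null && running.size() < maxthreads) {
    return runnable.remove();
  }
  return null;
}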
// Inside Driver.launchTask(): name and number the job, register the runner
// with the DriverContext, then hand it off.
conf.setVar(HiveConf.ConfVars.HADOOPJOBNAME, jobname + "(" + tsk.getId() + ")");
cxt.incCurJobNo(1);
console.printInfo("Launching Job " + cxt.getCurJobNo() + " out of " + jobs);
TaskRunner tskRun = new TaskRunner(tsk, tskRes);
cxt.launching(tskRun);
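// After launching(), the runner is started either on its own thread or
// inline, depending on hive.exec.parallel. A sketch of that tail of
// launchTask, assuming TaskRunner.start()/runSequential() as upstream:
if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.EXECPARALLEL)
    && tsk.canExecuteInParallel()) {
  tskRun.start(); // run the task on a separate thread
} else {
  tskRun.runSequential(); // run the task inline on the driver thread
}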
DriverContext driverCxt = new DriverContext(ctx);
driverCxt.prepare(plan);
driverCxt.addToRunnable(tsk);

// Loop while tasks are either running or queued up.
while (!destroyed && driverCxt.isRunning()) {
  // Launch up to maxthreads tasks.
  Task<? extends Serializable> task;
  while ((task = driverCxt.getRunnable(maxthreads)) != null) {
    perfLogger.PerfLogBegin(CLASS_NAME,
        PerfLogger.TASK + task.getName() + "." + task.getId());
    TaskRunner runner = launchTask(task, queryId, noName, jobname, jobs, driverCxt);
    if (!runner.isRunning()) {
      break;
    }
  }

  // Poll for a completed task; nothing finished yet means go around again.
  TaskRunner tskRun = driverCxt.pollFinished();
  if (tskRun == null) {
    continue;
  }

  Task<? extends Serializable> tsk = tskRun.getTask();
  int exitVal = tskRun.getTaskResult().getExitVal();
  if (exitVal != 0) {
    if (tsk.ifRetryCmdWhenFail()) {
      // Abort scheduling so the caller can retry the whole command.
      driverCxt.shutdown();
      // ... restore tracker and rethrow, elided ...
    }
    Task<? extends Serializable> backupTask = tsk.getAndInitBackupTask();
    if (backupTask != null && DriverContext.isLaunchable(backupTask)) {
      // Re-queue the backup task instead of failing the query.
      driverCxt.addToRunnable(backupTask);
      continue;
    }
    if (driverCxt.isShutdown()) {
      errorMessage = "FAILED: Operation cancelled. " + errorMessage;
    }
    SQLState = "08S01";
    console.printError(errorMessage);
    driverCxt.shutdown();
    // ... error propagation elided ...
  }

  driverCxt.finished(tskRun);

  // Queue children whose parents have all completed.
  if (tsk.getChildTasks() != null) {
    for (Task<? extends Serializable> child : tsk.getChildTasks()) {
      if (DriverContext.isLaunchable(child)) {
        driverCxt.addToRunnable(child);
      }
    }
  }
}
/**
 * Unlock the table/partition specified.
 *
 * @param db
 *          the Hive database handle
 * @param unlockTbl
 *          the table/partition to be unlocked
 * @return 0 when execution succeeds, above 0 if it fails.
 * @throws HiveException
 *           if an unexpected error occurs.
 */
private int unlockTable(Hive db, UnlockTableDesc unlockTbl) throws HiveException {
  Context ctx = driverContext.getCtx();
  HiveTxnManager txnManager = ctx.getHiveTxnManager();
  return txnManager.unlockTable(db, unlockTbl);
}
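// The locking counterpart goes through the same transaction manager; a
// sketch, assuming HiveTxnManager.lockTable(Hive, LockTableDesc) mirrors
// the unlock signature.
private int lockTable(Hive db, LockTableDesc lockTbl) throws HiveException {
  Context ctx = driverContext.getCtx();
  HiveTxnManager txnManager = ctx.getHiveTxnManager();
  return txnManager.lockTable(db, lockTbl);
}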
DriverContext driverCxt = new DriverContext();
PartialScanTask taskExec = new PartialScanTask();
taskExec.initialize(queryState, null, driverCxt, new CompilationOpContext());
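// Executing the initialized task then follows the usual
// Task.execute(DriverContext) contract seen in the other snippets. A hedged
// sketch; 'work' and the error handling are assumptions.
taskExec.setWork(work); // 'work' is an assumed PartialScanWork
int ret = taskExec.execute(driverCxt);
if (ret != 0) {
  throw new HiveException("PartialScanTask failed with exit status " + ret);
}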