    JobHelper.distributedClassPath(jobConf.getWorkingDirectory()),
    JobHelper.distributedClassPath(getJobClassPathDir(job.getJobName(), jobConf.getWorkingDirectory())),
    job
);
/**
 * Get the current working directory for the default file system.
 *
 * @return the directory name.
 */
public Path getWorkingDirectory() throws IOException {
  return conf.getWorkingDirectory();
}
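A minimal usage sketch of this accessor (the class name is hypothetical; behavior shown assumes the standard `org.apache.hadoop.mapreduce.Job` API): the job delegates to its configuration, which falls back to the default file system's working directory when none has been set explicitly.

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.Job;

public class GetWorkingDirExample {
  public static void main(String[] args) throws Exception {
    // Hedged sketch: with no explicit setting, this reports the default
    // file system's working directory (e.g. the user's home/current dir).
    Job job = Job.getInstance();
    Path cwd = job.getWorkingDirectory();
    System.out.println(cwd);
  }
}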
/**
 * Set the {@link Path} of the output directory for the map-reduce job.
 *
 * @param conf      The configuration of the job.
 * @param outputDir the {@link Path} of the output directory for
 *                  the map-reduce job.
 */
public static void setOutputPath(JobConf conf, Path outputDir) {
  outputDir = new Path(conf.getWorkingDirectory(), outputDir);
  conf.set(org.apache.hadoop.mapreduce.lib.output.FileOutputFormat.OUTDIR,
      outputDir.toString());
}
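A short sketch of how this is typically used (class name hypothetical, path illustrative; assumes the old `org.apache.hadoop.mapred` API): a relative output path is qualified against the job's working directory before being stored in the configuration.

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobConf;

public class OutputPathExample {
  public static void main(String[] args) {
    // Minimal sketch: set a relative output path, then read back the
    // fully qualified path that was resolved against the working directory.
    JobConf conf = new JobConf();
    FileOutputFormat.setOutputPath(conf, new Path("out"));
    System.out.println(FileOutputFormat.getOutputPath(conf));
  }
}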
/**
 * Set the current working directory for the default file system.
 *
 * @param dir the new current working directory.
 */
public void setWorkingDirectory(Path dir) {
  dir = new Path(getWorkingDirectory(), dir);
  set(JobContext.WORKING_DIR, dir.toString());
}
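A minimal sketch of the resolution behavior above (class name hypothetical; assumes `JobConf` from the old API): a relative directory is resolved against the current working directory before being written back to the configuration.

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.JobConf;

public class WorkingDirResolutionExample {
  public static void main(String[] args) {
    // Minimal sketch: setting a relative working directory resolves it
    // against the previous working directory.
    JobConf conf = new JobConf();
    Path before = conf.getWorkingDirectory();
    conf.setWorkingDirectory(new Path("subdir")); // relative path
    Path after = conf.getWorkingDirectory();
    System.out.println(before + " -> " + after);  // "after" ends with "subdir"
  }
}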
/**
 * Set the {@link Path} of the output directory for the map-reduce job.
 *
 * @param conf      The configuration of the job.
 * @param outputDir the {@link Path} of the output directory for
 *                  the map-reduce job.
 */
public static void setOutputPath(JobConf conf, Path outputDir) {
  outputDir = new Path(conf.getWorkingDirectory(), outputDir);
  conf.set("mapred.output.dir", outputDir.toString());
}
static void setWorkOutputPath(Configuration conf, Path outputDir) {
  outputDir = new Path(asJobConfInstance(conf).getWorkingDirectory(), outputDir);
  conf.set("mapred.work.output.dir", outputDir.toString());
}
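For context, a small hypothetical sketch of the same idea without the `asJobConfInstance` helper (class name and path are illustrative): a relative work output directory is qualified against the job's working directory and stored under "mapred.work.output.dir".

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.JobConf;

public class WorkOutputDirSketch {
  public static void main(String[] args) {
    // Illustrative sketch: resolve a relative directory against the
    // working directory, then store it in the configuration.
    JobConf jobConf = new JobConf();
    Path outputDir = new Path(jobConf.getWorkingDirectory(), new Path("work-output"));
    Configuration conf = jobConf; // JobConf is a Configuration
    conf.set("mapred.work.output.dir", outputDir.toString());
    System.out.println(conf.get("mapred.work.output.dir"));
  }
}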
@Override
public Object run() throws Exception {
  try {
    // use job-specified working directory
    FileSystem.get(job).setWorkingDirectory(job.getWorkingDirectory());
    taskFinal.run(job, umbilical); // run the task
  } finally {
    TaskLog.syncLogs(logLocation, taskid, isCleanup);
  }
  return null;
}
});
@Override
public Object run() throws Exception {
  // use job-specified working directory
  setEncryptedSpillKeyIfRequired(taskFinal);
  FileSystem.get(job).setWorkingDirectory(job.getWorkingDirectory());
  taskFinal.run(job, umbilical); // run the task
  return null;
}
});
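A hedged, standalone sketch of the working-directory step shown in these task runners (class name and path are hypothetical): the task-side file system's working directory is set from the job configuration so that relative paths used by the task resolve against the job-specified working directory.

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.JobConf;

public class TaskWorkingDirSketch {
  public static void main(String[] args) throws Exception {
    // Illustrative sketch: mirror what the task runner does before
    // invoking the task.
    JobConf job = new JobConf();
    job.setWorkingDirectory(new Path("/tmp/job-work")); // illustrative path
    FileSystem fs = FileSystem.get(job);
    fs.setWorkingDirectory(job.getWorkingDirectory());
    System.out.println(fs.getWorkingDirectory());
  }
}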
assertNotNull(conf.getWorkingDirectory());
conf.setWorkingDirectory(new Path("test"));
assertTrue(conf.getWorkingDirectory().toString().endsWith("test"));