Refine search
/** Verifies that a single-tracker MiniMRCluster can be started and torn down cleanly. */
public void testBringUp() throws IOException {
  MiniMRCluster cluster = null;
  try {
    cluster = new MiniMRCluster(1, "local", 1);
  } finally {
    // Shut down only if startup got far enough to produce a cluster.
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
/**
 * Command-line entry point: brings up a four-tracker MiniMRCluster on the
 * local file system, then shuts it back down.
 *
 * @param args unused
 * @throws IOException if cluster startup fails
 */
public static void main(String[] args) throws IOException {
  LOG.info("Bringing up Jobtracker and tasktrackers.");
  final MiniMRCluster cluster = new MiniMRCluster(4, "file:///", 1);
  LOG.info("JobTracker and TaskTrackers are up.");
  cluster.shutdown();
  LOG.info("JobTracker and TaskTrackers brought down.");
}
}
/** Checks that a one-node MiniMRCluster comes up and is always shut down. */
public void testBringUp() throws IOException {
  MiniMRCluster miniCluster = null;
  try {
    miniCluster = new MiniMRCluster(1, "local", 1);
  } finally {
    if (miniCluster != null) {
      miniCluster.shutdown();
    }
  }
}
/** Runs the shared configuration test against a full single-node MiniMRCluster. */
public void testMiniMRJobRunner() throws Exception {
  final MiniMRCluster cluster = new MiniMRCluster(1, "file:///", 1);
  try {
    testWithConf(cluster.createJobConf());
  } finally {
    cluster.shutdown();
  }
}
/**
 * Runs word count and multi-file word count on a two-tracker cluster backed
 * by the local file system.
 */
public void testWithLocal() throws Exception {
  MiniMRCluster cluster = null;
  try {
    cluster = new MiniMRCluster(2, "file:///", 3);
    final Configuration clusterConf = cluster.createJobConf();
    runWordCount(clusterConf);
    runMultiFileWordCount(clusterConf);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
/**
 * Runs word count against DFS using a non-default mapred system directory
 * and verifies the job completes.
 */
public void testWithDFS() throws IOException {
  MiniDFSCluster dfsCluster = null;
  MiniMRCluster mrCluster = null;
  FileSystem fs = null;
  try {
    final int trackerCount = 4;
    JobConf conf = new JobConf();
    // Relocate the JobTracker system dir away from its default.
    conf.set(JTConfig.JT_SYSTEM_DIR, "/tmp/custom/mapred/system");
    dfsCluster = new MiniDFSCluster(conf, 4, true, null);
    fs = dfsCluster.getFileSystem();
    mrCluster = new MiniMRCluster(trackerCount, fs.getUri().toString(), 1, null, null, conf);
    runWordCount(mrCluster, mrCluster.createJobConf(), conf.get("mapred.system.dir"));
  } finally {
    if (dfsCluster != null) {
      dfsCluster.shutdown();
    }
    if (mrCluster != null) {
      mrCluster.shutdown();
    }
  }
}
new MiniMRCluster(taskTrackers, fileSys.getUri().toString(), 1, null, null, conf); mr.shutdown();
public void testJobKillFailAndSucceed() throws IOException { if (Shell.WINDOWS) { System.out.println( "setsid doesn't work on WINDOWS as expected. Not testing"); return; } JobConf conf=null; try { mr = new MiniMRCluster(1, "file:///", 1); // run the TCs conf = mr.createJobConf(); JobTracker jt = mr.getJobTrackerRunner().getJobTracker(); runTests(conf, jt); } finally { if (mr != null) { mr.shutdown(); } } }
/**
 * Launches a small job (3 maps, 1 reduce) against a DFS-backed
 * MiniMRCluster and asserts that it succeeds.
 *
 * Fix: removed the unused local {@code jobTrackerPort}.
 *
 * @throws IOException if cluster setup or job submission fails
 */
public void testJobWithDFS() throws IOException {
  String namenode = null;
  MiniDFSCluster dfs = null;
  MiniMRCluster mr = null;
  FileSystem fileSys = null;
  try {
    final int taskTrackers = 4;
    Configuration conf = new Configuration();
    dfs = new MiniDFSCluster(conf, 1, true, null);
    fileSys = dfs.getFileSystem();
    namenode = fileSys.getUri().toString();
    mr = new MiniMRCluster(taskTrackers, namenode, 2);
    final String jobTrackerName = "localhost:" + mr.getJobTrackerPort();
    JobConf jobConf = new JobConf();
    // The job must complete successfully end-to-end.
    boolean result = launchJob(fileSys.getUri(), jobTrackerName, jobConf, 3, 1);
    assertTrue(result);
  } finally {
    if (dfs != null) {
      dfs.shutdown();
    }
    if (mr != null) {
      mr.shutdown();
    }
  }
}
/**
 * Verifies that out-of-band heartbeats keep scheduling responsive: with the
 * regular heartbeat interval scaled up 30x, a random-writer job must still
 * finish within the expected wall-clock budget.
 *
 * Fix: the assertion compared against a duplicated literal {@code 120}
 * instead of the named {@code expectedRuntimeSecs} constant; the two could
 * silently drift apart.
 */
public void testOutOfBandHeartbeats() throws Exception {
  MiniDFSCluster dfs = null;
  MiniMRCluster mr = null;
  try {
    Configuration conf = new Configuration();
    dfs = new MiniDFSCluster(conf, 4, true, null);
    int taskTrackers = 1;
    JobConf jobConf = new JobConf();
    // Slow the regular heartbeat down dramatically so the test only passes
    // when out-of-band heartbeats are actually doing the work.
    jobConf.setFloat(JTConfig.JT_HEARTBEATS_SCALING_FACTOR, 30.0f);
    jobConf.setBoolean(TTConfig.TT_OUTOFBAND_HEARBEAT, true);
    mr = new MiniMRCluster(taskTrackers, dfs.getFileSystem().getUri().toString(), 3, null, null, jobConf);
    long start = System.currentTimeMillis();
    TestMiniMRDFSSort.runRandomWriter(mr.createJobConf(), new Path("rw"));
    long end = System.currentTimeMillis();
    final int expectedRuntimeSecs = 120;
    final int runTimeSecs = (int) ((end - start) / 1000);
    System.err.println("Runtime is " + runTimeSecs);
    assertEquals("Actual runtime " + runTimeSecs + "s not less than expected "
        + "runtime of " + expectedRuntimeSecs + "s!", true,
        (runTimeSecs <= expectedRuntimeSecs));
  } finally {
    if (mr != null) {
      mr.shutdown();
    }
    if (dfs != null) {
      dfs.shutdown();
    }
  }
}
/**
 * Runs word count on DFS over a small fixed input and checks the exact
 * output, exercising job classpath setup.
 *
 * Fix: removed the unused local {@code jobTrackerPort}.
 *
 * @throws IOException if cluster setup or the job fails
 */
public void testClassPath() throws IOException {
  String namenode = null;
  MiniDFSCluster dfs = null;
  MiniMRCluster mr = null;
  FileSystem fileSys = null;
  try {
    final int taskTrackers = 4;
    Configuration conf = new Configuration();
    dfs = new MiniDFSCluster(conf, 1, true, null);
    fileSys = dfs.getFileSystem();
    namenode = fileSys.getUri().toString();
    mr = new MiniMRCluster(taskTrackers, namenode, 3);
    JobConf jobConf = new JobConf();
    final String jobTrackerName = "localhost:" + mr.getJobTrackerPort();
    // Three maps, one reduce over the fixed sample text.
    String result = launchWordCount(fileSys.getUri(), jobTrackerName, jobConf,
        "The quick brown fox\nhas many silly\n" + "red fox sox\n", 3, 1);
    assertEquals("The\t1\nbrown\t1\nfox\t2\nhas\t1\nmany\t1\n"
        + "quick\t1\nred\t1\nsilly\t1\nsox\t1\n", result);
  } finally {
    if (dfs != null) {
      dfs.shutdown();
    }
    if (mr != null) {
      mr.shutdown();
    }
  }
}
fileSys = dfs.getFileSystem(); namenode = fileSys.getUri().toString(); mr = new MiniMRCluster(taskTrackers, namenode, 3); JobConf jobConf = new JobConf(); String result; if (mr != null) { mr.shutdown();
public void testJobDirCleanup() throws Exception { String namenode = null; MiniDFSCluster dfs = null; MiniMRCluster mr = null; FileSystem fileSys = null; try { final int taskTrackers = 10; Configuration conf = new Configuration(); JobConf mrConf = new JobConf(); mrConf.set(TTConfig.TT_REDUCE_SLOTS, "1"); dfs = new MiniDFSCluster(conf, 1, true, null); fileSys = dfs.getFileSystem(); namenode = fileSys.getUri().toString(); mr = new MiniMRCluster(10, namenode, 3, null, null, mrConf); // make cleanup inline sothat validation of existence of these directories // can be done mr.setInlineCleanupThreads(); // run the sleep job JobConf jobConf = mr.createJobConf(); JobID jobid = runSleepJob(jobConf); // verify the job directories are cleaned up. verifyJobDirCleanup(mr, taskTrackers, jobid); } finally { if (fileSys != null) { fileSys.close(); } if (dfs != null) { dfs.shutdown(); } if (mr != null) { mr.shutdown(); } } }
/** * Runs job with jvm reuse and verifies that the logs for all attempts can be * read properly. * * @throws IOException */ @Test public void testTaskLogs() throws IOException { MiniMRCluster mr = null; try { Configuration conf = new Configuration(); final int taskTrackers = 1; // taskTrackers should be 1 to test jvm reuse. conf.setInt("mapred.tasktracker.map.tasks.maximum", 1); mr = new MiniMRCluster(taskTrackers, "file:///", 1); final Path inDir = new Path(rootDir, "input"); final Path outDir = new Path(rootDir, "output"); JobConf jobConf = mr.createJobConf(); jobConf.setOutputCommitter(TestTaskFail.CommitterWithLogs.class); RunningJob rJob = launchJob(jobConf, inDir, outDir); rJob.waitForCompletion(); validateJob(rJob, mr); } finally { if (mr != null) { mr.shutdown(); } } } }
public void testMapReduceSortWithCompressedEmptyMapOutputs() throws Exception { MiniDFSCluster dfs = null; MiniMRCluster mr = null; FileSystem fileSys = null; try { Configuration conf = new Configuration(); // Start the mini-MR and mini-DFS clusters dfs = new MiniDFSCluster(conf, NUM_HADOOP_SLAVES, true, null); fileSys = dfs.getFileSystem(); mr = new MiniMRCluster(NUM_HADOOP_SLAVES, fileSys.getUri().toString(), 1); // Run randomwriter to generate input for 'sort' runRandomWriter(mr.createJobConf(), SORT_INPUT_PATH); // Run sort runSort(mr.createJobConf(), SORT_INPUT_PATH, SORT_OUTPUT_PATH); } finally { if (dfs != null) { dfs.shutdown(); } if (mr != null) { mr.shutdown(); } } }
NameNode.DEFAULT_PORT, conf, 4, true, true, null, null); fileSys = dfs.getFileSystem(); mr = new MiniMRCluster(taskTrackers, fileSys.getUri().toString(), 1); } finally { if (dfs != null) { dfs.shutdown(); } if (mr != null) { mr.shutdown();
public void testWithDFS() throws IOException, InterruptedException, ClassNotFoundException { MiniDFSCluster dfs = null; MiniMRCluster mr = null; FileSystem fileSys = null; try { final int taskTrackers = 4; Configuration conf = new Configuration(); dfs = new MiniDFSCluster(conf, 4, true, null); fileSys = dfs.getFileSystem(); mr = new MiniMRCluster(taskTrackers, fileSys.getUri().toString(), 1); // make cleanup inline sothat validation of existence of these directories // can be done mr.setInlineCleanupThreads(); runPI(mr, mr.createJobConf()); runWordCount(mr, mr.createJobConf()); } finally { if (dfs != null) { dfs.shutdown(); } if (mr != null) { mr.shutdown(); } } }
jtConf.setInt(TTConfig.TT_REDUCE_SLOTS, 1); jtConf.setLong(JTConfig.JT_TRACKER_EXPIRY_INTERVAL, 10 * 1000); mr = new MiniMRCluster(taskTrackers, fileSys.getUri().toString(), 1, null, null, jtConf); } finally { if (dfs != null) { dfs.shutdown(); } if (mr != null) { mr.shutdown();
/**
 * End-to-end test of the C++ pipes interface; skipped unless the native
 * examples were compiled (system property {@code compile.c++}).
 *
 * Fix: the original finally block invoked {@code mr.shutdown()} and
 * {@code dfs.shutdown()} unconditionally, throwing NullPointerException
 * (and masking the real failure) whenever cluster startup itself failed.
 * Both shutdowns are now null-guarded, matching the other tests.
 */
public void testPipes() throws IOException {
  if (System.getProperty("compile.c++") == null) {
    LOG.info("compile.c++ is not defined, so skipping TestPipes");
    return;
  }
  MiniDFSCluster dfs = null;
  MiniMRCluster mr = null;
  Path inputPath = new Path("testing/in");
  Path outputPath = new Path("testing/out");
  try {
    final int numSlaves = 2;
    Configuration conf = new Configuration();
    dfs = new MiniDFSCluster(conf, numSlaves, true, null);
    mr = new MiniMRCluster(numSlaves, dfs.getFileSystem().getUri().toString(), 1);
    writeInputFile(dfs.getFileSystem(), inputPath);
    // Plain word count with two reduces.
    runProgram(mr, dfs, wordCountSimple, inputPath, outputPath, 3, 2, twoSplitOutput, null);
    cleanup(dfs.getFileSystem(), outputPath);
    // Zero-reduce (no-sort) variant.
    runProgram(mr, dfs, wordCountSimple, inputPath, outputPath, 3, 0, noSortOutput, null);
    cleanup(dfs.getFileSystem(), outputPath);
    // Word count with a custom partitioner.
    runProgram(mr, dfs, wordCountPart, inputPath, outputPath, 3, 2, fixedPartitionOutput, null);
    runNonPipedProgram(mr, dfs, wordCountNoPipes, null);
    mr.waitUntilIdle();
  } finally {
    if (mr != null) {
      mr.shutdown();
    }
    if (dfs != null) {
      dfs.shutdown();
    }
  }
}
new MiniDFSCluster.Builder(conf).numDataNodes(2).format(true).build(); mrCluster = new MiniMRCluster(2, dfsCluster.getFileSystem().getUri().toString(), 1); JobTracker jobTracker = mrCluster.getJobTrackerRunner().getJobTracker(); Path jobFolderPath = new Path(jobTracker.getSystemDir()); 0 == jobFilesForRecovery.length); } finally { if(mrCluster != null) mrCluster.shutdown(); if(dfsCluster != null) dfsCluster.shutdown();