private void setupMiniDfsAndMrClusters() {
  try {
    final int dataNodes = 1;    // There will be 1 data node
    final int taskTrackers = 1; // There will be 1 task tracker node
    Configuration config = new Configuration();
    // Builds and starts the mini dfs and mapreduce clusters
    if (System.getProperty("hadoop.log.dir") == null) {
      System.setProperty("hadoop.log.dir", "target/tmp/logs/");
    }
    m_dfs = new MiniDFSCluster(config, dataNodes, true, null);
    m_fileSys = m_dfs.getFileSystem();
    m_mr = new MiniMRCluster(taskTrackers, m_fileSys.getUri().toString(), 1);

    // Create the configuration hadoop-site.xml file
    File conf_dir = new File(System.getProperty("user.home"), "pigtest/conf/");
    conf_dir.mkdirs();
    File conf_file = new File(conf_dir, "hadoop-site.xml");

    // Write the necessary config info to hadoop-site.xml
    m_conf = m_mr.createJobConf();
    m_conf.setInt("mapred.submit.replication", 1);
    m_conf.set("dfs.datanode.address", "0.0.0.0:0");
    m_conf.set("dfs.datanode.http.address", "0.0.0.0:0");
    m_conf.writeXml(new FileOutputStream(conf_file));

    // Set the system properties needed by Pig
    System.setProperty("cluster", m_conf.get("mapred.job.tracker"));
    System.setProperty("namenode", m_conf.get("fs.default.name"));
    System.setProperty("junit.hadoop.conf", conf_dir.getPath());
  } catch (IOException e) {
    throw new RuntimeException(e);
  }
}

@BeforeClass
public static void setup() throws Exception {
  System.clearProperty("mapred.job.tracker");
  String testDir = System.getProperty("test.tmp.dir", "./");
  testDir = testDir + "/test_multitable_" + Math.abs(new Random().nextLong()) + "/";
  workDir = new File(new File(testDir).getCanonicalPath());
  FileUtil.fullyDelete(workDir);
  workDir.mkdirs();
  warehousedir = new Path(System.getProperty("test.warehouse.dir"));

  HiveConf metastoreConf = new HiveConf();
  metastoreConf.setVar(HiveConf.ConfVars.METASTOREWAREHOUSE, warehousedir.toString());

  // Run hive metastore server
  MetaStoreTestUtils.startMetaStoreWithRetry(metastoreConf);
  // Read the warehouse dir, which can be changed so multiple MetaStore tests
  // could be run on the same server
  warehousedir = new Path(MetastoreConf.getVar(metastoreConf, MetastoreConf.ConfVars.WAREHOUSE));

  // LocalJobRunner does not work with mapreduce OutputCommitter, so we need
  // to use MiniMRCluster. MAPREDUCE-2350
  Configuration conf = new Configuration(true);
  conf.set("yarn.scheduler.capacity.root.queues", "default");
  conf.set("yarn.scheduler.capacity.root.default.capacity", "100");

  FileSystem fs = FileSystem.get(conf);
  System.setProperty("hadoop.log.dir", new File(workDir, "/logs").getAbsolutePath());

  mrCluster = new MiniMRCluster(1, fs.getUri().toString(), 1, null, null, new JobConf(conf));
  mrConf = mrCluster.createJobConf();
  initializeSetup(metastoreConf);

  warehousedir.getFileSystem(conf).mkdirs(warehousedir);
}

mrCluster = new MiniMRCluster(servers,
    FS_URI != null ? FS_URI : FileSystem.get(conf).getUri().toString(),
    1, null, null, new JobConf(this.conf));
JobConf jobConf = MapreduceTestingShim.getJobConf(mrCluster);
if (jobConf == null) {
  jobConf = mrCluster.createJobConf();
}

mrCluster = new MiniMRCluster(1, fs.getUri().toString(), 1, null, null, new JobConf(conf));
mrConf = mrCluster.createJobConf();

String[] racks = null;
String[] hosts = null;
miniMR = new MiniMRCluster(numTaskTrackers, miniDFS.getFileSystem().getUri().toString(),
    numTaskTrackerDirectories, racks, hosts, new JobConf(conf));
JobConf jobConf = miniMR.createJobConf(new JobConf(conf));
System.out.println("-------" + jobConf.get("fs.defaultFS"));
System.out.println("-------" + miniDFS.getFileSystem().getUri().toString());

/** Tests using a full MiniMRCluster. */
public void testMiniMRJobRunner() throws Exception {
  MiniMRCluster m = new MiniMRCluster(1, "file:///", 1);
  try {
    testWithConf(m.createJobConf());
  } finally {
    m.shutdown();
  }
}

private void startCluster() throws Exception {
  super.setUp();
  Configuration conf = new Configuration();
  dfsCluster = new MiniDFSCluster(conf, numSlaves, true, null);
  JobConf jConf = new JobConf(conf);
  jConf.setLong("mapred.job.submission.expiry.interval", 6 * 1000);
  mrCluster = new MiniMRCluster(0, 0, numSlaves,
      dfsCluster.getFileSystem().getUri().toString(), 1, null, null, null, jConf);
  jt = mrCluster.getJobTrackerRunner().getJobTracker();
  fs = FileSystem.get(mrCluster.createJobConf());
}

mrCluster = new MiniMRCluster(8021, 0, 1, FileSystem.get(conf).getUri().toString(),
    1, null, new String[] { "localhost" }, null, jobConf);
JobConf mrClusterJobConf = mrCluster.createJobConf();

public void setUp() throws Exception {
  super.setUp();
  conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
      HadoopPolicyProvider.class, PolicyProvider.class);
  jobtracker = System.getProperty("test.cli.mapred.job.tracker");
  JobConf mrConf = new JobConf(conf);
  if (jobtracker == null) {
    // Start up mini mr cluster
    mrCluster = new MiniMRCluster(1, dfsCluster.getFileSystem().getUri().toString(),
        1, null, null, mrConf);
    jobtracker = mrCluster.createJobConf().get(JTConfig.JT_IPC_ADDRESS, "local");
  } else {
    conf.set(JTConfig.JT_IPC_ADDRESS, jobtracker);
  }
  cmdExecutor = new MRCmdExecutor(jobtracker);
  archiveCmdExecutor = new ArchiveCmdExecutor(namenode, mrConf);
}

protected void setUp() throws Exception {
  Configuration conf = new Configuration();
  conf.setClass(JTConfig.JT_TASK_SCHEDULER, FakeTaskScheduler.class, TaskScheduler.class);
  mr = new MiniMRCluster(0, "file:///", 1, null, null, new JobConf(conf));
  jobTracker = mr.getJobTrackerRunner().getJobTracker();
  for (String tracker : trackers) {
    FakeObjectUtilities.establishFirstContact(jobTracker, tracker);
  }
  cluster = new Cluster(mr.createJobConf());
}

public void testWithLocal() throws Exception {
  MiniMRCluster mr = null;
  try {
    mr = new MiniMRCluster(2, "file:///", 3);
    Configuration conf = mr.createJobConf();
    runWordCount(conf);
    runMultiFileWordCount(conf);
  } finally {
    if (mr != null) {
      mr.shutdown();
    }
  }
}

@Override
protected void setUp() throws Exception {
  conf = new JobConf();
  conf.setClass(JTConfig.JT_TASK_SCHEDULER, MyScheduler.class, TaskScheduler.class);
  mr = new MiniMRCluster(1, "file:///", 1, null, null, conf);
  jobtracker = mr.getJobTrackerRunner().getJobTracker();
  myScheduler = (MyScheduler) jobtracker.getScheduler();
  conf = mr.createJobConf();
}

public void testWithDFS() throws IOException {
  MiniDFSCluster dfs = null;
  MiniMRCluster mr = null;
  FileSystem fileSys = null;
  try {
    final int taskTrackers = 4;
    JobConf conf = new JobConf();
    conf.set(JTConfig.JT_SYSTEM_DIR, "/tmp/custom/mapred/system");
    dfs = new MiniDFSCluster(conf, 4, true, null);
    fileSys = dfs.getFileSystem();
    mr = new MiniMRCluster(taskTrackers, fileSys.getUri().toString(), 1, null, null, conf);
    runWordCount(mr, mr.createJobConf(), conf.get("mapred.system.dir"));
  } finally {
    if (dfs != null) { dfs.shutdown(); }
    if (mr != null) { mr.shutdown(); }
  }
}

public void testMapReduceSortWithCompressedEmptyMapOutputs() throws Exception {
  MiniDFSCluster dfs = null;
  MiniMRCluster mr = null;
  FileSystem fileSys = null;
  try {
    Configuration conf = new Configuration();
    // Start the mini-MR and mini-DFS clusters
    dfs = new MiniDFSCluster(conf, NUM_HADOOP_SLAVES, true, null);
    fileSys = dfs.getFileSystem();
    mr = new MiniMRCluster(NUM_HADOOP_SLAVES, fileSys.getUri().toString(), 1);
    // Run randomwriter to generate input for 'sort'
    runRandomWriter(mr.createJobConf(), SORT_INPUT_PATH);
    // Run sort
    runSort(mr.createJobConf(), SORT_INPUT_PATH, SORT_OUTPUT_PATH);
  } finally {
    if (dfs != null) { dfs.shutdown(); }
    if (mr != null) { mr.shutdown(); }
  }
}

public void testWithDFS() throws IOException, InterruptedException, ClassNotFoundException {
  MiniDFSCluster dfs = null;
  MiniMRCluster mr = null;
  FileSystem fileSys = null;
  try {
    final int taskTrackers = 4;
    Configuration conf = new Configuration();
    dfs = new MiniDFSCluster(conf, 4, true, null);
    fileSys = dfs.getFileSystem();
    mr = new MiniMRCluster(taskTrackers, fileSys.getUri().toString(), 1);
    // Make cleanup inline so that the existence of these directories
    // can be validated
    mr.setInlineCleanupThreads();
    runPI(mr, mr.createJobConf());
    runWordCount(mr, mr.createJobConf());
  } finally {
    if (dfs != null) { dfs.shutdown(); }
    if (mr != null) { mr.shutdown(); }
  }
}

public void testJobKillFailAndSucceed() throws IOException {
  if (Shell.WINDOWS) {
    System.out.println("setsid doesn't work on WINDOWS as expected. Not testing");
    return;
  }
  JobConf conf = null;
  try {
    mr = new MiniMRCluster(1, "file:///", 1);
    // run the TCs
    conf = mr.createJobConf();
    JobTracker jt = mr.getJobTrackerRunner().getJobTracker();
    runTests(conf, jt);
  } finally {
    if (mr != null) {
      mr.shutdown();
    }
  }
}

public void testOutOfBandHeartbeats() throws Exception {
  MiniDFSCluster dfs = null;
  MiniMRCluster mr = null;
  try {
    Configuration conf = new Configuration();
    dfs = new MiniDFSCluster(conf, 4, true, null);

    int taskTrackers = 1;
    JobConf jobConf = new JobConf();
    jobConf.setFloat(JTConfig.JT_HEARTBEATS_SCALING_FACTOR, 30.0f);
    jobConf.setBoolean(TTConfig.TT_OUTOFBAND_HEARBEAT, true);
    mr = new MiniMRCluster(taskTrackers, dfs.getFileSystem().getUri().toString(),
        3, null, null, jobConf);

    long start = System.currentTimeMillis();
    TestMiniMRDFSSort.runRandomWriter(mr.createJobConf(), new Path("rw"));
    long end = System.currentTimeMillis();

    final int expectedRuntimeSecs = 120;
    final int runTimeSecs = (int) ((end - start) / 1000);
    System.err.println("Runtime is " + runTimeSecs);
    assertTrue("Actual runtime " + runTimeSecs + "s not less than expected "
        + "runtime of " + expectedRuntimeSecs + "s!",
        runTimeSecs <= expectedRuntimeSecs);
  } finally {
    if (mr != null) { mr.shutdown(); }
    if (dfs != null) { dfs.shutdown(); }
  }
}

public void testJobDirCleanup() throws Exception {
  String namenode = null;
  MiniDFSCluster dfs = null;
  MiniMRCluster mr = null;
  FileSystem fileSys = null;
  try {
    final int taskTrackers = 10;
    Configuration conf = new Configuration();
    JobConf mrConf = new JobConf();
    mrConf.set(TTConfig.TT_REDUCE_SLOTS, "1");
    dfs = new MiniDFSCluster(conf, 1, true, null);
    fileSys = dfs.getFileSystem();
    namenode = fileSys.getUri().toString();
    mr = new MiniMRCluster(taskTrackers, namenode, 3, null, null, mrConf);
    // Make cleanup inline so that the existence of these directories
    // can be validated
    mr.setInlineCleanupThreads();

    // run the sleep job
    JobConf jobConf = mr.createJobConf();
    JobID jobid = runSleepJob(jobConf);

    // verify the job directories are cleaned up.
    verifyJobDirCleanup(mr, taskTrackers, jobid);
  } finally {
    if (fileSys != null) { fileSys.close(); }
    if (dfs != null) { dfs.shutdown(); }
    if (mr != null) { mr.shutdown(); }
  }
}

/**
 * Runs a job with jvm reuse and verifies that the logs for all attempts can be
 * read properly.
 *
 * @throws IOException
 */
@Test
public void testTaskLogs() throws IOException {
  MiniMRCluster mr = null;
  try {
    Configuration conf = new Configuration();
    final int taskTrackers = 1; // taskTrackers should be 1 to test jvm reuse.
    conf.setInt("mapred.tasktracker.map.tasks.maximum", 1);
    mr = new MiniMRCluster(taskTrackers, "file:///", 1);

    final Path inDir = new Path(rootDir, "input");
    final Path outDir = new Path(rootDir, "output");
    JobConf jobConf = mr.createJobConf();
    jobConf.setOutputCommitter(TestTaskFail.CommitterWithLogs.class);
    RunningJob rJob = launchJob(jobConf, inDir, outDir);
    rJob.waitForCompletion();
    validateJob(rJob, mr);
  } finally {
    if (mr != null) {
      mr.shutdown();
    }
  }
}

/**
 * Main test case, which checks that the debug script is launched and runs
 * correctly when a mapper fails.
 *
 * @throws Exception
 */
@Test
public void testDebugScript() throws Exception {
  JobConf conf = new JobConf();
  conf.setLong(TTConfig.TT_SLEEP_TIME_BEFORE_SIG_KILL, 0L);
  MiniMRCluster mrCluster = new MiniMRCluster(1, "file:///", 1, null, null, conf);
  Path inputPath = new Path(SCRIPT_DIR);
  Path outputPath = new Path(SCRIPT_DIR, "task_output");

  // Run a failing mapper so the debug script is launched.
  JobID jobId = runFailingMapJob(mrCluster.createJobConf(), inputPath, outputPath);

  // construct the task id of the first map task of failmap
  TaskAttemptID taskId = new TaskAttemptID(new TaskID(jobId, TaskType.MAP, 0), 0);

  // verify the debug script was launched and ran correctly.
  verifyDebugScriptOutput(taskId);
}
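
All of the matches above share the same lifecycle: construct the mini cluster, obtain a client configuration via createJobConf(), submit work with that configuration, and shut the cluster down in a finally block. Below is a minimal, self-contained sketch of that pattern, assuming the deprecated org.apache.hadoop.mapred.MiniMRCluster API used throughout these results; the class name MiniMRClusterSketch is illustrative, not from any of the matches.

import java.io.IOException;

import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MiniMRCluster;

public class MiniMRClusterSketch {
  public static void main(String[] args) throws IOException {
    // One task tracker over the local file system; no MiniDFSCluster needed.
    MiniMRCluster mr = new MiniMRCluster(1, "file:///", 1);
    try {
      // createJobConf() returns a JobConf wired to the mini cluster's
      // JobTracker, so jobs submitted with it run inside the test cluster.
      JobConf jobConf = mr.createJobConf();
      System.out.println("JobTracker at " + jobConf.get("mapred.job.tracker"));
      // Submit work here, e.g. JobClient.runJob(jobConf) with a configured job.
    } finally {
      mr.shutdown(); // always release the cluster, even when the test fails
    }
  }
}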