Refine search
@BeforeClass public static void setup() throws Exception { System.clearProperty("mapred.job.tracker"); String testDir = System.getProperty("test.tmp.dir", "./"); testDir = testDir + "/test_multitable_" + Math.abs(new Random().nextLong()) + "/"; workDir = new File(new File(testDir).getCanonicalPath()); FileUtil.fullyDelete(workDir); workDir.mkdirs(); warehousedir = new Path(System.getProperty("test.warehouse.dir")); HiveConf metastoreConf = new HiveConf(); metastoreConf.setVar(HiveConf.ConfVars.METASTOREWAREHOUSE, warehousedir.toString()); // Run hive metastore server MetaStoreTestUtils.startMetaStoreWithRetry(metastoreConf); // Read the warehouse dir, which can be changed so multiple MetaStore tests could be run on // the same server warehousedir = new Path(MetastoreConf.getVar(metastoreConf, MetastoreConf.ConfVars.WAREHOUSE)); // LocalJobRunner does not work with mapreduce OutputCommitter. So need // to use MiniMRCluster. MAPREDUCE-2350 Configuration conf = new Configuration(true); conf.set("yarn.scheduler.capacity.root.queues", "default"); conf.set("yarn.scheduler.capacity.root.default.capacity", "100"); FileSystem fs = FileSystem.get(conf); System.setProperty("hadoop.log.dir", new File(workDir, "/logs").getAbsolutePath()); mrCluster = new MiniMRCluster(1, fs.getUri().toString(), 1, null, null, new JobConf(conf)); mrConf = mrCluster.createJobConf(); initializeSetup(metastoreConf); warehousedir.getFileSystem(conf).mkdirs(warehousedir); }
/**
 * Cleans up the scratch dir and warehouse dir, then stops the MR cluster.
 * The cluster shutdown is in a finally block so a failure while deleting
 * the warehouse dir can no longer leak the MiniMRCluster (the original
 * skipped shutdown() whenever the filesystem calls threw).
 *
 * @throws IOException if the warehouse filesystem cannot be reached
 */
@AfterClass public static void tearDown() throws IOException {
  try {
    FileUtil.fullyDelete(workDir);
    FileSystem fs = warehousedir.getFileSystem(hiveConf);
    if (fs.exists(warehousedir)) {
      fs.delete(warehousedir, true);
    }
  } finally {
    // Always stop the cluster, even if the cleanup above failed.
    if (mrCluster != null) {
      mrCluster.shutdown();
    }
  }
}
/**
 * Extracts the JobConf from a MiniMRCluster's JobTracker via reflection
 * (the getJobTracker/getConf methods are not part of a stable public API).
 *
 * @param cluster the mini cluster, may be null
 * @return the tracker's JobConf, or null if the cluster is null or the
 *         reflective lookup/invocation fails on this Hadoop version
 */
@Override public JobConf obtainJobConf(MiniMRCluster cluster) {
  if (cluster == null) {
    return null;
  }
  try {
    Object runner = cluster.getJobTrackerRunner();
    Method getJobTracker = runner.getClass().getDeclaredMethod("getJobTracker", emptyParam);
    Object tracker = getJobTracker.invoke(runner);
    Method getConf = tracker.getClass().getDeclaredMethod("getConf", emptyParam);
    return (JobConf) getConf.invoke(tracker);
  } catch (NoSuchMethodException | InvocationTargetException | IllegalAccessException e) {
    // Any reflection failure means this Hadoop version does not expose the
    // expected methods; callers treat null as "not available".
    return null;
  }
}
private void setupMiniDfsAndMrClusters() { try { final int dataNodes = 1; // There will be 4 data nodes final int taskTrackers = 1; // There will be 4 task tracker nodes Configuration config = new Configuration(); // Builds and starts the mini dfs and mapreduce clusters if(System.getProperty("hadoop.log.dir") == null) { System.setProperty("hadoop.log.dir", "target/tmp/logs/"); } m_dfs = new MiniDFSCluster(config, dataNodes, true, null); m_fileSys = m_dfs.getFileSystem(); m_mr = new MiniMRCluster(taskTrackers, m_fileSys.getUri().toString(), 1); // Create the configuration hadoop-site.xml file File conf_dir = new File(System.getProperty("user.home"), "pigtest/conf/"); conf_dir.mkdirs(); File conf_file = new File(conf_dir, "hadoop-site.xml"); // Write the necessary config info to hadoop-site.xml m_conf = m_mr.createJobConf(); m_conf.setInt("mapred.submit.replication", 1); m_conf.set("dfs.datanode.address", "0.0.0.0:0"); m_conf.set("dfs.datanode.http.address", "0.0.0.0:0"); m_conf.writeXml(new FileOutputStream(conf_file)); // Set the system properties needed by Pig System.setProperty("cluster", m_conf.get("mapred.job.tracker")); System.setProperty("namenode", m_conf.get("fs.default.name")); System.setProperty("junit.hadoop.conf", conf_dir.getPath()); } catch (IOException e) { throw new RuntimeException(e); } }
// NOTE(review): incomplete snippet — the enclosing method's signature and
// closing braces are outside this view; kept byte-identical.
// Starts a MiniMRCluster against FS_URI (or the default FileSystem's URI when
// FS_URI is null), then obtains a JobConf via MapreduceTestingShim, falling
// back to createJobConf() and restoring "mapreduce.cluster.local.dir".
// The trailing "//Hadoop MiniMR overwrites..." comment swallows the LOG.info
// call on this collapsed line — a formatting artifact of the snippet.
mrCluster = new MiniMRCluster(servers, FS_URI != null ? FS_URI : FileSystem.get(conf).getUri().toString(), 1, null, null, new JobConf(this.conf)); JobConf jobConf = MapreduceTestingShim.getJobConf(mrCluster); if (jobConf == null) { jobConf = mrCluster.createJobConf(); jobConf.set("mapreduce.cluster.local.dir", conf.get("mapreduce.cluster.local.dir")); //Hadoop MiniMR overwrites this while it should not LOG.info("Mini mapreduce cluster started");
/**
 * Starts a MiniDFSCluster and a MiniMRCluster on top of it, starts the
 * delegation-token secret manager threads, and qualifies the shared test
 * path {@code p1} against the DFS filesystem.
 *
 * @throws Exception if cluster startup fails
 */
@BeforeClass public static void setUp() throws Exception {
  Configuration hadoopConf = new Configuration();
  dfsCluster = new MiniDFSCluster(hadoopConf, numSlaves, true, null);
  jConf = new JobConf(hadoopConf);
  String namenodeUri = dfsCluster.getFileSystem().getUri().toString();
  mrCluster = new MiniMRCluster(0, 0, numSlaves, namenodeUri, 1, null, null, null, jConf);
  dfsCluster.getNamesystem().getDelegationTokenSecretManager().startThreads();
  FileSystem fs = dfsCluster.getFileSystem();
  p1 = fs.makeQualified(new Path("file1"));
}
/**
 * Runs a small job against a mini DFS + MR cluster and asserts it succeeds.
 * Removed the unused {@code jobTrackerPort} local and narrowed
 * {@code namenode} to the try block where it is used.
 *
 * @throws IOException if cluster setup or the job launch fails
 */
public void testJobWithDFS() throws IOException {
  MiniDFSCluster dfs = null;
  MiniMRCluster mr = null;
  FileSystem fileSys = null;
  try {
    final int taskTrackers = 4;
    Configuration conf = new Configuration();
    dfs = new MiniDFSCluster(conf, 1, true, null);
    fileSys = dfs.getFileSystem();
    String namenode = fileSys.getUri().toString();
    mr = new MiniMRCluster(taskTrackers, namenode, 2);
    final String jobTrackerName = "localhost:" + mr.getJobTrackerPort();
    JobConf jobConf = new JobConf();
    boolean result = launchJob(fileSys.getUri(), jobTrackerName, jobConf, 3, 1);
    assertTrue(result);
  } finally {
    // Shut both clusters down regardless of the test outcome.
    if (dfs != null) {
      dfs.shutdown();
    }
    if (mr != null) {
      mr.shutdown();
    }
  }
}
// Brings up a MiniDFSCluster with NUM_HADOOP_SLAVES data nodes and a
// MiniMRCluster pointed at its filesystem URI.
// NOTE(review): the trailing tearDown() header is cut off in this snippet —
// its body is not visible here, so the code is kept byte-identical.
protected void setUp() throws Exception { Configuration conf = new Configuration(); dfsCluster = new MiniDFSCluster(conf, NUM_HADOOP_SLAVES, true, null); dfs = dfsCluster.getFileSystem(); mrCluster = new MiniMRCluster(NUM_HADOOP_SLAVES, dfs.getUri().toString(), 1); } protected void tearDown() throws Exception {
/**
 * Runs the word-count job on a mini DFS + MR cluster with a custom
 * mapred system directory, shutting both clusters down afterwards.
 *
 * @throws IOException if cluster setup or the job fails
 */
public void testWithDFS() throws IOException {
  MiniDFSCluster dfs = null;
  MiniMRCluster mr = null;
  FileSystem fileSys = null;
  try {
    final int taskTrackers = 4;
    JobConf conf = new JobConf();
    // Point the job tracker's system dir at a custom location.
    conf.set(JTConfig.JT_SYSTEM_DIR, "/tmp/custom/mapred/system");
    dfs = new MiniDFSCluster(conf, 4, true, null);
    fileSys = dfs.getFileSystem();
    String namenodeUri = fileSys.getUri().toString();
    mr = new MiniMRCluster(taskTrackers, namenodeUri, 1, null, null, conf);
    runWordCount(mr, mr.createJobConf(), conf.get("mapred.system.dir"));
  } finally {
    if (dfs != null) {
      dfs.shutdown();
    }
    if (mr != null) {
      mr.shutdown();
    }
  }
}
/** * Creates Hadoop instance based on constructor configuration before * a test case is run. * * @throws Exception */ protected void setUp() throws Exception { super.setUp(); if (localFS) { fileSystem = FileSystem.getLocal(new JobConf()); } else { dfsCluster = new MiniDFSCluster(new JobConf(), dataNodes, true, null); fileSystem = dfsCluster.getFileSystem(); } if (localMR) { } else { //noinspection deprecation mrCluster = new MiniMRCluster(taskTrackers, fileSystem.getUri().toString(), 1); } }
// NOTE(review): incomplete snippet — the enclosing method and its try block
// start outside this view; kept byte-identical.
// Starts a MiniMRCluster on the existing miniDFS filesystem (no rack/host
// placement), derives a JobConf from it, prints the fs/jobtracker endpoints,
// and publishes "mapred.job.tracker" as a system property.
String[] racks = null; String[] hosts = null; miniMR = new MiniMRCluster(numTaskTrackers, miniDFS.getFileSystem().getUri().toString(), numTaskTrackerDirectories, racks, hosts, new JobConf(conf)); JobConf jobConf = miniMR.createJobConf(new JobConf(conf)); System.out.println("-------" + jobConf.get("fs.defaultFS")); System.out.println("-------" + miniDFS.getFileSystem().getUri().toString()); System.setProperty("mapred.job.tracker", jobConf.get("mapred.job.tracker")); } catch (IOException e) {
// NOTE(review): incomplete and garbled snippet — the first conf.set(...) call
// is truncated (no closing ");" before the next conf.set), and the method's
// closing brace is outside this view. Kept byte-identical; the original file
// presumably reads conf.set("yarn.app.mapreduce.am.staging-dir",
// tmpDir + File.separator + testName) — TODO confirm against the source file.
// Intent per the visible code: create a work dir, configure staging/pfile
// settings, then start a single-node MiniMRCluster.
@BeforeClass public static void setup() throws Exception { File workDir = handleWorkDir(); Path tmpDir = new Path(System.getProperty("test.tmp.dir", "target" + File.separator + "test" + File.separator + "tmp")); conf.set("yarn.app.mapreduce.am.staging-dir", tmpDir + File.separator + testName conf.set("fs.pfile.impl", "org.apache.hadoop.fs.ProxyLocalFileSystem"); fs = FileSystem.get(conf); System.setProperty("hadoop.log.dir", new File(workDir, "/logs").getAbsolutePath()); mrCluster = new MiniMRCluster(1, fs.getUri().toString(), 1, null, null, new JobConf(conf)); mrConf = mrCluster.createJobConf();
// NOTE(review): incomplete snippet — the enclosing method is outside this
// view; kept byte-identical.
// Configures simple authentication (block access tokens off, permissions on),
// creates /tmp, /user and /hadoop/mapred/system on the existing dfsCluster
// with world-writable perms for the first two and owner-only for the system
// dir, then starts a MiniMRCluster and refreshes proxy-user groups.
JobConf conf = new JobConf(); conf.set("dfs.block.access.token.enable", "false"); conf.set("dfs.permissions", "true"); conf.set("hadoop.security.authentication", "simple"); FileSystem fileSystem = dfsCluster.getFileSystem(); fileSystem.mkdirs(new Path("/tmp")); fileSystem.mkdirs(new Path("/user")); fileSystem.mkdirs(new Path("/hadoop/mapred/system")); fileSystem.setPermission(new Path("/tmp"), FsPermission.valueOf("-rwxrwxrwx")); fileSystem.setPermission(new Path("/user"), FsPermission.valueOf("-rwxrwxrwx")); fileSystem.setPermission(new Path("/hadoop/mapred/system"), FsPermission.valueOf("-rwx------")); String nnURI = fileSystem.getUri().toString(); int numDirs = 1; String[] racks = null; String[] hosts = null; mrCluster = new MiniMRCluster(0, 0, taskTrackers, nnURI, numDirs, racks, hosts, null, conf); ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
// NOTE(review): incomplete snippet — starts mid-try inside an enclosing
// method whose signature is outside this view; kept byte-identical.
// Launches the same job twice against a mini DFS + MR cluster, once per
// failing OutputCommitter (CommitterWithFailTaskCleanup, then ...Cleanup2),
// validating each run against the JobTracker and deleting the output dir
// between runs; the finally block shuts both clusters down.
dfs = new MiniDFSCluster(conf, 4, true, null); fileSys = dfs.getFileSystem(); mr = new MiniMRCluster(taskTrackers, fileSys.getUri().toString(), 1); JobTracker jt = mr.getJobTrackerRunner().getJobTracker(); final Path inDir = new Path("./input"); final Path outDir = new Path("./output"); String input = "The quick brown fox\nhas many silly\nred fox sox\n"; JobConf jobConf = mr.createJobConf(); fileSys.delete(outDir, true); jobConf.setOutputCommitter(CommitterWithFailTaskCleanup.class); rJob = launchJob(jobConf, inDir, outDir, input); rJob.waitForCompletion(); validateJob(rJob, jt); fileSys.delete(outDir, true); jobConf.setOutputCommitter(CommitterWithFailTaskCleanup2.class); rJob = launchJob(jobConf, inDir, outDir, input); rJob.waitForCompletion(); validateJob(rJob, jt); } finally { if (dfs != null) { dfs.shutdown(); } if (mr != null) { mr.shutdown(); }
@BeforeClass public static void setup() throws IOException { createWorkDir(); Configuration conf = new Configuration(true); conf.set("yarn.scheduler.capacity.root.queues", "default"); conf.set("yarn.scheduler.capacity.root.default.capacity", "100"); fs = FileSystem.get(conf); System.setProperty("hadoop.log.dir", new File(workDir, "/logs").getAbsolutePath()); // LocalJobRunner does not work with mapreduce OutputCommitter. So need // to use MiniMRCluster. MAPREDUCE-2350 mrConf = new JobConf(conf); mrCluster = new MiniMRCluster(1, fs.getUri().toString(), 1, null, null, mrConf); }
/**
 * Starts mini DFS and MR clusters, then seeds the input directory under the
 * filesystem home dir with three small files ("a", "b", "c") and prepares
 * the archive path.
 *
 * @throws Exception if cluster startup or file creation fails
 */
protected void setUp() throws Exception {
  super.setUp();
  dfscluster = new MiniDFSCluster(new Configuration(), 2, true, null);
  fs = dfscluster.getFileSystem();
  mapred = new MiniMRCluster(2, fs.getUri().toString(), 1);
  Path home = fs.getHomeDirectory();
  inputPath = new Path(home, inputDir);
  archivePath = new Path(home, "archive");
  fs.mkdirs(inputPath);
  // Seed the input directory with three small files.
  for (String name : new String[] {"a", "b", "c"}) {
    createFile(inputPath, name, fs);
  }
}
/**
 * Runs a random-writer job with out-of-band heartbeats enabled and asserts
 * it finishes within the expected runtime budget. Replaced the
 * assertEquals(..., true, expr) anti-pattern with assertTrue and uses the
 * named constant instead of the duplicated literal 120.
 *
 * @throws Exception if cluster setup or the job fails
 */
public void testOutOfBandHeartbeats() throws Exception {
  MiniDFSCluster dfs = null;
  MiniMRCluster mr = null;
  try {
    Configuration conf = new Configuration();
    dfs = new MiniDFSCluster(conf, 4, true, null);
    int taskTrackers = 1;
    JobConf jobConf = new JobConf();
    jobConf.setFloat(JTConfig.JT_HEARTBEATS_SCALING_FACTOR, 30.0f);
    jobConf.setBoolean(TTConfig.TT_OUTOFBAND_HEARBEAT, true);
    mr = new MiniMRCluster(taskTrackers, dfs.getFileSystem().getUri().toString(), 3, null, null, jobConf);
    long start = System.currentTimeMillis();
    TestMiniMRDFSSort.runRandomWriter(mr.createJobConf(), new Path("rw"));
    long end = System.currentTimeMillis();
    final int expectedRuntimeSecs = 120;
    final int runTimeSecs = (int) ((end - start) / 1000);
    System.err.println("Runtime is " + runTimeSecs);
    assertTrue("Actual runtime " + runTimeSecs + "s not less than expected "
        + "runtime of " + expectedRuntimeSecs + "s!",
        runTimeSecs <= expectedRuntimeSecs);
  } finally {
    if (mr != null) {
      mr.shutdown();
    }
    if (dfs != null) {
      dfs.shutdown();
    }
  }
}
// NOTE(review): incomplete snippet — the enclosing method, the job launches
// that produce "running"/"jc", and the try structure around the trailing
// cleanup are all outside this view; kept byte-identical.
// Visible intent: start a 3-tracker MiniMRCluster with blacklist threshold 1
// and per-tracker task-failure limit 1, then assert the job succeeded, one
// host was blacklisted, and its fault count is 1 (twice), before closing the
// filesystem and shutting the cluster down.
FileSystem fileSys = null; Configuration conf = new Configuration(); fileSys = FileSystem.get(conf); JobConf jtConf = new JobConf(); jtConf.setInt(JTConfig.JT_MAX_TRACKER_BLACKLISTS, 1); mr = new MiniMRCluster(3, fileSys.getUri().toString(), 1, null, hosts, jtConf); JobConf mrConf = mr.createJobConf(); JobConf job = new JobConf(mrConf); job.setInt(JobContext.MAX_TASK_FAILURES_PER_TRACKER, 1); job.setNumMapTasks(6); assertEquals("Job failed", JobStatus.SUCCEEDED, running.getJobState()); assertEquals("Did not blacklist the host", 1, jc.getClusterStatus().getBlacklistedTrackers()); assertEquals("Fault count should be 1", 1, mr.getFaultCount(hosts[0])); assertEquals("Job failed", JobStatus.SUCCEEDED, running.getJobState()); assertEquals("Didn't blacklist the host", 1, jc.getClusterStatus().getBlacklistedTrackers()); assertEquals("Fault count should be 1", 1, mr.getFaultCount(hosts[0])); if (fileSys != null) { fileSys.close(); } if (mr!= null) { mr.shutdown(); }
/**
 * Starts a mini DFS cluster plus a MiniMRCluster with a 6-second job
 * submission expiry interval, then captures the JobTracker and a FileSystem
 * handle built from the cluster's JobConf.
 *
 * @throws Exception if cluster startup fails
 */
private void startCluster() throws Exception {
  super.setUp();
  Configuration conf = new Configuration();
  dfsCluster = new MiniDFSCluster(conf, numSlaves, true, null);
  JobConf jConf = new JobConf(conf);
  // Shorten the job submission expiry interval to 6 seconds.
  jConf.setLong("mapred.job.submission.expiry.interval", 6 * 1000);
  String namenodeUri = dfsCluster.getFileSystem().getUri().toString();
  mrCluster = new MiniMRCluster(0, 0, numSlaves, namenodeUri, 1, null, null, null, jConf);
  jt = mrCluster.getJobTrackerRunner().getJobTracker();
  fs = FileSystem.get(mrCluster.createJobConf());
}
// NOTE(review): incomplete and garbled snippet — the method name before
// "RunningJob job, JobConf conf)" is cut off, several statements are
// truncated (e.g. the dangling "logFile.toUri().getPath());" and the
// unfinished "acl = new AccessControlList(" calls), and the closing brace is
// outside this view; kept byte-identical.
// Visible intent: locate the completed job-history file for the job, assert
// it exists, and when ACLs are enabled verify the logged VIEW_JOB/MODIFY_JOB
// ACLs and queue name match the JobConf.
RunningJob job, JobConf conf) throws IOException { JobID id = job.getID(); JobHistory jobHistory = mr.getJobTrackerRunner().getJobTracker().getJobHistory(); Path doneDir = jobHistory.getCompletedJobHistoryLocation(); Path logFile = new Path(doneDir, logFileName); FileSystem fileSys = logFile.getFileSystem(conf); assertTrue("History file does not exist", fileSys.exists(logFile)); logFile.toUri().getPath()); if (mr.getJobTrackerRunner().getJobTracker() .areACLsEnabled()) { AccessControlList acl = new AccessControlList( assertTrue("VIEW_JOB ACL is not properly logged to history file.", acl.toString().equals( jobInfo.getJobACLs().get(JobACL.VIEW_JOB).toString())); acl = new AccessControlList( conf.get(JobACL.MODIFY_JOB.getAclName(), " ")); assertTrue(jobInfo.getJobQueueName().equals(conf.getQueueName()));