private void setFs() throws IOException {
  if (this.dfsCluster == null) {
    LOG.info("Skipping setting fs because dfsCluster is null");
    return;
  }
  FileSystem fs = this.dfsCluster.getFileSystem();
  FSUtils.setFsDefault(this.conf, new Path(fs.getUri()));
  // re-enable this check with dfs
  conf.unset(CommonFSUtils.UNSAFE_STREAM_CAPABILITY_ENFORCE);
}
@AfterClass
public static void destroyHDFS() throws Exception {
  if (hdfsCluster != null) {
    hdfsCluster.getFileSystem()
        .delete(new org.apache.hadoop.fs.Path(basePath.toUri()), true);
    hdfsCluster.shutdown();
  }
}
/**
 * Common method to close down a MiniDFSCluster and the associated file system.
 *
 * @param cluster the cluster to shut down; may be null
 */
public static void shutdownDfs(MiniDFSCluster cluster) {
  if (cluster != null) {
    LOG.info("Shutting down Mini DFS");
    try {
      cluster.shutdown();
    } catch (Exception e) {
      // Can get a java.lang.reflect.UndeclaredThrowableException thrown
      // here because of an InterruptedException. Don't let exceptions in
      // here be cause of test failure.
    }
    try {
      FileSystem fs = cluster.getFileSystem();
      if (fs != null) {
        LOG.info("Shutting down FileSystem");
        fs.close();
      }
      FileSystem.closeAll();
    } catch (IOException e) {
      LOG.error("error closing file system", e);
    }
  }
}
private void setupMiniDfsAndMrClusters() {
  try {
    final int dataNodes = 1;    // Number of data nodes
    final int taskTrackers = 1; // Number of task tracker nodes
    Configuration config = new Configuration();

    // Builds and starts the mini dfs and mapreduce clusters
    if (System.getProperty("hadoop.log.dir") == null) {
      System.setProperty("hadoop.log.dir", "target/tmp/logs/");
    }
    m_dfs = new MiniDFSCluster(config, dataNodes, true, null);
    m_fileSys = m_dfs.getFileSystem();
    m_mr = new MiniMRCluster(taskTrackers, m_fileSys.getUri().toString(), 1);

    // Create the configuration hadoop-site.xml file
    File conf_dir = new File(System.getProperty("user.home"), "pigtest/conf/");
    conf_dir.mkdirs();
    File conf_file = new File(conf_dir, "hadoop-site.xml");

    // Write the necessary config info to hadoop-site.xml
    m_conf = m_mr.createJobConf();
    m_conf.setInt("mapred.submit.replication", 1);
    m_conf.set("dfs.datanode.address", "0.0.0.0:0");
    m_conf.set("dfs.datanode.http.address", "0.0.0.0:0");
    m_conf.writeXml(new FileOutputStream(conf_file));

    // Set the system properties needed by Pig
    System.setProperty("cluster", m_conf.get("mapred.job.tracker"));
    System.setProperty("namenode", m_conf.get("fs.default.name"));
    System.setProperty("junit.hadoop.conf", conf_dir.getPath());
  } catch (IOException e) {
    throw new RuntimeException(e);
  }
}
@Test
// NOTE: the method signature was elided in the source; this name is a placeholder.
public void testLoadingJarsFromHDFS() throws Exception {
  FileSystem fs = cluster.getFileSystem();
  fs.copyFromLocalFile(new Path(jarFile1.getPath()),
      new Path(fs.getUri().toString() + Path.SEPARATOR));
  String jarFileOnHDFS1 = fs.getUri().toString() + Path.SEPARATOR + jarFile1.getName();
  Path pathOnHDFS1 = new Path(jarFileOnHDFS1);
  assertTrue("Copy jar file to HDFS failed.", fs.exists(pathOnHDFS1));

  fs.copyFromLocalFile(new Path(jarFile2.getPath()),
      new Path(fs.getUri().toString() + Path.SEPARATOR));
  String jarFileOnHDFS2 = fs.getUri().toString() + Path.SEPARATOR + jarFile2.getName();
@BeforeClass
public static void createHDFS() throws Exception {
  final File baseDir = TMP.newFolder();
  Configuration hdConf = new Configuration();
  hdConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.getAbsolutePath());
  MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(hdConf);
  hdfsCluster = builder.build();
  org.apache.hadoop.fs.FileSystem hdfs = hdfsCluster.getFileSystem();
  fs = new HadoopFileSystem(hdfs);
  basePath = new Path(hdfs.getUri().toString() + "/tests");
}
String[] racks = null;
String[] hosts = null;
miniMR = new MiniMRCluster(numTaskTrackers, miniDFS.getFileSystem().getUri().toString(),
    numTaskTrackerDirectories, racks, hosts, new JobConf(conf));
JobConf jobConf = miniMR.createJobConf(new JobConf(conf));
System.out.println("-------" + jobConf.get("fs.defaultFS"));
System.out.println("-------" + miniDFS.getFileSystem().getUri().toString());
System.setProperty("mapred.job.tracker", jobConf.get("mapred.job.tracker"));
} catch (IOException e) {
/**
 * Creates Hadoop instance based on constructor configuration before
 * a test case is run.
 *
 * @throws Exception
 */
protected void setUp() throws Exception {
  super.setUp();
  if (localFS) {
    fileSystem = FileSystem.getLocal(new JobConf());
  } else {
    dfsCluster = new MiniDFSCluster(new JobConf(), dataNodes, true, null);
    fileSystem = dfsCluster.getFileSystem();
  }
  if (!localMR) {
    //noinspection deprecation
    mrCluster = new MiniMRCluster(taskTrackers, fileSystem.getUri().toString(), 1);
  }
}
public void testText() throws Exception {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    final FileSystem dfs = cluster.getFileSystem();
    textTest(new Path("/texttest").makeQualified(dfs.getUri(),
        dfs.getWorkingDirectory()), conf);
    conf.set("fs.default.name", dfs.getUri().toString());
    final FileSystem lfs = FileSystem.getLocal(conf);
    textTest(new Path(TEST_ROOT_DIR, "texttest").makeQualified(lfs.getUri(),
        lfs.getWorkingDirectory()), conf);
  } finally {
    if (null != cluster) {
      cluster.shutdown();
    }
  }
}
public void testWithDFS() throws IOException {
  MiniDFSCluster dfs = null;
  MiniMRCluster mr = null;
  FileSystem fileSys = null;
  try {
    final int taskTrackers = 4;
    JobConf conf = new JobConf();
    conf.set(JTConfig.JT_SYSTEM_DIR, "/tmp/custom/mapred/system");
    dfs = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
    fileSys = dfs.getFileSystem();
    mr = new MiniMRCluster(taskTrackers, fileSys.getUri().toString(), 1, null, null, conf);
    runWordCount(mr, mr.createJobConf(), conf.get("mapred.system.dir"));
  } finally {
    if (dfs != null) {
      dfs.shutdown();
    }
    if (mr != null) {
      mr.shutdown();
    }
  }
}
Server server = new DummyServer();
Path archivedHfileDir =
    new Path(UTIL.getDataTestDirOnTestFS(), HConstants.HFILE_ARCHIVE_DIRECTORY);
FileSystem fs = UTIL.getDFSCluster().getFileSystem();
final HFileCleaner cleaner = new HFileCleaner(1000, server, conf, fs, archivedHfileDir);
Assert.assertEquals(ORIGINAL_THROTTLE_POINT, cleaner.getThrottlePoint());
Configuration newConf = new Configuration(conf);
newConf.setInt(HFileCleaner.HFILE_DELETE_THROTTLE_THRESHOLD, UPDATE_THROTTLE_POINT);
newConf.setInt(HFileCleaner.LARGE_HFILE_QUEUE_INIT_SIZE, UPDATE_QUEUE_INIT_SIZE);
@BeforeClass
public static void createHDFS() throws Exception {
  final File baseDir = TEMP_FOLDER.newFolder();
  final Configuration hdConf = new Configuration();
  hdConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.getAbsolutePath());
  final MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(hdConf);
  hdfsCluster = builder.build();
  final org.apache.hadoop.fs.FileSystem hdfs = hdfsCluster.getFileSystem();
  fileSystem = new HadoopFileSystem(hdfs);
  basePath = new Path(hdfs.getUri() + "/tests");
}
@BeforeClass
public static void setUp() throws IOException {
  int dataNodes = 1;
  int port = 29999;
  JobConf conf = new JobConf();
  channel = new MemoryChannel();
  conf.set("dfs.block.access.token.enable", "false");
  conf.set("dfs.permissions", "true");
  conf.set("hadoop.security.authentication", "simple");
  conf.set("fs.default.name", "hdfs://localhost:29999"); // must match 'port' above
  dfsCluster = new MiniDFSCluster(port, conf, dataNodes, true, true, null, null);
  fileSystem = dfsCluster.getFileSystem();
  fileSystem.delete(new Path("/logs"), true);
  source = new AvroSource();
  sink = new KaaHdfsSink();
  logSchemasRootDir = new File("schemas");
  if (logSchemasRootDir.exists()) {
    logSchemasRootDir.delete();
  }
  prepareSchema(logSchemasRootDir);
}
@BeforeClass
public static void setUp() throws Exception {
  Logger.getRootLogger().setLevel(Level.ERROR);

  // Put the MiniDFSCluster directory in the target directory
  final File data = new File("target/test/hdfstestdata").getAbsoluteFile();
  data.mkdirs();
  System.setProperty("test.build.data", data.toString());
  FileUtils.cleanDirectory(data);

  // Setup HDFS
  conf = new Configuration();
  conf.set(FileSystem.FS_DEFAULT_NAME_KEY, HDFS_URI);
  conf.set("hadoop.security.token.service.use_ip", "true");
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024 * 1024); // 1M block size
  setUmask(conf);
  cluster = new MiniDFSCluster(PORT, conf, 1, true, true, true, null, null, null, null);
  cluster.waitActive();

  // Set up the VFS
  manager = new DefaultFileSystemManager();
  manager.addProvider("hdfs", new HdfsFileProvider());
  manager.init();
  hdfs = cluster.getFileSystem();
}
@Test(timeout = 30000)
public void testSaveNamespaceWithDanglingLease() throws Exception {
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(new Configuration()).numDataNodes(1).build();
  cluster.waitActive();
  DistributedFileSystem fs = cluster.getFileSystem();
  try {
    cluster.getNamesystem().leaseManager.addLease("me", "/non-existent");
    fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    cluster.getNameNodeRpc().saveNamespace();
    fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
@Override
public HadoopShims.MiniDFSShim getMiniDfs(Configuration conf, int numDataNodes,
    boolean format, String[] racks) throws IOException {
  configureImpersonation(conf);
  MiniDFSCluster miniDFSCluster = new MiniDFSCluster(conf, numDataNodes, format, racks);

  // Need to set the client's KeyProvider to the NN's for JKS,
  // else the updates do not get flushed properly
  KeyProviderCryptoExtension keyProvider =
      miniDFSCluster.getNameNode().getNamesystem().getProvider();
  if (keyProvider != null) {
    miniDFSCluster.getFileSystem().getClient().setKeyProvider(keyProvider);
  }

  cluster = new MiniDFSShim(miniDFSCluster);
  return cluster;
}
miniDfs = new MiniDFSCluster(new Configuration(), 1, true, null);
miniDfs.getFileSystem().mkdirs(new Path("/path/to/schema"));
FSDataOutputStream out = miniDfs.getFileSystem().create(
    new Path("/path/to/schema/schema.avsc"));
out.writeBytes(RECORD_SCHEMA);
out.close();
String onHDFS = miniDfs.getFileSystem().getUri() + "/path/to/schema/schema.avsc";

Configuration conf = new Configuration();
Properties tbl = createPropertiesForHiveAvroSchemaUrl(onHDFS);
serDe.initialize(conf, tbl);

miniDfs.shutdown();
void loadingClassFromLibDirInJar(String libPrefix) throws Exception {
  FileSystem fs = cluster.getFileSystem();
  fs.copyFromLocalFile(new Path(outerJarFile.getPath()),
      new Path(fs.getUri().toString() + Path.SEPARATOR));
  String jarFileOnHDFS = fs.getUri().toString() + Path.SEPARATOR + outerJarFile.getName();
  assertTrue("Copy jar file to HDFS failed.", fs.exists(new Path(jarFileOnHDFS)));
  LOG.info("Copied jar file to HDFS: " + jarFileOnHDFS);
protected void setUp() throws Exception {
  Configuration conf = new Configuration();
  dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
  mrCluster = new MiniMRCluster(2, dfsCluster.getFileSystem().getUri().toString(), 1);
}

protected void tearDown() throws Exception {
  // Body truncated in the original snippet; a typical implementation would
  // shut down the clusters started in setUp(), e.g.:
  //   mrCluster.shutdown();
  //   dfsCluster.shutdown();
public void testNonDefaultFS() throws IOException {
  FileSystem fs = cluster.getFileSystem();
  Configuration conf = fs.getConf();
  conf.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY, fs.getUri().toString());
  trashNonDefaultFS(conf);
}