Refine search
/**
 * JUnit teardown: recursively deletes the test base path from HDFS and
 * stops the mini cluster.  shutdown() is in a finally block so the
 * cluster is stopped even when the delete fails (the original skipped
 * shutdown on a delete exception, leaking the cluster).
 *
 * @throws Exception if the recursive delete fails
 */
@AfterClass
public static void destroyHDFS() throws Exception {
  if (hdfsCluster != null) {
    try {
      // Recursive delete of everything the tests wrote under basePath.
      hdfsCluster.getFileSystem().delete(
          new org.apache.hadoop.fs.Path(basePath.toUri()), true);
    } finally {
      hdfsCluster.shutdown();
    }
  }
}
/**
 * JUnit teardown: recursively deletes the test base path from HDFS and
 * stops the mini cluster.  shutdown() is in a finally block so the
 * cluster is stopped even when the delete fails (the original skipped
 * shutdown on a delete exception, leaking the cluster).
 *
 * @throws Exception if the recursive delete fails
 */
@AfterClass
public static void destroyHDFS() throws Exception {
  if (hdfsCluster != null) {
    try {
      // Recursive delete of everything the tests wrote under basePath.
      hdfsCluster.getFileSystem().delete(
          new org.apache.hadoop.fs.Path(basePath.toUri()), true);
    } finally {
      hdfsCluster.shutdown();
    }
  }
}
/** * Common method to close down a MiniDFSCluster and the associated file system * * @param cluster */ public static void shutdownDfs(MiniDFSCluster cluster) { if (cluster != null) { LOG.info("Shutting down Mini DFS "); try { cluster.shutdown(); } catch (Exception e) { /// Can get a java.lang.reflect.UndeclaredThrowableException thrown // here because of an InterruptedException. Don't let exceptions in // here be cause of test failure. } try { FileSystem fs = cluster.getFileSystem(); if (fs != null) { LOG.info("Shutting down FileSystem"); fs.close(); } FileSystem.closeAll(); } catch (IOException e) { LOG.error("error closing file system", e); } } }
/**
 * listStatusWithStatusFilter must return null — not throw — when the
 * queried path does not exist.
 */
@Test
public void testFilteredStatusDoesNotThrowOnNotFound() throws Exception {
  MiniDFSCluster cluster = htu.startMiniDFSCluster(1);
  try {
    FileSystem fs = cluster.getFileSystem();
    Path missing = new Path("definitely/doesn't/exist");
    assertNull(FSUtils.listStatusWithStatusFilter(fs, missing, null));
  } finally {
    cluster.shutdown();
  }
}
// Write the Avro record schema to /path/to/schema/schema.avsc on the
// mini DFS, remember its fully-qualified URI, then stop the cluster.
// The output stream is closed in a finally block so a failed write
// cannot leak it (the original left it open if writeBytes threw).
miniDfs.getFileSystem().mkdirs(new Path("/path/to/schema"));
FSDataOutputStream out = miniDfs.getFileSystem().create(
    new Path("/path/to/schema/schema.avsc"));
try {
  out.writeBytes(RECORD_SCHEMA);
} finally {
  out.close();
}
String onHDFS = miniDfs.getFileSystem().getUri() + "/path/to/schema/schema.avsc";
miniDfs.shutdown();
/**
 * HdfsDataOutputStream must report the hsync and hflush capabilities,
 * and must report an unknown capability exactly when the
 * StreamCapabilities class is not on the classpath.
 */
@Test
public void checkStreamCapabilitiesOnHdfsDataOutputStream() throws Exception {
  MiniDFSCluster cluster = htu.startMiniDFSCluster(1);
  // The stream is now part of the try-with-resources: the original
  // never closed it, leaking the open output stream.
  try (FileSystem filesystem = cluster.getFileSystem();
       FSDataOutputStream stream = filesystem.create(new Path("/tmp/foobar"))) {
    assertTrue(FSUtils.hasCapability(stream, "hsync"));
    assertTrue(FSUtils.hasCapability(stream, "hflush"));
    assertNotEquals("We expect HdfsDataOutputStream to say it has a dummy capability iff the " +
        "StreamCapabilities class is not defined.",
        STREAM_CAPABILITIES_IS_PRESENT,
        FSUtils.hasCapability(stream, "a capability that hopefully HDFS doesn't add."));
  } finally {
    cluster.shutdown();
  }
}
// NOTE(review): truncated snippet — the try body and the method's
// closing braces are cut off in this view.  The visible part only
// obtains the cluster's file system, names the pread test file, and
// tears down the file system and cluster in the finally block.
FileSystem fileSys = cluster.getFileSystem();
try {
  Path p = new Path("preadtest.dat");
} finally {
  fileSys.close();
  cluster.shutdown();
public void shutdownMiniDFSCluster() throws Exception { if (this.dfsCluster != null) { try { FileSystem fs = this.dfsCluster.getFileSystem(); if (fs != null) fs.close(); } catch (IOException e) { System.err.println("error closing file system: " + e); } // The below throws an exception per dn, AsynchronousCloseException. this.dfsCluster.shutdown(); } }
/**
 * (Re)creates the mini DFS cluster under test: any previously running
 * cluster is shut down first, then a fresh one is built from the given
 * configuration and the fileSystem field is pointed at it.
 *
 * @param conf configuration for the new cluster
 * @throws IOException if the new cluster fails to start
 */
private void init(Configuration conf) throws IOException {
  if (cluster != null) {
    cluster.shutdown();
  }
  MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
  cluster = builder.build();
  cluster.waitClusterUp();
  fileSystem = cluster.getFileSystem();
}
/**
 * Test case where a bunch of threads are both appending and flushing.
 * They all finish before the file is closed.
 */
@Test
public void testMultipleHflushers() throws Exception {
  Configuration conf = new Configuration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  FileSystem fs = cluster.getFileSystem();
  Path p = new Path("/multiple-hflushers.dat");
  try {
    doMultithreadedWrites(conf, p, NUM_THREADS, WRITE_SIZE, NUM_WRITES_PER_THREAD);
  } finally {
    // Nested try/finally: if fs.close() throws, the cluster is still
    // shut down (the original skipped shutdown in that case).
    try {
      fs.close();
    } finally {
      cluster.shutdown();
    }
  }
}
/**
 * Runs the text test twice: once against a two-datanode mini DFS and
 * once against the local file system rooted at TEST_ROOT_DIR.  The
 * cluster is shut down in the finally block regardless of outcome.
 */
public void testText() throws Exception {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    final FileSystem dfs = cluster.getFileSystem();
    Path dfsPath = new Path("/texttest")
        .makeQualified(dfs.getUri(), dfs.getWorkingDirectory());
    textTest(dfsPath, conf);
    // Point the default FS at the mini cluster before grabbing the
    // local file system for the second pass.
    conf.set("fs.default.name", dfs.getUri().toString());
    final FileSystem lfs = FileSystem.getLocal(conf);
    Path localPath = new Path(TEST_ROOT_DIR, "texttest")
        .makeQualified(lfs.getUri(), lfs.getWorkingDirectory());
    textTest(localPath, conf);
  } finally {
    if (null != cluster) {
      cluster.shutdown();
    }
  }
}
/**
 * Runs a word-count job on a mini MR cluster backed by a 4-node mini
 * DFS, using a custom mapred system directory.  Both clusters are torn
 * down in the finally block.
 *
 * @throws IOException if cluster setup or the job fails
 */
public void testWithDFS() throws IOException {
  MiniDFSCluster dfs = null;
  MiniMRCluster mr = null;
  FileSystem fileSys = null;
  try {
    final int numTaskTrackers = 4;
    JobConf conf = new JobConf();
    // Exercise a non-default system directory.
    conf.set(JTConfig.JT_SYSTEM_DIR, "/tmp/custom/mapred/system");
    dfs = new MiniDFSCluster(conf, 4, true, null);
    fileSys = dfs.getFileSystem();
    mr = new MiniMRCluster(numTaskTrackers, fileSys.getUri().toString(),
        1, null, null, conf);
    runWordCount(mr, mr.createJobConf(), conf.get("mapred.system.dir"));
  } finally {
    if (dfs != null) {
      dfs.shutdown();
    }
    if (mr != null) {
      mr.shutdown();
    }
  }
}
/** * Tests DFSClient.close throws no ConcurrentModificationException if * multiple files are open. */ @Test public void testDFSClose() throws Exception { Configuration conf = getTestConfiguration(); MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build(); FileSystem fileSys = cluster.getFileSystem(); try { // create two files fileSys.create(new Path("/test/dfsclose/file-0")); fileSys.create(new Path("/test/dfsclose/file-1")); fileSys.close(); } finally { if (cluster != null) {cluster.shutdown();} } }
public void testMapReduceSortWithCompressedEmptyMapOutputs() throws Exception { MiniDFSCluster dfs = null; MiniMRCluster mr = null; FileSystem fileSys = null; try { Configuration conf = new Configuration(); // Start the mini-MR and mini-DFS clusters dfs = new MiniDFSCluster(conf, NUM_HADOOP_SLAVES, true, null); fileSys = dfs.getFileSystem(); mr = new MiniMRCluster(NUM_HADOOP_SLAVES, fileSys.getUri().toString(), 1); // Run randomwriter to generate input for 'sort' runRandomWriter(mr.createJobConf(), SORT_INPUT_PATH); // Run sort runSort(mr.createJobConf(), SORT_INPUT_PATH, SORT_OUTPUT_PATH); } finally { if (dfs != null) { dfs.shutdown(); } if (mr != null) { mr.shutdown(); } } }
/**
 * Writes a file just over 2GB through a simulated dataset and checks
 * that the single datanode's bytesWritten metric matches the file
 * length.
 */
public void testDataNodeMetrics() throws Exception {
  Configuration conf = new HdfsConfiguration();
  conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  try {
    FileSystem fs = cluster.getFileSystem();
    // One byte past Integer.MAX_VALUE so the counter is exercised
    // beyond 32 bits.
    final long LONG_FILE_LEN = Integer.MAX_VALUE + 1L;
    DFSTestUtil.createFile(fs, new Path("/tmp.txt"), LONG_FILE_LEN, (short) 1, 1L);
    List<DataNode> datanodes = cluster.getDataNodes();
    // JUnit convention puts the expected value first; the original had
    // the arguments reversed, which garbles the failure message.
    assertEquals(1, datanodes.size());
    DataNode datanode = datanodes.get(0);
    DataNodeMetrics metrics = datanode.getMetrics();
    assertEquals(LONG_FILE_LEN, metrics.bytesWritten.getCurrentIntervalValue());
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
}
/**
 * Test if the seek bug exists in FSDataInputStream in DFS.
 */
public void testSeekBugDFS() throws IOException {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  FileSystem fileSys = cluster.getFileSystem();
  try {
    Path file1 = new Path("seektest.dat");
    // Write the fixture, run both seek/read patterns, then remove it.
    writeFile(fileSys, file1);
    seekReadFile(fileSys, file1);
    smallReadSeek(fileSys, file1);
    cleanupFile(fileSys, file1);
  } finally {
    fileSys.close();
    cluster.shutdown();
  }
}
/**
 * JUnit teardown: cleans up the benchmark's output and stops the mini
 * cluster.  shutdown() is in a finally block so a cleanup failure
 * cannot leak the cluster (the original skipped shutdown when
 * bench.cleanup threw).
 *
 * @throws Exception if cleanup or shutdown fails
 */
@AfterClass
public static void afterClass() throws Exception {
  if (cluster == null) {
    return;
  }
  try {
    FileSystem fs = cluster.getFileSystem();
    bench.cleanup(fs);
  } finally {
    cluster.shutdown();
  }
}
/**
 * test JMX connection to DataNode..
 * @throws Exception
 */
public void testDataNode() throws Exception {
  int numDatanodes = 2;
  cluster = new MiniDFSCluster.Builder(config).numDataNodes(numDatanodes).build();
  cluster.waitActive();
  writeFile(cluster.getFileSystem(), new Path("/test"), 2);
  try {
    JMXGet jmx = new JMXGet();
    jmx.setService("DataNode");
    jmx.init();
    // Expected value first (the original reversed the assertEquals
    // arguments, garbling the failure message).
    assertEquals(0, Integer.parseInt(jmx.getValue("bytes_written")));
  } finally {
    // Shut the cluster down even when the assertion fails; the original
    // leaked the cluster on any failure after startup.
    cluster.shutdown();
  }
}
}
/**
 * Drives the multithreaded hflush benchmark at the given replication
 * factor and prints the measured latency quantiles.
 *
 * @param repl replication factor for the benchmark file
 */
private void doTestMultipleHflushers(int repl) throws Exception {
  Configuration conf = new Configuration();
  MiniDFSCluster miniCluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(repl).build();
  FileSystem fileSystem = miniCluster.getFileSystem();
  Path testPath = new Path("/multiple-hflushers.dat");
  try {
    doMultithreadedWrites(conf, testPath, NUM_THREADS, WRITE_SIZE,
        NUM_WRITES_PER_THREAD, repl);
    System.out.println("Latency quantiles (in microseconds):\n" + quantiles);
  } finally {
    fileSystem.close();
    miniCluster.shutdown();
  }
}
/**
 * Replaces the current mini DFS cluster with a fresh one built from the
 * supplied configuration, waiting until it is up before caching its
 * file system.
 *
 * @param conf configuration for the new cluster
 * @throws IOException if the new cluster fails to start
 */
private void init(Configuration conf) throws IOException {
  MiniDFSCluster previous = cluster;
  if (previous != null) {
    previous.shutdown();
  }
  cluster = new MiniDFSCluster.Builder(conf).build();
  cluster.waitClusterUp();
  fileSystem = cluster.getFileSystem();
}