/**
 * Corrupts each of the given files in place by incrementing its first
 * character, so checksum/content verification on the file will fail.
 *
 * @param files files to corrupt; each must be non-empty and readable
 * @throws IOException if a file cannot be read or rewritten
 */
static void corrupt(List<File> files) throws IOException {
  for (File f : files) {
    StringBuilder content = new StringBuilder(DFSTestUtil.readFile(f));
    char c = content.charAt(0);
    content.setCharAt(0, ++c);
    // try-with-resources guarantees the writer is closed (and the file not
    // left locked/partially written) even if print() throws.
    try (PrintWriter out = new PrintWriter(f)) {
      out.print(content);
      out.flush();
    }
  }
}
/**
 * Return the contents of the given block on the given datanode.
 *
 * @param i the index of the datanode (0-based, must be a valid index into
 *          the cluster's datanode list)
 * @param blockName the name of the block file to look for
 * @throws IOException on error accessing the file for the given block
 * @return the contents of the block file, or null if none found
 */
public String readBlockOnDataNode(int i, String blockName) throws IOException {
  assert (i >= 0 && i < dataNodes.size()) : "Invalid datanode "+i;
  // Each datanode has multiple data dirs, check each
  for (int dn = i*2; dn < i*2+2; dn++) {
    File dataDir = new File(getBaseDirectory() + "data");
    // Data dirs are named data1, data2, ... hence (dn+1).
    File blockFile = new File(dataDir, "data" + (dn+1) + FINALIZED_DIR_NAME + blockName);
    if (blockFile.exists()) {
      return DFSTestUtil.readFile(blockFile);
    }
  }
  return null;
}
/**
 * Runs "fs -get [options] <remote> <local>" against a unique local
 * destination, asserts the shell exit code, and on success returns the
 * downloaded file's contents (null otherwise).
 */
public String run(int exitcode, String... options) throws IOException {
  String localDst = TEST_ROOT_DIR + "/" + fname + ++count;
  String[] argv = new String[options.length + 3];
  argv[0] = "-get";
  for (int k = 0; k < options.length; k++) {
    argv[k + 1] = options[k];
  }
  argv[argv.length - 2] = remotef.toString();
  argv[argv.length - 1] = localDst;
  show("args=" + Arrays.asList(argv));
  try {
    assertEquals(exitcode, shell.run(argv));
  } catch (Exception e) {
    assertTrue(StringUtils.stringifyException(e), false);
  }
  return exitcode == 0 ? DFSTestUtil.readFile(new File(localDst)) : null;
}
};
/**
 * Corrupts each of the given files in place by incrementing its first
 * character, so checksum/content verification on the file will fail.
 *
 * @param files files to corrupt; each must be non-empty and readable
 * @throws IOException if a file cannot be read or rewritten
 */
static void corrupt(List<File> files) throws IOException {
  for (File f : files) {
    StringBuilder content = new StringBuilder(DFSTestUtil.readFile(f));
    char c = content.charAt(0);
    content.setCharAt(0, ++c);
    // try-with-resources guarantees the writer is closed (and the file not
    // left locked/partially written) even if print() throws.
    try (PrintWriter out = new PrintWriter(f)) {
      out.print(content);
      out.flush();
    }
  }
}
/**
 * Return the contents of the given block on the given datanode, or null if
 * the datanode has no on-disk file for that block.
 *
 * @param i the index of the datanode (0-based)
 * @param block the block whose replica file should be read
 * @throws IOException on error reading the block file
 */
public String readBlockOnDataNode(int i, ExtendedBlock block) throws IOException {
  assert (i >= 0 && i < dataNodes.size()) : "Invalid datanode "+i;
  File replica = getBlockFile(i, block);
  if (replica == null || !replica.exists()) {
    return null;
  }
  return DFSTestUtil.readFile(replica);
}
/**
 * Test case where the destination file already exists: the atomic stream
 * must leave the original contents untouched until close(), at which point
 * the new contents atomically replace the old.
 */
@Test
public void testOverwriteFile() throws IOException {
  assertTrue("Creating empty dst file", DST_FILE.createNewFile());
  OutputStream fos = new AtomicFileOutputStream(DST_FILE);
  assertTrue("Empty file still exists", DST_FILE.exists());
  fos.write(TEST_STRING.getBytes());
  fos.flush();
  // Original contents still in place
  assertEquals("", DFSTestUtil.readFile(DST_FILE));
  fos.close();
  // New contents replace original file
  String readBackData = DFSTestUtil.readFile(DST_FILE);
  assertEquals(TEST_STRING, readBackData);
}
/**
 * Test case where there is no existing file: nothing should appear at the
 * destination path until the stream is closed, after which the full
 * contents become visible.
 */
@Test
public void testWriteNewFile() throws IOException {
  OutputStream stream = new AtomicFileOutputStream(DST_FILE);
  assertFalse(DST_FILE.exists());
  stream.write(TEST_STRING.getBytes());
  stream.flush();
  // Data is still hidden in the temporary file until close().
  assertFalse(DST_FILE.exists());
  stream.close();
  // Close atomically publishes the file with its full contents.
  assertTrue(DST_FILE.exists());
  assertEquals(TEST_STRING, DFSTestUtil.readFile(DST_FILE));
}
@Override
public Boolean get() {
  // Use a fresh file name per poll so every attempt performs a real
  // write + read + delete cycle against the DN.
  x.getAndIncrement();
  try {
    DFSTestUtil.createFile(fs, new Path("/time.txt." + x.get()),
        LONG_FILE_LEN, (short) 1, Time.monotonicNow());
    DFSTestUtil.readFile(fs, new Path("/time.txt." + x.get()));
    fs.delete(new Path("/time.txt." + x.get()), true);
  } catch (IOException ioe) {
    // Best-effort: log and let the surrounding waitFor retry.
    LOG.error("Caught IOException while ingesting DN metrics", ioe);
    return false;
  }
  MetricsRecordBuilder rbNew = getMetrics(datanode.getMetrics().name());
  final long endWriteValue = getLongCounter("TotalWriteTime", rbNew);
  final long endReadValue = getLongCounter("TotalReadTime", rbNew);
  // Succeed once both timing counters have advanced past their start values.
  return endWriteValue > startWriteValue && endReadValue > startReadValue;
}
}, 30, 60000);
/**
 * Runs "fs -get [options] <remote> <local>" against a unique local
 * destination, asserts the shell exit code, and on success returns the
 * downloaded file's contents (null otherwise).
 */
public String run(int exitcode, String... options) throws IOException {
  String localDst = new File(TEST_ROOT_DIR, fname + ++count).getAbsolutePath();
  String[] argv = new String[options.length + 3];
  argv[0] = "-get";
  for (int k = 0; k < options.length; k++) {
    argv[k + 1] = options[k];
  }
  argv[argv.length - 2] = remotef.toString();
  argv[argv.length - 1] = localDst;
  show("args=" + Arrays.asList(argv));
  try {
    assertEquals(exitcode, shell.run(argv));
  } catch (Exception e) {
    assertTrue(StringUtils.stringifyException(e), false);
  }
  return exitcode == 0 ? DFSTestUtil.readFile(new File(localDst)) : null;
}
};
/**
 * Verifies that files captured in a snapshot remain readable after the live
 * copy is deleted, a checkpoint is saved, and the NameNode is restarted
 * (i.e. the snapshot data survives an fsimage round-trip).
 */
@Test
public void testWithCheckpoint() throws Exception {
  Path path = new Path("/test");
  doWriteAndAbort(fs, path);
  fs.delete(new Path("/test/test"), true);
  NameNode nameNode = cluster.getNameNode();
  // Checkpoint: enter safe mode, persist the namespace, leave safe mode.
  NameNodeAdapter.enterSafeMode(nameNode, false);
  NameNodeAdapter.saveNamespace(nameNode);
  NameNodeAdapter.leaveSafeMode(nameNode);
  cluster.restartNameNode(true);
  // read snapshot file after restart
  String test2snapshotPath = Snapshot.getSnapshotPath(path.toString(),
      "s1/test/test2");
  DFSTestUtil.readFile(fs, new Path(test2snapshotPath));
  String test3snapshotPath = Snapshot.getSnapshotPath(path.toString(),
      "s1/test/test3");
  DFSTestUtil.readFile(fs, new Path(test3snapshotPath));
}
/**
 * Like testWithCheckpoint, but deletes the individual files (rather than
 * their parent directory) before checkpointing; the snapshot copies must
 * still be readable after the NameNode restarts from the saved fsimage.
 */
@Test
public void testFilesDeletionWithCheckpoint() throws Exception {
  Path path = new Path("/test");
  doWriteAndAbort(fs, path);
  fs.delete(new Path("/test/test/test2"), true);
  fs.delete(new Path("/test/test/test3"), true);
  NameNode nameNode = cluster.getNameNode();
  // Checkpoint: enter safe mode, persist the namespace, leave safe mode.
  NameNodeAdapter.enterSafeMode(nameNode, false);
  NameNodeAdapter.saveNamespace(nameNode);
  NameNodeAdapter.leaveSafeMode(nameNode);
  cluster.restartNameNode(true);
  // read snapshot file after restart
  String test2snapshotPath = Snapshot.getSnapshotPath(path.toString(),
      "s1/test/test2");
  DFSTestUtil.readFile(fs, new Path(test2snapshotPath));
  String test3snapshotPath = Snapshot.getSnapshotPath(path.toString(),
      "s1/test/test3");
  DFSTestUtil.readFile(fs, new Path(test3snapshotPath));
}
/**
 * Tests DFSClient.close throws no ConcurrentModificationException if
 * multiple files are open.
 * Also tests that any cached sockets are closed. (HDFS-3359)
 */
@Test
public void testDFSClose() throws Exception {
  Configuration conf = getTestConfiguration();
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    FileSystem fileSys = cluster.getFileSystem();
    // create two files, leaving them open
    fileSys.create(new Path("/test/dfsclose/file-0"));
    fileSys.create(new Path("/test/dfsclose/file-1"));
    // create another file, close it, and read it, so
    // the client gets a socket in its SocketCache
    Path p = new Path("/non-empty-file");
    DFSTestUtil.createFile(fileSys, p, 1L, (short)1, 0L);
    DFSTestUtil.readFile(fileSys, p);
    // close() must handle the two still-open streams without CME and
    // release any cached sockets.
    fileSys.close();
  } finally {
    // Always tear the cluster down, even if an assertion above failed.
    if (cluster != null) {cluster.shutdown();}
  }
}
/**
 * Writes to an encrypted file twice (create then append) and verifies the
 * read-back plaintext is the concatenation of both writes.
 */
@Test
public void testEncryptedAppend() throws IOException {
  setEncryptionConfigKeys();
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
  fs = getFileSystem(conf);
  // First write: file contains one copy of the plaintext.
  writeTestDataToFile(fs);
  assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
  // Second write appends; the file now holds the doubled plaintext.
  writeTestDataToFile(fs);
  assertEquals(PLAIN_TEXT + PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
}
@Test public void testLongLivedReadClientAfterRestart() throws IOException { FileChecksum checksum = writeUnencryptedAndThenRestartEncryptedCluster(); assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH)); assertEquals(checksum, fs.getFileChecksum(TEST_PATH)); // Restart the NN and DN, after which the client's encryption key will no // longer be valid. cluster.restartNameNode(); assertTrue(cluster.restartDataNode(0)); assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH)); assertEquals(checksum, fs.getFileChecksum(TEST_PATH)); }
/**
 * Verifies a long-lived client can still read after its data encryption
 * key expires: shortens the block token key lifetime, waits past several
 * lifetimes, then re-reads and re-checksums the file.
 */
@Test
public void testLongLivedClient() throws IOException, InterruptedException {
  FileChecksum checksum = writeUnencryptedAndThenRestartEncryptedCluster();
  BlockTokenSecretManager btsm = cluster.getNamesystem().getBlockManager()
      .getBlockTokenSecretManager();
  // Shrink key rotation and token lifetime so expiry happens in-test.
  btsm.setKeyUpdateIntervalForTesting(2 * 1000);
  btsm.setTokenLifetime(2 * 1000);
  btsm.clearAllKeysForTesting();
  assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
  assertEquals(checksum, fs.getFileChecksum(TEST_PATH));
  // Sleep for 15 seconds, after which the encryption key will no longer be
  // valid. It needs to be a few multiples of the block token lifetime,
  // since several block tokens are valid at any given time (the current
  // and the last two, by default.)
  LOG.info("Sleeping so that encryption keys expire...");
  Thread.sleep(15 * 1000);
  LOG.info("Done sleeping.");
  assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
  assertEquals(checksum, fs.getFileChecksum(TEST_PATH));
}
/**
 * Exercises encrypted append when the write pipeline must be repaired:
 * with 4 DNs and replication 3, shutting down one replica's DN forces the
 * append to recruit the spare DN via an encrypted block transfer.
 */
@Test
public void testEncryptedAppendRequiringBlockTransfer() throws IOException {
  setEncryptionConfigKeys();
  // start up 4 DNs
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
  fs = getFileSystem(conf);
  // Create a file with replication 3, so its block is on 3 / 4 DNs.
  writeTestDataToFile(fs);
  assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
  // Shut down one of the DNs holding a block replica.
  FSDataInputStream in = fs.open(TEST_PATH);
  List<LocatedBlock> locatedBlocks = DFSTestUtil.getAllBlocks(in);
  in.close();
  // Sanity-check the layout: exactly one block on exactly three DNs.
  assertEquals(1, locatedBlocks.size());
  assertEquals(3, locatedBlocks.get(0).getLocations().length);
  DataNode dn = cluster.getDataNode(
      locatedBlocks.get(0).getLocations()[0].getIpcPort());
  dn.shutdown();
  // Reopen the file for append, which will need to add another DN to the
  // pipeline and in doing so trigger a block transfer.
  writeTestDataToFile(fs);
  assertEquals(PLAIN_TEXT + PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
}
/**
 * Verifies a snapshotted file is still readable via its snapshot path
 * after the live file is deleted, the namespace is checkpointed, and the
 * NameNode restarts from the saved fsimage.
 */
@Test(timeout = 30000)
public void testReadSnapshotFileWithCheckpoint() throws Exception {
  Path foo = new Path("/foo");
  hdfs.mkdirs(foo);
  hdfs.allowSnapshot(foo);
  Path bar = new Path("/foo/bar");
  DFSTestUtil.createFile(hdfs, bar, 100, (short) 2, 100024L);
  hdfs.createSnapshot(foo, "s1");
  // Delete the live copy; only the snapshot retains the data.
  assertTrue(hdfs.delete(bar, true));
  // checkpoint
  NameNode nameNode = cluster.getNameNode();
  NameNodeAdapter.enterSafeMode(nameNode, false);
  NameNodeAdapter.saveNamespace(nameNode);
  NameNodeAdapter.leaveSafeMode(nameNode);
  // restart namenode to load snapshot files from fsimage
  cluster.restartNameNode(true);
  String snapshotPath = Snapshot.getSnapshotPath(foo.toString(), "s1/bar");
  DFSTestUtil.readFile(hdfs, new Path(snapshotPath));
}
/**
 * Verifies a client can still write (append) after the NN and all DNs
 * restart, at which point its cached encryption key is no longer valid and
 * a new one must be negotiated for the write pipeline.
 */
@Test
public void testLongLivedWriteClientAfterRestart() throws IOException {
  setEncryptionConfigKeys();
  cluster = new MiniDFSCluster.Builder(conf).build();
  fs = getFileSystem(conf);
  writeTestDataToFile(fs);
  assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
  // Restart the NN and DN, after which the client's encryption key will no
  // longer be valid.
  cluster.restartNameNode();
  assertTrue(cluster.restartDataNodes());
  cluster.waitActive();
  // The append must succeed with a freshly negotiated key.
  writeTestDataToFile(fs);
  assertEquals(PLAIN_TEXT + PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
}
/**
 * Tests DataTransferProtocol with the given client configuration: writes a
 * multi-block file, verifies its round-tripped contents, and checks that
 * every block is reported with exactly three host locations.
 *
 * @param conf client configuration
 * @throws IOException if there is an I/O error
 */
private void doTest(HdfsConfiguration conf) throws IOException {
  fs = FileSystem.get(cluster.getURI(), conf);
  FileSystemTestHelper.createFile(fs, PATH, NUM_BLOCKS, BLOCK_SIZE);
  byte[] expectedData = FileSystemTestHelper.getFileData(NUM_BLOCKS, BLOCK_SIZE);
  byte[] actualData = DFSTestUtil.readFile(fs, PATH).getBytes("UTF-8");
  assertArrayEquals(expectedData, actualData);
  BlockLocation[] locations = fs.getFileBlockLocations(PATH, 0, Long.MAX_VALUE);
  assertNotNull(locations);
  assertEquals(NUM_BLOCKS, locations.length);
  // Replication is 3, so each block must be reported on exactly 3 hosts.
  for (BlockLocation location : locations) {
    assertNotNull(location.getHosts());
    assertEquals(3, location.getHosts().length);
  }
}
/**
 * Writes the test file to an unencrypted cluster, records its checksum,
 * then shuts the cluster down and restarts it (same storage, no re-format)
 * with encryption enabled.
 *
 * @return the checksum of the file as written before encryption was enabled
 * @throws IOException on cluster or filesystem error
 */
private FileChecksum writeUnencryptedAndThenRestartEncryptedCluster()
    throws IOException {
  cluster = new MiniDFSCluster.Builder(conf).build();
  fs = getFileSystem(conf);
  writeTestDataToFile(fs);
  assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
  FileChecksum checksum = fs.getFileChecksum(TEST_PATH);
  fs.close();
  cluster.shutdown();
  setEncryptionConfigKeys();
  // Reuse the existing name/data dirs and skip format so the previously
  // written file survives the restart.
  cluster = new MiniDFSCluster.Builder(conf)
      .manageDataDfsDirs(false)
      .manageNameDfsDirs(false)
      .format(false)
      .startupOption(StartupOption.REGULAR)
      .build();
  fs = getFileSystem(conf);
  return checksum;
}