@Before
public void setup() throws Exception {
  baseFolder = tempFolder.newFolder("hdfsspout");
  source = new Path(baseFolder.toString() + "/source");
  fs.mkdirs(source);
  archive = new Path(baseFolder.toString() + "/archive");
  fs.mkdirs(archive);
  badfiles = new Path(baseFolder.toString() + "/bad");
  fs.mkdirs(badfiles);
}
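// The setup above relies on a JUnit TemporaryFolder rule named tempFolder. A
// sketch of the likely field declaration (the name matches the code above; the
// exact rule wiring is an assumption):
@Rule
public TemporaryFolder tempFolder = new TemporaryFolder();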
@Test
public void testSimpleSequenceFile() throws Exception {
  // 1) Create a couple of files to consume
  source = new Path("/tmp/hdfsspout/source");
  fs.mkdirs(source);
  archive = new Path("/tmp/hdfsspout/archive");
  fs.mkdirs(archive);

  Path file1 = new Path(source + "/file1.seq");
  createSeqFile(fs, file1, 5);

  Path file2 = new Path(source + "/file2.seq");
  createSeqFile(fs, file2, 5);

  try (AutoCloseableHdfsSpout closeableSpout = makeSpout(Configs.SEQ, SequenceFileReader.defaultFields)) {
    HdfsSpout spout = closeableSpout.spout;
    Map<String, Object> conf = getCommonConfigs();
    openSpout(spout, 0, conf);

    // 2) Consume both files and verify the archived output
    List<String> res = runSpout(spout, "r11");
    Assert.assertEquals(10, res.size());
    Assert.assertEquals(2, listDir(archive).size());

    Path f1 = new Path(archive + "/file1.seq");
    Path f2 = new Path(archive + "/file2.seq");
    checkCollectorOutput_seq((MockCollector) spout.getCollector(), f1, f2);
  }
}
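// The test above depends on a createSeqFile helper that is not shown in this
// section. A minimal sketch of what it might look like, assuming IntWritable
// keys and Text values (the key/value types and the record format are
// assumptions, not taken from the source):
private static void createSeqFile(FileSystem fs, Path file, int rowCount) throws IOException {
  if (fs.exists(file)) {
    fs.delete(file, false);
  }
  try (SequenceFile.Writer writer = SequenceFile.createWriter(fs, fs.getConf(), file,
      IntWritable.class, Text.class)) {
    for (int i = 0; i < rowCount; i++) {
      writer.append(new IntWritable(i), new Text("line " + i));
    }
  }
}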
@Before
public void setUp() throws Exception {
  TEST_UTIL.getConfiguration().set("dfs.data.transfer.protection", protection);
  if (StringUtils.isBlank(encryptionAlgorithm) && StringUtils.isBlank(cipherSuite)) {
    TEST_UTIL.getConfiguration().setBoolean(DFS_ENCRYPT_DATA_TRANSFER_KEY, false);
  } else {
    TEST_UTIL.getConfiguration().setBoolean(DFS_ENCRYPT_DATA_TRANSFER_KEY, true);
  }
  if (StringUtils.isBlank(encryptionAlgorithm)) {
    TEST_UTIL.getConfiguration().unset(DFS_DATA_ENCRYPTION_ALGORITHM_KEY);
  } else {
    TEST_UTIL.getConfiguration().set(DFS_DATA_ENCRYPTION_ALGORITHM_KEY, encryptionAlgorithm);
  }
  if (StringUtils.isBlank(cipherSuite)) {
    TEST_UTIL.getConfiguration().unset(DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY);
  } else {
    TEST_UTIL.getConfiguration().set(DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY, cipherSuite);
  }
  TEST_UTIL.startMiniDFSCluster(3);
  FS = TEST_UTIL.getDFSCluster().getFileSystem();
  testDirOnTestFs = new Path("/" + name.getMethodName().replaceAll("[^0-9a-zA-Z]", "_"));
  FS.mkdirs(testDirOnTestFs);
  entryptionTestDirOnTestFs = new Path("/" + testDirOnTestFs.getName() + "_enc");
  FS.mkdirs(entryptionTestDirOnTestFs);
  createEncryptionZone();
}
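// createEncryptionZone() is referenced above but not shown. A minimal sketch,
// assuming a KeyProvider/KMS is already configured for the mini cluster and a
// key named "test_key" has been created (both the key name and the use of
// HdfsAdmin here are assumptions):
private void createEncryptionZone() throws Exception {
  HdfsAdmin dfsAdmin = new HdfsAdmin(FS.getUri(), TEST_UTIL.getConfiguration());
  dfsAdmin.createEncryptionZone(entryptionTestDirOnTestFs, "test_key");
}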
miniDfs.getFileSystem().mkdirs(new Path("/path/to/schema"));
FSDataOutputStream out = miniDfs.getFileSystem().create(
    new Path("/path/to/schema/schema.avsc"));
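// The fragment above opens the output stream but does not show the write. A
// plausible continuation, assuming a minimal Avro record schema (the schema
// body is purely illustrative):
out.write(("{\"type\": \"record\", \"name\": \"User\", "
    + "\"fields\": [{\"name\": \"id\", \"type\": \"long\"}]}")
    .getBytes(StandardCharsets.UTF_8));
out.close();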
private Path createTargetTmpDir(DistributedFileSystem targetFs, Path targetDir) throws IOException {
  final Path tmp = new Path(targetDir,
      DistCpConstants.HDFS_DISTCP_DIFF_DIRECTORY_NAME + DistCp.rand.nextInt());
  if (!targetFs.mkdirs(tmp)) {
    throw new IOException("Failed to create tmp directory " + tmp);
  }
  return tmp;
}
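// Note that FileSystem.mkdirs also returns true when the directory already
// exists, so a false return signals a creation failure rather than a name
// collision. The caller is expected to remove the tmp directory when done; a
// minimal usage sketch (the try/finally placement is an assumption):
Path tmpDir = null;
try {
  tmpDir = createTargetTmpDir(targetFs, targetDir);
  // ... apply the snapshot-diff based sync, using tmpDir as scratch space ...
} finally {
  if (tmpDir != null) {
    targetFs.delete(tmpDir, true);
  }
}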
@Before
public void createMetaFolder() throws IOException {
  config.set(DistCpConstants.CONF_LABEL_META_FOLDER, "/meta");
  Path meta = new Path("/meta");
  cluster.getFileSystem().mkdirs(meta);
}
@Override
void prepare() throws Exception {
  final Path dirPath = new Path(dir);
  if (!dfs.exists(dirPath)) {
    dfs.mkdirs(dirPath);
    dfs.allowSnapshot(dirPath);
  }
}
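// Once allowSnapshot has been called, named snapshots can be taken on the
// directory. A minimal usage sketch (the snapshot name "s0" is illustrative):
dfs.createSnapshot(dirPath, "s0");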
@Test
public void testSetAcl() throws Exception {
  initCluster(true, false);
  fs.mkdirs(PATH);
  expectException();
  fs.setAcl(PATH, Lists.newArrayList(
      aclEntry(DEFAULT, USER, "foo", READ_WRITE)));
}
@Test
public void testRemoveAclEntries() throws Exception {
  initCluster(true, false);
  fs.mkdirs(PATH);
  expectException();
  fs.removeAclEntries(PATH, Lists.newArrayList(
      aclEntry(DEFAULT, USER, "foo", READ_WRITE)));
}
@Test
public void testRemoveDefaultAcl() throws Exception {
  initCluster(true, false);
  fs.mkdirs(PATH);
  expectException();
  fs.removeDefaultAcl(PATH);
}
@Test
public void testEditLog() throws Exception {
  // With ACLs enabled, set an ACL.
  initCluster(true, true);
  fs.mkdirs(PATH);
  fs.setAcl(PATH, Lists.newArrayList(
      aclEntry(DEFAULT, USER, "foo", READ_WRITE)));

  // Restart with ACLs disabled. Expect successful restart.
  restart(false, false);
}
@Test
public void testSetXAttr() throws Exception {
  initCluster(true, false);
  fs.mkdirs(PATH);
  expectException();
  fs.setXAttr(PATH, "user.foo", null);
}
@Test
public void testRemoveAcl() throws Exception {
  initCluster(true, false);
  fs.mkdirs(PATH);
  expectException();
  fs.removeAcl(PATH);
}
@Test
public void testFsImage() throws Exception {
  // With ACLs enabled, set an ACL.
  initCluster(true, true);
  fs.mkdirs(PATH);
  fs.setAcl(PATH, Lists.newArrayList(
      aclEntry(DEFAULT, USER, "foo", READ_WRITE)));

  // Save a new checkpoint and restart with ACLs still enabled.
  restart(true, true);

  // Restart with ACLs disabled. Expect successful restart.
  restart(false, false);
}
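// The ACL tests above share initCluster, restart, and expectException helpers
// that are not shown. A minimal sketch of two of them, assuming a JUnit
// ExpectedException rule named "exception" and the dfs.namenode.acls.enabled
// flag (the exact helper bodies are assumptions):
private void initCluster(boolean format, boolean aclsEnabled) throws Exception {
  conf = new Configuration();
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, aclsEnabled);
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).format(format).build();
  cluster.waitActive();
  fs = cluster.getFileSystem();
}

private void expectException() {
  exception.expect(AclException.class);
}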
@Before
public void init() throws Exception {
  TemporaryFolder folder = new TemporaryFolder();
  folder.create();
  jsc = new JavaSparkContext(HoodieClientTestUtils.getSparkConfForTest("TestHoodieCommitArchiveLog"));
  basePath = folder.getRoot().getAbsolutePath();
  hadoopConf = dfs.getConf();
  jsc.hadoopConfiguration().addResource(dfs.getConf());
  dfs.mkdirs(new Path(basePath));
  HoodieTestUtils.init(hadoopConf, basePath);
}
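// A matching teardown for the Spark context created above might look like this
// (a sketch; the per-test cleanup policy is an assumption):
@After
public void tearDown() {
  if (jsc != null) {
    jsc.stop();
  }
}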
@Test
public void testFsImage() throws Exception {
  // With XAttrs enabled, set an XAttr.
  initCluster(true, true);
  fs.mkdirs(PATH);
  fs.setXAttr(PATH, "user.foo", null);

  // Save a new checkpoint and restart with XAttrs still enabled.
  restart(true, true);

  // Restart with XAttrs disabled. Expect successful restart.
  restart(false, false);
}
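// For the XAttr variants, initCluster presumably toggles the analogous
// dfs.namenode.xattrs.enabled flag instead of the ACL one. A sketch of the
// relevant configuration line (the surrounding helper is assumed to mirror the
// ACL version above):
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, xattrsEnabled);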
@Before
public void setUp() throws Exception {
  conf = new Configuration();
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION).build();
  cluster.waitActive();
  fsn = cluster.getNamesystem();
  hdfs = cluster.getFileSystem();
  hdfs.mkdirs(dir);
}
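// A matching teardown that shuts the mini cluster down (a standard pattern,
// not taken from this source):
@After
public void tearDown() {
  if (cluster != null) {
    cluster.shutdown();
    cluster = null;
  }
}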