    .hosts(hosts)
    .build();
namenode = conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");
try {
  Configuration conf = new Configuration();
  dfs = new MiniDFSCluster.Builder(conf).racks(rack1).hosts(hosts1).build();
  dfs.waitActive();
try {
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DN)
      .hosts(hosts).racks(racks).build();
dfs = new MiniDFSCluster.Builder(conf).racks(rack1).hosts(hosts1).build();
dfs.waitActive();
.hosts(hosts).numDataNodes(nDataNodes).racks(racks).build();
"/d1/r2", "/d2/r3", "/d2/r3"}; MiniDFSCluster miniCluster = new MiniDFSCluster.Builder(conf).racks(racks) .hosts(hosts).numDataNodes(hosts.length).build(); miniCluster.waitActive();
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DN).hosts(hosts)
    .racks(racks).build();
assertNotNull("Failed Cluster Creation", cluster);
String[] hosts = { "foo1.example.com", "foo2.example.com" };
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2)
    .racks(racks).hosts(hosts).build();
cluster.waitActive();
dfs = new MiniDFSCluster.Builder(conf).numDataNodes(4)
    .racks(new String[] { "/rack0", "/rack0", "/rack1", "/rack1" })
    .hosts(new String[] { "host0", "host1", "host2", "host3" })
    .build();
FileSystem fs = dfs.getFileSystem();
.hosts(hosts).racks(racks).simulatedCapacities(capacities).build();
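In fragments like the one above, simulatedCapacities assigns each simulated DataNode a fake capacity in bytes, which is common in balancer tests. A sketch, assuming the same per-DataNode hosts and racks arrays as the surrounding snippets:

// Capacities are per DataNode, like hosts and racks; values are bytes.
long[] capacities = { 2 * 1024 * 1024 * 1024L,   // DataNode 0: 2 GB
                      4 * 1024 * 1024 * 1024L }; // DataNode 1: 4 GB
cluster = new MiniDFSCluster.Builder(conf)
    .numDataNodes(capacities.length)
    .hosts(hosts)
    .racks(racks)
    .simulatedCapacities(capacities)
    .build();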
dfs = new MiniDFSCluster.Builder(job.getConfiguration()).numDataNodes(4)
    .racks(new String[] { "/rack0", "/rack0", "/rack1", "/rack1" })
    .hosts(new String[] { "host0", "host1", "host2", "host3" })
    .build();
FileSystem fs = dfs.getFileSystem();
.hosts(new String[] {"host1.com"}).build(); dfs.waitActive(); fileSys = dfs.getFileSystem();
DistributedFileSystem dfs = null;
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DN)
    .hosts(hosts).racks(racks).build();
DistributedFileSystem dfs;
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DN)
    .hosts(hosts).racks(racks).build();
conf.set("fs.hdfs.impl", MissingBlockFileSystem.class.getName()); conf.setBoolean("dfs.replication.considerLoad", false); dfs = new MiniDFSCluster.Builder(conf).racks(rack1).hosts(hosts1) .build(); dfs.waitActive();
public void testMultiLevelInput() throws Exception {
  JobConf job = new JobConf(conf);
  job.setBoolean("dfs.replication.considerLoad", false);
  dfs = new MiniDFSCluster.Builder(job).racks(rack1).hosts(hosts1).build();
  dfs.waitActive();

  String namenode = (dfs.getFileSystem()).getUri().getHost() + ":" +
      (dfs.getFileSystem()).getUri().getPort();
  FileSystem fileSys = dfs.getFileSystem();
  if (!fileSys.mkdirs(dir1)) {
    throw new IOException("Mkdirs failed to create " + dir1.toString());
  }
  writeFile(job, file1, (short) 1, 1);
  writeFile(job, file2, (short) 1, 1);

  // Split it using a CombinedFile input format.
  DummyFileInputFormat inFormat = new DummyFileInputFormat();
  inFormat.setInputPaths(job, root);

  // By default, multi-level/recursive inputs are not allowed.
  boolean exceptionThrown = false;
  try {
    inFormat.getSplits(job, 1);
  } catch (Exception e) {
    exceptionThrown = true;
  }
  assertTrue("Exception should be thrown by default for scanning a "
      + "directory with directories inside.", exceptionThrown);

  // Enable multi-level/recursive inputs.
  job.setBoolean(FileInputFormat.INPUT_DIR_RECURSIVE, true);
  InputSplit[] splits = inFormat.getSplits(job, 1);
  assertEquals(2, splits.length);
}
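In the org.apache.hadoop.mapreduce.lib.input API, the FileInputFormat.INPUT_DIR_RECURSIVE constant used above resolves to the configuration key shown below; if the class in scope lacks the constant, setting the key directly should be equivalent (verify the key against your Hadoop version):

// Equivalent to the constant-based call in the test above.
job.setBoolean("mapreduce.input.fileinputformat.input.dir.recursive", true);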
builder.hosts(hosts); builder.numDataNodes(servers); builder.format(true);
@Before
@Override
public void setUp() throws Exception {
  super.setUp();
  conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
      HDFSPolicyProvider.class, PolicyProvider.class);

  // Many of the tests expect a replication value of 1 in the output.
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);

  // Build racks and hosts configuration to test dfsAdmin -printTopology.
  String[] racks = {"/rack1", "/rack1", "/rack2", "/rack2",
      "/rack2", "/rack3", "/rack4", "/rack4"};
  String[] hosts = {"host1", "host2", "host3", "host4",
      "host5", "host6", "host7", "host8"};
  dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(8)
      .racks(racks)
      .hosts(hosts)
      .build();
  dfsCluster.waitClusterUp();

  namenode = conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");
  username = System.getProperty("user.name");
  fs = dfsCluster.getFileSystem();
  assertTrue("Not a HDFS: " + fs.getUri(),
      fs instanceof DistributedFileSystem);
}
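As a hedged sketch of what this setup enables: the topology that -printTopology reports can be exercised programmatically through DFSAdmin, the class behind the hdfs dfsadmin CLI (output-capture plumbing is omitted here):

import org.apache.hadoop.hdfs.tools.DFSAdmin;
import org.apache.hadoop.util.ToolRunner;

// Run -printTopology against the mini cluster; expect one section per
// rack (/rack1 through /rack4), listing host1..host8.
int ret = ToolRunner.run(new DFSAdmin(conf),
    new String[] { "-printTopology" });
assertEquals("dfsadmin -printTopology should succeed", 0, ret);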
@Before
public void setup() throws IOException {
  StaticMapping.resetMap();
  Configuration conf = new HdfsConfiguration();
  final String[] racks = {"/RACK0", "/RACK0", "/RACK2", "/RACK3", "/RACK2"};
  final String[] hosts = {"/host0", "/host1", "/host2", "/host3", "/host4"};

  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
  conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY,
      DEFAULT_BLOCK_SIZE / 2);
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(5).racks(racks)
      .hosts(hosts).build();
  cluster.waitActive();
  nameNodeRpc = cluster.getNameNodeRpc();
  namesystem = cluster.getNamesystem();
  perm = new PermissionStatus("TestDefaultBlockPlacementPolicy", null,
      FsPermission.getDefault());
}
private MiniDFSCluster newDFSCluster(JobConf conf) throws Exception {
  return new MiniDFSCluster.Builder(conf).numDataNodes(4)
      .racks(new String[]{"/rack0", "/rack0", "/rack1", "/rack1"})
      .hosts(new String[]{"host0", "host1", "host2", "host3"})
      .build();
}
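A hypothetical caller of this factory would normally own the teardown, for example:

// Hypothetical usage of the factory above; the try/finally guarantees
// the cluster is torn down even if the test body throws.
MiniDFSCluster dfs = newDFSCluster(job);
try {
  dfs.waitActive();
  // ... exercise dfs.getFileSystem() here ...
} finally {
  dfs.shutdown();
}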