public static MiniDFSCluster getLocalHDFSCluster() throws Exception {
  setHadoopHomeWindows();
  Configuration conf = new HdfsConfiguration();
  conf.set("fs.defaultFS", "hdfs://localhost");
  File hdfsPath = new File(System.getProperty("user.dir") + File.separator
      + "hadoop" + File.separator + "hdfs");
  conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, hdfsPath.getAbsolutePath());
  MiniDFSCluster miniDFSCluster = new MiniDFSCluster.Builder(conf)
      .nameNodePort(12345)
      .nameNodeHttpPort(12341)
      .numDataNodes(1)
      .storagesPerDatanode(2)
      .format(true)
      .racks(null)
      .build();
  miniDFSCluster.waitActive();
  return miniDFSCluster;
}
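A minimal usage sketch (not part of the original snippet; the path, file contents, and assertion are illustrative) showing the usual start/use/shutdown pattern around such a helper:

// Illustrative only: exercise the helper above, then tear the cluster down.
MiniDFSCluster cluster = getLocalHDFSCluster();
try {
  FileSystem fs = cluster.getFileSystem();
  Path testFile = new Path("/smoke-test");   // hypothetical path
  try (FSDataOutputStream out = fs.create(testFile)) {
    out.writeBytes("hello");
  }
  assertTrue(fs.exists(testFile));           // assumes JUnit's assertTrue is in scope
} finally {
  cluster.shutdown();                        // releases the fixed ports and storage dirs
}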
conf.setInt(
    DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, 1);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3)
    .storagesPerDatanode(1).build();
FileSystem fs = cluster.getFileSystem();
FSDataOutputStream create = fs.create(new Path("/test"));
/**
 * Initializes the cluster.
 *
 * @param numDataNodes number of datanodes
 * @param storagesPerDatanode number of storage locations on each datanode
 * @param failedVolumesTolerated number of acceptable volume failures
 * @throws Exception if there is any failure
 */
private void initCluster(int numDataNodes, int storagesPerDatanode,
    int failedVolumesTolerated) throws Exception {
  conf = new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 512L);
  /*
   * Lower the DN heartbeat, DF rate, and recheck interval to one second
   * so state about failures and datanode death propagates faster.
   */
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_DF_INTERVAL_KEY, 1000);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
  conf.setInt(DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY,
      failedVolumesTolerated);
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes)
      .storagesPerDatanode(storagesPerDatanode).build();
  cluster.waitActive();
  fs = cluster.getFileSystem();
  dataDir = cluster.getDataDirectory();
  long dnCapacity = DFSTestUtil.getDatanodeCapacity(
      cluster.getNamesystem().getBlockManager().getDatanodeManager(), 0);
  volumeCapacity = dnCapacity / cluster.getStoragesPerDatanode();
}
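An illustrative invocation (the argument values are assumptions, not from the source): three datanodes with two volumes each, each datanode tolerating one failed volume, after which volumeCapacity can drive capacity assertions:

// Illustrative only: 3 datanodes x 2 volumes, 1 tolerated volume failure per node.
initCluster(3, 2, 1);
// volumeCapacity now holds the per-volume capacity, i.e. the amount the
// NameNode should deduct from a datanode's reported capacity per failed volume.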
final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
    .numDataNodes(3)
    .storagesPerDatanode(NUM_STORAGES_PER_DN)
    .build();
try {
    .storagesPerDatanode(4)
    .build();
try {
    .storageCapacities(new long[] { BLOCK_SIZE * 10 })
    .storageTypes(new StorageType[] { DEFAULT })
    .storagesPerDatanode(1)
    .build();
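A hedged reconstruction of the full builder call this fragment plausibly belongs to; numDataNodes(1) and the conf and BLOCK_SIZE variables are assumptions based on typical surrounding test code, not taken from the fragment:

MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
    .numDataNodes(1)                                     // assumed; not in the fragment
    .storageCapacities(new long[] { BLOCK_SIZE * 10 })   // cap the volume at ten blocks
    .storageTypes(new StorageType[] { StorageType.DEFAULT })
    .storagesPerDatanode(1)                              // a single volume on the datanode
    .build();
cluster.waitActive();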
    .storagesPerDatanode(2)
    .build();
    .Builder(conf)
    .numDataNodes(1)
    .storagesPerDatanode(numInitialStorages)
    .build();
cluster.waitActive();
    .numDataNodes(2)
    .racks(new String[]{"/default/rack0", "/default/rack0"})
    .storagesPerDatanode(2)
    .storageTypes(new StorageType[][]{
        {StorageType.SSD, StorageType.DISK},
    .storagesPerDatanode(STORAGES_PER_DATANODE)
    .numDataNodes(numDatanodes)
    .build();
TestContext(Configuration conf, int numNameServices) throws Exception {
  this.numNameServices = numNameServices;
  MiniDFSCluster.Builder bld = new MiniDFSCluster.Builder(conf).
      numDataNodes(1).
      storagesPerDatanode(1);
  if (numNameServices > 1) {
    bld.nnTopology(MiniDFSNNTopology.
        simpleFederatedTopology(numNameServices));
  }
  cluster = bld.build();
  cluster.waitActive();
  dfs = new DistributedFileSystem[numNameServices];
  for (int i = 0; i < numNameServices; i++) {
    dfs[i] = cluster.getFileSystem(i);
  }
  bpids = new String[numNameServices];
  for (int i = 0; i < numNameServices; i++) {
    bpids[i] = cluster.getNamesystem(i).getBlockPoolId();
  }
  datanode = cluster.getDataNodes().get(0);
  blockScanner = datanode.getBlockScanner();
  for (int i = 0; i < numNameServices; i++) {
    dfs[i].mkdirs(new Path("/test"));
  }
  data = datanode.getFSDataset();
  volumes = data.getVolumes();
}