@Override
public void evaluate() throws Throwable {
  try {
    System.setProperty(TEST_BUILD_DATA, "target/test/data");
    hadoopConf = hadoopConfSupplier.get();
    dfscluster = new MiniDFSCluster.Builder(hadoopConf)
        .numDataNodes(3)
        .build();
    dfscluster.waitActive();
  } finally {
    if (dfscluster != null) {
      dfscluster.shutdown();
    }
    System.clearProperty(TEST_BUILD_DATA);
  }
}
};
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
new MiniDFSNNTopology.NNConf("nn2"))); miniDFSCluster = new MiniDFSCluster.Builder(conf) .numDataNodes(numDataNodes).format(format) .racks(racks).nnTopology(topo).build(); miniDFSCluster.waitActive(); } else { miniDFSCluster = new MiniDFSCluster.Builder(conf) .numDataNodes(numDataNodes).format(format) .racks(racks).build();
private Socket createSocket() throws IOException {
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  cluster.waitActive();
  LOG.info("MiniDFSCluster started.");
  return DFSOutputStream.createSocketForPipeline(
      new DatanodeInfo(cluster.dataNodes.get(0).datanode.getDatanodeId()),
      1, cluster.getFileSystem().getClient());
}
}
protected static void startCluster() throws IOException {
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  cluster.waitActive();
}
@BeforeClass
public static void clusterSetupAtBegining() throws IOException,
    LoginException, URISyntaxException {
  Configuration conf = new HdfsConfiguration();
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
  fc = FileContext.getFileContext(cluster.getURI(0), conf);
  defaultWorkingDirectory = fc.makeQualified(new Path("/user/"
      + UserGroupInformation.getCurrentUser().getShortUserName()));
  fc.mkdir(defaultWorkingDirectory, FileContext.DEFAULT_PERM, true);
}
private void startCluster() throws IOException {
  conf = new HdfsConfiguration();
  conf.setInt("dfs.blocksize", 1024 * 1024);
  cluster = new Builder(conf).numDataNodes(REPL_FACTOR).build();
  cluster.waitActive();
  fs = cluster.getFileSystem();
  nn = cluster.getNameNode(0);
  assertNotNull(nn);
  dn0 = cluster.getDataNodes().get(0);
  assertNotNull(dn0);
  blockPoolId = cluster.getNameNode(0).getNamesystem().getBlockPoolId();
}
/**
 * Set up the cluster and start NameNode and DataNodes according to the
 * corresponding scheme.
 */
void setupCluster() throws Exception {
  cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(clusterScheme.numDataNodes)
      .storageTypes(clusterScheme.storageTypes)
      .storageCapacities(clusterScheme.storageCapacities)
      .build();
  cluster.waitActive();
  dfs = cluster.getFileSystem();
}
@BeforeClass
public static void setupCluster() throws IOException {
  Configuration conf = new HdfsConfiguration();
  conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 4096);
  conf.set("fs.hdfs.impl.disable.cache", "true");
  cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(1)
      .build();
  fs = cluster.getFileSystem();
}
@BeforeClass
public static void clusterSetupAtBeginning() throws IOException {
  cluster = new MiniDFSCluster.Builder(clusterConf)
      .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2))
      .numDataNodes(2)
      .build();
  cluster.waitClusterUp();
  fHdfs = cluster.getFileSystem(0);
  fHdfs2 = cluster.getFileSystem(1);
}
@Before
public void setup() throws Exception {
  conf.setInt(DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_KEY, KEEPALIVE_TIMEOUT);
  conf.setInt(DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY, 0);
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  dn = cluster.getDataNodes().get(0);
}
@BeforeClass
public static void setup() throws Exception {
  Configuration configuration = TestCopyMapper.getConfigurationForCluster();
  configuration.set(HdfsClientConfigKeys.DFS_CHECKSUM_COMBINE_MODE_KEY,
      "COMPOSITE_CRC");
  TestCopyMapper.setCluster(new MiniDFSCluster.Builder(configuration)
      .numDataNodes(1)
      .format(true)
      .build());
}
@BeforeClass
public static void setup() throws Exception {
  cluster = new MiniDFSCluster.Builder(getConfigurationForCluster())
      .numDataNodes(1).format(true).build();
  for (int i = 0; i < N_FILES; ++i) {
    createFile("/tmp/source/" + String.valueOf(i));
  }
  FileSystem fileSystem = cluster.getFileSystem();
  expectedFilePaths.add(fileSystem.listStatus(
      new Path("/tmp/source/0"))[0].getPath().getParent().toString());
}
@BeforeClass
public static void create() throws IOException {
  cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).format(true)
      .build();
}
@BeforeClass
public static void init() throws Exception {
  conf = new Configuration();
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).format(true)
      .build();
  cluster.waitActive();
  fs = cluster.getFileSystem();
}
@BeforeClass
public static void create() throws IOException {
  cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).format(true)
      .build();
  fs = cluster.getFileSystem();
  buildExpectedValuesMap();
}
@Before
public void setup() throws Exception {
  Configuration hdfsConf = new HdfsConfiguration();
  hdfsConf.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 0);
  String namenodeDir = new File(MiniDFSCluster.getBaseDirectory(), "name")
      .getAbsolutePath();
  hdfsConf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, namenodeDir);
  hdfsConf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, namenodeDir);
  hdfsConf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCKSIZE);
  cluster = new MiniDFSCluster.Builder(hdfsConf).numDataNodes(15).build();
  fs = cluster.getFileSystem();
  fs.enableErasureCodingPolicy(ecPolicy.getName());
  fs.setErasureCodingPolicy(new Path("/"), ecPolicy.getName());
  cluster.waitActive();
  conf = new Configuration();
  submitDir = new Path("/");
  testFile = new Path("/testfile");
  DFSTestUtil.writeFile(fs, testFile,
      StripedFileTestUtil.generateBytes(BLOCKSIZE));
  conf.set(FileInputFormat.INPUT_DIR,
      fs.getUri().toString() + testFile.toString());
}
public static MiniDFSCluster getLocalHDFSCluster() throws Exception {
  setHadoopHomeWindows();
  Configuration conf = new HdfsConfiguration();
  conf.set("fs.defaultFS", "hdfs://localhost");
  File hdfsPath = new File(System.getProperty("user.dir") + File.separator
      + "hadoop" + File.separator + "hdfs");
  conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, hdfsPath.getAbsolutePath());
  MiniDFSCluster miniDFSCluster = new MiniDFSCluster.Builder(conf)
      .nameNodePort(12345)
      .nameNodeHttpPort(12341)
      .numDataNodes(1)
      .storagesPerDatanode(2)
      .format(true)
      .racks(null)
      .build();
  miniDFSCluster.waitActive();
  return miniDFSCluster;
}
}
@BeforeClass
public static void setup() throws IOException {
  // create configuration, dfs, file system
  localFs = FileSystem.getLocal(conf);
  testRootDir = new Path("target",
      TestJobResourceUploaderWithSharedCache.class.getName() + "-tmpDir")
      .makeQualified(localFs.getUri(), localFs.getWorkingDirectory());
  dfs = new MiniDFSCluster.Builder(conf).numDataNodes(1).format(true).build();
  remoteFs = dfs.getFileSystem();
}
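Several of the @BeforeClass setups above allocate a MiniDFSCluster without showing the matching teardown. A minimal sketch of the complementary JUnit 4 @AfterClass hook, assuming the static cluster field used in those examples, looks like this:

// Teardown sketch for the setups above; assumes the static `cluster`
// field from the @BeforeClass examples and org.junit.AfterClass.
// MiniDFSCluster.shutdown() stops the NameNode and all DataNodes.
@AfterClass
public static void tearDownCluster() {
  if (cluster != null) {
    cluster.shutdown();
    cluster = null;
  }
}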