new MiniDFSNNTopology.NNConf("nn2"))); miniDFSCluster = new MiniDFSCluster.Builder(conf) .numDataNodes(numDataNodes).format(format) .racks(racks).nnTopology(topo).build(); miniDFSCluster.waitActive(); } else { miniDFSCluster = new MiniDFSCluster.Builder(conf) .numDataNodes(numDataNodes).format(format) .racks(racks).build();
@BeforeClass public static void setup() throws IOException { // create configuration, dfs, file system localFs = FileSystem.getLocal(conf); testRootDir = new Path("target", TestJobResourceUploaderWithSharedCache.class.getName() + "-tmpDir") .makeQualified(localFs.getUri(), localFs.getWorkingDirectory()); dfs = new MiniDFSCluster.Builder(conf).numDataNodes(1).format(true).build(); remoteFs = dfs.getFileSystem(); }
/**
 * Starts a local single-namenode, single-datanode MiniDFSCluster for testing
 * and blocks until it is active.
 *
 * @return the running cluster; the caller is responsible for shutting it down
 * @throws Exception if the cluster fails to start
 */
public static MiniDFSCluster getLocalHDFSCluster() throws Exception {
  setHadoopHomeWindows();
  Configuration conf = new HdfsConfiguration();
  conf.set("fs.defaultFS", "hdfs://localhost");
  // Keep mini-cluster storage under <cwd>/hadoop/hdfs. Use the
  // File(parent, child) constructor instead of hand-concatenating
  // File.separator — same path, platform-safe by construction.
  File hdfsPath =
      new File(new File(System.getProperty("user.dir"), "hadoop"), "hdfs");
  conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, hdfsPath.getAbsolutePath());
  // NOTE(review): fixed ports (12345/12341) can collide when tests run in
  // parallel — consider port 0 (ephemeral) if startup flakiness appears.
  MiniDFSCluster miniDFSCluster = new MiniDFSCluster.Builder(conf)
      .nameNodePort(12345)
      .nameNodeHttpPort(12341)
      .numDataNodes(1)
      .storagesPerDatanode(2)
      .format(true)
      .racks(null)
      .build();
  miniDFSCluster.waitActive();
  return miniDFSCluster;
}
}
@BeforeClass
public static void setup() throws Exception {
  // Start a freshly formatted one-datanode cluster using the shared
  // test configuration and register it with the harness.
  Configuration clusterConf = getConfigurationForCluster();
  MiniDFSCluster dfsCluster = new MiniDFSCluster.Builder(clusterConf)
      .numDataNodes(1)
      .format(true)
      .build();
  setCluster(dfsCluster);
}
@BeforeClass
public static void create() throws IOException {
  // Single-datanode mini cluster, formatted fresh for this test class.
  MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(config);
  cluster = builder.numDataNodes(1).format(true).build();
  fs = cluster.getFileSystem();
  // Precompute the values the tests assert against.
  buildExpectedValuesMap();
}
@BeforeClass
public static void setup() throws Exception {
  // Cache the configuration so individual tests can inspect it later.
  configuration = getConfigurationForCluster();
  MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(configuration);
  cluster = builder.numDataNodes(1).format(true).build();
}
@BeforeClass
public static void create() throws IOException {
  // Minimal cluster for these tests: one datanode, freshly formatted.
  MiniDFSCluster.Builder builder =
      new MiniDFSCluster.Builder(config).numDataNodes(1).format(true);
  cluster = builder.build();
}
@BeforeClass
public static void setup() throws Exception {
  // One-datanode, freshly formatted cluster for the whole test class.
  cluster = new MiniDFSCluster.Builder(getConfigurationForCluster())
      .numDataNodes(1)
      .format(true)
      .build();
  // Populate the source directory with N_FILES files. String.valueOf(i)
  // was redundant — string concatenation converts the int implicitly —
  // and the loop body is now braced per house style.
  for (int i = 0; i < N_FILES; ++i) {
    createFile("/tmp/source/" + i);
  }
  FileSystem fileSystem = cluster.getFileSystem();
  expectedFilePaths.add(fileSystem.listStatus(
      new Path("/tmp/source/0"))[0].getPath().getParent().toString());
}
/**
 * Builds a namenode-only cluster (zero datanodes) over externally managed,
 * pre-existing storage directories without reformatting them.
 *
 * @param c configuration to start the cluster with
 * @return the newly built cluster
 * @throws IOException if cluster startup fails
 */
private MiniDFSCluster createCluster(Configuration c) throws IOException {
  MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(c);
  builder.numDataNodes(0);
  builder.startupOption(StartupOption.REGULAR);
  builder.format(false);
  builder.manageDataDfsDirs(false);
  builder.manageNameDfsDirs(false);
  return builder.build();
}
/**
@BeforeClass
public static void setup() throws Exception {
  Configuration conf = TestCopyMapper.getConfigurationForCluster();
  // Exercise distcp checksum comparison with COMPOSITE_CRC combine mode.
  conf.set(HdfsClientConfigKeys.DFS_CHECKSUM_COMBINE_MODE_KEY,
      "COMPOSITE_CRC");
  TestCopyMapper.setCluster(
      new MiniDFSCluster.Builder(conf).numDataNodes(1).format(true).build());
}
@BeforeClass
public static void setup() throws Exception {
  // One-datanode, freshly formatted cluster shared by all tests.
  cluster = new MiniDFSCluster.Builder(getConfigurationForCluster())
      .numDataNodes(1)
      .format(true)
      .build();
  // Create N_FILES source files. Dropped the redundant String.valueOf(i)
  // (concatenation converts the int) and braced the loop body per style.
  for (int i = 0; i < N_FILES; ++i) {
    createFile("/tmp/source/" + i);
  }
  FileSystem fileSystem = cluster.getFileSystem();
  expectedFilePaths.add(fileSystem.listStatus(
      new Path("/tmp/source/0"))[0].getPath().getParent().toString());
}
@BeforeClass
public static void create() throws IOException {
  // One datanode over freshly formatted storage — smallest useful cluster.
  cluster = new MiniDFSCluster.Builder(config)
      .format(true)
      .numDataNodes(1)
      .build();
}
@BeforeClass
public static void init() throws Exception {
  conf = new Configuration();
  // XAttr support must be enabled before the namenode starts.
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
  MiniDFSCluster.Builder builder =
      new MiniDFSCluster.Builder(conf).numDataNodes(1).format(true);
  cluster = builder.build();
  cluster.waitActive();
  fs = cluster.getFileSystem();
}
@Before
public void setUp() throws Exception {
  conf = new Configuration();
  // REPLICATION datanodes so blocks can reach full replication.
  cluster = new MiniDFSCluster.Builder(conf)
      .format(true)
      .numDataNodes(REPLICATION)
      .build();
  cluster.waitActive();
  hdfs = cluster.getFileSystem();
}
/**
 * Starts a cluster with no datanodes over existing storage the mini
 * cluster does not own: no format, no name/data directory management.
 *
 * @param c configuration for the cluster
 * @return the built cluster
 * @throws IOException on startup failure
 */
private MiniDFSCluster createCluster(Configuration c) throws IOException {
  MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(c)
      .numDataNodes(0)
      .startupOption(StartupOption.REGULAR);
  builder.format(false)
      .manageDataDfsDirs(false)
      .manageNameDfsDirs(false);
  return builder.build();
}
/**
/**
 * Create an instance of a newly configured cluster for testing that does
 * not manage its own directories or files.
 */
private MiniDFSCluster createCluster() throws IOException {
  // Start in UPGRADE mode against pre-existing storage: no formatting, and
  // the mini cluster leaves namenode/datanode directories alone.
  MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
  builder.numDataNodes(0)
      .format(false)
      .manageDataDfsDirs(false)
      .manageNameDfsDirs(false)
      .startupOption(StartupOption.UPGRADE);
  return builder.build();
}
private static void initCluster(boolean format) throws Exception {
  // Four-datanode cluster; `format` lets callers reuse existing storage.
  cluster = new MiniDFSCluster.Builder(conf)
      .format(format)
      .numDataNodes(4)
      .build();
  hdfs = cluster.getFileSystem();
  assertTrue(hdfs instanceof DistributedFileSystem);
  // Extra handles impersonating two distinct users; each must also be
  // a DistributedFileSystem for the permission tests below.
  hdfsAsUser1 = DFSTestUtil.getFileSystemAs(user1, conf);
  assertTrue(hdfsAsUser1 instanceof DistributedFileSystem);
  hdfsAsUser2 = DFSTestUtil.getFileSystemAs(user2, conf);
  assertTrue(hdfsAsUser2 instanceof DistributedFileSystem);
}
@BeforeClass
public static void create() throws IOException {
  config = getJobForClient().getConfiguration();
  // Start the byte counter at zero so tests observe accumulation only.
  config.setLong(DistCpConstants.CONF_LABEL_TOTAL_BYTES_TO_BE_COPIED, 0);
  cluster = new MiniDFSCluster.Builder(config)
      .format(true)
      .numDataNodes(1)
      .build();
}
@Before
public void setUp() throws Exception {
  conf = new Configuration();
  cluster = new MiniDFSCluster.Builder(conf)
      .format(true)
      .numDataNodes(REPLICATION)
      .build();
  cluster.waitActive();
  // Grab the internal namesystem handles the tests poke at directly.
  fsn = cluster.getNamesystem();
  fsdir = fsn.getFSDirectory();
  blockmanager = fsn.getBlockManager();
  hdfs = cluster.getFileSystem();
}
@BeforeClass
public static void create() throws IOException {
  // Formatted one-datanode cluster plus the expected-values fixture.
  cluster = new MiniDFSCluster.Builder(config)
      .format(true)
      .numDataNodes(1)
      .build();
  fs = cluster.getFileSystem();
  buildExpectedValuesMap();
}