/**
 * Boots a single-datanode local HDFS mini cluster for testing and waits
 * until it is active before returning it.
 *
 * <p>The namenode listens on port 12345 (HTTP on 12341) and all cluster
 * state is kept under {@code <cwd>/hadoop/hdfs}.
 *
 * @return a running, formatted {@link MiniDFSCluster}
 * @throws Exception if the cluster fails to start
 */
public static MiniDFSCluster getLocalHDFSCluster() throws Exception {
    setHadoopHomeWindows();

    Configuration conf = new HdfsConfiguration();
    conf.set("fs.defaultFS", "hdfs://localhost");

    // Keep all mini-cluster storage under the current working directory.
    File baseDir = new File(System.getProperty("user.dir")
            + File.separator + "hadoop" + File.separator + "hdfs");
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.getAbsolutePath());

    MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
    builder.nameNodePort(12345);
    builder.nameNodeHttpPort(12341);
    builder.numDataNodes(1);
    builder.storagesPerDatanode(2);
    builder.format(true);
    builder.racks(null);

    MiniDFSCluster cluster = builder.build();
    cluster.waitActive();
    return cluster;
}
}
/**
 * Creates and starts a MiniDFSCluster with the namenode bound to the given
 * port.
 *
 * @param port  namenode RPC port
 * @param numDN number of datanodes to start
 * @param map   extra configuration overrides applied before the
 *              test-specific defaults
 * @return the started cluster
 * @throws Exception if the cluster fails to start
 */
public static MiniDFSCluster initMiniCluster(int port, int numDN, HashMap<String, String> map) throws Exception {
    System.setProperty("test.build.data", "hdfs-test-cluster");

    Configuration conf = new HdfsConfiguration();
    // Caller-supplied overrides go in first, then the test-specific setting.
    for (Entry<String, String> override : map.entrySet()) {
        conf.set(override.getKey(), override.getValue());
    }
    conf.set("dfs.namenode.fs-limits.min-block-size", "1024");

    return new MiniDFSCluster.Builder(conf)
            .numDataNodes(numDN)
            .nameNodePort(port)
            .build();
}
builder.nameNodePort(54321); try { miniDFSCluster = builder.build();
URISyntaxException { if (!noDFS) { dfs = new MiniDFSCluster.Builder(conf).nameNodePort(nnPort) .numDataNodes(numDataNodes).startupOption(dfsOpts).build(); LOG.info("Started MiniDFSCluster -- namenode on port "
cluster = new MiniDFSCluster.Builder(conf).nameNodePort(nnport) .format(false) .build(); cluster = new MiniDFSCluster.Builder(conf).nameNodePort(nnport) .format(false) .build();
cluster = new MiniDFSCluster.Builder(conf).nameNodePort(nnport) .format(false) .build(); cluster = new MiniDFSCluster.Builder(conf).nameNodePort(nnport) .format(false) .build();
cluster = new MiniDFSCluster.Builder(conf).nameNodePort(nnport) .format(false) .build(); cluster = new MiniDFSCluster.Builder(conf).nameNodePort(nnport) .format(false) .build();
cluster = new MiniDFSCluster.Builder(conf).nameNodePort(nnport) .format(false) .build(); cluster = new MiniDFSCluster.Builder(conf).nameNodePort(nnport) .format(false) .build();
.nameNodePort(origPort) .nameNodeHttpPort(origHttpPort) .format(true).build();
public void testFedSingleNN() throws IOException { MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf) .nameNodePort(9927).build(); try { NameNode nn1 = cluster.getNameNode();
cluster = new MiniDFSCluster.Builder(config).nameNodePort(nnPort).build(); cluster.waitActive(); break;
/**
 * Verifies that {@code fs -mv} succeeds when the target URI omits the port
 * component while the source URI carries one.
 */
@Test
public void testMoveWithTargetPortEmpty() throws Exception {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf)
                .format(true)
                .numDataNodes(2)
                .nameNodePort(8020)
                .waitSafeMode(true)
                .build();
        FileSystem srcFs = cluster.getFileSystem();
        FsShell shell = new FsShell();
        shell.setConf(conf);

        // Create the source path.
        String[] mkdirArgs = {"-mkdir", "/testfile"};
        ToolRunner.run(shell, mkdirArgs);

        // Move it to a target URI that has no port component.
        String[] mvArgs = {
            "-mv",
            srcFs.getUri() + "/testfile",
            "hdfs://" + srcFs.getUri().getHost() + "/testfile2"
        };
        int ret = ToolRunner.run(shell, mvArgs);
        assertEquals("mv should have succeeded", 0, ret);
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
MiniDFSCluster cluster = null; try { cluster = new MiniDFSCluster.Builder(conf).nameNodePort(port) .numDataNodes(2).build(); URI uri = cluster.getFileSystem().getUri();
/**
 * Starts DFS as specified in member-variable options. Also writes out
 * configuration and details, if requested.
 *
 * @throws IOException if the cluster fails to start or an output cannot
 *         be written
 * @throws FileNotFoundException if a requested output file cannot be opened
 */
public void start() throws IOException, FileNotFoundException {
    dfs = new MiniDFSCluster.Builder(conf).nameNodePort(nameNodePort)
                                          .numDataNodes(numDataNodes)
                                          .startupOption(dfsOpts)
                                          .format(format)
                                          .build();
    dfs.waitActive();
    LOG.info("Started MiniDFSCluster -- namenode on port "
        + dfs.getNameNodePort());
    if (writeConfig != null) {
        // Close in a finally block: the original leaked the stream if
        // writeXml threw before the explicit close().
        FileOutputStream fos = new FileOutputStream(new File(writeConfig));
        try {
            conf.writeXml(fos);
        } finally {
            fos.close();
        }
    }
    if (writeDetails != null) {
        Map<String, Object> map = new TreeMap<String, Object>();
        if (dfs != null) {
            map.put("namenode_port", dfs.getNameNodePort());
        }
        // Same leak fix for the details writer.
        FileWriter fw = new FileWriter(new File(writeDetails));
        try {
            fw.write(new JSON().toJSON(map));
        } finally {
            fw.close();
        }
    }
}
// One-time test fixture: starts a shared mini cluster before any test runs.
@BeforeClass
public static void startUp() throws IOException {
    conf = new HdfsConfiguration();
    // Small blocks and matching checksum chunk so tests can create many
    // blocks cheaply.
    conf.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, BLOCK_SIZE);
    conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, BLOCK_SIZE);
    // Short heartbeat / 1s pending-replication timeout so the namenode
    // notices datanode state changes quickly during the test.
    conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, SHORT_HEARTBEAT);
    conf.setLong(
        DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, 1);
    cluster = new MiniDFSCluster.Builder(conf)
        .format(true)
        .numDataNodes(DATANODE_NUM)
        .nameNodePort(NameNode.DEFAULT_PORT)
        .waitSafeMode(true)
        .build();
    dfs = cluster.getFileSystem();
}
// One-time test fixture: starts a shared mini cluster before any test runs.
@BeforeClass
public static void startUp() throws IOException {
    conf = new HdfsConfiguration();
    // Small blocks and matching checksum chunk so tests can create many
    // blocks cheaply.
    conf.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, BLOCK_SIZE);
    conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, BLOCK_SIZE);
    // Short heartbeat / 1s pending-replication timeout so the namenode
    // notices datanode state changes quickly during the test.
    conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, SHORT_HEARTBEAT);
    conf.setLong(
        DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, 1);
    cluster = new MiniDFSCluster.Builder(conf)
        .format(true)
        .numDataNodes(DATANODE_NUM)
        .nameNodePort(NameNode.DEFAULT_PORT)
        .waitSafeMode(true)
        .build();
    fs = cluster.getFileSystem();
}
/**
 * Shuts down the shared cluster and restarts it (without reformatting)
 * under the given startup option.
 *
 * <p>For ROLLBACK the namenode state is rolled back out-of-band via
 * {@link NameNode#doRollback}, after which the namenode restarts REGULAR
 * while the datanodes restart with ROLLBACK; for any other option the
 * namenode gets the option and the datanodes restart REGULAR.
 *
 * @param o startup option to restart with
 * @throws IOException if the restart fails
 */
static void restartCluster(StartupOption o) throws IOException {
    cluster.shutdown();
    if (StartupOption.ROLLBACK == o) {
        NameNode.doRollback(conf, false);
    }
    // Split the original inline ternaries into named options for clarity.
    StartupOption nnOption =
        (o == StartupOption.ROLLBACK) ? StartupOption.REGULAR : o;
    StartupOption dnOption =
        (o == StartupOption.ROLLBACK) ? o : StartupOption.REGULAR;
    cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(DATANODE_NUM)
        .format(false)
        .nameNodePort(NameNode.DEFAULT_PORT)
        .startupOption(nnOption)
        .dnStartupOption(dnOption)
        .build();
    fs = cluster.getFileSystem();
}
}
/**
 * Spins up a mini DFS cluster configured from the Hadoop site XML files
 * under {@code basePath} and loads the test data set into it.
 *
 * @param basePath root directory containing the Hadoop conf subdirectory
 * @throws Exception if the cluster fails to start or data loading fails
 */
public void setup(String basePath) throws Exception {
    // Pull in the site configuration shipped with the test resources.
    String confDir = basePath + PATH_TO_HADOOP_CONF;
    conf.addResource(new Path(confDir + "/core-site.xml"));
    conf.addResource(new Path(confDir + "/mapred-site.xml"));
    conf.addResource(new Path(confDir + "/hdfs-site.xml"));

    cleanupLocal();
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, MINIDFS_BASEDIR);

    dfsCluster = new MiniDFSCluster.Builder(conf)
        .nameNodePort(nameNodePort)
        .numDataNodes(numDataNodes)
        .startupOption(StartupOption.REGULAR)
        .build();
    dfs = FileSystem.get(conf);

    loadData(basePath);
}
/**
 * Starts the embedded MiniDFSCluster using the configured ports, datanode
 * count, and format flag.
 */
@Override
public void start() throws Exception {
    LOG.info("HDFS: Starting MiniDfsCluster");
    configure();
    // A null HTTP port means "let the cluster pick an ephemeral port" (0).
    int httpPort =
        (hdfsNamenodeHttpPort == null) ? 0 : hdfsNamenodeHttpPort.intValue();
    MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(hdfsConfig);
    builder.nameNodePort(hdfsNamenodePort);
    builder.nameNodeHttpPort(httpPort);
    builder.numDataNodes(hdfsNumDatanodes);
    builder.format(hdfsFormat);
    builder.racks(null);
    miniDFSCluster = builder.build();
}
/**
 * Builds and starts a MiniDFSCluster on {@code CLUSTER_PORT} with the
 * requested number of datanodes, storing it in the {@code cluster} field.
 *
 * @param hconf        cluster configuration
 * @param numDataNodes number of datanodes to start
 * @throws IOException if the cluster fails to start
 */
private void initMiniCluster(Configuration hconf, int numDataNodes)
        throws IOException {
    cluster = new MiniDFSCluster.Builder(hconf)
        .numDataNodes(numDataNodes)
        .nameNodePort(CLUSTER_PORT)
        .build();
}