// Write the Avro schema to HDFS, then initialize the SerDe with its URL.
miniDfs = new MiniDFSCluster(new Configuration(), 1, true, null);
miniDfs.getFileSystem().mkdirs(new Path("/path/to/schema"));
FSDataOutputStream out = miniDfs.getFileSystem().create(
    new Path("/path/to/schema/schema.avsc"));
out.writeBytes(RECORD_SCHEMA);
out.close();
String onHDFS = miniDfs.getFileSystem().getUri() + "/path/to/schema/schema.avsc";

Configuration conf = new Configuration();
Properties tbl = createPropertiesForHiveAvroSchemaUrl(onHDFS);
serDe.initialize(conf, tbl);

miniDfs.shutdown();
@Test
public void testCopyFilesParallel() throws Exception {
  MiniDFSCluster cluster = htu.startMiniDFSCluster(1);
  cluster.waitActive();
  FileSystem fs = cluster.getFileSystem();
  Path src = new Path("/src");
  fs.mkdirs(src);
  for (int i = 0; i < 50; i++) {
    WriteDataToHDFS(fs, new Path(src, String.valueOf(i)), 1024);
  }
  Path sub = new Path(src, "sub");
  fs.mkdirs(sub);
  for (int i = 0; i < 50; i++) {
    WriteDataToHDFS(fs, new Path(sub, String.valueOf(i)), 1024);
  }
  Path dst = new Path("/dst");
  List<Path> allFiles = FSUtils.copyFilesParallel(fs, src, fs, dst, conf, 4);

  assertEquals(102, allFiles.size());
  FileStatus[] list = fs.listStatus(dst);
  assertEquals(51, list.length);
  FileStatus[] sublist = fs.listStatus(new Path(dst, "sub"));
  assertEquals(50, sublist.length);
}
@Before
public void setup() throws Exception {
  fs = dfsClusterRule.getDfscluster().getFileSystem();
  hdfsURI = "hdfs://localhost:" + dfsClusterRule.getDfscluster().getNameNodePort() + "/";
}
@AfterClass
public static void destroyHDFS() throws Exception {
  if (hdfsCluster != null) {
    hdfsCluster.getFileSystem().delete(new org.apache.hadoop.fs.Path(basePath.toUri()), true);
    hdfsCluster.shutdown();
  }
}
@Override
public void evaluate() throws Throwable {
  try {
    System.setProperty(TEST_BUILD_DATA, "target/test/data");
    hadoopConf = hadoopConfSupplier.get();
    dfscluster = new MiniDFSCluster.Builder(hadoopConf).numDataNodes(3).build();
    dfscluster.waitActive();
  } finally {
    if (dfscluster != null) {
      dfscluster.shutdown();
    }
    System.clearProperty(TEST_BUILD_DATA);
  }
}
};
@BeforeClass
public static void setUp() throws Exception {
  Configuration conf = new Configuration();
  dfsCluster = new MiniDFSCluster(conf, numSlaves, true, null);
  jConf = new JobConf(conf);
  mrCluster = new MiniMRCluster(0, 0, numSlaves,
      dfsCluster.getFileSystem().getUri().toString(), 1, null, null, null, jConf);
  dfsCluster.getNamesystem().getDelegationTokenSecretManager().startThreads();
  FileSystem fs = dfsCluster.getFileSystem();
  p1 = new Path("file1");
  p1 = fs.makeQualified(p1);
}
@Test
// ...
FileSystem fs = cluster.getFileSystem();
fs.copyFromLocalFile(new Path(jarFile1.getPath()),
    new Path(fs.getUri().toString() + Path.SEPARATOR));
String jarFileOnHDFS1 = fs.getUri().toString() + Path.SEPARATOR + jarFile1.getName();
Path pathOnHDFS1 = new Path(jarFileOnHDFS1);
assertTrue("Copy jar file to HDFS failed.", fs.exists(pathOnHDFS1));
fs.copyFromLocalFile(new Path(jarFile2.getPath()),
    new Path(fs.getUri().toString() + Path.SEPARATOR));
String jarFileOnHDFS2 = fs.getUri().toString() + Path.SEPARATOR + jarFile2.getName();
// ...
if (env != null) {
  Configuration conf = env.getConfiguration();
  found2_k1 = found2_k1 && (conf.get("k1") != null);
  found2_k2 = found2_k2 && (conf.get("k2") != null);
  found2_k3 = found2_k3 && (conf.get("k3") != null);
} else {
  found2_k1 = false;
List<DataNode> datanodes = TEST_UTIL.getDFSCluster().getDataNodes();
Method selfAddress;
try {
  List<String> files = region.getStoreFileList(new byte[][] { COLUMN_FAMILY });
  for (String file : files) {
    FileStatus status = TEST_UTIL.getDFSCluster().getFileSystem()
        .getFileStatus(new Path(new URI(file).getPath()));
    BlockLocation[] lbks = ((DistributedFileSystem) TEST_UTIL.getDFSCluster().getFileSystem())
        .getFileBlockLocations(status, 0, Long.MAX_VALUE);
    for (BlockLocation lbk : lbks) {
protected void setUp() throws Exception {
  Configuration conf = new Configuration();
  dfsCluster = new MiniDFSCluster(conf, NUM_HADOOP_SLAVES, true, null);
  dfs = dfsCluster.getFileSystem();
  mrCluster = new MiniMRCluster(NUM_HADOOP_SLAVES, dfs.getUri().toString(), 1);
}

protected void tearDown() throws Exception {
@BeforeClass
public static void createHDFS() throws Exception {
  final File baseDir = TMP.newFolder();

  Configuration hdConf = new Configuration();
  hdConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.getAbsolutePath());

  MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(hdConf);
  hdfsCluster = builder.build();

  org.apache.hadoop.fs.FileSystem hdfs = hdfsCluster.getFileSystem();
  fs = new HadoopFileSystem(hdfs);

  basePath = new Path(hdfs.getUri().toString() + "/tests");
}
@Before
public void setup() throws Exception {
  Configuration hdfsConf = new HdfsConfiguration();
  hdfsConf.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 0);
  String namenodeDir = new File(MiniDFSCluster.getBaseDirectory(), "name").getAbsolutePath();
  hdfsConf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, namenodeDir);
  hdfsConf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, namenodeDir);
  hdfsConf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCKSIZE);
  cluster = new MiniDFSCluster.Builder(hdfsConf).numDataNodes(15).build();
  fs = cluster.getFileSystem();
  fs.enableErasureCodingPolicy(ecPolicy.getName());
  fs.setErasureCodingPolicy(new Path("/"), ecPolicy.getName());
  cluster.waitActive();

  conf = new Configuration();
  submitDir = new Path("/");
  testFile = new Path("/testfile");
  DFSTestUtil.writeFile(fs, testFile, StripedFileTestUtil.generateBytes(BLOCKSIZE));
  conf.set(FileInputFormat.INPUT_DIR, fs.getUri().toString() + testFile.toString());
}
private void setupMiniDfsAndMrClusters() {
  try {
    final int dataNodes = 1;    // one data node
    final int taskTrackers = 1; // one task tracker node
    Configuration config = new Configuration();

    // Builds and starts the mini dfs and mapreduce clusters
    if (System.getProperty("hadoop.log.dir") == null) {
      System.setProperty("hadoop.log.dir", "target/tmp/logs/");
    }
    m_dfs = new MiniDFSCluster(config, dataNodes, true, null);
    m_fileSys = m_dfs.getFileSystem();
    m_mr = new MiniMRCluster(taskTrackers, m_fileSys.getUri().toString(), 1);

    // Create the configuration hadoop-site.xml file
    File conf_dir = new File(System.getProperty("user.home"), "pigtest/conf/");
    conf_dir.mkdirs();
    File conf_file = new File(conf_dir, "hadoop-site.xml");

    // Write the necessary config info to hadoop-site.xml
    m_conf = m_mr.createJobConf();
    m_conf.setInt("mapred.submit.replication", 1);
    m_conf.set("dfs.datanode.address", "0.0.0.0:0");
    m_conf.set("dfs.datanode.http.address", "0.0.0.0:0");
    m_conf.writeXml(new FileOutputStream(conf_file));

    // Set the system properties needed by Pig
    System.setProperty("cluster", m_conf.get("mapred.job.tracker"));
    System.setProperty("namenode", m_conf.get("fs.default.name"));
    System.setProperty("junit.hadoop.conf", conf_dir.getPath());
  } catch (IOException e) {
    throw new RuntimeException(e);
  }
}
@Before
public void setUp() throws Exception {
  Configuration c = TEST_UTIL.getConfiguration();
  c.setBoolean("dfs.support.append", true);
  TEST_UTIL.startMiniCluster(1);
  table = TEST_UTIL.createMultiRegionTable(TABLE_NAME, FAMILY);
  TEST_UTIL.loadTable(table, FAMILY);

  // set up the HDFS snapshots
  client = new DFSClient(TEST_UTIL.getDFSCluster().getURI(), TEST_UTIL.getConfiguration());
  String fullUrIPath = TEST_UTIL.getDefaultRootDirPath().toString();
  String uriString = TEST_UTIL.getTestFileSystem().getUri().toString();
  baseDir = StringUtils.removeStart(fullUrIPath, uriString);
  client.allowSnapshot(baseDir);
}
protected void setUp() throws Exception {
  super.setUp();
  dfscluster = new MiniDFSCluster(new Configuration(), 2, true, null);
  fs = dfscluster.getFileSystem();
  mapred = new MiniMRCluster(2, fs.getUri().toString(), 1);
  inputPath = new Path(fs.getHomeDirectory(), inputDir);
  archivePath = new Path(fs.getHomeDirectory(), "archive");
  fs.mkdirs(inputPath);
  createFile(inputPath, "a", fs);
  createFile(inputPath, "b", fs);
  createFile(inputPath, "c", fs);
}
private void startCluster() throws Exception {
  super.setUp();
  Configuration conf = new Configuration();
  dfsCluster = new MiniDFSCluster(conf, numSlaves, true, null);
  JobConf jConf = new JobConf(conf);
  jConf.setLong("mapred.job.submission.expiry.interval", 6 * 1000);
  mrCluster = new MiniMRCluster(0, 0, numSlaves,
      dfsCluster.getFileSystem().getUri().toString(), 1, null, null, null, jConf);
  jt = mrCluster.getJobTrackerRunner().getJobTracker();
  fs = FileSystem.get(mrCluster.createJobConf());
}
public void setUp() throws Exception {
  super.setUp();
  conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
      HadoopPolicyProvider.class, PolicyProvider.class);
  jobtracker = System.getProperty("test.cli.mapred.job.tracker");
  JobConf mrConf = new JobConf(conf);
  if (jobtracker == null) {
    // Start up mini mr cluster
    mrCluster = new MiniMRCluster(1, dfsCluster.getFileSystem().getUri().toString(),
        1, null, null, mrConf);
    jobtracker = mrCluster.createJobConf().get(JTConfig.JT_IPC_ADDRESS, "local");
  } else {
    conf.set(JTConfig.JT_IPC_ADDRESS, jobtracker);
  }
  cmdExecutor = new MRCmdExecutor(jobtracker);
  archiveCmdExecutor = new ArchiveCmdExecutor(namenode, mrConf);
}
@BeforeClass
public static void setUp() throws IOException {
  conf = new Configuration();
  conf.set(DFSConfigKeys.DFS_WEBHDFS_AUTHENTICATION_FILTER_KEY,
      CustomizedFilter.class.getName());
  conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "localhost:0");
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  InetSocketAddress addr = cluster.getNameNode().getHttpAddress();
  fs = FileSystem.get(
      URI.create("webhdfs://" + NetUtils.getHostPortString(addr)), conf);
  cluster.waitActive();
}
private AutoCloseableHdfsSpout makeSpout(String readerType, String[] outputFields) {
  HdfsSpout spout = new HdfsSpout().withOutputFields(outputFields)
      .setReaderType(readerType)
      .setHdfsUri(DFS_CLUSTER_RULE.getDfscluster().getURI().toString())
      .setSourceDir(source.toString())
      .setArchiveDir(archive.toString())
      .setBadFilesDir(badfiles.toString());
  return new AutoCloseableHdfsSpout(spout);
}
@Override
protected void setUp() throws Exception {
  super.setUp();
  final int taskTrackers = 4;
  Configuration conf = new Configuration();
  dfsCluster = new MiniDFSCluster(conf, 4, true, null);
  jc = new JobConf();
  jc.setClass(JTConfig.JT_TASK_SCHEDULER, TestTaskScheduler.class, TaskScheduler.class);
  jc.setLong(JTConfig.JT_RUNNINGTASKS_PER_JOB, 10L);
  mrCluster = new MiniMRCluster(0, 0, taskTrackers,
      dfsCluster.getFileSystem().getUri().toString(), 1, null, null, null, jc);
}
@BeforeClass
public static void setup() throws Exception {
  // start a cluster with single datanode
  cluster = new MiniDFSCluster.Builder(CONF).build();
  cluster.waitActive();
  fs = cluster.getFileSystem();

  final String str = "hftp://" + CONF.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
  hftpURI = new URI(str);
  hftpFs = cluster.getHftpFileSystem(0);
}
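All of the snippets above follow the same lifecycle: build a MiniDFSCluster, wait for it to become active, obtain its FileSystem, work against HDFS, and shut the cluster down when the test finishes. The following is a minimal, self-contained sketch of that pattern using the JUnit 4 style seen in the examples; the class name, base directory, and file paths are illustrative and not taken from any of the projects above.

import java.io.File;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;

public class MiniDfsLifecycleSketchTest {

  private static MiniDFSCluster cluster;
  private static FileSystem fs;

  @BeforeClass
  public static void startCluster() throws Exception {
    Configuration conf = new Configuration();
    // Keep the cluster's storage under the build directory (path is illustrative).
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR,
        new File("target/test/minidfs").getAbsolutePath());
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    fs = cluster.getFileSystem();
  }

  @AfterClass
  public static void stopCluster() {
    if (cluster != null) {
      cluster.shutdown();
    }
  }

  @Test
  public void writesAndListsAFile() throws Exception {
    Path dir = new Path("/example");
    Path file = new Path(dir, "data.txt");
    fs.mkdirs(dir);

    // Write a small file, then verify it is visible through the same FileSystem.
    try (FSDataOutputStream out = fs.create(file)) {
      out.writeBytes("hello minidfs");
    }
    Assert.assertTrue(fs.exists(file));

    FileStatus[] listing = fs.listStatus(dir);
    Assert.assertEquals(1, listing.length);
  }
}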