/**
 * Discards all state accumulated during the previous balancing
 * iteration so the next pass starts from scratch: fresh topology,
 * empty datanode groupings, zeroed average utilization, and an
 * empty global block list.
 */
private void resetData() {
  // NetworkTopology exposes no reset here, so swap in a new instance.
  this.cluster = new NetworkTopology();

  // Forget all tracked nodes and the chosen source/target pairings.
  this.datanodes.clear();
  this.sources.clear();
  this.targets.clear();

  // Drop every utilization bucket computed in the previous pass.
  this.overUtilizedDatanodes.clear();
  this.aboveAvgUtilizedDatanodes.clear();
  this.belowAvgUtilizedDatanodes.clear();
  this.underUtilizedDatanodes.clear();

  this.avgUtilization = 0.0D;
  cleanGlobalBlockList();
}
/**
 * Resets every piece of per-iteration balancing state: replaces the
 * topology, empties all datanode groupings and source/target choices,
 * zeroes the average remaining-capacity figure, and purges both the
 * global block list and the moved-blocks tracker.
 */
private void resetData() {
  // No reset operation on NetworkTopology; allocate a fresh one.
  this.cluster = new NetworkTopology();

  // Clear the node sets and the planned transfer endpoints.
  this.datanodes.clear();
  this.sources.clear();
  this.targets.clear();

  // Empty each utilization bucket from the last pass.
  this.overUtilizedDatanodes.clear();
  this.aboveAvgUtilizedDatanodes.clear();
  this.belowAvgUtilizedDatanodes.clear();
  this.underUtilizedDatanodes.clear();

  this.avgRemaining = 0.0D;
  cleanGlobalBlockList();
  this.movedBlocks.cleanup();
}
/**
 * Creates an FSNamesystem backed by an already-constructed FSImage.
 *
 * Initialization order matters: configuration parameters must be set
 * before the FSDirectory is built on top of the image.
 *
 * @param fsImage the filesystem image holding the persisted namespace state
 * @param conf    configuration supplying topology and namesystem settings
 * @throws IOException if directory construction from the image fails
 */
FSNamesystem(FSImage fsImage, Configuration conf) throws IOException {
  super(conf);
  this.clusterMap = new NetworkTopology(conf);
  // Guards namespace reads/writes; taken throughout the namesystem.
  this.fsLock = new ReentrantReadWriteLock();
  // Must run before FSDirectory construction below.
  setConfigurationParameters(conf);
  this.dir = new FSDirectory(fsImage, this, conf);
}
/**
 * FSNamesystem constructor used by the NameNode.
 *
 * Builds the network topology, runs full initialization, and sets up
 * path validation. On any initialization failure the partially-built
 * namesystem is closed before the exception is rethrown, so no
 * half-initialized resources are leaked to the caller.
 *
 * @param nn   the owning NameNode
 * @param conf configuration for topology, initialization, and validation
 * @throws IOException if initialization fails (logged, then rethrown)
 */
FSNamesystem(NameNode nn, Configuration conf) throws IOException {
  super(conf);
  try {
    clusterMap = new NetworkTopology(conf);
    initialize(nn, getConf());
    pathValidator = new PathValidator(conf);
  } catch (IOException e) {
    LOG.error(getClass().getSimpleName() + " initialization failed.", e);
    // Release anything initialize() managed to open before failing.
    close();
    throw e;
  }
}
// NOTE(review): the two fragments below are truncated in this view -- the
// method bodies continue past what is shown (the opening brace is never
// closed). Each appends splits for `path` to `splits`; presumably the
// freshly-built NetworkTopology is used further down for locality-aware
// split placement -- confirm against the full method before editing.
public void splitFile(JobConf job, Path path, List<FileSplit> splits) throws IOException { NetworkTopology clusterMap = new NetworkTopology(); FileSystem fs = path.getFileSystem(job); FileStatus file = fs.getFileStatus(path);
// Duplicate of the truncated fragment above (second copy in this chunk).
public void splitFile(JobConf job, Path path, List<FileSplit> splits) throws IOException { NetworkTopology clusterMap = new NetworkTopology(); FileSystem fs = path.getFileSystem(job); FileStatus file = fs.getFileStatus(path);
// NOTE(review): the six fragments below are truncated -- each opens a loop
// over `files` whose body is cut off mid-statement. All build an empty
// NetworkTopology and then walk the listed file statuses; what happens to
// `path` inside each loop is not visible here -- confirm against the full
// methods before editing.
NetworkTopology clusterMap = new NetworkTopology(); for (FileStatus file: files) { Path path = file.getPath();
// Variant iterating LocatedFileStatus (block locations pre-fetched with
// the listing) rather than plain FileStatus.
NetworkTopology clusterMap = new NetworkTopology(); for (LocatedFileStatus file: files) { Path path = file.getPath();
// Remaining copies are byte-identical to the first fragment above.
NetworkTopology clusterMap = new NetworkTopology(); for (FileStatus file: files) { Path path = file.getPath();
NetworkTopology clusterMap = new NetworkTopology(); for (FileStatus file: files) { Path path = file.getPath();
NetworkTopology clusterMap = new NetworkTopology(); for (FileStatus file: files) { Path path = file.getPath();
NetworkTopology clusterMap = new NetworkTopology(); for (FileStatus file: files) { Path path = file.getPath();
// NOTE(review): statement fragment from a larger (test?) method -- the
// three block sizes come from surrounding code not visible here. Sets up a
// 3-block file size, a replication factor of 3, and an empty topology;
// presumably fed into a block-placement check -- confirm in the full method.
int fileSize = block1Size + block2Size + block3Size; int replicationFactor = 3; NetworkTopology clusterMap = new NetworkTopology();
/**
 * Verifies that NetworkTopology rejects an invalid layout: once two
 * datanodes exist under the rack /d1/r1, adding a datanode directly
 * under /d1 would place a rack node and a non-rack node at the same
 * level, which must raise InvalidTopologyException with an explanatory
 * message.
 */
@Test
public void testCreateInvalidTopology() throws Exception {
  NetworkTopology cluster = new NetworkTopology();
  DatanodeDescriptor[] nodes = {
      DFSTestUtil.getDatanodeDescriptor("1.1.1.1", "/d1/r1"),
      DFSTestUtil.getDatanodeDescriptor("2.2.2.2", "/d1/r1"),
      DFSTestUtil.getDatanodeDescriptor("3.3.3.3", "/d1")
  };
  // Two rack-level nodes go in cleanly.
  cluster.add(nodes[0]);
  cluster.add(nodes[1]);
  try {
    // Third node sits one level up -- this must be rejected.
    cluster.add(nodes[2]);
    fail("expected InvalidTopologyException");
  } catch (NetworkTopology.InvalidTopologyException e) {
    String msg = e.getMessage();
    assertTrue(msg.startsWith("Failed to add "));
    assertTrue(msg.contains(
        "You cannot have a rack and a non-rack node at the same "
            + "level of the network topology."));
  }
}
// NOTE(review): duplicate statement fragment (same as the earlier copy in
// this chunk); block sizes are defined in surrounding code not shown.
// Total file size over three blocks, replication 3, empty topology --
// presumably a fixture for a placement/locality test; verify in context.
int fileSize = block1Size + block2Size + block3Size; int replicationFactor = 3; NetworkTopology clusterMap = new NetworkTopology();
// NOTE(review): the two fragments below are truncated -- each test method's
// body continues past what is shown. Both seed a topology with node1 under
// rack /d1/r1 and prepare node2 for /d1/r2; the assertions about node
// counting are cut off -- confirm against the full test before editing.
public void testCountNumNodes() throws Exception { NetworkTopology cluster = new NetworkTopology(); cluster.add(getNewNode("node1", "/d1/r1")); NodeElement node2 = getNewNode("node2", "/d1/r2");
// Duplicate of the truncated fragment above (second copy in this chunk).
public void testCountNumNodes() throws Exception { NetworkTopology cluster = new NetworkTopology(); cluster.add(getNewNode("node1", "/d1/r1")); NodeElement node2 = getNewNode("node2", "/d1/r2");