public static List<StorageLocation> getStorageLocations(Configuration conf) { Collection<String> rawLocations = conf.getTrimmedStringCollection(DFS_DATANODE_DATA_DIR_KEY); List<StorageLocation> locations = new ArrayList<StorageLocation>(rawLocations.size()); for(String locationString : rawLocations) { final StorageLocation location; try { location = StorageLocation.parse(locationString); } catch (IOException | SecurityException ioe) { LOG.error("Failed to initialize storage directory {}." + "Exception details: {}", locationString, ioe.toString()); // Ignore the exception. continue; } locations.add(location); } return locations; }
+ diskFile.getAbsolutePath()); memBlockInfo.updateWithReplica( StorageLocation.parse(diskFile.toString()));
public static List<StorageLocation> getStorageLocations(Configuration conf) { Collection<String> rawLocations = conf.getTrimmedStringCollection(DFS_DATANODE_DATA_DIR_KEY); List<StorageLocation> locations = new ArrayList<StorageLocation>(rawLocations.size()); for(String locationString : rawLocations) { final StorageLocation location; try { location = StorageLocation.parse(locationString); } catch (IOException ioe) { LOG.error("Failed to initialize storage directory " + locationString + ". Exception details: " + ioe); // Ignore the exception. continue; } catch (SecurityException se) { LOG.error("Failed to initialize storage directory " + locationString + ". Exception details: " + se); // Ignore the exception. continue; } locations.add(location); } return locations; }
public static List<StorageLocation> getStorageLocations(Configuration conf) { Collection<String> rawLocations = conf.getTrimmedStringCollection(DFS_DATANODE_DATA_DIR_KEY); List<StorageLocation> locations = new ArrayList<StorageLocation>(rawLocations.size()); for(String locationString : rawLocations) { final StorageLocation location; try { location = StorageLocation.parse(locationString); } catch (IOException ioe) { LOG.error("Failed to initialize storage directory " + locationString + ". Exception details: " + ioe); // Ignore the exception. continue; } catch (SecurityException se) { LOG.error("Failed to initialize storage directory " + locationString + ". Exception details: " + se); // Ignore the exception. continue; } locations.add(location); } return locations; }
StorageLocation.parse(dir.getRoot().toString()));
StorageLocation.parse(dir.getRoot().toString()));
/**
 * Create a list of StorageLocations.
 * If asFile sets to true, create StorageLocation as regular files, otherwise
 * create directories for each location.
 * @param numLocs the total number of StorageLocations to be created.
 * @param asFile set to true to create as file.
 * @return a list of StorageLocations.
 */
private static List<StorageLocation> createStorageLocations(
    int numLocs, boolean asFile) throws IOException {
  // Presize to numLocs — the final size is known up front, so avoid
  // intermediate ArrayList growth.
  List<StorageLocation> locations = new ArrayList<StorageLocation>(numLocs);
  for (int i = 0; i < numLocs; i++) {
    String uri = TEST_DIR + "/data" + i;
    File file = new File(uri);
    if (asFile) {
      // Regular file: ensure the parent exists first, then create the file.
      file.getParentFile().mkdirs();
      file.createNewFile();
    } else {
      file.mkdirs();
    }
    locations.add(StorageLocation.parse(uri));
  }
  return locations;
}
/**
 * Starts an instance of DataNode: builds a DataNode configuration bound to
 * ephemeral ports, points the default FS at the test NameNode address, and
 * recreates the data directory from scratch as the single storage location.
 * @throws IOException
 */
@Before
public void startUp() throws IOException, URISyntaxException {
  tearDownDone = false;
  conf = new HdfsConfiguration();
  conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, DATA_DIR);
  // Port 0 on every endpoint so the OS assigns free ephemeral ports.
  conf.set(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY, "0.0.0.0:0");
  conf.set(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
  conf.set(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY, "0.0.0.0:0");
  // Fail fast on IPC connect problems instead of retrying.
  conf.setInt(CommonConfigurationKeys.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 0);
  FileSystem.setDefaultUri(conf,
      "hdfs://" + NN_ADDR.getHostName() + ":" + NN_ADDR.getPort());
  // Wipe any leftover state from a previous run and start with an empty dir.
  File dir = new File(DATA_DIR);
  FileUtil.fullyDelete(dir);
  dir.mkdirs();
  StorageLocation loc = StorageLocation.parse(dir.getPath());
  locations.add(loc);
}
StorageLocation sl = StorageLocation.parse(dir); File lockFile = new File(sl.getFile(), Storage.STORAGE_FILE_LOCK); try (RandomAccessFile raf = new RandomAccessFile(lockFile, "rws");
/**
 * Verifies that DataNode.checkStorageLocations drops data directories that
 * fail the disk check and keeps only the healthy ones.
 */
@Test (timeout = 30000)
public void testDataDirValidation() throws Throwable {
  // Stub the checker so the first two checkDir calls throw and the third
  // succeeds: p1 and p2 are rejected, p3 passes. The stubbing ORDER maps
  // one-to-one onto the insertion order of the locations below.
  DataNodeDiskChecker diskChecker = mock(DataNodeDiskChecker.class);
  doThrow(new IOException()).doThrow(new IOException()).doNothing()
      .when(diskChecker).checkDir(any(LocalFileSystem.class), any(Path.class));
  LocalFileSystem fs = mock(LocalFileSystem.class);
  AbstractList<StorageLocation> locations = new ArrayList<StorageLocation>();
  locations.add(StorageLocation.parse("file:/p1/"));
  locations.add(StorageLocation.parse("file:/p2/"));
  locations.add(StorageLocation.parse("file:/p3/"));
  List<StorageLocation> checkedLocations =
      DataNode.checkStorageLocations(locations, fs, diskChecker);
  // Only p3 should survive validation.
  assertEquals("number of valid data dirs", 1, checkedLocations.size());
  String validDir = checkedLocations.iterator().next().getFile().getPath();
  // Compare via File.getPath() so the expected path is normalized the same
  // way on every platform.
  assertThat("p3 should be valid", new File("/p3/").getPath(), is(validDir));
}
}
.thenReturn(builder); StorageLocation location = StorageLocation.parse(badDir.toString()); List<NamespaceInfo> nsInfos = Lists.newArrayList(); for (String bpid : BLOCK_POOL_IDS) {
newDataDirBuf.append(","); newDataDirBuf.append( StorageLocation.parse(volumeDir.toString()).toString()); List<StorageLocation> effectiveStorageLocations = new ArrayList<>(); for (int i = 0; i < expectDataDirs.length; i++) { StorageLocation expectLocation = StorageLocation.parse(expectDataDirs[i]); StorageLocation effectiveLocation = StorageLocation .parse(effectiveDataDirs[i]); expectedStorageLocations.add(expectLocation); effectiveStorageLocations.add(effectiveLocation);
String pathUri = new Path(path).toUri().toString(); expectedVolumes.add(new File(pathUri).toString()); StorageLocation loc = StorageLocation.parse(pathUri); Storage.StorageDirectory sd = createStorageDirectory(new File(path)); DataStorage.VolumeBuilder builder =
/**
 * Adds a brand-new volume to the dataset and then removes it again,
 * asserting the volume count returns to its original value.
 */
@Test(timeout = 5000)
public void testRemoveNewlyAddedVolume() throws IOException {
  final int numExistingVolumes = dataset.getVolumes().size();
  List<NamespaceInfo> nsInfos = new ArrayList<>();
  for (String bpid : BLOCK_POOL_IDS) {
    nsInfos.add(new NamespaceInfo(0, CLUSTER_ID, bpid, 1));
  }
  String newVolumePath = BASE_DIR + "/newVolumeToRemoveLater";
  StorageLocation loc = StorageLocation.parse(newVolumePath);
  // Stub prepareVolume so addVolume sees a ready-made storage directory for
  // the new location instead of touching real on-disk state.
  Storage.StorageDirectory sd = createStorageDirectory(new File(newVolumePath));
  DataStorage.VolumeBuilder builder =
      new DataStorage.VolumeBuilder(storage, sd);
  when(storage.prepareVolume(eq(datanode), eq(loc.getFile()),
      anyListOf(NamespaceInfo.class)))
      .thenReturn(builder);
  dataset.addVolume(loc, nsInfos);
  assertEquals(numExistingVolumes + 1, dataset.getVolumes().size());
  // removeVolumes consults storage for the directory list, so the mocked
  // storage must report the new volume at the last index.
  when(storage.getNumStorageDirs()).thenReturn(numExistingVolumes + 1);
  when(storage.getStorageDir(numExistingVolumes)).thenReturn(sd);
  Set<File> volumesToRemove = new HashSet<>();
  volumesToRemove.add(loc.getFile());
  dataset.removeVolumes(volumesToRemove, true);
  // Back to the original count after removal.
  assertEquals(numExistingVolumes, dataset.getVolumes().size());
}
/**
 * Verifies DataNode.parseChangedVolumes correctly classifies a new data-dir
 * string into new, deactivated, and unchanged locations relative to the
 * DataNode's current configuration.
 */
@Test
public void testParseChangedVolumes() throws IOException {
  startDFSCluster(1, 1);
  DataNode dn = cluster.getDataNodes().get(0);
  Configuration conf = dn.getConf();
  // Reconstruct the DataNode's current locations from its own config.
  String oldPaths = conf.get(DFS_DATANODE_DATA_DIR_KEY);
  List<StorageLocation> oldLocations = new ArrayList<StorageLocation>();
  for (String path : oldPaths.split(",")) {
    oldLocations.add(StorageLocation.parse(path));
  }
  assertFalse(oldLocations.isEmpty());
  // New spec keeps only the FIRST old location and adds two fresh paths, so
  // the second old location is expected to be deactivated.
  String newPaths = oldLocations.get(0).getFile().getAbsolutePath()
      + ",/foo/path1,/foo/path2";
  DataNode.ChangedVolumes changedVolumes = dn.parseChangedVolumes(newPaths);
  List<StorageLocation> newVolumes = changedVolumes.newLocations;
  assertEquals(2, newVolumes.size());
  assertEquals(new File("/foo/path1").getAbsolutePath(),
      newVolumes.get(0).getFile().getAbsolutePath());
  assertEquals(new File("/foo/path2").getAbsolutePath(),
      newVolumes.get(1).getFile().getAbsolutePath());
  List<StorageLocation> removedVolumes = changedVolumes.deactivateLocations;
  assertEquals(1, removedVolumes.size());
  assertEquals(oldLocations.get(1).getFile(),
      removedVolumes.get(0).getFile());
  assertEquals(1, changedVolumes.unchangedLocations.size());
  assertEquals(oldLocations.get(0).getFile(),
      changedVolumes.unchangedLocations.get(0).getFile());
}
FileUtil.fullyDelete(dataDir); dataDir.mkdirs(); StorageLocation location = StorageLocation.parse(dataDir.getPath()); locations.add(location); final DatanodeProtocolClientSideTranslatorPB namenode =
assertEquals(4, effectiveVolumes.length); for (String ev : effectiveVolumes) { assertThat(StorageLocation.parse(ev).getFile().getCanonicalPath(), is(not(anyOf(is(newDirs.get(0)), is(newDirs.get(2))))));
final String volumePathToRemove = dataDirs[0]; Set<File> volumesToRemove = new HashSet<>(); volumesToRemove.add(StorageLocation.parse(volumePathToRemove).getFile());