private static boolean isActiveVolume(ServerContext context, Path dir) { // consider relative path as active and take no action if (!dir.toString().contains(":")) return true; for (String tableDir : ServerConstants.getTablesDirs(context)) { // use Path to normalize tableDir if (dir.toString().startsWith(new Path(tableDir).toString())) return true; } return false; }
/**
 * Returns the table-directory URI for every configured volume.
 *
 * @param context server context used to resolve the instance base URIs
 * @return each base URI suffixed with the tables directory name
 */
public static String[] getTablesDirs(ServerContext context) {
  String[] bases = getBaseUris(context);
  return VolumeConfiguration.prefix(bases, TABLE_DIR);
}
/**
 * Applies any configured volume replacement to the root tablet's location, persisting the new
 * location to the metadata table when a replacement occurs.
 *
 * @param context server context supplying configuration and Hadoop settings
 * @param location current root tablet directory
 * @return the (normalized) replacement location, or the original location if no replacement applied
 * @throws IOException if the volume switch fails
 */
public static String switchRootTableVolume(ServerContext context, String location)
    throws IOException {
  String replaced = switchVolume(location, FileType.TABLE,
      ServerConstants.getVolumeReplacements(context.getConfiguration(), context.getHadoopConf()));
  if (replaced == null) {
    // no replacement configured for this volume; keep the original location
    return location;
  }
  MetadataTableUtil.setRootTabletDir(context, replaced);
  log.info("Volume replaced: {} -> {}", location, replaced);
  return new Path(replaced).toString();
}
/**
 * Initializes any configured volumes that have not yet been initialized, reusing the instance id
 * found on an already-initialized volume.
 *
 * @param fs volume manager spanning all configured volumes
 * @param siteConfig site configuration listing the volume URIs
 * @param hadoopConf Hadoop configuration used for filesystem access
 * @throws IOException if the persistent data version does not match this release, or on I/O error
 */
private static void addVolumes(VolumeManager fs, SiteConfiguration siteConfig,
    Configuration hadoopConf) throws IOException {
  String[] volumeURIs = VolumeConfiguration.getVolumeUris(siteConfig, hadoopConf);

  // split the configured volumes into those already carrying data and those that are new
  HashSet<String> initialized = new HashSet<>(
      Arrays.asList(ServerConstants.checkBaseUris(siteConfig, hadoopConf, volumeURIs, true)));
  HashSet<String> uninitialized = new HashSet<>(Arrays.asList(volumeURIs));
  uninitialized.removeAll(initialized);

  // borrow instance id and data version from any initialized volume
  Path aBasePath = new Path(initialized.iterator().next());
  Path iidPath = new Path(aBasePath, ServerConstants.INSTANCE_ID_DIR);
  Path versionPath = new Path(aBasePath, ServerConstants.VERSION_DIR);
  UUID uuid = UUID.fromString(ZooUtil.getInstanceIDFromHdfs(iidPath, siteConfig, hadoopConf));

  // warn loudly if the volume we sourced from is itself slated for replacement
  for (Pair<Path,Path> replacement : ServerConstants.getVolumeReplacements(siteConfig,
      hadoopConf)) {
    if (aBasePath.equals(replacement.getFirst())) {
      log.error(
          "{} is set to be replaced in {} and should not appear in {}."
              + " It is highly recommended that this property be removed as data"
              + " could still be written to this volume.",
          aBasePath, Property.INSTANCE_VOLUMES_REPLACEMENTS, Property.INSTANCE_VOLUMES);
    }
  }

  int persisted =
      ServerUtil.getAccumuloPersistentVersion(versionPath.getFileSystem(hadoopConf), versionPath);
  if (persisted != ServerConstants.DATA_VERSION) {
    throw new IOException("Accumulo " + Constants.VERSION + " cannot initialize data version "
        + ServerUtil.getAccumuloPersistentVersion(fs));
  }

  initDirs(fs, uuid, uninitialized.toArray(new String[0]), true);
}
if (fs.exists(oldPath)) { VolumeChooserEnvironment chooserEnv = new VolumeChooserEnvironment(RootTable.ID, context); String newPath = fs.choose(chooserEnv, ServerConstants.getBaseUris(context)) + Constants.HDFS_TABLES_DIR + Path.SEPARATOR + RootTable.ID; fs.mkdirs(new Path(newPath)); for (String basePath : ServerConstants.getTablesDirs(context)) { Path path = new Path(basePath + "/" + RootTable.ID + RootTable.ROOT_TABLET_LOCATION); if (fs.exists(path)) {
final Path rootTablet = new Path(ServerConstants.getRootTabletDir()); final Path tableMetadataTablet = new Path(ServerConstants.getMetadataTableDir() + Constants.TABLE_TABLET_LOCATION); final Path defaultMetadataTablet = new Path(ServerConstants.getMetadataTableDir() + Constants.DEFAULT_TABLET_LOCATION); final Path metadataTableDir = new Path(ServerConstants.getMetadataTableDir()); fs.mkdirs(new Path(ServerConstants.getDataVersionLocation(), "" + Constants.DATA_VERSION)); fs.mkdirs(ServerConstants.getInstanceIdLocation()); fs.createNewFile(new Path(ServerConstants.getInstanceIdLocation(), uuid.toString())); String initRootTabFile = ServerConstants.getMetadataTableDir() + "/root_tablet/00000_00000." + FileOperations.getNewFileExtension(AccumuloConfiguration.getDefaultConfiguration()); FileSKVWriter mfw = FileOperations.getInstance().openWriter(initRootTabFile, fs, conf, AccumuloConfiguration.getDefaultConfiguration());
public static synchronized Path getAccumuloInstanceIdPath(VolumeManager fs) { // It doesn't matter which Volume is used as they should all have the instance ID stored Volume v = fs.getVolumes().iterator().next(); return ServerConstants.getInstanceIdLocation(v); }
/**
 * Reports whether this filesystem already holds Accumulo data, i.e. either the instance id
 * directory or the data version directory exists.
 *
 * @param fs filesystem to inspect
 * @return true if either marker directory exists
 * @throws IOException on filesystem error
 */
public static boolean isInitialized(FileSystem fs) throws IOException {
  if (fs.exists(ServerConstants.getInstanceIdLocation())) {
    return true;
  }
  return fs.exists(ServerConstants.getDataVersionLocation());
}
/**
 * Reads the persistent data version recorded on the given volume.
 *
 * @param v volume whose data version directory is consulted
 * @return the persisted data version number
 */
public static synchronized int getAccumuloPersistentVersion(Volume v) {
  return getAccumuloPersistentVersion(v.getFileSystem(),
      ServerConstants.getDataVersionLocation(v));
}
/**
 * Returns the verified base URIs for this instance, computing and caching them on first use.
 *
 * @param conf accumulo configuration listing the volumes
 * @param hadoopConf Hadoop configuration used to reach the filesystems
 * @return the cached, validated base URIs
 */
public static synchronized String[] getBaseUris(AccumuloConfiguration conf,
    Configuration hadoopConf) {
  if (baseUris != null) {
    return baseUris;
  }
  String[] configured = VolumeConfiguration.getVolumeUris(conf, hadoopConf);
  baseUris = checkBaseUris(conf, hadoopConf, configured, false);
  return baseUris;
}
/**
 * One-time upgrade step: moves the root tablet out of the metadata table's directory into its own
 * root-table directory and records the resulting location in ZooKeeper. A no-op if the ZooKeeper
 * node already exists (upgrade already performed).
 *
 * @param zoo ZooKeeper reader/writer used to persist the new location
 * @throws Exception on ZooKeeper or filesystem failure, or if zero/multiple root tablet
 *         locations are found
 */
private void moveRootTabletToRootTable(IZooReaderWriter zoo) throws Exception {
  String dirZPath = ZooUtil.getRoot(getInstance()) + RootTable.ZROOT_TABLET_PATH;
  if (zoo.exists(dirZPath)) {
    return; // already upgraded
  }
  Path oldPath = fs.getFullPath(FileType.TABLE, "/" + MetadataTable.ID + "/root_tablet");
  if (fs.exists(oldPath)) {
    String newPath = fs.choose(Optional.of(RootTable.ID), ServerConstants.getBaseUris())
        + Constants.HDFS_TABLES_DIR + Path.SEPARATOR + RootTable.ID;
    fs.mkdirs(new Path(newPath));
    if (!fs.rename(oldPath, new Path(newPath))) {
      throw new IOException("Failed to move root tablet from " + oldPath + " to " + newPath);
    }
    log.info("Upgrade renamed {} to {}", oldPath, newPath);
  }
  // scan all table dirs; exactly one must contain the root tablet
  Path location = null;
  for (String basePath : ServerConstants.getTablesDirs()) {
    Path path = new Path(basePath + "/" + RootTable.ID + RootTable.ROOT_TABLET_LOCATION);
    if (fs.exists(path)) {
      if (location != null) {
        throw new IllegalStateException(
            "Root table at multiple locations " + location + " " + path);
      }
      location = path;
    }
  }
  if (location == null) {
    throw new IllegalStateException("Failed to find root tablet");
  }
  log.info("Upgrade setting root table location in zookeeper {}", location);
  // BUG FIX: getBytes() without a charset encodes with the platform default;
  // use UTF-8 explicitly so the stored path is encoded consistently everywhere.
  zoo.putPersistentData(dirZPath,
      location.toString().getBytes(java.nio.charset.StandardCharsets.UTF_8),
      NodeExistsPolicy.FAIL);
}
private static void updateHdfs(VolumeManager fs, String newInstanceId) throws IOException { // Need to recreate the instanceId on all of them to keep consistency for (Volume v : fs.getVolumes()) { final Path instanceId = ServerConstants.getInstanceIdLocation(v); if (!v.getFileSystem().delete(instanceId, true)) { throw new IOException("Could not recursively delete " + instanceId); } if (!v.getFileSystem().mkdirs(instanceId)) { throw new IOException("Could not create directory " + instanceId); } v.getFileSystem().create(new Path(instanceId, newInstanceId)).close(); } }
/**
 * Initializes any configured volumes not yet carrying data, reusing the instance id found on an
 * already-initialized volume.
 *
 * @param fs volume manager spanning all configured volumes
 * @throws IOException if the persistent data version does not match this release, or on I/O error
 */
private static void addVolumes(VolumeManager fs) throws IOException {
  String[] volumeURIs = VolumeConfiguration.getVolumeUris(SiteConfiguration.getInstance());

  // partition configured volumes into already-initialized and brand-new
  HashSet<String> initialized =
      new HashSet<>(Arrays.asList(ServerConstants.checkBaseUris(volumeURIs, true)));
  HashSet<String> uninitialized = new HashSet<>(Arrays.asList(volumeURIs));
  uninitialized.removeAll(initialized);

  // borrow instance id and data version from any initialized volume
  Path aBasePath = new Path(initialized.iterator().next());
  Path iidPath = new Path(aBasePath, ServerConstants.INSTANCE_ID_DIR);
  Path versionPath = new Path(aBasePath, ServerConstants.VERSION_DIR);
  UUID uuid =
      UUID.fromString(ZooUtil.getInstanceIDFromHdfs(iidPath, SiteConfiguration.getInstance()));

  // warn loudly if the volume we sourced from is itself slated for replacement
  for (Pair<Path,Path> replacement : ServerConstants.getVolumeReplacements()) {
    if (aBasePath.equals(replacement.getFirst())) {
      log.error(aBasePath + " is set to be replaced in " + Property.INSTANCE_VOLUMES_REPLACEMENTS
          + " and should not appear in " + Property.INSTANCE_VOLUMES
          + ". It is highly recommended that this property be removed as data"
          + " could still be written to this volume.");
    }
  }

  int persisted = Accumulo.getAccumuloPersistentVersion(
      versionPath.getFileSystem(CachedConfiguration.getInstance()), versionPath);
  if (persisted != ServerConstants.DATA_VERSION) {
    throw new IOException("Accumulo " + Constants.VERSION + " cannot initialize data version "
        + Accumulo.getAccumuloPersistentVersion(fs));
  }

  initDirs(fs, uuid, uninitialized.toArray(new String[0]), true);
}
public static synchronized void updateAccumuloVersion(VolumeManager fs, int oldVersion) { for (Volume volume : fs.getVolumes()) { try { if (getAccumuloPersistentVersion(volume) == oldVersion) { log.debug("Attempting to upgrade {}", volume); Path dataVersionLocation = ServerConstants.getDataVersionLocation(volume); fs.create(new Path(dataVersionLocation, Integer.toString(ServerConstants.DATA_VERSION))) .close(); // TODO document failure mode & recovery if FS permissions cause above to work and below // to fail ACCUMULO-2596 Path prevDataVersionLoc = new Path(dataVersionLocation, Integer.toString(oldVersion)); if (!fs.delete(prevDataVersionLoc)) { throw new RuntimeException("Could not delete previous data version location (" + prevDataVersionLoc + ") for " + volume); } } } catch (IOException e) { throw new RuntimeException("Unable to set accumulo version: an error occurred.", e); } } }
/**
 * Returns the verified base URIs for this instance, computing and caching them on first use.
 *
 * @return the cached, validated base URIs
 */
public static synchronized String[] getBaseUris() {
  if (baseUris != null) {
    return baseUris;
  }
  String[] configured = VolumeConfiguration.getVolumeUris(SiteConfiguration.getInstance());
  baseUris = checkBaseUris(configured, false);
  return baseUris;
}
@Override public void deleteTableDirIfEmpty(Table.ID tableID) throws IOException { // if dir exist and is empty, then empty list is returned... // hadoop 2.0 will throw an exception if the file does not exist for (String dir : ServerConstants.getTablesDirs(context)) { FileStatus[] tabletDirs = null; try { tabletDirs = fs.listStatus(new Path(dir + "/" + tableID)); } catch (FileNotFoundException ex) { continue; } if (tabletDirs.length == 0) { Path p = new Path(dir + "/" + tableID); log.debug("Removing table dir {}", p); if (!moveToTrash(p)) fs.delete(p); } } }
/**
 * Returns the write-ahead-log recovery directory URI for every configured volume.
 *
 * @param context server context used to resolve the instance base URIs
 * @return each base URI suffixed with the recovery directory name
 */
public static String[] getRecoveryDirs(ServerContext context) {
  String[] bases = getBaseUris(context);
  return VolumeConfiguration.prefix(bases, RECOVERY_DIR);
}
.getVolumeReplacements(getConfiguration(), getContext().getHadoopConf());
/**
 * Verifies this process has write permission on the instance-id directory of every volume,
 * failing fast before any mutation is attempted.
 *
 * @param fs volume manager spanning all configured volumes
 * @throws Exception if any instance-id directory is missing or not writable
 */
private static void verifyHdfsWritePermission(VolumeManager fs) throws Exception {
  for (Volume volume : fs.getVolumes()) {
    Path idPath = ServerConstants.getInstanceIdLocation(volume);
    FileStatus status = volume.getFileSystem().getFileStatus(idPath);
    checkHdfsAccessPermissions(status, FsAction.WRITE);
  }
}
public static synchronized void updateAccumuloVersion(FileSystem fs) { try { if (getAccumuloPersistentVersion(fs) == Constants.PREV_DATA_VERSION) { fs.create(new Path(ServerConstants.getDataVersionLocation() + "/" + Constants.DATA_VERSION)); // TODO document failure mode & recovery if FS permissions cause above to work and below to fail ACCUMULO-2596 fs.delete(new Path(ServerConstants.getDataVersionLocation() + "/" + Constants.PREV_DATA_VERSION), false); } } catch (IOException e) { throw new RuntimeException("Unable to set accumulo version: an error occurred.", e); } }