public static synchronized int getAccumuloPersistentVersion(VolumeManager fs) { // It doesn't matter which Volume is used as they should all have the data version stored return getAccumuloPersistentVersion(fs.getVolumes().iterator().next()); }
/**
 * Reads the persistent data version stored on the given volume by resolving its
 * data-version location and delegating to the FileSystem/Path overload.
 */
public static synchronized int getAccumuloPersistentVersion(Volume v) {
  Path dataVersionPath = ServerConstants.getDataVersionLocation(v);
  return getAccumuloPersistentVersion(v.getFileSystem(), dataVersionPath);
}
public static synchronized void updateAccumuloVersion(VolumeManager fs, int oldVersion) { for (Volume volume : fs.getVolumes()) { try { if (getAccumuloPersistentVersion(volume) == oldVersion) { log.debug("Attempting to upgrade {}", volume); Path dataVersionLocation = ServerConstants.getDataVersionLocation(volume); fs.create(new Path(dataVersionLocation, Integer.toString(ServerConstants.DATA_VERSION))) .close(); // TODO document failure mode & recovery if FS permissions cause above to work and below // to fail ACCUMULO-2596 Path prevDataVersionLoc = new Path(dataVersionLocation, Integer.toString(oldVersion)); if (!fs.delete(prevDataVersionLoc)) { throw new RuntimeException("Could not delete previous data version location (" + prevDataVersionLoc + ") for " + volume); } } } catch (IOException e) { throw new RuntimeException("Unable to set accumulo version: an error occurred.", e); } } }
/**
 * Initializes any configured volumes that do not yet contain Accumulo base
 * directories, reusing the instance id found on an already-initialized volume.
 *
 * Fixes: misspelled local {@code uinitializedDirs}; the persistent data version
 * was read twice (the second read, used only for the error message, went through
 * the VolumeManager overload and so could report a different volume's value than
 * the one that actually failed the check).
 *
 * @throws IOException if the stored data version does not match DATA_VERSION, or
 *         on any underlying filesystem/ZooKeeper failure
 */
private static void addVolumes(VolumeManager fs, SiteConfiguration siteConfig,
    Configuration hadoopConf) throws IOException {
  String[] volumeURIs = VolumeConfiguration.getVolumeUris(siteConfig, hadoopConf);

  // Volumes whose base URIs already pass the consistency check.
  HashSet<String> initializedDirs = new HashSet<>(
      Arrays.asList(ServerConstants.checkBaseUris(siteConfig, hadoopConf, volumeURIs, true)));

  // Configured volumes that still need their directory structure created.
  HashSet<String> uninitializedDirs = new HashSet<>(Arrays.asList(volumeURIs));
  uninitializedDirs.removeAll(initializedDirs);

  Path aBasePath = new Path(initializedDirs.iterator().next());
  Path iidPath = new Path(aBasePath, ServerConstants.INSTANCE_ID_DIR);
  Path versionPath = new Path(aBasePath, ServerConstants.VERSION_DIR);

  UUID uuid = UUID.fromString(ZooUtil.getInstanceIDFromHdfs(iidPath, siteConfig, hadoopConf));
  for (Pair<Path,Path> replacementVolume : ServerConstants.getVolumeReplacements(siteConfig,
      hadoopConf)) {
    // A volume marked for replacement must not also be a live instance volume.
    if (aBasePath.equals(replacementVolume.getFirst())) {
      log.error(
          "{} is set to be replaced in {} and should not appear in {}."
              + " It is highly recommended that this property be removed as data"
              + " could still be written to this volume.",
          aBasePath, Property.INSTANCE_VOLUMES_REPLACEMENTS, Property.INSTANCE_VOLUMES);
    }
  }

  // Read the persistent version once and reuse it in the error message, so the
  // reported value is exactly the one that failed the comparison.
  int persistentVersion =
      ServerUtil.getAccumuloPersistentVersion(versionPath.getFileSystem(hadoopConf), versionPath);
  if (persistentVersion != ServerConstants.DATA_VERSION) {
    throw new IOException("Accumulo " + Constants.VERSION + " cannot initialize data version "
        + persistentVersion);
  }

  initDirs(fs, uuid, uninitializedDirs.toArray(new String[0]), true);
}
// Read the on-disk data version, then branch into the upgrade path if it is older
// than the running release. NOTE(review): fragment — the enclosing method and the
// body of this `if` are outside this view.
final int accumuloPersistentVersion = ServerUtil.getAccumuloPersistentVersion(fs); if (ServerUtil.persistentVersionNeedsUpgrade(accumuloPersistentVersion)) {
// Resolve the instance id stored in HDFS, then read the persistent data version
// from <baseDir>/<VERSION_DIR> on that base dir's filesystem. NOTE(review):
// fragment — the opening `try` and the catch body are outside this view.
currentIid = ZooUtil.getInstanceIDFromHdfs(path, conf, hadoopConf); Path vpath = new Path(baseDir, VERSION_DIR); currentVersion = ServerUtil.getAccumuloPersistentVersion(vpath.getFileSystem(hadoopConf), vpath); } catch (Exception e) {
// Log the stored data version, then block until ZooKeeper and HDFS are available.
// NOTE(review): fragment of a larger startup method not fully visible here.
int dataVersion = ServerUtil.getAccumuloPersistentVersion(context.getVolumeManager()); log.info("Data Version {}", dataVersion); ServerUtil.waitForZookeeperAndHdfs(context);
// Same pattern as above: read the on-disk data version and enter the upgrade path
// when it predates this release. NOTE(review): fragment — enclosing method and the
// `if` body are outside this view.
final int accumuloPersistentVersion = ServerUtil.getAccumuloPersistentVersion(fs); if (ServerUtil.persistentVersionNeedsUpgrade(accumuloPersistentVersion)) {