public static synchronized Path getAccumuloInstanceIdPath(VolumeManager fs) { // It doesn't matter which Volume is used as they should all have the instance ID stored Volume v = fs.getVolumes().iterator().next(); return ServerConstants.getInstanceIdLocation(v); }
public static synchronized int getAccumuloPersistentVersion(VolumeManager fs) { // It doesn't matter which Volume is used as they should all have the data version stored return getAccumuloPersistentVersion(fs.getVolumes().iterator().next()); }
private static void updateHdfs(VolumeManager fs, String newInstanceId) throws IOException { // Need to recreate the instanceId on all of them to keep consistency for (Volume v : fs.getVolumes()) { final Path instanceId = ServerConstants.getInstanceIdLocation(v); if (!v.getFileSystem().delete(instanceId, true)) { throw new IOException("Could not recursively delete " + instanceId); } if (!v.getFileSystem().mkdirs(instanceId)) { throw new IOException("Could not create directory " + instanceId); } v.getFileSystem().create(new Path(instanceId, newInstanceId)).close(); } }
public static synchronized void updateAccumuloVersion(VolumeManager fs, int oldVersion) { for (Volume volume : fs.getVolumes()) { try { if (getAccumuloPersistentVersion(volume) == oldVersion) { log.debug("Attempting to upgrade {}", volume); Path dataVersionLocation = ServerConstants.getDataVersionLocation(volume); fs.create(new Path(dataVersionLocation, Integer.toString(ServerConstants.DATA_VERSION))) .close(); // TODO document failure mode & recovery if FS permissions cause above to work and below // to fail ACCUMULO-2596 Path prevDataVersionLoc = new Path(dataVersionLocation, Integer.toString(oldVersion)); if (!fs.delete(prevDataVersionLoc)) { throw new RuntimeException("Could not delete previous data version location (" + prevDataVersionLoc + ") for " + volume); } } } catch (IOException e) { throw new RuntimeException("Unable to set accumulo version: an error occurred.", e); } } }
private static void verifyHdfsWritePermission(VolumeManager fs) throws Exception {
  // Fail fast if the instance ID location on any volume is not writable by us.
  for (Volume volume : fs.getVolumes()) {
    final Path instanceIdPath = ServerConstants.getInstanceIdLocation(volume);
    final FileStatus status = volume.getFileSystem().getFileStatus(instanceIdPath);
    checkHdfsAccessPermissions(status, FsAction.WRITE);
  }
}
public static int randomize(ServerContext context, String tableName) throws AccumuloException, AccumuloSecurityException, TableNotFoundException { final VolumeManager vm = context.getVolumeManager(); if (vm.getVolumes().size() < 2) { log.error("There are not enough volumes configured"); return 1;
// NOTE(review): assumes at least one volume is configured — volumes.get(0) throws
// IndexOutOfBoundsException on an empty list; verify against the caller.
VolumeManager volumeManager = new VolumeManager();
List<Volume> volumes = volumeManager.getVolumes();
Volume volume = volumes.get(0); // simplified
File rootFolder = volume.getRoot();
// Resolve a fixed image path relative to the volume root.
File imgFile = new File(rootFolder, "image/1.jpg");
public static synchronized Path getAccumuloInstanceIdPath(VolumeManager fs) { // It doesn't matter which Volume is used as they should all have the instance ID stored Volume v = fs.getVolumes().iterator().next(); return ServerConstants.getInstanceIdLocation(v); }
public static synchronized int getAccumuloPersistentVersion(VolumeManager fs) { // It doesn't matter which Volume is used as they should all have the data version stored return getAccumuloPersistentVersion(fs.getVolumes().iterator().next()); }
// Walk every configured volume and enumerate the files directly under its root.
VolumeManager volumeManager = new VolumeManager();
List<Volume> volumes = volumeManager.getVolumes();
for (Volume volume : volumes) {
  String label = volume.getLabel();
  File rootFolder = volume.getRoot();
  // File.listFiles() returns null (not an empty array) when the directory does not
  // exist or cannot be read — the original enhanced-for would NPE in that case.
  File[] files = rootFolder.listFiles();
  if (files == null) {
    continue;
  }
  for (File file : files) {
    String filename = file.getName();
  }
}
private static void updateHdfs(VolumeManager fs, Instance inst, String newInstanceId) throws IOException { // Need to recreate the instanceId on all of them to keep consistency for (Volume v : fs.getVolumes()) { final Path instanceId = ServerConstants.getInstanceIdLocation(v); if (!v.getFileSystem().delete(instanceId, true)) { throw new IOException("Could not recursively delete " + instanceId); } if (!v.getFileSystem().mkdirs(instanceId)) { throw new IOException("Could not create directory " + instanceId); } v.getFileSystem().create(new Path(instanceId, newInstanceId)).close(); } }
public static synchronized void updateAccumuloVersion(VolumeManager fs, int oldVersion) { for (Volume volume : fs.getVolumes()) { try { if (getAccumuloPersistentVersion(volume) == oldVersion) { log.debug("Attempting to upgrade " + volume); Path dataVersionLocation = ServerConstants.getDataVersionLocation(volume); fs.create(new Path(dataVersionLocation, Integer.toString(ServerConstants.DATA_VERSION))) .close(); // TODO document failure mode & recovery if FS permissions cause above to work and below // to fail ACCUMULO-2596 Path prevDataVersionLoc = new Path(dataVersionLocation, Integer.toString(oldVersion)); if (!fs.delete(prevDataVersionLoc)) { throw new RuntimeException("Could not delete previous data version location (" + prevDataVersionLoc + ") for " + volume); } } } catch (IOException e) { throw new RuntimeException("Unable to set accumulo version: an error occurred.", e); } } }
private static void verifyHdfsWritePermission(VolumeManager fs) throws Exception {
  // Verify we hold write access to the instance ID directory on every volume.
  for (Volume vol : fs.getVolumes()) {
    Path idPath = ServerConstants.getInstanceIdLocation(vol);
    FileStatus stat = vol.getFileSystem().getFileStatus(idPath);
    checkHdfsAccessPermissions(stat, FsAction.WRITE);
  }
}
public static int randomize(Connector c, String tableName) throws IOException, AccumuloSecurityException, AccumuloException, TableNotFoundException { final VolumeManager vm = VolumeManagerImpl.get(); if (vm.getVolumes().size() < 2) { log.error("There are not enough volumes configured"); return 1;