/**
 * Creates the garbage collector, capturing the server context and CLI options, and logs the
 * effective configuration (delays, safemode/verbose flags, memory threshold, delete threads).
 *
 * @param opts parsed command-line options (safemode, verbose, ...)
 * @param context the server context providing configuration and the volume manager
 */
public SimpleGarbageCollector(Opts opts, ServerContext context) {
  this.context = context;
  this.opts = opts;
  this.fs = context.getVolumeManager();

  long gcDelay = getConfiguration().getTimeInMillis(Property.GC_CYCLE_DELAY);
  log.info("start delay: {} milliseconds", getStartDelay());
  log.info("time delay: {} milliseconds", gcDelay);
  log.info("safemode: {}", opts.safeMode);
  log.info("verbose: {}", opts.verbose);
  // BUG FIX: the original format string had a single {} placeholder but two arguments,
  // so the max-memory value was silently dropped from the log output.
  log.info("memory threshold: {} of {} bytes", CANDIDATE_MEMORY_PERCENTAGE,
      Runtime.getRuntime().maxMemory());
  log.info("delete threads: {}", getNumDeleteThreads());
}
private static Path createTmpDir(ServerContext context, String tabletDirectory) throws IOException { VolumeManager fs = context.getVolumeManager(); Path result = null; while (result == null) { result = new Path(tabletDirectory + Path.SEPARATOR + "tmp/idxReduce_" + String.format("%09d", new SecureRandom().nextInt(Integer.MAX_VALUE))); try { fs.getFileStatus(result); result = null; continue; } catch (FileNotFoundException fne) { // found an unused temp directory } fs.mkdirs(result); // try to reserve the tmp dir // In some versions of hadoop, two clients concurrently trying to create the same directory // might both return true // Creating a file is not subject to this, so create a special file to make sure we solely // will use this directory if (!fs.createNewFile(new Path(result, "__reserve"))) result = null; } return result; }
/**
 * Processes one copy work unit. The payload encodes "sourcePath,destPath" in UTF-8. The source
 * is copied to a ".tmp" sibling of the destination and then renamed into place so a partial
 * copy is never visible at the destination. On any failure, an empty file is created at the
 * destination as a failure flag.
 *
 * @param workID identifier of the work unit (unused here beyond the work framework contract)
 * @param data UTF-8 bytes of "origPath,destPath"
 */
@Override
public void process(String workID, byte[] data) {
  String[] paths = new String(data, UTF_8).split(",");
  Path orig = new Path(paths[0]);
  Path dest = new Path(paths[1]);
  Path tmp = new Path(dest.getParent(), dest.getName() + ".tmp");
  VolumeManager vm = context.getVolumeManager();
  try {
    FileSystem origFs = vm.getVolumeByPath(orig).getFileSystem();
    FileSystem destFs = vm.getVolumeByPath(dest).getFileSystem();
    FileUtil.copy(origFs, orig, destFs, tmp, false, true, context.getHadoopConf());
    // BUG FIX: FileSystem.rename reports failure via its boolean return value rather than an
    // exception. The original ignored it, which could leave the .tmp file behind with neither
    // a destination file nor a failure flag. Surface the failure so the catch below flags it.
    if (!destFs.rename(tmp, dest)) {
      throw new IOException("rename of " + tmp + " to " + dest + " returned false");
    }
    log.debug("copied {} to {}", orig, dest);
  } catch (IOException ex) {
    try {
      // Mark the transfer as failed by creating an empty file at the destination.
      FileSystem destFs = vm.getVolumeByPath(dest).getFileSystem();
      destFs.create(dest).close();
      log.warn(" marked " + dest + " failed", ex);
    } catch (IOException e) {
      log.error("Unable to create failure flag file " + dest, e);
    }
  }
}
/**
 * Builds a mutation that records a delete-candidate entry in the metadata deletes section for
 * the given file path of the given table.
 *
 * @param context server context used to resolve the full path of the file
 * @param tableId table the file belongs to
 * @param pathToRemove path (possibly relative) of the file to flag for deletion
 * @return a mutation whose row is the deletes-section prefix plus the resolved full path, with
 *         a single empty column/value marking the candidate
 */
public static Mutation createDeleteMutation(ServerContext context, Table.ID tableId,
    String pathToRemove) {
  Path fullPath = context.getVolumeManager().getFullPath(tableId, pathToRemove);
  Text deleteRow = new Text(MetadataSchema.DeletesSection.getRowPrefix() + fullPath);
  Mutation delFlag = new Mutation(deleteRow);
  delFlag.put(EMPTY_TEXT, EMPTY_TEXT, new Value(new byte[] {}));
  return delFlag;
}
/**
 * Returns the bulk-loaded files recorded in the metadata row for the given extent, grouped by
 * the transaction id that loaded them.
 *
 * @param context server context providing the scanner and volume manager
 * @param extent tablet extent whose metadata row is scanned
 * @return map from bulk-load transaction id to the files loaded under that transaction
 */
public static Map<Long,? extends Collection<FileRef>> getBulkFilesLoaded(ServerContext context,
    KeyExtent extent) {
  Text metadataRow = extent.getMetadataEntry();
  Map<Long,List<FileRef>> result = new HashMap<>();
  VolumeManager fs = context.getVolumeManager();
  // Metadata for meta tablets lives in the root table; everything else in the metadata table.
  try (Scanner scanner = new ScannerImpl(context,
      extent.isMeta() ? RootTable.ID : MetadataTable.ID, Authorizations.EMPTY)) {
    scanner.setRange(new Range(metadataRow));
    scanner.fetchColumnFamily(TabletsSection.BulkFileColumnFamily.NAME);
    for (Entry<Key,Value> entry : scanner) {
      // The value of a bulk-file column is the loading transaction id.
      Long tid = Long.parseLong(entry.getValue().toString());
      // computeIfAbsent replaces the manual get / null-check / put idiom.
      result.computeIfAbsent(tid, k -> new ArrayList<>()).add(new FileRef(fs, entry.getKey()));
    }
  }
  return result;
}
// NOTE(review): fragment of a larger method — the enclosing definition (and the origins of
// tableName/range) are not visible here.
VolumeManager fs = context.getVolumeManager();
// Open a scanner over tableName with empty authorizations, restricted to the given range.
Scanner metadata = context.createScanner(tableName, Authorizations.EMPTY);
metadata.setRange(range);
/**
 * Command-line entry point: parses options, connects a client, and prints per-table disk usage
 * for the requested tables.
 *
 * @param args command-line arguments parsed into {@code Opts}
 * @throws Exception if argument parsing, client creation, or the usage scan fails
 */
public static void main(String[] args) throws Exception {
  Opts opts = new Opts();
  opts.parseArgs(TableDiskUsage.class.getName(), args);

  try (AccumuloClient client = opts.createClient()) {
    VolumeManager volumeManager = opts.getServerContext().getVolumeManager();
    org.apache.accumulo.server.util.TableDiskUsage.printDiskUsage(opts.tables, volumeManager,
        client, false);
  }
}
public static List<FileRef> getBulkFilesLoaded(ServerContext context, AccumuloClient client, KeyExtent extent, long tid) { List<FileRef> result = new ArrayList<>(); try (Scanner mscanner = new IsolatedScanner(client.createScanner( extent.isMeta() ? RootTable.NAME : MetadataTable.NAME, Authorizations.EMPTY))) { VolumeManager fs = context.getVolumeManager(); mscanner.setRange(extent.toMetadataRange()); mscanner.fetchColumnFamily(TabletsSection.BulkFileColumnFamily.NAME); for (Entry<Key,Value> entry : mscanner) { if (Long.parseLong(entry.getValue().toString()) == tid) { result.add(new FileRef(fs, entry.getKey())); } } return result; } catch (TableNotFoundException ex) { // unlikely throw new RuntimeException("Onos! teh metadata table has vanished!!"); } }
public WeightedRandomCollection(List<String> options, VolumeChooserEnvironment env, Random random) { this.random = random; if (options.size() < 1) { throw new IllegalStateException("Options was empty! No valid volumes to choose from."); } VolumeManager manager = env.getServerContext().getVolumeManager(); // Compute percentage space available on each volume for (String option : options) { FileSystem pathFs = manager.getVolumeByPath(new Path(option)).getFileSystem(); try { FsStatus optionStatus = pathFs.getStatus(); double percentFree = ((double) optionStatus.getRemaining() / optionStatus.getCapacity()); add(percentFree, option); } catch (IOException e) { log.error("Unable to get file system status for" + option, e); } } if (map.size() < 1) { throw new IllegalStateException( "Weighted options was empty! Could indicate an issue getting file system status or " + "no free space on any volume"); } }
// NOTE(review): fragment of a larger method — the enclosing definition is not visible here.
opts.parseArgs(LocalityCheck.class.getName(), args);
VolumeManager fs = opts.getServerContext().getVolumeManager();
try (AccumuloClient accumuloClient = opts.createClient()) {
  // Scan the metadata table with empty authorizations; scope of this try continues beyond
  // this fragment.
  Scanner scanner = accumuloClient.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
public static SortedMap<FileRef,DataFileValue> getDataFileSizes(KeyExtent extent, ServerContext context) { TreeMap<FileRef,DataFileValue> sizes = new TreeMap<>(); try (Scanner mdScanner = new ScannerImpl(context, MetadataTable.ID, Authorizations.EMPTY)) { mdScanner.fetchColumnFamily(DataFileColumnFamily.NAME); Text row = extent.getMetadataEntry(); Key endKey = new Key(row, DataFileColumnFamily.NAME, new Text("")); endKey = endKey.followingKey(PartialKey.ROW_COLFAM); mdScanner.setRange(new Range(new Key(row), endKey)); for (Entry<Key,Value> entry : mdScanner) { if (!entry.getKey().getRow().equals(row)) break; DataFileValue dfv = new DataFileValue(entry.getValue().get()); sizes.put(new FileRef(context.getVolumeManager(), entry.getKey()), dfv); } return sizes; } }
// NOTE(review): fragment of a larger method — the enclosing definition is not visible here.
// Read the persistent data version from the volume manager and log it, then block until
// ZooKeeper and HDFS are available.
int dataVersion = ServerUtil.getAccumuloPersistentVersion(context.getVolumeManager());
log.info("Data Version {}", dataVersion);
ServerUtil.waitForZookeeperAndHdfs(context);
@Override public void execute(final String[] args) throws Exception { Opts opts = new Opts(); opts.parseArgs(ZooKeeperMain.class.getName(), args); try (ServerContext context = new ServerContext(new SiteConfiguration())) { FileSystem fs = context.getVolumeManager().getDefaultVolume().getFileSystem(); String baseDir = ServerConstants.getBaseUris(context)[0]; System.out.println("Using " + fs.makeQualified(new Path(baseDir + "/instance_id")) + " to lookup accumulo instance"); if (opts.servers == null) { opts.servers = context.getZooKeepers(); } System.out.println("The accumulo instance id is " + context.getInstanceID()); if (!opts.servers.contains("/")) opts.servers += "/accumulo/" + context.getInstanceID(); org.apache.zookeeper.ZooKeeperMain .main(new String[] {"-server", opts.servers, "-timeout", "" + (opts.timeout * 1000)}); } } }
/**
 * Estimates, per extent, how much of the given map file's size falls within each extent.
 * Resolves the filesystem for the file's volume and delegates to
 * {@code BulkImport.estimateSizes}.
 *
 * @param context server context supplying configuration, volume manager, and crypto service
 * @param mapFile file whose size distribution is estimated
 * @param fileSize total size of the file in bytes
 * @param extents extents to apportion the size across
 * @return estimated bytes of the file per extent
 * @throws IOException if reading the file index fails
 */
public static Map<KeyExtent,Long> estimateSizes(ServerContext context, Path mapFile,
    long fileSize, List<KeyExtent> extents) throws IOException {
  FileSystem fileSystem = context.getVolumeManager().getVolumeByPath(mapFile).getFileSystem();
  return BulkImport.estimateSizes(context.getConfiguration(), mapFile, fileSize, extents,
      fileSystem, null, context.getCryptoService());
}
// NOTE(review): fragment of a larger method — the enclosing definition (and `result`) are not
// visible here.
TreeMap<FileRef,DataFileValue> sizes = new TreeMap<>();
VolumeManager fs = context.getVolumeManager();
// The root tablet's log entries are stored outside the metadata table, so handle it specially.
if (extent.isRootTablet()) {
  getRootLogEntries(context, result);
// NOTE(review): fragment of a larger method — the reader construction continues past this
// fragment and the enclosing definition is not visible here.
FileSystem ns = context.getVolumeManager().getVolumeByPath(mapfile.path()).getFileSystem();
try {
  reader = FileOperations.getInstance().newReaderBuilder()
// NOTE(review): fragment of a larger method — presumably inside a metadata-scan loop over
// entry; enclosing definition not visible here.
origDatafileSizes.put(new FileRef(context.getVolumeManager(), entry.getKey()),
    new DataFileValue(entry.getValue().get()));
public static WritableComparable<Key> findLastKey(ServerContext context, Collection<FileRef> mapFiles) throws IOException { Key lastKey = null; for (FileRef ref : mapFiles) { Path path = ref.path(); FileSystem ns = context.getVolumeManager().getVolumeByPath(path).getFileSystem(); FileSKVIterator reader = FileOperations.getInstance().newReaderBuilder() .forFile(path.toString(), ns, ns.getConf(), context.getCryptoService()) .withTableConfiguration(context.getConfiguration()).seekToBeginning().build(); try { if (!reader.hasTop()) // file is empty, so there is no last key continue; Key key = reader.getLastKey(); if (lastKey == null || key.compareTo(lastKey) > 0) lastKey = key; } finally { try { if (reader != null) reader.close(); } catch (IOException e) { log.error("{}", e.getMessage(), e); } } } return lastKey; }
/**
 * During an upgrade from 1.6 to 1.7, we need to add the replication table
 */
public static void createReplicationTable(ServerContext context) {
  // Choose a volume for the replication table's default tablet directory.
  VolumeChooserEnvironment chooserEnv =
      new VolumeChooserEnvironment(ReplicationTable.ID, context);
  String dir =
      context.getVolumeManager().choose(chooserEnv, ServerConstants.getBaseUris(context))
          + Constants.HDFS_TABLES_DIR + Path.SEPARATOR + ReplicationTable.ID
          + Constants.DEFAULT_TABLET_LOCATION;

  // Build the metadata row for the replication table's single (null end row) tablet:
  // directory, time (logical time starting at 0), and an empty previous end row.
  Mutation m = new Mutation(new Text(TabletsSection.getRow(ReplicationTable.ID, null)));
  m.put(DIRECTORY_COLUMN.getColumnFamily(), DIRECTORY_COLUMN.getColumnQualifier(), 0,
      new Value(dir.getBytes(UTF_8)));
  m.put(TIME_COLUMN.getColumnFamily(), TIME_COLUMN.getColumnQualifier(), 0,
      new Value((TabletTime.LOGICAL_TIME_ID + "0").getBytes(UTF_8)));
  m.put(PREV_ROW_COLUMN.getColumnFamily(), PREV_ROW_COLUMN.getColumnQualifier(), 0,
      KeyExtent.encodePrevEndRow(null));
  // Write the row into the metadata table.
  update(context, getMetadataTable(context), null, m);
}
// NOTE(review): fragment of a larger method — `key`, `bw`, and `tableId` come from the
// enclosing definition, which is not visible here.
FileRef ref = new FileRef(context.getVolumeManager(), key);
// Queue a delete-flag mutation for this file via the shared batch writer.
bw.addMutation(createDeleteMutation(context, tableId, ref.meta().toString()));