/**
 * Converts a collection of file references to their string path representations.
 *
 * @param refs file references to convert
 * @return a mutable list with one path string per input ref, in iteration order
 */
public static Collection<String> toPathStrings(Collection<FileRef> refs) {
  // Presize to the known result size to avoid intermediate array growth.
  ArrayList<String> ret = new ArrayList<>(refs.size());
  for (FileRef fileRef : refs) {
    ret.add(fileRef.path().toString());
  }
  return ret;
}
/**
 * Moves the temporary compaction output file into its final location.
 *
 * @param fs volume manager used for the existence check and rename
 * @param tmpDatafile temporary file produced by the compaction
 * @param newDatafile final destination path for the compacted file
 * @throws IOException if the rename fails
 * @throws IllegalStateException if the destination already exists
 */
public static void renameReplacement(VolumeManager fs, FileRef tmpDatafile, FileRef newDatafile)
    throws IOException {
  if (fs.exists(newDatafile.path())) {
    // Log with a synthetic Exception so the call site's stack trace is captured.
    log.error("Target map file already exists " + newDatafile, new Exception());
    throw new IllegalStateException("Target map file already exists " + newDatafile);
  }
  DatafileManager.rename(fs, tmpDatafile.path(), newDatafile.path());
}
/**
 * Renames each old datafile into the given directory under a {@code delete+<compactName>+}
 * prefix, marking it for removal once the replacement completes.
 *
 * @param fs volume manager used to perform the renames
 * @param location directory that will hold the delete-marked files
 * @param oldDatafiles files superseded by the compaction
 * @param compactName name of the compaction, used in the delete-marker filename
 * @throws IOException if any rename fails
 */
public static void prepareReplacement(VolumeManager fs, Path location, Set<FileRef> oldDatafiles,
    String compactName) throws IOException {
  for (FileRef oldRef : oldDatafiles) {
    Path oldPath = oldRef.path();
    Path deleteMarker = new Path(location + "/delete+" + compactName + "+" + oldPath.getName());
    DatafileManager.rename(fs, oldPath, deleteMarker);
  }
}
/**
 * Opens readers for the given file references by converting each ref to its path string and
 * delegating to {@code openFiles}.
 *
 * @param files file references to open
 * @return map of open iterator to its file path string, as produced by {@code openFiles}
 * @throws TooManyFilesException if the open-file limit would be exceeded
 * @throws IOException if opening a file fails
 */
private Map<FileSKVIterator,String> openFileRefs(Collection<FileRef> files)
    throws TooManyFilesException, IOException {
  List<String> paths = new ArrayList<>(files.size());
  for (FileRef fileRef : files) {
    paths.add(fileRef.path().toString());
  }
  return openFiles(paths);
}
/**
 * Decides whether a file should be compacted: true exactly when the configured pattern
 * matches the file's input representation.
 */
@Override
public boolean shouldCompact(Entry<FileRef,DataFileValue> file, MajorCompactionRequest request) {
  String input = getInput(file.getKey().path());
  return pattern.matcher(input).matches();
}
public static void finishReplacement(AccumuloConfiguration acuTableConf, VolumeManager fs, Path location, Set<FileRef> oldDatafiles, String compactName) throws IOException { // start deleting files, if we do not finish they will be cleaned // up later for (FileRef ref : oldDatafiles) { Path path = ref.path(); Path deleteFile = new Path(location + "/delete+" + compactName + "+" + path.getName()); if (acuTableConf.getBoolean(Property.GC_TRASH_IGNORE) || !fs.moveToTrash(deleteFile)) fs.deleteRecursively(deleteFile); } }
public static void addDeleteEntries(KeyExtent extent, Set<FileRef> datafilesToDelete, ServerContext context) { Table.ID tableId = extent.getTableId(); // TODO could use batch writer,would need to handle failure and retry like update does - // ACCUMULO-1294 for (FileRef pathToRemove : datafilesToDelete) { update(context, createDeleteMutation(context, tableId, pathToRemove.path().toString()), extent); } }
/**
 * Atomically-in-effect replaces a set of old datafiles with a single compacted file:
 * marks the old files for deletion, moves the temporary output into place, then
 * removes the delete-marked files.
 *
 * @param acuTableConf table configuration forwarded to {@code finishReplacement}
 * @param fs volume manager used for all file operations
 * @param location directory holding the files involved in the replacement
 * @param oldDatafiles files superseded by the compaction
 * @param tmpDatafile temporary compaction output
 * @param newDatafile final location of the compacted file
 * @throws IOException if any step fails
 */
public static void replaceFiles(AccumuloConfiguration acuTableConf, VolumeManager fs,
    Path location, Set<FileRef> oldDatafiles, FileRef tmpDatafile, FileRef newDatafile)
    throws IOException {
  // The compaction name is derived from the new datafile's filename.
  String compactionName = newDatafile.path().getName();
  prepareReplacement(fs, location, oldDatafiles, compactionName);
  renameReplacement(fs, tmpDatafile, newDatafile);
  finishReplacement(acuTableConf, fs, location, oldDatafiles, compactionName);
}
public FileSKVIterator openReader(FileRef ref) throws IOException { Preconditions.checkState(volumeManager != null, "Opening files is not" + " supported at this time. It's only supported when" + " CompactionStrategy.gatherInformation() is called."); // @TODO verify the file isn't some random file in HDFS // @TODO ensure these files are always closed? FileOperations fileFactory = FileOperations.getInstance(); FileSystem ns = volumeManager.getVolumeByPath(ref.path()).getFileSystem(); return fileFactory.newReaderBuilder() .forFile(ref.path().toString(), ns, ns.getConf(), context.getCryptoService()) .withTableConfiguration(tableConfig).seekToBeginning().build(); }
// NOTE(review): fragment of a larger method — the enclosing definition (and a closing brace
// for the first `if`) is not visible here. It appears to verify that every bulk-loaded file
// lives directly under a table directory and that all files share one bulk dir; confirm
// against the full method before relying on this reading.
Path parent = tpath.path().getParent().getParent(); for (String tablesDir : ServerConstants.getTablesDirs(tablet.getContext())) { if (parent.equals(new Path(tablesDir, tablet.getExtent().getTableId().canonicalID()))) { bulkDir = tpath.path().getParent().toString(); else if (!bulkDir.equals(tpath.path().getParent().toString())) throw new IllegalArgumentException("bulk files in different dirs " + bulkDir + " " + tpath);
// Collect the datafile path string for this map entry (fragment of a larger loop).
files.add(entry.getKey().path().toString());
// NOTE(review): fragment — pieces of a reader-open sequence with surrounding statements
// missing. Resolves the filesystem for the map file's volume, builds a rate-limited reader,
// and (presumably in the omitted catch block) reports read failures as FILE_READ problems
// keyed by the map file path — confirm against the full method.
FileSystem fs = this.fs.getVolumeByPath(mapFile.path()).getFileSystem(); FileSKVIterator reader; .forFile(mapFile.path().toString(), fs, fs.getConf(), context.getCryptoService()) .withTableConfiguration(acuTableConf).withRateLimiter(env.getReadLimiter()).build(); extent.getTableId(), mapFile.path().toString(), false, reader); ProblemType.FILE_READ, mapFile.path().toString(), e));
/**
 * Reads the first and last key of every datafile in the given map.
 *
 * @param allFiles datafiles to inspect
 * @return map from each file ref to a pair of (first key, last key) as reported by its reader
 * @throws IOException if any file cannot be opened or read
 */
private Map<FileRef,Pair<Key,Key>> getFirstAndLastKeys(SortedMap<FileRef,DataFileValue> allFiles)
    throws IOException {
  Map<FileRef,Pair<Key,Key>> firstAndLast = new HashMap<>();
  FileOperations fileFactory = FileOperations.getInstance();
  VolumeManager fs = getTabletServer().getFileSystem();
  for (Entry<FileRef,DataFileValue> entry : allFiles.entrySet()) {
    FileRef ref = entry.getKey();
    FileSystem ns = fs.getVolumeByPath(ref.path()).getFileSystem();
    // try-with-resources guarantees the reader is closed even if key lookup throws
    try (FileSKVIterator reader = fileFactory.newReaderBuilder()
        .forFile(ref.path().toString(), ns, ns.getConf(), context.getCryptoService())
        .withTableConfiguration(this.getTableConfiguration()).seekToBeginning().build()) {
      firstAndLast.put(ref, new Pair<>(reader.getFirstKey(), reader.getLastKey()));
    }
  }
  return firstAndLast;
}
// NOTE(review): fragment — statements excerpted from a compaction method whose body (the
// try block, mfw's creation, thread rename) is not visible. The visible parts capture the
// output path/thread name, then best-effort delete the output file and log an error if the
// delete fails while the file still exists; confirm the control flow against the full method.
final Path outputFilePath = outputFile.path(); final String outputFilePathName = outputFilePath.toString(); String oldThreadName = Thread.currentThread().getName(); if (!fs.deleteRecursively(outputFile.path())) { if (fs.exists(outputFile.path())) { log.error("Unable to delete {}", outputFile); mfw.close(); } finally { if (!fs.deleteRecursively(outputFile.path())) if (fs.exists(outputFile.path())) log.error("Unable to delete {}", outputFile);
// NOTE(review): fragment — resolves the filesystem for the mapfile's volume and begins
// building a reader; the builder chain and the matching catch block are not visible here.
FileSystem ns = context.getVolumeManager().getVolumeByPath(mapfile.path()).getFileSystem(); try { reader = FileOperations.getInstance().newReaderBuilder()
// NOTE(review): fragment — duplicates the exists-check/rename logic of renameReplacement but
// inline against the tablet server's filesystem; the enclosing method and the branch
// structure around the final deleteRecursively are not visible. Consider consolidating with
// renameReplacement once the full context is confirmed.
if (tablet.getTabletServer().getFileSystem().exists(newDatafile.path())) { log.error("Target map file already exist " + newDatafile, new Exception()); throw new IllegalStateException("Target map file already exist " + newDatafile); rename(tablet.getTabletServer().getFileSystem(), tmpDatafile.path(), newDatafile.path()); tablet.getTabletServer().getFileSystem().deleteRecursively(newDatafile.path());
// NOTE(review): fragment — loads summary data for each file via SummaryReader, restricted to
// the extent's row range; what is done with `fsc` afterwards is outside this excerpt.
SummarizerFactory factory = new SummarizerFactory(tableConfig); for (FileRef file : files) { FileSystem fs = volumeManager.getVolumeByPath(file.path()).getFileSystem(); Configuration conf = context.getHadoopConf(); SummaryCollection fsc = SummaryReader .load(fs, conf, factory, file.path(), summarySelector, summaryCache, indexCache, fileLenCache, context.getCryptoService()) .getSummaries(Collections.singletonList(new Gatherer.RowRange(extent)));
public static WritableComparable<Key> findLastKey(ServerContext context, Collection<FileRef> mapFiles) throws IOException { Key lastKey = null; for (FileRef ref : mapFiles) { Path path = ref.path(); FileSystem ns = context.getVolumeManager().getVolumeByPath(path).getFileSystem(); FileSKVIterator reader = FileOperations.getInstance().newReaderBuilder() .forFile(path.toString(), ns, ns.getConf(), context.getCryptoService()) .withTableConfiguration(context.getConfiguration()).seekToBeginning().build(); try { if (!reader.hasTop()) // file is empty, so there is no last key continue; Key key = reader.getLastKey(); if (lastKey == null || key.compareTo(lastKey) > 0) lastKey = key; } finally { try { if (reader != null) reader.close(); } catch (IOException e) { log.error("{}", e.getMessage(), e); } } } return lastKey; }
// NOTE(review): fragment — picks the next mapfile name ("F" for flush, "M" when merging,
// judging by the mergeFile check), derives a "_tmp" working file, and starts a
// "waitForCommits" trace span; the synchronized body and the retry loop around the
// break/catch are not visible here.
try { FileRef newMapfileLocation = tablet.getNextMapFilename(mergeFile == null ? "F" : "M"); FileRef tmpFileRef = new FileRef(newMapfileLocation.path() + "_tmp"); Span span = Trace.start("waitForCommits"); synchronized (tablet) { commitSession.getWALogSeq() + 1, newMapfileLocation.path().toString()); break; } catch (IOException e) {
// NOTE(review): fragment — error-handling tail of a compaction: logs the failure, then
// best-effort deletes the cancelled compaction's output file, downgrading a delete failure
// to a warning. The try block these catches belong to is not visible here.
log.error("{}", e.getMessage(), e); fs.deleteRecursively(outputFile.path()); } catch (Exception e) { log.warn("Failed to delete Canceled compaction output file {}", outputFile, e);