/**
 * Returns the default buffer size to use during writes.
 *
 * <p>The size of the buffer should probably be a multiple of hardware
 * page size (4096 on Intel x86), and it determines how much data is
 * buffered during read and write operations.
 *
 * @param fs filesystem object
 * @return default buffer size to use during writes
 */
public static int getDefaultBufferSize(final FileSystem fs) {
  final Configuration conf = fs.getConf();
  return conf.getInt("io.file.buffer.size", 4096);
}
/**
 * Resolves a usable {@link Configuration}, preferring the explicit one and
 * falling back to the filesystem's configuration.
 *
 * @param configuration explicit configuration, may be null
 * @param fileSystem fallback source of configuration, may be null
 * @return a non-null configuration
 * @throws IOException if neither input can supply a configuration
 */
private static Configuration resolveConfiguration(Configuration configuration, FileSystem fileSystem)
    throws IOException {
  if (configuration == null) {
    if (fileSystem == null) {
      throw new IOException("FileSystem configuration could not be determined from available inputs.");
    }
    return fileSystem.getConf();
  }
  return configuration;
}
/** * Return the number of bytes that large input files should be optimally * be split into to minimize I/O time. * @deprecated use {@link #getDefaultBlockSize(Path)} instead */ @Deprecated public long getDefaultBlockSize() { // default to 32MB: large enough to minimize the impact of seeks return getConf().getLong("fs.local.block.size", 32 * 1024 * 1024); }
/**
 * Opens an FSDataInputStream at the indicated Path, using the buffer size
 * configured under {@code IO_FILE_BUFFER_SIZE_KEY}.
 *
 * @param f the file to open
 * @return an open input stream for the file
 * @throws IOException IO failure
 */
public FSDataInputStream open(Path f) throws IOException {
  final int bufferSize = getConf().getInt(IO_FILE_BUFFER_SIZE_KEY, IO_FILE_BUFFER_SIZE_DEFAULT);
  return open(f, bufferSize);
}
/**
 * Returns the {@link FileSystem} backing the WAL root directory, copying
 * the unsafe-stream-capability-enforce setting onto that filesystem's
 * configuration when it is set.
 *
 * @param c configuration used to resolve the WAL root dir and its filesystem
 * @return the filesystem serving the WAL root directory
 * @throws IOException if the WAL root dir or its filesystem cannot be resolved
 */
public static FileSystem getWALFileSystem(final Configuration c) throws IOException {
  Path p = getWALRootDir(c);
  FileSystem fs = p.getFileSystem(c);
  // hadoop-core does fs caching, so need to propagate this if set
  String enforceStreamCapability = c.get(UNSAFE_STREAM_CAPABILITY_ENFORCE);
  if (enforceStreamCapability != null) {
    fs.getConf().set(UNSAFE_STREAM_CAPABILITY_ENFORCE, enforceStreamCapability);
  }
  return fs;
}
@Override
public Boolean run() throws Exception {
  // Re-resolve the filesystem so the ownership walk runs as the target user.
  final FileSystem userFs = FileSystem.get(fs.getUri(), fs.getConf());
  return checkIsOwnerOfFileHierarchy(userFs, fileStatus, userName, recurse);
} });
/**
 * Cancels the delegation token against the (weakly held) filesystem's
 * configuration; a no-op if the filesystem has already been collected.
 */
private void cancel() throws IOException, InterruptedException {
  final T fs = weakFs.get();
  if (fs == null) {
    return; // filesystem already garbage-collected; nothing to cancel against
  }
  token.cancel(fs.getConf());
}
public FsStateStore(FileSystem fs, String storeRootDir, Class<T> stateClass) { this.fs = fs; this.useTmpFileForPut = !FS_SCHEMES_NON_ATOMIC.contains(this.fs.getUri().getScheme()); this.conf = getConf(this.fs.getConf()); this.storeRootDir = storeRootDir; this.stateClass = stateClass; }
@Override
public void initialize(URI name, Configuration conf) throws IOException {
  // Initialize the wrapped filesystem only if it has no configuration yet.
  if (fs.getConf() == null) {
    fs.initialize(name, conf);
  }
  // Record the requested scheme if it differs from the wrapped fs's scheme.
  final String requestedScheme = name.getScheme();
  if (!requestedScheme.equals(fs.getUri().getScheme())) {
    swapScheme = requestedScheme;
  }
}
/**
 * Read the {@link Token}s stored in the token file.
 *
 * @param tokenFilePath path of the file containing serialized tokens
 * @return the tokens read from the file
 * @throws IOException if the token file cannot be read
 */
@VisibleForTesting
Collection<Token<? extends TokenIdentifier>> readDelegationTokens(Path tokenFilePath)
    throws IOException {
  LOGGER.info("Reading updated token from token file: " + tokenFilePath);
  final Configuration conf = this.fs.getConf();
  return YarnHelixUtils.readTokensFromFile(tokenFilePath, conf);
}
/**
 * Creates a reader positioned at the start of the sequence file, using the
 * buffer size from {@code conf} when present (key {@code BUFFER_SIZE}).
 */
public SequenceFileReader(FileSystem fs, Path file, Map<String, Object> conf) throws IOException {
  super(fs, file);
  final Configuration hadoopConf = fs.getConf();
  final int bufferSize = conf.containsKey(BUFFER_SIZE)
      ? Integer.parseInt(conf.get(BUFFER_SIZE).toString())
      : DEFAULT_BUFF_SIZE;
  this.reader = new SequenceFile.Reader(hadoopConf,
      SequenceFile.Reader.file(file),
      SequenceFile.Reader.bufferSize(bufferSize));
  this.key = (Key) ReflectionUtils.newInstance(reader.getKeyClass(), hadoopConf);
  this.value = (Value) ReflectionUtils.newInstance(reader.getValueClass(), hadoopConf);
  this.offset = new SequenceFileReader.Offset(0, 0, 0);
}
/**
 * Creates a reader resumed at a previously recorded string-encoded offset,
 * using the buffer size from {@code conf} when present (key {@code BUFFER_SIZE}).
 */
public SequenceFileReader(FileSystem fs, Path file, Map<String, Object> conf, String offset)
    throws IOException {
  super(fs, file);
  final Configuration hadoopConf = fs.getConf();
  final int bufferSize = conf.containsKey(BUFFER_SIZE)
      ? Integer.parseInt(conf.get(BUFFER_SIZE).toString())
      : DEFAULT_BUFF_SIZE;
  this.offset = new SequenceFileReader.Offset(offset);
  this.reader = new SequenceFile.Reader(hadoopConf,
      SequenceFile.Reader.file(file),
      SequenceFile.Reader.bufferSize(bufferSize));
  this.key = (Key) ReflectionUtils.newInstance(reader.getKeyClass(), hadoopConf);
  this.value = (Value) ReflectionUtils.newInstance(reader.getValueClass(), hadoopConf);
  skipToOffset(this.reader, this.offset, this.key);
}
@Override
public Object run() throws Exception {
  // Re-resolve the filesystem so the access check runs as the target user.
  final FileSystem userFs = FileSystem.get(fs.getUri(), fs.getConf());
  ShimLoader.getHadoopShims().checkFileAccess(userFs, stat, action);
  addChildren(userFs, stat.getPath(), children);
  return null;
} });
/**
 * Moves each of the given "good" files into the dataset's output path,
 * renaming via the late-output record-count provider.
 *
 * @param goodPaths files to relocate into the dataset output path
 * @throws IOException if any move fails
 */
private void addGoodFilesToOutputPath(List<Path> goodPaths) throws IOException {
  // The configuration is the same for every iteration; look it up once.
  final Configuration conf = this.fs.getConf();
  for (Path goodPath : goodPaths) {
    final String fileName = goodPath.getName();
    LOG.info(String.format("Adding %s to %s", goodPath.toString(), this.dataset.outputPath()));
    final Path outPath = MRCompactorJobRunner.this.lateOutputRecordCountProvider
        .constructLateFilePath(fileName, MRCompactorJobRunner.this.fs, this.dataset.outputPath());
    final FileSystem outFs = FileSystem.get(this.dataset.outputPath().getParent().toUri(), conf);
    HadoopUtils.movePath(MRCompactorJobRunner.this.tmpFs, goodPath, outFs, outPath, false, conf);
  }
}
@Override
public void execute() throws IOException {
  // Lazily resolve source/destination filesystems when not injected.
  if (this.srcFs == null) {
    this.srcFs = getFileSystem(this.srcFsUri);
  }
  if (this.dstFs == null) {
    this.dstFs = getFileSystem(this.dstFsUri);
  }
  log.info(String.format("Moving %s to %s", this.srcPath, this.dstPath));
  HadoopUtils.movePath(this.srcFs, this.srcPath, this.dstFs, this.dstPath, this.overwrite,
      this.dstFs.getConf());
}
}
/**
 * Get Avro schema from an Avro data file.
 *
 * @param dataFile path of the Avro data file to inspect
 * @param fs filesystem on which the data file lives
 * @return the schema embedded in the data file's header
 * @throws IOException if the file cannot be opened or read
 */
public static Schema getSchemaFromDataFile(Path dataFile, FileSystem fs) throws IOException {
  try (SeekableInput input = new FsInput(dataFile, fs.getConf());
      DataFileReader<GenericRecord> reader =
          new DataFileReader<>(input, new GenericDatumReader<GenericRecord>())) {
    return reader.getSchema();
  }
}
/** * Renew the existing delegation token. */ private synchronized void renewDelegationToken() throws IOException, InterruptedException { this.token.renew(this.fs.getConf()); writeDelegationTokenToFile(); if (!this.firstLogin) { // Send a message to the controller and all the participants if this is not the first login sendTokenFileUpdatedMessage(InstanceType.CONTROLLER); sendTokenFileUpdatedMessage(InstanceType.PARTICIPANT); } }
protected HdfsAdmin getHdfsAdmin() { try { // Currently HdfsAdmin is the only public API that allows access to the inotify API. Because of this we need to have super user rights in HDFS. return new HdfsAdmin(getFileSystem().getUri(), getFileSystem().getConf()); } catch (IOException e) { getLogger().error("Unable to get and instance of HDFS admin. You must be an HDFS super user to view HDFS events."); throw new ProcessException(e); } }
@Test public void testFsUriSetProperly() throws Exception { HMaster master = UTIL.getMiniHBaseCluster().getMaster(); MasterFileSystem fs = master.getMasterFileSystem(); Path masterRoot = FSUtils.getRootDir(fs.getConfiguration()); Path rootDir = FSUtils.getRootDir(fs.getFileSystem().getConf()); // make sure the fs and the found root dir have the same scheme LOG.debug("from fs uri:" + FileSystem.getDefaultUri(fs.getFileSystem().getConf())); LOG.debug("from configuration uri:" + FileSystem.getDefaultUri(fs.getConfiguration())); // make sure the set uri matches by forcing it. assertEquals(masterRoot, rootDir); } }
@Test
public void testFsUriSetProperly() throws Exception {
  final HMaster master = UTIL.getMiniHBaseCluster().getMaster();
  final MasterFileSystem masterFs = master.getMasterFileSystem();
  // Root dir derived from the configuration must match the one derived
  // from the filesystem's own configuration.
  final Path masterRoot = FSUtils.getRootDir(masterFs.getConfiguration());
  final Path rootDir = FSUtils.getRootDir(masterFs.getFileSystem().getConf());
  assertEquals(masterRoot, rootDir);
  // The WAL root dir must likewise agree with the test configuration.
  assertEquals(FSUtils.getWALRootDir(UTIL.getConfiguration()), masterFs.getWALRootDir());
}
}