/** Returns the block size, in bytes, recorded on the wrapped {@code status}. */
@Override
public long groupBlockSize() {
  return status.getBlockSize();
}
/** Returns the block size, in bytes, reported by the resolved {@code realStatus}. */
@Override
public long getBlockSize() {
  return realStatus.getBlockSize();
}
/** Returns the block size, in bytes, delegating to the underlying {@code myFs}. */
@Override
public long getBlockSize() {
  return myFs.getBlockSize();
}
/**
 * Returns the effective block size for this file, in bytes.
 *
 * <p>A file shorter than one block reports its own length instead of the
 * nominal block size, so callers never see a block size larger than the file.
 */
@Override
public long getBlockSize() {
  // Equivalent to the explicit comparison: cap the block size at the file length.
  return Math.min(fileStatus.getBlockSize(), fileStatus.getLen());
}
/**
 * Get the block size for a particular file.
 *
 * @param f the filename
 * @return the number of bytes in a block
 * @deprecated Use {@link #getFileStatus(Path)} instead
 * @throws FileNotFoundException if the path is not present
 * @throws IOException IO failure
 */
@Deprecated
public long getBlockSize(Path f) throws IOException {
  FileStatus status = getFileStatus(f);
  return status.getBlockSize();
}
/**
 * Returns the block size, in bytes, of the file at {@code path}.
 *
 * @param path the file path, as a raw string
 * @return the block size of the file in bytes
 * @throws FileNotFoundException if no file exists at {@code path}; the
 *         exception message is the raw path, matching the previous contract
 * @throws IOException on other IO failure
 */
@Override
public long getBlockSizeByte(String path) throws IOException {
  Path tPath = new Path(path);
  FileSystem hdfs = getFs();
  try {
    // Single round trip: getFileStatus() already fails on a missing path, so
    // the former exists() pre-check was a redundant RPC and a TOCTOU race
    // (the file could vanish between exists() and getFileStatus()).
    return hdfs.getFileStatus(tPath).getBlockSize();
  } catch (FileNotFoundException e) {
    // Preserve the original message (the raw path) while keeping the cause.
    FileNotFoundException fnf = new FileNotFoundException(path);
    fnf.initCause(e);
    throw fnf;
  }
}
@Override public int blockSize() { // By convention directory has blockSize == 0, while file has blockSize > 0: return isDirectory() ? 0 : (int)status.getBlockSize(); }
@Override
public boolean apply(FileStatus input) {
  // Accept only files whose replication factor and block size both match
  // the captured expected values.
  boolean sameReplication = input.getReplication() == replication;
  boolean sameBlockSize = input.getBlockSize() == blockSize;
  return sameReplication && sameBlockSize;
}
};
/**
 * Computes the splits for a single input file and, if any exist, wraps them
 * into one {@code BucketizedHiveInputSplit} appended to {@code result}.
 *
 * @param numOrigSplits running count of original splits seen so far
 * @return the running count, increased by the number of splits produced here
 * @throws IOException if the underlying input format fails to compute splits
 */
private int addBHISplit(FileStatus status, InputFormat inputFormat,
    Class inputFormatClass, int numOrigSplits, JobConf newjob,
    ArrayList<InputSplit> result) throws IOException {
  LOG.info("block size: " + status.getBlockSize());
  LOG.info("file length: " + status.getLen());
  FileInputFormat.setInputPaths(newjob, status.getPath());
  InputSplit[] splits = inputFormat.getSplits(newjob, 0);
  if (splits == null || splits.length == 0) {
    // Nothing produced for this file; the count is unchanged.
    return numOrigSplits;
  }
  result.add(new BucketizedHiveInputSplit(splits, inputFormatClass.getName()));
  return numOrigSplits + splits.length;
}
}
/** * Returns a string representation of a Hadoop {@link FileStatus}. * * @param fs Hadoop {@link FileStatus} * @return its string representation */ public static String toStringHadoopFileStatus(FileStatus fs) { StringBuilder sb = new StringBuilder(); sb.append("HadoopFileStatus: Path: ").append(fs.getPath()); sb.append(" , Length: ").append(fs.getLen()); // Use isDir instead of isDirectory for compatibility with hadoop 1. sb.append(" , IsDir: ").append(fs.isDir()); sb.append(" , BlockReplication: ").append(fs.getReplication()); sb.append(" , BlockSize: ").append(fs.getBlockSize()); sb.append(" , ModificationTime: ").append(fs.getModificationTime()); sb.append(" , AccessTime: ").append(fs.getAccessTime()); sb.append(" , Permission: ").append(fs.getPermission()); sb.append(" , Owner: ").append(fs.getOwner()); sb.append(" , Group: ").append(fs.getGroup()); return sb.toString(); }
/**
 * Returns a copy of {@code status} with permission, owner, and group
 * deliberately cleared (set to {@code null}).
 *
 * @throws IOException if resolving the symlink target fails
 */
private FileStatus cloneStatus() throws IOException {
  Path symlink = status.isSymlink() ? status.getSymlink() : null;
  return new FileStatus(status.getLen(), status.isDirectory(),
      status.getReplication(), status.getBlockSize(),
      status.getModificationTime(), status.getAccessTime(),
      /* permission */ null, /* owner */ null, /* group */ null,
      symlink, status.getPath());
}
}
/**
 * Copy constructor: clones {@code other} field by field.
 *
 * @param other FileStatus to copy
 * @throws IOException if resolving the symlink target fails
 */
public FileStatus(FileStatus other) throws IOException {
  // Go through the getters rather than the raw fields so that subclasses
  // (e.g. ViewFsFileStatus) that override them are copied correctly.
  this(other.getLen(),
      other.isDirectory(),
      other.getReplication(),
      other.getBlockSize(),
      other.getModificationTime(),
      other.getAccessTime(),
      other.getPermission(),
      other.getOwner(),
      other.getGroup(),
      other.isSymlink() ? other.getSymlink() : null,
      other.getPath());
}
/**
 * Returns a copy of {@code orig} whose path has been rewritten: parameter
 * paths and return paths are swizzled in opposite directions.
 */
protected FileStatus swizzleFileStatus(FileStatus orig, boolean isParam) {
  Path swizzled = isParam
      ? swizzleParamPath(orig.getPath())
      : swizzleReturnPath(orig.getPath());
  return new FileStatus(orig.getLen(), orig.isDir(), orig.getReplication(),
      orig.getBlockSize(), orig.getModificationTime(), orig.getAccessTime(),
      orig.getPermission(), orig.getOwner(), orig.getGroup(), swizzled);
}
/**
 * @return desired block size for destination file.
 */
public long getBlockSize(FileSystem targetFs) {
  if (getPreserve().preserve(PreserveAttributes.Option.BLOCK_SIZE)) {
    // Carry the source file's block size over to the copy.
    return getOrigin().getBlockSize();
  }
  return targetFs.getDefaultBlockSize(this.destination);
}
/**
 * Replace the scheme of the input {@link FileStatus} if it matches the string to replace.
 */
public static FileStatus replaceScheme(FileStatus st, String replace, String replacement) {
  // No rewriting needed when the old and new schemes are identical.
  if (replace != null && replace.equals(replacement)) {
    return st;
  }
  try {
    Path symlink = st.isSymlink() ? st.getSymlink() : null;
    return new FileStatus(st.getLen(), st.isDir(), st.getReplication(),
        st.getBlockSize(), st.getModificationTime(), st.getAccessTime(),
        st.getPermission(), st.getOwner(), st.getGroup(), symlink,
        replaceScheme(st.getPath(), replace, replacement));
  } catch (IOException ioe) {
    // getSymlink() may throw; surface it unchecked as before.
    throw new RuntimeException(ioe);
  }
}
/**
 * Simulates a filesystem touch: bumps the stored modification time of
 * {@code file} by one. Unknown files are ignored.
 */
public void touch(MockFile file) {
  if (!fileStatusMap.containsKey(file)) {
    return;
  }
  FileStatus current = fileStatusMap.get(file);
  FileStatus touched = new FileStatus(current.getLen(), current.isDirectory(),
      current.getReplication(), current.getBlockSize(),
      current.getModificationTime() + 1, current.getAccessTime(),
      current.getPermission(), current.getOwner(), current.getGroup(),
      current.getPath());
  fileStatusMap.put(file, touched);
}
/**
 * Advances the recorded modification time of {@code file} by one tick,
 * emulating a touch. Files not present in the map are left untouched.
 */
public void touch(MockFile file) {
  if (fileStatusMap.containsKey(file)) {
    FileStatus old = fileStatusMap.get(file);
    long bumpedMtime = old.getModificationTime() + 1;
    fileStatusMap.put(file,
        new FileStatus(old.getLen(), old.isDirectory(), old.getReplication(),
            old.getBlockSize(), bumpedMtime, old.getAccessTime(),
            old.getPermission(), old.getOwner(), old.getGroup(),
            old.getPath()));
  }
}
/**
 * Constructor.
 *
 * @param stat a file status
 * @param locations a file's block locations
 */
public LocatedFileStatus(FileStatus stat, BlockLocation[] locations) {
  this(stat.getLen(), stat.isDirectory(), stat.getReplication(),
      stat.getBlockSize(), stat.getModificationTime(), stat.getAccessTime(),
      stat.getPermission(), stat.getOwner(), stat.getGroup(),
      /* symlink, set below if present */ null, stat.getPath(),
      stat.hasAcl(), stat.isEncrypted(), stat.isErasureCoded(), locations);
  if (!stat.isSymlink()) {
    return;
  }
  try {
    setSymlink(stat.getSymlink());
  } catch (IOException e) {
    // isSymlink() was just checked, so getSymlink() should not fail here.
    throw new RuntimeException("Unexpected exception", e);
  }
}
public SplitGenerator(SplitInfo splitInfo, UserGroupInformation ugi, boolean allowSyntheticFileIds, boolean isDefaultFs) throws IOException { this.ugi = ugi; this.context = splitInfo.context; this.fs = splitInfo.fs; this.file = splitInfo.fileWithId.getFileStatus(); this.fsFileId = isDefaultFs ? splitInfo.fileWithId.getFileId() : null; this.blockSize = this.file.getBlockSize(); this.orcTail = splitInfo.orcTail; this.readerTypes = splitInfo.readerTypes; // TODO: potential DFS call this.locations = SHIMS.getLocationsWithOffset(fs, file); this.isOriginal = splitInfo.isOriginal; this.deltas = splitInfo.deltas; this.hasBase = splitInfo.hasBase; this.rootDir = splitInfo.dir; this.projColsUncompressedSize = -1; this.deltaSplits = splitInfo.getSplits(); this.allowSyntheticFileIds = allowSyntheticFileIds; this.ppdResult = splitInfo.ppdResult; }
public SplitGenerator(SplitInfo splitInfo, UserGroupInformation ugi, boolean allowSyntheticFileIds) throws IOException { this.ugi = ugi; this.context = splitInfo.context; this.fs = splitInfo.fs; this.file = splitInfo.fileWithId.getFileStatus(); this.fsFileId = splitInfo.fileWithId.getFileId(); this.blockSize = this.file.getBlockSize(); this.orcTail = splitInfo.orcTail; this.readerTypes = splitInfo.readerTypes; // TODO: potential DFS call this.locations = SHIMS.getLocationsWithOffset(fs, file); this.isOriginal = splitInfo.isOriginal; this.deltas = splitInfo.deltas; this.hasBase = splitInfo.hasBase; this.projColsUncompressedSize = -1; this.deltaSplits = splitInfo.getSplits(); this.allowSyntheticFileIds = allowSyntheticFileIds; this.ppdResult = splitInfo.ppdResult; }