Codota Logo
FsImageProto$INodeSection$INodeFile.getReplication
Code Index · Add Codota to your IDE (free)

How to use
getReplication
method
in
org.apache.hadoop.hdfs.server.namenode.FsImageProto$INodeSection$INodeFile

Best Java code snippets using org.apache.hadoop.hdfs.server.namenode.FsImageProto$INodeSection$INodeFile.getReplication (Showing top 20 results out of 315)

  • Common ways to obtain FsImageProto$INodeSection$INodeFile
private void myMethod () {
FsImageProto$INodeSection$INodeFile f =
  • FsImageProto.INodeSection.INode node; … f = node.getFile()
  • FsImageProto.SnapshotDiffSection.FileDiff diff; … f = diff.getSnapshotCopy()
  • Smart code suggestions by Codota
}
origin: org.apache.hadoop/hadoop-hdfs

 map.put("replication", INodeFile.DEFAULT_REPL_FOR_STRIPED_BLOCKS);
} else {
 map.put("replication", f.getReplication());
origin: org.apache.hadoop/hadoop-hdfs

totalSpace += fileSize * f.getReplication();
origin: org.apache.hadoop/hadoop-hdfs

/**
 * Recursively accumulates a summary for the directory subtree rooted at
 * {@code id} into {@code data}:
 * data[0] = directory count, data[1] = file + symlink count,
 * data[2] = total file length, data[3] = total length * replication.
 *
 * @param id   inode id of the directory to summarize
 * @param data four-element accumulator, updated in place
 * @throws IOException if a child inode cannot be loaded
 */
private void fillDirSummary(long id, long[] data) throws IOException {
  data[0]++;
  long[] childIds = dirmap.get(id);
  if (childIds == null) {
    // No children recorded for this directory; nothing more to add.
    return;
  }
  for (long childId : childIds) {
    INode child = fromINodeId(childId);
    switch (child.getType()) {
    case FILE:
      FsImageProto.INodeSection.INodeFile fileProto = child.getFile();
      long length = getFileSize(fileProto);
      data[1]++;
      data[2] += length;
      data[3] += length * fileProto.getReplication();
      break;
    case SYMLINK:
      // Symlinks count as entries but contribute no bytes.
      data[1]++;
      break;
    case DIRECTORY:
      // Descend into the subdirectory and fold its totals in.
      fillDirSummary(childId, data);
      break;
    default:
      break;
    }
  }
}
origin: org.apache.hadoop/hadoop-hdfs

public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile other) {
 if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.getDefaultInstance()) return this;
 if (other.hasReplication()) {
  setReplication(other.getReplication());
origin: org.apache.hadoop/hadoop-hdfs

  (fileInPb.getBlockType() == BlockTypeProto .STRIPED);
Short replication =
  (!isStriped ? (short)fileInPb.getReplication() : null);
Byte ecPolicyID =
  (isStriped ? (byte)fileInPb.getErasureCodingPolicyID() : null);
origin: io.prestosql.hadoop/hadoop-apache

/**
 * Writes one INodeFile entry to the XML output: scalar attributes first,
 * then the block list, then the under-construction feature when present.
 * NOTE: the "perferredBlockSize" tag is misspelled but reproduced as-is;
 * it is part of the established output format.
 *
 * @param file the protobuf INodeFile message to serialize
 */
private void dumpINodeFile(INodeSection.INodeFile file) {
  o("replication", file.getReplication())
      .o("mtime", file.getModificationTime())
      .o("atime", file.getAccessTime())
      .o("perferredBlockSize", file.getPreferredBlockSize())
      .o("permission", dumpPermission(file.getPermission()));
  if (file.getBlocksCount() > 0) {
    out.print("<blocks>");
    for (BlockProto blk : file.getBlocksList()) {
      out.print("<block>");
      o("id", blk.getBlockId())
          .o("genstamp", blk.getGenStamp())
          .o("numBytes", blk.getNumBytes());
      out.print("</block>\n");
    }
    out.print("</blocks>\n");
  }
  if (file.hasFileUC()) {
    INodeSection.FileUnderConstructionFeature uc = file.getFileUC();
    out.print("<file-under-construction>");
    o("clientName", uc.getClientName())
        .o("clientMachine", uc.getClientMachine());
    out.print("</file-under-construction>\n");
  }
}
origin: org.apache.hadoop/hadoop-hdfs

p = getPermission(file.getPermission());
hasAcl = file.hasAcl() && file.getAcl().getEntriesCount() > 0;
append(buffer, file.getReplication());
append(buffer, formatDate(file.getModificationTime()));
append(buffer, formatDate(file.getAccessTime()));
origin: ch.cern.hadoop/hadoop-hdfs

/**
 * Dumps one INodeFile to the XML output: scalar attributes first, then the
 * list of blocks, then the file-under-construction feature if present.
 *
 * @param f the protobuf INodeFile message to serialize
 */
private void dumpINodeFile(INodeSection.INodeFile f) {
 // "perferredBlockSize" is misspelled but intentionally preserved: it is
 // the tag name consumers of this XML format already rely on.
 o("replication", f.getReplication()).o("mtime", f.getModificationTime())
   .o("atime", f.getAccessTime())
   .o("perferredBlockSize", f.getPreferredBlockSize())
   .o("permission", dumpPermission(f.getPermission()));
 // Emit the <blocks> element only when the file actually has blocks.
 if (f.getBlocksCount() > 0) {
  out.print("<blocks>");
  for (BlockProto b : f.getBlocksList()) {
   out.print("<block>");
   o("id", b.getBlockId()).o("genstamp", b.getGenStamp()).o("numBytes",
     b.getNumBytes());
   out.print("</block>\n");
  }
  out.print("</blocks>\n");
 }
 // Files still being written carry an extra under-construction feature.
 if (f.hasFileUC()) {
  INodeSection.FileUnderConstructionFeature u = f.getFileUC();
  out.print("<file-under-construction>");
  o("clientName", u.getClientName()).o("clientMachine",
    u.getClientMachine());
  out.print("</file-under-construction>\n");
 }
}
origin: ch.cern.hadoop/hadoop-hdfs

totalSpace += fileSize * f.getReplication();
origin: io.prestosql.hadoop/hadoop-apache

totalSpace += fileSize * f.getReplication();
origin: io.prestosql.hadoop/hadoop-apache

INodeSection.INodeFile f = n.getFile();
List<BlockProto> bp = f.getBlocksList();
short replication = (short) f.getReplication();
LoaderContext state = parent.getLoaderContext();
origin: ch.cern.hadoop/hadoop-hdfs

INodeSection.INodeFile f = n.getFile();
List<BlockProto> bp = f.getBlocksList();
short replication = (short) f.getReplication();
LoaderContext state = parent.getLoaderContext();
origin: org.apache.hadoop/hadoop-hdfs

boolean isStriped = f.hasErasureCodingPolicyID();
assert ((!isStriped) || (isStriped && !f.hasReplication()));
Short replication = (!isStriped ? (short) f.getReplication() : null);
Byte ecPolicyID = (isStriped ?
  (byte) f.getErasureCodingPolicyID() : null);
origin: io.prestosql.hadoop/hadoop-apache

fileInPb.getAccessTime(), (short) fileInPb.getReplication(),
fileInPb.getPreferredBlockSize(),
(byte)fileInPb.getStoragePolicyID(), 
origin: org.apache.hadoop/hadoop-hdfs

 o(SECTION_REPLICATION, INodeFile.DEFAULT_REPL_FOR_STRIPED_BLOCKS);
} else {
 o(SECTION_REPLICATION, f.getReplication());
origin: ch.cern.hadoop/hadoop-hdfs

INodeFile file = inode.getFile();
p = getPermission(file.getPermission());
append(buffer, file.getReplication());
append(buffer, formatDate(file.getModificationTime()));
append(buffer, formatDate(file.getAccessTime()));
origin: org.apache.hadoop/hadoop-hdfs

data[2] = getFileSize(f);
nsQuota = -1;
data[3] = data[2] * f.getReplication();
spaceQuota = -1;
return fillSummaryMap(spaceQuota, nsQuota, data);
origin: ch.cern.hadoop/hadoop-hdfs

fileInPb.getAccessTime(), (short) fileInPb.getReplication(),
fileInPb.getPreferredBlockSize(),
(byte)fileInPb.getStoragePolicyID(), 
origin: io.prestosql.hadoop/hadoop-apache

public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile other) {
 if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.getDefaultInstance()) return this;
 if (other.hasReplication()) {
  setReplication(other.getReplication());
origin: marcelmay/hadoop-hdfs-fsimage-exporter

/**
 * Visitor callback invoked for every file in the fsimage. Folds the file's
 * block count, size and replication into the overall, per-group and
 * per-user statistics.
 */
@Override
public void onFile(FsImageProto.INodeSection.INode inode, String path) {
    final FsImageProto.INodeSection.INodeFile file = inode.getFile();
    final PermissionStatus perm = loader.getPermissionStatus(file.getPermission());
    final long size = FSImageLoader.getFileSize(file);
    final long blocks = file.getBlocksCount();

    // Overall totals across the whole image.
    overallStats.sumBlocks.add(blocks);
    overallStats.fileSize.observe(size);
    overallStats.replication.observe(file.getReplication());

    // Per-group totals.
    // NOTE(review): unlike the overall and per-user stats, replication is
    // not observed here — confirm whether that omission is intentional.
    final GroupStats groupStat = report.groupStats
        .computeIfAbsent(perm.getGroupName(), report.createGroupStats);
    groupStat.sumBlocks.add(blocks);
    groupStat.fileSize.observe(size);

    // Per-user totals.
    final UserStats userStat = report.userStats
        .computeIfAbsent(perm.getUserName(), report.createUserStat);
    userStat.sumBlocks.add(blocks);
    userStat.fileSize.observe(size);
    userStat.replication.observe(file.getReplication());
}
org.apache.hadoop.hdfs.server.namenode.FsImageProto$INodeSection$INodeFile.getReplication

Javadoc

optional uint32 replication = 1;

Popular methods of FsImageProto$INodeSection$INodeFile

  • getBlocksCount
    repeated .hadoop.hdfs.BlockProto blocks = 6;
  • getPermission
    optional fixed64 permission = 5;
  • newBuilder
  • <init>
  • getAccessTime
    optional uint64 accessTime = 3;
  • getAcl
    optional .hadoop.hdfs.fsimage.INodeSection.AclFeatureProto acl = 8;
  • getBlocks
    repeated .hadoop.hdfs.BlockProto blocks = 6;
  • getBlocksList
    repeated .hadoop.hdfs.BlockProto blocks = 6;
  • getDefaultInstance
  • getFileUC
    optional .hadoop.hdfs.fsimage.INodeSection.FileUnderConstructionFeature fileUC = 7;
  • getModificationTime
    optional uint64 modificationTime = 2;
  • getPreferredBlockSize
    optional uint64 preferredBlockSize = 4;
  • getModificationTime,
  • getPreferredBlockSize,
  • getSerializedSize,
  • getStoragePolicyID,
  • getUnknownFields,
  • getXAttrs,
  • hasAccessTime,
  • hasAcl,
  • hasFileUC

Popular in Java

  • Parsing JSON documents to java classes using gson
  • addToBackStack (FragmentTransaction)
  • setRequestProperty (URLConnection)
  • scheduleAtFixedRate (Timer)
    Schedules the specified task for repeated fixed-rate execution, beginning after the specified delay.
  • SQLException (java.sql)
    An exception that indicates a failed JDBC operation. It provides the following information about pro
  • Set (java.util)
    A collection that contains no duplicate elements. More formally, sets contain no pair of elements e1
  • FileUtils (org.apache.commons.io)
    General file manipulation utilities. Facilities are provided in the following areas: * writing to a
  • Project (org.apache.tools.ant)
    Central representation of an Ant project. This class defines an Ant project with all of its targets,
  • IsNull (org.hamcrest.core)
    Is the value null?
  • Option (scala)
Codota Logo
  • Products

    Search for Java codeSearch for JavaScript codeEnterprise
  • IDE Plugins

    IntelliJ IDEAWebStormAndroid StudioEclipseVisual Studio CodePyCharmSublime TextPhpStormVimAtomGoLandRubyMineEmacsJupyter
  • Company

    About UsContact UsCareers
  • Resources

    FAQBlogCodota Academy Plugin user guide Terms of usePrivacy policyJava Code IndexJavascript Code Index
Get Codota for your IDE now