NamenodeProtocol

How to use NamenodeProtocol in org.apache.hadoop.hdfs.server.protocol

Best Java code snippets using org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol (selected from 315 results)

  • Common ways to obtain NamenodeProtocol

private void myMethod() {
 // via the protobuf translator, given a NamenodeProtocolPB RPC proxy
 NamenodeProtocolPB rpcProxy;
 NamenodeProtocol n = new NamenodeProtocolTranslatorPB(rpcProxy);

 // or wrapped in a retry proxy
 Object implementation;
 Map methodNameToPolicyMap;
 NamenodeProtocol n2 = (NamenodeProtocol) RetryProxy.create(
   NamenodeProtocol.class, implementation, methodNameToPolicyMap);
}

(A self-contained example of creating such a proxy against a running NameNode follows.)
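Below is a minimal, self-contained sketch of creating a NamenodeProtocol proxy with NameNodeProxies.createProxy, the same entry point used by the NameNodeConnector snippet further down. The NameNode URI, the class name NamenodeProtocolExample, and the printed block pool ID are illustrative assumptions rather than part of the snippets on this page.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.NameNodeProxies;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;

public class NamenodeProtocolExample {
 public static void main(String[] args) throws Exception {
  Configuration conf = new Configuration();
  // Placeholder address; point this at a real NameNode RPC endpoint.
  URI nameNodeUri = URI.create("hdfs://namenode-host:8020");

  // Create the NamenodeProtocol proxy, as NameNodeConnector does below.
  NamenodeProtocol namenode = NameNodeProxies
    .createProxy(conf, nameNodeUri, NamenodeProtocol.class)
    .getProxy();

  // Sanity check: ask the NameNode for its namespace information.
  NamespaceInfo nsInfo = namenode.versionRequest();
  System.out.println("Block pool ID: " + nsInfo.getBlockPoolID());
 }
}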
origin: org.apache.hadoop/hadoop-hdfs

/** @return blocks with locations. */
public BlocksWithLocations getBlocks(DatanodeInfo datanode, long size,
  long minBlockSize) throws IOException {
 return namenode.getBlocks(datanode, size, minBlockSize);
}
origin: org.apache.hadoop/hadoop-hdfs

 @Override
 protected Void doWork() throws IOException {
  cachedActiveProxy.rollEditLog();
  return null;
 }
};
origin: org.apache.hadoop/hadoop-hdfs

@Override
public void run() {
 try {
  while (shouldRun) {
   try {
    blockTokenSecretManager.addKeys(namenode.getBlockKeys());
   } catch (IOException e) {
    LOG.error("Failed to set keys", e);
   }
   Thread.sleep(sleepInterval);
  }
 } catch (InterruptedException e) {
  LOG.debug("InterruptedException in block key updater thread", e);
 } catch (Throwable e) {
  LOG.error("Exception in block key updater thread", e);
  shouldRun = false;
 }
}
origin: org.apache.hadoop/hadoop-hdfs

 nsInfo = proxy.versionRequest();
 isUpgradeFinalized = proxy.isUpgradeFinalized();
 break;
} catch (IOException ioe) {
origin: org.apache.hadoop/hadoop-hdfs

final long imageTxId = proxy.getMostRecentCheckpointTxId();
final long curTxId = proxy.getTransactionID();
FSImage image = new FSImage(conf);
try {
origin: org.apache.hadoop/hadoop-hdfs

CheckpointSignature sig = namenode.rollEditLog();
RemoteEditLogManifest manifest =
  namenode.getEditLogManifest(sig.mostRecentCheckpointTxId + 1);
origin: org.apache.hadoop/hadoop-hdfs

NamenodeCommand cmd =
  getRemoteNamenodeProxy().startCheckpoint(backupNode.getRegistration());
CheckpointCommand cpCmd = null;
switch(cmd.getAction()) {
 // ...
}
LOG.debug("Doing checkpoint. Last applied: " + lastApplied);
RemoteEditLogManifest manifest =
  getRemoteNamenodeProxy().getEditLogManifest(bnImage.getLastAppliedTxId() + 1);
// ...
if (!backupNode.namenode.isRollingUpgrade()) {
 bnImage.updateStorageVersion();
}
// ...
getRemoteNamenodeProxy().endCheckpoint(backupNode.getRegistration(), sig);
origin: org.apache.hadoop/hadoop-hdfs

private static NamespaceInfo handshake(NamenodeProtocol namenode)
throws IOException, SocketTimeoutException {
 NamespaceInfo nsInfo;
 nsInfo = namenode.versionRequest();  // throws SocketTimeoutException 
 String errorMsg = null;
 // verify build version
 if( ! nsInfo.getBuildVersion().equals( Storage.getBuildVersion())) {
  errorMsg = "Incompatible build versions: active name-node BV = " 
   + nsInfo.getBuildVersion() + "; backup node BV = "
   + Storage.getBuildVersion();
  LOG.error(errorMsg);
  throw new IOException(errorMsg);
 }
 assert HdfsServerConstants.NAMENODE_LAYOUT_VERSION == nsInfo.getLayoutVersion() :
  "Active and backup node layout versions must be the same. Expected: "
  + HdfsServerConstants.NAMENODE_LAYOUT_VERSION + " actual "+ nsInfo.getLayoutVersion();
 return nsInfo;
}
origin: com.facebook.hadoop/hadoop-core

CheckpointSignature sig = (CheckpointSignature)namenode.rollEditLog();
namenode.rollFsImage(new CheckpointSignature(checkpointImage));
checkpointImage.endCheckpoint();
origin: org.apache.hadoop/hadoop-hdfs

private long countUncheckpointedTxns() throws IOException {
 long curTxId = namenode.getTransactionID();
 long uncheckpointedTxns = curTxId -
  checkpointImage.getStorage().getMostRecentCheckpointTxId();
 assert uncheckpointedTxns >= 0;
 return uncheckpointedTxns;
}
origin: org.apache.hadoop/hadoop-hdfs

@Override
public GetEditLogManifestResponseProto getEditLogManifest(
  RpcController unused, GetEditLogManifestRequestProto request)
  throws ServiceException {
 RemoteEditLogManifest manifest;
 try {
  manifest = impl.getEditLogManifest(request.getSinceTxId());
 } catch (IOException e) {
  throw new ServiceException(e);
 }
 return GetEditLogManifestResponseProto.newBuilder()
   .setManifest(PBHelper.convert(manifest)).build();
}
origin: org.apache.hadoop/hadoop-hdfs

@Override
public ErrorReportResponseProto errorReport(RpcController unused,
  ErrorReportRequestProto request) throws ServiceException {
 try {
  impl.errorReport(PBHelper.convert(request.getRegistration()),
    request.getErrorCode(), request.getMsg());
 } catch (IOException e) {
  throw new ServiceException(e);
 }
 return VOID_ERROR_REPORT_RESPONSE;
}
origin: org.apache.hadoop/hadoop-hdfs

/**
 * @return true if an upgrade is in progress, false if not.
 * @throws IOException
 */
public boolean isUpgrading() throws IOException {
 // fsimage upgrade
 final boolean isUpgrade = !namenode.isUpgradeFinalized();
 // rolling upgrade
 RollingUpgradeInfo info = fs.rollingUpgrade(
   HdfsConstants.RollingUpgradeAction.QUERY);
 final boolean isRollingUpgrade = (info != null && !info.isFinalized());
 return (isUpgrade || isRollingUpgrade);
}
origin: org.apache.hadoop/hadoop-hdfs

@Override
public EndCheckpointResponseProto endCheckpoint(RpcController unused,
  EndCheckpointRequestProto request) throws ServiceException {
 try {
  impl.endCheckpoint(PBHelper.convert(request.getRegistration()),
    PBHelper.convert(request.getSignature()));
 } catch (IOException e) {
  throw new ServiceException(e);
 }
 return VOID_END_CHECKPOINT_RESPONSE;
}
origin: org.apache.hadoop/hadoop-hdfs

@Override
public GetMostRecentCheckpointTxIdResponseProto getMostRecentCheckpointTxId(
  RpcController unused, GetMostRecentCheckpointTxIdRequestProto request)
  throws ServiceException {
 long txid;
 try {
  txid = impl.getMostRecentCheckpointTxId();
 } catch (IOException e) {
  throw new ServiceException(e);
 }
 return GetMostRecentCheckpointTxIdResponseProto.newBuilder().setTxId(txid).build();
}
origin: org.apache.hadoop/hadoop-hdfs

public NameNodeConnector(String name, URI nameNodeUri, Path idPath,
             List<Path> targetPaths, Configuration conf,
             int maxNotChangedIterations)
  throws IOException {
 this.nameNodeUri = nameNodeUri;
 this.idPath = idPath;
 this.targetPaths = targetPaths == null || targetPaths.isEmpty() ? Arrays
   .asList(new Path("/")) : targetPaths;
 this.maxNotChangedIterations = maxNotChangedIterations;
 this.namenode = NameNodeProxies.createProxy(conf, nameNodeUri,
   NamenodeProtocol.class).getProxy();
 this.client = NameNodeProxies.createProxy(conf, nameNodeUri,
   ClientProtocol.class, fallbackToSimpleAuth).getProxy();
 this.fs = (DistributedFileSystem)FileSystem.get(nameNodeUri, conf);
 final NamespaceInfo namespaceinfo = namenode.versionRequest();
 this.blockpoolID = namespaceinfo.getBlockPoolID();
 final FsServerDefaults defaults = fs.getServerDefaults(new Path("/"));
 this.keyManager = new KeyManager(blockpoolID, namenode,
   defaults.getEncryptDataTransfer(), conf);
 // if it is for test, we do not create the id file
 if (checkOtherInstanceRunning) {
  out = checkAndMarkRunning();
  if (out == null) {
   // Exit if there is another one running.
   throw new IOException("Another " + name + " is running.");
  }
 }
}
origin: ch.cern.hadoop/hadoop-hdfs

boolean isUpgradeFinalized;
try {
 nsInfo = proxy.versionRequest();
 isUpgradeFinalized = proxy.isUpgradeFinalized();
} catch (IOException ioe) {
 LOG.fatal("Unable to fetch namespace information from active NN at " +
org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol

Javadoc

Protocol that a secondary NameNode uses to communicate with the NameNode. It is used to get part of the name node state.
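As a rough illustration of that description, here is a hedged sketch, modeled on the handshake() and block key updater snippets above, of a subordinate node verifying build compatibility and then pulling block keys from the active NameNode. The class name HandshakeSketch and the helper name handshakeAndFetchKeys are assumptions for illustration, not Hadoop code.

import java.io.IOException;
import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;

public class HandshakeSketch {
 /** Verify build compatibility, then fetch the current block keys. */
 static ExportedBlockKeys handshakeAndFetchKeys(NamenodeProtocol namenode)
   throws IOException {
  // Same check as handshake(): refuse to talk to an incompatible NameNode.
  NamespaceInfo nsInfo = namenode.versionRequest();
  if (!nsInfo.getBuildVersion().equals(Storage.getBuildVersion())) {
   throw new IOException("Incompatible build versions: active NN BV = "
     + nsInfo.getBuildVersion() + "; local BV = " + Storage.getBuildVersion());
  }
  // The same call the block key updater thread feeds into addKeys().
  return namenode.getBlockKeys();
 }
}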

Most used methods

  • getBlocks
    Get a list of blocks belonging to datanode whose total size equals size.
  • rollEditLog
    Closes the current edit log and opens a new one. The call fails if the file system is in SafeMode. (A combined usage sketch follows this list.)
  • getBlockKeys
    Get the current block keys
  • getEditLogManifest
    Return a structure containing details about all edit logs available to be fetched from the NameNode.
  • versionRequest
    Request name-node version and storage information.
  • endCheckpoint
    A request to the active name-node to finalize previously started checkpoint.
  • errorReport
    Report to the active name-node an error occurred on a subordinate node. Depending on the error code the active node may decide to unregister the reporting node.
  • getMostRecentCheckpointTxId
    Get the transaction ID of the most recent checkpoint.
  • getTransactionID
  • isUpgradeFinalized
  • registerSubordinateNamenode
    Register a subordinate name-node like backup node.
  • startCheckpoint
    A request to the active name-node to start a checkpoint. The name-node should decide whether to admit it or reject.
  • getEditLogSize
  • rollFsImage
  • getNextSPSPath
  • isRollingUpgrade
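The sketch below strings several of the methods listed above into the kind of checkpoint exchange the snippets demonstrate (getMostRecentCheckpointTxId, getTransactionID, rollEditLog, getEditLogManifest). The class name CheckpointSketch and the printed transaction gap are assumptions for illustration; this is not the SecondaryNameNode implementation.

import java.io.IOException;
import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;

public class CheckpointSketch {
 private final NamenodeProtocol namenode;

 public CheckpointSketch(NamenodeProtocol namenode) {
  this.namenode = namenode;
 }

 /** Roll the edits and fetch the manifest of segments written since the last checkpoint. */
 public RemoteEditLogManifest fetchUncheckpointedEdits() throws IOException {
  // How far the edit log is ahead of the last checkpoint (cf. countUncheckpointedTxns()).
  long lastCheckpointTxId = namenode.getMostRecentCheckpointTxId();
  long curTxId = namenode.getTransactionID();
  System.out.println("Uncheckpointed transactions: " + (curTxId - lastCheckpointTxId));

  // Close the current edit segment so everything up to curTxId is finalized ...
  CheckpointSignature sig = namenode.rollEditLog();
  System.out.println("Rolled edit log, signature: " + sig);

  // ... then ask for all edit log segments newer than the last checkpoint.
  return namenode.getEditLogManifest(lastCheckpointTxId + 1);
 }
}

A real checkpointer would go on to download the listed segments and the image, then call endCheckpoint with the returned signature, as the Checkpointer and SecondaryNameNode snippets above show.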
