/**
 * Convenience constructor: create a scanner driven by the DataNode's own
 * configuration.
 *
 * @param datanode the DataNode whose block volumes will be scanned
 */
public BlockScanner(DataNode datanode) {
  this(datanode, datanode.getConf());
}
@Override // DataNodeMXBean public String getHttpPort(){ return this.getConf().get("dfs.datanode.info.port"); }
/** * Get timeout value of each OOB type from configuration */ private void initOOBTimeout() { final int oobStart = Status.OOB_RESTART_VALUE; // the first OOB type final int oobEnd = Status.OOB_RESERVED3_VALUE; // the last OOB type final int numOobTypes = oobEnd - oobStart + 1; oobTimeouts = new long[numOobTypes]; final String[] ele = getConf().get(DFS_DATANODE_OOB_TIMEOUT_KEY, DFS_DATANODE_OOB_TIMEOUT_DEFAULT).split(","); for (int i = 0; i < numOobTypes; i++) { oobTimeouts[i] = (i < ele.length) ? Long.parseLong(ele[i]) : 0; } }
@Override // DataNodeMXBean public String getRpcPort(){ InetSocketAddress ipcAddr = NetUtils.createSocketAddr( this.getConf().get(DFS_DATANODE_IPC_ADDRESS_KEY)); return Integer.toString(ipcAddr.getPort()); }
/**
 * Connect to the NN for the lifeline protocol. Separated out to simplify
 * testing.
 *
 * @param lifelineNnAddr address of lifeline RPC server
 * @return lifeline RPC proxy
 * @throws IOException if the proxy cannot be created
 */
DatanodeLifelineProtocolClientSideTranslatorPB connectToLifelineNN(
    InetSocketAddress lifelineNnAddr) throws IOException {
  final Configuration conf = getConf();
  return new DatanodeLifelineProtocolClientSideTranslatorPB(
      lifelineNnAddr, conf);
}
@Override // DataNodeMXBean public String getDataPort(){ InetSocketAddress dataAddr = NetUtils.createSocketAddr( this.getConf().get(DFS_DATANODE_ADDRESS_KEY)); return Integer.toString(dataAddr.getPort()); }
/**
 * Connect to the NN. Separated out to simplify testing.
 *
 * @param nnAddr address of the NameNode RPC server
 * @return DatanodeProtocol RPC proxy to the NameNode
 * @throws IOException if the proxy cannot be created
 */
DatanodeProtocolClientSideTranslatorPB connectToNN(
    InetSocketAddress nnAddr) throws IOException {
  final Configuration conf = getConf();
  return new DatanodeProtocolClientSideTranslatorPB(nnAddr, conf);
}
/**
 * Create a recovery worker bound to the given DataNode, caching its
 * configuration objects for use by later recovery tasks.
 *
 * @param datanode the owning DataNode
 */
BlockRecoveryWorker(DataNode datanode) {
  this.datanode = datanode;
  this.conf = datanode.getConf();
  this.dnConf = datanode.getDnConf();
}
/**
 * Open a data-transfer stream pair to the given datanode.
 *
 * @param datanodeID target datanode
 * @param timeout socket timeout in milliseconds
 * @param block block whose data-encryption key factory should be used
 * @param blockToken access token for the block
 * @return paired input/output streams to the datanode
 * @throws IOException if the connection cannot be established
 */
IOStreamPair connectToDN(DatanodeInfo datanodeID, int timeout,
    ExtendedBlock block, Token<BlockTokenIdentifier> blockToken)
    throws IOException {
  final Configuration conf = getConf();
  return DFSUtilClient.connectToDN(datanodeID, timeout, conf, saslClient,
      NetUtils.getDefaultSocketFactory(conf), false,
      getDataEncryptionKeyFactoryForBlock(block), blockToken);
}
protected ThreadPoolExecutor initializeCacheExecutor(File parent) { if (storageType.isTransient()) { return null; } if (dataset.datanode == null) { // FsVolumeImpl is used in test. return null; } final int maxNumThreads = dataset.datanode.getConf().getInt( DFSConfigKeys.DFS_DATANODE_FSDATASETCACHE_MAX_THREADS_PER_VOLUME_KEY, DFSConfigKeys.DFS_DATANODE_FSDATASETCACHE_MAX_THREADS_PER_VOLUME_DEFAULT); ThreadFactory workerFactory = new ThreadFactoryBuilder() .setDaemon(true) .setNameFormat("FsVolumeImplWorker-" + parent.toString() + "-%d") .build(); ThreadPoolExecutor executor = new ThreadPoolExecutor( 1, maxNumThreads, 60, TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>(), workerFactory); executor.allowCoreThreadTimeOut(true); return executor; }
// Single-threaded scheduler used to finish deferred uncaching work.
this.deferredUncachingExecutor = new ScheduledThreadPoolExecutor(
    1, workerFactory);
// Timeout (ms) before a pending cache revocation is forced; presumably
// bounds how long clients may keep using revoked cached data — TODO confirm.
this.revocationMs = dataset.datanode.getConf().getLong(
    DFS_DATANODE_CACHE_REVOCATION_TIMEOUT_MS,
    DFS_DATANODE_CACHE_REVOCATION_TIMEOUT_MS_DEFAULT);
// Configured poll interval (ms) for checking revocation progress.
long confRevocationPollingMs = dataset.datanode.getConf().getLong(
    DFS_DATANODE_CACHE_REVOCATION_POLLING_MS,
    DFS_DATANODE_CACHE_REVOCATION_POLLING_MS_DEFAULT);
@Override // ClientDatanodeProtocol public void refreshNamenodes() throws IOException { checkSuperuserPrivilege(); setConf(new Configuration()); refreshNamenodes(getConf()); }
/** * Start a timer to periodically write DataNode metrics to the log file. This * behavior can be disabled by configuration. * */ protected void startMetricsLogger() { long metricsLoggerPeriodSec = getConf().getInt( DFS_DATANODE_METRICS_LOGGER_PERIOD_SECONDS_KEY, DFS_DATANODE_METRICS_LOGGER_PERIOD_SECONDS_DEFAULT); if (metricsLoggerPeriodSec <= 0) { return; } MetricsLoggerTask.makeMetricsLoggerAsync(METRICS_LOG); // Schedule the periodic logging. metricsLoggerTimer = new ScheduledThreadPoolExecutor(1); metricsLoggerTimer.setExecuteExistingDelayedTasksAfterShutdownPolicy(false); metricsLoggerTimer.scheduleWithFixedDelay(new MetricsLoggerTask(METRICS_LOG, "DataNode", (short) 0), metricsLoggerPeriodSec, metricsLoggerPeriodSec, TimeUnit.SECONDS); }
/**
 * Prepare to compute a checksum over {@code block}: opens the block's
 * metadata (checksum) stream and records the visible replica length.
 *
 * @param datanode datanode serving the block
 * @param block block to checksum; its numBytes is the requested length
 * @param blockChecksumOptions options controlling the checksum computation
 * @throws IOException if the metadata stream cannot be opened
 */
BlockChecksumComputer(DataNode datanode, ExtendedBlock block,
    BlockChecksumOptions blockChecksumOptions) throws IOException {
  super(datanode, blockChecksumOptions);
  this.block = block;
  this.requestLength = block.getNumBytes();
  Preconditions.checkArgument(requestLength >= 0);
  this.metadataIn = datanode.data.getMetaDataInputStream(block);
  this.visibleLength = datanode.data.getReplicaVisibleLength(block);
  // Partial-block mode: caller requested less than the visible length.
  this.partialBlk = requestLength < visibleLength;

  int ioFileBufferSize =
      DFSUtilClient.getIoFileBufferSize(datanode.getConf());
  // Buffer the metadata stream with the configured I/O buffer size.
  this.checksumIn = new DataInputStream(
      new BufferedInputStream(metadataIn, ioFileBufferSize));
}
/**
 * Add a list of volumes to be managed by DataStorage. Empty volumes are
 * formatted; non-empty ones are recovered from previous transitions when
 * required.
 *
 * @param datanode the reference to DataNode.
 * @param nsInfo namespace information
 * @param dataDirs array of data storage directories
 * @param startOpt startup option
 * @return a list of successfully loaded storage directories.
 */
@VisibleForTesting
synchronized List<StorageDirectory> addStorageLocations(DataNode datanode,
    NamespaceInfo nsInfo, Collection<StorageLocation> dataDirs,
    StartupOption startOpt) throws IOException {
  final int threadCount = getParallelVolumeLoadThreadsNum(
      dataDirs.size(), datanode.getConf());
  final ExecutorService pool = Executors.newFixedThreadPool(threadCount);
  try {
    // First bring up DataStorage on each location in parallel, then layer
    // the block-pool slice storage on top of the locations that succeeded.
    final List<StorageLocation> okLocations = loadDataStorage(
        datanode, nsInfo, dataDirs, startOpt, pool);
    return loadBlockPoolSliceStorage(
        datanode, nsInfo, okLocations, startOpt, pool);
  } finally {
    pool.shutdown();
  }
}
/**
 * Create a xceiver bound to one peer connection, caching per-connection
 * stream handles, buffer sizes, and address strings used in logging.
 *
 * @param peer the accepted peer connection this xceiver will serve
 * @param datanode owning datanode
 * @param dataXceiverServer the server that accepted the connection
 * @throws IOException if the peer's streams cannot be obtained
 */
private DataXceiver(Peer peer, DataNode datanode,
    DataXceiverServer dataXceiverServer) throws IOException {
  super(datanode.getTracer());
  this.peer = peer;
  this.dnConf = datanode.getDnConf();
  this.socketIn = peer.getInputStream();
  this.socketOut = peer.getOutputStream();
  this.datanode = datanode;
  this.dataXceiverServer = dataXceiverServer;
  this.connectToDnViaHostname = datanode.getDnConf().connectToDnViaHostname;
  this.ioFileBufferSize = DFSUtilClient.getIoFileBufferSize(datanode.getConf());
  this.smallBufferSize = DFSUtilClient.getSmallBufferSize(datanode.getConf());
  remoteAddress = peer.getRemoteAddressString();
  // Strip the ":port" suffix, if any, for port-less log/audit output.
  final int colonIdx = remoteAddress.indexOf(':');
  remoteAddressWithoutPort =
      (colonIdx < 0) ? remoteAddress : remoteAddress.substring(0, colonIdx);
  localAddress = peer.getLocalAddressString();

  LOG.debug("Number of active connections is: {}", datanode.getXceiverCount());
}
/** * @see DFSUtil#getHttpPolicy(org.apache.hadoop.conf.Configuration) * for information related to the different configuration options and * Http Policy is decided. */ private void startInfoServer() throws IOException { // SecureDataNodeStarter will bind the privileged port to the channel if // the DN is started by JSVC, pass it along. ServerSocketChannel httpServerChannel = secureResources != null ? secureResources.getHttpServerChannel() : null; httpServer = new DatanodeHttpServer(getConf(), this, httpServerChannel); httpServer.start(); if (httpServer.getHttpAddress() != null) { infoPort = httpServer.getHttpAddress().getPort(); } if (httpServer.getHttpsAddress() != null) { infoSecurePort = httpServer.getHttpsAddress().getPort(); } }
/** Start a single datanode daemon and wait for it to finish.
 *  If this thread is specifically interrupted, it will stop waiting.
 */
public void runDatanodeDaemon() throws IOException {
  // Bring up the block-pool services (NN handshakes/heartbeats) first.
  blockPoolManager.startAll();

  // start dataXceiveServer
  dataXceiverServer.start();
  if (localDataXceiverServer != null) {
    // Domain-socket (short-circuit) server is optional.
    localDataXceiverServer.start();
  }
  ipcServer.setTracer(tracer);
  ipcServer.start();
  // Plugins start last, once the core servers are accepting requests.
  startPlugins(getConf());
}
/**
 * Allows submission of a disk balancer Job.
 * @param planID - Hash value of the plan.
 * @param planVersion - Plan version, reserved for future use. We have only
 *                    version 1 now.
 * @param planFile - Plan file name
 * @param planData - Actual plan data in json format
 * @param skipDateCheck - if true, accept the plan even if its date would
 *                      normally be rejected as stale
 * @throws IOException if the caller lacks superuser privilege, the datanode
 *         is not in REGULAR startup state, or the plan is rejected
 */
@Override
public void submitDiskBalancerPlan(String planID, long planVersion,
    String planFile, String planData, boolean skipDateCheck)
    throws IOException {
  checkSuperuserPrivilege();
  // Disk balancing is only allowed on a normally-started datanode.
  if (getStartupOption(getConf()) != StartupOption.REGULAR) {
    throw new DiskBalancerException(
        "Datanode is in special state, e.g. Upgrade/Rollback etc."
            + " Disk balancing not permitted.",
        DiskBalancerException.Result.DATANODE_STATUS_NOT_REGULAR);
  }

  getDiskBalancer().submitPlan(planID, planVersion, planFile, planData,
      skipDateCheck);
}
/**
 * Creates a dummy DataNode for testing purpose.
 *
 * Most collaborators are left null/zero/false; only the pieces exercised
 * by unit tests (tracer, file I/O provider, block scanner, DNConf, volume
 * checker) are constructed.
 *
 * @param conf configuration for the dummy instance
 * @throws DiskErrorException if the volume checker cannot be created
 */
@VisibleForTesting
@InterfaceAudience.LimitedPrivate("HDFS")
DataNode(final Configuration conf) throws DiskErrorException {
  super(conf);
  this.tracer = createTracer(conf);
  this.tracerConfigurationManager =
      new TracerConfigurationManager(DATANODE_HTRACE_PREFIX, conf);
  this.fileIoProvider = new FileIoProvider(conf, this);
  this.fileDescriptorPassingDisabledReason = null;
  this.maxNumberOfBlocksToLog = 0;
  this.confVersion = null;
  this.usersWithLocalPathAccess = null;
  this.connectToDnViaHostname = false;
  this.blockScanner = new BlockScanner(this, this.getConf());
  this.pipelineSupportECN = false;
  this.socketFactory = NetUtils.getDefaultSocketFactory(conf);
  // DNConf reads fields set above; keep it after those assignments.
  this.dnConf = new DNConf(this);
  initOOBTimeout();
  storageLocationChecker = null;
  volumeChecker = new DatasetVolumeChecker(conf, new Timer());
}