/**
 * Run {@code cb} against a nimbus client built from the default storm config; the client is
 * closed once the callback returns.
 *
 * @param cb the callback to send to nimbus.
 * @throws Exception on any kind of error.
 */
public static void withConfiguredClient(WithNimbus cb) throws Exception {
    withConfiguredClient(cb, ConfigUtils.readStormConfig());
}
protected void fillHadoopConfiguration(Map topologyConf, String configKey, Configuration configuration) { Map<String, Object> config = (Map<String, Object>) topologyConf.get(configKey); LOG.info("TopoConf {}, got config {}, for configKey {}", ConfigUtils.maskPasswords(topologyConf), ConfigUtils.maskPasswords(config), configKey); if (config != null) { List<String> resourcesToLoad = new ArrayList<>(); for (Map.Entry<String, Object> entry : config.entrySet()) { if (entry.getKey().equals(CONFIG_KEY_RESOURCES)) { resourcesToLoad.addAll((List<String>) entry.getValue()); } else { configuration.set(entry.getKey(), String.valueOf(entry.getValue())); } } LOG.info("Resources to load {}", resourcesToLoad); // add configs from resources like hdfs-site.xml for (String pathStr : resourcesToLoad) { configuration.addResource(new Path(Paths.get(pathStr).toUri())); } } LOG.info("Initializing UGI with config {}", configuration); UserGroupInformation.setConfiguration(configuration); }
/**
 * Get the artifacts directory for a specific worker port under a topology's artifacts root.
 *
 * @param conf the storm config
 * @param id   the topology id
 * @param port the worker's port
 * @return the per-port artifacts directory path
 */
public static String workerArtifactsRoot(Map<String, Object> conf, String id, Integer port) {
    return workerArtifactsRoot(conf, id) + FILE_SEPARATOR + port;
}
/**
 * List the blobstore keys associated with a topology id: code key, conf key, and — in
 * distributed mode only — the jar key.
 *
 * @param conf the storm config (used to detect local mode)
 * @param id   the topology id
 * @return the keys for this topology's blobs
 */
static List<String> getKeyListFromId(Map<String, Object> conf, String id) {
    List<String> keys = new ArrayList<>(3);
    keys.add(ConfigUtils.masterStormCodeKey(id));
    keys.add(ConfigUtils.masterStormConfKey(id));
    // In local mode no jar blob is uploaded, so there is no jar key to track.
    if (!ConfigUtils.isLocalMode(conf)) {
        keys.add(ConfigUtils.masterStormJarKey(id));
    }
    return keys;
}
/**
 * Check that all files a worker launch requires are present in the supervisor's dist root:
 * the root itself, the code file and the conf file, plus the jar unless running in local mode.
 *
 * @param conf    the storm config
 * @param stormId the topology id
 * @return true if every required file exists
 * @throws IOException on any error checking the file system
 */
static boolean doRequiredTopoFilesExist(Map<String, Object> conf, String stormId) throws IOException {
    String stormroot = ConfigUtils.supervisorStormDistRoot(conf, stormId);
    if (!Utils.checkFileExists(stormroot)) {
        return false;
    }
    if (!Utils.checkFileExists(ConfigUtils.supervisorStormCodePath(stormroot))) {
        return false;
    }
    if (!Utils.checkFileExists(ConfigUtils.supervisorStormConfPath(stormroot))) {
        return false;
    }
    // Local mode runs in-process, so no topology jar is required on disk.
    return ConfigUtils.isLocalMode(conf)
        || Utils.checkFileExists(ConfigUtils.supervisorStormJarPath(stormroot));
}
/**
 * Create symlink from the containers directory/artifacts to the artifacts directory.
 *
 * <p>No-op when symlinks are disabled or the worker directory does not exist yet.
 *
 * @throws IOException on any error
 */
protected void createArtifactsLink() throws IOException {
    _type.assertFull();
    if (_symlinksDisabled) {
        return;
    }
    File workerDir = new File(ConfigUtils.workerRoot(_conf, _workerId));
    if (_ops.fileExists(workerDir)) {
        LOG.debug("Creating symlinks for worker-id: {} topology-id: {} to its port artifacts directory",
                  _workerId, _topologyId);
        File topoDir = new File(ConfigUtils.workerArtifactsRoot(_conf, _topologyId, _port));
        _ops.createSymlink(new File(workerDir, "artifacts"), topoDir);
    }
}
/**
 * Launch the worker: log the (password-masked) config, record the worker pid on disk when not
 * in local mode, and build the cluster-state storage used by the worker.
 *
 * NOTE(review): this chunk is truncated — the remainder of the method body and its closing
 * braces are outside the visible region, so the exact extent of the {@code if} block below
 * cannot be confirmed from here.
 */
public void start() throws Exception {
    LOG.info("Launching worker for {} on {}:{} with id {} and conf {}", topologyId, assignmentId, port, workerId,
        ConfigUtils.maskPasswords(conf));
    // Pid files are only written for real worker processes, not local-mode in-process workers.
    if (!ConfigUtils.isLocalMode(conf)) {
        // Touch the pid file under the worker root, and write the pid into the artifacts dir.
        FileUtils.touch(new File(ConfigUtils.workerPidPath(conf, workerId, pid)));
        FileUtils.writeStringToFile(new File(ConfigUtils.workerArtifactsPidPath(conf, topologyId, port)), pid,
            Charset.forName("UTF-8"));
        // Apply any login config from the supervisor's view of the topology conf.
        ConfigUtils.overrideLoginConfigWithSystemProperty(ConfigUtils.readSupervisorStormConf(conf, topologyId));
        ClusterStateContext csContext = new ClusterStateContext(DaemonType.WORKER, topologyConf);
        IStateStorage stateStorage = ClusterUtils.mkStateStorage(conf, topologyConf, csContext);
/**
 * Setup the container to run. By default this creates the needed directories/links in the
 * local file system.
 *
 * <p>PREREQUISITE: All needed blobs and topology jars/configs have been downloaded and placed
 * in the appropriate locations; this is verified up front and the method fails fast otherwise.
 *
 * @throws IOException on any error
 */
protected void setup() throws IOException {
    _type.assertFull();
    // Fail fast if the topology's code/conf/jar files are not all on disk yet.
    if (!_ops.doRequiredTopoFilesExist(_conf, _topologyId)) {
        LOG.info("Missing topology storm code, so can't launch worker with assignment {} for this supervisor {} on port {} with id {}",
            _assignment, _supervisorId, _port, _workerId);
        throw new IllegalStateException("Not all needed files are here!!!!");
    }
    LOG.info("Setting up {}:{}", _supervisorId, _workerId);
    // Per-worker scratch directories: pids, tmp, and heartbeats.
    _ops.forceMkdir(new File(ConfigUtils.workerPidsRoot(_conf, _workerId)));
    _ops.forceMkdir(new File(ConfigUtils.workerTmpRoot(_conf, _workerId)));
    _ops.forceMkdir(new File(ConfigUtils.workerHeartbeatsRoot(_conf, _workerId)));
    // Artifacts dir is shared per topology/port; only create and chown it on first use.
    File workerArtifacts = new File(ConfigUtils.workerArtifactsRoot(_conf, _topologyId, _port));
    if (!_ops.fileExists(workerArtifacts)) {
        _ops.forceMkdir(workerArtifacts);
        _ops.setupWorkerArtifactsDir(_assignment.get_owner(), workerArtifacts);
    }
    // Record which user the worker runs as, then wire up log metadata and symlinks.
    String user = getWorkerUser();
    writeLogMetadata(user);
    saveWorkerUser(user);
    createArtifactsLink();
    createBlobstoreLinks();
}
/**
 * Main method to start the server.
 *
 * <p>Builds the logviewer from the default storm config, ensures the worker artifacts root
 * exists, starts metrics reporting and the log cleaner, then blocks until the server
 * terminates. A shutdown hook stops metrics reporters and closes the server.
 */
public static void main(String [] args) throws Exception {
    Utils.setupDefaultUncaughtExceptionHandler();
    Map<String, Object> conf = ConfigUtils.readStormConfig();
    StormMetricsRegistry metricsRegistry = new StormMetricsRegistry();
    String logRoot = ConfigUtils.workerArtifactsRoot(conf);
    File logRootDir = new File(logRoot);
    // Best-effort create; downstream components handle a missing/unwritable root themselves.
    logRootDir.mkdirs();
    WorkerLogs workerLogs = new WorkerLogs(conf, logRootDir, metricsRegistry);
    DirectoryCleaner directoryCleaner = new DirectoryCleaner(metricsRegistry);
    // Server and cleaner are AutoCloseable; try-with-resources guarantees shutdown on exit.
    try (LogviewerServer server = new LogviewerServer(conf, metricsRegistry);
         LogCleaner logCleaner = new LogCleaner(conf, workerLogs, directoryCleaner, logRootDir, metricsRegistry)) {
        metricsRegistry.startMetricsReporters(conf);
        Utils.addShutdownHookWithForceKillIn1Sec(() -> {
            server.meterShutdownCalls.mark();
            metricsRegistry.stopMetricsReporters();
            server.close();
        });
        logCleaner.start();
        server.start();
        server.awaitTermination();
    }
}
}
/**
 * Get the directory where per-worker user files are stored.
 *
 * @param conf the storm config
 * @return the workers-users directory under the absolute storm local dir
 */
public static String workerUserRoot(Map<String, Object> conf) {
    return absoluteStormLocalDir(conf) + FILE_SEPARATOR + "workers-users";
}
/**
 * Determine the user that some operations should be done as, trying in order: the saved
 * worker-user file, the assignment's owner, the current user (local mode), and finally the
 * owner of the worker artifacts root.
 *
 * @return the user that some operations should be done as.
 * @throws IOException on any error
 */
protected String getWorkerUser() throws IOException {
    LOG.info("GET worker-user for {}", _workerId);
    File userFile = new File(ConfigUtils.workerUserFile(_conf, _workerId));
    if (_ops.fileExists(userFile)) {
        return _ops.slurpString(userFile).trim();
    }
    if (_assignment != null && _assignment.is_set_owner()) {
        return _assignment.get_owner();
    }
    if (ConfigUtils.isLocalMode(_conf)) {
        return System.getProperty("user.name");
    }
    // Last resort in distributed mode: fall back to whoever owns the artifacts root.
    File artifactsRoot = new File(ConfigUtils.workerArtifactsRoot(_conf));
    if (artifactsRoot.exists()) {
        return Files.getOwner(artifactsRoot.toPath()).getName();
    }
    throw new IllegalStateException("Could not recover the user for " + _workerId);
}
/**
 * Entry point for the Pacemaker daemon: redirect stdout/stderr to SLF4J, read the storm
 * config (honoring any login config system-property override), launch the server, and start
 * metrics reporting with a shutdown hook to stop it.
 */
public static void main(String[] args) {
    SysOutOverSLF4J.sendSystemOutAndErrToSLF4J();
    Map<String, Object> stormConf =
        ConfigUtils.overrideLoginConfigWithSystemProperty(ConfigUtils.readStormConfig());
    StormMetricsRegistry metrics = new StormMetricsRegistry();
    final Pacemaker handler = new Pacemaker(stormConf, metrics);
    handler.launchServer();
    metrics.startMetricsReporters(stormConf);
    Utils.addShutdownHookWithForceKillIn1Sec(metrics::stopMetricsReporters);
}
/**
 * Get a set of java properties that are common to both the log writer and the worker
 * processes. These are mostly system properties that are used by logging.
 *
 * @return a list of command line options
 */
private List<String> getCommonParams() {
    final String workersArtifacts = ConfigUtils.workerArtifactsRoot(_conf);
    final String stormLogDir = ConfigUtils.getLogDir();
    List<String> params = new ArrayList<>();
    params.add("-Dlogging.sensitivity=" + OR((String) _topoConf.get(Config.TOPOLOGY_LOGGING_SENSITIVITY), "S3"));
    params.add("-Dlogfile.name=worker.log");
    params.add("-Dstorm.home=" + OR(_stormHome, ""));
    params.add("-Dworkers.artifacts=" + workersArtifacts);
    params.add("-Dstorm.id=" + _topologyId);
    params.add("-Dworker.id=" + _workerId);
    params.add("-Dworker.port=" + _port);
    params.add("-Dstorm.log.dir=" + stormLogDir);
    params.add("-DLog4jContextSelector=org.apache.logging.log4j.core.selector.BasicContextSelector");
    params.add("-Dstorm.local.dir=" + _conf.get(Config.STORM_LOCAL_DIR));
    // Only pass the memory limit down when one has actually been configured.
    if (memoryLimitMB > 0) {
        params.add("-Dworker.memory_limit_mb=" + memoryLimitMB);
    }
    return params;
}
// Gather the various GC / worker child-opt lists so heap sizes can be parsed out of them.
// NOTE(review): this chunk is a mid-method fragment; the enclosing method signature and the
// tail of the block are outside the visible region.
final Integer topologyWorkerDefaultMemoryAllocation = 768;
List<String> topologyWorkerGcChildopts = ConfigUtils.getValueAsList(
    Config.TOPOLOGY_WORKER_GC_CHILDOPTS, topConf);
List<String> workerGcChildopts = ConfigUtils.getValueAsList(
    Config.WORKER_GC_CHILDOPTS, topConf);
Double memGcChildopts = null;
List<String> topologyWorkerChildopts = ConfigUtils.getValueAsList(
    Config.TOPOLOGY_WORKER_CHILDOPTS, topConf);
Double memTopologyWorkerChildopts = Utils.parseJvmHeapMemByChildOpts(
    topologyWorkerChildopts, null);
List<String> workerChildopts = ConfigUtils.getValueAsList(
    Config.WORKER_CHILDOPTS, topConf);
// NOTE(review): the call below appears truncated in this chunk — its argument list and the
// closing ");" are missing from the visible text; verify against the full source file.
Double memWorkerChildopts = Utils.parseJvmHeapMemByChildOpts(
List<String> topoWorkerLwChildopts = ConfigUtils.getValueAsList(
    Config.TOPOLOGY_WORKER_LOGWRITER_CHILDOPTS, topConf);
if (topoWorkerLwChildopts != null) {
/**
 * Build a context describing the topology's static structure: the topology itself, its config,
 * task/component mappings, and per-component stream fields.
 *
 * @param topologyId the id of the topology this context belongs to (stored as stormId)
 */
public GeneralTopologyContext(StormTopology topology, Map<String, Object> topoConf,
                              Map<Integer, String> taskToComponent,
                              Map<String, List<Integer>> componentToSortedTasks,
                              Map<String, Map<String, Fields>> componentToStreamToFields,
                              String stormId) {
    _topology = topology;
    _topoConf = topoConf;
    _stormId = stormId;
    _taskToComponent = taskToComponent;
    _componentToTasks = componentToSortedTasks;
    _componentToStreamToFields = componentToStreamToFields;
    // Sanity checking is only enabled when running in local mode.
    _doSanityCheck = ConfigUtils.isLocalMode(_topoConf);
}
/**
 * Get the path of the resources sub-directory under a supervisor's topology dist root.
 *
 * @param stormRoot the topology's dist root (may be null; handled by concatIfNotNull)
 * @return the resources directory path
 */
public static String supervisorStormResourcesPath(String stormRoot) {
    return concatIfNotNull(stormRoot) + FILE_SEPARATOR + RESOURCES_SUBDIR;
}
/*
 * Load the topology's dist root, supervisor-side conf and topology structure, then begin
 * collecting dependency jars.
 *
 * NOTE(review): this chunk is truncated — the body of the if, the catch/finally of the try,
 * and the method's closing braces are outside the visible region.
 */
@Override
public Void call() throws Exception {
    try {
        String stormroot = ConfigUtils.supervisorStormDistRoot(_conf, _topologyId);
        Map<String, Object> topoConf = ConfigUtils.readSupervisorStormConf(_conf, _topologyId);
        StormTopology stormCode = ConfigUtils.readSupervisorTopology(_conf, _topologyId, _fsOps);
        List<String> dependencies = new ArrayList<>();
        if (stormCode.is_set_dependency_jars()) {
// Resolve the topology id/owner from the assignment and load the supervisor-side view of
// the topology's dist root and config.
// NOTE(review): "getToplogyId" (sic) is the upstream API's spelling — do not "fix" the call
// here without renaming the method at its declaration.
String topologyId = pna.getToplogyId();
String topoOwner = pna.getOwner();
String stormroot = ConfigUtils.supervisorStormDistRoot(conf, topologyId);
Map<String, Object> topoConf = ConfigUtils.readSupervisorStormConf(conf, topologyId);
/**
 * Run (or stop) a profiling action against this container's worker process.
 *
 * <p>Resolves the worker's pid from its artifacts directory, builds the profiler command for
 * the requested action, and executes it with the topology's environment. Unsupported actions
 * are logged and ignored.
 *
 * @param request the profiling request describing the action to take
 * @param stop    true to stop an ongoing profiling action rather than start one
 * @return true on success, or when the request is not supported (it is ignored)
 * @throws IOException          on any error reading the pid or running the command
 * @throws InterruptedException if interrupted while waiting on the profiling command
 */
@Override
public boolean runProfiling(ProfileRequest request, boolean stop) throws IOException, InterruptedException {
    _type.assertFull();
    String targetDir = ConfigUtils.workerArtifactsRoot(_conf, _topologyId, _port);
    @SuppressWarnings("unchecked")
    Map<String, String> env = (Map<String, String>) _topoConf.get(Config.TOPOLOGY_ENVIRONMENT);
    if (env == null) {
        env = new HashMap<>();
    }
    // The worker pid was written into the artifacts dir when the worker started.
    String str = ConfigUtils.workerArtifactsPidPath(_conf, _topologyId, _port);
    String workerPid = _ops.slurpString(new File(str)).trim();
    ProfileAction profileAction = request.get_action();
    String logPrefix = "ProfilerAction process " + _topologyId + ":" + _port + " PROFILER_ACTION: " + profileAction + " ";
    List<String> command = mkProfileCommand(profileAction, stop, workerPid, targetDir);
    File targetFile = new File(targetDir);
    // An empty command means mkProfileCommand had no mapping for this action.
    if (!command.isEmpty()) {
        return runProfilingCommand(command, env, logPrefix, targetFile);
    }
    LOG.warn("PROFILING REQUEST NOT SUPPORTED {} IGNORED...", request);
    return true;
}
// In local mode, build executors via LocalExecutor so they run in-process.
// NOTE(review): this chunk is a mid-method fragment; the matching close of this if (and any
// else branch) plus the enclosing method are outside the visible region, so the exact scope
// of the statements below cannot be confirmed from here.
if (ConfigUtils.isLocalMode(topologyConf)) {
    Executor executor = LocalExecutor.mkExecutor(workerState, e, initCreds);
    execs.add(executor);
    setupBackPressureCheckTimer(topologyConf);
    LOG.info("Worker has topology config {}", ConfigUtils.maskPasswords(topologyConf));
    LOG.info("Worker {} for storm {} on {}:{} has finished loading", workerId, topologyId, assignmentId, port);
    return this;