protected void fillHadoopConfiguration(Map topologyConf, String configKey, Configuration configuration) { Map<String, Object> config = (Map<String, Object>) topologyConf.get(configKey); LOG.info("TopoConf {}, got config {}, for configKey {}", ConfigUtils.maskPasswords(topologyConf), ConfigUtils.maskPasswords(config), configKey); if (config != null) { List<String> resourcesToLoad = new ArrayList<>(); for (Map.Entry<String, Object> entry : config.entrySet()) { if (entry.getKey().equals(CONFIG_KEY_RESOURCES)) { resourcesToLoad.addAll((List<String>) entry.getValue()); } else { configuration.set(entry.getKey(), String.valueOf(entry.getValue())); } } LOG.info("Resources to load {}", resourcesToLoad); // add configs from resources like hdfs-site.xml for (String pathStr : resourcesToLoad) { configuration.addResource(new Path(Paths.get(pathStr).toUri())); } } LOG.info("Initializing UGI with config {}", configuration); UserGroupInformation.setConfiguration(configuration); }
/**
 * Verifies that {@code ConfigUtils.maskPasswords} replaces password-typed config values with
 * the mask string while leaving non-secret entries untouched.
 */
@Test
public void testMaskPasswords() {
    Map<String, Object> topoConf = new HashMap<>();
    topoConf.put(DaemonConfig.LOGVIEWER_HTTPS_KEY_PASSWORD, "pass1");
    topoConf.put(Config.TOPOLOGY_MESSAGE_TIMEOUT_SECS, 100);

    Map masked = ConfigUtils.maskPasswords(topoConf);

    // The secret must be masked; the ordinary setting must pass through unchanged.
    Assert.assertEquals("*****", masked.get(DaemonConfig.LOGVIEWER_HTTPS_KEY_PASSWORD));
    Assert.assertEquals(100, masked.get(Config.TOPOLOGY_MESSAGE_TIMEOUT_SECS));
}
}
setupBackPressureCheckTimer(topologyConf); LOG.info("Worker has topology config {}", ConfigUtils.maskPasswords(topologyConf)); LOG.info("Worker {} for storm {} on {}:{} has finished loading", workerId, topologyId, assignmentId, port); return this;
NimbusInfo hpi = nimbusHostPortInfo; LOG.info("Starting Nimbus with conf {}", ConfigUtils.maskPasswords(conf)); validator.prepare(conf);
public void start() throws Exception { LOG.info("Launching worker for {} on {}:{} with id {} and conf {}", topologyId, assignmentId, port, workerId, ConfigUtils.maskPasswords(conf));
/** * Launch the supervisor. */ public void launch() throws Exception { LOG.info("Starting Supervisor with conf {}", ConfigUtils.maskPasswords(conf)); String path = ServerConfigUtils.supervisorTmpDir(conf); FileUtils.cleanDirectory(new File(path)); SupervisorHeartbeat hb = new SupervisorHeartbeat(conf, this); hb.run(); // should synchronize supervisor so it doesn't launch anything after being down (optimization) Integer heartbeatFrequency = ObjectReader.getInt(conf.get(DaemonConfig.SUPERVISOR_HEARTBEAT_FREQUENCY_SECS)); heartbeatTimer.scheduleRecurring(0, heartbeatFrequency, hb); this.eventManager = new EventManagerImp(false); this.readState = new ReadClusterState(this); asyncLocalizer.start(); if ((Boolean) conf.get(DaemonConfig.SUPERVISOR_ENABLE)) { // This isn't strictly necessary, but it doesn't hurt and ensures that the machine stays up // to date even if callbacks don't all work exactly right eventTimer.scheduleRecurring(0, 10, new EventManagerPushCallback(new SynchronizeAssignments(this, null, readState), eventManager)); // supervisor health check eventTimer.scheduleRecurring(30, 30, new SupervisorHealthCheck(this)); } ReportWorkerHeartbeats reportWorkerHeartbeats = new ReportWorkerHeartbeats(conf, this); Integer workerHeartbeatFrequency = ObjectReader.getInt(conf.get(Config.WORKER_HEARTBEAT_FREQUENCY_SECS)); workerHeartbeatTimer.scheduleRecurring(0, workerHeartbeatFrequency, reportWorkerHeartbeats); LOG.info("Starting supervisor with id {} at host {}.", getId(), getHostName()); }
topoVersionString, topology.get_jdk_version(), ConfigUtils.maskPasswords(topoConf));