/**
 * Returns the port this instance listens on.
 *
 * @param conf configuration map the port is read from.
 * @return -1 when this is a fake/local instance, otherwise the configured port.
 */
public int getPort(Map<String, Object> conf) {
    // Fake instances have no real port, so short-circuit with -1.
    return isFake ? -1 : ObjectReader.getInt(conf.get(portConf));
}
/**
 * Given the blob information returns the value of the workerRestart field, handling it
 * either being a string or a boolean value, or if it's not specified then returns false.
 *
 * @param blobInfo the info for the blob.
 * @return true if the blob needs a worker restart by way of the callback else false.
 */
public static boolean blobNeedsWorkerRestart(Map<String, Object> blobInfo) {
    final Object rawFlag = blobInfo.get("workerRestart");
    // ObjectReader.getBoolean handles both String and Boolean representations.
    return ObjectReader.getBoolean(rawFlag, false);
}
/**
 * Reads the blacklist scheduler settings out of the daemon configuration and
 * initializes the reporter and the (initially empty) blacklist.
 *
 * @param conf the daemon configuration map.
 */
@Override
public void prepare(Map<String, Object> conf) {
    // How many failed checks a node may accumulate before being blacklisted.
    toleranceCount = ObjectReader.getInt(
        conf.get(DaemonConfig.BLACKLIST_SCHEDULER_TOLERANCE_COUNT),
        DEFAULT_BLACKLIST_SCHEDULER_TOLERANCE_COUNT);
    // How long a node remains blacklisted before being considered again.
    resumeTime = ObjectReader.getInt(
        conf.get(DaemonConfig.BLACKLIST_SCHEDULER_RESUME_TIME),
        DEFAULT_BLACKLIST_SCHEDULER_RESUME_TIME);
    final String reporterClass = ObjectReader.getString(
        conf.get(DaemonConfig.BLACKLIST_SCHEDULER_REPORTER),
        LogReporter.class.getName());
    reporter = (IReporter) initializeInstance(reporterClass, "blacklist reporter");
    nimbusMonitorFreqSecs = ObjectReader.getInt(conf.get(DaemonConfig.NIMBUS_MONITOR_FREQ_SECS));
    blacklist = new TreeMap<>();
}
/**
 * Initializes member variables from the topology configuration.
 *
 * <p>The original version validated the two required settings with {@code assert},
 * which is a no-op unless the JVM runs with {@code -ea}; in production a missing
 * config would silently leave the field null and fail later. Validate explicitly
 * instead.
 *
 * @throws IllegalArgumentException if either required config value is missing.
 */
private void initConfigs() {
    this.topologyWorkerMaxHeapSize = ObjectReader.getDouble(
        topologyConf.get(Config.TOPOLOGY_WORKER_MAX_HEAP_SIZE_MB), null);
    this.topologyPriority = ObjectReader.getInt(topologyConf.get(Config.TOPOLOGY_PRIORITY), null);
    if (this.topologyWorkerMaxHeapSize == null) {
        throw new IllegalArgumentException(
            Config.TOPOLOGY_WORKER_MAX_HEAP_SIZE_MB + " is not set for this topology");
    }
    if (this.topologyPriority == null) {
        throw new IllegalArgumentException(
            Config.TOPOLOGY_PRIORITY + " is not set for this topology");
    }
}
/**
 * Loads the progressive-wait tuning parameters appropriate for the given wait
 * situation (spout wait, bolt wait, or backpressure wait).
 *
 * @param conf topology configuration to read the level counts and sleep from.
 * @param waitSituation which set of config keys to use.
 * @throws IllegalArgumentException for a null or unrecognized wait situation.
 */
@Override
public void prepare(Map<String, Object> conf, WAIT_SITUATION waitSituation) {
    // A null situation gets the same error the original if/else chain produced.
    if (waitSituation == null) {
        throw new IllegalArgumentException("Unknown wait situation : " + waitSituation);
    }
    switch (waitSituation) {
        case SPOUT_WAIT:
            level1Count = ObjectReader.getInt(conf.get(Config.TOPOLOGY_SPOUT_WAIT_PROGRESSIVE_LEVEL1_COUNT));
            level2Count = ObjectReader.getInt(conf.get(Config.TOPOLOGY_SPOUT_WAIT_PROGRESSIVE_LEVEL2_COUNT));
            level3SleepMs = ObjectReader.getLong(conf.get(Config.TOPOLOGY_SPOUT_WAIT_PROGRESSIVE_LEVEL3_SLEEP_MILLIS));
            break;
        case BOLT_WAIT:
            level1Count = ObjectReader.getInt(conf.get(Config.TOPOLOGY_BOLT_WAIT_PROGRESSIVE_LEVEL1_COUNT));
            level2Count = ObjectReader.getInt(conf.get(Config.TOPOLOGY_BOLT_WAIT_PROGRESSIVE_LEVEL2_COUNT));
            level3SleepMs = ObjectReader.getLong(conf.get(Config.TOPOLOGY_BOLT_WAIT_PROGRESSIVE_LEVEL3_SLEEP_MILLIS));
            break;
        case BACK_PRESSURE_WAIT:
            level1Count = ObjectReader.getInt(conf.get(Config.TOPOLOGY_BACKPRESSURE_WAIT_PROGRESSIVE_LEVEL1_COUNT));
            level2Count = ObjectReader.getInt(conf.get(Config.TOPOLOGY_BACKPRESSURE_WAIT_PROGRESSIVE_LEVEL2_COUNT));
            level3SleepMs = ObjectReader.getLong(conf.get(Config.TOPOLOGY_BACKPRESSURE_WAIT_PROGRESSIVE_LEVEL3_SLEEP_MILLIS));
            break;
        default:
            throw new IllegalArgumentException("Unknown wait situation : " + waitSituation);
    }
}
String className = (String) info.get(TOPOLOGY_METRICS_CONSUMER_CLASS); Object argument = info.get(TOPOLOGY_METRICS_CONSUMER_ARGUMENT); Integer maxRetainMetricTuples = ObjectReader.getInt(info.get( TOPOLOGY_METRICS_CONSUMER_MAX_RETAIN_METRIC_TUPLES), 100); Integer phintNum = ObjectReader.getInt(info.get(TOPOLOGY_METRICS_CONSUMER_PARALLELISM_HINT), 1); Map<String, Object> metricsConsumerConf = new HashMap<String, Object>(); metricsConsumerConf.put(Config.TOPOLOGY_TASKS, phintNum); TOPOLOGY_METRICS_CONSUMER_BLACKLIST); FilterByMetricName filterPredicate = new FilterByMetricName(whitelist, blacklist); Boolean expandMapType = ObjectReader.getBoolean(info.get( TOPOLOGY_METRICS_CONSUMER_EXPAND_MAP_TYPE), false); String metricNameSeparator = ObjectReader.getString(info.get( TOPOLOGY_METRICS_CONSUMER_METRIC_NAME_SEPARATOR), "."); DataPointExpander expander = new DataPointExpander(expandMapType, metricNameSeparator);
protected static void setupBuilder(CuratorFrameworkFactory.Builder builder, final String zkStr, Map<String, Object> conf, ZookeeperAuthInfo auth) { List<String> exhibitorServers = ObjectReader.getStrings(conf.get(Config.STORM_EXHIBITOR_SERVERS)); if (!exhibitorServers.isEmpty()) { new Exhibitors(exhibitorServers, ObjectReader.getInt(conf.get(Config.STORM_EXHIBITOR_PORT)), new Exhibitors.BackupConnectionStringProvider() { @Override ObjectReader.getString(conf.get(Config.STORM_EXHIBITOR_URIPATH)), ObjectReader.getInt(conf.get(Config.STORM_EXHIBITOR_POLL)), new StormBoundedExponentialBackoffRetry( ObjectReader.getInt(conf.get(Config.STORM_EXHIBITOR_RETRY_INTERVAL)), ObjectReader.getInt(conf.get(Config.STORM_EXHIBITOR_RETRY_INTERVAL_CEILING)), ObjectReader.getInt(conf.get(Config.STORM_EXHIBITOR_RETRY_TIMES))))); } else { builder.connectString(zkStr); .connectionTimeoutMs(ObjectReader.getInt(conf.get(Config.STORM_ZOOKEEPER_CONNECTION_TIMEOUT))) .sessionTimeoutMs(ObjectReader.getInt(conf.get(Config.STORM_ZOOKEEPER_SESSION_TIMEOUT))) .retryPolicy(new StormBoundedExponentialBackoffRetry( ObjectReader.getInt(conf.get(Config.STORM_ZOOKEEPER_RETRY_INTERVAL)), ObjectReader.getInt(conf.get(Config.STORM_ZOOKEEPER_RETRY_INTERVAL_CEILING)), ObjectReader.getInt(conf.get(Config.STORM_ZOOKEEPER_RETRY_TIMES))));
ObjectReader.getDouble(conf.get(Config.TOPOLOGY_WORKER_MAX_HEAP_SIZE_MB))); Utils.validateTopologyBlobStoreMap(topoConf, blobStore); long uniqueNum = submittedCount.incrementAndGet(); int numAckerExecs = ObjectReader.getInt(totalConf.get(Config.TOPOLOGY_ACKER_EXECUTORS), estimatedNumWorker); int numEventLoggerExecs = ObjectReader.getInt(totalConf.get(Config.TOPOLOGY_EVENTLOGGER_EXECUTORS), estimatedNumWorker); if (ObjectReader.getBoolean(conf.get(Config.SUPERVISOR_RUN_WORKER_AS_USER), false) && (submitterUser == null || submitterUser.isEmpty())) { throw new WrappedAuthorizationException("Could not determine the user to run this topology as."); state.setupHeatbeats(topoId, topoConf); state.setupErrors(topoId, topoConf); if (ObjectReader.getBoolean(totalConf.get(Config.TOPOLOGY_BACKPRESSURE_ENABLE), false)) { state.setupBackpressure(topoId, topoConf);
/**
 * Reads the optional metric-name prefix from the reporter configuration.
 *
 * @param reporterConf the reporter configuration map.
 * @return the configured prefix, or null if none was set.
 */
private static String getMetricsPrefixedWith(Map reporterConf) {
    final Object prefix = reporterConf.get(GRAPHITE_PREFIXED_WITH);
    return ObjectReader.getString(prefix, null);
}
closing = false; this.scheduler = scheduler; int bufferSize = ObjectReader.getInt(topoConf.get(Config.STORM_MESSAGING_NETTY_BUFFER_SIZE)); int lowWatermark = ObjectReader.getInt(topoConf.get(Config.STORM_MESSAGING_NETTY_BUFFER_LOW_WATERMARK)); int highWatermark = ObjectReader.getInt(topoConf.get(Config.STORM_MESSAGING_NETTY_BUFFER_HIGH_WATERMARK)); saslChannelReady.set(!ObjectReader.getBoolean(topoConf.get(Config.STORM_MESSAGING_NETTY_AUTHENTICATION), false)); LOG.info("Creating Netty Client, connecting to {}:{}, bufferSize: {}, lowWatermark: {}, highWatermark: {}", host, port, bufferSize, lowWatermark, highWatermark); int minWaitMs = ObjectReader.getInt(topoConf.get(Config.STORM_MESSAGING_NETTY_MIN_SLEEP_MS)); int maxWaitMs = ObjectReader.getInt(topoConf.get(Config.STORM_MESSAGING_NETTY_MAX_SLEEP_MS)); retryPolicy = new StormBoundedExponentialBackoffRetry(minWaitMs, maxWaitMs, -1); launchChannelAliveThread(); scheduleConnect(NO_DELAY_MS); int messageBatchSize = ObjectReader.getInt(topoConf.get(Config.STORM_NETTY_MESSAGE_BATCH_SIZE), 262144); batcher = new MessageBuffer(messageBatchSize); String clazz = (String) topoConf.get(Config.TOPOLOGY_BACKPRESSURE_WAIT_STRATEGY);
/**
 * Invokes the worker-launcher binary to grant the given user access to a blob on
 * local disk. Does nothing unless {@code supervisor.run.worker.as.user} is enabled.
 *
 * @param conf supervisor configuration.
 * @param user the user the blob should be made accessible to.
 * @param path local path of the blob.
 * @throws IOException if the worker-launcher exits with a non-zero code.
 */
private void setBlobPermissions(Map<String, Object> conf, String user, Path path) throws IOException {
    if (!ObjectReader.getBoolean(conf.get(Config.SUPERVISOR_RUN_WORKER_AS_USER), false)) {
        return;
    }
    String wlCommand = ObjectReader.getString(conf.get(Config.SUPERVISOR_WORKER_LAUNCHER), "");
    if (wlCommand.isEmpty()) {
        // Fall back to the standard worker-launcher location under STORM_HOME.
        String stormHome = System.getProperty(ConfigUtils.STORM_HOME);
        wlCommand = stormHome + "/bin/worker-launcher";
    }
    // The command is fixed-size; no intermediate mutable List is needed.
    String[] commandArray = {wlCommand, user, "blob", path.toString()};
    ShellUtils.ShellCommandExecutor shExec = new ShellUtils.ShellCommandExecutor(commandArray);
    LOG.debug("Setting blob permissions, command: {}", Arrays.toString(commandArray));
    try {
        shExec.execute();
        LOG.debug("output: {}", shExec.getOutput());
    } catch (ShellUtils.ExitCodeException e) {
        int exitCode = shExec.getExitCode();
        LOG.warn("Exit code from worker-launcher is: {}", exitCode, e);
        LOG.debug("output: {}", shExec.getOutput());
        // Preserve the cause and surface the launcher's output for diagnosis.
        throw new IOException("Setting blob permissions failed" + " (exitCode=" + exitCode
            + ") with output: " + shExec.getOutput(), e);
    }
}
/**
 * Converts an object to a Double, failing fast when no conversion is possible.
 *
 * @param o the value to convert.
 * @return the converted value, never null.
 * @throws IllegalArgumentException if the value converts to null.
 */
public static Double getDouble(Object o) {
    final Double converted = getDouble(o, null);
    if (converted == null) {
        throw new IllegalArgumentException("Don't know how to convert null to double");
    }
    return converted;
}
/**
 * Converts an object to a Long by delegating to the two-argument overload with a
 * null default, so a null (or otherwise defaulted) input yields null rather than
 * throwing.
 *
 * @param o the value to convert.
 * @return the converted Long, or null when the overload falls back to the default.
 */
public static Long getLong(Object o) {
    return getLong(o, null);
}
/**
 * Get the whitelist of users and groups for given file.
 *
 * @param fileName file name to get the whitelist
 * @return the whitelist parsed from the log metadata file, or null when the
 *     metadata file yields no map.
 */
public LogUserGroupWhitelist getLogUserGroupWhitelist(String fileName) {
    File metaFile = ServerConfigUtils.getLogMetaDataFile(fileName);
    Map<String, Object> meta = (Map<String, Object>) Utils.readYamlFile(metaFile.getAbsolutePath());
    if (meta == null) {
        return null;
    }
    // A HashSet built from an empty list is itself empty, so no empty-check is needed.
    Set<String> userWhitelist = new HashSet<>(ObjectReader.getStrings(meta.get(DaemonConfig.LOGS_USERS)));
    Set<String> groupWhitelist = new HashSet<>(ObjectReader.getStrings(meta.get(DaemonConfig.LOGS_GROUPS)));
    return new LogUserGroupWhitelist(userWhitelist, groupWhitelist);
}
/**
 * Creates a SlowExecutorPattern from a Map config.
 *
 * @param conf the conf to parse.
 * @return the corresponding SlowExecutorPattern.
 */
public static SlowExecutorPattern fromConf(Map<String, Object> conf) {
    // Missing keys fall back to a non-slow (0 ms), single-executor pattern.
    final double slownessMs = ObjectReader.getDouble(conf.get("slownessMs"), 0.0);
    final int executorCount = ObjectReader.getInt(conf.get("count"), 1);
    return new SlowExecutorPattern(slownessMs, executorCount);
}
/**
 * Reads the Graphite target host from the reporter configuration.
 *
 * @param reporterConf the reporter configuration map.
 * @return the configured host, or null if none was set.
 */
private static String getMetricsTargetHost(Map reporterConf) {
    final Object host = reporterConf.get(GRAPHITE_HOST);
    return ObjectReader.getString(host, null);
}
private void setupFlushTupleTimer(final Map<String, Object> topologyConf, final List<IRunningExecutor> executors) { final Integer producerBatchSize = ObjectReader.getInt(topologyConf.get(Config.TOPOLOGY_PRODUCER_BATCH_SIZE)); final Integer xferBatchSize = ObjectReader.getInt(topologyConf.get(Config.TOPOLOGY_TRANSFER_BATCH_SIZE)); final Long flushIntervalMillis = ObjectReader.getLong(topologyConf.get(Config.TOPOLOGY_BATCH_FLUSH_INTERVAL_MILLIS)); if ((producerBatchSize == 1 && xferBatchSize == 1) || flushIntervalMillis == 0) { LOG.info("Flush Tuple generation disabled. producerBatchSize={}, xferBatchSize={}, flushIntervalMillis={}", producerBatchSize, xferBatchSize, flushIntervalMillis); return; } workerState.flushTupleTimer.scheduleRecurringMs(flushIntervalMillis, flushIntervalMillis, () -> { // send flush tuple to all local executors for (int i = 0; i < executors.size(); i++) { IRunningExecutor exec = executors.get(i); if (exec.getExecutorId().get(0) != Constants.SYSTEM_TASK_ID) { exec.publishFlushTuple(); } } } ); LOG.info("Flush tuple will be generated every {} millis", flushIntervalMillis); }
/**
 * Converts the configured stats sample rate into an inverse event count, e.g. a
 * rate of 0.05 means one sample per 20 events.
 *
 * @param conf topology configuration.
 * @return the number of events per sample.
 * @throws IllegalArgumentException if the configured rate is zero.
 */
public static int samplingRate(Map<String, Object> conf) {
    final double rate = ObjectReader.getDouble(conf.get(Config.TOPOLOGY_STATS_SAMPLE_RATE));
    // Guard clause: a zero rate would divide by zero below.
    if (rate == 0) {
        throw new IllegalArgumentException("Illegal topology.stats.sample.rate in conf: " + rate);
    }
    return (int) (1 / rate);
}
/**
 * Prepares the spout: records the collector and reads the optional per-emit
 * sleep from the topology configuration.
 *
 * @param conf topology configuration; "spout.sleep" defaults to 0 ms.
 * @param context the topology context (unused here).
 * @param collector the collector used to emit tuples.
 */
@Override
public void open(Map<String, Object> conf, TopologyContext context, SpoutOutputCollector collector) {
    final Long configuredSleepMs = ObjectReader.getLong(conf.get("spout.sleep"), 0L);
    this.sleep = configuredSleepMs;
    this.collector = collector;
}
/**
 * Checks whether user is authorized to access file. Checks regardless of UI filter.
 *
 * <p>The per-file whitelist is now fetched once; the original called
 * {@code getLogUserGroupWhitelist(fileName)} twice, re-reading and re-parsing the
 * log metadata file for every check.
 *
 * @param user username
 * @param fileName file name to access
 * @return true when the resolved local user is in the combined user whitelist, or
 *     belongs to a group in the combined group whitelist; false otherwise.
 */
public boolean isAuthorizedLogUser(String user, String fileName) {
    if (StringUtils.isEmpty(user) || StringUtils.isEmpty(fileName)) {
        return false;
    }
    LogUserGroupWhitelist whitelist = getLogUserGroupWhitelist(fileName);
    if (whitelist == null) {
        return false;
    }
    // Combine globally-configured users/admins with the per-file whitelist.
    List<String> logsUsers = new ArrayList<>();
    logsUsers.addAll(ObjectReader.getStrings(stormConf.get(DaemonConfig.LOGS_USERS)));
    logsUsers.addAll(ObjectReader.getStrings(stormConf.get(Config.NIMBUS_ADMINS)));
    logsUsers.addAll(whitelist.getUserWhitelist());
    List<String> logsGroups = new ArrayList<>();
    logsGroups.addAll(ObjectReader.getStrings(stormConf.get(DaemonConfig.LOGS_GROUPS)));
    logsGroups.addAll(ObjectReader.getStrings(stormConf.get(Config.NIMBUS_ADMINS_GROUPS)));
    logsGroups.addAll(whitelist.getGroupWhitelist());
    String userName = principalToLocal.toLocal(user);
    Set<String> groups = getUserGroups(userName);
    return logsUsers.stream().anyMatch(u -> u.equals(userName))
        || Sets.intersection(groups, new HashSet<>(logsGroups)).size() > 0;
}