/**
 * Converts {@code o} to a {@link Long}.
 *
 * <p>Delegates to {@link #getLong(Object, Long)} with a {@code null} default,
 * so the two-argument overload decides how unconvertible values are handled.
 *
 * @param o the object to convert
 * @return the converted value, as produced by {@code getLong(o, null)}
 */
public static Long getLong(Object o) {
    return getLong(o, null);
}
/**
 * Spout lifecycle hook: captures the collector used to emit tuples and reads
 * the per-emit delay from the {@code "spout.sleep"} config entry (defaulting
 * to 0 when unset — presumably milliseconds; confirm against the nextTuple impl).
 */
@Override
public void open(Map<String, Object> conf, TopologyContext context, SpoutOutputCollector collector) {
    this.sleep = ObjectReader.getLong(conf.get("spout.sleep"), 0L);
    this.collector = collector;
}
/**
 * Constructor. This assumes that state can store the tokens securely, and that they should be
 * enabled at all. Please use ClientAuthUtils.areWorkerTokensEnabledServer to validate this first.
 *
 * @param daemonConf the config for nimbus.
 * @param state the state used to store private keys.
 * @throws IllegalArgumentException if the default HMAC algorithm is not available in this JVM.
 */
public WorkerTokenManager(Map<String, Object> daemonConf, IStormClusterState state) {
    this.state = state;
    try {
        keyGen = KeyGenerator.getInstance(WorkerTokenSigner.DEFAULT_HMAC_ALGORITHM);
        keyGen.init(KEY_LENGTH);
    } catch (NoSuchAlgorithmException nsa) {
        // Preserve the original exception as the cause so the underlying JCE
        // failure is not lost from the stack trace.
        throw new IllegalArgumentException(
            "Can't find " + WorkerTokenSigner.DEFAULT_HMAC_ALGORITHM + " algorithm.", nsa);
    }
    // Token lifetime is configured in hours; convert once to millis for internal use.
    this.tokenLifetimeMillis = TimeUnit.MILLISECONDS.convert(
        ObjectReader.getLong(daemonConf.get(DaemonConfig.STORM_WORKER_TOKEN_LIFE_TIME_HOURS), 24L),
        TimeUnit.HOURS);
}
/**
 * Schedules the recurring task that refreshes this worker's back pressure status.
 *
 * <p>Single-worker topologies skip the timer entirely — with one worker there is
 * no other worker whose back pressure state needs tracking.
 */
private void setupBackPressureCheckTimer(final Map<String, Object> topologyConf) {
    if (workerState.isSingleWorker()) {
        LOG.info("BackPressure change checking is disabled as there is only one worker");
        return;
    }
    final Long intervalMs = ObjectReader.getLong(topologyConf.get(Config.TOPOLOGY_BACKPRESSURE_CHECK_MILLIS));
    workerState.backPressureCheckTimer.scheduleRecurringMs(
        intervalMs, intervalMs, workerState::refreshBackPressureStatus);
    LOG.info("BackPressure status change checking will be performed every {} millis", intervalMs);
}
/**
 * Loads the progressive-wait tuning parameters for the given situation.
 *
 * <p>Each situation (spout wait, bolt wait, back-pressure wait) carries its own
 * trio of config keys; the actual field assignment is identical, so it is
 * factored into {@link #loadProgressiveLevels}.
 *
 * @throws IllegalArgumentException if {@code waitSituation} is not one of the
 *     three known situations (including {@code null}).
 */
@Override
public void prepare(Map<String, Object> conf, WAIT_SITUATION waitSituation) {
    if (waitSituation == WAIT_SITUATION.SPOUT_WAIT) {
        loadProgressiveLevels(conf,
            Config.TOPOLOGY_SPOUT_WAIT_PROGRESSIVE_LEVEL1_COUNT,
            Config.TOPOLOGY_SPOUT_WAIT_PROGRESSIVE_LEVEL2_COUNT,
            Config.TOPOLOGY_SPOUT_WAIT_PROGRESSIVE_LEVEL3_SLEEP_MILLIS);
    } else if (waitSituation == WAIT_SITUATION.BOLT_WAIT) {
        loadProgressiveLevels(conf,
            Config.TOPOLOGY_BOLT_WAIT_PROGRESSIVE_LEVEL1_COUNT,
            Config.TOPOLOGY_BOLT_WAIT_PROGRESSIVE_LEVEL2_COUNT,
            Config.TOPOLOGY_BOLT_WAIT_PROGRESSIVE_LEVEL3_SLEEP_MILLIS);
    } else if (waitSituation == WAIT_SITUATION.BACK_PRESSURE_WAIT) {
        loadProgressiveLevels(conf,
            Config.TOPOLOGY_BACKPRESSURE_WAIT_PROGRESSIVE_LEVEL1_COUNT,
            Config.TOPOLOGY_BACKPRESSURE_WAIT_PROGRESSIVE_LEVEL2_COUNT,
            Config.TOPOLOGY_BACKPRESSURE_WAIT_PROGRESSIVE_LEVEL3_SLEEP_MILLIS);
    } else {
        throw new IllegalArgumentException("Unknown wait situation : " + waitSituation);
    }
}

// Reads the level1/level2 counts and level3 sleep from the given config keys.
private void loadProgressiveLevels(Map<String, Object> conf, String level1Key, String level2Key, String level3Key) {
    level1Count = ObjectReader.getInt(conf.get(level1Key));
    level2Count = ObjectReader.getInt(conf.get(level2Key));
    level3SleepMs = ObjectReader.getLong(conf.get(level3Key));
}
try { Process process = Runtime.getRuntime().exec(script); final long timeout = ObjectReader.getLong(conf.get(DaemonConfig.STORM_HEALTH_CHECK_TIMEOUT_MS), 5000L); final Thread curThread = Thread.currentThread();
private void setupFlushTupleTimer(final Map<String, Object> topologyConf, final List<IRunningExecutor> executors) { final Integer producerBatchSize = ObjectReader.getInt(topologyConf.get(Config.TOPOLOGY_PRODUCER_BATCH_SIZE)); final Integer xferBatchSize = ObjectReader.getInt(topologyConf.get(Config.TOPOLOGY_TRANSFER_BATCH_SIZE)); final Long flushIntervalMillis = ObjectReader.getLong(topologyConf.get(Config.TOPOLOGY_BATCH_FLUSH_INTERVAL_MILLIS)); if ((producerBatchSize == 1 && xferBatchSize == 1) || flushIntervalMillis == 0) { LOG.info("Flush Tuple generation disabled. producerBatchSize={}, xferBatchSize={}, flushIntervalMillis={}", producerBatchSize, xferBatchSize, flushIntervalMillis); return; } workerState.flushTupleTimer.scheduleRecurringMs(flushIntervalMillis, flushIntervalMillis, () -> { // send flush tuple to all local executors for (int i = 0; i < executors.size(); i++) { IRunningExecutor exec = executors.get(i); if (exec.getExecutorId().get(0) != Constants.SYSTEM_TASK_ID) { exec.publishFlushTuple(); } } } ); LOG.info("Flush tuple will be generated every {} millis", flushIntervalMillis); }
static StormTopology getTopology(Map<String, Object> conf) { Long sleepMs = ObjectReader.getLong(conf.get(SLEEP_MS)); // 1 - Setup Spout -------- ConstSpout spout = new ConstSpout("some data").withOutputFields("string"); // 2 - Setup DevNull Bolt -------- ThrottledBolt bolt = new ThrottledBolt(sleepMs); // 3 - Setup Topology -------- TopologyBuilder builder = new TopologyBuilder(); builder.setSpout(SPOUT_ID, spout, Helper.getInt(conf, SPOUT_COUNT, 1)); BoltDeclarer bd = builder.setBolt(BOLT_ID, bolt, Helper.getInt(conf, BOLT_COUNT, 1)); bd.localOrShuffleGrouping(SPOUT_ID); return builder.createTopology(); }
static StormTopology getTopology(Map<String, Object> conf) { Long sleepMs = ObjectReader.getLong(conf.get(SLEEP_MS)); // 1 - Setup Spout -------- ThrottledSpout spout = new ThrottledSpout(sleepMs).withOutputFields(ThrottledSpout.DEFAULT_FIELD_NAME); // 2 - Setup DevNull Bolt -------- LatencyPrintBolt bolt = new LatencyPrintBolt(); // 3 - Setup Topology -------- TopologyBuilder builder = new TopologyBuilder(); builder.setSpout(SPOUT_ID, spout, Helper.getInt(conf, SPOUT_COUNT, 1)); BoltDeclarer bd = builder.setBolt(BOLT_ID, bolt, Helper.getInt(conf, BOLT_COUNT, 1)); bd.localOrShuffleGrouping(SPOUT_ID); // bd.shuffleGrouping(SPOUT_ID); return builder.createTopology(); }