public int getPort(Map<String, Object> conf) {
    if (isFake) {
        return -1;
    }
    return ObjectReader.getInt(conf.get(portConf));
}
public Integer getSocketTimeOut(Map<String, Object> conf) {
    if (socketTimeoutConf == null) {
        return null;
    }
    return ObjectReader.getInt(conf.get(socketTimeoutConf));
}
public static Integer getInt(Object o) {
    Integer result = getInt(o, null);
    if (null == result) {
        throw new IllegalArgumentException("Don't know how to convert null to int");
    }
    return result;
}
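The one-argument form above, and most of the call sites in this collection, delegate to a two-argument overload, getInt(Object, Integer), that returns the supplied default when the value is absent. The following is a minimal sketch of what that overload plausibly does, inferred from how the call sites use it; the exact set of accepted input types is an assumption, not Storm's actual implementation.

// Sketch of the two-argument overload the snippets rely on.
// NOTE: illustration only; the accepted input types are assumptions.
public static Integer getInt(Object o, Integer defaultValue) {
    if (o == null) {
        return defaultValue;            // fall back to the caller's default
    }
    if (o instanceof Integer || o instanceof Short || o instanceof Byte) {
        return ((Number) o).intValue(); // already an integral type
    }
    if (o instanceof Long) {
        long l = (Long) o;
        if (l >= Integer.MIN_VALUE && l <= Integer.MAX_VALUE) {
            return (int) l;             // narrow only when the value fits
        }
    }
    if (o instanceof String) {
        return Integer.parseInt(((String) o).trim()); // YAML configs often yield strings
    }
    throw new IllegalArgumentException("Don't know how to convert " + o + " to int");
}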
public int getNumThreads(Map<String, Object> conf) {
    if (isFake) {
        return 1;
    }
    return ObjectReader.getInt(conf.get(threadsConf));
}
public int getSpoutRecvqCheckSkipCount() {
    if (ackingEnabled) {
        return 0; // always check the receive queue if ACKing is enabled
    }
    return ObjectReader.getInt(conf.get(Config.TOPOLOGY_SPOUT_RECVQ_SKIPS), 0);
}
public static boolean hasEventLoggers(Map<String, Object> topoConf) {
    Object eventLoggerNum = topoConf.get(Config.TOPOLOGY_EVENTLOGGER_EXECUTORS);
    return eventLoggerNum == null || ObjectReader.getInt(eventLoggerNum) > 0;
}
@VisibleForTesting
long cleanupCutoffAgeMillis(long nowMillis) {
    final Integer intervalMins = ObjectReader.getInt(stormConf.get(LOGVIEWER_CLEANUP_AGE_MINS));
    return nowMillis - TimeUnit.MINUTES.toMillis(intervalMins);
}
public ReportError(Map<String, Object> topoConf, IStormClusterState stormClusterState, String stormId,
                   String componentId, WorkerTopologyContext workerTopologyContext) {
    this.topoConf = topoConf;
    this.stormClusterState = stormClusterState;
    this.stormId = stormId;
    this.componentId = componentId;
    this.workerTopologyContext = workerTopologyContext;
    this.errorIntervalSecs = ObjectReader.getInt(topoConf.get(Config.TOPOLOGY_ERROR_THROTTLE_INTERVAL_SECS));
    this.maxPerInterval = ObjectReader.getInt(topoConf.get(Config.TOPOLOGY_MAX_ERROR_REPORT_PER_INTERVAL));
    this.intervalStartTime = new AtomicInteger(Time.currentTimeSecs());
    this.intervalErrors = new AtomicInteger(0);
}
@Override
public void prepare(Map<String, Object> conf) {
    toleranceCount = ObjectReader.getInt(conf.get(DaemonConfig.BLACKLIST_SCHEDULER_TOLERANCE_COUNT),
            DEFAULT_BLACKLIST_SCHEDULER_TOLERANCE_COUNT);
    resumeTime = ObjectReader.getInt(conf.get(DaemonConfig.BLACKLIST_SCHEDULER_RESUME_TIME),
            DEFAULT_BLACKLIST_SCHEDULER_RESUME_TIME);
    String reporterClassName = ObjectReader.getString(conf.get(DaemonConfig.BLACKLIST_SCHEDULER_REPORTER),
            LogReporter.class.getName());
    reporter = (IReporter) initializeInstance(reporterClassName, "blacklist reporter");
    nimbusMonitorFreqSecs = ObjectReader.getInt(conf.get(DaemonConfig.NIMBUS_MONITOR_FREQ_SECS));
    blacklist = new TreeMap<>();
}
@Override
public void prepare(Map<String, Object> conf) {
    this.client = NimbusClient.getConfiguredClient(conf);
    if (conf != null) {
        this.bufferSize = ObjectReader.getInt(conf.get(Config.STORM_BLOBSTORE_INPUTSTREAM_BUFFER_SIZE_BYTES), bufferSize);
    }
}
/**
 * Constructs a TimeCacheMap instance with a blobstore timeout and no callback function.
 *
 * @param conf the config to use
 * @return the newly created TimeCacheMap
 */
@SuppressWarnings("deprecation")
private static TimeCacheMap<String, Iterator<String>> makeBlobListCacheMap(Map<String, Object> conf) {
    return new TimeCacheMap<>(ObjectReader.getInt(conf.get(DaemonConfig.NIMBUS_BLOBSTORE_EXPIRATION_SECS), 600));
}
private Set<List<Integer>> aliveExecutors(String topoId, Set<List<Integer>> allExecutors, Assignment assignment) {
    return heartbeatsCache.getAliveExecutors(topoId, allExecutors, assignment,
            ObjectReader.getInt(conf.get(DaemonConfig.NIMBUS_TASK_LAUNCH_SECS)));
}
private Map<String, Object> makeDefaultResources() {
    int threadPoolSize = ObjectReader.getInt(conf.get(Config.TOPOLOGY_WORKER_SHARED_THREAD_POOL_SIZE));
    return ImmutableMap.of(WorkerTopologyContext.SHARED_EXECUTOR, Executors.newFixedThreadPool(threadPoolSize));
}
public static int getComponentParallelism(Map<String, Object> topoConf, Object component) throws InvalidTopologyException {
    Map<String, Object> combinedConf = Utils.merge(topoConf, StormCommon.componentConf(component));
    int numTasks = ObjectReader.getInt(combinedConf.get(Config.TOPOLOGY_TASKS), StormCommon.numStartExecutors(component));
    Integer maxParallel = ObjectReader.getInt(combinedConf.get(Config.TOPOLOGY_MAX_TASK_PARALLELISM), null);
    int ret = numTasks;
    if (maxParallel != null) {
        ret = Math.min(maxParallel, numTasks);
    }
    return ret;
}
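For a concrete sense of the clamping above, here is a hypothetical walk-through. The config values are invented for illustration, `component` stands in for a real spout or bolt, and the component is assumed to set no per-component overrides, so the merged config equals the topology config.

// Hypothetical values, for illustration only.
Map<String, Object> topoConf = new HashMap<>();
topoConf.put(Config.TOPOLOGY_TASKS, 16);                 // numTasks = 16
topoConf.put(Config.TOPOLOGY_MAX_TASK_PARALLELISM, 4);   // maxParallel = 4
// getComponentParallelism(topoConf, component) -> Math.min(4, 16) == 4

topoConf.remove(Config.TOPOLOGY_MAX_TASK_PARALLELISM);   // maxParallel = null
// getComponentParallelism(topoConf, component) -> 16 (no clamping applied)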
@Override
public void prepare(Map<String, Object> conf) {
    this.conf = conf;
    schedulingPriorityStrategy = ReflectionUtils.newInstance(
            (String) conf.get(DaemonConfig.RESOURCE_AWARE_SCHEDULER_PRIORITY_STRATEGY));
    configLoader = ConfigLoaderFactoryService.createConfigLoader(conf);
    maxSchedulingAttempts = ObjectReader.getInt(
            conf.get(DaemonConfig.RESOURCE_AWARE_SCHEDULER_MAX_TOPOLOGY_SCHEDULING_ATTEMPTS), 5);
}
/**
 * Creates a SlowExecutorPattern from a Map config.
 *
 * @param conf the conf to parse.
 * @return the corresponding SlowExecutorPattern.
 */
public static SlowExecutorPattern fromConf(Map<String, Object> conf) {
    double slowness = ObjectReader.getDouble(conf.get("slownessMs"), 0.0);
    int count = ObjectReader.getInt(conf.get("count"), 1);
    return new SlowExecutorPattern(slowness, count);
}
private int getMemOnHeap(WorkerResources resources) {
    int memOnheap = 0;
    if (resources != null && resources.is_set_mem_on_heap() && resources.get_mem_on_heap() > 0) {
        memOnheap = (int) Math.ceil(resources.get_mem_on_heap());
    } else {
        // set the default heap memory size for supervisor-test
        memOnheap = ObjectReader.getInt(_topoConf.get(Config.WORKER_HEAP_MEMORY_MB), 768);
    }
    return memOnheap;
}
/**
 * Constructor.
 *
 * @param conf Drpc conf for the servers
 * @param metricsRegistry The metrics registry
 */
public DRPCServer(Map<String, Object> conf, StormMetricsRegistry metricsRegistry) {
    meterShutdownCalls = metricsRegistry.registerMeter("drpc:num-shutdown-calls");
    drpc = new DRPC(metricsRegistry, conf);
    DRPCThrift thrift = new DRPCThrift(drpc);
    handlerServer = mkHandlerServer(thrift, ObjectReader.getInt(conf.get(Config.DRPC_PORT), null), conf);
    invokeServer = mkInvokeServer(thrift, ObjectReader.getInt(conf.get(Config.DRPC_INVOCATIONS_PORT), 3773), conf);
    httpServer = mkHttpServer(metricsRegistry, conf, drpc);
}
private void updateHeartbeatsFromZkHeartbeat(String topoId, Set<List<Integer>> allExecutors, Assignment existingAssignment) {
    LOG.debug("Updating heartbeats for {} {} (from ZK heartbeat)", topoId, allExecutors);
    IStormClusterState state = stormClusterState;
    Map<List<Integer>, Map<String, Object>> executorBeats =
            StatsUtil.convertExecutorBeats(state.executorBeats(topoId, existingAssignment.get_executor_node_port()));
    heartbeatsCache.updateFromZkHeartbeat(topoId, executorBeats, allExecutors,
            ObjectReader.getInt(conf.get(DaemonConfig.NIMBUS_TASK_TIMEOUT_SECS)));
}
public static void main(String[] args) throws Exception {
    SysOutOverSLF4J.sendSystemOutAndErrToSLF4J();
    Map<String, Object> conf = ConfigUtils.readStormConfig();
    Object port = conf.get(Config.STORM_ZOOKEEPER_PORT);
    String localPath = (String) conf.get(DaemonConfig.DEV_ZOOKEEPER_PATH);
    Utils.forceDelete(localPath);
    Zookeeper.mkInprocessZookeeper(localPath, ObjectReader.getInt(port));
}
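Tying the patterns together, below is a small stand-alone demonstration of the two getInt variants seen throughout these snippets. The class name, map keys, and values are all made up for illustration; it assumes org.apache.storm.utils.ObjectReader is on the classpath and that getInt coerces numeric strings and in-range longs, consistent with the sketch earlier in this section.

import java.util.HashMap;
import java.util.Map;
import org.apache.storm.utils.ObjectReader;

public class ObjectReaderDemo {
    public static void main(String[] args) {
        // Hypothetical config; YAML-loaded configs may carry numbers as strings or longs.
        Map<String, Object> conf = new HashMap<>();
        conf.put("my.port", "6627");
        conf.put("my.timeout.secs", 30L);

        // One-arg form: the value must be present and convertible, or it throws.
        int port = ObjectReader.getInt(conf.get("my.port"));            // 6627
        int timeout = ObjectReader.getInt(conf.get("my.timeout.secs")); // 30

        // Two-arg form: a missing key falls back to the supplied default.
        int retries = ObjectReader.getInt(conf.get("my.retries"), 3);   // 3

        System.out.printf("port=%d timeout=%d retries=%d%n", port, timeout, retries);
    }
}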