/**
 * Get the path of the pid file for a given worker pid.
 *
 * @param conf the config to use
 * @param id the id of the worker
 * @param pid the pid the file is for
 * @return the full path to the pid file
 */
public static String workerPidPath(Map<String, Object> conf, String id, String pid) {
    String pidsRoot = workerPidsRoot(conf, id);
    return pidsRoot + FILE_SEPARATOR + pid;
}
/**
 * Get all of the pids that are a part of this container.
 *
 * @return the pids found in the worker pids directory, plus any reported by the
 *     resource isolation manager when one is configured
 * @throws IOException on any error reading the pid directory
 */
protected Set<Long> getAllPids() throws IOException {
    Set<Long> pids = new HashSet<>();
    String pidsRoot = ConfigUtils.workerPidsRoot(_conf, _workerId);
    for (String pidString : ConfigUtils.readDirContents(pidsRoot)) {
        pids.add(Long.valueOf(pidString));
    }
    if (_resourceIsolationManager != null) {
        Set<Long> isolatedPids = _resourceIsolationManager.getRunningPids(_workerId);
        assert (isolatedPids != null);
        pids.addAll(isolatedPids);
    }
    return pids;
}
/**
 * Build a {@link WorkerTopologyContext} from the shared worker-data map.
 *
 * @param workerData heterogeneous worker state keyed by {@code Constants} entries;
 *     the casts below are unchecked for that reason
 * @return a context describing this worker's view of the topology
 */
@SuppressWarnings("unchecked")
public static WorkerTopologyContext makeWorkerContext(Map<String, Object> workerData) {
    try {
        StormTopology systemTopology = (StormTopology) workerData.get(Constants.SYSTEM_TOPOLOGY);
        Map<String, Object> topologyConf = (Map) workerData.get(Constants.STORM_CONF);
        Map<String, Object> daemonConf = (Map) workerData.get(Constants.CONF);
        String topologyId = (String) workerData.get(Constants.STORM_ID);
        Integer workerPort = (Integer) workerData.get(Constants.PORT);
        Map<Integer, String> taskToComponent = (Map<Integer, String>) workerData.get(Constants.TASK_TO_COMPONENT);
        Map<String, List<Integer>> componentToSortedTasks =
            (Map<String, List<Integer>>) workerData.get(Constants.COMPONENT_TO_SORTED_TASKS);
        Map<String, Map<String, Fields>> componentToStreamToFields =
            (Map<String, Map<String, Fields>>) workerData.get(Constants.COMPONENT_TO_STREAM_TO_FIELDS);
        List<Integer> taskIds = (List<Integer>) workerData.get(Constants.TASK_IDS);
        Map<String, Object> defaultResources = (Map<String, Object>) workerData.get(Constants.DEFAULT_SHARED_RESOURCES);
        Map<String, Object> userResources = (Map<String, Object>) workerData.get(Constants.USER_SHARED_RESOURCES);
        String codeDir = ConfigUtils.supervisorStormResourcesPath(
            ConfigUtils.supervisorStormDistRoot(daemonConf, topologyId));
        // NOTE(review): the pid root is keyed by the topology id here, while other call
        // sites key it by the worker id — confirm this is intentional for this caller.
        String pidDir = ConfigUtils.workerPidsRoot(daemonConf, topologyId);
        return new WorkerTopologyContext(systemTopology, topologyConf, taskToComponent, componentToSortedTasks,
            componentToStreamToFields, topologyId, codeDir, pidDir, workerPort, taskIds,
            defaultResources, userResources);
    } catch (IOException e) {
        throw Utils.wrapInRuntime(e);
    }
}
/**
 * Build the {@link WorkerTopologyContext} for this worker from its cached state.
 *
 * @return the context for this worker
 */
public WorkerTopologyContext getWorkerTopologyContext() {
    try {
        String distRoot = ConfigUtils.supervisorStormDistRoot(conf, topologyId);
        String codeDir = ConfigUtils.supervisorStormResourcesPath(distRoot);
        String pidDir = ConfigUtils.workerPidsRoot(conf, topologyId);
        return new WorkerTopologyContext(systemTopology, topologyConf, taskToComponent, componentToSortedTasks,
            componentToStreamToFields, topologyId, codeDir, pidDir, port, localTaskIds,
            defaultSharedResources, userSharedResources, cachedTaskToNodePort, assignmentId);
    } catch (IOException e) {
        throw Utils.wrapInRuntime(e);
    }
}
/** * Clean up the container partly preparing for restart. By default delete all of the temp directories we are going to get a new * worker_id anyways. POST CONDITION: the workerId will be set to null * * @throws IOException on any error */ public void cleanUpForRestart() throws IOException { LOG.info("Cleaning up {}:{}", _supervisorId, _workerId); Set<Long> pids = getAllPids(); String user = getWorkerUser(); for (Long pid : pids) { File path = new File(ConfigUtils.workerPidPath(_conf, _workerId, pid)); _ops.deleteIfExists(path, user, _workerId); } //clean up for resource isolation if enabled if (_resourceIsolationManager != null) { _resourceIsolationManager.releaseResourcesForWorker(_workerId); } //Always make sure to clean up everything else before worker directory //is removed since that is what is going to trigger the retry for cleanup _ops.deleteIfExists(new File(ConfigUtils.workerHeartbeatsRoot(_conf, _workerId)), user, _workerId); _ops.deleteIfExists(new File(ConfigUtils.workerPidsRoot(_conf, _workerId)), user, _workerId); _ops.deleteIfExists(new File(ConfigUtils.workerTmpRoot(_conf, _workerId)), user, _workerId); _ops.deleteIfExists(new File(ConfigUtils.workerRoot(_conf, _workerId)), user, _workerId); deleteSavedWorkerUser(); _workerId = null; }
/**
 * Setup the container to run. By default this creates the needed directories/links in the
 * local file system.
 * PREREQUISITE: All needed blobs and topology jars/configs have been downloaded and placed
 * in the appropriate locations.
 *
 * @throws IOException on any error
 */
protected void setup() throws IOException {
    _type.assertFull();
    if (!_ops.doRequiredTopoFilesExist(_conf, _topologyId)) {
        LOG.info("Missing topology storm code, so can't launch worker with assignment {} for this supervisor {} on port {} with id {}",
                 _assignment, _supervisorId, _port, _workerId);
        throw new IllegalStateException("Not all needed files are here!!!!");
    }
    LOG.info("Setting up {}:{}", _supervisorId, _workerId);
    // Per-worker scratch directories: pid files, tmp space, and heartbeats.
    _ops.forceMkdir(new File(ConfigUtils.workerPidsRoot(_conf, _workerId)));
    _ops.forceMkdir(new File(ConfigUtils.workerTmpRoot(_conf, _workerId)));
    _ops.forceMkdir(new File(ConfigUtils.workerHeartbeatsRoot(_conf, _workerId)));
    // The artifacts dir is keyed by topology/port, so it may survive across workers;
    // only create and initialize it the first time.
    File workerArtifacts = new File(ConfigUtils.workerArtifactsRoot(_conf, _topologyId, _port));
    if (!_ops.fileExists(workerArtifacts)) {
        _ops.forceMkdir(workerArtifacts);
        _ops.setupWorkerArtifactsDir(_assignment.get_owner(), workerArtifacts);
    }
    String workerUser = getWorkerUser();
    writeLogMetadata(workerUser);
    saveWorkerUser(workerUser);
    createArtifactsLink();
    createBlobstoreLinks();
}
/**
 * Build the per-task {@link TopologyContext} from the shared worker state.
 *
 * @param topology the topology this task's context should describe
 * @return a context scoped to {@code taskId} for this worker
 * @throws IOException if resolving the supervisor storm dist root fails
 */
private TopologyContext mkTopologyContext(StormTopology topology) throws IOException {
    Map<String, Object> conf = workerData.getConf();
    return new TopologyContext(
        topology,
        workerData.getTopologyConf(),
        workerData.getTaskToComponent(),
        workerData.getComponentToSortedTasks(),
        workerData.getComponentToStreamToFields(),
        // This is updated by the Worker and the topology has shared access to it
        workerData.getBlobToLastKnownVersion(),
        workerData.getTopologyId(),
        // codeDir: the resources path under the supervisor's dist root for this topology
        ConfigUtils.supervisorStormResourcesPath(
            ConfigUtils.supervisorStormDistRoot(conf, workerData.getTopologyId())),
        // pidDir: keyed by the worker id here (some other call sites key it by topology
        // id) — presumably intentional for a running worker; verify against callers
        ConfigUtils.workerPidsRoot(conf, workerData.getWorkerId()),
        taskId,
        workerData.getPort(),
        workerData.getLocalTaskIds(),
        workerData.getDefaultSharedResources(),
        workerData.getUserSharedResources(),
        executor.getSharedExecutorData(),
        executor.getIntervalToTaskToMetricToRegistry(),
        executor.getOpenOrPrepareWasCalled());
}
/**
 * Get the path of the pid file for a given worker pid.
 *
 * <p>The {@code conf} parameter was previously a raw {@code Map}; it is now typed as
 * {@code Map<String, Object>} for compile-time safety and for consistency with the
 * other config-taking helpers in this file. Raw-typed callers still compile.
 *
 * @param conf the config to use
 * @param id the id of the worker
 * @param pid the pid the file is for
 * @return the full path to the pid file
 */
public static String workerPidPath(Map<String, Object> conf, String id, String pid) {
    return workerPidsRoot(conf, id) + FILE_SEPARATOR + pid;
}
/**
 * Get all of the pids that are a part of this container.
 *
 * @return the pids read from the worker pids directory
 * @throws IOException on any error reading the directory
 */
protected Set<Long> getAllPids() throws IOException {
    Set<Long> pids = new HashSet<>();
    String pidsRoot = ConfigUtils.workerPidsRoot(_conf, _workerId);
    for (String entry : Utils.readDirContents(pidsRoot)) {
        pids.add(Long.valueOf(entry));
    }
    return pids;
}
/** * Clean up the container partly preparing for restart. * By default delete all of the temp directories we are going * to get a new worker_id anyways. * POST CONDITION: the workerId will be set to null * @throws IOException on any error */ public void cleanUpForRestart() throws IOException { LOG.info("Cleaning up {}:{}", _supervisorId, _workerId); Set<Long> pids = getAllPids(); String user = getWorkerUser(); for (Long pid : pids) { File path = new File(ConfigUtils.workerPidPath(_conf, _workerId, pid)); _ops.deleteIfExists(path, user, _workerId); } //Always make sure to clean up everything else before worker directory //is removed since that is what is going to trigger the retry for cleanup _ops.deleteIfExists(new File(ConfigUtils.workerHeartbeatsRoot(_conf, _workerId)), user, _workerId); _ops.deleteIfExists(new File(ConfigUtils.workerPidsRoot(_conf, _workerId)), user, _workerId); _ops.deleteIfExists(new File(ConfigUtils.workerTmpRoot(_conf, _workerId)), user, _workerId); _ops.deleteIfExists(new File(ConfigUtils.workerRoot(_conf, _workerId)), user, _workerId); deleteSavedWorkerUser(); _workerId = null; }
// Create the per-worker scratch directories: pid files, tmp space, and heartbeats.
_ops.forceMkdir(new File(ConfigUtils.workerPidsRoot(_conf, _workerId)));
_ops.forceMkdir(new File(ConfigUtils.workerTmpRoot(_conf, _workerId)));
_ops.forceMkdir(new File(ConfigUtils.workerHeartbeatsRoot(_conf, _workerId)));