/** * Delete a file or a directory and all of the children. If it exists. * * @param path what to delete * @param user who to delete it as if doing it as someone else is supported * @param logPrefix if an external process needs to be launched to delete the object what prefix to include in the logs * @throws IOException on any error. */ public void deleteIfExists(File path, String user, String logPrefix) throws IOException { //by default no need to do this as a different user deleteIfExists(path); }
/**
 * Delete every entry directly under the topology blobs root directory whose
 * file name starts with the given base name.
 *
 * @param baseName the file-name prefix identifying which entries to remove
 * @throws IOException on any error
 */
private void removeAll(String baseName) throws IOException {
    try (DirectoryStream<Path> children = fsOps.newDirectoryStream(topologyBasicBlobsRootDir)) {
        //children is only ever null if topologyBasicBlobsRootDir does not exist (e.g. in unit tests).
        // A non-existent directory is by definition already clean, so there is nothing to do.
        if (children != null) {
            for (Path p : children) {
                String fileName = p.getFileName().toString();
                if (fileName.startsWith(baseName)) {
                    fsOps.deleteIfExists(p.toFile());
                }
            }
        }
    }
}
/**
 * Remove the persisted worker-user mapping file for the current worker, if present.
 *
 * @throws IOException on any error
 */
protected void deleteSavedWorkerUser() throws IOException {
    File workerUserFile = new File(ConfigUtils.workerUserFile(_conf, _workerId));
    LOG.info("REMOVE worker-user {}", _workerId);
    _ops.deleteIfExists(workerUserFile);
}
private void cleanUpTemp(String baseName) throws IOException { LOG.debug("Cleaning up temporary data in {}", topologyBasicBlobsRootDir); try (DirectoryStream<Path> children = fsOps.newDirectoryStream(topologyBasicBlobsRootDir, (p) -> { String fileName = p.getFileName().toString(); Matcher m = EXTRACT_BASE_NAME_AND_VERSION.matcher(fileName); return m.matches() && baseName.equals(m.group(1)); })) { //children is only ever null if topologyBasicBlobsRootDir does not exist. This happens during unit tests // And because a non-existant directory is by definition clean we are ignoring it. if (children != null) { for (Path p : children) { LOG.debug("Cleaning up {}", p); fsOps.deleteIfExists(p.toFile()); } } } }
/**
 * Clean up the container, partly preparing for restart. By default delete all of the temp
 * directories; we are going to get a new worker_id anyways.
 * POST CONDITION: the workerId will be set to null.
 *
 * @throws IOException on any error
 */
public void cleanUpForRestart() throws IOException {
    LOG.info("Cleaning up {}:{}", _supervisorId, _workerId);
    Set<Long> pids = getAllPids();
    String user = getWorkerUser();
    // Remove each per-pid file first so the pids root (and worker root) can go away below.
    for (Long pid : pids) {
        File path = new File(ConfigUtils.workerPidPath(_conf, _workerId, pid));
        _ops.deleteIfExists(path, user, _workerId);
    }
    //clean up for resource isolation if enabled
    if (_resourceIsolationManager != null) {
        _resourceIsolationManager.releaseResourcesForWorker(_workerId);
    }
    //Always make sure to clean up everything else before worker directory
    //is removed since that is what is going to trigger the retry for cleanup
    _ops.deleteIfExists(new File(ConfigUtils.workerHeartbeatsRoot(_conf, _workerId)), user, _workerId);
    _ops.deleteIfExists(new File(ConfigUtils.workerPidsRoot(_conf, _workerId)), user, _workerId);
    _ops.deleteIfExists(new File(ConfigUtils.workerTmpRoot(_conf, _workerId)), user, _workerId);
    _ops.deleteIfExists(new File(ConfigUtils.workerRoot(_conf, _workerId)), user, _workerId);
    deleteSavedWorkerUser();
    // Per the post-condition above: this container no longer owns a worker id.
    _workerId = null;
}
fsOps.deleteIfExists(versionFile.toFile()); //So if we fail we are forced to try again LOG.debug("Removing destination file {} in preparation for move", dest); fsOps.deleteIfExists(dest.toFile()); if (type.needsExtraction()) { Path extractionTemp = topologyBasicBlobsRootDir.resolve(type.getTempExtractionDir(newVersion)); Path extractionDest = topologyBasicBlobsRootDir.resolve(type.getExtractionDir()); LOG.debug("Removing extraction dest {} in preparation for extraction", extractionDest); fsOps.deleteIfExists(extractionDest.toFile()); if (fsOps.fileExists(extractionTemp)) { fsOps.moveDirectoryPreferAtomic(extractionTemp.toFile(), extractionDest.toFile());
forEachTopologyDistDir((p, topologyId) -> { if (!safeTopologyIds.contains(topologyId)) { fsOps.deleteIfExists(p.toFile());
verify(ops).deleteIfExists(eq(new File(workerPidsRoot, String.valueOf(pid))), eq(user), any(String.class)); verify(iso).releaseResourcesForWorker(workerId); verify(ops).deleteIfExists(eq(new File(workerRoot, "pids")), eq(user), any(String.class)); verify(ops).deleteIfExists(eq(new File(workerRoot, "tmp")), eq(user), any(String.class)); verify(ops).deleteIfExists(eq(new File(workerRoot, "heartbeats")), eq(user), any(String.class)); verify(ops).deleteIfExists(eq(workerRoot), eq(user), any(String.class)); verify(ops).deleteIfExists(workerUserFile);
/** * Delete a file or a directory and all of the children. If it exists. * @param path what to delete * @param user who to delete it as if doing it as someone else is supported * @param logPrefix if an external process needs to be launched to delete * the object what prefix to include in the logs * @throws IOException on any error. */ public void deleteIfExists(File path, String user, String logPrefix) throws IOException { //by default no need to do this as a different user deleteIfExists(path); }
/**
 * Delete the saved worker-user file for this worker, if one exists.
 *
 * @throws IOException on any error
 */
protected void deleteSavedWorkerUser() throws IOException {
    LOG.info("REMOVE worker-user {}", _workerId);
    String userFilePath = ConfigUtils.workerUserFile(_conf, _workerId);
    _ops.deleteIfExists(new File(userFilePath));
}
/**
 * Scan the supervisor's storm dist root and delete every topology directory
 * that has no pending basic or blob download referencing it.
 *
 * @throws IOException on any error
 */
@Override
public synchronized void cleanupUnusedTopologies() throws IOException {
    File distRoot = new File(ConfigUtils.supervisorStormDistRoot(_conf));
    LOG.info("Cleaning up unused topologies in {}", distRoot);
    File[] children = distRoot.listFiles();
    if (children == null) {
        // dist root does not exist (or is not a directory) — nothing to clean
        return;
    }
    for (File topoDir : children) {
        // Directory names are URL-encoded topology ids.
        String topoId = URLDecoder.decode(topoDir.getName(), "UTF-8");
        boolean stillInUse = _basicPending.get(topoId) != null || _blobPending.get(topoId) != null;
        if (!stillInUse) {
            _fsOps.deleteIfExists(topoDir, null, "rmr " + topoId);
        }
    }
}
/**
 * Clean up the container partly preparing for restart.
 * By default delete all of the temp directories; we are going
 * to get a new worker_id anyways.
 * POST CONDITION: the workerId will be set to null.
 *
 * @throws IOException on any error
 */
public void cleanUpForRestart() throws IOException {
    LOG.info("Cleaning up {}:{}", _supervisorId, _workerId);
    Set<Long> pids = getAllPids();
    String user = getWorkerUser();
    // Delete each per-pid file first so the pids root and worker root can be removed below.
    for (Long pid : pids) {
        File path = new File(ConfigUtils.workerPidPath(_conf, _workerId, pid));
        _ops.deleteIfExists(path, user, _workerId);
    }
    //Always make sure to clean up everything else before worker directory
    //is removed since that is what is going to trigger the retry for cleanup
    _ops.deleteIfExists(new File(ConfigUtils.workerHeartbeatsRoot(_conf, _workerId)), user, _workerId);
    _ops.deleteIfExists(new File(ConfigUtils.workerPidsRoot(_conf, _workerId)), user, _workerId);
    _ops.deleteIfExists(new File(ConfigUtils.workerTmpRoot(_conf, _workerId)), user, _workerId);
    _ops.deleteIfExists(new File(ConfigUtils.workerRoot(_conf, _workerId)), user, _workerId);
    deleteSavedWorkerUser();
    // Per the post-condition above: this container no longer owns a worker id.
    _workerId = null;
}
if (!_fsOps.supportsAtomicDirectoryMove()) { LOG.warn("{} may have partially downloaded blobs, recovering", _topologyId); _fsOps.deleteIfExists(_stormRoot); } else { LOG.warn("{} already downloaded blobs, skipping", _topologyId); if (deleteAll) { LOG.warn("Failed to download basic resources for topology-id {}", _topologyId); _fsOps.deleteIfExists(tr); _fsOps.deleteIfExists(_stormRoot);
_basicPending.remove(topologyId); String path = ConfigUtils.supervisorStormDistRoot(_conf, topologyId); _fsOps.deleteIfExists(new File(path), null, "rmr "+topologyId); } else { LOG.debug("Released basic reference {} {} still waiting on {}", topologyId, port, localResource);