/**
 * One deletion pass: scans every per-user directory under the remote root
 * aggregated-log dir and deletes application log dirs older than the
 * retention cutoff.
 * <p>
 * Runs as a {@code TimerTask}; per {@code java.util.Timer} semantics, any
 * exception that escapes {@code run()} cancels the timer, so all errors must
 * be caught and logged here rather than propagated.
 */
@Override
public void run() {
  long cutoffMillis = System.currentTimeMillis() - retentionMillis;
  LOG.info("aggregated log deletion started.");
  try {
    FileSystem fs = remoteRootLogDir.getFileSystem(conf);
    for (FileStatus userDir : fs.listStatus(remoteRootLogDir)) {
      if (userDir.isDirectory()) {
        // App log dirs live under <root>/<user>/<suffix>/
        Path userDirPath = new Path(userDir.getPath(), suffix);
        deleteOldLogDirsFrom(userDirPath, cutoffMillis, fs, rmClient);
      }
    }
  } catch (IOException e) {
    logIOException("Error reading root log dir this deletion " +
        "attempt is being aborted", e);
  } catch (Throwable t) {
    // FIX: previously only IOException was caught.  An unchecked exception
    // (e.g. from listStatus or Path construction) escaping run() kills the
    // Timer thread and silently stops every future deletion pass.
    LOG.error("Error reading root log dir this deletion " +
        "attempt is being aborted", t);
  }
  LOG.info("aggregated log deletion finished.");
}
// NOTE(review): fragment cut mid-method — the leading token continues a
// condition begun on a line not visible here; this span is not independently
// compilable.  Visible logic: once an app dir's modification time is older
// than the cutoff, parse the dir name into an ApplicationId, ask the RM
// whether the app has terminated, and only if it has (and shouldDeleteLogDir
// also agrees) proceed to delete the aggregated logs.
appDir.getModificationTime() < cutoffMillis) { boolean appTerminated = isApplicationTerminated(ConverterUtils.toApplicationId(appDir .getPath().getName()), rmClient); if(appTerminated && shouldDeleteLogDir(appDir, cutoffMillis, fs)) { try { LOG.info("Deleting aggregated logs in "+appDir.getPath());
/**
 * Lists the per-application log directories under {@code dir} and hands each
 * one to {@code deleteAppDirLogs} for retention evaluation.  A failure to
 * list the directory is logged and the whole pass over it is skipped.
 */
private static void deleteOldLogDirsFrom(Path dir, long cutoffMillis,
    FileSystem fs, ApplicationClientProtocol rmClient) {
  final FileStatus[] candidateDirs;
  try {
    candidateDirs = fs.listStatus(dir);
  } catch (IOException e) {
    logException("Could not read the contents of " + dir, e);
    return;
  }
  for (FileStatus candidate : candidateDirs) {
    deleteAppDirLogs(cutoffMillis, fs, rmClient, candidate);
  }
}
// NOTE(review): test fragment cut mid-method — not independently compilable.
// Visible logic: build a mock RM client over the given finished applications,
// construct a LogDeletionTask with the test retention, run one deletion pass
// synchronously, then verify the mocked FileSystem saw a recursive delete of
// app3's aggregated-log directory.
createMockRMClient(finishedApplications, null); AggregatedLogDeletionService.LogDeletionTask deletionTask = new AggregatedLogDeletionService.LogDeletionTask(conf, RETENTION_SECS, rmClient); deletionTask.run(); verify(mockFs).delete(app3Dir, true);
// NOTE(review): partial statement cut from its enclosing method; the opening
// of this condition is outside the visible span, so it cannot compile alone.
// What is visible: for an app dir whose mtime is past the retention cutoff,
// the dir name is converted to an ApplicationId, the RM is consulted for
// terminal state, and deletion proceeds only when the app has terminated and
// shouldDeleteLogDir confirms every file inside is also past retention.
appDir.getModificationTime() < cutoffMillis) { boolean appTerminated = isApplicationTerminated(ConverterUtils.toApplicationId(appDir .getPath().getName()), rmClient); if(appTerminated && shouldDeleteLogDir(appDir, cutoffMillis, fs)) { try { LOG.info("Deleting aggregated logs in "+appDir.getPath());
// NOTE(review): fragment cut mid-method — not independently compilable.
// Parses the dir name via ApplicationId.fromString (a newer API than the
// ConverterUtils.toApplicationId used by sibling copies in this file) and
// skips deletion when the RM reports the app as not yet terminated.  The
// empty "!appTerminated" branch presumably carried logging/tracking in the
// full method — confirm against the complete source.
ApplicationId appId = ApplicationId.fromString( appDir.getPath().getName()); boolean appTerminated = isApplicationTerminated(appId, rmClient); if (!appTerminated) { } else if (shouldDeleteLogDir(appDir, cutoffMillis, fs)) {
private void scheduleLogDeletionTask() throws IOException { Configuration conf = getConfig(); if (!conf.getBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED, YarnConfiguration.DEFAULT_LOG_AGGREGATION_ENABLED)) { // Log aggregation is not enabled so don't bother return; } long retentionSecs = conf.getLong( YarnConfiguration.LOG_AGGREGATION_RETAIN_SECONDS, YarnConfiguration.DEFAULT_LOG_AGGREGATION_RETAIN_SECONDS); if (retentionSecs < 0) { LOG.info("Log Aggregation deletion is disabled because retention is" + " too small (" + retentionSecs + ")"); return; } setLogAggCheckIntervalMsecs(retentionSecs); task = new LogDeletionTask(conf, retentionSecs, creatRMClient()); timer = new Timer(); timer.scheduleAtFixedRate(task, 0, checkIntervalMsecs); }
private void scheduleLogDeletionTask() throws IOException { Configuration conf = getConfig(); if (!conf.getBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED, YarnConfiguration.DEFAULT_LOG_AGGREGATION_ENABLED)) { // Log aggregation is not enabled so don't bother return; } long retentionSecs = conf.getLong( YarnConfiguration.LOG_AGGREGATION_RETAIN_SECONDS, YarnConfiguration.DEFAULT_LOG_AGGREGATION_RETAIN_SECONDS); if (retentionSecs < 0) { LOG.info("Log Aggregation deletion is disabled because retention is" + " too small (" + retentionSecs + ")"); return; } setLogAggCheckIntervalMsecs(retentionSecs); task = new LogDeletionTask(conf, retentionSecs, creatRMClient()); timer = new Timer(); timer.scheduleAtFixedRate(task, 0, checkIntervalMsecs); }
/**
 * Executes one aggregated-log deletion pass over every user directory under
 * the remote root log dir.
 * <p>
 * Scheduled on a {@code java.util.Timer}: an exception escaping
 * {@code run()} cancels the timer, so everything is caught and logged.
 */
@Override
public void run() {
  long cutoffMillis = System.currentTimeMillis() - retentionMillis;
  LOG.info("aggregated log deletion started.");
  try {
    FileSystem fs = remoteRootLogDir.getFileSystem(conf);
    for (FileStatus userDir : fs.listStatus(remoteRootLogDir)) {
      if (userDir.isDirectory()) {
        // Per-app log dirs are nested under <root>/<user>/<suffix>/
        Path userDirPath = new Path(userDir.getPath(), suffix);
        deleteOldLogDirsFrom(userDirPath, cutoffMillis, fs, rmClient);
      }
    }
  } catch (IOException e) {
    logIOException("Error reading root log dir this deletion " +
        "attempt is being aborted", e);
  } catch (Throwable t) {
    // FIX: only IOException was caught before; any unchecked exception
    // would propagate out of run(), kill the Timer thread, and silently
    // disable all subsequent deletion passes.
    LOG.error("Error reading root log dir this deletion " +
        "attempt is being aborted", t);
  }
  LOG.info("aggregated log deletion finished.");
}
private void scheduleLogDeletionTask() throws IOException { Configuration conf = getConfig(); if (!conf.getBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED, YarnConfiguration.DEFAULT_LOG_AGGREGATION_ENABLED)) { // Log aggregation is not enabled so don't bother return; } long retentionSecs = conf.getLong( YarnConfiguration.LOG_AGGREGATION_RETAIN_SECONDS, YarnConfiguration.DEFAULT_LOG_AGGREGATION_RETAIN_SECONDS); if (retentionSecs < 0) { LOG.info("Log Aggregation deletion is disabled because retention is" + " too small (" + retentionSecs + ")"); return; } setLogAggCheckIntervalMsecs(retentionSecs); task = new LogDeletionTask(conf, retentionSecs, createRMClient()); timer = new Timer(); timer.scheduleAtFixedRate(task, 0, checkIntervalMsecs); }
/**
 * Performs a single aggregated-log deletion sweep.  Every user directory
 * below the remote root log dir is visited and its per-app log dirs older
 * than the retention cutoff are removed.  All failures — checked or
 * unchecked — are logged and swallowed so the scheduling timer survives.
 */
@Override
public void run() {
  final long cutoffMillis = System.currentTimeMillis() - retentionMillis;
  LOG.info("aggregated log deletion started.");
  try {
    FileSystem remoteFs = remoteRootLogDir.getFileSystem(conf);
    for (FileStatus userDir : remoteFs.listStatus(remoteRootLogDir)) {
      if (!userDir.isDirectory()) {
        continue;  // only user directories can hold aggregated logs
      }
      Path suffixedUserDir = new Path(userDir.getPath(), suffix);
      deleteOldLogDirsFrom(suffixedUserDir, cutoffMillis, remoteFs, rmClient);
    }
  } catch (Throwable t) {
    logException("Error reading root log dir this deletion " +
        "attempt is being aborted", t);
  }
  LOG.info("aggregated log deletion finished.");
}
/**
 * Tears down the RM proxy held by the current deletion task, if any.
 * Safe to call when no task was ever scheduled.
 */
@VisibleForTesting
protected void stopRMClient() {
  if (task == null) {
    return;  // nothing was scheduled, so no proxy to stop
  }
  ApplicationClientProtocol client = task.getRMClient();
  if (client != null) {
    RPC.stopProxy(client);
  }
}
}
/**
 * Stops the RM client proxy owned by the scheduled deletion task.
 * A no-op when no task exists or the task holds no client.
 */
@VisibleForTesting
protected void stopRMClient() {
  LogDeletionTask currentTask = task;
  if (currentTask != null) {
    ApplicationClientProtocol rmProxy = currentTask.getRMClient();
    if (rmProxy != null) {
      RPC.stopProxy(rmProxy);
    }
  }
}
}
/**
 * Releases the RPC proxy to the ResourceManager that the deletion task
 * holds.  Does nothing if there is no task or no client to release.
 */
@VisibleForTesting
protected void stopRMClient() {
  final ApplicationClientProtocol client =
      (task == null) ? null : task.getRMClient();
  if (client != null) {
    RPC.stopProxy(client);
  }
}
}