// NOTE(review): fragment of a larger loop — when one task in a group succeeds, a shutdown of the
// remaining tasks in that group is scheduled (future collected for later await) and the group is
// removed via the iterator being walked (iTaskGroups.remove() — safe concurrent-removal idiom).
if (taskData.status.isSuccess()) { futures.add(stopTasksInGroup(taskGroup, "task[%s] succeeded in the same taskGroup", taskData.status.getId())); iTaskGroups.remove();
if (taskData.status.isSuccess()) {
if (task.status.isSuccess()) {
/**
 * Updates the blacklist bookkeeping for the worker that just reported {@code taskStatus}.
 *
 * A SUCCESS clears the worker's consecutive-failure streak and removes it from the blacklist;
 * a FAILURE increments the streak. Afterwards, if the streak exceeds the configured retry limit
 * and blacklisting one more worker would not exceed the configured percentage cap, the worker is
 * blacklisted until now + backoff.
 *
 * All reads/writes of {@code blackListedWorkers} happen under its monitor.
 */
private void blacklistWorkerIfNeeded(TaskStatus taskStatus, WorkerHolder workerHolder)
{
  synchronized (blackListedWorkers) {
    final String host = workerHolder.getWorker().getHost();

    if (taskStatus.isSuccess()) {
      workerHolder.resetContinuouslyFailedTasksCount();
      // A single success is enough to rehabilitate a blacklisted worker.
      if (blackListedWorkers.remove(host) != null) {
        workerHolder.setBlacklistedUntil(null);
        log.info("[%s] removed from blacklist because a task finished with SUCCESS", workerHolder.getWorker());
      }
    } else if (taskStatus.isFailure()) {
      workerHolder.incrementContinuouslyFailedTasksCount();
    }

    if (workerHolder.getContinuouslyFailedTasksCount() > config.getMaxRetriesBeforeBlacklist()) {
      // Cap check is done only after the retry threshold trips, mirroring the short-circuit of the
      // original combined condition: never blacklist more than the configured percentage of workers.
      final double cap = workers.size() * (config.getMaxPercentageBlacklistWorkers() / 100.0) - 1;
      if (blackListedWorkers.size() <= cap) {
        workerHolder.setBlacklistedUntil(DateTimes.nowUtc().plus(config.getWorkerBlackListBackoffTime()));
        // put() returning null means this is a fresh blacklist entry, not a refresh — log only then.
        if (blackListedWorkers.put(host, workerHolder) == null) {
          log.info(
              "Blacklisting [%s] until [%s] after [%,d] failed tasks in a row.",
              workerHolder.getWorker(),
              workerHolder.getBlacklistedUntil(),
              workerHolder.getContinuouslyFailedTasksCount()
          );
        }
      }
    }
  }
}
// NOTE(review): fragment — on task success, bump the per-datasource successful-task counter,
// lazily creating the AtomicLong on first use via computeIfAbsent.
); if (status.isSuccess()) { totalSuccessfulTaskCount.computeIfAbsent(task.getDataSource(), k -> new AtomicLong()) .incrementAndGet();
// NOTE(review): fragment — record the failure count and log the offending indexSpec (as JSON)
// before trying the next candidate indexSpec.
if (!eachResult.isSuccess()) { failCnt++; log.warn("Failed to run indexSpec: [%s].\nTrying the next indexSpec.", json);
// NOTE(review): fragment — success resets the worker's consecutive-failure streak and removes it
// from the blacklist; here blackListedWorkers appears to be a Set (boolean-returning remove).
if (taskStatus.isSuccess()) { zkWorker.resetContinuouslyFailedTasksCount(); if (blackListedWorkers.remove(zkWorker)) {
// Queries the task's final status through tsqa and asserts the task ended in SUCCESS.
Assert.assertTrue("Task should be in Success state", tsqa.getStatus(taskId).get().isSuccess());
/**
 * Runs this task by delegating to an inner {@code SubTask} over all used segments in the
 * task's interval.
 *
 * @param toolbox task toolbox providing the action client used to look up segments
 * @return {@code success()} if the sub-task succeeded, otherwise a status carrying the
 *         sub-task's status code under this task's id
 * @throws Exception if segment lookup or the sub-task run throws
 */
@Override
public TaskStatus run(TaskToolbox toolbox) throws Exception
{
  // Resolve every used segment for this datasource/interval (null = no version filter).
  final List<DataSegment> usedSegments = toolbox.getTaskActionClient().submit(
      new SegmentListUsedAction(getDataSource(), getInterval(), null)
  );

  final SubTask delegate = new SubTask(
      getId(),
      getDataSource(),
      usedSegments,
      aggregators,
      rollup,
      indexSpec,
      segmentWriteOutMediumFactory,
      getContext()
  );

  // Re-badge the delegate's outcome under this task's id.
  final TaskStatus delegateStatus = delegate.run(toolbox);
  return delegateStatus.isSuccess() ? success() : TaskStatus.fromCode(getId(), delegateStatus.getStatusCode());
}
/**
 * End-to-end check: after ingesting test data, a compaction over 2014-01-01 produces exactly
 * three hour-granularity segments, each with a NumberedShardSpec(0, 0).
 */
@Test
public void testRun() throws Exception
{
  // Seed the datasource with the segments that will be compacted.
  runIndexTask();

  final Builder taskBuilder = new Builder(
      DATA_SOURCE,
      getObjectMapper(),
      AuthTestUtils.TEST_AUTHORIZER_MAPPER,
      null,
      rowIngestionMetersFactory
  );
  final CompactionTask compaction = taskBuilder
      .interval(Intervals.of("2014-01-01/2014-01-02"))
      .build();

  final Pair<TaskStatus, List<DataSegment>> result = runTask(compaction);
  Assert.assertTrue(result.lhs.isSuccess());

  final List<DataSegment> producedSegments = result.rhs;
  Assert.assertEquals(3, producedSegments.size());

  // One segment per hour, each starting a new numbering (partition 0 of 0).
  for (int hour = 0; hour < 3; hour++) {
    final DataSegment segment = producedSegments.get(hour);
    Assert.assertEquals(
        Intervals.of("2014-01-01T0%d:00:00/2014-01-01T0%d:00:00", hour, hour + 1),
        segment.getInterval()
    );
    Assert.assertEquals(new NumberedShardSpec(0, 0), segment.getShardSpec());
  }
}
private static WorkerHolder createWorkerHolder( ObjectMapper smileMapper, HttpClient httpClient, HttpRemoteTaskRunnerConfig config, ScheduledExecutorService workersSyncExec, WorkerHolder.Listener listener, Worker worker, // simulates task announcements received from worker on first sync call for the tasks that are already // running/completed on the worker. List<TaskAnnouncement> preExistingTaskAnnouncements, // defines behavior for what to do when a particular task is assigned Map<Task, List<TaskAnnouncement>> toBeAssignedTasks, // incremented on each runnable completion in workersSyncExec, useful for deterministically watching that some // work completed AtomicInteger ticks, // Updated each time a shutdown(taskId) call is received, useful for asserting that expected shutdowns indeed // happened. Set<String> actualShutdowns ) { return new WorkerHolder(smileMapper, httpClient, config, workersSyncExec, listener, worker) { private final String workerHost = worker.getHost().substring(0, worker.getHost().indexOf(':')); private final int workerPort = Integer.parseInt(worker.getHost().substring(worker.getHost().indexOf(':') + 1)); @Override
// NOTE(review): fragment of a larger test — verifies tasks 1-3 are tracked as completed with
// SUCCESS, completed-task files exist on disk while assigned-task files do not, and the worker
// history announcements (baseUpdate1/2, update4) report SUCCESS with a non-null task location host.
Assert.assertTrue(workerTaskManager.getCompletedTasks().get(task2.getId()).getTaskStatus().isSuccess()); Assert.assertTrue(workerTaskManager.getCompletedTasks().get(task1.getId()).getTaskStatus().isSuccess()); Assert.assertTrue(new File(workerTaskManager.getCompletedTaskDir(), task1.getId()).exists()); Assert.assertFalse(new File(workerTaskManager.getAssignedTaskDir(), task1.getId()).exists()); WorkerHistoryItem.TaskUpdate baseUpdate2 = (WorkerHistoryItem.TaskUpdate) baseHistory.getRequests().get(2); Assert.assertTrue(baseUpdate1.getTaskAnnouncement().getTaskStatus().isSuccess()); Assert.assertTrue(baseUpdate2.getTaskAnnouncement().getTaskStatus().isSuccess()); Assert.assertTrue(workerTaskManager.getCompletedTasks().get(task3.getId()).getTaskStatus().isSuccess()); Assert.assertTrue(new File(workerTaskManager.getCompletedTaskDir(), task3.getId()).exists()); Assert.assertFalse(new File(workerTaskManager.getAssignedTaskDir(), task3.getId()).exists()); Assert.assertTrue(update4.getTaskAnnouncement().getTaskStatus().isSuccess()); Assert.assertNotNull(update4.getTaskAnnouncement().getTaskLocation().getHost());
// NOTE(review): fragment — the second task's future reports SUCCESS and the blacklist is empty again.
Assert.assertTrue(taskFuture2.get().isSuccess()); Assert.assertEquals(0, remoteTaskRunner.getBlackListedWorkers().size());
// NOTE(review): fragment — simulate task3 running then completing successfully; its future must
// report SUCCESS and no workers remain blacklisted. The trailing assertEquals( is cut off here.
mockWorkerRunningTask(task3); mockWorkerCompleteSuccessfulTask(task3); Assert.assertTrue(taskFuture3.get().isSuccess()); Assert.assertEquals(0, remoteTaskRunner.getBlackListedWorkers().size()); Assert.assertEquals(
/**
 * Updates the blacklist bookkeeping for the worker that just reported {@code taskStatus}.
 *
 * A SUCCESS clears the worker's consecutive-failure streak and removes it from the blacklist;
 * a FAILURE increments the streak. Afterwards, if the streak exceeds the configured retry limit
 * and blacklisting one more worker would not exceed the configured percentage cap, the worker is
 * blacklisted until now + backoff.
 *
 * All reads/writes of {@code blackListedWorkers} happen under its monitor.
 */
private void blacklistWorkerIfNeeded(TaskStatus taskStatus, WorkerHolder workerHolder)
{
  synchronized (blackListedWorkers) {
    final String host = workerHolder.getWorker().getHost();

    if (taskStatus.isSuccess()) {
      workerHolder.resetContinuouslyFailedTasksCount();
      // A single success is enough to rehabilitate a blacklisted worker.
      if (blackListedWorkers.remove(host) != null) {
        workerHolder.setBlacklistedUntil(null);
        log.info("[%s] removed from blacklist because a task finished with SUCCESS", workerHolder.getWorker());
      }
    } else if (taskStatus.isFailure()) {
      workerHolder.incrementContinuouslyFailedTasksCount();
    }

    if (workerHolder.getContinuouslyFailedTasksCount() > config.getMaxRetriesBeforeBlacklist()) {
      // Cap check is done only after the retry threshold trips, mirroring the short-circuit of the
      // original combined condition: never blacklist more than the configured percentage of workers.
      final double cap = workers.size() * (config.getMaxPercentageBlacklistWorkers() / 100.0) - 1;
      if (blackListedWorkers.size() <= cap) {
        workerHolder.setBlacklistedUntil(DateTimes.nowUtc().plus(config.getWorkerBlackListBackoffTime()));
        // put() returning null means this is a fresh blacklist entry, not a refresh — log only then.
        if (blackListedWorkers.put(host, workerHolder) == null) {
          log.info(
              "Blacklisting [%s] until [%s] after [%,d] failed tasks in a row.",
              workerHolder.getWorker(),
              workerHolder.getBlacklistedUntil(),
              workerHolder.getContinuouslyFailedTasksCount()
          );
        }
      }
    }
  }
}
/**
 * Runs this task by delegating to an inner {@code SubTask} over all used segments in the
 * task's interval.
 *
 * @param toolbox task toolbox providing the action client used to look up segments
 * @return {@code success()} if the sub-task succeeded, otherwise a status carrying the
 *         sub-task's status code under this task's id
 * @throws Exception if segment lookup or the sub-task run throws
 */
@Override
public TaskStatus run(TaskToolbox toolbox) throws Exception
{
  // Resolve every used segment for this datasource/interval (null = no version filter).
  final List<DataSegment> usedSegments = toolbox.getTaskActionClient().submit(
      new SegmentListUsedAction(getDataSource(), getInterval(), null)
  );

  final SubTask delegate = new SubTask(
      getId(),
      getDataSource(),
      usedSegments,
      aggregators,
      rollup,
      indexSpec,
      segmentWriteOutMediumFactory,
      getContext()
  );

  // Re-badge the delegate's outcome under this task's id.
  final TaskStatus delegateStatus = delegate.run(toolbox);
  return delegateStatus.isSuccess() ? success() : TaskStatus.fromCode(getId(), delegateStatus.getStatusCode());
}