/**
 * Logs the given message at warn level and returns a failed task, marked inessential,
 * carrying the message and (optional, may be null) underlying error.
 */
public static Task<Void> warning(final String message, final Throwable optionalError) {
    log.warn(message);
    Task<Void> failed = fail(message, optionalError);
    return TaskTags.markInessential(failed);
}
/**
 * Tags the given task as inessential (see {@code INESSENTIAL_TASK}), so that
 * containers/parents may ignore its failure; returns the same task for chaining.
 */
public static <U,V extends TaskAdaptable<U>> V markInessential(V task) {
    addTagDynamically(task, INESSENTIAL_TASK);
    return task;
}
/** @return true iff the given task carries the {@code INESSENTIAL_TASK} tag */
public static boolean isInessential(Task<?> task) {
    boolean tagged = hasTag(task, INESSENTIAL_TASK);
    return tagged;
}
} catch (Exception e) { Exceptions.propagateIfFatal(e); if (TaskTags.isInessential(task)) {
/**
 * Blocks until the queue has drained (joining the destination job, optionally including
 * the primary, up to the optional timeout), then optionally rethrows the first error seen.
 * <p>
 * Error propagation order: the primary's failure (if any) is thrown first, then the first
 * failed queued task that is NOT marked inessential. Inessential tasks' failures are
 * deliberately swallowed here.
 */
@Override
public void drain(Duration optionalTimeout, boolean includePrimary, boolean throwFirstError) {
    try {
        dstJob.join(includePrimary, optionalTimeout);
    } catch (InterruptedException e) {
        // NOTE(review): Exceptions.propagate presumably restores/handles the interrupt — confirm
        throw Exceptions.propagate(e);
    }
    if (throwFirstError) {
        // getUnchecked() on a failed task rethrows its failure; result is intentionally discarded
        if (isError()) getUnchecked();
        // rethrow first queued failure, skipping tasks tagged inessential
        for (Task<?> t: getQueue())
            if (t.isError() && !TaskTags.isInessential(t)) t.getUnchecked();
    }
}
/**
 * Invokes {@link Startable#STOP} on each of the given entities in turn, continuing past
 * individual failures. Unmanaged entities are skipped (with a debug log).
 * <p>
 * Each stop invocation is marked inessential so that a failure does not fail an enclosing
 * task-queueing context; failures are collected and rethrown together at the end.
 *
 * @throws CompoundRuntimeException if any stop invocation failed, after all have been attempted
 */
public static void stopSequentially(Iterable<? extends Startable> entities) {
    List<Exception> exceptions = Lists.newArrayList();
    List<Startable> failedEntities = Lists.newArrayList();
    for (final Startable entity : entities) {
        if (!Entities.isManaged((Entity)entity)) {
            log.debug("Not stopping {} because it is not managed; continuing", entity);
            continue;
        }
        try {
            // inessential: errors are handled explicitly here rather than by the parent task
            TaskAdaptable<Void> task = TaskTags.markInessential(Effectors.invocation((Entity)entity, Startable.STOP, Collections.emptyMap()));
            DynamicTasks.submit(task, (Entity)entity).getUnchecked();
        } catch (Exception e) {
            log.warn("Error stopping "+entity+"; continuing with shutdown", e);
            exceptions.add(e);
            failedEntities.add(entity);
        }
    }
    // idiomatic emptiness check (was exceptions.size() > 0)
    if (!exceptions.isEmpty()) {
        throw new CompoundRuntimeException("Error stopping "+(failedEntities.size() > 1 ? "entities" : "entity")+": "+failedEntities, exceptions);
    }
}
/**
 * As {@link TaskTags#addTagDynamically(TaskAdaptable, Object)} but applied to the current
 * task; silently does nothing if there is no current task.
 */
public static void addTagDynamically(Object tag) {
    Task<?> current = Tasks.current();
    if (current == null) return;
    TaskTags.addTagDynamically(current, tag);
}
/**
 * Marks the current task inessential; this mainly matters if the task is running in a parent
 * {@link TaskQueueingContext} and we don't want the parent to fail if this task fails.
 * <p>
 * Falls back to the current task-queueing context's task if there is no current task;
 * no-op (silently ignored) if neither is available.
 */
public static void markInessential() {
    Task<?> target = Tasks.current();
    if (target == null) {
        TaskQueueingContext qc = DynamicTasks.getTaskQueuingContext();
        target = (qc == null) ? null : qc.asTask();
    }
    if (target == null) return;
    TaskTags.markInessential(target);
}
/**
 * Creates a MachineDetails for the given location by SSHing to the machine and
 * running a Bash script to gather data. Should only be called from within a
 * task context; if that might not be the case, use
 * {@link #taskForSshMachineLocation(SshMachineLocation)} instead.
 */
@Beta
public static BasicMachineDetails forSshMachineLocationLive(SshMachineLocation location) {
    // queue in the current context if possible, else submit asynchronously;
    // inessential so a failure here does not fail the enclosing task
    Task<BasicMachineDetails> task = DynamicTasks.queueIfPossible(taskForSshMachineLocation(location))
            .orSubmitAsync()
            .asTask();
    return TaskTags.markInessential(task).getUnchecked();
}
"tag", BrooklynTaskTags.tagForCallerEntity(callingEntity)), tasks); TaskTags.markInessential(invoke); return DynamicTasks.queueIfPossible(invoke).orSubmitAsync(callingEntity).asTask();
TaskAdaptable<?> task = ((EffectorWithBody)eff).getBody().newTask(delegate, eff, ConfigBag.newInstance(parameters)); TaskTags.markInessential(task); result = DynamicTasks.queueIfPossible(task.asTask()).orSubmitAsync(delegate).andWaitForSuccess(); } else {
TaskTags.markInessential(parallel); DynamicTasks.queueIfPossible(parallel).orSubmitAsync(this); Map<Entity, Throwable> errors = waitForTasksOnEntityStart(tasks);
/**
 * Submits the given task in the execution context of the given entity and returns it.
 * If invoked while a task-queueing context is active, the task is also queued there
 * (so it appears in the GUI) but marked inessential, since this is being invoked from
 * code and the caller is expected to call 'get' to handle errors.
 */
protected <T> Task<T> runAtEntity(Entity entity, TaskAdaptable<T> task) {
    getExecutionContext(entity).submit(task);
    if (DynamicTasks.getTaskQueuingContext()!=null) {
        // put it in the queueing context so it appears in the GUI
        // mark it inessential as this is being invoked from code,
        // the caller will do 'get' to handle errors
        TaskTags.markInessential(task);
        DynamicTasks.getTaskQueuingContext().queue(task.asTask());
    }
    return task.asTask();
}
private void queueShutdownTask() { ConfigBag stopParameters = BrooklynTaskTags.getCurrentEffectorParameters(); ConfigBag shutdownParameters; if (stopParameters != null) { shutdownParameters = ConfigBag.newInstanceCopying(stopParameters); } else { shutdownParameters = ConfigBag.newInstance(); } shutdownParameters.putIfAbsent(ShutdownEffector.REQUEST_TIMEOUT, Duration.ONE_MINUTE); shutdownParameters.putIfAbsent(ShutdownEffector.FORCE_SHUTDOWN_ON_ERROR, Boolean.TRUE); TaskAdaptable<Void> shutdownTask = Effectors.invocation(this, SHUTDOWN, shutdownParameters); //Mark inessential so that even if it fails the process stop task will run afterwards to clean up. TaskTags.markInessential(shutdownTask); DynamicTasks.queue(shutdownTask); }
public static <T> Task<T> invokeEffector(Entity callingEntity, Entity entityToCall, final Effector<T> effector, final Map<String,?> parameters) { Task<T> t = Effectors.invocation(entityToCall, effector, parameters).asTask(); TaskTags.markInessential(t); // we pass to callingEntity for consistency above, but in exec-context it should be re-dispatched to targetEntity // reassign t as the return value may be a wrapper, if it is switching execution contexts; see submitInternal's javadoc t = ((EntityInternal)callingEntity).getExecutionContext().submit( MutableMap.of("tag", BrooklynTaskTags.tagForCallerEntity(callingEntity)), t); if (DynamicTasks.getTaskQueuingContext()!=null) { // include it as a child (in the gui), marked inessential, because the caller is invoking programmatically DynamicTasks.queue(t); } return t; }
TaskTags.markInessential(task); DynamicTasks.queue(task); try {
/**
 * In KNIFE mode, queues an inessential task that deletes the node and client registrations
 * at the chef server before delegating to the superclass to stop the machine itself.
 */
@Override
protected StopMachineDetails<Integer> stopAnyProvisionedMachines() {
    if (detectChefMode(entity())==ChefModes.KNIFE) {
        DynamicTasks.queue(
            // if this task fails show it as failed but don't block subsequent routines
            // (ie allow us to actually decommission the machine)
            // TODO args could be a List<String> config key ?
            TaskTags.markInessential(
            new KnifeTaskFactory<String>("delete node and client registration at chef server")
                .add("knife node delete "+getNodeName()+" -y")
                .add("knife client delete "+getNodeName()+" -y")
                .requiringZeroAndReturningStdout()
                .newTask()
            ));
    }
    return super.stopAnyProvisionedMachines();
}
@Override protected StopMachineDetails<Integer> stopAnyProvisionedMachines() { if (detectChefMode(entity())==ChefModes.KNIFE) { DynamicTasks.queue( // if this task fails show it as failed but don't block subsequent routines // (ie allow us to actually decommission the machine) // TODO args could be a List<String> config key ? TaskTags.markInessential( new KnifeTaskFactory<String>("delete node and client registration at chef server") .add("knife node delete "+getNodeName()+" -y") .add("knife client delete "+getNodeName()+" -y") .requiringZeroAndReturningStdout() .newTask() )); } return super.stopAnyProvisionedMachines(); }
/**
 * Verifies that when a child task marked inessential fails, the sibling task still runs
 * and the parent (primary) task does not report an error — only the inessential child does.
 */
@Test
public void testInessentialChildrenFailureDoesNotAbortSecondaryOrFailPrimary() {
    // child "1" will fail, but is marked inessential
    Task<String> t1 = monitorableTask(null, "1", new FailCallable());
    TaskTags.markInessential(t1);
    Task<String> t = Tasks.<String>builder().dynamic(true)
        .body(monitorableJob("main"))
        .add(t1).add(monitorableTask("2")).build();
    ec.submit(t);
    // release the failing child; the parent must not have ended yet
    releaseAndWaitForMonitorableJob("1");
    Assert.assertFalse(t.blockUntilEnded(TINY_TIME));
    // sibling "2" still runs despite "1" failing
    releaseAndWaitForMonitorableJob("2");
    Assert.assertFalse(t.blockUntilEnded(TINY_TIME));
    releaseMonitorableJob("main");
    Assert.assertTrue(t.blockUntilEnded(TIMEOUT));
    Assert.assertEquals(messages, MutableList.of("1", "2", "main"));
    Assert.assertTrue(stopwatch.elapsed(TimeUnit.MILLISECONDS) < TIMEOUT.toMilliseconds(), "took too long: "+stopwatch);
    // primary succeeds even though the inessential child failed
    Assert.assertFalse(t.isError());
    Assert.assertTrue(t1.isError());
}
.newTask(); TaskTags.markInessential(copyTask); DynamicTasks.queue(copyTask);