/**
 * Gets the run canary stages that contain the results.
 */
@NotNull
protected List<Stage> getRunCanaryStages(@Nonnull Stage stage) {
  // Collect the Run Canary stages in this execution, sorted by the number after the '#' in the stage name.
  return stage.getExecution().getStages().stream()
      .filter(s -> s.getType().equals(RunCanaryStage.STAGE_TYPE))
      .sorted(Comparator.comparing(s -> Integer.valueOf(StringUtils.substringAfterLast(s.getName(), "#"))))
      .collect(Collectors.toList());
}
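The comparator parses the numeric suffix instead of comparing names lexically, so "Run Canary #10" sorts after "Run Canary #2". A minimal sketch of the same comparison applied to plain strings (the names and the use of commons-lang StringUtils are illustrative assumptions, not from the source):

List<String> names = new ArrayList<>(Arrays.asList("Run Canary #10", "Run Canary #2", "Run Canary #1"));
names.sort(Comparator.comparing(n -> Integer.valueOf(StringUtils.substringAfterLast(n, "#"))));
// names is now ["Run Canary #1", "Run Canary #2", "Run Canary #10"];
// a lexical sort would have placed "#10" before "#2".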
Map<String, Object> context = stage.getContext();
String storageAccountName = (String) context.get("storageAccountName");
List<String> controlMetricSetListIds =
    getMetricSetListIds(stage.getExecution(), (String) context.get("controlRefidPrefix"));
List<String> experimentMetricSetListIds =
    getMetricSetListIds(stage.getExecution(), (String) context.get("experimentRefidPrefix"));
String resolvedAccountName =
    CredentialsHelper.resolveAccountByNameOrType(
        storageAccountName, AccountCredentials.Type.OBJECT_STORE, accountCredentialsRepository);
@Override
public void cancel(Stage stage) {
  Map<String, Object> context = stage.getContext();
  String canaryPipelineExecutionId = (String) context.get("canaryPipelineExecutionId");

  if (canaryPipelineExecutionId != null) {
    log.info("Cancelling stage (stageId: {}, executionId: {}, canaryPipelineExecutionId: {}, context: {})",
        stage.getId(), stage.getExecution().getId(), canaryPipelineExecutionId, stage.getContext());

    try {
      executionRepository.cancel(canaryPipelineExecutionId);
    } catch (Exception e) {
      log.error("Failed to cancel stage (stageId: {}, executionId: {}), e: {}",
          stage.getId(), stage.getExecution().getId(), e.getMessage(), e);
    }
  } else {
    log.info("Not cancelling stage (stageId: {}, executionId: {}, context: {})",
        stage.getId(), stage.getExecution().getId(), stage.getContext());
  }
}
StorageService storageService =
    storageServiceRepository
        .getOne(resolvedStorageAccountName)
        .orElseThrow(() -> new IllegalArgumentException("No storage service was configured; unable to load metric set lists."));
CanaryConfig canaryConfig = executionMapper.getCanaryConfig(stage.getExecution());
List<MetricSetPair> metricSetPairList =
    storageService.loadObject(resolvedStorageAccountName, ObjectType.METRIC_SET_PAIR_LIST, metricSetPairListId);
CanaryJudgeConfig canaryJudgeConfig = canaryConfig.getJudge();
CanaryExecutionRequest canaryExecutionRequest = executionMapper.getCanaryExecutionRequest(stage.getExecution());
@Test
public void test_that_getRunCanaryStages_returns_the_expected_sorted_list_of_stages_sorted_by_the_number_in_the_stage_name() {
  Stage stage = mock(Stage.class);
  Execution execution = mock(Execution.class);
  when(stage.getExecution()).thenReturn(execution);
  when(execution.getStages()).thenReturn(ImmutableList.of(
      new Stage(null, STAGE_TYPE, "foo #1", Maps.newHashMap(ImmutableMap.of("index", "0"))),
      new Stage(null, STAGE_TYPE, "foo #3", Maps.newHashMap(ImmutableMap.of("index", "2"))),
      new Stage(null, STAGE_TYPE, "foo #2", Maps.newHashMap(ImmutableMap.of("index", "1"))),
      new Stage(null, STAGE_TYPE, "foo #4", Maps.newHashMap(ImmutableMap.of("index", "3")))
  ));

  List<Stage> actual = task.getRunCanaryStages(stage);

  for (int i = 0; i < 4; i++) {
    assertEquals(String.valueOf(i), actual.get(i).getContext().get("index"));
  }
}
stage.getExecution().getId(), Optional.ofNullable(context.getCanaryConfigId()).orElse(AD_HOC), request.getCanaryConfig(),
private String executionKey(Stage stage) {
  return format("%s:%s", stage.getExecution().getType(), stage.getExecution().getId());
}
private Trigger getTrigger() {
  return stage.getExecution().getTrigger();
}
public Map<String, Object> buildExecutionContext(Stage stage, boolean includeStageContext) {
  Map<String, Object> augmentedContext = new HashMap<>();
  if (includeStageContext) {
    augmentedContext.putAll(stage.getContext());
  }
  if (stage.getExecution().getType() == PIPELINE) {
    augmentedContext.put(
        "trigger",
        mapper.convertValue(stage.getExecution().getTrigger(), new TypeReference<Map<String, Object>>() {}));
    augmentedContext.put("execution", stage.getExecution());
  }
  return augmentedContext;
}
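A hedged usage sketch: for a pipeline-type execution, the returned map carries the trigger (converted to a Map) and the execution itself alongside the stage's own context; the variable name contextParameterProcessor is an assumption, not from the source.

Map<String, Object> evalContext = contextParameterProcessor.buildExecutionContext(stage, true);
Object trigger = evalContext.get("trigger");     // the Trigger, converted to Map<String, Object>
Object execution = evalContext.get("execution"); // the Execution object itself
// plus every entry from stage.getContext(), since includeStageContext was true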
@Override
public List<Stage> buildStages(Stage parentStage) {
  Map<String, Object> waitContext = Collections.singletonMap("waitTime", waitTime);
  return Collections.singletonList(
      newStage(
          parentStage.getExecution(),
          waitStage.getType(),
          "wait",
          waitContext,
          parentStage,
          SyntheticStageOwner.STAGE_AFTER));
}
@JsonIgnore
public List<Stage> downstreamStages() {
  return getExecution()
      .getStages()
      .stream()
      .filter(it -> it.getRequisiteStageRefIds().contains(getRefId()))
      .collect(toList());
}
public @Nonnull List<Artifact> getArtifacts(@Nonnull Stage stage) {
  if (stage.getContext() instanceof StageContext) {
    return (List<Artifact>) Optional.ofNullable((List) ((StageContext) stage.getContext()).getAll("artifacts"))
        .map(list -> list.stream()
            .filter(Objects::nonNull)
            .flatMap(it -> ((List) it).stream())
            .map(a -> a instanceof Map ? objectMapper.convertValue(a, Artifact.class) : a)
            .collect(Collectors.toList()))
        .orElse(emptyList());
  } else {
    log.warn("Unable to read artifacts from unknown context type: {} ({})",
        stage.getContext().getClass(), stage.getExecution().getId());
    return emptyList();
  }
}
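Entries under "artifacts" typically arrive as serialized maps, which is why the stream converts them through Jackson. A minimal sketch of that conversion step in isolation (the field values are illustrative, not from the source):

ObjectMapper objectMapper = new ObjectMapper();
Map<String, Object> raw = ImmutableMap.of("type", "docker/image", "name", "example/app", "version", "1.0.0");
Artifact artifact = objectMapper.convertValue(raw, Artifact.class);
// artifact.getType() == "docker/image", artifact.getName() == "example/app"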
private RedisClientDelegate getRedisDelegate(Stage stage) {
  return getRedisDelegate(stage.getExecution().getType(), stage.getExecution().getId());
}
@Nonnull
@Override
public TaskResult execute(@Nonnull Stage stage) {
  JiraService.CreateIssueRequest createIssueRequest = stage.mapTo(JiraService.CreateIssueRequest.class);
  Optional.ofNullable(stage.getExecution().getAuthentication())
      .map(Execution.AuthenticationDetails::getUser)
      .ifPresent(createIssueRequest::setReporter);
  CreateJiraIssueResponse createJiraIssueResponse = jiraService.createJiraIssue(createIssueRequest);
  return new TaskResult(
      ExecutionStatus.SUCCEEDED,
      ImmutableMap.of("createJiraIssueResponse", createJiraIssueResponse));
}
public StageStarted(
    @Nonnull Object source,
    @Nonnull Stage stage
) {
  this(
      source,
      stage.getExecution().getType(),
      stage.getExecution().getId(),
      stage.getId(),
      stage.getType(),
      stage.getName());
}
public StageComplete(
    @Nonnull Object source,
    @Nonnull Stage stage
) {
  this(
      source,
      stage.getExecution().getType(),
      stage.getExecution().getId(),
      stage.getId(),
      stage.getType(),
      stage.getName(),
      stage.getStatus());
}
private String generateRefId() {
  long offset = parent
      .getExecution()
      .getStages()
      .stream()
      .filter(i -> parent.getId().equals(i.getParentStageId()) && type == i.getSyntheticStageOwner())
      .count();
  return format(
      "%s%s%d",
      parent.getRefId(),
      type == STAGE_BEFORE ? "<" : ">",
      offset + graph.nodes().size());
}
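A hedged worked example of the refId format, with hypothetical values:

// parent.getRefId() == "2", type == STAGE_BEFORE, offset == 0 (no pre-existing synthetic
// siblings on the parent), graph.nodes().size() == 1 (only the node just added)
// format("%s%s%d", "2", "<", 0 + 1)  ->  "2<1"; an after-stage (STAGE_AFTER) would yield "2>1"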
@Override
public void updateStageContext(@Nonnull Stage stage) {
  RedisClientDelegate delegate = getRedisDelegate(stage);
  String key = executionKey(stage);
  String contextKey = format("stage.%s.context", stage.getId());
  delegate.withCommandsClient(c -> {
    try {
      c.hset(key, contextKey, mapper.writeValueAsString(stage.getContext()));
    } catch (JsonProcessingException e) {
      throw new StageSerializationException(
          format("Failed serializing stage, executionId: %s, stageId: %s",
              stage.getExecution().getId(), stage.getId()),
          e);
    }
  });
}
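Taken together with executionKey above, each stage's context becomes one field of a Redis hash keyed by execution. A hedged illustration with hypothetical IDs and context values (not from the source):

// key        = "PIPELINE:01EXAMPLEEXECUTIONID"   (executionKey: execution type + ":" + execution id)
// contextKey = "stage.01EXAMPLESTAGEID.context"  (one hash field per stage)
// HSET key contextKey '{"waitTime":30}'          (stage context serialized as JSON)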
/**
 * Adds a new stage to the graph. By default the new stage is not dependent on any
 * others. Use {@link #connect(Stage, Stage)} to make it depend on other stages or
 * have other stages depend on it.
 */
public void add(@Nonnull Stage stage) {
  stage.setExecution(parent.getExecution());
  stage.setParentStageId(parent.getId());
  stage.setSyntheticStageOwner(type);
  if (graph.addNode(stage)) {
    stage.setRefId(generateRefId());
  }
  lastAdded = stage;
}
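A hedged usage sketch of the builder, based only on the add method above and the connect(Stage, Stage) call its Javadoc mentions; the variable names and the way the stages are obtained are assumptions:

// Assumes 'builder' is an instance of the class above and 'first'/'second' are pre-built Stage instances.
builder.add(first);               // by default, 'first' depends on nothing
builder.add(second);
builder.connect(first, second);   // make 'second' depend on 'first'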