/** Tasks owned by this clouddriver instance are served entirely by the primary repository. */
@Override
public List<Task> listByThisInstance() {
  List<Task> owned = primary.listByThisInstance();
  return owned;
}
}
/** Delegates task creation to the primary repository; the previous one is read-only here. */
@Override
public Task create(String phase, String status) {
  Task created = primary.create(phase, status);
  return created;
}
/**
 * Reads the most recent entry of the task's Redis history list and converts it
 * back into a DefaultTaskStatus (phase, status, state).
 */
public DefaultTaskStatus currentState(JedisTask task) {
  final String historyKey = "taskHistory:" + task.getId();
  final RedisClientDelegate delegate = clientForTask(task);
  // lindex -1 returns the last (latest) element of the history list.
  final String latest = retry(
      () -> delegate.withCommandsClient(c -> c.lindex(historyKey, -1)),
      format("Getting current state for task %s", task.getId()));
  final Map<String, String> entry;
  try {
    entry = mapper.readValue(latest, HISTORY_TYPE);
  } catch (IOException e) {
    throw new RuntimeException("Failed converting task history json to object", e);
  }
  return DefaultTaskStatus.create(
      entry.get("phase"), entry.get("status"), TaskState.valueOf(entry.get("state")));
}
/**
 * Creates a task keyed by a client-supplied request id so repeated client calls
 * map onto a single task. The task is created optimistically, then registered
 * under the client key with SETNX; on losing that race the freshly created task
 * is marked FAILED as a duplicate and the pre-existing task is returned.
 */
@Override
public Task create(String phase, String status, String clientRequestId) {
  String taskKey = getClientRequestKey(clientRequestId);
  String taskId = UUID.randomUUID().toString();
  JedisTask task = new JedisTask(taskId, System.currentTimeMillis(), this, ClouddriverHostname.ID, false);
  addToHistory(DefaultTaskStatus.create(phase, status, TaskState.STARTED), task);
  set(taskId, task);
  // SETNX returns 1 only when this taskId won the race for the client key.
  Long newTask = retry(() -> redisClientDelegate.withCommandsClient(client -> {
    return client.setnx(taskKey, taskId);
  }), "Registering task with index");
  if (newTask != 0) {
    return task;
  }
  // There's an existing taskId for this key, clean up what we just created and get the existing task
  addToHistory(DefaultTaskStatus.create(phase, "Duplicate of " + clientRequestId, TaskState.FAILED), task);
  return getByClientRequestId(clientRequestId);
}
/**
 * Appends a status entry (phase/status/state) as JSON to the task's Redis
 * history list, refreshes the list TTL, and removes the task from the running
 * set once the status is terminal.
 */
public void addToHistory(DefaultTaskStatus status, JedisTask task) {
  String historyKey = "taskHistory:" + task.getId();
  Map<String, String> entry = new HashMap<>();
  entry.put("phase", status.getPhase());
  entry.put("status", status.getStatus());
  entry.put("state", status.getState().toString());
  final String json;
  try {
    json = mapper.writeValueAsString(entry);
  } catch (JsonProcessingException e) {
    throw new RuntimeException("Failed converting task history to json", e);
  }
  retry(() -> redisClientDelegate.withCommandsClient(client -> {
    client.rpush(historyKey, json);
    client.expire(historyKey, TASK_TTL);
    if (status.isCompleted()) {
      client.srem(RUNNING_TASK_KEY, task.getId());
    }
  }), format("Adding status history to task %s: %s", task.getId(), status));
}
/**
 * Runs (or, for a dry run, only computes) the migration of this cluster
 * configuration, reporting start and completion on the current task.
 */
public MigrateClusterConfigurationResult migrate(boolean dryRun) {
  String verb = dryRun ? "Calculating" : "Beginning";
  getTask().updateStatus(BASE_PHASE, verb + " migration of cluster config " + source.toString());
  MigrateClusterConfigurationResult result = migrationStrategy.generateResults(
      source, target, sourceLookup, targetLookup,
      migrateLoadBalancerStrategy, migrateSecurityGroupStrategy,
      subnetType, elbSubnetType, iamRole, keyPair, loadBalancerNameMapping,
      allowIngressFromClassic, dryRun);
  String outcome = dryRun ? " calculated" : " completed";
  getTask().updateStatus(BASE_PHASE,
      "Migration of cluster configuration " + source.toString() + outcome + ".");
  return result;
}
/** Filters the full task list down to tasks whose owner id matches this clouddriver host. */
@Override
public List<Task> listByThisInstance() {
  return list()
      .stream()
      .filter(task -> ClouddriverHostname.ID.equals(task.getOwnerId()))
      .collect(Collectors.toList());
}
/**
 * Looks up a task by client request id, preferring the primary repository and
 * falling back to the previous one only when the primary has no entry.
 *
 * <p>Bug fix: the original used {@code Optional.orElse(...)}, whose argument is
 * evaluated eagerly — the previous repository was queried on every call even
 * when the primary already had the task. {@code orElseGet} defers the fallback
 * lookup until it is actually needed.
 */
@Override
public Task getByClientRequestId(String clientRequestId) {
  return Optional
      .ofNullable(primary.getByClientRequestId(clientRequestId))
      .orElseGet(() -> previous.getByClientRequestId(clientRequestId));
}
/**
 * Wires the dual repository as the primary TaskRepository bean, resolving the
 * configured primary and previous implementations from all registered
 * repositories by class name.
 */
@Primary
@Bean
TaskRepository dualExecutionRepository(Properties properties, List<TaskRepository> allRepositories) {
  TaskRepository primaryRepo = findTaskRepositoryByClass(allRepositories, properties.primaryClass);
  TaskRepository previousRepo = findTaskRepositoryByClass(allRepositories, properties.previousClass);
  return new DualTaskRepository(
      primaryRepo,
      previousRepo,
      properties.executorThreadPoolSize,
      properties.executorTimeoutSeconds);
}
/**
 * Loads the full status history for a task from its Redis list, deserializing
 * each JSON entry into a TaskDisplayStatus.
 */
public List<Status> getHistory(JedisTask task) {
  String historyKey = "taskHistory:" + task.getId();
  RedisClientDelegate delegate = clientForTask(task);
  List<String> rawEntries = retry(
      () -> delegate.withCommandsClient(c -> c.lrange(historyKey, 0, -1)),
      format("Getting history for task %s", task.getId()));
  return rawEntries
      .stream()
      .map(raw -> {
        final Map<String, String> entry;
        try {
          entry = mapper.readValue(raw, HISTORY_TYPE);
        } catch (IOException e) {
          throw new RuntimeException("Could not convert history json to type", e);
        }
        return TaskDisplayStatus.create(DefaultTaskStatus.create(
            entry.get("phase"), entry.get("status"), TaskState.valueOf(entry.get("state"))));
      })
      .collect(Collectors.toList());
}
/**
 * Terminates every instance in the given auto scaling group, batching the
 * terminate calls so no single EC2 request exceeds MAX_SIMULTANEOUS_TERMINATIONS.
 * A failure on one batch is reported on the task and does not stop later batches.
 *
 * <p>Bug fix: the original incremented {@code terminatedCount} before calling
 * terminateInstances, so a failed batch still inflated the count reported for
 * every subsequent batch. The count is now only advanced after a successful call;
 * the in-progress message includes the current batch so successful-path output
 * is unchanged.
 */
default void terminateInstancesInAutoScalingGroup(Task task, AmazonEC2 amazonEC2, AutoScalingGroup autoScalingGroup) {
  String serverGroupName = autoScalingGroup.getAutoScalingGroupName();
  List<String> instanceIds = autoScalingGroup
      .getInstances()
      .stream()
      .map(Instance::getInstanceId)
      .collect(Collectors.toList());

  int terminatedCount = 0;
  for (List<String> partition : Lists.partition(instanceIds, MAX_SIMULTANEOUS_TERMINATIONS)) {
    try {
      task.updateStatus(
          PHASE,
          String.format("Terminating %d of %d instances in %s",
              terminatedCount + partition.size(), instanceIds.size(), serverGroupName)
      );
      amazonEC2.terminateInstances(new TerminateInstancesRequest().withInstanceIds(partition));
      // Count the batch only once the terminate call actually succeeded.
      terminatedCount += partition.size();
    } catch (Exception e) {
      task.updateStatus(
          PHASE,
          String.format("Unable to terminate instances, reason: '%s'", e.getMessage())
      );
    }
  }
}
}
/** Client-request-id-scoped creation goes straight to the primary repository. */
@Override
public Task create(String phase, String status, String clientRequestId) {
  Task created = primary.create(phase, status, clientRequestId);
  return created;
}
/**
 * Deregisters (deletes) the AMI named in the description from the configured
 * account/region, reporting progress on the current task.
 */
@Override
public Void operate(List priorOutputs) {
  getTask().updateStatus(BASE_PHASE, String.format("Initializing Delete Image operation for %s", description));
  DeregisterImageRequest request = new DeregisterImageRequest().withImageId(description.getImageId());
  amazonClientProvider
      .getAmazonEC2(description.getCredentials(), description.getRegion())
      .deregisterImage(request);
  getTask().updateStatus(BASE_PHASE, String.format("Deleted Image %s in %s", description.getImageId(), description.getRegion()));
  return null;
}
}
// NOTE(review): this chunk appears truncated — the second updateStatus call is
// missing its closing parenthesis and the if-block is never closed, and the
// third call is cut off mid-arguments. Confirm against the full source file
// before editing; reproduced verbatim below.
event.getTask().updateStatus(
    PHASE,
    "Skipping explicit instance termination, server group is attached to one or more load balancers"
);
// Lifecycle hooks also cause explicit termination to be skipped.
if (!existingLifecycleHooks.isEmpty()) {
  event.getTask().updateStatus(
      PHASE,
      "Skipping explicit instance termination, server group has one or more lifecycle hooks"
  event.getTask().updateStatus(
      PHASE,
      String.format(
/**
 * Upserts an Oracle load balancer: when the description carries a
 * loadBalancerId, the existing LB is fetched and updated (a missing LB or a 404
 * is reported on the task); otherwise a new LB is created. Returns a nested map
 * describing the load balancer by region and qualified name.
 *
 * <p>Bug fix: the "does not exist" messages used Groovy-style
 * "${description.loadBalancerId}" placeholders inside Java string literals,
 * which are emitted verbatim rather than interpolated; they now use
 * String.format with the actual id.
 */
@Override
public Map operate(List priorOutputs) {
  Task task = getTask();
  if (description.getLoadBalancerId() != null) {
    try {
      LoadBalancer lb = description.getCredentials().getLoadBalancerClient().getLoadBalancer(
          GetLoadBalancerRequest.builder().loadBalancerId(description.getLoadBalancerId()).build()).getLoadBalancer();
      if (lb != null) {
        update(lb, task);
      } else {
        task.updateStatus(UPDATE, String.format("LoadBalancer %s does not exist.", description.getLoadBalancerId()));
      }
    } catch (BmcException e) {
      // A 404 means the LB is already gone; anything else is unexpected and rethrown.
      if (e.getStatusCode() == 404) {
        task.updateStatus(UPDATE, String.format("LoadBalancer %s does not exist.", description.getLoadBalancerId()));
      } else {
        throw e;
      }
    }
  } else {
    create(task);
  }
  return mapOf("loadBalancers",
      mapOf(description.getCredentials().getRegion(),
          mapOf("name", description.qualifiedName())));
}
/**
 * Creates, in the target account/VPC, a security group that the migrating group
 * depends on, copying the description from the source group when it exists and
 * otherwise synthesizing a default one.
 */
private SecurityGroupUpdater createDependentSecurityGroup(MigrateSecurityGroupReference reference) {
  String sourceAccount = sourceLookup.getAccountNameForId(reference.getAccountId());
  Optional<SecurityGroupUpdater> sourceGroup =
      sourceLookup.getSecurityGroupByName(sourceAccount, reference.getTargetName(), reference.getVpcId());
  // Prefer the source group's own description; fall back to a generated one.
  String description = sourceGroup
      .map(group -> group.getSecurityGroup().getDescription())
      .orElse("Security group " + reference.getTargetName());
  UpsertSecurityGroupDescription upsertDescription = new UpsertSecurityGroupDescription();
  upsertDescription.setName(reference.getTargetName());
  upsertDescription.setCredentials(reference.getCredentials());
  upsertDescription.setDescription(description);
  upsertDescription.setVpcId(reference.getVpcId());
  getTask().updateStatus(SecurityGroupMigrator.BASE_PHASE,
      "Creating dependent security group " + reference.getTargetName() + " in "
          + reference.getCredentialAccount() + "/" + target.getRegion() + "/" + target.getVpcId());
  return targetLookup.createSecurityGroup(upsertDescription);
}
void updateCertificates(LoadBalancer lb, Task task) { if (lb.getCertificates() != null) { lb.getCertificates().forEach( (name, existingCert) -> { CertificateDetails cert = (description.getCertificates() != null)? description.getCertificates().get(name) : null; if (cert == null) { // Delete certificate: must have no listener using it DeleteCertificateResponse res = description.getCredentials().getLoadBalancerClient().deleteCertificate( DeleteCertificateRequest.builder().loadBalancerId(lb.getId()).certificateName(name).build()); task.updateStatus(UPDATE, "DeleteCertificateRequest of ${name} submitted - work request id: ${rs.getOpcWorkRequestId()}"); OracleWorkRequestPoller.poll(res.getOpcWorkRequestId(), UPDATE, task, description.getCredentials().getLoadBalancerClient()); } }); } // Add new certificate Map<String, CertificateDetails> certificates = description.getCertificates(); if (certificates != null) { certificates.forEach( (name, details) -> { Certificate cert = lb.getCertificates().get(name); if (cert == null) { CreateCertificateResponse res = description.getCredentials().getLoadBalancerClient().createCertificate( CreateCertificateRequest.builder().loadBalancerId(description.getLoadBalancerId()) .createCertificateDetails(toCreate(details, name)).build()); task.updateStatus(UPDATE, "CreateCertificateRequest of ${name} submitted - work request id: ${rs.getOpcWorkRequestId()}"); OracleWorkRequestPoller.poll(res.getOpcWorkRequestId(), UPDATE, task, description.getCredentials().getLoadBalancerClient()); } }); } }
// NOTE(review): this chunk is a truncated interior of a backend-set reconcile
// method — the enclosing request-submitting calls and braces are outside this
// view, so it is reproduced verbatim. Also note the status messages embed
// Groovy-style "${name}"/"${rs.getOpcWorkRequestId()}" placeholders in Java
// string literals (and "rs" does not match the "res" variable used elsewhere);
// they will be logged verbatim — confirm and fix against the full source.
UpdateBackendSetRequest.builder().loadBalancerId(lb.getId()).backendSetName(name)
    .updateBackendSetDetails(toUpdate(backendSetUpdate, existingBackendSet)).build());
task.updateStatus(UPDATE, "UpdateBackendSetRequest of ${name} submitted - work request id: ${rs.getOpcWorkRequestId()}");
OracleWorkRequestPoller.poll(res.getOpcWorkRequestId(), UPDATE, task, description.getCredentials().getLoadBalancerClient());
} else {
task.updateStatus(UPDATE, "DeleteBackendSetRequest of ${name} submitted - work request id: ${rs.getOpcWorkRequestId()}");
OracleWorkRequestPoller.poll(res.getOpcWorkRequestId(), UPDATE, task, description.getCredentials().getLoadBalancerClient());
CreateBackendSetRequest.builder().loadBalancerId(description.getLoadBalancerId())
    .createBackendSetDetails(toCreate(details, name)).build());
task.updateStatus(UPDATE, "CreateBackendSetRequest of ${name} submitted - work request id: ${rs.getOpcWorkRequestId()}");
OracleWorkRequestPoller.poll(res.getOpcWorkRequestId(), UPDATE, task, description.getCredentials().getLoadBalancerClient());
/**
 * Creates a new Oracle load balancer from the description (name, shape,
 * subnets, plus optional privacy flag, certificates, backend sets, and
 * listeners) and polls the resulting work request until it completes.
 *
 * <p>Bug fix: the status messages used Groovy-style
 * "${description.qualifiedName()}" / "${rs.getOpcWorkRequestId()}" placeholders
 * inside Java string literals ("rs" does not even exist; the response variable
 * is "res"), so the raw placeholder text was logged. They now report the actual
 * name and work request id.
 */
void create(Task task) {
  String clusterName = description.qualifiedName();
  task.updateStatus(CREATE, String.format("Create LB: %s", clusterName));
  CreateLoadBalancerDetails.Builder lbDetails = CreateLoadBalancerDetails.builder()
      .displayName(clusterName)
      .compartmentId(description.getCredentials().getCompartmentId())
      .shapeName(description.getShape())
      .subnetIds(description.getSubnetIds());
  // Optional attributes are only set when present in the description.
  if (description.getIsPrivate()) {
    lbDetails.isPrivate(description.getIsPrivate());
  }
  if (description.getCertificates() != null) {
    lbDetails.certificates(description.getCertificates());
  }
  if (description.getBackendSets() != null) {
    lbDetails.backendSets(description.getBackendSets());
  }
  if (description.getListeners() != null) {
    lbDetails.listeners(description.getListeners());
  }
  CreateLoadBalancerResponse res = description.getCredentials().getLoadBalancerClient().createLoadBalancer(
      CreateLoadBalancerRequest.builder().createLoadBalancerDetails(lbDetails.build()).build());
  task.updateStatus(CREATE, String.format("Create LB rq submitted - work request id: %s", res.getOpcWorkRequestId()));
  OracleWorkRequestPoller.poll(res.getOpcWorkRequestId(), CREATE, task,
      description.getCredentials().getLoadBalancerClient());
}