Refine search
public static boolean isDeploymentConfigReady(DeploymentConfig d) { Utils.checkNotNull(d, "Deployment can't be null."); DeploymentConfigSpec spec = d.getSpec(); DeploymentConfigStatus status = d.getStatus(); if (status == null || status.getReplicas() == null || status.getAvailableReplicas() == null) { return false; } //Can be true in testing, so handle it to make test writing easier. if (spec == null || spec.getReplicas() == null) { return false; } return spec.getReplicas().intValue() == status.getReplicas() && spec.getReplicas().intValue() <= status.getAvailableReplicas(); }
// Polls the DeploymentConfig and signals the blocking queue once the desired
// scale appears to have been reached, or once the resource is gone.
public void run() {
  try {
    DeploymentConfig deploymentConfig = get();
    // If the resource is gone, we shouldn't wait.
    if (deploymentConfig == null) {
      if (count == 0) {
        // Scaling to zero and the resource disappeared: treat as success.
        queue.put(true);
        return;
      } else {
        // NOTE(review): both the name and the namespace placeholders are filled
        // from the same checkName(getItem()) call — the namespace value looks
        // wrong; confirm against the surrounding class.
        queue.put(new IllegalStateException("Can't wait for DeploymentConfig: " + checkName(getItem()) + " in namespace: " + checkName(getItem()) + " to scale. Resource is no longer available."));
        return;
      }
    }
    // Publish the latest observed replica count for the caller to read.
    replicasRef.set(deploymentConfig.getStatus().getReplicas());
    // Null status replicas are treated as zero.
    int currentReplicas = deploymentConfig.getStatus().getReplicas() != null ? deploymentConfig.getStatus().getReplicas() : 0;
    // Done when the controller has observed the latest object generation and
    // the status replica count matches the desired spec count.
    if (deploymentConfig.getStatus().getObservedGeneration() >= deploymentConfig.getMetadata().getGeneration() && Objects.equals(deploymentConfig.getSpec().getReplicas(), currentReplicas)) {
      queue.put(true);
    } else {
      // NOTE(review): the stray "seconds" in this message reads oddly — confirm intent.
      LOG.debug("Only {}/{} pods scheduled for DeploymentConfig: {} in namespace: {} seconds so waiting...", deploymentConfig.getStatus().getReplicas(), deploymentConfig.getSpec().getReplicas(), deploymentConfig.getMetadata().getName(), namespace);
    }
  } catch (Throwable t) {
    // Logged and swallowed — presumably this Runnable is rescheduled by a
    // poller that will retry; verify against the enclosing class.
    LOG.error("Error while waiting for Deployment to be scaled.", t);
  }
}
};
DeploymentConfigSpec spec = deploymentConfig.getSpec(); oldReplicas = spec.getReplicas(); spec.setReplicas(replicas); DeploymentConfig updated = resource.patch(deploymentConfig); System.out.println("Updated the DeploymentConfig " + name + " version: " + deploymentConfig.getApiVersion() + " with replicas: " + replicas + " to resourceVersion: " + updated.getMetadata().getResourceVersion()); } catch (Exception e) { System.out.println("Failed to update the DeploymentConfig " + name + " with replicas: " + replicas); if (name.equals(item.getMetadata().getName())) { found = item; break; found.getSpec().setReplicas(oldReplicas); System.out.println("Updated the list.item DeploymentConfig " + name + " version: " + found.getApiVersion() + " with replicas: " + oldReplicas + " to resourceVersion: " + updated.getMetadata().getResourceVersion()); } catch (Exception e) { System.out.println("Failed to update the list.item DeploymentConfig " + name + " with replicas: " + oldReplicas);
public static void main(String[] args) { try { OpenShiftClient client = new DefaultOpenShiftClient(); if (!client.supportsOpenShiftAPIGroup(OpenShiftAPIGroups.APPS)) { System.out.println("WARNING this cluster does not support the API Group " + OpenShiftAPIGroups.APPS); return; } DeploymentConfigList list = client.deploymentConfigs().list(); if (list == null) { System.out.println("ERROR no list returned!"); return; } List<DeploymentConfig> items = list.getItems(); for (DeploymentConfig item : items) { System.out.println("DeploymentConfig " + item.getMetadata().getName() + " has version: " + item.getApiVersion()); } if (items.size() > 0) { // lets check .get() too DeploymentConfig deploymentConfig = items.get(0); String name = deploymentConfig.getMetadata().getName(); deploymentConfig = client.deploymentConfigs().withName(name).get(); assertNotNull("No DeploymentConfig found for name " + name, deploymentConfig); System.out.println("get() DeploymentConfig " + name + " has version: " + deploymentConfig.getApiVersion()); } } catch (KubernetesClientException e) { System.out.println("Failed: " + e); e.printStackTrace(); } }
/**
 * Fluent-delegating copy constructor: seeds the given fluent with every
 * top-level field of {@code instance} (apiVersion, kind, metadata, spec,
 * status).
 *
 * @param fluent            the fluent builder that receives the copied state
 * @param instance          the DeploymentConfig whose fields are copied
 * @param validationEnabled whether validation is performed on build
 */
public DeploymentConfigBuilder(DeploymentConfigFluent<?> fluent,DeploymentConfig instance,Boolean validationEnabled){
  this.fluent = fluent;
  fluent.withApiVersion(instance.getApiVersion());
  fluent.withKind(instance.getKind());
  fluent.withMetadata(instance.getMetadata());
  fluent.withSpec(instance.getSpec());
  fluent.withStatus(instance.getStatus());
  this.validationEnabled = validationEnabled;
}
// Copy constructor taking only an instance; its body continues beyond this excerpt.
public DeploymentConfigBuilder(DeploymentConfig instance){
/**
 * Patches a DeploymentConfig while preserving the image of the first
 * container currently deployed, so the patch does not roll the image back.
 */
@Override
protected Future<ReconcileResult<DeploymentConfig>> internalPatch(String namespace, String name, DeploymentConfig current, DeploymentConfig desired) {
  // Carry the currently-running image over into the desired state.
  String currentImage = current.getSpec().getTemplate().getSpec().getContainers().get(0).getImage();
  desired.getSpec().getTemplate().getSpec().getContainers().get(0).setImage(currentImage);
  // Delegate the actual patch to the parent implementation.
  return super.internalPatch(namespace, name, current, desired);
}
}
/**
 * Check if the Integration has active deployments other than the given one.
 *
 * @param deployment The specified {@link IntegrationDeployment}.
 * @return true if there are active deployments of a different version, false otherwise.
 */
private boolean hasPublishedDeployments(IntegrationDeployment deployment) {
  Integration integration = deployment.getSpec();
  String id = Labels.validate(integration.getId().orElseThrow(() -> new IllegalStateException("Couldn't find the id of the integration")));
  String version = String.valueOf(integration.getVersion());
  Map<String, String> labels = new HashMap<>();
  labels.put(OpenShiftService.INTEGRATION_ID_LABEL, id);
  // anyMatch short-circuits on the first hit and avoids the lossy
  // (int) cast of count() used previously.
  return openShiftService().getDeploymentsByLabel(labels)
      .stream()
      // Ignore deployments of the version we are asking about.
      .filter(d -> !version.equals(d.getMetadata().getLabels().get(OpenShiftService.DEPLOYMENT_VERSION_LABEL)))
      // A deployment is "active" when it is scaled above zero.
      .anyMatch(d -> d.getSpec().getReplicas() > 0);
}
/**
 * Reports the desired replica count of the named DeploymentConfig,
 * or null when the resource does not exist.
 */
@Override
protected Integer currentScale(String namespace, String name) {
  DeploymentConfig deploymentConfig = get(namespace, name);
  return deploymentConfig == null ? null : deploymentConfig.getSpec().getReplicas();
}
/**
 * Processes the OpenShift template for the given runtime configuration and
 * creates the resulting resource list in the configured project.
 *
 * @param runtimeConfig source of template parameters, service and project names
 * @throws OpenShiftClientException when resource creation fails for any reason
 */
private void createFromTemplate(OpenShiftRuntimeConfig runtimeConfig) throws OpenShiftClientException {
  OpenShiftTemplate template = new OpenShiftTemplate(this, runtimeConfig);
  // LinkedHashMap keeps parameter insertion order; diamond operator replaces
  // the redundant explicit type arguments.
  Map<String, String> parameters = new LinkedHashMap<>();
  parameters.putAll(OpenShiftParameters.fromRuntimeConfig(runtimeConfig));
  String kieServerContainerDeployment = runtimeConfig.getKieServerContainerDeployment();
  if (kieServerContainerDeployment != null && !kieServerContainerDeployment.trim().isEmpty()) {
    parameters.put(OpenShiftProperty.KIE_SERVER_CONTAINER_DEPLOYMENT.envKey(), kieServerContainerDeployment);
  }
  KubernetesList kubeList = template.process(parameters);
  if (kubeList != null && !kubeList.getItems().isEmpty()) {
    try {
      DeploymentConfig dc = getDeploymentConfig(kubeList, runtimeConfig.getServiceName());
      if (dc != null) {
        // Created scaled to zero — presumably started later on demand; TODO confirm.
        dc.getSpec().setReplicas(0);
      }
      String prjName = runtimeConfig.getProjectName();
      delegate.lists().inNamespace(prjName).create(kubeList);
    } catch (Throwable t) {
      // Broad catch kept deliberately: any failure is surfaced as a client exception.
      throw new OpenShiftClientException(t.getMessage(), t);
    }
  }
}
@Override public boolean reap() { DeploymentConfig deployment = operation.cascading(false).edit().editSpec().withReplicas(0).endSpec().done(); //TODO: These checks shouldn't be used as they are not realistic. We just use them to support mock/crud tests. Need to find a cleaner way to do so. if (deployment.getStatus() != null) { waitForObservedGeneration(deployment.getStatus().getObservedGeneration()); } //We are deleting the DC before reaping the replication controller, because the RC's won't go otherwise. Boolean reaped = operation.cascading(false).delete(); // Waiting for the DC to be completely deleted before removing the replication controller (error in Openshift 3.9) waitForDeletion(); Map<String, String> selector = new HashMap<>(); selector.put(DEPLOYMENT_CONFIG_REF, deployment.getMetadata().getName()); if (selector != null && !selector.isEmpty()) { Boolean deleted = new ReplicationControllerOperationsImpl(client, operation.getConfig(), operation.getNamespace()) .withLabels(selector) .delete(); } return reaped; }
// Releases the latch once the controller has observed at least the
// generation this waiter was created for.
public void run() {
  DeploymentConfig deployment = operation.getMandatory();
  long observed = deployment.getStatus().getObservedGeneration();
  if (observed >= observedGeneration) {
    countDownLatch.countDown();
  }
}
};
DeploymentConfigSpec spec = resource.getSpec(); if (spec != null) { selector = toLabelSelector(spec.getSelector());
/**
 * Rewrites DeploymentConfig items to use a Rolling deployment strategy with
 * the configured timeout; all other items pass through unchanged.
 */
@Override
public HasMetadata convert(HasMetadata item, boolean trimImageInContainerSpec, boolean enableAutomaticTrigger) {
  // Pass through anything that is not a DeploymentConfig.
  if (!(item instanceof DeploymentConfig)) {
    return item;
  }
  // Only rewrite when a positive timeout has been configured.
  if (openshiftDeployTimeoutSeconds == null || openshiftDeployTimeoutSeconds <= 0) {
    return item;
  }
  DeploymentConfig resource = (DeploymentConfig) item;
  DeploymentConfigBuilder builder = new DeploymentConfigBuilder(resource);
  // Edit the existing spec if there is one, otherwise start a fresh spec.
  DeploymentConfigFluent.SpecNested<DeploymentConfigBuilder> specBuilder =
      resource.getSpec() != null ? builder.editSpec() : builder.withNewSpec();
  specBuilder.withNewStrategy()
      .withType("Rolling")
      .withNewRollingParams()
      .withTimeoutSeconds(openshiftDeployTimeoutSeconds)
      .endRollingParams()
      .endStrategy();
  specBuilder.endSpec();
  return builder.build();
}
}
/**
 * Scales the named DeploymentConfig to the desired replica count by locating
 * it among the deployments carrying the given labels and replacing it.
 * The amount/timeUnit parameters are accepted for interface compatibility
 * but are not used by this implementation.
 */
@Override
public void scale(String name, Map<String, String> labels, int desiredReplicas, long amount, TimeUnit timeUnit) throws InterruptedException {
  String sName = openshiftName(name);
  for (DeploymentConfig candidate : getDeploymentsByLabel(labels)) {
    if (candidate.getMetadata().getName().equals(sName)) {
      // Rebuild the resource with the new replica count and push it back.
      DeploymentConfig scaled = new DeploymentConfigBuilder(candidate)
          .editSpec().withReplicas(desiredReplicas).endSpec()
          .build();
      openShiftClient.deploymentConfigs().createOrReplace(scaled);
      return;
    }
  }
}
/**
 * Creates a builder backed by the given fluent, seeded from a brand-new,
 * empty DeploymentConfig instance.
 *
 * @param fluent the fluent builder that backs this builder
 */
public DeploymentConfigBuilder( DeploymentConfigFluent<?> fluent ){
  this(fluent, new DeploymentConfig());
}
// Constructor taking a fluent plus a seed instance; its body continues beyond this excerpt.
public DeploymentConfigBuilder( DeploymentConfigFluent<?> fluent , DeploymentConfig instance ){
/**
 * Copy constructor: this builder acts as its own fluent and is seeded with
 * every top-level field of {@code instance} (apiVersion, kind, metadata,
 * spec, status).
 *
 * @param instance          the DeploymentConfig whose state is copied
 * @param validationEnabled whether validation is performed on build
 */
public DeploymentConfigBuilder(DeploymentConfig instance,Boolean validationEnabled){
  this.fluent = this;
  this.withApiVersion(instance.getApiVersion());
  this.withKind(instance.getKind());
  this.withMetadata(instance.getMetadata());
  this.withSpec(instance.getSpec());
  this.withStatus(instance.getStatus());
  this.validationEnabled = validationEnabled;
}
/**
 * Resolves a machine address for DeploymentConfig resources: after the base
 * implementation fails, looks up a pod via the DC's template labels, records
 * the pod name as a sensor, and configures the node address plus the pod IP
 * as a private address on the location spec.
 */
@Override
protected boolean findResourceAddress(LocationSpec<? extends KubernetesMachineLocation> locationSpec, Entity entity, HasMetadata metadata, String resourceType, String resourceName, String namespace) {
  // Give the base implementation the first chance.
  if (super.findResourceAddress(locationSpec, entity, metadata, resourceType, resourceName, namespace)) {
    return true;
  }
  // Only DeploymentConfig resources get the pod-based resolution below.
  if (!resourceType.equals(OpenShiftResource.DEPLOYMENT_CONFIG)) {
    return false;
  }
  DeploymentConfig deploymentConfig = (DeploymentConfig) metadata;
  Map<String, String> labels = deploymentConfig.getSpec().getTemplate().getMetadata().getLabels();
  Pod pod = getPod(namespace, labels);
  entity.sensors().set(OpenShiftPod.KUBERNETES_POD, pod.getMetadata().getName());
  InetAddress node = Networking.getInetAddressWithFixedName(pod.getSpec().getNodeName());
  String podAddress = pod.getStatus().getPodIP();
  locationSpec.configure("address", node);
  locationSpec.configure(SshMachineLocation.PRIVATE_ADDRESSES, ImmutableSet.of(podAddress));
  return true;
}
/**
 * Count the deployments of the owner of the specified integration.
 *
 * @param deployment The specified IntegrationDeployment.
 * @return The number of deployed integrations (excluding the current).
 */
private int countDeployments(IntegrationDeployment deployment) {
  Integration integration = deployment.getSpec();
  String id = Labels.sanitize(integration.getId().orElseThrow(() -> new IllegalStateException("Couldn't find the id of the integration")));
  String username = deployment.getUserId().orElseThrow(() -> new IllegalStateException("Couldn't find the user of the integration"));
  // Select all deployments belonging to this user.
  Map<String, String> labels = new HashMap<>();
  labels.put(OpenShiftService.USERNAME_LABEL, Labels.sanitize(username));
  long active = openShiftService().getDeploymentsByLabel(labels)
      .stream()
      // Exclude the integration being counted for.
      .filter(d -> !id.equals(d.getMetadata().getLabels().get(OpenShiftService.INTEGRATION_ID_LABEL)))
      // Only deployments scaled above zero count as deployed.
      .filter(d -> d.getSpec().getReplicas() > 0)
      .count();
  return (int) active;
}
// Returns the desired replica count of the service's DeploymentConfig,
// or null when the service has no backing DeploymentConfig resource.
private Integer getReplicas(Service service) {
  DeployableScalableResource<DeploymentConfig, DoneableDeploymentConfig> dcr = getDeploymentConfigResource(service);
  if (dcr == null) {
    return null;
  }
  return dcr.get().getSpec().getReplicas();
}
/**
 * Sets the desired replica count on the DeploymentConfig backing the runtime
 * identified by {@code id}, then blocks until the resource reports ready.
 * Does nothing when no DeploymentConfig resource backs the service.
 */
private void setReplicas(String id, int replicas) throws InterruptedException {
  OpenShiftRuntimeId runtimeId = OpenShiftRuntimeId.fromString(id);
  // Look up the service named in the runtime id within its project.
  Service service = delegate.services()
      .inNamespace(runtimeId.project())
      .withName(runtimeId.service())
      .get();
  DeployableScalableResource<DeploymentConfig, DoneableDeploymentConfig> dcr = getDeploymentConfigResource(service);
  if (dcr == null) {
    return;
  }
  // Rewrite the desired count, push the change, and wait for readiness.
  DeploymentConfig dc = dcr.get();
  dc.getSpec().setReplicas(replicas);
  dcr.replace(dc);
  dcr.waitUntilReady(buildTimeout, TimeUnit.MILLISECONDS);
}