/**
 * Returns the replica count requested in the StatefulSet's spec.
 */
@Override
int getDesiredReplicas(StatefulSet item) {
    return item.getSpec().getReplicas();
}
public static boolean isStatefulSetReady(StatefulSet ss) { Utils.checkNotNull(ss, "StatefulSet can't be null."); StatefulSetSpec spec = ss.getSpec(); StatefulSetStatus status =ss.getStatus(); if (status == null || status.getReplicas() == null) { return false; } //Can be true in testing, so handle it to make test writing easier. if (spec == null || spec.getReplicas() == null) { return false; } return spec.getReplicas().intValue() == status.getReplicas(); }
/**
 * Loads the router StatefulSet for this infra instance and builds a RouterCluster view of it,
 * including the applied infra config deserialized from the StatefulSet's annotation (if present).
 *
 * @throws IOException if the applied-infra-config annotation cannot be deserialized
 */
@Override
public RouterCluster getRouterCluster() throws IOException {
    StatefulSet statefulSet = client.apps().statefulSets().withName("qdrouterd-" + infraUuid).get();
    StandardInfraConfig appliedConfig = null;
    if (statefulSet.getMetadata().getAnnotations() != null) {
        String appliedJson = statefulSet.getMetadata().getAnnotations().get(AnnotationKeys.APPLIED_INFRA_CONFIG);
        if (appliedJson != null) {
            appliedConfig = mapper.readValue(appliedJson, StandardInfraConfig.class);
        }
    }
    return new RouterCluster(statefulSet.getMetadata().getName(), statefulSet.getSpec().getReplicas(), appliedConfig);
}
/**
 * Asynchronously deletes the named PersistentVolumeClaim in the StatefulSet's namespace.
 * Deletion is expressed as reconciling the PVC to a null desired state.
 *
 * @param ss      StatefulSet whose namespace hosts the PVC
 * @param pvcName name of the PVC to delete
 * @return a Future completing when the deletion has been reconciled
 */
public Future<Void> deletePvc(StatefulSet ss, String pvcName) {
    Future<Void> result = Future.future();
    // Reconciling to null requests deletion of the PVC.
    pvcOperations.reconcile(ss.getMetadata().getNamespace(), pvcName, null)
            .setHandler(ar -> {
                if (ar.succeeded()) {
                    result.complete();
                } else {
                    result.fail(ar.cause());
                }
            });
    return result;
}
/**
 * Reverts volume-mount changes on the desired StatefulSet's "kafka" container back to the
 * currently-deployed container's mounts, then rebuilds the KAFKA_LOG_DIRS env var from the
 * reverted mount paths. Mutates {@code desired} in place (both its container and its spec).
 */
private void revertVolumeChanges(StatefulSet current, StatefulSet desired) {
    // NOTE(review): findFirst().get() assumes both StatefulSets always contain a container
    // named "kafka" — confirm callers guarantee this, otherwise this throws NoSuchElementException.
    Container currentKafka = current.getSpec().getTemplate().getSpec().getContainers().stream().filter(c -> c.getName().equals("kafka")).findFirst().get();
    Container desiredKafka = desired.getSpec().getTemplate().getSpec().getContainers().stream().filter(c -> c.getName().equals("kafka")).findFirst().get();
    // Discard the desired mounts in favor of what is currently deployed.
    desiredKafka.setVolumeMounts(currentKafka.getVolumeMounts());
    // Recompute KAFKA_LOG_DIRS as a comma-joined list of the (now reverted) mount paths
    // whose path contains the data volume name.
    // NOTE(review): editFirstContainer assumes the kafka container is first in the list — confirm.
    StatefulSet updated = new StatefulSetBuilder(desired)
            .editSpec()
                .editTemplate()
                    .editSpec()
                        .editFirstContainer()
                            .editMatchingEnv(e -> e.getName().equals(KafkaCluster.ENV_VAR_KAFKA_LOG_DIRS))
                                .withValue(desiredKafka.getVolumeMounts().stream()
                                        .filter(vm -> vm.getMountPath().contains(AbstractModel.VOLUME_NAME))
                                        .map(vm -> vm.getMountPath())
                                        .collect(Collectors.joining(",")))
                            .endEnv()
                        .endContainer()
                    .endSpec()
                .endTemplate()
            .endSpec()
            .build();
    desired.setSpec(updated.getSpec());
}
/**
 * Asynchronously perform a rolling update of all the pods in the StatefulSet identified by the given
 * {@code namespace} and {@code name}, returning a Future that will complete when the rolling update
 * is complete. Starting with pod 0, each pod will be deleted and re-created automatically by the ReplicaSet,
 * once the pod has been recreated then given {@code isReady} function will be polled until it returns true,
 * before the process proceeds with the pod with the next higher number.
 */
public Future<Void> maybeRollingUpdate(StatefulSet ss, Predicate<Pod> podRestart) {
    String namespace = ss.getMetadata().getNamespace();
    String name = ss.getMetadata().getName();
    final int replicaCount = ss.getSpec().getReplicas();
    log.debug("Considering rolling update of {}/{}", namespace, name);
    // Chain the per-pod restarts so pod N only restarts after pod N-1 is done.
    Future<Void> chain = Future.succeededFuture();
    for (int podIndex = 0; podIndex < replicaCount; podIndex++) {
        final String podName = name + "-" + podIndex;
        chain = chain.compose(v -> maybeRestartPod(ss, podName, podRestart));
    }
    return chain;
}
/**
 * Determines whether a pod already reflects the StatefulSet's current generation,
 * by comparing the generation annotations on both resources.
 *
 * @return true when the pod's generation annotation matches the StatefulSet's
 */
private boolean isPodUpToDate(StatefulSet ss, Pod pod) {
    final int ssGen = StatefulSetOperator.getSsGeneration(ss);
    final int podGen = StatefulSetOperator.getPodGeneration(pod);
    log.debug("Rolling update of {}/{}: pod {} has {}={}; ss has {}={}",
            ss.getMetadata().getNamespace(),
            ss.getMetadata().getName(),
            pod.getMetadata().getName(),
            StatefulSetOperator.ANNO_STRIMZI_IO_GENERATION, podGen,
            StatefulSetOperator.ANNO_STRIMZI_IO_GENERATION, ssGen);
    return podGen == ssGen;
}
// Current replica count as reported in the StatefulSet's status.
// NOTE(review): assumes getStatus() is non-null — confirm callers only pass live, server-returned objects.
@Override int getCurrentReplicas(StatefulSet current) { return current.getStatus().getReplicas(); }
/**
 * Returns the replica count declared in the resource's spec.
 */
@Override
protected int getReplicas(StatefulSet obj) {
    return obj.getSpec().getReplicas();
}
// Fragment of a ZooKeeper rolling-update routine (method header and remainder are outside this view).
String namespace = ss.getMetadata().getNamespace();
String name = ss.getMetadata().getName();
final int replicas = ss.getSpec().getReplicas();
log.debug("Considering rolling update of {}/{}", namespace, name);
Future<Void> f = Future.succeededFuture();
// Tracks whether at least one ZK pod needs a restart.
boolean zkRoll = false;
ArrayList<Pod> pods = new ArrayList<>();
String cluster = ss.getMetadata().getLabels().get(Labels.STRIMZI_CLUSTER_LABEL);
for (int i = 0; i < replicas; i++) {
    // Pod name is derived from the cluster label and ordinal via KafkaResources.
    Pod pod = podOperations.get(ss.getMetadata().getNamespace(), KafkaResources.zookeeperPodName(cluster, i));
    // |= (not ||) so the predicate is evaluated for every pod — no short-circuiting.
    zkRoll |= podRestart.test(pod);
    pods.add(pod);
/**
 * Verifies that every StatefulSet required by the address space is ready; when some are not,
 * marks the address space not-ready and records which sets are missing.
 */
private void checkStatefulSetsReady(AddressSpace addressSpace, List<HasMetadata> requiredResources) {
    Set<String> readyStatefulSets = kubernetes.getReadyStatefulSets(addressSpace).stream()
            .map(ss -> ss.getMetadata().getName())
            .collect(Collectors.toSet());
    Set<String> requiredStatefulSets = requiredResources.stream()
            .filter(KubernetesHelper::isStatefulSet)
            .map(resource -> resource.getMetadata().getName())
            .collect(Collectors.toSet());
    if (!readyStatefulSets.containsAll(requiredStatefulSets)) {
        // Report exactly which required sets are not yet ready.
        Set<String> missing = new HashSet<>(requiredStatefulSets);
        missing.removeAll(readyStatefulSets);
        addressSpace.getStatus().setReady(false);
        addressSpace.getStatus().appendMessage("The following stateful sets are not ready: " + missing);
    }
}
/**
 * Returns the ready-replica count of the first StatefulSet or Deployment in the list,
 * or 0 when none is present or its status/readyReplicas is missing.
 */
private int findReadyReplicas(List<HasMetadata> items) {
    for (HasMetadata resource : items) {
        if (resource instanceof StatefulSet) {
            StatefulSetStatus status = ((StatefulSet) resource).getStatus();
            return (status == null || status.getReadyReplicas() == null) ? 0 : status.getReadyReplicas();
        }
        if (resource instanceof Deployment) {
            DeploymentStatus status = ((Deployment) resource).getStatus();
            return (status == null || status.getReadyReplicas() == null) ? 0 : status.getReadyReplicas();
        }
    }
    return 0;
}
/**
 * Lists the pods selected by the StatefulSet's label selector, applying both
 * matchLabels and matchExpressions terms.
 *
 * BUG FIX: the previous version discarded the return values of withLabels/withLabelIn/
 * withLabelNotIn/withLabel/withoutLabel. The fabric8 filter methods return the filtered
 * operation, so ignoring the result can mean the selector is never applied and list()
 * returns every pod in the namespace. Reassigning podLister is correct regardless of
 * whether the client implementation mutates in place or returns a new instance.
 */
@Override
protected PodList listSelectedPods(StatefulSet obj) {
    FilterWatchListDeletable<Pod, PodList, Boolean, Watch, Watcher<Pod>> podLister = pods().inNamespace(namespace);
    if (obj.getSpec().getSelector().getMatchLabels() != null) {
        podLister = podLister.withLabels(obj.getSpec().getSelector().getMatchLabels());
    }
    if (obj.getSpec().getSelector().getMatchExpressions() != null) {
        for (LabelSelectorRequirement req : obj.getSpec().getSelector().getMatchExpressions()) {
            switch (req.getOperator()) {
                case "In":
                    podLister = podLister.withLabelIn(req.getKey(), req.getValues().toArray(new String[]{}));
                    break;
                case "NotIn":
                    podLister = podLister.withLabelNotIn(req.getKey(), req.getValues().toArray(new String[]{}));
                    break;
                case "DoesNotExist":
                    podLister = podLister.withoutLabel(req.getKey());
                    break;
                case "Exists":
                    podLister = podLister.withLabel(req.getKey());
                    break;
            }
        }
    }
    return podLister.list();
}
/**
 * Extracts the environment variables of the "kafka" container in the given StatefulSet
 * as a name-to-value map, preserving declaration order.
 *
 * @throws KafkaUpgradeException if the StatefulSet has no container named "kafka"
 */
public static Map<String, String> getKafkaContainerEnv(StatefulSet ss) {
    for (Container container : ss.getSpec().getTemplate().getSpec().getContainers()) {
        if (!"kafka".equals(container.getName())) {
            continue;
        }
        // LinkedHashMap keeps the env vars in their declared order.
        LinkedHashMap<String, String> envMap =
                new LinkedHashMap<>(container.getEnv() == null ? 2 : container.getEnv().size());
        if (container.getEnv() != null) {
            for (EnvVar envVar : container.getEnv()) {
                envMap.put(envVar.getName(), envVar.getValue());
            }
        }
        return envMap;
    }
    throw new KafkaUpgradeException("Could not find 'kafka' container in StatefulSet " + ss.getMetadata().getName());
}
/**
 * Performs manual pod cleaning for the ZooKeeper StatefulSet: fetches the set asynchronously
 * and, when it exists, deletes its pods and their PVCs.
 *
 * @return a Future yielding this ReconciliationState when cleaning has been initiated or skipped
 */
Future<ReconciliationState> zkManualPodCleaning() {
    String reason = "manual pod cleaning";
    Future<StatefulSet> ssFuture = zkSetOperations.getAsync(namespace, ZookeeperCluster.zookeeperClusterName(name));
    // A null future means there is nothing to clean.
    if (ssFuture == null) {
        return Future.succeededFuture(this);
    }
    return ssFuture.compose(ss -> {
        if (ss == null) {
            return Future.succeededFuture();
        }
        log.debug("{}: Cleaning Pods for StatefulSet {} to {}", reconciliation, ss.getMetadata().getName(), reason);
        return zkSetOperations.maybeDeletePodAndPvc(ss);
    }).map(ignored -> this);
}
private static boolean isReady(StatefulSet statefulSet) { // TODO: Assuming at least one replica is ok Integer readyReplicas = statefulSet.getStatus().getReadyReplicas(); return readyReplicas != null && readyReplicas >= 1; }
/**
 * Replaces the image of the StatefulSet's single container and triggers a rolling update.
 * Only single-container pod templates are supported.
 *
 * @param image the new container image reference
 * @return the StatefulSet after the rolling update completes
 * @throws KubernetesClientException if the StatefulSet is missing, has no containers,
 *         or has more than one container
 */
@Override
public StatefulSet updateImage(String image) {
    StatefulSet current = get();
    if (current == null) {
        throw new KubernetesClientException("Existing StatefulSet doesn't exist");
    }
    int containerCount = current.getSpec().getTemplate().getSpec().getContainers().size();
    if (containerCount > 1) {
        throw new KubernetesClientException("Image update is not supported for multicontainer pods");
    }
    if (containerCount == 0) {
        throw new KubernetesClientException("Pod has no containers!");
    }
    // Rebuild the single container with the new image.
    Container patchedContainer = new ContainerBuilder(current.getSpec().getTemplate().getSpec().getContainers().iterator().next())
            .withImage(image)
            .build();
    // Clear resourceVersion so the update is not rejected as stale.
    StatefulSetBuilder updatedBuilder = new StatefulSetBuilder(current);
    updatedBuilder.editMetadata().withResourceVersion(null).endMetadata()
            .editSpec().editTemplate().editSpec()
            .withContainers(Collections.singletonList(patchedContainer))
            .endSpec().endTemplate().endSpec();
    return new StatefulSetRollingUpdater(client, config, namespace).rollUpdate(current, updatedBuilder.build());
}
/**
 * Fragment: beginning of a method that, per its log message, handles manual deletion and
 * restart of the StatefulSet's pods (presumably together with their PVCs, per the name —
 * the remainder of the body is outside this view).
 */
public Future<Void> maybeDeletePodAndPvc(StatefulSet ss) {
    String namespace = ss.getMetadata().getNamespace();
    String name = ss.getMetadata().getName();
    final int replicas = ss.getSpec().getReplicas();
    log.debug("Considering manual deletion and restart of pods for {}/{}", namespace, name);
    // Starting point for a chain of per-pod operations (continued past this view).
    Future<Void> f = Future.succeededFuture();
    Map<String, String> ssLabels = ss.getMetadata().getLabels();
/**
 * Performs manual pod cleaning for the Kafka StatefulSet: fetches the set asynchronously
 * and, when it exists, deletes its pods and their PVCs.
 *
 * @return a Future yielding this ReconciliationState when cleaning has been initiated or skipped
 */
Future<ReconciliationState> kafkaManualPodCleaning() {
    String reason = "manual pod cleaning";
    Future<StatefulSet> ssFuture = kafkaSetOperations.getAsync(namespace, KafkaCluster.kafkaClusterName(name));
    // A null future means there is nothing to clean.
    if (ssFuture == null) {
        return Future.succeededFuture(this);
    }
    return ssFuture.compose(ss -> {
        if (ss == null) {
            return Future.succeededFuture();
        }
        log.debug("{}: Cleaning Pods for StatefulSet {} to {}", reconciliation, ss.getMetadata().getName(), reason);
        return kafkaSetOperations.maybeDeletePodAndPvc(ss);
    }).map(ignored -> this);
}