/**
 * Reports the replica count requested in the StatefulSet's spec.
 *
 * @param item the StatefulSet to inspect
 * @return the {@code spec.replicas} value
 */
@Override
int getDesiredReplicas(StatefulSet item) {
    return item.getSpec().getReplicas();
}
/**
 * Reads the desired replica count from the StatefulSet's spec.
 *
 * @param obj the StatefulSet to inspect
 * @return the {@code spec.replicas} value
 */
@Override
protected int getReplicas(StatefulSet obj) {
    return obj.getSpec().getReplicas();
}
public static boolean isStatefulSetReady(StatefulSet ss) { Utils.checkNotNull(ss, "StatefulSet can't be null."); StatefulSetSpec spec = ss.getSpec(); StatefulSetStatus status =ss.getStatus(); if (status == null || status.getReplicas() == null) { return false; } //Can be true in testing, so handle it to make test writing easier. if (spec == null || spec.getReplicas() == null) { return false; } return spec.getReplicas().intValue() == status.getReplicas(); }
/**
 * Lists the pods matched by the StatefulSet's label selector, honouring both
 * {@code matchLabels} and {@code matchExpressions}.
 *
 * <p>Bug fix: the filter methods ({@code withLabels}, {@code withLabelIn}, ...)
 * return the filtered operation, so their result must be kept. The previous code
 * discarded the return values, which can silently drop the label restrictions and
 * list every pod in the namespace.
 *
 * @param obj the StatefulSet whose selector is applied
 * @return the pods selected by the StatefulSet's selector
 */
@Override
protected PodList listSelectedPods(StatefulSet obj) {
    FilterWatchListDeletable<Pod, PodList, Boolean, Watch, Watcher<Pod>> podLister = pods().inNamespace(namespace);
    if (obj.getSpec().getSelector().getMatchLabels() != null) {
        podLister = podLister.withLabels(obj.getSpec().getSelector().getMatchLabels());
    }
    if (obj.getSpec().getSelector().getMatchExpressions() != null) {
        for (LabelSelectorRequirement req : obj.getSpec().getSelector().getMatchExpressions()) {
            switch (req.getOperator()) {
                case "In":
                    podLister = podLister.withLabelIn(req.getKey(), req.getValues().toArray(new String[]{}));
                    break;
                case "NotIn":
                    podLister = podLister.withLabelNotIn(req.getKey(), req.getValues().toArray(new String[]{}));
                    break;
                case "DoesNotExist":
                    podLister = podLister.withoutLabel(req.getKey());
                    break;
                case "Exists":
                    podLister = podLister.withLabel(req.getKey());
                    break;
            }
        }
    }
    return podLister.list();
}
/**
 * Rolls out a new container image for a single-container StatefulSet.
 *
 * <p>Builds a copy of the current set with the image replaced and the resource
 * version cleared, then hands both versions to a rolling updater.
 *
 * @param image the new container image to roll out
 * @return the StatefulSet resulting from the rolling update
 * @throws KubernetesClientException if the set does not exist, has no containers,
 *         or has more than one container
 */
@Override
public StatefulSet updateImage(String image) {
    StatefulSet current = get();
    if (current == null) {
        throw new KubernetesClientException("Existing StatefulSet doesn't exist");
    }
    int containerCount = current.getSpec().getTemplate().getSpec().getContainers().size();
    if (containerCount == 0) {
        throw new KubernetesClientException("Pod has no containers!");
    }
    if (containerCount > 1) {
        throw new KubernetesClientException("Image update is not supported for multicontainer pods");
    }
    Container patched = new ContainerBuilder(current.getSpec().getTemplate().getSpec().getContainers().get(0))
            .withImage(image)
            .build();
    StatefulSetBuilder builder = new StatefulSetBuilder(current);
    builder.editMetadata()
                // Clear the resource version so the server assigns a fresh one on update.
                .withResourceVersion(null)
            .endMetadata()
            .editSpec()
                .editTemplate()
                    .editSpec()
                        .withContainers(Collections.singletonList(patched))
                    .endSpec()
                .endTemplate()
            .endSpec();
    return new StatefulSetRollingUpdater(client, config, namespace).rollUpdate(current, builder.build());
}
/**
 * Stamps the given generation number onto the desired StatefulSet's pod template.
 *
 * @param desired the StatefulSet whose template annotations are updated in place
 * @param nextGeneration the generation number to record
 */
private void setGeneration(StatefulSet desired, int nextGeneration) {
    Map<String, String> templateAnnotations = Annotations.annotations(desired.getSpec().getTemplate());
    // Drop the deprecated key so only the current annotation remains.
    templateAnnotations.remove(ANNO_OP_STRIMZI_IO_GENERATION);
    templateAnnotations.put(ANNO_STRIMZI_IO_GENERATION, Integer.toString(nextGeneration));
}
/**
 * Looks up the current desired replica count of the named StatefulSet.
 *
 * @param namespace the namespace to look in
 * @param name the StatefulSet name
 * @return the {@code spec.replicas} value, or null if the StatefulSet does not exist
 */
@Override
protected Integer currentScale(String namespace, String name) {
    StatefulSet statefulSet = get(namespace, name);
    return statefulSet == null ? null : statefulSet.getSpec().getReplicas();
}
/**
 * Reads the generation annotation from a StatefulSet's pod template.
 *
 * @param resource the StatefulSet to read, may be null
 * @return the annotated generation, or {@code NO_GENERATION} when the resource is
 *         null or carries no generation annotation
 */
public static int getSsGeneration(StatefulSet resource) {
    return resource == null
            ? NO_GENERATION
            : Annotations.intAnnotation(resource.getSpec().getTemplate(),
                    ANNO_STRIMZI_IO_GENERATION, NO_GENERATION, ANNO_OP_STRIMZI_IO_GENERATION);
}
/**
 * Convenience accessor for the metadata of a StatefulSet's pod template.
 *
 * @param resource the StatefulSet to read
 * @return the pod template's metadata
 */
private static ObjectMeta templateMetadata(StatefulSet resource) {
    return resource.getSpec()
            .getTemplate()
            .getMetadata();
}
/**
 * Returns a future that completes when all the pods [0..replicas-1] in the given
 * StatefulSet are ready, polling each pod's readiness at the given interval.
 *
 * @param namespace the namespace the pods live in
 * @param desired the StatefulSet whose pods are awaited
 * @param pollInterval how often each pod's readiness is polled, in ms
 * @param operationTimeoutMs how long to wait per pod before giving up, in ms
 * @return a future completing once every pod reports ready
 */
protected Future<?> podReadiness(String namespace, StatefulSet desired, long pollInterval, long operationTimeoutMs) {
    final int replicas = desired.getSpec().getReplicas();
    // Raw List<Future> is what Vert.x CompositeFuture.join accepts here.
    List<Future> readinessFutures = new ArrayList<>(replicas);
    for (int podId = 0; podId < replicas; podId++) {
        String podName = getPodName(desired, podId);
        readinessFutures.add(podOperations.readiness(namespace, podName, pollInterval, operationTimeoutMs));
    }
    return CompositeFuture.join(readinessFutures);
}
/**
 * Advances the desired StatefulSet's generation annotation to one more than the
 * generation currently recorded on {@code current}'s pod template.
 *
 * @param current the StatefulSet currently deployed, whose generation is read
 * @param desired the StatefulSet to stamp with the incremented generation
 */
protected void incrementGeneration(StatefulSet current, StatefulSet desired) {
    int currentGeneration = Annotations.intAnnotation(current.getSpec().getTemplate(),
            ANNO_STRIMZI_IO_GENERATION, INIT_GENERATION, ANNO_OP_STRIMZI_IO_GENERATION);
    setGeneration(desired, currentGeneration + 1);
}
/**
 * Finds the replica count of the first StatefulSet or Deployment in the list.
 *
 * @param items resources to scan, in order
 * @return the first matching resource's {@code spec.replicas}, or 0 if the list
 *         contains neither a StatefulSet nor a Deployment
 */
private int findReplicas(List<HasMetadata> items) {
    for (HasMetadata item : items) {
        if (item instanceof StatefulSet) {
            return ((StatefulSet) item).getSpec().getReplicas();
        }
        if (item instanceof Deployment) {
            return ((Deployment) item).getSpec().getReplicas();
        }
    }
    return 0;
}
/**
 * Generates the ZooKeeper StatefulSet, annotates its pod template with the current
 * cluster CA certificate generation, and reconciles it.
 *
 * @return a future of the reconciliation state with the ZooKeeper diff applied
 */
Future<ReconciliationState> zkStatefulSet() {
    StatefulSet zkSs = zkCluster.generateStatefulSet(isOpenShift);
    String clusterCaGeneration = String.valueOf(getCaCertGeneration(this.clusterCa));
    Annotations.annotations(zkSs.getSpec().getTemplate())
            .put(Ca.ANNO_STRIMZI_IO_CLUSTER_CA_CERT_GENERATION, clusterCaGeneration);
    return withZkDiff(zkSetOperations.reconcile(namespace, zkCluster.getName(), zkSs));
}
/**
 * Generates the Kafka StatefulSet, records the external addresses on the cluster
 * model, annotates the pod template with the cluster and clients CA certificate
 * generations, and reconciles the result.
 *
 * @return a future of the reconciliation state with the Kafka diff applied
 */
Future<ReconciliationState> kafkaStatefulSet() {
    kafkaCluster.setExternalAddresses(kafkaExternalAddresses);
    StatefulSet statefulSet = kafkaCluster.generateStatefulSet(isOpenShift);
    PodTemplateSpec podTemplate = statefulSet.getSpec().getTemplate();
    String clusterCaGeneration = String.valueOf(getCaCertGeneration(this.clusterCa));
    String clientsCaGeneration = String.valueOf(getCaCertGeneration(this.clientsCa));
    Annotations.annotations(podTemplate).put(Ca.ANNO_STRIMZI_IO_CLUSTER_CA_CERT_GENERATION, clusterCaGeneration);
    Annotations.annotations(podTemplate).put(Ca.ANNO_STRIMZI_IO_CLIENTS_CA_CERT_GENERATION, clientsCaGeneration);
    return withKafkaDiff(kafkaSetOperations.reconcile(namespace, kafkaCluster.getName(), statefulSet));
}
/** * Overridden to not cascade to dependent resources (e.g. pods). * * {@inheritDoc} */ @Override protected Future<ReconcileResult<StatefulSet>> internalPatch(String namespace, String name, StatefulSet current, StatefulSet desired) { if (shouldIncrementGeneration(current, desired)) { incrementGeneration(current, desired); } else { setGeneration(desired, getSsGeneration(current)); } // Don't scale via patch desired.getSpec().setReplicas(current.getSpec().getReplicas()); if (log.isTraceEnabled()) { log.trace("Patching {} {}/{} to match desired state {}", resourceKind, namespace, name, desired); } else { log.debug("Patching {} {}/{}", resourceKind, namespace, name); } return super.internalPatch(namespace, name, current, desired, false); }
/**
 * Copies the current "kafka" container's volume mounts onto the desired StatefulSet
 * and rebuilds the KAFKA_LOG_DIRS env var from those mounts, then replaces the
 * desired spec with the rebuilt one — effectively reverting volume changes.
 *
 * <p>NOTE(review): both {@code findFirst().get()} calls throw
 * {@code NoSuchElementException} if no container named "kafka" exists — assumes the
 * container is always present; TODO confirm.
 * NOTE(review): {@code editFirstContainer()} edits the first container while the
 * mounts were taken from the one named "kafka" — presumably these are the same
 * container; verify against the pod template layout.
 */
private void revertVolumeChanges(StatefulSet current, StatefulSet desired) { Container currentKafka = current.getSpec().getTemplate().getSpec().getContainers().stream().filter(c -> c.getName().equals("kafka")).findFirst().get(); Container desiredKafka = desired.getSpec().getTemplate().getSpec().getContainers().stream().filter(c -> c.getName().equals("kafka")).findFirst().get(); desiredKafka.setVolumeMounts(currentKafka.getVolumeMounts()); StatefulSet updated = new StatefulSetBuilder(desired) .editSpec() .editTemplate() .editSpec() .editFirstContainer() .editMatchingEnv(e -> e.getName().equals(KafkaCluster.ENV_VAR_KAFKA_LOG_DIRS)) .withValue(desiredKafka.getVolumeMounts().stream() .filter(vm -> vm.getMountPath().contains(AbstractModel.VOLUME_NAME)) .map(vm -> vm.getMountPath()) .collect(Collectors.joining(","))) .endEnv() .endContainer() .endSpec() .endTemplate() .endSpec() .build(); desired.setSpec(updated.getSpec()); }
/**
 * Asynchronously performs a rolling update of the pods in the given StatefulSet,
 * returning a Future that completes when the update is complete.
 *
 * <p>Pods 0..replicas-1 are processed strictly in order: each pod name is passed to
 * {@code maybeRestartPod} together with the {@code podRestart} predicate, and the
 * next pod is only considered once the previous future completes. The restart
 * decision and readiness handling live in {@code maybeRestartPod} (not visible
 * here); pod re-creation after deletion is handled by the StatefulSet controller,
 * not a ReplicaSet.
 *
 * @param ss the StatefulSet whose pods may be rolled
 * @param podRestart predicate deciding whether a given pod needs a restart
 * @return a future completing when every pod has been considered
 */
public Future<Void> maybeRollingUpdate(StatefulSet ss, Predicate<Pod> podRestart) { String namespace = ss.getMetadata().getNamespace(); String name = ss.getMetadata().getName(); final int replicas = ss.getSpec().getReplicas(); log.debug("Considering rolling update of {}/{}", namespace, name); Future<Void> f = Future.succeededFuture(); for (int i = 0; i < replicas; i++) { String podName = name + "-" + i; f = f.compose(ignored -> maybeRestartPod(ss, podName, podRestart)); } return f; }
/**
 * Extracts the environment variables of the "kafka" container as a name-to-value
 * map, preserving their declaration order.
 *
 * @param ss the StatefulSet whose pod template is searched
 * @return the kafka container's env vars in declaration order (empty if it has none)
 * @throws KafkaUpgradeException if the pod template has no container named "kafka"
 */
public static Map<String, String> getKafkaContainerEnv(StatefulSet ss) {
    for (Container container : ss.getSpec().getTemplate().getSpec().getContainers()) {
        if (!"kafka".equals(container.getName())) {
            continue;
        }
        int initialCapacity = container.getEnv() == null ? 2 : container.getEnv().size();
        // LinkedHashMap keeps the env vars in declaration order.
        LinkedHashMap<String, String> env = new LinkedHashMap<>(initialCapacity);
        if (container.getEnv() != null) {
            for (EnvVar envVar : container.getEnv()) {
                env.put(envVar.getName(), envVar.getValue());
            }
        }
        return env;
    }
    throw new KafkaUpgradeException("Could not find 'kafka' container in StatefulSet " + ss.getMetadata().getName());
}
/**
 * Fetches the router StatefulSet for this infra and builds a RouterCluster view of
 * it, deserializing the applied infra config from its annotation when present.
 *
 * @return the router cluster with its name, replica count and applied infra config
 *         (null when the annotation is absent)
 * @throws IOException if the applied-infra-config annotation cannot be parsed
 */
@Override
public RouterCluster getRouterCluster() throws IOException {
    StatefulSet routerSet = client.apps().statefulSets().withName("qdrouterd-" + infraUuid).get();
    StandardInfraConfig appliedConfig = null;
    if (routerSet.getMetadata().getAnnotations() != null) {
        String configJson = routerSet.getMetadata().getAnnotations().get(AnnotationKeys.APPLIED_INFRA_CONFIG);
        if (configJson != null) {
            appliedConfig = mapper.readValue(configJson, StandardInfraConfig.class);
        }
    }
    return new RouterCluster(routerSet.getMetadata().getName(), routerSet.getSpec().getReplicas(), appliedConfig);
}
/**
 * Scale up is divided by scaling up Zookeeper cluster in steps.
 * Scaling up from N to M (N > 0 and M > N) replicas is done in M-N steps.
 * Each step performs scale up by one replica and full rolling update of Zookeeper cluster.
 * This approach ensures a valid configuration of each Zk pod.
 * Together with modified `maybeRollingUpdate` the quorum is not lost after the scale up operation is performed.
 * There is one special case of scaling from standalone (single one) Zookeeper pod.
 * In this case quorum cannot be preserved.
 */
Future<ReconciliationState> zkScaleUpStep() {
    Future<StatefulSet> futss = zkSetOperations.getAsync(namespace, ZookeeperCluster.zookeeperClusterName(name));
    return withVoid(futss.map(ss -> ss == null ? 0 : ss.getSpec().getReplicas())
            .compose(currentReplicas -> {
                // Grow by at most one replica per step so the quorum is preserved.
                if (currentReplicas > 0 && zkCluster.getReplicas() > currentReplicas) {
                    zkCluster.setReplicas(currentReplicas + 1);
                }
                // Bug fix: previously returned getReplicas() + 1, over-reporting the
                // stepped target count (setReplicas above already applied the +1 step).
                Future<Integer> result = Future.succeededFuture(zkCluster.getReplicas());
                return result;
            }));
}