public KindAndName(HasMetadata item) { this(io.fabric8.maven.core.util.kubernetes.KubernetesHelper.getKind(item), KubernetesHelper.getName(item)); }
private Map<String, ImageStream> readAlreadyExtractedImageStreams(File target) throws IOException { // If it already exists, read in the file and use it for update Map<String, ImageStream> imageStreams = new HashMap<>(); if (target.length() > 0) { for (HasMetadata entity : KubernetesResourceUtil.loadResources(target)) { if ("ImageStream".equals(KubernetesHelper.getKind(entity))) { imageStreams.put(entity.getMetadata().getName(), (ImageStream) entity); } // Ignore all other kind of entities. There shouldn't be any included anyway } } return imageStreams; }
private void copyTemplateResourcesToTemplatesDir(File templatesDir, Template template) throws MojoExecutionException { List<HasMetadata> objects = template.getObjects(); if (objects != null) { for (HasMetadata object : objects) { String name = KubernetesResourceUtil.getNameWithSuffix(KubernetesHelper.getName(object), KubernetesHelper.getKind(object)) + ".yaml"; File outFile = new File(templatesDir, name); try { ResourceUtil.save(outFile, object); } catch (IOException e) { throw new MojoExecutionException("Failed to save template " + outFile + ": " + e, e); } } } }
private void processArtifactSetResources(Set<URL> artifactSet, Function<List<HasMetadata>, Void> function) { for (URL url : artifactSet) { try { InputStream is = url.openStream(); if (is != null) { log.debug("Processing Kubernetes YAML in at: %s", url); KubernetesList resources = new ObjectMapper(new YAMLFactory()).readValue(is, KubernetesList.class); List<HasMetadata> items = resources.getItems(); if (items.size() == 0 && Objects.equals("Template", resources.getKind())) { is = url.openStream(); Template template = new ObjectMapper(new YAMLFactory()).readValue(is, Template.class); if (template != null) { items.add(template); } } for (HasMetadata item : items) { KubernetesResourceUtil.setSourceUrlAnnotationIfNotSet(item, url.toString()); log.debug(" found %s %s", KubernetesHelper.getKind(item), KubernetesHelper.getName(item)); } function.apply(items); } } catch (IOException e) { getLog().debug("Skipping %s: %s", url, e); } } }
File namespaceDir = new File(logJsonDir, namespace); namespaceDir.mkdirs(); String kind = getKind(entity); String name = getName(entity); if (StringUtils.isNotBlank(kind)) {
private boolean updateImageName(HasMetadata entity, PodTemplateSpec template, String imagePrefix, String imageName) { boolean answer = false; PodSpec spec = template.getSpec(); if (spec != null) { List<Container> containers = spec.getContainers(); if (containers != null) { for (Container container : containers) { String image = container.getImage(); if (image != null && image.startsWith(imagePrefix)) { container.setImage(imageName); log.info("Updating " + KubernetesHelper.getKind(entity) + " " + KubernetesHelper.getName(entity) + " to use image: " + imageName); answer = true; } } } } return answer; } }
public static void deleteEntities(KubernetesClient kubernetes, String namespace, Set<HasMetadata> entities, String s2iBuildNameSuffix, Logger log) { List<HasMetadata> list = new ArrayList<>(entities); // For OpenShift cluster, also delete s2i buildconfig OpenShiftClient openshiftClient = OpenshiftHelper.asOpenShiftClient(kubernetes); if (openshiftClient != null) { for (HasMetadata entity : list) { if ("ImageStream".equals(KubernetesHelper.getKind(entity))) { ImageName imageName = new ImageName(entity.getMetadata().getName()); String buildName = getS2IBuildName(imageName, s2iBuildNameSuffix); log.info("Deleting resource BuildConfig " + namespace + "/" + buildName); openshiftClient.buildConfigs().inNamespace(namespace).withName(buildName).delete(); } } } // lets delete in reverse order Collections.reverse(list); for (HasMetadata entity : list) { log.info("Deleting resource " + KubernetesHelper.getKind(entity) + " " + namespace + "/" + KubernetesHelper.getName(entity)); kubernetes.resource(entity).inNamespace(namespace).cascading(true).delete(); } }
log.info("Enabling debug on " + KubernetesHelper.getKind(entity) + " " + KubernetesHelper.getName( entity) + " due to the property: " + ENABLE_DEBUG_MAVEN_PROPERTY); return true;
protected <T extends HasMetadata,L,D> void doCreateResource(T resource, String namespace , String sourceName, MixedOperation<T, L, D, ? extends Resource<T, D>> resources) throws Exception { String kind = getKind(resource); log.info("Creating a " + kind + " from " + sourceName + " namespace " + namespace + " name " + getName(resource)); try { Object answer; if (StringUtils.isNotBlank(namespace)) { answer = resources.inNamespace(namespace).create(resource); } else { answer = resources.inNamespace(getNamespace()).create(resource); } logGeneratedEntity("Created " + kind + ": ", namespace, resource, answer); } catch (Exception e) { onApplyError("Failed to create " + kind + " from " + sourceName + ". " + e + ". " + resource, e); } }
container.setEnv(env); if (container.getReadinessProbe() != null) { log.info("Readiness probe will be disabled on " + KubernetesHelper.getKind(entity) + " " + getName(entity) + " to allow attaching a remote debugger during suspension"); container.setReadinessProbe(null); log.info("Enabling debug on " + KubernetesHelper.getKind(entity) + " " + getName(entity)); return true;
private KubernetesList removeOpenShiftObjects(KubernetesList list) { KubernetesListBuilder ret = new KubernetesListBuilder(); ret.withMetadata(list.getMetadata()); for (HasMetadata item : list.getItems()) { if (!isOpenshiftItem(item)) { ret.addToItems(item); } else { log.verbose("kubernetes.yml: Removed OpenShift specific resource '%s' of type %s", KubernetesHelper.getName(item), KubernetesHelper.getKind(item)); } } return ret.build(); }
protected static HasMetadata mergeConfigMaps(ConfigMap cm1, ConfigMap cm2, Logger log, boolean switchOnLocalCustomisation) { ConfigMap cm1OrCopy = cm1; if (!switchOnLocalCustomisation) { // lets copy the original to avoid modifying it cm1OrCopy = new ConfigMapBuilder(cm1OrCopy).build(); } log.info("Merging 2 resources for " + KubernetesHelper.getKind(cm1OrCopy) + " " + KubernetesHelper.getName(cm1OrCopy) + " from " + getSourceUrlAnnotation(cm1OrCopy) + " and " + getSourceUrlAnnotation(cm2) + " and removing " + getSourceUrlAnnotation(cm1OrCopy)); cm1OrCopy.setData(mergeMapsAndRemoveEmptyStrings(cm2.getData(), cm1OrCopy.getData())); mergeMetadata(cm1OrCopy, cm2); return cm1OrCopy; }
public void applyImageStream(ImageStream entity, String sourceName) { OpenShiftClient openShiftClient = getOpenShiftClient(); if (openShiftClient != null) { String kind = getKind(entity); String name = getName(entity); String namespace = getNamespace(); try { Resource<ImageStream, DoneableImageStream> resource = openShiftClient.imageStreams().inNamespace(namespace).withName(name); ImageStream old = resource.get(); if (old == null) { log.info("Creating " + kind + " " + name + " from " + sourceName); resource.create(entity); } else { log.info("Updating " + kind + " " + name + " from " + sourceName); copyAllImageStreamTags(entity, old); patchService.compareAndPatchEntity(namespace, entity, old); } openShiftClient.resource(entity).inNamespace(namespace).createOrReplace(); } catch (Exception e) { onApplyError("Failed to create " + kind + " from " + sourceName + ". " + e, e); } } }
public static void resizeApp(KubernetesClient kubernetes, String namespace, Set<HasMetadata> entities, int replicas, Logger log) { for (HasMetadata entity : entities) { String name = KubernetesHelper.getName(entity); Scaleable<?> scalable = null; if (entity instanceof Deployment) { scalable = kubernetes.extensions().deployments().inNamespace(namespace).withName(name); } else if (entity instanceof ReplicaSet) { scalable = kubernetes.extensions().replicaSets().inNamespace(namespace).withName(name); } else if (entity instanceof ReplicationController) { scalable = kubernetes.replicationControllers().inNamespace(namespace).withName(name); } else if (entity instanceof DeploymentConfig) { OpenShiftClient openshiftClient = OpenshiftHelper.asOpenShiftClient(kubernetes); if (openshiftClient == null) { log.warn("Ignoring DeploymentConfig %s as not connected to an OpenShift cluster", name); continue; } scalable = openshiftClient.deploymentConfigs().inNamespace(namespace).withName(name); } if (scalable != null) { log.info("Scaling " + KubernetesHelper.getKind(entity) + " " + namespace + "/" + name + " to replicas: " + replicas); scalable.scale(replicas, true); } } }
public <T extends HasMetadata,L,D> void applyResource(T resource, String sourceName, MixedOperation<T, L, D, ? extends Resource<T, D>> resources) throws Exception { String namespace = getNamespace(); String id = getName(resource); String kind = getKind(resource); Objects.requireNonNull(id, "No name for " + resource + " " + sourceName); if (isServicesOnlyMode()) {
HasMetadata entity = (HasMetadata) dto; try { log.info("Applying " + getKind(entity) + " " + getName(entity) + " from " + sourceName); kubernetesClient.resource(entity).inNamespace(getNamespace()).createOrReplace(); } catch (Exception e) { onApplyError("Failed to create " + getKind(entity) + " from " + sourceName + ". " + e, e);
log.info("Merging 2 resources for " + KubernetesHelper.getKind(resource1OrCopy) + " " + KubernetesHelper.getName(resource1OrCopy) + " from " + getSourceUrlAnnotation(resource1OrCopy) + " and " + getSourceUrlAnnotation(resource2) + " and removing " + getSourceUrlAnnotation(resource1OrCopy)); return resource1OrCopy;