@Override
public boolean equals(Object o) {
    // Identity first, then type guard, then compare the three identifying fields.
    if (o == this) return true;
    if ( ! (o instanceof RunId)) return false;
    RunId other = (RunId) o;
    return number == other.number
           && application.equals(other.application)
           && type == other.type;
}
@Override
public void delete(ApplicationId id) {
    // Drop every stored run belonging to the given application.
    logs.keySet().removeIf(run -> run.application().equals(id));
}
/** Returns the subset of load balancers owned by given application */
public LoadBalancerList owner(ApplicationId application) {
    // Collect the matching load balancers, then wrap them in a LoadBalancerList.
    return new LoadBalancerList(loadBalancers.stream()
                                             .filter(lb -> lb.id().application().equals(application))
                                             .collect(Collectors.toList()));
}
/** Returns the nodes from the given collection that are allocated to the given application. */
private List<Node> getNodesBelongingToApplication(Collection<Node> allNodes, ApplicationId applicationId) {
    return allNodes.stream()
                   .filter(node -> node.allocation()
                                       .map(allocation -> allocation.owner().equals(applicationId))
                                       .orElse(false))
                   .collect(Collectors.toList());
}
/** Add existing nodes allocated to the application */
void addApplicationNodes() {
    // Only nodes in these states may be reused for the application.
    List<Node.State> legalStates = Arrays.asList(Node.State.active, Node.State.inactive, Node.State.reserved);
    allNodes.stream()
            .filter(node -> legalStates.contains(node.state()))
            .filter(node -> node.type().equals(requestedNodes.type()))
            .filter(node -> node.allocation()
                                .map(allocation -> allocation.owner().equals(appId))
                                .orElse(false))
            .map(node -> toNodePriority(node, false, false))
            .forEach(candidate -> nodes.put(candidate.node, candidate));
}
/**
 * Resolves the unique {@link ApplicationInstanceReference} for the given application id.
 *
 * @param appId                 the application id to resolve
 * @param instanceLookupService source of all known application instance references
 * @return the single reference mapping to {@code appId}
 * @throws ApplicationIdNotFoundException if no reference, or more than one reference,
 *         maps to {@code appId}
 */
public static ApplicationInstanceReference toApplicationInstanceReference(ApplicationId appId,
                                                                          InstanceLookupService instanceLookupService)
        throws ApplicationIdNotFoundException {
    Set<ApplicationInstanceReference> appRefs = instanceLookupService.knownInstances();
    List<ApplicationInstanceReference> appRefList = appRefs.stream()
            .filter(a -> OrchestratorUtil.toApplicationId(a).equals(appId))
            .collect(Collectors.toList());
    // isEmpty() over size() == 0: clearer intent, same behavior.
    if (appRefList.isEmpty()) {
        throw new ApplicationIdNotFoundException();
    }
    if (appRefList.size() > 1) {
        String msg = String.format("ApplicationId '%s' was not unique but mapped to '%s'", appId, appRefList);
        throw new ApplicationIdNotFoundException(msg);
    }
    return appRefList.get(0);
}
/** Returns the subset of nodes owned by the given application */
public NodeList owner(ApplicationId application) {
    // A node without an allocation has no owner and is excluded.
    return filter(node -> node.allocation().isPresent()
                          && node.allocation().get().owner().equals(application));
}
/**
 * Returns all nodes allocated to the given application which are in one of the given states
 * If no states are given this returns all nodes.
 */
public List<Node> getNodes(ApplicationId applicationId, Node.State ... states) {
    List<Node> nodes = getNodes(states);
    // Remove nodes that are unallocated, or allocated to some other application.
    nodes.removeIf(node -> node.allocation()
                               .map(allocation -> ! allocation.owner().equals(applicationId))
                               .orElse(true));
    return nodes;
}
private boolean shouldHaveSlobrok(ContainerModel containerModel) { // Avoid Slobroks on node-admin container cluster, as node-admin is migrating // TODO: Remove this hack once node-admin has migrated out the zone app ApplicationId applicationId = context.getDeployState().getProperties().applicationId(); if (!applicationId.equals(ZONE_APPLICATION_ID)) { return true; } // aka clustername, aka application-model's ClusterId String clustername = containerModel.getCluster().getName(); return !Objects.equals(clustername, "node-admin"); }
NodePrioritizer(List<Node> allNodes, ApplicationId appId, ClusterSpec clusterSpec, NodeSpec nodeSpec, int spares, NameResolver nameResolver) { this.allNodes = Collections.unmodifiableList(allNodes); this.requestedNodes = nodeSpec; this.clusterSpec = clusterSpec; this.appId = appId; this.nameResolver = nameResolver; this.spareHosts = findSpareHosts(allNodes, spares); this.capacity = new DockerHostCapacity(allNodes); long nofFailedNodes = allNodes.stream() .filter(node -> node.state().equals(Node.State.failed)) .filter(node -> node.allocation().isPresent()) .filter(node -> node.allocation().get().owner().equals(appId)) .filter(node -> node.allocation().get().membership().cluster().id().equals(clusterSpec.id())) .count(); long nofNodesInCluster = allNodes.stream() .filter(node -> node.allocation().isPresent()) .filter(node -> node.allocation().get().owner().equals(appId)) .filter(node -> node.allocation().get().membership().cluster().id().equals(clusterSpec.id())) .count(); this.isAllocatingForReplacement = isReplacement(nofNodesInCluster, nofFailedNodes); this.isDocker = isDocker(); }
// NOTE(review): fragment of a larger stream pipeline — the enclosing method is not
// visible in this chunk, so intent is inferred, not confirmed. It appears to look up
// the repository's current copy of each node, keep it only while the outer node is
// active and both copies agree on the owning application, then unwrap the Optional
// results into a Set. The unbalanced ')' after the second filter suggests this chain
// continues from code outside this view — TODO confirm against the full method.
nodeRepository().getNode(node.hostname()) .filter(upToDateNode -> node.state() == Node.State.active) .filter(upToDateNode -> node.allocation().get().owner().equals(upToDateNode.allocation().get().owner()))) .flatMap(node -> node.map(Stream::of).orElseGet(Stream::empty)) .collect(Collectors.toSet());
/** Returns how much of the given cluster may be suspended concurrently. */
ConcurrentSuspensionLimitForCluster getConcurrentSuspensionLimit(ClusterApi clusterApi) {
    // Storage clusters are suspended one node at a time.
    if (clusterApi.isStorageCluster())
        return ConcurrentSuspensionLimitForCluster.ONE_NODE;

    // Admin cluster: slobrok goes one-by-one; other admin services may all suspend.
    if (VespaModelUtil.ADMIN_CLUSTER_ID.equals(clusterApi.clusterId())) {
        return VespaModelUtil.SLOBROK_SERVICE_TYPE.equals(clusterApi.serviceType())
                ? ConcurrentSuspensionLimitForCluster.ONE_NODE
                : ConcurrentSuspensionLimitForCluster.ALL_NODES;
    }

    // The node-admin cluster of the zone application gets a dedicated 20% limit.
    if (clusterApi.getApplication().applicationId().equals(VespaModelUtil.ZONE_APPLICATION_ID)
            && clusterApi.clusterId().equals(VespaModelUtil.NODE_ADMIN_CLUSTER_ID)) {
        return ConcurrentSuspensionLimitForCluster.TWENTY_PERCENT;
    }

    // Default for all other clusters.
    return ConcurrentSuspensionLimitForCluster.TEN_PERCENT;
}
}
// NOTE(review): fragment from inside a node-allocation loop — the loop header and
// enclosing method are outside this view, so the surrounding control flow is inferred.
// It appears to skip the offered node unless it is (1) allocated to this application,
// (2) in the requested cluster (ignoring group and Vespa version), and (3) in the
// requested group — unless it is a surplus node we still have room to absorb.
// TODO confirm against the full method; 'wantToRetireNode' is presumably used below.
boolean wantToRetireNode = false; ClusterMembership membership = offered.allocation().get().membership(); if ( ! offered.allocation().get().owner().equals(application)) continue; // wrong application if ( ! membership.cluster().equalsIgnoringGroupAndVespaVersion(cluster)) continue; // wrong cluster id/type if ((! offeredPriority.isSurplusNode || saturated()) && ! membership.cluster().group().equals(cluster.group())) continue; // wrong group and we can't or have no reason to change it