/**
 * Returns all the DAGs for the argument ids, which must already exist.
 * <p>
 * Resolution order per id: the root id maps to the enclosing strategy's in-memory root DAG;
 * any other id is first looked up in the {@code treeBuff} cache; ids not found in either are
 * batch-fetched from the backing {@code store} in a single call.
 * <p>
 * NOTE(review): the original file declared this method twice, byte-identical — a duplicate
 * definition that cannot compile; collapsed to a single definition here.
 *
 * @param ids the tree ids to resolve; every id must reference an existing DAG
 * @return the resolved DAGs; cached hits first (in iteration order of {@code ids}), followed
 *         by the store-fetched ones
 */
public List<DAG> getAll(Set<TreeId> ids) {
    List<DAG> all = new ArrayList<>(ids.size());
    Set<TreeId> missing = new HashSet<>();
    for (TreeId id : ids) {
        DAG dag;
        if (ROOT_ID.equals(id)) {
            // the root DAG lives on the enclosing strategy, never in treeBuff
            dag = ClusteringStrategy.this.root;
        } else {
            dag = treeBuff.get(id);
        }
        if (dag == null) {
            missing.add(id);
        } else {
            all.add(dag);
        }
    }
    // avoid a needless backend round-trip when everything was cached
    if (!missing.isEmpty()) {
        all.addAll(store.getTrees(missing));
    }
    return all;
}
private void shrinkIfUnderflow(final DAG dag) { if (dag.numBuckets() == 0) { return; } final long childCount = dag.getTotalChildCount(); // TODO: in the case of quadtrees would need to check if it's an unpromotables bucket and // use canonical's normalized size limit instead? final int depth = dag.getId().depthLength(); final int normalizedSizeLimit = normalizedSizeLimit(depth); if (childCount > normalizedSizeLimit) { return; } Set<NodeId> childrenRecursive = getChildrenRecursiveAndClearBuckets(dag); int collectedSize = childrenRecursive.size(); if (dag.getId().equals(failingDag)) { System.err.printf("expected: %d, collected: %d\n", childCount, collectedSize); } if (collectedSize != childCount) { throw new IllegalStateException(String.format("expected %s, got %s, at: %s", childCount, childrenRecursive.size(), dag)); } dag.clearBuckets(); childrenRecursive.forEach((id) -> dag.addChild(id)); }