/**
 * Returns a map of collection name to {@link DocCollection}.
 *
 * <p>Implementation note: each {@link CollectionRef} is resolved via {@link CollectionRef#get()},
 * which may make a call to ZooKeeper for lazily loaded collections. This is necessary because the
 * semantics of how the collection list is loaded changed in SOLR-6629; see the javadocs of
 * {@link ZkStateReader#refreshCollectionList(Watcher)}.
 *
 * @return a map of collection name to DocCollection object
 */
public Map<String, DocCollection> getCollectionsMap() {
  Map<String, DocCollection> collections = new HashMap<>(collectionStates.size());
  collectionStates.forEach((name, ref) -> {
    DocCollection state = ref.get();
    if (state != null) {
      collections.put(name, state);
    }
  });
  return collections;
}
/**
 * Writes to JSON only the collections stored directly in the shared clusterstate.json
 * (i.e. stateFormat 1 collections), skipping lazily loaded per-collection state.
 */
@Override
public void write(JSONWriter jsonWriter) {
  LinkedHashMap<String , DocCollection> map = new LinkedHashMap<>();
  for (Entry<String, CollectionRef> e : collectionStates.entrySet()) {
    // using this class check to avoid fetching from ZK in case of lazily loaded collection
    // NOTE(review): relies on lazy refs being a *subclass* of CollectionRef — confirm that
    // invariant holds wherever refs are created.
    if (e.getValue().getClass() == CollectionRef.class) {
      // check if it is a lazily loaded collection outside of clusterstate.json
      DocCollection coll = e.getValue().get();
      if (coll.getStateFormat() == 1) {
        map.put(coll.getName(),coll);
      }
    }
  }
  jsonWriter.write(map);
}
/**
 * Writes to JSON only the collections stored directly in the shared clusterstate.json
 * (i.e. stateFormat 1 collections), skipping lazily loaded per-collection state.
 */
@Override
public void write(JSONWriter jsonWriter) {
  LinkedHashMap<String , DocCollection> map = new LinkedHashMap<>();
  for (Entry<String, CollectionRef> e : collectionStates.entrySet()) {
    // using this class check to avoid fetching from ZK in case of lazily loaded collection
    // NOTE(review): relies on lazy refs being a *subclass* of CollectionRef — confirm that
    // invariant holds wherever refs are created.
    if (e.getValue().getClass() == CollectionRef.class) {
      // check if it is a lazily loaded collection outside of clusterstate.json
      DocCollection coll = e.getValue().get();
      if (coll.getStateFormat() == 1) {
        map.put(coll.getName(),coll);
      }
    }
  }
  jsonWriter.write(map);
}
.get(coll).get(); updateWatchedCollection(newState);
if (ref.get() != null) { return;
/**
 * Reads the latest state of a single collection directly from ZooKeeper, bypassing any cache.
 *
 * @param zkStateReader reader whose ZooKeeper client performs the fetch
 * @param coll name of the collection to load
 * @return the live {@link DocCollection}, or null if no state node exists for the collection
 * @throws SolrException if the state cannot be loaded from ZooKeeper
 */
public static DocCollection getCollectionLive(ZkStateReader zkStateReader, String coll) {
  String collectionPath = getCollectionPath(coll);
  try {
    Stat stat = new Stat();
    byte[] data = zkStateReader.getZkClient().getData(collectionPath, null, stat, true);
    ClusterState state =
        ClusterState.load(stat.getVersion(), data, Collections.<String>emptySet(), collectionPath);
    ClusterState.CollectionRef ref = state.getCollectionStates().get(coll);
    if (ref == null) {
      return null;
    }
    return ref.get();
  } catch (KeeperException.NoNodeException e) {
    log.warn("No node available : " + collectionPath, e);
    return null;
  } catch (KeeperException e) {
    throw new SolrException(ErrorCode.BAD_REQUEST, "Could not load collection from ZK:" + coll, e);
  } catch (InterruptedException e) {
    // restore the interrupt flag before surfacing the failure
    Thread.currentThread().interrupt();
    throw new SolrException(ErrorCode.BAD_REQUEST, "Could not load collection from ZK:" + coll, e);
  }
}
public String getShardId(String collectionName, String nodeName, String coreName) { Collection<CollectionRef> states = collectionStates.values(); if (collectionName != null) { CollectionRef c = collectionStates.get(collectionName); if (c != null) states = Collections.singletonList( c ); } for (CollectionRef ref : states) { DocCollection coll = ref.get(); if(coll == null) continue;// this collection go tremoved in between, skip for (Slice slice : coll.getSlices()) { for (Replica replica : slice.getReplicas()) { // TODO: for really large clusters, we could 'index' on this String rnodeName = replica.getStr(ZkStateReader.NODE_NAME_PROP); String rcore = replica.getStr(ZkStateReader.CORE_NAME_PROP); if (nodeName.equals(rnodeName) && coreName.equals(rcore)) { return slice.getName(); } } } } return null; }
/**
 * Fetches the state of a single collection from ZooKeeper, optionally leaving a watch in place.
 *
 * @param coll the collection name
 * @param watcher watch to set on the state node (and on its parent path if the node is absent);
 *        may be null for a one-shot read
 * @return the collection's state, or null if no state node exists
 * @throws KeeperException on ZooKeeper errors other than a missing node
 * @throws InterruptedException if the ZooKeeper call is interrupted
 */
private DocCollection fetchCollectionState(String coll, Watcher watcher) throws KeeperException, InterruptedException {
  String collectionPath = getCollectionPath(coll);
  while (true) {
    try {
      Stat stat = new Stat();
      byte[] data = zkClient.getData(collectionPath, watcher, stat, true);
      ClusterState state = ClusterState.load(stat.getVersion(), data, Collections.<String>emptySet(), collectionPath);
      ClusterState.CollectionRef collectionRef = state.getCollectionStates().get(coll);
      return collectionRef == null ? null : collectionRef.get();
    } catch (KeeperException.NoNodeException e) {
      if (watcher != null) {
        // Leave an exists watch in place in case a state.json is created later.
        Stat exists = zkClient.exists(collectionPath, watcher, true);
        if (exists != null) {
          // Rare race condition, we tried to fetch the data and couldn't find it, then we found it exists.
          // Loop and try again.
          continue;
        }
      }
      return null;
    }
  }
}
/**
 * Finds the shard id (slice name) that hosts the given core on the given node.
 *
 * @param collectionName if non-null, restrict the search to this collection
 * @param nodeName node name the replica must live on
 * @param coreName core name the replica must have
 * @return the slice name, or null if no matching replica is found
 */
public String getShardId(String collectionName, String nodeName, String coreName) {
  Collection<CollectionRef> states = collectionStates.values();
  if (collectionName != null) {
    CollectionRef c = collectionStates.get(collectionName);
    if (c != null) states = Collections.singletonList( c );
  }
  for (CollectionRef ref : states) {
    DocCollection coll = ref.get();
    if(coll == null) continue;// this collection got removed in between, skip
    for (Slice slice : coll.getSlices()) {
      for (Replica replica : slice.getReplicas()) {
        // TODO: for really large clusters, we could 'index' on this
        String rnodeName = replica.getStr(ZkStateReader.NODE_NAME_PROP);
        String rcore = replica.getStr(ZkStateReader.CORE_NAME_PROP);
        if (nodeName.equals(rnodeName) && coreName.equals(rcore)) {
          return slice.getName();
        }
      }
    }
  }
  return null;
}
protected void readReplicaDetails() throws IOException { ClusterStateProvider clusterStateProvider = getClusterStateProvider(); ClusterState clusterState = clusterStateProvider.getClusterState(); if (clusterState == null) { // zkStateReader still initializing return; } Map<String, ClusterState.CollectionRef> all = clusterStateProvider.getClusterState().getCollectionStates(); all.forEach((collName, ref) -> { DocCollection coll = ref.get(); if (coll == null) return; if (coll.getProperties().get(CollectionAdminParams.WITH_COLLECTION) != null) { withCollectionsMap.put(coll.getName(), (String) coll.getProperties().get(CollectionAdminParams.WITH_COLLECTION)); } coll.forEachReplica((shard, replica) -> { Map<String, Map<String, List<ReplicaInfo>>> nodeData = nodeVsCollectionVsShardVsReplicaInfo.computeIfAbsent(replica.getNodeName(), k -> new HashMap<>()); Map<String, List<ReplicaInfo>> collData = nodeData.computeIfAbsent(collName, k -> new HashMap<>()); List<ReplicaInfo> replicas = collData.computeIfAbsent(shard, k -> new ArrayList<>()); replicas.add(new ReplicaInfo(collName, shard, replica, new HashMap<>(replica.getProperties()))); }); }); }
/**
 * Returns the {@link DocCollection} for the given collection name, or null if no such
 * collection exists.
 *
 * <p>Implementation note: the collection reference is resolved by calling
 * {@link CollectionRef#get()}, which may make a call to ZooKeeper. This is necessary because
 * the semantics of how the collection list is loaded changed in SOLR-6629; see the javadocs of
 * {@link ZkStateReader#refreshCollectionList(Watcher)}.
 *
 * @param collectionName Name of the collection
 * @param allowCached allow LazyCollectionRefs to use a time-based cached value
 */
public DocCollection getCollectionOrNull(String collectionName, boolean allowCached) {
  CollectionRef ref = collectionStates.get(collectionName);
  if (ref == null) {
    return null;
  }
  return ref.get(allowCached);
}
/**
 * Applies the consumer to every collection's {@link DocCollection}, tolerating collections
 * that disappear from ZooKeeper while iterating.
 *
 * @param consumer invoked once per resolvable collection state
 */
public void forEachCollection(Consumer<DocCollection> consumer) {
  collectionStates.forEach((s, collectionRef) -> {
    try {
      consumer.accept(collectionRef.get());
    } catch (SolrException e) {
      if (e.getCause() instanceof KeeperException.NoNodeException) {
        //don't do anything. This collection does not exist
      } else{
        throw e;
      }
    }
  });

}

public static class CollectionRef {
/**
 * Returns the {@link DocCollection} for the given collection name, or null if no such
 * collection exists. May fetch from ZooKeeper for lazily loaded collections.
 *
 * @param coll Name of the collection
 */
public DocCollection getCollectionOrNull(String coll) {
  CollectionRef ref = collectionStates.get(coll);
  if (ref == null) {
    return null;
  }
  return ref.get();
}
/**
 * Returns the {@link DocCollection}, always refetching if this reference is lazy.
 * Equivalent to {@code get(false)}.
 *
 * @return the collection state modeled in ZooKeeper
 */
public DocCollection get() {
  return get(false);
}