result.putIfAbsent(entry.getKey(), new ClusterState.CollectionRef(entry.getValue()));
/**
 * Get a map of collection name vs DocCollection objects
 *
 * Implementation note: This method resolves the collection reference by calling
 * {@link CollectionRef#get()} which can make a call to ZooKeeper. This is necessary
 * because the semantics of how collection list is loaded have changed in SOLR-6629.
 * Please see javadocs in {@link ZkStateReader#refreshCollectionList(Watcher)}
 *
 * @return a map of collection name vs DocCollection object
 */
public Map<String, DocCollection> getCollectionsMap() {
  Map<String, DocCollection> resolved = new HashMap<>(collectionStates.size());
  collectionStates.forEach((name, ref) -> {
    // get() may go to ZooKeeper for lazily loaded collection refs
    DocCollection state = ref.get();
    if (state != null) {
      resolved.put(name, state);
    }
  });
  return resolved;
}
.get(coll).get(); updateWatchedCollection(newState);
if (!ref.isLazilyLoaded()) { return ref.get(); col = ref.get();//this is a call to ZK
updateWatchedCollection(live); result.put(s, new ClusterState.CollectionRef(live)); result.put(s, new ClusterState.CollectionRef(null) { @Override public DocCollection get() {
if (!ref.isLazilyLoaded()) { return ref.get(); fetchedCol = ref.get();//this is a call to ZK if (fetchedCol == null) return null;// this collection no more exists if (col != null && fetchedCol.getZNodeVersion() == col.getZNodeVersion()) {
} else if (ref.isLazilyLoaded()) { log.debug("Refreshing lazily-loaded state for collection {}", collection); if (ref.get() != null) { return;
/**
 * Reads the latest state of the given collection directly from ZooKeeper,
 * bypassing any cached cluster state.
 *
 * @param zkStateReader reader providing access to the ZooKeeper client
 * @param coll          name of the collection to load
 * @return the live {@link DocCollection}, or null if the collection znode does
 *         not exist or the loaded state contains no entry for the collection
 * @throws SolrException if ZooKeeper cannot be read or the thread is interrupted
 */
public static DocCollection getCollectionLive(ZkStateReader zkStateReader, String coll) {
  String path = getCollectionPath(coll);
  try {
    Stat stat = new Stat();
    byte[] raw = zkStateReader.getZkClient().getData(path, null, stat, true);
    ClusterState loaded = ClusterState.load(stat.getVersion(), raw, Collections.<String>emptySet(), path);
    ClusterState.CollectionRef ref = loaded.getCollectionStates().get(coll);
    if (ref == null) {
      return null;
    }
    return ref.get();
  } catch (KeeperException.NoNodeException e) {
    log.warn("No node available : " + path, e);
    return null;
  } catch (KeeperException e) {
    throw new SolrException(ErrorCode.BAD_REQUEST, "Could not load collection from ZK:" + coll, e);
  } catch (InterruptedException e) {
    // restore the interrupt flag before translating to a SolrException
    Thread.currentThread().interrupt();
    throw new SolrException(ErrorCode.BAD_REQUEST, "Could not load collection from ZK:" + coll, e);
  }
}
public String getShardId(String collectionName, String nodeName, String coreName) { Collection<CollectionRef> states = collectionStates.values(); if (collectionName != null) { CollectionRef c = collectionStates.get(collectionName); if (c != null) states = Collections.singletonList( c ); } for (CollectionRef ref : states) { DocCollection coll = ref.get(); if(coll == null) continue;// this collection go tremoved in between, skip for (Slice slice : coll.getSlices()) { for (Replica replica : slice.getReplicas()) { // TODO: for really large clusters, we could 'index' on this String rnodeName = replica.getStr(ZkStateReader.NODE_NAME_PROP); String rcore = replica.getStr(ZkStateReader.CORE_NAME_PROP); if (nodeName.equals(rnodeName) && coreName.equals(rcore)) { return slice.getName(); } } } } return null; }
public String getShardId(String collectionName, String nodeName, String coreName) { Collection<CollectionRef> states = collectionStates.values(); if (collectionName != null) { CollectionRef c = collectionStates.get(collectionName); if (c != null) states = Collections.singletonList( c ); } for (CollectionRef ref : states) { DocCollection coll = ref.get(); if(coll == null) continue;// this collection go tremoved in between, skip for (Slice slice : coll.getSlices()) { for (Replica replica : slice.getReplicas()) { // TODO: for really large clusters, we could 'index' on this String rnodeName = replica.getStr(ZkStateReader.NODE_NAME_PROP); String rcore = replica.getStr(ZkStateReader.CORE_NAME_PROP); if (nodeName.equals(rnodeName) && coreName.equals(rcore)) { return slice.getName(); } } } } return null; }
/**
 * Fetches the current state of a single collection directly from its state
 * znode in ZooKeeper, optionally registering {@code watcher} for future
 * changes to that znode.
 *
 * @param coll    collection name whose state to fetch
 * @param watcher watcher to set on the state znode (and, on a miss, as an
 *                exists-watch so creation is noticed later); may be null
 * @return the loaded {@link DocCollection}, or null if the znode does not
 *         exist or the loaded state has no entry for the collection
 * @throws KeeperException      on ZooKeeper errors other than NoNode
 * @throws InterruptedException if the ZooKeeper call is interrupted
 */
private DocCollection fetchCollectionState(String coll, Watcher watcher) throws KeeperException, InterruptedException {
  String collectionPath = getCollectionPath(coll);
  // Loop only to handle the create-between-getData-and-exists race below.
  while (true) {
    try {
      Stat stat = new Stat();
      byte[] data = zkClient.getData(collectionPath, watcher, stat, true);
      ClusterState state = ClusterState.load(stat.getVersion(), data, Collections.<String>emptySet(), collectionPath);
      ClusterState.CollectionRef collectionRef = state.getCollectionStates().get(coll);
      return collectionRef == null ? null : collectionRef.get();
    } catch (KeeperException.NoNodeException e) {
      if (watcher != null) {
        // Leave an exists watch in place in case a state.json is created later.
        Stat exists = zkClient.exists(collectionPath, watcher, true);
        if (exists != null) {
          // Rare race condition, we tried to fetch the data and couldn't find it, then we found it exists.
          // Loop and try again.
          continue;
        }
      }
      return null;
    }
  }
}
/** * Create ClusterState from json string that is typically stored in zookeeper. * * @param version zk version of the clusterstate.json file (bytes) * @param bytes clusterstate.json as a byte array * @param liveNodes list of live nodes * @return the ClusterState */ public static ClusterState load(Integer version, byte[] bytes, Set<String> liveNodes, String znode) { // System.out.println("######## ClusterState.load:" + (bytes==null ? null : new String(bytes))); if (bytes == null || bytes.length == 0) { return new ClusterState(version, liveNodes, Collections.<String, DocCollection>emptyMap()); } Map<String, Object> stateMap = (Map<String, Object>) Utils.fromJSON(bytes); Map<String,CollectionRef> collections = new LinkedHashMap<>(stateMap.size()); for (Entry<String, Object> entry : stateMap.entrySet()) { String collectionName = entry.getKey(); DocCollection coll = collectionFromObjects(collectionName, (Map<String,Object>)entry.getValue(), version, znode); collections.put(collectionName, new CollectionRef(coll)); } return new ClusterState( liveNodes, collections,version); }
protected void readReplicaDetails() throws IOException { ClusterStateProvider clusterStateProvider = getClusterStateProvider(); ClusterState clusterState = clusterStateProvider.getClusterState(); if (clusterState == null) { // zkStateReader still initializing return; } Map<String, ClusterState.CollectionRef> all = clusterStateProvider.getClusterState().getCollectionStates(); all.forEach((collName, ref) -> { DocCollection coll = ref.get(); if (coll == null) return; if (coll.getProperties().get(CollectionAdminParams.WITH_COLLECTION) != null) { withCollectionsMap.put(coll.getName(), (String) coll.getProperties().get(CollectionAdminParams.WITH_COLLECTION)); } coll.forEachReplica((shard, replica) -> { Map<String, Map<String, List<ReplicaInfo>>> nodeData = nodeVsCollectionVsShardVsReplicaInfo.computeIfAbsent(replica.getNodeName(), k -> new HashMap<>()); Map<String, List<ReplicaInfo>> collData = nodeData.computeIfAbsent(collName, k -> new HashMap<>()); List<ReplicaInfo> replicas = collData.computeIfAbsent(shard, k -> new ArrayList<>()); replicas.add(new ReplicaInfo(collName, shard, replica, new HashMap<>(replica.getProperties()))); }); }); }
@Override public void write(JSONWriter jsonWriter) { LinkedHashMap<String , DocCollection> map = new LinkedHashMap<>(); for (Entry<String, CollectionRef> e : collectionStates.entrySet()) { // using this class check to avoid fetching from ZK in case of lazily loaded collection if (e.getValue().getClass() == CollectionRef.class) { // check if it is a lazily loaded collection outside of clusterstate.json DocCollection coll = e.getValue().get(); if (coll.getStateFormat() == 1) { map.put(coll.getName(),coll); } } } jsonWriter.write(map); }
/**
 * Returns the corresponding {@link DocCollection} object for the given collection name
 * if such a collection exists. Returns null otherwise.
 *
 * @param collectionName Name of the collection
 * @param allowCached allow LazyCollectionRefs to use a time-based cached value
 *
 * Implementation note: This method resolves the collection reference by calling
 * {@link CollectionRef#get()} which may make a call to ZooKeeper. This is necessary
 * because the semantics of how collection list is loaded have changed in SOLR-6629.
 * Please see javadocs in {@link ZkStateReader#refreshCollectionList(Watcher)}
 */
public DocCollection getCollectionOrNull(String collectionName, boolean allowCached) {
  CollectionRef ref = collectionStates.get(collectionName);
  if (ref == null) {
    return null;
  }
  return ref.get(allowCached);
}
/**
 * Returns a new cluster state object modified with the given collection.
 *
 * @param collectionName the name of the modified (or deleted) collection
 * @param collection the collection object. A null value deletes the collection from the state
 * @return the updated cluster state which preserves the current live nodes and zk node version
 */
public ClusterState copyWith(String collectionName, DocCollection collection) {
  LinkedHashMap<String, CollectionRef> updated = new LinkedHashMap<>(collectionStates);
  if (collection != null) {
    updated.put(collectionName, new CollectionRef(collection));
  } else {
    // null collection means delete
    updated.remove(collectionName);
  }
  return new ClusterState(liveNodes, updated, znodeVersion);
}
/**
 * Returns a new cluster state object modified with the given collection.
 *
 * @param collectionName the name of the modified (or deleted) collection
 * @param collection the collection object. A null value deletes the collection from the state
 * @return the updated cluster state which preserves the current live nodes and zk node version
 */
public ClusterState copyWith(String collectionName, DocCollection collection) {
  // Copy the state map up front, then apply the single mutation to the copy.
  LinkedHashMap<String, CollectionRef> states = new LinkedHashMap<>(collectionStates);
  if (collection == null) {
    states.remove(collectionName); // null deletes the collection
  } else {
    states.put(collectionName, new CollectionRef(collection));
  }
  return new ClusterState(liveNodes, states, znodeVersion);
}
@Override public void write(JSONWriter jsonWriter) { LinkedHashMap<String , DocCollection> map = new LinkedHashMap<>(); for (Entry<String, CollectionRef> e : collectionStates.entrySet()) { // using this class check to avoid fetching from ZK in case of lazily loaded collection if (e.getValue().getClass() == CollectionRef.class) { // check if it is a lazily loaded collection outside of clusterstate.json DocCollection coll = e.getValue().get(); if (coll.getStateFormat() == 1) { map.put(coll.getName(),coll); } } } jsonWriter.write(map); }
/**
 * Applies {@code consumer} to the resolved {@link DocCollection} of every
 * collection in this state. Resolving a ref may go to ZooKeeper for lazily
 * loaded collections; a SolrException caused by NoNodeException (collection
 * deleted concurrently) is silently skipped, any other SolrException is
 * rethrown.
 *
 * NOTE(review): collectionRef.get() can return null for a ref whose
 * collection vanished — the consumer may be handed null; confirm callers
 * handle that.
 *
 * @param consumer callback invoked once per collection
 */
public void forEachCollection(Consumer<DocCollection> consumer) {
  collectionStates.forEach((s, collectionRef) -> {
    try {
      consumer.accept(collectionRef.get());
    } catch (SolrException e) {
      if (e.getCause() instanceof KeeperException.NoNodeException) {
        //don't do anything. This collection does not exist
      } else {
        throw e;
      }
    }
  });
}

public static class CollectionRef {