/**
 * Looks up the shard leader (retrying until found or the timeout elapses)
 * and returns its core URL.
 *
 * @param collection the collection name
 * @param shard the shard (slice) id
 * @param timeout retry timeout passed through to {@code getLeaderRetry}
 * @return the leader replica's core URL
 * @throws InterruptedException if interrupted while waiting for a leader
 * @throws KeeperException on a ZooKeeper error while reading leader state
 */
public String getLeaderUrl(String collection, String shard, int timeout)
    throws InterruptedException, KeeperException {
  final Replica leader = getLeaderRetry(collection, shard, timeout);
  return new ZkCoreNodeProps(leader).getCoreUrl();
}
/**
 * Looks up the shard leader (retrying until found or the timeout elapses)
 * and returns its core URL.
 *
 * @param collection the collection name
 * @param shard the shard (slice) id
 * @param timeout retry timeout passed through to {@code getLeaderRetry}
 * @return the leader replica's core URL
 * @throws InterruptedException if interrupted while waiting for a leader
 */
public String getLeaderUrl(String collection, String shard, int timeout) throws InterruptedException {
  final Replica leader = getLeaderRetry(collection, shard, timeout);
  return new ZkCoreNodeProps(leader).getCoreUrl();
}
/**
 * Returns, for each shard of the given collection, the list of replica core
 * URLs with the shard leader first.
 *
 * <p>Fix/consistency: previously replica URLs were added in the arbitrary
 * iteration order of {@code slice.getReplicas()}, even though the method
 * already requires a leader to exist; callers that try URLs in order now hit
 * the leader first, matching the sibling variant of this method.
 *
 * @param zkHost the ZooKeeper connect string used to read cluster state
 * @param collection the collection whose shard URLs are extracted
 * @return one inner list of core URLs per (sorted) slice, leader at index 0
 * @throws IllegalArgumentException if any slice has no registered leader yet
 */
public List<List<String>> extractShardUrls(String zkHost, String collection) {
  DocCollection docCollection = extractDocCollection(zkHost, collection);
  List<Slice> slices = getSortedSlices(docCollection.getSlices());
  List<List<String>> solrUrls = new ArrayList<List<String>>(slices.size());
  for (Slice slice : slices) {
    if (slice.getLeader() == null) {
      throw new IllegalArgumentException("Cannot find SolrCloud slice leader. "
          + "It looks like not all of your shards are registered in ZooKeeper yet");
    }
    Collection<Replica> replicas = slice.getReplicas();
    List<String> urls = new ArrayList<String>(replicas.size());
    for (Replica replica : replicas) {
      ZkCoreNodeProps props = new ZkCoreNodeProps(replica);
      if (replica.getStr(Slice.LEADER) == null) {
        urls.add(props.getCoreUrl()); // follower: append at the tail
      } else {
        urls.add(0, props.getCoreUrl()); // leader: insert at the head
      }
    }
    solrUrls.add(urls);
  }
  return solrUrls;
}
/**
 * Picks one random ACTIVE, live replica per shard of {@code this.collection}
 * and returns the chosen core URLs, one per shard.
 *
 * <p>Fix: if a slice had no active replica on a live node the code called
 * {@code shuffler.get(0)} on an empty list and surfaced an opaque
 * IndexOutOfBoundsException; it now throws a descriptive IOException.
 *
 * @return one randomly selected replica core URL per shard
 * @throws IOException if cluster state cannot be read or a shard has no
 *         active replica on a live node
 */
protected List<String> getShardUrls() throws IOException {
  try {
    ZkStateReader zkStateReader = cloudSolrClient.getZkStateReader();
    Slice[] slices = CloudSolrStream.getSlices(this.collection, zkStateReader, false);
    ClusterState clusterState = zkStateReader.getClusterState();
    Set<String> liveNodes = clusterState.getLiveNodes();
    List<String> baseUrls = new ArrayList<>();
    for (Slice slice : slices) {
      Collection<Replica> replicas = slice.getReplicas();
      List<Replica> shuffler = new ArrayList<>();
      for (Replica replica : replicas) {
        // Only consider replicas that are ACTIVE and hosted on a live node.
        if (replica.getState() == Replica.State.ACTIVE
            && liveNodes.contains(replica.getNodeName())) {
          shuffler.add(replica);
        }
      }
      if (shuffler.isEmpty()) {
        throw new IOException("No active replica found for shard: " + slice.getName()
            + " in collection: " + this.collection);
      }
      Collections.shuffle(shuffler, new Random());
      Replica rep = shuffler.get(0);
      ZkCoreNodeProps zkProps = new ZkCoreNodeProps(rep);
      baseUrls.add(zkProps.getCoreUrl());
    }
    return baseUrls;
  } catch (Exception e) {
    throw new IOException(e);
  }
}
/** * Retrieve all requests recorded by this queue which were sent to given collection and shard * * @param zkStateReader the {@link org.apache.solr.common.cloud.ZkStateReader} from which cluster state is read * @param collectionName the given collection name for which requests have to be extracted * @param shardId the given shard name for which requests have to be extracted * @return a list of {@link org.apache.solr.handler.component.TrackingShardHandlerFactory.ShardRequestAndParams} * or empty list if none are found */ public List<ShardRequestAndParams> getShardRequests(ZkStateReader zkStateReader, String collectionName, String shardId) { DocCollection collection = zkStateReader.getClusterState().getCollection(collectionName); assert collection != null; Slice slice = collection.getSlice(shardId); assert slice != null; for (Map.Entry<String, List<ShardRequestAndParams>> entry : requests.entrySet()) { // multiple shard addresses may be present separated by '|' List<String> list = StrUtils.splitSmart(entry.getKey(), '|'); for (Map.Entry<String, Replica> replica : slice.getReplicasMap().entrySet()) { String coreUrl = new ZkCoreNodeProps(replica.getValue()).getCoreUrl(); if (list.contains(coreUrl)) { return new ArrayList<>(entry.getValue()); } } } return Collections.emptyList(); }
// Fragment (enclosing method not visible): forwards the serialization flag,
// picks the w-th replica from the pre-shuffled list, and opens a SolrStream
// against that replica's core URL using the accumulated params.
params.put("objectSerialize", objectSerialize); Replica rep = shuffler.get(w); ZkCoreNodeProps zkProps = new ZkCoreNodeProps(rep); String url = zkProps.getCoreUrl(); SolrStream solrStream = new SolrStream(url, params);
// Fragment (enclosing loop not visible): always adds the leader's core URL,
// then adds this replica's URL when it is not the leader.
// NOTE(review): the '&&' means a replica is skipped if it shares EITHER the
// node name OR the replica name with the leader; a distinct replica co-located
// on the leader's node would be excluded. Verify '||'/name-only comparison was
// not intended — cannot confirm from this fragment.
ZkCoreNodeProps zkProps = new ZkCoreNodeProps(leader); String url = zkProps.getCoreUrl(); urls.add(url); if (!replica.getNodeName().equals(leader.getNodeName()) && !replica.getName().equals(leader.getName())) { ZkCoreNodeProps zkProps1 = new ZkCoreNodeProps(replica); String url1 = zkProps1.getCoreUrl(); urls.add(url1);
/**
 * Picks one random ACTIVE, live replica per shard of {@code this.collection}
 * and returns the chosen core URLs, one per shard.
 *
 * <p>Fix: if a slice had no active replica on a live node the code called
 * {@code shuffler.get(0)} on an empty list and surfaced an opaque
 * IndexOutOfBoundsException; it now throws a descriptive IOException.
 *
 * @return one randomly selected replica core URL per shard
 * @throws IOException if cluster state cannot be read or a shard has no
 *         active replica on a live node
 */
private List<String> getShardUrls() throws IOException {
  try {
    ZkStateReader zkStateReader = cloudSolrClient.getZkStateReader();
    Slice[] slices = CloudSolrStream.getSlices(this.collection, zkStateReader, false);
    ClusterState clusterState = zkStateReader.getClusterState();
    Set<String> liveNodes = clusterState.getLiveNodes();
    List<String> baseUrls = new ArrayList<>();
    for (Slice slice : slices) {
      Collection<Replica> replicas = slice.getReplicas();
      List<Replica> shuffler = new ArrayList<>();
      for (Replica replica : replicas) {
        // Only consider replicas that are ACTIVE and hosted on a live node.
        if (replica.getState() == Replica.State.ACTIVE
            && liveNodes.contains(replica.getNodeName())) {
          shuffler.add(replica);
        }
      }
      if (shuffler.isEmpty()) {
        throw new IOException("No active replica found for shard: " + slice.getName()
            + " in collection: " + this.collection);
      }
      Collections.shuffle(shuffler, new Random());
      Replica rep = shuffler.get(0);
      ZkCoreNodeProps zkProps = new ZkCoreNodeProps(rep);
      baseUrls.add(zkProps.getCoreUrl());
    }
    return baseUrls;
  } catch (Exception e) {
    throw new IOException(e);
  }
}
public List<List<String>> extractShardUrls(String zkHost, String collection) { DocCollection docCollection = extractDocCollection(zkHost, collection); List<Slice> slices = getSortedSlices(docCollection.getSlices()); List<List<String>> solrUrls = new ArrayList<List<String>>(slices.size()); for (Slice slice : slices) { if (slice.getLeader() == null) { throw new IllegalArgumentException("Cannot find SolrCloud slice leader. " + "It looks like not all of your shards are registered in ZooKeeper yet"); } Collection<Replica> replicas = slice.getReplicas(); List<String> urls = new ArrayList<String>(replicas.size()); for (Replica replica : replicas) { ZkCoreNodeProps props = new ZkCoreNodeProps(replica); if (replica.getStr(Slice.LEADER) == null) { urls.add(props.getCoreUrl()); // add followers at tail } else { urls.add(0, props.getCoreUrl()); // insert leader at head } } solrUrls.add(urls); } return solrUrls; }
protected void constructStreams() throws IOException { try { ZkStateReader zkStateReader = cloudSolrClient.getZkStateReader(); ClusterState clusterState = zkStateReader.getClusterState(); //System.out.println("Connected to zk an got cluster state."); Collection<Slice> slices = clusterState.getActiveSlices(this.collection); long time = System.currentTimeMillis(); params.put("distrib","false"); // We are the aggregator. for(Slice slice : slices) { Collection<Replica> replicas = slice.getReplicas(); List<Replica> shuffler = new ArrayList(); for(Replica replica : replicas) { shuffler.add(replica); } Collections.shuffle(shuffler, new Random(time)); Replica rep = shuffler.get(0); ZkCoreNodeProps zkProps = new ZkCoreNodeProps(rep); String url = zkProps.getCoreUrl(); SolrStream solrStream = new SolrStream(url, params); if(streamContext != null) { solrStream.setStreamContext(streamContext); } solrStream.setFieldMappings(this.fieldMappings); solrStreams.add(solrStream); } } catch (Exception e) { throw new IOException(e); } }
// Fragment (enclosing method not visible): builds node props for every
// replica in shardMap whose type is in acceptReplicaType; the filtered
// entries are materialized to a list before iteration.
List<ZkCoreNodeProps> nodes = new ArrayList<>(shardMap.size()); for (Entry<String,Replica> entry : shardMap.entrySet().stream().filter((e)->acceptReplicaType.contains(e.getValue().getType())).collect(Collectors.toList())) { ZkCoreNodeProps nodeProps = new ZkCoreNodeProps(entry.getValue());
/**
 * Reads the current cluster state and returns the node props of the leader
 * of the given slice.
 *
 * @param collection the collection name
 * @param slice the slice (shard) id
 * @return the leader's {@link ZkCoreNodeProps}
 * @throws RuntimeException if the collection or its slice leader is unknown
 */
protected ZkCoreNodeProps getLeaderUrlFromZk(String collection, String slice) {
  ClusterState clusterState = getCommonCloudSolrClient().getZkStateReader().getClusterState();
  final DocCollection docCollection = clusterState.getCollectionOrNull(collection);
  if (docCollection == null || docCollection.getLeader(slice) == null) {
    throw new RuntimeException("Could not find leader:" + collection + " " + slice);
  }
  return new ZkCoreNodeProps(docCollection.getLeader(slice));
}
// Fragment (enclosing loop not visible): when the slice has a leader, maps
// its "baseUrl/coreName" URL to the slice name. Built manually from base URL
// and core name rather than via getCoreUrl() — presumably to match the key
// format used elsewhere in 'leaders'; verify against lookup sites.
Replica leader = slice.getLeader(); if (leader != null) { ZkCoreNodeProps zkProps = new ZkCoreNodeProps(leader); String leaderUrl = zkProps.getBaseUrl() + "/" + zkProps.getCoreName(); leaders.put(leaderUrl, slice.getName());
// Fragment (enclosing loop not visible): when the slice has a leader, maps
// its "baseUrl/coreName" URL to the slice name. Built manually from base URL
// and core name rather than via getCoreUrl() — presumably to match the key
// format used elsewhere in 'leaders'; verify against lookup sites.
Replica leader = slice.getLeader(); if (leader != null) { ZkCoreNodeProps zkProps = new ZkCoreNodeProps(leader); String leaderUrl = zkProps.getBaseUrl() + "/" + zkProps.getCoreName(); leaders.put(leaderUrl, slice.getName());
// Fragment (enclosing method not visible): resolves the chosen replica's
// core URL and records it in the shards list.
ZkCoreNodeProps zkProps = new ZkCoreNodeProps(rep); String url = zkProps.getCoreUrl(); shards.add(url);
/**
 * Builds a SolrStream that submits the given SQL statement to the /sql
 * handler of one randomly chosen replica of the connection's collection.
 *
 * <p>Fix: if the collection had no replicas at all, {@code shuffler.get(0)}
 * threw an IndexOutOfBoundsException that was wrapped opaquely; it now
 * throws a descriptive IOException.
 *
 * @param sql the SQL statement to send as the {@code stmt} parameter
 * @return a SolrStream targeting the chosen replica (not yet opened)
 * @throws IOException if cluster state cannot be read or no replica exists
 */
protected SolrStream constructStream(String sql) throws IOException {
  try {
    ZkStateReader zkStateReader = this.connection.getClient().getZkStateReader();
    Slice[] slices = CloudSolrStream.getSlices(this.connection.getCollection(), zkStateReader, true);
    // Pool every replica of every slice, then pick one at random.
    List<Replica> shuffler = new ArrayList<>();
    for (Slice slice : slices) {
      Collection<Replica> replicas = slice.getReplicas();
      for (Replica replica : replicas) {
        shuffler.add(replica);
      }
    }
    if (shuffler.isEmpty()) {
      throw new IOException("No replicas found for collection: "
          + this.connection.getCollection());
    }
    Collections.shuffle(shuffler, new Random());

    ModifiableSolrParams params = new ModifiableSolrParams();
    params.set(CommonParams.QT, "/sql");
    params.set("stmt", sql);
    // Forward all connection properties as request parameters.
    for (String propertyName : this.connection.getProperties().stringPropertyNames()) {
      params.set(propertyName, this.connection.getProperties().getProperty(propertyName));
    }

    Replica rep = shuffler.get(0);
    ZkCoreNodeProps zkProps = new ZkCoreNodeProps(rep);
    String url = zkProps.getCoreUrl();
    return new SolrStream(url, params);
  } catch (Exception e) {
    throw new IOException(e);
  }
}
// Fragment (enclosing method not visible): builds node props for every
// replica entry in shardMap, without filtering by replica type.
List<ZkCoreNodeProps> nodes = new ArrayList<>(shardMap.size()); for (Entry<String,Replica> entry : shardMap.entrySet()) { ZkCoreNodeProps nodeProps = new ZkCoreNodeProps(entry.getValue());
// Method header only — the body continues past this visible span.
// Resolves the replica's core name and initializes the success flag that the
// (unseen) remainder of the method presumably sets after issuing the reload.
protected boolean reloadCollection(Replica replica, String testCollectionName) throws Exception { ZkCoreNodeProps coreProps = new ZkCoreNodeProps(replica); String coreName = coreProps.getCoreName(); boolean reloadedOk = false;
// Fragment (enclosing loop not visible): always adds the leader's core URL,
// then adds this replica's URL when it is not the leader.
// NOTE(review): the '&&' means a replica is skipped if it shares EITHER the
// node name OR the replica name with the leader; a distinct replica co-located
// on the leader's node would be excluded. Verify '||'/name-only comparison was
// not intended — cannot confirm from this fragment.
ZkCoreNodeProps zkProps = new ZkCoreNodeProps(leader); String url = zkProps.getCoreUrl(); urls.add(url); if (!replica.getNodeName().equals(leader.getNodeName()) && !replica.getName().equals(leader.getName())) { ZkCoreNodeProps zkProps1 = new ZkCoreNodeProps(replica); String url1 = zkProps1.getCoreUrl(); urls.add(url1);
// Fragment (enclosing method not visible): resolves the chosen replica's core
// URL and opens a SolrStream against it with the per-stream localParams.
ZkCoreNodeProps zkProps = new ZkCoreNodeProps(rep); String url = zkProps.getCoreUrl(); SolrStream solrStream = new SolrStream(url, localParams);