Refine search
// Fragment: tail of a factory method that builds a CloudSolrClient (the enclosing
// method signature is not visible here; solrLocation, httpClient, collection and
// zkClientTimeout are defined upstream of this snippet — TODO confirm against caller).
// Read the ZooKeeper connection timeout (in milliseconds) from the processor context.
final Integer zkConnectionTimeout = context.getProperty(ZK_CONNECTION_TIMEOUT).asTimePeriod(TimeUnit.MILLISECONDS).intValue();
CloudSolrClient cloudSolrClient = new CloudSolrClient(solrLocation, httpClient);
// Collection used when requests don't name one explicitly.
cloudSolrClient.setDefaultCollection(collection);
cloudSolrClient.setZkClientTimeout(zkClientTimeout);
cloudSolrClient.setZkConnectTimeout(zkConnectionTimeout);
return cloudSolrClient;
/**
 * Checks if the collection has already been created in Solr.
 */
private static boolean checkIfCollectionExists(CloudSolrClient server, String collection)
        throws KeeperException, InterruptedException {
    final ZkStateReader stateReader = server.getZkStateReader();
    // Refresh the cached view before asking about the collection.
    stateReader.updateClusterState(true);
    return stateReader.getClusterState().getCollectionOrNull(collection) != null;
}
public SolrIndex(final Configuration config) throws BackendException { Preconditions.checkArgument(config!=null); configuration = config; mode = Mode.parse(config.get(SOLR_MODE)); dynFields = config.get(DYNAMIC_FIELDS); keyFieldIds = parseKeyFieldsForCollections(config); maxResults = config.get(INDEX_MAX_RESULT_SET_SIZE); ttlField = config.get(TTL_FIELD); waitSearcher = config.get(WAIT_SEARCHER); if (mode==Mode.CLOUD) { String zookeeperUrl = config.get(SolrIndex.ZOOKEEPER_URL); CloudSolrClient cloudServer = new CloudSolrClient(zookeeperUrl, true); cloudServer.connect(); solrClient = cloudServer; } else if (mode==Mode.HTTP) { HttpClient clientParams = HttpClientUtil.createClient(new ModifiableSolrParams() {{ add(HttpClientUtil.PROP_ALLOW_COMPRESSION, config.get(HTTP_ALLOW_COMPRESSION).toString()); add(HttpClientUtil.PROP_CONNECTION_TIMEOUT, config.get(HTTP_CONNECTION_TIMEOUT).toString()); add(HttpClientUtil.PROP_MAX_CONNECTIONS_PER_HOST, config.get(HTTP_MAX_CONNECTIONS_PER_HOST).toString()); add(HttpClientUtil.PROP_MAX_CONNECTIONS, config.get(HTTP_GLOBAL_MAX_CONNECTIONS).toString()); }}); solrClient = new LBHttpSolrClient(clientParams, config.get(HTTP_URLS)); } else { throw new IllegalArgumentException("Unsupported Solr operation mode: " + mode); } }
// Fragment (truncated — the closing braces are missing from this snippet).
// Assigns a default collection to the primary CloudSolrClient and guards against
// the primary and reindex clients pointing at the same collection.
if (server != null && CloudSolrClient.class.isAssignableFrom(server.getClass())) {
    CloudSolrClient cs = (CloudSolrClient) server;
    // Only assign a default collection if none was configured explicitly.
    if (StringUtils.isBlank(cs.getDefaultCollection())) {
        cs.setDefaultCollection(getPrimaryName());
        // NOTE(review): presumably reindexServer is also a CloudSolrClient here —
        // verify the unchecked cast is safe at this call site.
        if (Objects.equals(cs.getDefaultCollection(), ((CloudSolrClient) reindexServer).getDefaultCollection())) {
            throw new IllegalStateException("Primary and Reindex servers cannot have the same defaultCollection: " + cs.getDefaultCollection());
// Fragment (truncated/garbled — the enclosing method header and at least one
// closing brace are missing from this snippet).
// Swaps the PRIMARY and REINDEX Solr collection aliases so the freshly reindexed
// collection becomes the primary one.
CloudSolrClient reindexCloudClient = (CloudSolrClient) solrConfiguration.getReindexServer();
try {
    primaryCloudClient.connect();
    Aliases aliases = primaryCloudClient.getZkStateReader().getAliases();
    Map<String, String> aliasCollectionMap = aliases.getCollectionAliasMap();
    // Both clients' default collections must be resolvable aliases.
    if (aliasCollectionMap == null || !aliasCollectionMap.containsKey(primaryCloudClient.getDefaultCollection())
            || !aliasCollectionMap.containsKey(reindexCloudClient.getDefaultCollection())) {
        throw new IllegalStateException("Could not determine the PRIMARY or REINDEX "
                + "collection or collections from the Solr aliases.");
    // NOTE(review): the closing brace of the if-block above is missing in this
    // snippet, which would make the statements below unreachable as written.
    String primaryCollectionName = aliasCollectionMap.get(primaryCloudClient.getDefaultCollection());
    String reindexCollectionName = aliasCollectionMap.get(reindexCloudClient.getDefaultCollection());
    // Re-point the primary alias at the reindex collection and vice versa.
    new CollectionAdminRequest.CreateAlias().setAliasName(primaryCloudClient.getDefaultCollection())
            .setAliasedCollections(reindexCollectionName).process(primaryCloudClient);
    new CollectionAdminRequest.CreateAlias().setAliasName(reindexCloudClient.getDefaultCollection())
            .setAliasedCollections(primaryCollectionName).process(reindexCloudClient);
} catch (Exception e) {
// Fragment (garbled paste): the "not alive" close/throw pair appears twice and the
// second `else` below has no matching `if` in this snippet; `i` is declared outside it.
/** Creates a CloudSolrClient against the configured ZooKeeper host and pings it. */
private SolrClient initializeWithCloudSolrServer() throws IOException {
    CloudSolrClient cloudSolrServer = new CloudSolrClient(remoteSolrServerConfiguration.getSolrZkHost());
    cloudSolrServer.setZkConnectTimeout(remoteSolrServerConfiguration.getConnectionTimeout());
    cloudSolrServer.setZkClientTimeout(remoteSolrServerConfiguration.getSocketTimeout());
    cloudSolrServer.setIdField(OakSolrConfigurationDefaults.PATH_FIELD_NAME);
    cloudSolrServer.setDefaultCollection("collection1"); // workaround for first request when the needed collection may not exist
    cloudSolrServer.setDefaultCollection(remoteSolrServerConfiguration.getSolrCollection());
    // Up to 3 ping attempts against the cloud server.
    while (i < 3) {
        try {
            SolrPingResponse ping = cloudSolrServer.ping();
            if (ping != null && 0 == ping.getStatus()) {
                return cloudSolrServer;
            } else {
                cloudSolrServer.close();
                throw new IOException("the found SolrCloud server is not alive");
                // NOTE(review): the duplicated close/throw below is dead code
                // (it follows an unconditional throw) — likely a paste artifact.
                cloudSolrServer.close();
                throw new IOException("the found SolrCloud server is not alive");
            } else {
                // NOTE(review): dangling `else` — its matching `if` is not in this snippet.
                cloudSolrServer.close();
                throw new IOException("could not connect to Zookeeper hosted at " + remoteSolrServerConfiguration.getSolrZkHost());
/**
 * Builds a SolrCloud client from the given connection parameters.
 * The ZooKeeper session timeout is applied to both the client and connect timeouts.
 */
public static SolrClient createCloudSolrClient(Map<String, String> connectionParameters, int zkSessionTimeout) {
    final String zkHost = connectionParameters.get(SolrConnectionParams.ZOOKEEPER);
    final CloudSolrClient client = new CloudSolrClient.Builder().withZkHost(zkHost).build();
    client.setZkClientTimeout(zkSessionTimeout);
    client.setZkConnectTimeout(zkSessionTimeout);
    client.setDefaultCollection(connectionParameters.get(SolrConnectionParams.COLLECTION));
    return client;
}
/**
 * Initializes the writer: reads the SolrCloud address and target collection from
 * the configuration and lazily creates the shared client.
 */
@Override
public void init(Configuration c) throws WriterException {
    final String address = c.get("solr.cloud.address");
    if (address == null) {
        throw new WriterException("Solr server address is not defined.");
    }
    // Defaults to the "logs" collection when none is configured.
    final String targetCollection = c.get("solr.collection", "logs");
    if (client == null) {
        client = new CloudSolrClient(address);
        client.setDefaultCollection(targetCollection);
    }
}
protected static SolrClient getSolrClient() { String zkHostString = "127.0.0.1:9983"; // zkHostString for Solr gettingstarted example return new CloudSolrClient(zkHostString); }
/**
 * Builds a SolrCloud client from the given connection parameters, additionally
 * configuring the document id field used for routing/updates.
 */
public static SolrClient createCloudSolrClient(Map<String, String> connectionParameters, String uniqueKeyField, int zkSessionTimeout) {
    final String zkHost = connectionParameters.get(SolrConnectionParams.ZOOKEEPER);
    final CloudSolrClient client = new CloudSolrClient.Builder().withZkHost(zkHost).build();
    // Session timeout covers both the client and the initial connect.
    client.setZkClientTimeout(zkSessionTimeout);
    client.setZkConnectTimeout(zkSessionTimeout);
    client.setDefaultCollection(connectionParameters.get(SolrConnectionParams.COLLECTION));
    client.setIdField(uniqueKeyField);
    return client;
}
// Fragment: CLOUD branch of the Solr5Index client setup (the enclosing if/else and
// method header are outside this snippet; the trailing `else if` is cut off).
// Enable Kerberos (SPNEGO) authentication for all Solr HTTP clients.
HttpClientUtil.setConfigurer(new Krb5HttpClientConfigurer());
String zookeeperUrl = config.get(Solr5Index.ZOOKEEPER_URL);
CloudSolrClient cloudServer = new CloudSolrClient(zookeeperUrl, true);
cloudServer.setZkConnectTimeout(config.get(ZOOKEEPER_CONNECT_TIMEOUT));
cloudServer.setZkClientTimeout(config.get(ZOOKEEPER_SESSION_TIMEOUT));
cloudServer.connect();
solrClient = cloudServer;
} else if (mode==Mode.HTTP) {
/**
 * Populates the per-shard checkpoint map for this stream's collection: either the
 * configured initial checkpoint (when >= 0) or one derived from the shard's state.
 */
private void getCheckpoints() throws IOException {
    this.checkpoints = new HashMap<>();
    final ZkStateReader stateReader = cloudSolrClient.getZkStateReader();
    final ClusterState clusterState = stateReader.getClusterState();
    final Set<String> liveNodes = clusterState.getLiveNodes();
    for (Slice shard : CloudSolrStream.getSlices(this.collection, stateReader, false)) {
        final long cp = (initialCheckpoint > -1) ? initialCheckpoint : getCheckpoint(shard, liveNodes);
        this.checkpoints.put(shard.getName(), cp);
    }
}
// Connects to SolrCloud via the local ZooKeeper and looks up the leader replica of
// collection1/shard1. try-with-resources replaces the manual finally{close()} so
// the client is always released, even on failure during construction of the state.
try (CloudSolrClient server = new CloudSolrClient("localhost:2181")) {
    //probably this is the line that missed from your code...
    server.connect();
    final ClusterState clusterState = server.getZkStateReader().getClusterState();
    //EVRIKA! collection object
    final DocCollection collection = clusterState.getCollection("collection1");
    // and get the leader of the collection...pretty easy.
    Replica leader = clusterState.getLeader("collection1", "shard1");
} catch (Exception e) {
    // do your stuff
}
// Fragment (truncated): polls cluster state while waiting for recovering replicas.
// `shard`, `sawLiveRecovering` and the enclosing loop/method are not in this snippet.
ZkStateReader zkStateReader = server.getZkStateReader();
try {
    boolean cont = true;
    // Force-refresh the cached cluster state before inspecting it.
    zkStateReader.updateClusterState(true);
    ClusterState clusterState = zkStateReader.getClusterState();
    Map<String, Slice> slices = clusterState.getSlicesMap(collection);
    // NOTE(review): Guava's checkNotNull(reference, errorMessage) arguments are
    // swapped here — the message string is what gets null-checked (never null) and
    // `slices` becomes the message, so a missing collection is NOT detected.
    Preconditions.checkNotNull("Could not find collection:" + collection, slices);
    String state = shard.getValue().getStr(ZkStateReader.STATE_PROP);
    // NOTE(review): `state` is a String but is compared against Replica.State enum
    // constants — String.equals(enum) is always false; this likely needs
    // Replica.State.RECOVERING.toString() / Replica.State.DOWN.toString().
    if ((state.equals(Replica.State.RECOVERING)
            || state.equals(Replica.State.DOWN))
            && clusterState.liveNodesContain(shard.getValue().getStr(
                    ZkStateReader.NODE_NAME_PROP))) {
        sawLiveRecovering = true;
/**
 * Deletes every document from every collection in the SolrCloud cluster.
 * Only supported when running in CLOUD mode; any failure is wrapped in a
 * PermanentBackendException.
 */
@Override
public void clearStorage() throws BackendException {
    try {
        if (mode != Mode.CLOUD) {
            throw new UnsupportedOperationException("Operation only supported for SolrCloud");
        }
        logger.debug("Clearing storage from Solr: {}", solrClient);
        ZkStateReader stateReader = ((CloudSolrClient) solrClient).getZkStateReader();
        // Refresh the cached cluster state before enumerating collections.
        stateReader.updateClusterState(true);
        for (String collection : stateReader.getClusterState().getCollections()) {
            logger.debug("Clearing collection [{}] in Solr", collection);
            UpdateRequest deleteAll = newUpdateRequest();
            deleteAll.deleteByQuery("*:*");
            solrClient.request(deleteAll, collection);
        }
    } catch (SolrServerException e) {
        logger.error("Unable to clear storage from index due to server error on Solr.", e);
        throw new PermanentBackendException(e);
    } catch (IOException e) {
        logger.error("Unable to clear storage from index due to low-level I/O error.", e);
        throw new PermanentBackendException(e);
    } catch (Exception e) {
        logger.error("Unable to clear storage from index due to general error.", e);
        throw new PermanentBackendException(e);
    }
}
protected int getTotalReplicas(String collection) { ZkStateReader zkStateReader = cloudClient.getZkStateReader(); DocCollection coll = zkStateReader.getClusterState().getCollectionOrNull(collection); if (coll == null) return 0; // support for when collection hasn't been created yet int cnt = 0; for (Slice slices : coll.getSlices()) { cnt += slices.getReplicas().size(); } return cnt; }
/** Delete all collections (and aliases) */ public void deleteAllCollections() throws Exception { try (ZkStateReader reader = new ZkStateReader(solrClient.getZkStateReader().getZkClient())) { reader.createClusterStateWatchersAndUpdate(); // up to date aliases & collections reader.aliasesManager.applyModificationAndExportToZk(aliases -> Aliases.EMPTY); for (String collection : reader.getClusterState().getCollectionStates().keySet()) { CollectionAdminRequest.deleteCollection(collection).process(solrClient); } } }
/**
 * Polls the cluster state until a new ACTIVE leader (different from {@code oldLeader})
 * is elected for the given shard, failing the test when {@code timeOut} expires.
 */
static void waitForNewLeader(CloudSolrClient cloudClient, String shardName, Replica oldLeader, TimeOut timeOut) throws Exception {
    log.info("Will wait for a node to become leader for {} secs", timeOut.timeLeft(SECONDS));
    ZkStateReader zkStateReader = cloudClient.getZkStateReader();
    // NOTE(review): the collection is force-updated via DEFAULT_COLLECTION but read
    // below with the literal "collection1" — confirm these refer to the same collection.
    zkStateReader.forceUpdateCollection(DEFAULT_COLLECTION);
    for (; ; ) {
        ClusterState clusterState = zkStateReader.getClusterState();
        DocCollection coll = clusterState.getCollection("collection1");
        Slice slice = coll.getSlice(shardName);
        // Done once a different, ACTIVE leader is observed.
        if (slice.getLeader() != null && !slice.getLeader().equals(oldLeader) && slice.getLeader().getState() == Replica.State.ACTIVE) {
            log.info("Old leader {}, new leader {}. New leader got elected in {} ms", oldLeader, slice.getLeader(),timeOut.timeElapsed(MILLISECONDS) );
            break;
        }
        if (timeOut.hasTimedOut()) {
            // Dump thread state and the ZK layout to aid debugging before failing.
            Diagnostics.logThreadDumps("Could not find new leader in specified timeout");
            zkStateReader.getZkClient().printLayoutToStdOut();
            fail("Could not find new leader even after waiting for " + timeOut.timeElapsed(MILLISECONDS) + "ms");
        }
        // Brief pause between polls.
        Thread.sleep(100);
    }
}
/**
 * Connects the wrapped CloudSolrClient and returns the current cluster state from
 * its ZooKeeper state reader.
 */
static ClusterState getClusterState(AuthorizedSolrClient<CloudSolrClient> authorizedSolrClient) {
    final CloudSolrClient client = authorizedSolrClient.solrClient;
    client.connect();
    return client.getZkStateReader().getClusterState();
}
/**
 * Renders cluster state as a string: a single collection's state when
 * {@code collection} is non-null, otherwise all collections as indented JSON.
 * NOTE(review): forceUpdateCollection is also invoked when {@code collection}
 * is null — confirm that is intended.
 */
protected String printClusterStateInfo(String collection) throws Exception {
    cloudClient.getZkStateReader().forceUpdateCollection(collection);
    final ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
    if (collection != null) {
        return clusterState.getCollection(collection).toString();
    }
    final CharArr out = new CharArr();
    new JSONWriter(out, 2).write(clusterState.getCollectionsMap());
    return out.toString();
}