DocIterator it = list.iterator();
// Lazily creates and caches the DocIterator over 'page'; subsequent calls
// return the same cached instance (not thread-safe — no synchronization here).
// NOTE(review): the enclosing anonymous class starts outside this view; the
// trailing "};" closes that declaration.
DocIterator iterator() { if (iterator == null) { iterator = page.iterator(); } return iterator; } };
/**
 * Takes a list of docs (the doc ids actually) and a set of fields to load,
 * and reads them into an array of Documents.
 *
 * @param docs   destination array; one Document is stored per slot
 * @param ids    the doc ids to load, consumed in order
 * @param fields the stored fields to load for each document
 * @throws IOException if reading a document from the index fails
 */
public void readDocs(Document[] docs, DocList ids, Set<String> fields) throws IOException {
  DocIterator docIds = ids.iterator();
  int count = docs.length;
  for (int slot = 0; slot < count; slot++) {
    docs[slot] = doc(docIds.nextDoc(), fields);
  }
}
private Set<Integer> getDocidsFromQuery(Query query, SolrIndexSearcher searcher, int maxRows) throws SyntaxError, IOException { DocList match = searcher.getDocList(query, null, null, 0, maxRows, 0); // only get the first one... Set<Integer> docsIds = new HashSet<Integer>(); DocIterator iterator = match.iterator(); while (iterator.hasNext()) { docsIds.add(iterator.nextDoc()); } return docsIds; }
/**
 * For every collapse-group head document present in {@code docs}, adds that
 * group's collapse count to its entry in {@code result}. Documents that are
 * not group heads (no counter recorded) are left untouched.
 */
public void getResult(NamedList result, DocList docs, CollapseContext collapseContext) {
  DocIterator docIds = docs.iterator();
  while (docIds.hasNext()) {
    int docId = docIds.nextDoc();
    Counter headCounter = documentHeadCount.get(docId);
    if (headCounter != null) {
      NamedList groupResult = getCollapseGroupResult(docId, result);
      groupResult.add("collapseCount", headCounter.getCount());
    }
  }
}
} // closes the enclosing class (its declaration starts outside this view)
@Override public void write(Writer w, SolrQueryRequest req, SolrQueryResponse rsp) throws IOException { SolrIndexSearcher searcher = req.getSearcher(); NamedList nl = rsp.getValues(); int sz = nl.size(); for (int li = 0; li < sz; li++) { Object val = nl.getVal(li); if (val instanceof DocList) { //<co id="co.fuzzy.type-ahead.doclist"/> DocList dl = (DocList) val; DocIterator iterator = dl.iterator(); w.append("<ul>\n"); while (iterator.hasNext()) { int id = iterator.nextDoc(); Document doc = searcher.doc(id, fields); //<co id="co.fuzzy.type-ahead.search"/> String name = doc.get("word"); w.append("<li>" + name + "</li>\n"); } w.append("</ul>\n"); } } } }
/**
 * For each collapse-group head document in {@code docs}, adds the
 * human-readable value of the collapse field to that group's result entry.
 * Documents without an associated collapse group are skipped.
 */
public void getResult(NamedList result, DocList docs, CollapseContext collapseContext) {
  Map<Integer, CollapseGroup> headGroups = collapseContext.getDocumentHeadCollapseGroupAssociation();
  DocIterator docIds = docs.iterator();
  while (docIds.hasNext()) {
    int docId = docIds.nextDoc();
    CollapseGroup group = headGroups.get(docId);
    if (group != null) {
      NamedList groupResult = getCollapseGroupResult(docId, result);
      // Convert the indexed form of the group key back to its readable form.
      groupResult.add("fieldValue", collapseFieldType.indexedToReadable(group.getKey()));
    }
  }
}
/**
 * For each collapse-group head document in {@code docs}, evaluates every
 * registered aggregate function against the group and records non-null
 * results under that document's "aggregate" sub-list in {@code result}.
 * The sub-list is created lazily on the first non-null function result.
 */
public void getResult(NamedList result, DocList docs, CollapseContext collapseContext) {
  Map<Integer, CollapseGroup> headGroups = collapseContext.getDocumentHeadCollapseGroupAssociation();
  for (DocIterator docIds = docs.iterator(); docIds.hasNext();) {
    int docId = docIds.nextDoc();
    CollapseGroup group = headGroups.get(docId);
    if (group == null) {
      continue; // not a collapse-group head
    }
    for (Map.Entry<AggregateField, AggregateFunction> fn : functions.entrySet()) {
      String functionResult = fn.getValue().calculate(group);
      if (functionResult == null) {
        continue; // function produced nothing for this group
      }
      NamedList groupResult = getCollapseGroupResult(docId, result);
      NamedList aggregates = (NamedList) groupResult.get("aggregate");
      if (aggregates == null) {
        aggregates = new NamedList<Object>();
        groupResult.add("aggregate", aggregates);
      }
      aggregates.add(fn.getKey().getUniqueName(), functionResult);
    }
  }
}
// Writes every document of 'ids' via writeDoc(). Scores are emitted only when
// the caller asked for them AND the DocList actually carries scores
// (includeScore is narrowed by ids.hasScores() before the loop); otherwise
// 0.0f is passed as a placeholder.
// NOTE(review): this method belongs to an anonymous class whose declaration
// starts outside this view; the trailing "}, fields );" closes that class
// and the enclosing method call.
public void writeDocs(boolean includeScore, Set<String> fields) throws IOException { SolrIndexSearcher searcher = request.getSearcher(); DocIterator iterator = ids.iterator(); int sz = ids.size(); includeScore = includeScore && ids.hasScores(); for (int i=0; i<sz; i++) { int id = iterator.nextDoc(); Document doc = searcher.doc(id, fields); writeDoc(null, doc, fields, (includeScore ? iterator.score() : 0.0f), includeScore); } } }, fields );
/**
 * Returns a random set of documents from the index. Mainly for testing purposes.
 *
 * @param req the request (its "rows" param caps the number of documents returned)
 * @param rsp the response the random documents are added to
 * @throws IOException on index access failure
 */
private void handleRandomSearch(SolrQueryRequest req, SolrQueryResponse rsp) throws IOException {
  SolrIndexSearcher searcher = req.getSearcher();
  Query query = new MatchAllDocsQuery();
  DocList docList = searcher.getDocList(query, getFilterQueries(req), Sort.RELEVANCE, 0, numberOfCandidateResults, 0);
  int paramRows = Math.min(req.getParams().getInt("rows", defaultNumberOfResults), docList.size());
  if (docList.size() < 1) {
    rsp.add("Error", "No documents in index");
    return;
  }
  LinkedList list = new LinkedList();
  // BUG FIX: the previous implementation relied on list.contains(doc) to
  // reject duplicates, but searcher.doc() returns a fresh Document instance
  // on every call and Lucene's Document does not override equals(), so the
  // check compared identities and never matched — the "set" could contain
  // the same document multiple times. Track the chosen internal doc ids
  // instead; Set.add() returns false for a repeated id.
  java.util.Set<Integer> chosenIds = new java.util.HashSet<Integer>();
  while (list.size() < paramRows) {
    // Pick one random hit out of the candidate list.
    DocList auxList = docList.subset((int) (Math.random() * docList.size()), 1);
    for (DocIterator it = auxList.iterator(); it.hasNext(); ) {
      int docId = it.nextDoc();
      if (chosenIds.add(docId)) {
        list.add(searcher.doc(docId));
      }
    }
  }
  rsp.addResponse(list);
}
for (DocIterator i = docs.iterator(); i.hasNext();) { int id = i.nextDoc(); if (!docHeadCollapseGroupAssoc.containsKey(id)) {
private MLTResult getMoreLikeTheseFromQuery(SolrQueryResponse rsp, SolrParams params, int flags, String q, Query query, SortSpec sortSpec, List<Query> targetFqFilters, List<Query> mltFqFilters, SolrIndexSearcher searcher, MoreLikeThisHelper mlt, int start, int rows) throws IOException, SyntaxError { boolean includeMatch = params.getBool(MoreLikeThisParams.MATCH_INCLUDE, true); int matchOffset = params.getInt(MoreLikeThisParams.MATCH_OFFSET, 0); // Find the base match DocList match = searcher.getDocList(query, targetFqFilters, null, matchOffset, 10000, flags); // only get the first one... if(match.matches() == 0){ throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, String.format("MoreLikeThis was unable to find any documents matching the query: '%s'.", q)); } if (includeMatch) { rsp.add("match", match); } // This is an iterator, but we only handle the first match DocIterator iterator = match.iterator(); if (iterator.hasNext()) { // do a MoreLikeThis query for each document in results return mlt.getMoreLikeTheseFromDocs(iterator, start, rows, mltFqFilters, flags, sortSpec.getSort()); } return null; }
private MLTResult expandQueryAndReExecute(SolrQueryResponse rsp, SolrParams params, int maxDocumentsToMatch, int flags, String q, Query seedQuery, SortSpec sortSpec, List<Query> targetFqFilters, List<Query> mltFqFilters, SolrIndexSearcher searcher, UnsupervisedFeedbackHelper uff, int start, int rows) throws IOException, SyntaxError { boolean includeMatch = params.getBool(UnsupervisedFeedbackParams.MATCH_INCLUDE, true); int matchOffset = params.getInt(UnsupervisedFeedbackParams.MATCH_OFFSET, 0); // Find the base match DocList match = searcher.getDocList(seedQuery, targetFqFilters, null, matchOffset, maxDocumentsToMatch, flags); // only get the first one... if(match.matches() == 0){ throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, String.format("Unsupervised feedback handler was unable to find any documents matching the seed query: '%s'.", q)); } if (includeMatch) { rsp.add("match", match); } // This is an iterator, but we only handle the first match DocIterator iterator = match.iterator(); MLTResult mltResult = null; if (iterator.hasNext()) { // do a MoreLikeThis query for each document in results mltResult = uff.expandQueryAndReExecute(iterator, seedQuery, start, rows, mltFqFilters, flags, sortSpec.getSort()); } return mltResult; }
/**
 * Runs a MoreLikeThis lookup for every document in {@code docs} and maps each
 * document's printable unique key to its list of similar documents.
 *
 * @param docs  the source documents
 * @param rows  maximum similar documents to return per source document
 * @param flags DocList flags passed through to the per-document lookup
 * @throws IOException on index access failure
 */
public NamedList<DocList> getMoreLikeThese(DocList docs, int rows, int flags) throws IOException {
  IndexSchema schema = searcher.getSchema();
  NamedList<DocList> similar = new SimpleOrderedMap<DocList>();
  for (DocIterator docIds = docs.iterator(); docIds.hasNext();) {
    int docId = docIds.nextDoc();
    DocListAndSet hits = getMoreLikeThis(docId, 0, rows, null, null, flags);
    String uniqueKey = schema.printableUniqueKey(reader.document(docId));
    similar.add(uniqueKey, hits.docList);
  }
  return similar;
}
/**
 * Serializes a DocList in javabin format: a SOLRDOCLST tag, a header array of
 * [matches, offset, maxScore-or-null], then an array of SolrDocuments. Scores
 * are attached per document only when requested and actually present.
 *
 * @throws IOException if encoding or document retrieval fails
 */
public void writeDocList(DocList ids, JavaBinCodec codec) throws IOException {
  codec.writeTag(JavaBinCodec.SOLRDOCLST);
  // Header: total matches, result offset, and the max score (null when absent).
  List header = new ArrayList(3);
  header.add((long) ids.matches());
  header.add((long) ids.offset());
  Float maxScore = null;
  if (includeScore && ids.hasScores()) {
    maxScore = ids.maxScore();
  }
  header.add(maxScore);
  codec.writeArray(header);
  int size = ids.size();
  codec.writeTag(JavaBinCodec.ARR, size);
  // Lazily resolve searcher/schema from the request when not injected.
  if (searcher == null) {
    searcher = solrQueryRequest.getSearcher();
  }
  if (schema == null) {
    schema = solrQueryRequest.getSchema();
  }
  DocIterator docIds = ids.iterator();
  for (int i = 0; i < size; i++) {
    int docId = docIds.nextDoc();
    Document doc = searcher.doc(docId, returnFields);
    SolrDocument solrDoc = getDoc(doc);
    if (includeScore && ids.hasScores()) {
      solrDoc.addField("score", docIds.score());
    }
    codec.writeSolrDocument(solrDoc);
  }
}
/** * Generates an list of Explanations for each item in a list of docs. * * @param query The Query you want explanations in the context of * @param docs The Documents you want explained relative that query */ public static NamedList getExplainList(Query query, DocList docs, SolrIndexSearcher searcher, IndexSchema schema) throws IOException { NamedList explainList = new SimpleOrderedMap(); DocIterator iterator = docs.iterator(); for (int i=0; i<docs.size(); i++) { int id = iterator.nextDoc(); Explanation explain = searcher.explain(query, id); Document doc = searcher.doc(id); String strid = schema.printableUniqueKey(doc); // String docname = ""; // if (strid != null) docname="id="+strid+","; // docname = docname + "internal_docid="+id; explainList.add(strid, "\n" +explain.toString()); } return explainList; }
if (o instanceof DocList) { DocList docs = (DocList)o; for (DocIterator iter = docs.iterator(); iter.hasNext();) { newSearcher.doc(iter.nextDoc());
DocIterator iterator = ids.iterator(); for (int i=0; i<sz; i++) { int id = iterator.nextDoc();
/**
 * When enabled, predicts top terms for every document in the current result
 * list from its per-field term frequencies, and adds the predictions plus the
 * elapsed time to the response under this component's prefix.
 *
 * @throws IOException on index access failure
 */
@Override
public void process(ResponseBuilder rb) throws IOException {
  if (!isEnabled(rb)) {
    return;
  }
  long startTime = System.currentTimeMillis();
  SolrParams params = rb.req.getParams();
  int topN = getTopN(params);
  boolean binary = getBinary(params);
  boolean logTfs = getLogTfs(params);
  boolean includeExisting = getIncludeExisting(params);
  final SolrIndexSearcher searcher = rb.req.getSearcher();
  IndexReader indexReader = searcher.getIndexReader();
  Analyzer analyzer = searcher.getSchema().getIndexAnalyzer();
  String uniqueKeyField = searcher.getSchema().getUniqueKeyField().getName();
  NamedList<NamedList<Double>> topPredictions = new NamedList<NamedList<Double>>();
  DocIterator docIds = rb.getResults().docList.iterator();
  while (docIds.hasNext()) {
    int docNum = docIds.nextDoc();
    // Per-field term frequency counts drive the prediction for this document.
    Map<String, Map<String, Integer>> termFreqs = getFieldTermFrequencyCounts(fields, indexReader, analyzer, docNum);
    NamedList<Double> predictions = predict(termFreqs, topN, binary, logTfs, includeExisting);
    String uniqueFieldValue = getUniqueKeyFieldValue(indexReader, analyzer, uniqueKeyField, docNum);
    topPredictions.add(String.format("%s:%s", uniqueKeyField, uniqueFieldValue), predictions);
  }
  NamedList<Object> results = new NamedList<Object>();
  results.add("Time", System.currentTimeMillis() - startTime);
  results.add("values", topPredictions);
  rb.rsp.add(getPrefix(), results);
}
list.setStart(docs.offset()); DocIterator dit = docs.iterator();