Term term = idTerm.createTerm(idFieldType.toInternal(cmd.id));
/**
 * Converts an external (human-readable) value into the term value that
 * will match it in the index.
 *
 * @param val the readable value as supplied by the client
 * @return the internal/indexed form of {@code val}
 */
public String readableToIndexed(String val) {
  // The indexed form is exactly the internal representation for this type.
  final String indexed = toInternal(val);
  return indexed;
}
protected String toTerm(String readableValue) { // needed for frange queries to work properly return ft.toInternal(readableValue); }
protected final Term idTerm(String readableId) { // to correctly create the Term, the string needs to be run // through the Analyzer for that field. return new Term(idField.getName(), idFieldType.toInternal(readableId)); }
/** * Returns a Query instance for doing range searches on this field type. {@link org.apache.solr.search.SolrQueryParser} * currently passes part1 and part2 as null if they are '*' respectively. minInclusive and maxInclusive are both true * currently by SolrQueryParser but that may change in the future. Also, other QueryParser implementations may have * different semantics. * <p/> * Sub-classes should override this method to provide their own range query implementation. They should strive to * handle nulls in part1 and/or part2 as well as unequal minInclusive and maxInclusive parameters gracefully. * * @param parser * @param field the schema field * @param part1 the lower boundary of the range, nulls are allowed. * @param part2 the upper boundary of the range, nulls are allowed * @param minInclusive whether the minimum of the range is inclusive or not * @param maxInclusive whether the maximum of the range is inclusive or not * @return a Query instance to perform range search according to given parameters * * @see org.apache.solr.search.SolrQueryParser#getRangeQuery(String, String, String, boolean) */ public Query getRangeQuery(QParser parser, SchemaField field, String part1, String part2, boolean minInclusive, boolean maxInclusive) { // constant score mode is now enabled per default return new TermRangeQuery( field.getName(), part1 == null ? null : toInternal(part1), part2 == null ? null : toInternal(part2), minInclusive, maxInclusive); }
// Emits the entire remaining input (up to maxChars) as a single token,
// normalized to the field's internal (indexed) form via toInternal().
@Override
public boolean incrementToken() throws IOException {
  clearAttributes();
  // Read one chunk of raw characters from the wrapped Reader.
  int n = input.read(cbuf,0,maxChars);
  if (n<=0) return false; // end of stream — no more tokens
  // Convert the raw text to the internal term representation.
  String s = toInternal(new String(cbuf,0,n));
  termAtt.setTermBuffer(s);
  // Offsets cover the whole chunk; correctOffset maps through any CharFilters.
  offsetAtt.setOffset(correctOffset(0),correctOffset(n));
  return true;
}
}; // closes the enclosing anonymous class — presumably a TokenStream; its header is outside this chunk
/**
 * Deletes a document by its unique id.
 * Only a full delete — from both the pending and the committed sets — is
 * supported: the two validation checks below reject every other flag
 * combination. Statistics counters are updated unconditionally on entry,
 * and error counters on each rejected command.
 */
public void delete(DeleteUpdateCommand cmd) throws IOException {
  deleteByIdCommands.incrementAndGet();
  deleteByIdCommandsCumulative.incrementAndGet();

  // Neither pending nor committed requested: the command would do nothing.
  if (!cmd.fromPending && !cmd.fromCommitted) {
    numErrors.incrementAndGet();
    numErrorsCumulative.incrementAndGet();
    throw new SolrException( SolrException.ErrorCode.BAD_REQUEST,"meaningless command: " + cmd);
  }
  // Deleting from only one of pending/committed is not supported;
  // together with the check above, only fromPending && fromCommitted passes.
  if (!cmd.fromPending || !cmd.fromCommitted) {
    numErrors.incrementAndGet();
    numErrorsCumulative.incrementAndGet();
    throw new SolrException( SolrException.ErrorCode.BAD_REQUEST,"operation not supported" + cmd);
  }

  // Serialize against commits while mutating the IndexWriter.
  iwCommit.lock();
  try {
    openWriter();
    // Convert the external id to internal form so the Term matches indexed values.
    writer.deleteDocuments(idTerm.createTerm(idFieldType.toInternal(cmd.id)));
  } finally {
    iwCommit.unlock();
  }

  // If a time-based autocommit window is configured, schedule a commit within it.
  if( tracker.timeUpperBound > 0 ) {
    tracker.scheduleCommitWithin( tracker.timeUpperBound );
  }
}
/**
 * Counts the documents (within {@code base}) matching each term in a
 * comma-separated list for the given field.
 *
 * @param field the field whose type converts readable terms to indexed form
 * @param termList comma-separated readable term values
 * @return a NamedList mapping each readable term to its document count
 */
private NamedList getListedTermCounts(String field, String termList) throws IOException {
  FieldType ft = searcher.getSchema().getFieldType(field);
  NamedList res = new NamedList();
  // Template term on the target field; per-term instances are derived from it.
  Term fieldTerm = new Term(field);
  for (String readable : StrUtils.splitSmart(termList, ",", true)) {
    // Convert to internal form so the TermQuery matches indexed values.
    Term t = fieldTerm.createTerm(ft.toInternal(readable));
    res.add(readable, searcher.numDocs(new TermQuery(t), base));
  }
  return res;
}
/************** Direct update handler - pseudo code *********** def add(doc, id, allowDups, overwritePending, overwriteCommitted): if not overwritePending and not overwriteCommitted: #special case... no need to check pending set, and we don't keep #any state around about this addition if allowDups: committed[id]=doc #100 return else: #if no dups allowed, we must check the *current* index (pending and committed) if not committed[id]: committed[id]=doc #000 return #001 (searchd addConditionally) if not allowDups and not overwritePending and pending[id]: return del committed[id] #delete from pending and committed 111 011 committed[id]=doc pending[id]=True ****************************************************************/ // could return the number of docs deleted, but is that always possible to know??? public void delete(DeleteUpdateCommand cmd) throws IOException { if (!cmd.fromPending && !cmd.fromCommitted) throw new SolrException( SolrException.ErrorCode.BAD_REQUEST,"meaningless command: " + cmd); if (!cmd.fromPending || !cmd.fromCommitted) throw new SolrException( SolrException.ErrorCode.BAD_REQUEST,"operation not supported" + cmd); String indexedId = idFieldType.toInternal(cmd.id); synchronized(this) { deleteInIndex(indexedId); pset.remove(indexedId); } }
sfc = new ShardFacetCount(); sfc.name = name; sfc.indexed = ftype == null ? sfc.name : ftype.toInternal(sfc.name); sfc.termNum = termNum++; counts.put(name, sfc);
FieldType ft = req.getSchema().getFieldType(field); if (!(ft instanceof TextField)) { String internal = ft.toInternal(queryText); return new TermQuery(new Term(field, internal));
String val = line.substring(delimIndex+1, endIndex); String internalKey = idType.toInternal(key); float fval; try {
/**
 * Returns the indexed (internal) form of this document's unique-key value,
 * computing and caching it on first call.
 * <p>
 * The value is derived from the stored Lucene {@code doc} when present, and
 * then — taking precedence — from the {@code solrDoc} input document when it
 * also carries the unique-key field. Returns {@code null} when the schema has
 * no unique key or no id value can be found.
 *
 * @param schema the index schema supplying the unique-key field
 * @return the internal indexed id, or {@code null} if unavailable
 */
public String getIndexedId(IndexSchema schema) {
  if (indexedId == null) {
    SchemaField sf = schema.getUniqueKeyField();
    if (sf != null) {
      // NOTE: a redundant no-op call to schema.getUniqueKeyField() whose
      // result was discarded has been removed here.
      if (doc != null) {
        // Derive the indexed form from the stored Lucene field.
        Field storedId = doc.getField(sf.getName());
        indexedId = sf.getType().storedToIndexed(storedId);
      }
      if (solrDoc != null) {
        // An explicit value on the input document overrides the stored one.
        SolrInputField field = solrDoc.getField(sf.getName());
        if (field != null) {
          indexedId = sf.getType().toInternal( field.getFirstValue().toString() );
        }
      }
    }
  }
  return indexedId;
}
String val; try { val = toInternal(externalVal); } catch (RuntimeException e) { throw new SolrException( SolrException.ErrorCode.SERVER_ERROR, "Error while creating field '" + field + "' from value '" + externalVal + "'", e, false);
int lim=limit>=0 ? limit : Integer.MAX_VALUE; String startTerm = prefix==null ? "" : ft.toInternal(prefix); TermEnum te = r.terms(new Term(field,startTerm)); TermDocs td = r.termDocs();
for (Map.Entry<String,StatsValues> entry : facetStatsValues.entrySet()) { String termLabel = entry.getKey(); int missingCount = searcher.numDocs(new TermQuery(new Term(f.name, facetType.toInternal(termLabel))), missing); entry.getValue().addMissing(missingCount);
for (int i=0; i<idArr.size(); i++) { int id = req.getSearcher().getFirstMatch( new Term(idField.getName(), idField.getType().toInternal(idArr.get(i)))); if (id >= 0) luceneIds[docs++] = id;
String idString = (String)doc.getFirstValue(CmsSearchField.FIELD_ID); int id = solrQueryRequest.getSearcher().getFirstMatch( new Term(idField.getName(), idField.getType().toInternal(idString))); luceneIds[docs++] = id;
String lower = lowerStr==null ? prefix : (raw ? lowerStr : ft.toInternal(lowerStr)); if (lower == null) lower=""; String upper = upperStr==null ? null : (raw ? upperStr : ft.toInternal(upperStr));
String v = uniqueKey.getType().toInternal( params.get(ID) ); Term t = new Term( uniqueKey.getName(), v ); docId = searcher.getFirstMatch( t );