@Override public Query getFieldQuery(final QParser parser, final SchemaField field, final String externalVal) { // Not useful for now in SIREn throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Not implemented operation." + field.getName()); }
/**
 * Resolves the {@code ValueSource} for the field named by {@code arg}.
 * Legacy date field types are rejected because they cannot be used as a
 * numeric value source here.
 *
 * @param fp the function query parser providing request/schema access
 * @param arg the field name, or null (in which case null is returned)
 * @return the field type's value source, or null when {@code arg} is null
 * @throws SolrException (BAD_REQUEST) if the field is a legacy date type
 */
public ValueSource getValueSource(FunctionQParser fp, String arg) {
  if (arg == null) {
    return null;
  }
  final SchemaField schemaField = fp.req.getSchema().getField(arg);
  final Class<?> typeClass = schemaField.getType().getClass();
  // Exact class comparison, as in the original: subclasses are not matched.
  if (typeClass == DateField.class || typeClass == LegacyDateField.class) {
    throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
        "Can't use ms() function on non-numeric legacy date field " + arg);
  }
  return schemaField.getType().getValueSource(schemaField, fp);
}
protected void addSingleField(SchemaField sfield, String val, float boost) { //System.out.println("###################ADDING FIELD "+sfield+"="+val); // we don't check for a null val ourselves because a solr.FieldType // might actually want to map it to something. If createField() // returns null, then we don't store the field. Field field = sfield.createField(val, boost); if (field != null) { if (!sfield.multiValued()) { String oldValue = map.put(sfield.getName(), val); if (oldValue != null) { throw new SolrException( SolrException.ErrorCode.BAD_REQUEST,"ERROR: multiple values encountered for non multiValued field " + sfield.getName() + ": first='" + oldValue + "' second='" + val + "'"); } } // field.setBoost(boost); doc.add(field); } }
/**
 * Reports whether phrase queries can be issued against the given field:
 * it must be a {@code TextField} that keeps both term frequencies and
 * positions (neither may be omitted).
 *
 * @param field the schema field to inspect; may be null
 * @return true only for a non-null, position-enabled TextField
 */
public boolean isFieldPhraseQueryable(final SchemaField field) {
  if (field == null) {
    return false;
  }
  final FieldType type = field.getType();
  if (!(type instanceof TextField)) {
    return false;
  }
  // Positions must be present both individually and via the combined flag.
  return !field.omitPositions() && !field.omitTermFreqAndPositions();
}
/**
 * Builds the per-field analyzer cache using each field type's
 * <em>query-time</em> analyzer (overrides the index-time variant).
 *
 * @return a map from field name to its query analyzer
 */
@Override
protected HashMap<String, Analyzer> analyzerCache() {
  final HashMap<String, Analyzer> byFieldName = new HashMap<String, Analyzer>();
  for (final SchemaField schemaField : getFields().values()) {
    byFieldName.put(schemaField.getName(), schemaField.getType().getQueryAnalyzer());
  }
  return byFieldName;
}
/**
 * Builds the per-field analyzer cache using each field type's
 * <em>index-time</em> analyzer.
 *
 * @return a map from field name to its analyzer
 */
protected HashMap<String, Analyzer> analyzerCache() {
  final HashMap<String, Analyzer> byFieldName = new HashMap<String, Analyzer>();
  for (final SchemaField schemaField : getFields().values()) {
    byFieldName.put(schemaField.getName(), schemaField.getType().getAnalyzer());
  }
  return byFieldName;
}
// NOTE(review): condensed/truncated fragment of a tagger-style request handler.
// Visible behavior: requires a 'field' request param (RuntimeException if absent),
// allocates a FixedBitSet sized to searcher.maxDoc() for matched doc ids,
// tokenizes the posted input with the field's query analyzer (try-with-resources
// closes the TokenStream), and looks up the field's Terms via the slow atomic
// reader. The consecutive throw statements appear to belong to separate guard
// branches elided by the condensation — do not read them as sequential code.
// The raw `List`/`ArrayList` without type parameters predates or ignores generics.
final String indexedField = req.getParams().get("field"); if (indexedField == null) throw new RuntimeException("required param 'field'"); throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, getClass().getSimpleName()+" does not support multiple ContentStreams"); throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, getClass().getSimpleName()+" requires text to be POSTed to it"); initOffsetCorrector(htmlOffsetAdjust, xmlOffsetAdjust, inputString, nonTaggableTags); final SolrIndexSearcher searcher = req.getSearcher(); final FixedBitSet matchDocIdsBS = new FixedBitSet(searcher.maxDoc()); final List tags = new ArrayList(2000); Analyzer analyzer = req.getSchema().getField(indexedField).getType().getQueryAnalyzer(); try (TokenStream tokenStream = analyzer.tokenStream("", inputReader)) { Terms terms = searcher.getSlowAtomicReader().terms(indexedField); if (terms == null) throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, idSchemaField.getType().getValueSource(idSchemaField, null));
// NOTE(review): truncated fragment of a search component's prepare/process logic
// (braces unbalanced due to elided lines). Visible behavior: returns early unless
// the component is enabled (COMPONENT_NAME param, default true); rejects a
// negative 'start'; reads TIME_ALLOWED (default -1 = unlimited); and when shard
// 'ids' are supplied, splits them on ',' and resolves each external id to a
// Lucene doc id via the unique-key field's toInternal() + getFirstMatch(),
// keeping only ids that matched (id >= 0). Filters from the ResponseBuilder are
// appended to the query list before computing the DocSet.
SolrParams params = req.getParams(); if (!params.getBool(COMPONENT_NAME, true)) { return; SolrIndexSearcher searcher = req.getSearcher(); throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "'start' parameter cannot be negative"); long timeAllowed = (long)params.getInt( CommonParams.TIME_ALLOWED, -1 ); String ids = params.get(ShardParams.IDS); if (ids != null) { SchemaField idField = req.getSchema().getUniqueKeyField(); List<String> idArr = StrUtils.splitSmart(ids, ",", true); int[] luceneIds = new int[idArr.size()]; int docs = 0; for (int i=0; i<idArr.size(); i++) { int id = req.getSearcher().getFirstMatch( new Term(idField.getName(), idField.getType().toInternal(idArr.get(i)))); if (id >= 0) luceneIds[docs++] = id; List<Query> filters = rb.getFilters(); if (filters != null) queries.addAll(filters); res.docSet = searcher.getDocSet(queries);
// NOTE(review): truncated fragment of a QA/passage-ranking component. Visible
// behavior: requires the rewritten query to be a Lucene SpanNearQuery (SERVER_ERROR
// otherwise, reporting the actual class); iterates its Spans over the index reader
// to collect and rank passages; wraps CloneNotSupportedException in a SERVER_ERROR
// with the cause preserved; then emits up to QA_ROWS (default 5) top passages,
// attaching the unique-key field value and the stored field text for each passage's
// document. Braces are unbalanced due to elided lines.
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Illegal query type. The incoming query must be a Lucene SpanNearQuery and it was a " + origQuery.getClass().getName()); SolrIndexSearcher searcher = rb.req.getSearcher(); IndexReader reader = searcher.getIndexReader(); Spans spans = sQuery.getSpans(reader); addPassage(tvm.passage, rankedPassages, termWeights, bigramWeights, adjWeight, secondAdjWeight, bigramWeight); } catch (CloneNotSupportedException e) { throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Internal error cloning Passage", e); int rows = params.getInt(QA_ROWS, 5); SchemaField uniqField = rb.req.getSchema().getUniqueKeyField(); if (rankedPassages.size() > 0) { int size = Math.min(rows, rankedPassages.size()); String idValue; if (uniqField != null) { idName = uniqField.getName(); fields.add(idName); passNL.add("field", passage.field); String fldValue = searcher.doc(passage.lDocId, fields).get(passage.field); if (fldValue != null) {
// NOTE(review): truncated fragment of a Luke-style request handler body (the
// method's opening brace and most of its body are elided). Visible behavior:
// reads NUMTERMS (with a default) and an optional DOC_ID; when DOC_ID is absent
// but an ID param is present, converts the external id to internal form via the
// unique-key field type and resolves it with getFirstMatch(), throwing NOT_FOUND
// when the document cannot be located (two distinct not-found messages suggest
// separate branches for the ID-param and docId paths).
@Override public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws Exception IndexSchema schema = req.getSchema(); SolrIndexSearcher searcher = req.getSearcher(); IndexReader reader = searcher.getReader(); SolrParams params = req.getParams(); int numTerms = params.getInt( NUMTERMS, DEFAULT_COUNT ); Integer docId = params.getInt( DOC_ID ); if( docId == null && params.get( ID ) != null ) { SchemaField uniqueKey = schema.getUniqueKeyField(); String v = uniqueKey.getType().toInternal( params.get(ID) ); Term t = new Term( uniqueKey.getName(), v ); docId = searcher.getFirstMatch( t ); if( docId < 0 ) { throw new SolrException( SolrException.ErrorCode.NOT_FOUND, "Can't find document: "+params.get( ID ) ); throw new SolrException( SolrException.ErrorCode.NOT_FOUND, "Can't find document: "+docId );
// NOTE(review): truncated fragment of document-fetch logic with highlighting
// support. Visible behavior: when highlighting is enabled for the request, the
// unique-key field (if any, and not already requested) is added to the stored
// field filter so highlighted docs can be keyed; the first doc id is then loaded
// through the reader with that field selector. 'highligher' is a pre-existing
// typo for 'highlighter' — left as-is since only comments may change here.
SolrHighlighter highligher = rb.req.getCore().getHighlighter(); if (highligher.isHighlightingEnabled(rb.req.getParams())) SchemaField keyField = rb.req.getSearcher().getSchema().getUniqueKeyField(); if (null != keyField) if (!returnFields.contains(keyField)) fieldFilter.add(ByteBufferUtil.bytes(keyField.getName())); rb.req.getSearcher().getReader().document(docIds.get(0), selector);
// NOTE(review): truncated fragment of QueryElevationComponent.inform(SolrCore)
// (the opening brace and several closing braces are elided). Visible behavior:
// resolves an optional FIELD_TYPE init arg to a FieldType (SERVER_ERROR if
// unknown) and takes its query analyzer; requires the schema to declare a
// uniqueKeyField (SERVER_ERROR otherwise) and interns its name as idField;
// finally warms the elevation map from the newest searcher's reader inside a
// try/finally (the finally body is elided — presumably it releases searchHolder).
public void inform(SolrCore core) String a = initArgs.get( FIELD_TYPE ); if( a != null ) { FieldType ft = core.getSchema().getFieldTypes().get( a ); if( ft == null ) { throw new SolrException( SolrException.ErrorCode.SERVER_ERROR, "Unknown FieldType: '"+a+"' used in QueryElevationComponent" ); analyzer = ft.getQueryAnalyzer(); SchemaField sf = core.getSchema().getUniqueKeyField(); if( sf == null ) { throw new SolrException( SolrException.ErrorCode.SERVER_ERROR, "QueryElevationComponent requires the schema to have a uniqueKeyField" ); idField = StringHelper.intern(sf.getName()); try { searchHolder = core.getNewestSearcher(false); IndexReader reader = searchHolder.get().getReader(); getElevationMap( reader, core ); } finally {
// NOTE(review): truncated fragment of span-query construction for a QA parser.
// Visible behavior: resolves the QUERY_FIELD param to a SchemaField (SERVER_ERROR
// "Undefined field" when absent from the schema), tokenizes the query string with
// that field's query analyzer, and builds a SpanNearQuery from the accumulated
// SpanQuery list with slop from QAParams.SLOP (default 10) and in-order=true.
// The bare `throw new ParseException(...)` belongs to an elided catch branch.
String mt = atm.get(type); String field = params.get(QUERY_FIELD); SchemaField sp = req.getSchema().getFieldOrNull(field); if (sp == null) { throw new SolrException(ErrorCode.SERVER_ERROR,"Undefined field: "+field); Analyzer analyzer = sp.getType().getQueryAnalyzer(); TokenStream ts = analyzer.tokenStream(field, new StringReader(qstr)); throw new ParseException(e.getLocalizedMessage()); return new SpanNearQuery(sql.toArray(new SpanQuery[sql.size()]), params.getInt(QAParams.SLOP, 10), true);//<co id="qqp.spanNear"/>
// NOTE(review): truncated fragment of TermVectorComponent.process (closing braces
// elided). Visible behavior: no-op unless explicitly enabled (COMPONENT_NAME,
// default false); registers the TERM_VECTORS response section; reads the TF,
// POSITIONS, OFFSETS, and DF boolean options (all default false); then obtains
// the searcher/reader/schema and the unique-key field name used to label each
// document's term vector entry.
public void process(ResponseBuilder rb) throws IOException { SolrParams params = rb.req.getParams(); if (!params.getBool(COMPONENT_NAME, false)) { return; rb.rsp.add(TERM_VECTORS, termVectors); boolean termFreq = params.getBool(TermVectorParams.TF, false); boolean positions = params.getBool(TermVectorParams.POSITIONS, false); boolean offsets = params.getBool(TermVectorParams.OFFSETS, false); boolean docFreq = params.getBool(TermVectorParams.DF, false); iter = list.iterator(); SolrIndexSearcher searcher = rb.req.getSearcher(); IndexReader reader = searcher.getReader(); IndexSchema schema = rb.req.getSchema(); String uniqFieldName = schema.getUniqueKeyField().getName();
// NOTE(review): truncated fragment of IndexSchema field loading. Visible behavior:
// creates each SchemaField and registers it by name, logging duplicate definitions;
// collects fields with default values and required fields into their respective
// lists; warns on duplicate dynamic-field regex definitions; checks that the
// defaultSearchField exists and is indexed; and resolves the uniqueKey node to an
// indexed field, caching its name and type. The "no uniqueKey specified" warning
// appears attached to the wrong condition here — likely an artifact of the elided
// lines rather than the real control flow; verify against the full file.
SchemaField f = SchemaField.create(name,ft,args); SchemaField old = fields.put(f.getName(),f); if( old != null ) { String msg = "[schema.xml] Duplicate field definition for '" + f.getName() + "' ignoring: "+old.toString(); if( f.getDefaultValue() != null ) { log.debug(name+" contains default value: " + f.getDefaultValue()); fieldsWithDefaultValue.add( f ); if (f.isRequired()) { log.debug(name+" is required in this schema"); requiredFields.add(f); if( df.regex.equals( f.name ) ) { String msg = "[schema.xml] Duplicate DynamicField definition for '" + f.getName() + "' ignoring: "+f.toString(); SchemaField defaultSearchField = getFields().get(defaultSearchFieldName); if ((defaultSearchField == null) || !defaultSearchField.indexed()) { log.warn("no uniqueKey specified in schema."); } else { uniqueKeyField=getIndexedField(node.getNodeValue().trim()); uniqueKeyFieldName=uniqueKeyField.getName(); uniqueKeyFieldType=uniqueKeyField.getType(); log.info("unique key field: "+uniqueKeyFieldName);
// NOTE(review): truncated fragment of an unsupervised-feedback request handler
// (method opening brace elided). Visible behavior: configures Solr parameters
// around the schema's unique-key field name, collects FQ filters, wraps query
// SyntaxError as a BAD_REQUEST, and rejects requests lacking a ?q= query with a
// BAD_REQUEST. The final message's "requires either a query ... ." phrasing with
// no alternative listed suggests truncated wording in the original.
@Override public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws Exception SolrIndexSearcher searcher = req.getSearcher(); SchemaField uniqueKeyField = searcher.getSchema().getUniqueKeyField(); ModifiableSolrParams params = new ModifiableSolrParams(req.getParams()); configureSolrParameters(req, params, uniqueKeyField.getName()); mltFqFilters = getFilters(req, UnsupervisedFeedbackParams.FQ); } catch (SyntaxError e) { throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, e); throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Dice unsupervised feedback handler requires either a query (?q=) to find similar documents.");
// NOTE(review): truncated fragment of SirenField.createField's validation chain
// (the last throw is cut off mid-statement). Visible contract enforced, each
// violation raising a SERVER_ERROR naming the field: the field must be indexed,
// must NOT be multivalued, must omit norms, must NOT omit term frequencies and
// positions, must NOT omit positions, and (per the final truncated guard) must
// not store term vectors.
@Override public IndexableField createField(final SchemaField field, final Object value, final float boost) { if (!field.indexed()) { throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "SirenField instances must be indexed: " + field.getName()); if (field.multiValued()) { throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "SirenField instances can not be multivalued: " + field.getName()); if (!field.omitNorms()) { throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "SirenField instances must omit norms: " + field.getName()); if (field.omitTermFreqAndPositions()) { throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "SirenField instances must not omit term " + "frequencies and positions: " + field.getName()); if (field.omitPositions()) { throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "SirenField instances must not omit term " + "positions: " + field.getName()); if (field.storeTermVector()) { throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
// NOTE(review): truncated fragment of CarrotClusteringEngine initialization.
// Visible behavior: resolves the configured clustering algorithm class via the
// core's resource loader, rethrowing any SolrException whose cause is not a
// ClassNotFoundException; logs the engine name (falling back to a placeholder);
// loads default attribute values, wrapping read failures as SERVER_ERROR; and
// requires the schema to declare a uniqueKeyField, whose name becomes the
// engine's document id field. Braces unbalanced due to elided lines.
String carrotAlgorithmClassName = initParams.get(CarrotParams.ALGORITHM); try { this.clusteringAlgorithmClass = core.getResourceLoader().findClass( carrotAlgorithmClassName, IClusteringAlgorithm.class); } catch (SolrException s) { if (!(s.getCause() instanceof ClassNotFoundException)) { throw s; String componentName = initParams.get(ClusteringEngine.ENGINE_NAME); log.info("Initializing Clustering Engine '" + MoreObjects.firstNonNull(componentName, "<no 'name' attribute>") + "'"); initAttributes.putAll(defaultSet.getAttributeValues()); } catch (Exception e) { throw new SolrException(ErrorCode.SERVER_ERROR, "Could not read attributes XML for clustering component: " + componentName, e); SchemaField uniqueField = core.getLatestSchema().getUniqueKeyField(); if (uniqueField == null) { throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, CarrotClusteringEngine.class.getSimpleName() + " requires the schema to have a uniqueKeyField"); this.idFieldName = uniqueField.getName();
// NOTE(review): truncated fragment of legacy date-range faceting. Visible
// behavior: for each facet.date field, requires the schema field to be a
// DateField (BAD_REQUEST otherwise), then parses the required facet.date.start
// parameter as a date-math expression relative to a single NOW captured once
// for all fields (keeping ranges consistent across the request); a parse
// failure is rewrapped as BAD_REQUEST with the offending string and the cause
// preserved. The trailing `= required.getFieldParam(f, FACET_DATE_END)` is the
// severed tail of the analogous 'end' parameter lookup.
final String[] fields = params.getParams(FacetParams.FACET_DATE); final Date NOW = new Date(); final IndexSchema schema = searcher.getSchema(); for (String f : fields) { parseParams(FacetParams.FACET_DATE, f); final SchemaField sf = schema.getField(f); if (! (sf.getType() instanceof DateField)) { throw new SolrException (SolrException.ErrorCode.BAD_REQUEST, "Can not date facet on a field which is not a DateField: " + f); final DateField ft = (DateField) sf.getType(); final String startS = required.getFieldParam(f,FacetParams.FACET_DATE_START); final Date start; try { start = ft.parseMath(NOW, startS); } catch (SolrException e) { throw new SolrException (SolrException.ErrorCode.BAD_REQUEST, "date facet 'start' is not a valid Date string: " + startS, e); = required.getFieldParam(f,FacetParams.FACET_DATE_END);
@Override public Field createField(SchemaField field, String externalVal, float boost) { boolean indexed = field.indexed(); boolean stored = field.stored(); if (!indexed && !stored) { if (log.isTraceEnabled()) log.trace("Ignoring unindexed/unstored field: " + field); return null; } int ps = precisionStep; byte[] arr=null; TokenStream ts=null; long time = super.parseMath(null, externalVal).getTime(); if (stored) arr = TrieField.toArr(time); if (indexed) ts = new NumericTokenStream(ps).setLongValue(time); Field f; if (stored) { f = new Field(field.getName(), arr, Field.Store.YES); if (indexed) f.setTokenStream(ts); } else { f = new Field(field.getName(), ts); } // term vectors aren't supported f.setOmitNorms(field.omitNorms()); f.setOmitTermFreqAndPositions(field.omitTf()); f.setBoost(boost); return f; }