@Override
protected TokenStream normalize(String fieldName, TokenStream in) {
    // Query-time normalization: fold every term to lower case so that
    // queries match the (lower-cased) index-time analysis chain.
    TokenStream lowered = new LowerCaseFilter(in);
    return lowered;
}
}
@Override
protected TokenStream normalize(String fieldName, TokenStream in) {
    // DEFS and REFS bypass lower-casing (presumably to keep symbol
    // definition/reference lookups case-sensitive — matches index-time
    // handling). All other fields are folded to lower case.
    if (fieldName.equals(QueryBuilder.DEFS) || fieldName.equals(QueryBuilder.REFS)) {
        return in;
    }
    return new LowerCaseFilter(in);
}
}
@Override
protected TokenStream normalize(String fieldName, TokenStream in) {
    // DEFS and REFS bypass lower-casing (presumably to keep symbol
    // definition/reference lookups case-sensitive — matches index-time
    // handling). All other fields are folded to lower case.
    if (fieldName.equals(QueryBuilder.DEFS) || fieldName.equals(QueryBuilder.REFS)) {
        return in;
    }
    return new LowerCaseFilter(in);
}
}
@Override
protected TokenStream normalize(String fieldName, TokenStream in) {
    // Query-time normalization: fold every term to lower case so that
    // queries match the (lower-cased) index-time analysis chain.
    TokenStream lowered = new LowerCaseFilter(in);
    return lowered;
}
}
@Override protected TokenStreamComponents createComponents(final String fieldName) { final StandardTokenizer src = new StandardTokenizer(); src.setMaxTokenLength(maxTokenLength); TokenStream tok = new LowerCaseFilter(src); tok = new StopFilter(tok, stopwords); return new TokenStreamComponents(src, tok) { @Override protected void setReader(final Reader reader) { // So that if maxTokenLength was changed, the change takes // effect next time tokenStream is called: src.setMaxTokenLength(StandardAnalyzer.this.maxTokenLength); super.setReader(reader); } }; }
@Override
protected TokenStream normalize(String fieldName, TokenStream in) {
    // Query-time normalization: fold every term to lower case so that
    // queries match the (lower-cased) index-time analysis chain.
    TokenStream lowered = new LowerCaseFilter(in);
    return lowered;
}
}
@Override
protected TokenStream normalize(String fieldName, TokenStream in) {
    // Query-time normalization: fold every term to lower case so that
    // queries match the (lower-cased) index-time analysis chain.
    TokenStream lowered = new LowerCaseFilter(in);
    return lowered;
}
}
@Override
protected TokenStream normalize(String fieldName, TokenStream in) {
    // Query-time normalization: fold every term to lower case so that
    // queries match the (lower-cased) index-time analysis chain.
    TokenStream lowered = new LowerCaseFilter(in);
    return lowered;
}
}
@Override
protected TokenStream normalize(String fieldName, TokenStream in) {
    // Query-time normalization: fold every term to lower case so that
    // queries match the (lower-cased) index-time analysis chain.
    TokenStream lowered = new LowerCaseFilter(in);
    return lowered;
}
}
@Override
protected TokenStream normalize(String fieldName, TokenStream in) {
    // Query-time normalization: fold every term to lower case so that
    // queries match the (lower-cased) index-time analysis chain.
    TokenStream lowered = new LowerCaseFilter(in);
    return lowered;
}
}
@Override
protected TokenStream normalize(String fieldName, TokenStream in) {
    // Query-time normalization: fold every term to lower case so that
    // queries match the (lower-cased) index-time analysis chain.
    TokenStream lowered = new LowerCaseFilter(in);
    return lowered;
}
}
@Override
protected TokenStream normalize(String fieldName, TokenStream in) {
    // Query-time normalization: fold every term to lower case so that
    // queries match the (lower-cased) index-time analysis chain.
    TokenStream lowered = new LowerCaseFilter(in);
    return lowered;
}
}
@Override
protected TokenStream normalize(String fieldName, TokenStream in) {
    // Query-time normalization: fold every term to lower case so that
    // queries match the (lower-cased) index-time analysis chain.
    TokenStream lowered = new LowerCaseFilter(in);
    return lowered;
}
}
@Override
protected TokenStream normalize(String fieldName, TokenStream in) {
    // Query-time normalization: fold every term to lower case so that
    // queries match the (lower-cased) index-time analysis chain.
    TokenStream lowered = new LowerCaseFilter(in);
    return lowered;
}
}
@Override
protected TokenStream normalize(String fieldName, TokenStream in) {
    // Query-time normalization: fold every term to lower case so that
    // queries match the (lower-cased) index-time analysis chain.
    TokenStream lowered = new LowerCaseFilter(in);
    return lowered;
}
}
@Override
protected TokenStream normalize(String fieldName, TokenStream in) {
    // Strip elided articles (e.g. l', d') first, then lower-case —
    // same order as the original chain.
    return new LowerCaseFilter(new ElisionFilter(in, DEFAULT_ARTICLES));
}
}
@Override
protected TokenStream normalize(String fieldName, TokenStream in) {
    // Lower-case first, then apply German-specific normalization
    // (innermost filter runs first) — same order as the original chain.
    return new GermanNormalizationFilter(new LowerCaseFilter(in));
}
}
@Override
protected TokenStream normalize(String fieldName, TokenStream in) {
    // Strip elided articles (e.g. l', d') first, then lower-case —
    // same order as the original chain.
    return new LowerCaseFilter(new ElisionFilter(in, DEFAULT_ARTICLES));
}
}
@Override
protected TokenStream normalize(String fieldName, TokenStream in) {
    // Sorani normalization, then lower-casing, then digit folding —
    // innermost filter runs first, preserving the original order.
    return new DecimalDigitFilter(new LowerCaseFilter(new SoraniNormalizationFilter(in)));
}
}
@Override protected TokenStreamComponents createComponents(String fieldName) { final Tokenizer source = new StandardTokenizer(); // run the widthfilter first before bigramming, it sometimes combines characters. TokenStream result = new CJKWidthFilter(source); result = new LowerCaseFilter(result); result = new CJKBigramFilter(result); return new TokenStreamComponents(source, new StopFilter(result, stopwords)); }