@Override
protected TokenStreamComponents createComponents(final String fieldName) {
  final StandardTokenizer src = new StandardTokenizer();
  src.setMaxTokenLength(maxTokenLength);
  TokenStream tok = new LowerCaseFilter(src);
  tok = new StopFilter(tok, stopwords);
  return new TokenStreamComponents(src, tok) {
    @Override
    protected void setReader(final Reader reader) {
      // So that if maxTokenLength was changed, the change takes
      // effect next time tokenStream is called:
      src.setMaxTokenLength(StandardAnalyzer.this.maxTokenLength);
      super.setReader(reader);
    }
  };
}
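For context, here is a minimal usage sketch for an analyzer assembled this way. Everything below that is not in the snippet is an assumption: Lucene 7.x-style packages and constructors (several of these classes moved between major versions), an explicit stop set (recent StandardAnalyzer releases default to an empty one), and an illustrative field name and sample text.

import java.io.IOException;
import org.apache.lucene.analysis.StopFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

public class StandardAnalyzerDemo {
  public static void main(String[] args) throws IOException {
    try (StandardAnalyzer analyzer =
             new StandardAnalyzer(StopFilter.makeStopSet("the"))) {
      // Picked up by the setReader() override the next time tokenStream() runs:
      analyzer.setMaxTokenLength(255);
      try (TokenStream ts = analyzer.tokenStream("body", "The QUICK brown fox")) {
        CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
        ts.reset();                  // required before the first incrementToken()
        while (ts.incrementToken()) {
          System.out.println(term);  // prints: quick, brown, fox
        }
        ts.end();                    // required after the last incrementToken()
      }
    }
  }
}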
/**
 * Builds an analyzer which removes words in ENGLISH_STOP_WORDS.
 */
public HistoryAnalyzer() {
  super(Analyzer.PER_FIELD_REUSE_STRATEGY);
  stopWords = StopFilter.makeStopSet(ENGLISH_STOP_WORDS);
}
public TokenStream reusableTokenStream(String fieldName, Reader reader) throws IOException {
  SavedStreams streams = (SavedStreams) getPreviousTokenStream();
  if (streams == null) {
    streams = new SavedStreams();
    setPreviousTokenStream(streams);
    streams.tokenStream = new StandardTokenizer(reader);
    streams.filteredTokenStream = new StandardFilter(streams.tokenStream);
    streams.filteredTokenStream = new LowerCaseFilter(streams.filteredTokenStream);
    streams.filteredTokenStream = new StopFilter(streams.filteredTokenStream, stopSet);
  } else {
    streams.tokenStream.reset(reader);
  }
  streams.tokenStream.setMaxTokenLength(maxTokenLength);
  streams.tokenStream.setReplaceInvalidAcronym(replaceInvalidAcronym);
  return streams.filteredTokenStream;
}
@Override
protected TokenStreamComponents createComponents(String fieldName) {
  final Tokenizer source = new StandardTokenizer();
  // Run the width filter first, before bigramming: it sometimes combines characters.
  TokenStream result = new CJKWidthFilter(source);
  result = new LowerCaseFilter(result);
  result = new CJKBigramFilter(result);
  return new TokenStreamComponents(source, new StopFilter(result, stopwords));
}
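This is the same chain the stock CJKAnalyzer wires up, so the standard analyzer from the cjk package can illustrate the resulting overlapping bigrams. A sketch assuming Lucene 5+ no-arg constructors; the field name and sample string are illustrative.

import java.io.IOException;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.cjk.CJKAnalyzer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

public class CjkBigramDemo {
  public static void main(String[] args) throws IOException {
    try (CJKAnalyzer analyzer = new CJKAnalyzer();
         TokenStream ts = analyzer.tokenStream("f", "東京都")) {
      CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
      ts.reset();
      while (ts.incrementToken()) {
        // CJKBigramFilter emits overlapping pairs: 東京, then 京都
        System.out.println(term);
      }
      ts.end();
    }
  }
}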
/**
 * Creates a
 * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
 * which tokenizes all the text in the provided {@link Reader}.
 *
 * @return A
 *         {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
 *         built from a {@link StandardTokenizer} filtered with
 *         {@link StandardFilter}, {@link IrishLowerCaseFilter}, {@link StopFilter},
 *         {@link KeywordMarkerFilter} if a stem exclusion set is
 *         provided, and {@link SnowballFilter}.
 */
@Override
protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
  final Tokenizer source = new StandardTokenizer(matchVersion, reader);
  TokenStream result = new StandardFilter(matchVersion, source);
  StopFilter s = new StopFilter(matchVersion, result, HYPHENATIONS);
  s.setEnablePositionIncrements(false);
  result = s;
  result = new ElisionFilter(matchVersion, result, DEFAULT_ARTICLES);
  result = new IrishLowerCaseFilter(result);
  result = new StopFilter(matchVersion, result, stopwords);
  if (!stemExclusionSet.isEmpty()) {
    result = new KeywordMarkerFilter(result, stemExclusionSet);
  }
  result = new SnowballFilter(result, new IrishStemmer());
  return new TokenStreamComponents(source, result);
}
@Override
protected TokenStreamComponents createComponents(final String fieldName) {
  final UAX29URLEmailTokenizer src = new UAX29URLEmailTokenizer();
  src.setMaxTokenLength(maxTokenLength);
  TokenStream tok = new LowerCaseFilter(src);
  tok = new StopFilter(tok, stopwords);
  return new TokenStreamComponents(src, tok) {
    @Override
    protected void setReader(final Reader reader) {
      // So that if maxTokenLength was changed, the change takes
      // effect next time tokenStream is called:
      src.setMaxTokenLength(UAX29URLEmailAnalyzer.this.maxTokenLength);
      super.setReader(reader);
    }
  };
}
@Override
public TokenStream tokenStream(String fieldName, Reader reader) {
  TokenStream result = new ClassicTokenizer(Version.LUCENE_36, reader);
  result = new StandardFilter(Version.LUCENE_36, result);
  result = new LowerCaseFilter(Version.LUCENE_36, result);
  result = new StopFilter(Version.LUCENE_36, result, DEFAULT_STOP_SET);
  result = new ASCIIFoldingFilter(result);
  return result;
}
@Override
protected TokenStreamComponents wrapComponents(String fieldName, TokenStreamComponents components) {
  Set<String> stopWords = stopWordsPerField.get(fieldName);
  if (stopWords == null) {
    return components;
  }
  StopFilter stopFilter = new StopFilter(components.getTokenStream(),
      new CharArraySet(stopWords, false));
  return new TokenStreamComponents(components.getTokenizer(), stopFilter);
}
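wrapComponents() above only runs inside an AnalyzerWrapper. Below is a minimal sketch of a plausible enclosing class; the class name, the whitespace delegate, and the hand-filled map are assumptions (Lucene's QueryAutoStopWordAnalyzer, which this method resembles, derives an equivalent map from index term statistics instead).

import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.AnalyzerWrapper;
import org.apache.lucene.analysis.core.WhitespaceAnalyzer;

public class PerFieldStopAnalyzer extends AnalyzerWrapper {
  private final Analyzer delegate = new WhitespaceAnalyzer();
  private final Map<String, Set<String>> stopWordsPerField = new HashMap<>();

  public PerFieldStopAnalyzer() {
    super(PER_FIELD_REUSE_STRATEGY);
    // Hand-filled for illustration; only the "body" field gets stop words.
    stopWordsPerField.put("body", new HashSet<>(Arrays.asList("the", "of")));
  }

  @Override
  protected Analyzer getWrappedAnalyzer(String fieldName) {
    return delegate;
  }

  // wrapComponents(String, TokenStreamComponents) as shown above: fields with
  // no entry in stopWordsPerField keep the delegate's chain unchanged.
}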
/**
 * Returns a {@link CharArraySet} from wordFiles, which
 * can be a comma-separated list of filenames.
 */
protected final CharArraySet getWordSet(ResourceLoader loader, String wordFiles, boolean ignoreCase)
    throws IOException {
  List<String> files = splitFileNames(wordFiles);
  CharArraySet words = null;
  if (files.size() > 0) {
    // default stopwords list has 35 or so words, but maybe don't make it that
    // big to start
    words = new CharArraySet(files.size() * 10, ignoreCase);
    for (String file : files) {
      List<String> wlist = getLines(loader, file.trim());
      words.addAll(StopFilter.makeStopSet(wlist, ignoreCase));
    }
  }
  return words;
}
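The loop above simply accumulates per-file stop sets into one CharArraySet. A standalone sketch of that accumulation, with the file I/O replaced by in-memory lists so it runs anywhere; the word lists are illustrative stand-ins for getLines() results.

import java.util.Arrays;
import java.util.List;
import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.StopFilter;

public class WordSetDemo {
  public static void main(String[] args) {
    List<List<String>> fileContents = Arrays.asList(
        Arrays.asList("the", "a"),           // stand-in for file one
        Arrays.asList("der", "die", "das")); // stand-in for file two
    boolean ignoreCase = true;
    CharArraySet words = new CharArraySet(fileContents.size() * 10, ignoreCase);
    for (List<String> wlist : fileContents) {
      words.addAll(StopFilter.makeStopSet(wlist, ignoreCase));
    }
    System.out.println(words.contains("The")); // true: ignoreCase folds case
  }
}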
/**
 * Filters the plain full-text tokenizer with StopFilter.
 * @param fieldName name of field for which to create components
 * @return components for this analyzer
 */
@Override
protected TokenStreamComponents createComponents(String fieldName) {
  JFlexTokenizer plainfull = new JFlexTokenizer(
      new PlainFullTokenizer(FileAnalyzer.dummyReader));
  // We are counting position increments; this might affect queries later and
  // needs to stay in sync, especially for highlighting of results.
  return new TokenStreamComponents(plainfull,
      new StopFilter(plainfull, stopWords));
}
@Override
public TokenStream apply(final TokenStream input) {
  return new StopFilter(LuceneVersion.get(), input,
      makeStopSet(GermanAnalyzer.GERMAN_STOP_WORDS));
}
public StopFilter create(TokenStream input) {
  StopFilter stopFilter = new StopFilter(input, stopWords, ignoreCase);
  stopFilter.setEnablePositionIncrements(enablePositionIncrements);
  return stopFilter;
}
/**
 * Creates
 * {@link org.apache.lucene.analysis.Analyzer.TokenStreamComponents}
 * used to tokenize all the text in the provided {@link Reader}.
 *
 * @return {@link org.apache.lucene.analysis.Analyzer.TokenStreamComponents}
 *         built from a {@link StandardTokenizer} filtered with
 *         {@link LowerCaseFilter}, {@link StopFilter},
 *         {@link SetKeywordMarkerFilter} if a stem exclusion set is
 *         provided, {@link GermanNormalizationFilter} and {@link GermanLightStemFilter}
 */
@Override
protected TokenStreamComponents createComponents(String fieldName) {
  final Tokenizer source = new StandardTokenizer();
  TokenStream result = new LowerCaseFilter(source);
  result = new StopFilter(result, stopwords);
  result = new SetKeywordMarkerFilter(result, exclusionSet);
  result = new GermanNormalizationFilter(result);
  result = new GermanLightStemFilter(result);
  return new TokenStreamComponents(source, result);
}
/**
 * Creates
 * {@link org.apache.lucene.analysis.Analyzer.TokenStreamComponents}
 * used to tokenize all the text in the provided {@link Reader}.
 *
 * @return {@link org.apache.lucene.analysis.Analyzer.TokenStreamComponents}
 *         built from a {@link ThaiTokenizer} filtered with
 *         {@link LowerCaseFilter}, {@link DecimalDigitFilter} and {@link StopFilter}
 */
@Override
protected TokenStreamComponents createComponents(String fieldName) {
  final Tokenizer source = new ThaiTokenizer();
  TokenStream result = new LowerCaseFilter(source);
  result = new DecimalDigitFilter(result);
  result = new StopFilter(result, stopwords);
  return new TokenStreamComponents(source, result);
}
@Override
public TokenStream create(TokenStream tokenStream) {
  if (removeTrailing) {
    return new StopFilter(tokenStream, stopWords);
  } else {
    return new SuggestStopFilter(tokenStream, stopWords);
  }
}
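The branch matters for suggesters: StopFilter always removes stop words, while SuggestStopFilter (from the lucene-suggest module) keeps a trailing stop word that is not followed by a separator, since the user may still be typing it out ("the" could be the prefix of "theater"). A runnable sketch under Lucene 7.x-style packages; the stop set and input text are illustrative.

import java.io.IOException;
import java.io.StringReader;
import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.StopFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.core.WhitespaceTokenizer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.search.suggest.analyzing.SuggestStopFilter;

public class SuggestStopDemo {
  public static void main(String[] args) throws IOException {
    CharArraySet stops = StopFilter.makeStopSet("the");
    Tokenizer tok = new WhitespaceTokenizer();
    tok.setReader(new StringReader("mackintosh the")); // no trailing separator
    try (TokenStream ts = new SuggestStopFilter(tok, stops)) {
      CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
      ts.reset();
      while (ts.incrementToken()) {
        System.out.println(term); // prints: mackintosh, the
      }
      ts.end();
    }
    // A plain StopFilter over the same input would print only: mackintosh
  }
}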
/**
 * Creates a
 * {@link org.apache.lucene.analysis.Analyzer.TokenStreamComponents}
 * which tokenizes all the text in the provided {@link Reader}.
 *
 * @return A
 *         {@link org.apache.lucene.analysis.Analyzer.TokenStreamComponents}
 *         built from a {@link StandardTokenizer} filtered with
 *         {@link LowerCaseFilter}, {@link StopFilter},
 *         {@link SetKeywordMarkerFilter} if a stem exclusion set is
 *         provided and {@link LatvianStemFilter}.
 */
@Override
protected TokenStreamComponents createComponents(String fieldName) {
  final Tokenizer source = new StandardTokenizer();
  TokenStream result = new LowerCaseFilter(source);
  result = new StopFilter(result, stopwords);
  if (!stemExclusionSet.isEmpty()) {
    result = new SetKeywordMarkerFilter(result, stemExclusionSet);
  }
  result = new LatvianStemFilter(result);
  return new TokenStreamComponents(source, result);
}
@Override
protected TokenStreamComponents createComponents(final String fieldName) {
  final ClassicTokenizer src = new ClassicTokenizer();
  src.setMaxTokenLength(maxTokenLength);
  TokenStream tok = new ClassicFilter(src);
  tok = new LowerCaseFilter(tok);
  tok = new StopFilter(tok, stopwords);
  return new TokenStreamComponents(src, tok) {
    @Override
    protected void setReader(final Reader reader) {
      src.setMaxTokenLength(ClassicAnalyzer.this.maxTokenLength);
      super.setReader(reader);
    }
  };
}
/**
 * Builds a Set from an array of stop words,
 * appropriate for passing into the StopFilter constructor.
 * This allows the stop-word set to be constructed once and cached
 * when an Analyzer is constructed.
 *
 * @param stopWords An array of stopwords
 * @see #makeStopSet(java.lang.String[], boolean) passing false to ignoreCase
 */
public static CharArraySet makeStopSet(String... stopWords) {
  return makeStopSet(stopWords, false);
}
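A minimal custom Analyzer showing the caching pattern this javadoc describes: the set is built once and reused by every createComponents() call. A sketch in the Lucene 7.x style; the class name, tokenizer choice, and stop words are illustrative.

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.StopFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.core.WhitespaceTokenizer;

public class TinyStopAnalyzer extends Analyzer {
  // Built once at construction time, shared by all token streams:
  private final CharArraySet stops = StopFilter.makeStopSet("the", "a", "of");

  @Override
  protected TokenStreamComponents createComponents(String fieldName) {
    Tokenizer source = new WhitespaceTokenizer();
    TokenStream sink = new StopFilter(source, stops);
    return new TokenStreamComponents(source, sink);
  }
}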
/**
 * Creates
 * {@link org.apache.lucene.analysis.Analyzer.TokenStreamComponents}
 * used to tokenize all the text in the provided {@link Reader}.
 *
 * @return {@link org.apache.lucene.analysis.Analyzer.TokenStreamComponents}
 *         built from a {@link LowerCaseTokenizer} filtered with
 *         {@link StopFilter}
 */
@Override
protected TokenStreamComponents createComponents(String fieldName) {
  final Tokenizer source = new LowerCaseTokenizer();
  return new TokenStreamComponents(source, new StopFilter(source, stopwords));
}