@Override
public Analyzer createAnalyzer() {
  return new StandardAnalyzer();
}

@Override
public Analyzer createAnalyzer() {
  return new UAX29URLEmailAnalyzer();
}

@Override
public Analyzer createAnalyzer() {
  return new ClassicAnalyzer();
}
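// A minimal usage sketch (not part of the factory code above): any of the three
// analyzers can be exercised through the standard TokenStream contract. The field
// name and sample text are illustrative; assumes a context that throws IOException.
//
//   import org.apache.lucene.analysis.Analyzer;
//   import org.apache.lucene.analysis.TokenStream;
//   import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

Analyzer analyzer = createAnalyzer();
try (TokenStream ts = analyzer.tokenStream("body", "Mail me at user@example.com")) {
  CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
  ts.reset();
  while (ts.incrementToken()) {
    System.out.println(term);  // UAX29URLEmailAnalyzer keeps the address as one token
  }
  ts.end();
}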
@Override
protected void setReader(final Reader reader) {
  // So that if maxTokenLength was changed, the change takes
  // effect next time tokenStream is called:
  src.setMaxTokenLength(StandardAnalyzer.this.maxTokenLength);
  super.setReader(reader);
}
};
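// A hedged sketch of what the override above buys: a maxTokenLength change on the
// analyzer is picked up on the next tokenStream() call, because setReader re-applies
// the limit to the cached tokenizer. The value, field name, and text are illustrative;
// how over-long tokens are handled (discarded or split) depends on the Lucene version.
StandardAnalyzer analyzer = new StandardAnalyzer();
analyzer.setMaxTokenLength(255);  // takes effect when setReader runs for the next tokenStream()
try (TokenStream ts = analyzer.tokenStream("f", "some sample text")) {
  ts.reset();
  while (ts.incrementToken()) {
    // consume tokens
  }
  ts.end();
}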
/**
 * Pushes the specified number of characters back into the input stream.
 *
 * They will be read again by the next call of the scanning method.
 *
 * @param number the number of characters to be read again.
 *               This number must not be greater than yylength()!
 */
public void yypushback(int number) {
  if (number > yylength())
    zzScanError(ZZ_PUSHBACK_2BIG);
  zzMarkedPos -= number;
}
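// A hedged illustration (JFlex spec fragment, not Java): scanner actions typically
// call yypushback to return lookahead to the stream. The rule below is hypothetical.
//
//   {LETTER}+ "-" { yypushback(1); return WORD; }   // give the '-' back for re-scanning
//
// The guard above enforces the documented contract: you can never push back more
// characters than the current match (yylength()) contains.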
/** Unpacks the compressed DFA action table. */
private static int[] zzUnpackAction() {
  int[] result = new int[24];
  int offset = 0;
  offset = zzUnpackAction(ZZ_ACTION_PACKED_0, offset, result);
  return result;
}

/** Unpacks the compressed transition table. */
private static int[] zzUnpackTrans() {
  int[] result = new int[396];
  int offset = 0;
  offset = zzUnpackTrans(ZZ_TRANS_PACKED_0, offset, result);
  return result;
}

/** Unpacks the compressed row-map table. */
private static int[] zzUnpackRowMap() {
  int[] result = new int[24];
  int offset = 0;
  offset = zzUnpackRowMap(ZZ_ROWMAP_PACKED_0, offset, result);
  return result;
}

/** Unpacks the compressed attribute table. */
private static int[] zzUnpackAttribute() {
  int[] result = new int[24];
  int offset = 0;
  offset = zzUnpackAttribute(ZZ_ATTRIBUTE_PACKED_0, offset, result);
  return result;
}
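// A hedged sketch of the decoding these helpers delegate to: JFlex packs its DFA
// tables as String literals of (count, value) char pairs, and the two-argument
// unpackers run-length-decode them into the int[] result. This standalone decoder
// mirrors that scheme for illustration only; it is not the generated code itself.
private static int unpackRunLengthEncoded(String packed, int offset, int[] result) {
  int i = 0;        // index into the packed string
  int j = offset;   // index into the unpacked array
  while (i < packed.length()) {
    int count = packed.charAt(i++);
    int value = packed.charAt(i++);
    do {
      result[j++] = value;
    } while (--count > 0);
  }
  return j;         // next free slot, so callers can chain packed segments
}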
private void init() {
  this.scanner = new StandardTokenizerImpl(input);
}

@Override
public void close() throws IOException {
  super.close();
  scanner.yyreset(input);
}
/**
 * Creates a new StandardTokenizer with a given {@link org.apache.lucene.util.AttributeFactory}.
 */
public StandardTokenizer(AttributeFactory factory) {
  super(factory);
  init();
}
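// A minimal usage sketch for the tokenizer lifecycle above: construct, attach a
// Reader, consume, then close. The sample text is illustrative; assumes a context
// that throws IOException.
//
//   import java.io.StringReader;
//   import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

StandardTokenizer tokenizer = new StandardTokenizer();
tokenizer.setReader(new StringReader("Hello, Lucene!"));
CharTermAttribute term = tokenizer.addAttribute(CharTermAttribute.class);
tokenizer.reset();
while (tokenizer.incrementToken()) {
  System.out.println(term);
}
tokenizer.end();
tokenizer.close();  // triggers the close() override, which resets the scanner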
/**
 * Builds an analyzer with the stop words from the given reader.
 * @see WordlistLoader#getWordSet(Reader)
 * @param stopwords Reader to read stop words from
 */
public StandardAnalyzer(Reader stopwords) throws IOException {
  this(loadStopwordSet(stopwords));
}
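// A hedged sketch: feeding the Reader-based constructor a newline-separated stop
// word list, per the WordlistLoader#getWordSet format. The words are illustrative;
// assumes a context that throws IOException.
//
//   import java.io.Reader;
//   import java.io.StringReader;

Reader stopwords = new StringReader("a\nan\nthe\n");
StandardAnalyzer analyzer = new StandardAnalyzer(stopwords);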
public void setAnalyzer(Analyzer analyzer) {
  if (analyzer == null) {
    this.analyzer = new StandardAnalyzer();
  } else {
    this.analyzer = analyzer;
  }
}
/**
 * Creates a new instance of the {@link org.apache.lucene.analysis.standard.StandardTokenizer}.
 * Attaches the <code>input</code> to the newly created JFlex scanner.
 *
 * See http://issues.apache.org/jira/browse/LUCENE-1068
 */
public StandardTokenizer() {
  init();
}
/**
 * Creates a new config, using {@link StandardAnalyzer} as the
 * analyzer. By default, {@link TieredMergePolicy} is used
 * for merging.
 * Note that {@link TieredMergePolicy} is free to select
 * non-contiguous merges, which means docIDs may not
 * remain monotonic over time. If this is a problem you
 * should switch to {@link LogByteSizeMergePolicy} or
 * {@link LogDocMergePolicy}.
 */
public IndexWriterConfig() {
  this(new StandardAnalyzer());
}
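// A hedged sketch: using the no-arg config and swapping in a contiguous merge policy,
// as the javadoc recommends when monotonic docIDs matter. The index path is
// illustrative; assumes a context that throws IOException.
//
//   import java.nio.file.Paths;
//   import org.apache.lucene.index.IndexWriter;
//   import org.apache.lucene.index.LogByteSizeMergePolicy;
//   import org.apache.lucene.store.Directory;
//   import org.apache.lucene.store.FSDirectory;

IndexWriterConfig config = new IndexWriterConfig();   // StandardAnalyzer by default
config.setMergePolicy(new LogByteSizeMergePolicy());  // contiguous merges, monotonic docIDs
try (Directory dir = FSDirectory.open(Paths.get("/tmp/example-index"));
     IndexWriter writer = new IndexWriter(dir, config)) {
  // add documents here
}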
@Override
public LuceneIndexFactoryImpl addField(final String name) {
  return addField(name, new StandardAnalyzer());
}
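// A hedged sketch of the public API this implementation backs: Geode's LuceneService
// index factory, where addField(name) defaults the field to a StandardAnalyzer.
// Cache creation is elided and the index/region names are illustrative.
//
//   import org.apache.geode.cache.lucene.LuceneService;
//   import org.apache.geode.cache.lucene.LuceneServiceProvider;

LuceneService luceneService = LuceneServiceProvider.get(cache);
luceneService.createIndexFactory()
    .addField("field1")                  // analyzed with StandardAnalyzer
    .create("indexName", "/regionPath");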
private LuceneIndexCreationProfile getDefaultSerializerCreationProfile() {
  return new LuceneIndexCreationProfile(INDEX_NAME, REGION_NAME, new String[] {"field1"},
      new StandardAnalyzer(), null, null);
}

private LuceneIndexCreationProfile getReverseFieldsLuceneIndexCreationProfile() {
  return new LuceneIndexCreationProfile(INDEX_NAME, REGION_NAME,
      new String[] {"field2", "field1"}, new StandardAnalyzer(), null, null);
}

private LuceneIndexCreationProfile getTwoFieldLuceneIndexCreationProfile() {
  return new LuceneIndexCreationProfile(INDEX_NAME, REGION_NAME,
      new String[] {"field1", "field2"}, new StandardAnalyzer(), null, null);
}
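// A hedged sketch of assumed test usage: profiles that differ only in field order
// should be reported as incompatible. The checkCompatibility call shape (region path
// plus remote profile, returning a non-null message on mismatch) is an assumption
// about Geode's CacheServiceProfile contract, not confirmed by the snippets above.
//
//   import static org.junit.Assert.assertNotNull;

LuceneIndexCreationProfile remote = getTwoFieldLuceneIndexCreationProfile();
LuceneIndexCreationProfile local = getReverseFieldsLuceneIndexCreationProfile();
String message = local.checkCompatibility("/" + REGION_NAME, remote);
assertNotNull(message);  // field order differs, so creation should be rejected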