.filter(StopFilterFactory.class) .filter(EdgeNGramFilterFactory.class) .param("minGramSize", "3") .param("maxGramSize", "50") .analyzerDef("autocompletePhoneticAnalyzer", StandardTokenizerFactory.class) .filter(StandardFilterFactory.class) .filter(StopFilterFactory.class) .filter(PhoneticFilterFactory.class) .param("encoder", "DoubleMetaphone") .filter(SnowballPorterFilterFactory.class) .param("language", "English") .analyzerDef("autocompleteNGramAnalyzer", StandardTokenizerFactory.class) .filter(WordDelimiterFilterFactory.class) .filter(LowerCaseFilterFactory.class) .filter(NGramFilterFactory.class) .param("minGramSize", "3") .param("maxGramSize", "20") .analyzerDef("standardAnalyzer", StandardTokenizerFactory.class) .filter(LowerCaseFilterFactory.class)
.filter(StopFilterFactory.class) .filter(EdgeNGramFilterFactory.class) .param("minGramSize", "3") .param("maxGramSize", "50") .analyzerDef("autocompletePhoneticAnalyzer", StandardTokenizerFactory.class) .filter(StandardFilterFactory.class) .filter(StopFilterFactory.class) .filter(PhoneticFilterFactory.class) .param("encoder", "DoubleMetaphone") .filter(SnowballPorterFilterFactory.class) .param("language", "English") .analyzerDef("autocompleteNGramAnalyzer", StandardTokenizerFactory.class) .filter(WordDelimiterFilterFactory.class) .filter(LowerCaseFilterFactory.class) .filter(NGramFilterFactory.class) .param("minGramSize", "3") .param("maxGramSize", "20") .analyzerDef("standardAnalyzer", StandardTokenizerFactory.class) .filter(LowerCaseFilterFactory.class)
@Override
public void defineMappings(Cache cache, SearchMapping searchMapping) {
    // Registers the programmatic analyzer definitions for this cache's index.
    searchMapping
            // "standard": standard tokenizer, lowercased.
            .analyzerDef("standard", StandardTokenizerFactory.class)
                .filter(StandardFilterFactory.class)
                .filter(LowerCaseFilterFactory.class)
            // "simple": lowercase tokenizer (splits on non-letters), lowercased again.
            .analyzerDef("simple", LowerCaseTokenizerFactory.class)
                .filter(LowerCaseFilterFactory.class)
            // "whitespace": splits on whitespace only, no filters.
            .analyzerDef("whitespace", WhitespaceTokenizerFactory.class)
            // "keyword": whole input as a single token.
            .analyzerDef("keyword", KeywordTokenizerFactory.class)
            // "stemmer": lowercase + stopword removal + English Snowball stemming.
            .analyzerDef("stemmer", StandardTokenizerFactory.class)
                .filter(StandardFilterFactory.class)
                .filter(LowerCaseFilterFactory.class)
                .filter(StopFilterFactory.class)
                .filter(SnowballPorterFilterFactory.class)
                    .param("language", "English")
            // "ngram": same base chain, then fixed-size 3-grams.
            .analyzerDef("ngram", StandardTokenizerFactory.class)
                .filter(StandardFilterFactory.class)
                .filter(LowerCaseFilterFactory.class)
                .filter(StopFilterFactory.class)
                .filter(NGramFilterFactory.class)
                    .param("minGramSize", "3")
                    .param("maxGramSize", "3");
}
}
@Factory public SearchMapping build() { SearchMapping mapping = new SearchMapping(); mapping .analyzerDef( "ngram", StandardTokenizerFactory.class ) .filter( LowerCaseFilterFactory.class ) .filter( StopFilterFactory.class ) .param( "words", "non-existent-resourcename.file" ) // We must mark at least one entity as indexed, otherwise analyzer definitions are not initialized (no need to) .entity( SomeHibernateEntity.class ).indexed(); return mapping; }
@Factory
public SearchMapping build() {
    // Builds a programmatic mapping declaring four analyzers and one normalizer.
    SearchMapping searchMapping = new SearchMapping();
    searchMapping
            // "stemmer": lowercase + stopwords + English Snowball stemming.
            .analyzerDef("stemmer", StandardTokenizerFactory.class)
                .filter(StandardFilterFactory.class)
                .filter(LowerCaseFilterFactory.class)
                .filter(StopFilterFactory.class)
                .filter(SnowballPorterFilterFactory.class)
                    .param("language", "English")
            // "ngram": base chain plus fixed-size 3-grams.
            .analyzerDef("ngram", StandardTokenizerFactory.class)
                .filter(StandardFilterFactory.class)
                .filter(LowerCaseFilterFactory.class)
                .filter(StopFilterFactory.class)
                .filter(NGramFilterFactory.class)
                    .param("minGramSize", "3")
                    .param("maxGramSize", "3")
            // Identical filter chain to "ngram" minus the n-gram step.
            .analyzerDef("same_base_as_ngram", StandardTokenizerFactory.class)
                .filter(StandardFilterFactory.class)
                .filter(LowerCaseFilterFactory.class)
                .filter(StopFilterFactory.class)
            // "htmlStrip": removes HTML markup before tokenizing; "escapedTags"
            // names tags excluded from stripping (see Lucene HTMLStripCharFilterFactory).
            .analyzerDef("htmlStrip", StandardTokenizerFactory.class)
                .charFilter(HTMLStripCharFilterFactory.class)
                    .param("escapedTags", "escaped")
                .filter(LowerCaseFilterFactory.class)
            // "lower": lowercasing normalizer (no tokenization).
            .normalizerDef("lower")
                .filter(LowerCaseFilterFactory.class);
    return searchMapping;
}
}