/**
 * Returns a hash combining the indexed string and index name of this item's term.
 * Must stay consistent with the corresponding equals implementation, which is
 * presumed to compare the same two fields — TODO(review): confirm against equals().
 */
@Override
public int hashCode() {
    // Arithmetic intentionally unchanged: 17/33 multipliers preserved so existing
    // hash-based distributions are unaffected.
    return 17 * item.getIndexedString().hashCode() + 33 * item.getIndexName().hashCode();
}
/**
 * Appends this struct's body on the form {@code fieldName:{index1:value1 index2:value2 ...}},
 * with child terms separated by single spaces.
 *
 * @param buffer the builder to append to
 */
@Override
protected void appendBodyString(StringBuilder buffer) {
    buffer.append(fieldName).append(':').append('{');
    Iterator<Item> children = getItemIterator();
    while (children.hasNext()) {
        TermItem child = (TermItem) children.next();
        buffer.append(child.getIndexName()).append(':').append(child.getIndexedString());
        if (children.hasNext())
            buffer.append(' '); // separator between children, not after the last one
    }
    buffer.append('}');
}
/**
 * Returns whether the label required by this condition matches the index name
 * of the given evaluation term. When this condition has no explicit label, the
 * evaluation's current label is used instead. An empty index name matches a
 * missing (null) label.
 *
 * @param evaluationTerm the term whose index name is checked
 * @param e the current rule evaluation, supplying default label and trace output
 * @return true if the label requirement is satisfied
 */
protected boolean labelMatches(TermItem evaluationTerm, RuleEvaluation e) {
    String requiredLabel = getLabel();
    if (requiredLabel == null)
        requiredLabel = e.getCurrentLabel();

    String indexName = evaluationTerm.getIndexName();
    if (requiredLabel == null && "".equals(indexName)) return true;
    if (indexName.equals(requiredLabel)) return true;

    if (e.getTraceLevel() >= 4)
        e.trace(4, "'" + this + "' does not match, label of " + e.currentItem() + " was required to be " + requiredLabel);
    return false;
}
/**
 * Accent-normalizes a word term in place, or removes it from the iterator if
 * normalization leaves nothing behind. Terms which are not words, are not
 * normalizable, target an attribute, or target an index with normalization
 * disabled are left untouched.
 *
 * @param language the language used for accent dropping
 * @param indexFacts the session used to look up the term's index settings
 * @param term the candidate term; only WordItem instances are processed
 * @param i the iterator positioned at the term, used to remove it when emptied
 */
private void normalizeWord(Language language, IndexFacts.Session indexFacts, TermItem term, ListIterator<Item> i) {
    if ( ! (term instanceof WordItem) || ! term.isNormalizable()) return;

    Index index = indexFacts.getIndex(term.getIndexName());
    if (index.isAttribute() || ! index.getNormalize()) return;

    WordItem wordItem = (WordItem) term;
    String normalized = linguistics.getTransformer().accentDrop(wordItem.getWord(), language);
    if (normalized.isEmpty())
        i.remove(); // nothing left after accent dropping: drop the term entirely
    else
        wordItem.setWord(normalized);
}
/**
 * Validates items added to this composite: every child must be a TermItem
 * carrying both a non-empty index name and a non-empty indexed string.
 *
 * @param item the item about to be added
 */
@Override
protected void adding(Item item) {
    super.adding(item);
    Validator.ensureInstanceOf("Child item", item, TermItem.class);
    TermItem term = (TermItem) item;
    Validator.ensureNonEmpty("Struct fieldname", term.getIndexName());
    Validator.ensureNonEmpty("Query term", term.getIndexedString());
}
/**
 * Recursively walks the item tree and, for every term whose index has literal
 * boost enabled, adds a lowercased literal variant (in the "&lt;index&gt;_literal"
 * index) to the given rank item.
 *
 * @param rankTerms the rank item receiving the literal variants
 * @param item the subtree to scan; null is ignored
 * @param indexFacts the session used to look up per-index literal-boost settings
 */
private void addLiterals(RankItem rankTerms, Item item, IndexFacts.Session indexFacts) {
    if (item == null) return;

    // NOTE: NotItem is checked before CompositeItem so only its positive branch is scanned
    if (item instanceof NotItem) {
        addLiterals(rankTerms, ((NotItem) item).getPositiveItem(), indexFacts);
    }
    else if (item instanceof CompositeItem) {
        for (Iterator<Item> children = ((CompositeItem) item).getItemIterator(); children.hasNext(); )
            addLiterals(rankTerms, children.next(), indexFacts);
    }
    else if (item instanceof TermItem) {
        TermItem term = (TermItem) item;
        Index index = indexFacts.getIndex(term.getIndexName());
        if (index.getLiteralBoost())
            rankTerms.addItem(new WordItem(toLowerCase(term.getRawWord()), index.getName() + "_literal"));
    }
}
/**
 * Replaces the matched run of words in the owner with a single phrase item
 * containing those words. The first word is swapped out for the new phrase,
 * then pulled back in as its first child; each following word is removed from
 * the position just after the phrase and appended to it.
 */
public void replace() {
    PhraseItem phrase = new PhraseItem();

    TermItem firstWord = (TermItem) owner.setItem(startIndex, phrase);
    replace(firstWord, 0);
    phrase.setIndexName(firstWord.getIndexName());
    phrase.addItem(firstWord);

    for (int wordIndex = 1; wordIndex < length; wordIndex++) {
        // each removal shifts the rest left, so the next word is always at startIndex+1
        TermItem nextWord = (TermItem) owner.removeItem(startIndex + 1);
        replace(nextWord, wordIndex);
        phrase.addItem(nextWord);
    }
}
// NOTE(review): fragment — the enclosing if/else is not visible here. Presumably the
// preceding branch checks whether 'index' is still unset: the first term's index name is
// recorded, and the scan breaks out once a term with a different index name is seen.
// TODO confirm against the surrounding method.
index=termItem.getIndexName(); else if (!termItem.getIndexName().equals(index)) break;
private boolean rewriteToNGramMatching(Item item, int indexInParent, IndexFacts.Session indexFacts, Query query) { boolean rewritten = false; if (item instanceof SegmentItem) { // handle CJK segmented terms which should be grams instead SegmentItem segments = (SegmentItem)item; Index index = indexFacts.getIndex(segments.getIndexName()); if (index.isNGram()) { Item grams = splitToGrams(segments, toLowerCase(segments.getRawWord()), index.getGramSize(), query); replaceItemByGrams(item, grams, indexInParent); rewritten = true; } } else if (item instanceof CompositeItem) { CompositeItem composite = (CompositeItem)item; for (int i=0; i<composite.getItemCount(); i++) rewritten = rewriteToNGramMatching(composite.getItem(i), i, indexFacts, query) || rewritten; } else if (item instanceof TermItem) { TermItem term = (TermItem)item; Index index = indexFacts.getIndex(term.getIndexName()); if (index.isNGram()) { Item grams = splitToGrams(term,term.stringValue(), index.getGramSize(), query); replaceItemByGrams(item, grams, indexInParent); rewritten = true; } } return rewritten; }