// NOTE(review): truncated loop-body fragment — both "if (...) { continue;" guards are missing
// their closing braces, so this line is not compilable as-is. It appears to be the per-term body
// of a best-terms selection loop: skip terms classified as noise, skip terms whose document
// frequency is not accepted, then score the surviving term and offer it to the priority queue.
long numDocs = getDocCount(fieldName, topLevelTerms); int freq = getTermFreq(termsEnum, docsEnum); if (isNoise(term.bytes().utf8ToString(), freq)) { continue; long docFreq = getTermStatistics(topLevelTermsEnum, term).docFreq(); if (!isAccepted(docFreq)) { continue; float score = computeScore(docFreq, freq, numDocs); queue.addOrUpdate(new ScoreTerm(term.field(), term.bytes().utf8ToString(), score));
// NOTE(review): truncated fragment — the "if (...) { continue;" guard lacks its closing brace.
// Caps the reported term count at the filter's size for this field, skips terms the filter did
// not select (when score-based filtering is active), and serializes the selected term's score.
termsSize = Math.min(termsSize, termVectorsFilter.size(field)); if (hasScores && !termVectorsFilter.hasScoreTerm(term)) { continue; writeScoreTerm(termVectorsFilter.getScoreTerm(term));
// NOTE(review): truncated fragment — the catch block is never closed. Builds the term-vectors
// filter for the requested fields, applies the request's filter settings, and runs best-term
// selection; an IOException is wrapped in an ElasticsearchException with the cause preserved.
termVectorsFilter = new TermVectorsFilter(termVectorsByField, topLevelFields, request.selectedFields(), dfs); termVectorsFilter.setSettings(request.filterSettings()); try { termVectorsFilter.selectBestTerms(); } catch (IOException e) { throw new ElasticsearchException("failed to select best terms", e);
/**
 * Tells whether a score entry has been recorded for the supplied term.
 *
 * @param term the term to look up
 * @return {@code true} when a score term exists for {@code term}
 */
public boolean hasScoreTerm(Term term) {
    // Present iff the lookup does not come back null.
    boolean present = getScoreTerm(term) != null;
    return present;
}
// NOTE(review): truncated fragment — the catch block is never closed. Builds the term-vectors
// filter for the requested fields, applies the request's filter settings, and runs best-term
// selection; an IOException is wrapped in an ElasticsearchException with the cause preserved.
termVectorsFilter = new TermVectorsFilter(termVectorsByField, topLevelFields, request.selectedFields(), dfs); termVectorsFilter.setSettings(request.filterSettings()); try { termVectorsFilter.selectBestTerms(); } catch (IOException e) { throw new ElasticsearchException("failed to select best terms", e);
/**
 * Tells whether a score entry has been recorded for the supplied term.
 *
 * @param term the term to look up
 * @return {@code true} when a score term exists for {@code term}
 */
public boolean hasScoreTerm(Term term) {
    // Present iff the lookup does not come back null.
    boolean present = getScoreTerm(term) != null;
    return present;
}
// NOTE(review): truncated loop-body fragment — both "if (...) { continue;" guards are missing
// their closing braces, so this line is not compilable as-is. It appears to be the per-term body
// of a best-terms selection loop: skip terms classified as noise, skip terms whose document
// frequency is not accepted, then score the surviving term and offer it to the priority queue.
long numDocs = getDocCount(fieldName, topLevelTerms); int freq = getTermFreq(termsEnum, docsEnum); if (isNoise(term.bytes().utf8ToString(), freq)) { continue; long docFreq = getTermStatistics(topLevelTermsEnum, term).docFreq(); if (!isAccepted(docFreq)) { continue; float score = computeScore(docFreq, freq, numDocs); queue.addOrUpdate(new ScoreTerm(term.field(), term.bytes().utf8ToString(), score));
// NOTE(review): truncated fragment — the catch block is never closed. Builds the term-vectors
// filter for the requested fields, applies the request's filter settings, and runs best-term
// selection; an IOException is wrapped in an ElasticsearchException with the cause preserved.
termVectorsFilter = new TermVectorsFilter(termVectorsByField, topLevelFields, request.selectedFields(), dfs); termVectorsFilter.setSettings(request.filterSettings()); try { termVectorsFilter.selectBestTerms(); } catch (IOException e) { throw new ElasticsearchException("failed to select best terms", e);
// NOTE(review): truncated fragment — the "if (...) { continue;" guard lacks its closing brace.
// Caps the reported term count at the filter's size for this field, skips terms the filter did
// not select (when score-based filtering is active), and serializes the selected term's score.
termsSize = Math.min(termsSize, termVectorsFilter.size(field)); if (hasScores && !termVectorsFilter.hasScoreTerm(term)) { continue; writeScoreTerm(termVectorsFilter.getScoreTerm(term));
/**
 * Tells whether a score entry has been recorded for the supplied term.
 *
 * @param term the term to look up
 * @return {@code true} when a score term exists for {@code term}
 */
public boolean hasScoreTerm(Term term) {
    // Present iff the lookup does not come back null.
    boolean present = getScoreTerm(term) != null;
    return present;
}
// NOTE(review): truncated loop-body fragment — both "if (...) { continue;" guards are missing
// their closing braces, so this line is not compilable as-is. It appears to be the per-term body
// of a best-terms selection loop: skip terms classified as noise, skip terms whose document
// frequency is not accepted, then score the surviving term and offer it to the priority queue.
long numDocs = getDocCount(fieldName, topLevelTerms); int freq = getTermFreq(termsEnum, docsEnum); if (isNoise(term.bytes().utf8ToString(), freq)) { continue; long docFreq = getTermStatistics(topLevelTermsEnum, term).docFreq(); if (!isAccepted(docFreq)) { continue; float score = computeScore(docFreq, freq, numDocs); queue.addOrUpdate(new ScoreTerm(term.field(), term.bytes().utf8ToString(), score));
// NOTE(review): truncated fragment — the catch block is never closed. Builds the term-vectors
// filter for the requested fields, applies the request's filter settings, and runs best-term
// selection; an IOException is wrapped in an ElasticsearchException with the cause preserved.
termVectorsFilter = new TermVectorsFilter(termVectorsByField, topLevelFields, request.selectedFields(), dfs); termVectorsFilter.setSettings(request.filterSettings()); try { termVectorsFilter.selectBestTerms(); } catch (IOException e) { throw new ElasticsearchException("failed to select best terms", e);
// NOTE(review): truncated fragment — the "if (...) { continue;" guard lacks its closing brace.
// Caps the reported term count at the filter's size for this field, skips terms the filter did
// not select (when score-based filtering is active), and serializes the selected term's score.
termsSize = Math.min(termsSize, termVectorsFilter.size(field)); if (hasScores && !termVectorsFilter.hasScoreTerm(term)) { continue; writeScoreTerm(termVectorsFilter.getScoreTerm(term));
/**
 * Tells whether a score entry has been recorded for the supplied term.
 *
 * @param term the term to look up
 * @return {@code true} when a score term exists for {@code term}
 */
public boolean hasScoreTerm(Term term) {
    // Present iff the lookup does not come back null.
    boolean present = getScoreTerm(term) != null;
    return present;
}
// NOTE(review): truncated loop-body fragment — both "if (...) { continue;" guards are missing
// their closing braces, so this line is not compilable as-is. It appears to be the per-term body
// of a best-terms selection loop: skip terms classified as noise, skip terms whose document
// frequency is not accepted, then score the surviving term and offer it to the priority queue.
long numDocs = getDocCount(fieldName, topLevelTerms); int freq = getTermFreq(termsEnum, docsEnum); if (isNoise(term.bytes().utf8ToString(), freq)) { continue; long docFreq = getTermStatistics(topLevelTermsEnum, term).docFreq(); if (!isAccepted(docFreq)) { continue; float score = computeScore(docFreq, freq, numDocs); queue.addOrUpdate(new ScoreTerm(term.field(), term.bytes().utf8ToString(), score));
// NOTE(review): truncated fragment — the catch block is never closed. Builds the term-vectors
// filter for the requested fields, applies the request's filter settings, and runs best-term
// selection; an IOException is wrapped in an ElasticsearchException with the cause preserved.
termVectorsFilter = new TermVectorsFilter(termVectorsByField, topLevelFields, request.selectedFields(), dfs); termVectorsFilter.setSettings(request.filterSettings()); try { termVectorsFilter.selectBestTerms(); } catch (IOException e) { throw new ElasticsearchException("failed to select best terms", e);
// NOTE(review): truncated fragment — the "if (...) { continue;" guard lacks its closing brace.
// Caps the reported term count at the filter's size for this field, skips terms the filter did
// not select (when score-based filtering is active), and serializes the selected term's score.
termsSize = Math.min(termsSize, termVectorsFilter.size(field)); if (hasScores && !termVectorsFilter.hasScoreTerm(term)) { continue; writeScoreTerm(termVectorsFilter.getScoreTerm(term));
/**
 * Tells whether a score entry has been recorded for the supplied term.
 *
 * @param term the term to look up
 * @return {@code true} when a score term exists for {@code term}
 */
public boolean hasScoreTerm(Term term) {
    // Present iff the lookup does not come back null.
    boolean present = getScoreTerm(term) != null;
    return present;
}
// NOTE(review): truncated loop-body fragment — both "if (...) { continue;" guards are missing
// their closing braces, so this line is not compilable as-is. It appears to be the per-term body
// of a best-terms selection loop: skip terms classified as noise, skip terms whose document
// frequency is not accepted, then score the surviving term and offer it to the priority queue.
long numDocs = getDocCount(fieldName, topLevelTerms); int freq = getTermFreq(termsEnum, docsEnum); if (isNoise(term.bytes().utf8ToString(), freq)) { continue; long docFreq = getTermStatistics(topLevelTermsEnum, term).docFreq(); if (!isAccepted(docFreq)) { continue; float score = computeScore(docFreq, freq, numDocs); queue.addOrUpdate(new ScoreTerm(term.field(), term.bytes().utf8ToString(), score));
// NOTE(review): truncated fragment — the "if (...) { continue;" guard lacks its closing brace.
// Caps the reported term count at the filter's size for this field, skips terms the filter did
// not select (when score-based filtering is active), and serializes the selected term's score.
termsSize = Math.min(termsSize, termVectorsFilter.size(field)); if (hasScores && !termVectorsFilter.hasScoreTerm(term)) { continue; writeScoreTerm(termVectorsFilter.getScoreTerm(term));