/**
 * Clamp a raw score to the preference domain.
 *
 * @param user  The user ID (not used by this kernel).
 * @param item  The item ID (not used by this kernel).
 * @param value The raw value to clamp.
 * @return The value clamped to the configured domain.
 */
public double apply(long user, long item, double value) {
    return domain.clampValue(value);
}
}
/** * Prepare the updater for updating the feature values for a particular user/item ID. * * @param feature The feature we are training. * @param rating The rating value. * @param estimate The estimate through the previous feature. * @param uv The user feature value. * @param iv The item feature value. * @param trail The sum of the trailing feature value products. */ public void prepare(int feature, double rating, double estimate, double uv, double iv, double trail) { // Compute prediction double pred = estimate + uv * iv; PreferenceDomain dom = updateRule.getDomain(); if (dom != null) { pred = dom.clampValue(pred); } pred += trail; // Compute the err and store this value error = rating - pred; userFeatureValue = uv; itemFeatureValue = iv; // Update statistics n += 1; sse += error * error; }
/**
 * Score the items for a user and clamp each score to the preference domain
 * (when one is configured) before returning the results.
 *
 * @param user  The user to predict for.
 * @param items The items to predict.
 * @return The clamped prediction results.
 */
@Nonnull
@Override
public ResultMap predictWithDetails(long user, @Nonnull Collection<Long> items) {
    ResultMap raw = scorer.scoreWithDetails(user, items);
    List<Result> clamped = new ArrayList<>(raw.size());
    for (Result result: raw) {
        double score = result.getScore();
        if (preferenceDomain != null) {
            score = preferenceDomain.clampValue(score);
        }
        clamped.add(Results.rescore(result, score));
    }
    return Results.newResultMap(clamped);
}
}
/**
 * Compute a score from a bias and feature vectors. When a preference domain is
 * configured, the running sum is clamped after each dimension's contribution;
 * otherwise the superclass computation is used unchanged.
 *
 * @param bias The baseline score.
 * @param user The user feature vector.
 * @param item The item feature vector.
 * @return The (possibly clamped) score.
 */
@Override
protected double computeScore(double bias, @Nonnull RealVector user, @Nonnull RealVector item) {
    if (domain == null) {
        return super.computeScore(bias, user, item);
    }
    double score = bias;
    final int dim = user.getDimension();
    for (int d = 0; d < dim; d++) {
        score = domain.clampValue(score + user.getEntry(d) * item.getEntry(d));
    }
    return score;
}
/**
 * Predict a single user/item score, clamping it to the preference domain
 * when one is configured.
 *
 * @param user The user ID.
 * @param item The item ID.
 * @return The clamped prediction, or {@code null} if the item cannot be scored.
 */
@Override
public RescoredResult predict(long user, long item) {
    Result raw = scorer.score(user, item);
    if (raw == null) {
        return null;
    }
    double score = raw.getScore();
    if (preferenceDomain != null) {
        score = preferenceDomain.clampValue(score);
    }
    return Results.rescore(raw, score);
}
/**
 * Clamp every value of a score vector to this preference domain.
 *
 * @param scores The scores to clamp.
 * @return A sorted map with the same keys and each value clamped.
 */
public Long2DoubleSortedMap clampVector(Map<Long,Double> scores) {
    SortedKeyIndex index = SortedKeyIndex.fromCollection(scores.keySet());
    Long2DoubleFunction lookup = LongUtils.asLong2DoubleMap(scores);
    final int size = index.size();
    double[] clamped = new double[size];
    for (int i = 0; i < size; i++) {
        clamped[i] = clampValue(lookup.get(index.getKey(i)));
    }
    return Long2DoubleSortedArrayMap.wrap(index, clamped);
}
// Average the accumulated totals; clamp to the preference domain when one is configured.
// NOTE(review): fragment — the enclosing method and closing braces continue outside this view.
double predValue = total / nitems; if (domain != null) { predValue = domain.clampValue(predValue);
/**
 * Update the current estimates with trained values for a new feature.
 * Each rating's estimate gains that rating's user/item feature product,
 * clamped to the preference domain when one is configured.
 *
 * @param ufvs The user feature values.
 * @param ifvs The item feature values.
 */
public void update(RealVector ufvs, RealVector ifvs) {
    for (RatingMatrixEntry rating: ratings) {
        int pos = rating.getIndex();
        double estimate = estimates[pos]
                + ufvs.getEntry(rating.getUserIndex()) * ifvs.getEntry(rating.getItemIndex());
        if (domain != null) {
            estimate = domain.clampValue(estimate);
        }
        estimates[pos] = estimate;
    }
}
}
// Average the accumulated totals; clamp to the preference domain when one is configured.
// NOTE(review): fragment — the enclosing method and closing braces continue outside this view.
double predValue = total / nitems; if (domain != null) { predValue = domain.clampValue(predValue);
/**
 * Clamp a value into the preference domain; the user and item IDs are
 * accepted to satisfy the kernel interface but do not affect the result.
 *
 * @param user  The user ID (ignored).
 * @param item  The item ID (ignored).
 * @param value The value to clamp.
 * @return The clamped value.
 */
public double apply(long user, long item, double value) {
    return domain.clampValue(value);
}
}
/** * Prepare the updater for updating the feature values for a particular user/item ID. * * @param feature The feature we are training. * @param rating The rating value. * @param estimate The estimate through the previous feature. * @param uv The user feature value. * @param iv The item feature value. * @param trail The sum of the trailing feature value products. */ public void prepare(int feature, double rating, double estimate, double uv, double iv, double trail) { // Compute prediction double pred = estimate + uv * iv; PreferenceDomain dom = updateRule.getDomain(); if (dom != null) { pred = dom.clampValue(pred); } pred += trail; // Compute the err and store this value error = rating - pred; userFeatureValue = uv; itemFeatureValue = iv; // Update statistics n += 1; sse += error * error; }
/**
 * Compute a score from a bias and two feature vectors, clamping the running
 * sum to the domain after each dimension's contribution.
 *
 * @param bias The baseline score.
 * @param user The user feature vector.
 * @param item The item feature vector; must match {@code user}'s dimension.
 * @return The clamped score.
 */
@Override
public double apply(double bias, @Nonnull RealVector user, @Nonnull RealVector item) {
    final int dim = user.getDimension();
    Preconditions.checkArgument(item.getDimension() == dim, "vectors have different lengths");
    double score = bias;
    for (int d = 0; d < dim; d++) {
        score = domain.clampValue(score + user.getEntry(d) * item.getEntry(d));
    }
    return score;
}
/**
 * Produce detailed predictions by scoring the items and clamping each score
 * to the preference domain when one is configured.
 *
 * @param user  The user to predict for.
 * @param items The items to predict.
 * @return The clamped results.
 */
@Nonnull
@Override
public ResultMap predictWithDetails(long user, @Nonnull Collection<Long> items) {
    ResultMap scored = scorer.scoreWithDetails(user, items);
    List<Result> output = new ArrayList<>(scored.size());
    for (Result entry: scored) {
        double value = entry.getScore();
        if (preferenceDomain != null) {
            value = preferenceDomain.clampValue(value);
        }
        output.add(Results.rescore(entry, value));
    }
    return Results.newResultMap(output);
}
}
/**
 * Predict one user/item score, clamped to the preference domain when one
 * is configured.
 *
 * @param user The user ID.
 * @param item The item ID.
 * @return The clamped prediction, or {@code null} when no score is available.
 */
@Override
public RescoredResult predict(long user, long item) {
    Result scored = scorer.score(user, item);
    if (scored == null) {
        return null;
    }
    double value = scored.getScore();
    if (preferenceDomain != null) {
        value = preferenceDomain.clampValue(value);
    }
    return Results.rescore(scored, value);
}
/**
 * Clamp every value of a score vector to this preference domain.
 *
 * @param scores The scores to clamp.
 * @return A sorted map with the same keys and clamped values.
 */
public Long2DoubleSortedMap clampVector(Map<Long,Double> scores) {
    SortedKeyIndex keyIndex = SortedKeyIndex.fromCollection(scores.keySet());
    Long2DoubleFunction source = LongUtils.asLong2DoubleFunction(scores);
    final int count = keyIndex.size();
    double[] out = new double[count];
    for (int i = 0; i < count; i++) {
        out[i] = clampValue(source.get(keyIndex.getKey(i)));
    }
    return Long2DoubleSortedArrayMap.wrap(keyIndex, out);
}
/**
 * Update the current estimates with trained values for a new feature.
 * Adds each rating's user/item feature product to its estimate, clamping
 * to the preference domain when one is configured.
 *
 * @param ufvs The user feature values.
 * @param ifvs The item feature values.
 */
public void update(RealVector ufvs, RealVector ifvs) {
    for (RatingMatrixEntry entry: ratings) {
        int slot = entry.getIndex();
        double value = estimates[slot]
                + ufvs.getEntry(entry.getUserIndex()) * ifvs.getEntry(entry.getItemIndex());
        if (domain != null) {
            value = domain.clampValue(value);
        }
        estimates[slot] = value;
    }
}
}
// Average the accumulated totals; clamp to the preference domain when one is configured.
// NOTE(review): fragment — the enclosing method and closing braces continue outside this view.
double predValue = total / nitems; if (domain != null) { predValue = domain.clampValue(predValue);
// Average the accumulated totals; clamp to the preference domain when one is configured.
// NOTE(review): fragment — the enclosing method and closing braces continue outside this view.
double predValue = total / nitems; if (domain != null) { predValue = domain.clampValue(predValue);