/**
 * Returns n-1 probabilities, one for each category but the 0-th. The probability of the 0-th
 * category is 1 - sum(this result).
 *
 * @param instance A vector of features to be classified.
 * @return A vector of probabilities, one for each of the first n-1 categories.
 */
@Override
public Vector classify(Vector instance) {
  return link(classifyNoLink(instance));
}
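// Usage sketch (illustrative, not part of this class): expanding the n-1
// probabilities from classify() into a full distribution. The names "model"
// and "features" are hypothetical; classifyFull() performs this expansion for
// you at the cost of an extra vector allocation.
//
//   Vector p = model.classify(features);  // probabilities for categories 1..n-1
//   double p0 = 1 - p.zSum();             // probability of category 0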
/**
 * Returns a single scalar probability in the case where we have two categories. Using this
 * method avoids an extra vector allocation as opposed to calling classify() or an extra two
 * vector allocations relative to classifyFull().
 *
 * @param instance The vector of features to be classified.
 * @return The probability of category 1; the probability of category 0 is one minus this value.
 * @throws IllegalArgumentException If the classifier doesn't have two categories.
 */
@Override
public double classifyScalar(Vector instance) {
  Preconditions.checkArgument(numCategories() == 2, "Can only call classifyScalar with two categories");

  // apply pending regularization to whichever coefficients matter
  regularize(instance);

  // with two categories the result has a single element, so a dot product suffices
  return link(classifyScalarNoLink(instance));
}
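// Usage sketch (illustrative): binary classification without allocating a
// result vector. "model" and "features" are hypothetical names.
//
//   double p1 = model.classifyScalar(features);  // probability of category 1
//   double p0 = 1 - p1;                          // probability of category 0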
public Matrix getBeta() {
  // close() applies any pending lazy regularization and seals the model, so
  // the returned coefficients are fully up to date
  close();
  return beta;
}
@Override
public void train(long trackingKey, String groupKey, int actual, Vector instance) {
  unseal();

  double learningRate = currentLearningRate();

  // push coefficients back to zero based on the prior
  regularize(instance);

  // update each row of coefficients according to the gradient
  Vector gradient = this.gradient.apply(groupKey, actual, instance, this);
  for (int i = 0; i < numCategories - 1; i++) {
    double gradientBase = gradient.get(i);

    // apply gradientBase to each non-zero feature of the instance
    for (Element updateLocation : instance.nonZeroes()) {
      int j = updateLocation.index();

      double newValue = beta.getQuick(i, j)
          + gradientBase * learningRate * perTermLearningRate(j) * instance.get(j);
      beta.setQuick(i, j, newValue);
    }
  }

  // remember that these elements got updated
  for (Element element : instance.nonZeroes()) {
    int j = element.index();
    updateSteps.setQuick(j, getStep());
    updateCounts.incrementQuick(j, 1);
  }
  nextStep();
}
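// Training sketch (illustrative): one online SGD pass over labeled examples.
// The constructor arguments and the L1 prior match Mahout's
// OnlineLogisticRegression, but "examples", getLabel() and getFeatures() are
// hypothetical; the default gradient ignores groupKey, so null is fine there.
//
//   OnlineLogisticRegression learner =
//       new OnlineLogisticRegression(2, numFeatures, new L1()).lambda(1e-4);
//   long trackingKey = 0;
//   for (LabeledExample example : examples) {
//     learner.train(trackingKey++, null, example.getLabel(), example.getFeatures());
//   }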
public void regularize(Vector instance) {
  if (updateSteps == null || isSealed()) {
    return;
  }

  // anneal learning rate
  double learningRate = currentLearningRate();

  // here we lazily apply the prior to make up for our neglect
  for (Element updateLocation : instance.nonZeroes()) {
    int j = updateLocation.index();
    double missingUpdates = getStep() - updateSteps.get(j);
    if (missingUpdates > 0) {
      double rate = getLambda() * learningRate * perTermLearningRate(j);
      // age every row of coefficients for this feature before marking the
      // column up to date; setting updateSteps inside the row loop would
      // leave every row after the first unregularized
      for (int i = 0; i < numCategories - 1; i++) {
        double newValue = prior.age(beta.get(i, j), missingUpdates, rate);
        beta.set(i, j, newValue);
      }
      updateSteps.set(j, getStep());
    }
  }
}
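// How the lazy update above works (illustrative): regularization for a
// feature is deferred until the feature is next seen. If feature j was last
// updated at step 90 and getStep() is now 100, missingUpdates is 10 and
// prior.age() applies ten steps' worth of shrinkage in a single call. For an
// L1 prior, for example, that amounts to moving beta[i][j] toward zero by
// roughly rate * missingUpdates, clamping at zero, which matches what ten
// individual per-step updates would have done.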
public void copyFrom(OnlineLogisticRegression other) {
  super.copyFrom(other);
  // copy the learning-rate annealing parameters on top of the base model state
  mu0 = other.mu0;
  decayFactor = other.decayFactor;
  stepOffset = other.stepOffset;
  forgettingExponent = other.forgettingExponent;
  perTermAnnealingOffset = other.perTermAnnealingOffset;
}
@Override
public OnlineLogisticRegression lambda(double lambda) {
  // we only override this to provide a more restrictive return type
  super.lambda(lambda);
  return this;
}