@Override
public DoubleVector subtract(double scalar) {
  // Pure delegation: scalar subtraction is handled by the wrapped vector.
  return vector.subtract(scalar);
}
@Override
public double calculateLoss(DoubleVector y, DoubleVector hypothesis) {
  // Cross-entropy loss: sum_i of -( y_i * log(h_i) + (1 - y_i) * log(1 - h_i) ).
  DoubleVector logHypo = MathUtils.logVector(hypothesis);
  DoubleVector logOneMinusHypo = MathUtils.logVector(hypothesis.subtractFrom(1d));
  // -y * log(h)
  DoubleVector positiveTerm = y.multiply(-1d).multiply(logHypo);
  // (1 - y) * log(1 - h)
  DoubleVector negativeTerm = y.subtractFrom(1.0d).multiply(logOneMinusHypo);
  return positiveTerm.subtract(negativeTerm).sum();
}
@Override
public DoubleVector subtract(DoubleVector v) {
  // Pure delegation: element-wise subtraction is handled by the wrapped vector.
  return vector.subtract(v);
}
@Override
public DoubleVector subtract(double scalar) {
  // Forward the scalar subtraction to the underlying vector implementation.
  return vector.subtract(scalar);
}
@Override
public DoubleVector subtract(DoubleVector v) {
  // Forward the element-wise subtraction to the backing main vector.
  return mainVector.subtract(v);
}
@Override
public DoubleVector subtract(DoubleVector v) {
  // Delegate element-wise subtraction to the wrapped vector.
  return vector.subtract(v);
}
@Override
public DoubleVector subtract(double scalar) {
  // Delegate scalar subtraction to the backing main vector.
  return mainVector.subtract(scalar);
}
/**
 * Compares two vectors by the sign of the sum of their element-wise differences.
 *
 * @param a the left-hand vector.
 * @param o the right-hand vector.
 * @return a negative int, zero, or a positive int as the summed difference of
 *         {@code a - o} is negative, zero, or positive.
 */
public static int compareVector(DoubleVector a, DoubleVector o) {
  double summedDifference = a.subtract(o).sum();
  // The previous "(int) sum" cast truncated any fractional difference in
  // (-1, 1) to zero, reporting genuinely different vectors as equal (and could
  // overflow int for very large sums). Double.compare keeps only the sign,
  // which is all the comparator contract requires.
  return Double.compare(summedDifference, 0d);
}
@Override
public double calculateLoss(DoubleVector y, DoubleVector hypothesis) {
  // Signed residual sum: sum_i (y_i - h_i).
  DoubleVector residual = y.subtract(hypothesis);
  return residual.sum();
}
@Override
public DoubleVector calculateGradient(DoubleVector feature,
    DoubleVector y, DoubleVector hypothesis) {
  // Scale the feature vector by the scalar prediction error (h - y).
  double error = hypothesis.subtract(y).get(0);
  return feature.multiply(error);
}
}
@Override
public DoubleVector calculateGradient(DoubleVector feature,
    DoubleVector y, DoubleVector hypothesis) {
  // Gradient = feature * (h - y), where the error is read from index 0.
  double residual = hypothesis.subtract(y).get(0);
  return feature.multiply(residual);
}
@Override
public DoubleVector calculateGradient(DoubleVector feature,
    DoubleVector y, DoubleVector hypothesis) {
  // The scalar error (h - y) at index 0 scales the whole feature vector.
  double delta = hypothesis.subtract(y).get(0);
  return feature.multiply(delta);
}
@Override
public DoubleVector calculateGradient(DoubleVector feature,
    DoubleVector y, DoubleVector hypothesis) {
  // Multiply each feature by the scalar prediction error (h - y).
  double predictionError = hypothesis.subtract(y).get(0);
  return feature.multiply(predictionError);
}
public void computeMomentum() { // compute momentum if (lastTheta != null && momentum != 0d) { // we add momentum as the parameter "m" multiplied by the // difference of both theta vectors theta = theta.add((lastTheta.subtract(theta)).multiply(momentum)); } }
@Override
public double measureDistance(DoubleVector vec1, DoubleVector vec2) {
  // Manhattan (L1) distance: sum of absolute element-wise differences.
  DoubleVector difference = vec1.subtract(vec2);
  return difference.abs().sum();
}
@Override
public DoubleVector apply(DoubleVector vector) {
  // Numerically stable softmax: shift by the maximum before exponentiating
  // so exp() never overflows, then normalize to a probability distribution.
  DoubleVector exponentiated = vector.subtract(vector.max()).exp();
  double normalizer = exponentiated.sum();
  return exponentiated.divide(normalizer);
}
/**
 * Simplistic gradient descent without regularization:
 * theta' = theta - learningRate * gradient.
 */
@Override
public CostWeightTuple computeNewWeights(DoubleVector theta,
    DoubleVector gradient, double learningRate, long iteration, double cost) {
  CostGradientTuple updated = updateGradient(theta, gradient, learningRate,
      iteration, cost);
  // Scale the (possibly adjusted) gradient by the learning rate and step
  // downhill from the current weights.
  DoubleVector step = updated.getGradient().multiply(learningRate);
  return new CostWeightTuple(updated.getCost(), theta.subtract(step));
}
@Override
public DenseDoubleMatrix subtract(DoubleVector vec) {
  // Subtract the given vector from every column, returning a new matrix of
  // the same dimensions (this matrix is left untouched).
  DenseDoubleMatrix result = new DenseDoubleMatrix(getRowCount(),
      getColumnCount());
  for (int col = 0; col < getColumnCount(); col++) {
    result.setColumnVector(col, getColumnVector(col).subtract(vec));
  }
  return result;
}
@Override public double measureDistance(DoubleVector vec1, DoubleVector vec2) { if (vec1.isSparse() || vec2.isSparse()) { return FastMath.sqrt(vec2.subtract(vec1).pow(2).sum()); } else { // dense vectors usually doesn't do a defensive copy, so it is faster than // the implementation above. return measureDistance(vec1.toArray(), vec2.toArray()); } }
@Override public double measureDistance(DoubleVector vec1, DoubleVector vec2) { if (vec1.isSparse() || vec2.isSparse()) { return FastMath.sqrt(vec2.subtract(vec1).pow(2).sum()); } else { // dense vectors usually doesn't do a defensive copy, so it is faster than // the implementation above. return measureDistance(vec1.toArray(), vec2.toArray()); } }