@Override
public double sum() {
  // Delegate straight to the underlying vector's sum implementation.
  return this.vector.sum();
}
@Override
public double sum() {
  // Forward to the wrapped main vector.
  return mainVector.sum();
}
@Override
public double sum() {
  // Total of all components, as computed by the backing vector.
  double total = vector.sum();
  return total;
}
/**
 * Compares two vectors by the sum of their component-wise difference.
 *
 * @param a the left-hand vector.
 * @param o the right-hand vector.
 * @return a negative, zero, or positive int as sum(a - o) is negative,
 *         zero, or positive.
 */
public static int compareVector(DoubleVector a, DoubleVector o) {
  // Use Double.compare on the summed difference instead of an int cast:
  // the cast truncated fractional sums (e.g. 0.5) to 0, reporting unequal
  // vectors as equal, and could overflow int for very large sums.
  return Double.compare(a.subtract(o).sum(), 0d);
}
@Override
public double calculateLoss(DoubleVector y, DoubleVector hypothesis) {
  // Loss is the summed residual between outcome and prediction.
  DoubleVector residual = y.subtract(hypothesis);
  return residual.sum();
}
@Override public double measureDistance(DoubleVector vec1, DoubleVector vec2) { double lengthSquaredv1 = vec1.pow(2).sum(); double lengthSquaredv2 = vec2.pow(2).sum(); double dotProduct = vec2.dot(vec1); double denominator = Math.sqrt(lengthSquaredv1) * Math.sqrt(lengthSquaredv2); // correct for floating-point rounding errors if (denominator < dotProduct) { denominator = dotProduct; } return 1.0 - dotProduct / denominator; }
private static double estimateLikelihood(DoubleMatrix alpha) { // sum the last row in our alpha matrix generated by the forward algorithm, // this denotes the endstate of our sequence. return alpha.getRowVector(alpha.getRowCount() - 1).sum(); }
@Override
public DoubleVector predictProbability(DoubleVector features) {
  // Normalize the raw prediction so its components sum to one.
  DoubleVector raw = predict(features);
  double total = raw.sum();
  return raw.divide(total);
}
@Override
public DoubleVector predictProbability(DoubleVector features) {
  DoubleVector prediction = predict(features);
  // In the two-outcome case the prediction is returned as-is; otherwise
  // the outcome vector is normalized so its components sum to one.
  return numOutcomes == 2 ? prediction : prediction.divide(prediction.sum());
}
@Override
public double measureDistance(DoubleVector vec1, DoubleVector vec2) {
  // Manhattan (L1) distance: sum of absolute component differences.
  DoubleVector difference = vec1.subtract(vec2);
  return difference.abs().sum();
}
@Override
public double calculateLoss(DoubleVector y, DoubleVector hypothesis) {
  // Summed element-wise product of the outcome with the log-hypothesis.
  DoubleVector logHypothesis = MathUtils.logVector(hypothesis);
  return y.multiply(logHypothesis).sum();
}
/**
 * @return the average transition probability of the given sequence.
 */
public double averageTransitionProbability(int[] sequence) {
  DoubleVector distribution = getTransitionProbabilities(sequence);
  // Average the values (guarding against a zero-length distribution) and
  // exponentiate — the transitions appear to be stored in log space.
  double denominator = Math.max(1d, distribution.getLength());
  return FastMath.exp(distribution.sum() / denominator);
}
@Override
public DoubleVector apply(DoubleVector vector) {
  // Numerically stable softmax: shift by the max before exponentiating,
  // then normalize so the result sums to one.
  DoubleVector shifted = vector.subtract(vector.max());
  DoubleVector exponentiated = shifted.exp();
  return exponentiated.divide(exponentiated.sum());
}
@Override
public DoubleVector calculateGradient(DoubleVector feature, DoubleVector y,
    DoubleVector hypothesis) {
  // Aggregate prediction error; zero error yields an all-zero gradient.
  double error = y.subtract(hypothesis).sum();
  if (error == 0d) {
    return new SequentialSparseDoubleVector(feature.getDimension());
  }
  // For every non-zero feature, set the gradient entry to the guarded
  // logarithm of (value + 1) scaled by the negated error.
  DoubleVector gradient = feature.deepCopy();
  Iterator<DoubleVectorElement> nonZeros = feature.iterateNonZero();
  while (nonZeros.hasNext()) {
    DoubleVectorElement element = nonZeros.next();
    double scaled =
        MathUtils.guardedLogarithm(element.getValue() + 1d) * error * -1d;
    gradient.set(element.getIndex(), scaled);
  }
  return gradient;
}
}
@Override public CostGradientTuple updateGradient(DoubleVector weights, DoubleVector gradient, double learningRate, long iteration, double cost) { if (l2 != 0d) { DoubleVector powered = weights.pow(2d); DoubleVector regGrad = weights.multiply(l2); // assume bias is on the first dimension powered.set(0, 0); regGrad.set(0, 0); cost += l2 * powered.sum() / 2d; gradient = gradient.add(regGrad); } return new CostGradientTuple(cost, gradient); } }
@Override public double measureDistance(DoubleVector vec1, DoubleVector vec2) { if (vec1.isSparse() || vec2.isSparse()) { return FastMath.sqrt(vec2.subtract(vec1).pow(2).sum()); } else { // dense vectors usually doesn't do a defensive copy, so it is faster than // the implementation above. return measureDistance(vec1.toArray(), vec2.toArray()); } }
@Override
public double calculateLoss(DoubleVector y, DoubleVector hypothesis) {
  // Logistic (cross-entropy) style loss:
  //   sum( -y * log(h) - (1 - y) * log(1 - h) )
  DoubleVector positiveTerm =
      y.multiply(-1d).multiply(MathUtils.logVector(hypothesis));
  DoubleVector negativeTerm = y.subtractFrom(1.0d)
      .multiply(MathUtils.logVector(hypothesis.subtractFrom(1d)));
  return positiveTerm.subtract(negativeTerm).sum();
}
@Override public double measureDistance(DoubleVector vec1, DoubleVector vec2) { if (vec1.isSparse() || vec2.isSparse()) { return FastMath.sqrt(vec2.subtract(vec1).pow(2).sum()); } else { // dense vectors usually doesn't do a defensive copy, so it is faster than // the implementation above. return measureDistance(vec1.toArray(), vec2.toArray()); } }
@Override public DoubleVector predict(DoubleVector features) { // clamp the features to the visible units, calculate the joint // probability for each hidden state and put it into the vector DoubleVector probabilities = emissionProbabilityMatrix .multiplyVectorRow(features); double max = probabilities.max(); for (int state = 0; state < probabilities.getDimension(); state++) { probabilities.set(state, FastMath.exp(probabilities.get(state) - max) * hiddenPriorProbability.get(state)); } // normalize again return probabilities.divide(probabilities.sum()); }