@Override public DoubleVector add(DoubleVector v) { return vector.add(v); }
@Override public DoubleVector add(double scalar) { return this.mainVector.add(scalar); }
@Override public DoubleVector add(double scalar) { return vector.add(scalar); }
@Override public DoubleVector add(DoubleVector v) { return this.mainVector.add(v); }
/**
 * Initializes the node from two child ClusterNodes and their split distance.
 */
ClusterNode(ClusterNode node1, ClusterNode node2, double distance) {
  this.mean = (node1.mean.add(node2.mean)).divide(2);
  this.splitDistance = distance;
  this.left = node1;
  this.right = node2;
  left.parent = this;
  right.parent = this;
}
private void computeCenters(Deque<DoubleVector>[] assignments) {
  IntStream.range(0, assignments.length).parallel().forEach((i) -> {
    int len = assignments[i].size();
    if (len > 0) {
      // sum all vectors assigned to this center and average them
      DoubleVector sumVector = assignments[i].pop();
      while (!assignments[i].isEmpty()) {
        sumVector = sumVector.add(assignments[i].pop());
      }
      centers[i] = sumVector.divide(len);
    }
  });
}
/**
 * Ranks the terms at the indices by their relevance scores and the similarity
 * scores, both weighted by the given alpha.
 *
 * @return a vector which represents the rank of the terms.
 */
static DoubleVector rankScores(double alpha, DenseDoubleVector relevanceScores,
    DenseDoubleVector similarityScores) {
  DoubleVector multiply = relevanceScores.multiply(alpha);
  return similarityScores.multiply(alpha).add(multiply);
}
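// Hypothetical usage sketch (not part of the original source): it assumes
// DenseDoubleVector exposes a double[] constructor and that rankScores is
// reachable from the calling code; the values are made up.
DenseDoubleVector relevance = new DenseDoubleVector(new double[] { 0.9, 0.1, 0.5 });
DenseDoubleVector similarity = new DenseDoubleVector(new double[] { 0.2, 0.8, 0.4 });
// with alpha = 0.5 each term gets 0.5 * relevance + 0.5 * similarity,
// i.e. { 0.55, 0.45, 0.45 } here
DoubleVector ranks = rankScores(0.5, relevance, similarity);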
/**
 * Merges two centers when they are within the given distance of each other.
 */
private static void merge(List<DoubleVector> centers, double mergeWindow) {
  for (int i = 0; i < centers.size(); i++) {
    DoubleVector referenceVector = centers.get(i);
    // find centers to merge if they are within our merge window
    for (int j = i + 1; j < centers.size(); j++) {
      DoubleVector center = centers.get(j);
      double dist = EuclidianDistance.get().measureDistance(referenceVector, center);
      if (dist < mergeWindow) {
        centers.remove(j);
        // replace the reference with the average of both centers, so later
        // comparisons use the merged center
        referenceVector = referenceVector.add(center).divide(2d);
        centers.set(i, referenceVector);
        // decrement so the element shifted into position j is not skipped
        j--;
      }
    }
  }
}
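// Hypothetical usage sketch (not from the original source): it assumes
// DenseDoubleVector's double[] constructor, the usual java.util imports, and
// that merge(...) is reachable from here (it is private static above).
List<DoubleVector> centers = new ArrayList<>(Arrays.asList(
    new DenseDoubleVector(new double[] { 1.0, 1.0 }),
    new DenseDoubleVector(new double[] { 1.5, 1.5 }),
    new DenseDoubleVector(new double[] { 9.0, 9.0 })));
// the first two centers are ~0.707 apart, so a merge window of 1.0 collapses
// them into their mean (1.25, 1.25); the far-away third center is untouched
merge(centers, 1.0);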
public void computeMomentum() {
  // compute momentum
  if (lastTheta != null && momentum != 0d) {
    // we add momentum as the parameter "m" multiplied by the
    // difference of both theta vectors
    theta = theta.add((lastTheta.subtract(theta)).multiply(momentum));
  }
}
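// Minimal plain-Java sketch of the same momentum step on raw arrays; the
// method name is illustrative and not part of the library.
static double[] momentumStep(double[] theta, double[] lastTheta, double momentum) {
  double[] next = new double[theta.length];
  for (int i = 0; i < theta.length; i++) {
    // mirrors theta.add((lastTheta.subtract(theta)).multiply(momentum))
    next[i] = theta[i] + momentum * (lastTheta[i] - theta[i]);
  }
  return next;
}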
if (!assignedIndices.get(neighbour.getValue()) && neighbour.getDistance() < h) {
  // collect every not yet assigned neighbour closer than h into the running center sum
  center = center.add(neighbour.getVector());
  assignedIndices.set(neighbour.getValue());
  added++;
}
CostGradientTuple result = completionService.take().get();
costSum += result.getCost();
gradientSum = gradientSum.add(result.getGradient());
@Override
public CostGradientTuple updateGradient(DoubleVector weights, DoubleVector gradient,
    double learningRate, long iteration, double cost) {
  if (l2 != 0d) {
    DoubleVector powered = weights.pow(2d);
    DoubleVector regGrad = weights.multiply(l2);
    // assume bias is on the first dimension
    powered.set(0, 0);
    regGrad.set(0, 0);
    cost += l2 * powered.sum() / 2d;
    gradient = gradient.add(regGrad);
  }
  return new CostGradientTuple(cost, gradient);
}
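// Plain-Java sketch of the same L2 update on raw arrays; names are
// illustrative only and the gradient is modified in place for brevity.
static double addL2Regularization(double[] weights, double[] gradient, double l2,
    double cost) {
  // index 0 is assumed to hold the bias and is left unregularized
  for (int i = 1; i < weights.length; i++) {
    cost += l2 * weights[i] * weights[i] / 2d;
    gradient[i] += l2 * weights[i];
  }
  return cost;
}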
@Override
public CostGradientTuple updateGradient(DoubleVector theta, DoubleVector gradient,
    double learningRate, long iteration, double cost) {
  if (movingAvg == null) {
    // initialize same types with zeros
    movingAvg = gradient.deepCopy().multiply(0);
    squaredGradient = gradient.deepCopy().multiply(0);
  }
  // exponential moving average of the gradient (first moment)
  DoubleVector oneMinusBeta1Grad = gradient.multiply(1d - movingAvgDecay);
  movingAvg = movingAvg.multiply(movingAvgDecay).add(oneMinusBeta1Grad);
  // exponential moving average of the squared gradient (second moment)
  DoubleVector oneMinusBeta2GradSquared = gradient.pow(2d).multiply(1 - squaredDecay);
  squaredGradient = squaredGradient.multiply(squaredDecay).add(oneMinusBeta2GradSquared);
  // bias-corrected step size
  double beta1t = FastMath.pow(movingAvgDecay, iteration);
  double beta2t = FastMath.pow(squaredDecay, iteration);
  double alphat = alpha * FastMath.sqrt(1 - beta2t) / (1 - beta1t);
  if (Double.isNaN(alphat) || alphat == 0.0) {
    alphat = EPS;
  }
  DoubleVector sqrtV = squaredGradient.sqrt().add(eps);
  gradient = movingAvg.multiply(alphat).divide(sqrtV);
  return new CostGradientTuple(cost, gradient);
}
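// Scalar sketch of the same Adam-style update for a single parameter; all
// names are illustrative. beta1/beta2 play the roles of movingAvgDecay and
// squaredDecay above, and state[0]/state[1] hold the two moving averages.
static double adamStep(double grad, double[] state, double beta1, double beta2,
    double alpha, double eps, long iteration) {
  state[0] = beta1 * state[0] + (1d - beta1) * grad;        // first moment
  state[1] = beta2 * state[1] + (1d - beta2) * grad * grad; // second moment
  double alphat = alpha * Math.sqrt(1d - Math.pow(beta2, iteration))
      / (1d - Math.pow(beta1, iteration));                  // bias-corrected step size
  return alphat * state[0] / (Math.sqrt(state[1]) + eps);   // resulting update step
}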
public DoubleVector predict(DoubleVector features, DoubleVector previousOutcome) {
  // clamp the features to the visible units, calculate the joint
  // probability for each hidden state and put it into the vector
  DoubleVector probabilities = emissionProbabilityMatrix
      .multiplyVectorRow(features);
  // we can add here, both are logarithms; add returns a new vector,
  // so the result must be reassigned
  probabilities = probabilities.add(transitionProbabilityMatrix
      .multiplyVectorRow(previousOutcome));
  double max = probabilities.max();
  for (int state = 0; state < probabilities.getDimension(); state++) {
    probabilities.set(state,
        FastMath.exp(probabilities.get(state) - max)
            * hiddenPriorProbability.get(state));
  }
  // normalize again
  return probabilities.divide(probabilities.sum());
}
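// Plain-Java sketch of the exp-normalization step used above: subtract the
// maximum log score for numerical stability, weight by the prior, and
// re-normalize to probabilities. Names are illustrative only.
static double[] normalizeLogScores(double[] logScores, double[] prior) {
  double max = Double.NEGATIVE_INFINITY;
  for (double score : logScores) {
    max = Math.max(max, score);
  }
  double sum = 0d;
  double[] probs = new double[logScores.length];
  for (int state = 0; state < logScores.length; state++) {
    probs[state] = Math.exp(logScores[state] - max) * prior[state];
    sum += probs[state];
  }
  for (int state = 0; state < probs.length; state++) {
    probs[state] /= sum;
  }
  return probs;
}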
@Override
public CostGradientTuple evaluateCost(DoubleVector theta) {
  DoubleVector activation = SIGMOID.get().apply(x.multiplyVectorRow(theta));
  DenseDoubleMatrix hypo = new DenseDoubleMatrix(Arrays.asList(activation));
  double error = ERROR_FUNCTION.calculateLoss(y, hypo);
  DoubleMatrix loss = hypo.subtract(y);
  double j = error / m;
  DoubleVector gradient = xTransposed.multiplyVectorRow(loss.getRowVector(0))
      .divide(m);
  if (lambda != 0d) {
    DoubleVector reg = theta.multiply(lambda / m);
    // don't regularize the bias
    reg.set(0, 0d);
    gradient = gradient.add(reg);
    j += lambda * theta.pow(2).sum() / m;
  }
  return new CostGradientTuple(j, gradient);
}
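// Plain-Java sketch of the same regularized logistic gradient for a dense
// feature matrix x[m][n] and labels y[m]; names are illustrative only.
static double[] logisticGradient(double[][] x, double[] y, double[] theta,
    double lambda) {
  int m = x.length;
  int n = theta.length;
  double[] gradient = new double[n];
  for (int row = 0; row < m; row++) {
    double z = 0d;
    for (int col = 0; col < n; col++) {
      z += x[row][col] * theta[col];
    }
    // sigmoid(x * theta) - y, i.e. the per-example loss term
    double loss = 1d / (1d + Math.exp(-z)) - y[row];
    for (int col = 0; col < n; col++) {
      gradient[col] += x[row][col] * loss / m;
    }
  }
  // regularize everything except the bias at index 0
  for (int col = 1; col < n; col++) {
    gradient[col] += lambda / m * theta[col];
  }
  return gradient;
}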