/**
 * Element-wise division of the wrapped vector by the given vector.
 *
 * @param vector the divisor vector.
 * @return a new vector containing the element-wise quotient.
 */
@Override
public DoubleVector divide(DoubleVector vector) {
  // BUGFIX: was "vector.divide(vector)", which divides the argument by
  // itself and always yields an all-ones vector instead of delegating to
  // the wrapped vector (field name follows the sibling divide(double)
  // overload that delegates to this.mainVector).
  return this.mainVector.divide(vector);
}
/**
 * Element-wise division of the wrapped vector by the given scalar.
 *
 * @param scalar the scalar divisor.
 * @return a new vector with every component divided by {@code scalar}.
 */
@Override
public DoubleVector divide(double scalar) {
  DoubleVector quotient = this.mainVector.divide(scalar);
  return quotient;
}
/**
 * Divides every component of the backing vector by the given scalar.
 *
 * @param scalar the divisor.
 * @return a new vector holding the scaled values.
 */
@Override
public DoubleVector divide(double scalar) {
  // delegate to the backing vector
  DoubleVector scaled = vector.divide(scalar);
  return scaled;
}
/**
 * Scalar division delegated to the backing vector.
 *
 * @param scalar the divisor applied to each component.
 * @return the resulting vector.
 */
@Override
public DoubleVector divide(double scalar) {
  DoubleVector result = vector.divide(scalar);
  return result;
}
/**
 * Element-wise division of the wrapped vector by the given vector.
 *
 * @param vector the divisor vector.
 * @return a new vector containing the element-wise quotient.
 */
@Override
public DoubleVector divide(DoubleVector vector) {
  // BUGFIX: was "vector.divide(vector)" — the parameter divided by itself,
  // which always produces an all-ones vector. Delegate to the wrapped
  // vector instead, matching the corrected sibling implementation.
  return this.mainVector.divide(vector);
}
/**
 * Element-wise division of the wrapped vector by the given vector.
 *
 * @param vector the divisor vector.
 * @return a new vector with the element-wise quotient.
 */
@Override
public DoubleVector divide(DoubleVector vector) {
  DoubleVector quotient = this.mainVector.divide(vector);
  return quotient;
}
/**
 * Initializes an internal node that merges two child cluster nodes.
 *
 * @param node1 the left child.
 * @param node2 the right child.
 * @param distance the distance at which the two children were merged.
 */
ClusterNode(ClusterNode node1, ClusterNode node2, double distance) {
  this.splitDistance = distance;
  this.left = node1;
  this.right = node2;
  this.left.parent = this;
  this.right.parent = this;
  // the mean of this node is the midpoint between both child means
  this.mean = node1.mean.add(node2.mean).divide(2);
}
/**
 * Recomputes every cluster center as the mean of the vectors assigned to
 * it, in parallel over the clusters. Clusters with no assignments keep
 * their previous center.
 *
 * @param assignments per-cluster deques of assigned vectors; each deque is
 *          drained (emptied) by this method.
 */
private void computeCenters(Deque<DoubleVector>[] assignments) {
  IntStream.range(0, assignments.length).parallel().forEach((cluster) -> {
    Deque<DoubleVector> queue = assignments[cluster];
    int numAssigned = queue.size();
    if (numAssigned > 0) {
      // sum all assigned vectors, consuming the deque as we go
      DoubleVector sum = queue.pop();
      while (!queue.isEmpty()) {
        sum = sum.add(queue.pop());
      }
      centers[cluster] = sum.divide(numAssigned);
    }
  });
}
/**
 * Merges two centers when they are within the given distance of each other.
 * The merged center is the midpoint of the pair; it replaces the reference
 * center at index i, and the absorbed center is removed. The list is
 * mutated in place.
 *
 * NOTE(review): after a merge the inner loop keeps measuring against the
 * ORIGINAL referenceVector, not the freshly merged midpoint written to
 * index i — presumably intentional (stable merge window), but worth
 * confirming.
 *
 * @param centers the mutable list of centers to merge.
 * @param mergeWindow centers closer than this distance are merged.
 */
private static void merge(List<DoubleVector> centers, double mergeWindow) {
  for (int i = 0; i < centers.size(); i++) {
    DoubleVector referenceVector = centers.get(i);
    // find centers to merge if they are within our merge window
    for (int j = i + 1; j < centers.size(); j++) {
      DoubleVector center = centers.get(j);
      double dist = EuclidianDistance.get().measureDistance(referenceVector,
          center);
      if (dist < mergeWindow) {
        centers.remove(j);
        // replace the reference with the midpoint of the merged pair
        centers.set(i, referenceVector.add(center).divide(2d));
        // decrement to not omit the following record
        j--;
      }
    }
  }
}
/**
 * Predicts the outcome distribution for the given features by normalizing
 * the raw prediction so its components sum to one.
 *
 * @param features the feature vector to score.
 * @return the normalized prediction; if the raw prediction sums to zero it
 *         is returned unnormalized instead of becoming a NaN vector.
 */
@Override
public DoubleVector predictProbability(DoubleVector features) {
  DoubleVector predict = predict(features);
  double sum = predict.sum();
  // ROBUSTNESS: guard against division by zero — an all-zero prediction
  // would otherwise turn into a NaN vector. Mirrors the sum != 0d guard
  // used by the normalizing prediction path elsewhere in this codebase.
  if (sum != 0d) {
    predict = predict.divide(sum);
  }
  return predict;
}
/**
 * Predicts the probability distribution over the outcomes for the given
 * features. In the binary case the raw prediction is returned as-is; in
 * the multi-class case it is normalized to sum to one.
 *
 * @param features the feature vector to score.
 * @return the (possibly normalized) prediction vector.
 */
@Override
public DoubleVector predictProbability(DoubleVector features) {
  DoubleVector prediction = predict(features);
  if (numOutcomes == 2) {
    // binary case: the single sigmoid output is already a probability
    return prediction;
  }
  return prediction.divide(prediction.sum());
}
/**
 * Numerically stable softmax: shifts by the maximum component before
 * exponentiation (avoids overflow) and normalizes so the result sums to
 * one.
 *
 * @param vector the input scores.
 * @return the softmax probability vector.
 */
@Override
public DoubleVector apply(DoubleVector vector) {
  DoubleVector shifted = vector.subtract(vector.max());
  DoubleVector exponentiated = shifted.exp();
  return exponentiated.divide(exponentiated.sum());
}
/**
 * Divides every column of this matrix element-wise by the given vector.
 *
 * @param vec the divisor applied to each column.
 * @return a new matrix with the divided columns; this matrix is unchanged.
 */
@Override
public DoubleMatrix divide(DoubleVector vec) {
  DenseDoubleMatrix result = new DenseDoubleMatrix(this.getRowCount(),
      this.getColumnCount());
  for (int col = 0; col < this.getColumnCount(); col++) {
    DoubleVector divided = getColumnVector(col).divide(vec);
    result.setColumnVector(col, divided);
  }
  return result;
}
/**
 * Runs every underlying regression classifier on the feature vector and
 * collects their scalar outputs into a single vector, optionally
 * normalized so the components sum to one.
 *
 * @param feature the feature vector to score.
 * @return one prediction value per classifier.
 */
@Override
public DoubleVector predict(DoubleVector feature) {
  DoubleVector mesh = new DenseDoubleVector(classifier.length);
  for (int index = 0; index < classifier.length; index++) {
    DoubleVector prediction = classifier[index].predict(feature);
    // each classifier must emit exactly one output value
    Preconditions.checkArgument(prediction.getDimension() == 1,
        "Prediction only works for a single dimensional output! Given "
            + prediction.getDimension());
    mesh.set(index, prediction.get(0));
  }
  if (normalize) {
    double total = mesh.sum();
    // avoid dividing an all-zero prediction by zero
    if (total != 0d) {
      mesh = mesh.divide(total);
    }
  }
  return mesh;
}
@Override public DoubleVector predict(DoubleVector features) { // clamp the features to the visible units, calculate the joint // probability for each hidden state and put it into the vector DoubleVector probabilities = emissionProbabilityMatrix .multiplyVectorRow(features); double max = probabilities.max(); for (int state = 0; state < probabilities.getDimension(); state++) { probabilities.set(state, FastMath.exp(probabilities.get(state) - max) * hiddenPriorProbability.get(state)); } // normalize again return probabilities.divide(probabilities.sum()); }
/**
 * Normalizes the HMM parameters in place: the hidden prior is scaled to
 * sum to one, and every row of the transition and emission matrices is
 * normalized to a probability distribution (optionally converted to the
 * log domain).
 *
 * NOTE(review): assumes transition and emission matrices have the same
 * number of rows (one per hidden state) — confirm against the callers.
 *
 * @param hiddenPriorProbability mutated in place.
 * @param transitionProbabilityMatrix rows mutated in place.
 * @param emissionProbabilitiyMatrix rows mutated in place.
 * @param log if true, rows are additionally converted to log probabilities.
 */
private static void normalize(DoubleVector hiddenPriorProbability,
    DoubleMatrix transitionProbabilityMatrix,
    DoubleMatrix emissionProbabilitiyMatrix, boolean log) {
  double sum = hiddenPriorProbability.sum();
  // guard against dividing an all-zero prior by zero
  if (sum != 0d) {
    for (int i = 0; i < hiddenPriorProbability.getDimension(); i++) {
      hiddenPriorProbability.set(i, hiddenPriorProbability.get(i) / sum);
    }
  }
  for (int row = 0; row < transitionProbabilityMatrix.getRowCount(); row++) {
    // note that we are using row vectors here, because dense matrices give us
    // the underlying array wrapped by the vector object so we can directly
    // mutate the values beneath
    DoubleVector rowVector = transitionProbabilityMatrix.getRowVector(row);
    rowVector = rowVector.divide(rowVector.sum());
    if (log) {
      rowVector = rowVector.log();
    }
    transitionProbabilityMatrix.setRowVector(row, rowVector);
    // same normalization for the emission matrix row
    rowVector = emissionProbabilitiyMatrix.getRowVector(row);
    rowVector = rowVector.divide(rowVector.sum());
    if (log) {
      rowVector = rowVector.log();
    }
    emissionProbabilitiyMatrix.setRowVector(row, rowVector);
  }
}
public DoubleVector predict(DoubleVector features, DoubleVector previousOutcome) { // clamp the features to the visible units, calculate the joint // probability for each hidden state and put it into the vector DoubleVector probabilities = emissionProbabilityMatrix .multiplyVectorRow(features); // we can add here, both are logarithms probabilities.add(transitionProbabilityMatrix .multiplyVectorRow(previousOutcome)); double max = probabilities.max(); for (int state = 0; state < probabilities.getDimension(); state++) { probabilities.set(state, FastMath.exp(probabilities.get(state) - max) * hiddenPriorProbability.get(state)); } // normalize again return probabilities.divide(probabilities.sum()); }
/**
 * Computes the logistic-regression cost and gradient for the given
 * parameter vector.
 *
 * @param theta the current parameter vector (index 0 is the bias).
 * @return the cost and the gradient with respect to theta.
 */
@Override
public CostGradientTuple evaluateCost(DoubleVector theta) {
  // hypothesis: sigmoid of the linear combination x * theta
  DoubleVector activation = SIGMOID.get().apply(x.multiplyVectorRow(theta));
  DenseDoubleMatrix hypo = new DenseDoubleMatrix(Arrays.asList(activation));
  double error = ERROR_FUNCTION.calculateLoss(y, hypo);
  DoubleMatrix loss = hypo.subtract(y);
  // average the loss over the m training examples
  double j = error / m;
  DoubleVector gradient = xTransposed.multiplyVectorRow(loss.getRowVector(0))
      .divide(m);
  if (lambda != 0d) {
    // L2 regularization of the gradient
    DoubleVector reg = theta.multiply(lambda / m);
    // don't regularize the bias
    reg.set(0, 0d);
    gradient = gradient.add(reg);
    // NOTE(review): the cost penalty below includes the bias term theta[0]
    // and omits the conventional 1/2 factor, while the gradient above
    // excludes the bias — presumably intentional, but worth confirming.
    j += lambda * theta.pow(2).sum() / m;
  }
  return new CostGradientTuple(j, gradient);
}
// closing brace of the enclosing class (from the surrounding file)
}
@Override public CostGradientTuple updateGradient(DoubleVector theta, DoubleVector gradient, double learningRate, long iteration, double cost) { if (movingAvg == null) { // initialize same types with zeros movingAvg = gradient.deepCopy().multiply(0); squaredGradient = gradient.deepCopy().multiply(0); } DoubleVector oneMinusBeta1Grad = gradient.multiply(1d - movingAvgDecay); movingAvg = movingAvg.multiply(movingAvgDecay).add(oneMinusBeta1Grad); DoubleVector oneMinusBeta2GradSquared = gradient.pow(2d).multiply( 1 - squaredDecay); squaredGradient = squaredGradient.multiply(squaredDecay).add( oneMinusBeta2GradSquared); double beta1t = FastMath.pow(movingAvgDecay, iteration); double beta2t = FastMath.pow(squaredDecay, iteration); double alphat = alpha * FastMath.sqrt(1 - beta2t) / (1 - beta1t); if (Double.isNaN(alphat) || alphat == 0.0) { alphat = EPS; } DoubleVector sqrtV = squaredGradient.sqrt().add(eps); gradient = movingAvg.multiply(alphat).divide(sqrtV); return new CostGradientTuple(cost, gradient); }
/**
 * Normalizes every column of the given matrix to zero mean and unit
 * standard deviation.
 *
 * @param x the input matrix; it is not modified.
 * @return a tuple of (normalized matrix, per-column mean vector,
 *         per-column stddev vector). The stddev vector reports the true
 *         (possibly zero) standard deviation per column.
 */
public static Tuple3<DoubleMatrix, DoubleVector, DoubleVector> meanNormalizeColumns(
    DoubleMatrix x) {
  DenseDoubleMatrix toReturn = new DenseDoubleMatrix(x.getRowCount(),
      x.getColumnCount());
  final int length = x.getColumnCount();
  DoubleVector meanVector = new DenseDoubleVector(length);
  DoubleVector stddevVector = new DenseDoubleVector(length);
  // first pass: per-column mean and (population) standard deviation
  for (int col = 0; col < length; col++) {
    DoubleVector column = x.getColumnVector(col);
    double mean = column.sum() / column.getLength();
    meanVector.set(col, mean);
    double var = column.subtract(mean).pow(2).sum() / column.getLength();
    stddevVector.set(col, Math.sqrt(var));
  }
  // second pass: center and scale each column
  for (int col = 0; col < length; col++) {
    double stddev = stddevVector.get(col);
    // ROBUSTNESS: a constant column has zero stddev; dividing by it would
    // fill the column with NaN/Infinity. Dividing by 1 leaves the
    // mean-centered (all-zero) column intact. The returned stddevVector
    // still reports the true zero value.
    double divisor = (stddev == 0d) ? 1d : stddev;
    DoubleVector column = x.getColumnVector(col)
        .subtract(meanVector.get(col)).divide(divisor);
    toReturn.setColumn(col, column.toArray());
  }
  return new Tuple3<>(toReturn, meanVector, stddevVector);
}