// Average embedding of the (j-1)th gapped argument; compared against the full mention's embedding.
SimpleMatrix gappedEmbedding = gappedArguments.get(j - 1).getAverageEmbeddings();
// Euclidean distance between the two embedding vectors (Frobenius norm of a column vector = L2 norm).
distance = fullEmbedding.minus(gappedEmbedding).normF();
/**
 * Applies the derivative of tanh element-wise and returns a new matrix.
 *
 * <p>Computes 1 - v^2 for each entry v. NOTE(review): 1 - v^2 equals tanh'(x)
 * only when v = tanh(x), i.e. the input is assumed to already hold tanh outputs.
 *
 * @param input matrix of (assumed) tanh activations; not modified
 * @return a new matrix whose entries are 1 - input_ij^2
 */
public static SimpleMatrix elementwiseApplyTanhDerivative(SimpleMatrix input) {
    SimpleMatrix ones = new SimpleMatrix(input.numRows(), input.numCols());
    ones.set(1.0);
    return ones.minus(input.elementMult(input));
}
// Distance between the i-th and j-th subtrees' vectors (Frobenius norm of their element-wise difference).
double normF = subtrees.get(i).second().minus(subtrees.get(j).second()).normF();
// Classification error signal: (predictions - gold) scaled by the node weight when a gold
// label exists; otherwise a zero matrix so this node contributes no classification gradient.
SimpleMatrix deltaClass = goldClass >= 0 ? predictions.minus(goldLabel).scale(nodeWeight) : new SimpleMatrix(predictions.numRows(), predictions.numCols());
// Gradient for the classification matrix: outer product of the error with the bias-augmented input vector.
SimpleMatrix localCD = deltaClass.mult(NeuralUtils.concatenateWithBias(currentVector).transpose());
/**
 * Returns the element-wise difference of the ECEF position vectors
 * (this.ecef - coord.ecef) as a new SimpleMatrix.
 *
 * @param coord the coordinates to subtract from this position
 * @return a new matrix holding the ECEF difference
 */
public SimpleMatrix minusXYZ(Coordinates coord){
    return this.ecef.minus(coord.ecef);
}
/**
/**
 * Computes the measurement residual (innovation) vector y0 - H*X for a candidate state.
 *
 * @param X the state vector to evaluate
 * @return the residual vector y0 - H*X
 */
SimpleMatrix compute_residuals( SimpleMatrix X ) {
    return y0.minus(H.mult(X));
}
/**
 * Computes the Euclidean (L2) distance between two column vectors.
 *
 * <p>The Frobenius norm of the difference vector is exactly
 * sqrt(sum((v2_i - v1_i)^2)), so {@code normF()} replaces the previous
 * hand-rolled element-power / element-sum / sqrt pipeline (and its dead
 * {@code distance = 0} initializer). Behavior is unchanged.
 *
 * <p>NOTE(review): the method name keeps the original "euclidian" spelling
 * because external callers depend on it.
 *
 * @param columnVector1 the first column vector
 * @param columnVector2 the second column vector, same dimensions as the first
 * @return the Euclidean distance between the two vectors
 */
public static double euclidianDistance(SimpleMatrix columnVector1, SimpleMatrix columnVector2) {
    return columnVector2.minus(columnVector1).normF();
}
/**
 * Computes the squared Mahalanobis distance between the point x and each component
 * described by its mean and covariance.
 *
 * <p>NOTE(review): no square root is taken, so these are squared Mahalanobis
 * distances — this matches the original behavior and is preserved for callers.
 *
 * @param x the reference point (column vector)
 * @param means the component means, parallel to {@code covs}
 * @param covs the component covariance matrices (must be invertible)
 * @return a list with one squared distance per component, in the same order
 */
public ArrayList<Double> mahalanobis(SimpleMatrix x, ArrayList<SimpleMatrix> means, ArrayList<SimpleMatrix> covs) {
    ArrayList<Double> mahalanobisDistances = new ArrayList<Double>(means.size());
    for (int i = 0; i < means.size(); i++) {
        // Hoist the difference vector: the original recomputed x.minus(m) twice per component.
        SimpleMatrix diff = x.minus(means.get(i));
        // (x-m)' * C^-1 * (x-m) is a 1x1 matrix; trace() extracts the scalar.
        double distance = diff.transpose().mult(covs.get(i).invert()).mult(diff).trace();
        mahalanobisDistances.add(distance);
    }
    return mahalanobisDistances;
}
/** * Calculcates Mahalanobis distance between given point x and all given components defined by their means and covariances. * * @param x The reference point. * @param means The component means. * @param covs The component covariances. * @return A list with Mahalanobis distances between x and the given components. */ private static ArrayList<Double> mahalanobis(SimpleMatrix x, List<SimpleMatrix> means, List<SimpleMatrix> covs) { ArrayList<Double> mahalanobisDistances = new java.util.ArrayList<Double>(); for (int i = 0; i < means.size(); i++) { SimpleMatrix m = means.get(i); SimpleMatrix c = covs.get(i); // calculate Mahalanobis distance double distance = x.minus(m).transpose().mult(c.invert()).mult(x.minus(m)).trace(); mahalanobisDistances.add(distance); } return mahalanobisDistances; } }
/**
 * Evaluates the smoothed Gaussian density at the given point:
 * w * N(x; mean, cov + bandwidth).
 *
 * @see de.tuhh.luethke.okde.model.BaseSampleDistribution#evaluate(SimpleMatrix pointVector)
 */
@Override
public double evaluate(SimpleMatrix pointVector) {
    // Smooth the covariance with the KDE bandwidth matrix.
    SimpleMatrix smoothedCov = mGlobalCovariance.plus(mBandwidthMatrix);
    double n = mGlobalMean.numRows();
    // Gaussian normalization factor (2*pi)^(n/2).
    double a = Math.pow(Math.sqrt(2 * Math.PI), n);
    // Hoist the difference vector: the original computed pointVector.minus(mGlobalMean) twice.
    SimpleMatrix diff = pointVector.minus(mGlobalMean);
    // Exponent: -0.5 * (x-mu)' * S^-1 * (x-mu); the 1x1 result is read via trace().
    double tmp = (-0.5d) * diff.transpose().mult(smoothedCov.invert()).mult(diff).trace();
    return ((1 / (a * Math.sqrt(smoothedCov.determinant()))) * Math.exp(tmp)) * mGlobalWeight;
}
/**
 * Evaluates the mixture density at the given point: the weighted sum of the
 * sub-component Gaussians, each smoothed with the bandwidth matrix.
 *
 * @param pointVector the point at which to evaluate the density
 * @return the density value at pointVector
 */
@Override
public double evaluate(SimpleMatrix pointVector) {
    SimpleMatrix[] means = this.getSubMeans();
    SimpleMatrix[] covs = this.getSubCovariances();
    Double[] weights = this.getSubWeights();
    double d = 0d;
    double n = means[0].numRows();
    // Gaussian normalization factor (2*pi)^(n/2), shared by all components.
    double a = Math.pow(Math.sqrt(2 * Math.PI), n);
    for (int i = 0; i < means.length; i++) {
        SimpleMatrix m = means[i];
        // Smooth each component covariance with the bandwidth matrix.
        SimpleMatrix c = covs[i].plus(this.mBandwidthMatrix);
        double w = weights[i];
        // Hoist the difference vector: the original computed pointVector.minus(m) twice per component.
        SimpleMatrix diff = pointVector.minus(m);
        // Exponent: -0.5 * (x-mu)' * C^-1 * (x-mu); the 1x1 result is read via trace().
        double tmp = (-0.5d) * diff.transpose().mult(c.invert()).mult(diff).trace();
        d += ((1 / (a * Math.sqrt(c.determinant()))) * Math.exp(tmp)) * w;
    }
    return d;
}
/**
 * Applies the derivative of tanh element-wise and returns a new matrix.
 *
 * <p>Each output entry is 1 - v^2. NOTE(review): this equals tanh'(x) only when
 * v = tanh(x), i.e. the input is assumed to already contain tanh outputs.
 *
 * @param input matrix of (assumed) tanh activations; not modified
 * @return a new matrix with entries 1 - input_ij^2
 */
public static SimpleMatrix elementwiseApplyTanhDerivative(SimpleMatrix input) {
    SimpleMatrix result = new SimpleMatrix(input.numRows(), input.numCols());
    result.set(1.0);
    return result.minus(input.elementMult(input));
}
/**
 * Applies the derivative of tanh to every element, returning a fresh matrix.
 *
 * <p>Formula: 1 - v^2 per entry. NOTE(review): valid as tanh'(x) only when
 * v = tanh(x) — the input is assumed to hold tanh outputs already.
 *
 * @param input matrix of (assumed) tanh activations; left unmodified
 * @return new matrix where each entry is 1 - input_ij^2
 */
public static SimpleMatrix elementwiseApplyTanhDerivative(SimpleMatrix input) {
    SimpleMatrix allOnes = new SimpleMatrix(input.numRows(), input.numCols());
    allOnes.set(1.0);
    SimpleMatrix squared = input.elementMult(input);
    return allOnes.minus(squared);
}
/**
 * Element-wise tanh derivative; allocates and returns a new matrix.
 *
 * <p>Each entry of the result is 1 - v^2. NOTE(review): this is tanh'(x) only
 * under the assumption that v = tanh(x), i.e. the input already holds tanh values.
 *
 * @param input matrix of (assumed) tanh activations; not modified
 * @return a new matrix whose entries are 1 - input_ij^2
 */
public static SimpleMatrix elementwiseApplyTanhDerivative(SimpleMatrix input) {
    SimpleMatrix inputSquared = input.elementMult(input);
    SimpleMatrix out = new SimpleMatrix(input.numRows(), input.numCols());
    out.set(1.0);
    return out.minus(inputSquared);
}
/**
 * Runs LSPI for at most numIterations policy iterations, terminating early once the
 * Frobenius norm of the change in the weight matrix is no greater than maxChange.
 *
 * @param numIterations the maximum number of policy iterations.
 * @param maxChange when the weight change is smaller than this value, LSPI terminates.
 * @return a {@link burlap.behavior.policy.GreedyQPolicy} using this object as the {@link QProvider} source.
 */
public GreedyQPolicy runPolicyIteration(int numIterations, double maxChange){
    boolean converged = false;
    for(int i = 0; i < numIterations && !converged; i++){
        SimpleMatrix newWeights = this.LSTDQ();
        double change = Double.POSITIVE_INFINITY;
        // Convergence can only be judged once a previous weight vector exists.
        if(this.lastWeights != null){
            change = this.lastWeights.minus(newWeights).normF();
            converged = change <= maxChange;
        }
        this.lastWeights = newWeights;
        DPrint.cl(0, "Finished iteration: " + i + ". Weight change: " + change);
    }
    DPrint.cl(0, "Finished Policy Iteration.");
    return new GreedyQPolicy(this);
}
public void update(DenseMatrix64F yDense) { // a fast way to make the matrices usable by SimpleMatrix SimpleMatrix y = SimpleMatrix.wrap(yDense); // z = y - H x SimpleMatrix z = y.minus(H.mult(x)); // S = H P H' + R SimpleMatrix S = H.mult(P).mult(H.transpose()).plus(R); // K = PH'S^(-1) SimpleMatrix K = P.mult(H.transpose().mult(S.invert())); // x = x + Kz x = x.plus(K.mult(z)); // P = (I-kH)P = P - KHP P = P.minus(K.mult(H).mult(P)); }
public void update(DenseMatrix64F yDense) { // a fast way to make the matrices usable by SimpleMatrix SimpleMatrix y = SimpleMatrix.wrap(yDense); // z = y - H x SimpleMatrix z = y.minus(H.mult(x)); // S = H P H' + R SimpleMatrix S = H.mult(P).mult(H.transpose()).plus(R); // K = PH'S^(-1) SimpleMatrix K = P.mult(H.transpose().mult(S.invert())); // x = x + Kz x = x.plus(K.mult(z)); // P = (I-kH)P = P - KHP P = P.minus(K.mult(H).mult(P)); }
/**
 * <p>
 * Computes a metric which measures the quality of an eigenvalue decomposition. If a
 * value is returned that is close to or smaller than 1e-15 then it is within machine precision.
 * </p>
 * <p>
 * EVD quality is defined as:<br>
 * <br>
 * Quality = ||A*V - V*D|| / ||A*V||.
 * </p>
 *
 * @param orig The original matrix. Not modified.
 * @param eig EVD of the original matrix. Not modified.
 * @return The quality of the decomposition.
 */
public static double quality( DenseMatrix64F orig , EigenDecomposition<DenseMatrix64F> eig ) {
    SimpleMatrix A = SimpleMatrix.wrap(orig);
    SimpleMatrix V = SimpleMatrix.wrap(EigenOps.createMatrixV(eig));
    SimpleMatrix D = SimpleMatrix.wrap(EigenOps.createMatrixD(eig));
    SimpleMatrix av = A.mult(V);
    // Relative residual: ||A*V - V*D||_F / ||A*V||_F
    return av.minus(V.mult(D)).normF() / av.normF();
}
/**
 * <p>
 * Computes a metric which measures the quality of an eigenvalue decomposition. A
 * result close to or below 1e-15 is within machine precision.
 * </p>
 * <p>
 * EVD quality is defined as:<br>
 * <br>
 * Quality = ||A*V - V*D|| / ||A*V||.
 * </p>
 *
 * @param orig The original matrix. Not modified.
 * @param eig EVD of the original matrix. Not modified.
 * @return The quality of the decomposition.
 */
public static double quality( DenseMatrix64F orig , EigenDecomposition<DenseMatrix64F> eig ) {
    SimpleMatrix A = SimpleMatrix.wrap(orig);
    SimpleMatrix V = SimpleMatrix.wrap(EigenOps.createMatrixV(eig));
    SimpleMatrix D = SimpleMatrix.wrap(EigenOps.createMatrixD(eig));
    SimpleMatrix left = A.mult(V);
    SimpleMatrix right = V.mult(D);
    double residualNorm = left.minus(right).normF();
    double referenceNorm = left.normF();
    // Relative residual of the reconstruction A*V = V*D.
    return residualNorm / referenceNorm;
}
/**
 * Naive but easy to visually verify implementation of the inverse BFGS update. Primarily
 * for testing purposes. Computes H = (I - rho*s*y')*H*(I - rho*y*s') + rho*s*s'.
 *
 * @param H inverse matrix being updated (overwritten in place)
 * @param s change in state
 * @param y change in gradient
 */
public static void naiveInverseUpdate(DMatrixRMaj H, DMatrixRMaj s, DMatrixRMaj y) {
    SimpleMatrix gradDelta = new SimpleMatrix(y);
    SimpleMatrix stateDelta = new SimpleMatrix(s);
    SimpleMatrix current = new SimpleMatrix(H);
    SimpleMatrix identity = SimpleMatrix.identity(gradDelta.getNumElements());
    // Curvature scaling rho = 1 / (y . s)
    double rho = 1.0/gradDelta.dot(stateDelta);
    SimpleMatrix left = identity.minus(stateDelta.mult(gradDelta.transpose()).scale(rho));
    SimpleMatrix right = identity.minus(gradDelta.mult(stateDelta.transpose()).scale(rho));
    SimpleMatrix rankOne = stateDelta.mult(stateDelta.transpose()).scale(rho);
    SimpleMatrix updated = left.mult(current).mult(right).plus(rankOne);
    H.set(updated.getMatrix());
}