/**
 * Returns the sum of all elements in the tensor.
 */
public double elementSum() {
  double sum = 0.0;
  for (SimpleMatrix slice : slices) {
    sum += slice.elementSum();
  }
  return sum;
}
private static double score(SimpleMatrix features, List<SimpleMatrix> weights) {
  for (int i = 0; i < weights.size(); i += 2) {
    features = weights.get(i).mult(features).plus(weights.get(i + 1));
    if (weights.get(i).numRows() > 1) {
      features = NeuralUtils.elementwiseApplyReLU(features);
    }
  }
  return features.elementSum();
}
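// Illustrative sketch, not part of the original source: it assumes the weights list
// alternates weight and bias matrices [W1, b1, W2, b2, ...], with the final weight
// having a single row so that no ReLU is applied and elementSum() reduces the 1x1
// result to a scalar. The dimensions and values below are made up, and the call
// assumes this code lives in the same class as the private score(...) above.
List<SimpleMatrix> weights = new ArrayList<>();
weights.add(new SimpleMatrix(2, 3, true, 0.1, -0.2, 0.3, 0.0, 0.5, -0.1)); // W1: 2x3 hidden layer (ReLU applied)
weights.add(new SimpleMatrix(2, 1, true, 0.05, -0.05));                    // b1: 2x1 bias
weights.add(new SimpleMatrix(1, 2, true, 0.7, -0.4));                      // W2: 1x2 output row (no ReLU)
weights.add(new SimpleMatrix(1, 1, true, 0.01));                           // b2: 1x1 bias
SimpleMatrix features = new SimpleMatrix(3, 1, true, 1.0, 2.0, 3.0);
double s = score(features, weights);  // forward pass; elementSum() of the final 1x1 result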
private static double scaleAndRegularize(Map<String, SimpleMatrix> derivatives,
                                         Map<String, SimpleMatrix> currentMatrices,
                                         double scale, double regCost,
                                         boolean activeMatricesOnly, boolean dropBiasColumn) {
  double cost = 0.0; // the regularization cost
  for (Map.Entry<String, SimpleMatrix> entry : currentMatrices.entrySet()) {
    SimpleMatrix D = derivatives.get(entry.getKey());
    if (activeMatricesOnly && D == null) {
      // Fill in an empty matrix so the length of theta can match.
      // TODO: might want to allow for sparse parameter vectors
      derivatives.put(entry.getKey(), new SimpleMatrix(entry.getValue().numRows(), entry.getValue().numCols()));
      continue;
    }
    SimpleMatrix regMatrix = entry.getValue();
    if (dropBiasColumn) {
      regMatrix = new SimpleMatrix(regMatrix);
      regMatrix.insertIntoThis(0, regMatrix.numCols() - 1, new SimpleMatrix(regMatrix.numRows(), 1));
    }
    D = D.scale(scale).plus(regMatrix.scale(regCost));
    derivatives.put(entry.getKey(), D);
    cost += regMatrix.elementMult(regMatrix).elementSum() * regCost / 2.0;
  }
  return cost;
}
/**
 * Applies softmax to all of the elements of the matrix. The return
 * matrix will have all of its elements sum to 1. If your matrix is
 * not already a vector, be sure this is what you actually want.
 */
public static SimpleMatrix softmax(SimpleMatrix input) {
  SimpleMatrix output = new SimpleMatrix(input);
  for (int i = 0; i < output.numRows(); ++i) {
    for (int j = 0; j < output.numCols(); ++j) {
      output.set(i, j, Math.exp(output.get(i, j)));
    }
  }
  double sum = output.elementSum();
  // will be safe, since exp should never return 0
  return output.scale(1.0 / sum);
}
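// Illustrative usage, not part of the original source: softmax of a 3x1 column
// vector of logits. Every entry of the output is positive and elementSum() of
// the output is 1 up to floating-point rounding.
SimpleMatrix logits = new SimpleMatrix(3, 1, true, 1.0, 2.0, 3.0);
SimpleMatrix probs = softmax(logits);
System.out.println(probs);               // roughly 0.090, 0.245, 0.665
System.out.println(probs.elementSum());  // ~1.0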
private static double scaleAndRegularize(TwoDimensionalMap<String, String, SimpleMatrix> derivatives,
                                         TwoDimensionalMap<String, String, SimpleMatrix> currentMatrices,
                                         double scale, double regCost, boolean dropBiasColumn) {
  double cost = 0.0; // the regularization cost
  for (TwoDimensionalMap.Entry<String, String, SimpleMatrix> entry : currentMatrices) {
    SimpleMatrix D = derivatives.get(entry.getFirstKey(), entry.getSecondKey());
    SimpleMatrix regMatrix = entry.getValue();
    if (dropBiasColumn) {
      regMatrix = new SimpleMatrix(regMatrix);
      regMatrix.insertIntoThis(0, regMatrix.numCols() - 1, new SimpleMatrix(regMatrix.numRows(), 1));
    }
    D = D.scale(scale).plus(regMatrix.scale(regCost));
    derivatives.put(entry.getFirstKey(), entry.getSecondKey(), D);
    cost += regMatrix.elementMult(regMatrix).elementSum() * regCost / 2.0;
  }
  return cost;
}
SimpleMatrix localCD = deltaClass.mult(NeuralUtils.concatenateWithBias(currentVector).transpose());
double error = -(NeuralUtils.elementwiseApplyLog(predictions).elementMult(goldLabel).elementSum());
error = error * nodeWeight;
RNNCoreAnnotations.setPredictionError(tree, error);
public static double euclidianDistance(SimpleMatrix columnVector1, SimpleMatrix columnVector2) {
  SimpleMatrix distVector = columnVector2.minus(columnVector1);
  double distance = Math.sqrt(MatrixOps.elemPow(distVector, 2).elementSum());
  return distance;
}
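// Illustrative usage, not part of the original source: distance between two 3x1
// column vectors. Squaring via elementMult gives the same squared norm as the
// project-specific MatrixOps.elemPow(distVector, 2) used above, so dist matches
// euclidianDistance(p1, p2).
SimpleMatrix p1 = new SimpleMatrix(3, 1, true, 1.0, 2.0, 3.0);
SimpleMatrix p2 = new SimpleMatrix(3, 1, true, 4.0, 6.0, 3.0);
SimpleMatrix diff = p2.minus(p1);
double dist = Math.sqrt(diff.elementMult(diff).elementSum()); // 5.0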
double scaleAndRegularize(Map<String, SimpleMatrix> derivatives,
                          Map<String, SimpleMatrix> currentMatrices,
                          double scale, double regCost) {
  double cost = 0.0; // the regularization cost
  for (Map.Entry<String, SimpleMatrix> entry : currentMatrices.entrySet()) {
    SimpleMatrix D = derivatives.get(entry.getKey());
    D = D.scale(scale).plus(entry.getValue().scale(regCost));
    derivatives.put(entry.getKey(), D);
    cost += entry.getValue().elementMult(entry.getValue()).elementSum() * regCost / 2.0;
  }
  return cost;
}
public boolean isValidXYZ() {
  return (this.ecef != null
      && this.ecef.elementSum() != 0
      && !Double.isNaN(this.ecef.get(0))
      && !Double.isNaN(this.ecef.get(1))
      && !Double.isNaN(this.ecef.get(2))
      && !Double.isInfinite(this.ecef.get(0))
      && !Double.isInfinite(this.ecef.get(1))
      && !Double.isInfinite(this.ecef.get(2))
      && (ecef.get(0) != 0 && ecef.get(1) != 0 && ecef.get(2) != 0));
}
double[] x = { 2.5, 0.5, 2.2, 1.9, 3.1, 2.3, 2.0, 1.0, 1.5, 1.1 };
double[] y = { 2.4, 0.7, 2.9, 2.2, 3.0, 2.7, 1.6, 1.1, 1.6, 0.9 };
SimpleMatrix a = new SimpleMatrix(x.length, 1, true, x);
SimpleMatrix b = new SimpleMatrix(y.length, 1, true, y);
double meanA = a.elementSum() / x.length;
double meanB = b.elementSum() / y.length;

// center each vector by subtracting its mean
CommonOps.add(a.getMatrix(), -meanA);
CommonOps.add(b.getMatrix(), -meanB);

// compute the covariance
double c11 = a.transpose().mult(a).get(0, 0) / x.length;
double c12 = a.transpose().mult(b).get(0, 0) / x.length;
double c22 = b.transpose().mult(b).get(0, 0) / x.length;
SimpleMatrix covariance = new SimpleMatrix(2, 2, true, c11, c12, c12, c22);
public static double calculateUnscentedHellingerDistance(OneComponentDistribution dist1,
                                                         TwoComponentDistribution dist2) throws Exception {
  ThreeComponentDistribution dist0 = mergeSampleDists(dist1, dist2, HALF, HALF);
  List<SigmaPoint> sigmaPoints = getAllSigmaPoints(dist0, 3);

  ArrayList<SimpleMatrix> points = new ArrayList<SimpleMatrix>();
  ArrayList<Double> weights = new ArrayList<Double>();
  for (SigmaPoint p : sigmaPoints) {
    points.add(p.getmPointVecor());
    weights.add(p.getmWeight());
  }

  List<Double> dist1Ev = dist1.evaluate(points);
  List<Double> dist2Ev = dist2.evaluate(points);
  dist1Ev = MatrixOps.setNegativeValuesToZero(dist1Ev);
  dist2Ev = MatrixOps.setNegativeValuesToZero(dist2Ev);
  List<Double> dist0Ev = dist0.evaluate(points);
  dist0Ev = MatrixOps.setNegativeValuesToZero(dist0Ev);

  SimpleMatrix mat0 = MatrixOps.doubleListToMatrix(dist0Ev);
  SimpleMatrix mat1 = MatrixOps.doubleListToMatrix(dist1Ev);
  SimpleMatrix mat2 = MatrixOps.doubleListToMatrix(dist2Ev);
  SimpleMatrix weightsMatrix = MatrixOps.doubleListToMatrix(weights);

  SimpleMatrix g = MatrixOps.elemPow((MatrixOps.elemSqrt(mat1).minus(MatrixOps.elemSqrt(mat2))), 2);
  SimpleMatrix tmp = weightsMatrix.elementMult(g);
  CommonOps.elementDiv(tmp.getMatrix(), mat0.getMatrix(), tmp.getMatrix());
  double val = tmp.elementSum();
  double H = Math.sqrt(Math.abs(val / 2));
  return H;
}
double scaleAndRegularize(TwoDimensionalMap<String, String, SimpleMatrix> derivatives,
                          TwoDimensionalMap<String, String, SimpleMatrix> currentMatrices,
                          double scale, double regCost) {
  double cost = 0.0; // the regularization cost
  for (TwoDimensionalMap.Entry<String, String, SimpleMatrix> entry : currentMatrices) {
    SimpleMatrix D = derivatives.get(entry.getFirstKey(), entry.getSecondKey());
    D = D.scale(scale).plus(entry.getValue().scale(regCost));
    derivatives.put(entry.getFirstKey(), entry.getSecondKey(), D);
    cost += entry.getValue().elementMult(entry.getValue()).elementSum() * regCost / 2.0;
  }
  return cost;
}