/**
 * Reads a file containing one or more matrices and returns them as a list.
 * Expected format: one row per line, one entry per column, with consecutive
 * matrices separated by a blank line.  Each parsed matrix is transposed
 * before being added to the result.
 *
 * @param path the file to read
 * @return the matrices found in the file, in order of appearance
 */
public static List<SimpleMatrix> loadTextMatrices(String path) {
  String contents = IOUtils.stringFromFile(path).trim();
  List<SimpleMatrix> result = new ArrayList<>();
  for (String block : contents.split("\n\n")) {
    result.add(NeuralUtils.convertTextMatrix(block).transpose());
  }
  return result;
}
/** * Compute dot product between two vectors. */ public static double dot(SimpleMatrix vector1, SimpleMatrix vector2){ if(vector1.numRows()==1){ // vector1: row vector, assume that vector2 is a row vector too return vector1.mult(vector2.transpose()).get(0); } else if (vector1.numCols()==1){ // vector1: col vector, assume that vector2 is also a column vector. return vector1.transpose().mult(vector2).get(0); } else { throw new AssertionError("Error in neural.Utils.dot: vector1 is a matrix " + vector1.numRows() + " x " + vector1.numCols()); } }
/**
 * Returns a column vector whose nth entry is in' * slice_n * in, i.e. the
 * bilinear form of the nth tensor slice evaluated at the given vector.
 *
 * @param in a column vector with as many rows as the tensor has columns
 * @throws AssertionError if in is not a column vector of the right length,
 *         or if the tensor slices are not square
 */
public SimpleMatrix bilinearProducts(SimpleMatrix in) {
  if (in.numCols() != 1) {
    throw new AssertionError("Expected a column vector");
  }
  if (in.numRows() != numCols) {
    throw new AssertionError("Number of rows in the input does not match number of columns in tensor");
  }
  if (numRows != numCols) {
    throw new AssertionError("Can only perform this operation on a SimpleTensor with square slices");
  }
  SimpleMatrix inTransposed = in.transpose();
  SimpleMatrix result = new SimpleMatrix(numSlices, 1);
  for (int i = 0; i < numSlices; ++i) {
    // (1 x n) * (n x n) * (n x 1) yields a 1x1 matrix; get(0) extracts the scalar.
    result.set(i, inTransposed.mult(slices[i]).mult(in).get(0));
  }
  return result;
}
private static SimpleTensor getTensorGradient(SimpleMatrix deltaFull, SimpleMatrix leftVector, SimpleMatrix rightVector) { int size = deltaFull.getNumElements(); SimpleTensor Wt_df = new SimpleTensor(size*2, size*2, size); // TODO: combine this concatenation with computeTensorDeltaDown? SimpleMatrix fullVector = NeuralUtils.concatenate(leftVector, rightVector); for (int slice = 0; slice < size; ++slice) { Wt_df.setSlice(slice, fullVector.scale(deltaFull.get(slice)).mult(fullVector.transpose())); } return Wt_df; }
/**
 * Propagates the error signal down through both the linear transform W and
 * the tensor Wt.  Returns the bias-trimmed W' * deltaFull plus, for each
 * tensor slice S, (S + S') * v scaled by that slice's delta, where
 * v = [leftVector; rightVector].
 */
private static SimpleMatrix computeTensorDeltaDown(SimpleMatrix deltaFull, SimpleMatrix leftVector, SimpleMatrix rightVector, SimpleMatrix W, SimpleTensor Wt) {
  int size = deltaFull.getNumElements();
  // Linear contribution: multiply by W' and drop the bias row.
  SimpleMatrix linearPart = W.transpose().mult(deltaFull).extractMatrix(0, deltaFull.numRows() * 2, 0, 1);
  SimpleMatrix stacked = NeuralUtils.concatenate(leftVector, rightVector);
  SimpleMatrix tensorPart = new SimpleMatrix(size * 2, 1);
  for (int s = 0; s < size; ++s) {
    SimpleMatrix slice = Wt.getSlice(s);
    // d/dv of v' S v is (S + S') v; scale by this slice's delta.
    tensorPart = tensorPart.plus(slice.plus(slice.transpose()).mult(stacked.scale(deltaFull.get(s))));
  }
  return tensorPart.plus(linearPart);
}
@SuppressWarnings("unchecked") public Matrix pseudoinverse(double lambda) { SimpleSVD<SimpleMatrix> simpleSVD = this.svd(); SimpleMatrix U = simpleSVD.getU(); SimpleMatrix S = simpleSVD.getW(); SimpleMatrix V = simpleSVD.getV(); int N = Math.min(this.numRows(),this.numCols()); double maxSingular = 0; for( int i = 0; i < N; ++i ) { if( S.get(i, i) > maxSingular ) { maxSingular = S.get(i, i); } } double tolerance = FastMath.DBL_EPSILON * Math.max(this.numRows(),this.numCols()) * maxSingular; for(int i=0;i<Math.min(S.numRows(), S.numCols());++i) { double a = S.get(i, i); if(a <= tolerance) { a = 0; } else { a = a/(a * a + lambda * lambda); } S.set(i, i, a); } return new Matrix(V.mult(S.transpose()).mult(U.transpose())); }
currentVectorDerivative = currentVectorDerivative.elementMult(scoreW.transpose()); SimpleMatrix WTdelta = W.transpose().mult(deltaCurrent); binaryScoreDerivatives.get(leftLabel, rightLabel).plus(currentVector.transpose())); childrenVector = concatenateContextWords(childrenVector, tree.getSpan(), words); SimpleMatrix W_df = deltaCurrent.mult(childrenVector.transpose()); binaryW_dfs.put(leftLabel, rightLabel, binaryW_dfs.get(leftLabel, rightLabel).plus(W_df)); String childLabel = dvModel.basicCategory(tree.children()[0].label().value()); unaryScoreDerivatives.put(childLabel,unaryScoreDerivatives.get(childLabel).plus(currentVector.transpose())); childVectorWithBias = concatenateContextWords(childVectorWithBias, tree.getSpan(), words); SimpleMatrix W_df = deltaCurrent.mult(childVectorWithBias.transpose());
SimpleMatrix localCD = deltaClass.mult(NeuralUtils.concatenateWithBias(currentVector).transpose()); SimpleMatrix deltaFromClass = model.getUnaryClassification(category).transpose().mult(deltaClass); deltaFromClass = deltaFromClass.extractMatrix(0, model.op.numHid, 0, 1).elementMult(currentVectorDerivative); SimpleMatrix deltaFull = deltaFromClass.plus(deltaUp); SimpleMatrix deltaFromClass = model.getBinaryClassification(leftCategory, rightCategory).transpose().mult(deltaClass); deltaFromClass = deltaFromClass.extractMatrix(0, model.op.numHid, 0, 1).elementMult(currentVectorDerivative); SimpleMatrix deltaFull = deltaFromClass.plus(deltaUp); SimpleMatrix rightVector = RNNCoreAnnotations.getNodeVector(tree.children()[1]); SimpleMatrix childrenVector = NeuralUtils.concatenateWithBias(leftVector, rightVector); SimpleMatrix W_df = deltaFull.mult(childrenVector.transpose()); binaryTD.put(leftCategory, rightCategory, binaryTD.get(leftCategory, rightCategory).plus(W_df)); SimpleMatrix deltaDown; deltaDown = computeTensorDeltaDown(deltaFull, leftVector, rightVector, model.getBinaryTransform(leftCategory, rightCategory), model.getBinaryTensor(leftCategory, rightCategory)); } else { deltaDown = model.getBinaryTransform(leftCategory, rightCategory).transpose().mult(deltaFull);
/**
 * Loads a list of matrices from a text file.  The file holds one row per
 * line and one entry per column, and separates successive matrices with an
 * empty line.  Every matrix is transposed as it is read.
 *
 * @param path the file to load
 * @return all matrices from the file, in file order
 */
public static List<SimpleMatrix> loadTextMatrices(String path) {
  List<SimpleMatrix> loaded = new ArrayList<>();
  String[] chunks = IOUtils.stringFromFile(path).trim().split("\n\n");
  for (String chunk : chunks) {
    loaded.add(NeuralUtils.convertTextMatrix(chunk).transpose());
  }
  return loaded;
}
/**
 * Parses the given file into a list of matrices.  Matrices appear as blocks
 * of lines (one row per line, one entry per column) separated by a blank
 * line; each block is converted and then transposed.
 *
 * @param path the path of the file to parse
 * @return the transposed matrices, one per blank-line-delimited block
 */
public static List<SimpleMatrix> loadTextMatrices(String path) {
  List<SimpleMatrix> out = new ArrayList<>();
  for (String section : IOUtils.stringFromFile(path).trim().split("\n\n")) {
    SimpleMatrix parsed = NeuralUtils.convertTextMatrix(section);
    out.add(parsed.transpose());
  }
  return out;
}
/**
 * Computes the squared Mahalanobis distance from x to each supplied Gaussian
 * component: entry i is (x - means[i])' * covs[i]^-1 * (x - means[i]).
 *
 * @param x a column vector
 * @param means the component means, parallel to covs
 * @param covs the component covariance matrices
 * @return one distance per component, in component order
 */
public ArrayList<Double> mahalanobis(SimpleMatrix x, ArrayList<SimpleMatrix> means, ArrayList<SimpleMatrix> covs) {
  ArrayList<Double> distances = new java.util.ArrayList<Double>();
  for (int i = 0; i < means.size(); i++) {
    SimpleMatrix diff = x.minus(means.get(i));
    // The quadratic form is a 1x1 matrix; trace() extracts its scalar value.
    distances.add(diff.transpose().mult(covs.get(i).invert()).mult(diff).trace());
  }
  return distances;
}
/** * Compute dot product between two vectors. */ public static double dot(SimpleMatrix vector1, SimpleMatrix vector2){ if(vector1.numRows()==1){ // vector1: row vector, assume that vector2 is a row vector too return vector1.mult(vector2.transpose()).get(0); } else if (vector1.numCols()==1){ // vector1: col vector, assume that vector2 is also a column vector. return vector1.transpose().mult(vector2).get(0); } else { throw new AssertionError("Error in neural.Utils.dot: vector1 is a matrix " + vector1.numRows() + " x " + vector1.numCols()); } }
/** * Compute dot product between two vectors. */ public static double dot(SimpleMatrix vector1, SimpleMatrix vector2){ if(vector1.numRows()==1){ // vector1: row vector, assume that vector2 is a row vector too return vector1.mult(vector2.transpose()).get(0); } else if (vector1.numCols()==1){ // vector1: col vector, assume that vector2 is also a column vector. return vector1.transpose().mult(vector2).get(0); } else { throw new AssertionError("Error in neural.Utils.dot: vector1 is a matrix " + vector1.numRows() + " x " + vector1.numCols()); } }
/**
 * Computes the kinetic energy 0.5 * qd' * M * qd, where qd is the vector of
 * joint velocities and M is the supplied mass matrix.
 *
 * @param joints the joints whose velocities make up qd, in order
 * @param massMatrix the mass matrix M; assumed sized to the joint count
 */
protected double computeKineticEnergy(ArrayList<RevoluteJoint> joints, DenseMatrix64F massMatrix) {
  int jointCount = joints.size();
  SimpleMatrix velocities = new SimpleMatrix(jointCount, 1);
  for (int i = 0; i < jointCount; i++) {
    velocities.set(i, 0, joints.get(i).getQd());
  }
  SimpleMatrix wrappedMass = SimpleMatrix.wrap(massMatrix);
  // qd' * M * qd is a 1x1 matrix; read its single entry.
  double quadraticForm = velocities.transpose().mult(wrappedMass).mult(velocities).get(0, 0);
  return 0.5 * quadraticForm;
}
/**
 * Accumulates a weighted least-squares task into a quadratic program:
 * H += J' * W * J and f += J' * W * b.
 *
 * @param taskJacobian the task Jacobian J
 * @param taskObjective the task objective b
 * @param taskWeight the task weight matrix W
 * @param hToModify the quadratic cost matrix, updated in place
 * @param fToModify the linear cost vector, updated in place
 */
private static void addTask(DenseMatrix64F taskJacobian, DenseMatrix64F taskObjective, DenseMatrix64F taskWeight, DenseMatrix64F hToModify, DenseMatrix64F fToModify) {
  SimpleMatrix jacobian = new SimpleMatrix(taskJacobian);
  SimpleMatrix weight = new SimpleMatrix(taskWeight);
  SimpleMatrix objective = new SimpleMatrix(taskObjective);
  // J' * W is shared between both cost terms.
  SimpleMatrix jacobianTransposeWeight = jacobian.transpose().mult(weight);
  CommonOps.add(hToModify, jacobianTransposeWeight.mult(jacobian).getMatrix(), hToModify);
  CommonOps.add(fToModify, jacobianTransposeWeight.mult(objective).getMatrix(), fToModify);
}
}
private SimpleTensor getTensorGradient(SimpleMatrix deltaFull, SimpleMatrix leftVector, SimpleMatrix rightVector) { int size = deltaFull.getNumElements(); SimpleTensor Wt_df = new SimpleTensor(size*2, size*2, size); // TODO: combine this concatenation with computeTensorDeltaDown? SimpleMatrix fullVector = NeuralUtils.concatenate(leftVector, rightVector); for (int slice = 0; slice < size; ++slice) { Wt_df.setSlice(slice, fullVector.scale(deltaFull.get(slice)).mult(fullVector.transpose())); } return Wt_df; }
public void predict(DenseMatrix64F uDense) { // x = F x + G u SimpleMatrix u = SimpleMatrix.wrap(uDense); x = (F.mult(x)).plus((G.mult(u))); // P = F P F' + Q P = F.mult(P).mult(F.transpose()).plus(Q); }
private static SimpleTensor getTensorGradient(SimpleMatrix deltaFull, SimpleMatrix leftVector, SimpleMatrix rightVector) { int size = deltaFull.getNumElements(); SimpleTensor Wt_df = new SimpleTensor(size*2, size*2, size); // TODO: combine this concatenation with computeTensorDeltaDown? SimpleMatrix fullVector = NeuralUtils.concatenate(leftVector, rightVector); for (int slice = 0; slice < size; ++slice) { Wt_df.setSlice(slice, fullVector.scale(deltaFull.get(slice)).mult(fullVector.transpose())); } return Wt_df; }
public void predict(DenseMatrix64F uDense) { // x = F x + G u SimpleMatrix u = SimpleMatrix.wrap(uDense); x = (F.mult(x)).plus((G.mult(u))); // P = F P F' + Q P = F.mult(P).mult(F.transpose()).plus(Q); }
/**
 * Sends the error signal down to the children through both the matrix
 * transform W and the tensor Wt.  The result is the bias-stripped
 * W' * deltaFull plus the sum over slices S of deltaFull[s] * (S + S') * v,
 * with v the concatenation of the child vectors.
 */
private static SimpleMatrix computeTensorDeltaDown(SimpleMatrix deltaFull, SimpleMatrix leftVector, SimpleMatrix rightVector, SimpleMatrix W, SimpleTensor Wt) {
  // Matrix contribution, with the bias entry trimmed off.
  SimpleMatrix wTransposeDelta = W.transpose().mult(deltaFull);
  SimpleMatrix matrixContribution = wTransposeDelta.extractMatrix(0, deltaFull.numRows() * 2, 0, 1);
  int numSlices = deltaFull.getNumElements();
  SimpleMatrix childVector = NeuralUtils.concatenate(leftVector, rightVector);
  SimpleMatrix tensorContribution = new SimpleMatrix(numSlices * 2, 1);
  for (int i = 0; i < numSlices; ++i) {
    SimpleMatrix scaledChild = childVector.scale(deltaFull.get(i));
    // Gradient of the bilinear form v' S v with respect to v is (S + S') v.
    SimpleMatrix symmetrized = Wt.getSlice(i).plus(Wt.getSlice(i).transpose());
    tensorContribution = tensorContribution.plus(symmetrized.mult(scaledChild));
  }
  return tensorContribution.plus(matrixContribution);
}