private static double estimateLikelihood(DoubleMatrix alpha) {
  // sum the last row of the alpha matrix generated by the forward algorithm;
  // this row denotes the end state of our sequence.
  return alpha.getRowVector(alpha.getRowCount() - 1).sum();
}
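// For reference, a minimal equivalent sketch of what the terminal-row sum
// computes, written out element by element. The helper name is made up for
// illustration; it relies only on the DoubleVector accessors (getDimension,
// get) already used elsewhere in this section.
private static double sumLastAlphaRow(DoubleMatrix alpha) {
  DoubleVector lastRow = alpha.getRowVector(alpha.getRowCount() - 1);
  double likelihood = 0d;
  for (int i = 0; i < lastRow.getDimension(); i++) {
    // accumulate alpha_T(i) over all hidden states i, i.e. P(observations | model)
    likelihood += lastRow.get(i);
  }
  return likelihood;
}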
public DoubleVector predict(DoubleVector feature, DoubleVector[] featuresPerState) {
  return ViterbiUtils.decode(theta,
      new SparseDoubleRowMatrix(Collections.singletonList(feature)),
      new SparseDoubleRowMatrix(featuresPerState), classes).getRowVector(0);
}
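// Hypothetical call site for predict above. The variable names, the
// DenseDoubleVector(double[]) constructor and the classifier instance are
// assumptions for illustration, not taken from the surrounding code.
DoubleVector currentFeatures = new DenseDoubleVector(new double[] { 1d, 0d, 1d });
DoubleVector[] perStateFeatures = new DoubleVector[] {
    new DenseDoubleVector(new double[] { 1d, 0d, 0d }),
    new DenseDoubleVector(new double[] { 0d, 1d, 1d }) };
// classifier is a placeholder for an instance of the class defining predict;
// row 0 of the decoded matrix holds the prediction for the single observation
DoubleVector decodedState = classifier.predict(currentFeatures, perStateFeatures);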
/**
 * Row-copies the given matrix to this sparse implementation.
 *
 * @param mat the matrix to copy.
 */
public SparseDoubleRowMatrix(DoubleMatrix mat) {
  this(mat.getRowCount(), mat.getColumnCount());
  for (int i = 0; i < numRows; i++) {
    setRowVector(i, mat.getRowVector(i));
  }
}
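// Hypothetical usage of the row-copy constructor above. The double[][]
// constructor of DenseDoubleMatrix is an assumption (it is not shown in this
// section); the copy itself uses only the API shown above.
DoubleMatrix dense = new DenseDoubleMatrix(new double[][] {
    { 1d, 0d, 0d },
    { 0d, 0d, 2d } });
// copies the two rows into the sparse representation, which presumably keeps
// only the non-zero entries per row
SparseDoubleRowMatrix sparse = new SparseDoubleRowMatrix(dense);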
if (optionalRandom.isPresent()) {
  stateSequence[index] = chooseState(optionalRandom.get(),
      transitionProbabilities.getRowVector(stateSequence[index - 1]));
} else {
  stateSequence[index] = transitionProbabilities.getRowVector(
      stateSequence[index - 1]).maxIndex();
}
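// A plausible sketch of the chooseState helper used above (an assumption, not
// the library's actual implementation): sample a state index proportionally to
// the given, already normalized, transition probability row. Uses java.util.Random.
private static int chooseState(Random random, DoubleVector probabilities) {
  double draw = random.nextDouble();
  double cumulative = 0d;
  for (int i = 0; i < probabilities.getDimension(); i++) {
    cumulative += probabilities.get(i);
    if (draw <= cumulative) {
      return i;
    }
  }
  // guard against rounding drift when the row sums to slightly less than 1
  return probabilities.getDimension() - 1;
}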
@Override
public DoubleMatrix apply(DoubleMatrix matrix) {
  DoubleMatrix dm = newInstance(matrix);
  for (int row = 0; row < matrix.getRowCount(); row++) {
    DoubleVector apply = apply(matrix.getRowVector(row));
    if (apply.getLength() != 0) {
      dm.setRowVector(row, apply);
    }
  }
  return dm;
}
DoubleVector rowVector = transitionProbabilities.getRowVector(rowIndex);
double sum = rowVector.sum();
Iterator<DoubleVectorElement> iterateNonZero = rowVector.iterateNonZero();
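// A hedged guess at how the fragment above continues: walk the non-zero
// transition counts and normalize each one by the row sum. The getIndex and
// getValue accessors on DoubleVectorElement are assumed here.
while (iterateNonZero.hasNext()) {
  DoubleVectorElement element = iterateNonZero.next();
  rowVector.set(element.getIndex(), element.getValue() / sum);
}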
DoubleVector rowVector = probabilityMatrix.getRowVector(row);
DoubleVector rowVector = features.getRowVector(row);
double[] logProbabilities = new double[classes];

// add the predicted class probability to the gradient, subtracting 1 for the
// correct class (the usual p - 1 term of the log-likelihood gradient)
gradient.set(i, next.getIndex(), gradient.get(i, next.getIndex()) + prob);
if (correctPrediction(i, outcome.getRowVector(row))) {
  gradient.set(i, next.getIndex(), gradient.get(i, next.getIndex()) - 1d);
}

// accumulate the negative log-probability of correct predictions into the cost
if (correctPrediction(i, outcome.getRowVector(row))) {
  cost -= Math.log(prob);
}
private static void normalize(DoubleVector hiddenPriorProbability,
    DoubleMatrix transitionProbabilityMatrix,
    DoubleMatrix emissionProbabilityMatrix, boolean log) {
  double sum = hiddenPriorProbability.sum();
  if (sum != 0d) {
    for (int i = 0; i < hiddenPriorProbability.getDimension(); i++) {
      hiddenPriorProbability.set(i, hiddenPriorProbability.get(i) / sum);
    }
  }
  for (int row = 0; row < transitionProbabilityMatrix.getRowCount(); row++) {
    // note that we are using row vectors here, because dense matrices give us
    // the underlying array wrapped by the vector object so we can directly
    // mutate the values beneath
    DoubleVector rowVector = transitionProbabilityMatrix.getRowVector(row);
    rowVector = rowVector.divide(rowVector.sum());
    if (log) {
      rowVector = rowVector.log();
    }
    transitionProbabilityMatrix.setRowVector(row, rowVector);
    rowVector = emissionProbabilityMatrix.getRowVector(row);
    rowVector = rowVector.divide(rowVector.sum());
    if (log) {
      rowVector = rowVector.log();
    }
    emissionProbabilityMatrix.setRowVector(row, rowVector);
  }
}
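// Hedged usage sketch for normalize: a tiny two-state model, normalized in
// place. The DenseDoubleVector/DenseDoubleMatrix constructors from plain
// arrays are assumptions; the expected results follow directly from the
// row-wise division above.
DoubleVector prior = new DenseDoubleVector(new double[] { 2d, 2d });
DoubleMatrix transitions = new DenseDoubleMatrix(new double[][] {
    { 3d, 1d },
    { 1d, 1d } });
DoubleMatrix emissions = new DenseDoubleMatrix(new double[][] {
    { 1d, 3d },
    { 2d, 2d } });
normalize(prior, transitions, emissions, false);
// prior is now [0.5, 0.5]; the first transition row becomes [0.75, 0.25];
// with log = true each normalized row would additionally be mapped to its logs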
double[] localScores = computeScores(classes, features.getRowVector(0), weights);
prevLabel = j;
localScores = computeScores(classes, featuresPerState.getRowVector(i + j), weights);
for (int currLabel = 0; currLabel < localScores.length; currLabel++) {
  double score = localScores[currLabel];
@Override
public CostGradientTuple evaluateCost(DoubleVector theta) {
  DoubleVector activation = SIGMOID.get().apply(x.multiplyVectorRow(theta));
  DenseDoubleMatrix hypo = new DenseDoubleMatrix(Arrays.asList(activation));
  double error = ERROR_FUNCTION.calculateLoss(y, hypo);
  DoubleMatrix loss = hypo.subtract(y);
  double j = error / m;
  DoubleVector gradient = xTransposed.multiplyVectorRow(loss.getRowVector(0))
      .divide(m);
  if (lambda != 0d) {
    DoubleVector reg = theta.multiply(lambda / m);
    // don't regularize the bias
    reg.set(0, 0d);
    gradient = gradient.add(reg);
    j += lambda * theta.pow(2).sum() / m;
  }
  return new CostGradientTuple(j, gradient);
}
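// Written out, the snippet above computes (matching the code, with sigma the
// sigmoid, m the number of examples and lambda the L2 weight):
//   J(theta)      = loss(y, sigma(X * theta)) / m + lambda * sum_j theta_j^2 / m
//   grad J(theta) = X^T * (sigma(X * theta) - y) / m + (lambda / m) * theta'
// where theta' equals theta with its bias component zeroed, so the bias is
// excluded from the gradient penalty.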
DoubleVector rowVector = probabilityMatrix.getRowVector(row);
@Override
public DoubleMatrix apply(DoubleMatrix matrix) {
  DoubleMatrix newInstance = newInstance(matrix);
  if (matrix.isSparse()) {
    // if we have a sparse matrix, it is more efficient to loop over the
    // sparse row vectors
    int[] rows = matrix.rowIndices();
    for (int row : rows) {
      DoubleVector rowVector = matrix.getRowVector(row);
      if (rowVector.getLength() > 0) {
        DoubleVector apply = apply(rowVector);
        newInstance.setRowVector(row, apply);
      }
    }
  } else {
    // on dense matrices we can be faster by directly looping over the items
    for (int i = 0; i < matrix.getRowCount(); i++) {
      for (int j = 0; j < matrix.getColumnCount(); j++) {
        newInstance.set(i, j, apply(matrix.get(i, j)));
      }
    }
  }
  return newInstance;
}
hiddenPriorProbability = alpha.getRowVector(0).multiply(
    beta.getRowVector(0));
final double modelLikelihood = estimateLikelihood(alpha);
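// Baum-Welch context for the fragment above (a hedged restatement inferred
// from the snippet, not copied from surrounding code): the element-wise
// product alpha_0(i) * beta_0(i) is proportional to the posterior probability
// of starting in hidden state i, so dividing by the model likelihood
// P(observations | model) = estimateLikelihood(alpha) yields the re-estimated
// prior. For illustration only:
DoubleVector reestimatedPrior = hiddenPriorProbability.divide(modelLikelihood);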