/**
 * Computes the conjugate-gradient scale factor (beta) as the ratio of the
 * squared 2-norms of the current and previous gradients — the
 * Fletcher-Reeves formula.
 *
 * @param gradientCurrent
 *      Gradient at the current step.
 * @param gradientPrevious
 *      Gradient at the previous step. Assumed nonzero; a zero previous
 *      gradient yields a division by zero.
 * @return
 *      The scale factor ||g_k||^2 / ||g_{k-1}||^2.
 */
protected double computeScaleFactor(
    Vector gradientCurrent,
    Vector gradientPrevious )
{
    final double numerator = gradientCurrent.norm2Squared();
    final double denominator = gradientPrevious.norm2Squared();
    return numerator / denominator;
}
/**
 * Computes the conjugate-gradient scale factor (beta) as the ratio of the
 * squared 2-norms of the current and previous gradients — the
 * Fletcher-Reeves formula.
 *
 * @param gradientCurrent
 *      Gradient at the current step.
 * @param gradientPrevious
 *      Gradient at the previous step. Assumed nonzero; a zero previous
 *      gradient yields a division by zero.
 * @return
 *      The scale factor ||g_k||^2 / ||g_{k-1}||^2.
 */
protected double computeScaleFactor(
    Vector gradientCurrent,
    Vector gradientPrevious )
{
    final double numerator = gradientCurrent.norm2Squared();
    final double denominator = gradientPrevious.norm2Squared();
    return numerator / denominator;
}
/**
 * Computes the conjugate-gradient scale factor (beta) as the ratio of the
 * squared 2-norms of the current and previous gradients — the
 * Fletcher-Reeves formula.
 *
 * @param gradientCurrent
 *      Gradient at the current step.
 * @param gradientPrevious
 *      Gradient at the previous step. Assumed nonzero; a zero previous
 *      gradient yields a division by zero.
 * @return
 *      The scale factor ||g_k||^2 / ||g_{k-1}||^2.
 */
protected double computeScaleFactor(
    Vector gradientCurrent,
    Vector gradientPrevious )
{
    final double numerator = gradientCurrent.norm2Squared();
    final double denominator = gradientPrevious.norm2Squared();
    return numerator / denominator;
}
/**
 * Computes the squared Euclidean distance between this vector and the
 * given one, as the squared 2-norm of their difference.
 *
 * @param other
 *      The vector to measure the distance to. Must have a dimensionality
 *      compatible with this vector.
 * @return
 *      The squared Euclidean distance; always nonnegative.
 */
@Override
public double euclideanDistanceSquared(
    final Vector other )
{
    final Vector difference = this.minus( other );
    return difference.norm2Squared();
}
/**
 * Computes the squared Euclidean distance between this vector and the
 * given one, as the squared 2-norm of their difference.
 *
 * @param other
 *      The vector to measure the distance to. Must have a dimensionality
 *      compatible with this vector.
 * @return
 *      The squared Euclidean distance; always nonnegative.
 */
@Override
public double euclideanDistanceSquared(
    final Vector other )
{
    final Vector difference = this.minus( other );
    return difference.norm2Squared();
}
/**
 * Computes the squared Euclidean distance between this vector and the
 * given one, as the squared 2-norm of their difference.
 *
 * @param other
 *      The vector to measure the distance to. Must have a dimensionality
 *      compatible with this vector.
 * @return
 *      The squared Euclidean distance; always nonnegative.
 */
@Override
public double euclideanDistanceSquared(
    final Vector other )
{
    final Vector difference = this.minus( other );
    return difference.norm2Squared();
}
@Override protected double computeUpdate( final LinearBinaryCategorizer target, final Vector input, final boolean actualCategory, final double predicted) { // Get the actual category. final double actual = actualCategory ? +1.0 : -1.0; // Compute the margin. final double margin = actual * predicted; final double norm = input.norm2Squared(); return computeUpdate(margin, norm); }
@Override protected double computeUpdate( final LinearBinaryCategorizer target, final Vector input, final boolean actualCategory, final double predicted) { // Get the actual category. final double actual = actualCategory ? +1.0 : -1.0; // Compute the margin. final double margin = actual * predicted; final double norm = input.norm2Squared(); return computeUpdate(margin, norm); }
@Override protected double computeUpdate( final LinearBinaryCategorizer target, final Vector input, final boolean actualCategory, final double predicted) { // Get the actual category. final double actual = actualCategory ? +1.0 : -1.0; // Compute the margin. final double margin = actual * predicted; final double norm = input.norm2Squared(); return computeUpdate(margin, norm); }
@Override public double computeUpdate( final LinearBinaryCategorizer target, final Vector input, final boolean actualCategory, final double predicted) { final double actual = actualCategory ? +1.0 : -1.0; final double margin = actual * predicted; final double hingeLoss = 1.0 - margin; if (Math.abs(margin) > 1.0) { // Passive when there is no loss. return 0.0; } else { // Update methods use ||x||^2. final double inputNorm2Squared = input.norm2Squared(); // Compute the update value (tau). return this.computeUpdate( actual, predicted, hingeLoss, inputNorm2Squared); } }
@Override public double computeUpdate( final LinearBinaryCategorizer target, final Vector input, final boolean actualCategory, final double predicted) { final double actual = actualCategory ? +1.0 : -1.0; final double loss = 1.0 - actual * predicted; if (loss <= 0.0) { // Passive when there is no loss. return 0.0; } else { // Update methods use ||x||^2. final double inputNorm2Squared = input.norm2Squared(); // Compute the update value (tau). return this.computeUpdate( actual, predicted, loss, inputNorm2Squared); } }
@Override public double computeUpdate( final LinearBinaryCategorizer target, final Vector input, final boolean actualCategory, final double predicted) { final double actual = actualCategory ? +1.0 : -1.0; final double loss = 1.0 - actual * predicted; if (loss <= 0.0) { // Passive when there is no loss. return 0.0; } else { // Update methods use ||x||^2. final double inputNorm2Squared = input.norm2Squared(); // Compute the update value (tau). return this.computeUpdate( actual, predicted, loss, inputNorm2Squared); } }
@Override public double computeUpdate( final LinearBinaryCategorizer target, final Vector input, final boolean actualCategory, final double predicted) { final double actual = actualCategory ? +1.0 : -1.0; final double loss = 1.0 - actual * predicted; if (loss <= 0.0) { // Passive when there is no loss. return 0.0; } else { // Update methods use ||x||^2. final double inputNorm2Squared = input.norm2Squared(); // Compute the update value (tau). return this.computeUpdate( actual, predicted, loss, inputNorm2Squared); } }
/**
 * Computes the conjugate-gradient scale factor (beta) from the change in
 * gradient: the dot product of (g_k - g_{k-1}) with g_k, divided by the
 * squared 2-norm of g_{k-1} — the Polak-Ribiere formula.
 *
 * @param gradientCurrent
 *      Gradient at the current step.
 * @param gradientPrevious
 *      Gradient at the previous step. Assumed nonzero; a zero previous
 *      gradient yields a division by zero.
 * @return
 *      The scale factor (g_k - g_{k-1}) . g_k / ||g_{k-1}||^2.
 */
protected double computeScaleFactor(
    Vector gradientCurrent,
    Vector gradientPrevious )
{
    final Vector gradientChange = gradientCurrent.minus( gradientPrevious );
    final double numerator = gradientChange.dotProduct( gradientCurrent );
    final double denominator = gradientPrevious.norm2Squared();
    return numerator / denominator;
}
/**
 * Computes the conjugate-gradient scale factor (beta) from the change in
 * gradient: the dot product of (g_k - g_{k-1}) with g_k, divided by the
 * squared 2-norm of g_{k-1} — the Polak-Ribiere formula.
 *
 * @param gradientCurrent
 *      Gradient at the current step.
 * @param gradientPrevious
 *      Gradient at the previous step. Assumed nonzero; a zero previous
 *      gradient yields a division by zero.
 * @return
 *      The scale factor (g_k - g_{k-1}) . g_k / ||g_{k-1}||^2.
 */
protected double computeScaleFactor(
    Vector gradientCurrent,
    Vector gradientPrevious )
{
    final Vector gradientChange = gradientCurrent.minus( gradientPrevious );
    final double numerator = gradientChange.dotProduct( gradientCurrent );
    final double denominator = gradientPrevious.norm2Squared();
    return numerator / denominator;
}
/**
 * Computes the conjugate-gradient scale factor (beta) from the change in
 * gradient: the dot product of (g_k - g_{k-1}) with g_k, divided by the
 * squared 2-norm of g_{k-1} — the Polak-Ribiere formula.
 *
 * @param gradientCurrent
 *      Gradient at the current step.
 * @param gradientPrevious
 *      Gradient at the previous step. Assumed nonzero; a zero previous
 *      gradient yields a division by zero.
 * @return
 *      The scale factor (g_k - g_{k-1}) . g_k / ||g_{k-1}||^2.
 */
protected double computeScaleFactor(
    Vector gradientCurrent,
    Vector gradientPrevious )
{
    final Vector gradientChange = gradientCurrent.minus( gradientPrevious );
    final double numerator = gradientChange.dotProduct( gradientCurrent );
    final double denominator = gradientPrevious.norm2Squared();
    return numerator / denominator;
}
/**
 * Gets the regularization penalty term for the current result. It
 * computes the squared 2-norm of the parameters of the factorization
 * machine, each multiplied with their appropriate regularization weight.
 *
 * @return
 *      The regularization penalty term for the objective.
 */
public double getRegularizationPenalty()
{
    // Bias term: regularization weight times the squared bias.
    final double bias = this.result.getBias();
    double total = bias * bias * this.biasRegularization;

    // Linear weights contribute their squared 2-norm, when present.
    if (this.result.hasWeights())
    {
        final double weightNormSquared = this.result.getWeights().norm2Squared();
        total += weightNormSquared * this.weightRegularization;
    }

    // Factor matrix contributes its squared Frobenius norm, when present.
    if (this.result.hasFactors())
    {
        final double factorNormSquared = this.result.getFactors().normFrobeniusSquared();
        total += factorNormSquared * this.factorRegularization;
    }

    return total;
}
/**
 * Gets the regularization penalty term in the error for the objective.
 * It is the squared value (or squared norm) of each parameter group of
 * the factorization machine, multiplied by the corresponding
 * regularization weight.
 *
 * @return
 *      The regularization penalty term, or 0.0 when there is no result.
 */
public double getRegularizationPenalty()
{
    if (this.result == null)
    {
        return 0.0;
    }

    // BUG FIX: the bias was previously multiplied in unsquared, which is
    // inconsistent with the squared norms used for the weights and
    // factors below and lets a negative bias reduce the "penalty".
    final double bias = this.result.getBias();
    double penalty = this.biasRegularization * bias * bias;

    if (this.result.hasWeights())
    {
        penalty += this.weightRegularization
            * this.result.getWeights().norm2Squared();
    }

    if (this.result.hasFactors())
    {
        penalty += this.factorRegularization
            * this.result.getFactors().normFrobeniusSquared();
    }

    return penalty;
}
/**
 * Gets the regularization penalty term in the error for the objective.
 * It is the squared value (or squared norm) of each parameter group of
 * the factorization machine, multiplied by the corresponding
 * regularization weight.
 *
 * @return
 *      The regularization penalty term, or 0.0 when there is no result.
 */
public double getRegularizationPenalty()
{
    if (this.result == null)
    {
        return 0.0;
    }

    // BUG FIX: the bias was previously multiplied in unsquared, which is
    // inconsistent with the squared norms used for the weights and
    // factors below and lets a negative bias reduce the "penalty".
    final double bias = this.result.getBias();
    double penalty = this.biasRegularization * bias * bias;

    if (this.result.hasWeights())
    {
        penalty += this.weightRegularization
            * this.result.getWeights().norm2Squared();
    }

    if (this.result.hasFactors())
    {
        penalty += this.factorRegularization
            * this.result.getFactors().normFrobeniusSquared();
    }

    return penalty;
}
/**
 * Gets the regularization penalty term in the error for the objective.
 * It is the squared value (or squared norm) of each parameter group of
 * the factorization machine, multiplied by the corresponding
 * regularization weight.
 *
 * @return
 *      The regularization penalty term, or 0.0 when there is no result.
 */
public double getRegularizationPenalty()
{
    if (this.result == null)
    {
        return 0.0;
    }

    // BUG FIX: the bias was previously multiplied in unsquared, which is
    // inconsistent with the squared norms used for the weights and
    // factors below and lets a negative bias reduce the "penalty".
    final double bias = this.result.getBias();
    double penalty = this.biasRegularization * bias * bias;

    if (this.result.hasWeights())
    {
        penalty += this.weightRegularization
            * this.result.getWeights().norm2Squared();
    }

    if (this.result.hasFactors())
    {
        penalty += this.factorRegularization
            * this.result.getFactors().normFrobeniusSquared();
    }

    return penalty;
}