// Fragment (reformatted from a mangled single line; tokens unchanged): configures a
// quasi-Newton minimizer with elastic-net penalties (L1 + L2 cost), an iteration
// budget, L-BFGS memory size m, and a cap on function evaluations, then trains.
// NOTE(review): l1Cost, l2Cost, iterations, m, maxFctEval, printMessages, indexer and
// objectiveFunction are declared outside this visible span — presumably trainer state.
QNMinimizer minimizer = new QNMinimizer(
    l1Cost, l2Cost, iterations, m, maxFctEval, printMessages);
// Attach an evaluator so training progress can be measured on the indexed data.
minimizer.setEvaluator(new ModelEvaluator(indexer));
// Run the optimization; the returned vector holds the trained model weights.
double[] parameters = minimizer.minimize(objectiveFunction);
// Fragment (reformatted from a mangled single line; tokens unchanged, braces are
// unbalanced because the enclosing method extends beyond this view): interleaved
// statements from the minimize() loop of an L1-regularized quasi-Newton optimizer.
// currValue, currPoint, currGrad, dimension, direction, lsr, iter and evaluator are
// declared outside this span.
// Fold the L1 penalty into the objective value (the smooth part presumably already
// includes the L2 term — TODO confirm against the enclosing method).
currValue += l1Cost * ArrayMath.l1norm(currPoint);
// Pseudo-gradient handles the non-differentiable |x_i| terms of the L1 penalty
// (orthant-wise style — presumably OWL-QN; verify against the class documentation).
pseudoGrad = new double[dimension];
computePseudoGrad(currPoint, currGrad, pseudoGrad);
// Progress banner describing the problem and the configured penalties.
display("\nSolving convex optimization problem.");
display("\nObjective function has " + dimension + " variable(s).");
display("\n\nPerforming " + iterations + " iterations with " + "L1Cost=" + l1Cost + " and L2Cost=" + l2Cost + "\n");
// Pick a search direction, then refresh the pseudo-gradient at the point returned by
// the line search and record it on the line-search result for the next iteration.
computeDirection(direction);
computePseudoGrad(lsr.getNextPoint(), lsr.getGradAtNext(), pseudoGrad);
lsr.setPseudoGradAtNext(pseudoGrad);
// Right-align the iteration number (widths for <10 / <100 / >=100); the opening
// "if (iter < 10)" of this chain lies outside the visible span.
display("  " + iter + ": ");
else if (iter < 100) display(" " + iter + ": ");
else display(iter + ": ");
// With an evaluator: log value, change rate and the evaluation metric; otherwise just
// value and change rate. The matching "if" header is outside the visible span.
display("\t" + lsr.getValueAtNext() + "\t" + lsr.getFuncChangeRate() + "\t" + evaluator.evaluate(lsr.getNextPoint()) + "\n");
} else {
display("\t " + lsr.getValueAtNext() + "\t" + lsr.getFuncChangeRate() + "\n");
// Stop iterating as soon as any convergence criterion fires.
if (isConverged(lsr)) break;
@Test
public void testQuadraticFunction() {
  // Minimizes a quadratic with default QNMinimizer settings; the expected optimum
  // (x, y) = (1, 5) with value 10 is presumed from the assertions — confirm against
  // the QuadraticFunction fixture.
  QNMinimizer minimizer = new QNMinimizer();
  Function f = new QuadraticFunction();
  double[] x = minimizer.minimize(f);
  double minValue = f.valueAt(x);

  // Fix: JUnit's assertEquals(double, double, double) takes (expected, actual,
  // delta). The original passed them swapped, which produces misleading failure
  // messages ("expected <computed> but was <1.0>") on regression.
  Assert.assertEquals(1.0, x[0], 1e-5);
  Assert.assertEquals(5.0, x[1], 1e-5);
  Assert.assertEquals(10.0, minValue, 1e-10);
}
// Fragment (reformatted from a mangled single line; tokens unchanged, braces are
// unbalanced because the condition headers and "if (verbose)" guards for the first,
// third and fourth checks lie outside this view): convergence tests, each logging a
// stop reason and returning true.
// Objective value changed by less than the tolerance between iterations.
display("Function change rate is smaller than the threshold " + CONVERGE_TOLERANCE + ".\nTraining will stop.\n\n");
return true;
// Relative gradient test: ||grad|| / ||x|| below tolerance indicates a
// (near-)stationary point.
if (gradNorm / xNorm < REL_GRAD_NORM_TOL) {
if (verbose)
display("Relative L2-norm of the gradient is smaller than the threshold " + REL_GRAD_NORM_TOL + ".\nTraining will stop.\n\n");
return true;
// Line-search step collapsed below the minimum — no further progress possible.
display("Step size is smaller than the minimum step size " + MIN_STEP_SIZE + ".\nTraining will stop.\n\n");
return true;
// Function-evaluation budget exhausted.
display("Maximum number of function evaluations has exceeded the threshold " + this.maxFctEval + ".\nTraining will stop.\n\n");
return true;
@Test
public void testRosenbrockFunction() {
  // Minimizes the Rosenbrock function with default QNMinimizer settings; its global
  // minimum is at (1, 1) with value 0 — a standard hard test for quasi-Newton
  // optimizers because of its curved, flat valley.
  QNMinimizer minimizer = new QNMinimizer();
  Function f = new Rosenbrock();
  double[] x = minimizer.minimize(f);
  double minValue = f.valueAt(x);

  // Fix: JUnit's assertEquals(double, double, double) takes (expected, actual,
  // delta). The original passed them swapped, which produces misleading failure
  // messages on regression.
  Assert.assertEquals(1.0, x[0], 1e-5);
  Assert.assertEquals(1.0, x[1], 1e-5);
  Assert.assertEquals(0, minValue, 1e-10);
}
// Fragment (duplicate span; reformatted from a mangled single line, tokens unchanged;
// braces unbalanced because the condition headers and "if (verbose)" guards for the
// first, third and fourth checks lie outside this view): convergence tests, each
// logging a stop reason and returning true.
// Objective value changed by less than the tolerance between iterations.
display("Function change rate is smaller than the threshold " + CONVERGE_TOLERANCE + ".\nTraining will stop.\n\n");
return true;
// Relative gradient test: ||grad|| / ||x|| below tolerance indicates a
// (near-)stationary point.
if (gradNorm / xNorm < REL_GRAD_NORM_TOL) {
if (verbose)
display("Relative L2-norm of the gradient is smaller than the threshold " + REL_GRAD_NORM_TOL + ".\nTraining will stop.\n\n");
return true;
// Line-search step collapsed below the minimum — no further progress possible.
display("Step size is smaller than the minimum step size " + MIN_STEP_SIZE + ".\nTraining will stop.\n\n");
return true;
// Function-evaluation budget exhausted.
display("Maximum number of function evaluations has exceeded the threshold " + this.maxFctEval + ".\nTraining will stop.\n\n");
return true;
// Fragment (duplicate span; reformatted from a mangled single line, tokens unchanged):
// configures a quasi-Newton minimizer with elastic-net penalties (L1 + L2 cost), an
// iteration budget, L-BFGS memory size m, and a cap on function evaluations.
// NOTE(review): l1Cost, l2Cost, iterations, m, maxFctEval, printMessages, indexer and
// objectiveFunction are declared outside this visible span — presumably trainer state.
QNMinimizer minimizer = new QNMinimizer(
    l1Cost, l2Cost, iterations, m, maxFctEval, printMessages);
// Attach an evaluator so training progress can be measured on the indexed data.
minimizer.setEvaluator(new ModelEvaluator(indexer));
// Run the optimization; the returned vector holds the trained model weights.
double[] parameters = minimizer.minimize(objectiveFunction);
// Fragment (duplicate span; reformatted from a mangled single line, tokens unchanged;
// braces unbalanced because the enclosing method extends beyond this view):
// interleaved statements from the minimize() loop of an L1-regularized quasi-Newton
// optimizer. currValue, currPoint, currGrad, dimension, direction, lsr, iter and
// evaluator are declared outside this span.
// Fold the L1 penalty into the objective value (the smooth part presumably already
// includes the L2 term — TODO confirm against the enclosing method).
currValue += l1Cost * ArrayMath.l1norm(currPoint);
// Pseudo-gradient handles the non-differentiable |x_i| terms of the L1 penalty
// (orthant-wise style — presumably OWL-QN; verify against the class documentation).
pseudoGrad = new double[dimension];
computePseudoGrad(currPoint, currGrad, pseudoGrad);
// Progress banner describing the problem and the configured penalties.
display("\nSolving convex optimization problem.");
display("\nObjective function has " + dimension + " variable(s).");
display("\n\nPerforming " + iterations + " iterations with " + "L1Cost=" + l1Cost + " and L2Cost=" + l2Cost + "\n");
// Pick a search direction, then refresh the pseudo-gradient at the point returned by
// the line search and record it on the line-search result for the next iteration.
computeDirection(direction);
computePseudoGrad(lsr.getNextPoint(), lsr.getGradAtNext(), pseudoGrad);
lsr.setPseudoGradAtNext(pseudoGrad);
// Right-align the iteration number (widths for <10 / <100 / >=100); the opening
// "if (iter < 10)" of this chain lies outside the visible span.
display("  " + iter + ": ");
else if (iter < 100) display(" " + iter + ": ");
else display(iter + ": ");
// With an evaluator: log value, change rate and the evaluation metric; otherwise just
// value and change rate. The matching "if" header is outside the visible span.
display("\t" + lsr.getValueAtNext() + "\t" + lsr.getFuncChangeRate() + "\t" + evaluator.evaluate(lsr.getNextPoint()) + "\n");
} else {
display("\t " + lsr.getValueAtNext() + "\t" + lsr.getFuncChangeRate() + "\n");
// Stop iterating as soon as any convergence criterion fires.
if (isConverged(lsr)) break;
// Fragment (duplicate span; reformatted from a mangled single line, tokens unchanged;
// braces unbalanced because the condition headers and "if (verbose)" guards for the
// first, third and fourth checks lie outside this view): convergence tests, each
// logging a stop reason and returning true.
// Objective value changed by less than the tolerance between iterations.
display("Function change rate is smaller than the threshold " + CONVERGE_TOLERANCE + ".\nTraining will stop.\n\n");
return true;
// Relative gradient test: ||grad|| / ||x|| below tolerance indicates a
// (near-)stationary point.
if (gradNorm / xNorm < REL_GRAD_NORM_TOL) {
if (verbose)
display("Relative L2-norm of the gradient is smaller than the threshold " + REL_GRAD_NORM_TOL + ".\nTraining will stop.\n\n");
return true;
// Line-search step collapsed below the minimum — no further progress possible.
display("Step size is smaller than the minimum step size " + MIN_STEP_SIZE + ".\nTraining will stop.\n\n");
return true;
// Function-evaluation budget exhausted.
display("Maximum number of function evaluations has exceeded the threshold " + this.maxFctEval + ".\nTraining will stop.\n\n");
return true;
// Fragment (duplicate span; reformatted from a mangled single line, tokens unchanged):
// configures a quasi-Newton minimizer with elastic-net penalties (L1 + L2 cost), an
// iteration budget, L-BFGS memory size m, and a cap on function evaluations.
// NOTE(review): l1Cost, l2Cost, iterations, m, maxFctEval, printMessages, indexer and
// objectiveFunction are declared outside this visible span — presumably trainer state.
QNMinimizer minimizer = new QNMinimizer(
    l1Cost, l2Cost, iterations, m, maxFctEval, printMessages);
// Attach an evaluator so training progress can be measured on the indexed data.
minimizer.setEvaluator(new ModelEvaluator(indexer));
// Run the optimization; the returned vector holds the trained model weights.
double[] parameters = minimizer.minimize(objectiveFunction);
// Fragment (duplicate span; reformatted from a mangled single line, tokens unchanged;
// braces unbalanced because the enclosing method extends beyond this view):
// interleaved statements from the minimize() loop of an L1-regularized quasi-Newton
// optimizer. currValue, currPoint, currGrad, dimension, direction, lsr, iter and
// evaluator are declared outside this span.
// Fold the L1 penalty into the objective value (the smooth part presumably already
// includes the L2 term — TODO confirm against the enclosing method).
currValue += l1Cost * ArrayMath.l1norm(currPoint);
// Pseudo-gradient handles the non-differentiable |x_i| terms of the L1 penalty
// (orthant-wise style — presumably OWL-QN; verify against the class documentation).
pseudoGrad = new double[dimension];
computePseudoGrad(currPoint, currGrad, pseudoGrad);
// Progress banner describing the problem and the configured penalties.
display("\nSolving convex optimization problem.");
display("\nObjective function has " + dimension + " variable(s).");
display("\n\nPerforming " + iterations + " iterations with " + "L1Cost=" + l1Cost + " and L2Cost=" + l2Cost + "\n");
// Pick a search direction, then refresh the pseudo-gradient at the point returned by
// the line search and record it on the line-search result for the next iteration.
computeDirection(direction);
computePseudoGrad(lsr.getNextPoint(), lsr.getGradAtNext(), pseudoGrad);
lsr.setPseudoGradAtNext(pseudoGrad);
// Right-align the iteration number (widths for <10 / <100 / >=100); the opening
// "if (iter < 10)" of this chain lies outside the visible span.
display("  " + iter + ": ");
else if (iter < 100) display(" " + iter + ": ");
else display(iter + ": ");
// With an evaluator: log value, change rate and the evaluation metric; otherwise just
// value and change rate. The matching "if" header is outside the visible span.
display("\t" + lsr.getValueAtNext() + "\t" + lsr.getFuncChangeRate() + "\t" + evaluator.evaluate(lsr.getNextPoint()) + "\n");
} else {
display("\t " + lsr.getValueAtNext() + "\t" + lsr.getFuncChangeRate() + "\n");
// Stop iterating as soon as any convergence criterion fires.
if (isConverged(lsr)) break;