/**
 * Build an optimizer for least squares problems.
 * <p>The default values for the algorithm settings are:
 * <ul>
 *  <li>{@link #setInitialStepBoundFactor initial step bound factor}: 100.0</li>
 *  <li>{@link #setMaxIterations maximal iterations}: 1000</li>
 *  <li>{@link #setCostRelativeTolerance cost relative tolerance}: 1.0e-10</li>
 *  <li>{@link #setParRelativeTolerance parameters relative tolerance}: 1.0e-10</li>
 *  <li>{@link #setOrthoTolerance orthogonality tolerance}: 1.0e-10</li>
 * </ul>
 * </p>
 */
public LevenbergMarquardtOptimizer() {

    // iteration limit handled by the superclass
    setMaxIterations(1000);

    // default tuning parameters; each one can be overridden
    // through its dedicated setter after construction
    setOrthoTolerance(1.0e-10);
    setParRelativeTolerance(1.0e-10);
    setCostRelativeTolerance(1.0e-10);
    setInitialStepBoundFactor(100.0);

}
/**
 * Build an optimizer for least squares problems.
 * <p>The default values for the algorithm settings are:
 * <ul>
 *  <li>{@link #setConvergenceChecker(VectorialConvergenceChecker) vectorial convergence checker}: null</li>
 *  <li>{@link #setInitialStepBoundFactor(double) initial step bound factor}: 100.0</li>
 *  <li>{@link #setMaxIterations(int) maximal iterations}: 1000</li>
 *  <li>{@link #setCostRelativeTolerance(double) cost relative tolerance}: 1.0e-10</li>
 *  <li>{@link #setParRelativeTolerance(double) parameters relative tolerance}: 1.0e-10</li>
 *  <li>{@link #setOrthoTolerance(double) orthogonality tolerance}: 1.0e-10</li>
 *  <li>{@link #setQRRankingThreshold(double) QR ranking threshold}: {@link MathUtils#SAFE_MIN}</li>
 * </ul>
 * </p>
 * <p>These default values may be overridden after construction. If the {@link
 * #setConvergenceChecker vectorial convergence checker} is set to a non-null value, it
 * will be used instead of the {@link #setCostRelativeTolerance cost relative tolerance}
 * and {@link #setParRelativeTolerance parameters relative tolerance} settings.</p>
 */
public LevenbergMarquardtOptimizer() {

    // iteration limit handled by the superclass
    setMaxIterations(1000);

    // no custom convergence checker by default: the cost and parameters
    // relative tolerances below drive convergence instead
    setConvergenceChecker(null);

    // default tuning parameters, individually overridable after construction
    setQRRankingThreshold(MathUtils.SAFE_MIN);
    setOrthoTolerance(1.0e-10);
    setParRelativeTolerance(1.0e-10);
    setCostRelativeTolerance(1.0e-10);
    setInitialStepBoundFactor(100.0);

}
/**
 * Simple constructor with default settings.
 * <p>The convergence check is set to a {@link SimpleVectorialValueChecker}
 * and the maximal number of evaluation is set to its default value.</p>
 */
protected AbstractLeastSquaresOptimizer() {
    // evaluations are effectively unbounded by default; only the
    // iteration limit below normally stops the algorithm
    setMaxEvaluations(Integer.MAX_VALUE);
    setMaxIterations(DEFAULT_MAX_ITERATIONS);
    setConvergenceChecker(new SimpleVectorialValueChecker());
}
// NOTE(review): non-contiguous excerpt from a conjugate-gradient doOptimize loop —
// the braces are unbalanced and several statement bodies are elided between the
// snippets, so this span cannot be reformatted or edited in isolation. It shows:
// preconditioned steepest-descent setup, a convergence check against the previous
// iterate, a univariate line search along the search direction, and the restart
// test (every n iterations or when beta < 0). Confirm against the full method.
preconditioner = new IdentityPreconditioner(); double[] r = computeObjectiveGradient(point); if (goal == GoalType.MINIMIZE) { for (int i = 0; i < n; ++i) { double[] steepestDescent = preconditioner.precondition(point, r); double[] searchDirection = steepestDescent.clone(); final double objective = computeObjectiveValue(point); RealPointValuePair previous = current; current = new RealPointValuePair(point, objective); if (previous != null) { if (checker.converged(getIterations(), previous, current)) { incrementIterationsCounter(); final UnivariateRealFunction lsf = new LineSearchFunction(searchDirection); final double step = solver.solve(lsf, 0, findUpperBound(lsf, 0, initialStep)); r = computeObjectiveGradient(point); if (goal == GoalType.MINIMIZE) { for (int i = 0; i < n; ++i) { final double[] newSteepestDescent = preconditioner.precondition(point, r); delta = 0; for (int i = 0; i < n; ++i) { if ((getIterations() % n == 0) || (beta < 0)) {
/**
 * Simple constructor with default settings.
 * <p>The convergence check is set to a {@link SimpleScalarValueChecker}
 * and the maximal number of evaluation is set to its default value.</p>
 */
protected AbstractScalarDifferentiableOptimizer() {
    // evaluations are effectively unbounded by default; only the
    // iteration limit below normally stops the algorithm
    setMaxEvaluations(Integer.MAX_VALUE);
    setMaxIterations(DEFAULT_MAX_ITERATIONS);
    setConvergenceChecker(new SimpleScalarValueChecker());
}
/**
 * Guess the errors in optimized parameters.
 * <p>Guessing is covariance-based, it only gives rough order of magnitude.</p>
 * @return errors in optimized parameters
 * @exception FunctionEvaluationException if the function jacobian cannot be evaluated
 * @exception OptimizationException if the covariances matrix cannot be computed
 * or the number of degrees of freedom is not positive (number of measurements
 * lesser or equal to number of parameters)
 */
public double[] guessParametersErrors()
    throws FunctionEvaluationException, OptimizationException {
    // errors are meaningless without at least one degree of freedom
    if (rows <= cols) {
        throw new OptimizationException(
                "no degrees of freedom ({0} measurements, {1} parameters)",
                rows, cols);
    }
    double[] errors = new double[cols];
    // common scaling factor: sqrt(chi2 / degrees-of-freedom)
    final double c = Math.sqrt(getChiSquare() / (rows - cols));
    double[][] covar = getCovariances();
    // per-parameter error: scaled square root of the covariance diagonal
    for (int i = 0; i < errors.length; ++i) {
        errors[i] = Math.sqrt(covar[i][i]) * c;
    }
    return errors;
}
/** * @param xDegree Degree of the polynomial fitting functions along the * x-dimension. * @param yDegree Degree of the polynomial fitting functions along the * y-dimension. */ public SmoothingPolynomialBicubicSplineInterpolator(int xDegree, int yDegree) { xFitter = new PolynomialFitter(xDegree, new GaussNewtonOptimizer(false)); yFitter = new PolynomialFitter(yDegree, new GaussNewtonOptimizer(false)); }
/** {@inheritDoc} */ public double value(double x) throws FunctionEvaluationException { // current point in the search direction final double[] shiftedPoint = point.clone(); for (int i = 0; i < shiftedPoint.length; ++i) { shiftedPoint[i] += x * searchDirection[i]; } // gradient of the objective function final double[] gradient; gradient = computeObjectiveGradient(shiftedPoint); // dot product with the search direction double dotProduct = 0; for (int i = 0; i < gradient.length; ++i) { dotProduct += gradient[i] * searchDirection[i]; } return dotProduct; }
/** {@inheritDoc} */ public RealPointValuePair optimize(final DifferentiableMultivariateRealFunction f, final GoalType goalType, final double[] startPoint) throws FunctionEvaluationException, OptimizationException, IllegalArgumentException { // reset counters iterations = 0; evaluations = 0; gradientEvaluations = 0; // store optimization problem characteristics function = f; gradient = f.gradient(); goal = goalType; point = startPoint.clone(); return doOptimize(); }
/** {@inheritDoc} */ public VectorialPointValuePair optimize(final DifferentiableMultivariateVectorialFunction f, final double[] target, final double[] weights, final double[] startPoint) throws FunctionEvaluationException, OptimizationException, IllegalArgumentException { if (target.length != weights.length) { throw new OptimizationException(LocalizedFormats.DIMENSIONS_MISMATCH_SIMPLE, target.length, weights.length); } // reset counters iterations = 0; objectiveEvaluations = 0; jacobianEvaluations = 0; // store least squares problem characteristics function = f; jF = f.jacobian(); targetValues = target.clone(); residualsWeights = weights.clone(); this.point = startPoint.clone(); this.residuals = new double[target.length]; // arrays shared with the other private methods rows = target.length; cols = point.length; jacobian = new double[rows][cols]; wjacobian = new double[rows][cols]; wresiduals = new double[rows]; cost = Double.POSITIVE_INFINITY; return doOptimize(); }
// NOTE(review): two-statement excerpt of a larger private method — pj, sPar, diag,
// qy and the work arrays are defined by the surrounding (unseen) code. Presumably
// scales the (permuted) diagonal entry by the current Levenberg-Marquardt parameter
// before solving for the LM direction — confirm against the full method.
work1[pj] = sPar * diag[pj];
determineLMDirection(qy, work1, work2, work3);
/**
 * Get the Root Mean Square value, i.e. the root of the arithmetic
 * mean of the square of all weighted residuals. This is related to the
 * criterion that is minimized by the optimizer as follows: if
 * <em>c</em> is the criterion, and <em>n</em> is the number of
 * measurements, then the RMS is <em>sqrt (c/n)</em>.
 *
 * @return RMS value
 */
public double getRMS() {
    return FastMath.sqrt(getChiSquare() / rows);
}
// NOTE(review): non-contiguous excerpt from a conjugate-gradient doOptimize loop —
// the braces are unbalanced and several statement bodies are elided between the
// snippets, so this span cannot be reformatted or edited in isolation. It shows:
// preconditioned steepest-descent setup, a convergence check against the previous
// iterate, a univariate line search along the search direction, and the restart
// test (every n iterations or when beta < 0). Confirm against the full method.
preconditioner = new IdentityPreconditioner(); double[] r = computeObjectiveGradient(point); if (goal == GoalType.MINIMIZE) { for (int i = 0; i < n; ++i) { double[] steepestDescent = preconditioner.precondition(point, r); double[] searchDirection = steepestDescent.clone(); final double objective = computeObjectiveValue(point); RealPointValuePair previous = current; current = new RealPointValuePair(point, objective); if (previous != null) { if (checker.converged(getIterations(), previous, current)) { incrementIterationsCounter(); final UnivariateRealFunction lsf = new LineSearchFunction(searchDirection); final double step = solver.solve(lsf, 0, findUpperBound(lsf, 0, initialStep)); r = computeObjectiveGradient(point); if (goal == GoalType.MINIMIZE) { for (int i = 0; i < n; ++i) { final double[] newSteepestDescent = preconditioner.precondition(point, r); delta = 0; for (int i = 0; i < n; ++i) { if ((getIterations() % n == 0) || (beta < 0)) {
/**
 * Simple constructor with default settings.
 * <p>The convergence check is set to a {@link SimpleVectorialValueChecker}
 * and the maximal number of evaluation is set to its default value.</p>
 */
protected AbstractLeastSquaresOptimizer() {
    // evaluations are effectively unbounded by default; only the
    // iteration limit below normally stops the algorithm
    setMaxEvaluations(Integer.MAX_VALUE);
    setMaxIterations(DEFAULT_MAX_ITERATIONS);
    setConvergenceChecker(new SimpleVectorialValueChecker());
}
/**
 * Simple constructor with default settings.
 * <p>The convergence check is set to a {@link SimpleScalarValueChecker}
 * and the maximal number of evaluation is set to its default value.</p>
 */
protected AbstractScalarDifferentiableOptimizer() {
    // evaluations are effectively unbounded by default; only the
    // iteration limit below normally stops the algorithm
    setMaxEvaluations(Integer.MAX_VALUE);
    setMaxIterations(DEFAULT_MAX_ITERATIONS);
    setConvergenceChecker(new SimpleScalarValueChecker());
}
/**
 * Guess the errors in optimized parameters.
 * <p>Guessing is covariance-based, it only gives rough order of magnitude.</p>
 * @return errors in optimized parameters
 * @exception FunctionEvaluationException if the function jacobian cannot be evaluated
 * @exception OptimizationException if the covariances matrix cannot be computed
 * or the number of degrees of freedom is not positive (number of measurements
 * lesser or equal to number of parameters)
 */
public double[] guessParametersErrors()
    throws FunctionEvaluationException, OptimizationException {
    // errors are meaningless without at least one degree of freedom
    if (rows <= cols) {
        throw new OptimizationException(
                LocalizedFormats.NO_DEGREES_OF_FREEDOM,
                rows, cols);
    }
    double[] errors = new double[cols];
    // common scaling factor: sqrt(chi2 / degrees-of-freedom)
    final double c = FastMath.sqrt(getChiSquare() / (rows - cols));
    double[][] covar = getCovariances();
    // per-parameter error: scaled square root of the covariance diagonal
    for (int i = 0; i < errors.length; ++i) {
        errors[i] = FastMath.sqrt(covar[i][i]) * c;
    }
    return errors;
}
/** {@inheritDoc} */ public double value(double x) throws FunctionEvaluationException { // current point in the search direction final double[] shiftedPoint = point.clone(); for (int i = 0; i < shiftedPoint.length; ++i) { shiftedPoint[i] += x * searchDirection[i]; } // gradient of the objective function final double[] gradient = computeObjectiveGradient(shiftedPoint); // dot product with the search direction double dotProduct = 0; for (int i = 0; i < gradient.length; ++i) { dotProduct += gradient[i] * searchDirection[i]; } return dotProduct; }
/** {@inheritDoc} */ public RealPointValuePair optimize(final DifferentiableMultivariateRealFunction f, final GoalType goalType, final double[] startPoint) throws FunctionEvaluationException, OptimizationException, IllegalArgumentException { // reset counters iterations = 0; evaluations = 0; gradientEvaluations = 0; // store optimization problem characteristics function = f; gradient = f.gradient(); goal = goalType; point = startPoint.clone(); return doOptimize(); }
/** {@inheritDoc} */ public VectorialPointValuePair optimize(final DifferentiableMultivariateVectorialFunction f, final double[] target, final double[] weights, final double[] startPoint) throws FunctionEvaluationException, OptimizationException, IllegalArgumentException { if (target.length != weights.length) { throw new OptimizationException("dimension mismatch {0} != {1}", target.length, weights.length); } // reset counters iterations = 0; objectiveEvaluations = 0; jacobianEvaluations = 0; // store least squares problem characteristics function = f; jF = f.jacobian(); targetValues = target.clone(); residualsWeights = weights.clone(); this.point = startPoint.clone(); this.residuals = new double[target.length]; // arrays shared with the other private methods rows = target.length; cols = point.length; jacobian = new double[rows][cols]; cost = Double.POSITIVE_INFINITY; return doOptimize(); }
// NOTE(review): two-statement excerpt of a larger private method — pj, sPar, diag,
// qy and the work arrays are defined by the surrounding (unseen) code. Presumably
// scales the (permuted) diagonal entry by the current Levenberg-Marquardt parameter
// before solving for the LM direction — confirm against the full method.
work1[pj] = sPar * diag[pj];
determineLMDirection(qy, work1, work2, work3);