/**
 * Solves the penalized least-squares problem when the model is supplied as a function of its
 * parameters only (the measurement points are baked into the function) and an analytic
 * parameter sensitivity (Jacobian) is available. No constraint is placed on trial positions.
 *
 * @param observedValues  the measurement values
 * @param sigma  the measurement errors
 * @param func  the model as a function of its parameters only
 * @param jac  the model sensitivity to its parameters (the Jacobian matrix) as a function of the parameters only
 * @param startPos  the initial parameter values
 * @param penalty  the penalty matrix
 * @return the least-square results
 */
public LeastSquareWithPenaltyResults solve(
    DoubleArray observedValues,
    DoubleArray sigma,
    Function<DoubleArray, DoubleArray> func,
    Function<DoubleArray, DoubleMatrix> jac,
    DoubleArray startPos,
    DoubleMatrix penalty) {

  // delegate to the fully general overload, allowing every trial position
  return solve(observedValues, sigma, func, jac, startPos, penalty, UNCONSTRAINED);
}
/**
 * Creates a calibrator instance.
 * <p>
 * The base-class state is initialized with the pricer and reference data, and a
 * penalty-based non-linear least-squares solver is created with a Cholesky
 * decomposition and the supplied convergence tolerance.
 *
 * @param pricer  the cap/floor leg pricer used during calibration
 * @param epsilon  the convergence tolerance handed to the least-squares solver
 * @param referenceData  the reference data
 */
private DirectIborCapletFloorletVolatilityCalibrator(
    VolatilityIborCapFloorLegPricer pricer,
    double epsilon,
    ReferenceData referenceData) {

  super(pricer, referenceData);
  // the solver is fixed at construction; epsilon controls when iteration stops
  this.solver = new NonLinearLeastSquareWithPenalty(new CholeskyDecompositionCommons(), epsilon);
}
// NOTE(review): this is an interior excerpt of a larger method -- the enclosing loop and the
// declarations of oldChiSqr, newChiSqr, newError, alpha, lambda, trialTheta and decmp are
// outside this view, and the statement order here appears to be a lossy extraction
// (e.g. code after `continue;` / `return ...;`). Comments only; code left byte-identical.
// It looks like a Levenberg-Marquardt style damped iteration with a quadratic penalty
// term getANorm(penalty, theta) added to chi-square -- confirm against the full method.

// chi-square at the current position, including the penalty contribution
DoubleArray error = getError(func, observedValues, sigma, theta);
DoubleMatrix jacobian = getJacobian(jac, sigma, theta);
oldChiSqr = getChiSqr(error);
double p = getANorm(penalty, theta);
oldChiSqr += p;

// gradient of chi-square, adjusted by the penalty gradient (penalty * theta)
DoubleArray beta = getChiSqrGrad(error, jacobian);
DoubleArray temp = (DoubleArray) _algebra.multiply(penalty, theta);
beta = (DoubleArray) _algebra.subtract(beta, temp);

// damped curvature matrix (Hessian approximation) augmented with the penalty
alpha = getModifiedCurvatureMatrix(jacobian, lambda, penalty);
DoubleArray deltaTheta;

// step rejected: raise the damping factor and try again from the same theta
lambda = increaseLambda(lambda);
continue;

// chi-square (plus penalty p) at the trial position
newError = getError(func, observedValues, sigma, trialTheta);
p = getANorm(penalty, trialTheta);
newChiSqr = getChiSqr(newError);
newChiSqr += p;

// converged: use the undamped curvature (lambda == 0) when building the final statistics
DoubleMatrix alpha0 = lambda == 0.0 ? alpha : getModifiedCurvatureMatrix(jacobian, 0.0, penalty);
return finish(alpha0, decmp, newChiSqr - p, p, jacobian, trialTheta, sigma);

// step accepted: lower the damping factor and move to the trial position
lambda = decreaseLambda(lambda);
theta = trialTheta;
/**
 * Assembles the final result object once the iteration has converged.
 * <p>
 * The covariance matrix is obtained by solving against the identity, and the inverse
 * Jacobian by solving against the sigma-scaled transposed Jacobian, both using the
 * decomposition computed at the solution.
 *
 * @param alpha  the curvature matrix at the solution
 * @param decmp  the decomposition of the curvature matrix
 * @param chiSqr  the chi-square at the solution, excluding the penalty
 * @param penalty  the penalty value at the solution
 * @param jacobian  the Jacobian at the solution
 * @param newTheta  the fitted parameters
 * @param sigma  the measurement errors
 * @return the populated result
 */
private LeastSquareWithPenaltyResults finish(
    DoubleMatrix alpha,
    DecompositionResult decmp,
    double chiSqr,
    double penalty,
    DoubleMatrix jacobian,
    DoubleArray newTheta,
    DoubleArray sigma) {

  DoubleMatrix covar = decmp.solve(DoubleMatrix.identity(alpha.rowCount()));
  DoubleMatrix invJac = decmp.solve(getBTranspose(jacobian, sigma));
  return new LeastSquareWithPenaltyResults(chiSqr, penalty, newTheta, covar, invJac);
}
/**
 * Solves the penalized least-squares problem when the model is supplied as a function of its
 * parameters only (the measurement points are baked into the function) and no analytic
 * parameter sensitivity is available; the Jacobian is obtained by finite differencing.
 *
 * @param observedValues  the measurement values
 * @param sigma  the measurement errors
 * @param func  the model as a function of its parameters only
 * @param startPos  the initial parameter values
 * @param penalty  the penalty matrix
 * @return the least-square results holding the fitted parameters
 */
public LeastSquareWithPenaltyResults solve(
    DoubleArray observedValues,
    DoubleArray sigma,
    Function<DoubleArray, DoubleArray> func,
    DoubleArray startPos,
    DoubleMatrix penalty) {

  // build a numerical Jacobian by first-order finite differencing of the model
  Function<DoubleArray, DoubleMatrix> numericJac =
      new VectorFieldFirstOrderDifferentiator().differentiate(func);
  return solve(observedValues, sigma, func, numericJac, startPos, penalty);
}
/**
 * Solves the penalized least-squares problem when the model is supplied as a function of its
 * parameters only (the measurement points are baked into the function), no analytic
 * parameter sensitivity is available, and trial positions are filtered by a predicate.
 * <p>
 * The predicate can, for example, enforce positive parameters without resorting to a
 * non-linear parameter transform; in some circumstances this approach leads to slow
 * convergence.
 *
 * @param observedValues  the measurement values
 * @param sigma  the measurement errors
 * @param func  the model as a function of its parameters only
 * @param startPos  the initial parameter values
 * @param penalty  the penalty matrix
 * @param allowedValue  a function returning true if the new trial position is allowed by the model
 * @return the least-square results holding the fitted parameters
 */
public LeastSquareWithPenaltyResults solve(
    DoubleArray observedValues,
    DoubleArray sigma,
    Function<DoubleArray, DoubleArray> func,
    DoubleArray startPos,
    DoubleMatrix penalty,
    Function<DoubleArray, Boolean> allowedValue) {

  // build a numerical Jacobian by first-order finite differencing of the model
  Function<DoubleArray, DoubleMatrix> numericJac =
      new VectorFieldFirstOrderDifferentiator().differentiate(func);
  return solve(observedValues, sigma, func, numericJac, startPos, penalty, allowedValue);
}
/**
 * Solves the penalized least-squares problem when the model is supplied as a function of its
 * parameters only (the measurement points are baked into the function), no analytic
 * parameter sensitivity is available, and all measurements carry equal (unit) error.
 *
 * @param observedValues  the measurement values
 * @param func  the model as a function of its parameters only
 * @param startPos  the initial parameter values
 * @param penalty  the penalty matrix
 * @return the least-square results holding the fitted parameters
 */
public LeastSquareWithPenaltyResults solve(
    DoubleArray observedValues,
    Function<DoubleArray, DoubleArray> func,
    DoubleArray startPos,
    DoubleMatrix penalty) {

  // unit weights: every observation contributes with the same (unit) error
  DoubleArray unitSigma = DoubleArray.filled(observedValues.size(), 1.0);
  // build a numerical Jacobian by first-order finite differencing of the model
  Function<DoubleArray, DoubleMatrix> numericJac =
      new VectorFieldFirstOrderDifferentiator().differentiate(func);
  return solve(observedValues, unitSigma, func, numericJac, startPos, penalty);
}
// NOTE(review): excerpt cut mid-statement -- the argument list of solver.solve(...) continues
// beyond this fragment, and the surrounding method is not visible. Comments only; code unchanged.
// penalty matrix over the strike/expiry grid -- presumably enforcing smoothness of the
// fitted surface; confirm against computePenaltyMatrix
DoubleMatrix penaltyMatrix = directDefinition.computePenaltyMatrix(strikes, capletExpiries);
LeastSquareResults res = solver.solve(
    DoubleArray.copyOf(priceList),
    DoubleArray.copyOf(errorList),
// NOTE(review): test-code excerpt cut mid-statement -- the argument list of NLLSWP.solve(...)
// continues beyond this fragment, and the surrounding method is not visible. Comments only.
// random starting point for the nWeights fitted parameters
DoubleArray start = DoubleArray.of(nWeights, i -> random.nextDouble());
LeastSquareWithPenaltyResults lsRes = NLLSWP.solve(
    DoubleArray.copyOf(obs),
    DoubleArray.filled(n, 0.01),