From 58b8383c1ad5c2a4be6cc497872497735d4cb190 Mon Sep 17 00:00:00 2001
From: Fabian Joswig
Date: Thu, 6 Oct 2022 18:07:19 +0100
Subject: [PATCH] feat: hessian added to prior fit and odr fit routines.

---
 pyerrors/fits.py | 23 +++++++++++++++--------
 1 file changed, 15 insertions(+), 8 deletions(-)

diff --git a/pyerrors/fits.py b/pyerrors/fits.py
index 0ba7db55..d4087caf 100644
--- a/pyerrors/fits.py
+++ b/pyerrors/fits.py
@@ -165,6 +165,8 @@ def total_least_squares(x, y, func, silent=False, **kwargs):
         corrected by effects caused by correlated input data.
         This can take a while as the full correlation matrix
         has to be calculated (default False).
+    num_grad : bool
+        Use numerical differentiation instead of automatic differentiation to perform the error propagation (default False).
 
     Notes
     -----
@@ -179,7 +181,12 @@ def total_least_squares(x, y, func, silent=False, **kwargs):
 
     x_shape = x.shape
 
-    jacobian = auto_jacobian
+    if kwargs.get('num_grad') is True:
+        jacobian = num_jacobian
+        hessian = num_hessian
+    else:
+        jacobian = auto_jacobian
+        hessian = auto_hessian
 
     if not callable(func):
         raise TypeError('func has to be a function.')
@@ -275,7 +282,7 @@ def total_least_squares(x, y, func, silent=False, **kwargs):
 
     fitp = out.beta
     try:
-        hess = jacobian(jacobian(odr_chisquare))(np.concatenate((fitp, out.xplus.ravel())))
+        hess = hessian(odr_chisquare)(np.concatenate((fitp, out.xplus.ravel())))
     except TypeError:
         raise Exception("It is required to use autograd.numpy instead of numpy within fit functions, see the documentation for details.") from None
 
@@ -284,7 +291,7 @@ def total_least_squares(x, y, func, silent=False, **kwargs):
         chisq = anp.sum(((y_f - model) / dy_f) ** 2) + anp.sum(((d[n_parms + m:].reshape(x_shape) - d[n_parms:n_parms + m].reshape(x_shape)) / dx_f) ** 2)
         return chisq
 
-    jac_jac_x = jacobian(jacobian(odr_chisquare_compact_x))(np.concatenate((fitp, out.xplus.ravel(), x_f.ravel())))
+    jac_jac_x = hessian(odr_chisquare_compact_x)(np.concatenate((fitp, out.xplus.ravel(), x_f.ravel())))
 
     # Compute hess^{-1} @ jac_jac_x[:n_parms + m, n_parms + m:] using LAPACK dgesv
     try:
@@ -297,7 +304,7 @@ def total_least_squares(x, y, func, silent=False, **kwargs):
         chisq = anp.sum(((d[n_parms + m:] - model) / dy_f) ** 2) + anp.sum(((x_f - d[n_parms:n_parms + m].reshape(x_shape)) / dx_f) ** 2)
         return chisq
 
-    jac_jac_y = jacobian(jacobian(odr_chisquare_compact_y))(np.concatenate((fitp, out.xplus.ravel(), y_f)))
+    jac_jac_y = hessian(odr_chisquare_compact_y)(np.concatenate((fitp, out.xplus.ravel(), y_f)))
 
     # Compute hess^{-1} @ jac_jac_y[:n_parms + m, n_parms + m:] using LAPACK dgesv
     try:
@@ -326,9 +333,9 @@ def _prior_fit(x, y, func, priors, silent=False, **kwargs):
     x = np.asarray(x)
 
     if kwargs.get('num_grad') is True:
-        jacobian = num_jacobian
+        hessian = num_hessian
     else:
-        jacobian = auto_jacobian
+        hessian = auto_hessian
 
     if not callable(func):
         raise TypeError('func has to be a function.')
@@ -418,7 +425,7 @@ def _prior_fit(x, y, func, priors, silent=False, **kwargs):
     if not m.fmin.is_valid:
         raise Exception('The minimization procedure did not converge.')
 
-    hess = jacobian(jacobian(chisqfunc))(params)
+    hess = hessian(chisqfunc)(params)
     if kwargs.get('num_grad') is True:
         hess = hess[0]
     hess_inv = np.linalg.pinv(hess)
@@ -428,7 +435,7 @@ def _prior_fit(x, y, func, priors, silent=False, **kwargs):
         chisq = anp.sum(((d[n_parms: n_parms + len(x)] - model) / dy_f) ** 2) + anp.sum(((d[n_parms + len(x):] - d[:n_parms]) / dp_f) ** 2)
         return chisq
 
-    jac_jac = jacobian(jacobian(chisqfunc_compact))(np.concatenate((params, y_f, p_f)))
+    jac_jac = hessian(chisqfunc_compact)(np.concatenate((params, y_f, p_f)))
     if kwargs.get('num_grad') is True:
         jac_jac = jac_jac[0]
 
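For context, a minimal usage sketch (not part of the patch) of how the num_grad keyword handled here can be passed through to total_least_squares. The fit function, pseudo_Obs data and ensemble name below are invented for illustration and assume the usual Obs-based pyerrors interface.

import autograd.numpy as anp  # fit functions should use autograd.numpy when relying on the default automatic differentiation
import pyerrors as pe

def func(a, x):
    # invented exponential model; a holds the fit parameters
    return a[0] * anp.exp(-a[1] * x)

# invented data: x and y as Obs with artificial errors on a single hypothetical ensemble
xvals = [1.0, 2.0, 3.0, 4.0]
x = [pe.pseudo_Obs(v, 0.01, 'ensemble1') for v in xvals]
y = [pe.pseudo_Obs(0.9 * anp.exp(-0.3 * v), 0.02, 'ensemble1') for v in xvals]

# default behaviour: error propagation via automatic differentiation (auto_hessian)
fit_auto = pe.fits.total_least_squares(x, y, func)

# with the option handled in this patch: numerical differentiation (num_hessian)
fit_num = pe.fits.total_least_squares(x, y, func, num_grad=True)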