Mirror of https://github.com/fjosw/pyerrors.git, synced 2025-05-15 12:03:42 +02:00
fix: Improved error handling for fits and root finding when numpy is used instead of autograd.numpy. Tests added.
parent 7f5989dfb9
commit b14314b424
4 changed files with 53 additions and 10 deletions
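The commit wraps each autograd jacobian evaluation in a try/except so that the TypeError raised when a fit or root function is written with plain numpy is replaced by a descriptive message. The sketch below is not part of the commit and uses illustrative function names (good_func, bad_func); it reproduces the underlying failure mode: autograd differentiates by tracing with ArrayBox values, which plain numpy ufuncs cannot handle.

# Minimal sketch (not part of the commit) of the failure mode the new guard
# catches. autograd traces computations with ArrayBox values; plain numpy
# ufuncs cannot operate on those and raise TypeError, which the commit now
# converts into a descriptive Exception.
import numpy as np               # plain numpy: not differentiable by autograd
import autograd.numpy as anp     # thinly wrapped numpy: differentiable
from autograd import jacobian

def good_func(p, x):
    return p[0] * anp.exp(-p[1] * x)   # anp.exp handles ArrayBox inputs

def bad_func(p, x):
    return p[0] * np.exp(-p[1] * x)    # np.exp chokes on ArrayBox inputs

p = anp.array([1.0, 0.5])
x = anp.array([0.0, 1.0, 2.0])

jacobian(good_func)(p, x)   # works: returns the (3, 2) Jacobian
try:
    jacobian(bad_func)(p, x)
except TypeError as e:
    print("TypeError, as caught by the new guard:", e)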
pyerrors/fits.py
@@ -260,7 +260,10 @@ def total_least_squares(x, y, func, silent=False, **kwargs):
             output.chisquare_by_expected_chisquare)
 
     fitp = out.beta
-    hess_inv = np.linalg.pinv(jacobian(jacobian(odr_chisquare))(np.concatenate((fitp, out.xplus.ravel()))))
+    try:
+        hess_inv = np.linalg.pinv(jacobian(jacobian(odr_chisquare))(np.concatenate((fitp, out.xplus.ravel()))))
+    except TypeError:
+        raise Exception("It is required to use autograd.numpy instead of numpy within fit functions, see the documentation for details.") from None
 
     def odr_chisquare_compact_x(d):
         model = func(d[:n_parms], d[n_parms:n_parms + m].reshape(x_shape))
@@ -537,7 +540,10 @@ def _standard_fit(x, y, func, silent=False, **kwargs):
             output.chisquare_by_expected_chisquare)
 
     fitp = fit_result.x
-    hess_inv = np.linalg.pinv(jacobian(jacobian(chisqfunc))(fitp))
+    try:
+        hess_inv = np.linalg.pinv(jacobian(jacobian(chisqfunc))(fitp))
+    except TypeError:
+        raise Exception("It is required to use autograd.numpy instead of numpy within fit functions, see the documentation for details.") from None
 
     if kwargs.get('correlated_fit') is True:
         def chisqfunc_compact(d):
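A hedged usage sketch of the user-facing effect in the fit routines, assuming the least_squares API of this pyerrors version; the data and the ensemble name 'ens' are illustrative. A fit function written with autograd.numpy fits as before, while one written with plain numpy now fails with the clear message above instead of an opaque TypeError from inside autograd.

# Hedged sketch (assumes pe.fits.least_squares and pe.Obs as in this
# pyerrors version; data and ensemble name 'ens' are illustrative).
import numpy as np
import autograd.numpy as anp
import pyerrors as pe

x = np.arange(1, 6)
y = [pe.Obs([np.random.normal(v, 0.1, 500)], ['ens'])
     for v in 2.0 * np.exp(-0.3 * x)]
for o in y:
    o.gamma_method()   # error estimation is required before fitting

def func(a, x):
    return a[0] * anp.exp(-a[1] * x)   # autograd.numpy: fits as before

def func_np(a, x):
    return a[0] * np.exp(-a[1] * x)    # plain numpy inside the model

fit_result = pe.fits.least_squares(x, y, func)
# pe.fits.least_squares(x, y, func_np) would now raise:
# Exception: It is required to use autograd.numpy instead of numpy within
# fit functions, see the documentation for details.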
pyerrors/roots.py
@@ -31,7 +31,10 @@ def find_root(d, func, guess=1.0, **kwargs):
 
     # Error propagation as detailed in arXiv:1809.01289
     dx = jacobian(func)(root[0], d.value)
-    da = jacobian(lambda u, v: func(v, u))(d.value, root[0])
+    try:
+        da = jacobian(lambda u, v: func(v, u))(d.value, root[0])
+    except TypeError:
+        raise Exception("It is required to use autograd.numpy instead of numpy within root functions, see the documentation for details.") from None
     deriv = - da / dx
 
     res = derived_observable(lambda x, **kwargs: (x[0] + np.finfo(np.float64).eps) / (d.value + np.finfo(np.float64).eps) * root[0], [d], man_grad=[deriv])
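The same guard applies in find_root, sketched below with an assumed one-parameter example (the observable value 0.5 and ensemble name 'ens' are illustrative). The function whose root is sought must also be written with autograd.numpy, since its jacobians enter the error propagation shown in the hunk above.

# Hedged sketch for the root-finding guard (observable and function are
# illustrative). func(x, d) must use autograd.numpy so that the jacobians
# needed for error propagation (arXiv:1809.01289) can be evaluated.
import numpy as np
import autograd.numpy as anp
import pyerrors as pe

d = pe.Obs([np.random.normal(0.5, 0.01, 500)], ['ens'])
d.gamma_method()

def func(x, d):
    return anp.exp(-x) - d   # root at x = -log(d)

root = pe.roots.find_root(d, func, guess=1.0)
# Writing the same function with np.exp would now raise:
# Exception: It is required to use autograd.numpy instead of numpy within
# root functions, see the documentation for details.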