fix: Error handling for fits and root finding with numpy instead of autograd.numpy improved. Tests added.
Fabian Joswig 2022-03-02 15:10:07 +00:00
parent 7f5989dfb9
commit b14314b424
4 changed files with 53 additions and 10 deletions


@@ -260,7 +260,10 @@ def total_least_squares(x, y, func, silent=False, **kwargs):
                                    output.chisquare_by_expected_chisquare)
 
     fitp = out.beta
-    hess_inv = np.linalg.pinv(jacobian(jacobian(odr_chisquare))(np.concatenate((fitp, out.xplus.ravel()))))
+    try:
+        hess_inv = np.linalg.pinv(jacobian(jacobian(odr_chisquare))(np.concatenate((fitp, out.xplus.ravel()))))
+    except TypeError:
+        raise Exception("It is required to use autograd.numpy instead of numpy within fit functions, see the documentation for details.") from None
 
     def odr_chisquare_compact_x(d):
         model = func(d[:n_parms], d[n_parms:n_parms + m].reshape(x_shape))
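For context (not part of the commit): autograd's jacobian can only trace functions built from autograd.numpy primitives, and calling a plain-numpy ufunc on one of autograd's boxed values raises a TypeError, which the new except branch turns into an actionable message. A minimal standalone sketch of the failure mode, with illustrative names (func_plain, func_anp):

import numpy as np
import autograd.numpy as anp
from autograd import jacobian

x = anp.arange(5.0)

def func_plain(a):
    return a[0] * np.exp(-a[1] * x)   # plain numpy ufunc on autograd boxes

def func_anp(a):
    return a[0] * anp.exp(-a[1] * x)  # differentiable drop-in replacement

p = anp.array([2.0, 0.1])
jacobian(func_anp)(p)    # works: returns the 5x2 Jacobian matrix
jacobian(func_plain)(p)  # raises TypeError: np.exp cannot handle ArrayBox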
@@ -537,7 +540,10 @@ def _standard_fit(x, y, func, silent=False, **kwargs):
                                    output.chisquare_by_expected_chisquare)
 
     fitp = fit_result.x
-    hess_inv = np.linalg.pinv(jacobian(jacobian(chisqfunc))(fitp))
+    try:
+        hess_inv = np.linalg.pinv(jacobian(jacobian(chisqfunc))(fitp))
+    except TypeError:
+        raise Exception("It is required to use autograd.numpy instead of numpy within fit functions, see the documentation for details.") from None
 
     if kwargs.get('correlated_fit') is True:
         def chisqfunc_compact(d):
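The "from None" in both raise statements suppresses implicit exception chaining, so the user sees only the actionable message rather than the raw autograd traceback. A generic illustration of the pattern (plain Python semantics, not pyerrors code):

try:
    raise TypeError("loop of ufunc does not support argument 0 of type ArrayBox")
except TypeError:
    # without "from None", Python would also print the original TypeError
    # under "During handling of the above exception, another exception occurred"
    raise Exception("It is required to use autograd.numpy instead of numpy within fit functions, see the documentation for details.") from None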


@@ -31,7 +31,10 @@ def find_root(d, func, guess=1.0, **kwargs):
 
     # Error propagation as detailed in arXiv:1809.01289
     dx = jacobian(func)(root[0], d.value)
-    da = jacobian(lambda u, v: func(v, u))(d.value, root[0])
+    try:
+        da = jacobian(lambda u, v: func(v, u))(d.value, root[0])
+    except TypeError:
+        raise Exception("It is required to use autograd.numpy instead of numpy within root functions, see the documentation for details.") from None
     deriv = - da / dx
 
     res = derived_observable(lambda x, **kwargs: (x[0] + np.finfo(np.float64).eps) / (d.value + np.finfo(np.float64).eps) * root[0], [d], man_grad=[deriv])
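Aside (not in the diff): deriv = -da/dx is the implicit-function-theorem derivative of the root, dx*/dp = -(df/dp)/(df/dx) evaluated where func(x*, p) = 0, which is then handed to derived_observable via man_grad. A hedged numerical sketch of that identity, with illustrative names and an analytically known root:

import autograd.numpy as anp
from autograd import jacobian
from scipy.optimize import brentq

def f(x, p):               # must use autograd.numpy to be traceable
    return anp.exp(x) - p  # root at x* = log(p)

p0 = 3.0
root = brentq(f, 0.0, 5.0, args=(p0,))
dx = jacobian(f)(root, p0)                     # df/dx at the root
da = jacobian(lambda u, v: f(v, u))(p0, root)  # df/dp at the root
deriv = - da / dx                              # implicit function theorem
assert abs(deriv - 1.0 / p0) < 1e-8            # analytic check: d log(p)/dp = 1/p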


@@ -1,4 +1,5 @@
-import autograd.numpy as np
+import numpy as np
+import autograd.numpy as anp
 import math
 import scipy.optimize
 from scipy.odr import ODR, Model, RealData
@@ -41,12 +42,12 @@ def test_least_squares():
         oy.append(pe.pseudo_Obs(y[i], yerr[i], str(i)))
 
     def f(x, a, b):
-        return a * np.exp(-b * x)
+        return a * anp.exp(-b * x)
 
     popt, pcov = scipy.optimize.curve_fit(f, x, y, sigma=[o.dvalue for o in oy], absolute_sigma=True)
 
     def func(a, x):
-        y = a[0] * np.exp(-a[1] * x)
+        y = a[0] * anp.exp(-a[1] * x)
         return y
 
     out = pe.least_squares(x, oy, func, expected_chisquare=True, resplot=True, qqplot=True)
@@ -95,7 +96,7 @@ def test_alternative_solvers():
         oy.append(pe.pseudo_Obs(y[i], yerr[i], 'test'))
 
     def func(a, x):
-        y = a[0] * np.exp(-a[1] * x)
+        y = a[0] * anp.exp(-a[1] * x)
         return y
 
     chisquare_values = []
@@ -145,7 +146,7 @@ def test_correlated_fit():
             return p[1] + p[0] * x
     else:
         def fitf(p, x):
-            return p[1] * np.exp(-p[0] * x)
+            return p[1] * anp.exp(-p[0] * x)
 
     fitp = pe.least_squares(x, data, fitf, expected_chisquare=True)
@@ -172,10 +173,10 @@ def test_total_least_squares():
         oy.append(pe.pseudo_Obs(y[i], yerr[i], str(i)))
 
     def f(x, a, b):
-        return a * np.exp(-b * x)
+        return a * anp.exp(-b * x)
 
     def func(a, x):
-        y = a[0] * np.exp(-a[1] * x)
+        y = a[0] * anp.exp(-a[1] * x)
         return y
 
     data = RealData([o.value for o in ox], [o.value for o in oy], sx=[o.dvalue for o in ox], sy=[o.dvalue for o in oy])
@@ -336,6 +337,27 @@ def test_error_band():
     pe.fits.error_band(x, f, fitp.fit_parameters)
 
 
+def test_fit_no_autograd():
+    dim = 10
+    x = np.arange(dim)
+    y = 2 * np.exp(-0.08 * x) + np.random.normal(0.0, 0.15, dim)
+    yerr = 0.1 + 0.1 * np.random.rand(dim)
+
+    oy = []
+    for i, item in enumerate(x):
+        oy.append(pe.pseudo_Obs(y[i], yerr[i], str(i)))
+
+    def func(a, x):
+        y = a[0] * np.exp(-a[1] * x)
+        return y
+
+    with pytest.raises(Exception):
+        pe.least_squares(x, oy, func)
+
+    with pytest.raises(Exception):
+        pe.total_least_squares(oy, oy, func)
+
+
 def test_ks_test():
     def f(a, x):
         y = a[0] + a[1] * x
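For comparison (hypothetical, not part of the test file): reusing x and oy from test_fit_no_autograd, the same fit goes through once the model is written with the anp alias imported at the top of the file; func_anp is an illustrative name:

def func_anp(a, x):
    y = a[0] * anp.exp(-a[1] * x)  # anp instead of np
    return y

fit_result = pe.least_squares(x, oy, func_anp)  # no exception raised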


@@ -30,3 +30,15 @@ def test_root_linear_idl():
 
     difference = my_obs - my_root
     assert difference.is_zero()
+
+
+def test_root_no_autograd():
+
+    def root_function(x, d):
+        return x - np.log(np.exp(d))
+
+    value = np.random.normal(0, 100)
+    my_obs = pe.pseudo_Obs(value, 0.1, 't')
+
+    with pytest.raises(Exception):
+        my_root = pe.roots.find_root(my_obs, root_function)
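Likewise (illustrative, not part of the commit): the autograd.numpy version of the same root function is accepted, and find_root propagates the error of my_obs; root_function_anp is an assumed name:

import autograd.numpy as anp

def root_function_anp(x, d):
    return x - anp.log(anp.exp(d))  # identical maths, traceable by autograd

my_root = pe.roots.find_root(my_obs, root_function_anp)  # returns an Obs with propagated error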