mirror of https://github.com/fjosw/pyerrors.git, synced 2025-03-15 23:00:25 +01:00
fix: fits and root now work when the value of the zeroth input is exactly zero. Tests extended.
parent 968cdf3181
commit 9ddadaf6b3
3 changed files with 33 additions and 4 deletions
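The root cause: `derived_observable` evaluates the supplied lambda at the central values to obtain the value of the propagated result, while the actual gradient is passed separately via `man_grad`. When the zeroth input has a central value of exactly zero, the old expression `x[0] / y[0].value * params[i]` evaluates `0 / 0` and the fit parameter comes out nan. Adding machine epsilon to both numerator and denominator leaves the ratio exactly 1 at the central values (the two shifted terms are identical there), so the propagated value is unchanged for nonzero inputs and finite for zero ones. The same pattern is applied at all four call sites below (`total_least_squares`, `_prior_fit`, `_standard_fit`, `find_root`). A minimal sketch of the arithmetic, with hypothetical numbers:

```python
import numpy as np

eps = np.finfo(np.float64).eps  # machine epsilon, ~2.2e-16

mean = np.float64(0.0)  # central value of the zeroth input, exactly zero
param = 1.23            # hypothetical fit result to propagate

old = mean / mean * param                  # nan: numpy evaluates 0/0 as nan
new = (mean + eps) / (mean + eps) * param  # ratio is exactly 1.0 -> param

print(old, new)  # nan 1.23
```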
@@ -301,7 +301,7 @@ def total_least_squares(x, y, func, silent=False, **kwargs):
 
     result = []
     for i in range(n_parms):
-        result.append(derived_observable(lambda my_var, **kwargs: my_var[0] / x.ravel()[0].value * out.beta[i], list(x.ravel()) + list(y), man_grad=list(deriv_x[i]) + list(deriv_y[i])))
+        result.append(derived_observable(lambda my_var, **kwargs: (my_var[0] + np.finfo(np.float64).eps) / (x.ravel()[0].value + np.finfo(np.float64).eps) * out.beta[i], list(x.ravel()) + list(y), man_grad=list(deriv_x[i]) + list(deriv_y[i])))
 
     output.fit_parameters = result + const_par
 
@@ -418,7 +418,7 @@ def _prior_fit(x, y, func, priors, silent=False, **kwargs):
 
     result = []
     for i in range(n_parms):
-        result.append(derived_observable(lambda x, **kwargs: x[0] / y[0].value * params[i], list(y) + list(loc_priors), man_grad=list(deriv[i])))
+        result.append(derived_observable(lambda x, **kwargs: (x[0] + np.finfo(np.float64).eps) / (y[0].value + np.finfo(np.float64).eps) * params[i], list(y) + list(loc_priors), man_grad=list(deriv[i])))
 
     output.fit_parameters = result
     output.chisquare = chisqfunc(np.asarray(params))
@@ -612,7 +612,7 @@ def _standard_fit(x, y, func, silent=False, **kwargs):
 
     result = []
     for i in range(n_parms):
-        result.append(derived_observable(lambda x, **kwargs: x[0] / y[0].value * fit_result.x[i], list(y), man_grad=list(deriv[i])))
+        result.append(derived_observable(lambda x, **kwargs: (x[0] + np.finfo(np.float64).eps) / (y[0].value + np.finfo(np.float64).eps) * fit_result.x[i], list(y), man_grad=list(deriv[i])))
 
     output.fit_parameters = result + const_par
 
@@ -1,3 +1,4 @@
+import numpy as np
 import scipy.optimize
 from autograd import jacobian
 from .obs import derived_observable
@@ -33,5 +34,5 @@ def find_root(d, func, guess=1.0, **kwargs):
     da = jacobian(lambda u, v: func(v, u))(d.value, root[0])
     deriv = - da / dx
 
-    res = derived_observable(lambda x, **kwargs: x[0] / d.value * root[0], [d], man_grad=[deriv])
+    res = derived_observable(lambda x, **kwargs: (x[0] + np.finfo(np.float64).eps) / (d.value + np.finfo(np.float64).eps) * root[0], [d], man_grad=[deriv])
     return res
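A usage sketch for the roots change (the function and ensemble name are hypothetical, and it assumes `find_root` is importable as `pe.roots.find_root`):

```python
import pyerrors as pe

# Observable whose central value is exactly zero -- the case fixed here.
d = pe.pseudo_Obs(0.0, 0.1, 'ensemble')

# func(x, d) = x + d has the root x = -d. Before this commit the
# propagation lambda divided by d.value == 0 and the result was nan.
root = pe.roots.find_root(d, lambda x, d: x + d)
root.gamma_method()
print(root.value, root.dvalue)  # 0.0 with a finite error
```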
@@ -10,6 +10,26 @@ import pytest
 np.random.seed(0)
 
 
+def test_fit_lin():
+    x = [0, 2]
+    y = [pe.pseudo_Obs(0, 0.1, 'ensemble'),
+         pe.pseudo_Obs(2, 0.1, 'ensemble')]
+
+    res = pe.fits.fit_lin(x, y)
+
+    assert res[0] == y[0]
+    assert res[1] == (y[1] - y[0]) / (x[1] - x[0])
+
+    x = y = [pe.pseudo_Obs(0, 0.1, 'ensemble'),
+             pe.pseudo_Obs(2, 0.1, 'ensemble')]
+
+    res = pe.fits.fit_lin(x, y)
+
+    m = (y[1] - y[0]) / (x[1] - x[0])
+    assert res[0] == y[1] - x[1] * m
+    assert res[1] == m
+
+
 def test_least_squares():
     dim = 10 + int(30 * np.random.rand())
     x = np.arange(dim)
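The new `test_fit_lin` pins the zero-value path: `y[0]` has a central value of exactly 0, and with only two points the linear model is exact, so the fitted parameters must equal the two-point formulas asserted above. The arithmetic behind the first pair of assertions, in plain floats:

```python
# With x = [0, 2] and central y-values [0, 2]:
x = [0, 2]
y_means = [0.0, 2.0]

m = (y_means[1] - y_means[0]) / (x[1] - x[0])  # slope: (2 - 0) / (2 - 0) = 1.0
b = y_means[0] - m * x[0]                      # intercept: y at x = 0, i.e. 0.0

assert (b, m) == (0.0, 1.0)
```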
@@ -32,6 +52,10 @@ def test_least_squares():
     out = pe.least_squares(x, oy, func, expected_chisquare=True, resplot=True, qqplot=True)
     beta = out.fit_parameters
 
+    str(out)
+    repr(out)
+    len(out)
+
     for i in range(2):
         beta[i].gamma_method(S=1.0)
         assert math.isclose(beta[i].value, popt[i], abs_tol=1e-5)
@@ -136,6 +160,10 @@ def test_total_least_squares():
     out = pe.total_least_squares(ox, oy, func, expected_chisquare=True)
     beta = out.fit_parameters
 
+    str(out)
+    repr(out)
+    len(out)
+
     for i in range(2):
         beta[i].gamma_method(S=1.0)
         assert math.isclose(beta[i].value, output.beta[i], rel_tol=1e-5)
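An end-to-end sketch of the regression this commit guards against, using the public entry points that appear in the tests above (`pe.pseudo_Obs`, `pe.least_squares`); the data values are hypothetical:

```python
import numpy as np
import pyerrors as pe

def func(a, x):
    return a[0] + a[1] * x  # simple linear model

x = np.arange(5)
# The zeroth y-value has a central value of exactly 0; before this fix
# the propagated fit parameters came out nan.
y = [pe.pseudo_Obs(v, 0.1, 'ensemble') for v in [0.0, 1.1, 1.9, 3.2, 3.9]]

out = pe.least_squares(x, y, func)
for p in out.fit_parameters:
    p.gamma_method()
print([p.value for p in out.fit_parameters])  # finite values, no nan
```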