mirror of
https://github.com/fjosw/pyerrors.git
synced 2026-02-28 15:53:38 +01:00
[Fix] Migrate to odrpack because of the scipy.odr deprecation in a recent
release
This commit is contained in:
parent
da399b7c02
commit
46d32683d3
3 changed files with 80 additions and 38 deletions
|
|
@ -7,7 +7,7 @@ import scipy.optimize
|
|||
import scipy.stats
|
||||
import matplotlib.pyplot as plt
|
||||
from matplotlib import gridspec
|
||||
from scipy.odr import ODR, Model, RealData
|
||||
from odrpack import odr_fit
|
||||
import iminuit
|
||||
from autograd import jacobian as auto_jacobian
|
||||
from autograd import hessian as auto_hessian
|
||||
|
|
@ -634,17 +634,27 @@ def total_least_squares(x, y, func, silent=False, **kwargs):
|
|||
raise Exception('No y errors available, run the gamma method first.')
|
||||
|
||||
if 'initial_guess' in kwargs:
|
||||
x0 = kwargs.get('initial_guess')
|
||||
x0 = np.asarray(kwargs.get('initial_guess'), dtype=np.float64)
|
||||
if len(x0) != n_parms:
|
||||
raise Exception('Initial guess does not have the correct length: %d vs. %d' % (len(x0), n_parms))
|
||||
else:
|
||||
x0 = [1] * n_parms
|
||||
x0 = np.ones(n_parms, dtype=np.float64)
|
||||
|
||||
data = RealData(x_f, y_f, sx=dx_f, sy=dy_f)
|
||||
model = Model(func)
|
||||
odr = ODR(data, model, x0, partol=np.finfo(np.float64).eps)
|
||||
odr.set_job(fit_type=0, deriv=1)
|
||||
out = odr.run()
|
||||
# odrpack expects f(x, beta), but pyerrors convention is f(beta, x)
|
||||
def wrapped_func(x, beta):
|
||||
return func(beta, x)
|
||||
|
||||
out = odr_fit(
|
||||
wrapped_func,
|
||||
np.asarray(x_f, dtype=np.float64),
|
||||
np.asarray(y_f, dtype=np.float64),
|
||||
beta0=x0,
|
||||
weight_x=1.0 / np.asarray(dx_f, dtype=np.float64) ** 2,
|
||||
weight_y=1.0 / np.asarray(dy_f, dtype=np.float64) ** 2,
|
||||
partol=np.finfo(np.float64).eps,
|
||||
task='explicit-ODR',
|
||||
diff_scheme='central'
|
||||
)
|
||||
|
||||
output.residual_variance = out.res_var
|
||||
|
||||
|
|
@ -652,14 +662,14 @@ def total_least_squares(x, y, func, silent=False, **kwargs):
|
|||
|
||||
output.message = out.stopreason
|
||||
|
||||
output.xplus = out.xplus
|
||||
output.xplus = out.xplusd
|
||||
|
||||
if not silent:
|
||||
print('Method: ODR')
|
||||
print(*out.stopreason)
|
||||
print(out.stopreason)
|
||||
print('Residual variance:', output.residual_variance)
|
||||
|
||||
if out.info > 3:
|
||||
if not out.success:
|
||||
raise Exception('The minimization procedure did not converge.')
|
||||
|
||||
m = x_f.size
|
||||
|
|
@ -679,9 +689,9 @@ def total_least_squares(x, y, func, silent=False, **kwargs):
|
|||
|
||||
number_of_x_parameters = int(m / x_f.shape[-1])
|
||||
|
||||
old_jac = jacobian(func)(out.beta, out.xplus)
|
||||
old_jac = jacobian(func)(out.beta, out.xplusd)
|
||||
fused_row1 = np.concatenate((old_jac, np.concatenate((number_of_x_parameters * [np.zeros(old_jac.shape)]), axis=0)))
|
||||
fused_row2 = np.concatenate((jacobian(lambda x, y: func(y, x))(out.xplus, out.beta).reshape(x_f.shape[-1], x_f.shape[-1] * number_of_x_parameters), np.identity(number_of_x_parameters * old_jac.shape[0])))
|
||||
fused_row2 = np.concatenate((jacobian(lambda x, y: func(y, x))(out.xplusd, out.beta).reshape(x_f.shape[-1], x_f.shape[-1] * number_of_x_parameters), np.identity(number_of_x_parameters * old_jac.shape[0])))
|
||||
new_jac = np.concatenate((fused_row1, fused_row2), axis=1)
|
||||
|
||||
A = W @ new_jac
|
||||
|
|
@ -690,14 +700,14 @@ def total_least_squares(x, y, func, silent=False, **kwargs):
|
|||
if expected_chisquare <= 0.0:
|
||||
warnings.warn("Negative expected_chisquare.", RuntimeWarning)
|
||||
expected_chisquare = np.abs(expected_chisquare)
|
||||
output.chisquare_by_expected_chisquare = odr_chisquare(np.concatenate((out.beta, out.xplus.ravel()))) / expected_chisquare
|
||||
output.chisquare_by_expected_chisquare = odr_chisquare(np.concatenate((out.beta, out.xplusd.ravel()))) / expected_chisquare
|
||||
if not silent:
|
||||
print('chisquare/expected_chisquare:',
|
||||
output.chisquare_by_expected_chisquare)
|
||||
|
||||
fitp = out.beta
|
||||
try:
|
||||
hess = hessian(odr_chisquare)(np.concatenate((fitp, out.xplus.ravel())))
|
||||
hess = hessian(odr_chisquare)(np.concatenate((fitp, out.xplusd.ravel())))
|
||||
except TypeError:
|
||||
raise Exception("It is required to use autograd.numpy instead of numpy within fit functions, see the documentation for details.") from None
|
||||
|
||||
|
|
@ -706,7 +716,7 @@ def total_least_squares(x, y, func, silent=False, **kwargs):
|
|||
chisq = anp.sum(((y_f - model) / dy_f) ** 2) + anp.sum(((d[n_parms + m:].reshape(x_shape) - d[n_parms:n_parms + m].reshape(x_shape)) / dx_f) ** 2)
|
||||
return chisq
|
||||
|
||||
jac_jac_x = hessian(odr_chisquare_compact_x)(np.concatenate((fitp, out.xplus.ravel(), x_f.ravel())))
|
||||
jac_jac_x = hessian(odr_chisquare_compact_x)(np.concatenate((fitp, out.xplusd.ravel(), x_f.ravel())))
|
||||
|
||||
# Compute hess^{-1} @ jac_jac_x[:n_parms + m, n_parms + m:] using LAPACK dgesv
|
||||
try:
|
||||
|
|
@ -719,7 +729,7 @@ def total_least_squares(x, y, func, silent=False, **kwargs):
|
|||
chisq = anp.sum(((d[n_parms + m:] - model) / dy_f) ** 2) + anp.sum(((x_f - d[n_parms:n_parms + m].reshape(x_shape)) / dx_f) ** 2)
|
||||
return chisq
|
||||
|
||||
jac_jac_y = hessian(odr_chisquare_compact_y)(np.concatenate((fitp, out.xplus.ravel(), y_f)))
|
||||
jac_jac_y = hessian(odr_chisquare_compact_y)(np.concatenate((fitp, out.xplusd.ravel(), y_f)))
|
||||
|
||||
# Compute hess^{-1} @ jac_jac_y[:n_parms + m, n_parms + m:] using LAPACK dgesv
|
||||
try:
|
||||
|
|
@ -733,7 +743,7 @@ def total_least_squares(x, y, func, silent=False, **kwargs):
|
|||
|
||||
output.fit_parameters = result
|
||||
|
||||
output.odr_chisquare = odr_chisquare(np.concatenate((out.beta, out.xplus.ravel())))
|
||||
output.odr_chisquare = odr_chisquare(np.concatenate((out.beta, out.xplusd.ravel())))
|
||||
output.dof = x.shape[-1] - n_parms
|
||||
output.p_value = 1 - scipy.stats.chi2.cdf(output.odr_chisquare, output.dof)
|
||||
|
||||
|
|
|
|||
2
setup.py
2
setup.py
|
|
@ -25,7 +25,7 @@ setup(name='pyerrors',
|
|||
license="MIT",
|
||||
packages=find_packages(),
|
||||
python_requires='>=3.10.0',
|
||||
install_requires=['numpy>=2.0', 'autograd>=1.7.0', 'numdifftools>=0.9.41', 'matplotlib>=3.9', 'scipy>=1.13', 'iminuit>=2.28', 'h5py>=3.11', 'lxml>=5.0', 'python-rapidjson>=1.20', 'pandas>=2.2'],
|
||||
install_requires=['numpy>=2.0', 'autograd>=1.7.0', 'numdifftools>=0.9.41', 'matplotlib>=3.9', 'scipy>=1.13', 'iminuit>=2.28', 'h5py>=3.11', 'lxml>=5.0', 'python-rapidjson>=1.20', 'pandas>=2.2', 'odrpack>=0.4'],
|
||||
extras_require={'test': ['pytest', 'pytest-cov', 'pytest-benchmark', 'hypothesis', 'nbmake', 'flake8']},
|
||||
classifiers=[
|
||||
'Development Status :: 5 - Production/Stable',
|
||||
|
|
|
|||
|
|
@ -3,7 +3,7 @@ import autograd.numpy as anp
|
|||
import matplotlib.pyplot as plt
|
||||
import math
|
||||
import scipy.optimize
|
||||
from scipy.odr import ODR, Model, RealData
|
||||
from odrpack import odr_fit
|
||||
from scipy.linalg import cholesky
|
||||
from scipy.stats import norm
|
||||
import iminuit
|
||||
|
|
@ -397,11 +397,21 @@ def test_total_least_squares():
|
|||
y = a[0] * anp.exp(-a[1] * x)
|
||||
return y
|
||||
|
||||
data = RealData([o.value for o in ox], [o.value for o in oy], sx=[o.dvalue for o in ox], sy=[o.dvalue for o in oy])
|
||||
model = Model(func)
|
||||
odr = ODR(data, model, [0, 0], partol=np.finfo(np.float64).eps)
|
||||
odr.set_job(fit_type=0, deriv=1)
|
||||
output = odr.run()
|
||||
# odrpack expects f(x, beta), but pyerrors convention is f(beta, x)
|
||||
def wrapped_func(x, beta):
|
||||
return func(beta, x)
|
||||
|
||||
output = odr_fit(
|
||||
wrapped_func,
|
||||
np.array([o.value for o in ox]),
|
||||
np.array([o.value for o in oy]),
|
||||
beta0=np.array([0.0, 0.0]),
|
||||
weight_x=1.0 / np.array([o.dvalue for o in ox]) ** 2,
|
||||
weight_y=1.0 / np.array([o.dvalue for o in oy]) ** 2,
|
||||
partol=np.finfo(np.float64).eps,
|
||||
task='explicit-ODR',
|
||||
diff_scheme='central'
|
||||
)
|
||||
|
||||
out = pe.total_least_squares(ox, oy, func, expected_chisquare=True)
|
||||
beta = out.fit_parameters
|
||||
|
|
@ -1431,11 +1441,11 @@ def fit_general(x, y, func, silent=False, **kwargs):
|
|||
global print_output, beta0
|
||||
print_output = 1
|
||||
if 'initial_guess' in kwargs:
|
||||
beta0 = kwargs.get('initial_guess')
|
||||
beta0 = np.asarray(kwargs.get('initial_guess'), dtype=np.float64)
|
||||
if len(beta0) != n_parms:
|
||||
raise Exception('Initial guess does not have the correct length.')
|
||||
else:
|
||||
beta0 = np.arange(n_parms)
|
||||
beta0 = np.arange(n_parms, dtype=np.float64)
|
||||
|
||||
if len(x) != len(y):
|
||||
raise Exception('x and y have to have the same length')
|
||||
|
|
@ -1463,23 +1473,45 @@ def fit_general(x, y, func, silent=False, **kwargs):
|
|||
|
||||
xerr = kwargs.get('xerr')
|
||||
|
||||
# odrpack expects f(x, beta), but pyerrors convention is f(beta, x)
|
||||
def wrapped_func(x, beta):
|
||||
return func(beta, x)
|
||||
|
||||
if length == len(obs):
|
||||
assert 'x_constants' in kwargs
|
||||
data = RealData(kwargs.get('x_constants'), obs, sy=yerr)
|
||||
fit_type = 2
|
||||
x_data = np.asarray(kwargs.get('x_constants'))
|
||||
y_data = np.asarray(obs)
|
||||
# Ordinary least squares (no x errors)
|
||||
output = odr_fit(
|
||||
wrapped_func,
|
||||
x_data,
|
||||
y_data,
|
||||
beta0=beta0,
|
||||
weight_y=1.0 / np.asarray(yerr) ** 2,
|
||||
partol=np.finfo(np.float64).eps,
|
||||
task='explicit-OLS',
|
||||
diff_scheme='central'
|
||||
)
|
||||
elif length == len(obs) // 2:
|
||||
data = RealData(obs[:length], obs[length:], sx=xerr, sy=yerr)
|
||||
fit_type = 0
|
||||
x_data = np.asarray(obs[:length])
|
||||
y_data = np.asarray(obs[length:])
|
||||
# ODR with x errors
|
||||
output = odr_fit(
|
||||
wrapped_func,
|
||||
x_data,
|
||||
y_data,
|
||||
beta0=beta0,
|
||||
weight_x=1.0 / np.asarray(xerr) ** 2,
|
||||
weight_y=1.0 / np.asarray(yerr) ** 2,
|
||||
partol=np.finfo(np.float64).eps,
|
||||
task='explicit-ODR',
|
||||
diff_scheme='central'
|
||||
)
|
||||
else:
|
||||
raise Exception('x and y do not fit together.')
|
||||
|
||||
model = Model(func)
|
||||
|
||||
odr = ODR(data, model, beta0, partol=np.finfo(np.float64).eps)
|
||||
odr.set_job(fit_type=fit_type, deriv=1)
|
||||
output = odr.run()
|
||||
if print_output and not silent:
|
||||
print(*output.stopreason)
|
||||
print(output.stopreason)
|
||||
print('chisquare/d.o.f.:', output.res_var)
|
||||
print_output = 0
|
||||
beta0 = output.beta
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue