Merge branch 'develop'

This commit is contained in:
Fabian Joswig 2025-10-10 17:34:35 +02:00
commit dbe5912ca3
9 changed files with 129 additions and 51 deletions

View file

@ -43,4 +43,4 @@ jobs:
uv pip freeze --system
- name: Run tests
run: pytest --cov=pyerrors -vv
run: pytest --cov=pyerrors -vv -Werror

View file

@ -2,6 +2,11 @@
All notable changes to this project will be documented in this file.
## [2.15.0] - 2025-10-10
### Added
- Option to explicitly specify the number of fit parameters.
## [2.14.0] - 2025-03-09
### Added

View file

@ -151,7 +151,7 @@
"\n",
"$$C_{\\textrm{projected}}(t)=v_1^T \\underline{C}(t) v_2$$\n",
"\n",
"If we choose the vectors to be $v_1=v_2=(0,1,0,0)$, we should get the same correlator as in the cell above. \n",
"If we choose the vectors to be $v_1=v_2=(1,0,0,0)$, we should get the same correlator as in the cell above. \n",
"\n",
"Thinking about it this way is useful in the context of the generalized eigenvalue problem (GEVP), used to find the source-sink combination, which best describes a certain energy eigenstate.\n",
"A good introduction is found in https://arxiv.org/abs/0902.1265."

View file

@ -131,7 +131,7 @@ def least_squares(x, y, func, priors=None, silent=False, **kwargs):
Obs (e.g. results from a previous fit) or strings containing a value and an error formatted like
0.548(23), 500(40) or 0.5(0.4)
silent : bool, optional
If true all output to the console is omitted (default False).
If True all output to the console is omitted (default False).
initial_guess : list
can provide an initial guess for the input parameters. Relevant for
non-linear fits with many parameters. In case of correlated fits the guess is used to perform
@ -139,10 +139,10 @@ def least_squares(x, y, func, priors=None, silent=False, **kwargs):
method : str, optional
can be used to choose an alternative method for the minimization of chisquare.
The possible methods are the ones which can be used for scipy.optimize.minimize and
migrad of iminuit. If no method is specified, Levenberg-Marquard is used.
migrad of iminuit. If no method is specified, Levenberg-Marquardt is used.
Reliable alternatives are migrad, Powell and Nelder-Mead.
tol: float, optional
can be used (only for combined fits and methods other than Levenberg-Marquard) to set the tolerance for convergence
can be used (only for combined fits and methods other than Levenberg-Marquardt) to set the tolerance for convergence
to a different value to either speed up convergence at the cost of a larger error on the fitted parameters (and possibly
invalid estimates for parameter uncertainties) or smaller values to get more accurate parameter values
The stopping criterion depends on the method, e.g. migrad: edm_max = 0.002 * tol * errordef (EDM criterion: edm < edm_max)
@ -152,7 +152,7 @@ def least_squares(x, y, func, priors=None, silent=False, **kwargs):
In practice the correlation matrix is Cholesky decomposed and inverted (instead of the covariance matrix).
This procedure should be numerically more stable as the correlation matrix is typically better conditioned (Jacobi preconditioning).
inv_chol_cov_matrix [array,list], optional
array: shape = (no of y values) X (no of y values)
array: shape = (number of y values) X (number of y values)
list: for an uncombined fit: [""]
for a combined fit: list of keys belonging to the corr_matrix saved in the array, must be the same as the keys of the y dict in alphabetical order
If correlated_fit=True is set as well, can provide an inverse covariance matrix (y errors, dy_f included!) of your own choosing for a correlated fit.
@ -168,6 +168,9 @@ def least_squares(x, y, func, priors=None, silent=False, **kwargs):
If True, a quantile-quantile plot of the fit result is generated (default False).
num_grad : bool
Use numerical differentiation instead of automatic differentiation to perform the error propagation (default False).
n_parms : int, optional
Number of fit parameters. Overrides automatic detection of parameter count.
Useful when autodetection fails. Must match the length of initial_guess or priors (if provided).
Returns
-------
@ -269,6 +272,18 @@ def least_squares(x, y, func, priors=None, silent=False, **kwargs):
raise Exception("No y errors available, run the gamma method first.")
# number of fit parameters
if 'n_parms' in kwargs:
n_parms = kwargs.get('n_parms')
if not isinstance(n_parms, int):
raise TypeError(
f"'n_parms' must be an integer, got {n_parms!r} "
f"of type {type(n_parms).__name__}."
)
if n_parms <= 0:
raise ValueError(
f"'n_parms' must be a positive integer, got {n_parms}."
)
else:
n_parms_ls = []
for key in key_ls:
if not callable(funcd[key]):
@ -535,17 +550,20 @@ def total_least_squares(x, y, func, silent=False, **kwargs):
It is important that all numpy functions refer to autograd.numpy, otherwise the differentiation
will not work.
silent : bool, optional
If true all output to the console is omitted (default False).
If True all output to the console is omitted (default False).
initial_guess : list
can provide an initial guess for the input parameters. Relevant for non-linear
fits with many parameters.
expected_chisquare : bool
If true prints the expected chisquare which is
If True prints the expected chisquare which is
corrected by effects caused by correlated input data.
This can take a while as the full correlation matrix
has to be calculated (default False).
num_grad : bool
Use numerical differentation instead of automatic differentiation to perform the error propagation (default False).
Use numerical differentiation instead of automatic differentiation to perform the error propagation (default False).
n_parms : int, optional
Number of fit parameters. Overrides automatic detection of parameter count.
Useful when autodetection fails. Must match the length of initial_guess (if provided).
Notes
-----
@ -575,7 +593,19 @@ def total_least_squares(x, y, func, silent=False, **kwargs):
if not callable(func):
raise TypeError('func has to be a function.')
for i in range(100):
if 'n_parms' in kwargs:
n_parms = kwargs.get('n_parms')
if not isinstance(n_parms, int):
raise TypeError(
f"'n_parms' must be an integer, got {n_parms!r} "
f"of type {type(n_parms).__name__}."
)
if n_parms <= 0:
raise ValueError(
f"'n_parms' must be a positive integer, got {n_parms}."
)
else:
for i in range(100):
try:
func(np.arange(i), x.T[0])
except TypeError:
@ -588,6 +618,7 @@ def total_least_squares(x, y, func, silent=False, **kwargs):
raise RuntimeError("Fit function is not valid.")
n_parms = i
if not silent:
print('Fit with', n_parms, 'parameter' + 's' * (n_parms > 1))

View file

@ -571,7 +571,6 @@ def _ol_from_dict(ind, reps='DICTOBS'):
counter = 0
def dict_replace_obs(d):
nonlocal ol
nonlocal counter
x = {}
for k, v in d.items():
@ -592,7 +591,6 @@ def _ol_from_dict(ind, reps='DICTOBS'):
return x
def list_replace_obs(li):
nonlocal ol
nonlocal counter
x = []
for e in li:
@ -613,7 +611,6 @@ def _ol_from_dict(ind, reps='DICTOBS'):
return x
def obslist_replace_obs(li):
nonlocal ol
nonlocal counter
il = []
for e in li:
@ -694,7 +691,6 @@ def _od_from_list_and_dict(ol, ind, reps='DICTOBS'):
def dict_replace_string(d):
nonlocal counter
nonlocal ol
x = {}
for k, v in d.items():
if isinstance(v, dict):
@ -710,7 +706,6 @@ def _od_from_list_and_dict(ol, ind, reps='DICTOBS'):
def list_replace_string(li):
nonlocal counter
nonlocal ol
x = []
for e in li:
if isinstance(e, list):

View file

@ -1,6 +1,7 @@
import warnings
import gzip
import sqlite3
from contextlib import closing
import pandas as pd
from ..obs import Obs
from ..correlators import Corr
@ -29,9 +30,8 @@ def to_sql(df, table_name, db, if_exists='fail', gz=True, **kwargs):
None
"""
se_df = _serialize_df(df, gz=gz)
con = sqlite3.connect(db)
se_df.to_sql(table_name, con, if_exists=if_exists, index=False, **kwargs)
con.close()
with closing(sqlite3.connect(db)) as con:
se_df.to_sql(table_name, con=con, if_exists=if_exists, index=False, **kwargs)
def read_sql(sql, db, auto_gamma=False, **kwargs):
@ -52,9 +52,8 @@ def read_sql(sql, db, auto_gamma=False, **kwargs):
data : pandas.DataFrame
Dataframe with the content of the sqlite database.
"""
con = sqlite3.connect(db)
extract_df = pd.read_sql(sql, con, **kwargs)
con.close()
with closing(sqlite3.connect(db)) as con:
extract_df = pd.read_sql(sql, con=con, **kwargs)
return _deserialize_df(extract_df, auto_gamma=auto_gamma)

View file

@ -1 +1 @@
__version__ = "2.14.0"
__version__ = "2.15.0"

View file

@ -30,7 +30,6 @@ setup(name='pyerrors',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',

View file

@ -1098,6 +1098,7 @@ def test_combined_fit_xerr():
}
xd = {k: np.transpose([[1 + .01 * np.random.uniform(), 2] for i in range(len(yd[k]))]) for k in fitd}
pe.fits.least_squares(xd, yd, fitd)
pe.fits.least_squares(xd, yd, fitd, n_parms=4)
def test_x_multidim_fit():
@ -1340,6 +1341,54 @@ def test_combined_fit_constant_shape():
funcs = {"a": lambda a, x: a[0] + a[1] * x,
"": lambda a, x: a[1] + x * 0}
pe.fits.least_squares(x, y, funcs, method='migrad')
pe.fits.least_squares(x, y, funcs, method='migrad', n_parms=2)
def test_fit_n_parms():
# Function that fails if the number of parameters is not specified:
def fcn(p, x):
# Assumes first half of terms are A, second half are E
NTerms = int(len(p)/2)
A = anp.array(p[0:NTerms])[:, np.newaxis] # shape (n, 1)
E_P = anp.array(p[NTerms:])[:, np.newaxis] # shape (n, 1)
# This if statement handles the case where x is a single value rather than an array
if isinstance(x, anp.float64) or isinstance(x, anp.int64) or isinstance(x, float) or isinstance(x, int):
x = anp.array([x])[np.newaxis, :] # shape (1, m)
else:
x = anp.array(x)[np.newaxis, :] # shape (1, m)
exp_term = anp.exp(-E_P * x)
weighted_sum = A * exp_term # shape (n, m)
return anp.mean(weighted_sum, axis=0) # shape(m)
c = pe.Corr([pe.pseudo_Obs(2. * np.exp(-.2 * t) + .4 * np.exp(+.4 * t) + .4 * np.exp(-.6 * t), .1, 'corr') for t in range(12)])
c.fit(fcn, n_parms=2)
c.fit(fcn, n_parms=4)
xf = [pe.pseudo_Obs(t, .05, 'corr') for t in range(c.T)]
yf = [c[t] for t in range(c.T)]
pe.fits.total_least_squares(xf, yf, fcn, n_parms=2)
pe.fits.total_least_squares(xf, yf, fcn, n_parms=4)
# Is expected to fail, this is what is fixed with n_parms
with pytest.raises(RuntimeError):
c.fit(fcn, )
with pytest.raises(RuntimeError):
pe.fits.total_least_squares(xf, yf, fcn, )
# Test for positivity
with pytest.raises(ValueError):
c.fit(fcn, n_parms=-2)
with pytest.raises(ValueError):
pe.fits.total_least_squares(xf, yf, fcn, n_parms=-4)
# Have to pass an integer
with pytest.raises(TypeError):
c.fit(fcn, n_parms=2.)
with pytest.raises(TypeError):
pe.fits.total_least_squares(xf, yf, fcn, n_parms=1.2343)
# Improper number of parameters (function should fail)
with pytest.raises(ValueError):
c.fit(fcn, n_parms=7)
with pytest.raises(ValueError):
pe.fits.total_least_squares(xf, yf, fcn, n_parms=5)
def fit_general(x, y, func, silent=False, **kwargs):