Merge branch 'develop' into documentation

fjosw 2025-10-09 11:14:27 +00:00
commit ed158d17f5
2 changed files with 116 additions and 36 deletions


@@ -131,7 +131,7 @@ def least_squares(x, y, func, priors=None, silent=False, **kwargs):
Obs (e.g. results from a previous fit) or strings containing a value and an error formatted like
0.548(23), 500(40) or 0.5(0.4)
silent : bool, optional
If true all output to the console is omitted (default False).
If True all output to the console is omitted (default False).
initial_guess : list
can provide an initial guess for the input parameters. Relevant for
non-linear fits with many parameters. In case of correlated fits the guess is used to perform
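For illustration, a minimal sketch of the priors and initial_guess options (hypothetical model and data; the y values need errors, so the gamma method is run first):

import numpy as np
import pyerrors as pe

def func(p, x):
    return p[0] + p[1] * x

x = np.arange(1, 6)
y = [pe.pseudo_Obs(0.5 + 0.4 * xi, 0.05, 'ens') for xi in x]
[o.gamma_method() for o in y]

# priors in the value(error) notation described above, one per parameter
fit_res = pe.fits.least_squares(x, y, func, priors=['0.5(4)', '0.4(2)'])
# or, without priors, seed the minimizer explicitly
fit_res = pe.fits.least_squares(x, y, func, initial_guess=[0.5, 0.4])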
@@ -139,10 +139,10 @@ def least_squares(x, y, func, priors=None, silent=False, **kwargs):
method : str, optional
can be used to choose an alternative method for the minimization of chisquare.
The possible methods are the ones which can be used for scipy.optimize.minimize and
migrad of iminuit. If no method is specified, Levenberg-Marquard is used.
        migrad of iminuit. If no method is specified, Levenberg-Marquardt is used.
Reliable alternatives are migrad, Powell and Nelder-Mead.
tol: float, optional
can be used (only for combined fits and methods other than Levenberg-Marquard) to set the tolerance for convergence
        can be used (only for combined fits and methods other than Levenberg-Marquardt) to set the tolerance for convergence
        to a different value. Larger values speed up convergence at the cost of a larger error on the fitted parameters (and possibly
        invalid estimates for parameter uncertainties); smaller values yield more accurate parameter values.
The stopping criterion depends on the method, e.g. migrad: edm_max = 0.002 * tol * errordef (EDM criterion: edm < edm_max)
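Reusing the setup from the sketch above, an alternative minimizer can be selected as follows (the combined-fit call in the comment is hypothetical, with dict-valued xd, yd and funcd):

fit_res = pe.fits.least_squares(x, y, func, method='Nelder-Mead')
# for combined fits a non-default method also makes tol effective:
# pe.fits.least_squares(xd, yd, funcd, method='migrad', tol=1e-4)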
@@ -152,7 +152,7 @@ def least_squares(x, y, func, priors=None, silent=False, **kwargs):
In practice the correlation matrix is Cholesky decomposed and inverted (instead of the covariance matrix).
This procedure should be numerically more stable as the correlation matrix is typically better conditioned (Jacobi preconditioning).
inv_chol_cov_matrix [array,list], optional
array: shape = (no of y values) X (no of y values)
array: shape = (number of y values) X (number of y values)
list: for an uncombined fit: [""]
              for a combined fit: list of keys belonging to the corr_matrix saved in the array; must match the keys of the y dict in alphabetical order
If correlated_fit=True is set as well, can provide an inverse covariance matrix (y errors, dy_f included!) of your own choosing for a correlated fit.
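A minimal sketch of the correlated variant, again reusing x, y and func from the sketch above (the inv_chol_cov_matrix call is left as a comment since the hypothetical matrix chol_inv depends on the data):

# correlation matrix estimated from the data, Cholesky decomposed and inverted internally
fit_res = pe.fits.least_squares(x, y, func, correlated_fit=True)
# custom inverse covariance for an uncombined fit (hence the [""] key list):
# pe.fits.least_squares(x, y, func, correlated_fit=True,
#                       inv_chol_cov_matrix=[chol_inv, [""]])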
@@ -168,6 +168,9 @@ def least_squares(x, y, func, priors=None, silent=False, **kwargs):
If True, a quantile-quantile plot of the fit result is generated (default False).
num_grad : bool
        Use numerical differentiation instead of automatic differentiation to perform the error propagation (default False).
n_parms : int, optional
Number of fit parameters. Overrides automatic detection of parameter count.
Useful when autodetection fails. Must match the length of initial_guess or priors (if provided).
Returns
-------
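The new n_parms keyword matters for variadic models, where probing cannot single out the intended parameter count; a sketch reusing x and y from the first sketch above (poly is hypothetical):

def poly(p, x):
    # accepts p of any length, so automatic detection is ambiguous
    res = 0 * x
    for i in range(len(p)):
        res = res + p[i] * x ** i
    return res

fit_res = pe.fits.least_squares(x, y, poly, n_parms=2)  # straight line
fit_res = pe.fits.least_squares(x, y, poly, n_parms=3)  # parabola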
@@ -269,6 +272,18 @@ def least_squares(x, y, func, priors=None, silent=False, **kwargs):
raise Exception("No y errors available, run the gamma method first.")
# number of fit parameters
if 'n_parms' in kwargs:
n_parms = kwargs.get('n_parms')
if not isinstance(n_parms, int):
raise TypeError(
f"'n_parms' must be an integer, got {n_parms!r} "
f"of type {type(n_parms).__name__}."
)
if n_parms <= 0:
raise ValueError(
f"'n_parms' must be a positive integer, got {n_parms}."
)
else:
n_parms_ls = []
for key in key_ls:
if not callable(funcd[key]):
@@ -535,17 +550,20 @@ def total_least_squares(x, y, func, silent=False, **kwargs):
It is important that all numpy functions refer to autograd.numpy, otherwise the differentiation
will not work.
silent : bool, optional
If true all output to the console is omitted (default False).
If True all output to the console is omitted (default False).
initial_guess : list
can provide an initial guess for the input parameters. Relevant for non-linear
fits with many parameters.
expected_chisquare : bool
If true prints the expected chisquare which is
If True prints the expected chisquare which is
corrected by effects caused by correlated input data.
This can take a while as the full correlation matrix
has to be calculated (default False).
num_grad : bool
Use numerical differentation instead of automatic differentiation to perform the error propagation (default False).
Use numerical differentiation instead of automatic differentiation to perform the error propagation (default False).
n_parms : int, optional
Number of fit parameters. Overrides automatic detection of parameter count.
Useful when autodetection fails. Must match the length of initial_guess (if provided).
Notes
-----
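A sketch of an errors-in-variables fit with the new keyword (hypothetical data; the x values are Obs so that their errors enter the fit):

import autograd.numpy as anp
import numpy as np
import pyerrors as pe

def exp_model(p, x):
    return p[0] * anp.exp(-p[1] * x)

x = [pe.pseudo_Obs(t, 0.05, 'ens') for t in range(1, 8)]
y = [pe.pseudo_Obs(2.0 * np.exp(-0.3 * t), 0.05, 'ens') for t in range(1, 8)]
[o.gamma_method() for o in x + y]

odr_res = pe.fits.total_least_squares(x, y, exp_model, n_parms=2)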
@@ -575,7 +593,19 @@ def total_least_squares(x, y, func, silent=False, **kwargs):
if not callable(func):
raise TypeError('func has to be a function.')
for i in range(42):
if 'n_parms' in kwargs:
n_parms = kwargs.get('n_parms')
if not isinstance(n_parms, int):
raise TypeError(
f"'n_parms' must be an integer, got {n_parms!r} "
f"of type {type(n_parms).__name__}."
)
if n_parms <= 0:
raise ValueError(
f"'n_parms' must be a positive integer, got {n_parms}."
)
else:
for i in range(100):
try:
func(np.arange(i), x.T[0])
except TypeError:
@@ -588,6 +618,7 @@ def total_least_squares(x, y, func, silent=False, **kwargs):
raise RuntimeError("Fit function is not valid.")
n_parms = i
if not silent:
print('Fit with', n_parms, 'parameter' + 's' * (n_parms > 1))
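The loop above probes the fit function with parameter vectors of increasing length and stops at the first count that is accepted; a standalone sketch of the same idea (detect_n_parms is a hypothetical helper, not library code):

import numpy as np

def detect_n_parms(func, x_probe, max_parms=100):
    # probe with 0, 1, 2, ... parameters until func stops raising
    for i in range(max_parms):
        try:
            func(np.arange(i), x_probe)
        except (TypeError, IndexError):
            continue
        else:
            return i  # smallest count the function accepts
    raise RuntimeError("Fit function is not valid.")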


@@ -1098,6 +1098,7 @@ def test_combined_fit_xerr():
}
xd = {k: np.transpose([[1 + .01 * np.random.uniform(), 2] for i in range(len(yd[k]))]) for k in fitd}
pe.fits.least_squares(xd, yd, fitd)
pe.fits.least_squares(xd, yd, fitd, n_parms=4)
def test_x_multidim_fit():
@@ -1340,6 +1341,54 @@ def test_combined_fit_constant_shape():
funcs = {"a": lambda a, x: a[0] + a[1] * x,
"": lambda a, x: a[1] + x * 0}
pe.fits.least_squares(x, y, funcs, method='migrad')
pe.fits.least_squares(x, y, funcs, method='migrad', n_parms=2)
def test_fit_n_parms():
    # Fit function for which automatic parameter detection fails unless n_parms is specified:
def fcn(p, x):
        # Assumes the first half of the terms are amplitudes A, the second half energies E
NTerms = int(len(p)/2)
A = anp.array(p[0:NTerms])[:, np.newaxis] # shape (n, 1)
E_P = anp.array(p[NTerms:])[:, np.newaxis] # shape (n, 1)
# This if statement handles the case where x is a single value rather than an array
        if isinstance(x, (anp.float64, anp.int64, float, int)):
x = anp.array([x])[np.newaxis, :] # shape (1, m)
else:
x = anp.array(x)[np.newaxis, :] # shape (1, m)
exp_term = anp.exp(-E_P * x)
weighted_sum = A * exp_term # shape (n, m)
        return anp.mean(weighted_sum, axis=0)  # shape (m,)
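    # Because fcn runs for any (even) length of p, probe-based detection
    # cannot single out the intended parameter count; hypothetical checks:
    #   fcn(np.arange(2), np.arange(5))  # 1 amplitude, 1 energy: runs
    #   fcn(np.arange(6), np.arange(5))  # 3 amplitudes, 3 energies: also runs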
c = pe.Corr([pe.pseudo_Obs(2. * np.exp(-.2 * t) + .4 * np.exp(+.4 * t) + .4 * np.exp(-.6 * t), .1, 'corr') for t in range(12)])
c.fit(fcn, n_parms=2)
c.fit(fcn, n_parms=4)
xf = [pe.pseudo_Obs(t, .05, 'corr') for t in range(c.T)]
yf = [c[t] for t in range(c.T)]
pe.fits.total_least_squares(xf, yf, fcn, n_parms=2)
pe.fits.total_least_squares(xf, yf, fcn, n_parms=4)
    # Expected to fail; this is the failure mode that n_parms fixes
with pytest.raises(RuntimeError):
        c.fit(fcn)
with pytest.raises(RuntimeError):
        pe.fits.total_least_squares(xf, yf, fcn)
# Test for positivity
with pytest.raises(ValueError):
c.fit(fcn, n_parms=-2)
with pytest.raises(ValueError):
pe.fits.total_least_squares(xf, yf, fcn, n_parms=-4)
    # n_parms has to be an integer
with pytest.raises(TypeError):
c.fit(fcn, n_parms=2.)
with pytest.raises(TypeError):
pe.fits.total_least_squares(xf, yf, fcn, n_parms=1.2343)
# Improper number of parameters (function should fail)
with pytest.raises(ValueError):
c.fit(fcn, n_parms=7)
with pytest.raises(ValueError):
pe.fits.total_least_squares(xf, yf, fcn, n_parms=5)
def fit_general(x, y, func, silent=False, **kwargs):