documentation of fit functions adjusted, deprecation warnings added to standard_fit and prior_fit
This commit is contained in:
Fabian Joswig 2021-11-01 14:54:36 +00:00
parent ebba71075a
commit f2a5f12dce
3 changed files with 61 additions and 70 deletions

View file

@@ -4,7 +4,7 @@ import autograd.numpy as anp
import matplotlib.pyplot as plt
import scipy.linalg
from .pyerrors import Obs, dump_object
from .fits import standard_fit
from .fits import least_squares
from .linalg import eigh, inv, cholesky
from .roots import find_root
@@ -316,7 +316,7 @@ class Corr:
xs = [x for x in range(fitrange[0], fitrange[1]) if not self.content[x] is None]
ys = [self.content[x][0] for x in range(fitrange[0], fitrange[1]) if not self.content[x] is None]
result = standard_fit(xs, ys, function, silent=silent, **kwargs)
result = least_squares(xs, ys, function, silent=silent, **kwargs)
if isinstance(result, list):
[item.gamma_method() for item in result if isinstance(item, Obs)]
elif isinstance(result, dict):

View file

@@ -56,35 +56,40 @@ class Fit_result(Sequence):
def least_squares(x, y, func, priors=None, silent=False, **kwargs):
if priors is not None:
return prior_fit(x, y, func, priors, silent=silent, **kwargs)
else:
return standard_fit(x, y, func, silent=silent, **kwargs)
"""Performs a non-linear fit to y = func(x).
Arguments:
----------
x : list
list of floats.
y : list
list of Obs.
func : object
fit function, has to be of the form
def standard_fit(x, y, func, silent=False, **kwargs):
"""Performs a non-linear fit to y = func(x) and returns a list of Obs corresponding to the fit parameters.
def func(a, x):
return a[0] + a[1] * x + a[2] * anp.sinh(x)
x has to be a list of floats.
y has to be a list of Obs, the dvalues of the Obs are used as yerror for the fit.
For multiple x values func can be of the form
func has to be of the form
def func(a, x):
(x1, x2) = x
return a[0] * x1 ** 2 + a[1] * x2
def func(a, x):
return a[0] + a[1] * x + a[2] * anp.sinh(x)
It is important that all numpy functions refer to autograd.numpy, otherwise the differentiation
will not work
priors : list, optional
priors has to be a list with an entry for every parameter in the fit. The entries can either be
Obs (e.g. results from a previous fit) or strings containing a value and an error formatted like
0.548(23), 500(40) or 0.5(0.4)
It is important for the subsequent error estimation that the e_tag for the gamma method is large
enough.
silent : bool, optional
If true all output to the console is omitted (default False).
For multiple x values func can be of the form
def func(a, x):
(x1, x2) = x
return a[0] * x1 ** 2 + a[1] * x2
It is important that all numpy functions refer to autograd.numpy, otherwise the differentiation
will not work
Keyword arguments
-----------------
silent -- If true all output to the console is omitted (default False).
initial_guess -- can provide an initial guess for the input parameters. Relevant for
non-linear fits with many parameters.
method -- can be used to choose an alternative method for the minimization of chisquare.
@@ -98,6 +103,18 @@ def standard_fit(x, y, func, silent=False, **kwargs):
This can take a while as the full correlation matrix
has to be calculated (default False).
"""
if priors is not None:
return _prior_fit(x, y, func, priors, silent=silent, **kwargs)
else:
return _standard_fit(x, y, func, silent=silent, **kwargs)
def standard_fit(x, y, func, silent=False, **kwargs):
warnings.warn("standard_fit renamed to least_squares", DeprecationWarning)
return least_squares(x, y, func, silent=silent, **kwargs)
def _standard_fit(x, y, func, silent=False, **kwargs):
output = Fit_result()
@@ -236,29 +253,31 @@ def odr_fit(x, y, func, silent=False, **kwargs):
def total_least_squares(x, y, func, silent=False, **kwargs):
"""Performs a non-linear fit to y = func(x) and returns a list of Obs corresponding to the fit parameters.
x has to be a list of Obs, or a tuple of lists of Obs
y has to be a list of Obs
the dvalues of the Obs are used as x- and yerror for the fit.
x : list
list of Obs, or a tuple of lists of Obs
y : list
list of Obs. The dvalues of the Obs are used as x- and yerror for the fit.
func : object
func has to be of the form
func has to be of the form
def func(a, x):
y = a[0] + a[1] * x + a[2] * anp.sinh(x)
return y
def func(a, x):
y = a[0] + a[1] * x + a[2] * anp.sinh(x)
return y
For multiple x values func can be of the form
For multiple x values func can be of the form
def func(a, x):
(x1, x2) = x
return a[0] * x1 ** 2 + a[1] * x2
def func(a, x):
(x1, x2) = x
return a[0] * x1 ** 2 + a[1] * x2
It is important that all numpy functions refer to autograd.numpy, otherwise the differentiation
will not work.
It is important that all numpy functions refer to autograd.numpy, otherwise the differentiation
will not work.
silent : bool, optional
If true all output to the console is omitted (default False).
Based on the orthogonal distance regression module of scipy
Keyword arguments
-----------------
silent -- If true all output to the console is omitted (default False).
initial_guess -- can provide an initial guess for the input parameters. Relevant for non-linear
fits with many parameters.
expected_chisquare -- If true prints the expected chisquare which is
@@ -396,39 +415,11 @@ def total_least_squares(x, y, func, silent=False, **kwargs):
def prior_fit(x, y, func, priors, silent=False, **kwargs):
"""Performs a non-linear fit to y = func(x) with given priors and returns a list of Obs corresponding to the fit parameters.
warnings.warn("prior_fit renamed to least_squares", DeprecationWarning)
return least_squares(x, y, func, priors=priors, silent=silent, **kwargs)
x has to be a list of floats.
y has to be a list of Obs, the dvalues of the Obs are used as yerror for the fit.
func has to be of the form
def func(a, x):
y = a[0] + a[1] * x + a[2] * anp.sinh(x)
return y
It is important that all numpy functions refer to autograd.numpy, otherwise the differentiation
will not work
priors has to be a list with an entry for every parameter in the fit. The entries can either be
Obs (e.g. results from a previous fit) or strings containing a value and an error formatted like
0.548(23), 500(40) or 0.5(0.4)
It is important for the subsequent error estimation that the e_tag for the gamma method is large
enough.
Keyword arguments
-----------------
dict_output -- If true, the output is a dictionary containing all relevant
data instead of just a list of the fit parameters.
silent -- If true all output to the console is omitted (default False).
initial_guess -- can provide an initial guess for the input parameters.
If no guess is provided, the prior values are used.
resplot -- if true, a plot which displays fit, data and residuals is generated (default False)
qqplot -- if true, a quantile-quantile plot of the fit result is generated (default False)
tol -- Specify the tolerance of the migrad solver (default 1e-4)
"""
def _prior_fit(x, y, func, priors, silent=False, **kwargs):
output = Fit_result()
output.fit_function = func

View file

@@ -8,7 +8,7 @@ import pytest
np.random.seed(0)
def test_standard_fit():
def test_least_squares():
dim = 10 + int(30 * np.random.rand())
x = np.arange(dim)
y = 2 * np.exp(-0.06 * x) + np.random.normal(0.0, 0.15, dim)
@@ -43,7 +43,7 @@ def test_standard_fit():
assert math.isclose(chi2_pyerrors, chi2_scipy, abs_tol=1e-10)
def test_odr_fit():
def test_total_least_squares():
dim = 10 + int(30 * np.random.rand())
x = np.arange(dim) + np.random.normal(0.0, 0.15, dim)
xerr = 0.1 + 0.1 * np.random.rand(dim)