From 14e23cf35c0583a84d4aa5f4ecaa7d0c5d124e34 Mon Sep 17 00:00:00 2001 From: fjosw Date: Wed, 15 Jun 2022 13:15:08 +0000 Subject: [PATCH] Documentation updated --- docs/pyerrors/fits.html | 2293 +++++++++++++++++++-------------------- 1 file changed, 1146 insertions(+), 1147 deletions(-) diff --git a/docs/pyerrors/fits.html b/docs/pyerrors/fits.html index b674cbc4..6b016de2 100644 --- a/docs/pyerrors/fits.html +++ b/docs/pyerrors/fits.html @@ -111,741 +111,740 @@ 8import matplotlib.pyplot as plt 9from matplotlib import gridspec 10from scipy.odr import ODR, Model, RealData - 11from scipy.stats import chi2 - 12import iminuit - 13from autograd import jacobian - 14from autograd import elementwise_grad as egrad - 15from .obs import Obs, derived_observable, covariance, cov_Obs + 11import iminuit + 12from autograd import jacobian + 13from autograd import elementwise_grad as egrad + 14from .obs import Obs, derived_observable, covariance, cov_Obs + 15 16 - 17 - 18class Fit_result(Sequence): - 19 """Represents fit results. - 20 - 21 Attributes - 22 ---------- - 23 fit_parameters : list - 24 results for the individual fit parameters, - 25 also accessible via indices. - 26 """ - 27 - 28 def __init__(self): - 29 self.fit_parameters = None - 30 - 31 def __getitem__(self, idx): - 32 return self.fit_parameters[idx] - 33 - 34 def __len__(self): - 35 return len(self.fit_parameters) - 36 - 37 def gamma_method(self, **kwargs): - 38 """Apply the gamma method to all fit parameters""" - 39 [o.gamma_method(**kwargs) for o in self.fit_parameters] - 40 - 41 def __str__(self): - 42 my_str = 'Goodness of fit:\n' - 43 if hasattr(self, 'chisquare_by_dof'): - 44 my_str += '\u03C7\u00b2/d.o.f. = ' + f'{self.chisquare_by_dof:2.6f}' + '\n' - 45 elif hasattr(self, 'residual_variance'): - 46 my_str += 'residual variance = ' + f'{self.residual_variance:2.6f}' + '\n' - 47 if hasattr(self, 'chisquare_by_expected_chisquare'): - 48 my_str += '\u03C7\u00b2/\u03C7\u00b2exp = ' + f'{self.chisquare_by_expected_chisquare:2.6f}' + '\n' - 49 if hasattr(self, 'p_value'): - 50 my_str += 'p-value = ' + f'{self.p_value:2.4f}' + '\n' - 51 my_str += 'Fit parameters:\n' - 52 for i_par, par in enumerate(self.fit_parameters): - 53 my_str += str(i_par) + '\t' + ' ' * int(par >= 0) + str(par).rjust(int(par < 0.0)) + '\n' - 54 return my_str - 55 - 56 def __repr__(self): - 57 m = max(map(len, list(self.__dict__.keys()))) + 1 - 58 return '\n'.join([key.rjust(m) + ': ' + repr(value) for key, value in sorted(self.__dict__.items())]) + 17class Fit_result(Sequence): + 18 """Represents fit results. + 19 + 20 Attributes + 21 ---------- + 22 fit_parameters : list + 23 results for the individual fit parameters, + 24 also accessible via indices. + 25 """ + 26 + 27 def __init__(self): + 28 self.fit_parameters = None + 29 + 30 def __getitem__(self, idx): + 31 return self.fit_parameters[idx] + 32 + 33 def __len__(self): + 34 return len(self.fit_parameters) + 35 + 36 def gamma_method(self, **kwargs): + 37 """Apply the gamma method to all fit parameters""" + 38 [o.gamma_method(**kwargs) for o in self.fit_parameters] + 39 + 40 def __str__(self): + 41 my_str = 'Goodness of fit:\n' + 42 if hasattr(self, 'chisquare_by_dof'): + 43 my_str += '\u03C7\u00b2/d.o.f. 
= ' + f'{self.chisquare_by_dof:2.6f}' + '\n' + 44 elif hasattr(self, 'residual_variance'): + 45 my_str += 'residual variance = ' + f'{self.residual_variance:2.6f}' + '\n' + 46 if hasattr(self, 'chisquare_by_expected_chisquare'): + 47 my_str += '\u03C7\u00b2/\u03C7\u00b2exp = ' + f'{self.chisquare_by_expected_chisquare:2.6f}' + '\n' + 48 if hasattr(self, 'p_value'): + 49 my_str += 'p-value = ' + f'{self.p_value:2.4f}' + '\n' + 50 my_str += 'Fit parameters:\n' + 51 for i_par, par in enumerate(self.fit_parameters): + 52 my_str += str(i_par) + '\t' + ' ' * int(par >= 0) + str(par).rjust(int(par < 0.0)) + '\n' + 53 return my_str + 54 + 55 def __repr__(self): + 56 m = max(map(len, list(self.__dict__.keys()))) + 1 + 57 return '\n'.join([key.rjust(m) + ': ' + repr(value) for key, value in sorted(self.__dict__.items())]) + 58 59 - 60 - 61def least_squares(x, y, func, priors=None, silent=False, **kwargs): - 62 r'''Performs a non-linear fit to y = func(x). - 63 - 64 Parameters - 65 ---------- - 66 x : list - 67 list of floats. - 68 y : list - 69 list of Obs. - 70 func : object - 71 fit function, has to be of the form - 72 - 73 ```python - 74 import autograd.numpy as anp - 75 - 76 def func(a, x): - 77 return a[0] + a[1] * x + a[2] * anp.sinh(x) - 78 ``` - 79 - 80 For multiple x values func can be of the form - 81 - 82 ```python - 83 def func(a, x): - 84 (x1, x2) = x - 85 return a[0] * x1 ** 2 + a[1] * x2 - 86 ``` - 87 - 88 It is important that all numpy functions refer to autograd.numpy, otherwise the differentiation - 89 will not work. - 90 priors : list, optional - 91 priors has to be a list with an entry for every parameter in the fit. The entries can either be - 92 Obs (e.g. results from a previous fit) or strings containing a value and an error formatted like - 93 0.548(23), 500(40) or 0.5(0.4) - 94 silent : bool, optional - 95 If true all output to the console is omitted (default False). - 96 initial_guess : list - 97 can provide an initial guess for the input parameters. Relevant for - 98 non-linear fits with many parameters. In case of correlated fits the guess is used to perform - 99 an uncorrelated fit which then serves as guess for the correlated fit. -100 method : str, optional -101 can be used to choose an alternative method for the minimization of chisquare. -102 The possible methods are the ones which can be used for scipy.optimize.minimize and -103 migrad of iminuit. If no method is specified, Levenberg-Marquard is used. -104 Reliable alternatives are migrad, Powell and Nelder-Mead. -105 correlated_fit : bool -106 If True, use the full inverse covariance matrix in the definition of the chisquare cost function. -107 For details about how the covariance matrix is estimated see `pyerrors.obs.covariance`. -108 In practice the correlation matrix is Cholesky decomposed and inverted (instead of the covariance matrix). -109 This procedure should be numerically more stable as the correlation matrix is typically better conditioned (Jacobi preconditioning). -110 At the moment this option only works for `prior==None` and when no `method` is given. -111 expected_chisquare : bool -112 If True estimates the expected chisquare which is -113 corrected by effects caused by correlated input data (default False). -114 resplot : bool -115 If True, a plot which displays fit, data and residuals is generated (default False). -116 qqplot : bool -117 If True, a quantile-quantile plot of the fit result is generated (default False). 
-118 ''' -119 if priors is not None: -120 return _prior_fit(x, y, func, priors, silent=silent, **kwargs) -121 else: -122 return _standard_fit(x, y, func, silent=silent, **kwargs) + 60def least_squares(x, y, func, priors=None, silent=False, **kwargs): + 61 r'''Performs a non-linear fit to y = func(x). + 62 + 63 Parameters + 64 ---------- + 65 x : list + 66 list of floats. + 67 y : list + 68 list of Obs. + 69 func : object + 70 fit function, has to be of the form + 71 + 72 ```python + 73 import autograd.numpy as anp + 74 + 75 def func(a, x): + 76 return a[0] + a[1] * x + a[2] * anp.sinh(x) + 77 ``` + 78 + 79 For multiple x values func can be of the form + 80 + 81 ```python + 82 def func(a, x): + 83 (x1, x2) = x + 84 return a[0] * x1 ** 2 + a[1] * x2 + 85 ``` + 86 + 87 It is important that all numpy functions refer to autograd.numpy, otherwise the differentiation + 88 will not work. + 89 priors : list, optional + 90 priors has to be a list with an entry for every parameter in the fit. The entries can either be + 91 Obs (e.g. results from a previous fit) or strings containing a value and an error formatted like + 92 0.548(23), 500(40) or 0.5(0.4) + 93 silent : bool, optional + 94 If true all output to the console is omitted (default False). + 95 initial_guess : list + 96 can provide an initial guess for the input parameters. Relevant for + 97 non-linear fits with many parameters. In case of correlated fits the guess is used to perform + 98 an uncorrelated fit which then serves as guess for the correlated fit. + 99 method : str, optional +100 can be used to choose an alternative method for the minimization of chisquare. +101 The possible methods are the ones which can be used for scipy.optimize.minimize and +102 migrad of iminuit. If no method is specified, Levenberg-Marquard is used. +103 Reliable alternatives are migrad, Powell and Nelder-Mead. +104 correlated_fit : bool +105 If True, use the full inverse covariance matrix in the definition of the chisquare cost function. +106 For details about how the covariance matrix is estimated see `pyerrors.obs.covariance`. +107 In practice the correlation matrix is Cholesky decomposed and inverted (instead of the covariance matrix). +108 This procedure should be numerically more stable as the correlation matrix is typically better conditioned (Jacobi preconditioning). +109 At the moment this option only works for `prior==None` and when no `method` is given. +110 expected_chisquare : bool +111 If True estimates the expected chisquare which is +112 corrected by effects caused by correlated input data (default False). +113 resplot : bool +114 If True, a plot which displays fit, data and residuals is generated (default False). +115 qqplot : bool +116 If True, a quantile-quantile plot of the fit result is generated (default False). +117 ''' +118 if priors is not None: +119 return _prior_fit(x, y, func, priors, silent=silent, **kwargs) +120 else: +121 return _standard_fit(x, y, func, silent=silent, **kwargs) +122 123 -124 -125def total_least_squares(x, y, func, silent=False, **kwargs): -126 r'''Performs a non-linear fit to y = func(x) and returns a list of Obs corresponding to the fit parameters. -127 -128 Parameters -129 ---------- -130 x : list -131 list of Obs, or a tuple of lists of Obs -132 y : list -133 list of Obs. The dvalues of the Obs are used as x- and yerror for the fit. 
-134 func : object -135 func has to be of the form -136 -137 ```python -138 import autograd.numpy as anp -139 -140 def func(a, x): -141 return a[0] + a[1] * x + a[2] * anp.sinh(x) -142 ``` -143 -144 For multiple x values func can be of the form -145 -146 ```python -147 def func(a, x): -148 (x1, x2) = x -149 return a[0] * x1 ** 2 + a[1] * x2 -150 ``` -151 -152 It is important that all numpy functions refer to autograd.numpy, otherwise the differentiation -153 will not work. -154 silent : bool, optional -155 If true all output to the console is omitted (default False). -156 initial_guess : list -157 can provide an initial guess for the input parameters. Relevant for non-linear -158 fits with many parameters. -159 expected_chisquare : bool -160 If true prints the expected chisquare which is -161 corrected by effects caused by correlated input data. -162 This can take a while as the full correlation matrix -163 has to be calculated (default False). -164 -165 Notes -166 ----- -167 Based on the orthogonal distance regression module of scipy -168 ''' -169 -170 output = Fit_result() -171 -172 output.fit_function = func -173 -174 x = np.array(x) -175 -176 x_shape = x.shape -177 -178 if not callable(func): -179 raise TypeError('func has to be a function.') -180 -181 for i in range(25): -182 try: -183 func(np.arange(i), x.T[0]) -184 except Exception: -185 pass -186 else: -187 break -188 -189 n_parms = i -190 if not silent: -191 print('Fit with', n_parms, 'parameter' + 's' * (n_parms > 1)) -192 -193 x_f = np.vectorize(lambda o: o.value)(x) -194 dx_f = np.vectorize(lambda o: o.dvalue)(x) -195 y_f = np.array([o.value for o in y]) -196 dy_f = np.array([o.dvalue for o in y]) -197 -198 if np.any(np.asarray(dx_f) <= 0.0): -199 raise Exception('No x errors available, run the gamma method first.') -200 -201 if np.any(np.asarray(dy_f) <= 0.0): -202 raise Exception('No y errors available, run the gamma method first.') -203 -204 if 'initial_guess' in kwargs: -205 x0 = kwargs.get('initial_guess') -206 if len(x0) != n_parms: -207 raise Exception('Initial guess does not have the correct length: %d vs. 
%d' % (len(x0), n_parms)) -208 else: -209 x0 = [1] * n_parms -210 -211 data = RealData(x_f, y_f, sx=dx_f, sy=dy_f) -212 model = Model(func) -213 odr = ODR(data, model, x0, partol=np.finfo(np.float64).eps) -214 odr.set_job(fit_type=0, deriv=1) -215 out = odr.run() -216 -217 output.residual_variance = out.res_var -218 -219 output.method = 'ODR' -220 -221 output.message = out.stopreason -222 -223 output.xplus = out.xplus -224 -225 if not silent: -226 print('Method: ODR') -227 print(*out.stopreason) -228 print('Residual variance:', output.residual_variance) -229 -230 if out.info > 3: -231 raise Exception('The minimization procedure did not converge.') -232 -233 m = x_f.size -234 -235 def odr_chisquare(p): -236 model = func(p[:n_parms], p[n_parms:].reshape(x_shape)) -237 chisq = anp.sum(((y_f - model) / dy_f) ** 2) + anp.sum(((x_f - p[n_parms:].reshape(x_shape)) / dx_f) ** 2) -238 return chisq -239 -240 if kwargs.get('expected_chisquare') is True: -241 W = np.diag(1 / np.asarray(np.concatenate((dy_f.ravel(), dx_f.ravel())))) -242 -243 if kwargs.get('covariance') is not None: -244 cov = kwargs.get('covariance') -245 else: -246 cov = covariance(np.concatenate((y, x.ravel()))) -247 -248 number_of_x_parameters = int(m / x_f.shape[-1]) -249 -250 old_jac = jacobian(func)(out.beta, out.xplus) -251 fused_row1 = np.concatenate((old_jac, np.concatenate((number_of_x_parameters * [np.zeros(old_jac.shape)]), axis=0))) -252 fused_row2 = np.concatenate((jacobian(lambda x, y: func(y, x))(out.xplus, out.beta).reshape(x_f.shape[-1], x_f.shape[-1] * number_of_x_parameters), np.identity(number_of_x_parameters * old_jac.shape[0]))) -253 new_jac = np.concatenate((fused_row1, fused_row2), axis=1) -254 -255 A = W @ new_jac -256 P_phi = A @ np.linalg.pinv(A.T @ A) @ A.T -257 expected_chisquare = np.trace((np.identity(P_phi.shape[0]) - P_phi) @ W @ cov @ W) -258 if expected_chisquare <= 0.0: -259 warnings.warn("Negative expected_chisquare.", RuntimeWarning) -260 expected_chisquare = np.abs(expected_chisquare) -261 output.chisquare_by_expected_chisquare = odr_chisquare(np.concatenate((out.beta, out.xplus.ravel()))) / expected_chisquare -262 if not silent: -263 print('chisquare/expected_chisquare:', -264 output.chisquare_by_expected_chisquare) -265 -266 fitp = out.beta -267 try: -268 hess = jacobian(jacobian(odr_chisquare))(np.concatenate((fitp, out.xplus.ravel()))) -269 except TypeError: -270 raise Exception("It is required to use autograd.numpy instead of numpy within fit functions, see the documentation for details.") from None -271 -272 def odr_chisquare_compact_x(d): -273 model = func(d[:n_parms], d[n_parms:n_parms + m].reshape(x_shape)) -274 chisq = anp.sum(((y_f - model) / dy_f) ** 2) + anp.sum(((d[n_parms + m:].reshape(x_shape) - d[n_parms:n_parms + m].reshape(x_shape)) / dx_f) ** 2) -275 return chisq -276 -277 jac_jac_x = jacobian(jacobian(odr_chisquare_compact_x))(np.concatenate((fitp, out.xplus.ravel(), x_f.ravel()))) -278 -279 # Compute hess^{-1} @ jac_jac_x[:n_parms + m, n_parms + m:] using LAPACK dgesv -280 try: -281 deriv_x = -scipy.linalg.solve(hess, jac_jac_x[:n_parms + m, n_parms + m:]) -282 except np.linalg.LinAlgError: -283 raise Exception("Cannot invert hessian matrix.") -284 -285 def odr_chisquare_compact_y(d): -286 model = func(d[:n_parms], d[n_parms:n_parms + m].reshape(x_shape)) -287 chisq = anp.sum(((d[n_parms + m:] - model) / dy_f) ** 2) + anp.sum(((x_f - d[n_parms:n_parms + m].reshape(x_shape)) / dx_f) ** 2) -288 return chisq -289 -290 jac_jac_y = 
jacobian(jacobian(odr_chisquare_compact_y))(np.concatenate((fitp, out.xplus.ravel(), y_f))) -291 -292 # Compute hess^{-1} @ jac_jac_y[:n_parms + m, n_parms + m:] using LAPACK dgesv -293 try: -294 deriv_y = -scipy.linalg.solve(hess, jac_jac_y[:n_parms + m, n_parms + m:]) -295 except np.linalg.LinAlgError: -296 raise Exception("Cannot invert hessian matrix.") -297 -298 result = [] -299 for i in range(n_parms): -300 result.append(derived_observable(lambda my_var, **kwargs: (my_var[0] + np.finfo(np.float64).eps) / (x.ravel()[0].value + np.finfo(np.float64).eps) * out.beta[i], list(x.ravel()) + list(y), man_grad=list(deriv_x[i]) + list(deriv_y[i]))) -301 -302 output.fit_parameters = result -303 -304 output.odr_chisquare = odr_chisquare(np.concatenate((out.beta, out.xplus.ravel()))) -305 output.dof = x.shape[-1] - n_parms -306 output.p_value = 1 - chi2.cdf(output.odr_chisquare, output.dof) -307 -308 return output +124def total_least_squares(x, y, func, silent=False, **kwargs): +125 r'''Performs a non-linear fit to y = func(x) and returns a list of Obs corresponding to the fit parameters. +126 +127 Parameters +128 ---------- +129 x : list +130 list of Obs, or a tuple of lists of Obs +131 y : list +132 list of Obs. The dvalues of the Obs are used as x- and yerror for the fit. +133 func : object +134 func has to be of the form +135 +136 ```python +137 import autograd.numpy as anp +138 +139 def func(a, x): +140 return a[0] + a[1] * x + a[2] * anp.sinh(x) +141 ``` +142 +143 For multiple x values func can be of the form +144 +145 ```python +146 def func(a, x): +147 (x1, x2) = x +148 return a[0] * x1 ** 2 + a[1] * x2 +149 ``` +150 +151 It is important that all numpy functions refer to autograd.numpy, otherwise the differentiation +152 will not work. +153 silent : bool, optional +154 If true all output to the console is omitted (default False). +155 initial_guess : list +156 can provide an initial guess for the input parameters. Relevant for non-linear +157 fits with many parameters. +158 expected_chisquare : bool +159 If true prints the expected chisquare which is +160 corrected by effects caused by correlated input data. +161 This can take a while as the full correlation matrix +162 has to be calculated (default False). +163 +164 Notes +165 ----- +166 Based on the orthogonal distance regression module of scipy +167 ''' +168 +169 output = Fit_result() +170 +171 output.fit_function = func +172 +173 x = np.array(x) +174 +175 x_shape = x.shape +176 +177 if not callable(func): +178 raise TypeError('func has to be a function.') +179 +180 for i in range(25): +181 try: +182 func(np.arange(i), x.T[0]) +183 except Exception: +184 pass +185 else: +186 break +187 +188 n_parms = i +189 if not silent: +190 print('Fit with', n_parms, 'parameter' + 's' * (n_parms > 1)) +191 +192 x_f = np.vectorize(lambda o: o.value)(x) +193 dx_f = np.vectorize(lambda o: o.dvalue)(x) +194 y_f = np.array([o.value for o in y]) +195 dy_f = np.array([o.dvalue for o in y]) +196 +197 if np.any(np.asarray(dx_f) <= 0.0): +198 raise Exception('No x errors available, run the gamma method first.') +199 +200 if np.any(np.asarray(dy_f) <= 0.0): +201 raise Exception('No y errors available, run the gamma method first.') +202 +203 if 'initial_guess' in kwargs: +204 x0 = kwargs.get('initial_guess') +205 if len(x0) != n_parms: +206 raise Exception('Initial guess does not have the correct length: %d vs. 
%d' % (len(x0), n_parms)) +207 else: +208 x0 = [1] * n_parms +209 +210 data = RealData(x_f, y_f, sx=dx_f, sy=dy_f) +211 model = Model(func) +212 odr = ODR(data, model, x0, partol=np.finfo(np.float64).eps) +213 odr.set_job(fit_type=0, deriv=1) +214 out = odr.run() +215 +216 output.residual_variance = out.res_var +217 +218 output.method = 'ODR' +219 +220 output.message = out.stopreason +221 +222 output.xplus = out.xplus +223 +224 if not silent: +225 print('Method: ODR') +226 print(*out.stopreason) +227 print('Residual variance:', output.residual_variance) +228 +229 if out.info > 3: +230 raise Exception('The minimization procedure did not converge.') +231 +232 m = x_f.size +233 +234 def odr_chisquare(p): +235 model = func(p[:n_parms], p[n_parms:].reshape(x_shape)) +236 chisq = anp.sum(((y_f - model) / dy_f) ** 2) + anp.sum(((x_f - p[n_parms:].reshape(x_shape)) / dx_f) ** 2) +237 return chisq +238 +239 if kwargs.get('expected_chisquare') is True: +240 W = np.diag(1 / np.asarray(np.concatenate((dy_f.ravel(), dx_f.ravel())))) +241 +242 if kwargs.get('covariance') is not None: +243 cov = kwargs.get('covariance') +244 else: +245 cov = covariance(np.concatenate((y, x.ravel()))) +246 +247 number_of_x_parameters = int(m / x_f.shape[-1]) +248 +249 old_jac = jacobian(func)(out.beta, out.xplus) +250 fused_row1 = np.concatenate((old_jac, np.concatenate((number_of_x_parameters * [np.zeros(old_jac.shape)]), axis=0))) +251 fused_row2 = np.concatenate((jacobian(lambda x, y: func(y, x))(out.xplus, out.beta).reshape(x_f.shape[-1], x_f.shape[-1] * number_of_x_parameters), np.identity(number_of_x_parameters * old_jac.shape[0]))) +252 new_jac = np.concatenate((fused_row1, fused_row2), axis=1) +253 +254 A = W @ new_jac +255 P_phi = A @ np.linalg.pinv(A.T @ A) @ A.T +256 expected_chisquare = np.trace((np.identity(P_phi.shape[0]) - P_phi) @ W @ cov @ W) +257 if expected_chisquare <= 0.0: +258 warnings.warn("Negative expected_chisquare.", RuntimeWarning) +259 expected_chisquare = np.abs(expected_chisquare) +260 output.chisquare_by_expected_chisquare = odr_chisquare(np.concatenate((out.beta, out.xplus.ravel()))) / expected_chisquare +261 if not silent: +262 print('chisquare/expected_chisquare:', +263 output.chisquare_by_expected_chisquare) +264 +265 fitp = out.beta +266 try: +267 hess = jacobian(jacobian(odr_chisquare))(np.concatenate((fitp, out.xplus.ravel()))) +268 except TypeError: +269 raise Exception("It is required to use autograd.numpy instead of numpy within fit functions, see the documentation for details.") from None +270 +271 def odr_chisquare_compact_x(d): +272 model = func(d[:n_parms], d[n_parms:n_parms + m].reshape(x_shape)) +273 chisq = anp.sum(((y_f - model) / dy_f) ** 2) + anp.sum(((d[n_parms + m:].reshape(x_shape) - d[n_parms:n_parms + m].reshape(x_shape)) / dx_f) ** 2) +274 return chisq +275 +276 jac_jac_x = jacobian(jacobian(odr_chisquare_compact_x))(np.concatenate((fitp, out.xplus.ravel(), x_f.ravel()))) +277 +278 # Compute hess^{-1} @ jac_jac_x[:n_parms + m, n_parms + m:] using LAPACK dgesv +279 try: +280 deriv_x = -scipy.linalg.solve(hess, jac_jac_x[:n_parms + m, n_parms + m:]) +281 except np.linalg.LinAlgError: +282 raise Exception("Cannot invert hessian matrix.") +283 +284 def odr_chisquare_compact_y(d): +285 model = func(d[:n_parms], d[n_parms:n_parms + m].reshape(x_shape)) +286 chisq = anp.sum(((d[n_parms + m:] - model) / dy_f) ** 2) + anp.sum(((x_f - d[n_parms:n_parms + m].reshape(x_shape)) / dx_f) ** 2) +287 return chisq +288 +289 jac_jac_y = 
jacobian(jacobian(odr_chisquare_compact_y))(np.concatenate((fitp, out.xplus.ravel(), y_f))) +290 +291 # Compute hess^{-1} @ jac_jac_y[:n_parms + m, n_parms + m:] using LAPACK dgesv +292 try: +293 deriv_y = -scipy.linalg.solve(hess, jac_jac_y[:n_parms + m, n_parms + m:]) +294 except np.linalg.LinAlgError: +295 raise Exception("Cannot invert hessian matrix.") +296 +297 result = [] +298 for i in range(n_parms): +299 result.append(derived_observable(lambda my_var, **kwargs: (my_var[0] + np.finfo(np.float64).eps) / (x.ravel()[0].value + np.finfo(np.float64).eps) * out.beta[i], list(x.ravel()) + list(y), man_grad=list(deriv_x[i]) + list(deriv_y[i]))) +300 +301 output.fit_parameters = result +302 +303 output.odr_chisquare = odr_chisquare(np.concatenate((out.beta, out.xplus.ravel()))) +304 output.dof = x.shape[-1] - n_parms +305 output.p_value = 1 - scipy.stats.chi2.cdf(output.odr_chisquare, output.dof) +306 +307 return output +308 309 -310 -311def _prior_fit(x, y, func, priors, silent=False, **kwargs): -312 output = Fit_result() -313 -314 output.fit_function = func -315 -316 x = np.asarray(x) -317 -318 if not callable(func): -319 raise TypeError('func has to be a function.') -320 -321 for i in range(100): -322 try: -323 func(np.arange(i), 0) -324 except Exception: -325 pass -326 else: -327 break -328 -329 n_parms = i -330 -331 if n_parms != len(priors): -332 raise Exception('Priors does not have the correct length.') -333 -334 def extract_val_and_dval(string): -335 split_string = string.split('(') -336 if '.' in split_string[0] and '.' not in split_string[1][:-1]: -337 factor = 10 ** -len(split_string[0].partition('.')[2]) -338 else: -339 factor = 1 -340 return float(split_string[0]), float(split_string[1][:-1]) * factor -341 -342 loc_priors = [] -343 for i_n, i_prior in enumerate(priors): -344 if isinstance(i_prior, Obs): -345 loc_priors.append(i_prior) -346 else: -347 loc_val, loc_dval = extract_val_and_dval(i_prior) -348 loc_priors.append(cov_Obs(loc_val, loc_dval ** 2, '#prior' + str(i_n) + f"_{np.random.randint(2147483647):010d}")) -349 -350 output.priors = loc_priors -351 -352 if not silent: -353 print('Fit with', n_parms, 'parameter' + 's' * (n_parms > 1)) -354 -355 y_f = [o.value for o in y] -356 dy_f = [o.dvalue for o in y] -357 -358 if np.any(np.asarray(dy_f) <= 0.0): -359 raise Exception('No y errors available, run the gamma method first.') -360 -361 p_f = [o.value for o in loc_priors] -362 dp_f = [o.dvalue for o in loc_priors] -363 -364 if np.any(np.asarray(dp_f) <= 0.0): -365 raise Exception('No prior errors available, run the gamma method first.') -366 -367 if 'initial_guess' in kwargs: -368 x0 = kwargs.get('initial_guess') -369 if len(x0) != n_parms: -370 raise Exception('Initial guess does not have the correct length.') -371 else: -372 x0 = p_f -373 -374 def chisqfunc(p): -375 model = func(p, x) -376 chisq = anp.sum(((y_f - model) / dy_f) ** 2) + anp.sum(((p_f - p) / dp_f) ** 2) -377 return chisq -378 -379 if not silent: -380 print('Method: migrad') -381 -382 m = iminuit.Minuit(chisqfunc, x0) -383 m.errordef = 1 -384 m.print_level = 0 -385 if 'tol' in kwargs: -386 m.tol = kwargs.get('tol') -387 else: -388 m.tol = 1e-4 -389 m.migrad() -390 params = np.asarray(m.values) -391 -392 output.chisquare_by_dof = m.fval / len(x) -393 -394 output.method = 'migrad' -395 -396 if not silent: -397 print('chisquare/d.o.f.:', output.chisquare_by_dof) -398 -399 if not m.fmin.is_valid: -400 raise Exception('The minimization procedure did not converge.') -401 -402 hess_inv = 
np.linalg.pinv(jacobian(jacobian(chisqfunc))(params)) -403 -404 def chisqfunc_compact(d): -405 model = func(d[:n_parms], x) -406 chisq = anp.sum(((d[n_parms: n_parms + len(x)] - model) / dy_f) ** 2) + anp.sum(((d[n_parms + len(x):] - d[:n_parms]) / dp_f) ** 2) -407 return chisq -408 -409 jac_jac = jacobian(jacobian(chisqfunc_compact))(np.concatenate((params, y_f, p_f))) -410 -411 deriv = -hess_inv @ jac_jac[:n_parms, n_parms:] -412 -413 result = [] -414 for i in range(n_parms): -415 result.append(derived_observable(lambda x, **kwargs: (x[0] + np.finfo(np.float64).eps) / (y[0].value + np.finfo(np.float64).eps) * params[i], list(y) + list(loc_priors), man_grad=list(deriv[i]))) -416 -417 output.fit_parameters = result -418 output.chisquare = chisqfunc(np.asarray(params)) -419 -420 if kwargs.get('resplot') is True: -421 residual_plot(x, y, func, result) -422 -423 if kwargs.get('qqplot') is True: -424 qqplot(x, y, func, result) -425 -426 return output +310def _prior_fit(x, y, func, priors, silent=False, **kwargs): +311 output = Fit_result() +312 +313 output.fit_function = func +314 +315 x = np.asarray(x) +316 +317 if not callable(func): +318 raise TypeError('func has to be a function.') +319 +320 for i in range(100): +321 try: +322 func(np.arange(i), 0) +323 except Exception: +324 pass +325 else: +326 break +327 +328 n_parms = i +329 +330 if n_parms != len(priors): +331 raise Exception('Priors does not have the correct length.') +332 +333 def extract_val_and_dval(string): +334 split_string = string.split('(') +335 if '.' in split_string[0] and '.' not in split_string[1][:-1]: +336 factor = 10 ** -len(split_string[0].partition('.')[2]) +337 else: +338 factor = 1 +339 return float(split_string[0]), float(split_string[1][:-1]) * factor +340 +341 loc_priors = [] +342 for i_n, i_prior in enumerate(priors): +343 if isinstance(i_prior, Obs): +344 loc_priors.append(i_prior) +345 else: +346 loc_val, loc_dval = extract_val_and_dval(i_prior) +347 loc_priors.append(cov_Obs(loc_val, loc_dval ** 2, '#prior' + str(i_n) + f"_{np.random.randint(2147483647):010d}")) +348 +349 output.priors = loc_priors +350 +351 if not silent: +352 print('Fit with', n_parms, 'parameter' + 's' * (n_parms > 1)) +353 +354 y_f = [o.value for o in y] +355 dy_f = [o.dvalue for o in y] +356 +357 if np.any(np.asarray(dy_f) <= 0.0): +358 raise Exception('No y errors available, run the gamma method first.') +359 +360 p_f = [o.value for o in loc_priors] +361 dp_f = [o.dvalue for o in loc_priors] +362 +363 if np.any(np.asarray(dp_f) <= 0.0): +364 raise Exception('No prior errors available, run the gamma method first.') +365 +366 if 'initial_guess' in kwargs: +367 x0 = kwargs.get('initial_guess') +368 if len(x0) != n_parms: +369 raise Exception('Initial guess does not have the correct length.') +370 else: +371 x0 = p_f +372 +373 def chisqfunc(p): +374 model = func(p, x) +375 chisq = anp.sum(((y_f - model) / dy_f) ** 2) + anp.sum(((p_f - p) / dp_f) ** 2) +376 return chisq +377 +378 if not silent: +379 print('Method: migrad') +380 +381 m = iminuit.Minuit(chisqfunc, x0) +382 m.errordef = 1 +383 m.print_level = 0 +384 if 'tol' in kwargs: +385 m.tol = kwargs.get('tol') +386 else: +387 m.tol = 1e-4 +388 m.migrad() +389 params = np.asarray(m.values) +390 +391 output.chisquare_by_dof = m.fval / len(x) +392 +393 output.method = 'migrad' +394 +395 if not silent: +396 print('chisquare/d.o.f.:', output.chisquare_by_dof) +397 +398 if not m.fmin.is_valid: +399 raise Exception('The minimization procedure did not converge.') +400 +401 hess_inv = 
np.linalg.pinv(jacobian(jacobian(chisqfunc))(params)) +402 +403 def chisqfunc_compact(d): +404 model = func(d[:n_parms], x) +405 chisq = anp.sum(((d[n_parms: n_parms + len(x)] - model) / dy_f) ** 2) + anp.sum(((d[n_parms + len(x):] - d[:n_parms]) / dp_f) ** 2) +406 return chisq +407 +408 jac_jac = jacobian(jacobian(chisqfunc_compact))(np.concatenate((params, y_f, p_f))) +409 +410 deriv = -hess_inv @ jac_jac[:n_parms, n_parms:] +411 +412 result = [] +413 for i in range(n_parms): +414 result.append(derived_observable(lambda x, **kwargs: (x[0] + np.finfo(np.float64).eps) / (y[0].value + np.finfo(np.float64).eps) * params[i], list(y) + list(loc_priors), man_grad=list(deriv[i]))) +415 +416 output.fit_parameters = result +417 output.chisquare = chisqfunc(np.asarray(params)) +418 +419 if kwargs.get('resplot') is True: +420 residual_plot(x, y, func, result) +421 +422 if kwargs.get('qqplot') is True: +423 qqplot(x, y, func, result) +424 +425 return output +426 427 -428 -429def _standard_fit(x, y, func, silent=False, **kwargs): -430 -431 output = Fit_result() -432 -433 output.fit_function = func -434 -435 x = np.asarray(x) -436 -437 if x.shape[-1] != len(y): -438 raise Exception('x and y input have to have the same length') -439 -440 if len(x.shape) > 2: -441 raise Exception('Unknown format for x values') -442 -443 if not callable(func): -444 raise TypeError('func has to be a function.') -445 -446 for i in range(25): -447 try: -448 func(np.arange(i), x.T[0]) -449 except Exception: -450 pass -451 else: -452 break -453 -454 n_parms = i -455 -456 if not silent: -457 print('Fit with', n_parms, 'parameter' + 's' * (n_parms > 1)) -458 -459 y_f = [o.value for o in y] -460 dy_f = [o.dvalue for o in y] -461 -462 if np.any(np.asarray(dy_f) <= 0.0): -463 raise Exception('No y errors available, run the gamma method first.') -464 -465 if 'initial_guess' in kwargs: -466 x0 = kwargs.get('initial_guess') -467 if len(x0) != n_parms: -468 raise Exception('Initial guess does not have the correct length: %d vs. 
%d' % (len(x0), n_parms)) -469 else: -470 x0 = [0.1] * n_parms -471 -472 if kwargs.get('correlated_fit') is True: -473 corr = covariance(y, correlation=True, **kwargs) -474 covdiag = np.diag(1 / np.asarray(dy_f)) -475 condn = np.linalg.cond(corr) -476 if condn > 0.1 / np.finfo(float).eps: -477 raise Exception(f"Cannot invert correlation matrix as its condition number exceeds machine precision ({condn:1.2e})") -478 if condn > 1 / np.sqrt(np.finfo(float).eps): -479 warnings.warn("Correlation matrix may be ill-conditioned, condition number: {%1.2e}" % (condn), RuntimeWarning) -480 chol = np.linalg.cholesky(corr) -481 chol_inv = scipy.linalg.solve_triangular(chol, covdiag, lower=True) -482 -483 def chisqfunc_corr(p): -484 model = func(p, x) -485 chisq = anp.sum(anp.dot(chol_inv, (y_f - model)) ** 2) -486 return chisq -487 -488 def chisqfunc(p): -489 model = func(p, x) -490 chisq = anp.sum(((y_f - model) / dy_f) ** 2) -491 return chisq -492 -493 output.method = kwargs.get('method', 'Levenberg-Marquardt') -494 if not silent: -495 print('Method:', output.method) -496 -497 if output.method != 'Levenberg-Marquardt': -498 if output.method == 'migrad': -499 fit_result = iminuit.minimize(chisqfunc, x0, tol=1e-4) # Stopping criterion 0.002 * tol * errordef -500 if kwargs.get('correlated_fit') is True: -501 fit_result = iminuit.minimize(chisqfunc_corr, fit_result.x, tol=1e-4) # Stopping criterion 0.002 * tol * errordef -502 output.iterations = fit_result.nfev -503 else: -504 fit_result = scipy.optimize.minimize(chisqfunc, x0, method=kwargs.get('method'), tol=1e-12) -505 if kwargs.get('correlated_fit') is True: -506 fit_result = scipy.optimize.minimize(chisqfunc_corr, fit_result.x, method=kwargs.get('method'), tol=1e-12) -507 output.iterations = fit_result.nit -508 -509 chisquare = fit_result.fun -510 -511 else: -512 if kwargs.get('correlated_fit') is True: -513 def chisqfunc_residuals_corr(p): -514 model = func(p, x) -515 chisq = anp.dot(chol_inv, (y_f - model)) -516 return chisq -517 -518 def chisqfunc_residuals(p): -519 model = func(p, x) -520 chisq = ((y_f - model) / dy_f) -521 return chisq -522 -523 fit_result = scipy.optimize.least_squares(chisqfunc_residuals, x0, method='lm', ftol=1e-15, gtol=1e-15, xtol=1e-15) -524 if kwargs.get('correlated_fit') is True: -525 fit_result = scipy.optimize.least_squares(chisqfunc_residuals_corr, fit_result.x, method='lm', ftol=1e-15, gtol=1e-15, xtol=1e-15) -526 -527 chisquare = np.sum(fit_result.fun ** 2) -528 if kwargs.get('correlated_fit') is True: -529 assert np.isclose(chisquare, chisqfunc_corr(fit_result.x), atol=1e-14) -530 else: -531 assert np.isclose(chisquare, chisqfunc(fit_result.x), atol=1e-14) -532 -533 output.iterations = fit_result.nfev -534 -535 if not fit_result.success: -536 raise Exception('The minimization procedure did not converge.') -537 -538 if x.shape[-1] - n_parms > 0: -539 output.chisquare_by_dof = chisquare / (x.shape[-1] - n_parms) -540 else: -541 output.chisquare_by_dof = float('nan') -542 -543 output.message = fit_result.message -544 if not silent: -545 print(fit_result.message) -546 print('chisquare/d.o.f.:', output.chisquare_by_dof) -547 -548 if kwargs.get('expected_chisquare') is True: -549 if kwargs.get('correlated_fit') is not True: -550 W = np.diag(1 / np.asarray(dy_f)) -551 cov = covariance(y) -552 A = W @ jacobian(func)(fit_result.x, x) -553 P_phi = A @ np.linalg.pinv(A.T @ A) @ A.T -554 expected_chisquare = np.trace((np.identity(x.shape[-1]) - P_phi) @ W @ cov @ W) -555 output.chisquare_by_expected_chisquare = chisquare / 
expected_chisquare -556 if not silent: -557 print('chisquare/expected_chisquare:', -558 output.chisquare_by_expected_chisquare) -559 -560 fitp = fit_result.x -561 try: -562 if kwargs.get('correlated_fit') is True: -563 hess = jacobian(jacobian(chisqfunc_corr))(fitp) -564 else: -565 hess = jacobian(jacobian(chisqfunc))(fitp) -566 except TypeError: -567 raise Exception("It is required to use autograd.numpy instead of numpy within fit functions, see the documentation for details.") from None -568 -569 if kwargs.get('correlated_fit') is True: -570 def chisqfunc_compact(d): -571 model = func(d[:n_parms], x) -572 chisq = anp.sum(anp.dot(chol_inv, (d[n_parms:] - model)) ** 2) -573 return chisq -574 -575 else: -576 def chisqfunc_compact(d): -577 model = func(d[:n_parms], x) -578 chisq = anp.sum(((d[n_parms:] - model) / dy_f) ** 2) -579 return chisq -580 -581 jac_jac = jacobian(jacobian(chisqfunc_compact))(np.concatenate((fitp, y_f))) -582 -583 # Compute hess^{-1} @ jac_jac[:n_parms, n_parms:] using LAPACK dgesv -584 try: -585 deriv = -scipy.linalg.solve(hess, jac_jac[:n_parms, n_parms:]) -586 except np.linalg.LinAlgError: -587 raise Exception("Cannot invert hessian matrix.") -588 -589 result = [] -590 for i in range(n_parms): -591 result.append(derived_observable(lambda x, **kwargs: (x[0] + np.finfo(np.float64).eps) / (y[0].value + np.finfo(np.float64).eps) * fit_result.x[i], list(y), man_grad=list(deriv[i]))) -592 -593 output.fit_parameters = result -594 -595 output.chisquare = chisquare -596 output.dof = x.shape[-1] - n_parms -597 output.p_value = 1 - chi2.cdf(output.chisquare, output.dof) -598 -599 if kwargs.get('resplot') is True: -600 residual_plot(x, y, func, result) -601 -602 if kwargs.get('qqplot') is True: -603 qqplot(x, y, func, result) -604 -605 return output +428def _standard_fit(x, y, func, silent=False, **kwargs): +429 +430 output = Fit_result() +431 +432 output.fit_function = func +433 +434 x = np.asarray(x) +435 +436 if x.shape[-1] != len(y): +437 raise Exception('x and y input have to have the same length') +438 +439 if len(x.shape) > 2: +440 raise Exception('Unknown format for x values') +441 +442 if not callable(func): +443 raise TypeError('func has to be a function.') +444 +445 for i in range(25): +446 try: +447 func(np.arange(i), x.T[0]) +448 except Exception: +449 pass +450 else: +451 break +452 +453 n_parms = i +454 +455 if not silent: +456 print('Fit with', n_parms, 'parameter' + 's' * (n_parms > 1)) +457 +458 y_f = [o.value for o in y] +459 dy_f = [o.dvalue for o in y] +460 +461 if np.any(np.asarray(dy_f) <= 0.0): +462 raise Exception('No y errors available, run the gamma method first.') +463 +464 if 'initial_guess' in kwargs: +465 x0 = kwargs.get('initial_guess') +466 if len(x0) != n_parms: +467 raise Exception('Initial guess does not have the correct length: %d vs. 
%d' % (len(x0), n_parms)) +468 else: +469 x0 = [0.1] * n_parms +470 +471 if kwargs.get('correlated_fit') is True: +472 corr = covariance(y, correlation=True, **kwargs) +473 covdiag = np.diag(1 / np.asarray(dy_f)) +474 condn = np.linalg.cond(corr) +475 if condn > 0.1 / np.finfo(float).eps: +476 raise Exception(f"Cannot invert correlation matrix as its condition number exceeds machine precision ({condn:1.2e})") +477 if condn > 1 / np.sqrt(np.finfo(float).eps): +478 warnings.warn("Correlation matrix may be ill-conditioned, condition number: {%1.2e}" % (condn), RuntimeWarning) +479 chol = np.linalg.cholesky(corr) +480 chol_inv = scipy.linalg.solve_triangular(chol, covdiag, lower=True) +481 +482 def chisqfunc_corr(p): +483 model = func(p, x) +484 chisq = anp.sum(anp.dot(chol_inv, (y_f - model)) ** 2) +485 return chisq +486 +487 def chisqfunc(p): +488 model = func(p, x) +489 chisq = anp.sum(((y_f - model) / dy_f) ** 2) +490 return chisq +491 +492 output.method = kwargs.get('method', 'Levenberg-Marquardt') +493 if not silent: +494 print('Method:', output.method) +495 +496 if output.method != 'Levenberg-Marquardt': +497 if output.method == 'migrad': +498 fit_result = iminuit.minimize(chisqfunc, x0, tol=1e-4) # Stopping criterion 0.002 * tol * errordef +499 if kwargs.get('correlated_fit') is True: +500 fit_result = iminuit.minimize(chisqfunc_corr, fit_result.x, tol=1e-4) # Stopping criterion 0.002 * tol * errordef +501 output.iterations = fit_result.nfev +502 else: +503 fit_result = scipy.optimize.minimize(chisqfunc, x0, method=kwargs.get('method'), tol=1e-12) +504 if kwargs.get('correlated_fit') is True: +505 fit_result = scipy.optimize.minimize(chisqfunc_corr, fit_result.x, method=kwargs.get('method'), tol=1e-12) +506 output.iterations = fit_result.nit +507 +508 chisquare = fit_result.fun +509 +510 else: +511 if kwargs.get('correlated_fit') is True: +512 def chisqfunc_residuals_corr(p): +513 model = func(p, x) +514 chisq = anp.dot(chol_inv, (y_f - model)) +515 return chisq +516 +517 def chisqfunc_residuals(p): +518 model = func(p, x) +519 chisq = ((y_f - model) / dy_f) +520 return chisq +521 +522 fit_result = scipy.optimize.least_squares(chisqfunc_residuals, x0, method='lm', ftol=1e-15, gtol=1e-15, xtol=1e-15) +523 if kwargs.get('correlated_fit') is True: +524 fit_result = scipy.optimize.least_squares(chisqfunc_residuals_corr, fit_result.x, method='lm', ftol=1e-15, gtol=1e-15, xtol=1e-15) +525 +526 chisquare = np.sum(fit_result.fun ** 2) +527 if kwargs.get('correlated_fit') is True: +528 assert np.isclose(chisquare, chisqfunc_corr(fit_result.x), atol=1e-14) +529 else: +530 assert np.isclose(chisquare, chisqfunc(fit_result.x), atol=1e-14) +531 +532 output.iterations = fit_result.nfev +533 +534 if not fit_result.success: +535 raise Exception('The minimization procedure did not converge.') +536 +537 if x.shape[-1] - n_parms > 0: +538 output.chisquare_by_dof = chisquare / (x.shape[-1] - n_parms) +539 else: +540 output.chisquare_by_dof = float('nan') +541 +542 output.message = fit_result.message +543 if not silent: +544 print(fit_result.message) +545 print('chisquare/d.o.f.:', output.chisquare_by_dof) +546 +547 if kwargs.get('expected_chisquare') is True: +548 if kwargs.get('correlated_fit') is not True: +549 W = np.diag(1 / np.asarray(dy_f)) +550 cov = covariance(y) +551 A = W @ jacobian(func)(fit_result.x, x) +552 P_phi = A @ np.linalg.pinv(A.T @ A) @ A.T +553 expected_chisquare = np.trace((np.identity(x.shape[-1]) - P_phi) @ W @ cov @ W) +554 output.chisquare_by_expected_chisquare = chisquare / 
expected_chisquare +555 if not silent: +556 print('chisquare/expected_chisquare:', +557 output.chisquare_by_expected_chisquare) +558 +559 fitp = fit_result.x +560 try: +561 if kwargs.get('correlated_fit') is True: +562 hess = jacobian(jacobian(chisqfunc_corr))(fitp) +563 else: +564 hess = jacobian(jacobian(chisqfunc))(fitp) +565 except TypeError: +566 raise Exception("It is required to use autograd.numpy instead of numpy within fit functions, see the documentation for details.") from None +567 +568 if kwargs.get('correlated_fit') is True: +569 def chisqfunc_compact(d): +570 model = func(d[:n_parms], x) +571 chisq = anp.sum(anp.dot(chol_inv, (d[n_parms:] - model)) ** 2) +572 return chisq +573 +574 else: +575 def chisqfunc_compact(d): +576 model = func(d[:n_parms], x) +577 chisq = anp.sum(((d[n_parms:] - model) / dy_f) ** 2) +578 return chisq +579 +580 jac_jac = jacobian(jacobian(chisqfunc_compact))(np.concatenate((fitp, y_f))) +581 +582 # Compute hess^{-1} @ jac_jac[:n_parms, n_parms:] using LAPACK dgesv +583 try: +584 deriv = -scipy.linalg.solve(hess, jac_jac[:n_parms, n_parms:]) +585 except np.linalg.LinAlgError: +586 raise Exception("Cannot invert hessian matrix.") +587 +588 result = [] +589 for i in range(n_parms): +590 result.append(derived_observable(lambda x, **kwargs: (x[0] + np.finfo(np.float64).eps) / (y[0].value + np.finfo(np.float64).eps) * fit_result.x[i], list(y), man_grad=list(deriv[i]))) +591 +592 output.fit_parameters = result +593 +594 output.chisquare = chisquare +595 output.dof = x.shape[-1] - n_parms +596 output.p_value = 1 - scipy.stats.chi2.cdf(output.chisquare, output.dof) +597 +598 if kwargs.get('resplot') is True: +599 residual_plot(x, y, func, result) +600 +601 if kwargs.get('qqplot') is True: +602 qqplot(x, y, func, result) +603 +604 return output +605 606 -607 -608def fit_lin(x, y, **kwargs): -609 """Performs a linear fit to y = n + m * x and returns two Obs n, m. -610 -611 Parameters -612 ---------- -613 x : list -614 Can either be a list of floats in which case no xerror is assumed, or -615 a list of Obs, where the dvalues of the Obs are used as xerror for the fit. -616 y : list -617 List of Obs, the dvalues of the Obs are used as yerror for the fit. -618 """ -619 -620 def f(a, x): -621 y = a[0] + a[1] * x -622 return y -623 -624 if all(isinstance(n, Obs) for n in x): -625 out = total_least_squares(x, y, f, **kwargs) -626 return out.fit_parameters -627 elif all(isinstance(n, float) or isinstance(n, int) for n in x) or isinstance(x, np.ndarray): -628 out = least_squares(x, y, f, **kwargs) -629 return out.fit_parameters -630 else: -631 raise Exception('Unsupported types for x') +607def fit_lin(x, y, **kwargs): +608 """Performs a linear fit to y = n + m * x and returns two Obs n, m. +609 +610 Parameters +611 ---------- +612 x : list +613 Can either be a list of floats in which case no xerror is assumed, or +614 a list of Obs, where the dvalues of the Obs are used as xerror for the fit. +615 y : list +616 List of Obs, the dvalues of the Obs are used as yerror for the fit. 
+617 """ +618 +619 def f(a, x): +620 y = a[0] + a[1] * x +621 return y +622 +623 if all(isinstance(n, Obs) for n in x): +624 out = total_least_squares(x, y, f, **kwargs) +625 return out.fit_parameters +626 elif all(isinstance(n, float) or isinstance(n, int) for n in x) or isinstance(x, np.ndarray): +627 out = least_squares(x, y, f, **kwargs) +628 return out.fit_parameters +629 else: +630 raise Exception('Unsupported types for x') +631 632 -633 -634def qqplot(x, o_y, func, p): -635 """Generates a quantile-quantile plot of the fit result which can be used to -636 check if the residuals of the fit are gaussian distributed. -637 """ -638 -639 residuals = [] -640 for i_x, i_y in zip(x, o_y): -641 residuals.append((i_y - func(p, i_x)) / i_y.dvalue) -642 residuals = sorted(residuals) -643 my_y = [o.value for o in residuals] -644 probplot = scipy.stats.probplot(my_y) -645 my_x = probplot[0][0] -646 plt.figure(figsize=(8, 8 / 1.618)) -647 plt.errorbar(my_x, my_y, fmt='o') -648 fit_start = my_x[0] -649 fit_stop = my_x[-1] -650 samples = np.arange(fit_start, fit_stop, 0.01) -651 plt.plot(samples, samples, 'k--', zorder=11, label='Standard normal distribution') -652 plt.plot(samples, probplot[1][0] * samples + probplot[1][1], zorder=10, label='Least squares fit, r=' + str(np.around(probplot[1][2], 3)), marker='', ls='-') -653 -654 plt.xlabel('Theoretical quantiles') -655 plt.ylabel('Ordered Values') -656 plt.legend() -657 plt.draw() +633def qqplot(x, o_y, func, p): +634 """Generates a quantile-quantile plot of the fit result which can be used to +635 check if the residuals of the fit are gaussian distributed. +636 """ +637 +638 residuals = [] +639 for i_x, i_y in zip(x, o_y): +640 residuals.append((i_y - func(p, i_x)) / i_y.dvalue) +641 residuals = sorted(residuals) +642 my_y = [o.value for o in residuals] +643 probplot = scipy.stats.probplot(my_y) +644 my_x = probplot[0][0] +645 plt.figure(figsize=(8, 8 / 1.618)) +646 plt.errorbar(my_x, my_y, fmt='o') +647 fit_start = my_x[0] +648 fit_stop = my_x[-1] +649 samples = np.arange(fit_start, fit_stop, 0.01) +650 plt.plot(samples, samples, 'k--', zorder=11, label='Standard normal distribution') +651 plt.plot(samples, probplot[1][0] * samples + probplot[1][1], zorder=10, label='Least squares fit, r=' + str(np.around(probplot[1][2], 3)), marker='', ls='-') +652 +653 plt.xlabel('Theoretical quantiles') +654 plt.ylabel('Ordered Values') +655 plt.legend() +656 plt.draw() +657 658 -659 -660def residual_plot(x, y, func, fit_res): -661 """ Generates a plot which compares the fit to the data and displays the corresponding residuals""" -662 sorted_x = sorted(x) -663 xstart = sorted_x[0] - 0.5 * (sorted_x[1] - sorted_x[0]) -664 xstop = sorted_x[-1] + 0.5 * (sorted_x[-1] - sorted_x[-2]) -665 x_samples = np.arange(xstart, xstop + 0.01, 0.01) -666 -667 plt.figure(figsize=(8, 8 / 1.618)) -668 gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1], wspace=0.0, hspace=0.0) -669 ax0 = plt.subplot(gs[0]) -670 ax0.errorbar(x, [o.value for o in y], yerr=[o.dvalue for o in y], ls='none', fmt='o', capsize=3, markersize=5, label='Data') -671 ax0.plot(x_samples, func([o.value for o in fit_res], x_samples), label='Fit', zorder=10, ls='-', ms=0) -672 ax0.set_xticklabels([]) -673 ax0.set_xlim([xstart, xstop]) -674 ax0.set_xticklabels([]) -675 ax0.legend() -676 -677 residuals = (np.asarray([o.value for o in y]) - func([o.value for o in fit_res], x)) / np.asarray([o.dvalue for o in y]) -678 ax1 = plt.subplot(gs[1]) -679 ax1.plot(x, residuals, 'ko', ls='none', markersize=5) -680 
ax1.tick_params(direction='out') -681 ax1.tick_params(axis="x", bottom=True, top=True, labelbottom=True) -682 ax1.axhline(y=0.0, ls='--', color='k', marker=" ") -683 ax1.fill_between(x_samples, -1.0, 1.0, alpha=0.1, facecolor='k') -684 ax1.set_xlim([xstart, xstop]) -685 ax1.set_ylabel('Residuals') -686 plt.subplots_adjust(wspace=None, hspace=None) -687 plt.draw() +659def residual_plot(x, y, func, fit_res): +660 """ Generates a plot which compares the fit to the data and displays the corresponding residuals""" +661 sorted_x = sorted(x) +662 xstart = sorted_x[0] - 0.5 * (sorted_x[1] - sorted_x[0]) +663 xstop = sorted_x[-1] + 0.5 * (sorted_x[-1] - sorted_x[-2]) +664 x_samples = np.arange(xstart, xstop + 0.01, 0.01) +665 +666 plt.figure(figsize=(8, 8 / 1.618)) +667 gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1], wspace=0.0, hspace=0.0) +668 ax0 = plt.subplot(gs[0]) +669 ax0.errorbar(x, [o.value for o in y], yerr=[o.dvalue for o in y], ls='none', fmt='o', capsize=3, markersize=5, label='Data') +670 ax0.plot(x_samples, func([o.value for o in fit_res], x_samples), label='Fit', zorder=10, ls='-', ms=0) +671 ax0.set_xticklabels([]) +672 ax0.set_xlim([xstart, xstop]) +673 ax0.set_xticklabels([]) +674 ax0.legend() +675 +676 residuals = (np.asarray([o.value for o in y]) - func([o.value for o in fit_res], x)) / np.asarray([o.dvalue for o in y]) +677 ax1 = plt.subplot(gs[1]) +678 ax1.plot(x, residuals, 'ko', ls='none', markersize=5) +679 ax1.tick_params(direction='out') +680 ax1.tick_params(axis="x", bottom=True, top=True, labelbottom=True) +681 ax1.axhline(y=0.0, ls='--', color='k', marker=" ") +682 ax1.fill_between(x_samples, -1.0, 1.0, alpha=0.1, facecolor='k') +683 ax1.set_xlim([xstart, xstop]) +684 ax1.set_ylabel('Residuals') +685 plt.subplots_adjust(wspace=None, hspace=None) +686 plt.draw() +687 688 -689 -690def error_band(x, func, beta): -691 """Returns the error band for an array of sample values x, for given fit function func with optimized parameters beta.""" -692 cov = covariance(beta) -693 if np.any(np.abs(cov - cov.T) > 1000 * np.finfo(np.float64).eps): -694 warnings.warn("Covariance matrix is not symmetric within floating point precision", RuntimeWarning) -695 -696 deriv = [] -697 for i, item in enumerate(x): -698 deriv.append(np.array(egrad(func)([o.value for o in beta], item))) -699 -700 err = [] -701 for i, item in enumerate(x): -702 err.append(np.sqrt(deriv[i] @ cov @ deriv[i])) -703 err = np.array(err) -704 -705 return err +689def error_band(x, func, beta): +690 """Returns the error band for an array of sample values x, for given fit function func with optimized parameters beta.""" +691 cov = covariance(beta) +692 if np.any(np.abs(cov - cov.T) > 1000 * np.finfo(np.float64).eps): +693 warnings.warn("Covariance matrix is not symmetric within floating point precision", RuntimeWarning) +694 +695 deriv = [] +696 for i, item in enumerate(x): +697 deriv.append(np.array(egrad(func)([o.value for o in beta], item))) +698 +699 err = [] +700 for i, item in enumerate(x): +701 err.append(np.sqrt(deriv[i] @ cov @ deriv[i])) +702 err = np.array(err) +703 +704 return err +705 706 -707 -708def ks_test(objects=None): -709 """Performs a Kolmogorov–Smirnov test for the p-values of all fit object. -710 -711 Parameters -712 ---------- -713 objects : list -714 List of fit results to include in the analysis (optional). 
-715 """ -716 -717 if objects is None: -718 obs_list = [] -719 for obj in gc.get_objects(): -720 if isinstance(obj, Fit_result): -721 obs_list.append(obj) -722 else: -723 obs_list = objects -724 -725 p_values = [o.p_value for o in obs_list] -726 -727 bins = len(p_values) -728 x = np.arange(0, 1.001, 0.001) -729 plt.plot(x, x, 'k', zorder=1) -730 plt.xlim(0, 1) -731 plt.ylim(0, 1) -732 plt.xlabel('p-value') -733 plt.ylabel('Cumulative probability') -734 plt.title(str(bins) + ' p-values') -735 -736 n = np.arange(1, bins + 1) / np.float64(bins) -737 Xs = np.sort(p_values) -738 plt.step(Xs, n) -739 diffs = n - Xs -740 loc_max_diff = np.argmax(np.abs(diffs)) -741 loc = Xs[loc_max_diff] -742 plt.annotate('', xy=(loc, loc), xytext=(loc, loc + diffs[loc_max_diff]), arrowprops=dict(arrowstyle='<->', shrinkA=0, shrinkB=0)) -743 plt.draw() -744 -745 print(scipy.stats.kstest(p_values, 'uniform')) +707def ks_test(objects=None): +708 """Performs a Kolmogorov–Smirnov test for the p-values of all fit object. +709 +710 Parameters +711 ---------- +712 objects : list +713 List of fit results to include in the analysis (optional). +714 """ +715 +716 if objects is None: +717 obs_list = [] +718 for obj in gc.get_objects(): +719 if isinstance(obj, Fit_result): +720 obs_list.append(obj) +721 else: +722 obs_list = objects +723 +724 p_values = [o.p_value for o in obs_list] +725 +726 bins = len(p_values) +727 x = np.arange(0, 1.001, 0.001) +728 plt.plot(x, x, 'k', zorder=1) +729 plt.xlim(0, 1) +730 plt.ylim(0, 1) +731 plt.xlabel('p-value') +732 plt.ylabel('Cumulative probability') +733 plt.title(str(bins) + ' p-values') +734 +735 n = np.arange(1, bins + 1) / np.float64(bins) +736 Xs = np.sort(p_values) +737 plt.step(Xs, n) +738 diffs = n - Xs +739 loc_max_diff = np.argmax(np.abs(diffs)) +740 loc = Xs[loc_max_diff] +741 plt.annotate('', xy=(loc, loc), xytext=(loc, loc + diffs[loc_max_diff]), arrowprops=dict(arrowstyle='<->', shrinkA=0, shrinkB=0)) +742 plt.draw() +743 +744 print(scipy.stats.kstest(p_values, 'uniform')) @@ -861,47 +860,47 @@ -
19class Fit_result(Sequence):
-20    """Represents fit results.
-21
-22    Attributes
-23    ----------
-24    fit_parameters : list
-25        results for the individual fit parameters,
-26        also accessible via indices.
-27    """
-28
-29    def __init__(self):
-30        self.fit_parameters = None
-31
-32    def __getitem__(self, idx):
-33        return self.fit_parameters[idx]
-34
-35    def __len__(self):
-36        return len(self.fit_parameters)
-37
-38    def gamma_method(self, **kwargs):
-39        """Apply the gamma method to all fit parameters"""
-40        [o.gamma_method(**kwargs) for o in self.fit_parameters]
-41
-42    def __str__(self):
-43        my_str = 'Goodness of fit:\n'
-44        if hasattr(self, 'chisquare_by_dof'):
-45            my_str += '\u03C7\u00b2/d.o.f. = ' + f'{self.chisquare_by_dof:2.6f}' + '\n'
-46        elif hasattr(self, 'residual_variance'):
-47            my_str += 'residual variance = ' + f'{self.residual_variance:2.6f}' + '\n'
-48        if hasattr(self, 'chisquare_by_expected_chisquare'):
-49            my_str += '\u03C7\u00b2/\u03C7\u00b2exp  = ' + f'{self.chisquare_by_expected_chisquare:2.6f}' + '\n'
-50        if hasattr(self, 'p_value'):
-51            my_str += 'p-value   = ' + f'{self.p_value:2.4f}' + '\n'
-52        my_str += 'Fit parameters:\n'
-53        for i_par, par in enumerate(self.fit_parameters):
-54            my_str += str(i_par) + '\t' + ' ' * int(par >= 0) + str(par).rjust(int(par < 0.0)) + '\n'
-55        return my_str
-56
-57    def __repr__(self):
-58        m = max(map(len, list(self.__dict__.keys()))) + 1
-59        return '\n'.join([key.rjust(m) + ': ' + repr(value) for key, value in sorted(self.__dict__.items())])
+            
18class Fit_result(Sequence):
+19    """Represents fit results.
+20
+21    Attributes
+22    ----------
+23    fit_parameters : list
+24        results for the individual fit parameters,
+25        also accessible via indices.
+26    """
+27
+28    def __init__(self):
+29        self.fit_parameters = None
+30
+31    def __getitem__(self, idx):
+32        return self.fit_parameters[idx]
+33
+34    def __len__(self):
+35        return len(self.fit_parameters)
+36
+37    def gamma_method(self, **kwargs):
+38        """Apply the gamma method to all fit parameters"""
+39        [o.gamma_method(**kwargs) for o in self.fit_parameters]
+40
+41    def __str__(self):
+42        my_str = 'Goodness of fit:\n'
+43        if hasattr(self, 'chisquare_by_dof'):
+44            my_str += '\u03C7\u00b2/d.o.f. = ' + f'{self.chisquare_by_dof:2.6f}' + '\n'
+45        elif hasattr(self, 'residual_variance'):
+46            my_str += 'residual variance = ' + f'{self.residual_variance:2.6f}' + '\n'
+47        if hasattr(self, 'chisquare_by_expected_chisquare'):
+48            my_str += '\u03C7\u00b2/\u03C7\u00b2exp  = ' + f'{self.chisquare_by_expected_chisquare:2.6f}' + '\n'
+49        if hasattr(self, 'p_value'):
+50            my_str += 'p-value   = ' + f'{self.p_value:2.4f}' + '\n'
+51        my_str += 'Fit parameters:\n'
+52        for i_par, par in enumerate(self.fit_parameters):
+53            my_str += str(i_par) + '\t' + ' ' * int(par >= 0) + str(par).rjust(int(par < 0.0)) + '\n'
+54        return my_str
+55
+56    def __repr__(self):
+57        m = max(map(len, list(self.__dict__.keys()))) + 1
+58        return '\n'.join([key.rjust(m) + ': ' + repr(value) for key, value in sorted(self.__dict__.items())])
 
@@ -927,8 +926,8 @@ also accessible via indices.
-
29    def __init__(self):
-30        self.fit_parameters = None
+            
28    def __init__(self):
+29        self.fit_parameters = None
 
@@ -946,9 +945,9 @@ also accessible via indices.
-
38    def gamma_method(self, **kwargs):
-39        """Apply the gamma method to all fit parameters"""
-40        [o.gamma_method(**kwargs) for o in self.fit_parameters]
+            
37    def gamma_method(self, **kwargs):
+38        """Apply the gamma method to all fit parameters"""
+39        [o.gamma_method(**kwargs) for o in self.fit_parameters]
 
@@ -979,68 +978,68 @@ also accessible via indices.
-
 62def least_squares(x, y, func, priors=None, silent=False, **kwargs):
- 63    r'''Performs a non-linear fit to y = func(x).
- 64
- 65    Parameters
- 66    ----------
- 67    x : list
- 68        list of floats.
- 69    y : list
- 70        list of Obs.
- 71    func : object
- 72        fit function, has to be of the form
- 73
- 74        ```python
- 75        import autograd.numpy as anp
- 76
- 77        def func(a, x):
- 78            return a[0] + a[1] * x + a[2] * anp.sinh(x)
- 79        ```
- 80
- 81        For multiple x values func can be of the form
- 82
- 83        ```python
- 84        def func(a, x):
- 85            (x1, x2) = x
- 86            return a[0] * x1 ** 2 + a[1] * x2
- 87        ```
- 88
- 89        It is important that all numpy functions refer to autograd.numpy, otherwise the differentiation
- 90        will not work.
- 91    priors : list, optional
- 92        priors has to be a list with an entry for every parameter in the fit. The entries can either be
- 93        Obs (e.g. results from a previous fit) or strings containing a value and an error formatted like
- 94        0.548(23), 500(40) or 0.5(0.4)
- 95    silent : bool, optional
- 96        If true all output to the console is omitted (default False).
- 97    initial_guess : list
- 98        can provide an initial guess for the input parameters. Relevant for
- 99        non-linear fits with many parameters. In case of correlated fits the guess is used to perform
-100        an uncorrelated fit which then serves as guess for the correlated fit.
-101    method : str, optional
-102        can be used to choose an alternative method for the minimization of chisquare.
-103        The possible methods are the ones which can be used for scipy.optimize.minimize and
-104        migrad of iminuit. If no method is specified, Levenberg-Marquard is used.
-105        Reliable alternatives are migrad, Powell and Nelder-Mead.
-106    correlated_fit : bool
-107        If True, use the full inverse covariance matrix in the definition of the chisquare cost function.
-108        For details about how the covariance matrix is estimated see `pyerrors.obs.covariance`.
-109        In practice the correlation matrix is Cholesky decomposed and inverted (instead of the covariance matrix).
-110        This procedure should be numerically more stable as the correlation matrix is typically better conditioned (Jacobi preconditioning).
-111        At the moment this option only works for `prior==None` and when no `method` is given.
-112    expected_chisquare : bool
-113        If True estimates the expected chisquare which is
-114        corrected by effects caused by correlated input data (default False).
-115    resplot : bool
-116        If True, a plot which displays fit, data and residuals is generated (default False).
-117    qqplot : bool
-118        If True, a quantile-quantile plot of the fit result is generated (default False).
-119    '''
-120    if priors is not None:
-121        return _prior_fit(x, y, func, priors, silent=silent, **kwargs)
-122    else:
-123        return _standard_fit(x, y, func, silent=silent, **kwargs)
+            
 61def least_squares(x, y, func, priors=None, silent=False, **kwargs):
+ 62    r'''Performs a non-linear fit to y = func(x).
+ 63
+ 64    Parameters
+ 65    ----------
+ 66    x : list
+ 67        list of floats.
+ 68    y : list
+ 69        list of Obs.
+ 70    func : object
+ 71        fit function, has to be of the form
+ 72
+ 73        ```python
+ 74        import autograd.numpy as anp
+ 75
+ 76        def func(a, x):
+ 77            return a[0] + a[1] * x + a[2] * anp.sinh(x)
+ 78        ```
+ 79
+ 80        For multiple x values func can be of the form
+ 81
+ 82        ```python
+ 83        def func(a, x):
+ 84            (x1, x2) = x
+ 85            return a[0] * x1 ** 2 + a[1] * x2
+ 86        ```
+ 87
+ 88        It is important that all numpy functions refer to autograd.numpy, otherwise the differentiation
+ 89        will not work.
+ 90    priors : list, optional
+ 91        priors has to be a list with an entry for every parameter in the fit. The entries can either be
+ 92        Obs (e.g. results from a previous fit) or strings containing a value and an error formatted like
+ 93        0.548(23), 500(40) or 0.5(0.4)
+ 94    silent : bool, optional
+ 95        If True, all output to the console is omitted (default False).
+ 96    initial_guess : list
+ 97        can provide an initial guess for the input parameters. Relevant for
+ 98        non-linear fits with many parameters. In case of correlated fits the guess is used to perform
+ 99        an uncorrelated fit which then serves as guess for the correlated fit.
+100    method : str, optional
+101        can be used to choose an alternative method for the minimization of chisquare.
+102        The possible methods are the ones which can be used for scipy.optimize.minimize and
+103        migrad of iminuit. If no method is specified, Levenberg-Marquardt is used.
+104        Reliable alternatives are migrad, Powell and Nelder-Mead.
+105    correlated_fit : bool
+106        If True, use the full inverse covariance matrix in the definition of the chisquare cost function.
+107        For details about how the covariance matrix is estimated see `pyerrors.obs.covariance`.
+108        In practice the correlation matrix is Cholesky decomposed and inverted (instead of the covariance matrix).
+109        This procedure should be numerically more stable as the correlation matrix is typically better conditioned (Jacobi preconditioning).
+110        At the moment this option only works for `priors==None` and when no `method` is given.
+111    expected_chisquare : bool
+112        If True, estimates the expected chisquare which is
+113        corrected by effects caused by correlated input data (default False).
+114    resplot : bool
+115        If True, a plot which displays fit, data and residuals is generated (default False).
+116    qqplot : bool
+117        If True, a quantile-quantile plot of the fit result is generated (default False).
+118    '''
+119    if priors is not None:
+120        return _prior_fit(x, y, func, priors, silent=silent, **kwargs)
+121    else:
+122        return _standard_fit(x, y, func, silent=silent, **kwargs)
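A minimal usage sketch of `least_squares` (not part of the patch): it assumes `pyerrors` is importable as `pe` and fabricates toy data with `pe.pseudo_Obs`; the names and numbers are illustrative only.

```python
import autograd.numpy as anp  # the fit function must use autograd.numpy
import pyerrors as pe

def func(a, x):
    return a[0] + a[1] * anp.exp(-a[2] * x)

x = [1., 2., 3., 4.]
y = [pe.pseudo_Obs(2.0 + 1.5 * anp.exp(-0.3 * xi), 0.05, 'toy_ensemble') for xi in x]
for o in y:
    o.gamma_method()                            # y errors are required before fitting

fit_res = pe.fits.least_squares(x, y, func)     # resplot=True / qqplot=True give diagnostic plots
fit_res.gamma_method()                          # propagate errors to the fit parameters
print(fit_res)
```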
 
@@ -1115,190 +1114,190 @@ If True, a quantile-quantile plot of the fit result is generated (default False)
-
126def total_least_squares(x, y, func, silent=False, **kwargs):
-127    r'''Performs a non-linear fit to y = func(x) and returns a list of Obs corresponding to the fit parameters.
-128
-129    Parameters
-130    ----------
-131    x : list
-132        list of Obs, or a tuple of lists of Obs
-133    y : list
-134        list of Obs. The dvalues of the Obs are used as x- and yerror for the fit.
-135    func : object
-136        func has to be of the form
-137
-138        ```python
-139        import autograd.numpy as anp
-140
-141        def func(a, x):
-142            return a[0] + a[1] * x + a[2] * anp.sinh(x)
-143        ```
-144
-145        For multiple x values func can be of the form
-146
-147        ```python
-148        def func(a, x):
-149            (x1, x2) = x
-150            return a[0] * x1 ** 2 + a[1] * x2
-151        ```
-152
-153        It is important that all numpy functions refer to autograd.numpy, otherwise the differentiation
-154        will not work.
-155    silent : bool, optional
-156        If true all output to the console is omitted (default False).
-157    initial_guess : list
-158        can provide an initial guess for the input parameters. Relevant for non-linear
-159        fits with many parameters.
-160    expected_chisquare : bool
-161        If true prints the expected chisquare which is
-162        corrected by effects caused by correlated input data.
-163        This can take a while as the full correlation matrix
-164        has to be calculated (default False).
-165
-166    Notes
-167    -----
-168    Based on the orthogonal distance regression module of scipy
-169    '''
-170
-171    output = Fit_result()
-172
-173    output.fit_function = func
-174
-175    x = np.array(x)
-176
-177    x_shape = x.shape
-178
-179    if not callable(func):
-180        raise TypeError('func has to be a function.')
-181
-182    for i in range(25):
-183        try:
-184            func(np.arange(i), x.T[0])
-185        except Exception:
-186            pass
-187        else:
-188            break
-189
-190    n_parms = i
-191    if not silent:
-192        print('Fit with', n_parms, 'parameter' + 's' * (n_parms > 1))
-193
-194    x_f = np.vectorize(lambda o: o.value)(x)
-195    dx_f = np.vectorize(lambda o: o.dvalue)(x)
-196    y_f = np.array([o.value for o in y])
-197    dy_f = np.array([o.dvalue for o in y])
-198
-199    if np.any(np.asarray(dx_f) <= 0.0):
-200        raise Exception('No x errors available, run the gamma method first.')
-201
-202    if np.any(np.asarray(dy_f) <= 0.0):
-203        raise Exception('No y errors available, run the gamma method first.')
-204
-205    if 'initial_guess' in kwargs:
-206        x0 = kwargs.get('initial_guess')
-207        if len(x0) != n_parms:
-208            raise Exception('Initial guess does not have the correct length: %d vs. %d' % (len(x0), n_parms))
-209    else:
-210        x0 = [1] * n_parms
-211
-212    data = RealData(x_f, y_f, sx=dx_f, sy=dy_f)
-213    model = Model(func)
-214    odr = ODR(data, model, x0, partol=np.finfo(np.float64).eps)
-215    odr.set_job(fit_type=0, deriv=1)
-216    out = odr.run()
-217
-218    output.residual_variance = out.res_var
-219
-220    output.method = 'ODR'
-221
-222    output.message = out.stopreason
-223
-224    output.xplus = out.xplus
-225
-226    if not silent:
-227        print('Method: ODR')
-228        print(*out.stopreason)
-229        print('Residual variance:', output.residual_variance)
-230
-231    if out.info > 3:
-232        raise Exception('The minimization procedure did not converge.')
-233
-234    m = x_f.size
-235
-236    def odr_chisquare(p):
-237        model = func(p[:n_parms], p[n_parms:].reshape(x_shape))
-238        chisq = anp.sum(((y_f - model) / dy_f) ** 2) + anp.sum(((x_f - p[n_parms:].reshape(x_shape)) / dx_f) ** 2)
-239        return chisq
-240
-241    if kwargs.get('expected_chisquare') is True:
-242        W = np.diag(1 / np.asarray(np.concatenate((dy_f.ravel(), dx_f.ravel()))))
-243
-244        if kwargs.get('covariance') is not None:
-245            cov = kwargs.get('covariance')
-246        else:
-247            cov = covariance(np.concatenate((y, x.ravel())))
-248
-249        number_of_x_parameters = int(m / x_f.shape[-1])
-250
-251        old_jac = jacobian(func)(out.beta, out.xplus)
-252        fused_row1 = np.concatenate((old_jac, np.concatenate((number_of_x_parameters * [np.zeros(old_jac.shape)]), axis=0)))
-253        fused_row2 = np.concatenate((jacobian(lambda x, y: func(y, x))(out.xplus, out.beta).reshape(x_f.shape[-1], x_f.shape[-1] * number_of_x_parameters), np.identity(number_of_x_parameters * old_jac.shape[0])))
-254        new_jac = np.concatenate((fused_row1, fused_row2), axis=1)
-255
-256        A = W @ new_jac
-257        P_phi = A @ np.linalg.pinv(A.T @ A) @ A.T
-258        expected_chisquare = np.trace((np.identity(P_phi.shape[0]) - P_phi) @ W @ cov @ W)
-259        if expected_chisquare <= 0.0:
-260            warnings.warn("Negative expected_chisquare.", RuntimeWarning)
-261            expected_chisquare = np.abs(expected_chisquare)
-262        output.chisquare_by_expected_chisquare = odr_chisquare(np.concatenate((out.beta, out.xplus.ravel()))) / expected_chisquare
-263        if not silent:
-264            print('chisquare/expected_chisquare:',
-265                  output.chisquare_by_expected_chisquare)
-266
-267    fitp = out.beta
-268    try:
-269        hess = jacobian(jacobian(odr_chisquare))(np.concatenate((fitp, out.xplus.ravel())))
-270    except TypeError:
-271        raise Exception("It is required to use autograd.numpy instead of numpy within fit functions, see the documentation for details.") from None
-272
-273    def odr_chisquare_compact_x(d):
-274        model = func(d[:n_parms], d[n_parms:n_parms + m].reshape(x_shape))
-275        chisq = anp.sum(((y_f - model) / dy_f) ** 2) + anp.sum(((d[n_parms + m:].reshape(x_shape) - d[n_parms:n_parms + m].reshape(x_shape)) / dx_f) ** 2)
-276        return chisq
-277
-278    jac_jac_x = jacobian(jacobian(odr_chisquare_compact_x))(np.concatenate((fitp, out.xplus.ravel(), x_f.ravel())))
-279
-280    # Compute hess^{-1} @ jac_jac_x[:n_parms + m, n_parms + m:] using LAPACK dgesv
-281    try:
-282        deriv_x = -scipy.linalg.solve(hess, jac_jac_x[:n_parms + m, n_parms + m:])
-283    except np.linalg.LinAlgError:
-284        raise Exception("Cannot invert hessian matrix.")
-285
-286    def odr_chisquare_compact_y(d):
-287        model = func(d[:n_parms], d[n_parms:n_parms + m].reshape(x_shape))
-288        chisq = anp.sum(((d[n_parms + m:] - model) / dy_f) ** 2) + anp.sum(((x_f - d[n_parms:n_parms + m].reshape(x_shape)) / dx_f) ** 2)
-289        return chisq
-290
-291    jac_jac_y = jacobian(jacobian(odr_chisquare_compact_y))(np.concatenate((fitp, out.xplus.ravel(), y_f)))
-292
-293    # Compute hess^{-1} @ jac_jac_y[:n_parms + m, n_parms + m:] using LAPACK dgesv
-294    try:
-295        deriv_y = -scipy.linalg.solve(hess, jac_jac_y[:n_parms + m, n_parms + m:])
-296    except np.linalg.LinAlgError:
-297        raise Exception("Cannot invert hessian matrix.")
-298
-299    result = []
-300    for i in range(n_parms):
-301        result.append(derived_observable(lambda my_var, **kwargs: (my_var[0] + np.finfo(np.float64).eps) / (x.ravel()[0].value + np.finfo(np.float64).eps) * out.beta[i], list(x.ravel()) + list(y), man_grad=list(deriv_x[i]) + list(deriv_y[i])))
-302
-303    output.fit_parameters = result
-304
-305    output.odr_chisquare = odr_chisquare(np.concatenate((out.beta, out.xplus.ravel())))
-306    output.dof = x.shape[-1] - n_parms
-307    output.p_value = 1 - chi2.cdf(output.odr_chisquare, output.dof)
-308
-309    return output
+            
125def total_least_squares(x, y, func, silent=False, **kwargs):
+126    r'''Performs a non-linear fit to y = func(x) and returns a list of Obs corresponding to the fit parameters.
+127
+128    Parameters
+129    ----------
+130    x : list
+131        list of Obs, or a tuple of lists of Obs
+132    y : list
+133        list of Obs. The dvalues of the x and y Obs are used as x- and y-errors for the fit.
+134    func : object
+135        func has to be of the form
+136
+137        ```python
+138        import autograd.numpy as anp
+139
+140        def func(a, x):
+141            return a[0] + a[1] * x + a[2] * anp.sinh(x)
+142        ```
+143
+144        For multiple x values func can be of the form
+145
+146        ```python
+147        def func(a, x):
+148            (x1, x2) = x
+149            return a[0] * x1 ** 2 + a[1] * x2
+150        ```
+151
+152        It is important that all numpy functions refer to autograd.numpy, otherwise the differentiation
+153        will not work.
+154    silent : bool, optional
+155        If True, all output to the console is omitted (default False).
+156    initial_guess : list
+157        can provide an initial guess for the input parameters. Relevant for non-linear
+158        fits with many parameters.
+159    expected_chisquare : bool
+160        If True, prints the expected chisquare which is
+161        corrected by effects caused by correlated input data.
+162        This can take a while as the full correlation matrix
+163        has to be calculated (default False).
+164
+165    Notes
+166    -----
+167    Based on the orthogonal distance regression module of scipy
+168    '''
+169
+170    output = Fit_result()
+171
+172    output.fit_function = func
+173
+174    x = np.array(x)
+175
+176    x_shape = x.shape
+177
+178    if not callable(func):
+179        raise TypeError('func has to be a function.')
+180
+181    for i in range(25):
+182        try:
+183            func(np.arange(i), x.T[0])
+184        except Exception:
+185            pass
+186        else:
+187            break
+188
+189    n_parms = i
+190    if not silent:
+191        print('Fit with', n_parms, 'parameter' + 's' * (n_parms > 1))
+192
+193    x_f = np.vectorize(lambda o: o.value)(x)
+194    dx_f = np.vectorize(lambda o: o.dvalue)(x)
+195    y_f = np.array([o.value for o in y])
+196    dy_f = np.array([o.dvalue for o in y])
+197
+198    if np.any(np.asarray(dx_f) <= 0.0):
+199        raise Exception('No x errors available, run the gamma method first.')
+200
+201    if np.any(np.asarray(dy_f) <= 0.0):
+202        raise Exception('No y errors available, run the gamma method first.')
+203
+204    if 'initial_guess' in kwargs:
+205        x0 = kwargs.get('initial_guess')
+206        if len(x0) != n_parms:
+207            raise Exception('Initial guess does not have the correct length: %d vs. %d' % (len(x0), n_parms))
+208    else:
+209        x0 = [1] * n_parms
+210
+211    data = RealData(x_f, y_f, sx=dx_f, sy=dy_f)
+212    model = Model(func)
+213    odr = ODR(data, model, x0, partol=np.finfo(np.float64).eps)
+214    odr.set_job(fit_type=0, deriv=1)
+215    out = odr.run()
+216
+217    output.residual_variance = out.res_var
+218
+219    output.method = 'ODR'
+220
+221    output.message = out.stopreason
+222
+223    output.xplus = out.xplus
+224
+225    if not silent:
+226        print('Method: ODR')
+227        print(*out.stopreason)
+228        print('Residual variance:', output.residual_variance)
+229
+230    if out.info > 3:
+231        raise Exception('The minimization procedure did not converge.')
+232
+233    m = x_f.size
+234
+235    def odr_chisquare(p):
+236        model = func(p[:n_parms], p[n_parms:].reshape(x_shape))
+237        chisq = anp.sum(((y_f - model) / dy_f) ** 2) + anp.sum(((x_f - p[n_parms:].reshape(x_shape)) / dx_f) ** 2)
+238        return chisq
+239
+240    if kwargs.get('expected_chisquare') is True:
+241        W = np.diag(1 / np.asarray(np.concatenate((dy_f.ravel(), dx_f.ravel()))))
+242
+243        if kwargs.get('covariance') is not None:
+244            cov = kwargs.get('covariance')
+245        else:
+246            cov = covariance(np.concatenate((y, x.ravel())))
+247
+248        number_of_x_parameters = int(m / x_f.shape[-1])
+249
+250        old_jac = jacobian(func)(out.beta, out.xplus)
+251        fused_row1 = np.concatenate((old_jac, np.concatenate((number_of_x_parameters * [np.zeros(old_jac.shape)]), axis=0)))
+252        fused_row2 = np.concatenate((jacobian(lambda x, y: func(y, x))(out.xplus, out.beta).reshape(x_f.shape[-1], x_f.shape[-1] * number_of_x_parameters), np.identity(number_of_x_parameters * old_jac.shape[0])))
+253        new_jac = np.concatenate((fused_row1, fused_row2), axis=1)
+254
+255        A = W @ new_jac
+256        P_phi = A @ np.linalg.pinv(A.T @ A) @ A.T
+257        expected_chisquare = np.trace((np.identity(P_phi.shape[0]) - P_phi) @ W @ cov @ W)
+258        if expected_chisquare <= 0.0:
+259            warnings.warn("Negative expected_chisquare.", RuntimeWarning)
+260            expected_chisquare = np.abs(expected_chisquare)
+261        output.chisquare_by_expected_chisquare = odr_chisquare(np.concatenate((out.beta, out.xplus.ravel()))) / expected_chisquare
+262        if not silent:
+263            print('chisquare/expected_chisquare:',
+264                  output.chisquare_by_expected_chisquare)
+265
+266    fitp = out.beta
+267    try:
+268        hess = jacobian(jacobian(odr_chisquare))(np.concatenate((fitp, out.xplus.ravel())))
+269    except TypeError:
+270        raise Exception("It is required to use autograd.numpy instead of numpy within fit functions, see the documentation for details.") from None
+271
+272    def odr_chisquare_compact_x(d):
+273        model = func(d[:n_parms], d[n_parms:n_parms + m].reshape(x_shape))
+274        chisq = anp.sum(((y_f - model) / dy_f) ** 2) + anp.sum(((d[n_parms + m:].reshape(x_shape) - d[n_parms:n_parms + m].reshape(x_shape)) / dx_f) ** 2)
+275        return chisq
+276
+277    jac_jac_x = jacobian(jacobian(odr_chisquare_compact_x))(np.concatenate((fitp, out.xplus.ravel(), x_f.ravel())))
+278
+279    # Compute hess^{-1} @ jac_jac_x[:n_parms + m, n_parms + m:] using LAPACK dgesv
+280    try:
+281        deriv_x = -scipy.linalg.solve(hess, jac_jac_x[:n_parms + m, n_parms + m:])
+282    except np.linalg.LinAlgError:
+283        raise Exception("Cannot invert hessian matrix.")
+284
+285    def odr_chisquare_compact_y(d):
+286        model = func(d[:n_parms], d[n_parms:n_parms + m].reshape(x_shape))
+287        chisq = anp.sum(((d[n_parms + m:] - model) / dy_f) ** 2) + anp.sum(((x_f - d[n_parms:n_parms + m].reshape(x_shape)) / dx_f) ** 2)
+288        return chisq
+289
+290    jac_jac_y = jacobian(jacobian(odr_chisquare_compact_y))(np.concatenate((fitp, out.xplus.ravel(), y_f)))
+291
+292    # Compute hess^{-1} @ jac_jac_y[:n_parms + m, n_parms + m:] using LAPACK dgesv
+293    try:
+294        deriv_y = -scipy.linalg.solve(hess, jac_jac_y[:n_parms + m, n_parms + m:])
+295    except np.linalg.LinAlgError:
+296        raise Exception("Cannot invert hessian matrix.")
+297
+298    result = []
+299    for i in range(n_parms):
+300        result.append(derived_observable(lambda my_var, **kwargs: (my_var[0] + np.finfo(np.float64).eps) / (x.ravel()[0].value + np.finfo(np.float64).eps) * out.beta[i], list(x.ravel()) + list(y), man_grad=list(deriv_x[i]) + list(deriv_y[i])))
+301
+302    output.fit_parameters = result
+303
+304    output.odr_chisquare = odr_chisquare(np.concatenate((out.beta, out.xplus.ravel())))
+305    output.dof = x.shape[-1] - n_parms
+306    output.p_value = 1 - scipy.stats.chi2.cdf(output.odr_chisquare, output.dof)
+307
+308    return output
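A hedged sketch of `total_least_squares` for the case where the x values carry errors as well (again assuming `pyerrors` as `pe` and toy data; for non-linear models `autograd.numpy` has to be used inside `func`):

```python
import pyerrors as pe

def func(a, x):
    return a[0] + a[1] * x                      # use autograd.numpy inside func for non-linear models

x_vals = [1., 2., 3., 4.]
x = [pe.pseudo_Obs(xi, 0.01, 'toy_ensemble') for xi in x_vals]
y = [pe.pseudo_Obs(0.5 + 0.7 * xi, 0.02, 'toy_ensemble') for xi in x_vals]
for o in x + y:
    o.gamma_method()                            # both x and y errors must be available

odr_res = pe.fits.total_least_squares(x, y, func)
odr_res.gamma_method()
print(odr_res)
```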
 
@@ -1359,30 +1358,30 @@ has to be calculated (default False).
-
609def fit_lin(x, y, **kwargs):
-610    """Performs a linear fit to y = n + m * x and returns two Obs n, m.
-611
-612    Parameters
-613    ----------
-614    x : list
-615        Can either be a list of floats in which case no xerror is assumed, or
-616        a list of Obs, where the dvalues of the Obs are used as xerror for the fit.
-617    y : list
-618        List of Obs, the dvalues of the Obs are used as yerror for the fit.
-619    """
-620
-621    def f(a, x):
-622        y = a[0] + a[1] * x
-623        return y
-624
-625    if all(isinstance(n, Obs) for n in x):
-626        out = total_least_squares(x, y, f, **kwargs)
-627        return out.fit_parameters
-628    elif all(isinstance(n, float) or isinstance(n, int) for n in x) or isinstance(x, np.ndarray):
-629        out = least_squares(x, y, f, **kwargs)
-630        return out.fit_parameters
-631    else:
-632        raise Exception('Unsupported types for x')
+            
608def fit_lin(x, y, **kwargs):
+609    """Performs a linear fit to y = n + m * x and returns two Obs n, m.
+610
+611    Parameters
+612    ----------
+613    x : list
+614        Can either be a list of floats in which case no xerror is assumed, or
+615        a list of Obs, where the dvalues of the Obs are used as xerror for the fit.
+616    y : list
+617        List of Obs, the dvalues of the Obs are used as yerror for the fit.
+618    """
+619
+620    def f(a, x):
+621        y = a[0] + a[1] * x
+622        return y
+623
+624    if all(isinstance(n, Obs) for n in x):
+625        out = total_least_squares(x, y, f, **kwargs)
+626        return out.fit_parameters
+627    elif all(isinstance(n, float) or isinstance(n, int) for n in x) or isinstance(x, np.ndarray):
+628        out = least_squares(x, y, f, **kwargs)
+629        return out.fit_parameters
+630    else:
+631        raise Exception('Unsupported types for x')
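A short sketch of `fit_lin` with toy data: with plain floats as x it dispatches to `least_squares`, with Obs as x it dispatches to `total_least_squares`.

```python
import pyerrors as pe

x = [1., 2., 3., 4.]                                  # plain floats: no x-errors assumed
y = [pe.pseudo_Obs(0.5 + 0.7 * xi, 0.02, 'toy_ensemble') for xi in x]
for o in y:
    o.gamma_method()

n, m = pe.fits.fit_lin(x, y)                          # intercept n and slope m as Obs
n.gamma_method()
m.gamma_method()
print(n, m)
```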
 
@@ -1412,30 +1411,30 @@ List of Obs, the dvalues of the Obs are used as yerror for the fit.
-
635def qqplot(x, o_y, func, p):
-636    """Generates a quantile-quantile plot of the fit result which can be used to
-637       check if the residuals of the fit are gaussian distributed.
-638    """
-639
-640    residuals = []
-641    for i_x, i_y in zip(x, o_y):
-642        residuals.append((i_y - func(p, i_x)) / i_y.dvalue)
-643    residuals = sorted(residuals)
-644    my_y = [o.value for o in residuals]
-645    probplot = scipy.stats.probplot(my_y)
-646    my_x = probplot[0][0]
-647    plt.figure(figsize=(8, 8 / 1.618))
-648    plt.errorbar(my_x, my_y, fmt='o')
-649    fit_start = my_x[0]
-650    fit_stop = my_x[-1]
-651    samples = np.arange(fit_start, fit_stop, 0.01)
-652    plt.plot(samples, samples, 'k--', zorder=11, label='Standard normal distribution')
-653    plt.plot(samples, probplot[1][0] * samples + probplot[1][1], zorder=10, label='Least squares fit, r=' + str(np.around(probplot[1][2], 3)), marker='', ls='-')
-654
-655    plt.xlabel('Theoretical quantiles')
-656    plt.ylabel('Ordered Values')
-657    plt.legend()
-658    plt.draw()
+            
634def qqplot(x, o_y, func, p):
+635    """Generates a quantile-quantile plot of the fit result which can be used to
+636       check if the residuals of the fit are Gaussian distributed.
+637    """
+638
+639    residuals = []
+640    for i_x, i_y in zip(x, o_y):
+641        residuals.append((i_y - func(p, i_x)) / i_y.dvalue)
+642    residuals = sorted(residuals)
+643    my_y = [o.value for o in residuals]
+644    probplot = scipy.stats.probplot(my_y)
+645    my_x = probplot[0][0]
+646    plt.figure(figsize=(8, 8 / 1.618))
+647    plt.errorbar(my_x, my_y, fmt='o')
+648    fit_start = my_x[0]
+649    fit_stop = my_x[-1]
+650    samples = np.arange(fit_start, fit_stop, 0.01)
+651    plt.plot(samples, samples, 'k--', zorder=11, label='Standard normal distribution')
+652    plt.plot(samples, probplot[1][0] * samples + probplot[1][1], zorder=10, label='Least squares fit, r=' + str(np.around(probplot[1][2], 3)), marker='', ls='-')
+653
+654    plt.xlabel('Theoretical quantiles')
+655    plt.ylabel('Ordered Values')
+656    plt.legend()
+657    plt.draw()
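A sketch of producing the quantile-quantile plot for an existing fit, reusing the hypothetical `x`, `y`, `func` and `fit_res` from the `least_squares` sketch above (equivalently, `qqplot=True` can be passed to `least_squares` directly):

```python
import matplotlib.pyplot as plt
import pyerrors as pe

pe.fits.qqplot(x, y, func, fit_res.fit_parameters)   # normalized residuals vs. normal quantiles
plt.show()                                           # qqplot itself only calls plt.draw()
```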
 
@@ -1456,34 +1455,34 @@ check if the residuals of the fit are gaussian distributed.

-
661def residual_plot(x, y, func, fit_res):
-662    """ Generates a plot which compares the fit to the data and displays the corresponding residuals"""
-663    sorted_x = sorted(x)
-664    xstart = sorted_x[0] - 0.5 * (sorted_x[1] - sorted_x[0])
-665    xstop = sorted_x[-1] + 0.5 * (sorted_x[-1] - sorted_x[-2])
-666    x_samples = np.arange(xstart, xstop + 0.01, 0.01)
-667
-668    plt.figure(figsize=(8, 8 / 1.618))
-669    gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1], wspace=0.0, hspace=0.0)
-670    ax0 = plt.subplot(gs[0])
-671    ax0.errorbar(x, [o.value for o in y], yerr=[o.dvalue for o in y], ls='none', fmt='o', capsize=3, markersize=5, label='Data')
-672    ax0.plot(x_samples, func([o.value for o in fit_res], x_samples), label='Fit', zorder=10, ls='-', ms=0)
-673    ax0.set_xticklabels([])
-674    ax0.set_xlim([xstart, xstop])
-675    ax0.set_xticklabels([])
-676    ax0.legend()
-677
-678    residuals = (np.asarray([o.value for o in y]) - func([o.value for o in fit_res], x)) / np.asarray([o.dvalue for o in y])
-679    ax1 = plt.subplot(gs[1])
-680    ax1.plot(x, residuals, 'ko', ls='none', markersize=5)
-681    ax1.tick_params(direction='out')
-682    ax1.tick_params(axis="x", bottom=True, top=True, labelbottom=True)
-683    ax1.axhline(y=0.0, ls='--', color='k', marker=" ")
-684    ax1.fill_between(x_samples, -1.0, 1.0, alpha=0.1, facecolor='k')
-685    ax1.set_xlim([xstart, xstop])
-686    ax1.set_ylabel('Residuals')
-687    plt.subplots_adjust(wspace=None, hspace=None)
-688    plt.draw()
+            
660def residual_plot(x, y, func, fit_res):
+661    """Generates a plot which compares the fit to the data and displays the corresponding residuals."""
+662    sorted_x = sorted(x)
+663    xstart = sorted_x[0] - 0.5 * (sorted_x[1] - sorted_x[0])
+664    xstop = sorted_x[-1] + 0.5 * (sorted_x[-1] - sorted_x[-2])
+665    x_samples = np.arange(xstart, xstop + 0.01, 0.01)
+666
+667    plt.figure(figsize=(8, 8 / 1.618))
+668    gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1], wspace=0.0, hspace=0.0)
+669    ax0 = plt.subplot(gs[0])
+670    ax0.errorbar(x, [o.value for o in y], yerr=[o.dvalue for o in y], ls='none', fmt='o', capsize=3, markersize=5, label='Data')
+671    ax0.plot(x_samples, func([o.value for o in fit_res], x_samples), label='Fit', zorder=10, ls='-', ms=0)
+672    ax0.set_xticklabels([])
+673    ax0.set_xlim([xstart, xstop])
+674    ax0.set_xticklabels([])
+675    ax0.legend()
+676
+677    residuals = (np.asarray([o.value for o in y]) - func([o.value for o in fit_res], x)) / np.asarray([o.dvalue for o in y])
+678    ax1 = plt.subplot(gs[1])
+679    ax1.plot(x, residuals, 'ko', ls='none', markersize=5)
+680    ax1.tick_params(direction='out')
+681    ax1.tick_params(axis="x", bottom=True, top=True, labelbottom=True)
+682    ax1.axhline(y=0.0, ls='--', color='k', marker=" ")
+683    ax1.fill_between(x_samples, -1.0, 1.0, alpha=0.1, facecolor='k')
+684    ax1.set_xlim([xstart, xstop])
+685    ax1.set_ylabel('Residuals')
+686    plt.subplots_adjust(wspace=None, hspace=None)
+687    plt.draw()
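Likewise for the residual plot, under the same assumptions (x is passed as an array so the fit function broadcasts over it; `resplot=True` in `least_squares` produces the same figure):

```python
import numpy as np
import matplotlib.pyplot as plt
import pyerrors as pe

pe.fits.residual_plot(np.asarray(x), y, func, fit_res.fit_parameters)
plt.show()
```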
 
@@ -1503,22 +1502,22 @@ check if the residuals of the fit are gaussian distributed.

-
691def error_band(x, func, beta):
-692    """Returns the error band for an array of sample values x, for given fit function func with optimized parameters beta."""
-693    cov = covariance(beta)
-694    if np.any(np.abs(cov - cov.T) > 1000 * np.finfo(np.float64).eps):
-695        warnings.warn("Covariance matrix is not symmetric within floating point precision", RuntimeWarning)
-696
-697    deriv = []
-698    for i, item in enumerate(x):
-699        deriv.append(np.array(egrad(func)([o.value for o in beta], item)))
-700
-701    err = []
-702    for i, item in enumerate(x):
-703        err.append(np.sqrt(deriv[i] @ cov @ deriv[i]))
-704    err = np.array(err)
-705
-706    return err
+            
690def error_band(x, func, beta):
+691    """Returns the error band for an array of sample values x, for a given fit function func with optimized parameters beta."""
+692    cov = covariance(beta)
+693    if np.any(np.abs(cov - cov.T) > 1000 * np.finfo(np.float64).eps):
+694        warnings.warn("Covariance matrix is not symmetric within floating point precision", RuntimeWarning)
+695
+696    deriv = []
+697    for i, item in enumerate(x):
+698        deriv.append(np.array(egrad(func)([o.value for o in beta], item)))
+699
+700    err = []
+701    for i, item in enumerate(x):
+702        err.append(np.sqrt(deriv[i] @ cov @ deriv[i]))
+703    err = np.array(err)
+704
+705    return err
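A sketch of drawing a 1-sigma band around the fitted curve with `error_band`, again reusing the hypothetical `func` and `fit_res`; `gamma_method` must have been applied to the fit parameters so that `covariance` can be evaluated:

```python
import numpy as np
import matplotlib.pyplot as plt
import pyerrors as pe

x_samples = np.linspace(1.0, 4.0, 100)
central = func([p.value for p in fit_res.fit_parameters], x_samples)
band = pe.fits.error_band(x_samples, func, fit_res.fit_parameters)

plt.plot(x_samples, central, label='fit')
plt.fill_between(x_samples, central - band, central + band, alpha=0.3, label='1-sigma band')
plt.legend()
plt.show()
```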
 
@@ -1538,44 +1537,44 @@ check if the residuals of the fit are gaussian distributed.

-
709def ks_test(objects=None):
-710    """Performs a Kolmogorov–Smirnov test for the p-values of all fit object.
-711
-712    Parameters
-713    ----------
-714    objects : list
-715        List of fit results to include in the analysis (optional).
-716    """
-717
-718    if objects is None:
-719        obs_list = []
-720        for obj in gc.get_objects():
-721            if isinstance(obj, Fit_result):
-722                obs_list.append(obj)
-723    else:
-724        obs_list = objects
-725
-726    p_values = [o.p_value for o in obs_list]
-727
-728    bins = len(p_values)
-729    x = np.arange(0, 1.001, 0.001)
-730    plt.plot(x, x, 'k', zorder=1)
-731    plt.xlim(0, 1)
-732    plt.ylim(0, 1)
-733    plt.xlabel('p-value')
-734    plt.ylabel('Cumulative probability')
-735    plt.title(str(bins) + ' p-values')
-736
-737    n = np.arange(1, bins + 1) / np.float64(bins)
-738    Xs = np.sort(p_values)
-739    plt.step(Xs, n)
-740    diffs = n - Xs
-741    loc_max_diff = np.argmax(np.abs(diffs))
-742    loc = Xs[loc_max_diff]
-743    plt.annotate('', xy=(loc, loc), xytext=(loc, loc + diffs[loc_max_diff]), arrowprops=dict(arrowstyle='<->', shrinkA=0, shrinkB=0))
-744    plt.draw()
-745
-746    print(scipy.stats.kstest(p_values, 'uniform'))
+            
708def ks_test(objects=None):
+709    """Performs a Kolmogorov–Smirnov test for the p-values of all fit objects.
+710
+711    Parameters
+712    ----------
+713    objects : list
+714        List of fit results to include in the analysis (optional).
+715    """
+716
+717    if objects is None:
+718        obs_list = []
+719        for obj in gc.get_objects():
+720            if isinstance(obj, Fit_result):
+721                obs_list.append(obj)
+722    else:
+723        obs_list = objects
+724
+725    p_values = [o.p_value for o in obs_list]
+726
+727    bins = len(p_values)
+728    x = np.arange(0, 1.001, 0.001)
+729    plt.plot(x, x, 'k', zorder=1)
+730    plt.xlim(0, 1)
+731    plt.ylim(0, 1)
+732    plt.xlabel('p-value')
+733    plt.ylabel('Cumulative probability')
+734    plt.title(str(bins) + ' p-values')
+735
+736    n = np.arange(1, bins + 1) / np.float64(bins)
+737    Xs = np.sort(p_values)
+738    plt.step(Xs, n)
+739    diffs = n - Xs
+740    loc_max_diff = np.argmax(np.abs(diffs))
+741    loc = Xs[loc_max_diff]
+742    plt.annotate('', xy=(loc, loc), xytext=(loc, loc + diffs[loc_max_diff]), arrowprops=dict(arrowstyle='<->', shrinkA=0, shrinkB=0))
+743    plt.draw()
+744
+745    print(scipy.stats.kstest(p_values, 'uniform'))
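Finally, a sketch of `ks_test` on a hypothetical list of earlier fit results (called without arguments it collects all `Fit_result` instances via the garbage collector):

```python
import matplotlib.pyplot as plt
import pyerrors as pe

# fit_results: hypothetical list of Fit_result objects from earlier least_squares calls
pe.fits.ks_test(objects=fit_results)   # prints the scipy.stats.kstest result for uniformity
plt.show()                             # ks_test itself only calls plt.draw()
```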