diff --git a/docs/pyerrors/fits.html b/docs/pyerrors/fits.html index 28f67466..1fbc6cae 100644 --- a/docs/pyerrors/fits.html +++ b/docs/pyerrors/fits.html @@ -348,589 +348,588 @@ 240 241 loc_priors = [] 242 for i_n, i_prior in enumerate(priors): -243 if isinstance(i_prior, Obs): -244 loc_priors.append(i_prior) -245 elif isinstance(i_prior, str): -246 loc_val, loc_dval = _extract_val_and_dval(i_prior) -247 loc_priors.append(cov_Obs(loc_val, loc_dval ** 2, '#prior' + str(i_n) + f"_{np.random.randint(2147483647):010d}")) -248 else: -249 raise TypeError("Prior entries need to be 'Obs' or 'str'.") -250 -251 prior_mask = np.arange(len(priors)) -252 output.priors = loc_priors -253 -254 elif isinstance(priors, dict): -255 loc_priors = [] -256 prior_mask = [] -257 output.priors = {} -258 for pos, prior in priors.items(): -259 if isinstance(pos, int): -260 prior_mask.append(pos) -261 else: -262 raise TypeError("Prior position needs to be an integer.") -263 if isinstance(prior, Obs): -264 loc_priors.append(prior) -265 elif isinstance(prior, str): -266 loc_val, loc_dval = _extract_val_and_dval(prior) -267 loc_priors.append(cov_Obs(loc_val, loc_dval ** 2, '#prior' + str(pos) + f"_{np.random.randint(2147483647):010d}")) -268 else: -269 raise TypeError("Prior entries need to be 'Obs' or 'str'.") -270 output.priors[pos] = loc_priors[-1] -271 if max(prior_mask) >= n_parms: -272 raise ValueError("Prior position out of range.") -273 else: -274 raise TypeError("Unkown type for `priors`.") -275 -276 p_f = [o.value for o in loc_priors] -277 dp_f = [o.dvalue for o in loc_priors] -278 if np.any(np.asarray(dp_f) <= 0.0): -279 raise Exception("No prior errors available, run the gamma method first.") -280 else: -281 p_f = dp_f = np.array([]) -282 prior_mask = [] -283 loc_priors = [] -284 -285 if 'initial_guess' in kwargs: -286 x0 = kwargs.get('initial_guess') -287 if len(x0) != n_parms: -288 raise ValueError('Initial guess does not have the correct length: %d vs. 
%d' % (len(x0), n_parms)) -289 else: -290 x0 = [0.1] * n_parms -291 -292 if priors is None: -293 def general_chisqfunc_uncorr(p, ivars, pr): -294 model = anp.concatenate([anp.array(funcd[key](p, xd[key])).reshape(-1) for key in key_ls]) -295 return (ivars - model) / dy_f -296 else: -297 def general_chisqfunc_uncorr(p, ivars, pr): -298 model = anp.concatenate([anp.array(funcd[key](p, xd[key])).reshape(-1) for key in key_ls]) -299 return anp.concatenate(((ivars - model) / dy_f, (p[prior_mask] - pr) / dp_f)) -300 -301 def chisqfunc_uncorr(p): -302 return anp.sum(general_chisqfunc_uncorr(p, y_f, p_f) ** 2) +243 loc_priors.append(_construct_prior_obs(i_prior, i_n)) +244 +245 prior_mask = np.arange(len(priors)) +246 output.priors = loc_priors +247 +248 elif isinstance(priors, dict): +249 loc_priors = [] +250 prior_mask = [] +251 output.priors = {} +252 for pos, prior in priors.items(): +253 if isinstance(pos, int): +254 prior_mask.append(pos) +255 else: +256 raise TypeError("Prior position needs to be an integer.") +257 loc_priors.append(_construct_prior_obs(prior, pos)) +258 +259 output.priors[pos] = loc_priors[-1] +260 if max(prior_mask) >= n_parms: +261 raise ValueError("Prior position out of range.") +262 else: +263 raise TypeError("Unkown type for `priors`.") +264 +265 p_f = [o.value for o in loc_priors] +266 dp_f = [o.dvalue for o in loc_priors] +267 if np.any(np.asarray(dp_f) <= 0.0): +268 raise Exception("No prior errors available, run the gamma method first.") +269 else: +270 p_f = dp_f = np.array([]) +271 prior_mask = [] +272 loc_priors = [] +273 +274 if 'initial_guess' in kwargs: +275 x0 = kwargs.get('initial_guess') +276 if len(x0) != n_parms: +277 raise ValueError('Initial guess does not have the correct length: %d vs. %d' % (len(x0), n_parms)) +278 else: +279 x0 = [0.1] * n_parms +280 +281 if priors is None: +282 def general_chisqfunc_uncorr(p, ivars, pr): +283 model = anp.concatenate([anp.array(funcd[key](p, xd[key])).reshape(-1) for key in key_ls]) +284 return (ivars - model) / dy_f +285 else: +286 def general_chisqfunc_uncorr(p, ivars, pr): +287 model = anp.concatenate([anp.array(funcd[key](p, xd[key])).reshape(-1) for key in key_ls]) +288 return anp.concatenate(((ivars - model) / dy_f, (p[prior_mask] - pr) / dp_f)) +289 +290 def chisqfunc_uncorr(p): +291 return anp.sum(general_chisqfunc_uncorr(p, y_f, p_f) ** 2) +292 +293 if kwargs.get('correlated_fit') is True: +294 corr = covariance(y_all, correlation=True, **kwargs) +295 covdiag = np.diag(1 / np.asarray(dy_f)) +296 condn = np.linalg.cond(corr) +297 if condn > 0.1 / np.finfo(float).eps: +298 raise Exception(f"Cannot invert correlation matrix as its condition number exceeds machine precision ({condn:1.2e})") +299 if condn > 1e13: +300 warnings.warn("Correlation matrix may be ill-conditioned, condition number: {%1.2e}" % (condn), RuntimeWarning) +301 chol = np.linalg.cholesky(corr) +302 chol_inv = scipy.linalg.solve_triangular(chol, covdiag, lower=True) 303 -304 if kwargs.get('correlated_fit') is True: -305 corr = covariance(y_all, correlation=True, **kwargs) -306 covdiag = np.diag(1 / np.asarray(dy_f)) -307 condn = np.linalg.cond(corr) -308 if condn > 0.1 / np.finfo(float).eps: -309 raise Exception(f"Cannot invert correlation matrix as its condition number exceeds machine precision ({condn:1.2e})") -310 if condn > 1e13: -311 warnings.warn("Correlation matrix may be ill-conditioned, condition number: {%1.2e}" % (condn), RuntimeWarning) -312 chol = np.linalg.cholesky(corr) -313 chol_inv = scipy.linalg.solve_triangular(chol, 
covdiag, lower=True) -314 -315 def general_chisqfunc(p, ivars, pr): -316 model = anp.concatenate([anp.array(funcd[key](p, xd[key])).reshape(-1) for key in key_ls]) -317 return anp.concatenate((anp.dot(chol_inv, (ivars - model)), (p[prior_mask] - pr) / dp_f)) -318 -319 def chisqfunc(p): -320 return anp.sum(general_chisqfunc(p, y_f, p_f) ** 2) -321 else: -322 general_chisqfunc = general_chisqfunc_uncorr -323 chisqfunc = chisqfunc_uncorr -324 -325 output.method = kwargs.get('method', 'Levenberg-Marquardt') -326 if not silent: -327 print('Method:', output.method) -328 -329 if output.method != 'Levenberg-Marquardt': -330 if output.method == 'migrad': -331 tolerance = 1e-4 # default value of 1e-1 set by iminuit can be problematic -332 if 'tol' in kwargs: -333 tolerance = kwargs.get('tol') -334 fit_result = iminuit.minimize(chisqfunc_uncorr, x0, tol=tolerance) # Stopping criterion 0.002 * tol * errordef -335 if kwargs.get('correlated_fit') is True: -336 fit_result = iminuit.minimize(chisqfunc, fit_result.x, tol=tolerance) -337 output.iterations = fit_result.nfev -338 else: -339 tolerance = 1e-12 -340 if 'tol' in kwargs: -341 tolerance = kwargs.get('tol') -342 fit_result = scipy.optimize.minimize(chisqfunc_uncorr, x0, method=kwargs.get('method'), tol=tolerance) -343 if kwargs.get('correlated_fit') is True: -344 fit_result = scipy.optimize.minimize(chisqfunc, fit_result.x, method=kwargs.get('method'), tol=tolerance) -345 output.iterations = fit_result.nit -346 -347 chisquare = fit_result.fun -348 -349 else: -350 if 'tol' in kwargs: -351 print('tol cannot be set for Levenberg-Marquardt') +304 def general_chisqfunc(p, ivars, pr): +305 model = anp.concatenate([anp.array(funcd[key](p, xd[key])).reshape(-1) for key in key_ls]) +306 return anp.concatenate((anp.dot(chol_inv, (ivars - model)), (p[prior_mask] - pr) / dp_f)) +307 +308 def chisqfunc(p): +309 return anp.sum(general_chisqfunc(p, y_f, p_f) ** 2) +310 else: +311 general_chisqfunc = general_chisqfunc_uncorr +312 chisqfunc = chisqfunc_uncorr +313 +314 output.method = kwargs.get('method', 'Levenberg-Marquardt') +315 if not silent: +316 print('Method:', output.method) +317 +318 if output.method != 'Levenberg-Marquardt': +319 if output.method == 'migrad': +320 tolerance = 1e-4 # default value of 1e-1 set by iminuit can be problematic +321 if 'tol' in kwargs: +322 tolerance = kwargs.get('tol') +323 fit_result = iminuit.minimize(chisqfunc_uncorr, x0, tol=tolerance) # Stopping criterion 0.002 * tol * errordef +324 if kwargs.get('correlated_fit') is True: +325 fit_result = iminuit.minimize(chisqfunc, fit_result.x, tol=tolerance) +326 output.iterations = fit_result.nfev +327 else: +328 tolerance = 1e-12 +329 if 'tol' in kwargs: +330 tolerance = kwargs.get('tol') +331 fit_result = scipy.optimize.minimize(chisqfunc_uncorr, x0, method=kwargs.get('method'), tol=tolerance) +332 if kwargs.get('correlated_fit') is True: +333 fit_result = scipy.optimize.minimize(chisqfunc, fit_result.x, method=kwargs.get('method'), tol=tolerance) +334 output.iterations = fit_result.nit +335 +336 chisquare = fit_result.fun +337 +338 else: +339 if 'tol' in kwargs: +340 print('tol cannot be set for Levenberg-Marquardt') +341 +342 def chisqfunc_residuals_uncorr(p): +343 return general_chisqfunc_uncorr(p, y_f, p_f) +344 +345 fit_result = scipy.optimize.least_squares(chisqfunc_residuals_uncorr, x0, method='lm', ftol=1e-15, gtol=1e-15, xtol=1e-15) +346 if kwargs.get('correlated_fit') is True: +347 +348 def chisqfunc_residuals(p): +349 return general_chisqfunc(p, y_f, p_f) +350 +351 
fit_result = scipy.optimize.least_squares(chisqfunc_residuals, fit_result.x, method='lm', ftol=1e-15, gtol=1e-15, xtol=1e-15) 352 -353 def chisqfunc_residuals_uncorr(p): -354 return general_chisqfunc_uncorr(p, y_f, p_f) +353 chisquare = np.sum(fit_result.fun ** 2) +354 assert np.isclose(chisquare, chisqfunc(fit_result.x), atol=1e-14) 355 -356 fit_result = scipy.optimize.least_squares(chisqfunc_residuals_uncorr, x0, method='lm', ftol=1e-15, gtol=1e-15, xtol=1e-15) -357 if kwargs.get('correlated_fit') is True: -358 -359 def chisqfunc_residuals(p): -360 return general_chisqfunc(p, y_f, p_f) -361 -362 fit_result = scipy.optimize.least_squares(chisqfunc_residuals, fit_result.x, method='lm', ftol=1e-15, gtol=1e-15, xtol=1e-15) -363 -364 chisquare = np.sum(fit_result.fun ** 2) -365 assert np.isclose(chisquare, chisqfunc(fit_result.x), atol=1e-14) -366 -367 output.iterations = fit_result.nfev +356 output.iterations = fit_result.nfev +357 +358 if not fit_result.success: +359 raise Exception('The minimization procedure did not converge.') +360 +361 if x_all.shape[-1] - n_parms > 0: +362 output.chisquare = chisquare +363 output.dof = x_all.shape[-1] - n_parms + len(loc_priors) +364 output.chisquare_by_dof = output.chisquare / output.dof +365 output.p_value = 1 - scipy.stats.chi2.cdf(output.chisquare, output.dof) +366 else: +367 output.chisquare_by_dof = float('nan') 368 -369 if not fit_result.success: -370 raise Exception('The minimization procedure did not converge.') -371 -372 if x_all.shape[-1] - n_parms > 0: -373 output.chisquare = chisquare -374 output.dof = x_all.shape[-1] - n_parms + len(loc_priors) -375 output.chisquare_by_dof = output.chisquare / output.dof -376 output.p_value = 1 - scipy.stats.chi2.cdf(output.chisquare, output.dof) -377 else: -378 output.chisquare_by_dof = float('nan') -379 -380 output.message = fit_result.message -381 if not silent: -382 print(fit_result.message) -383 print('chisquare/d.o.f.:', output.chisquare_by_dof) -384 print('fit parameters', fit_result.x) -385 -386 def prepare_hat_matrix(): -387 hat_vector = [] -388 for key in key_ls: -389 if (len(xd[key]) != 0): -390 hat_vector.append(jacobian(funcd[key])(fit_result.x, xd[key])) -391 hat_vector = [item for sublist in hat_vector for item in sublist] -392 return hat_vector -393 -394 if kwargs.get('expected_chisquare') is True: -395 if kwargs.get('correlated_fit') is not True: -396 W = np.diag(1 / np.asarray(dy_f)) -397 cov = covariance(y_all) -398 hat_vector = prepare_hat_matrix() -399 A = W @ hat_vector -400 P_phi = A @ np.linalg.pinv(A.T @ A) @ A.T -401 expected_chisquare = np.trace((np.identity(x_all.shape[-1]) - P_phi) @ W @ cov @ W) -402 output.chisquare_by_expected_chisquare = output.chisquare / expected_chisquare -403 if not silent: -404 print('chisquare/expected_chisquare:', output.chisquare_by_expected_chisquare) -405 -406 fitp = fit_result.x -407 -408 try: -409 hess = hessian(chisqfunc)(fitp) -410 except TypeError: -411 raise Exception("It is required to use autograd.numpy instead of numpy within fit functions, see the documentation for details.") from None -412 -413 len_y = len(y_f) +369 output.message = fit_result.message +370 if not silent: +371 print(fit_result.message) +372 print('chisquare/d.o.f.:', output.chisquare_by_dof) +373 print('fit parameters', fit_result.x) +374 +375 def prepare_hat_matrix(): +376 hat_vector = [] +377 for key in key_ls: +378 if (len(xd[key]) != 0): +379 hat_vector.append(jacobian(funcd[key])(fit_result.x, xd[key])) +380 hat_vector = [item for sublist in hat_vector for item in 
sublist] +381 return hat_vector +382 +383 if kwargs.get('expected_chisquare') is True: +384 if kwargs.get('correlated_fit') is not True: +385 W = np.diag(1 / np.asarray(dy_f)) +386 cov = covariance(y_all) +387 hat_vector = prepare_hat_matrix() +388 A = W @ hat_vector +389 P_phi = A @ np.linalg.pinv(A.T @ A) @ A.T +390 expected_chisquare = np.trace((np.identity(x_all.shape[-1]) - P_phi) @ W @ cov @ W) +391 output.chisquare_by_expected_chisquare = output.chisquare / expected_chisquare +392 if not silent: +393 print('chisquare/expected_chisquare:', output.chisquare_by_expected_chisquare) +394 +395 fitp = fit_result.x +396 +397 try: +398 hess = hessian(chisqfunc)(fitp) +399 except TypeError: +400 raise Exception("It is required to use autograd.numpy instead of numpy within fit functions, see the documentation for details.") from None +401 +402 len_y = len(y_f) +403 +404 def chisqfunc_compact(d): +405 return anp.sum(general_chisqfunc(d[:n_parms], d[n_parms: n_parms + len_y], d[n_parms + len_y:]) ** 2) +406 +407 jac_jac_y = hessian(chisqfunc_compact)(np.concatenate((fitp, y_f, p_f))) +408 +409 # Compute hess^{-1} @ jac_jac_y[:n_parms + m, n_parms + m:] using LAPACK dgesv +410 try: +411 deriv_y = -scipy.linalg.solve(hess, jac_jac_y[:n_parms, n_parms:]) +412 except np.linalg.LinAlgError: +413 raise Exception("Cannot invert hessian matrix.") 414 -415 def chisqfunc_compact(d): -416 return anp.sum(general_chisqfunc(d[:n_parms], d[n_parms: n_parms + len_y], d[n_parms + len_y:]) ** 2) -417 -418 jac_jac_y = hessian(chisqfunc_compact)(np.concatenate((fitp, y_f, p_f))) -419 -420 # Compute hess^{-1} @ jac_jac_y[:n_parms + m, n_parms + m:] using LAPACK dgesv -421 try: -422 deriv_y = -scipy.linalg.solve(hess, jac_jac_y[:n_parms, n_parms:]) -423 except np.linalg.LinAlgError: -424 raise Exception("Cannot invert hessian matrix.") -425 -426 result = [] -427 for i in range(n_parms): -428 result.append(derived_observable(lambda x_all, **kwargs: (x_all[0] + np.finfo(np.float64).eps) / (y_all[0].value + np.finfo(np.float64).eps) * fitp[i], list(y_all) + loc_priors, man_grad=list(deriv_y[i]))) -429 -430 output.fit_parameters = result -431 -432 # Hotelling t-squared p-value for correlated fits. -433 if kwargs.get('correlated_fit') is True: -434 n_cov = np.min(np.vectorize(lambda x_all: x_all.N)(y_all)) -435 output.t2_p_value = 1 - scipy.stats.f.cdf((n_cov - output.dof) / (output.dof * (n_cov - 1)) * output.chisquare, -436 output.dof, n_cov - output.dof) +415 result = [] +416 for i in range(n_parms): +417 result.append(derived_observable(lambda x_all, **kwargs: (x_all[0] + np.finfo(np.float64).eps) / (y_all[0].value + np.finfo(np.float64).eps) * fitp[i], list(y_all) + loc_priors, man_grad=list(deriv_y[i]))) +418 +419 output.fit_parameters = result +420 +421 # Hotelling t-squared p-value for correlated fits. 
+422 if kwargs.get('correlated_fit') is True: +423 n_cov = np.min(np.vectorize(lambda x_all: x_all.N)(y_all)) +424 output.t2_p_value = 1 - scipy.stats.f.cdf((n_cov - output.dof) / (output.dof * (n_cov - 1)) * output.chisquare, +425 output.dof, n_cov - output.dof) +426 +427 if kwargs.get('resplot') is True: +428 for key in key_ls: +429 residual_plot(xd[key], yd[key], funcd[key], result, title=key) +430 +431 if kwargs.get('qqplot') is True: +432 for key in key_ls: +433 qqplot(xd[key], yd[key], funcd[key], result, title=key) +434 +435 return output +436 437 -438 if kwargs.get('resplot') is True: -439 for key in key_ls: -440 residual_plot(xd[key], yd[key], funcd[key], result, title=key) -441 -442 if kwargs.get('qqplot') is True: -443 for key in key_ls: -444 qqplot(xd[key], yd[key], funcd[key], result, title=key) -445 -446 return output -447 -448 -449def total_least_squares(x, y, func, silent=False, **kwargs): -450 r'''Performs a non-linear fit to y = func(x) and returns a list of Obs corresponding to the fit parameters. -451 -452 Parameters -453 ---------- -454 x : list -455 list of Obs, or a tuple of lists of Obs -456 y : list -457 list of Obs. The dvalues of the Obs are used as x- and yerror for the fit. -458 func : object -459 func has to be of the form -460 -461 ```python -462 import autograd.numpy as anp -463 -464 def func(a, x): -465 return a[0] + a[1] * x + a[2] * anp.sinh(x) -466 ``` -467 -468 For multiple x values func can be of the form -469 -470 ```python -471 def func(a, x): -472 (x1, x2) = x -473 return a[0] * x1 ** 2 + a[1] * x2 -474 ``` -475 -476 It is important that all numpy functions refer to autograd.numpy, otherwise the differentiation -477 will not work. -478 silent : bool, optional -479 If true all output to the console is omitted (default False). -480 initial_guess : list -481 can provide an initial guess for the input parameters. Relevant for non-linear -482 fits with many parameters. -483 expected_chisquare : bool -484 If true prints the expected chisquare which is -485 corrected by effects caused by correlated input data. -486 This can take a while as the full correlation matrix -487 has to be calculated (default False). -488 num_grad : bool -489 Use numerical differentation instead of automatic differentiation to perform the error propagation (default False). -490 -491 Notes -492 ----- -493 Based on the orthogonal distance regression module of scipy. -494 -495 Returns -496 ------- -497 output : Fit_result -498 Parameters and information on the fitted result. -499 ''' -500 -501 output = Fit_result() -502 -503 output.fit_function = func +438def total_least_squares(x, y, func, silent=False, **kwargs): +439 r'''Performs a non-linear fit to y = func(x) and returns a list of Obs corresponding to the fit parameters. +440 +441 Parameters +442 ---------- +443 x : list +444 list of Obs, or a tuple of lists of Obs +445 y : list +446 list of Obs. The dvalues of the Obs are used as x- and yerror for the fit. +447 func : object +448 func has to be of the form +449 +450 ```python +451 import autograd.numpy as anp +452 +453 def func(a, x): +454 return a[0] + a[1] * x + a[2] * anp.sinh(x) +455 ``` +456 +457 For multiple x values func can be of the form +458 +459 ```python +460 def func(a, x): +461 (x1, x2) = x +462 return a[0] * x1 ** 2 + a[1] * x2 +463 ``` +464 +465 It is important that all numpy functions refer to autograd.numpy, otherwise the differentiation +466 will not work. +467 silent : bool, optional +468 If true all output to the console is omitted (default False). 
+469 initial_guess : list +470 can provide an initial guess for the input parameters. Relevant for non-linear +471 fits with many parameters. +472 expected_chisquare : bool +473 If true prints the expected chisquare which is +474 corrected by effects caused by correlated input data. +475 This can take a while as the full correlation matrix +476 has to be calculated (default False). +477 num_grad : bool +478 Use numerical differentation instead of automatic differentiation to perform the error propagation (default False). +479 +480 Notes +481 ----- +482 Based on the orthogonal distance regression module of scipy. +483 +484 Returns +485 ------- +486 output : Fit_result +487 Parameters and information on the fitted result. +488 ''' +489 +490 output = Fit_result() +491 +492 output.fit_function = func +493 +494 x = np.array(x) +495 +496 x_shape = x.shape +497 +498 if kwargs.get('num_grad') is True: +499 jacobian = num_jacobian +500 hessian = num_hessian +501 else: +502 jacobian = auto_jacobian +503 hessian = auto_hessian 504 -505 x = np.array(x) -506 -507 x_shape = x.shape -508 -509 if kwargs.get('num_grad') is True: -510 jacobian = num_jacobian -511 hessian = num_hessian -512 else: -513 jacobian = auto_jacobian -514 hessian = auto_hessian -515 -516 if not callable(func): -517 raise TypeError('func has to be a function.') -518 -519 for i in range(42): -520 try: -521 func(np.arange(i), x.T[0]) -522 except TypeError: -523 continue -524 except IndexError: -525 continue -526 else: -527 break -528 else: -529 raise RuntimeError("Fit function is not valid.") -530 -531 n_parms = i -532 if not silent: -533 print('Fit with', n_parms, 'parameter' + 's' * (n_parms > 1)) +505 if not callable(func): +506 raise TypeError('func has to be a function.') +507 +508 for i in range(42): +509 try: +510 func(np.arange(i), x.T[0]) +511 except TypeError: +512 continue +513 except IndexError: +514 continue +515 else: +516 break +517 else: +518 raise RuntimeError("Fit function is not valid.") +519 +520 n_parms = i +521 if not silent: +522 print('Fit with', n_parms, 'parameter' + 's' * (n_parms > 1)) +523 +524 x_f = np.vectorize(lambda o: o.value)(x) +525 dx_f = np.vectorize(lambda o: o.dvalue)(x) +526 y_f = np.array([o.value for o in y]) +527 dy_f = np.array([o.dvalue for o in y]) +528 +529 if np.any(np.asarray(dx_f) <= 0.0): +530 raise Exception('No x errors available, run the gamma method first.') +531 +532 if np.any(np.asarray(dy_f) <= 0.0): +533 raise Exception('No y errors available, run the gamma method first.') 534 -535 x_f = np.vectorize(lambda o: o.value)(x) -536 dx_f = np.vectorize(lambda o: o.dvalue)(x) -537 y_f = np.array([o.value for o in y]) -538 dy_f = np.array([o.dvalue for o in y]) -539 -540 if np.any(np.asarray(dx_f) <= 0.0): -541 raise Exception('No x errors available, run the gamma method first.') -542 -543 if np.any(np.asarray(dy_f) <= 0.0): -544 raise Exception('No y errors available, run the gamma method first.') -545 -546 if 'initial_guess' in kwargs: -547 x0 = kwargs.get('initial_guess') -548 if len(x0) != n_parms: -549 raise Exception('Initial guess does not have the correct length: %d vs. 
%d' % (len(x0), n_parms)) -550 else: -551 x0 = [1] * n_parms -552 -553 data = RealData(x_f, y_f, sx=dx_f, sy=dy_f) -554 model = Model(func) -555 odr = ODR(data, model, x0, partol=np.finfo(np.float64).eps) -556 odr.set_job(fit_type=0, deriv=1) -557 out = odr.run() -558 -559 output.residual_variance = out.res_var +535 if 'initial_guess' in kwargs: +536 x0 = kwargs.get('initial_guess') +537 if len(x0) != n_parms: +538 raise Exception('Initial guess does not have the correct length: %d vs. %d' % (len(x0), n_parms)) +539 else: +540 x0 = [1] * n_parms +541 +542 data = RealData(x_f, y_f, sx=dx_f, sy=dy_f) +543 model = Model(func) +544 odr = ODR(data, model, x0, partol=np.finfo(np.float64).eps) +545 odr.set_job(fit_type=0, deriv=1) +546 out = odr.run() +547 +548 output.residual_variance = out.res_var +549 +550 output.method = 'ODR' +551 +552 output.message = out.stopreason +553 +554 output.xplus = out.xplus +555 +556 if not silent: +557 print('Method: ODR') +558 print(*out.stopreason) +559 print('Residual variance:', output.residual_variance) 560 -561 output.method = 'ODR' -562 -563 output.message = out.stopreason -564 -565 output.xplus = out.xplus -566 -567 if not silent: -568 print('Method: ODR') -569 print(*out.stopreason) -570 print('Residual variance:', output.residual_variance) -571 -572 if out.info > 3: -573 raise Exception('The minimization procedure did not converge.') -574 -575 m = x_f.size -576 -577 def odr_chisquare(p): -578 model = func(p[:n_parms], p[n_parms:].reshape(x_shape)) -579 chisq = anp.sum(((y_f - model) / dy_f) ** 2) + anp.sum(((x_f - p[n_parms:].reshape(x_shape)) / dx_f) ** 2) -580 return chisq -581 -582 if kwargs.get('expected_chisquare') is True: -583 W = np.diag(1 / np.asarray(np.concatenate((dy_f.ravel(), dx_f.ravel())))) -584 -585 if kwargs.get('covariance') is not None: -586 cov = kwargs.get('covariance') -587 else: -588 cov = covariance(np.concatenate((y, x.ravel()))) -589 -590 number_of_x_parameters = int(m / x_f.shape[-1]) -591 -592 old_jac = jacobian(func)(out.beta, out.xplus) -593 fused_row1 = np.concatenate((old_jac, np.concatenate((number_of_x_parameters * [np.zeros(old_jac.shape)]), axis=0))) -594 fused_row2 = np.concatenate((jacobian(lambda x, y: func(y, x))(out.xplus, out.beta).reshape(x_f.shape[-1], x_f.shape[-1] * number_of_x_parameters), np.identity(number_of_x_parameters * old_jac.shape[0]))) -595 new_jac = np.concatenate((fused_row1, fused_row2), axis=1) +561 if out.info > 3: +562 raise Exception('The minimization procedure did not converge.') +563 +564 m = x_f.size +565 +566 def odr_chisquare(p): +567 model = func(p[:n_parms], p[n_parms:].reshape(x_shape)) +568 chisq = anp.sum(((y_f - model) / dy_f) ** 2) + anp.sum(((x_f - p[n_parms:].reshape(x_shape)) / dx_f) ** 2) +569 return chisq +570 +571 if kwargs.get('expected_chisquare') is True: +572 W = np.diag(1 / np.asarray(np.concatenate((dy_f.ravel(), dx_f.ravel())))) +573 +574 if kwargs.get('covariance') is not None: +575 cov = kwargs.get('covariance') +576 else: +577 cov = covariance(np.concatenate((y, x.ravel()))) +578 +579 number_of_x_parameters = int(m / x_f.shape[-1]) +580 +581 old_jac = jacobian(func)(out.beta, out.xplus) +582 fused_row1 = np.concatenate((old_jac, np.concatenate((number_of_x_parameters * [np.zeros(old_jac.shape)]), axis=0))) +583 fused_row2 = np.concatenate((jacobian(lambda x, y: func(y, x))(out.xplus, out.beta).reshape(x_f.shape[-1], x_f.shape[-1] * number_of_x_parameters), np.identity(number_of_x_parameters * old_jac.shape[0]))) +584 new_jac = np.concatenate((fused_row1, 
fused_row2), axis=1) +585 +586 A = W @ new_jac +587 P_phi = A @ np.linalg.pinv(A.T @ A) @ A.T +588 expected_chisquare = np.trace((np.identity(P_phi.shape[0]) - P_phi) @ W @ cov @ W) +589 if expected_chisquare <= 0.0: +590 warnings.warn("Negative expected_chisquare.", RuntimeWarning) +591 expected_chisquare = np.abs(expected_chisquare) +592 output.chisquare_by_expected_chisquare = odr_chisquare(np.concatenate((out.beta, out.xplus.ravel()))) / expected_chisquare +593 if not silent: +594 print('chisquare/expected_chisquare:', +595 output.chisquare_by_expected_chisquare) 596 -597 A = W @ new_jac -598 P_phi = A @ np.linalg.pinv(A.T @ A) @ A.T -599 expected_chisquare = np.trace((np.identity(P_phi.shape[0]) - P_phi) @ W @ cov @ W) -600 if expected_chisquare <= 0.0: -601 warnings.warn("Negative expected_chisquare.", RuntimeWarning) -602 expected_chisquare = np.abs(expected_chisquare) -603 output.chisquare_by_expected_chisquare = odr_chisquare(np.concatenate((out.beta, out.xplus.ravel()))) / expected_chisquare -604 if not silent: -605 print('chisquare/expected_chisquare:', -606 output.chisquare_by_expected_chisquare) +597 fitp = out.beta +598 try: +599 hess = hessian(odr_chisquare)(np.concatenate((fitp, out.xplus.ravel()))) +600 except TypeError: +601 raise Exception("It is required to use autograd.numpy instead of numpy within fit functions, see the documentation for details.") from None +602 +603 def odr_chisquare_compact_x(d): +604 model = func(d[:n_parms], d[n_parms:n_parms + m].reshape(x_shape)) +605 chisq = anp.sum(((y_f - model) / dy_f) ** 2) + anp.sum(((d[n_parms + m:].reshape(x_shape) - d[n_parms:n_parms + m].reshape(x_shape)) / dx_f) ** 2) +606 return chisq 607 -608 fitp = out.beta -609 try: -610 hess = hessian(odr_chisquare)(np.concatenate((fitp, out.xplus.ravel()))) -611 except TypeError: -612 raise Exception("It is required to use autograd.numpy instead of numpy within fit functions, see the documentation for details.") from None -613 -614 def odr_chisquare_compact_x(d): -615 model = func(d[:n_parms], d[n_parms:n_parms + m].reshape(x_shape)) -616 chisq = anp.sum(((y_f - model) / dy_f) ** 2) + anp.sum(((d[n_parms + m:].reshape(x_shape) - d[n_parms:n_parms + m].reshape(x_shape)) / dx_f) ** 2) -617 return chisq -618 -619 jac_jac_x = hessian(odr_chisquare_compact_x)(np.concatenate((fitp, out.xplus.ravel(), x_f.ravel()))) +608 jac_jac_x = hessian(odr_chisquare_compact_x)(np.concatenate((fitp, out.xplus.ravel(), x_f.ravel()))) +609 +610 # Compute hess^{-1} @ jac_jac_x[:n_parms + m, n_parms + m:] using LAPACK dgesv +611 try: +612 deriv_x = -scipy.linalg.solve(hess, jac_jac_x[:n_parms + m, n_parms + m:]) +613 except np.linalg.LinAlgError: +614 raise Exception("Cannot invert hessian matrix.") +615 +616 def odr_chisquare_compact_y(d): +617 model = func(d[:n_parms], d[n_parms:n_parms + m].reshape(x_shape)) +618 chisq = anp.sum(((d[n_parms + m:] - model) / dy_f) ** 2) + anp.sum(((x_f - d[n_parms:n_parms + m].reshape(x_shape)) / dx_f) ** 2) +619 return chisq 620 -621 # Compute hess^{-1} @ jac_jac_x[:n_parms + m, n_parms + m:] using LAPACK dgesv -622 try: -623 deriv_x = -scipy.linalg.solve(hess, jac_jac_x[:n_parms + m, n_parms + m:]) -624 except np.linalg.LinAlgError: -625 raise Exception("Cannot invert hessian matrix.") -626 -627 def odr_chisquare_compact_y(d): -628 model = func(d[:n_parms], d[n_parms:n_parms + m].reshape(x_shape)) -629 chisq = anp.sum(((d[n_parms + m:] - model) / dy_f) ** 2) + anp.sum(((x_f - d[n_parms:n_parms + m].reshape(x_shape)) / dx_f) ** 2) -630 return chisq -631 -632 
jac_jac_y = hessian(odr_chisquare_compact_y)(np.concatenate((fitp, out.xplus.ravel(), y_f))) -633 -634 # Compute hess^{-1} @ jac_jac_y[:n_parms + m, n_parms + m:] using LAPACK dgesv -635 try: -636 deriv_y = -scipy.linalg.solve(hess, jac_jac_y[:n_parms + m, n_parms + m:]) -637 except np.linalg.LinAlgError: -638 raise Exception("Cannot invert hessian matrix.") -639 -640 result = [] -641 for i in range(n_parms): -642 result.append(derived_observable(lambda my_var, **kwargs: (my_var[0] + np.finfo(np.float64).eps) / (x.ravel()[0].value + np.finfo(np.float64).eps) * out.beta[i], list(x.ravel()) + list(y), man_grad=list(deriv_x[i]) + list(deriv_y[i]))) -643 -644 output.fit_parameters = result -645 -646 output.odr_chisquare = odr_chisquare(np.concatenate((out.beta, out.xplus.ravel()))) -647 output.dof = x.shape[-1] - n_parms -648 output.p_value = 1 - scipy.stats.chi2.cdf(output.odr_chisquare, output.dof) -649 -650 return output -651 +621 jac_jac_y = hessian(odr_chisquare_compact_y)(np.concatenate((fitp, out.xplus.ravel(), y_f))) +622 +623 # Compute hess^{-1} @ jac_jac_y[:n_parms + m, n_parms + m:] using LAPACK dgesv +624 try: +625 deriv_y = -scipy.linalg.solve(hess, jac_jac_y[:n_parms + m, n_parms + m:]) +626 except np.linalg.LinAlgError: +627 raise Exception("Cannot invert hessian matrix.") +628 +629 result = [] +630 for i in range(n_parms): +631 result.append(derived_observable(lambda my_var, **kwargs: (my_var[0] + np.finfo(np.float64).eps) / (x.ravel()[0].value + np.finfo(np.float64).eps) * out.beta[i], list(x.ravel()) + list(y), man_grad=list(deriv_x[i]) + list(deriv_y[i]))) +632 +633 output.fit_parameters = result +634 +635 output.odr_chisquare = odr_chisquare(np.concatenate((out.beta, out.xplus.ravel()))) +636 output.dof = x.shape[-1] - n_parms +637 output.p_value = 1 - scipy.stats.chi2.cdf(output.odr_chisquare, output.dof) +638 +639 return output +640 +641 +642def fit_lin(x, y, **kwargs): +643 """Performs a linear fit to y = n + m * x and returns two Obs n, m. +644 +645 Parameters +646 ---------- +647 x : list +648 Can either be a list of floats in which case no xerror is assumed, or +649 a list of Obs, where the dvalues of the Obs are used as xerror for the fit. +650 y : list +651 List of Obs, the dvalues of the Obs are used as yerror for the fit. 652 -653def fit_lin(x, y, **kwargs): -654 """Performs a linear fit to y = n + m * x and returns two Obs n, m. -655 -656 Parameters -657 ---------- -658 x : list -659 Can either be a list of floats in which case no xerror is assumed, or -660 a list of Obs, where the dvalues of the Obs are used as xerror for the fit. -661 y : list -662 List of Obs, the dvalues of the Obs are used as yerror for the fit. -663 -664 Returns -665 ------- -666 fit_parameters : list[Obs] -667 LIist of fitted observables. -668 """ -669 -670 def f(a, x): -671 y = a[0] + a[1] * x -672 return y -673 -674 if all(isinstance(n, Obs) for n in x): -675 out = total_least_squares(x, y, f, **kwargs) -676 return out.fit_parameters -677 elif all(isinstance(n, float) or isinstance(n, int) for n in x) or isinstance(x, np.ndarray): -678 out = least_squares(x, y, f, **kwargs) -679 return out.fit_parameters -680 else: -681 raise TypeError('Unsupported types for x') -682 -683 -684def qqplot(x, o_y, func, p, title=""): -685 """Generates a quantile-quantile plot of the fit result which can be used to -686 check if the residuals of the fit are gaussian distributed. 
-687 -688 Returns -689 ------- -690 None -691 """ -692 -693 residuals = [] -694 for i_x, i_y in zip(x, o_y): -695 residuals.append((i_y - func(p, i_x)) / i_y.dvalue) -696 residuals = sorted(residuals) -697 my_y = [o.value for o in residuals] -698 probplot = scipy.stats.probplot(my_y) -699 my_x = probplot[0][0] -700 plt.figure(figsize=(8, 8 / 1.618)) -701 plt.errorbar(my_x, my_y, fmt='o') -702 fit_start = my_x[0] -703 fit_stop = my_x[-1] -704 samples = np.arange(fit_start, fit_stop, 0.01) -705 plt.plot(samples, samples, 'k--', zorder=11, label='Standard normal distribution') -706 plt.plot(samples, probplot[1][0] * samples + probplot[1][1], zorder=10, label='Least squares fit, r=' + str(np.around(probplot[1][2], 3)), marker='', ls='-') +653 Returns +654 ------- +655 fit_parameters : list[Obs] +656 LIist of fitted observables. +657 """ +658 +659 def f(a, x): +660 y = a[0] + a[1] * x +661 return y +662 +663 if all(isinstance(n, Obs) for n in x): +664 out = total_least_squares(x, y, f, **kwargs) +665 return out.fit_parameters +666 elif all(isinstance(n, float) or isinstance(n, int) for n in x) or isinstance(x, np.ndarray): +667 out = least_squares(x, y, f, **kwargs) +668 return out.fit_parameters +669 else: +670 raise TypeError('Unsupported types for x') +671 +672 +673def qqplot(x, o_y, func, p, title=""): +674 """Generates a quantile-quantile plot of the fit result which can be used to +675 check if the residuals of the fit are gaussian distributed. +676 +677 Returns +678 ------- +679 None +680 """ +681 +682 residuals = [] +683 for i_x, i_y in zip(x, o_y): +684 residuals.append((i_y - func(p, i_x)) / i_y.dvalue) +685 residuals = sorted(residuals) +686 my_y = [o.value for o in residuals] +687 probplot = scipy.stats.probplot(my_y) +688 my_x = probplot[0][0] +689 plt.figure(figsize=(8, 8 / 1.618)) +690 plt.errorbar(my_x, my_y, fmt='o') +691 fit_start = my_x[0] +692 fit_stop = my_x[-1] +693 samples = np.arange(fit_start, fit_stop, 0.01) +694 plt.plot(samples, samples, 'k--', zorder=11, label='Standard normal distribution') +695 plt.plot(samples, probplot[1][0] * samples + probplot[1][1], zorder=10, label='Least squares fit, r=' + str(np.around(probplot[1][2], 3)), marker='', ls='-') +696 +697 plt.xlabel('Theoretical quantiles') +698 plt.ylabel('Ordered Values') +699 plt.legend(title=title) +700 plt.draw() +701 +702 +703def residual_plot(x, y, func, fit_res, title=""): +704 """Generates a plot which compares the fit to the data and displays the corresponding residuals +705 +706 For uncorrelated data the residuals are expected to be distributed ~N(0,1). 707 -708 plt.xlabel('Theoretical quantiles') -709 plt.ylabel('Ordered Values') -710 plt.legend(title=title) -711 plt.draw() -712 -713 -714def residual_plot(x, y, func, fit_res, title=""): -715 """Generates a plot which compares the fit to the data and displays the corresponding residuals +708 Returns +709 ------- +710 None +711 """ +712 sorted_x = sorted(x) +713 xstart = sorted_x[0] - 0.5 * (sorted_x[1] - sorted_x[0]) +714 xstop = sorted_x[-1] + 0.5 * (sorted_x[-1] - sorted_x[-2]) +715 x_samples = np.arange(xstart, xstop + 0.01, 0.01) 716 -717 For uncorrelated data the residuals are expected to be distributed ~N(0,1). 
-718 -719 Returns -720 ------- -721 None -722 """ -723 sorted_x = sorted(x) -724 xstart = sorted_x[0] - 0.5 * (sorted_x[1] - sorted_x[0]) -725 xstop = sorted_x[-1] + 0.5 * (sorted_x[-1] - sorted_x[-2]) -726 x_samples = np.arange(xstart, xstop + 0.01, 0.01) -727 -728 plt.figure(figsize=(8, 8 / 1.618)) -729 gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1], wspace=0.0, hspace=0.0) -730 ax0 = plt.subplot(gs[0]) -731 ax0.errorbar(x, [o.value for o in y], yerr=[o.dvalue for o in y], ls='none', fmt='o', capsize=3, markersize=5, label='Data') -732 ax0.plot(x_samples, func([o.value for o in fit_res], x_samples), label='Fit', zorder=10, ls='-', ms=0) -733 ax0.set_xticklabels([]) -734 ax0.set_xlim([xstart, xstop]) -735 ax0.set_xticklabels([]) -736 ax0.legend(title=title) -737 -738 residuals = (np.asarray([o.value for o in y]) - func([o.value for o in fit_res], np.asarray(x))) / np.asarray([o.dvalue for o in y]) -739 ax1 = plt.subplot(gs[1]) -740 ax1.plot(x, residuals, 'ko', ls='none', markersize=5) -741 ax1.tick_params(direction='out') -742 ax1.tick_params(axis="x", bottom=True, top=True, labelbottom=True) -743 ax1.axhline(y=0.0, ls='--', color='k', marker=" ") -744 ax1.fill_between(x_samples, -1.0, 1.0, alpha=0.1, facecolor='k') -745 ax1.set_xlim([xstart, xstop]) -746 ax1.set_ylabel('Residuals') -747 plt.subplots_adjust(wspace=None, hspace=None) -748 plt.draw() -749 -750 -751def error_band(x, func, beta): -752 """Calculate the error band for an array of sample values x, for given fit function func with optimized parameters beta. -753 -754 Returns -755 ------- -756 err : np.array(Obs) -757 Error band for an array of sample values x -758 """ -759 cov = covariance(beta) -760 if np.any(np.abs(cov - cov.T) > 1000 * np.finfo(np.float64).eps): -761 warnings.warn("Covariance matrix is not symmetric within floating point precision", RuntimeWarning) +717 plt.figure(figsize=(8, 8 / 1.618)) +718 gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1], wspace=0.0, hspace=0.0) +719 ax0 = plt.subplot(gs[0]) +720 ax0.errorbar(x, [o.value for o in y], yerr=[o.dvalue for o in y], ls='none', fmt='o', capsize=3, markersize=5, label='Data') +721 ax0.plot(x_samples, func([o.value for o in fit_res], x_samples), label='Fit', zorder=10, ls='-', ms=0) +722 ax0.set_xticklabels([]) +723 ax0.set_xlim([xstart, xstop]) +724 ax0.set_xticklabels([]) +725 ax0.legend(title=title) +726 +727 residuals = (np.asarray([o.value for o in y]) - func([o.value for o in fit_res], np.asarray(x))) / np.asarray([o.dvalue for o in y]) +728 ax1 = plt.subplot(gs[1]) +729 ax1.plot(x, residuals, 'ko', ls='none', markersize=5) +730 ax1.tick_params(direction='out') +731 ax1.tick_params(axis="x", bottom=True, top=True, labelbottom=True) +732 ax1.axhline(y=0.0, ls='--', color='k', marker=" ") +733 ax1.fill_between(x_samples, -1.0, 1.0, alpha=0.1, facecolor='k') +734 ax1.set_xlim([xstart, xstop]) +735 ax1.set_ylabel('Residuals') +736 plt.subplots_adjust(wspace=None, hspace=None) +737 plt.draw() +738 +739 +740def error_band(x, func, beta): +741 """Calculate the error band for an array of sample values x, for given fit function func with optimized parameters beta. 
+742 +743 Returns +744 ------- +745 err : np.array(Obs) +746 Error band for an array of sample values x +747 """ +748 cov = covariance(beta) +749 if np.any(np.abs(cov - cov.T) > 1000 * np.finfo(np.float64).eps): +750 warnings.warn("Covariance matrix is not symmetric within floating point precision", RuntimeWarning) +751 +752 deriv = [] +753 for i, item in enumerate(x): +754 deriv.append(np.array(egrad(func)([o.value for o in beta], item))) +755 +756 err = [] +757 for i, item in enumerate(x): +758 err.append(np.sqrt(deriv[i] @ cov @ deriv[i])) +759 err = np.array(err) +760 +761 return err 762 -763 deriv = [] -764 for i, item in enumerate(x): -765 deriv.append(np.array(egrad(func)([o.value for o in beta], item))) +763 +764def ks_test(objects=None): +765 """Performs a Kolmogorov–Smirnov test for the p-values of all fit object. 766 -767 err = [] -768 for i, item in enumerate(x): -769 err.append(np.sqrt(deriv[i] @ cov @ deriv[i])) -770 err = np.array(err) +767 Parameters +768 ---------- +769 objects : list +770 List of fit results to include in the analysis (optional). 771 -772 return err -773 -774 -775def ks_test(objects=None): -776 """Performs a Kolmogorov–Smirnov test for the p-values of all fit object. -777 -778 Parameters -779 ---------- -780 objects : list -781 List of fit results to include in the analysis (optional). -782 -783 Returns -784 ------- -785 None -786 """ -787 -788 if objects is None: -789 obs_list = [] -790 for obj in gc.get_objects(): -791 if isinstance(obj, Fit_result): -792 obs_list.append(obj) -793 else: -794 obs_list = objects +772 Returns +773 ------- +774 None +775 """ +776 +777 if objects is None: +778 obs_list = [] +779 for obj in gc.get_objects(): +780 if isinstance(obj, Fit_result): +781 obs_list.append(obj) +782 else: +783 obs_list = objects +784 +785 p_values = [o.p_value for o in obs_list] +786 +787 bins = len(p_values) +788 x = np.arange(0, 1.001, 0.001) +789 plt.plot(x, x, 'k', zorder=1) +790 plt.xlim(0, 1) +791 plt.ylim(0, 1) +792 plt.xlabel('p-value') +793 plt.ylabel('Cumulative probability') +794 plt.title(str(bins) + ' p-values') 795 -796 p_values = [o.p_value for o in obs_list] -797 -798 bins = len(p_values) -799 x = np.arange(0, 1.001, 0.001) -800 plt.plot(x, x, 'k', zorder=1) -801 plt.xlim(0, 1) -802 plt.ylim(0, 1) -803 plt.xlabel('p-value') -804 plt.ylabel('Cumulative probability') -805 plt.title(str(bins) + ' p-values') +796 n = np.arange(1, bins + 1) / np.float64(bins) +797 Xs = np.sort(p_values) +798 plt.step(Xs, n) +799 diffs = n - Xs +800 loc_max_diff = np.argmax(np.abs(diffs)) +801 loc = Xs[loc_max_diff] +802 plt.annotate('', xy=(loc, loc), xytext=(loc, loc + diffs[loc_max_diff]), arrowprops=dict(arrowstyle='<->', shrinkA=0, shrinkB=0)) +803 plt.draw() +804 +805 print(scipy.stats.kstest(p_values, 'uniform')) 806 -807 n = np.arange(1, bins + 1) / np.float64(bins) -808 Xs = np.sort(p_values) -809 plt.step(Xs, n) -810 diffs = n - Xs -811 loc_max_diff = np.argmax(np.abs(diffs)) -812 loc = Xs[loc_max_diff] -813 plt.annotate('', xy=(loc, loc), xytext=(loc, loc + diffs[loc_max_diff]), arrowprops=dict(arrowstyle='<->', shrinkA=0, shrinkB=0)) -814 plt.draw() +807 +808def _extract_val_and_dval(string): +809 split_string = string.split('(') +810 if '.' in split_string[0] and '.' 
not in split_string[1][:-1]: +811 factor = 10 ** -len(split_string[0].partition('.')[2]) +812 else: +813 factor = 1 +814 return float(split_string[0]), float(split_string[1][:-1]) * factor 815 -816 print(scipy.stats.kstest(p_values, 'uniform')) -817 -818 -819def _extract_val_and_dval(string): -820 split_string = string.split('(') -821 if '.' in split_string[0] and '.' not in split_string[1][:-1]: -822 factor = 10 ** -len(split_string[0].partition('.')[2]) +816 +817def _construct_prior_obs(i_prior, i_n): +818 if isinstance(i_prior, Obs): +819 return i_prior +820 elif isinstance(i_prior, str): +821 loc_val, loc_dval = _extract_val_and_dval(i_prior) +822 return cov_Obs(loc_val, loc_dval ** 2, '#prior' + str(i_n) + f"_{np.random.randint(2147483647):010d}") 823 else: -824 factor = 1 -825 return float(split_string[0]), float(split_string[1][:-1]) * factor +824 raise TypeError("Prior entries need to be 'Obs' or 'str'.") @@ -1254,210 +1253,199 @@ Hotelling t-squared p-value for correlated fits. 241 242 loc_priors = [] 243 for i_n, i_prior in enumerate(priors): -244 if isinstance(i_prior, Obs): -245 loc_priors.append(i_prior) -246 elif isinstance(i_prior, str): -247 loc_val, loc_dval = _extract_val_and_dval(i_prior) -248 loc_priors.append(cov_Obs(loc_val, loc_dval ** 2, '#prior' + str(i_n) + f"_{np.random.randint(2147483647):010d}")) -249 else: -250 raise TypeError("Prior entries need to be 'Obs' or 'str'.") -251 -252 prior_mask = np.arange(len(priors)) -253 output.priors = loc_priors -254 -255 elif isinstance(priors, dict): -256 loc_priors = [] -257 prior_mask = [] -258 output.priors = {} -259 for pos, prior in priors.items(): -260 if isinstance(pos, int): -261 prior_mask.append(pos) -262 else: -263 raise TypeError("Prior position needs to be an integer.") -264 if isinstance(prior, Obs): -265 loc_priors.append(prior) -266 elif isinstance(prior, str): -267 loc_val, loc_dval = _extract_val_and_dval(prior) -268 loc_priors.append(cov_Obs(loc_val, loc_dval ** 2, '#prior' + str(pos) + f"_{np.random.randint(2147483647):010d}")) -269 else: -270 raise TypeError("Prior entries need to be 'Obs' or 'str'.") -271 output.priors[pos] = loc_priors[-1] -272 if max(prior_mask) >= n_parms: -273 raise ValueError("Prior position out of range.") -274 else: -275 raise TypeError("Unkown type for `priors`.") -276 -277 p_f = [o.value for o in loc_priors] -278 dp_f = [o.dvalue for o in loc_priors] -279 if np.any(np.asarray(dp_f) <= 0.0): -280 raise Exception("No prior errors available, run the gamma method first.") -281 else: -282 p_f = dp_f = np.array([]) -283 prior_mask = [] -284 loc_priors = [] -285 -286 if 'initial_guess' in kwargs: -287 x0 = kwargs.get('initial_guess') -288 if len(x0) != n_parms: -289 raise ValueError('Initial guess does not have the correct length: %d vs. 
%d' % (len(x0), n_parms)) -290 else: -291 x0 = [0.1] * n_parms -292 -293 if priors is None: -294 def general_chisqfunc_uncorr(p, ivars, pr): -295 model = anp.concatenate([anp.array(funcd[key](p, xd[key])).reshape(-1) for key in key_ls]) -296 return (ivars - model) / dy_f -297 else: -298 def general_chisqfunc_uncorr(p, ivars, pr): -299 model = anp.concatenate([anp.array(funcd[key](p, xd[key])).reshape(-1) for key in key_ls]) -300 return anp.concatenate(((ivars - model) / dy_f, (p[prior_mask] - pr) / dp_f)) -301 -302 def chisqfunc_uncorr(p): -303 return anp.sum(general_chisqfunc_uncorr(p, y_f, p_f) ** 2) +244 loc_priors.append(_construct_prior_obs(i_prior, i_n)) +245 +246 prior_mask = np.arange(len(priors)) +247 output.priors = loc_priors +248 +249 elif isinstance(priors, dict): +250 loc_priors = [] +251 prior_mask = [] +252 output.priors = {} +253 for pos, prior in priors.items(): +254 if isinstance(pos, int): +255 prior_mask.append(pos) +256 else: +257 raise TypeError("Prior position needs to be an integer.") +258 loc_priors.append(_construct_prior_obs(prior, pos)) +259 +260 output.priors[pos] = loc_priors[-1] +261 if max(prior_mask) >= n_parms: +262 raise ValueError("Prior position out of range.") +263 else: +264 raise TypeError("Unkown type for `priors`.") +265 +266 p_f = [o.value for o in loc_priors] +267 dp_f = [o.dvalue for o in loc_priors] +268 if np.any(np.asarray(dp_f) <= 0.0): +269 raise Exception("No prior errors available, run the gamma method first.") +270 else: +271 p_f = dp_f = np.array([]) +272 prior_mask = [] +273 loc_priors = [] +274 +275 if 'initial_guess' in kwargs: +276 x0 = kwargs.get('initial_guess') +277 if len(x0) != n_parms: +278 raise ValueError('Initial guess does not have the correct length: %d vs. %d' % (len(x0), n_parms)) +279 else: +280 x0 = [0.1] * n_parms +281 +282 if priors is None: +283 def general_chisqfunc_uncorr(p, ivars, pr): +284 model = anp.concatenate([anp.array(funcd[key](p, xd[key])).reshape(-1) for key in key_ls]) +285 return (ivars - model) / dy_f +286 else: +287 def general_chisqfunc_uncorr(p, ivars, pr): +288 model = anp.concatenate([anp.array(funcd[key](p, xd[key])).reshape(-1) for key in key_ls]) +289 return anp.concatenate(((ivars - model) / dy_f, (p[prior_mask] - pr) / dp_f)) +290 +291 def chisqfunc_uncorr(p): +292 return anp.sum(general_chisqfunc_uncorr(p, y_f, p_f) ** 2) +293 +294 if kwargs.get('correlated_fit') is True: +295 corr = covariance(y_all, correlation=True, **kwargs) +296 covdiag = np.diag(1 / np.asarray(dy_f)) +297 condn = np.linalg.cond(corr) +298 if condn > 0.1 / np.finfo(float).eps: +299 raise Exception(f"Cannot invert correlation matrix as its condition number exceeds machine precision ({condn:1.2e})") +300 if condn > 1e13: +301 warnings.warn("Correlation matrix may be ill-conditioned, condition number: {%1.2e}" % (condn), RuntimeWarning) +302 chol = np.linalg.cholesky(corr) +303 chol_inv = scipy.linalg.solve_triangular(chol, covdiag, lower=True) 304 -305 if kwargs.get('correlated_fit') is True: -306 corr = covariance(y_all, correlation=True, **kwargs) -307 covdiag = np.diag(1 / np.asarray(dy_f)) -308 condn = np.linalg.cond(corr) -309 if condn > 0.1 / np.finfo(float).eps: -310 raise Exception(f"Cannot invert correlation matrix as its condition number exceeds machine precision ({condn:1.2e})") -311 if condn > 1e13: -312 warnings.warn("Correlation matrix may be ill-conditioned, condition number: {%1.2e}" % (condn), RuntimeWarning) -313 chol = np.linalg.cholesky(corr) -314 chol_inv = scipy.linalg.solve_triangular(chol, 
covdiag, lower=True) -315 -316 def general_chisqfunc(p, ivars, pr): -317 model = anp.concatenate([anp.array(funcd[key](p, xd[key])).reshape(-1) for key in key_ls]) -318 return anp.concatenate((anp.dot(chol_inv, (ivars - model)), (p[prior_mask] - pr) / dp_f)) -319 -320 def chisqfunc(p): -321 return anp.sum(general_chisqfunc(p, y_f, p_f) ** 2) -322 else: -323 general_chisqfunc = general_chisqfunc_uncorr -324 chisqfunc = chisqfunc_uncorr -325 -326 output.method = kwargs.get('method', 'Levenberg-Marquardt') -327 if not silent: -328 print('Method:', output.method) -329 -330 if output.method != 'Levenberg-Marquardt': -331 if output.method == 'migrad': -332 tolerance = 1e-4 # default value of 1e-1 set by iminuit can be problematic -333 if 'tol' in kwargs: -334 tolerance = kwargs.get('tol') -335 fit_result = iminuit.minimize(chisqfunc_uncorr, x0, tol=tolerance) # Stopping criterion 0.002 * tol * errordef -336 if kwargs.get('correlated_fit') is True: -337 fit_result = iminuit.minimize(chisqfunc, fit_result.x, tol=tolerance) -338 output.iterations = fit_result.nfev -339 else: -340 tolerance = 1e-12 -341 if 'tol' in kwargs: -342 tolerance = kwargs.get('tol') -343 fit_result = scipy.optimize.minimize(chisqfunc_uncorr, x0, method=kwargs.get('method'), tol=tolerance) -344 if kwargs.get('correlated_fit') is True: -345 fit_result = scipy.optimize.minimize(chisqfunc, fit_result.x, method=kwargs.get('method'), tol=tolerance) -346 output.iterations = fit_result.nit -347 -348 chisquare = fit_result.fun -349 -350 else: -351 if 'tol' in kwargs: -352 print('tol cannot be set for Levenberg-Marquardt') +305 def general_chisqfunc(p, ivars, pr): +306 model = anp.concatenate([anp.array(funcd[key](p, xd[key])).reshape(-1) for key in key_ls]) +307 return anp.concatenate((anp.dot(chol_inv, (ivars - model)), (p[prior_mask] - pr) / dp_f)) +308 +309 def chisqfunc(p): +310 return anp.sum(general_chisqfunc(p, y_f, p_f) ** 2) +311 else: +312 general_chisqfunc = general_chisqfunc_uncorr +313 chisqfunc = chisqfunc_uncorr +314 +315 output.method = kwargs.get('method', 'Levenberg-Marquardt') +316 if not silent: +317 print('Method:', output.method) +318 +319 if output.method != 'Levenberg-Marquardt': +320 if output.method == 'migrad': +321 tolerance = 1e-4 # default value of 1e-1 set by iminuit can be problematic +322 if 'tol' in kwargs: +323 tolerance = kwargs.get('tol') +324 fit_result = iminuit.minimize(chisqfunc_uncorr, x0, tol=tolerance) # Stopping criterion 0.002 * tol * errordef +325 if kwargs.get('correlated_fit') is True: +326 fit_result = iminuit.minimize(chisqfunc, fit_result.x, tol=tolerance) +327 output.iterations = fit_result.nfev +328 else: +329 tolerance = 1e-12 +330 if 'tol' in kwargs: +331 tolerance = kwargs.get('tol') +332 fit_result = scipy.optimize.minimize(chisqfunc_uncorr, x0, method=kwargs.get('method'), tol=tolerance) +333 if kwargs.get('correlated_fit') is True: +334 fit_result = scipy.optimize.minimize(chisqfunc, fit_result.x, method=kwargs.get('method'), tol=tolerance) +335 output.iterations = fit_result.nit +336 +337 chisquare = fit_result.fun +338 +339 else: +340 if 'tol' in kwargs: +341 print('tol cannot be set for Levenberg-Marquardt') +342 +343 def chisqfunc_residuals_uncorr(p): +344 return general_chisqfunc_uncorr(p, y_f, p_f) +345 +346 fit_result = scipy.optimize.least_squares(chisqfunc_residuals_uncorr, x0, method='lm', ftol=1e-15, gtol=1e-15, xtol=1e-15) +347 if kwargs.get('correlated_fit') is True: +348 +349 def chisqfunc_residuals(p): +350 return general_chisqfunc(p, y_f, p_f) +351 +352 
fit_result = scipy.optimize.least_squares(chisqfunc_residuals, fit_result.x, method='lm', ftol=1e-15, gtol=1e-15, xtol=1e-15)
 353
-354        def chisqfunc_residuals_uncorr(p):
-355            return general_chisqfunc_uncorr(p, y_f, p_f)
+354    chisquare = np.sum(fit_result.fun ** 2)
+355    assert np.isclose(chisquare, chisqfunc(fit_result.x), atol=1e-14)
 356
-357        fit_result = scipy.optimize.least_squares(chisqfunc_residuals_uncorr, x0, method='lm', ftol=1e-15, gtol=1e-15, xtol=1e-15)
-358    if kwargs.get('correlated_fit') is True:
-359
-360        def chisqfunc_residuals(p):
-361            return general_chisqfunc(p, y_f, p_f)
-362
-363        fit_result = scipy.optimize.least_squares(chisqfunc_residuals, fit_result.x, method='lm', ftol=1e-15, gtol=1e-15, xtol=1e-15)
-364
-365    chisquare = np.sum(fit_result.fun ** 2)
-366    assert np.isclose(chisquare, chisqfunc(fit_result.x), atol=1e-14)
-367
-368    output.iterations = fit_result.nfev
+357    output.iterations = fit_result.nfev
+358
+359    if not fit_result.success:
+360        raise Exception('The minimization procedure did not converge.')
+361
+362    if x_all.shape[-1] - n_parms > 0:
+363        output.chisquare = chisquare
+364        output.dof = x_all.shape[-1] - n_parms + len(loc_priors)
+365        output.chisquare_by_dof = output.chisquare / output.dof
+366        output.p_value = 1 - scipy.stats.chi2.cdf(output.chisquare, output.dof)
+367    else:
+368        output.chisquare_by_dof = float('nan')
 369
-370    if not fit_result.success:
-371        raise Exception('The minimization procedure did not converge.')
-372
-373    if x_all.shape[-1] - n_parms > 0:
-374        output.chisquare = chisquare
-375        output.dof = x_all.shape[-1] - n_parms + len(loc_priors)
-376        output.chisquare_by_dof = output.chisquare / output.dof
-377        output.p_value = 1 - scipy.stats.chi2.cdf(output.chisquare, output.dof)
-378    else:
-379        output.chisquare_by_dof = float('nan')
-380
-381    output.message = fit_result.message
-382    if not silent:
-383        print(fit_result.message)
-384        print('chisquare/d.o.f.:', output.chisquare_by_dof)
-385        print('fit parameters', fit_result.x)
-386
-387    def prepare_hat_matrix():
-388        hat_vector = []
-389        for key in key_ls:
-390            if (len(xd[key]) != 0):
-391                hat_vector.append(jacobian(funcd[key])(fit_result.x, xd[key]))
-392        hat_vector = [item for sublist in hat_vector for item in sublist]
-393        return hat_vector
-394
-395    if kwargs.get('expected_chisquare') is True:
-396        if kwargs.get('correlated_fit') is not True:
-397            W = np.diag(1 / np.asarray(dy_f))
-398            cov = covariance(y_all)
-399            hat_vector = prepare_hat_matrix()
-400            A = W @ hat_vector
-401            P_phi = A @ np.linalg.pinv(A.T @ A) @ A.T
-402            expected_chisquare = np.trace((np.identity(x_all.shape[-1]) - P_phi) @ W @ cov @ W)
-403            output.chisquare_by_expected_chisquare = output.chisquare / expected_chisquare
-404            if not silent:
-405                print('chisquare/expected_chisquare:', output.chisquare_by_expected_chisquare)
-406
-407    fitp = fit_result.x
-408
-409    try:
-410        hess = hessian(chisqfunc)(fitp)
-411    except TypeError:
-412        raise Exception("It is required to use autograd.numpy instead of numpy within fit functions, see the documentation for details.") from None
-413
-414    len_y = len(y_f)
+370    output.message = fit_result.message
+371    if not silent:
+372        print(fit_result.message)
+373        print('chisquare/d.o.f.:', output.chisquare_by_dof)
+374        print('fit parameters', fit_result.x)
+375
+376    def prepare_hat_matrix():
+377        hat_vector = []
+378        for key in key_ls:
+379            if (len(xd[key]) != 0):
+380                hat_vector.append(jacobian(funcd[key])(fit_result.x, xd[key]))
+381        hat_vector = [item for sublist in hat_vector for item in sublist]
+382        return hat_vector
+383
+384    if kwargs.get('expected_chisquare') is True:
+385        if kwargs.get('correlated_fit') is not True:
+386            W = np.diag(1 / np.asarray(dy_f))
+387            cov = covariance(y_all)
+388            hat_vector = prepare_hat_matrix()
+389            A = W @ hat_vector
+390            P_phi = A @ np.linalg.pinv(A.T @ A) @ A.T
+391            expected_chisquare = np.trace((np.identity(x_all.shape[-1]) - P_phi) @ W @ cov @ W)
+392            output.chisquare_by_expected_chisquare = output.chisquare / expected_chisquare
+393            if not silent:
+394                print('chisquare/expected_chisquare:', output.chisquare_by_expected_chisquare)
+395
+396    fitp = fit_result.x
+397
+398    try:
+399        hess = hessian(chisqfunc)(fitp)
+400    except TypeError:
+401        raise Exception("It is required to use autograd.numpy instead of numpy within fit functions, see the documentation for details.") from None
+402
+403    len_y = len(y_f)
+404
+405    def chisqfunc_compact(d):
+406        return anp.sum(general_chisqfunc(d[:n_parms], d[n_parms: n_parms + len_y], d[n_parms + len_y:]) ** 2)
+407
+408    jac_jac_y = hessian(chisqfunc_compact)(np.concatenate((fitp, y_f, p_f)))
+409
+410    # Compute hess^{-1} @ jac_jac_y[:n_parms, n_parms:] using LAPACK dgesv
+411    try:
+412        deriv_y = -scipy.linalg.solve(hess, jac_jac_y[:n_parms, n_parms:])
+413    except np.linalg.LinAlgError:
+414        raise Exception("Cannot invert hessian matrix.")
 415
-416    def chisqfunc_compact(d):
-417        return anp.sum(general_chisqfunc(d[:n_parms], d[n_parms: n_parms + len_y], d[n_parms + len_y:]) ** 2)
-418
-419    jac_jac_y = hessian(chisqfunc_compact)(np.concatenate((fitp, y_f, p_f)))
-420
-421    # Compute hess^{-1} @ jac_jac_y[:n_parms, n_parms:] using LAPACK dgesv
-422    try:
-423        deriv_y = -scipy.linalg.solve(hess, jac_jac_y[:n_parms, n_parms:])
-424    except np.linalg.LinAlgError:
-425        raise Exception("Cannot invert hessian matrix.")
-426
-427    result = []
-428    for i in range(n_parms):
-429        result.append(derived_observable(lambda x_all, **kwargs: (x_all[0] + np.finfo(np.float64).eps) / (y_all[0].value + np.finfo(np.float64).eps) * fitp[i], list(y_all) + loc_priors, man_grad=list(deriv_y[i])))
-430
-431    output.fit_parameters = result
-432
-433    # Hotelling t-squared p-value for correlated fits.
-434    if kwargs.get('correlated_fit') is True:
-435        n_cov = np.min(np.vectorize(lambda x_all: x_all.N)(y_all))
-436        output.t2_p_value = 1 - scipy.stats.f.cdf((n_cov - output.dof) / (output.dof * (n_cov - 1)) * output.chisquare,
-437                                                  output.dof, n_cov - output.dof)
-438
-439    if kwargs.get('resplot') is True:
-440        for key in key_ls:
-441            residual_plot(xd[key], yd[key], funcd[key], result, title=key)
-442
-443    if kwargs.get('qqplot') is True:
-444        for key in key_ls:
-445            qqplot(xd[key], yd[key], funcd[key], result, title=key)
-446
-447    return output
+416    result = []
+417    for i in range(n_parms):
+418        result.append(derived_observable(lambda x_all, **kwargs: (x_all[0] + np.finfo(np.float64).eps) / (y_all[0].value + np.finfo(np.float64).eps) * fitp[i], list(y_all) + loc_priors, man_grad=list(deriv_y[i])))
+419
+420    output.fit_parameters = result
+421
+422    # Hotelling t-squared p-value for correlated fits.
+423    if kwargs.get('correlated_fit') is True:
+424        n_cov = np.min(np.vectorize(lambda x_all: x_all.N)(y_all))
+425        output.t2_p_value = 1 - scipy.stats.f.cdf((n_cov - output.dof) / (output.dof * (n_cov - 1)) * output.chisquare,
+426                                                  output.dof, n_cov - output.dof)
+427
+428    if kwargs.get('resplot') is True:
+429        for key in key_ls:
+430            residual_plot(xd[key], yd[key], funcd[key], result, title=key)
+431
+432    if kwargs.get('qqplot') is True:
+433        for key in key_ls:
+434            qqplot(xd[key], yd[key], funcd[key], result, title=key)
+435
+436    return output
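For orientation, here is a minimal usage sketch of the code path above (not part of the diff; it assumes pyerrors and autograd are installed, and the ensemble name and synthetic data are purely illustrative):

```python
import numpy as np
import autograd.numpy as anp  # fit functions must use autograd.numpy
import pyerrors as pe

def func(a, x):
    return a[0] * anp.exp(-a[1] * x)

rng = np.random.default_rng(0)  # synthetic data for illustration
x = np.arange(1, 9)
y = [pe.pseudo_Obs(2.0 * np.exp(-0.3 * xi) + rng.normal(0, 0.02), 0.02, 'ens1') for xi in x]
[o.gamma_method() for o in y]  # error estimates are required before fitting

fit_res = pe.fits.least_squares(x, y, func, correlated_fit=True, initial_guess=[1.5, 0.2])
print(fit_res.chisquare_by_dof)  # set in the branch above
print(fit_res.t2_p_value)        # Hotelling t-squared p-value for correlated fits
```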
@@ -1573,208 +1561,208 @@ Parameters and information on the fitted result.
-450def total_least_squares(x, y, func, silent=False, **kwargs):
-451    r'''Performs a non-linear fit to y = func(x) and returns a list of Obs corresponding to the fit parameters.
-452
-453    Parameters
-454    ----------
-455    x : list
-456        list of Obs, or a tuple of lists of Obs
-457    y : list
-458        list of Obs. The dvalues of the Obs are used as x- and y-error for the fit.
-459    func : object
-460        func has to be of the form
-461
-462        ```python
-463        import autograd.numpy as anp
-464
-465        def func(a, x):
-466            return a[0] + a[1] * x + a[2] * anp.sinh(x)
-467        ```
-468
-469        For multiple x values func can be of the form
-470
-471        ```python
-472        def func(a, x):
-473            (x1, x2) = x
-474            return a[0] * x1 ** 2 + a[1] * x2
-475        ```
-476
-477        It is important that all numpy functions refer to autograd.numpy, otherwise the differentiation
-478        will not work.
-479    silent : bool, optional
-480        If true, all output to the console is omitted (default False).
-481    initial_guess : list
-482        can provide an initial guess for the input parameters. Relevant for non-linear
-483        fits with many parameters.
-484    expected_chisquare : bool
-485        If true prints the expected chisquare which is
-486        corrected by effects caused by correlated input data.
-487        This can take a while as the full correlation matrix
-488        has to be calculated (default False).
-489    num_grad : bool
-490        Use numerical differentiation instead of automatic differentiation to perform the error propagation (default False).
-491
-492    Notes
-493    -----
-494    Based on the orthogonal distance regression module of scipy.
-495
-496    Returns
-497    -------
-498    output : Fit_result
-499        Parameters and information on the fitted result.
-500    '''
-501
-502    output = Fit_result()
-503
-504    output.fit_function = func
+439def total_least_squares(x, y, func, silent=False, **kwargs):
+440    r'''Performs a non-linear fit to y = func(x) and returns a list of Obs corresponding to the fit parameters.
+441
+442    Parameters
+443    ----------
+444    x : list
+445        list of Obs, or a tuple of lists of Obs
+446    y : list
+447        list of Obs. The dvalues of the Obs are used as x- and y-error for the fit.
+448    func : object
+449        func has to be of the form
+450
+451        ```python
+452        import autograd.numpy as anp
+453
+454        def func(a, x):
+455            return a[0] + a[1] * x + a[2] * anp.sinh(x)
+456        ```
+457
+458        For multiple x values func can be of the form
+459
+460        ```python
+461        def func(a, x):
+462            (x1, x2) = x
+463            return a[0] * x1 ** 2 + a[1] * x2
+464        ```
+465
+466        It is important that all numpy functions refer to autograd.numpy, otherwise the differentiation
+467        will not work.
+468    silent : bool, optional
+469        If true, all output to the console is omitted (default False).
+470    initial_guess : list
+471        can provide an initial guess for the input parameters. Relevant for non-linear
+472        fits with many parameters.
+473    expected_chisquare : bool
+474        If true prints the expected chisquare which is
+475        corrected by effects caused by correlated input data.
+476        This can take a while as the full correlation matrix
+477        has to be calculated (default False).
+478    num_grad : bool
+479        Use numerical differentiation instead of automatic differentiation to perform the error propagation (default False).
+480
+481    Notes
+482    -----
+483    Based on the orthogonal distance regression module of scipy.
+484
+485    Returns
+486    -------
+487    output : Fit_result
+488        Parameters and information on the fitted result.
+489    '''
+490
+491    output = Fit_result()
+492
+493    output.fit_function = func
+494
+495    x = np.array(x)
+496
+497    x_shape = x.shape
+498
+499    if kwargs.get('num_grad') is True:
+500        jacobian = num_jacobian
+501        hessian = num_hessian
+502    else:
+503        jacobian = auto_jacobian
+504        hessian = auto_hessian
 505
-506    x = np.array(x)
-507
-508    x_shape = x.shape
-509
-510    if kwargs.get('num_grad') is True:
-511        jacobian = num_jacobian
-512        hessian = num_hessian
-513    else:
-514        jacobian = auto_jacobian
-515        hessian = auto_hessian
-516
-517    if not callable(func):
-518        raise TypeError('func has to be a function.')
-519
-520    for i in range(42):
-521        try:
-522            func(np.arange(i), x.T[0])
-523        except TypeError:
-524            continue
-525        except IndexError:
-526            continue
-527        else:
-528            break
-529    else:
-530        raise RuntimeError("Fit function is not valid.")
-531
-532    n_parms = i
-533    if not silent:
-534        print('Fit with', n_parms, 'parameter' + 's' * (n_parms > 1))
+506    if not callable(func):
+507        raise TypeError('func has to be a function.')
+508
+509    for i in range(42):
+510        try:
+511            func(np.arange(i), x.T[0])
+512        except TypeError:
+513            continue
+514        except IndexError:
+515            continue
+516        else:
+517            break
+518    else:
+519        raise RuntimeError("Fit function is not valid.")
+520
+521    n_parms = i
+522    if not silent:
+523        print('Fit with', n_parms, 'parameter' + 's' * (n_parms > 1))
+524
+525    x_f = np.vectorize(lambda o: o.value)(x)
+526    dx_f = np.vectorize(lambda o: o.dvalue)(x)
+527    y_f = np.array([o.value for o in y])
+528    dy_f = np.array([o.dvalue for o in y])
+529
+530    if np.any(np.asarray(dx_f) <= 0.0):
+531        raise Exception('No x errors available, run the gamma method first.')
+532
+533    if np.any(np.asarray(dy_f) <= 0.0):
+534        raise Exception('No y errors available, run the gamma method first.')
 535
-536    x_f = np.vectorize(lambda o: o.value)(x)
-537    dx_f = np.vectorize(lambda o: o.dvalue)(x)
-538    y_f = np.array([o.value for o in y])
-539    dy_f = np.array([o.dvalue for o in y])
-540
-541    if np.any(np.asarray(dx_f) <= 0.0):
-542        raise Exception('No x errors available, run the gamma method first.')
-543
-544    if np.any(np.asarray(dy_f) <= 0.0):
-545        raise Exception('No y errors available, run the gamma method first.')
-546
-547    if 'initial_guess' in kwargs:
-548        x0 = kwargs.get('initial_guess')
-549        if len(x0) != n_parms:
-550            raise Exception('Initial guess does not have the correct length: %d vs. %d' % (len(x0), n_parms))
-551    else:
-552        x0 = [1] * n_parms
-553
-554    data = RealData(x_f, y_f, sx=dx_f, sy=dy_f)
-555    model = Model(func)
-556    odr = ODR(data, model, x0, partol=np.finfo(np.float64).eps)
-557    odr.set_job(fit_type=0, deriv=1)
-558    out = odr.run()
-559
-560    output.residual_variance = out.res_var
+536    if 'initial_guess' in kwargs:
+537        x0 = kwargs.get('initial_guess')
+538        if len(x0) != n_parms:
+539            raise Exception('Initial guess does not have the correct length: %d vs. %d' % (len(x0), n_parms))
+540    else:
+541        x0 = [1] * n_parms
+542
+543    data = RealData(x_f, y_f, sx=dx_f, sy=dy_f)
+544    model = Model(func)
+545    odr = ODR(data, model, x0, partol=np.finfo(np.float64).eps)
+546    odr.set_job(fit_type=0, deriv=1)
+547    out = odr.run()
+548
+549    output.residual_variance = out.res_var
+550
+551    output.method = 'ODR'
+552
+553    output.message = out.stopreason
+554
+555    output.xplus = out.xplus
+556
+557    if not silent:
+558        print('Method: ODR')
+559        print(*out.stopreason)
+560        print('Residual variance:', output.residual_variance)
 561
-562    output.method = 'ODR'
-563
-564    output.message = out.stopreason
-565
-566    output.xplus = out.xplus
-567
-568    if not silent:
-569        print('Method: ODR')
-570        print(*out.stopreason)
-571        print('Residual variance:', output.residual_variance)
-572
-573    if out.info > 3:
-574        raise Exception('The minimization procedure did not converge.')
-575
-576    m = x_f.size
-577
-578    def odr_chisquare(p):
-579        model = func(p[:n_parms], p[n_parms:].reshape(x_shape))
-580        chisq = anp.sum(((y_f - model) / dy_f) ** 2) + anp.sum(((x_f - p[n_parms:].reshape(x_shape)) / dx_f) ** 2)
-581        return chisq
-582
-583    if kwargs.get('expected_chisquare') is True:
-584        W = np.diag(1 / np.asarray(np.concatenate((dy_f.ravel(), dx_f.ravel()))))
-585
-586        if kwargs.get('covariance') is not None:
-587            cov = kwargs.get('covariance')
-588        else:
-589            cov = covariance(np.concatenate((y, x.ravel())))
-590
-591        number_of_x_parameters = int(m / x_f.shape[-1])
-592
-593        old_jac = jacobian(func)(out.beta, out.xplus)
-594        fused_row1 = np.concatenate((old_jac, np.concatenate((number_of_x_parameters * [np.zeros(old_jac.shape)]), axis=0)))
-595        fused_row2 = np.concatenate((jacobian(lambda x, y: func(y, x))(out.xplus, out.beta).reshape(x_f.shape[-1], x_f.shape[-1] * number_of_x_parameters), np.identity(number_of_x_parameters * old_jac.shape[0])))
-596        new_jac = np.concatenate((fused_row1, fused_row2), axis=1)
+562    if out.info > 3:
+563        raise Exception('The minimization procedure did not converge.')
+564
+565    m = x_f.size
+566
+567    def odr_chisquare(p):
+568        model = func(p[:n_parms], p[n_parms:].reshape(x_shape))
+569        chisq = anp.sum(((y_f - model) / dy_f) ** 2) + anp.sum(((x_f - p[n_parms:].reshape(x_shape)) / dx_f) ** 2)
+570        return chisq
+571
+572    if kwargs.get('expected_chisquare') is True:
+573        W = np.diag(1 / np.asarray(np.concatenate((dy_f.ravel(), dx_f.ravel()))))
+574
+575        if kwargs.get('covariance') is not None:
+576            cov = kwargs.get('covariance')
+577        else:
+578            cov = covariance(np.concatenate((y, x.ravel())))
+579
+580        number_of_x_parameters = int(m / x_f.shape[-1])
+581
+582        old_jac = jacobian(func)(out.beta, out.xplus)
+583        fused_row1 = np.concatenate((old_jac, np.concatenate((number_of_x_parameters * [np.zeros(old_jac.shape)]), axis=0)))
+584        fused_row2 = np.concatenate((jacobian(lambda x, y: func(y, x))(out.xplus, out.beta).reshape(x_f.shape[-1], x_f.shape[-1] * number_of_x_parameters), np.identity(number_of_x_parameters * old_jac.shape[0])))
+585        new_jac = np.concatenate((fused_row1, fused_row2), axis=1)
+586
+587        A = W @ new_jac
+588        P_phi = A @ np.linalg.pinv(A.T @ A) @ A.T
+589        expected_chisquare = np.trace((np.identity(P_phi.shape[0]) - P_phi) @ W @ cov @ W)
+590        if expected_chisquare <= 0.0:
+591            warnings.warn("Negative expected_chisquare.", RuntimeWarning)
+592            expected_chisquare = np.abs(expected_chisquare)
+593        output.chisquare_by_expected_chisquare = odr_chisquare(np.concatenate((out.beta, out.xplus.ravel()))) / expected_chisquare
+594        if not silent:
+595            print('chisquare/expected_chisquare:',
+596                  output.chisquare_by_expected_chisquare)
 597
-598        A = W @ new_jac
-599        P_phi = A @ np.linalg.pinv(A.T @ A) @ A.T
-600        expected_chisquare = np.trace((np.identity(P_phi.shape[0]) - P_phi) @ W @ cov @ W)
-601        if expected_chisquare <= 0.0:
-602            warnings.warn("Negative expected_chisquare.", RuntimeWarning)
-603            expected_chisquare = np.abs(expected_chisquare)
-604        output.chisquare_by_expected_chisquare = odr_chisquare(np.concatenate((out.beta, out.xplus.ravel()))) / expected_chisquare
-605        if not silent:
-606            print('chisquare/expected_chisquare:',
-607                  output.chisquare_by_expected_chisquare)
+598    fitp = out.beta
+599    try:
+600        hess = hessian(odr_chisquare)(np.concatenate((fitp, out.xplus.ravel())))
+601    except TypeError:
+602        raise Exception("It is required to use autograd.numpy instead of numpy within fit functions, see the documentation for details.") from None
+603
+604    def odr_chisquare_compact_x(d):
+605        model = func(d[:n_parms], d[n_parms:n_parms + m].reshape(x_shape))
+606        chisq = anp.sum(((y_f - model) / dy_f) ** 2) + anp.sum(((d[n_parms + m:].reshape(x_shape) - d[n_parms:n_parms + m].reshape(x_shape)) / dx_f) ** 2)
+607        return chisq
 608
-609    fitp = out.beta
-610    try:
-611        hess = hessian(odr_chisquare)(np.concatenate((fitp, out.xplus.ravel())))
-612    except TypeError:
-613        raise Exception("It is required to use autograd.numpy instead of numpy within fit functions, see the documentation for details.") from None
-614
-615    def odr_chisquare_compact_x(d):
-616        model = func(d[:n_parms], d[n_parms:n_parms + m].reshape(x_shape))
-617        chisq = anp.sum(((y_f - model) / dy_f) ** 2) + anp.sum(((d[n_parms + m:].reshape(x_shape) - d[n_parms:n_parms + m].reshape(x_shape)) / dx_f) ** 2)
-618        return chisq
-619
-620    jac_jac_x = hessian(odr_chisquare_compact_x)(np.concatenate((fitp, out.xplus.ravel(), x_f.ravel())))
+609    jac_jac_x = hessian(odr_chisquare_compact_x)(np.concatenate((fitp, out.xplus.ravel(), x_f.ravel())))
+610
+611    # Compute hess^{-1} @ jac_jac_x[:n_parms + m, n_parms + m:] using LAPACK dgesv
+612    try:
+613        deriv_x = -scipy.linalg.solve(hess, jac_jac_x[:n_parms + m, n_parms + m:])
+614    except np.linalg.LinAlgError:
+615        raise Exception("Cannot invert hessian matrix.")
+616
+617    def odr_chisquare_compact_y(d):
+618        model = func(d[:n_parms], d[n_parms:n_parms + m].reshape(x_shape))
+619        chisq = anp.sum(((d[n_parms + m:] - model) / dy_f) ** 2) + anp.sum(((x_f - d[n_parms:n_parms + m].reshape(x_shape)) / dx_f) ** 2)
+620        return chisq
 621
-622    # Compute hess^{-1} @ jac_jac_x[:n_parms + m, n_parms + m:] using LAPACK dgesv
-623    try:
-624        deriv_x = -scipy.linalg.solve(hess, jac_jac_x[:n_parms + m, n_parms + m:])
-625    except np.linalg.LinAlgError:
-626        raise Exception("Cannot invert hessian matrix.")
-627
-628    def odr_chisquare_compact_y(d):
-629        model = func(d[:n_parms], d[n_parms:n_parms + m].reshape(x_shape))
-630        chisq = anp.sum(((d[n_parms + m:] - model) / dy_f) ** 2) + anp.sum(((x_f - d[n_parms:n_parms + m].reshape(x_shape)) / dx_f) ** 2)
-631        return chisq
-632
-633    jac_jac_y = hessian(odr_chisquare_compact_y)(np.concatenate((fitp, out.xplus.ravel(), y_f)))
-634
-635    # Compute hess^{-1} @ jac_jac_y[:n_parms + m, n_parms + m:] using LAPACK dgesv
-636    try:
-637        deriv_y = -scipy.linalg.solve(hess, jac_jac_y[:n_parms + m, n_parms + m:])
-638    except np.linalg.LinAlgError:
-639        raise Exception("Cannot invert hessian matrix.")
-640
-641    result = []
-642    for i in range(n_parms):
-643        result.append(derived_observable(lambda my_var, **kwargs: (my_var[0] + np.finfo(np.float64).eps) / (x.ravel()[0].value + np.finfo(np.float64).eps) * out.beta[i], list(x.ravel()) + list(y), man_grad=list(deriv_x[i]) + list(deriv_y[i])))
-644
-645    output.fit_parameters = result
-646
-647    output.odr_chisquare = odr_chisquare(np.concatenate((out.beta, out.xplus.ravel())))
-648    output.dof = x.shape[-1] - n_parms
-649    output.p_value = 1 - scipy.stats.chi2.cdf(output.odr_chisquare, output.dof)
-650
-651    return output
+622    jac_jac_y = hessian(odr_chisquare_compact_y)(np.concatenate((fitp, out.xplus.ravel(), y_f)))
+623
+624    # Compute hess^{-1} @ jac_jac_y[:n_parms + m, n_parms + m:] using LAPACK dgesv
+625    try:
+626        deriv_y = -scipy.linalg.solve(hess, jac_jac_y[:n_parms + m, n_parms + m:])
+627    except np.linalg.LinAlgError:
+628        raise Exception("Cannot invert hessian matrix.")
+629
+630    result = []
+631    for i in range(n_parms):
+632        result.append(derived_observable(lambda my_var, **kwargs: (my_var[0] + np.finfo(np.float64).eps) / (x.ravel()[0].value + np.finfo(np.float64).eps) * out.beta[i], list(x.ravel()) + list(y), man_grad=list(deriv_x[i]) + list(deriv_y[i])))
+633
+634    output.fit_parameters = result
+635
+636    output.odr_chisquare = odr_chisquare(np.concatenate((out.beta, out.xplus.ravel())))
+637    output.dof = x.shape[-1] - n_parms
+638    output.p_value = 1 - scipy.stats.chi2.cdf(output.odr_chisquare, output.dof)
+639
+640    return output
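As a usage illustration for the ODR routine above, a minimal sketch (not part of the diff; synthetic data and an illustrative ensemble name):

```python
import numpy as np
import pyerrors as pe

def func(a, x):
    return a[0] + a[1] * x  # plain arithmetic is autograd-safe

rng = np.random.default_rng(1)  # synthetic data for illustration
x = [pe.pseudo_Obs(xi + rng.normal(0, 0.01), 0.01, 'ens1') for xi in np.arange(1., 7.)]
y = [pe.pseudo_Obs(0.3 + 1.1 * xi.value + rng.normal(0, 0.05), 0.05, 'ens1') for xi in x]
for o in x + y:
    o.gamma_method()  # both x and y errors are checked above

out = pe.fits.total_least_squares(x, y, func)
print(out.fit_parameters)  # intercept and slope as Obs with propagated errors
```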
 
@@ -1848,35 +1836,35 @@ Parameters and information on the fitted result.
-654def fit_lin(x, y, **kwargs):
-655    """Performs a linear fit to y = n + m * x and returns two Obs n, m.
-656
-657    Parameters
-658    ----------
-659    x : list
-660        Can either be a list of floats in which case no x-error is assumed, or
-661        a list of Obs, where the dvalues of the Obs are used as x-error for the fit.
-662    y : list
-663        List of Obs, the dvalues of the Obs are used as y-error for the fit.
-664
-665    Returns
-666    -------
-667    fit_parameters : list[Obs]
-668        List of fitted observables.
-669    """
-670
-671    def f(a, x):
-672        y = a[0] + a[1] * x
-673        return y
-674
-675    if all(isinstance(n, Obs) for n in x):
-676        out = total_least_squares(x, y, f, **kwargs)
-677        return out.fit_parameters
-678    elif all(isinstance(n, float) or isinstance(n, int) for n in x) or isinstance(x, np.ndarray):
-679        out = least_squares(x, y, f, **kwargs)
-680        return out.fit_parameters
-681    else:
-682        raise TypeError('Unsupported types for x')
+643def fit_lin(x, y, **kwargs):
+644    """Performs a linear fit to y = n + m * x and returns two Obs n, m.
+645
+646    Parameters
+647    ----------
+648    x : list
+649        Can either be a list of floats in which case no x-error is assumed, or
+650        a list of Obs, where the dvalues of the Obs are used as x-error for the fit.
+651    y : list
+652        List of Obs, the dvalues of the Obs are used as y-error for the fit.
+653
+654    Returns
+655    -------
+656    fit_parameters : list[Obs]
+657        List of fitted observables.
+658    """
+659
+660    def f(a, x):
+661        y = a[0] + a[1] * x
+662        return y
+663
+664    if all(isinstance(n, Obs) for n in x):
+665        out = total_least_squares(x, y, f, **kwargs)
+666        return out.fit_parameters
+667    elif all(isinstance(n, float) or isinstance(n, int) for n in x) or isinstance(x, np.ndarray):
+668        out = least_squares(x, y, f, **kwargs)
+669        return out.fit_parameters
+670    else:
+671        raise TypeError('Unsupported types for x')
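A short sketch of the float branch (not part of the diff; synthetic data). Passing a list of Obs as x instead dispatches to total_least_squares:

```python
import pyerrors as pe

x = [1., 2., 3., 4.]  # plain floats, so fit_lin dispatches to least_squares
y = [pe.pseudo_Obs(0.9 + 0.5 * xi, 0.02, 'ens1') for xi in x]  # illustrative ensemble name
for o in y:
    o.gamma_method()

n, m = pe.fits.fit_lin(x, y)  # intercept n and slope m as Obs
print(n, m)
```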
 
@@ -1913,34 +1901,34 @@ List of fitted observables.
-685def qqplot(x, o_y, func, p, title=""):
-686    """Generates a quantile-quantile plot of the fit result which can be used to
-687       check if the residuals of the fit are gaussian distributed.
-688
-689    Returns
-690    -------
-691    None
-692    """
-693
-694    residuals = []
-695    for i_x, i_y in zip(x, o_y):
-696        residuals.append((i_y - func(p, i_x)) / i_y.dvalue)
-697    residuals = sorted(residuals)
-698    my_y = [o.value for o in residuals]
-699    probplot = scipy.stats.probplot(my_y)
-700    my_x = probplot[0][0]
-701    plt.figure(figsize=(8, 8 / 1.618))
-702    plt.errorbar(my_x, my_y, fmt='o')
-703    fit_start = my_x[0]
-704    fit_stop = my_x[-1]
-705    samples = np.arange(fit_start, fit_stop, 0.01)
-706    plt.plot(samples, samples, 'k--', zorder=11, label='Standard normal distribution')
-707    plt.plot(samples, probplot[1][0] * samples + probplot[1][1], zorder=10, label='Least squares fit, r=' + str(np.around(probplot[1][2], 3)), marker='', ls='-')
-708
-709    plt.xlabel('Theoretical quantiles')
-710    plt.ylabel('Ordered Values')
-711    plt.legend(title=title)
-712    plt.draw()
+674def qqplot(x, o_y, func, p, title=""):
+675    """Generates a quantile-quantile plot of the fit result which can be used to
+676       check if the residuals of the fit are Gaussian distributed.
+677
+678    Returns
+679    -------
+680    None
+681    """
+682
+683    residuals = []
+684    for i_x, i_y in zip(x, o_y):
+685        residuals.append((i_y - func(p, i_x)) / i_y.dvalue)
+686    residuals = sorted(residuals)
+687    my_y = [o.value for o in residuals]
+688    probplot = scipy.stats.probplot(my_y)
+689    my_x = probplot[0][0]
+690    plt.figure(figsize=(8, 8 / 1.618))
+691    plt.errorbar(my_x, my_y, fmt='o')
+692    fit_start = my_x[0]
+693    fit_stop = my_x[-1]
+694    samples = np.arange(fit_start, fit_stop, 0.01)
+695    plt.plot(samples, samples, 'k--', zorder=11, label='Standard normal distribution')
+696    plt.plot(samples, probplot[1][0] * samples + probplot[1][1], zorder=10, label='Least squares fit, r=' + str(np.around(probplot[1][2], 3)), marker='', ls='-')
+697
+698    plt.xlabel('Theoretical quantiles')
+699    plt.ylabel('Ordered Values')
+700    plt.legend(title=title)
+701    plt.draw()
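A sketch of how qqplot is typically called on a finished fit (not part of the diff; synthetic data, and plt.show() is needed since the function only calls plt.draw()):

```python
import numpy as np
import matplotlib.pyplot as plt
import pyerrors as pe

def func(a, x):
    return a[0] + a[1] * x

rng = np.random.default_rng(2)  # synthetic data for illustration
x = np.arange(1., 7.)
y = [pe.pseudo_Obs(1.0 + 0.5 * xi + rng.normal(0, 0.03), 0.03, 'ens1') for xi in x]
[o.gamma_method() for o in y]

fit_res = pe.fits.least_squares(x, y, func)
pe.fits.qqplot(x, y, func, fit_res.fit_parameters)
plt.show()
```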
 
@@ -1967,41 +1955,41 @@ List of fitted observables.
-715def residual_plot(x, y, func, fit_res, title=""):
-716    """Generates a plot which compares the fit to the data and displays the corresponding residuals
+704def residual_plot(x, y, func, fit_res, title=""):
+705    """Generates a plot which compares the fit to the data and displays the corresponding residuals
+706
+707    For uncorrelated data the residuals are expected to be distributed ~N(0,1).
+708
+709    Returns
+710    -------
+711    None
+712    """
+713    sorted_x = sorted(x)
+714    xstart = sorted_x[0] - 0.5 * (sorted_x[1] - sorted_x[0])
+715    xstop = sorted_x[-1] + 0.5 * (sorted_x[-1] - sorted_x[-2])
+716    x_samples = np.arange(xstart, xstop + 0.01, 0.01)
 717
-718    For uncorrelated data the residuals are expected to be distributed ~N(0,1).
-719
-720    Returns
-721    -------
-722    None
-723    """
-724    sorted_x = sorted(x)
-725    xstart = sorted_x[0] - 0.5 * (sorted_x[1] - sorted_x[0])
-726    xstop = sorted_x[-1] + 0.5 * (sorted_x[-1] - sorted_x[-2])
-727    x_samples = np.arange(xstart, xstop + 0.01, 0.01)
-728
-729    plt.figure(figsize=(8, 8 / 1.618))
-730    gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1], wspace=0.0, hspace=0.0)
-731    ax0 = plt.subplot(gs[0])
-732    ax0.errorbar(x, [o.value for o in y], yerr=[o.dvalue for o in y], ls='none', fmt='o', capsize=3, markersize=5, label='Data')
-733    ax0.plot(x_samples, func([o.value for o in fit_res], x_samples), label='Fit', zorder=10, ls='-', ms=0)
-734    ax0.set_xticklabels([])
-735    ax0.set_xlim([xstart, xstop])
-736    ax0.set_xticklabels([])
-737    ax0.legend(title=title)
-738
-739    residuals = (np.asarray([o.value for o in y]) - func([o.value for o in fit_res], np.asarray(x))) / np.asarray([o.dvalue for o in y])
-740    ax1 = plt.subplot(gs[1])
-741    ax1.plot(x, residuals, 'ko', ls='none', markersize=5)
-742    ax1.tick_params(direction='out')
-743    ax1.tick_params(axis="x", bottom=True, top=True, labelbottom=True)
-744    ax1.axhline(y=0.0, ls='--', color='k', marker=" ")
-745    ax1.fill_between(x_samples, -1.0, 1.0, alpha=0.1, facecolor='k')
-746    ax1.set_xlim([xstart, xstop])
-747    ax1.set_ylabel('Residuals')
-748    plt.subplots_adjust(wspace=None, hspace=None)
-749    plt.draw()
+718    plt.figure(figsize=(8, 8 / 1.618))
+719    gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1], wspace=0.0, hspace=0.0)
+720    ax0 = plt.subplot(gs[0])
+721    ax0.errorbar(x, [o.value for o in y], yerr=[o.dvalue for o in y], ls='none', fmt='o', capsize=3, markersize=5, label='Data')
+722    ax0.plot(x_samples, func([o.value for o in fit_res], x_samples), label='Fit', zorder=10, ls='-', ms=0)
+723    ax0.set_xticklabels([])
+724    ax0.set_xlim([xstart, xstop])
+725    ax0.set_xticklabels([])
+726    ax0.legend(title=title)
+727
+728    residuals = (np.asarray([o.value for o in y]) - func([o.value for o in fit_res], np.asarray(x))) / np.asarray([o.dvalue for o in y])
+729    ax1 = plt.subplot(gs[1])
+730    ax1.plot(x, residuals, 'ko', ls='none', markersize=5)
+731    ax1.tick_params(direction='out')
+732    ax1.tick_params(axis="x", bottom=True, top=True, labelbottom=True)
+733    ax1.axhline(y=0.0, ls='--', color='k', marker=" ")
+734    ax1.fill_between(x_samples, -1.0, 1.0, alpha=0.1, facecolor='k')
+735    ax1.set_xlim([xstart, xstop])
+736    ax1.set_ylabel('Residuals')
+737    plt.subplots_adjust(wspace=None, hspace=None)
+738    plt.draw()
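The residual plot is driven the same way; a self-contained sketch under the same assumptions (not part of the diff; synthetic data):

```python
import numpy as np
import matplotlib.pyplot as plt
import pyerrors as pe

def func(a, x):
    return a[0] + a[1] * x

rng = np.random.default_rng(3)  # synthetic data for illustration
x = np.arange(1., 7.)
y = [pe.pseudo_Obs(0.2 + 0.8 * xi + rng.normal(0, 0.05), 0.05, 'ens1') for xi in x]
[o.gamma_method() for o in y]

fit_res = pe.fits.least_squares(x, y, func)
pe.fits.residual_plot(x, y, func, fit_res.fit_parameters)  # residuals should scatter within the +-1 band
plt.show()
```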
 
@@ -2029,28 +2017,28 @@ List of fitted observables.
-752def error_band(x, func, beta):
-753    """Calculate the error band for an array of sample values x, for given fit function func with optimized parameters beta.
-754
-755    Returns
-756    -------
-757    err : np.array(Obs)
-758        Error band for an array of sample values x
-759    """
-760    cov = covariance(beta)
-761    if np.any(np.abs(cov - cov.T) > 1000 * np.finfo(np.float64).eps):
-762        warnings.warn("Covariance matrix is not symmetric within floating point precision", RuntimeWarning)
-763
-764    deriv = []
-765    for i, item in enumerate(x):
-766        deriv.append(np.array(egrad(func)([o.value for o in beta], item)))
-767
-768    err = []
-769    for i, item in enumerate(x):
-770        err.append(np.sqrt(deriv[i] @ cov @ deriv[i]))
-771    err = np.array(err)
-772
-773    return err
+741def error_band(x, func, beta):
+742    """Calculate the error band for an array of sample values x, for given fit function func with optimized parameters beta.
+743
+744    Returns
+745    -------
+746    err : np.array(Obs)
+747        Error band for an array of sample values x
+748    """
+749    cov = covariance(beta)
+750    if np.any(np.abs(cov - cov.T) > 1000 * np.finfo(np.float64).eps):
+751        warnings.warn("Covariance matrix is not symmetric within floating point precision", RuntimeWarning)
+752
+753    deriv = []
+754    for i, item in enumerate(x):
+755        deriv.append(np.array(egrad(func)([o.value for o in beta], item)))
+756
+757    err = []
+758    for i, item in enumerate(x):
+759        err.append(np.sqrt(deriv[i] @ cov @ deriv[i]))
+760    err = np.array(err)
+761
+762    return err
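A sketch of evaluating the band on a dense x grid (not part of the diff; synthetic data, and the fit parameters need error estimates before their covariance can be computed):

```python
import numpy as np
import pyerrors as pe

def func(a, x):
    return a[0] + a[1] * x

rng = np.random.default_rng(4)  # synthetic data for illustration
x = np.arange(1., 7.)
y = [pe.pseudo_Obs(0.2 + 0.8 * xi + rng.normal(0, 0.05), 0.05, 'ens1') for xi in x]
[o.gamma_method() for o in y]

beta = pe.fits.least_squares(x, y, func).fit_parameters
[p.gamma_method() for p in beta]  # covariance(beta) requires error estimates

x_samples = np.arange(1., 6., 0.1)
err = pe.fits.error_band(x_samples, func, beta)  # one-sigma band from the parameter covariance
```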
 
@@ -2077,48 +2065,48 @@ Error band for an array of sample values x
-776def ks_test(objects=None):
-777    """Performs a Kolmogorov–Smirnov test for the p-values of all fit object.
-778
-779    Parameters
-780    ----------
-781    objects : list
-782        List of fit results to include in the analysis (optional).
-783
-784    Returns
-785    -------
-786    None
-787    """
-788
-789    if objects is None:
-790        obs_list = []
-791        for obj in gc.get_objects():
-792            if isinstance(obj, Fit_result):
-793                obs_list.append(obj)
-794    else:
-795        obs_list = objects
+765def ks_test(objects=None):
+766    """Performs a Kolmogorov–Smirnov test for the p-values of all fit object.
+767
+768    Parameters
+769    ----------
+770    objects : list
+771        List of fit results to include in the analysis (optional).
+772
+773    Returns
+774    -------
+775    None
+776    """
+777
+778    if objects is None:
+779        obs_list = []
+780        for obj in gc.get_objects():
+781            if isinstance(obj, Fit_result):
+782                obs_list.append(obj)
+783    else:
+784        obs_list = objects
+785
+786    p_values = [o.p_value for o in obs_list]
+787
+788    bins = len(p_values)
+789    x = np.arange(0, 1.001, 0.001)
+790    plt.plot(x, x, 'k', zorder=1)
+791    plt.xlim(0, 1)
+792    plt.ylim(0, 1)
+793    plt.xlabel('p-value')
+794    plt.ylabel('Cumulative probability')
+795    plt.title(str(bins) + ' p-values')
 796
-797    p_values = [o.p_value for o in obs_list]
-798
-799    bins = len(p_values)
-800    x = np.arange(0, 1.001, 0.001)
-801    plt.plot(x, x, 'k', zorder=1)
-802    plt.xlim(0, 1)
-803    plt.ylim(0, 1)
-804    plt.xlabel('p-value')
-805    plt.ylabel('Cumulative probability')
-806    plt.title(str(bins) + ' p-values')
-807
-808    n = np.arange(1, bins + 1) / np.float64(bins)
-809    Xs = np.sort(p_values)
-810    plt.step(Xs, n)
-811    diffs = n - Xs
-812    loc_max_diff = np.argmax(np.abs(diffs))
-813    loc = Xs[loc_max_diff]
-814    plt.annotate('', xy=(loc, loc), xytext=(loc, loc + diffs[loc_max_diff]), arrowprops=dict(arrowstyle='<->', shrinkA=0, shrinkB=0))
-815    plt.draw()
-816
-817    print(scipy.stats.kstest(p_values, 'uniform'))
+797    n = np.arange(1, bins + 1) / np.float64(bins)
+798    Xs = np.sort(p_values)
+799    plt.step(Xs, n)
+800    diffs = n - Xs
+801    loc_max_diff = np.argmax(np.abs(diffs))
+802    loc = Xs[loc_max_diff]
+803    plt.annotate('', xy=(loc, loc), xytext=(loc, loc + diffs[loc_max_diff]), arrowprops=dict(arrowstyle='<->', shrinkA=0, shrinkB=0))
+804    plt.draw()
+805
+806    print(scipy.stats.kstest(p_values, 'uniform'))
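A sketch of feeding several fit results to the test (not part of the diff; synthetic, independent ensembles so the p-values should be roughly uniform):

```python
import numpy as np
import matplotlib.pyplot as plt
import pyerrors as pe

def func(a, x):
    return a[0] + a[1] * x

rng = np.random.default_rng(5)  # synthetic data for illustration
fits = []
for k in range(10):
    x = np.arange(1., 7.)
    y = [pe.pseudo_Obs(0.2 + 0.8 * xi + rng.normal(0, 0.05), 0.05, 'ens%d' % k) for xi in x]
    [o.gamma_method() for o in y]
    fits.append(pe.fits.least_squares(x, y, func))

pe.fits.ks_test(objects=fits)  # compares the empirical p-value distribution to uniform
plt.show()
```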