From 667d75c21e278a3392b33f12f249f6e636361034 Mon Sep 17 00:00:00 2001 From: fjosw Date: Wed, 8 Mar 2023 16:54:09 +0000 Subject: [PATCH] Documentation updated --- docs/pyerrors/fits.html | 1562 +++++++++++++++++++-------------------- 1 file changed, 779 insertions(+), 783 deletions(-) diff --git a/docs/pyerrors/fits.html b/docs/pyerrors/fits.html index 17854ff3..28f67466 100644 --- a/docs/pyerrors/fits.html +++ b/docs/pyerrors/fits.html @@ -312,10 +312,10 @@ 204 dy_f = [o.dvalue for o in y_all] 205 206 if len(x_all.shape) > 2: -207 raise Exception('Unknown format for x values') +207 raise ValueError("Unknown format for x values") 208 209 if np.any(np.asarray(dy_f) <= 0.0): -210 raise Exception('No y errors available, run the gamma method first.') +210 raise Exception("No y errors available, run the gamma method first.") 211 212 # number of fit parameters 213 n_parms_ls = [] @@ -384,7 +384,7 @@ 276 p_f = [o.value for o in loc_priors] 277 dp_f = [o.dvalue for o in loc_priors] 278 if np.any(np.asarray(dp_f) <= 0.0): -279 raise Exception('No prior errors available, run the gamma method first.') +279 raise Exception("No prior errors available, run the gamma method first.") 280 else: 281 p_f = dp_f = np.array([]) 282 prior_mask = [] @@ -393,7 +393,7 @@ 285 if 'initial_guess' in kwargs: 286 x0 = kwargs.get('initial_guess') 287 if len(x0) != n_parms: -288 raise Exception('Initial guess does not have the correct length: %d vs. %d' % (len(x0), n_parms)) +288 raise ValueError('Initial guess does not have the correct length: %d vs. %d' % (len(x0), n_parms)) 289 else: 290 x0 = [0.1] * n_parms 291 @@ -512,427 +512,425 @@ 404 print('chisquare/expected_chisquare:', output.chisquare_by_expected_chisquare) 405 406 fitp = fit_result.x -407 if np.any(np.asarray(dy_f) <= 0.0): -408 raise Exception('No y errors available, run the gamma method first.') -409 -410 try: -411 hess = hessian(chisqfunc)(fitp) -412 except TypeError: -413 raise Exception("It is required to use autograd.numpy instead of numpy within fit functions, see the documentation for details.") from None +407 +408 try: +409 hess = hessian(chisqfunc)(fitp) +410 except TypeError: +411 raise Exception("It is required to use autograd.numpy instead of numpy within fit functions, see the documentation for details.") from None +412 +413 len_y = len(y_f) 414 -415 len_y = len(y_f) -416 -417 def chisqfunc_compact(d): -418 return anp.sum(general_chisqfunc(d[:n_parms], d[n_parms: n_parms + len_y], d[n_parms + len_y:]) ** 2) +415 def chisqfunc_compact(d): +416 return anp.sum(general_chisqfunc(d[:n_parms], d[n_parms: n_parms + len_y], d[n_parms + len_y:]) ** 2) +417 +418 jac_jac_y = hessian(chisqfunc_compact)(np.concatenate((fitp, y_f, p_f))) 419 -420 jac_jac_y = hessian(chisqfunc_compact)(np.concatenate((fitp, y_f, p_f))) -421 -422 # Compute hess^{-1} @ jac_jac_y[:n_parms + m, n_parms + m:] using LAPACK dgesv -423 try: -424 deriv_y = -scipy.linalg.solve(hess, jac_jac_y[:n_parms, n_parms:]) -425 except np.linalg.LinAlgError: -426 raise Exception("Cannot invert hessian matrix.") -427 -428 result = [] -429 for i in range(n_parms): -430 result.append(derived_observable(lambda x_all, **kwargs: (x_all[0] + np.finfo(np.float64).eps) / (y_all[0].value + np.finfo(np.float64).eps) * fitp[i], list(y_all) + loc_priors, man_grad=list(deriv_y[i]))) +420 # Compute hess^{-1} @ jac_jac_y[:n_parms + m, n_parms + m:] using LAPACK dgesv +421 try: +422 deriv_y = -scipy.linalg.solve(hess, jac_jac_y[:n_parms, n_parms:]) +423 except np.linalg.LinAlgError: +424 raise Exception("Cannot 
invert hessian matrix.") +425 +426 result = [] +427 for i in range(n_parms): +428 result.append(derived_observable(lambda x_all, **kwargs: (x_all[0] + np.finfo(np.float64).eps) / (y_all[0].value + np.finfo(np.float64).eps) * fitp[i], list(y_all) + loc_priors, man_grad=list(deriv_y[i]))) +429 +430 output.fit_parameters = result 431 -432 output.fit_parameters = result -433 -434 # Hotelling t-squared p-value for correlated fits. -435 if kwargs.get('correlated_fit') is True: -436 n_cov = np.min(np.vectorize(lambda x_all: x_all.N)(y_all)) -437 output.t2_p_value = 1 - scipy.stats.f.cdf((n_cov - output.dof) / (output.dof * (n_cov - 1)) * output.chisquare, -438 output.dof, n_cov - output.dof) -439 -440 if kwargs.get('resplot') is True: -441 for key in key_ls: -442 residual_plot(xd[key], yd[key], funcd[key], result, title=key) -443 -444 if kwargs.get('qqplot') is True: -445 for key in key_ls: -446 qqplot(xd[key], yd[key], funcd[key], result, title=key) +432 # Hotelling t-squared p-value for correlated fits. +433 if kwargs.get('correlated_fit') is True: +434 n_cov = np.min(np.vectorize(lambda x_all: x_all.N)(y_all)) +435 output.t2_p_value = 1 - scipy.stats.f.cdf((n_cov - output.dof) / (output.dof * (n_cov - 1)) * output.chisquare, +436 output.dof, n_cov - output.dof) +437 +438 if kwargs.get('resplot') is True: +439 for key in key_ls: +440 residual_plot(xd[key], yd[key], funcd[key], result, title=key) +441 +442 if kwargs.get('qqplot') is True: +443 for key in key_ls: +444 qqplot(xd[key], yd[key], funcd[key], result, title=key) +445 +446 return output 447 -448 return output -449 -450 -451def total_least_squares(x, y, func, silent=False, **kwargs): -452 r'''Performs a non-linear fit to y = func(x) and returns a list of Obs corresponding to the fit parameters. -453 -454 Parameters -455 ---------- -456 x : list -457 list of Obs, or a tuple of lists of Obs -458 y : list -459 list of Obs. The dvalues of the Obs are used as x- and yerror for the fit. -460 func : object -461 func has to be of the form -462 -463 ```python -464 import autograd.numpy as anp -465 -466 def func(a, x): -467 return a[0] + a[1] * x + a[2] * anp.sinh(x) -468 ``` +448 +449def total_least_squares(x, y, func, silent=False, **kwargs): +450 r'''Performs a non-linear fit to y = func(x) and returns a list of Obs corresponding to the fit parameters. +451 +452 Parameters +453 ---------- +454 x : list +455 list of Obs, or a tuple of lists of Obs +456 y : list +457 list of Obs. The dvalues of the Obs are used as x- and yerror for the fit. +458 func : object +459 func has to be of the form +460 +461 ```python +462 import autograd.numpy as anp +463 +464 def func(a, x): +465 return a[0] + a[1] * x + a[2] * anp.sinh(x) +466 ``` +467 +468 For multiple x values func can be of the form 469 -470 For multiple x values func can be of the form -471 -472 ```python -473 def func(a, x): -474 (x1, x2) = x -475 return a[0] * x1 ** 2 + a[1] * x2 -476 ``` -477 -478 It is important that all numpy functions refer to autograd.numpy, otherwise the differentiation -479 will not work. -480 silent : bool, optional -481 If true all output to the console is omitted (default False). -482 initial_guess : list -483 can provide an initial guess for the input parameters. Relevant for non-linear -484 fits with many parameters. -485 expected_chisquare : bool -486 If true prints the expected chisquare which is -487 corrected by effects caused by correlated input data. -488 This can take a while as the full correlation matrix -489 has to be calculated (default False). 
-490 num_grad : bool -491 Use numerical differentation instead of automatic differentiation to perform the error propagation (default False). -492 -493 Notes -494 ----- -495 Based on the orthogonal distance regression module of scipy. -496 -497 Returns -498 ------- -499 output : Fit_result -500 Parameters and information on the fitted result. -501 ''' +470 ```python +471 def func(a, x): +472 (x1, x2) = x +473 return a[0] * x1 ** 2 + a[1] * x2 +474 ``` +475 +476 It is important that all numpy functions refer to autograd.numpy, otherwise the differentiation +477 will not work. +478 silent : bool, optional +479 If true all output to the console is omitted (default False). +480 initial_guess : list +481 can provide an initial guess for the input parameters. Relevant for non-linear +482 fits with many parameters. +483 expected_chisquare : bool +484 If true prints the expected chisquare which is +485 corrected by effects caused by correlated input data. +486 This can take a while as the full correlation matrix +487 has to be calculated (default False). +488 num_grad : bool +489 Use numerical differentation instead of automatic differentiation to perform the error propagation (default False). +490 +491 Notes +492 ----- +493 Based on the orthogonal distance regression module of scipy. +494 +495 Returns +496 ------- +497 output : Fit_result +498 Parameters and information on the fitted result. +499 ''' +500 +501 output = Fit_result() 502 -503 output = Fit_result() +503 output.fit_function = func 504 -505 output.fit_function = func +505 x = np.array(x) 506 -507 x = np.array(x) +507 x_shape = x.shape 508 -509 x_shape = x.shape -510 -511 if kwargs.get('num_grad') is True: -512 jacobian = num_jacobian -513 hessian = num_hessian -514 else: -515 jacobian = auto_jacobian -516 hessian = auto_hessian -517 -518 if not callable(func): -519 raise TypeError('func has to be a function.') -520 -521 for i in range(42): -522 try: -523 func(np.arange(i), x.T[0]) -524 except TypeError: +509 if kwargs.get('num_grad') is True: +510 jacobian = num_jacobian +511 hessian = num_hessian +512 else: +513 jacobian = auto_jacobian +514 hessian = auto_hessian +515 +516 if not callable(func): +517 raise TypeError('func has to be a function.') +518 +519 for i in range(42): +520 try: +521 func(np.arange(i), x.T[0]) +522 except TypeError: +523 continue +524 except IndexError: 525 continue -526 except IndexError: -527 continue -528 else: -529 break -530 else: -531 raise RuntimeError("Fit function is not valid.") -532 -533 n_parms = i -534 if not silent: -535 print('Fit with', n_parms, 'parameter' + 's' * (n_parms > 1)) -536 -537 x_f = np.vectorize(lambda o: o.value)(x) -538 dx_f = np.vectorize(lambda o: o.dvalue)(x) -539 y_f = np.array([o.value for o in y]) -540 dy_f = np.array([o.dvalue for o in y]) -541 -542 if np.any(np.asarray(dx_f) <= 0.0): -543 raise Exception('No x errors available, run the gamma method first.') -544 -545 if np.any(np.asarray(dy_f) <= 0.0): -546 raise Exception('No y errors available, run the gamma method first.') -547 -548 if 'initial_guess' in kwargs: -549 x0 = kwargs.get('initial_guess') -550 if len(x0) != n_parms: -551 raise Exception('Initial guess does not have the correct length: %d vs. 
%d' % (len(x0), n_parms)) -552 else: -553 x0 = [1] * n_parms -554 -555 data = RealData(x_f, y_f, sx=dx_f, sy=dy_f) -556 model = Model(func) -557 odr = ODR(data, model, x0, partol=np.finfo(np.float64).eps) -558 odr.set_job(fit_type=0, deriv=1) -559 out = odr.run() +526 else: +527 break +528 else: +529 raise RuntimeError("Fit function is not valid.") +530 +531 n_parms = i +532 if not silent: +533 print('Fit with', n_parms, 'parameter' + 's' * (n_parms > 1)) +534 +535 x_f = np.vectorize(lambda o: o.value)(x) +536 dx_f = np.vectorize(lambda o: o.dvalue)(x) +537 y_f = np.array([o.value for o in y]) +538 dy_f = np.array([o.dvalue for o in y]) +539 +540 if np.any(np.asarray(dx_f) <= 0.0): +541 raise Exception('No x errors available, run the gamma method first.') +542 +543 if np.any(np.asarray(dy_f) <= 0.0): +544 raise Exception('No y errors available, run the gamma method first.') +545 +546 if 'initial_guess' in kwargs: +547 x0 = kwargs.get('initial_guess') +548 if len(x0) != n_parms: +549 raise Exception('Initial guess does not have the correct length: %d vs. %d' % (len(x0), n_parms)) +550 else: +551 x0 = [1] * n_parms +552 +553 data = RealData(x_f, y_f, sx=dx_f, sy=dy_f) +554 model = Model(func) +555 odr = ODR(data, model, x0, partol=np.finfo(np.float64).eps) +556 odr.set_job(fit_type=0, deriv=1) +557 out = odr.run() +558 +559 output.residual_variance = out.res_var 560 -561 output.residual_variance = out.res_var +561 output.method = 'ODR' 562 -563 output.method = 'ODR' +563 output.message = out.stopreason 564 -565 output.message = out.stopreason +565 output.xplus = out.xplus 566 -567 output.xplus = out.xplus -568 -569 if not silent: -570 print('Method: ODR') -571 print(*out.stopreason) -572 print('Residual variance:', output.residual_variance) -573 -574 if out.info > 3: -575 raise Exception('The minimization procedure did not converge.') +567 if not silent: +568 print('Method: ODR') +569 print(*out.stopreason) +570 print('Residual variance:', output.residual_variance) +571 +572 if out.info > 3: +573 raise Exception('The minimization procedure did not converge.') +574 +575 m = x_f.size 576 -577 m = x_f.size -578 -579 def odr_chisquare(p): -580 model = func(p[:n_parms], p[n_parms:].reshape(x_shape)) -581 chisq = anp.sum(((y_f - model) / dy_f) ** 2) + anp.sum(((x_f - p[n_parms:].reshape(x_shape)) / dx_f) ** 2) -582 return chisq -583 -584 if kwargs.get('expected_chisquare') is True: -585 W = np.diag(1 / np.asarray(np.concatenate((dy_f.ravel(), dx_f.ravel())))) -586 -587 if kwargs.get('covariance') is not None: -588 cov = kwargs.get('covariance') -589 else: -590 cov = covariance(np.concatenate((y, x.ravel()))) +577 def odr_chisquare(p): +578 model = func(p[:n_parms], p[n_parms:].reshape(x_shape)) +579 chisq = anp.sum(((y_f - model) / dy_f) ** 2) + anp.sum(((x_f - p[n_parms:].reshape(x_shape)) / dx_f) ** 2) +580 return chisq +581 +582 if kwargs.get('expected_chisquare') is True: +583 W = np.diag(1 / np.asarray(np.concatenate((dy_f.ravel(), dx_f.ravel())))) +584 +585 if kwargs.get('covariance') is not None: +586 cov = kwargs.get('covariance') +587 else: +588 cov = covariance(np.concatenate((y, x.ravel()))) +589 +590 number_of_x_parameters = int(m / x_f.shape[-1]) 591 -592 number_of_x_parameters = int(m / x_f.shape[-1]) -593 -594 old_jac = jacobian(func)(out.beta, out.xplus) -595 fused_row1 = np.concatenate((old_jac, np.concatenate((number_of_x_parameters * [np.zeros(old_jac.shape)]), axis=0))) -596 fused_row2 = np.concatenate((jacobian(lambda x, y: func(y, x))(out.xplus, 
out.beta).reshape(x_f.shape[-1], x_f.shape[-1] * number_of_x_parameters), np.identity(number_of_x_parameters * old_jac.shape[0]))) -597 new_jac = np.concatenate((fused_row1, fused_row2), axis=1) -598 -599 A = W @ new_jac -600 P_phi = A @ np.linalg.pinv(A.T @ A) @ A.T -601 expected_chisquare = np.trace((np.identity(P_phi.shape[0]) - P_phi) @ W @ cov @ W) -602 if expected_chisquare <= 0.0: -603 warnings.warn("Negative expected_chisquare.", RuntimeWarning) -604 expected_chisquare = np.abs(expected_chisquare) -605 output.chisquare_by_expected_chisquare = odr_chisquare(np.concatenate((out.beta, out.xplus.ravel()))) / expected_chisquare -606 if not silent: -607 print('chisquare/expected_chisquare:', -608 output.chisquare_by_expected_chisquare) -609 -610 fitp = out.beta -611 try: -612 hess = hessian(odr_chisquare)(np.concatenate((fitp, out.xplus.ravel()))) -613 except TypeError: -614 raise Exception("It is required to use autograd.numpy instead of numpy within fit functions, see the documentation for details.") from None -615 -616 def odr_chisquare_compact_x(d): -617 model = func(d[:n_parms], d[n_parms:n_parms + m].reshape(x_shape)) -618 chisq = anp.sum(((y_f - model) / dy_f) ** 2) + anp.sum(((d[n_parms + m:].reshape(x_shape) - d[n_parms:n_parms + m].reshape(x_shape)) / dx_f) ** 2) -619 return chisq +592 old_jac = jacobian(func)(out.beta, out.xplus) +593 fused_row1 = np.concatenate((old_jac, np.concatenate((number_of_x_parameters * [np.zeros(old_jac.shape)]), axis=0))) +594 fused_row2 = np.concatenate((jacobian(lambda x, y: func(y, x))(out.xplus, out.beta).reshape(x_f.shape[-1], x_f.shape[-1] * number_of_x_parameters), np.identity(number_of_x_parameters * old_jac.shape[0]))) +595 new_jac = np.concatenate((fused_row1, fused_row2), axis=1) +596 +597 A = W @ new_jac +598 P_phi = A @ np.linalg.pinv(A.T @ A) @ A.T +599 expected_chisquare = np.trace((np.identity(P_phi.shape[0]) - P_phi) @ W @ cov @ W) +600 if expected_chisquare <= 0.0: +601 warnings.warn("Negative expected_chisquare.", RuntimeWarning) +602 expected_chisquare = np.abs(expected_chisquare) +603 output.chisquare_by_expected_chisquare = odr_chisquare(np.concatenate((out.beta, out.xplus.ravel()))) / expected_chisquare +604 if not silent: +605 print('chisquare/expected_chisquare:', +606 output.chisquare_by_expected_chisquare) +607 +608 fitp = out.beta +609 try: +610 hess = hessian(odr_chisquare)(np.concatenate((fitp, out.xplus.ravel()))) +611 except TypeError: +612 raise Exception("It is required to use autograd.numpy instead of numpy within fit functions, see the documentation for details.") from None +613 +614 def odr_chisquare_compact_x(d): +615 model = func(d[:n_parms], d[n_parms:n_parms + m].reshape(x_shape)) +616 chisq = anp.sum(((y_f - model) / dy_f) ** 2) + anp.sum(((d[n_parms + m:].reshape(x_shape) - d[n_parms:n_parms + m].reshape(x_shape)) / dx_f) ** 2) +617 return chisq +618 +619 jac_jac_x = hessian(odr_chisquare_compact_x)(np.concatenate((fitp, out.xplus.ravel(), x_f.ravel()))) 620 -621 jac_jac_x = hessian(odr_chisquare_compact_x)(np.concatenate((fitp, out.xplus.ravel(), x_f.ravel()))) -622 -623 # Compute hess^{-1} @ jac_jac_x[:n_parms + m, n_parms + m:] using LAPACK dgesv -624 try: -625 deriv_x = -scipy.linalg.solve(hess, jac_jac_x[:n_parms + m, n_parms + m:]) -626 except np.linalg.LinAlgError: -627 raise Exception("Cannot invert hessian matrix.") -628 -629 def odr_chisquare_compact_y(d): -630 model = func(d[:n_parms], d[n_parms:n_parms + m].reshape(x_shape)) -631 chisq = anp.sum(((d[n_parms + m:] - model) / dy_f) ** 2) + 
anp.sum(((x_f - d[n_parms:n_parms + m].reshape(x_shape)) / dx_f) ** 2) -632 return chisq +621 # Compute hess^{-1} @ jac_jac_x[:n_parms + m, n_parms + m:] using LAPACK dgesv +622 try: +623 deriv_x = -scipy.linalg.solve(hess, jac_jac_x[:n_parms + m, n_parms + m:]) +624 except np.linalg.LinAlgError: +625 raise Exception("Cannot invert hessian matrix.") +626 +627 def odr_chisquare_compact_y(d): +628 model = func(d[:n_parms], d[n_parms:n_parms + m].reshape(x_shape)) +629 chisq = anp.sum(((d[n_parms + m:] - model) / dy_f) ** 2) + anp.sum(((x_f - d[n_parms:n_parms + m].reshape(x_shape)) / dx_f) ** 2) +630 return chisq +631 +632 jac_jac_y = hessian(odr_chisquare_compact_y)(np.concatenate((fitp, out.xplus.ravel(), y_f))) 633 -634 jac_jac_y = hessian(odr_chisquare_compact_y)(np.concatenate((fitp, out.xplus.ravel(), y_f))) -635 -636 # Compute hess^{-1} @ jac_jac_y[:n_parms + m, n_parms + m:] using LAPACK dgesv -637 try: -638 deriv_y = -scipy.linalg.solve(hess, jac_jac_y[:n_parms + m, n_parms + m:]) -639 except np.linalg.LinAlgError: -640 raise Exception("Cannot invert hessian matrix.") -641 -642 result = [] -643 for i in range(n_parms): -644 result.append(derived_observable(lambda my_var, **kwargs: (my_var[0] + np.finfo(np.float64).eps) / (x.ravel()[0].value + np.finfo(np.float64).eps) * out.beta[i], list(x.ravel()) + list(y), man_grad=list(deriv_x[i]) + list(deriv_y[i]))) +634 # Compute hess^{-1} @ jac_jac_y[:n_parms + m, n_parms + m:] using LAPACK dgesv +635 try: +636 deriv_y = -scipy.linalg.solve(hess, jac_jac_y[:n_parms + m, n_parms + m:]) +637 except np.linalg.LinAlgError: +638 raise Exception("Cannot invert hessian matrix.") +639 +640 result = [] +641 for i in range(n_parms): +642 result.append(derived_observable(lambda my_var, **kwargs: (my_var[0] + np.finfo(np.float64).eps) / (x.ravel()[0].value + np.finfo(np.float64).eps) * out.beta[i], list(x.ravel()) + list(y), man_grad=list(deriv_x[i]) + list(deriv_y[i]))) +643 +644 output.fit_parameters = result 645 -646 output.fit_parameters = result -647 -648 output.odr_chisquare = odr_chisquare(np.concatenate((out.beta, out.xplus.ravel()))) -649 output.dof = x.shape[-1] - n_parms -650 output.p_value = 1 - scipy.stats.chi2.cdf(output.odr_chisquare, output.dof) +646 output.odr_chisquare = odr_chisquare(np.concatenate((out.beta, out.xplus.ravel()))) +647 output.dof = x.shape[-1] - n_parms +648 output.p_value = 1 - scipy.stats.chi2.cdf(output.odr_chisquare, output.dof) +649 +650 return output 651 -652 return output -653 -654 -655def fit_lin(x, y, **kwargs): -656 """Performs a linear fit to y = n + m * x and returns two Obs n, m. -657 -658 Parameters -659 ---------- -660 x : list -661 Can either be a list of floats in which case no xerror is assumed, or -662 a list of Obs, where the dvalues of the Obs are used as xerror for the fit. -663 y : list -664 List of Obs, the dvalues of the Obs are used as yerror for the fit. -665 -666 Returns -667 ------- -668 fit_parameters : list[Obs] -669 LIist of fitted observables. 
-670 """ -671 -672 def f(a, x): -673 y = a[0] + a[1] * x -674 return y -675 -676 if all(isinstance(n, Obs) for n in x): -677 out = total_least_squares(x, y, f, **kwargs) -678 return out.fit_parameters -679 elif all(isinstance(n, float) or isinstance(n, int) for n in x) or isinstance(x, np.ndarray): -680 out = least_squares(x, y, f, **kwargs) -681 return out.fit_parameters -682 else: -683 raise TypeError('Unsupported types for x') -684 -685 -686def qqplot(x, o_y, func, p, title=""): -687 """Generates a quantile-quantile plot of the fit result which can be used to -688 check if the residuals of the fit are gaussian distributed. -689 -690 Returns -691 ------- -692 None -693 """ -694 -695 residuals = [] -696 for i_x, i_y in zip(x, o_y): -697 residuals.append((i_y - func(p, i_x)) / i_y.dvalue) -698 residuals = sorted(residuals) -699 my_y = [o.value for o in residuals] -700 probplot = scipy.stats.probplot(my_y) -701 my_x = probplot[0][0] -702 plt.figure(figsize=(8, 8 / 1.618)) -703 plt.errorbar(my_x, my_y, fmt='o') -704 fit_start = my_x[0] -705 fit_stop = my_x[-1] -706 samples = np.arange(fit_start, fit_stop, 0.01) -707 plt.plot(samples, samples, 'k--', zorder=11, label='Standard normal distribution') -708 plt.plot(samples, probplot[1][0] * samples + probplot[1][1], zorder=10, label='Least squares fit, r=' + str(np.around(probplot[1][2], 3)), marker='', ls='-') -709 -710 plt.xlabel('Theoretical quantiles') -711 plt.ylabel('Ordered Values') -712 plt.legend(title=title) -713 plt.draw() -714 -715 -716def residual_plot(x, y, func, fit_res, title=""): -717 """Generates a plot which compares the fit to the data and displays the corresponding residuals +652 +653def fit_lin(x, y, **kwargs): +654 """Performs a linear fit to y = n + m * x and returns two Obs n, m. +655 +656 Parameters +657 ---------- +658 x : list +659 Can either be a list of floats in which case no xerror is assumed, or +660 a list of Obs, where the dvalues of the Obs are used as xerror for the fit. +661 y : list +662 List of Obs, the dvalues of the Obs are used as yerror for the fit. +663 +664 Returns +665 ------- +666 fit_parameters : list[Obs] +667 LIist of fitted observables. +668 """ +669 +670 def f(a, x): +671 y = a[0] + a[1] * x +672 return y +673 +674 if all(isinstance(n, Obs) for n in x): +675 out = total_least_squares(x, y, f, **kwargs) +676 return out.fit_parameters +677 elif all(isinstance(n, float) or isinstance(n, int) for n in x) or isinstance(x, np.ndarray): +678 out = least_squares(x, y, f, **kwargs) +679 return out.fit_parameters +680 else: +681 raise TypeError('Unsupported types for x') +682 +683 +684def qqplot(x, o_y, func, p, title=""): +685 """Generates a quantile-quantile plot of the fit result which can be used to +686 check if the residuals of the fit are gaussian distributed. 
+687 +688 Returns +689 ------- +690 None +691 """ +692 +693 residuals = [] +694 for i_x, i_y in zip(x, o_y): +695 residuals.append((i_y - func(p, i_x)) / i_y.dvalue) +696 residuals = sorted(residuals) +697 my_y = [o.value for o in residuals] +698 probplot = scipy.stats.probplot(my_y) +699 my_x = probplot[0][0] +700 plt.figure(figsize=(8, 8 / 1.618)) +701 plt.errorbar(my_x, my_y, fmt='o') +702 fit_start = my_x[0] +703 fit_stop = my_x[-1] +704 samples = np.arange(fit_start, fit_stop, 0.01) +705 plt.plot(samples, samples, 'k--', zorder=11, label='Standard normal distribution') +706 plt.plot(samples, probplot[1][0] * samples + probplot[1][1], zorder=10, label='Least squares fit, r=' + str(np.around(probplot[1][2], 3)), marker='', ls='-') +707 +708 plt.xlabel('Theoretical quantiles') +709 plt.ylabel('Ordered Values') +710 plt.legend(title=title) +711 plt.draw() +712 +713 +714def residual_plot(x, y, func, fit_res, title=""): +715 """Generates a plot which compares the fit to the data and displays the corresponding residuals +716 +717 For uncorrelated data the residuals are expected to be distributed ~N(0,1). 718 -719 For uncorrelated data the residuals are expected to be distributed ~N(0,1). -720 -721 Returns -722 ------- -723 None -724 """ -725 sorted_x = sorted(x) -726 xstart = sorted_x[0] - 0.5 * (sorted_x[1] - sorted_x[0]) -727 xstop = sorted_x[-1] + 0.5 * (sorted_x[-1] - sorted_x[-2]) -728 x_samples = np.arange(xstart, xstop + 0.01, 0.01) -729 -730 plt.figure(figsize=(8, 8 / 1.618)) -731 gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1], wspace=0.0, hspace=0.0) -732 ax0 = plt.subplot(gs[0]) -733 ax0.errorbar(x, [o.value for o in y], yerr=[o.dvalue for o in y], ls='none', fmt='o', capsize=3, markersize=5, label='Data') -734 ax0.plot(x_samples, func([o.value for o in fit_res], x_samples), label='Fit', zorder=10, ls='-', ms=0) +719 Returns +720 ------- +721 None +722 """ +723 sorted_x = sorted(x) +724 xstart = sorted_x[0] - 0.5 * (sorted_x[1] - sorted_x[0]) +725 xstop = sorted_x[-1] + 0.5 * (sorted_x[-1] - sorted_x[-2]) +726 x_samples = np.arange(xstart, xstop + 0.01, 0.01) +727 +728 plt.figure(figsize=(8, 8 / 1.618)) +729 gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1], wspace=0.0, hspace=0.0) +730 ax0 = plt.subplot(gs[0]) +731 ax0.errorbar(x, [o.value for o in y], yerr=[o.dvalue for o in y], ls='none', fmt='o', capsize=3, markersize=5, label='Data') +732 ax0.plot(x_samples, func([o.value for o in fit_res], x_samples), label='Fit', zorder=10, ls='-', ms=0) +733 ax0.set_xticklabels([]) +734 ax0.set_xlim([xstart, xstop]) 735 ax0.set_xticklabels([]) -736 ax0.set_xlim([xstart, xstop]) -737 ax0.set_xticklabels([]) -738 ax0.legend(title=title) -739 -740 residuals = (np.asarray([o.value for o in y]) - func([o.value for o in fit_res], np.asarray(x))) / np.asarray([o.dvalue for o in y]) -741 ax1 = plt.subplot(gs[1]) -742 ax1.plot(x, residuals, 'ko', ls='none', markersize=5) -743 ax1.tick_params(direction='out') -744 ax1.tick_params(axis="x", bottom=True, top=True, labelbottom=True) -745 ax1.axhline(y=0.0, ls='--', color='k', marker=" ") -746 ax1.fill_between(x_samples, -1.0, 1.0, alpha=0.1, facecolor='k') -747 ax1.set_xlim([xstart, xstop]) -748 ax1.set_ylabel('Residuals') -749 plt.subplots_adjust(wspace=None, hspace=None) -750 plt.draw() -751 -752 -753def error_band(x, func, beta): -754 """Calculate the error band for an array of sample values x, for given fit function func with optimized parameters beta. 
-755 -756 Returns -757 ------- -758 err : np.array(Obs) -759 Error band for an array of sample values x -760 """ -761 cov = covariance(beta) -762 if np.any(np.abs(cov - cov.T) > 1000 * np.finfo(np.float64).eps): -763 warnings.warn("Covariance matrix is not symmetric within floating point precision", RuntimeWarning) -764 -765 deriv = [] -766 for i, item in enumerate(x): -767 deriv.append(np.array(egrad(func)([o.value for o in beta], item))) -768 -769 err = [] -770 for i, item in enumerate(x): -771 err.append(np.sqrt(deriv[i] @ cov @ deriv[i])) -772 err = np.array(err) +736 ax0.legend(title=title) +737 +738 residuals = (np.asarray([o.value for o in y]) - func([o.value for o in fit_res], np.asarray(x))) / np.asarray([o.dvalue for o in y]) +739 ax1 = plt.subplot(gs[1]) +740 ax1.plot(x, residuals, 'ko', ls='none', markersize=5) +741 ax1.tick_params(direction='out') +742 ax1.tick_params(axis="x", bottom=True, top=True, labelbottom=True) +743 ax1.axhline(y=0.0, ls='--', color='k', marker=" ") +744 ax1.fill_between(x_samples, -1.0, 1.0, alpha=0.1, facecolor='k') +745 ax1.set_xlim([xstart, xstop]) +746 ax1.set_ylabel('Residuals') +747 plt.subplots_adjust(wspace=None, hspace=None) +748 plt.draw() +749 +750 +751def error_band(x, func, beta): +752 """Calculate the error band for an array of sample values x, for given fit function func with optimized parameters beta. +753 +754 Returns +755 ------- +756 err : np.array(Obs) +757 Error band for an array of sample values x +758 """ +759 cov = covariance(beta) +760 if np.any(np.abs(cov - cov.T) > 1000 * np.finfo(np.float64).eps): +761 warnings.warn("Covariance matrix is not symmetric within floating point precision", RuntimeWarning) +762 +763 deriv = [] +764 for i, item in enumerate(x): +765 deriv.append(np.array(egrad(func)([o.value for o in beta], item))) +766 +767 err = [] +768 for i, item in enumerate(x): +769 err.append(np.sqrt(deriv[i] @ cov @ deriv[i])) +770 err = np.array(err) +771 +772 return err 773 -774 return err -775 -776 -777def ks_test(objects=None): -778 """Performs a Kolmogorov–Smirnov test for the p-values of all fit object. -779 -780 Parameters -781 ---------- -782 objects : list -783 List of fit results to include in the analysis (optional). -784 -785 Returns -786 ------- -787 None -788 """ -789 -790 if objects is None: -791 obs_list = [] -792 for obj in gc.get_objects(): -793 if isinstance(obj, Fit_result): -794 obs_list.append(obj) -795 else: -796 obs_list = objects +774 +775def ks_test(objects=None): +776 """Performs a Kolmogorov–Smirnov test for the p-values of all fit object. +777 +778 Parameters +779 ---------- +780 objects : list +781 List of fit results to include in the analysis (optional). 
+782 +783 Returns +784 ------- +785 None +786 """ +787 +788 if objects is None: +789 obs_list = [] +790 for obj in gc.get_objects(): +791 if isinstance(obj, Fit_result): +792 obs_list.append(obj) +793 else: +794 obs_list = objects +795 +796 p_values = [o.p_value for o in obs_list] 797 -798 p_values = [o.p_value for o in obs_list] -799 -800 bins = len(p_values) -801 x = np.arange(0, 1.001, 0.001) -802 plt.plot(x, x, 'k', zorder=1) -803 plt.xlim(0, 1) -804 plt.ylim(0, 1) -805 plt.xlabel('p-value') -806 plt.ylabel('Cumulative probability') -807 plt.title(str(bins) + ' p-values') -808 -809 n = np.arange(1, bins + 1) / np.float64(bins) -810 Xs = np.sort(p_values) -811 plt.step(Xs, n) -812 diffs = n - Xs -813 loc_max_diff = np.argmax(np.abs(diffs)) -814 loc = Xs[loc_max_diff] -815 plt.annotate('', xy=(loc, loc), xytext=(loc, loc + diffs[loc_max_diff]), arrowprops=dict(arrowstyle='<->', shrinkA=0, shrinkB=0)) -816 plt.draw() +798 bins = len(p_values) +799 x = np.arange(0, 1.001, 0.001) +800 plt.plot(x, x, 'k', zorder=1) +801 plt.xlim(0, 1) +802 plt.ylim(0, 1) +803 plt.xlabel('p-value') +804 plt.ylabel('Cumulative probability') +805 plt.title(str(bins) + ' p-values') +806 +807 n = np.arange(1, bins + 1) / np.float64(bins) +808 Xs = np.sort(p_values) +809 plt.step(Xs, n) +810 diffs = n - Xs +811 loc_max_diff = np.argmax(np.abs(diffs)) +812 loc = Xs[loc_max_diff] +813 plt.annotate('', xy=(loc, loc), xytext=(loc, loc + diffs[loc_max_diff]), arrowprops=dict(arrowstyle='<->', shrinkA=0, shrinkB=0)) +814 plt.draw() +815 +816 print(scipy.stats.kstest(p_values, 'uniform')) 817 -818 print(scipy.stats.kstest(p_values, 'uniform')) -819 -820 -821def _extract_val_and_dval(string): -822 split_string = string.split('(') -823 if '.' in split_string[0] and '.' not in split_string[1][:-1]: -824 factor = 10 ** -len(split_string[0].partition('.')[2]) -825 else: -826 factor = 1 -827 return float(split_string[0]), float(split_string[1][:-1]) * factor +818 +819def _extract_val_and_dval(string): +820 split_string = string.split('(') +821 if '.' in split_string[0] and '.' not in split_string[1][:-1]: +822 factor = 10 ** -len(split_string[0].partition('.')[2]) +823 else: +824 factor = 1 +825 return float(split_string[0]), float(split_string[1][:-1]) * factor @@ -1220,10 +1218,10 @@ Hotelling t-squared p-value for correlated fits. 205 dy_f = [o.dvalue for o in y_all] 206 207 if len(x_all.shape) > 2: -208 raise Exception('Unknown format for x values') +208 raise ValueError("Unknown format for x values") 209 210 if np.any(np.asarray(dy_f) <= 0.0): -211 raise Exception('No y errors available, run the gamma method first.') +211 raise Exception("No y errors available, run the gamma method first.") 212 213 # number of fit parameters 214 n_parms_ls = [] @@ -1292,7 +1290,7 @@ Hotelling t-squared p-value for correlated fits. 277 p_f = [o.value for o in loc_priors] 278 dp_f = [o.dvalue for o in loc_priors] 279 if np.any(np.asarray(dp_f) <= 0.0): -280 raise Exception('No prior errors available, run the gamma method first.') +280 raise Exception("No prior errors available, run the gamma method first.") 281 else: 282 p_f = dp_f = np.array([]) 283 prior_mask = [] @@ -1301,7 +1299,7 @@ Hotelling t-squared p-value for correlated fits. 286 if 'initial_guess' in kwargs: 287 x0 = kwargs.get('initial_guess') 288 if len(x0) != n_parms: -289 raise Exception('Initial guess does not have the correct length: %d vs. %d' % (len(x0), n_parms)) +289 raise ValueError('Initial guess does not have the correct length: %d vs. 
%d' % (len(x0), n_parms)) 290 else: 291 x0 = [0.1] * n_parms 292 @@ -1420,48 +1418,46 @@ Hotelling t-squared p-value for correlated fits. 405 print('chisquare/expected_chisquare:', output.chisquare_by_expected_chisquare) 406 407 fitp = fit_result.x -408 if np.any(np.asarray(dy_f) <= 0.0): -409 raise Exception('No y errors available, run the gamma method first.') -410 -411 try: -412 hess = hessian(chisqfunc)(fitp) -413 except TypeError: -414 raise Exception("It is required to use autograd.numpy instead of numpy within fit functions, see the documentation for details.") from None +408 +409 try: +410 hess = hessian(chisqfunc)(fitp) +411 except TypeError: +412 raise Exception("It is required to use autograd.numpy instead of numpy within fit functions, see the documentation for details.") from None +413 +414 len_y = len(y_f) 415 -416 len_y = len(y_f) -417 -418 def chisqfunc_compact(d): -419 return anp.sum(general_chisqfunc(d[:n_parms], d[n_parms: n_parms + len_y], d[n_parms + len_y:]) ** 2) +416 def chisqfunc_compact(d): +417 return anp.sum(general_chisqfunc(d[:n_parms], d[n_parms: n_parms + len_y], d[n_parms + len_y:]) ** 2) +418 +419 jac_jac_y = hessian(chisqfunc_compact)(np.concatenate((fitp, y_f, p_f))) 420 -421 jac_jac_y = hessian(chisqfunc_compact)(np.concatenate((fitp, y_f, p_f))) -422 -423 # Compute hess^{-1} @ jac_jac_y[:n_parms + m, n_parms + m:] using LAPACK dgesv -424 try: -425 deriv_y = -scipy.linalg.solve(hess, jac_jac_y[:n_parms, n_parms:]) -426 except np.linalg.LinAlgError: -427 raise Exception("Cannot invert hessian matrix.") -428 -429 result = [] -430 for i in range(n_parms): -431 result.append(derived_observable(lambda x_all, **kwargs: (x_all[0] + np.finfo(np.float64).eps) / (y_all[0].value + np.finfo(np.float64).eps) * fitp[i], list(y_all) + loc_priors, man_grad=list(deriv_y[i]))) +421 # Compute hess^{-1} @ jac_jac_y[:n_parms + m, n_parms + m:] using LAPACK dgesv +422 try: +423 deriv_y = -scipy.linalg.solve(hess, jac_jac_y[:n_parms, n_parms:]) +424 except np.linalg.LinAlgError: +425 raise Exception("Cannot invert hessian matrix.") +426 +427 result = [] +428 for i in range(n_parms): +429 result.append(derived_observable(lambda x_all, **kwargs: (x_all[0] + np.finfo(np.float64).eps) / (y_all[0].value + np.finfo(np.float64).eps) * fitp[i], list(y_all) + loc_priors, man_grad=list(deriv_y[i]))) +430 +431 output.fit_parameters = result 432 -433 output.fit_parameters = result -434 -435 # Hotelling t-squared p-value for correlated fits. -436 if kwargs.get('correlated_fit') is True: -437 n_cov = np.min(np.vectorize(lambda x_all: x_all.N)(y_all)) -438 output.t2_p_value = 1 - scipy.stats.f.cdf((n_cov - output.dof) / (output.dof * (n_cov - 1)) * output.chisquare, -439 output.dof, n_cov - output.dof) -440 -441 if kwargs.get('resplot') is True: -442 for key in key_ls: -443 residual_plot(xd[key], yd[key], funcd[key], result, title=key) -444 -445 if kwargs.get('qqplot') is True: -446 for key in key_ls: -447 qqplot(xd[key], yd[key], funcd[key], result, title=key) -448 -449 return output +433 # Hotelling t-squared p-value for correlated fits. 
+434 if kwargs.get('correlated_fit') is True: +435 n_cov = np.min(np.vectorize(lambda x_all: x_all.N)(y_all)) +436 output.t2_p_value = 1 - scipy.stats.f.cdf((n_cov - output.dof) / (output.dof * (n_cov - 1)) * output.chisquare, +437 output.dof, n_cov - output.dof) +438 +439 if kwargs.get('resplot') is True: +440 for key in key_ls: +441 residual_plot(xd[key], yd[key], funcd[key], result, title=key) +442 +443 if kwargs.get('qqplot') is True: +444 for key in key_ls: +445 qqplot(xd[key], yd[key], funcd[key], result, title=key) +446 +447 return output @@ -1577,208 +1573,208 @@ Parameters and information on the fitted result. -
452def total_least_squares(x, y, func, silent=False, **kwargs):
-453    r'''Performs a non-linear fit to y = func(x) and returns a list of Obs corresponding to the fit parameters.
-454
-455    Parameters
-456    ----------
-457    x : list
-458        list of Obs, or a tuple of lists of Obs
-459    y : list
-460        list of Obs. The dvalues of the Obs are used as x- and yerror for the fit.
-461    func : object
-462        func has to be of the form
-463
-464        ```python
-465        import autograd.numpy as anp
-466
-467        def func(a, x):
-468            return a[0] + a[1] * x + a[2] * anp.sinh(x)
-469        ```
+            
450def total_least_squares(x, y, func, silent=False, **kwargs):
+451    r'''Performs a non-linear fit to y = func(x) and returns a list of Obs corresponding to the fit parameters.
+452
+453    Parameters
+454    ----------
+455    x : list
+456        list of Obs, or a tuple of lists of Obs
+457    y : list
+458        list of Obs. The dvalues of the Obs are used as x- and yerror for the fit.
+459    func : object
+460        func has to be of the form
+461
+462        ```python
+463        import autograd.numpy as anp
+464
+465        def func(a, x):
+466            return a[0] + a[1] * x + a[2] * anp.sinh(x)
+467        ```
+468
+469        For multiple x values func can be of the form
 470
-471        For multiple x values func can be of the form
-472
-473        ```python
-474        def func(a, x):
-475            (x1, x2) = x
-476            return a[0] * x1 ** 2 + a[1] * x2
-477        ```
-478
-479        It is important that all numpy functions refer to autograd.numpy, otherwise the differentiation
-480        will not work.
-481    silent : bool, optional
-482        If true all output to the console is omitted (default False).
-483    initial_guess : list
-484        can provide an initial guess for the input parameters. Relevant for non-linear
-485        fits with many parameters.
-486    expected_chisquare : bool
-487        If true prints the expected chisquare which is
-488        corrected by effects caused by correlated input data.
-489        This can take a while as the full correlation matrix
-490        has to be calculated (default False).
-491    num_grad : bool
-492        Use numerical differentation instead of automatic differentiation to perform the error propagation (default False).
-493
-494    Notes
-495    -----
-496    Based on the orthogonal distance regression module of scipy.
-497
-498    Returns
-499    -------
-500    output : Fit_result
-501        Parameters and information on the fitted result.
-502    '''
+471        ```python
+472        def func(a, x):
+473            (x1, x2) = x
+474            return a[0] * x1 ** 2 + a[1] * x2
+475        ```
+476
+477        It is important that all numpy functions refer to autograd.numpy, otherwise the differentiation
+478        will not work.
+479    silent : bool, optional
+480        If true all output to the console is omitted (default False).
+481    initial_guess : list
+482        can provide an initial guess for the input parameters. Relevant for non-linear
+483        fits with many parameters.
+484    expected_chisquare : bool
+485        If true prints the expected chisquare which is
+486        corrected by effects caused by correlated input data.
+487        This can take a while as the full correlation matrix
+488        has to be calculated (default False).
+489    num_grad : bool
+490        Use numerical differentiation instead of automatic differentiation to perform the error propagation (default False).
+491
+492    Notes
+493    -----
+494    Based on the orthogonal distance regression module of scipy.
+495
+496    Returns
+497    -------
+498    output : Fit_result
+499        Parameters and information on the fitted result.
+500    '''
+501
+502    output = Fit_result()
 503
-504    output = Fit_result()
+504    output.fit_function = func
 505
-506    output.fit_function = func
+506    x = np.array(x)
 507
-508    x = np.array(x)
+508    x_shape = x.shape
 509
-510    x_shape = x.shape
-511
-512    if kwargs.get('num_grad') is True:
-513        jacobian = num_jacobian
-514        hessian = num_hessian
-515    else:
-516        jacobian = auto_jacobian
-517        hessian = auto_hessian
-518
-519    if not callable(func):
-520        raise TypeError('func has to be a function.')
-521
-522    for i in range(42):
-523        try:
-524            func(np.arange(i), x.T[0])
-525        except TypeError:
+510    if kwargs.get('num_grad') is True:
+511        jacobian = num_jacobian
+512        hessian = num_hessian
+513    else:
+514        jacobian = auto_jacobian
+515        hessian = auto_hessian
+516
+517    if not callable(func):
+518        raise TypeError('func has to be a function.')
+519
+520    for i in range(42):
+521        try:
+522            func(np.arange(i), x.T[0])
+523        except TypeError:
+524            continue
+525        except IndexError:
 526            continue
-527        except IndexError:
-528            continue
-529        else:
-530            break
-531    else:
-532        raise RuntimeError("Fit function is not valid.")
-533
-534    n_parms = i
-535    if not silent:
-536        print('Fit with', n_parms, 'parameter' + 's' * (n_parms > 1))
-537
-538    x_f = np.vectorize(lambda o: o.value)(x)
-539    dx_f = np.vectorize(lambda o: o.dvalue)(x)
-540    y_f = np.array([o.value for o in y])
-541    dy_f = np.array([o.dvalue for o in y])
-542
-543    if np.any(np.asarray(dx_f) <= 0.0):
-544        raise Exception('No x errors available, run the gamma method first.')
-545
-546    if np.any(np.asarray(dy_f) <= 0.0):
-547        raise Exception('No y errors available, run the gamma method first.')
-548
-549    if 'initial_guess' in kwargs:
-550        x0 = kwargs.get('initial_guess')
-551        if len(x0) != n_parms:
-552            raise Exception('Initial guess does not have the correct length: %d vs. %d' % (len(x0), n_parms))
-553    else:
-554        x0 = [1] * n_parms
-555
-556    data = RealData(x_f, y_f, sx=dx_f, sy=dy_f)
-557    model = Model(func)
-558    odr = ODR(data, model, x0, partol=np.finfo(np.float64).eps)
-559    odr.set_job(fit_type=0, deriv=1)
-560    out = odr.run()
+527        else:
+528            break
+529    else:
+530        raise RuntimeError("Fit function is not valid.")
+531
+532    n_parms = i
+533    if not silent:
+534        print('Fit with', n_parms, 'parameter' + 's' * (n_parms > 1))
+535
+536    x_f = np.vectorize(lambda o: o.value)(x)
+537    dx_f = np.vectorize(lambda o: o.dvalue)(x)
+538    y_f = np.array([o.value for o in y])
+539    dy_f = np.array([o.dvalue for o in y])
+540
+541    if np.any(np.asarray(dx_f) <= 0.0):
+542        raise Exception('No x errors available, run the gamma method first.')
+543
+544    if np.any(np.asarray(dy_f) <= 0.0):
+545        raise Exception('No y errors available, run the gamma method first.')
+546
+547    if 'initial_guess' in kwargs:
+548        x0 = kwargs.get('initial_guess')
+549        if len(x0) != n_parms:
+550            raise Exception('Initial guess does not have the correct length: %d vs. %d' % (len(x0), n_parms))
+551    else:
+552        x0 = [1] * n_parms
+553
+554    data = RealData(x_f, y_f, sx=dx_f, sy=dy_f)
+555    model = Model(func)
+556    odr = ODR(data, model, x0, partol=np.finfo(np.float64).eps)
+557    odr.set_job(fit_type=0, deriv=1)
+558    out = odr.run()
+559
+560    output.residual_variance = out.res_var
 561
-562    output.residual_variance = out.res_var
+562    output.method = 'ODR'
 563
-564    output.method = 'ODR'
+564    output.message = out.stopreason
 565
-566    output.message = out.stopreason
+566    output.xplus = out.xplus
 567
-568    output.xplus = out.xplus
-569
-570    if not silent:
-571        print('Method: ODR')
-572        print(*out.stopreason)
-573        print('Residual variance:', output.residual_variance)
-574
-575    if out.info > 3:
-576        raise Exception('The minimization procedure did not converge.')
+568    if not silent:
+569        print('Method: ODR')
+570        print(*out.stopreason)
+571        print('Residual variance:', output.residual_variance)
+572
+573    if out.info > 3:
+574        raise Exception('The minimization procedure did not converge.')
+575
+576    m = x_f.size
 577
-578    m = x_f.size
-579
-580    def odr_chisquare(p):
-581        model = func(p[:n_parms], p[n_parms:].reshape(x_shape))
-582        chisq = anp.sum(((y_f - model) / dy_f) ** 2) + anp.sum(((x_f - p[n_parms:].reshape(x_shape)) / dx_f) ** 2)
-583        return chisq
-584
-585    if kwargs.get('expected_chisquare') is True:
-586        W = np.diag(1 / np.asarray(np.concatenate((dy_f.ravel(), dx_f.ravel()))))
-587
-588        if kwargs.get('covariance') is not None:
-589            cov = kwargs.get('covariance')
-590        else:
-591            cov = covariance(np.concatenate((y, x.ravel())))
+578    def odr_chisquare(p):
+579        model = func(p[:n_parms], p[n_parms:].reshape(x_shape))
+580        chisq = anp.sum(((y_f - model) / dy_f) ** 2) + anp.sum(((x_f - p[n_parms:].reshape(x_shape)) / dx_f) ** 2)
+581        return chisq
+582
+583    if kwargs.get('expected_chisquare') is True:
+584        W = np.diag(1 / np.asarray(np.concatenate((dy_f.ravel(), dx_f.ravel()))))
+585
+586        if kwargs.get('covariance') is not None:
+587            cov = kwargs.get('covariance')
+588        else:
+589            cov = covariance(np.concatenate((y, x.ravel())))
+590
+591        number_of_x_parameters = int(m / x_f.shape[-1])
 592
-593        number_of_x_parameters = int(m / x_f.shape[-1])
-594
-595        old_jac = jacobian(func)(out.beta, out.xplus)
-596        fused_row1 = np.concatenate((old_jac, np.concatenate((number_of_x_parameters * [np.zeros(old_jac.shape)]), axis=0)))
-597        fused_row2 = np.concatenate((jacobian(lambda x, y: func(y, x))(out.xplus, out.beta).reshape(x_f.shape[-1], x_f.shape[-1] * number_of_x_parameters), np.identity(number_of_x_parameters * old_jac.shape[0])))
-598        new_jac = np.concatenate((fused_row1, fused_row2), axis=1)
-599
-600        A = W @ new_jac
-601        P_phi = A @ np.linalg.pinv(A.T @ A) @ A.T
-602        expected_chisquare = np.trace((np.identity(P_phi.shape[0]) - P_phi) @ W @ cov @ W)
-603        if expected_chisquare <= 0.0:
-604            warnings.warn("Negative expected_chisquare.", RuntimeWarning)
-605            expected_chisquare = np.abs(expected_chisquare)
-606        output.chisquare_by_expected_chisquare = odr_chisquare(np.concatenate((out.beta, out.xplus.ravel()))) / expected_chisquare
-607        if not silent:
-608            print('chisquare/expected_chisquare:',
-609                  output.chisquare_by_expected_chisquare)
-610
-611    fitp = out.beta
-612    try:
-613        hess = hessian(odr_chisquare)(np.concatenate((fitp, out.xplus.ravel())))
-614    except TypeError:
-615        raise Exception("It is required to use autograd.numpy instead of numpy within fit functions, see the documentation for details.") from None
-616
-617    def odr_chisquare_compact_x(d):
-618        model = func(d[:n_parms], d[n_parms:n_parms + m].reshape(x_shape))
-619        chisq = anp.sum(((y_f - model) / dy_f) ** 2) + anp.sum(((d[n_parms + m:].reshape(x_shape) - d[n_parms:n_parms + m].reshape(x_shape)) / dx_f) ** 2)
-620        return chisq
+593        old_jac = jacobian(func)(out.beta, out.xplus)
+594        fused_row1 = np.concatenate((old_jac, np.concatenate((number_of_x_parameters * [np.zeros(old_jac.shape)]), axis=0)))
+595        fused_row2 = np.concatenate((jacobian(lambda x, y: func(y, x))(out.xplus, out.beta).reshape(x_f.shape[-1], x_f.shape[-1] * number_of_x_parameters), np.identity(number_of_x_parameters * old_jac.shape[0])))
+596        new_jac = np.concatenate((fused_row1, fused_row2), axis=1)
+597
+598        A = W @ new_jac
+599        P_phi = A @ np.linalg.pinv(A.T @ A) @ A.T
+600        expected_chisquare = np.trace((np.identity(P_phi.shape[0]) - P_phi) @ W @ cov @ W)
+601        if expected_chisquare <= 0.0:
+602            warnings.warn("Negative expected_chisquare.", RuntimeWarning)
+603            expected_chisquare = np.abs(expected_chisquare)
+604        output.chisquare_by_expected_chisquare = odr_chisquare(np.concatenate((out.beta, out.xplus.ravel()))) / expected_chisquare
+605        if not silent:
+606            print('chisquare/expected_chisquare:',
+607                  output.chisquare_by_expected_chisquare)
+608
+609    fitp = out.beta
+610    try:
+611        hess = hessian(odr_chisquare)(np.concatenate((fitp, out.xplus.ravel())))
+612    except TypeError:
+613        raise Exception("It is required to use autograd.numpy instead of numpy within fit functions, see the documentation for details.") from None
+614
+615    def odr_chisquare_compact_x(d):
+616        model = func(d[:n_parms], d[n_parms:n_parms + m].reshape(x_shape))
+617        chisq = anp.sum(((y_f - model) / dy_f) ** 2) + anp.sum(((d[n_parms + m:].reshape(x_shape) - d[n_parms:n_parms + m].reshape(x_shape)) / dx_f) ** 2)
+618        return chisq
+619
+620    jac_jac_x = hessian(odr_chisquare_compact_x)(np.concatenate((fitp, out.xplus.ravel(), x_f.ravel())))
 621
-622    jac_jac_x = hessian(odr_chisquare_compact_x)(np.concatenate((fitp, out.xplus.ravel(), x_f.ravel())))
-623
-624    # Compute hess^{-1} @ jac_jac_x[:n_parms + m, n_parms + m:] using LAPACK dgesv
-625    try:
-626        deriv_x = -scipy.linalg.solve(hess, jac_jac_x[:n_parms + m, n_parms + m:])
-627    except np.linalg.LinAlgError:
-628        raise Exception("Cannot invert hessian matrix.")
-629
-630    def odr_chisquare_compact_y(d):
-631        model = func(d[:n_parms], d[n_parms:n_parms + m].reshape(x_shape))
-632        chisq = anp.sum(((d[n_parms + m:] - model) / dy_f) ** 2) + anp.sum(((x_f - d[n_parms:n_parms + m].reshape(x_shape)) / dx_f) ** 2)
-633        return chisq
+622    # Compute hess^{-1} @ jac_jac_x[:n_parms + m, n_parms + m:] using LAPACK dgesv
+623    try:
+624        deriv_x = -scipy.linalg.solve(hess, jac_jac_x[:n_parms + m, n_parms + m:])
+625    except np.linalg.LinAlgError:
+626        raise Exception("Cannot invert hessian matrix.")
+627
+628    def odr_chisquare_compact_y(d):
+629        model = func(d[:n_parms], d[n_parms:n_parms + m].reshape(x_shape))
+630        chisq = anp.sum(((d[n_parms + m:] - model) / dy_f) ** 2) + anp.sum(((x_f - d[n_parms:n_parms + m].reshape(x_shape)) / dx_f) ** 2)
+631        return chisq
+632
+633    jac_jac_y = hessian(odr_chisquare_compact_y)(np.concatenate((fitp, out.xplus.ravel(), y_f)))
 634
-635    jac_jac_y = hessian(odr_chisquare_compact_y)(np.concatenate((fitp, out.xplus.ravel(), y_f)))
-636
-637    # Compute hess^{-1} @ jac_jac_y[:n_parms + m, n_parms + m:] using LAPACK dgesv
-638    try:
-639        deriv_y = -scipy.linalg.solve(hess, jac_jac_y[:n_parms + m, n_parms + m:])
-640    except np.linalg.LinAlgError:
-641        raise Exception("Cannot invert hessian matrix.")
-642
-643    result = []
-644    for i in range(n_parms):
-645        result.append(derived_observable(lambda my_var, **kwargs: (my_var[0] + np.finfo(np.float64).eps) / (x.ravel()[0].value + np.finfo(np.float64).eps) * out.beta[i], list(x.ravel()) + list(y), man_grad=list(deriv_x[i]) + list(deriv_y[i])))
+635    # Compute hess^{-1} @ jac_jac_y[:n_parms + m, n_parms + m:] using LAPACK dgesv
+636    try:
+637        deriv_y = -scipy.linalg.solve(hess, jac_jac_y[:n_parms + m, n_parms + m:])
+638    except np.linalg.LinAlgError:
+639        raise Exception("Cannot invert hessian matrix.")
+640
+641    result = []
+642    for i in range(n_parms):
+643        result.append(derived_observable(lambda my_var, **kwargs: (my_var[0] + np.finfo(np.float64).eps) / (x.ravel()[0].value + np.finfo(np.float64).eps) * out.beta[i], list(x.ravel()) + list(y), man_grad=list(deriv_x[i]) + list(deriv_y[i])))
+644
+645    output.fit_parameters = result
 646
-647    output.fit_parameters = result
-648
-649    output.odr_chisquare = odr_chisquare(np.concatenate((out.beta, out.xplus.ravel())))
-650    output.dof = x.shape[-1] - n_parms
-651    output.p_value = 1 - scipy.stats.chi2.cdf(output.odr_chisquare, output.dof)
-652
-653    return output
+647    output.odr_chisquare = odr_chisquare(np.concatenate((out.beta, out.xplus.ravel())))
+648    output.dof = x.shape[-1] - n_parms
+649    output.p_value = 1 - scipy.stats.chi2.cdf(output.odr_chisquare, output.dof)
+650
+651    return output
 
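A minimal usage sketch for `total_least_squares`, following the docstring above: a fit function built from autograd-compatible operations and lists of Obs for both x and y. It assumes `pyerrors` is importable as `pe` and that `pe.pseudo_Obs` is available to build toy observables; the ensemble name 'toy' and all numbers are illustrative only.

```python
import pyerrors as pe

def func(a, x):
    # linear model; anything beyond +, -, *, / would need autograd.numpy (anp)
    return a[0] + a[1] * x

# toy data: both x and y carry errors, as required by the ODR-based fit
x = [pe.pseudo_Obs(v, 0.01, 'toy') for v in (1.0, 2.0, 3.0, 4.0)]
y = [pe.pseudo_Obs(0.5 + 0.3 * xi.value, 0.05, 'toy') for xi in x]
[o.gamma_method() for o in x + y]  # errors must be available before fitting

fit_res = pe.fits.total_least_squares(x, y, func, silent=True)
print(fit_res.fit_parameters)
```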
@@ -1852,35 +1848,35 @@ Parameters and information on the fitted result.
-
656def fit_lin(x, y, **kwargs):
-657    """Performs a linear fit to y = n + m * x and returns two Obs n, m.
-658
-659    Parameters
-660    ----------
-661    x : list
-662        Can either be a list of floats in which case no xerror is assumed, or
-663        a list of Obs, where the dvalues of the Obs are used as xerror for the fit.
-664    y : list
-665        List of Obs, the dvalues of the Obs are used as yerror for the fit.
-666
-667    Returns
-668    -------
-669    fit_parameters : list[Obs]
-670        LIist of fitted observables.
-671    """
-672
-673    def f(a, x):
-674        y = a[0] + a[1] * x
-675        return y
-676
-677    if all(isinstance(n, Obs) for n in x):
-678        out = total_least_squares(x, y, f, **kwargs)
-679        return out.fit_parameters
-680    elif all(isinstance(n, float) or isinstance(n, int) for n in x) or isinstance(x, np.ndarray):
-681        out = least_squares(x, y, f, **kwargs)
-682        return out.fit_parameters
-683    else:
-684        raise TypeError('Unsupported types for x')
+            
654def fit_lin(x, y, **kwargs):
+655    """Performs a linear fit to y = n + m * x and returns two Obs n, m.
+656
+657    Parameters
+658    ----------
+659    x : list
+660        Can either be a list of floats in which case no xerror is assumed, or
+661        a list of Obs, where the dvalues of the Obs are used as xerror for the fit.
+662    y : list
+663        List of Obs, the dvalues of the Obs are used as yerror for the fit.
+664
+665    Returns
+666    -------
+667    fit_parameters : list[Obs]
+668        List of fitted observables.
+669    """
+670
+671    def f(a, x):
+672        y = a[0] + a[1] * x
+673        return y
+674
+675    if all(isinstance(n, Obs) for n in x):
+676        out = total_least_squares(x, y, f, **kwargs)
+677        return out.fit_parameters
+678    elif all(isinstance(n, float) or isinstance(n, int) for n in x) or isinstance(x, np.ndarray):
+679        out = least_squares(x, y, f, **kwargs)
+680        return out.fit_parameters
+681    else:
+682        raise TypeError('Unsupported types for x')
 
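A corresponding sketch for `fit_lin`: with plain float x values the call is dispatched to `least_squares`, so only the y observables need errors. `pe.pseudo_Obs` and the toy numbers are again assumptions for illustration.

```python
import pyerrors as pe

x = [1.0, 2.0, 3.0, 4.0]
y = [pe.pseudo_Obs(0.7 + 0.2 * xi, 0.03, 'toy') for xi in x]
[o.gamma_method() for o in y]

n, m = pe.fits.fit_lin(x, y)  # intercept n and slope m are returned as Obs
print(n, m)
```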
@@ -1917,34 +1913,34 @@ LIist of fitted observables.
-
687def qqplot(x, o_y, func, p, title=""):
-688    """Generates a quantile-quantile plot of the fit result which can be used to
-689       check if the residuals of the fit are gaussian distributed.
-690
-691    Returns
-692    -------
-693    None
-694    """
-695
-696    residuals = []
-697    for i_x, i_y in zip(x, o_y):
-698        residuals.append((i_y - func(p, i_x)) / i_y.dvalue)
-699    residuals = sorted(residuals)
-700    my_y = [o.value for o in residuals]
-701    probplot = scipy.stats.probplot(my_y)
-702    my_x = probplot[0][0]
-703    plt.figure(figsize=(8, 8 / 1.618))
-704    plt.errorbar(my_x, my_y, fmt='o')
-705    fit_start = my_x[0]
-706    fit_stop = my_x[-1]
-707    samples = np.arange(fit_start, fit_stop, 0.01)
-708    plt.plot(samples, samples, 'k--', zorder=11, label='Standard normal distribution')
-709    plt.plot(samples, probplot[1][0] * samples + probplot[1][1], zorder=10, label='Least squares fit, r=' + str(np.around(probplot[1][2], 3)), marker='', ls='-')
-710
-711    plt.xlabel('Theoretical quantiles')
-712    plt.ylabel('Ordered Values')
-713    plt.legend(title=title)
-714    plt.draw()
+            
685def qqplot(x, o_y, func, p, title=""):
+686    """Generates a quantile-quantile plot of the fit result which can be used to
+687       check if the residuals of the fit are Gaussian distributed.
+688
+689    Returns
+690    -------
+691    None
+692    """
+693
+694    residuals = []
+695    for i_x, i_y in zip(x, o_y):
+696        residuals.append((i_y - func(p, i_x)) / i_y.dvalue)
+697    residuals = sorted(residuals)
+698    my_y = [o.value for o in residuals]
+699    probplot = scipy.stats.probplot(my_y)
+700    my_x = probplot[0][0]
+701    plt.figure(figsize=(8, 8 / 1.618))
+702    plt.errorbar(my_x, my_y, fmt='o')
+703    fit_start = my_x[0]
+704    fit_stop = my_x[-1]
+705    samples = np.arange(fit_start, fit_stop, 0.01)
+706    plt.plot(samples, samples, 'k--', zorder=11, label='Standard normal distribution')
+707    plt.plot(samples, probplot[1][0] * samples + probplot[1][1], zorder=10, label='Least squares fit, r=' + str(np.around(probplot[1][2], 3)), marker='', ls='-')
+708
+709    plt.xlabel('Theoretical quantiles')
+710    plt.ylabel('Ordered Values')
+711    plt.legend(title=title)
+712    plt.draw()
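A hedged usage sketch for qqplot on the same kind of hypothetical toy data as above; the fit function lin and the data are illustrative, and the central fit values are passed as plain floats:

    import numpy as np
    import pyerrors as pe
    from pyerrors.fits import qqplot

    def lin(p, t):
        return p[0] + p[1] * t

    x = [1.0, 2.0, 3.0, 4.0]
    y = [pe.Obs([np.random.normal(2.0 + 0.5 * xi, 0.1, 500)], ['ens1']) for xi in x]
    for o in y:
        o.gamma_method()

    fit = pe.fits.least_squares(x, y, lin)
    qqplot(x, y, lin, [p.value for p in fit.fit_parameters], title='linear fit')

Points lying close to the dashed diagonal indicate residuals compatible with a standard normal distribution.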
 
@@ -1971,41 +1967,41 @@ List of fitted observables.
-
717def residual_plot(x, y, func, fit_res, title=""):
-718    """Generates a plot which compares the fit to the data and displays the corresponding residuals
+            
715def residual_plot(x, y, func, fit_res, title=""):
+716    """Generates a plot which compares the fit to the data and displays the corresponding residuals
+717
+718    For uncorrelated data the residuals are expected to be distributed ~N(0,1).
 719
-720    For uncorrelated data the residuals are expected to be distributed ~N(0,1).
-721
-722    Returns
-723    -------
-724    None
-725    """
-726    sorted_x = sorted(x)
-727    xstart = sorted_x[0] - 0.5 * (sorted_x[1] - sorted_x[0])
-728    xstop = sorted_x[-1] + 0.5 * (sorted_x[-1] - sorted_x[-2])
-729    x_samples = np.arange(xstart, xstop + 0.01, 0.01)
-730
-731    plt.figure(figsize=(8, 8 / 1.618))
-732    gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1], wspace=0.0, hspace=0.0)
-733    ax0 = plt.subplot(gs[0])
-734    ax0.errorbar(x, [o.value for o in y], yerr=[o.dvalue for o in y], ls='none', fmt='o', capsize=3, markersize=5, label='Data')
-735    ax0.plot(x_samples, func([o.value for o in fit_res], x_samples), label='Fit', zorder=10, ls='-', ms=0)
+720    Returns
+721    -------
+722    None
+723    """
+724    sorted_x = sorted(x)
+725    xstart = sorted_x[0] - 0.5 * (sorted_x[1] - sorted_x[0])
+726    xstop = sorted_x[-1] + 0.5 * (sorted_x[-1] - sorted_x[-2])
+727    x_samples = np.arange(xstart, xstop + 0.01, 0.01)
+728
+729    plt.figure(figsize=(8, 8 / 1.618))
+730    gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1], wspace=0.0, hspace=0.0)
+731    ax0 = plt.subplot(gs[0])
+732    ax0.errorbar(x, [o.value for o in y], yerr=[o.dvalue for o in y], ls='none', fmt='o', capsize=3, markersize=5, label='Data')
+733    ax0.plot(x_samples, func([o.value for o in fit_res], x_samples), label='Fit', zorder=10, ls='-', ms=0)
+734    ax0.set_xticklabels([])
+735    ax0.set_xlim([xstart, xstop])
 736    ax0.set_xticklabels([])
-737    ax0.set_xlim([xstart, xstop])
-738    ax0.set_xticklabels([])
-739    ax0.legend(title=title)
-740
-741    residuals = (np.asarray([o.value for o in y]) - func([o.value for o in fit_res], np.asarray(x))) / np.asarray([o.dvalue for o in y])
-742    ax1 = plt.subplot(gs[1])
-743    ax1.plot(x, residuals, 'ko', ls='none', markersize=5)
-744    ax1.tick_params(direction='out')
-745    ax1.tick_params(axis="x", bottom=True, top=True, labelbottom=True)
-746    ax1.axhline(y=0.0, ls='--', color='k', marker=" ")
-747    ax1.fill_between(x_samples, -1.0, 1.0, alpha=0.1, facecolor='k')
-748    ax1.set_xlim([xstart, xstop])
-749    ax1.set_ylabel('Residuals')
-750    plt.subplots_adjust(wspace=None, hspace=None)
-751    plt.draw()
+737    ax0.legend(title=title)
+738
+739    residuals = (np.asarray([o.value for o in y]) - func([o.value for o in fit_res], np.asarray(x))) / np.asarray([o.dvalue for o in y])
+740    ax1 = plt.subplot(gs[1])
+741    ax1.plot(x, residuals, 'ko', ls='none', markersize=5)
+742    ax1.tick_params(direction='out')
+743    ax1.tick_params(axis="x", bottom=True, top=True, labelbottom=True)
+744    ax1.axhline(y=0.0, ls='--', color='k', marker=" ")
+745    ax1.fill_between(x_samples, -1.0, 1.0, alpha=0.1, facecolor='k')
+746    ax1.set_xlim([xstart, xstop])
+747    ax1.set_ylabel('Residuals')
+748    plt.subplots_adjust(wspace=None, hspace=None)
+749    plt.draw()
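A hedged sketch of calling residual_plot, reusing x, y, lin and the least_squares fit from the qqplot example above; fit_res is the list of fitted Obs:

    from pyerrors.fits import residual_plot

    fit_res = fit.fit_parameters      # fitted parameters as Obs, from the fit above
    residual_plot(x, y, lin, fit_res, title='linear fit')

The lower panel shows (y_i - f(x_i)) / dy_i, which for uncorrelated data should scatter within the shaded band of width ±1.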
 
@@ -2033,28 +2029,28 @@ List of fitted observables.
-
754def error_band(x, func, beta):
-755    """Calculate the error band for an array of sample values x, for a given fit function func with optimized parameters beta.
-756
-757    Returns
-758    -------
-759    err : np.array(Obs)
-760        Error band for an array of sample values x
-761    """
-762    cov = covariance(beta)
-763    if np.any(np.abs(cov - cov.T) > 1000 * np.finfo(np.float64).eps):
-764        warnings.warn("Covariance matrix is not symmetric within floating point precision", RuntimeWarning)
-765
-766    deriv = []
-767    for i, item in enumerate(x):
-768        deriv.append(np.array(egrad(func)([o.value for o in beta], item)))
-769
-770    err = []
-771    for i, item in enumerate(x):
-772        err.append(np.sqrt(deriv[i] @ cov @ deriv[i]))
-773    err = np.array(err)
-774
-775    return err
+            
752def error_band(x, func, beta):
+753    """Calculate the error band for an array of sample values x, for a given fit function func with optimized parameters beta.
+754
+755    Returns
+756    -------
+757    err : np.array(Obs)
+758        Error band for an array of sample values x
+759    """
+760    cov = covariance(beta)
+761    if np.any(np.abs(cov - cov.T) > 1000 * np.finfo(np.float64).eps):
+762        warnings.warn("Covariance matrix is not symmetric within floating point precision", RuntimeWarning)
+763
+764    deriv = []
+765    for i, item in enumerate(x):
+766        deriv.append(np.array(egrad(func)([o.value for o in beta], item)))
+767
+768    err = []
+769    for i, item in enumerate(x):
+770        err.append(np.sqrt(deriv[i] @ cov @ deriv[i]))
+771    err = np.array(err)
+772
+773    return err
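A hedged usage sketch for error_band, continuing the example above; the gamma_method calls on the fitted parameters are an assumption, added so that covariance(fit_res) is well defined:

    from pyerrors.fits import error_band

    for o in fit_res:
        o.gamma_method()              # assumed prerequisite for covariance(fit_res)

    x_samples = np.linspace(1.0, 4.0, 50)
    central = lin([o.value for o in fit_res], x_samples)
    band = error_band(x_samples, lin, fit_res)
    # band[i] is the propagated 1-sigma error of lin at x_samples[i], e.g. for
    # plt.fill_between(x_samples, central - band, central + band, alpha=0.3)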
 
@@ -2081,48 +2077,48 @@ Error band for an array of sample values x
-
778def ks_test(objects=None):
-779    """Performs a Kolmogorov–Smirnov test for the p-values of all fit objects.
-780
-781    Parameters
-782    ----------
-783    objects : list
-784        List of fit results to include in the analysis (optional).
-785
-786    Returns
-787    -------
-788    None
-789    """
-790
-791    if objects is None:
-792        obs_list = []
-793        for obj in gc.get_objects():
-794            if isinstance(obj, Fit_result):
-795                obs_list.append(obj)
-796    else:
-797        obs_list = objects
+            
776def ks_test(objects=None):
+777    """Performs a Kolmogorov–Smirnov test for the p-values of all fit objects.
+778
+779    Parameters
+780    ----------
+781    objects : list
+782        List of fit results to include in the analysis (optional).
+783
+784    Returns
+785    -------
+786    None
+787    """
+788
+789    if objects is None:
+790        obs_list = []
+791        for obj in gc.get_objects():
+792            if isinstance(obj, Fit_result):
+793                obs_list.append(obj)
+794    else:
+795        obs_list = objects
+796
+797    p_values = [o.p_value for o in obs_list]
 798
-799    p_values = [o.p_value for o in obs_list]
-800
-801    bins = len(p_values)
-802    x = np.arange(0, 1.001, 0.001)
-803    plt.plot(x, x, 'k', zorder=1)
-804    plt.xlim(0, 1)
-805    plt.ylim(0, 1)
-806    plt.xlabel('p-value')
-807    plt.ylabel('Cumulative probability')
-808    plt.title(str(bins) + ' p-values')
-809
-810    n = np.arange(1, bins + 1) / np.float64(bins)
-811    Xs = np.sort(p_values)
-812    plt.step(Xs, n)
-813    diffs = n - Xs
-814    loc_max_diff = np.argmax(np.abs(diffs))
-815    loc = Xs[loc_max_diff]
-816    plt.annotate('', xy=(loc, loc), xytext=(loc, loc + diffs[loc_max_diff]), arrowprops=dict(arrowstyle='<->', shrinkA=0, shrinkB=0))
-817    plt.draw()
-818
-819    print(scipy.stats.kstest(p_values, 'uniform'))
+799    bins = len(p_values)
+800    x = np.arange(0, 1.001, 0.001)
+801    plt.plot(x, x, 'k', zorder=1)
+802    plt.xlim(0, 1)
+803    plt.ylim(0, 1)
+804    plt.xlabel('p-value')
+805    plt.ylabel('Cumulative probability')
+806    plt.title(str(bins) + ' p-values')
+807
+808    n = np.arange(1, bins + 1) / np.float64(bins)
+809    Xs = np.sort(p_values)
+810    plt.step(Xs, n)
+811    diffs = n - Xs
+812    loc_max_diff = np.argmax(np.abs(diffs))
+813    loc = Xs[loc_max_diff]
+814    plt.annotate('', xy=(loc, loc), xytext=(loc, loc + diffs[loc_max_diff]), arrowprops=dict(arrowstyle='<->', shrinkA=0, shrinkB=0))
+815    plt.draw()
+816
+817    print(scipy.stats.kstest(p_values, 'uniform'))
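Finally, a brief hedged sketch of ks_test; fit_result_1 and fit_result_2 are hypothetical Fit_result objects from earlier least_squares or total_least_squares calls:

    from pyerrors.fits import ks_test

    # scan all Fit_result objects currently alive in the interpreter session
    ks_test()

    # or restrict the test to an explicit list of fit results
    ks_test(objects=[fit_result_1, fit_result_2])

The printed scipy.stats.kstest result quantifies how compatible the collected p-values are with a uniform distribution.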