diff --git a/docs/pyerrors/fits.html b/docs/pyerrors/fits.html
index 68f884fc..29aa40e9 100644
--- a/docs/pyerrors/fits.html
+++ b/docs/pyerrors/fits.html
@@ -585,15 +585,15 @@
 482        chol_inv = np.linalg.inv(chol)
 483        chol_inv = np.dot(chol_inv, covdiag)
 484
-485        def chisqfunc(p):
+485        def chisqfunc_corr(p):
 486            model = func(p, x)
 487            chisq = anp.sum(anp.dot(chol_inv, (y_f - model)) ** 2)
 488            return chisq
-489    else:
-490        def chisqfunc(p):
-491            model = func(p, x)
-492            chisq = anp.sum(((y_f - model) / dy_f) ** 2)
-493            return chisq
+489
+490    def chisqfunc(p):
+491        model = func(p, x)
+492        chisq = anp.sum(((y_f - model) / dy_f) ** 2)
+493        return chisq
 494
 495    output.method = kwargs.get('method', 'Levenberg-Marquardt')
 496    if not silent:
@@ -602,246 +602,251 @@
 499    if output.method != 'Levenberg-Marquardt':
 500        if output.method == 'migrad':
 501            fit_result = iminuit.minimize(chisqfunc, x0, tol=1e-4)  # Stopping criterion 0.002 * tol * errordef
-502            output.iterations = fit_result.nfev
-503        else:
-504            fit_result = scipy.optimize.minimize(chisqfunc, x0, method=kwargs.get('method'), tol=1e-12)
-505            output.iterations = fit_result.nit
-506
-507        chisquare = fit_result.fun
-508
-509    else:
-510        if kwargs.get('correlated_fit') is True:
-511            def chisqfunc_residuals(p):
-512                model = func(p, x)
-513                chisq = anp.dot(chol_inv, (y_f - model))
-514                return chisq
-515
-516        else:
-517            def chisqfunc_residuals(p):
-518                model = func(p, x)
-519                chisq = ((y_f - model) / dy_f)
-520                return chisq
-521
-522        fit_result = scipy.optimize.least_squares(chisqfunc_residuals, x0, method='lm', ftol=1e-15, gtol=1e-15, xtol=1e-15)
-523
-524        chisquare = np.sum(fit_result.fun ** 2)
-525
-526    output.iterations = fit_result.nfev
-527
-528    if not fit_result.success:
-529        raise Exception('The minimization procedure did not converge.')
+502            if kwargs.get('correlated_fit') is True:
+503                fit_result = iminuit.minimize(chisqfunc_corr, fit_result.x, tol=1e-4)  # Stopping criterion 0.002 * tol * errordef
+504            output.iterations = fit_result.nfev
+505        else:
+506            fit_result = scipy.optimize.minimize(chisqfunc, x0, method=kwargs.get('method'), tol=1e-12)
+507            if kwargs.get('correlated_fit') is True:
+508                fit_result = scipy.optimize.minimize(chisqfunc_corr, fit_result.x, method=kwargs.get('method'), tol=1e-12)
+509            output.iterations = fit_result.nit
+510
+511        chisquare = fit_result.fun
+512
+513    else:
+514        if kwargs.get('correlated_fit') is True:
+515            def chisqfunc_residuals_corr(p):
+516                model = func(p, x)
+517                chisq = anp.dot(chol_inv, (y_f - model))
+518                return chisq
+519
+520        def chisqfunc_residuals(p):
+521            model = func(p, x)
+522            chisq = ((y_f - model) / dy_f)
+523            return chisq
+524
+525        fit_result = scipy.optimize.least_squares(chisqfunc_residuals, x0, method='lm', ftol=1e-15, gtol=1e-15, xtol=1e-15)
+526        if kwargs.get('correlated_fit') is True:
+527            fit_result = scipy.optimize.least_squares(chisqfunc_residuals_corr, fit_result.x, method='lm', ftol=1e-15, gtol=1e-15, xtol=1e-15)
+528
+529        chisquare = np.sum(fit_result.fun ** 2)
 530
-531    if x.shape[-1] - n_parms > 0:
-532        output.chisquare_by_dof = chisquare / (x.shape[-1] - n_parms)
-533    else:
-534        output.chisquare_by_dof = float('nan')
+531    output.iterations = fit_result.nfev
+532
+533    if not fit_result.success:
+534        raise Exception('The minimization procedure did not converge.')
 535
-536    output.message = fit_result.message
-537    if not silent:
-538        print(fit_result.message)
-539        print('chisquare/d.o.f.:', output.chisquare_by_dof)
+536    if x.shape[-1] - n_parms > 0:
+537        output.chisquare_by_dof = chisquare / (x.shape[-1] - n_parms)
+538    else:
+539        output.chisquare_by_dof = float('nan')
 540
-541    if kwargs.get('expected_chisquare') is True:
-542        if kwargs.get('correlated_fit') is not True:
-543            W = np.diag(1 / np.asarray(dy_f))
-544            cov = covariance(y)
-545            A = W @ jacobian(func)(fit_result.x, x)
-546            P_phi = A @ np.linalg.pinv(A.T @ A) @ A.T
-547            expected_chisquare = np.trace((np.identity(x.shape[-1]) - P_phi) @ W @ cov @ W)
-548            output.chisquare_by_expected_chisquare = chisquare / expected_chisquare
-549            if not silent:
-550                print('chisquare/expected_chisquare:',
-551                      output.chisquare_by_expected_chisquare)
-552
-553    fitp = fit_result.x
-554    try:
-555        hess = jacobian(jacobian(chisqfunc))(fitp)
-556    except TypeError:
-557        raise Exception("It is required to use autograd.numpy instead of numpy within fit functions, see the documentation for details.") from None
-558    condn = np.linalg.cond(hess)
-559    if condn > 1e8:
-560        warnings.warn("Hessian matrix might be ill-conditioned ({0:1.2e}), error propagation might be unreliable.\n \
-561                      Maybe try rescaling the problem such that all parameters are of O(1).".format(condn), RuntimeWarning)
-562    try:
-563        hess_inv = np.linalg.inv(hess)
-564    except np.linalg.LinAlgError:
-565        raise Exception("Cannot invert hessian matrix.")
-566    except Exception:
-567        raise Exception("Unknown error in connection with Hessian inverse.")
-568
-569    if kwargs.get('correlated_fit') is True:
-570        def chisqfunc_compact(d):
-571            model = func(d[:n_parms], x)
-572            chisq = anp.sum(anp.dot(chol_inv, (d[n_parms:] - model)) ** 2)
-573            return chisq
-574
-575    else:
-576        def chisqfunc_compact(d):
-577            model = func(d[:n_parms], x)
-578            chisq = anp.sum(((d[n_parms:] - model) / dy_f) ** 2)
-579            return chisq
-580
-581    jac_jac = jacobian(jacobian(chisqfunc_compact))(np.concatenate((fitp, y_f)))
-582
-583    deriv = -hess_inv @ jac_jac[:n_parms, n_parms:]
-584
-585    result = []
-586    for i in range(n_parms):
-587        result.append(derived_observable(lambda x, **kwargs: (x[0] + np.finfo(np.float64).eps) / (y[0].value + np.finfo(np.float64).eps) * fit_result.x[i], list(y), man_grad=list(deriv[i])))
-588
-589    output.fit_parameters = result
-590
-591    output.chisquare = chisqfunc(fit_result.x)
-592    output.dof = x.shape[-1] - n_parms
-593    output.p_value = 1 - chi2.cdf(output.chisquare, output.dof)
-594
-595    if kwargs.get('resplot') is True:
-596        residual_plot(x, y, func, result)
-597
-598    if kwargs.get('qqplot') is True:
-599        qqplot(x, y, func, result)
-600
-601    return output
+541    output.message = fit_result.message
+542    if not silent:
+543        print(fit_result.message)
+544        print('chisquare/d.o.f.:', output.chisquare_by_dof)
+545
+546    if kwargs.get('expected_chisquare') is True:
+547        if kwargs.get('correlated_fit') is not True:
+548            W = np.diag(1 / np.asarray(dy_f))
+549            cov = covariance(y)
+550            A = W @ jacobian(func)(fit_result.x, x)
+551            P_phi = A @ np.linalg.pinv(A.T @ A) @ A.T
+552            expected_chisquare = np.trace((np.identity(x.shape[-1]) - P_phi) @ W @ cov @ W)
+553            output.chisquare_by_expected_chisquare = chisquare / expected_chisquare
+554            if not silent:
+555                print('chisquare/expected_chisquare:',
+556                      output.chisquare_by_expected_chisquare)
+557
+558    fitp = fit_result.x
+559    try:
+560        hess = jacobian(jacobian(chisqfunc))(fitp)
+561    except TypeError:
+562        raise Exception("It is required to use autograd.numpy instead of numpy within fit functions, see the documentation for details.") from None
+563    condn = np.linalg.cond(hess)
+564    if condn > 1e8:
+565        warnings.warn("Hessian matrix might be ill-conditioned ({0:1.2e}), error propagation might be unreliable.\n \
+566                      Maybe try rescaling the problem such that all parameters are of O(1).".format(condn), RuntimeWarning)
+567    try:
+568        hess_inv = np.linalg.inv(hess)
+569    except np.linalg.LinAlgError:
+570        raise Exception("Cannot invert hessian matrix.")
+571    except Exception:
+572        raise Exception("Unknown error in connection with Hessian inverse.")
+573
+574    if kwargs.get('correlated_fit') is True:
+575        def chisqfunc_compact(d):
+576            model = func(d[:n_parms], x)
+577            chisq = anp.sum(anp.dot(chol_inv, (d[n_parms:] - model)) ** 2)
+578            return chisq
+579
+580    else:
+581        def chisqfunc_compact(d):
+582            model = func(d[:n_parms], x)
+583            chisq = anp.sum(((d[n_parms:] - model) / dy_f) ** 2)
+584            return chisq
+585
+586    jac_jac = jacobian(jacobian(chisqfunc_compact))(np.concatenate((fitp, y_f)))
+587
+588    deriv = -hess_inv @ jac_jac[:n_parms, n_parms:]
+589
+590    result = []
+591    for i in range(n_parms):
+592        result.append(derived_observable(lambda x, **kwargs: (x[0] + np.finfo(np.float64).eps) / (y[0].value + np.finfo(np.float64).eps) * fit_result.x[i], list(y), man_grad=list(deriv[i])))
+593
+594    output.fit_parameters = result
+595
+596    output.chisquare = chisqfunc(fit_result.x)
+597    output.dof = x.shape[-1] - n_parms
+598    output.p_value = 1 - chi2.cdf(output.chisquare, output.dof)
+599
+600    if kwargs.get('resplot') is True:
+601        residual_plot(x, y, func, result)
 602
-603
-604def fit_lin(x, y, **kwargs):
-605    """Performs a linear fit to y = n + m * x and returns two Obs n, m.
-606
-607    Parameters
-608    ----------
-609    x : list
-610        Can either be a list of floats in which case no xerror is assumed, or
-611        a list of Obs, where the dvalues of the Obs are used as xerror for the fit.
-612    y : list
-613        List of Obs, the dvalues of the Obs are used as yerror for the fit.
-614    """
-615
-616    def f(a, x):
-617        y = a[0] + a[1] * x
-618        return y
-619
-620    if all(isinstance(n, Obs) for n in x):
-621        out = total_least_squares(x, y, f, **kwargs)
-622        return out.fit_parameters
-623    elif all(isinstance(n, float) or isinstance(n, int) for n in x) or isinstance(x, np.ndarray):
-624        out = least_squares(x, y, f, **kwargs)
-625        return out.fit_parameters
-626    else:
-627        raise Exception('Unsupported types for x')
-628
-629
-630def qqplot(x, o_y, func, p):
-631    """Generates a quantile-quantile plot of the fit result which can be used to
-632       check if the residuals of the fit are gaussian distributed.
-633    """
+603    if kwargs.get('qqplot') is True:
+604        qqplot(x, y, func, result)
+605
+606    return output
+607
+608
+609def fit_lin(x, y, **kwargs):
+610    """Performs a linear fit to y = n + m * x and returns two Obs n, m.
+611
+612    Parameters
+613    ----------
+614    x : list
+615        Can either be a list of floats in which case no xerror is assumed, or
+616        a list of Obs, where the dvalues of the Obs are used as xerror for the fit.
+617    y : list
+618        List of Obs, the dvalues of the Obs are used as yerror for the fit.
+619    """
+620
+621    def f(a, x):
+622        y = a[0] + a[1] * x
+623        return y
+624
+625    if all(isinstance(n, Obs) for n in x):
+626        out = total_least_squares(x, y, f, **kwargs)
+627        return out.fit_parameters
+628    elif all(isinstance(n, float) or isinstance(n, int) for n in x) or isinstance(x, np.ndarray):
+629        out = least_squares(x, y, f, **kwargs)
+630        return out.fit_parameters
+631    else:
+632        raise Exception('Unsupported types for x')
+633
 634
-635    residuals = []
-636    for i_x, i_y in zip(x, o_y):
-637        residuals.append((i_y - func(p, i_x)) / i_y.dvalue)
-638    residuals = sorted(residuals)
-639    my_y = [o.value for o in residuals]
-640    probplot = scipy.stats.probplot(my_y)
-641    my_x = probplot[0][0]
-642    plt.figure(figsize=(8, 8 / 1.618))
-643    plt.errorbar(my_x, my_y, fmt='o')
-644    fit_start = my_x[0]
-645    fit_stop = my_x[-1]
-646    samples = np.arange(fit_start, fit_stop, 0.01)
-647    plt.plot(samples, samples, 'k--', zorder=11, label='Standard normal distribution')
-648    plt.plot(samples, probplot[1][0] * samples + probplot[1][1], zorder=10, label='Least squares fit, r=' + str(np.around(probplot[1][2], 3)), marker='', ls='-')
-649
-650    plt.xlabel('Theoretical quantiles')
-651    plt.ylabel('Ordered Values')
-652    plt.legend()
-653    plt.draw()
 654
-655
-656def residual_plot(x, y, func, fit_res):
-657    """ Generates a plot which compares the fit to the data and displays the corresponding residuals"""
-658    sorted_x = sorted(x)
-659    xstart = sorted_x[0] - 0.5 * (sorted_x[1] - sorted_x[0])
-660    xstop = sorted_x[-1] + 0.5 * (sorted_x[-1] - sorted_x[-2])
-661    x_samples = np.arange(xstart, xstop + 0.01, 0.01)
-662
-663    plt.figure(figsize=(8, 8 / 1.618))
-664    gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1], wspace=0.0, hspace=0.0)
-665    ax0 = plt.subplot(gs[0])
-666    ax0.errorbar(x, [o.value for o in y], yerr=[o.dvalue for o in y], ls='none', fmt='o', capsize=3, markersize=5, label='Data')
-667    ax0.plot(x_samples, func([o.value for o in fit_res], x_samples), label='Fit', zorder=10, ls='-', ms=0)
-668    ax0.set_xticklabels([])
-669    ax0.set_xlim([xstart, xstop])
-670    ax0.set_xticklabels([])
-671    ax0.legend()
-672
-673    residuals = (np.asarray([o.value for o in y]) - func([o.value for o in fit_res], x)) / np.asarray([o.dvalue for o in y])
-674    ax1 = plt.subplot(gs[1])
-675    ax1.plot(x, residuals, 'ko', ls='none', markersize=5)
-676    ax1.tick_params(direction='out')
-677    ax1.tick_params(axis="x", bottom=True, top=True, labelbottom=True)
-678    ax1.axhline(y=0.0, ls='--', color='k', marker=" ")
-679    ax1.fill_between(x_samples, -1.0, 1.0, alpha=0.1, facecolor='k')
-680    ax1.set_xlim([xstart, xstop])
-681    ax1.set_ylabel('Residuals')
-682    plt.subplots_adjust(wspace=None, hspace=None)
-683    plt.draw()
-684
-685
-686def error_band(x, func, beta):
-687    """Returns the error band for an array of sample values x, for given fit function func with optimized parameters beta."""
-688    cov = covariance(beta)
-689    if np.any(np.abs(cov - cov.T) > 1000 * np.finfo(np.float64).eps):
-690        warnings.warn("Covariance matrix is not symmetric within floating point precision", RuntimeWarning)
-691
-692    deriv = []
-693    for i, item in enumerate(x):
-694        deriv.append(np.array(egrad(func)([o.value for o in beta], item)))
-695
-696    err = []
-697    for i, item in enumerate(x):
-698        err.append(np.sqrt(deriv[i] @ cov @ deriv[i]))
-699    err = np.array(err)
 700
-701    return err
-702
-703
-704def ks_test(objects=None):
-705    """Performs a Kolmogorov–Smirnov test for the p-values of all fit objects.
-706
-707    Parameters
-708    ----------
-709    objects : list
-710        List of fit results to include in the analysis (optional).
-711    """
-712
-713    if objects is None:
-714        obs_list = []
-715        for obj in gc.get_objects():
-716            if isinstance(obj, Fit_result):
-717                obs_list.append(obj)
-718    else:
-719        obs_list = objects
-720
-721    p_values = [o.p_value for o in obs_list]
-722
-723    bins = len(p_values)
-724    x = np.arange(0, 1.001, 0.001)
-725    plt.plot(x, x, 'k', zorder=1)
-726    plt.xlim(0, 1)
-727    plt.ylim(0, 1)
-728    plt.xlabel('p-value')
-729    plt.ylabel('Cumulative probability')
-730    plt.title(str(bins) + ' p-values')
-731
-732    n = np.arange(1, bins + 1) / np.float64(bins)
-733    Xs = np.sort(p_values)
-734    plt.step(Xs, n)
-735    diffs = n - Xs
-736    loc_max_diff = np.argmax(np.abs(diffs))
-737    loc = Xs[loc_max_diff]
-738    plt.annotate('', xy=(loc, loc), xytext=(loc, loc + diffs[loc_max_diff]), arrowprops=dict(arrowstyle='<->', shrinkA=0, shrinkB=0))
-739    plt.draw()
-740
-741    print(scipy.stats.kstest(p_values, 'uniform'))
+655    plt.xlabel('Theoretical quantiles')
+656    plt.ylabel('Ordered Values')
+657    plt.legend()
+658    plt.draw()
+659
+660
+661def residual_plot(x, y, func, fit_res):
+662    """ Generates a plot which compares the fit to the data and displays the corresponding residuals"""
+663    sorted_x = sorted(x)
+664    xstart = sorted_x[0] - 0.5 * (sorted_x[1] - sorted_x[0])
+665    xstop = sorted_x[-1] + 0.5 * (sorted_x[-1] - sorted_x[-2])
+666    x_samples = np.arange(xstart, xstop + 0.01, 0.01)
+667
+668    plt.figure(figsize=(8, 8 / 1.618))
+669    gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1], wspace=0.0, hspace=0.0)
+670    ax0 = plt.subplot(gs[0])
+671    ax0.errorbar(x, [o.value for o in y], yerr=[o.dvalue for o in y], ls='none', fmt='o', capsize=3, markersize=5, label='Data')
+672    ax0.plot(x_samples, func([o.value for o in fit_res], x_samples), label='Fit', zorder=10, ls='-', ms=0)
+673    ax0.set_xticklabels([])
+674    ax0.set_xlim([xstart, xstop])
+675    ax0.set_xticklabels([])
+676    ax0.legend()
+677
+678    residuals = (np.asarray([o.value for o in y]) - func([o.value for o in fit_res], x)) / np.asarray([o.dvalue for o in y])
+679    ax1 = plt.subplot(gs[1])
+680    ax1.plot(x, residuals, 'ko', ls='none', markersize=5)
+681    ax1.tick_params(direction='out')
+682    ax1.tick_params(axis="x", bottom=True, top=True, labelbottom=True)
+683    ax1.axhline(y=0.0, ls='--', color='k', marker=" ")
+684    ax1.fill_between(x_samples, -1.0, 1.0, alpha=0.1, facecolor='k')
+685    ax1.set_xlim([xstart, xstop])
+686    ax1.set_ylabel('Residuals')
+687    plt.subplots_adjust(wspace=None, hspace=None)
+688    plt.draw()
+689
+690
+691def error_band(x, func, beta):
+692    """Returns the error band for an array of sample values x, for given fit function func with optimized parameters beta."""
+693    cov = covariance(beta)
+694    if np.any(np.abs(cov - cov.T) > 1000 * np.finfo(np.float64).eps):
+695        warnings.warn("Covariance matrix is not symmetric within floating point precision", RuntimeWarning)
+696
+697    deriv = []
+698    for i, item in enumerate(x):
+699        deriv.append(np.array(egrad(func)([o.value for o in beta], item)))
+701    err = []
+702    for i, item in enumerate(x):
+703        err.append(np.sqrt(deriv[i] @ cov @ deriv[i]))
+704    err = np.array(err)
+705
+706    return err
+707
+708
+709def ks_test(objects=None):
+710    """Performs a Kolmogorov–Smirnov test for the p-values of all fit objects.
+711
+712    Parameters
+713    ----------
+714    objects : list
+715        List of fit results to include in the analysis (optional).
+716    """
+717
+718    if objects is None:
+719        obs_list = []
+720        for obj in gc.get_objects():
+721            if isinstance(obj, Fit_result):
+722                obs_list.append(obj)
+723    else:
+724        obs_list = objects
+725
+726    p_values = [o.p_value for o in obs_list]
+727
+728    bins = len(p_values)
+729    x = np.arange(0, 1.001, 0.001)
+730    plt.plot(x, x, 'k', zorder=1)
+731    plt.xlim(0, 1)
+732    plt.ylim(0, 1)
+733    plt.xlabel('p-value')
+734    plt.ylabel('Cumulative probability')
+735    plt.title(str(bins) + ' p-values')
+736
+737    n = np.arange(1, bins + 1) / np.float64(bins)
+738    Xs = np.sort(p_values)
+739    plt.step(Xs, n)
+740    diffs = n - Xs
+741    loc_max_diff = np.argmax(np.abs(diffs))
+742    loc = Xs[loc_max_diff]
+743    plt.annotate('', xy=(loc, loc), xytext=(loc, loc + diffs[loc_max_diff]), arrowprops=dict(arrowstyle='<->', shrinkA=0, shrinkB=0))
+744    plt.draw()
+745
+746    print(scipy.stats.kstest(p_values, 'uniform'))
@@ -1355,30 +1360,30 @@ has to be calculated (default False).
-605def fit_lin(x, y, **kwargs):
-606    """Performs a linear fit to y = n + m * x and returns two Obs n, m.
-607
-608    Parameters
-609    ----------
-610    x : list
-611        Can either be a list of floats in which case no xerror is assumed, or
-612        a list of Obs, where the dvalues of the Obs are used as xerror for the fit.
-613    y : list
-614        List of Obs, the dvalues of the Obs are used as yerror for the fit.
-615    """
-616
-617    def f(a, x):
-618        y = a[0] + a[1] * x
-619        return y
-620
-621    if all(isinstance(n, Obs) for n in x):
-622        out = total_least_squares(x, y, f, **kwargs)
-623        return out.fit_parameters
-624    elif all(isinstance(n, float) or isinstance(n, int) for n in x) or isinstance(x, np.ndarray):
-625        out = least_squares(x, y, f, **kwargs)
-626        return out.fit_parameters
-627    else:
-628        raise Exception('Unsupported types for x')
+610def fit_lin(x, y, **kwargs):
+611    """Performs a linear fit to y = n + m * x and returns two Obs n, m.
+612
+613    Parameters
+614    ----------
+615    x : list
+616        Can either be a list of floats in which case no xerror is assumed, or
+617        a list of Obs, where the dvalues of the Obs are used as xerror for the fit.
+618    y : list
+619        List of Obs, the dvalues of the Obs are used as yerror for the fit.
+620    """
+621
+622    def f(a, x):
+623        y = a[0] + a[1] * x
+624        return y
+625
+626    if all(isinstance(n, Obs) for n in x):
+627        out = total_least_squares(x, y, f, **kwargs)
+628        return out.fit_parameters
+629    elif all(isinstance(n, float) or isinstance(n, int) for n in x) or isinstance(x, np.ndarray):
+630        out = least_squares(x, y, f, **kwargs)
+631        return out.fit_parameters
+632    else:
+633        raise Exception('Unsupported types for x')
 
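A minimal usage sketch for fit_lin (not part of the patch; the pe alias and the pseudo_Obs test data are assumptions):

    import pyerrors as pe

    # Plain floats for x select the least_squares branch (no x-errors);
    # a list of Obs would dispatch to total_least_squares instead.
    x = [1.0, 2.0, 3.0, 4.0]
    y = [pe.pseudo_Obs(1.0 + 2.0 * xi, 0.05, 'ens1') for xi in x]
    n, m = pe.fits.fit_lin(x, y)  # intercept n and slope m as Obs
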
@@ -1408,30 +1413,30 @@ List of Obs, the dvalues of the Obs are used as yerror for the fit.
-631def qqplot(x, o_y, func, p):
-632    """Generates a quantile-quantile plot of the fit result which can be used to
-633       check if the residuals of the fit are gaussian distributed.
-634    """
-635
-636    residuals = []
-637    for i_x, i_y in zip(x, o_y):
-638        residuals.append((i_y - func(p, i_x)) / i_y.dvalue)
-639    residuals = sorted(residuals)
-640    my_y = [o.value for o in residuals]
-641    probplot = scipy.stats.probplot(my_y)
-642    my_x = probplot[0][0]
-643    plt.figure(figsize=(8, 8 / 1.618))
-644    plt.errorbar(my_x, my_y, fmt='o')
-645    fit_start = my_x[0]
-646    fit_stop = my_x[-1]
-647    samples = np.arange(fit_start, fit_stop, 0.01)
-648    plt.plot(samples, samples, 'k--', zorder=11, label='Standard normal distribution')
-649    plt.plot(samples, probplot[1][0] * samples + probplot[1][1], zorder=10, label='Least squares fit, r=' + str(np.around(probplot[1][2], 3)), marker='', ls='-')
-650
-651    plt.xlabel('Theoretical quantiles')
-652    plt.ylabel('Ordered Values')
-653    plt.legend()
-654    plt.draw()
+636def qqplot(x, o_y, func, p):
+637    """Generates a quantile-quantile plot of the fit result which can be used to
+638       check if the residuals of the fit are gaussian distributed.
+639    """
+640
+641    residuals = []
+642    for i_x, i_y in zip(x, o_y):
+643        residuals.append((i_y - func(p, i_x)) / i_y.dvalue)
+644    residuals = sorted(residuals)
+645    my_y = [o.value for o in residuals]
+646    probplot = scipy.stats.probplot(my_y)
+647    my_x = probplot[0][0]
+648    plt.figure(figsize=(8, 8 / 1.618))
+649    plt.errorbar(my_x, my_y, fmt='o')
+650    fit_start = my_x[0]
+651    fit_stop = my_x[-1]
+652    samples = np.arange(fit_start, fit_stop, 0.01)
+653    plt.plot(samples, samples, 'k--', zorder=11, label='Standard normal distribution')
+654    plt.plot(samples, probplot[1][0] * samples + probplot[1][1], zorder=10, label='Least squares fit, r=' + str(np.around(probplot[1][2], 3)), marker='', ls='-')
+655
+656    plt.xlabel('Theoretical quantiles')
+657    plt.ylabel('Ordered Values')
+658    plt.legend()
+659    plt.draw()
 
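As the first hunk shows (new lines 600-604), qqplot is usually reached through the qqplot keyword of least_squares rather than called directly. A sketch, assuming the pe alias from above and a fit function written with autograd.numpy, which the Hessian computation requires:

    import autograd.numpy as anp  # required inside fit functions
    import numpy as np
    import pyerrors as pe

    x = np.arange(1, 9)
    y = [pe.pseudo_Obs(2.0 * np.exp(-0.2 * xi), 0.05, 'ens1') for xi in x]

    def func(a, x):
        return a[0] * anp.exp(-a[1] * x)

    fit_res = pe.fits.least_squares(x, y, func, qqplot=True)
    # With correlated_fit=True the patched code above first minimizes the
    # uncorrelated chisquare and then restarts from that minimum with the
    # correlated chisqfunc_corr (new lines 502-508 of the first hunk).
    fit_res_corr = pe.fits.least_squares(x, y, func, correlated_fit=True)
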
@@ -1452,34 +1457,34 @@ check if the residuals of the fit are gaussian distributed.

-657def residual_plot(x, y, func, fit_res):
-658    """ Generates a plot which compares the fit to the data and displays the corresponding residuals"""
-659    sorted_x = sorted(x)
-660    xstart = sorted_x[0] - 0.5 * (sorted_x[1] - sorted_x[0])
-661    xstop = sorted_x[-1] + 0.5 * (sorted_x[-1] - sorted_x[-2])
-662    x_samples = np.arange(xstart, xstop + 0.01, 0.01)
-663
-664    plt.figure(figsize=(8, 8 / 1.618))
-665    gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1], wspace=0.0, hspace=0.0)
-666    ax0 = plt.subplot(gs[0])
-667    ax0.errorbar(x, [o.value for o in y], yerr=[o.dvalue for o in y], ls='none', fmt='o', capsize=3, markersize=5, label='Data')
-668    ax0.plot(x_samples, func([o.value for o in fit_res], x_samples), label='Fit', zorder=10, ls='-', ms=0)
-669    ax0.set_xticklabels([])
-670    ax0.set_xlim([xstart, xstop])
-671    ax0.set_xticklabels([])
-672    ax0.legend()
-673
-674    residuals = (np.asarray([o.value for o in y]) - func([o.value for o in fit_res], x)) / np.asarray([o.dvalue for o in y])
-675    ax1 = plt.subplot(gs[1])
-676    ax1.plot(x, residuals, 'ko', ls='none', markersize=5)
-677    ax1.tick_params(direction='out')
-678    ax1.tick_params(axis="x", bottom=True, top=True, labelbottom=True)
-679    ax1.axhline(y=0.0, ls='--', color='k', marker=" ")
-680    ax1.fill_between(x_samples, -1.0, 1.0, alpha=0.1, facecolor='k')
-681    ax1.set_xlim([xstart, xstop])
-682    ax1.set_ylabel('Residuals')
-683    plt.subplots_adjust(wspace=None, hspace=None)
-684    plt.draw()
+662def residual_plot(x, y, func, fit_res):
+663    """ Generates a plot which compares the fit to the data and displays the corresponding residuals"""
+664    sorted_x = sorted(x)
+665    xstart = sorted_x[0] - 0.5 * (sorted_x[1] - sorted_x[0])
+666    xstop = sorted_x[-1] + 0.5 * (sorted_x[-1] - sorted_x[-2])
+667    x_samples = np.arange(xstart, xstop + 0.01, 0.01)
+668
+669    plt.figure(figsize=(8, 8 / 1.618))
+670    gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1], wspace=0.0, hspace=0.0)
+671    ax0 = plt.subplot(gs[0])
+672    ax0.errorbar(x, [o.value for o in y], yerr=[o.dvalue for o in y], ls='none', fmt='o', capsize=3, markersize=5, label='Data')
+673    ax0.plot(x_samples, func([o.value for o in fit_res], x_samples), label='Fit', zorder=10, ls='-', ms=0)
+674    ax0.set_xticklabels([])
+675    ax0.set_xlim([xstart, xstop])
+676    ax0.set_xticklabels([])
+677    ax0.legend()
+678
+679    residuals = (np.asarray([o.value for o in y]) - func([o.value for o in fit_res], x)) / np.asarray([o.dvalue for o in y])
+680    ax1 = plt.subplot(gs[1])
+681    ax1.plot(x, residuals, 'ko', ls='none', markersize=5)
+682    ax1.tick_params(direction='out')
+683    ax1.tick_params(axis="x", bottom=True, top=True, labelbottom=True)
+684    ax1.axhline(y=0.0, ls='--', color='k', marker=" ")
+685    ax1.fill_between(x_samples, -1.0, 1.0, alpha=0.1, facecolor='k')
+686    ax1.set_xlim([xstart, xstop])
+687    ax1.set_ylabel('Residuals')
+688    plt.subplots_adjust(wspace=None, hspace=None)
+689    plt.draw()
 
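residual_plot is triggered the same way via the resplot keyword (new lines 600-601 of the first hunk); reusing the names from the previous sketch:

    fit_res = pe.fits.least_squares(x, y, func, resplot=True)
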
@@ -1499,22 +1504,22 @@ check if the residuals of the fit are gaussian distributed.

-687def error_band(x, func, beta):
-688    """Returns the error band for an array of sample values x, for given fit function func with optimized parameters beta."""
-689    cov = covariance(beta)
-690    if np.any(np.abs(cov - cov.T) > 1000 * np.finfo(np.float64).eps):
-691        warnings.warn("Covariance matrix is not symmetric within floating point precision", RuntimeWarning)
-692
-693    deriv = []
-694    for i, item in enumerate(x):
-695        deriv.append(np.array(egrad(func)([o.value for o in beta], item)))
-696
-697    err = []
-698    for i, item in enumerate(x):
-699        err.append(np.sqrt(deriv[i] @ cov @ deriv[i]))
-700    err = np.array(err)
+692def error_band(x, func, beta):
+693    """Returns the error band for an array of sample values x, for given fit function func with optimized parameters beta."""
+694    cov = covariance(beta)
+695    if np.any(np.abs(cov - cov.T) > 1000 * np.finfo(np.float64).eps):
+696        warnings.warn("Covariance matrix is not symmetric within floating point precision", RuntimeWarning)
+697
+698    deriv = []
+699    for i, item in enumerate(x):
+700        deriv.append(np.array(egrad(func)([o.value for o in beta], item)))
 701
-702    return err
+702    err = []
+703    for i, item in enumerate(x):
+704        err.append(np.sqrt(deriv[i] @ cov @ deriv[i]))
+705    err = np.array(err)
+706
+707    return err
 
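A sketch of calling error_band directly once a fit has converged (fit_res and func as in the sketches above; the sampling range is an arbitrary choice):

    x_samples = np.arange(1, 8, 0.1)
    # One-sigma error of the fitted curve at each sample point, propagated
    # from the covariance of the fitted parameters.
    band = pe.fits.error_band(x_samples, func, fit_res.fit_parameters)
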
@@ -1534,44 +1539,44 @@ check if the residuals of the fit are gaussian distributed.

-705def ks_test(objects=None):
-706    """Performs a Kolmogorov–Smirnov test for the p-values of all fit objects.
-707
-708    Parameters
-709    ----------
-710    objects : list
-711        List of fit results to include in the analysis (optional).
-712    """
-713
-714    if objects is None:
-715        obs_list = []
-716        for obj in gc.get_objects():
-717            if isinstance(obj, Fit_result):
-718                obs_list.append(obj)
-719    else:
-720        obs_list = objects
-721
-722    p_values = [o.p_value for o in obs_list]
-723
-724    bins = len(p_values)
-725    x = np.arange(0, 1.001, 0.001)
-726    plt.plot(x, x, 'k', zorder=1)
-727    plt.xlim(0, 1)
-728    plt.ylim(0, 1)
-729    plt.xlabel('p-value')
-730    plt.ylabel('Cumulative probability')
-731    plt.title(str(bins) + ' p-values')
-732
-733    n = np.arange(1, bins + 1) / np.float64(bins)
-734    Xs = np.sort(p_values)
-735    plt.step(Xs, n)
-736    diffs = n - Xs
-737    loc_max_diff = np.argmax(np.abs(diffs))
-738    loc = Xs[loc_max_diff]
-739    plt.annotate('', xy=(loc, loc), xytext=(loc, loc + diffs[loc_max_diff]), arrowprops=dict(arrowstyle='<->', shrinkA=0, shrinkB=0))
-740    plt.draw()
-741
-742    print(scipy.stats.kstest(p_values, 'uniform'))
+710def ks_test(objects=None):
+711    """Performs a Kolmogorov–Smirnov test for the p-values of all fit objects.
+712
+713    Parameters
+714    ----------
+715    objects : list
+716        List of fit results to include in the analysis (optional).
+717    """
+718
+719    if objects is None:
+720        obs_list = []
+721        for obj in gc.get_objects():
+722            if isinstance(obj, Fit_result):
+723                obs_list.append(obj)
+724    else:
+725        obs_list = objects
+726
+727    p_values = [o.p_value for o in obs_list]
+728
+729    bins = len(p_values)
+730    x = np.arange(0, 1.001, 0.001)
+731    plt.plot(x, x, 'k', zorder=1)
+732    plt.xlim(0, 1)
+733    plt.ylim(0, 1)
+734    plt.xlabel('p-value')
+735    plt.ylabel('Cumulative probability')
+736    plt.title(str(bins) + ' p-values')
+737
+738    n = np.arange(1, bins + 1) / np.float64(bins)
+739    Xs = np.sort(p_values)
+740    plt.step(Xs, n)
+741    diffs = n - Xs
+742    loc_max_diff = np.argmax(np.abs(diffs))
+743    loc = Xs[loc_max_diff]
+744    plt.annotate('', xy=(loc, loc), xytext=(loc, loc + diffs[loc_max_diff]), arrowprops=dict(arrowstyle='<->', shrinkA=0, shrinkB=0))
+745    plt.draw()
+746
+747    print(scipy.stats.kstest(p_values, 'uniform'))
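
A sketch of ks_test (assumes at least one Fit_result from the session is still alive, since the argument-free call scans the garbage collector, as the code above shows):

    pe.fits.ks_test()                   # collect every Fit_result in the session
    pe.fits.ks_test(objects=[fit_res])  # or test an explicit list of fits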