diff --git a/docs/pyerrors/fits.html b/docs/pyerrors/fits.html
index 4ce5a268..5d49a01e 100644
--- a/docs/pyerrors/fits.html
+++ b/docs/pyerrors/fits.html
@@ -662,187 +662,190 @@
 559
 560     fitp = fit_result.x
 561     try:
-562         hess = jacobian(jacobian(chisqfunc))(fitp)
-563     except TypeError:
-564         raise Exception("It is required to use autograd.numpy instead of numpy within fit functions, see the documentation for details.") from None
-565
-566     if kwargs.get('correlated_fit') is True:
-567         def chisqfunc_compact(d):
-568             model = func(d[:n_parms], x)
-569             chisq = anp.sum(anp.dot(chol_inv, (d[n_parms:] - model)) ** 2)
-570             return chisq
-571
-572     else:
-573         def chisqfunc_compact(d):
-574             model = func(d[:n_parms], x)
-575             chisq = anp.sum(((d[n_parms:] - model) / dy_f) ** 2)
-576             return chisq
-577
-578     jac_jac = jacobian(jacobian(chisqfunc_compact))(np.concatenate((fitp, y_f)))
-579
-580     # Compute hess^{-1} @ jac_jac[:n_parms, n_parms:] using LAPACK dgesv
-581     try:
-582         deriv = -scipy.linalg.solve(hess, jac_jac[:n_parms, n_parms:])
-583     except np.linalg.LinAlgError:
-584         raise Exception("Cannot invert hessian matrix.")
-585
-586     result = []
-587     for i in range(n_parms):
-588         result.append(derived_observable(lambda x, **kwargs: (x[0] + np.finfo(np.float64).eps) / (y[0].value + np.finfo(np.float64).eps) * fit_result.x[i], list(y), man_grad=list(deriv[i])))
-589
-590     output.fit_parameters = result
-591
-592     output.chisquare = chisquare
-593     output.dof = x.shape[-1] - n_parms
-594     output.p_value = 1 - chi2.cdf(output.chisquare, output.dof)
-595
-596     if kwargs.get('resplot') is True:
-597         residual_plot(x, y, func, result)
+562         if kwargs.get('correlated_fit') is True:
+563             hess = jacobian(jacobian(chisqfunc_corr))(fitp)
+564         else:
+565             hess = jacobian(jacobian(chisqfunc))(fitp)
+566     except TypeError:
+567         raise Exception("It is required to use autograd.numpy instead of numpy within fit functions, see the documentation for details.") from None
+568
+569     if kwargs.get('correlated_fit') is True:
+570         def chisqfunc_compact(d):
+571             model = func(d[:n_parms], x)
+572             chisq = anp.sum(anp.dot(chol_inv, (d[n_parms:] - model)) ** 2)
+573             return chisq
+574
+575     else:
+576         def chisqfunc_compact(d):
+577             model = func(d[:n_parms], x)
+578             chisq = anp.sum(((d[n_parms:] - model) / dy_f) ** 2)
+579             return chisq
+580
+581     jac_jac = jacobian(jacobian(chisqfunc_compact))(np.concatenate((fitp, y_f)))
+582
+583     # Compute hess^{-1} @ jac_jac[:n_parms, n_parms:] using LAPACK dgesv
+584     try:
+585         deriv = -scipy.linalg.solve(hess, jac_jac[:n_parms, n_parms:])
+586     except np.linalg.LinAlgError:
+587         raise Exception("Cannot invert hessian matrix.")
+588
+589     result = []
+590     for i in range(n_parms):
+591         result.append(derived_observable(lambda x, **kwargs: (x[0] + np.finfo(np.float64).eps) / (y[0].value + np.finfo(np.float64).eps) * fit_result.x[i], list(y), man_grad=list(deriv[i])))
+592
+593     output.fit_parameters = result
+594
+595     output.chisquare = chisquare
+596     output.dof = x.shape[-1] - n_parms
+597     output.p_value = 1 - chi2.cdf(output.chisquare, output.dof)
 598
-599     if kwargs.get('qqplot') is True:
-600         qqplot(x, y, func, result)
+599     if kwargs.get('resplot') is True:
+600         residual_plot(x, y, func, result)
 601
-602     return output
-603
+602     if kwargs.get('qqplot') is True:
+603         qqplot(x, y, func, result)
 604
-605 def fit_lin(x, y, **kwargs):
-606     """Performs a linear fit to y = n + m * x and returns two Obs n, m.
+605     return output
+606
 607
-608     Parameters
-609     ----------
-610     x : list
-611         Can either be a list of floats in which case no xerror is assumed, or
-612         a list of Obs, where the dvalues of the Obs are used as xerror for the fit.
-613     y : list
-614         List of Obs, the dvalues of the Obs are used as yerror for the fit.
-615     """
-616
-617     def f(a, x):
-618         y = a[0] + a[1] * x
-619         return y
-620
-621     if all(isinstance(n, Obs) for n in x):
-622         out = total_least_squares(x, y, f, **kwargs)
-623         return out.fit_parameters
-624     elif all(isinstance(n, float) or isinstance(n, int) for n in x) or isinstance(x, np.ndarray):
-625         out = least_squares(x, y, f, **kwargs)
 626         return out.fit_parameters
-627     else:
-628         raise Exception('Unsupported types for x')
-629
-630
-631 def qqplot(x, o_y, func, p):
-632     """Generates a quantile-quantile plot of the fit result which can be used to
-633     check if the residuals of the fit are gaussian distributed.
-634     """
-635
-636     residuals = []
-637     for i_x, i_y in zip(x, o_y):
-638         residuals.append((i_y - func(p, i_x)) / i_y.dvalue)
-639     residuals = sorted(residuals)
-640     my_y = [o.value for o in residuals]
-641     probplot = scipy.stats.probplot(my_y)
-642     my_x = probplot[0][0]
-643     plt.figure(figsize=(8, 8 / 1.618))
-644     plt.errorbar(my_x, my_y, fmt='o')
-645     fit_start = my_x[0]
-646     fit_stop = my_x[-1]
-647     samples = np.arange(fit_start, fit_stop, 0.01)
-648     plt.plot(samples, samples, 'k--', zorder=11, label='Standard normal distribution')
-649     plt.plot(samples, probplot[1][0] * samples + probplot[1][1], zorder=10, label='Least squares fit, r=' + str(np.around(probplot[1][2], 3)), marker='', ls='-')
-650
-651     plt.xlabel('Theoretical quantiles')
-652     plt.ylabel('Ordered Values')
-653     plt.legend()
-654     plt.draw()
-655
-656
-657 def residual_plot(x, y, func, fit_res):
-658     """ Generates a plot which compares the fit to the data and displays the corresponding residuals"""
-659     sorted_x = sorted(x)
-660     xstart = sorted_x[0] - 0.5 * (sorted_x[1] - sorted_x[0])
-661     xstop = sorted_x[-1] + 0.5 * (sorted_x[-1] - sorted_x[-2])
-662     x_samples = np.arange(xstart, xstop + 0.01, 0.01)
-663
-664     plt.figure(figsize=(8, 8 / 1.618))
-665     gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1], wspace=0.0, hspace=0.0)
-666     ax0 = plt.subplot(gs[0])
-667     ax0.errorbar(x, [o.value for o in y], yerr=[o.dvalue for o in y], ls='none', fmt='o', capsize=3, markersize=5, label='Data')
-668     ax0.plot(x_samples, func([o.value for o in fit_res], x_samples), label='Fit', zorder=10, ls='-', ms=0)
-669     ax0.set_xticklabels([])
-670     ax0.set_xlim([xstart, xstop])
-671     ax0.set_xticklabels([])
-672     ax0.legend()
-673
-674     residuals = (np.asarray([o.value for o in y]) - func([o.value for o in fit_res], x)) / np.asarray([o.dvalue for o in y])
-675     ax1 = plt.subplot(gs[1])
-676     ax1.plot(x, residuals, 'ko', ls='none', markersize=5)
-677     ax1.tick_params(direction='out')
-678     ax1.tick_params(axis="x", bottom=True, top=True, labelbottom=True)
-679     ax1.axhline(y=0.0, ls='--', color='k', marker=" ")
-680     ax1.fill_between(x_samples, -1.0, 1.0, alpha=0.1, facecolor='k')
-681     ax1.set_xlim([xstart, xstop])
-682     ax1.set_ylabel('Residuals')
-683     plt.subplots_adjust(wspace=None, hspace=None)
-684     plt.draw()
-685
-686
-687 def error_band(x, func, beta):
-688     """Returns the error band for an array of sample values x, for given fit function func with optimized parameters beta."""
-689     cov = covariance(beta)
-690     if np.any(np.abs(cov - cov.T) > 1000 * np.finfo(np.float64).eps):
-691         warnings.warn("Covariance matrix is not symmetric within floating point precision", RuntimeWarning)
-692
-693     deriv = []
-694     for i, item in enumerate(x):
-695         deriv.append(np.array(egrad(func)([o.value for o in beta], item)))
-696
-697     err = []
-698     for i, item in enumerate(x):
-699         err.append(np.sqrt(deriv[i] @ cov @ deriv[i]))
-700     err = np.array(err)
-701
-702     return err
-703
 704
-705 def ks_test(objects=None):
-706     """Performs a Kolmogorov–Smirnov test for the p-values of all fit object.
 707
-708     Parameters
-709     ----------
-710     objects : list
-711         List of fit results to include in the analysis (optional).
-712     """
-713
-714     if objects is None:
-715         obs_list = []
-716         for obj in gc.get_objects():
-717             if isinstance(obj, Fit_result):
-718                 obs_list.append(obj)
-719     else:
-720         obs_list = objects
-721
-722     p_values = [o.p_value for o in obs_list]
-723
-724     bins = len(p_values)
-725     x = np.arange(0, 1.001, 0.001)
-726     plt.plot(x, x, 'k', zorder=1)
-727     plt.xlim(0, 1)
-728     plt.ylim(0, 1)
-729     plt.xlabel('p-value')
-730     plt.ylabel('Cumulative probability')
-731     plt.title(str(bins) + ' p-values')
-732
-733     n = np.arange(1, bins + 1) / np.float64(bins)
-734     Xs = np.sort(p_values)
-735     plt.step(Xs, n)
-736     diffs = n - Xs
-737     loc_max_diff = np.argmax(np.abs(diffs))
-738     loc = Xs[loc_max_diff]
-739     plt.annotate('', xy=(loc, loc), xytext=(loc, loc + diffs[loc_max_diff]), arrowprops=dict(arrowstyle='<->', shrinkA=0, shrinkB=0))
-740     plt.draw()
-741
-742     print(scipy.stats.kstest(p_values, 'uniform'))
+608 def fit_lin(x, y, **kwargs):
+609     """Performs a linear fit to y = n + m * x and returns two Obs n, m.
+610
+611     Parameters
+612     ----------
+613     x : list
+614         Can either be a list of floats in which case no xerror is assumed, or
+615         a list of Obs, where the dvalues of the Obs are used as xerror for the fit.
+616     y : list
+617         List of Obs, the dvalues of the Obs are used as yerror for the fit.
+618     """
+619
+620     def f(a, x):
+621         y = a[0] + a[1] * x
+622         return y
+623
+624     if all(isinstance(n, Obs) for n in x):
+625         out = total_least_squares(x, y, f, **kwargs)
+626         return out.fit_parameters
+627     elif all(isinstance(n, float) or isinstance(n, int) for n in x) or isinstance(x, np.ndarray):
+628         out = least_squares(x, y, f, **kwargs)
+629         return out.fit_parameters
+630     else:
+631         raise Exception('Unsupported types for x')
+632
+633
+634 def qqplot(x, o_y, func, p):
+635     """Generates a quantile-quantile plot of the fit result which can be used to
+636     check if the residuals of the fit are gaussian distributed.
+637     """
+638
+639     residuals = []
+640     for i_x, i_y in zip(x, o_y):
+641         residuals.append((i_y - func(p, i_x)) / i_y.dvalue)
+642     residuals = sorted(residuals)
+643     my_y = [o.value for o in residuals]
+644     probplot = scipy.stats.probplot(my_y)
+645     my_x = probplot[0][0]
+646     plt.figure(figsize=(8, 8 / 1.618))
+647     plt.errorbar(my_x, my_y, fmt='o')
+648     fit_start = my_x[0]
+649     fit_stop = my_x[-1]
+650     samples = np.arange(fit_start, fit_stop, 0.01)
+651     plt.plot(samples, samples, 'k--', zorder=11, label='Standard normal distribution')
+652     plt.plot(samples, probplot[1][0] * samples + probplot[1][1], zorder=10, label='Least squares fit, r=' + str(np.around(probplot[1][2], 3)), marker='', ls='-')
+653
+654     plt.xlabel('Theoretical quantiles')
+655     plt.ylabel('Ordered Values')
+656     plt.legend()
+657     plt.draw()
+658
+659
+660 def residual_plot(x, y, func, fit_res):
+661     """ Generates a plot which compares the fit to the data and displays the corresponding residuals"""
+662     sorted_x = sorted(x)
+663     xstart = sorted_x[0] - 0.5 * (sorted_x[1] - sorted_x[0])
+664     xstop = sorted_x[-1] + 0.5 * (sorted_x[-1] - sorted_x[-2])
+665     x_samples = np.arange(xstart, xstop + 0.01, 0.01)
+666
+667     plt.figure(figsize=(8, 8 / 1.618))
+668     gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1], wspace=0.0, hspace=0.0)
+669     ax0 = plt.subplot(gs[0])
+670     ax0.errorbar(x, [o.value for o in y], yerr=[o.dvalue for o in y], ls='none', fmt='o', capsize=3, markersize=5, label='Data')
+671     ax0.plot(x_samples, func([o.value for o in fit_res], x_samples), label='Fit', zorder=10, ls='-', ms=0)
+672     ax0.set_xticklabels([])
+673     ax0.set_xlim([xstart, xstop])
+674     ax0.set_xticklabels([])
+675     ax0.legend()
+676
+677     residuals = (np.asarray([o.value for o in y]) - func([o.value for o in fit_res], x)) / np.asarray([o.dvalue for o in y])
+678     ax1 = plt.subplot(gs[1])
+679     ax1.plot(x, residuals, 'ko', ls='none', markersize=5)
+680     ax1.tick_params(direction='out')
+681     ax1.tick_params(axis="x", bottom=True, top=True, labelbottom=True)
+682     ax1.axhline(y=0.0, ls='--', color='k', marker=" ")
+683     ax1.fill_between(x_samples, -1.0, 1.0, alpha=0.1, facecolor='k')
+684     ax1.set_xlim([xstart, xstop])
+685     ax1.set_ylabel('Residuals')
+686     plt.subplots_adjust(wspace=None, hspace=None)
+687     plt.draw()
+688
+689
+690 def error_band(x, func, beta):
+691     """Returns the error band for an array of sample values x, for given fit function func with optimized parameters beta."""
+692     cov = covariance(beta)
+693     if np.any(np.abs(cov - cov.T) > 1000 * np.finfo(np.float64).eps):
+694         warnings.warn("Covariance matrix is not symmetric within floating point precision", RuntimeWarning)
+695
+696     deriv = []
+697     for i, item in enumerate(x):
+698         deriv.append(np.array(egrad(func)([o.value for o in beta], item)))
+699
+700     err = []
+701     for i, item in enumerate(x):
+702         err.append(np.sqrt(deriv[i] @ cov @ deriv[i]))
+703     err = np.array(err)
+704
+705     return err
+706
 707
+708 def ks_test(objects=None):
+709     """Performs a Kolmogorov–Smirnov test for the p-values of all fit object.
+710
+711     Parameters
+712     ----------
+713     objects : list
+714         List of fit results to include in the analysis (optional).
+715     """
+716
+717     if objects is None:
+718         obs_list = []
+719         for obj in gc.get_objects():
+720             if isinstance(obj, Fit_result):
+721                 obs_list.append(obj)
+722     else:
+723         obs_list = objects
+724
+725     p_values = [o.p_value for o in obs_list]
+726
+727     bins = len(p_values)
+728     x = np.arange(0, 1.001, 0.001)
+729     plt.plot(x, x, 'k', zorder=1)
+730     plt.xlim(0, 1)
+731     plt.ylim(0, 1)
+732     plt.xlabel('p-value')
+733     plt.ylabel('Cumulative probability')
+734     plt.title(str(bins) + ' p-values')
+735
+736     n = np.arange(1, bins + 1) / np.float64(bins)
+737     Xs = np.sort(p_values)
+738     plt.step(Xs, n)
+739     diffs = n - Xs
+740     loc_max_diff = np.argmax(np.abs(diffs))
+741     loc = Xs[loc_max_diff]
+742     plt.annotate('', xy=(loc, loc), xytext=(loc, loc + diffs[loc_max_diff]), arrowprops=dict(arrowstyle='<->', shrinkA=0, shrinkB=0))
+743     plt.draw()
+744
+745     print(scipy.stats.kstest(p_values, 'uniform'))
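Editor's note on the hunk above: the change makes the error propagation consistent with the objective that was actually minimized. With `correlated_fit=True` the Hessian is now taken of `chisqfunc_corr`, the correlated chi-square built from `chol_inv` (the inverse Cholesky factor of the covariance matrix), instead of the uncorrelated `chisqfunc`. The propagation itself solves `deriv = -hess^{-1} @ jac_jac[:n_parms, n_parms:]`, i.e. dp/dy at the minimum. A minimal, self-contained sketch of this scheme with toy uncorrelated data (illustrative names and values, not the pyerrors API):

```python
import autograd.numpy as anp  # autograd wrapper, required for jacobian()
import numpy as np
import scipy.linalg
from autograd import jacobian
from scipy.optimize import minimize

# Toy uncorrelated data for a linear model y = p0 + p1 * x.
x = np.array([1.0, 2.0, 3.0, 4.0])
y_f = np.array([2.1, 3.9, 6.2, 7.8])
dy_f = np.array([0.1, 0.1, 0.2, 0.2])
n_parms = 2

def func(p, x):
    return p[0] + p[1] * x

def chisqfunc(p):
    return anp.sum(((y_f - func(p, x)) / dy_f) ** 2)

fitp = minimize(chisqfunc, np.zeros(n_parms)).x  # stand-in for fit_result.x

# Hessian of the minimized objective w.r.t. the parameters ...
hess = jacobian(jacobian(chisqfunc))(fitp)

# ... and the mixed second derivatives d^2(chi^2)/(dp dy), obtained by
# treating parameters and data as one concatenated argument.
def chisqfunc_compact(d):
    return anp.sum(((d[n_parms:] - func(d[:n_parms], x)) / dy_f) ** 2)

jac_jac = jacobian(jacobian(chisqfunc_compact))(np.concatenate((fitp, y_f)))

# dp/dy = -hess^{-1} @ mixed block: one gradient row per parameter, which
# is what derived_observable consumes via man_grad in the hunk above.
deriv = -scipy.linalg.solve(hess, jac_jac[:n_parms, n_parms:])
print(deriv)  # shape (n_parms, len(y_f))
```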
@@ -1356,30 +1359,30 @@ has to be calculated (default False).
-606 def fit_lin(x, y, **kwargs):
-607     """Performs a linear fit to y = n + m * x and returns two Obs n, m.
-608
-609     Parameters
-610     ----------
-611     x : list
-612         Can either be a list of floats in which case no xerror is assumed, or
-613         a list of Obs, where the dvalues of the Obs are used as xerror for the fit.
-614     y : list
-615         List of Obs, the dvalues of the Obs are used as yerror for the fit.
-616     """
-617
-618     def f(a, x):
-619         y = a[0] + a[1] * x
-620         return y
-621
-622     if all(isinstance(n, Obs) for n in x):
-623         out = total_least_squares(x, y, f, **kwargs)
-624         return out.fit_parameters
-625     elif all(isinstance(n, float) or isinstance(n, int) for n in x) or isinstance(x, np.ndarray):
-626         out = least_squares(x, y, f, **kwargs)
@@ -1409,30 +1412,30 @@ List of Obs, the dvalues of the Obs are used as yerror for the fit.
+609 def fit_lin(x, y, **kwargs):
+610     """Performs a linear fit to y = n + m * x and returns two Obs n, m.
+611
+612     Parameters
+613     ----------
+614     x : list
+615         Can either be a list of floats in which case no xerror is assumed, or
+616         a list of Obs, where the dvalues of the Obs are used as xerror for the fit.
+617     y : list
+618         List of Obs, the dvalues of the Obs are used as yerror for the fit.
+619     """
+620
+621     def f(a, x):
+622         y = a[0] + a[1] * x
+623         return y
+624
+625     if all(isinstance(n, Obs) for n in x):
+626         out = total_least_squares(x, y, f, **kwargs)
 627         return out.fit_parameters
+628     elif all(isinstance(n, float) or isinstance(n, int) for n in x) or isinstance(x, np.ndarray):
+629         out = least_squares(x, y, f, **kwargs)
+630         return out.fit_parameters
+631     else:
+632         raise Exception('Unsupported types for x')
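For orientation, a hedged usage sketch of `fit_lin` as shown in this hunk (assumes an installed pyerrors; `pseudo_Obs` is the usual helper for toy observables and the ensemble name 'ens' is arbitrary):

```python
import numpy as np
import pyerrors as pe

# Toy data around y = 1 + 2x; each y-value is an Obs with error 0.05.
rng = np.random.default_rng(1)
x = np.arange(1.0, 6.0)
y = [pe.pseudo_Obs(1.0 + 2.0 * xi + rng.normal(0.0, 0.05), 0.05, 'ens')
     for xi in x]

# Floats in x select the least_squares branch; Obs in x would dispatch
# to total_least_squares instead.
n, m = pe.fits.fit_lin(x, y)
n.gamma_method()  # run the error analysis before inspecting the errors
m.gamma_method()
print(n, m)
```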
-632 def qqplot(x, o_y, func, p):
-633     """Generates a quantile-quantile plot of the fit result which can be used to
-634     check if the residuals of the fit are gaussian distributed.
-635     """
-636
-637     residuals = []
-638     for i_x, i_y in zip(x, o_y):
-639         residuals.append((i_y - func(p, i_x)) / i_y.dvalue)
-640     residuals = sorted(residuals)
-641     my_y = [o.value for o in residuals]
-642     probplot = scipy.stats.probplot(my_y)
-643     my_x = probplot[0][0]
-644     plt.figure(figsize=(8, 8 / 1.618))
-645     plt.errorbar(my_x, my_y, fmt='o')
-646     fit_start = my_x[0]
-647     fit_stop = my_x[-1]
-648     samples = np.arange(fit_start, fit_stop, 0.01)
-649     plt.plot(samples, samples, 'k--', zorder=11, label='Standard normal distribution')
-650     plt.plot(samples, probplot[1][0] * samples + probplot[1][1], zorder=10, label='Least squares fit, r=' + str(np.around(probplot[1][2], 3)), marker='', ls='-')
-651
-652     plt.xlabel('Theoretical quantiles')
-653     plt.ylabel('Ordered Values')
-654     plt.legend()
-655     plt.draw()
@@ -1453,34 +1456,34 @@ check if the residuals of the fit are gaussian distributed.
+635 def qqplot(x, o_y, func, p):
+636     """Generates a quantile-quantile plot of the fit result which can be used to
+637     check if the residuals of the fit are gaussian distributed.
+638     """
+639
+640     residuals = []
+641     for i_x, i_y in zip(x, o_y):
+642         residuals.append((i_y - func(p, i_x)) / i_y.dvalue)
+643     residuals = sorted(residuals)
+644     my_y = [o.value for o in residuals]
+645     probplot = scipy.stats.probplot(my_y)
+646     my_x = probplot[0][0]
+647     plt.figure(figsize=(8, 8 / 1.618))
+648     plt.errorbar(my_x, my_y, fmt='o')
+649     fit_start = my_x[0]
+650     fit_stop = my_x[-1]
+651     samples = np.arange(fit_start, fit_stop, 0.01)
+652     plt.plot(samples, samples, 'k--', zorder=11, label='Standard normal distribution')
+653     plt.plot(samples, probplot[1][0] * samples + probplot[1][1], zorder=10, label='Least squares fit, r=' + str(np.around(probplot[1][2], 3)), marker='', ls='-')
+654
+655     plt.xlabel('Theoretical quantiles')
+656     plt.ylabel('Ordered Values')
+657     plt.legend()
+658     plt.draw()
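`qqplot` is normally reached through the `qqplot=True` keyword of `least_squares`, as the first hunk shows, rather than called directly. A sketch under that assumption; `plt.show()` is needed in plain scripts because the function itself only calls `plt.draw()`:

```python
import matplotlib.pyplot as plt
import numpy as np
import pyerrors as pe

def linear(a, x):
    return a[0] + a[1] * x  # fit functions must use autograd-friendly ops

rng = np.random.default_rng(2)
x = np.arange(1.0, 9.0)
y = [pe.pseudo_Obs(0.5 + 1.5 * xi + rng.normal(0.0, 0.1), 0.1, 'ens')
     for xi in x]

# qqplot=True triggers qqplot(x, y, linear, result) right after the fit.
fit_result = pe.fits.least_squares(x, y, linear, qqplot=True)
plt.show()
```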
-658 def residual_plot(x, y, func, fit_res):
-659     """ Generates a plot which compares the fit to the data and displays the corresponding residuals"""
-660     sorted_x = sorted(x)
-661     xstart = sorted_x[0] - 0.5 * (sorted_x[1] - sorted_x[0])
-662     xstop = sorted_x[-1] + 0.5 * (sorted_x[-1] - sorted_x[-2])
-663     x_samples = np.arange(xstart, xstop + 0.01, 0.01)
-664
-665     plt.figure(figsize=(8, 8 / 1.618))
-666     gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1], wspace=0.0, hspace=0.0)
-667     ax0 = plt.subplot(gs[0])
-668     ax0.errorbar(x, [o.value for o in y], yerr=[o.dvalue for o in y], ls='none', fmt='o', capsize=3, markersize=5, label='Data')
-669     ax0.plot(x_samples, func([o.value for o in fit_res], x_samples), label='Fit', zorder=10, ls='-', ms=0)
-670     ax0.set_xticklabels([])
-671     ax0.set_xlim([xstart, xstop])
-672     ax0.set_xticklabels([])
-673     ax0.legend()
-674
-675     residuals = (np.asarray([o.value for o in y]) - func([o.value for o in fit_res], x)) / np.asarray([o.dvalue for o in y])
-676     ax1 = plt.subplot(gs[1])
-677     ax1.plot(x, residuals, 'ko', ls='none', markersize=5)
-678     ax1.tick_params(direction='out')
-679     ax1.tick_params(axis="x", bottom=True, top=True, labelbottom=True)
-680     ax1.axhline(y=0.0, ls='--', color='k', marker=" ")
-681     ax1.fill_between(x_samples, -1.0, 1.0, alpha=0.1, facecolor='k')
-682     ax1.set_xlim([xstart, xstop])
-683     ax1.set_ylabel('Residuals')
-684     plt.subplots_adjust(wspace=None, hspace=None)
-685     plt.draw()
@@ -1500,22 +1503,22 @@ check if the residuals of the fit are gaussian distributed.
+661 def residual_plot(x, y, func, fit_res):
+662     """ Generates a plot which compares the fit to the data and displays the corresponding residuals"""
+663     sorted_x = sorted(x)
+664     xstart = sorted_x[0] - 0.5 * (sorted_x[1] - sorted_x[0])
+665     xstop = sorted_x[-1] + 0.5 * (sorted_x[-1] - sorted_x[-2])
+666     x_samples = np.arange(xstart, xstop + 0.01, 0.01)
+667
+668     plt.figure(figsize=(8, 8 / 1.618))
+669     gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1], wspace=0.0, hspace=0.0)
+670     ax0 = plt.subplot(gs[0])
+671     ax0.errorbar(x, [o.value for o in y], yerr=[o.dvalue for o in y], ls='none', fmt='o', capsize=3, markersize=5, label='Data')
+672     ax0.plot(x_samples, func([o.value for o in fit_res], x_samples), label='Fit', zorder=10, ls='-', ms=0)
+673     ax0.set_xticklabels([])
+674     ax0.set_xlim([xstart, xstop])
+675     ax0.set_xticklabels([])
+676     ax0.legend()
+677
+678     residuals = (np.asarray([o.value for o in y]) - func([o.value for o in fit_res], x)) / np.asarray([o.dvalue for o in y])
+679     ax1 = plt.subplot(gs[1])
+680     ax1.plot(x, residuals, 'ko', ls='none', markersize=5)
+681     ax1.tick_params(direction='out')
+682     ax1.tick_params(axis="x", bottom=True, top=True, labelbottom=True)
+683     ax1.axhline(y=0.0, ls='--', color='k', marker=" ")
+684     ax1.fill_between(x_samples, -1.0, 1.0, alpha=0.1, facecolor='k')
+685     ax1.set_xlim([xstart, xstop])
+686     ax1.set_ylabel('Residuals')
+687     plt.subplots_adjust(wspace=None, hspace=None)
+688     plt.draw()
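`residual_plot` is likewise usually triggered with `resplot=True`, but it can also be called on a finished fit. A sketch reusing `x`, `y` and `linear` from the qqplot example above:

```python
import matplotlib.pyplot as plt
import pyerrors as pe

fit_result = pe.fits.least_squares(x, y, linear)

# Upper panel: data with errors and the fitted curve; lower panel: the
# normalized residuals (y_i - f(x_i)) / dy_i with a +/-1 band shaded.
pe.fits.residual_plot(x, y, linear, fit_result.fit_parameters)
plt.show()
```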
-688 def error_band(x, func, beta):
-689     """Returns the error band for an array of sample values x, for given fit function func with optimized parameters beta."""
-690     cov = covariance(beta)
-691     if np.any(np.abs(cov - cov.T) > 1000 * np.finfo(np.float64).eps):
-692         warnings.warn("Covariance matrix is not symmetric within floating point precision", RuntimeWarning)
-693
-694     deriv = []
-695     for i, item in enumerate(x):
-696         deriv.append(np.array(egrad(func)([o.value for o in beta], item)))
-697
-698     err = []
-699     for i, item in enumerate(x):
-700         err.append(np.sqrt(deriv[i] @ cov @ deriv[i]))
-701     err = np.array(err)
-702
-703     return err
@@ -1535,44 +1538,44 @@ check if the residuals of the fit are gaussian distributed.
+691 def error_band(x, func, beta):
+692     """Returns the error band for an array of sample values x, for given fit function func with optimized parameters beta."""
+693     cov = covariance(beta)
+694     if np.any(np.abs(cov - cov.T) > 1000 * np.finfo(np.float64).eps):
+695         warnings.warn("Covariance matrix is not symmetric within floating point precision", RuntimeWarning)
+696
+697     deriv = []
+698     for i, item in enumerate(x):
+699         deriv.append(np.array(egrad(func)([o.value for o in beta], item)))
+700
+701     err = []
+702     for i, item in enumerate(x):
+703         err.append(np.sqrt(deriv[i] @ cov @ deriv[i]))
+704     err = np.array(err)
+705
+706     return err
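A sketch of shading a one-sigma band around a fit with `error_band`, again reusing the toy setup from above; `gamma_method` is run on each parameter first so that `covariance` has error information to work with:

```python
import matplotlib.pyplot as plt
import numpy as np
import pyerrors as pe

fit_result = pe.fits.least_squares(x, y, linear)
beta = fit_result.fit_parameters
for p in beta:
    p.gamma_method()

x_band = np.linspace(x[0], x[-1], 200)
band = pe.fits.error_band(x_band, linear, beta)   # sigma of f(x) per point
central = linear([p.value for p in beta], x_band)

plt.fill_between(x_band, central - band, central + band, alpha=0.3)
plt.show()
```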
-706 def ks_test(objects=None):
-707     """Performs a Kolmogorov–Smirnov test for the p-values of all fit object.
-708
-709     Parameters
-710     ----------
-711     objects : list
-712         List of fit results to include in the analysis (optional).
-713     """
-714
-715     if objects is None:
-716         obs_list = []
-717         for obj in gc.get_objects():
-718             if isinstance(obj, Fit_result):
-719                 obs_list.append(obj)
-720     else:
-721         obs_list = objects
-722
-723     p_values = [o.p_value for o in obs_list]
-724
-725     bins = len(p_values)
-726     x = np.arange(0, 1.001, 0.001)
-727     plt.plot(x, x, 'k', zorder=1)
-728     plt.xlim(0, 1)
-729     plt.ylim(0, 1)
-730     plt.xlabel('p-value')
-731     plt.ylabel('Cumulative probability')
-732     plt.title(str(bins) + ' p-values')
-733
-734     n = np.arange(1, bins + 1) / np.float64(bins)
-735     Xs = np.sort(p_values)
-736     plt.step(Xs, n)
-737     diffs = n - Xs
-738     loc_max_diff = np.argmax(np.abs(diffs))
-739     loc = Xs[loc_max_diff]
-740     plt.annotate('', xy=(loc, loc), xytext=(loc, loc + diffs[loc_max_diff]), arrowprops=dict(arrowstyle='<->', shrinkA=0, shrinkB=0))
-741     plt.draw()
-742
-743     print(scipy.stats.kstest(p_values, 'uniform'))
+709 def ks_test(objects=None):
+710     """Performs a Kolmogorov–Smirnov test for the p-values of all fit object.
+711
+712     Parameters
+713     ----------
+714     objects : list
+715         List of fit results to include in the analysis (optional).
+716     """
+717
+718     if objects is None:
+719         obs_list = []
+720         for obj in gc.get_objects():
+721             if isinstance(obj, Fit_result):
+722                 obs_list.append(obj)
+723     else:
+724         obs_list = objects
+725
+726     p_values = [o.p_value for o in obs_list]
+727
+728     bins = len(p_values)
+729     x = np.arange(0, 1.001, 0.001)
+730     plt.plot(x, x, 'k', zorder=1)
+731     plt.xlim(0, 1)
+732     plt.ylim(0, 1)
+733     plt.xlabel('p-value')
+734     plt.ylabel('Cumulative probability')
+735     plt.title(str(bins) + ' p-values')
+736
+737     n = np.arange(1, bins + 1) / np.float64(bins)
+738     Xs = np.sort(p_values)
+739     plt.step(Xs, n)
+740     diffs = n - Xs
+741     loc_max_diff = np.argmax(np.abs(diffs))
+742     loc = Xs[loc_max_diff]
+743     plt.annotate('', xy=(loc, loc), xytext=(loc, loc + diffs[loc_max_diff]), arrowprops=dict(arrowstyle='<->', shrinkA=0, shrinkB=0))
+744     plt.draw()
+745
+746     print(scipy.stats.kstest(p_values, 'uniform'))
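Called without arguments, `ks_test` scans `gc.get_objects()` for every `Fit_result` alive in the interpreter, which is convenient in a notebook but implicit. Passing the fits explicitly is more predictable; a hypothetical sketch where `datasets` stands for several (x, y) pairs prepared as in the examples above:

```python
import matplotlib.pyplot as plt
import pyerrors as pe

# datasets: iterable of (x, y) pairs, e.g. built with pseudo_Obs as above.
fit_results = [pe.fits.least_squares(xi, yi, linear) for xi, yi in datasets]

# Draws the empirical CDF of the fit p-values against the uniform CDF and
# prints the scipy.stats.kstest result.
pe.fits.ks_test(objects=fit_results)
plt.show()
```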