diff --git a/docs/pyerrors/fits.html b/docs/pyerrors/fits.html
index 1e69c4b7..e84840a6 100644
--- a/docs/pyerrors/fits.html
+++ b/docs/pyerrors/fits.html
@@ -628,217 +628,221 @@
 525            fit_result = scipy.optimize.least_squares(chisqfunc_residuals_corr, fit_result.x, method='lm', ftol=1e-15, gtol=1e-15, xtol=1e-15)
 526
 527    chisquare = np.sum(fit_result.fun ** 2)
-528
-529    output.iterations = fit_result.nfev
-530
-531    if not fit_result.success:
-532        raise Exception('The minimization procedure did not converge.')
-533
-534    if x.shape[-1] - n_parms > 0:
-535        output.chisquare_by_dof = chisquare / (x.shape[-1] - n_parms)
-536    else:
-537        output.chisquare_by_dof = float('nan')
-538
-539    output.message = fit_result.message
-540    if not silent:
-541        print(fit_result.message)
-542        print('chisquare/d.o.f.:', output.chisquare_by_dof)
-543
-544    if kwargs.get('expected_chisquare') is True:
-545        if kwargs.get('correlated_fit') is not True:
-546            W = np.diag(1 / np.asarray(dy_f))
-547            cov = covariance(y)
-548            A = W @ jacobian(func)(fit_result.x, x)
-549            P_phi = A @ np.linalg.pinv(A.T @ A) @ A.T
-550            expected_chisquare = np.trace((np.identity(x.shape[-1]) - P_phi) @ W @ cov @ W)
-551            output.chisquare_by_expected_chisquare = chisquare / expected_chisquare
-552            if not silent:
-553                print('chisquare/expected_chisquare:',
-554                      output.chisquare_by_expected_chisquare)
-555
-556    fitp = fit_result.x
-557    try:
-558        hess = jacobian(jacobian(chisqfunc))(fitp)
-559    except TypeError:
-560        raise Exception("It is required to use autograd.numpy instead of numpy within fit functions, see the documentation for details.") from None
-561
-562    if kwargs.get('correlated_fit') is True:
-563        def chisqfunc_compact(d):
-564            model = func(d[:n_parms], x)
-565            chisq = anp.sum(anp.dot(chol_inv, (d[n_parms:] - model)) ** 2)
-566            return chisq
-567
-568    else:
-569        def chisqfunc_compact(d):
-570            model = func(d[:n_parms], x)
-571            chisq = anp.sum(((d[n_parms:] - model) / dy_f) ** 2)
-572            return chisq
-573
-574    jac_jac = jacobian(jacobian(chisqfunc_compact))(np.concatenate((fitp, y_f)))
-575
-576    # Compute hess^{-1} @ jac_jac[:n_parms, n_parms:] using LAPACK dgesv
-577    try:
-578        deriv = -scipy.linalg.solve(hess, jac_jac[:n_parms, n_parms:])
-579    except np.linalg.LinAlgError:
-580        raise Exception("Cannot invert hessian matrix.")
-581
-582    result = []
-583    for i in range(n_parms):
-584        result.append(derived_observable(lambda x, **kwargs: (x[0] + np.finfo(np.float64).eps) / (y[0].value + np.finfo(np.float64).eps) * fit_result.x[i], list(y), man_grad=list(deriv[i])))
+528    if kwargs.get('correlated_fit') is True:
+529        assert np.isclose(chisquare, chisqfunc_corr(fit_result.x), atol=1e-14)
+530    else:
+531        assert np.isclose(chisquare, chisqfunc(fit_result.x), atol=1e-14)
+532
+533    output.iterations = fit_result.nfev
+534
+535    if not fit_result.success:
+536        raise Exception('The minimization procedure did not converge.')
+537
+538    if x.shape[-1] - n_parms > 0:
+539        output.chisquare_by_dof = chisquare / (x.shape[-1] - n_parms)
+540    else:
+541        output.chisquare_by_dof = float('nan')
+542
+543    output.message = fit_result.message
+544    if not silent:
+545        print(fit_result.message)
+546        print('chisquare/d.o.f.:', output.chisquare_by_dof)
+547
+548    if kwargs.get('expected_chisquare') is True:
+549        if kwargs.get('correlated_fit') is not True:
+550            W = np.diag(1 / np.asarray(dy_f))
+551            cov = covariance(y)
+552            A = W @ jacobian(func)(fit_result.x, x)
+553            P_phi = A @ np.linalg.pinv(A.T @ A) @ A.T
+554            expected_chisquare = np.trace((np.identity(x.shape[-1]) - P_phi) @ W @ cov @ W)
+555            output.chisquare_by_expected_chisquare = chisquare / expected_chisquare
+556            if not silent:
+557                print('chisquare/expected_chisquare:',
+558                      output.chisquare_by_expected_chisquare)
+559
+560    fitp = fit_result.x
+561    try:
+562        hess = jacobian(jacobian(chisqfunc))(fitp)
+563    except TypeError:
+564        raise Exception("It is required to use autograd.numpy instead of numpy within fit functions, see the documentation for details.") from None
+565
+566    if kwargs.get('correlated_fit') is True:
+567        def chisqfunc_compact(d):
+568            model = func(d[:n_parms], x)
+569            chisq = anp.sum(anp.dot(chol_inv, (d[n_parms:] - model)) ** 2)
+570            return chisq
+571
+572    else:
+573        def chisqfunc_compact(d):
+574            model = func(d[:n_parms], x)
+575            chisq = anp.sum(((d[n_parms:] - model) / dy_f) ** 2)
+576            return chisq
+577
+578    jac_jac = jacobian(jacobian(chisqfunc_compact))(np.concatenate((fitp, y_f)))
+579
+580    # Compute hess^{-1} @ jac_jac[:n_parms, n_parms:] using LAPACK dgesv
+581    try:
+582        deriv = -scipy.linalg.solve(hess, jac_jac[:n_parms, n_parms:])
+583    except np.linalg.LinAlgError:
+584        raise Exception("Cannot invert hessian matrix.")
 585
-586    output.fit_parameters = result
-587
-588    output.chisquare = chisqfunc(fit_result.x)
-589    output.dof = x.shape[-1] - n_parms
-590    output.p_value = 1 - chi2.cdf(output.chisquare, output.dof)
+586    result = []
+587    for i in range(n_parms):
+588        result.append(derived_observable(lambda x, **kwargs: (x[0] + np.finfo(np.float64).eps) / (y[0].value + np.finfo(np.float64).eps) * fit_result.x[i], list(y), man_grad=list(deriv[i])))
+589
+590    output.fit_parameters = result
 591
-592    if kwargs.get('resplot') is True:
-593        residual_plot(x, y, func, result)
-594
-595    if kwargs.get('qqplot') is True:
-596        qqplot(x, y, func, result)
-597
-598    return output
-599
-600
-601def fit_lin(x, y, **kwargs):
-602    """Performs a linear fit to y = n + m * x and returns two Obs n, m.
+592    output.chisquare = chisquare
+593    output.dof = x.shape[-1] - n_parms
+594    output.p_value = 1 - chi2.cdf(output.chisquare, output.dof)
+595
+596    if kwargs.get('resplot') is True:
+597        residual_plot(x, y, func, result)
+598
+599    if kwargs.get('qqplot') is True:
+600        qqplot(x, y, func, result)
+601
+602    return output
 603
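The hunk above contains the core of the error propagation in `least_squares`: at the minimum of the chi-square, the derivatives of the fit parameters with respect to the data follow from the Hessian via the implicit function theorem, `deriv = -hess^{-1} @ jac_jac[:n_parms, n_parms:]`, and these rows are handed to `derived_observable` as manual gradients. A standalone sketch of that step with made-up data (the fit function, data and optimum below are invented for illustration; only `autograd` and `scipy` are assumed):

```python
import autograd.numpy as anp  # autograd's numpy wrapper, as in fits.py
import numpy as np
import scipy.linalg
import scipy.optimize
from autograd import jacobian

x = np.array([1., 2., 3., 4.])
y_f = np.array([1.1, 1.9, 3.2, 3.9])   # central values of the data (made up)
dy_f = np.array([0.1, 0.1, 0.2, 0.1])  # their errors (made up)
n_parms = 2

def func(p, x):
    return p[0] + p[1] * x

def chisqfunc(p):
    return anp.sum(((y_f - func(p, x)) / dy_f) ** 2)

# Treat the data as extra arguments so that one autograd pass yields the
# mixed second derivatives d^2(chisq) / d(parameter) d(data point).
def chisqfunc_compact(d):
    return anp.sum(((d[n_parms:] - func(d[:n_parms], x)) / dy_f) ** 2)

fitp = scipy.optimize.minimize(chisqfunc, np.array([0., 1.])).x
hess = jacobian(jacobian(chisqfunc))(fitp)
jac_jac = jacobian(jacobian(chisqfunc_compact))(np.concatenate((fitp, y_f)))

# deriv[i, j] approximates d(fitp[i]) / d(y_f[j]); these are the man_grad
# lists passed to derived_observable in the code above.
deriv = -scipy.linalg.solve(hess, jac_jac[:n_parms, n_parms:])
print(deriv)
```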
-604    Parameters
-605    ----------
-606    x : list
-607        Can either be a list of floats in which case no xerror is assumed, or
-608        a list of Obs, where the dvalues of the Obs are used as xerror for the fit.
-609    y : list
-610        List of Obs, the dvalues of the Obs are used as yerror for the fit.
-611    """
-612
-613    def f(a, x):
-614        y = a[0] + a[1] * x
-615        return y
+604
+605def fit_lin(x, y, **kwargs):
+606    """Performs a linear fit to y = n + m * x and returns two Obs n, m.
+607
+608    Parameters
+609    ----------
+610    x : list
+611        Can either be a list of floats in which case no xerror is assumed, or
+612        a list of Obs, where the dvalues of the Obs are used as xerror for the fit.
+613    y : list
+614        List of Obs, the dvalues of the Obs are used as yerror for the fit.
+615    """
 616
-617    if all(isinstance(n, Obs) for n in x):
-618        out = total_least_squares(x, y, f, **kwargs)
-619        return out.fit_parameters
-620    elif all(isinstance(n, float) or isinstance(n, int) for n in x) or isinstance(x, np.ndarray):
-621        out = least_squares(x, y, f, **kwargs)
-622        return out.fit_parameters
-623    else:
-624        raise Exception('Unsupported types for x')
-625
-626
-627def qqplot(x, o_y, func, p):
-628    """Generates a quantile-quantile plot of the fit result which can be used to
-629       check if the residuals of the fit are gaussian distributed.
-630    """
-631
-632    residuals = []
-633    for i_x, i_y in zip(x, o_y):
-634        residuals.append((i_y - func(p, i_x)) / i_y.dvalue)
-635    residuals = sorted(residuals)
-636    my_y = [o.value for o in residuals]
-637    probplot = scipy.stats.probplot(my_y)
-638    my_x = probplot[0][0]
-639    plt.figure(figsize=(8, 8 / 1.618))
-640    plt.errorbar(my_x, my_y, fmt='o')
-641    fit_start = my_x[0]
-642    fit_stop = my_x[-1]
-643    samples = np.arange(fit_start, fit_stop, 0.01)
-644    plt.plot(samples, samples, 'k--', zorder=11, label='Standard normal distribution')
-645    plt.plot(samples, probplot[1][0] * samples + probplot[1][1], zorder=10, label='Least squares fit, r=' + str(np.around(probplot[1][2], 3)), marker='', ls='-')
-646
-647    plt.xlabel('Theoretical quantiles')
-648    plt.ylabel('Ordered Values')
-649    plt.legend()
-650    plt.draw()
-651
-652
-653def residual_plot(x, y, func, fit_res):
-654    """ Generates a plot which compares the fit to the data and displays the corresponding residuals"""
-655    sorted_x = sorted(x)
-656    xstart = sorted_x[0] - 0.5 * (sorted_x[1] - sorted_x[0])
-657    xstop = sorted_x[-1] + 0.5 * (sorted_x[-1] - sorted_x[-2])
-658    x_samples = np.arange(xstart, xstop + 0.01, 0.01)
-659
-660    plt.figure(figsize=(8, 8 / 1.618))
-661    gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1], wspace=0.0, hspace=0.0)
-662    ax0 = plt.subplot(gs[0])
-663    ax0.errorbar(x, [o.value for o in y], yerr=[o.dvalue for o in y], ls='none', fmt='o', capsize=3, markersize=5, label='Data')
-664    ax0.plot(x_samples, func([o.value for o in fit_res], x_samples), label='Fit', zorder=10, ls='-', ms=0)
-665    ax0.set_xticklabels([])
-666    ax0.set_xlim([xstart, xstop])
-667    ax0.set_xticklabels([])
-668    ax0.legend()
-669
-670    residuals = (np.asarray([o.value for o in y]) - func([o.value for o in fit_res], x)) / np.asarray([o.dvalue for o in y])
-671    ax1 = plt.subplot(gs[1])
-672    ax1.plot(x, residuals, 'ko', ls='none', markersize=5)
-673    ax1.tick_params(direction='out')
-674    ax1.tick_params(axis="x", bottom=True, top=True, labelbottom=True)
-675    ax1.axhline(y=0.0, ls='--', color='k', marker=" ")
-676    ax1.fill_between(x_samples, -1.0, 1.0, alpha=0.1, facecolor='k')
-677    ax1.set_xlim([xstart, xstop])
-678    ax1.set_ylabel('Residuals')
-679    plt.subplots_adjust(wspace=None, hspace=None)
-680    plt.draw()
-681
-682
-683def error_band(x, func, beta):
-684    """Returns the error band for an array of sample values x, for given fit function func with optimized parameters beta."""
-685    cov = covariance(beta)
-686    if np.any(np.abs(cov - cov.T) > 1000 * np.finfo(np.float64).eps):
-687        warnings.warn("Covariance matrix is not symmetric within floating point precision", RuntimeWarning)
-688
-689    deriv = []
-690    for i, item in enumerate(x):
-691        deriv.append(np.array(egrad(func)([o.value for o in beta], item)))
+617    def f(a, x):
+618        y = a[0] + a[1] * x
+619        return y
+620
+621    if all(isinstance(n, Obs) for n in x):
+622        out = total_least_squares(x, y, f, **kwargs)
+623        return out.fit_parameters
+624    elif all(isinstance(n, float) or isinstance(n, int) for n in x) or isinstance(x, np.ndarray):
+625        out = least_squares(x, y, f, **kwargs)
+626        return out.fit_parameters
+627    else:
+628        raise Exception('Unsupported types for x')
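A possible call of `fit_lin` with plain float x values, which dispatches to `least_squares` internally. The pseudo-observables below are fabricated for illustration, assuming the usual `pyerrors` import:

```python
import pyerrors as pe

x = [1.0, 2.0, 3.0, 4.0]  # plain floats: no x-error, least_squares branch
y = [pe.pseudo_Obs(v, dv, 'ens1') for v, dv in
     [(1.1, 0.1), (1.9, 0.1), (3.2, 0.2), (3.9, 0.1)]]

n, m = pe.fits.fit_lin(x, y)  # intercept n and slope m as Obs
n.gamma_method()              # run error estimation before printing
m.gamma_method()
print(n, m)
```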
+629
+630
+631def qqplot(x, o_y, func, p):
+632    """Generates a quantile-quantile plot of the fit result which can be used to
+633       check if the residuals of the fit are gaussian distributed.
+634    """
+635
+636    residuals = []
+637    for i_x, i_y in zip(x, o_y):
+638        residuals.append((i_y - func(p, i_x)) / i_y.dvalue)
+639    residuals = sorted(residuals)
+640    my_y = [o.value for o in residuals]
+641    probplot = scipy.stats.probplot(my_y)
+642    my_x = probplot[0][0]
+643    plt.figure(figsize=(8, 8 / 1.618))
+644    plt.errorbar(my_x, my_y, fmt='o')
+645    fit_start = my_x[0]
+646    fit_stop = my_x[-1]
+647    samples = np.arange(fit_start, fit_stop, 0.01)
+648    plt.plot(samples, samples, 'k--', zorder=11, label='Standard normal distribution')
+649    plt.plot(samples, probplot[1][0] * samples + probplot[1][1], zorder=10, label='Least squares fit, r=' + str(np.around(probplot[1][2], 3)), marker='', ls='-')
+650
+651    plt.xlabel('Theoretical quantiles')
+652    plt.ylabel('Ordered Values')
+653    plt.legend()
+654    plt.draw()
+655
+656
+657def residual_plot(x, y, func, fit_res):
+658    """ Generates a plot which compares the fit to the data and displays the corresponding residuals"""
+659    sorted_x = sorted(x)
+660    xstart = sorted_x[0] - 0.5 * (sorted_x[1] - sorted_x[0])
+661    xstop = sorted_x[-1] + 0.5 * (sorted_x[-1] - sorted_x[-2])
+662    x_samples = np.arange(xstart, xstop + 0.01, 0.01)
+663
+664    plt.figure(figsize=(8, 8 / 1.618))
+665    gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1], wspace=0.0, hspace=0.0)
+666    ax0 = plt.subplot(gs[0])
+667    ax0.errorbar(x, [o.value for o in y], yerr=[o.dvalue for o in y], ls='none', fmt='o', capsize=3, markersize=5, label='Data')
+668    ax0.plot(x_samples, func([o.value for o in fit_res], x_samples), label='Fit', zorder=10, ls='-', ms=0)
+669    ax0.set_xticklabels([])
+670    ax0.set_xlim([xstart, xstop])
+671    ax0.set_xticklabels([])
+672    ax0.legend()
+673
+674    residuals = (np.asarray([o.value for o in y]) - func([o.value for o in fit_res], x)) / np.asarray([o.dvalue for o in y])
+675    ax1 = plt.subplot(gs[1])
+676    ax1.plot(x, residuals, 'ko', ls='none', markersize=5)
+677    ax1.tick_params(direction='out')
+678    ax1.tick_params(axis="x", bottom=True, top=True, labelbottom=True)
+679    ax1.axhline(y=0.0, ls='--', color='k', marker=" ")
+680    ax1.fill_between(x_samples, -1.0, 1.0, alpha=0.1, facecolor='k')
+681    ax1.set_xlim([xstart, xstop])
+682    ax1.set_ylabel('Residuals')
+683    plt.subplots_adjust(wspace=None, hspace=None)
+684    plt.draw()
+685
+686
+687def error_band(x, func, beta):
+688    """Returns the error band for an array of sample values x, for given fit function func with optimized parameters beta."""
+689    cov = covariance(beta)
+690    if np.any(np.abs(cov - cov.T) > 1000 * np.finfo(np.float64).eps):
+691        warnings.warn("Covariance matrix is not symmetric within floating point precision", RuntimeWarning)
 692
-693    err = []
+693    deriv = []
 694    for i, item in enumerate(x):
-695        err.append(np.sqrt(deriv[i] @ cov @ deriv[i]))
-696    err = np.array(err)
-697
-698    return err
-699
-700
-701def ks_test(objects=None):
-702    """Performs a Kolmogorov–Smirnov test for the p-values of all fit object.
+695        deriv.append(np.array(egrad(func)([o.value for o in beta], item)))
+696
+697    err = []
+698    for i, item in enumerate(x):
+699        err.append(np.sqrt(deriv[i] @ cov @ deriv[i]))
+700    err = np.array(err)
+701
+702    return err
 703
-704    Parameters
-705    ----------
-706    objects : list
-707        List of fit results to include in the analysis (optional).
-708    """
-709
-710    if objects is None:
-711        obs_list = []
-712        for obj in gc.get_objects():
-713            if isinstance(obj, Fit_result):
-714                obs_list.append(obj)
-715    else:
-716        obs_list = objects
-717
-718    p_values = [o.p_value for o in obs_list]
-719
-720    bins = len(p_values)
-721    x = np.arange(0, 1.001, 0.001)
-722    plt.plot(x, x, 'k', zorder=1)
-723    plt.xlim(0, 1)
-724    plt.ylim(0, 1)
-725    plt.xlabel('p-value')
-726    plt.ylabel('Cumulative probability')
-727    plt.title(str(bins) + ' p-values')
-728
-729    n = np.arange(1, bins + 1) / np.float64(bins)
-730    Xs = np.sort(p_values)
-731    plt.step(Xs, n)
-732    diffs = n - Xs
-733    loc_max_diff = np.argmax(np.abs(diffs))
-734    loc = Xs[loc_max_diff]
-735    plt.annotate('', xy=(loc, loc), xytext=(loc, loc + diffs[loc_max_diff]), arrowprops=dict(arrowstyle='<->', shrinkA=0, shrinkB=0))
-736    plt.draw()
-737
-738    print(scipy.stats.kstest(p_values, 'uniform'))
+704
+705def ks_test(objects=None):
+706    """Performs a Kolmogorov–Smirnov test for the p-values of all fit object.
+707
+708    Parameters
+709    ----------
+710    objects : list
+711        List of fit results to include in the analysis (optional).
+712    """
+713
+714    if objects is None:
+715        obs_list = []
+716        for obj in gc.get_objects():
+717            if isinstance(obj, Fit_result):
+718                obs_list.append(obj)
+719    else:
+720        obs_list = objects
+721
+722    p_values = [o.p_value for o in obs_list]
+723
+724    bins = len(p_values)
+725    x = np.arange(0, 1.001, 0.001)
+726    plt.plot(x, x, 'k', zorder=1)
+727    plt.xlim(0, 1)
+728    plt.ylim(0, 1)
+729    plt.xlabel('p-value')
+730    plt.ylabel('Cumulative probability')
+731    plt.title(str(bins) + ' p-values')
+732
+733    n = np.arange(1, bins + 1) / np.float64(bins)
+734    Xs = np.sort(p_values)
+735    plt.step(Xs, n)
+736    diffs = n - Xs
+737    loc_max_diff = np.argmax(np.abs(diffs))
+738    loc = Xs[loc_max_diff]
+739    plt.annotate('', xy=(loc, loc), xytext=(loc, loc + diffs[loc_max_diff]), arrowprops=dict(arrowstyle='<->', shrinkA=0, shrinkB=0))
+740    plt.draw()
+741
+742    print(scipy.stats.kstest(p_values, 'uniform'))
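The plotting block of `ks_test` computes the largest vertical distance between the empirical CDF of the p-values and the uniform CDF, which is essentially the statistic that `scipy.stats.kstest` reports. A minimal sketch of that computation with made-up p-values (the one-sided grid convention here differs slightly from scipy's treatment, so the numbers agree only approximately):

```python
import numpy as np
import scipy.stats

p_values = np.array([0.02, 0.31, 0.44, 0.58, 0.61, 0.93])  # made up

n = np.arange(1, len(p_values) + 1) / np.float64(len(p_values))
Xs = np.sort(p_values)
diffs = n - Xs  # deviation of the empirical CDF from the uniform CDF

print(np.max(np.abs(diffs)))                    # by-hand KS-like statistic
print(scipy.stats.kstest(p_values, 'uniform'))  # library result
```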
@@ -1350,30 +1354,30 @@ has to be calculated (default False).
-602def fit_lin(x, y, **kwargs):
-603    """Performs a linear fit to y = n + m * x and returns two Obs n, m.
-604
-605    Parameters
-606    ----------
-607    x : list
-608        Can either be a list of floats in which case no xerror is assumed, or
-609        a list of Obs, where the dvalues of the Obs are used as xerror for the fit.
-610    y : list
-611        List of Obs, the dvalues of the Obs are used as yerror for the fit.
-612    """
-613
-614    def f(a, x):
-615        y = a[0] + a[1] * x
-616        return y
@@ -1403,30 +1407,30 @@ List of Obs, the dvalues of the Obs are used as yerror for the fit.
+606def fit_lin(x, y, **kwargs):
+607    """Performs a linear fit to y = n + m * x and returns two Obs n, m.
+608
+609    Parameters
+610    ----------
+611    x : list
+612        Can either be a list of floats in which case no xerror is assumed, or
+613        a list of Obs, where the dvalues of the Obs are used as xerror for the fit.
+614    y : list
+615        List of Obs, the dvalues of the Obs are used as yerror for the fit.
+616    """
 617
-618    if all(isinstance(n, Obs) for n in x):
-619        out = total_least_squares(x, y, f, **kwargs)
-620        return out.fit_parameters
-621    elif all(isinstance(n, float) or isinstance(n, int) for n in x) or isinstance(x, np.ndarray):
-622        out = least_squares(x, y, f, **kwargs)
-623        return out.fit_parameters
-624    else:
-625        raise Exception('Unsupported types for x')
+618    def f(a, x):
+619        y = a[0] + a[1] * x
+620        return y
+621
+622    if all(isinstance(n, Obs) for n in x):
+623        out = total_least_squares(x, y, f, **kwargs)
+624        return out.fit_parameters
+625    elif all(isinstance(n, float) or isinstance(n, int) for n in x) or isinstance(x, np.ndarray):
+626        out = least_squares(x, y, f, **kwargs)
+627        return out.fit_parameters
+628    else:
+629        raise Exception('Unsupported types for x')
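For the other branch of the dispatch above, a hedged sketch: when the x values are `Obs` as well, `fit_lin` forwards to `total_least_squares` and the x-errors enter the fit (all numbers invented):

```python
import pyerrors as pe

x = [pe.pseudo_Obs(v, 0.05, 'ens1') for v in [1.0, 2.0, 3.0, 4.0]]
y = [pe.pseudo_Obs(v, 0.10, 'ens1') for v in [1.1, 1.9, 3.2, 3.9]]

n, m = pe.fits.fit_lin(x, y)  # total_least_squares branch
n.gamma_method()
m.gamma_method()
print(n, m)
```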
-628def qqplot(x, o_y, func, p):
-629    """Generates a quantile-quantile plot of the fit result which can be used to
-630       check if the residuals of the fit are gaussian distributed.
-631    """
-632
-633    residuals = []
-634    for i_x, i_y in zip(x, o_y):
-635        residuals.append((i_y - func(p, i_x)) / i_y.dvalue)
-636    residuals = sorted(residuals)
-637    my_y = [o.value for o in residuals]
-638    probplot = scipy.stats.probplot(my_y)
-639    my_x = probplot[0][0]
-640    plt.figure(figsize=(8, 8 / 1.618))
-641    plt.errorbar(my_x, my_y, fmt='o')
-642    fit_start = my_x[0]
-643    fit_stop = my_x[-1]
-644    samples = np.arange(fit_start, fit_stop, 0.01)
-645    plt.plot(samples, samples, 'k--', zorder=11, label='Standard normal distribution')
-646    plt.plot(samples, probplot[1][0] * samples + probplot[1][1], zorder=10, label='Least squares fit, r=' + str(np.around(probplot[1][2], 3)), marker='', ls='-')
-647
-648    plt.xlabel('Theoretical quantiles')
-649    plt.ylabel('Ordered Values')
-650    plt.legend()
-651    plt.draw()
@@ -1447,34 +1451,34 @@ check if the residuals of the fit are gaussian distributed.
+632def qqplot(x, o_y, func, p):
+633    """Generates a quantile-quantile plot of the fit result which can be used to
+634       check if the residuals of the fit are gaussian distributed.
+635    """
+636
+637    residuals = []
+638    for i_x, i_y in zip(x, o_y):
+639        residuals.append((i_y - func(p, i_x)) / i_y.dvalue)
+640    residuals = sorted(residuals)
+641    my_y = [o.value for o in residuals]
+642    probplot = scipy.stats.probplot(my_y)
+643    my_x = probplot[0][0]
+644    plt.figure(figsize=(8, 8 / 1.618))
+645    plt.errorbar(my_x, my_y, fmt='o')
+646    fit_start = my_x[0]
+647    fit_stop = my_x[-1]
+648    samples = np.arange(fit_start, fit_stop, 0.01)
+649    plt.plot(samples, samples, 'k--', zorder=11, label='Standard normal distribution')
+650    plt.plot(samples, probplot[1][0] * samples + probplot[1][1], zorder=10, label='Least squares fit, r=' + str(np.around(probplot[1][2], 3)), marker='', ls='-')
+651
+652    plt.xlabel('Theoretical quantiles')
+653    plt.ylabel('Ordered Values')
+654    plt.legend()
+655    plt.draw()
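One way to produce this plot directly after a fit; the same figure can also be requested through the `qqplot=True` keyword of `least_squares`, as the first hunk shows. Sketch with invented data:

```python
import matplotlib.pyplot as plt
import pyerrors as pe

def linear(p, t):
    return p[0] + p[1] * t

x = [1.0, 2.0, 3.0, 4.0]
y = [pe.pseudo_Obs(v, 0.1, 'ens1') for v in [1.1, 1.9, 3.2, 3.9]]

res = pe.fits.least_squares(x, y, linear)
pe.fits.qqplot(x, y, linear, res.fit_parameters)
plt.show()  # qqplot itself only calls plt.draw()
```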
-654def residual_plot(x, y, func, fit_res):
-655    """ Generates a plot which compares the fit to the data and displays the corresponding residuals"""
-656    sorted_x = sorted(x)
-657    xstart = sorted_x[0] - 0.5 * (sorted_x[1] - sorted_x[0])
-658    xstop = sorted_x[-1] + 0.5 * (sorted_x[-1] - sorted_x[-2])
-659    x_samples = np.arange(xstart, xstop + 0.01, 0.01)
-660
-661    plt.figure(figsize=(8, 8 / 1.618))
-662    gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1], wspace=0.0, hspace=0.0)
-663    ax0 = plt.subplot(gs[0])
-664    ax0.errorbar(x, [o.value for o in y], yerr=[o.dvalue for o in y], ls='none', fmt='o', capsize=3, markersize=5, label='Data')
-665    ax0.plot(x_samples, func([o.value for o in fit_res], x_samples), label='Fit', zorder=10, ls='-', ms=0)
-666    ax0.set_xticklabels([])
-667    ax0.set_xlim([xstart, xstop])
-668    ax0.set_xticklabels([])
-669    ax0.legend()
-670
-671    residuals = (np.asarray([o.value for o in y]) - func([o.value for o in fit_res], x)) / np.asarray([o.dvalue for o in y])
-672    ax1 = plt.subplot(gs[1])
-673    ax1.plot(x, residuals, 'ko', ls='none', markersize=5)
-674    ax1.tick_params(direction='out')
-675    ax1.tick_params(axis="x", bottom=True, top=True, labelbottom=True)
-676    ax1.axhline(y=0.0, ls='--', color='k', marker=" ")
-677    ax1.fill_between(x_samples, -1.0, 1.0, alpha=0.1, facecolor='k')
-678    ax1.set_xlim([xstart, xstop])
-679    ax1.set_ylabel('Residuals')
-680    plt.subplots_adjust(wspace=None, hspace=None)
-681    plt.draw()
@@ -1494,22 +1498,22 @@ check if the residuals of the fit are gaussian distributed.
+658def residual_plot(x, y, func, fit_res):
+659    """ Generates a plot which compares the fit to the data and displays the corresponding residuals"""
+660    sorted_x = sorted(x)
+661    xstart = sorted_x[0] - 0.5 * (sorted_x[1] - sorted_x[0])
+662    xstop = sorted_x[-1] + 0.5 * (sorted_x[-1] - sorted_x[-2])
+663    x_samples = np.arange(xstart, xstop + 0.01, 0.01)
+664
+665    plt.figure(figsize=(8, 8 / 1.618))
+666    gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1], wspace=0.0, hspace=0.0)
+667    ax0 = plt.subplot(gs[0])
+668    ax0.errorbar(x, [o.value for o in y], yerr=[o.dvalue for o in y], ls='none', fmt='o', capsize=3, markersize=5, label='Data')
+669    ax0.plot(x_samples, func([o.value for o in fit_res], x_samples), label='Fit', zorder=10, ls='-', ms=0)
+670    ax0.set_xticklabels([])
+671    ax0.set_xlim([xstart, xstop])
+672    ax0.set_xticklabels([])
+673    ax0.legend()
+674
+675    residuals = (np.asarray([o.value for o in y]) - func([o.value for o in fit_res], x)) / np.asarray([o.dvalue for o in y])
+676    ax1 = plt.subplot(gs[1])
+677    ax1.plot(x, residuals, 'ko', ls='none', markersize=5)
+678    ax1.tick_params(direction='out')
+679    ax1.tick_params(axis="x", bottom=True, top=True, labelbottom=True)
+680    ax1.axhline(y=0.0, ls='--', color='k', marker=" ")
+681    ax1.fill_between(x_samples, -1.0, 1.0, alpha=0.1, facecolor='k')
+682    ax1.set_xlim([xstart, xstop])
+683    ax1.set_ylabel('Residuals')
+684    plt.subplots_adjust(wspace=None, hspace=None)
+685    plt.draw()
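In practice this plot is usually triggered through the `resplot` keyword of `least_squares` rather than called directly; a sketch along the lines of the previous example (data invented):

```python
import matplotlib.pyplot as plt
import pyerrors as pe

def linear(p, t):
    return p[0] + p[1] * t

x = [1.0, 2.0, 3.0, 4.0]
y = [pe.pseudo_Obs(v, 0.1, 'ens1') for v in [1.1, 1.9, 3.2, 3.9]]

res = pe.fits.least_squares(x, y, linear, resplot=True)  # calls residual_plot
plt.show()
```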
-684def error_band(x, func, beta):
-685    """Returns the error band for an array of sample values x, for given fit function func with optimized parameters beta."""
-686    cov = covariance(beta)
-687    if np.any(np.abs(cov - cov.T) > 1000 * np.finfo(np.float64).eps):
-688        warnings.warn("Covariance matrix is not symmetric within floating point precision", RuntimeWarning)
-689
-690    deriv = []
-691    for i, item in enumerate(x):
-692        deriv.append(np.array(egrad(func)([o.value for o in beta], item)))
@@ -1529,44 +1533,44 @@ check if the residuals of the fit are gaussian distributed.
+688def error_band(x, func, beta):
+689    """Returns the error band for an array of sample values x, for given fit function func with optimized parameters beta."""
+690    cov = covariance(beta)
+691    if np.any(np.abs(cov - cov.T) > 1000 * np.finfo(np.float64).eps):
+692        warnings.warn("Covariance matrix is not symmetric within floating point precision", RuntimeWarning)
 693
-694    err = []
+694    deriv = []
 695    for i, item in enumerate(x):
-696        err.append(np.sqrt(deriv[i] @ cov @ deriv[i]))
-697    err = np.array(err)
-698
-699    return err
+696        deriv.append(np.array(egrad(func)([o.value for o in beta], item)))
+697
+698    err = []
+699    for i, item in enumerate(x):
+700        err.append(np.sqrt(deriv[i] @ cov @ deriv[i]))
+701    err = np.array(err)
+702
+703    return err
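A hedged usage sketch: evaluating a one-sigma band of the fitted curve on a grid of points (fit and data invented; `gamma_method` is called first so that `covariance` can be evaluated on the fit parameters):

```python
import matplotlib.pyplot as plt
import numpy as np
import pyerrors as pe

def linear(p, t):
    return p[0] + p[1] * t

x = [1.0, 2.0, 3.0, 4.0]
y = [pe.pseudo_Obs(v, 0.1, 'ens1') for v in [1.1, 1.9, 3.2, 3.9]]

res = pe.fits.least_squares(x, y, linear)
for o in res.fit_parameters:
    o.gamma_method()

t = np.linspace(1.0, 4.0, 100)
band = pe.fits.error_band(t, linear, res.fit_parameters)
central = linear([o.value for o in res.fit_parameters], t)

plt.fill_between(t, central - band, central + band, alpha=0.2)
plt.show()
```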
-702def ks_test(objects=None):
-703    """Performs a Kolmogorov–Smirnov test for the p-values of all fit object.
-704
-705    Parameters
-706    ----------
-707    objects : list
-708        List of fit results to include in the analysis (optional).
-709    """
-710
-711    if objects is None:
-712        obs_list = []
-713        for obj in gc.get_objects():
-714            if isinstance(obj, Fit_result):
-715                obs_list.append(obj)
-716    else:
-717        obs_list = objects
-718
-719    p_values = [o.p_value for o in obs_list]
-720
-721    bins = len(p_values)
-722    x = np.arange(0, 1.001, 0.001)
-723    plt.plot(x, x, 'k', zorder=1)
-724    plt.xlim(0, 1)
-725    plt.ylim(0, 1)
-726    plt.xlabel('p-value')
-727    plt.ylabel('Cumulative probability')
-728    plt.title(str(bins) + ' p-values')
-729
-730    n = np.arange(1, bins + 1) / np.float64(bins)
-731    Xs = np.sort(p_values)
-732    plt.step(Xs, n)
-733    diffs = n - Xs
-734    loc_max_diff = np.argmax(np.abs(diffs))
-735    loc = Xs[loc_max_diff]
-736    plt.annotate('', xy=(loc, loc), xytext=(loc, loc + diffs[loc_max_diff]), arrowprops=dict(arrowstyle='<->', shrinkA=0, shrinkB=0))
-737    plt.draw()
-738
-739    print(scipy.stats.kstest(p_values, 'uniform'))
+706def ks_test(objects=None):
+707    """Performs a Kolmogorov–Smirnov test for the p-values of all fit object.
+708
+709    Parameters
+710    ----------
+711    objects : list
+712        List of fit results to include in the analysis (optional).
+713    """
+714
+715    if objects is None:
+716        obs_list = []
+717        for obj in gc.get_objects():
+718            if isinstance(obj, Fit_result):
+719                obs_list.append(obj)
+720    else:
+721        obs_list = objects
+722
+723    p_values = [o.p_value for o in obs_list]
+724
+725    bins = len(p_values)
+726    x = np.arange(0, 1.001, 0.001)
+727    plt.plot(x, x, 'k', zorder=1)
+728    plt.xlim(0, 1)
+729    plt.ylim(0, 1)
+730    plt.xlabel('p-value')
+731    plt.ylabel('Cumulative probability')
+732    plt.title(str(bins) + ' p-values')
+733
+734    n = np.arange(1, bins + 1) / np.float64(bins)
+735    Xs = np.sort(p_values)
+736    plt.step(Xs, n)
+737    diffs = n - Xs
+738    loc_max_diff = np.argmax(np.abs(diffs))
+739    loc = Xs[loc_max_diff]
+740    plt.annotate('', xy=(loc, loc), xytext=(loc, loc + diffs[loc_max_diff]), arrowprops=dict(arrowstyle='<->', shrinkA=0, shrinkB=0))
+741    plt.draw()
+742
+743    print(scipy.stats.kstest(p_values, 'uniform'))
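Possible end-to-end usage: collect the `Fit_result` objects of several independent fits and test their p-values for uniformity; when `objects` is omitted the function instead scans all live `Fit_result` instances via `gc`. Everything below is fabricated toy data:

```python
import numpy as np
import pyerrors as pe

def constant(p, t):
    return p[0] + 0.0 * t  # broadcast so the model has one value per point

rng = np.random.default_rng(42)
fit_results = []
for i in range(25):
    x = np.arange(6.)
    y = [pe.pseudo_Obs(1.0 + rng.normal(0.0, 0.1), 0.1, 'ens%d' % i)
         for _ in range(6)]
    fit_results.append(pe.fits.least_squares(x, y, constant, silent=True))

pe.fits.ks_test(objects=fit_results)
```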