From 68db55c6202946aaf735fbb5609460deafde1cd3 Mon Sep 17 00:00:00 2001 From: fjosw Date: Thu, 26 May 2022 09:25:29 +0000 Subject: [PATCH] Documentation updated --- docs/pyerrors/fits.html | 1222 +++++++++++++++++++-------------------- 1 file changed, 606 insertions(+), 616 deletions(-) diff --git a/docs/pyerrors/fits.html b/docs/pyerrors/fits.html index 29aa40e9..1e69c4b7 100644 --- a/docs/pyerrors/fits.html +++ b/docs/pyerrors/fits.html @@ -370,483 +370,475 @@ 267 hess = jacobian(jacobian(odr_chisquare))(np.concatenate((fitp, out.xplus.ravel()))) 268 except TypeError: 269 raise Exception("It is required to use autograd.numpy instead of numpy within fit functions, see the documentation for details.") from None -270 condn = np.linalg.cond(hess) -271 if condn > 1e8: -272 warnings.warn("Hessian matrix might be ill-conditioned ({0:1.2e}), error propagation might be unreliable.\n \ -273 Maybe try rescaling the problem such that all parameters are of O(1).".format(condn), RuntimeWarning) -274 try: -275 hess_inv = np.linalg.inv(hess) -276 except np.linalg.LinAlgError: -277 raise Exception("Cannot invert hessian matrix.") -278 except Exception: -279 raise Exception("Unkown error in connection with Hessian inverse.") -280 -281 def odr_chisquare_compact_x(d): -282 model = func(d[:n_parms], d[n_parms:n_parms + m].reshape(x_shape)) -283 chisq = anp.sum(((y_f - model) / dy_f) ** 2) + anp.sum(((d[n_parms + m:].reshape(x_shape) - d[n_parms:n_parms + m].reshape(x_shape)) / dx_f) ** 2) -284 return chisq -285 -286 jac_jac_x = jacobian(jacobian(odr_chisquare_compact_x))(np.concatenate((fitp, out.xplus.ravel(), x_f.ravel()))) -287 -288 deriv_x = -hess_inv @ jac_jac_x[:n_parms + m, n_parms + m:] -289 -290 def odr_chisquare_compact_y(d): -291 model = func(d[:n_parms], d[n_parms:n_parms + m].reshape(x_shape)) -292 chisq = anp.sum(((d[n_parms + m:] - model) / dy_f) ** 2) + anp.sum(((x_f - d[n_parms:n_parms + m].reshape(x_shape)) / dx_f) ** 2) -293 return chisq -294 -295 jac_jac_y = jacobian(jacobian(odr_chisquare_compact_y))(np.concatenate((fitp, out.xplus.ravel(), y_f))) +270 +271 def odr_chisquare_compact_x(d): +272 model = func(d[:n_parms], d[n_parms:n_parms + m].reshape(x_shape)) +273 chisq = anp.sum(((y_f - model) / dy_f) ** 2) + anp.sum(((d[n_parms + m:].reshape(x_shape) - d[n_parms:n_parms + m].reshape(x_shape)) / dx_f) ** 2) +274 return chisq +275 +276 jac_jac_x = jacobian(jacobian(odr_chisquare_compact_x))(np.concatenate((fitp, out.xplus.ravel(), x_f.ravel()))) +277 +278 # Compute hess^{-1} @ jac_jac_x[:n_parms + m, n_parms + m:] using LAPACK dgesv +279 try: +280 deriv_x = -scipy.linalg.solve(hess, jac_jac_x[:n_parms + m, n_parms + m:]) +281 except np.linalg.LinAlgError: +282 raise Exception("Cannot invert hessian matrix.") +283 +284 def odr_chisquare_compact_y(d): +285 model = func(d[:n_parms], d[n_parms:n_parms + m].reshape(x_shape)) +286 chisq = anp.sum(((d[n_parms + m:] - model) / dy_f) ** 2) + anp.sum(((x_f - d[n_parms:n_parms + m].reshape(x_shape)) / dx_f) ** 2) +287 return chisq +288 +289 jac_jac_y = jacobian(jacobian(odr_chisquare_compact_y))(np.concatenate((fitp, out.xplus.ravel(), y_f))) +290 +291 # Compute hess^{-1} @ jac_jac_y[:n_parms + m, n_parms + m:] using LAPACK dgesv +292 try: +293 deriv_y = -scipy.linalg.solve(hess, jac_jac_y[:n_parms + m, n_parms + m:]) +294 except np.linalg.LinAlgError: +295 raise Exception("Cannot invert hessian matrix.") 296 -297 deriv_y = -hess_inv @ jac_jac_y[:n_parms + m, n_parms + m:] -298 -299 result = [] -300 for i in range(n_parms): -301 
result.append(derived_observable(lambda my_var, **kwargs: (my_var[0] + np.finfo(np.float64).eps) / (x.ravel()[0].value + np.finfo(np.float64).eps) * out.beta[i], list(x.ravel()) + list(y), man_grad=list(deriv_x[i]) + list(deriv_y[i]))) +297 result = [] +298 for i in range(n_parms): +299 result.append(derived_observable(lambda my_var, **kwargs: (my_var[0] + np.finfo(np.float64).eps) / (x.ravel()[0].value + np.finfo(np.float64).eps) * out.beta[i], list(x.ravel()) + list(y), man_grad=list(deriv_x[i]) + list(deriv_y[i]))) +300 +301 output.fit_parameters = result 302 -303 output.fit_parameters = result -304 -305 output.odr_chisquare = odr_chisquare(np.concatenate((out.beta, out.xplus.ravel()))) -306 output.dof = x.shape[-1] - n_parms -307 output.p_value = 1 - chi2.cdf(output.odr_chisquare, output.dof) +303 output.odr_chisquare = odr_chisquare(np.concatenate((out.beta, out.xplus.ravel()))) +304 output.dof = x.shape[-1] - n_parms +305 output.p_value = 1 - chi2.cdf(output.odr_chisquare, output.dof) +306 +307 return output 308 -309 return output -310 -311 -312def _prior_fit(x, y, func, priors, silent=False, **kwargs): -313 output = Fit_result() +309 +310def _prior_fit(x, y, func, priors, silent=False, **kwargs): +311 output = Fit_result() +312 +313 output.fit_function = func 314 -315 output.fit_function = func +315 x = np.asarray(x) 316 -317 x = np.asarray(x) -318 -319 if not callable(func): -320 raise TypeError('func has to be a function.') -321 -322 for i in range(100): -323 try: -324 func(np.arange(i), 0) -325 except Exception: -326 pass -327 else: -328 break +317 if not callable(func): +318 raise TypeError('func has to be a function.') +319 +320 for i in range(100): +321 try: +322 func(np.arange(i), 0) +323 except Exception: +324 pass +325 else: +326 break +327 +328 n_parms = i 329 -330 n_parms = i -331 -332 if n_parms != len(priors): -333 raise Exception('Priors does not have the correct length.') -334 -335 def extract_val_and_dval(string): -336 split_string = string.split('(') -337 if '.' in split_string[0] and '.' not in split_string[1][:-1]: -338 factor = 10 ** -len(split_string[0].partition('.')[2]) -339 else: -340 factor = 1 -341 return float(split_string[0]), float(split_string[1][:-1]) * factor -342 -343 loc_priors = [] -344 for i_n, i_prior in enumerate(priors): -345 if isinstance(i_prior, Obs): -346 loc_priors.append(i_prior) -347 else: -348 loc_val, loc_dval = extract_val_and_dval(i_prior) -349 loc_priors.append(cov_Obs(loc_val, loc_dval ** 2, '#prior' + str(i_n) + f"_{np.random.randint(2147483647):010d}")) +330 if n_parms != len(priors): +331 raise Exception('Priors does not have the correct length.') +332 +333 def extract_val_and_dval(string): +334 split_string = string.split('(') +335 if '.' in split_string[0] and '.' 
not in split_string[1][:-1]: +336 factor = 10 ** -len(split_string[0].partition('.')[2]) +337 else: +338 factor = 1 +339 return float(split_string[0]), float(split_string[1][:-1]) * factor +340 +341 loc_priors = [] +342 for i_n, i_prior in enumerate(priors): +343 if isinstance(i_prior, Obs): +344 loc_priors.append(i_prior) +345 else: +346 loc_val, loc_dval = extract_val_and_dval(i_prior) +347 loc_priors.append(cov_Obs(loc_val, loc_dval ** 2, '#prior' + str(i_n) + f"_{np.random.randint(2147483647):010d}")) +348 +349 output.priors = loc_priors 350 -351 output.priors = loc_priors -352 -353 if not silent: -354 print('Fit with', n_parms, 'parameter' + 's' * (n_parms > 1)) -355 -356 y_f = [o.value for o in y] -357 dy_f = [o.dvalue for o in y] -358 -359 if np.any(np.asarray(dy_f) <= 0.0): -360 raise Exception('No y errors available, run the gamma method first.') -361 -362 p_f = [o.value for o in loc_priors] -363 dp_f = [o.dvalue for o in loc_priors] -364 -365 if np.any(np.asarray(dp_f) <= 0.0): -366 raise Exception('No prior errors available, run the gamma method first.') -367 -368 if 'initial_guess' in kwargs: -369 x0 = kwargs.get('initial_guess') -370 if len(x0) != n_parms: -371 raise Exception('Initial guess does not have the correct length.') -372 else: -373 x0 = p_f -374 -375 def chisqfunc(p): -376 model = func(p, x) -377 chisq = anp.sum(((y_f - model) / dy_f) ** 2) + anp.sum(((p_f - p) / dp_f) ** 2) -378 return chisq -379 -380 if not silent: -381 print('Method: migrad') -382 -383 m = iminuit.Minuit(chisqfunc, x0) -384 m.errordef = 1 -385 m.print_level = 0 -386 if 'tol' in kwargs: -387 m.tol = kwargs.get('tol') -388 else: -389 m.tol = 1e-4 -390 m.migrad() -391 params = np.asarray(m.values) +351 if not silent: +352 print('Fit with', n_parms, 'parameter' + 's' * (n_parms > 1)) +353 +354 y_f = [o.value for o in y] +355 dy_f = [o.dvalue for o in y] +356 +357 if np.any(np.asarray(dy_f) <= 0.0): +358 raise Exception('No y errors available, run the gamma method first.') +359 +360 p_f = [o.value for o in loc_priors] +361 dp_f = [o.dvalue for o in loc_priors] +362 +363 if np.any(np.asarray(dp_f) <= 0.0): +364 raise Exception('No prior errors available, run the gamma method first.') +365 +366 if 'initial_guess' in kwargs: +367 x0 = kwargs.get('initial_guess') +368 if len(x0) != n_parms: +369 raise Exception('Initial guess does not have the correct length.') +370 else: +371 x0 = p_f +372 +373 def chisqfunc(p): +374 model = func(p, x) +375 chisq = anp.sum(((y_f - model) / dy_f) ** 2) + anp.sum(((p_f - p) / dp_f) ** 2) +376 return chisq +377 +378 if not silent: +379 print('Method: migrad') +380 +381 m = iminuit.Minuit(chisqfunc, x0) +382 m.errordef = 1 +383 m.print_level = 0 +384 if 'tol' in kwargs: +385 m.tol = kwargs.get('tol') +386 else: +387 m.tol = 1e-4 +388 m.migrad() +389 params = np.asarray(m.values) +390 +391 output.chisquare_by_dof = m.fval / len(x) 392 -393 output.chisquare_by_dof = m.fval / len(x) +393 output.method = 'migrad' 394 -395 output.method = 'migrad' -396 -397 if not silent: -398 print('chisquare/d.o.f.:', output.chisquare_by_dof) -399 -400 if not m.fmin.is_valid: -401 raise Exception('The minimization procedure did not converge.') +395 if not silent: +396 print('chisquare/d.o.f.:', output.chisquare_by_dof) +397 +398 if not m.fmin.is_valid: +399 raise Exception('The minimization procedure did not converge.') +400 +401 hess_inv = np.linalg.pinv(jacobian(jacobian(chisqfunc))(params)) 402 -403 hess_inv = np.linalg.pinv(jacobian(jacobian(chisqfunc))(params)) -404 -405 def 
chisqfunc_compact(d): -406 model = func(d[:n_parms], x) -407 chisq = anp.sum(((d[n_parms: n_parms + len(x)] - model) / dy_f) ** 2) + anp.sum(((d[n_parms + len(x):] - d[:n_parms]) / dp_f) ** 2) -408 return chisq +403 def chisqfunc_compact(d): +404 model = func(d[:n_parms], x) +405 chisq = anp.sum(((d[n_parms: n_parms + len(x)] - model) / dy_f) ** 2) + anp.sum(((d[n_parms + len(x):] - d[:n_parms]) / dp_f) ** 2) +406 return chisq +407 +408 jac_jac = jacobian(jacobian(chisqfunc_compact))(np.concatenate((params, y_f, p_f))) 409 -410 jac_jac = jacobian(jacobian(chisqfunc_compact))(np.concatenate((params, y_f, p_f))) +410 deriv = -hess_inv @ jac_jac[:n_parms, n_parms:] 411 -412 deriv = -hess_inv @ jac_jac[:n_parms, n_parms:] -413 -414 result = [] -415 for i in range(n_parms): -416 result.append(derived_observable(lambda x, **kwargs: (x[0] + np.finfo(np.float64).eps) / (y[0].value + np.finfo(np.float64).eps) * params[i], list(y) + list(loc_priors), man_grad=list(deriv[i]))) -417 -418 output.fit_parameters = result -419 output.chisquare = chisqfunc(np.asarray(params)) -420 -421 if kwargs.get('resplot') is True: -422 residual_plot(x, y, func, result) -423 -424 if kwargs.get('qqplot') is True: -425 qqplot(x, y, func, result) +412 result = [] +413 for i in range(n_parms): +414 result.append(derived_observable(lambda x, **kwargs: (x[0] + np.finfo(np.float64).eps) / (y[0].value + np.finfo(np.float64).eps) * params[i], list(y) + list(loc_priors), man_grad=list(deriv[i]))) +415 +416 output.fit_parameters = result +417 output.chisquare = chisqfunc(np.asarray(params)) +418 +419 if kwargs.get('resplot') is True: +420 residual_plot(x, y, func, result) +421 +422 if kwargs.get('qqplot') is True: +423 qqplot(x, y, func, result) +424 +425 return output 426 -427 return output -428 +427 +428def _standard_fit(x, y, func, silent=False, **kwargs): 429 -430def _standard_fit(x, y, func, silent=False, **kwargs): +430 output = Fit_result() 431 -432 output = Fit_result() +432 output.fit_function = func 433 -434 output.fit_function = func +434 x = np.asarray(x) 435 -436 x = np.asarray(x) -437 -438 if x.shape[-1] != len(y): -439 raise Exception('x and y input have to have the same length') -440 -441 if len(x.shape) > 2: -442 raise Exception('Unknown format for x values') -443 -444 if not callable(func): -445 raise TypeError('func has to be a function.') -446 -447 for i in range(25): -448 try: -449 func(np.arange(i), x.T[0]) -450 except Exception: -451 pass -452 else: -453 break +436 if x.shape[-1] != len(y): +437 raise Exception('x and y input have to have the same length') +438 +439 if len(x.shape) > 2: +440 raise Exception('Unknown format for x values') +441 +442 if not callable(func): +443 raise TypeError('func has to be a function.') +444 +445 for i in range(25): +446 try: +447 func(np.arange(i), x.T[0]) +448 except Exception: +449 pass +450 else: +451 break +452 +453 n_parms = i 454 -455 n_parms = i -456 -457 if not silent: -458 print('Fit with', n_parms, 'parameter' + 's' * (n_parms > 1)) -459 -460 y_f = [o.value for o in y] -461 dy_f = [o.dvalue for o in y] -462 -463 if np.any(np.asarray(dy_f) <= 0.0): -464 raise Exception('No y errors available, run the gamma method first.') -465 -466 if 'initial_guess' in kwargs: -467 x0 = kwargs.get('initial_guess') -468 if len(x0) != n_parms: -469 raise Exception('Initial guess does not have the correct length: %d vs. 
%d' % (len(x0), n_parms)) -470 else: -471 x0 = [0.1] * n_parms -472 -473 if kwargs.get('correlated_fit') is True: -474 corr = covariance(y, correlation=True, **kwargs) -475 covdiag = np.diag(1 / np.asarray(dy_f)) -476 condn = np.linalg.cond(corr) -477 if condn > 0.1 / np.finfo(float).eps: -478 raise Exception(f"Cannot invert correlation matrix as its condition number exceeds machine precision ({condn:1.2e})") -479 if condn > 1 / np.sqrt(np.finfo(float).eps): -480 warnings.warn("Correlation matrix may be ill-conditioned, condition number: {%1.2e}" % (condn), RuntimeWarning) -481 chol = np.linalg.cholesky(corr) -482 chol_inv = np.linalg.inv(chol) -483 chol_inv = np.dot(chol_inv, covdiag) -484 -485 def chisqfunc_corr(p): -486 model = func(p, x) -487 chisq = anp.sum(anp.dot(chol_inv, (y_f - model)) ** 2) -488 return chisq -489 -490 def chisqfunc(p): -491 model = func(p, x) -492 chisq = anp.sum(((y_f - model) / dy_f) ** 2) -493 return chisq -494 -495 output.method = kwargs.get('method', 'Levenberg-Marquardt') -496 if not silent: -497 print('Method:', output.method) -498 -499 if output.method != 'Levenberg-Marquardt': -500 if output.method == 'migrad': -501 fit_result = iminuit.minimize(chisqfunc, x0, tol=1e-4) # Stopping criterion 0.002 * tol * errordef -502 if kwargs.get('correlated_fit') is True: -503 fit_result = iminuit.minimize(chisqfunc_corr, fit_result.x, tol=1e-4) # Stopping criterion 0.002 * tol * errordef -504 output.iterations = fit_result.nfev -505 else: -506 fit_result = scipy.optimize.minimize(chisqfunc, x0, method=kwargs.get('method'), tol=1e-12) -507 if kwargs.get('correlated_fit') is True: -508 fit_result = scipy.optimize.minimize(chisqfunc_corr, fit_result.x, method=kwargs.get('method'), tol=1e-12) -509 output.iterations = fit_result.nit +455 if not silent: +456 print('Fit with', n_parms, 'parameter' + 's' * (n_parms > 1)) +457 +458 y_f = [o.value for o in y] +459 dy_f = [o.dvalue for o in y] +460 +461 if np.any(np.asarray(dy_f) <= 0.0): +462 raise Exception('No y errors available, run the gamma method first.') +463 +464 if 'initial_guess' in kwargs: +465 x0 = kwargs.get('initial_guess') +466 if len(x0) != n_parms: +467 raise Exception('Initial guess does not have the correct length: %d vs. 
%d' % (len(x0), n_parms)) +468 else: +469 x0 = [0.1] * n_parms +470 +471 if kwargs.get('correlated_fit') is True: +472 corr = covariance(y, correlation=True, **kwargs) +473 covdiag = np.diag(1 / np.asarray(dy_f)) +474 condn = np.linalg.cond(corr) +475 if condn > 0.1 / np.finfo(float).eps: +476 raise Exception(f"Cannot invert correlation matrix as its condition number exceeds machine precision ({condn:1.2e})") +477 if condn > 1 / np.sqrt(np.finfo(float).eps): +478 warnings.warn("Correlation matrix may be ill-conditioned, condition number: {%1.2e}" % (condn), RuntimeWarning) +479 chol = np.linalg.cholesky(corr) +480 chol_inv = np.linalg.inv(chol) +481 chol_inv = np.dot(chol_inv, covdiag) +482 +483 def chisqfunc_corr(p): +484 model = func(p, x) +485 chisq = anp.sum(anp.dot(chol_inv, (y_f - model)) ** 2) +486 return chisq +487 +488 def chisqfunc(p): +489 model = func(p, x) +490 chisq = anp.sum(((y_f - model) / dy_f) ** 2) +491 return chisq +492 +493 output.method = kwargs.get('method', 'Levenberg-Marquardt') +494 if not silent: +495 print('Method:', output.method) +496 +497 if output.method != 'Levenberg-Marquardt': +498 if output.method == 'migrad': +499 fit_result = iminuit.minimize(chisqfunc, x0, tol=1e-4) # Stopping criterion 0.002 * tol * errordef +500 if kwargs.get('correlated_fit') is True: +501 fit_result = iminuit.minimize(chisqfunc_corr, fit_result.x, tol=1e-4) # Stopping criterion 0.002 * tol * errordef +502 output.iterations = fit_result.nfev +503 else: +504 fit_result = scipy.optimize.minimize(chisqfunc, x0, method=kwargs.get('method'), tol=1e-12) +505 if kwargs.get('correlated_fit') is True: +506 fit_result = scipy.optimize.minimize(chisqfunc_corr, fit_result.x, method=kwargs.get('method'), tol=1e-12) +507 output.iterations = fit_result.nit +508 +509 chisquare = fit_result.fun 510 -511 chisquare = fit_result.fun -512 -513 else: -514 if kwargs.get('correlated_fit') is True: -515 def chisqfunc_residuals_corr(p): -516 model = func(p, x) -517 chisq = anp.dot(chol_inv, (y_f - model)) -518 return chisq -519 -520 def chisqfunc_residuals(p): -521 model = func(p, x) -522 chisq = ((y_f - model) / dy_f) -523 return chisq -524 -525 fit_result = scipy.optimize.least_squares(chisqfunc_residuals, x0, method='lm', ftol=1e-15, gtol=1e-15, xtol=1e-15) -526 if kwargs.get('correlated_fit') is True: -527 fit_result = scipy.optimize.least_squares(chisqfunc_residuals_corr, fit_result.x, method='lm', ftol=1e-15, gtol=1e-15, xtol=1e-15) +511 else: +512 if kwargs.get('correlated_fit') is True: +513 def chisqfunc_residuals_corr(p): +514 model = func(p, x) +515 chisq = anp.dot(chol_inv, (y_f - model)) +516 return chisq +517 +518 def chisqfunc_residuals(p): +519 model = func(p, x) +520 chisq = ((y_f - model) / dy_f) +521 return chisq +522 +523 fit_result = scipy.optimize.least_squares(chisqfunc_residuals, x0, method='lm', ftol=1e-15, gtol=1e-15, xtol=1e-15) +524 if kwargs.get('correlated_fit') is True: +525 fit_result = scipy.optimize.least_squares(chisqfunc_residuals_corr, fit_result.x, method='lm', ftol=1e-15, gtol=1e-15, xtol=1e-15) +526 +527 chisquare = np.sum(fit_result.fun ** 2) 528 -529 chisquare = np.sum(fit_result.fun ** 2) +529 output.iterations = fit_result.nfev 530 -531 output.iterations = fit_result.nfev -532 -533 if not fit_result.success: -534 raise Exception('The minimization procedure did not converge.') -535 -536 if x.shape[-1] - n_parms > 0: -537 output.chisquare_by_dof = chisquare / (x.shape[-1] - n_parms) -538 else: -539 output.chisquare_by_dof = float('nan') -540 -541 output.message = 
fit_result.message -542 if not silent: -543 print(fit_result.message) -544 print('chisquare/d.o.f.:', output.chisquare_by_dof) -545 -546 if kwargs.get('expected_chisquare') is True: -547 if kwargs.get('correlated_fit') is not True: -548 W = np.diag(1 / np.asarray(dy_f)) -549 cov = covariance(y) -550 A = W @ jacobian(func)(fit_result.x, x) -551 P_phi = A @ np.linalg.pinv(A.T @ A) @ A.T -552 expected_chisquare = np.trace((np.identity(x.shape[-1]) - P_phi) @ W @ cov @ W) -553 output.chisquare_by_expected_chisquare = chisquare / expected_chisquare -554 if not silent: -555 print('chisquare/expected_chisquare:', -556 output.chisquare_by_expected_chisquare) -557 -558 fitp = fit_result.x -559 try: -560 hess = jacobian(jacobian(chisqfunc))(fitp) -561 except TypeError: -562 raise Exception("It is required to use autograd.numpy instead of numpy within fit functions, see the documentation for details.") from None -563 condn = np.linalg.cond(hess) -564 if condn > 1e8: -565 warnings.warn("Hessian matrix might be ill-conditioned ({0:1.2e}), error propagation might be unreliable.\n \ -566 Maybe try rescaling the problem such that all parameters are of O(1).".format(condn), RuntimeWarning) -567 try: -568 hess_inv = np.linalg.inv(hess) -569 except np.linalg.LinAlgError: -570 raise Exception("Cannot invert hessian matrix.") -571 except Exception: -572 raise Exception("Unkown error in connection with Hessian inverse.") +531 if not fit_result.success: +532 raise Exception('The minimization procedure did not converge.') +533 +534 if x.shape[-1] - n_parms > 0: +535 output.chisquare_by_dof = chisquare / (x.shape[-1] - n_parms) +536 else: +537 output.chisquare_by_dof = float('nan') +538 +539 output.message = fit_result.message +540 if not silent: +541 print(fit_result.message) +542 print('chisquare/d.o.f.:', output.chisquare_by_dof) +543 +544 if kwargs.get('expected_chisquare') is True: +545 if kwargs.get('correlated_fit') is not True: +546 W = np.diag(1 / np.asarray(dy_f)) +547 cov = covariance(y) +548 A = W @ jacobian(func)(fit_result.x, x) +549 P_phi = A @ np.linalg.pinv(A.T @ A) @ A.T +550 expected_chisquare = np.trace((np.identity(x.shape[-1]) - P_phi) @ W @ cov @ W) +551 output.chisquare_by_expected_chisquare = chisquare / expected_chisquare +552 if not silent: +553 print('chisquare/expected_chisquare:', +554 output.chisquare_by_expected_chisquare) +555 +556 fitp = fit_result.x +557 try: +558 hess = jacobian(jacobian(chisqfunc))(fitp) +559 except TypeError: +560 raise Exception("It is required to use autograd.numpy instead of numpy within fit functions, see the documentation for details.") from None +561 +562 if kwargs.get('correlated_fit') is True: +563 def chisqfunc_compact(d): +564 model = func(d[:n_parms], x) +565 chisq = anp.sum(anp.dot(chol_inv, (d[n_parms:] - model)) ** 2) +566 return chisq +567 +568 else: +569 def chisqfunc_compact(d): +570 model = func(d[:n_parms], x) +571 chisq = anp.sum(((d[n_parms:] - model) / dy_f) ** 2) +572 return chisq 573 -574 if kwargs.get('correlated_fit') is True: -575 def chisqfunc_compact(d): -576 model = func(d[:n_parms], x) -577 chisq = anp.sum(anp.dot(chol_inv, (d[n_parms:] - model)) ** 2) -578 return chisq -579 -580 else: -581 def chisqfunc_compact(d): -582 model = func(d[:n_parms], x) -583 chisq = anp.sum(((d[n_parms:] - model) / dy_f) ** 2) -584 return chisq +574 jac_jac = jacobian(jacobian(chisqfunc_compact))(np.concatenate((fitp, y_f))) +575 +576 # Compute hess^{-1} @ jac_jac[:n_parms, n_parms:] using LAPACK dgesv +577 try: +578 deriv = -scipy.linalg.solve(hess, 
jac_jac[:n_parms, n_parms:]) +579 except np.linalg.LinAlgError: +580 raise Exception("Cannot invert hessian matrix.") +581 +582 result = [] +583 for i in range(n_parms): +584 result.append(derived_observable(lambda x, **kwargs: (x[0] + np.finfo(np.float64).eps) / (y[0].value + np.finfo(np.float64).eps) * fit_result.x[i], list(y), man_grad=list(deriv[i]))) 585 -586 jac_jac = jacobian(jacobian(chisqfunc_compact))(np.concatenate((fitp, y_f))) +586 output.fit_parameters = result 587 -588 deriv = -hess_inv @ jac_jac[:n_parms, n_parms:] -589 -590 result = [] -591 for i in range(n_parms): -592 result.append(derived_observable(lambda x, **kwargs: (x[0] + np.finfo(np.float64).eps) / (y[0].value + np.finfo(np.float64).eps) * fit_result.x[i], list(y), man_grad=list(deriv[i]))) -593 -594 output.fit_parameters = result -595 -596 output.chisquare = chisqfunc(fit_result.x) -597 output.dof = x.shape[-1] - n_parms -598 output.p_value = 1 - chi2.cdf(output.chisquare, output.dof) +588 output.chisquare = chisqfunc(fit_result.x) +589 output.dof = x.shape[-1] - n_parms +590 output.p_value = 1 - chi2.cdf(output.chisquare, output.dof) +591 +592 if kwargs.get('resplot') is True: +593 residual_plot(x, y, func, result) +594 +595 if kwargs.get('qqplot') is True: +596 qqplot(x, y, func, result) +597 +598 return output 599 -600 if kwargs.get('resplot') is True: -601 residual_plot(x, y, func, result) -602 -603 if kwargs.get('qqplot') is True: -604 qqplot(x, y, func, result) -605 -606 return output -607 -608 -609def fit_lin(x, y, **kwargs): -610 """Performs a linear fit to y = n + m * x and returns two Obs n, m. -611 -612 Parameters -613 ---------- -614 x : list -615 Can either be a list of floats in which case no xerror is assumed, or -616 a list of Obs, where the dvalues of the Obs are used as xerror for the fit. -617 y : list -618 List of Obs, the dvalues of the Obs are used as yerror for the fit. -619 """ -620 -621 def f(a, x): -622 y = a[0] + a[1] * x -623 return y -624 -625 if all(isinstance(n, Obs) for n in x): -626 out = total_least_squares(x, y, f, **kwargs) -627 return out.fit_parameters -628 elif all(isinstance(n, float) or isinstance(n, int) for n in x) or isinstance(x, np.ndarray): -629 out = least_squares(x, y, f, **kwargs) -630 return out.fit_parameters -631 else: -632 raise Exception('Unsupported types for x') -633 -634 -635def qqplot(x, o_y, func, p): -636 """Generates a quantile-quantile plot of the fit result which can be used to -637 check if the residuals of the fit are gaussian distributed. -638 """ -639 -640 residuals = [] -641 for i_x, i_y in zip(x, o_y): -642 residuals.append((i_y - func(p, i_x)) / i_y.dvalue) -643 residuals = sorted(residuals) -644 my_y = [o.value for o in residuals] -645 probplot = scipy.stats.probplot(my_y) -646 my_x = probplot[0][0] -647 plt.figure(figsize=(8, 8 / 1.618)) -648 plt.errorbar(my_x, my_y, fmt='o') -649 fit_start = my_x[0] -650 fit_stop = my_x[-1] -651 samples = np.arange(fit_start, fit_stop, 0.01) -652 plt.plot(samples, samples, 'k--', zorder=11, label='Standard normal distribution') -653 plt.plot(samples, probplot[1][0] * samples + probplot[1][1], zorder=10, label='Least squares fit, r=' + str(np.around(probplot[1][2], 3)), marker='', ls='-') -654 -655 plt.xlabel('Theoretical quantiles') -656 plt.ylabel('Ordered Values') -657 plt.legend() -658 plt.draw() +600 +601def fit_lin(x, y, **kwargs): +602 """Performs a linear fit to y = n + m * x and returns two Obs n, m. 
+603 +604 Parameters +605 ---------- +606 x : list +607 Can either be a list of floats in which case no xerror is assumed, or +608 a list of Obs, where the dvalues of the Obs are used as xerror for the fit. +609 y : list +610 List of Obs, the dvalues of the Obs are used as yerror for the fit. +611 """ +612 +613 def f(a, x): +614 y = a[0] + a[1] * x +615 return y +616 +617 if all(isinstance(n, Obs) for n in x): +618 out = total_least_squares(x, y, f, **kwargs) +619 return out.fit_parameters +620 elif all(isinstance(n, float) or isinstance(n, int) for n in x) or isinstance(x, np.ndarray): +621 out = least_squares(x, y, f, **kwargs) +622 return out.fit_parameters +623 else: +624 raise Exception('Unsupported types for x') +625 +626 +627def qqplot(x, o_y, func, p): +628 """Generates a quantile-quantile plot of the fit result which can be used to +629 check if the residuals of the fit are gaussian distributed. +630 """ +631 +632 residuals = [] +633 for i_x, i_y in zip(x, o_y): +634 residuals.append((i_y - func(p, i_x)) / i_y.dvalue) +635 residuals = sorted(residuals) +636 my_y = [o.value for o in residuals] +637 probplot = scipy.stats.probplot(my_y) +638 my_x = probplot[0][0] +639 plt.figure(figsize=(8, 8 / 1.618)) +640 plt.errorbar(my_x, my_y, fmt='o') +641 fit_start = my_x[0] +642 fit_stop = my_x[-1] +643 samples = np.arange(fit_start, fit_stop, 0.01) +644 plt.plot(samples, samples, 'k--', zorder=11, label='Standard normal distribution') +645 plt.plot(samples, probplot[1][0] * samples + probplot[1][1], zorder=10, label='Least squares fit, r=' + str(np.around(probplot[1][2], 3)), marker='', ls='-') +646 +647 plt.xlabel('Theoretical quantiles') +648 plt.ylabel('Ordered Values') +649 plt.legend() +650 plt.draw() +651 +652 +653def residual_plot(x, y, func, fit_res): +654 """ Generates a plot which compares the fit to the data and displays the corresponding residuals""" +655 sorted_x = sorted(x) +656 xstart = sorted_x[0] - 0.5 * (sorted_x[1] - sorted_x[0]) +657 xstop = sorted_x[-1] + 0.5 * (sorted_x[-1] - sorted_x[-2]) +658 x_samples = np.arange(xstart, xstop + 0.01, 0.01) 659 -660 -661def residual_plot(x, y, func, fit_res): -662 """ Generates a plot which compares the fit to the data and displays the corresponding residuals""" -663 sorted_x = sorted(x) -664 xstart = sorted_x[0] - 0.5 * (sorted_x[1] - sorted_x[0]) -665 xstop = sorted_x[-1] + 0.5 * (sorted_x[-1] - sorted_x[-2]) -666 x_samples = np.arange(xstart, xstop + 0.01, 0.01) -667 -668 plt.figure(figsize=(8, 8 / 1.618)) -669 gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1], wspace=0.0, hspace=0.0) -670 ax0 = plt.subplot(gs[0]) -671 ax0.errorbar(x, [o.value for o in y], yerr=[o.dvalue for o in y], ls='none', fmt='o', capsize=3, markersize=5, label='Data') -672 ax0.plot(x_samples, func([o.value for o in fit_res], x_samples), label='Fit', zorder=10, ls='-', ms=0) -673 ax0.set_xticklabels([]) -674 ax0.set_xlim([xstart, xstop]) -675 ax0.set_xticklabels([]) -676 ax0.legend() -677 -678 residuals = (np.asarray([o.value for o in y]) - func([o.value for o in fit_res], x)) / np.asarray([o.dvalue for o in y]) -679 ax1 = plt.subplot(gs[1]) -680 ax1.plot(x, residuals, 'ko', ls='none', markersize=5) -681 ax1.tick_params(direction='out') -682 ax1.tick_params(axis="x", bottom=True, top=True, labelbottom=True) -683 ax1.axhline(y=0.0, ls='--', color='k', marker=" ") -684 ax1.fill_between(x_samples, -1.0, 1.0, alpha=0.1, facecolor='k') -685 ax1.set_xlim([xstart, xstop]) -686 ax1.set_ylabel('Residuals') -687 plt.subplots_adjust(wspace=None, hspace=None) -688 
plt.draw() -689 -690 -691def error_band(x, func, beta): -692 """Returns the error band for an array of sample values x, for given fit function func with optimized parameters beta.""" -693 cov = covariance(beta) -694 if np.any(np.abs(cov - cov.T) > 1000 * np.finfo(np.float64).eps): -695 warnings.warn("Covariance matrix is not symmetric within floating point precision", RuntimeWarning) -696 -697 deriv = [] -698 for i, item in enumerate(x): -699 deriv.append(np.array(egrad(func)([o.value for o in beta], item))) +660 plt.figure(figsize=(8, 8 / 1.618)) +661 gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1], wspace=0.0, hspace=0.0) +662 ax0 = plt.subplot(gs[0]) +663 ax0.errorbar(x, [o.value for o in y], yerr=[o.dvalue for o in y], ls='none', fmt='o', capsize=3, markersize=5, label='Data') +664 ax0.plot(x_samples, func([o.value for o in fit_res], x_samples), label='Fit', zorder=10, ls='-', ms=0) +665 ax0.set_xticklabels([]) +666 ax0.set_xlim([xstart, xstop]) +667 ax0.set_xticklabels([]) +668 ax0.legend() +669 +670 residuals = (np.asarray([o.value for o in y]) - func([o.value for o in fit_res], x)) / np.asarray([o.dvalue for o in y]) +671 ax1 = plt.subplot(gs[1]) +672 ax1.plot(x, residuals, 'ko', ls='none', markersize=5) +673 ax1.tick_params(direction='out') +674 ax1.tick_params(axis="x", bottom=True, top=True, labelbottom=True) +675 ax1.axhline(y=0.0, ls='--', color='k', marker=" ") +676 ax1.fill_between(x_samples, -1.0, 1.0, alpha=0.1, facecolor='k') +677 ax1.set_xlim([xstart, xstop]) +678 ax1.set_ylabel('Residuals') +679 plt.subplots_adjust(wspace=None, hspace=None) +680 plt.draw() +681 +682 +683def error_band(x, func, beta): +684 """Returns the error band for an array of sample values x, for given fit function func with optimized parameters beta.""" +685 cov = covariance(beta) +686 if np.any(np.abs(cov - cov.T) > 1000 * np.finfo(np.float64).eps): +687 warnings.warn("Covariance matrix is not symmetric within floating point precision", RuntimeWarning) +688 +689 deriv = [] +690 for i, item in enumerate(x): +691 deriv.append(np.array(egrad(func)([o.value for o in beta], item))) +692 +693 err = [] +694 for i, item in enumerate(x): +695 err.append(np.sqrt(deriv[i] @ cov @ deriv[i])) +696 err = np.array(err) +697 +698 return err +699 700 -701 err = [] -702 for i, item in enumerate(x): -703 err.append(np.sqrt(deriv[i] @ cov @ deriv[i])) -704 err = np.array(err) -705 -706 return err -707 -708 -709def ks_test(objects=None): -710 """Performs a Kolmogorov–Smirnov test for the p-values of all fit object. -711 -712 Parameters -713 ---------- -714 objects : list -715 List of fit results to include in the analysis (optional). -716 """ +701def ks_test(objects=None): +702 """Performs a Kolmogorov–Smirnov test for the p-values of all fit object. +703 +704 Parameters +705 ---------- +706 objects : list +707 List of fit results to include in the analysis (optional). 
+708 """ +709 +710 if objects is None: +711 obs_list = [] +712 for obj in gc.get_objects(): +713 if isinstance(obj, Fit_result): +714 obs_list.append(obj) +715 else: +716 obs_list = objects 717 -718 if objects is None: -719 obs_list = [] -720 for obj in gc.get_objects(): -721 if isinstance(obj, Fit_result): -722 obs_list.append(obj) -723 else: -724 obs_list = objects -725 -726 p_values = [o.p_value for o in obs_list] -727 -728 bins = len(p_values) -729 x = np.arange(0, 1.001, 0.001) -730 plt.plot(x, x, 'k', zorder=1) -731 plt.xlim(0, 1) -732 plt.ylim(0, 1) -733 plt.xlabel('p-value') -734 plt.ylabel('Cumulative probability') -735 plt.title(str(bins) + ' p-values') -736 -737 n = np.arange(1, bins + 1) / np.float64(bins) -738 Xs = np.sort(p_values) -739 plt.step(Xs, n) -740 diffs = n - Xs -741 loc_max_diff = np.argmax(np.abs(diffs)) -742 loc = Xs[loc_max_diff] -743 plt.annotate('', xy=(loc, loc), xytext=(loc, loc + diffs[loc_max_diff]), arrowprops=dict(arrowstyle='<->', shrinkA=0, shrinkB=0)) -744 plt.draw() -745 -746 print(scipy.stats.kstest(p_values, 'uniform')) +718 p_values = [o.p_value for o in obs_list] +719 +720 bins = len(p_values) +721 x = np.arange(0, 1.001, 0.001) +722 plt.plot(x, x, 'k', zorder=1) +723 plt.xlim(0, 1) +724 plt.ylim(0, 1) +725 plt.xlabel('p-value') +726 plt.ylabel('Cumulative probability') +727 plt.title(str(bins) + ' p-values') +728 +729 n = np.arange(1, bins + 1) / np.float64(bins) +730 Xs = np.sort(p_values) +731 plt.step(Xs, n) +732 diffs = n - Xs +733 loc_max_diff = np.argmax(np.abs(diffs)) +734 loc = Xs[loc_max_diff] +735 plt.annotate('', xy=(loc, loc), xytext=(loc, loc + diffs[loc_max_diff]), arrowprops=dict(arrowstyle='<->', shrinkA=0, shrinkB=0)) +736 plt.draw() +737 +738 print(scipy.stats.kstest(p_values, 'uniform')) @@ -1260,46 +1252,44 @@ If True, a quantile-quantile plot of the fit result is generated (default False) 268 hess = jacobian(jacobian(odr_chisquare))(np.concatenate((fitp, out.xplus.ravel()))) 269 except TypeError: 270 raise Exception("It is required to use autograd.numpy instead of numpy within fit functions, see the documentation for details.") from None -271 condn = np.linalg.cond(hess) -272 if condn > 1e8: -273 warnings.warn("Hessian matrix might be ill-conditioned ({0:1.2e}), error propagation might be unreliable.\n \ -274 Maybe try rescaling the problem such that all parameters are of O(1).".format(condn), RuntimeWarning) -275 try: -276 hess_inv = np.linalg.inv(hess) -277 except np.linalg.LinAlgError: -278 raise Exception("Cannot invert hessian matrix.") -279 except Exception: -280 raise Exception("Unkown error in connection with Hessian inverse.") -281 -282 def odr_chisquare_compact_x(d): -283 model = func(d[:n_parms], d[n_parms:n_parms + m].reshape(x_shape)) -284 chisq = anp.sum(((y_f - model) / dy_f) ** 2) + anp.sum(((d[n_parms + m:].reshape(x_shape) - d[n_parms:n_parms + m].reshape(x_shape)) / dx_f) ** 2) -285 return chisq -286 -287 jac_jac_x = jacobian(jacobian(odr_chisquare_compact_x))(np.concatenate((fitp, out.xplus.ravel(), x_f.ravel()))) -288 -289 deriv_x = -hess_inv @ jac_jac_x[:n_parms + m, n_parms + m:] -290 -291 def odr_chisquare_compact_y(d): -292 model = func(d[:n_parms], d[n_parms:n_parms + m].reshape(x_shape)) -293 chisq = anp.sum(((d[n_parms + m:] - model) / dy_f) ** 2) + anp.sum(((x_f - d[n_parms:n_parms + m].reshape(x_shape)) / dx_f) ** 2) -294 return chisq -295 -296 jac_jac_y = jacobian(jacobian(odr_chisquare_compact_y))(np.concatenate((fitp, out.xplus.ravel(), y_f))) +271 +272 def odr_chisquare_compact_x(d): +273 
model = func(d[:n_parms], d[n_parms:n_parms + m].reshape(x_shape)) +274 chisq = anp.sum(((y_f - model) / dy_f) ** 2) + anp.sum(((d[n_parms + m:].reshape(x_shape) - d[n_parms:n_parms + m].reshape(x_shape)) / dx_f) ** 2) +275 return chisq +276 +277 jac_jac_x = jacobian(jacobian(odr_chisquare_compact_x))(np.concatenate((fitp, out.xplus.ravel(), x_f.ravel()))) +278 +279 # Compute hess^{-1} @ jac_jac_x[:n_parms + m, n_parms + m:] using LAPACK dgesv +280 try: +281 deriv_x = -scipy.linalg.solve(hess, jac_jac_x[:n_parms + m, n_parms + m:]) +282 except np.linalg.LinAlgError: +283 raise Exception("Cannot invert hessian matrix.") +284 +285 def odr_chisquare_compact_y(d): +286 model = func(d[:n_parms], d[n_parms:n_parms + m].reshape(x_shape)) +287 chisq = anp.sum(((d[n_parms + m:] - model) / dy_f) ** 2) + anp.sum(((x_f - d[n_parms:n_parms + m].reshape(x_shape)) / dx_f) ** 2) +288 return chisq +289 +290 jac_jac_y = jacobian(jacobian(odr_chisquare_compact_y))(np.concatenate((fitp, out.xplus.ravel(), y_f))) +291 +292 # Compute hess^{-1} @ jac_jac_y[:n_parms + m, n_parms + m:] using LAPACK dgesv +293 try: +294 deriv_y = -scipy.linalg.solve(hess, jac_jac_y[:n_parms + m, n_parms + m:]) +295 except np.linalg.LinAlgError: +296 raise Exception("Cannot invert hessian matrix.") 297 -298 deriv_y = -hess_inv @ jac_jac_y[:n_parms + m, n_parms + m:] -299 -300 result = [] -301 for i in range(n_parms): -302 result.append(derived_observable(lambda my_var, **kwargs: (my_var[0] + np.finfo(np.float64).eps) / (x.ravel()[0].value + np.finfo(np.float64).eps) * out.beta[i], list(x.ravel()) + list(y), man_grad=list(deriv_x[i]) + list(deriv_y[i]))) +298 result = [] +299 for i in range(n_parms): +300 result.append(derived_observable(lambda my_var, **kwargs: (my_var[0] + np.finfo(np.float64).eps) / (x.ravel()[0].value + np.finfo(np.float64).eps) * out.beta[i], list(x.ravel()) + list(y), man_grad=list(deriv_x[i]) + list(deriv_y[i]))) +301 +302 output.fit_parameters = result 303 -304 output.fit_parameters = result -305 -306 output.odr_chisquare = odr_chisquare(np.concatenate((out.beta, out.xplus.ravel()))) -307 output.dof = x.shape[-1] - n_parms -308 output.p_value = 1 - chi2.cdf(output.odr_chisquare, output.dof) -309 -310 return output +304 output.odr_chisquare = odr_chisquare(np.concatenate((out.beta, out.xplus.ravel()))) +305 output.dof = x.shape[-1] - n_parms +306 output.p_value = 1 - chi2.cdf(output.odr_chisquare, output.dof) +307 +308 return output @@ -1360,30 +1350,30 @@ has to be calculated (default False). -
610def fit_lin(x, y, **kwargs):
-611    """Performs a linear fit to y = n + m * x and returns two Obs n, m.
-612
-613    Parameters
-614    ----------
-615    x : list
-616        Can either be a list of floats in which case no xerror is assumed, or
-617        a list of Obs, where the dvalues of the Obs are used as xerror for the fit.
-618    y : list
-619        List of Obs, the dvalues of the Obs are used as yerror for the fit.
-620    """
-621
-622    def f(a, x):
-623        y = a[0] + a[1] * x
-624        return y
-625
-626    if all(isinstance(n, Obs) for n in x):
-627        out = total_least_squares(x, y, f, **kwargs)
-628        return out.fit_parameters
-629    elif all(isinstance(n, float) or isinstance(n, int) for n in x) or isinstance(x, np.ndarray):
-630        out = least_squares(x, y, f, **kwargs)
-631        return out.fit_parameters
-632    else:
-633        raise Exception('Unsupported types for x')
+            602def fit_lin(x, y, **kwargs):
+603    """Performs a linear fit to y = n + m * x and returns two Obs n, m.
+604
+605    Parameters
+606    ----------
+607    x : list
+608        Can either be a list of floats in which case no xerror is assumed, or
+609        a list of Obs, where the dvalues of the Obs are used as xerror for the fit.
+610    y : list
+611        List of Obs, the dvalues of the Obs are used as yerror for the fit.
+612    """
+613
+614    def f(a, x):
+615        y = a[0] + a[1] * x
+616        return y
+617
+618    if all(isinstance(n, Obs) for n in x):
+619        out = total_least_squares(x, y, f, **kwargs)
+620        return out.fit_parameters
+621    elif all(isinstance(n, float) or isinstance(n, int) for n in x) or isinstance(x, np.ndarray):
+622        out = least_squares(x, y, f, **kwargs)
+623        return out.fit_parameters
+624    else:
+625        raise Exception('Unsupported types for x')
 
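The substantive change in this patch swaps the explicit Hessian inverse (np.linalg.inv(hess) followed by a matrix product) for a direct linear solve via scipy.linalg.solve, i.e. LAPACK dgesv, at every call site. A minimal sketch of the equivalence, using random stand-ins for hess and jac_jac rather than actual fit output:

    import numpy as np
    import scipy.linalg

    n_parms = 3
    rng = np.random.default_rng(0)
    A = rng.normal(size=(n_parms, n_parms))
    hess = A @ A.T + n_parms * np.eye(n_parms)      # symmetric positive definite stand-in
    jac_jac = rng.normal(size=(n_parms, 5))         # stand-in for jac_jac[:n_parms, n_parms:]

    deriv_old = -np.linalg.inv(hess) @ jac_jac      # removed approach: form the inverse
    deriv_new = -scipy.linalg.solve(hess, jac_jac)  # new approach: solve the system (dgesv)
    assert np.allclose(deriv_old, deriv_new)

Solving the system avoids forming the inverse explicitly, which is cheaper and better behaved for ill-conditioned Hessians; a singular matrix still raises LinAlgError, which the new code converts into the familiar "Cannot invert hessian matrix." exception.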
@@ -1413,30 +1403,30 @@ List of Obs, the dvalues of the Obs are used as yerror for the fit.
-636def qqplot(x, o_y, func, p):
-637    """Generates a quantile-quantile plot of the fit result which can be used to
-638       check if the residuals of the fit are gaussian distributed.
-639    """
-640
-641    residuals = []
-642    for i_x, i_y in zip(x, o_y):
-643        residuals.append((i_y - func(p, i_x)) / i_y.dvalue)
-644    residuals = sorted(residuals)
-645    my_y = [o.value for o in residuals]
-646    probplot = scipy.stats.probplot(my_y)
-647    my_x = probplot[0][0]
-648    plt.figure(figsize=(8, 8 / 1.618))
-649    plt.errorbar(my_x, my_y, fmt='o')
-650    fit_start = my_x[0]
-651    fit_stop = my_x[-1]
-652    samples = np.arange(fit_start, fit_stop, 0.01)
-653    plt.plot(samples, samples, 'k--', zorder=11, label='Standard normal distribution')
-654    plt.plot(samples, probplot[1][0] * samples + probplot[1][1], zorder=10, label='Least squares fit, r=' + str(np.around(probplot[1][2], 3)), marker='', ls='-')
-655
-656    plt.xlabel('Theoretical quantiles')
-657    plt.ylabel('Ordered Values')
-658    plt.legend()
-659    plt.draw()
+            628def qqplot(x, o_y, func, p):
+629    """Generates a quantile-quantile plot of the fit result which can be used to
+630       check if the residuals of the fit are gaussian distributed.
+631    """
+632
+633    residuals = []
+634    for i_x, i_y in zip(x, o_y):
+635        residuals.append((i_y - func(p, i_x)) / i_y.dvalue)
+636    residuals = sorted(residuals)
+637    my_y = [o.value for o in residuals]
+638    probplot = scipy.stats.probplot(my_y)
+639    my_x = probplot[0][0]
+640    plt.figure(figsize=(8, 8 / 1.618))
+641    plt.errorbar(my_x, my_y, fmt='o')
+642    fit_start = my_x[0]
+643    fit_stop = my_x[-1]
+644    samples = np.arange(fit_start, fit_stop, 0.01)
+645    plt.plot(samples, samples, 'k--', zorder=11, label='Standard normal distribution')
+646    plt.plot(samples, probplot[1][0] * samples + probplot[1][1], zorder=10, label='Least squares fit, r=' + str(np.around(probplot[1][2], 3)), marker='', ls='-')
+647
+648    plt.xlabel('Theoretical quantiles')
+649    plt.ylabel('Ordered Values')
+650    plt.legend()
+651    plt.draw()
 
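A minimal usage sketch for fit_lin and qqplot, assuming pyerrors is installed and imported as pe; the ensemble name 'ens1' and the synthetic samples are illustrative and not part of the library:

    import numpy as np
    import pyerrors as pe

    x = [1.0, 2.0, 3.0, 4.0]
    # synthetic observables scattered around y = 1 + 2 * x
    y = [pe.Obs([np.random.normal(1.0 + 2.0 * xi, 0.1, 1000)], ['ens1']) for xi in x]
    for o in y:
        o.gamma_method()                  # errors must be available before fitting

    n, m = pe.fits.fit_lin(x, y)          # intercept and slope as Obs

    # quantile-quantile plot of the standardized fit residuals
    pe.fits.qqplot(x, y, lambda p, t: p[0] + p[1] * t, [n, m])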
@@ -1457,34 +1447,34 @@ check if the residuals of the fit are gaussian distributed.

-662def residual_plot(x, y, func, fit_res):
-663    """ Generates a plot which compares the fit to the data and displays the corresponding residuals"""
-664    sorted_x = sorted(x)
-665    xstart = sorted_x[0] - 0.5 * (sorted_x[1] - sorted_x[0])
-666    xstop = sorted_x[-1] + 0.5 * (sorted_x[-1] - sorted_x[-2])
-667    x_samples = np.arange(xstart, xstop + 0.01, 0.01)
-668
-669    plt.figure(figsize=(8, 8 / 1.618))
-670    gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1], wspace=0.0, hspace=0.0)
-671    ax0 = plt.subplot(gs[0])
-672    ax0.errorbar(x, [o.value for o in y], yerr=[o.dvalue for o in y], ls='none', fmt='o', capsize=3, markersize=5, label='Data')
-673    ax0.plot(x_samples, func([o.value for o in fit_res], x_samples), label='Fit', zorder=10, ls='-', ms=0)
-674    ax0.set_xticklabels([])
-675    ax0.set_xlim([xstart, xstop])
-676    ax0.set_xticklabels([])
-677    ax0.legend()
-678
-679    residuals = (np.asarray([o.value for o in y]) - func([o.value for o in fit_res], x)) / np.asarray([o.dvalue for o in y])
-680    ax1 = plt.subplot(gs[1])
-681    ax1.plot(x, residuals, 'ko', ls='none', markersize=5)
-682    ax1.tick_params(direction='out')
-683    ax1.tick_params(axis="x", bottom=True, top=True, labelbottom=True)
-684    ax1.axhline(y=0.0, ls='--', color='k', marker=" ")
-685    ax1.fill_between(x_samples, -1.0, 1.0, alpha=0.1, facecolor='k')
-686    ax1.set_xlim([xstart, xstop])
-687    ax1.set_ylabel('Residuals')
-688    plt.subplots_adjust(wspace=None, hspace=None)
-689    plt.draw()
+            654def residual_plot(x, y, func, fit_res):
+655    """ Generates a plot which compares the fit to the data and displays the corresponding residuals"""
+656    sorted_x = sorted(x)
+657    xstart = sorted_x[0] - 0.5 * (sorted_x[1] - sorted_x[0])
+658    xstop = sorted_x[-1] + 0.5 * (sorted_x[-1] - sorted_x[-2])
+659    x_samples = np.arange(xstart, xstop + 0.01, 0.01)
+660
+661    plt.figure(figsize=(8, 8 / 1.618))
+662    gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1], wspace=0.0, hspace=0.0)
+663    ax0 = plt.subplot(gs[0])
+664    ax0.errorbar(x, [o.value for o in y], yerr=[o.dvalue for o in y], ls='none', fmt='o', capsize=3, markersize=5, label='Data')
+665    ax0.plot(x_samples, func([o.value for o in fit_res], x_samples), label='Fit', zorder=10, ls='-', ms=0)
+666    ax0.set_xticklabels([])
+667    ax0.set_xlim([xstart, xstop])
+668    ax0.set_xticklabels([])
+669    ax0.legend()
+670
+671    residuals = (np.asarray([o.value for o in y]) - func([o.value for o in fit_res], x)) / np.asarray([o.dvalue for o in y])
+672    ax1 = plt.subplot(gs[1])
+673    ax1.plot(x, residuals, 'ko', ls='none', markersize=5)
+674    ax1.tick_params(direction='out')
+675    ax1.tick_params(axis="x", bottom=True, top=True, labelbottom=True)
+676    ax1.axhline(y=0.0, ls='--', color='k', marker=" ")
+677    ax1.fill_between(x_samples, -1.0, 1.0, alpha=0.1, facecolor='k')
+678    ax1.set_xlim([xstart, xstop])
+679    ax1.set_ylabel('Residuals')
+680    plt.subplots_adjust(wspace=None, hspace=None)
+681    plt.draw()
 
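residual_plot standardizes the residuals by the data errors and shades the ±1σ band, so for a good fit roughly 68% of the points should fall inside it. The core computation, reproduced with toy numbers in plain numpy:

    import numpy as np

    y_val = np.array([1.02, 1.98, 3.05])   # measured values
    y_err = np.array([0.05, 0.04, 0.06])   # their errors (dvalue)
    model = np.array([1.00, 2.00, 3.00])   # fit prediction at the same x
    residuals = (y_val - model) / y_err    # approximately N(0, 1) for a good fit

The same plot can also be requested directly from the fitting routines by passing resplot=True.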
@@ -1504,22 +1494,22 @@ check if the residuals of the fit are gaussian distributed.

-692def error_band(x, func, beta):
-693    """Returns the error band for an array of sample values x, for given fit function func with optimized parameters beta."""
-694    cov = covariance(beta)
-695    if np.any(np.abs(cov - cov.T) > 1000 * np.finfo(np.float64).eps):
-696        warnings.warn("Covariance matrix is not symmetric within floating point precision", RuntimeWarning)
-697
-698    deriv = []
-699    for i, item in enumerate(x):
-700        deriv.append(np.array(egrad(func)([o.value for o in beta], item)))
-701
-702    err = []
-703    for i, item in enumerate(x):
-704        err.append(np.sqrt(deriv[i] @ cov @ deriv[i]))
-705    err = np.array(err)
-706
-707    return err
+            684def error_band(x, func, beta):
+685    """Returns the error band for an array of sample values x, for given fit function func with optimized parameters beta."""
+686    cov = covariance(beta)
+687    if np.any(np.abs(cov - cov.T) > 1000 * np.finfo(np.float64).eps):
+688        warnings.warn("Covariance matrix is not symmetric within floating point precision", RuntimeWarning)
+689
+690    deriv = []
+691    for i, item in enumerate(x):
+692        deriv.append(np.array(egrad(func)([o.value for o in beta], item)))
+693
+694    err = []
+695    for i, item in enumerate(x):
+696        err.append(np.sqrt(deriv[i] @ cov @ deriv[i]))
+697    err = np.array(err)
+698
+699    return err
 
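error_band implements standard linear error propagation, err(x) = sqrt(grad(x) @ cov @ grad(x)), with the gradient taken with respect to the fit parameters via autograd's egrad. For a linear model f(p, x) = p[0] + p[1] * x the gradient is (1, x), so a single band point reduces to the following sketch (toy covariance matrix, not pyerrors output):

    import numpy as np

    cov = np.array([[0.04, -0.01],
                    [-0.01, 0.02]])      # toy parameter covariance
    x_val = 1.5
    grad = np.array([1.0, x_val])        # (df/dp0, df/dp1) at x_val
    err = np.sqrt(grad @ cov @ grad)     # half-width of the 1-sigma band at x_val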
@@ -1539,44 +1529,44 @@ check if the residuals of the fit are gaussian distributed.

-710def ks_test(objects=None):
-711    """Performs a Kolmogorov–Smirnov test for the p-values of all fit objects.
-712
-713    Parameters
-714    ----------
-715    objects : list
-716        List of fit results to include in the analysis (optional).
-717    """
+            702def ks_test(objects=None):
+703    """Performs a Kolmogorov–Smirnov test for the p-values of all fit objects.
+704
+705    Parameters
+706    ----------
+707    objects : list
+708        List of fit results to include in the analysis (optional).
+709    """
+710
+711    if objects is None:
+712        obs_list = []
+713        for obj in gc.get_objects():
+714            if isinstance(obj, Fit_result):
+715                obs_list.append(obj)
+716    else:
+717        obs_list = objects
 718
-719    if objects is None:
-720        obs_list = []
-721        for obj in gc.get_objects():
-722            if isinstance(obj, Fit_result):
-723                obs_list.append(obj)
-724    else:
-725        obs_list = objects
-726
-727    p_values = [o.p_value for o in obs_list]
-728
-729    bins = len(p_values)
-730    x = np.arange(0, 1.001, 0.001)
-731    plt.plot(x, x, 'k', zorder=1)
-732    plt.xlim(0, 1)
-733    plt.ylim(0, 1)
-734    plt.xlabel('p-value')
-735    plt.ylabel('Cumulative probability')
-736    plt.title(str(bins) + ' p-values')
-737
-738    n = np.arange(1, bins + 1) / np.float64(bins)
-739    Xs = np.sort(p_values)
-740    plt.step(Xs, n)
-741    diffs = n - Xs
-742    loc_max_diff = np.argmax(np.abs(diffs))
-743    loc = Xs[loc_max_diff]
-744    plt.annotate('', xy=(loc, loc), xytext=(loc, loc + diffs[loc_max_diff]), arrowprops=dict(arrowstyle='<->', shrinkA=0, shrinkB=0))
-745    plt.draw()
-746
-747    print(scipy.stats.kstest(p_values, 'uniform'))
+719    p_values = [o.p_value for o in obs_list]
+720
+721    bins = len(p_values)
+722    x = np.arange(0, 1.001, 0.001)
+723    plt.plot(x, x, 'k', zorder=1)
+724    plt.xlim(0, 1)
+725    plt.ylim(0, 1)
+726    plt.xlabel('p-value')
+727    plt.ylabel('Cumulative probability')
+728    plt.title(str(bins) + ' p-values')
+729
+730    n = np.arange(1, bins + 1) / np.float64(bins)
+731    Xs = np.sort(p_values)
+732    plt.step(Xs, n)
+733    diffs = n - Xs
+734    loc_max_diff = np.argmax(np.abs(diffs))
+735    loc = Xs[loc_max_diff]
+736    plt.annotate('', xy=(loc, loc), xytext=(loc, loc + diffs[loc_max_diff]), arrowprops=dict(arrowstyle='<->', shrinkA=0, shrinkB=0))
+737    plt.draw()
+738
+739    print(scipy.stats.kstest(p_values, 'uniform'))
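For correctly specified fits the p-values are uniform on [0, 1]; ks_test plots their empirical CDF against the diagonal and prints the Kolmogorov-Smirnov statistic of the deviation. A self-contained sketch of the final test, with random numbers standing in for the collected Fit_result.p_value entries:

    import numpy as np
    import scipy.stats

    p_values = np.random.uniform(0, 1, 50)   # stand-ins for fit p-values
    print(scipy.stats.kstest(p_values, 'uniform'))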