From fb8f9e95906e2a18a9c2e00c2469ab021d0456bc Mon Sep 17 00:00:00 2001
From: fjosw
Date: Wed, 25 May 2022 14:22:30 +0000
Subject: [PATCH] Documentation updated

---
 docs/pyerrors/fits.html | 1190 ++++++++++++++++++++-------------------
 1 file changed, 610 insertions(+), 580 deletions(-)

diff --git a/docs/pyerrors/fits.html b/docs/pyerrors/fits.html
index a7a5946d..68f884fc 100644
--- a/docs/pyerrors/fits.html
+++ b/docs/pyerrors/fits.html
@@ -367,461 +367,481 @@
 264
 265    fitp = out.beta
 266    try:
-267        hess_inv = np.linalg.pinv(jacobian(jacobian(odr_chisquare))(np.concatenate((fitp, out.xplus.ravel()))))
+267        hess = jacobian(jacobian(odr_chisquare))(np.concatenate((fitp, out.xplus.ravel())))
 268    except TypeError:
 269        raise Exception("It is required to use autograd.numpy instead of numpy within fit functions, see the documentation for details.") from None
-270
-271    def odr_chisquare_compact_x(d):
-272        model = func(d[:n_parms], d[n_parms:n_parms + m].reshape(x_shape))
-273        chisq = anp.sum(((y_f - model) / dy_f) ** 2) + anp.sum(((d[n_parms + m:].reshape(x_shape) - d[n_parms:n_parms + m].reshape(x_shape)) / dx_f) ** 2)
-274        return chisq
-275
-276    jac_jac_x = jacobian(jacobian(odr_chisquare_compact_x))(np.concatenate((fitp, out.xplus.ravel(), x_f.ravel())))
-277
-278    deriv_x = -hess_inv @ jac_jac_x[:n_parms + m, n_parms + m:]
-279
-280    def odr_chisquare_compact_y(d):
-281        model = func(d[:n_parms], d[n_parms:n_parms + m].reshape(x_shape))
-282        chisq = anp.sum(((d[n_parms + m:] - model) / dy_f) ** 2) + anp.sum(((x_f - d[n_parms:n_parms + m].reshape(x_shape)) / dx_f) ** 2)
-283        return chisq
-284
-285    jac_jac_y = jacobian(jacobian(odr_chisquare_compact_y))(np.concatenate((fitp, out.xplus.ravel(), y_f)))
-286
-287    deriv_y = -hess_inv @ jac_jac_y[:n_parms + m, n_parms + m:]
-288
-289    result = []
-290    for i in range(n_parms):
-291        result.append(derived_observable(lambda my_var, **kwargs: (my_var[0] + np.finfo(np.float64).eps) / (x.ravel()[0].value + np.finfo(np.float64).eps) * out.beta[i], list(x.ravel()) + list(y), man_grad=list(deriv_x[i]) + list(deriv_y[i])))
-292
-293    output.fit_parameters = result
+270    condn = np.linalg.cond(hess)
+271    if condn > 1e8:
+272        warnings.warn("Hessian matrix might be ill-conditioned ({0:1.2e}), error propagation might be unreliable.\n \
+273                      Maybe try rescaling the problem such that all parameters are of O(1).".format(condn), RuntimeWarning)
+274    try:
+275        hess_inv = np.linalg.inv(hess)
+276    except np.linalg.LinAlgError:
+277        raise Exception("Cannot invert Hessian matrix.")
+278    except Exception:
+279        raise Exception("Unknown error in connection with Hessian inverse.")
+280
+281    def odr_chisquare_compact_x(d):
+282        model = func(d[:n_parms], d[n_parms:n_parms + m].reshape(x_shape))
+283        chisq = anp.sum(((y_f - model) / dy_f) ** 2) + anp.sum(((d[n_parms + m:].reshape(x_shape) - d[n_parms:n_parms + m].reshape(x_shape)) / dx_f) ** 2)
+284        return chisq
+285
+286    jac_jac_x = jacobian(jacobian(odr_chisquare_compact_x))(np.concatenate((fitp, out.xplus.ravel(), x_f.ravel())))
+287
+288    deriv_x = -hess_inv @ jac_jac_x[:n_parms + m, n_parms + m:]
+289
+290    def odr_chisquare_compact_y(d):
+291        model = func(d[:n_parms], d[n_parms:n_parms + m].reshape(x_shape))
+292        chisq = anp.sum(((d[n_parms + m:] - model) / dy_f) ** 2) + anp.sum(((x_f - d[n_parms:n_parms + m].reshape(x_shape)) / dx_f) ** 2)
+293        return chisq
 294
-295    output.odr_chisquare = odr_chisquare(np.concatenate((out.beta, out.xplus.ravel())))
-296    output.dof = x.shape[-1] - n_parms
-297    output.p_value = 1 - chi2.cdf(output.odr_chisquare, output.dof)
+295    jac_jac_y = jacobian(jacobian(odr_chisquare_compact_y))(np.concatenate((fitp, out.xplus.ravel(), y_f)))
+296
+297    deriv_y = -hess_inv @ jac_jac_y[:n_parms + m, n_parms + m:]
 298
-299    return output
-300
-301
-302def _prior_fit(x, y, func, priors, silent=False, **kwargs):
-303    output = Fit_result()
+299    result = []
+300    for i in range(n_parms):
+301        result.append(derived_observable(lambda my_var, **kwargs: (my_var[0] + np.finfo(np.float64).eps) / (x.ravel()[0].value + np.finfo(np.float64).eps) * out.beta[i], list(x.ravel()) + list(y), man_grad=list(deriv_x[i]) + list(deriv_y[i])))
+302
+303    output.fit_parameters = result
 304
-305    output.fit_function = func
-306
-307    x = np.asarray(x)
+305    output.odr_chisquare = odr_chisquare(np.concatenate((out.beta, out.xplus.ravel())))
+306    output.dof = x.shape[-1] - n_parms
+307    output.p_value = 1 - chi2.cdf(output.odr_chisquare, output.dof)
 308
-309    if not callable(func):
-310        raise TypeError('func has to be a function.')
+309    return output
+310
 311
-312    for i in range(100):
-313        try:
-314            func(np.arange(i), 0)
-315        except Exception:
-316            pass
-317        else:
-318            break
-319
-320    n_parms = i
+312def _prior_fit(x, y, func, priors, silent=False, **kwargs):
+313    output = Fit_result()
+314
+315    output.fit_function = func
+316
+317    x = np.asarray(x)
+318
+319    if not callable(func):
+320        raise TypeError('func has to be a function.')
 321
-322    if n_parms != len(priors):
-323        raise Exception('Priors do not have the correct length.')
-324
-325    def extract_val_and_dval(string):
-326        split_string = string.split('(')
-327        if '.' in split_string[0] and '.' not in split_string[1][:-1]:
-328            factor = 10 ** -len(split_string[0].partition('.')[2])
-329        else:
-330            factor = 1
-331        return float(split_string[0]), float(split_string[1][:-1]) * factor
-332
-333    loc_priors = []
-334    for i_n, i_prior in enumerate(priors):
-335        if isinstance(i_prior, Obs):
-336            loc_priors.append(i_prior)
-337        else:
-338            loc_val, loc_dval = extract_val_and_dval(i_prior)
-339            loc_priors.append(cov_Obs(loc_val, loc_dval ** 2, '#prior' + str(i_n) + f"_{np.random.randint(2147483647):010d}"))
-340
-341    output.priors = loc_priors
+322    for i in range(100):
+323        try:
+324            func(np.arange(i), 0)
+325        except Exception:
+326            pass
+327        else:
+328            break
+329
+330    n_parms = i
+331
+332    if n_parms != len(priors):
+333        raise Exception('Priors do not have the correct length.')
+334
+335    def extract_val_and_dval(string):
+336        split_string = string.split('(')
+337        if '.' in split_string[0] and '.' not in split_string[1][:-1]:
+338            factor = 10 ** -len(split_string[0].partition('.')[2])
+339        else:
+340            factor = 1
+341        return float(split_string[0]), float(split_string[1][:-1]) * factor
 342
-343    if not silent:
-344        print('Fit with', n_parms, 'parameter' + 's' * (n_parms > 1))
-345
-346    y_f = [o.value for o in y]
-347    dy_f = [o.dvalue for o in y]
-348
-349    if np.any(np.asarray(dy_f) <= 0.0):
-350        raise Exception('No y errors available, run the gamma method first.')
-351
-352    p_f = [o.value for o in loc_priors]
-353    dp_f = [o.dvalue for o in loc_priors]
-354
-355    if np.any(np.asarray(dp_f) <= 0.0):
-356        raise Exception('No prior errors available, run the gamma method first.')
-357
-358    if 'initial_guess' in kwargs:
-359        x0 = kwargs.get('initial_guess')
-360        if len(x0) != n_parms:
-361            raise Exception('Initial guess does not have the correct length.')
-362    else:
-363        x0 = p_f
+343    loc_priors = []
+344    for i_n, i_prior in enumerate(priors):
+345        if isinstance(i_prior, Obs):
+346            loc_priors.append(i_prior)
+347        else:
+348            loc_val, loc_dval = extract_val_and_dval(i_prior)
+349            loc_priors.append(cov_Obs(loc_val, loc_dval ** 2, '#prior' + str(i_n) + f"_{np.random.randint(2147483647):010d}"))
+350
+351    output.priors = loc_priors
+352
+353    if not silent:
+354        print('Fit with', n_parms, 'parameter' + 's' * (n_parms > 1))
+355
+356    y_f = [o.value for o in y]
+357    dy_f = [o.dvalue for o in y]
+358
+359    if np.any(np.asarray(dy_f) <= 0.0):
+360        raise Exception('No y errors available, run the gamma method first.')
+361
+362    p_f = [o.value for o in loc_priors]
+363    dp_f = [o.dvalue for o in loc_priors]
 364
-365    def chisqfunc(p):
-366        model = func(p, x)
-367        chisq = anp.sum(((y_f - model) / dy_f) ** 2) + anp.sum(((p_f - p) / dp_f) ** 2)
-368        return chisq
-369
-370    if not silent:
-371        print('Method: migrad')
-372
-373    m = iminuit.Minuit(chisqfunc, x0)
-374    m.errordef = 1
-375    m.print_level = 0
-376    if 'tol' in kwargs:
-377        m.tol = kwargs.get('tol')
-378    else:
-379        m.tol = 1e-4
-380    m.migrad()
-381    params = np.asarray(m.values)
+365    if np.any(np.asarray(dp_f) <= 0.0):
+366        raise Exception('No prior errors available, run the gamma method first.')
+367
+368    if 'initial_guess' in kwargs:
+369        x0 = kwargs.get('initial_guess')
+370        if len(x0) != n_parms:
+371            raise Exception('Initial guess does not have the correct length.')
+372    else:
+373        x0 = p_f
+374
+375    def chisqfunc(p):
+376        model = func(p, x)
+377        chisq = anp.sum(((y_f - model) / dy_f) ** 2) + anp.sum(((p_f - p) / dp_f) ** 2)
+378        return chisq
+379
+380    if not silent:
+381        print('Method: migrad')
 382
-383    output.chisquare_by_dof = m.fval / len(x)
-384
-385    output.method = 'migrad'
-386
-387    if not silent:
-388        print('chisquare/d.o.f.:', output.chisquare_by_dof)
-389
-390    if not m.fmin.is_valid:
-391        raise Exception('The minimization procedure did not converge.')
+383    m = iminuit.Minuit(chisqfunc, x0)
+384    m.errordef = 1
+385    m.print_level = 0
+386    if 'tol' in kwargs:
+387        m.tol = kwargs.get('tol')
+388    else:
+389        m.tol = 1e-4
+390    m.migrad()
+391    params = np.asarray(m.values)
 392
-393    hess_inv = np.linalg.pinv(jacobian(jacobian(chisqfunc))(params))
+393    output.chisquare_by_dof = m.fval / len(x)
 394
-395    def chisqfunc_compact(d):
-396        model = func(d[:n_parms], x)
-397        chisq = anp.sum(((d[n_parms: n_parms + len(x)] - model) / dy_f) ** 2) + anp.sum(((d[n_parms + len(x):] - d[:n_parms]) / dp_f) ** 2)
-398        return chisq
+395    output.method = 'migrad'
+396
+397    if not silent:
+398        print('chisquare/d.o.f.:', output.chisquare_by_dof)
 399
-400    jac_jac = jacobian(jacobian(chisqfunc_compact))(np.concatenate((params, y_f, p_f)))
-401
-402    deriv = -hess_inv @ jac_jac[:n_parms, n_parms:]
-403
-404    result = []
-405    for i in range(n_parms):
-406        result.append(derived_observable(lambda x, **kwargs: (x[0] + np.finfo(np.float64).eps) / (y[0].value + np.finfo(np.float64).eps) * params[i], list(y) + list(loc_priors), man_grad=list(deriv[i])))
-407
-408    output.fit_parameters = result
-409    output.chisquare = chisqfunc(np.asarray(params))
-410
-411    if kwargs.get('resplot') is True:
-412        residual_plot(x, y, func, result)
+400    if not m.fmin.is_valid:
+401        raise Exception('The minimization procedure did not converge.')
+402
+403    hess_inv = np.linalg.pinv(jacobian(jacobian(chisqfunc))(params))
+404
+405    def chisqfunc_compact(d):
+406        model = func(d[:n_parms], x)
+407        chisq = anp.sum(((d[n_parms: n_parms + len(x)] - model) / dy_f) ** 2) + anp.sum(((d[n_parms + len(x):] - d[:n_parms]) / dp_f) ** 2)
+408        return chisq
+409
+410    jac_jac = jacobian(jacobian(chisqfunc_compact))(np.concatenate((params, y_f, p_f)))
+411
+412    deriv = -hess_inv @ jac_jac[:n_parms, n_parms:]
 413
-414    if kwargs.get('qqplot') is True:
-415        qqplot(x, y, func, result)
-416
-417    return output
-418
-419
-420def _standard_fit(x, y, func, silent=False, **kwargs):
-421
-422    output = Fit_result()
+414    result = []
+415    for i in range(n_parms):
+416        result.append(derived_observable(lambda x, **kwargs: (x[0] + np.finfo(np.float64).eps) / (y[0].value + np.finfo(np.float64).eps) * params[i], list(y) + list(loc_priors), man_grad=list(deriv[i])))
+417
+418    output.fit_parameters = result
+419    output.chisquare = chisqfunc(np.asarray(params))
+420
+421    if kwargs.get('resplot') is True:
+422        residual_plot(x, y, func, result)
 423
-424    output.fit_function = func
-425
-426    x = np.asarray(x)
-427
-428    if x.shape[-1] != len(y):
-429        raise Exception('x and y input have to have the same length')
-430
-431    if len(x.shape) > 2:
-432        raise Exception('Unknown format for x values')
+424    if kwargs.get('qqplot') is True:
+425        qqplot(x, y, func, result)
+426
+427    return output
+428
+429
+430def _standard_fit(x, y, func, silent=False, **kwargs):
+431
+432    output = Fit_result()
 433
-434    if not callable(func):
-435        raise TypeError('func has to be a function.')
-436
-437    for i in range(25):
-438        try:
-439            func(np.arange(i), x.T[0])
-440        except Exception:
-441            pass
-442        else:
-443            break
-444
-445    n_parms = i
+434    output.fit_function = func
+435
+436    x = np.asarray(x)
+437
+438    if x.shape[-1] != len(y):
+439        raise Exception('x and y input have to have the same length')
+440
+441    if len(x.shape) > 2:
+442        raise Exception('Unknown format for x values')
+443
+444    if not callable(func):
+445        raise TypeError('func has to be a function.')
 446
-447    if not silent:
-448        print('Fit with', n_parms, 'parameter' + 's' * (n_parms > 1))
-449
-450    y_f = [o.value for o in y]
-451    dy_f = [o.dvalue for o in y]
-452
-453    if np.any(np.asarray(dy_f) <= 0.0):
-454        raise Exception('No y errors available, run the gamma method first.')
-455
-456    if 'initial_guess' in kwargs:
-457        x0 = kwargs.get('initial_guess')
-458        if len(x0) != n_parms:
-459            raise Exception('Initial guess does not have the correct length: %d vs. %d' % (len(x0), n_parms))
-460    else:
-461        x0 = [0.1] * n_parms
+447    for i in range(25):
+448        try:
+449            func(np.arange(i), x.T[0])
+450        except Exception:
+451            pass
+452        else:
+453            break
+454
+455    n_parms = i
+456
+457    if not silent:
+458        print('Fit with', n_parms, 'parameter' + 's' * (n_parms > 1))
+459
+460    y_f = [o.value for o in y]
+461    dy_f = [o.dvalue for o in y]
 462
-463    if kwargs.get('correlated_fit') is True:
-464        corr = covariance(y, correlation=True, **kwargs)
-465        covdiag = np.diag(1 / np.asarray(dy_f))
-466        condn = np.linalg.cond(corr)
-467        if condn > 0.1 / np.finfo(float).eps:
-468            raise Exception(f"Cannot invert correlation matrix as its condition number exceeds machine precision ({condn:1.2e})")
-469        if condn > 1 / np.sqrt(np.finfo(float).eps):
-470            warnings.warn("Correlation matrix may be ill-conditioned, condition number: {%1.2e}" % (condn), RuntimeWarning)
-471        chol = np.linalg.cholesky(corr)
-472        chol_inv = np.linalg.inv(chol)
-473        chol_inv = np.dot(chol_inv, covdiag)
-474
-475        def chisqfunc(p):
-476            model = func(p, x)
-477            chisq = anp.sum(anp.dot(chol_inv, (y_f - model)) ** 2)
-478            return chisq
-479    else:
-480        def chisqfunc(p):
-481            model = func(p, x)
-482            chisq = anp.sum(((y_f - model) / dy_f) ** 2)
-483            return chisq
+463    if np.any(np.asarray(dy_f) <= 0.0):
+464        raise Exception('No y errors available, run the gamma method first.')
+465
+466    if 'initial_guess' in kwargs:
+467        x0 = kwargs.get('initial_guess')
+468        if len(x0) != n_parms:
+469            raise Exception('Initial guess does not have the correct length: %d vs. %d' % (len(x0), n_parms))
+470    else:
+471        x0 = [0.1] * n_parms
+472
+473    if kwargs.get('correlated_fit') is True:
+474        corr = covariance(y, correlation=True, **kwargs)
+475        covdiag = np.diag(1 / np.asarray(dy_f))
+476        condn = np.linalg.cond(corr)
+477        if condn > 0.1 / np.finfo(float).eps:
+478            raise Exception(f"Cannot invert correlation matrix as its condition number exceeds machine precision ({condn:1.2e})")
+479        if condn > 1 / np.sqrt(np.finfo(float).eps):
+480            warnings.warn("Correlation matrix may be ill-conditioned, condition number: {%1.2e}" % (condn), RuntimeWarning)
+481        chol = np.linalg.cholesky(corr)
+482        chol_inv = np.linalg.inv(chol)
+483        chol_inv = np.dot(chol_inv, covdiag)
 484
-485    output.method = kwargs.get('method', 'Levenberg-Marquardt')
-486    if not silent:
-487        print('Method:', output.method)
-488
-489    if output.method != 'Levenberg-Marquardt':
-490        if output.method == 'migrad':
-491            fit_result = iminuit.minimize(chisqfunc, x0, tol=1e-4)  # Stopping criterion 0.002 * tol * errordef
-492            output.iterations = fit_result.nfev
-493        else:
-494            fit_result = scipy.optimize.minimize(chisqfunc, x0, method=kwargs.get('method'), tol=1e-12)
-495            output.iterations = fit_result.nit
-496
-497        chisquare = fit_result.fun
+485        def chisqfunc(p):
+486            model = func(p, x)
+487            chisq = anp.sum(anp.dot(chol_inv, (y_f - model)) ** 2)
+488            return chisq
+489    else:
+490        def chisqfunc(p):
+491            model = func(p, x)
+492            chisq = anp.sum(((y_f - model) / dy_f) ** 2)
+493            return chisq
+494
+495    output.method = kwargs.get('method', 'Levenberg-Marquardt')
+496    if not silent:
+497        print('Method:', output.method)
 498
-499    else:
-500        if kwargs.get('correlated_fit') is True:
-501            def chisqfunc_residuals(p):
-502                model = func(p, x)
-503                chisq = anp.dot(chol_inv, (y_f - model))
-504                return chisq
-505
-506        else:
-507            def chisqfunc_residuals(p):
-508                model = func(p, x)
-509                chisq = ((y_f - model) / dy_f)
-510                return chisq
-511
-512        fit_result = scipy.optimize.least_squares(chisqfunc_residuals, x0, method='lm', ftol=1e-15, gtol=1e-15, xtol=1e-15)
-513
-514        chisquare = np.sum(fit_result.fun ** 2)
+499    if output.method != 'Levenberg-Marquardt':
+500        if output.method == 'migrad':
+501            fit_result = iminuit.minimize(chisqfunc, x0, tol=1e-4)  # Stopping criterion 0.002 * tol * errordef
+502            output.iterations = fit_result.nfev
+503        else:
+504            fit_result = scipy.optimize.minimize(chisqfunc, x0, method=kwargs.get('method'), tol=1e-12)
+505            output.iterations = fit_result.nit
+506
+507        chisquare = fit_result.fun
+508
+509    else:
+510        if kwargs.get('correlated_fit') is True:
+511            def chisqfunc_residuals(p):
+512                model = func(p, x)
+513                chisq = anp.dot(chol_inv, (y_f - model))
+514                return chisq
 515
-516    output.iterations = fit_result.nfev
-517
-518    if not fit_result.success:
-519        raise Exception('The minimization procedure did not converge.')
-520
-521    if x.shape[-1] - n_parms > 0:
-522        output.chisquare_by_dof = chisquare / (x.shape[-1] - n_parms)
-523    else:
-524        output.chisquare_by_dof = float('nan')
+516        else:
+517            def chisqfunc_residuals(p):
+518                model = func(p, x)
+519                chisq = ((y_f - model) / dy_f)
+520                return chisq
+521
+522        fit_result = scipy.optimize.least_squares(chisqfunc_residuals, x0, method='lm', ftol=1e-15, gtol=1e-15, xtol=1e-15)
+523
+524        chisquare = np.sum(fit_result.fun ** 2)
 525
-526    output.message = fit_result.message
-527    if not silent:
-528        print(fit_result.message)
-529        print('chisquare/d.o.f.:', output.chisquare_by_dof)
+526    output.iterations = fit_result.nfev
+527
+528    if not fit_result.success:
+529        raise Exception('The minimization procedure did not converge.')
 530
-531    if kwargs.get('expected_chisquare') is True:
-532        if kwargs.get('correlated_fit') is not True:
-533            W = np.diag(1 / np.asarray(dy_f))
-534            cov = covariance(y)
-535            A = W @ jacobian(func)(fit_result.x, x)
-536            P_phi = A @ np.linalg.pinv(A.T @ A) @ A.T
-537            expected_chisquare = np.trace((np.identity(x.shape[-1]) - P_phi) @ W @ cov @ W)
-538            output.chisquare_by_expected_chisquare = chisquare / expected_chisquare
-539            if not silent:
-540                print('chisquare/expected_chisquare:',
-541                      output.chisquare_by_expected_chisquare)
-542
-543    fitp = fit_result.x
-544    try:
-545        hess_inv = np.linalg.pinv(jacobian(jacobian(chisqfunc))(fitp))
-546    except TypeError:
-547        raise Exception("It is required to use autograd.numpy instead of numpy within fit functions, see the documentation for details.") from None
-548
-549    if kwargs.get('correlated_fit') is True:
-550        def chisqfunc_compact(d):
-551            model = func(d[:n_parms], x)
-552            chisq = anp.sum(anp.dot(chol_inv, (d[n_parms:] - model)) ** 2)
-553            return chisq
-554
-555    else:
-556        def chisqfunc_compact(d):
-557            model = func(d[:n_parms], x)
-558            chisq = anp.sum(((d[n_parms:] - model) / dy_f) ** 2)
-559            return chisq
-560
-561    jac_jac = jacobian(jacobian(chisqfunc_compact))(np.concatenate((fitp, y_f)))
-562
-563    deriv = -hess_inv @ jac_jac[:n_parms, n_parms:]
-564
-565    result = []
-566    for i in range(n_parms):
-567        result.append(derived_observable(lambda x, **kwargs: (x[0] + np.finfo(np.float64).eps) / (y[0].value + np.finfo(np.float64).eps) * fit_result.x[i], list(y), man_grad=list(deriv[i])))
+531    if x.shape[-1] - n_parms > 0:
+532        output.chisquare_by_dof = chisquare / (x.shape[-1] - n_parms)
+533    else:
+534        output.chisquare_by_dof = float('nan')
+535
+536    output.message = fit_result.message
+537    if not silent:
+538        print(fit_result.message)
+539        print('chisquare/d.o.f.:', output.chisquare_by_dof)
+540
+541    if kwargs.get('expected_chisquare') is True:
+542        if kwargs.get('correlated_fit') is not True:
+543            W = np.diag(1 / np.asarray(dy_f))
+544            cov = covariance(y)
+545            A = W @ jacobian(func)(fit_result.x, x)
+546            P_phi = A @ np.linalg.pinv(A.T @ A) @ A.T
+547            expected_chisquare = np.trace((np.identity(x.shape[-1]) - P_phi) @ W @ cov @ W)
+548            output.chisquare_by_expected_chisquare = chisquare / expected_chisquare
+549            if not silent:
+550                print('chisquare/expected_chisquare:',
+551                      output.chisquare_by_expected_chisquare)
+552
+553    fitp = fit_result.x
+554    try:
+555        hess = jacobian(jacobian(chisqfunc))(fitp)
+556    except TypeError:
+557        raise Exception("It is required to use autograd.numpy instead of numpy within fit functions, see the documentation for details.") from None
+558    condn = np.linalg.cond(hess)
+559    if condn > 1e8:
+560        warnings.warn("Hessian matrix might be ill-conditioned ({0:1.2e}), error propagation might be unreliable.\n \
+561                      Maybe try rescaling the problem such that all parameters are of O(1).".format(condn), RuntimeWarning)
+562    try:
+563        hess_inv = np.linalg.inv(hess)
+564    except np.linalg.LinAlgError:
+565        raise Exception("Cannot invert Hessian matrix.")
+566    except Exception:
+567        raise Exception("Unknown error in connection with Hessian inverse.")
 568
-569    output.fit_parameters = result
-570
-571    output.chisquare = chisqfunc(fit_result.x)
-572    output.dof = x.shape[-1] - n_parms
-573    output.p_value = 1 - chi2.cdf(output.chisquare, output.dof)
+569    if kwargs.get('correlated_fit') is True:
+570        def chisqfunc_compact(d):
+571            model = func(d[:n_parms], x)
+572            chisq = anp.sum(anp.dot(chol_inv, (d[n_parms:] - model)) ** 2)
+573            return chisq
 574
-575    if kwargs.get('resplot') is True:
-576        residual_plot(x, y, func, result)
-577
-578    if kwargs.get('qqplot') is True:
-579        qqplot(x, y, func, result)
+575    else:
+576        def chisqfunc_compact(d):
+577            model = func(d[:n_parms], x)
+578            chisq = anp.sum(((d[n_parms:] - model) / dy_f) ** 2)
+579            return chisq
 580
-581    return output
+581    jac_jac = jacobian(jacobian(chisqfunc_compact))(np.concatenate((fitp, y_f)))
 582
-583
-584def fit_lin(x, y, **kwargs):
-585    """Performs a linear fit to y = n + m * x and returns two Obs n, m.
-586
-587    Parameters
-588    ----------
-589    x : list
-590        Can either be a list of floats in which case no xerror is assumed, or
-591        a list of Obs, where the dvalues of the Obs are used as xerror for the fit.
-592    y : list
-593        List of Obs, the dvalues of the Obs are used as yerror for the fit.
-594    """
-595
-596    def f(a, x):
-597        y = a[0] + a[1] * x
-598        return y
-599
-600    if all(isinstance(n, Obs) for n in x):
-601        out = total_least_squares(x, y, f, **kwargs)
-602        return out.fit_parameters
-603    elif all(isinstance(n, float) or isinstance(n, int) for n in x) or isinstance(x, np.ndarray):
-604        out = least_squares(x, y, f, **kwargs)
-605        return out.fit_parameters
-606    else:
-607        raise Exception('Unsupported types for x')
-608
-609
-610def qqplot(x, o_y, func, p):
-611    """Generates a quantile-quantile plot of the fit result which can be used to
-612       check if the residuals of the fit are Gaussian distributed.
-613    """
-614
-615    residuals = []
-616    for i_x, i_y in zip(x, o_y):
-617        residuals.append((i_y - func(p, i_x)) / i_y.dvalue)
-618    residuals = sorted(residuals)
-619    my_y = [o.value for o in residuals]
-620    probplot = scipy.stats.probplot(my_y)
-621    my_x = probplot[0][0]
-622    plt.figure(figsize=(8, 8 / 1.618))
-623    plt.errorbar(my_x, my_y, fmt='o')
-624    fit_start = my_x[0]
-625    fit_stop = my_x[-1]
-626    samples = np.arange(fit_start, fit_stop, 0.01)
-627    plt.plot(samples, samples, 'k--', zorder=11, label='Standard normal distribution')
-628    plt.plot(samples, probplot[1][0] * samples + probplot[1][1], zorder=10, label='Least squares fit, r=' + str(np.around(probplot[1][2], 3)), marker='', ls='-')
+583    deriv = -hess_inv @ jac_jac[:n_parms, n_parms:]
+584
+585    result = []
+586    for i in range(n_parms):
+587        result.append(derived_observable(lambda x, **kwargs: (x[0] + np.finfo(np.float64).eps) / (y[0].value + np.finfo(np.float64).eps) * fit_result.x[i], list(y), man_grad=list(deriv[i])))
+588
+589    output.fit_parameters = result
+590
+591    output.chisquare = chisqfunc(fit_result.x)
+592    output.dof = x.shape[-1] - n_parms
+593    output.p_value = 1 - chi2.cdf(output.chisquare, output.dof)
+594
+595    if kwargs.get('resplot') is True:
+596        residual_plot(x, y, func, result)
+597
+598    if kwargs.get('qqplot') is True:
+599        qqplot(x, y, func, result)
+600
+601    return output
+602
+603
+604def fit_lin(x, y, **kwargs):
+605    """Performs a linear fit to y = n + m * x and returns two Obs n, m.
+606
+607    Parameters
+608    ----------
+609    x : list
+610        Can either be a list of floats in which case no xerror is assumed, or
+611        a list of Obs, where the dvalues of the Obs are used as xerror for the fit.
+612    y : list
+613        List of Obs, the dvalues of the Obs are used as yerror for the fit.
+614    """
+615
+616    def f(a, x):
+617        y = a[0] + a[1] * x
+618        return y
+619
+620    if all(isinstance(n, Obs) for n in x):
+621        out = total_least_squares(x, y, f, **kwargs)
+622        return out.fit_parameters
+623    elif all(isinstance(n, float) or isinstance(n, int) for n in x) or isinstance(x, np.ndarray):
+624        out = least_squares(x, y, f, **kwargs)
+625        return out.fit_parameters
+626    else:
+627        raise Exception('Unsupported types for x')
+628
 629
-630    plt.xlabel('Theoretical quantiles')
-631    plt.ylabel('Ordered Values')
-632    plt.legend()
-633    plt.draw()
+630def qqplot(x, o_y, func, p):
+631    """Generates a quantile-quantile plot of the fit result which can be used to
+632       check if the residuals of the fit are Gaussian distributed.
+633    """
 634
-635
-636def residual_plot(x, y, func, fit_res):
-637    """ Generates a plot which compares the fit to the data and displays the corresponding residuals"""
-638    sorted_x = sorted(x)
-639    xstart = sorted_x[0] - 0.5 * (sorted_x[1] - sorted_x[0])
-640    xstop = sorted_x[-1] + 0.5 * (sorted_x[-1] - sorted_x[-2])
-641    x_samples = np.arange(xstart, xstop + 0.01, 0.01)
-642
-643    plt.figure(figsize=(8, 8 / 1.618))
-644    gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1], wspace=0.0, hspace=0.0)
-645    ax0 = plt.subplot(gs[0])
-646    ax0.errorbar(x, [o.value for o in y], yerr=[o.dvalue for o in y], ls='none', fmt='o', capsize=3, markersize=5, label='Data')
-647    ax0.plot(x_samples, func([o.value for o in fit_res], x_samples), label='Fit', zorder=10, ls='-', ms=0)
-648    ax0.set_xticklabels([])
-649    ax0.set_xlim([xstart, xstop])
-650    ax0.set_xticklabels([])
-651    ax0.legend()
-652
-653    residuals = (np.asarray([o.value for o in y]) - func([o.value for o in fit_res], x)) / np.asarray([o.dvalue for o in y])
-654    ax1 = plt.subplot(gs[1])
-655    ax1.plot(x, residuals, 'ko', ls='none', markersize=5)
-656    ax1.tick_params(direction='out')
-657    ax1.tick_params(axis="x", bottom=True, top=True, labelbottom=True)
-658    ax1.axhline(y=0.0, ls='--', color='k', marker=" ")
-659    ax1.fill_between(x_samples, -1.0, 1.0, alpha=0.1, facecolor='k')
-660    ax1.set_xlim([xstart, xstop])
-661    ax1.set_ylabel('Residuals')
-662    plt.subplots_adjust(wspace=None, hspace=None)
-663    plt.draw()
-664
-665
-666def error_band(x, func, beta):
-667    """Returns the error band for an array of sample values x, for given fit function func with optimized parameters beta."""
-668    cov = covariance(beta)
-669    if np.any(np.abs(cov - cov.T) > 1000 * np.finfo(np.float64).eps):
-670        warnings.warn("Covariance matrix is not symmetric within floating point precision", RuntimeWarning)
-671
-672    deriv = []
-673    for i, item in enumerate(x):
-674        deriv.append(np.array(egrad(func)([o.value for o in beta], item)))
-675
-676    err = []
-677    for i, item in enumerate(x):
-678        err.append(np.sqrt(deriv[i] @ cov @ deriv[i]))
-679    err = np.array(err)
-680
-681    return err
-682
-683
-684def ks_test(objects=None):
-685    """Performs a Kolmogorov–Smirnov test for the p-values of all fit objects.
-686
-687    Parameters
-688    ----------
-689    objects : list
-690        List of fit results to include in the analysis (optional).
-691    """
-692
-693    if objects is None:
-694        obs_list = []
-695        for obj in gc.get_objects():
-696            if isinstance(obj, Fit_result):
-697                obs_list.append(obj)
-698    else:
-699        obs_list = objects
+635    residuals = []
+636    for i_x, i_y in zip(x, o_y):
+637        residuals.append((i_y - func(p, i_x)) / i_y.dvalue)
+638    residuals = sorted(residuals)
+639    my_y = [o.value for o in residuals]
+640    probplot = scipy.stats.probplot(my_y)
+641    my_x = probplot[0][0]
+642    plt.figure(figsize=(8, 8 / 1.618))
+643    plt.errorbar(my_x, my_y, fmt='o')
+644    fit_start = my_x[0]
+645    fit_stop = my_x[-1]
+646    samples = np.arange(fit_start, fit_stop, 0.01)
+647    plt.plot(samples, samples, 'k--', zorder=11, label='Standard normal distribution')
+648    plt.plot(samples, probplot[1][0] * samples + probplot[1][1], zorder=10, label='Least squares fit, r=' + str(np.around(probplot[1][2], 3)), marker='', ls='-')
+649
+650    plt.xlabel('Theoretical quantiles')
+651    plt.ylabel('Ordered Values')
+652    plt.legend()
+653    plt.draw()
+654
+655
+656def residual_plot(x, y, func, fit_res):
+657    """ Generates a plot which compares the fit to the data and displays the corresponding residuals"""
+658    sorted_x = sorted(x)
+659    xstart = sorted_x[0] - 0.5 * (sorted_x[1] - sorted_x[0])
+660    xstop = sorted_x[-1] + 0.5 * (sorted_x[-1] - sorted_x[-2])
+661    x_samples = np.arange(xstart, xstop + 0.01, 0.01)
+662
+663    plt.figure(figsize=(8, 8 / 1.618))
+664    gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1], wspace=0.0, hspace=0.0)
+665    ax0 = plt.subplot(gs[0])
+666    ax0.errorbar(x, [o.value for o in y], yerr=[o.dvalue for o in y], ls='none', fmt='o', capsize=3, markersize=5, label='Data')
+667    ax0.plot(x_samples, func([o.value for o in fit_res], x_samples), label='Fit', zorder=10, ls='-', ms=0)
+668    ax0.set_xticklabels([])
+669    ax0.set_xlim([xstart, xstop])
+670    ax0.set_xticklabels([])
+671    ax0.legend()
+672
+673    residuals = (np.asarray([o.value for o in y]) - func([o.value for o in fit_res], x)) / np.asarray([o.dvalue for o in y])
+674    ax1 = plt.subplot(gs[1])
+675    ax1.plot(x, residuals, 'ko', ls='none', markersize=5)
+676    ax1.tick_params(direction='out')
+677    ax1.tick_params(axis="x", bottom=True, top=True, labelbottom=True)
+678    ax1.axhline(y=0.0, ls='--', color='k', marker=" ")
+679    ax1.fill_between(x_samples, -1.0, 1.0, alpha=0.1, facecolor='k')
+680    ax1.set_xlim([xstart, xstop])
+681    ax1.set_ylabel('Residuals')
+682    plt.subplots_adjust(wspace=None, hspace=None)
+683    plt.draw()
+684
+685
+686def error_band(x, func, beta):
+687    """Returns the error band for an array of sample values x, for given fit function func with optimized parameters beta."""
+688    cov = covariance(beta)
+689    if np.any(np.abs(cov - cov.T) > 1000 * np.finfo(np.float64).eps):
+690        warnings.warn("Covariance matrix is not symmetric within floating point precision", RuntimeWarning)
+691
+692    deriv = []
+693    for i, item in enumerate(x):
+694        deriv.append(np.array(egrad(func)([o.value for o in beta], item)))
+695
+696    err = []
+697    for i, item in enumerate(x):
+698        err.append(np.sqrt(deriv[i] @ cov @ deriv[i]))
+699    err = np.array(err)
 700
-701    p_values = [o.p_value for o in obs_list]
+701    return err
 702
-703    bins = len(p_values)
-704    x = np.arange(0, 1.001, 0.001)
-705    plt.plot(x, x, 'k', zorder=1)
-706    plt.xlim(0, 1)
-707    plt.ylim(0, 1)
-708    plt.xlabel('p-value')
-709    plt.ylabel('Cumulative probability')
-710    plt.title(str(bins) + ' p-values')
-711
-712    n = np.arange(1, bins + 1) / np.float64(bins)
-713    Xs = np.sort(p_values)
-714    plt.step(Xs, n)
-715    diffs = n - Xs
-716    loc_max_diff = np.argmax(np.abs(diffs))
-717    loc = Xs[loc_max_diff]
-718    plt.annotate('', xy=(loc, loc), xytext=(loc, loc + diffs[loc_max_diff]), arrowprops=dict(arrowstyle='<->', shrinkA=0, shrinkB=0))
-719    plt.draw()
+703
+704def ks_test(objects=None):
+705    """Performs a Kolmogorov–Smirnov test for the p-values of all fit objects.
+706
+707    Parameters
+708    ----------
+709    objects : list
+710        List of fit results to include in the analysis (optional).
+711    """
+712
+713    if objects is None:
+714        obs_list = []
+715        for obj in gc.get_objects():
+716            if isinstance(obj, Fit_result):
+717                obs_list.append(obj)
+718    else:
+719        obs_list = objects
 720
-721    print(scipy.stats.kstest(p_values, 'uniform'))
+721    p_values = [o.p_value for o in obs_list]
+722
+723    bins = len(p_values)
+724    x = np.arange(0, 1.001, 0.001)
+725    plt.plot(x, x, 'k', zorder=1)
+726    plt.xlim(0, 1)
+727    plt.ylim(0, 1)
+728    plt.xlabel('p-value')
+729    plt.ylabel('Cumulative probability')
+730    plt.title(str(bins) + ' p-values')
+731
+732    n = np.arange(1, bins + 1) / np.float64(bins)
+733    Xs = np.sort(p_values)
+734    plt.step(Xs, n)
+735    diffs = n - Xs
+736    loc_max_diff = np.argmax(np.abs(diffs))
+737    loc = Xs[loc_max_diff]
+738    plt.annotate('', xy=(loc, loc), xytext=(loc, loc + diffs[loc_max_diff]), arrowprops=dict(arrowstyle='<->', shrinkA=0, shrinkB=0))
+739    plt.draw()
+740
+741    print(scipy.stats.kstest(p_values, 'uniform'))
@@ -1232,39 +1252,49 @@ If True, a quantile-quantile plot of the fit result is generated (default False)
 265
 266    fitp = out.beta
 267    try:
-268        hess_inv = np.linalg.pinv(jacobian(jacobian(odr_chisquare))(np.concatenate((fitp, out.xplus.ravel()))))
+268        hess = jacobian(jacobian(odr_chisquare))(np.concatenate((fitp, out.xplus.ravel())))
 269    except TypeError:
 270        raise Exception("It is required to use autograd.numpy instead of numpy within fit functions, see the documentation for details.") from None
-271
-272    def odr_chisquare_compact_x(d):
-273        model = func(d[:n_parms], d[n_parms:n_parms + m].reshape(x_shape))
-274        chisq = anp.sum(((y_f - model) / dy_f) ** 2) + anp.sum(((d[n_parms + m:].reshape(x_shape) - d[n_parms:n_parms + m].reshape(x_shape)) / dx_f) ** 2)
-275        return chisq
-276
-277    jac_jac_x = jacobian(jacobian(odr_chisquare_compact_x))(np.concatenate((fitp, out.xplus.ravel(), x_f.ravel())))
-278
-279    deriv_x = -hess_inv @ jac_jac_x[:n_parms + m, n_parms + m:]
-280
-281    def odr_chisquare_compact_y(d):
-282        model = func(d[:n_parms], d[n_parms:n_parms + m].reshape(x_shape))
-283        chisq = anp.sum(((d[n_parms + m:] - model) / dy_f) ** 2) + anp.sum(((x_f - d[n_parms:n_parms + m].reshape(x_shape)) / dx_f) ** 2)
-284        return chisq
-285
-286    jac_jac_y = jacobian(jacobian(odr_chisquare_compact_y))(np.concatenate((fitp, out.xplus.ravel(), y_f)))
-287
-288    deriv_y = -hess_inv @ jac_jac_y[:n_parms + m, n_parms + m:]
-289
-290    result = []
-291    for i in range(n_parms):
-292        result.append(derived_observable(lambda my_var, **kwargs: (my_var[0] + np.finfo(np.float64).eps) / (x.ravel()[0].value + np.finfo(np.float64).eps) * out.beta[i], list(x.ravel()) + list(y), man_grad=list(deriv_x[i]) + list(deriv_y[i])))
-293
-294    output.fit_parameters = result
+271    condn = np.linalg.cond(hess)
+272    if condn > 1e8:
+273        warnings.warn("Hessian matrix might be ill-conditioned ({0:1.2e}), error propagation might be unreliable.\n \
+274                      Maybe try rescaling the problem such that all parameters are of O(1).".format(condn), RuntimeWarning)
+275    try:
+276        hess_inv = np.linalg.inv(hess)
+277    except np.linalg.LinAlgError:
+278        raise Exception("Cannot invert Hessian matrix.")
+279    except Exception:
+280        raise Exception("Unknown error in connection with Hessian inverse.")
+281
+282    def odr_chisquare_compact_x(d):
+283        model = func(d[:n_parms], d[n_parms:n_parms + m].reshape(x_shape))
+284        chisq = anp.sum(((y_f - model) / dy_f) ** 2) + anp.sum(((d[n_parms + m:].reshape(x_shape) - d[n_parms:n_parms + m].reshape(x_shape)) / dx_f) ** 2)
+285        return chisq
+286
+287    jac_jac_x = jacobian(jacobian(odr_chisquare_compact_x))(np.concatenate((fitp, out.xplus.ravel(), x_f.ravel())))
+288
+289    deriv_x = -hess_inv @ jac_jac_x[:n_parms + m, n_parms + m:]
+290
+291    def odr_chisquare_compact_y(d):
+292        model = func(d[:n_parms], d[n_parms:n_parms + m].reshape(x_shape))
+293        chisq = anp.sum(((d[n_parms + m:] - model) / dy_f) ** 2) + anp.sum(((x_f - d[n_parms:n_parms + m].reshape(x_shape)) / dx_f) ** 2)
+294        return chisq
 295
-296    output.odr_chisquare = odr_chisquare(np.concatenate((out.beta, out.xplus.ravel())))
-297    output.dof = x.shape[-1] - n_parms
-298    output.p_value = 1 - chi2.cdf(output.odr_chisquare, output.dof)
+296    jac_jac_y = jacobian(jacobian(odr_chisquare_compact_y))(np.concatenate((fitp, out.xplus.ravel(), y_f)))
+297
+298    deriv_y = -hess_inv @ jac_jac_y[:n_parms + m, n_parms + m:]
 299
-300    return output
+300    result = []
+301    for i in range(n_parms):
+302        result.append(derived_observable(lambda my_var, **kwargs: (my_var[0] + np.finfo(np.float64).eps) / (x.ravel()[0].value + np.finfo(np.float64).eps) * out.beta[i], list(x.ravel()) + list(y), man_grad=list(deriv_x[i]) + list(deriv_y[i])))
+303
+304    output.fit_parameters = result
+305
+306    output.odr_chisquare = odr_chisquare(np.concatenate((out.beta, out.xplus.ravel())))
+307    output.dof = x.shape[-1] - n_parms
+308    output.p_value = 1 - chi2.cdf(output.odr_chisquare, output.dof)
+309
+310    return output
@@ -1325,30 +1355,30 @@ has to be calculated (default False).
-585def fit_lin(x, y, **kwargs):
-586    """Performs a linear fit to y = n + m * x and returns two Obs n, m.
-587
-588    Parameters
-589    ----------
-590    x : list
-591        Can either be a list of floats in which case no xerror is assumed, or
-592        a list of Obs, where the dvalues of the Obs are used as xerror for the fit.
-593    y : list
-594        List of Obs, the dvalues of the Obs are used as yerror for the fit.
-595    """
-596
-597    def f(a, x):
-598        y = a[0] + a[1] * x
-599        return y
-600
-601    if all(isinstance(n, Obs) for n in x):
-602        out = total_least_squares(x, y, f, **kwargs)
-603        return out.fit_parameters
-604    elif all(isinstance(n, float) or isinstance(n, int) for n in x) or isinstance(x, np.ndarray):
-605        out = least_squares(x, y, f, **kwargs)
-606        return out.fit_parameters
-607    else:
-608        raise Exception('Unsupported types for x')
+605def fit_lin(x, y, **kwargs):
+606    """Performs a linear fit to y = n + m * x and returns two Obs n, m.
+607
+608    Parameters
+609    ----------
+610    x : list
+611        Can either be a list of floats in which case no xerror is assumed, or
+612        a list of Obs, where the dvalues of the Obs are used as xerror for the fit.
+613    y : list
+614        List of Obs, the dvalues of the Obs are used as yerror for the fit.
+615    """
+616
+617    def f(a, x):
+618        y = a[0] + a[1] * x
+619        return y
+620
+621    if all(isinstance(n, Obs) for n in x):
+622        out = total_least_squares(x, y, f, **kwargs)
+623        return out.fit_parameters
+624    elif all(isinstance(n, float) or isinstance(n, int) for n in x) or isinstance(x, np.ndarray):
+625        out = least_squares(x, y, f, **kwargs)
+626        return out.fit_parameters
+627    else:
+628        raise Exception('Unsupported types for x')
 
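As the hunk above shows, fit_lin dispatches to total_least_squares when the x values are Obs carrying errors and to least_squares for plain numbers. A minimal usage sketch, assuming the usual pyerrors entry points (ensemble name and pseudo-data are invented for illustration):

    import numpy as np
    import pyerrors as pe

    rng = np.random.default_rng(1)
    x = np.arange(5)
    # One Obs per data point, built from synthetic Monte Carlo samples.
    y = [pe.Obs([rng.normal(0.5 + 0.3 * i, 0.1, 500)], ['ens1']) for i in x]
    for o in y:
        o.gamma_method()  # errors must be estimated before fitting

    n, m = pe.fits.fit_lin(x, y)  # intercept n and slope m as Obs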
@@ -1378,30 +1408,30 @@ List of Obs, the dvalues of the Obs are used as yerror for the fit.
-611def qqplot(x, o_y, func, p):
-612    """Generates a quantile-quantile plot of the fit result which can be used to
-613       check if the residuals of the fit are Gaussian distributed.
-614    """
-615
-616    residuals = []
-617    for i_x, i_y in zip(x, o_y):
-618        residuals.append((i_y - func(p, i_x)) / i_y.dvalue)
-619    residuals = sorted(residuals)
-620    my_y = [o.value for o in residuals]
-621    probplot = scipy.stats.probplot(my_y)
-622    my_x = probplot[0][0]
-623    plt.figure(figsize=(8, 8 / 1.618))
-624    plt.errorbar(my_x, my_y, fmt='o')
-625    fit_start = my_x[0]
-626    fit_stop = my_x[-1]
-627    samples = np.arange(fit_start, fit_stop, 0.01)
-628    plt.plot(samples, samples, 'k--', zorder=11, label='Standard normal distribution')
-629    plt.plot(samples, probplot[1][0] * samples + probplot[1][1], zorder=10, label='Least squares fit, r=' + str(np.around(probplot[1][2], 3)), marker='', ls='-')
-630
-631    plt.xlabel('Theoretical quantiles')
-632    plt.ylabel('Ordered Values')
-633    plt.legend()
-634    plt.draw()
+631def qqplot(x, o_y, func, p):
+632    """Generates a quantile-quantile plot of the fit result which can be used to
+633       check if the residuals of the fit are Gaussian distributed.
+634    """
+635
+636    residuals = []
+637    for i_x, i_y in zip(x, o_y):
+638        residuals.append((i_y - func(p, i_x)) / i_y.dvalue)
+639    residuals = sorted(residuals)
+640    my_y = [o.value for o in residuals]
+641    probplot = scipy.stats.probplot(my_y)
+642    my_x = probplot[0][0]
+643    plt.figure(figsize=(8, 8 / 1.618))
+644    plt.errorbar(my_x, my_y, fmt='o')
+645    fit_start = my_x[0]
+646    fit_stop = my_x[-1]
+647    samples = np.arange(fit_start, fit_stop, 0.01)
+648    plt.plot(samples, samples, 'k--', zorder=11, label='Standard normal distribution')
+649    plt.plot(samples, probplot[1][0] * samples + probplot[1][1], zorder=10, label='Least squares fit, r=' + str(np.around(probplot[1][2], 3)), marker='', ls='-')
+650
+651    plt.xlabel('Theoretical quantiles')
+652    plt.ylabel('Ordered Values')
+653    plt.legend()
+654    plt.draw()
 
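In practice qqplot is usually triggered through the qqplot keyword of the fitters, as the _standard_fit hunk further up shows. A sketch under the same assumptions as the fit_lin example above (for nonlinear models the fit function would have to use autograd.numpy instead of numpy):

    import numpy as np
    import pyerrors as pe

    def linear(p, x):
        return p[0] + p[1] * x

    rng = np.random.default_rng(7)
    x = np.arange(6)
    y = [pe.Obs([rng.normal(1.0 + 0.2 * i, 0.05, 400)], ['ensA']) for i in x]
    for o in y:
        o.gamma_method()

    # qqplot=True generates the quantile-quantile diagnostic after the fit.
    fit_res = pe.fits.least_squares(x, y, linear, qqplot=True)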
@@ -1422,34 +1452,34 @@ check if the residuals of the fit are Gaussian distributed.

-637def residual_plot(x, y, func, fit_res):
-638    """ Generates a plot which compares the fit to the data and displays the corresponding residuals"""
-639    sorted_x = sorted(x)
-640    xstart = sorted_x[0] - 0.5 * (sorted_x[1] - sorted_x[0])
-641    xstop = sorted_x[-1] + 0.5 * (sorted_x[-1] - sorted_x[-2])
-642    x_samples = np.arange(xstart, xstop + 0.01, 0.01)
-643
-644    plt.figure(figsize=(8, 8 / 1.618))
-645    gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1], wspace=0.0, hspace=0.0)
-646    ax0 = plt.subplot(gs[0])
-647    ax0.errorbar(x, [o.value for o in y], yerr=[o.dvalue for o in y], ls='none', fmt='o', capsize=3, markersize=5, label='Data')
-648    ax0.plot(x_samples, func([o.value for o in fit_res], x_samples), label='Fit', zorder=10, ls='-', ms=0)
-649    ax0.set_xticklabels([])
-650    ax0.set_xlim([xstart, xstop])
-651    ax0.set_xticklabels([])
-652    ax0.legend()
-653
-654    residuals = (np.asarray([o.value for o in y]) - func([o.value for o in fit_res], x)) / np.asarray([o.dvalue for o in y])
-655    ax1 = plt.subplot(gs[1])
-656    ax1.plot(x, residuals, 'ko', ls='none', markersize=5)
-657    ax1.tick_params(direction='out')
-658    ax1.tick_params(axis="x", bottom=True, top=True, labelbottom=True)
-659    ax1.axhline(y=0.0, ls='--', color='k', marker=" ")
-660    ax1.fill_between(x_samples, -1.0, 1.0, alpha=0.1, facecolor='k')
-661    ax1.set_xlim([xstart, xstop])
-662    ax1.set_ylabel('Residuals')
-663    plt.subplots_adjust(wspace=None, hspace=None)
-664    plt.draw()
+657def residual_plot(x, y, func, fit_res):
+658    """ Generates a plot which compares the fit to the data and displays the corresponding residuals"""
+659    sorted_x = sorted(x)
+660    xstart = sorted_x[0] - 0.5 * (sorted_x[1] - sorted_x[0])
+661    xstop = sorted_x[-1] + 0.5 * (sorted_x[-1] - sorted_x[-2])
+662    x_samples = np.arange(xstart, xstop + 0.01, 0.01)
+663
+664    plt.figure(figsize=(8, 8 / 1.618))
+665    gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1], wspace=0.0, hspace=0.0)
+666    ax0 = plt.subplot(gs[0])
+667    ax0.errorbar(x, [o.value for o in y], yerr=[o.dvalue for o in y], ls='none', fmt='o', capsize=3, markersize=5, label='Data')
+668    ax0.plot(x_samples, func([o.value for o in fit_res], x_samples), label='Fit', zorder=10, ls='-', ms=0)
+669    ax0.set_xticklabels([])
+670    ax0.set_xlim([xstart, xstop])
+671    ax0.set_xticklabels([])
+672    ax0.legend()
+673
+674    residuals = (np.asarray([o.value for o in y]) - func([o.value for o in fit_res], x)) / np.asarray([o.dvalue for o in y])
+675    ax1 = plt.subplot(gs[1])
+676    ax1.plot(x, residuals, 'ko', ls='none', markersize=5)
+677    ax1.tick_params(direction='out')
+678    ax1.tick_params(axis="x", bottom=True, top=True, labelbottom=True)
+679    ax1.axhline(y=0.0, ls='--', color='k', marker=" ")
+680    ax1.fill_between(x_samples, -1.0, 1.0, alpha=0.1, facecolor='k')
+681    ax1.set_xlim([xstart, xstop])
+682    ax1.set_ylabel('Residuals')
+683    plt.subplots_adjust(wspace=None, hspace=None)
+684    plt.draw()
 
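residual_plot shows the data, the fitted curve and the standardized residuals (y_i - f(x_i)) / dy_i, with the shaded band marking one standard deviation. Continuing the least_squares sketch above (same x, y, linear and fit_res):

    # Equivalent to passing resplot=True to pe.fits.least_squares:
    pe.fits.residual_plot(x, y, linear, fit_res.fit_parameters)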
@@ -1469,22 +1499,22 @@ check if the residuals of the fit are Gaussian distributed.

-667def error_band(x, func, beta):
-668    """Returns the error band for an array of sample values x, for given fit function func with optimized parameters beta."""
-669    cov = covariance(beta)
-670    if np.any(np.abs(cov - cov.T) > 1000 * np.finfo(np.float64).eps):
-671        warnings.warn("Covariance matrix is not symmetric within floating point precision", RuntimeWarning)
-672
-673    deriv = []
-674    for i, item in enumerate(x):
-675        deriv.append(np.array(egrad(func)([o.value for o in beta], item)))
-676
-677    err = []
-678    for i, item in enumerate(x):
-679        err.append(np.sqrt(deriv[i] @ cov @ deriv[i]))
-680    err = np.array(err)
-681
-682    return err
+687def error_band(x, func, beta):
+688    """Returns the error band for an array of sample values x, for given fit function func with optimized parameters beta."""
+689    cov = covariance(beta)
+690    if np.any(np.abs(cov - cov.T) > 1000 * np.finfo(np.float64).eps):
+691        warnings.warn("Covariance matrix is not symmetric within floating point precision", RuntimeWarning)
+692
+693    deriv = []
+694    for i, item in enumerate(x):
+695        deriv.append(np.array(egrad(func)([o.value for o in beta], item)))
+696
+697    err = []
+698    for i, item in enumerate(x):
+699        err.append(np.sqrt(deriv[i] @ cov @ deriv[i]))
+700    err = np.array(err)
+701
+702    return err
 
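The quantity error_band computes per sample point is first-order (delta-method) error propagation: err(x) = sqrt(g(x) @ cov @ g(x)), with g the gradient of func with respect to the fitted parameters. A toy numpy check with invented numbers:

    import numpy as np

    cov = np.array([[0.04, 0.01],
                    [0.01, 0.09]])      # parameter covariance matrix
    g = np.array([1.0, 2.5])            # gradient df/dp at one sample point
    band = float(np.sqrt(g @ cov @ g))  # half-width of the error band there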
@@ -1504,44 +1534,44 @@ check if the residuals of the fit are Gaussian distributed.

-685def ks_test(objects=None):
-686    """Performs a Kolmogorov–Smirnov test for the p-values of all fit objects.
-687
-688    Parameters
-689    ----------
-690    objects : list
-691        List of fit results to include in the analysis (optional).
-692    """
-693
-694    if objects is None:
-695        obs_list = []
-696        for obj in gc.get_objects():
-697            if isinstance(obj, Fit_result):
-698                obs_list.append(obj)
-699    else:
-700        obs_list = objects
-701
-702    p_values = [o.p_value for o in obs_list]
-703
-704    bins = len(p_values)
-705    x = np.arange(0, 1.001, 0.001)
-706    plt.plot(x, x, 'k', zorder=1)
-707    plt.xlim(0, 1)
-708    plt.ylim(0, 1)
-709    plt.xlabel('p-value')
-710    plt.ylabel('Cumulative probability')
-711    plt.title(str(bins) + ' p-values')
-712
-713    n = np.arange(1, bins + 1) / np.float64(bins)
-714    Xs = np.sort(p_values)
-715    plt.step(Xs, n)
-716    diffs = n - Xs
-717    loc_max_diff = np.argmax(np.abs(diffs))
-718    loc = Xs[loc_max_diff]
-719    plt.annotate('', xy=(loc, loc), xytext=(loc, loc + diffs[loc_max_diff]), arrowprops=dict(arrowstyle='<->', shrinkA=0, shrinkB=0))
-720    plt.draw()
+705def ks_test(objects=None):
+706    """Performs a Kolmogorov–Smirnov test for the p-values of all fit objects.
+707
+708    Parameters
+709    ----------
+710    objects : list
+711        List of fit results to include in the analysis (optional).
+712    """
+713
+714    if objects is None:
+715        obs_list = []
+716        for obj in gc.get_objects():
+717            if isinstance(obj, Fit_result):
+718                obs_list.append(obj)
+719    else:
+720        obs_list = objects
 721
-722    print(scipy.stats.kstest(p_values, 'uniform'))
+722    p_values = [o.p_value for o in obs_list]
+723
+724    bins = len(p_values)
+725    x = np.arange(0, 1.001, 0.001)
+726    plt.plot(x, x, 'k', zorder=1)
+727    plt.xlim(0, 1)
+728    plt.ylim(0, 1)
+729    plt.xlabel('p-value')
+730    plt.ylabel('Cumulative probability')
+731    plt.title(str(bins) + ' p-values')
+732
+733    n = np.arange(1, bins + 1) / np.float64(bins)
+734    Xs = np.sort(p_values)
+735    plt.step(Xs, n)
+736    diffs = n - Xs
+737    loc_max_diff = np.argmax(np.abs(diffs))
+738    loc = Xs[loc_max_diff]
+739    plt.annotate('', xy=(loc, loc), xytext=(loc, loc + diffs[loc_max_diff]), arrowprops=dict(arrowstyle='<->', shrinkA=0, shrinkB=0))
+740    plt.draw()
+741
+742    print(scipy.stats.kstest(p_values, 'uniform'))
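If the fitted models describe the data, fit p-values should be uniformly distributed on [0, 1]; ks_test plots the empirical CDF of the collected p-values against the diagonal and prints the Kolmogorov-Smirnov test result. The scipy call at its core, with invented p-values:

    import numpy as np
    import scipy.stats

    p_values = np.random.default_rng(2).uniform(size=25)  # stand-in p-values
    # A small KS statistic / large p-value is compatible with uniformity.
    print(scipy.stats.kstest(p_values, 'uniform'))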