From 3585775d303f2bc23a7dee659517980ab471fc82 Mon Sep 17 00:00:00 2001 From: fjosw Date: Wed, 19 Feb 2025 17:16:41 +0000 Subject: [PATCH] Documentation updated --- docs/pyerrors/fits.html | 1934 ++++++++++++++++++++------------------- 1 file changed, 969 insertions(+), 965 deletions(-) diff --git a/docs/pyerrors/fits.html b/docs/pyerrors/fits.html index b6c6d318..802779dc 100644 --- a/docs/pyerrors/fits.html +++ b/docs/pyerrors/fits.html @@ -476,531 +476,533 @@ 365 if (chol_inv[1] != key_ls): 366 raise ValueError('The keys of inverse covariance matrix are not the same or do not appear in the same order as the x and y values.') 367 chol_inv = chol_inv[0] -368 else: -369 corr = covariance(y_all, correlation=True, **kwargs) -370 inverrdiag = np.diag(1 / np.asarray(dy_f)) -371 chol_inv = invert_corr_cov_cholesky(corr, inverrdiag) -372 -373 def general_chisqfunc(p, ivars, pr): -374 model = anp.concatenate([anp.array(funcd[key](p, xd[key])).reshape(-1) for key in key_ls]) -375 return anp.concatenate((anp.dot(chol_inv, (ivars - model)), (p[prior_mask] - pr) / dp_f)) -376 -377 def chisqfunc(p): -378 return anp.sum(general_chisqfunc(p, y_f, p_f) ** 2) -379 else: -380 general_chisqfunc = general_chisqfunc_uncorr -381 chisqfunc = chisqfunc_uncorr -382 -383 output.method = kwargs.get('method', 'Levenberg-Marquardt') -384 if not silent: -385 print('Method:', output.method) -386 -387 if output.method != 'Levenberg-Marquardt': -388 if output.method == 'migrad': -389 tolerance = 1e-4 # default value of 1e-1 set by iminuit can be problematic -390 if 'tol' in kwargs: -391 tolerance = kwargs.get('tol') -392 fit_result = iminuit.minimize(chisqfunc_uncorr, x0, tol=tolerance) # Stopping criterion 0.002 * tol * errordef -393 if kwargs.get('correlated_fit') is True: -394 fit_result = iminuit.minimize(chisqfunc, fit_result.x, tol=tolerance) -395 output.iterations = fit_result.nfev -396 else: -397 tolerance = 1e-12 -398 if 'tol' in kwargs: -399 tolerance = kwargs.get('tol') -400 fit_result = scipy.optimize.minimize(chisqfunc_uncorr, x0, method=kwargs.get('method'), tol=tolerance) -401 if kwargs.get('correlated_fit') is True: -402 fit_result = scipy.optimize.minimize(chisqfunc, fit_result.x, method=kwargs.get('method'), tol=tolerance) -403 output.iterations = fit_result.nit -404 -405 chisquare = fit_result.fun +368 if np.any(np.diag(chol_inv) <= 0) or (not np.all(chol_inv == np.tril(chol_inv))): +369 raise ValueError('The inverse covariance matrix inv_chol_cov_matrix[0] has to be a lower triangular matrix constructed from a Cholesky decomposition.') +370 else: +371 corr = covariance(y_all, correlation=True, **kwargs) +372 inverrdiag = np.diag(1 / np.asarray(dy_f)) +373 chol_inv = invert_corr_cov_cholesky(corr, inverrdiag) +374 +375 def general_chisqfunc(p, ivars, pr): +376 model = anp.concatenate([anp.array(funcd[key](p, xd[key])).reshape(-1) for key in key_ls]) +377 return anp.concatenate((anp.dot(chol_inv, (ivars - model)), (p[prior_mask] - pr) / dp_f)) +378 +379 def chisqfunc(p): +380 return anp.sum(general_chisqfunc(p, y_f, p_f) ** 2) +381 else: +382 general_chisqfunc = general_chisqfunc_uncorr +383 chisqfunc = chisqfunc_uncorr +384 +385 output.method = kwargs.get('method', 'Levenberg-Marquardt') +386 if not silent: +387 print('Method:', output.method) +388 +389 if output.method != 'Levenberg-Marquardt': +390 if output.method == 'migrad': +391 tolerance = 1e-4 # default value of 1e-1 set by iminuit can be problematic +392 if 'tol' in kwargs: +393 tolerance = kwargs.get('tol') +394 fit_result = 
iminuit.minimize(chisqfunc_uncorr, x0, tol=tolerance) # Stopping criterion 0.002 * tol * errordef +395 if kwargs.get('correlated_fit') is True: +396 fit_result = iminuit.minimize(chisqfunc, fit_result.x, tol=tolerance) +397 output.iterations = fit_result.nfev +398 else: +399 tolerance = 1e-12 +400 if 'tol' in kwargs: +401 tolerance = kwargs.get('tol') +402 fit_result = scipy.optimize.minimize(chisqfunc_uncorr, x0, method=kwargs.get('method'), tol=tolerance) +403 if kwargs.get('correlated_fit') is True: +404 fit_result = scipy.optimize.minimize(chisqfunc, fit_result.x, method=kwargs.get('method'), tol=tolerance) +405 output.iterations = fit_result.nit 406 -407 else: -408 if 'tol' in kwargs: -409 print('tol cannot be set for Levenberg-Marquardt') -410 -411 def chisqfunc_residuals_uncorr(p): -412 return general_chisqfunc_uncorr(p, y_f, p_f) -413 -414 fit_result = scipy.optimize.least_squares(chisqfunc_residuals_uncorr, x0, method='lm', ftol=1e-15, gtol=1e-15, xtol=1e-15) -415 if kwargs.get('correlated_fit') is True: -416 def chisqfunc_residuals(p): -417 return general_chisqfunc(p, y_f, p_f) -418 -419 fit_result = scipy.optimize.least_squares(chisqfunc_residuals, fit_result.x, method='lm', ftol=1e-15, gtol=1e-15, xtol=1e-15) +407 chisquare = fit_result.fun +408 +409 else: +410 if 'tol' in kwargs: +411 print('tol cannot be set for Levenberg-Marquardt') +412 +413 def chisqfunc_residuals_uncorr(p): +414 return general_chisqfunc_uncorr(p, y_f, p_f) +415 +416 fit_result = scipy.optimize.least_squares(chisqfunc_residuals_uncorr, x0, method='lm', ftol=1e-15, gtol=1e-15, xtol=1e-15) +417 if kwargs.get('correlated_fit') is True: +418 def chisqfunc_residuals(p): +419 return general_chisqfunc(p, y_f, p_f) 420 -421 chisquare = np.sum(fit_result.fun ** 2) -422 assert np.isclose(chisquare, chisqfunc(fit_result.x), atol=1e-14) -423 -424 output.iterations = fit_result.nfev +421 fit_result = scipy.optimize.least_squares(chisqfunc_residuals, fit_result.x, method='lm', ftol=1e-15, gtol=1e-15, xtol=1e-15) +422 +423 chisquare = np.sum(fit_result.fun ** 2) +424 assert np.isclose(chisquare, chisqfunc(fit_result.x), atol=1e-14) 425 -426 if not fit_result.success: -427 raise Exception('The minimization procedure did not converge.') -428 -429 output.chisquare = chisquare -430 output.dof = y_all.shape[-1] - n_parms + len(loc_priors) -431 output.p_value = 1 - scipy.stats.chi2.cdf(output.chisquare, output.dof) -432 if output.dof > 0: -433 output.chisquare_by_dof = output.chisquare / output.dof -434 else: -435 output.chisquare_by_dof = float('nan') -436 -437 output.message = fit_result.message -438 if not silent: -439 print(fit_result.message) -440 print('chisquare/d.o.f.:', output.chisquare_by_dof) -441 print('fit parameters', fit_result.x) -442 -443 def prepare_hat_matrix(): -444 hat_vector = [] -445 for key in key_ls: -446 if (len(xd[key]) != 0): -447 hat_vector.append(jacobian(funcd[key])(fit_result.x, xd[key])) -448 hat_vector = [item for sublist in hat_vector for item in sublist] -449 return hat_vector -450 -451 if kwargs.get('expected_chisquare') is True: -452 if kwargs.get('correlated_fit') is not True: -453 W = np.diag(1 / np.asarray(dy_f)) -454 cov = covariance(y_all) -455 hat_vector = prepare_hat_matrix() -456 A = W @ hat_vector -457 P_phi = A @ np.linalg.pinv(A.T @ A) @ A.T -458 expected_chisquare = np.trace((np.identity(y_all.shape[-1]) - P_phi) @ W @ cov @ W) -459 output.chisquare_by_expected_chisquare = output.chisquare / expected_chisquare -460 if not silent: -461 print('chisquare/expected_chisquare:', 
output.chisquare_by_expected_chisquare) -462 -463 fitp = fit_result.x +426 output.iterations = fit_result.nfev +427 +428 if not fit_result.success: +429 raise Exception('The minimization procedure did not converge.') +430 +431 output.chisquare = chisquare +432 output.dof = y_all.shape[-1] - n_parms + len(loc_priors) +433 output.p_value = 1 - scipy.stats.chi2.cdf(output.chisquare, output.dof) +434 if output.dof > 0: +435 output.chisquare_by_dof = output.chisquare / output.dof +436 else: +437 output.chisquare_by_dof = float('nan') +438 +439 output.message = fit_result.message +440 if not silent: +441 print(fit_result.message) +442 print('chisquare/d.o.f.:', output.chisquare_by_dof) +443 print('fit parameters', fit_result.x) +444 +445 def prepare_hat_matrix(): +446 hat_vector = [] +447 for key in key_ls: +448 if (len(xd[key]) != 0): +449 hat_vector.append(jacobian(funcd[key])(fit_result.x, xd[key])) +450 hat_vector = [item for sublist in hat_vector for item in sublist] +451 return hat_vector +452 +453 if kwargs.get('expected_chisquare') is True: +454 if kwargs.get('correlated_fit') is not True: +455 W = np.diag(1 / np.asarray(dy_f)) +456 cov = covariance(y_all) +457 hat_vector = prepare_hat_matrix() +458 A = W @ hat_vector +459 P_phi = A @ np.linalg.pinv(A.T @ A) @ A.T +460 expected_chisquare = np.trace((np.identity(y_all.shape[-1]) - P_phi) @ W @ cov @ W) +461 output.chisquare_by_expected_chisquare = output.chisquare / expected_chisquare +462 if not silent: +463 print('chisquare/expected_chisquare:', output.chisquare_by_expected_chisquare) 464 -465 try: -466 hess = hessian(chisqfunc)(fitp) -467 except TypeError: -468 raise Exception("It is required to use autograd.numpy instead of numpy within fit functions, see the documentation for details.") from None -469 -470 len_y = len(y_f) +465 fitp = fit_result.x +466 +467 try: +468 hess = hessian(chisqfunc)(fitp) +469 except TypeError: +470 raise Exception("It is required to use autograd.numpy instead of numpy within fit functions, see the documentation for details.") from None 471 -472 def chisqfunc_compact(d): -473 return anp.sum(general_chisqfunc(d[:n_parms], d[n_parms: n_parms + len_y], d[n_parms + len_y:]) ** 2) -474 -475 jac_jac_y = hessian(chisqfunc_compact)(np.concatenate((fitp, y_f, p_f))) +472 len_y = len(y_f) +473 +474 def chisqfunc_compact(d): +475 return anp.sum(general_chisqfunc(d[:n_parms], d[n_parms: n_parms + len_y], d[n_parms + len_y:]) ** 2) 476 -477 # Compute hess^{-1} @ jac_jac_y[:n_parms + m, n_parms + m:] using LAPACK dgesv -478 try: -479 deriv_y = -scipy.linalg.solve(hess, jac_jac_y[:n_parms, n_parms:]) -480 except np.linalg.LinAlgError: -481 raise Exception("Cannot invert hessian matrix.") -482 -483 result = [] -484 for i in range(n_parms): -485 result.append(derived_observable(lambda x_all, **kwargs: (x_all[0] + np.finfo(np.float64).eps) / (y_all[0].value + np.finfo(np.float64).eps) * fitp[i], list(y_all) + loc_priors, man_grad=list(deriv_y[i]))) -486 -487 output.fit_parameters = result +477 jac_jac_y = hessian(chisqfunc_compact)(np.concatenate((fitp, y_f, p_f))) +478 +479 # Compute hess^{-1} @ jac_jac_y[:n_parms + m, n_parms + m:] using LAPACK dgesv +480 try: +481 deriv_y = -scipy.linalg.solve(hess, jac_jac_y[:n_parms, n_parms:]) +482 except np.linalg.LinAlgError: +483 raise Exception("Cannot invert hessian matrix.") +484 +485 result = [] +486 for i in range(n_parms): +487 result.append(derived_observable(lambda x_all, **kwargs: (x_all[0] + np.finfo(np.float64).eps) / (y_all[0].value + np.finfo(np.float64).eps) * fitp[i], 
list(y_all) + loc_priors, man_grad=list(deriv_y[i]))) 488 -489 # Hotelling t-squared p-value for correlated fits. -490 if kwargs.get('correlated_fit') is True: -491 n_cov = np.min(np.vectorize(lambda x_all: x_all.N)(y_all)) -492 output.t2_p_value = 1 - scipy.stats.f.cdf((n_cov - output.dof) / (output.dof * (n_cov - 1)) * output.chisquare, -493 output.dof, n_cov - output.dof) -494 -495 if kwargs.get('resplot') is True: -496 for key in key_ls: -497 residual_plot(xd[key], yd[key], funcd[key], result, title=key) -498 -499 if kwargs.get('qqplot') is True: -500 for key in key_ls: -501 qqplot(xd[key], yd[key], funcd[key], result, title=key) -502 -503 return output +489 output.fit_parameters = result +490 +491 # Hotelling t-squared p-value for correlated fits. +492 if kwargs.get('correlated_fit') is True: +493 n_cov = np.min(np.vectorize(lambda x_all: x_all.N)(y_all)) +494 output.t2_p_value = 1 - scipy.stats.f.cdf((n_cov - output.dof) / (output.dof * (n_cov - 1)) * output.chisquare, +495 output.dof, n_cov - output.dof) +496 +497 if kwargs.get('resplot') is True: +498 for key in key_ls: +499 residual_plot(xd[key], yd[key], funcd[key], result, title=key) +500 +501 if kwargs.get('qqplot') is True: +502 for key in key_ls: +503 qqplot(xd[key], yd[key], funcd[key], result, title=key) 504 -505 -506def total_least_squares(x, y, func, silent=False, **kwargs): -507 r'''Performs a non-linear fit to y = func(x) and returns a list of Obs corresponding to the fit parameters. -508 -509 Parameters -510 ---------- -511 x : list -512 list of Obs, or a tuple of lists of Obs -513 y : list -514 list of Obs. The dvalues of the Obs are used as x- and yerror for the fit. -515 func : object -516 func has to be of the form -517 -518 ```python -519 import autograd.numpy as anp -520 -521 def func(a, x): -522 return a[0] + a[1] * x + a[2] * anp.sinh(x) -523 ``` -524 -525 For multiple x values func can be of the form +505 return output +506 +507 +508def total_least_squares(x, y, func, silent=False, **kwargs): +509 r'''Performs a non-linear fit to y = func(x) and returns a list of Obs corresponding to the fit parameters. +510 +511 Parameters +512 ---------- +513 x : list +514 list of Obs, or a tuple of lists of Obs +515 y : list +516 list of Obs. The dvalues of the Obs are used as x- and yerror for the fit. +517 func : object +518 func has to be of the form +519 +520 ```python +521 import autograd.numpy as anp +522 +523 def func(a, x): +524 return a[0] + a[1] * x + a[2] * anp.sinh(x) +525 ``` 526 -527 ```python -528 def func(a, x): -529 (x1, x2) = x -530 return a[0] * x1 ** 2 + a[1] * x2 -531 ``` -532 -533 It is important that all numpy functions refer to autograd.numpy, otherwise the differentiation -534 will not work. -535 silent : bool, optional -536 If true all output to the console is omitted (default False). -537 initial_guess : list -538 can provide an initial guess for the input parameters. Relevant for non-linear -539 fits with many parameters. -540 expected_chisquare : bool -541 If true prints the expected chisquare which is -542 corrected by effects caused by correlated input data. -543 This can take a while as the full correlation matrix -544 has to be calculated (default False). -545 num_grad : bool -546 Use numerical differentation instead of automatic differentiation to perform the error propagation (default False). -547 -548 Notes -549 ----- -550 Based on the orthogonal distance regression module of scipy. 
-551 -552 Returns -553 ------- -554 output : Fit_result -555 Parameters and information on the fitted result. -556 ''' -557 -558 output = Fit_result() +527 For multiple x values func can be of the form +528 +529 ```python +530 def func(a, x): +531 (x1, x2) = x +532 return a[0] * x1 ** 2 + a[1] * x2 +533 ``` +534 +535 It is important that all numpy functions refer to autograd.numpy, otherwise the differentiation +536 will not work. +537 silent : bool, optional +538 If true all output to the console is omitted (default False). +539 initial_guess : list +540 can provide an initial guess for the input parameters. Relevant for non-linear +541 fits with many parameters. +542 expected_chisquare : bool +543 If true prints the expected chisquare which is +544 corrected by effects caused by correlated input data. +545 This can take a while as the full correlation matrix +546 has to be calculated (default False). +547 num_grad : bool +548 Use numerical differentation instead of automatic differentiation to perform the error propagation (default False). +549 +550 Notes +551 ----- +552 Based on the orthogonal distance regression module of scipy. +553 +554 Returns +555 ------- +556 output : Fit_result +557 Parameters and information on the fitted result. +558 ''' 559 -560 output.fit_function = func +560 output = Fit_result() 561 -562 x = np.array(x) +562 output.fit_function = func 563 -564 x_shape = x.shape +564 x = np.array(x) 565 -566 if kwargs.get('num_grad') is True: -567 jacobian = num_jacobian -568 hessian = num_hessian -569 else: -570 jacobian = auto_jacobian -571 hessian = auto_hessian -572 -573 if not callable(func): -574 raise TypeError('func has to be a function.') -575 -576 for i in range(42): -577 try: -578 func(np.arange(i), x.T[0]) -579 except TypeError: -580 continue -581 except IndexError: +566 x_shape = x.shape +567 +568 if kwargs.get('num_grad') is True: +569 jacobian = num_jacobian +570 hessian = num_hessian +571 else: +572 jacobian = auto_jacobian +573 hessian = auto_hessian +574 +575 if not callable(func): +576 raise TypeError('func has to be a function.') +577 +578 for i in range(42): +579 try: +580 func(np.arange(i), x.T[0]) +581 except TypeError: 582 continue -583 else: -584 break -585 else: -586 raise RuntimeError("Fit function is not valid.") -587 -588 n_parms = i -589 if not silent: -590 print('Fit with', n_parms, 'parameter' + 's' * (n_parms > 1)) -591 -592 x_f = np.vectorize(lambda o: o.value)(x) -593 dx_f = np.vectorize(lambda o: o.dvalue)(x) -594 y_f = np.array([o.value for o in y]) -595 dy_f = np.array([o.dvalue for o in y]) -596 -597 if np.any(np.asarray(dx_f) <= 0.0): -598 raise Exception('No x errors available, run the gamma method first.') -599 -600 if np.any(np.asarray(dy_f) <= 0.0): -601 raise Exception('No y errors available, run the gamma method first.') -602 -603 if 'initial_guess' in kwargs: -604 x0 = kwargs.get('initial_guess') -605 if len(x0) != n_parms: -606 raise Exception('Initial guess does not have the correct length: %d vs. 
%d' % (len(x0), n_parms)) -607 else: -608 x0 = [1] * n_parms -609 -610 data = RealData(x_f, y_f, sx=dx_f, sy=dy_f) -611 model = Model(func) -612 odr = ODR(data, model, x0, partol=np.finfo(np.float64).eps) -613 odr.set_job(fit_type=0, deriv=1) -614 out = odr.run() -615 -616 output.residual_variance = out.res_var +583 except IndexError: +584 continue +585 else: +586 break +587 else: +588 raise RuntimeError("Fit function is not valid.") +589 +590 n_parms = i +591 if not silent: +592 print('Fit with', n_parms, 'parameter' + 's' * (n_parms > 1)) +593 +594 x_f = np.vectorize(lambda o: o.value)(x) +595 dx_f = np.vectorize(lambda o: o.dvalue)(x) +596 y_f = np.array([o.value for o in y]) +597 dy_f = np.array([o.dvalue for o in y]) +598 +599 if np.any(np.asarray(dx_f) <= 0.0): +600 raise Exception('No x errors available, run the gamma method first.') +601 +602 if np.any(np.asarray(dy_f) <= 0.0): +603 raise Exception('No y errors available, run the gamma method first.') +604 +605 if 'initial_guess' in kwargs: +606 x0 = kwargs.get('initial_guess') +607 if len(x0) != n_parms: +608 raise Exception('Initial guess does not have the correct length: %d vs. %d' % (len(x0), n_parms)) +609 else: +610 x0 = [1] * n_parms +611 +612 data = RealData(x_f, y_f, sx=dx_f, sy=dy_f) +613 model = Model(func) +614 odr = ODR(data, model, x0, partol=np.finfo(np.float64).eps) +615 odr.set_job(fit_type=0, deriv=1) +616 out = odr.run() 617 -618 output.method = 'ODR' +618 output.residual_variance = out.res_var 619 -620 output.message = out.stopreason +620 output.method = 'ODR' 621 -622 output.xplus = out.xplus +622 output.message = out.stopreason 623 -624 if not silent: -625 print('Method: ODR') -626 print(*out.stopreason) -627 print('Residual variance:', output.residual_variance) -628 -629 if out.info > 3: -630 raise Exception('The minimization procedure did not converge.') -631 -632 m = x_f.size +624 output.xplus = out.xplus +625 +626 if not silent: +627 print('Method: ODR') +628 print(*out.stopreason) +629 print('Residual variance:', output.residual_variance) +630 +631 if out.info > 3: +632 raise Exception('The minimization procedure did not converge.') 633 -634 def odr_chisquare(p): -635 model = func(p[:n_parms], p[n_parms:].reshape(x_shape)) -636 chisq = anp.sum(((y_f - model) / dy_f) ** 2) + anp.sum(((x_f - p[n_parms:].reshape(x_shape)) / dx_f) ** 2) -637 return chisq -638 -639 if kwargs.get('expected_chisquare') is True: -640 W = np.diag(1 / np.asarray(np.concatenate((dy_f.ravel(), dx_f.ravel())))) -641 -642 if kwargs.get('covariance') is not None: -643 cov = kwargs.get('covariance') -644 else: -645 cov = covariance(np.concatenate((y, x.ravel()))) -646 -647 number_of_x_parameters = int(m / x_f.shape[-1]) +634 m = x_f.size +635 +636 def odr_chisquare(p): +637 model = func(p[:n_parms], p[n_parms:].reshape(x_shape)) +638 chisq = anp.sum(((y_f - model) / dy_f) ** 2) + anp.sum(((x_f - p[n_parms:].reshape(x_shape)) / dx_f) ** 2) +639 return chisq +640 +641 if kwargs.get('expected_chisquare') is True: +642 W = np.diag(1 / np.asarray(np.concatenate((dy_f.ravel(), dx_f.ravel())))) +643 +644 if kwargs.get('covariance') is not None: +645 cov = kwargs.get('covariance') +646 else: +647 cov = covariance(np.concatenate((y, x.ravel()))) 648 -649 old_jac = jacobian(func)(out.beta, out.xplus) -650 fused_row1 = np.concatenate((old_jac, np.concatenate((number_of_x_parameters * [np.zeros(old_jac.shape)]), axis=0))) -651 fused_row2 = np.concatenate((jacobian(lambda x, y: func(y, x))(out.xplus, out.beta).reshape(x_f.shape[-1], x_f.shape[-1] * 
number_of_x_parameters), np.identity(number_of_x_parameters * old_jac.shape[0]))) -652 new_jac = np.concatenate((fused_row1, fused_row2), axis=1) -653 -654 A = W @ new_jac -655 P_phi = A @ np.linalg.pinv(A.T @ A) @ A.T -656 expected_chisquare = np.trace((np.identity(P_phi.shape[0]) - P_phi) @ W @ cov @ W) -657 if expected_chisquare <= 0.0: -658 warnings.warn("Negative expected_chisquare.", RuntimeWarning) -659 expected_chisquare = np.abs(expected_chisquare) -660 output.chisquare_by_expected_chisquare = odr_chisquare(np.concatenate((out.beta, out.xplus.ravel()))) / expected_chisquare -661 if not silent: -662 print('chisquare/expected_chisquare:', -663 output.chisquare_by_expected_chisquare) -664 -665 fitp = out.beta -666 try: -667 hess = hessian(odr_chisquare)(np.concatenate((fitp, out.xplus.ravel()))) -668 except TypeError: -669 raise Exception("It is required to use autograd.numpy instead of numpy within fit functions, see the documentation for details.") from None -670 -671 def odr_chisquare_compact_x(d): -672 model = func(d[:n_parms], d[n_parms:n_parms + m].reshape(x_shape)) -673 chisq = anp.sum(((y_f - model) / dy_f) ** 2) + anp.sum(((d[n_parms + m:].reshape(x_shape) - d[n_parms:n_parms + m].reshape(x_shape)) / dx_f) ** 2) -674 return chisq -675 -676 jac_jac_x = hessian(odr_chisquare_compact_x)(np.concatenate((fitp, out.xplus.ravel(), x_f.ravel()))) +649 number_of_x_parameters = int(m / x_f.shape[-1]) +650 +651 old_jac = jacobian(func)(out.beta, out.xplus) +652 fused_row1 = np.concatenate((old_jac, np.concatenate((number_of_x_parameters * [np.zeros(old_jac.shape)]), axis=0))) +653 fused_row2 = np.concatenate((jacobian(lambda x, y: func(y, x))(out.xplus, out.beta).reshape(x_f.shape[-1], x_f.shape[-1] * number_of_x_parameters), np.identity(number_of_x_parameters * old_jac.shape[0]))) +654 new_jac = np.concatenate((fused_row1, fused_row2), axis=1) +655 +656 A = W @ new_jac +657 P_phi = A @ np.linalg.pinv(A.T @ A) @ A.T +658 expected_chisquare = np.trace((np.identity(P_phi.shape[0]) - P_phi) @ W @ cov @ W) +659 if expected_chisquare <= 0.0: +660 warnings.warn("Negative expected_chisquare.", RuntimeWarning) +661 expected_chisquare = np.abs(expected_chisquare) +662 output.chisquare_by_expected_chisquare = odr_chisquare(np.concatenate((out.beta, out.xplus.ravel()))) / expected_chisquare +663 if not silent: +664 print('chisquare/expected_chisquare:', +665 output.chisquare_by_expected_chisquare) +666 +667 fitp = out.beta +668 try: +669 hess = hessian(odr_chisquare)(np.concatenate((fitp, out.xplus.ravel()))) +670 except TypeError: +671 raise Exception("It is required to use autograd.numpy instead of numpy within fit functions, see the documentation for details.") from None +672 +673 def odr_chisquare_compact_x(d): +674 model = func(d[:n_parms], d[n_parms:n_parms + m].reshape(x_shape)) +675 chisq = anp.sum(((y_f - model) / dy_f) ** 2) + anp.sum(((d[n_parms + m:].reshape(x_shape) - d[n_parms:n_parms + m].reshape(x_shape)) / dx_f) ** 2) +676 return chisq 677 -678 # Compute hess^{-1} @ jac_jac_x[:n_parms + m, n_parms + m:] using LAPACK dgesv -679 try: -680 deriv_x = -scipy.linalg.solve(hess, jac_jac_x[:n_parms + m, n_parms + m:]) -681 except np.linalg.LinAlgError: -682 raise Exception("Cannot invert hessian matrix.") -683 -684 def odr_chisquare_compact_y(d): -685 model = func(d[:n_parms], d[n_parms:n_parms + m].reshape(x_shape)) -686 chisq = anp.sum(((d[n_parms + m:] - model) / dy_f) ** 2) + anp.sum(((x_f - d[n_parms:n_parms + m].reshape(x_shape)) / dx_f) ** 2) -687 return chisq -688 -689 jac_jac_y 
= hessian(odr_chisquare_compact_y)(np.concatenate((fitp, out.xplus.ravel(), y_f))) +678 jac_jac_x = hessian(odr_chisquare_compact_x)(np.concatenate((fitp, out.xplus.ravel(), x_f.ravel()))) +679 +680 # Compute hess^{-1} @ jac_jac_x[:n_parms + m, n_parms + m:] using LAPACK dgesv +681 try: +682 deriv_x = -scipy.linalg.solve(hess, jac_jac_x[:n_parms + m, n_parms + m:]) +683 except np.linalg.LinAlgError: +684 raise Exception("Cannot invert hessian matrix.") +685 +686 def odr_chisquare_compact_y(d): +687 model = func(d[:n_parms], d[n_parms:n_parms + m].reshape(x_shape)) +688 chisq = anp.sum(((d[n_parms + m:] - model) / dy_f) ** 2) + anp.sum(((x_f - d[n_parms:n_parms + m].reshape(x_shape)) / dx_f) ** 2) +689 return chisq 690 -691 # Compute hess^{-1} @ jac_jac_y[:n_parms + m, n_parms + m:] using LAPACK dgesv -692 try: -693 deriv_y = -scipy.linalg.solve(hess, jac_jac_y[:n_parms + m, n_parms + m:]) -694 except np.linalg.LinAlgError: -695 raise Exception("Cannot invert hessian matrix.") -696 -697 result = [] -698 for i in range(n_parms): -699 result.append(derived_observable(lambda my_var, **kwargs: (my_var[0] + np.finfo(np.float64).eps) / (x.ravel()[0].value + np.finfo(np.float64).eps) * out.beta[i], list(x.ravel()) + list(y), man_grad=list(deriv_x[i]) + list(deriv_y[i]))) -700 -701 output.fit_parameters = result +691 jac_jac_y = hessian(odr_chisquare_compact_y)(np.concatenate((fitp, out.xplus.ravel(), y_f))) +692 +693 # Compute hess^{-1} @ jac_jac_y[:n_parms + m, n_parms + m:] using LAPACK dgesv +694 try: +695 deriv_y = -scipy.linalg.solve(hess, jac_jac_y[:n_parms + m, n_parms + m:]) +696 except np.linalg.LinAlgError: +697 raise Exception("Cannot invert hessian matrix.") +698 +699 result = [] +700 for i in range(n_parms): +701 result.append(derived_observable(lambda my_var, **kwargs: (my_var[0] + np.finfo(np.float64).eps) / (x.ravel()[0].value + np.finfo(np.float64).eps) * out.beta[i], list(x.ravel()) + list(y), man_grad=list(deriv_x[i]) + list(deriv_y[i]))) 702 -703 output.odr_chisquare = odr_chisquare(np.concatenate((out.beta, out.xplus.ravel()))) -704 output.dof = x.shape[-1] - n_parms -705 output.p_value = 1 - scipy.stats.chi2.cdf(output.odr_chisquare, output.dof) -706 -707 return output +703 output.fit_parameters = result +704 +705 output.odr_chisquare = odr_chisquare(np.concatenate((out.beta, out.xplus.ravel()))) +706 output.dof = x.shape[-1] - n_parms +707 output.p_value = 1 - scipy.stats.chi2.cdf(output.odr_chisquare, output.dof) 708 -709 -710def fit_lin(x, y, **kwargs): -711 """Performs a linear fit to y = n + m * x and returns two Obs n, m. -712 -713 Parameters -714 ---------- -715 x : list -716 Can either be a list of floats in which case no xerror is assumed, or -717 a list of Obs, where the dvalues of the Obs are used as xerror for the fit. -718 y : list -719 List of Obs, the dvalues of the Obs are used as yerror for the fit. -720 -721 Returns -722 ------- -723 fit_parameters : list[Obs] -724 LIist of fitted observables. 
-725 """ -726 -727 def f(a, x): -728 y = a[0] + a[1] * x -729 return y -730 -731 if all(isinstance(n, Obs) for n in x): -732 out = total_least_squares(x, y, f, **kwargs) -733 return out.fit_parameters -734 elif all(isinstance(n, float) or isinstance(n, int) for n in x) or isinstance(x, np.ndarray): -735 out = least_squares(x, y, f, **kwargs) -736 return out.fit_parameters -737 else: -738 raise TypeError('Unsupported types for x') -739 -740 -741def qqplot(x, o_y, func, p, title=""): -742 """Generates a quantile-quantile plot of the fit result which can be used to -743 check if the residuals of the fit are gaussian distributed. -744 -745 Returns -746 ------- -747 None -748 """ -749 -750 residuals = [] -751 for i_x, i_y in zip(x, o_y): -752 residuals.append((i_y - func(p, i_x)) / i_y.dvalue) -753 residuals = sorted(residuals) -754 my_y = [o.value for o in residuals] -755 probplot = scipy.stats.probplot(my_y) -756 my_x = probplot[0][0] -757 plt.figure(figsize=(8, 8 / 1.618)) -758 plt.errorbar(my_x, my_y, fmt='o') -759 fit_start = my_x[0] -760 fit_stop = my_x[-1] -761 samples = np.arange(fit_start, fit_stop, 0.01) -762 plt.plot(samples, samples, 'k--', zorder=11, label='Standard normal distribution') -763 plt.plot(samples, probplot[1][0] * samples + probplot[1][1], zorder=10, label='Least squares fit, r=' + str(np.around(probplot[1][2], 3)), marker='', ls='-') -764 -765 plt.xlabel('Theoretical quantiles') -766 plt.ylabel('Ordered Values') -767 plt.legend(title=title) -768 plt.draw() -769 -770 -771def residual_plot(x, y, func, fit_res, title=""): -772 """Generates a plot which compares the fit to the data and displays the corresponding residuals -773 -774 For uncorrelated data the residuals are expected to be distributed ~N(0,1). +709 return output +710 +711 +712def fit_lin(x, y, **kwargs): +713 """Performs a linear fit to y = n + m * x and returns two Obs n, m. +714 +715 Parameters +716 ---------- +717 x : list +718 Can either be a list of floats in which case no xerror is assumed, or +719 a list of Obs, where the dvalues of the Obs are used as xerror for the fit. +720 y : list +721 List of Obs, the dvalues of the Obs are used as yerror for the fit. +722 +723 Returns +724 ------- +725 fit_parameters : list[Obs] +726 LIist of fitted observables. +727 """ +728 +729 def f(a, x): +730 y = a[0] + a[1] * x +731 return y +732 +733 if all(isinstance(n, Obs) for n in x): +734 out = total_least_squares(x, y, f, **kwargs) +735 return out.fit_parameters +736 elif all(isinstance(n, float) or isinstance(n, int) for n in x) or isinstance(x, np.ndarray): +737 out = least_squares(x, y, f, **kwargs) +738 return out.fit_parameters +739 else: +740 raise TypeError('Unsupported types for x') +741 +742 +743def qqplot(x, o_y, func, p, title=""): +744 """Generates a quantile-quantile plot of the fit result which can be used to +745 check if the residuals of the fit are gaussian distributed. 
+746 +747 Returns +748 ------- +749 None +750 """ +751 +752 residuals = [] +753 for i_x, i_y in zip(x, o_y): +754 residuals.append((i_y - func(p, i_x)) / i_y.dvalue) +755 residuals = sorted(residuals) +756 my_y = [o.value for o in residuals] +757 probplot = scipy.stats.probplot(my_y) +758 my_x = probplot[0][0] +759 plt.figure(figsize=(8, 8 / 1.618)) +760 plt.errorbar(my_x, my_y, fmt='o') +761 fit_start = my_x[0] +762 fit_stop = my_x[-1] +763 samples = np.arange(fit_start, fit_stop, 0.01) +764 plt.plot(samples, samples, 'k--', zorder=11, label='Standard normal distribution') +765 plt.plot(samples, probplot[1][0] * samples + probplot[1][1], zorder=10, label='Least squares fit, r=' + str(np.around(probplot[1][2], 3)), marker='', ls='-') +766 +767 plt.xlabel('Theoretical quantiles') +768 plt.ylabel('Ordered Values') +769 plt.legend(title=title) +770 plt.draw() +771 +772 +773def residual_plot(x, y, func, fit_res, title=""): +774 """Generates a plot which compares the fit to the data and displays the corresponding residuals 775 -776 Returns -777 ------- -778 None -779 """ -780 sorted_x = sorted(x) -781 xstart = sorted_x[0] - 0.5 * (sorted_x[1] - sorted_x[0]) -782 xstop = sorted_x[-1] + 0.5 * (sorted_x[-1] - sorted_x[-2]) -783 x_samples = np.arange(xstart, xstop + 0.01, 0.01) -784 -785 plt.figure(figsize=(8, 8 / 1.618)) -786 gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1], wspace=0.0, hspace=0.0) -787 ax0 = plt.subplot(gs[0]) -788 ax0.errorbar(x, [o.value for o in y], yerr=[o.dvalue for o in y], ls='none', fmt='o', capsize=3, markersize=5, label='Data') -789 ax0.plot(x_samples, func([o.value for o in fit_res], x_samples), label='Fit', zorder=10, ls='-', ms=0) -790 ax0.set_xticklabels([]) -791 ax0.set_xlim([xstart, xstop]) +776 For uncorrelated data the residuals are expected to be distributed ~N(0,1). +777 +778 Returns +779 ------- +780 None +781 """ +782 sorted_x = sorted(x) +783 xstart = sorted_x[0] - 0.5 * (sorted_x[1] - sorted_x[0]) +784 xstop = sorted_x[-1] + 0.5 * (sorted_x[-1] - sorted_x[-2]) +785 x_samples = np.arange(xstart, xstop + 0.01, 0.01) +786 +787 plt.figure(figsize=(8, 8 / 1.618)) +788 gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1], wspace=0.0, hspace=0.0) +789 ax0 = plt.subplot(gs[0]) +790 ax0.errorbar(x, [o.value for o in y], yerr=[o.dvalue for o in y], ls='none', fmt='o', capsize=3, markersize=5, label='Data') +791 ax0.plot(x_samples, func([o.value for o in fit_res], x_samples), label='Fit', zorder=10, ls='-', ms=0) 792 ax0.set_xticklabels([]) -793 ax0.legend(title=title) -794 -795 residuals = (np.asarray([o.value for o in y]) - func([o.value for o in fit_res], np.asarray(x))) / np.asarray([o.dvalue for o in y]) -796 ax1 = plt.subplot(gs[1]) -797 ax1.plot(x, residuals, 'ko', ls='none', markersize=5) -798 ax1.tick_params(direction='out') -799 ax1.tick_params(axis="x", bottom=True, top=True, labelbottom=True) -800 ax1.axhline(y=0.0, ls='--', color='k', marker=" ") -801 ax1.fill_between(x_samples, -1.0, 1.0, alpha=0.1, facecolor='k') -802 ax1.set_xlim([xstart, xstop]) -803 ax1.set_ylabel('Residuals') -804 plt.subplots_adjust(wspace=None, hspace=None) -805 plt.draw() -806 -807 -808def error_band(x, func, beta): -809 """Calculate the error band for an array of sample values x, for given fit function func with optimized parameters beta. 
-810 -811 Returns -812 ------- -813 err : np.array(Obs) -814 Error band for an array of sample values x -815 """ -816 cov = covariance(beta) -817 if np.any(np.abs(cov - cov.T) > 1000 * np.finfo(np.float64).eps): -818 warnings.warn("Covariance matrix is not symmetric within floating point precision", RuntimeWarning) -819 -820 deriv = [] -821 for i, item in enumerate(x): -822 deriv.append(np.array(egrad(func)([o.value for o in beta], item))) -823 -824 err = [] -825 for i, item in enumerate(x): -826 err.append(np.sqrt(deriv[i] @ cov @ deriv[i])) -827 err = np.array(err) -828 -829 return err +793 ax0.set_xlim([xstart, xstop]) +794 ax0.set_xticklabels([]) +795 ax0.legend(title=title) +796 +797 residuals = (np.asarray([o.value for o in y]) - func([o.value for o in fit_res], np.asarray(x))) / np.asarray([o.dvalue for o in y]) +798 ax1 = plt.subplot(gs[1]) +799 ax1.plot(x, residuals, 'ko', ls='none', markersize=5) +800 ax1.tick_params(direction='out') +801 ax1.tick_params(axis="x", bottom=True, top=True, labelbottom=True) +802 ax1.axhline(y=0.0, ls='--', color='k', marker=" ") +803 ax1.fill_between(x_samples, -1.0, 1.0, alpha=0.1, facecolor='k') +804 ax1.set_xlim([xstart, xstop]) +805 ax1.set_ylabel('Residuals') +806 plt.subplots_adjust(wspace=None, hspace=None) +807 plt.draw() +808 +809 +810def error_band(x, func, beta): +811 """Calculate the error band for an array of sample values x, for given fit function func with optimized parameters beta. +812 +813 Returns +814 ------- +815 err : np.array(Obs) +816 Error band for an array of sample values x +817 """ +818 cov = covariance(beta) +819 if np.any(np.abs(cov - cov.T) > 1000 * np.finfo(np.float64).eps): +820 warnings.warn("Covariance matrix is not symmetric within floating point precision", RuntimeWarning) +821 +822 deriv = [] +823 for i, item in enumerate(x): +824 deriv.append(np.array(egrad(func)([o.value for o in beta], item))) +825 +826 err = [] +827 for i, item in enumerate(x): +828 err.append(np.sqrt(deriv[i] @ cov @ deriv[i])) +829 err = np.array(err) 830 -831 -832def ks_test(objects=None): -833 """Performs a Kolmogorov–Smirnov test for the p-values of all fit object. -834 -835 Parameters -836 ---------- -837 objects : list -838 List of fit results to include in the analysis (optional). -839 -840 Returns -841 ------- -842 None -843 """ -844 -845 if objects is None: -846 obs_list = [] -847 for obj in gc.get_objects(): -848 if isinstance(obj, Fit_result): -849 obs_list.append(obj) -850 else: -851 obs_list = objects -852 -853 p_values = [o.p_value for o in obs_list] +831 return err +832 +833 +834def ks_test(objects=None): +835 """Performs a Kolmogorov–Smirnov test for the p-values of all fit object. +836 +837 Parameters +838 ---------- +839 objects : list +840 List of fit results to include in the analysis (optional). 
+841 +842 Returns +843 ------- +844 None +845 """ +846 +847 if objects is None: +848 obs_list = [] +849 for obj in gc.get_objects(): +850 if isinstance(obj, Fit_result): +851 obs_list.append(obj) +852 else: +853 obs_list = objects 854 -855 bins = len(p_values) -856 x = np.arange(0, 1.001, 0.001) -857 plt.plot(x, x, 'k', zorder=1) -858 plt.xlim(0, 1) -859 plt.ylim(0, 1) -860 plt.xlabel('p-value') -861 plt.ylabel('Cumulative probability') -862 plt.title(str(bins) + ' p-values') -863 -864 n = np.arange(1, bins + 1) / np.float64(bins) -865 Xs = np.sort(p_values) -866 plt.step(Xs, n) -867 diffs = n - Xs -868 loc_max_diff = np.argmax(np.abs(diffs)) -869 loc = Xs[loc_max_diff] -870 plt.annotate('', xy=(loc, loc), xytext=(loc, loc + diffs[loc_max_diff]), arrowprops=dict(arrowstyle='<->', shrinkA=0, shrinkB=0)) -871 plt.draw() -872 -873 print(scipy.stats.kstest(p_values, 'uniform')) +855 p_values = [o.p_value for o in obs_list] +856 +857 bins = len(p_values) +858 x = np.arange(0, 1.001, 0.001) +859 plt.plot(x, x, 'k', zorder=1) +860 plt.xlim(0, 1) +861 plt.ylim(0, 1) +862 plt.xlabel('p-value') +863 plt.ylabel('Cumulative probability') +864 plt.title(str(bins) + ' p-values') +865 +866 n = np.arange(1, bins + 1) / np.float64(bins) +867 Xs = np.sort(p_values) +868 plt.step(Xs, n) +869 diffs = n - Xs +870 loc_max_diff = np.argmax(np.abs(diffs)) +871 loc = Xs[loc_max_diff] +872 plt.annotate('', xy=(loc, loc), xytext=(loc, loc + diffs[loc_max_diff]), arrowprops=dict(arrowstyle='<->', shrinkA=0, shrinkB=0)) +873 plt.draw() 874 -875 -876def _extract_val_and_dval(string): -877 split_string = string.split('(') -878 if '.' in split_string[0] and '.' not in split_string[1][:-1]: -879 factor = 10 ** -len(split_string[0].partition('.')[2]) -880 else: -881 factor = 1 -882 return float(split_string[0]), float(split_string[1][:-1]) * factor -883 -884 -885def _construct_prior_obs(i_prior, i_n): -886 if isinstance(i_prior, Obs): -887 return i_prior -888 elif isinstance(i_prior, str): -889 loc_val, loc_dval = _extract_val_and_dval(i_prior) -890 return cov_Obs(loc_val, loc_dval ** 2, '#prior' + str(i_n) + f"_{np.random.randint(2147483647):010d}") -891 else: -892 raise TypeError("Prior entries need to be 'Obs' or 'str'.") +875 print(scipy.stats.kstest(p_values, 'uniform')) +876 +877 +878def _extract_val_and_dval(string): +879 split_string = string.split('(') +880 if '.' in split_string[0] and '.' not in split_string[1][:-1]: +881 factor = 10 ** -len(split_string[0].partition('.')[2]) +882 else: +883 factor = 1 +884 return float(split_string[0]), float(split_string[1][:-1]) * factor +885 +886 +887def _construct_prior_obs(i_prior, i_n): +888 if isinstance(i_prior, Obs): +889 return i_prior +890 elif isinstance(i_prior, str): +891 loc_val, loc_dval = _extract_val_and_dval(i_prior) +892 return cov_Obs(loc_val, loc_dval ** 2, '#prior' + str(i_n) + f"_{np.random.randint(2147483647):010d}") +893 else: +894 raise TypeError("Prior entries need to be 'Obs' or 'str'.") @@ -1450,142 +1452,144 @@ Hotelling t-squared p-value for correlated fits. 
366 if (chol_inv[1] != key_ls): 367 raise ValueError('The keys of inverse covariance matrix are not the same or do not appear in the same order as the x and y values.') 368 chol_inv = chol_inv[0] -369 else: -370 corr = covariance(y_all, correlation=True, **kwargs) -371 inverrdiag = np.diag(1 / np.asarray(dy_f)) -372 chol_inv = invert_corr_cov_cholesky(corr, inverrdiag) -373 -374 def general_chisqfunc(p, ivars, pr): -375 model = anp.concatenate([anp.array(funcd[key](p, xd[key])).reshape(-1) for key in key_ls]) -376 return anp.concatenate((anp.dot(chol_inv, (ivars - model)), (p[prior_mask] - pr) / dp_f)) -377 -378 def chisqfunc(p): -379 return anp.sum(general_chisqfunc(p, y_f, p_f) ** 2) -380 else: -381 general_chisqfunc = general_chisqfunc_uncorr -382 chisqfunc = chisqfunc_uncorr -383 -384 output.method = kwargs.get('method', 'Levenberg-Marquardt') -385 if not silent: -386 print('Method:', output.method) -387 -388 if output.method != 'Levenberg-Marquardt': -389 if output.method == 'migrad': -390 tolerance = 1e-4 # default value of 1e-1 set by iminuit can be problematic -391 if 'tol' in kwargs: -392 tolerance = kwargs.get('tol') -393 fit_result = iminuit.minimize(chisqfunc_uncorr, x0, tol=tolerance) # Stopping criterion 0.002 * tol * errordef -394 if kwargs.get('correlated_fit') is True: -395 fit_result = iminuit.minimize(chisqfunc, fit_result.x, tol=tolerance) -396 output.iterations = fit_result.nfev -397 else: -398 tolerance = 1e-12 -399 if 'tol' in kwargs: -400 tolerance = kwargs.get('tol') -401 fit_result = scipy.optimize.minimize(chisqfunc_uncorr, x0, method=kwargs.get('method'), tol=tolerance) -402 if kwargs.get('correlated_fit') is True: -403 fit_result = scipy.optimize.minimize(chisqfunc, fit_result.x, method=kwargs.get('method'), tol=tolerance) -404 output.iterations = fit_result.nit -405 -406 chisquare = fit_result.fun +369 if np.any(np.diag(chol_inv) <= 0) or (not np.all(chol_inv == np.tril(chol_inv))): +370 raise ValueError('The inverse covariance matrix inv_chol_cov_matrix[0] has to be a lower triangular matrix constructed from a Cholesky decomposition.') +371 else: +372 corr = covariance(y_all, correlation=True, **kwargs) +373 inverrdiag = np.diag(1 / np.asarray(dy_f)) +374 chol_inv = invert_corr_cov_cholesky(corr, inverrdiag) +375 +376 def general_chisqfunc(p, ivars, pr): +377 model = anp.concatenate([anp.array(funcd[key](p, xd[key])).reshape(-1) for key in key_ls]) +378 return anp.concatenate((anp.dot(chol_inv, (ivars - model)), (p[prior_mask] - pr) / dp_f)) +379 +380 def chisqfunc(p): +381 return anp.sum(general_chisqfunc(p, y_f, p_f) ** 2) +382 else: +383 general_chisqfunc = general_chisqfunc_uncorr +384 chisqfunc = chisqfunc_uncorr +385 +386 output.method = kwargs.get('method', 'Levenberg-Marquardt') +387 if not silent: +388 print('Method:', output.method) +389 +390 if output.method != 'Levenberg-Marquardt': +391 if output.method == 'migrad': +392 tolerance = 1e-4 # default value of 1e-1 set by iminuit can be problematic +393 if 'tol' in kwargs: +394 tolerance = kwargs.get('tol') +395 fit_result = iminuit.minimize(chisqfunc_uncorr, x0, tol=tolerance) # Stopping criterion 0.002 * tol * errordef +396 if kwargs.get('correlated_fit') is True: +397 fit_result = iminuit.minimize(chisqfunc, fit_result.x, tol=tolerance) +398 output.iterations = fit_result.nfev +399 else: +400 tolerance = 1e-12 +401 if 'tol' in kwargs: +402 tolerance = kwargs.get('tol') +403 fit_result = scipy.optimize.minimize(chisqfunc_uncorr, x0, method=kwargs.get('method'), tol=tolerance) +404 if 
kwargs.get('correlated_fit') is True: +405 fit_result = scipy.optimize.minimize(chisqfunc, fit_result.x, method=kwargs.get('method'), tol=tolerance) +406 output.iterations = fit_result.nit 407 -408 else: -409 if 'tol' in kwargs: -410 print('tol cannot be set for Levenberg-Marquardt') -411 -412 def chisqfunc_residuals_uncorr(p): -413 return general_chisqfunc_uncorr(p, y_f, p_f) -414 -415 fit_result = scipy.optimize.least_squares(chisqfunc_residuals_uncorr, x0, method='lm', ftol=1e-15, gtol=1e-15, xtol=1e-15) -416 if kwargs.get('correlated_fit') is True: -417 def chisqfunc_residuals(p): -418 return general_chisqfunc(p, y_f, p_f) -419 -420 fit_result = scipy.optimize.least_squares(chisqfunc_residuals, fit_result.x, method='lm', ftol=1e-15, gtol=1e-15, xtol=1e-15) +408 chisquare = fit_result.fun +409 +410 else: +411 if 'tol' in kwargs: +412 print('tol cannot be set for Levenberg-Marquardt') +413 +414 def chisqfunc_residuals_uncorr(p): +415 return general_chisqfunc_uncorr(p, y_f, p_f) +416 +417 fit_result = scipy.optimize.least_squares(chisqfunc_residuals_uncorr, x0, method='lm', ftol=1e-15, gtol=1e-15, xtol=1e-15) +418 if kwargs.get('correlated_fit') is True: +419 def chisqfunc_residuals(p): +420 return general_chisqfunc(p, y_f, p_f) 421 -422 chisquare = np.sum(fit_result.fun ** 2) -423 assert np.isclose(chisquare, chisqfunc(fit_result.x), atol=1e-14) -424 -425 output.iterations = fit_result.nfev +422 fit_result = scipy.optimize.least_squares(chisqfunc_residuals, fit_result.x, method='lm', ftol=1e-15, gtol=1e-15, xtol=1e-15) +423 +424 chisquare = np.sum(fit_result.fun ** 2) +425 assert np.isclose(chisquare, chisqfunc(fit_result.x), atol=1e-14) 426 -427 if not fit_result.success: -428 raise Exception('The minimization procedure did not converge.') -429 -430 output.chisquare = chisquare -431 output.dof = y_all.shape[-1] - n_parms + len(loc_priors) -432 output.p_value = 1 - scipy.stats.chi2.cdf(output.chisquare, output.dof) -433 if output.dof > 0: -434 output.chisquare_by_dof = output.chisquare / output.dof -435 else: -436 output.chisquare_by_dof = float('nan') -437 -438 output.message = fit_result.message -439 if not silent: -440 print(fit_result.message) -441 print('chisquare/d.o.f.:', output.chisquare_by_dof) -442 print('fit parameters', fit_result.x) -443 -444 def prepare_hat_matrix(): -445 hat_vector = [] -446 for key in key_ls: -447 if (len(xd[key]) != 0): -448 hat_vector.append(jacobian(funcd[key])(fit_result.x, xd[key])) -449 hat_vector = [item for sublist in hat_vector for item in sublist] -450 return hat_vector -451 -452 if kwargs.get('expected_chisquare') is True: -453 if kwargs.get('correlated_fit') is not True: -454 W = np.diag(1 / np.asarray(dy_f)) -455 cov = covariance(y_all) -456 hat_vector = prepare_hat_matrix() -457 A = W @ hat_vector -458 P_phi = A @ np.linalg.pinv(A.T @ A) @ A.T -459 expected_chisquare = np.trace((np.identity(y_all.shape[-1]) - P_phi) @ W @ cov @ W) -460 output.chisquare_by_expected_chisquare = output.chisquare / expected_chisquare -461 if not silent: -462 print('chisquare/expected_chisquare:', output.chisquare_by_expected_chisquare) -463 -464 fitp = fit_result.x +427 output.iterations = fit_result.nfev +428 +429 if not fit_result.success: +430 raise Exception('The minimization procedure did not converge.') +431 +432 output.chisquare = chisquare +433 output.dof = y_all.shape[-1] - n_parms + len(loc_priors) +434 output.p_value = 1 - scipy.stats.chi2.cdf(output.chisquare, output.dof) +435 if output.dof > 0: +436 output.chisquare_by_dof = output.chisquare / 
output.dof +437 else: +438 output.chisquare_by_dof = float('nan') +439 +440 output.message = fit_result.message +441 if not silent: +442 print(fit_result.message) +443 print('chisquare/d.o.f.:', output.chisquare_by_dof) +444 print('fit parameters', fit_result.x) +445 +446 def prepare_hat_matrix(): +447 hat_vector = [] +448 for key in key_ls: +449 if (len(xd[key]) != 0): +450 hat_vector.append(jacobian(funcd[key])(fit_result.x, xd[key])) +451 hat_vector = [item for sublist in hat_vector for item in sublist] +452 return hat_vector +453 +454 if kwargs.get('expected_chisquare') is True: +455 if kwargs.get('correlated_fit') is not True: +456 W = np.diag(1 / np.asarray(dy_f)) +457 cov = covariance(y_all) +458 hat_vector = prepare_hat_matrix() +459 A = W @ hat_vector +460 P_phi = A @ np.linalg.pinv(A.T @ A) @ A.T +461 expected_chisquare = np.trace((np.identity(y_all.shape[-1]) - P_phi) @ W @ cov @ W) +462 output.chisquare_by_expected_chisquare = output.chisquare / expected_chisquare +463 if not silent: +464 print('chisquare/expected_chisquare:', output.chisquare_by_expected_chisquare) 465 -466 try: -467 hess = hessian(chisqfunc)(fitp) -468 except TypeError: -469 raise Exception("It is required to use autograd.numpy instead of numpy within fit functions, see the documentation for details.") from None -470 -471 len_y = len(y_f) +466 fitp = fit_result.x +467 +468 try: +469 hess = hessian(chisqfunc)(fitp) +470 except TypeError: +471 raise Exception("It is required to use autograd.numpy instead of numpy within fit functions, see the documentation for details.") from None 472 -473 def chisqfunc_compact(d): -474 return anp.sum(general_chisqfunc(d[:n_parms], d[n_parms: n_parms + len_y], d[n_parms + len_y:]) ** 2) -475 -476 jac_jac_y = hessian(chisqfunc_compact)(np.concatenate((fitp, y_f, p_f))) +473 len_y = len(y_f) +474 +475 def chisqfunc_compact(d): +476 return anp.sum(general_chisqfunc(d[:n_parms], d[n_parms: n_parms + len_y], d[n_parms + len_y:]) ** 2) 477 -478 # Compute hess^{-1} @ jac_jac_y[:n_parms + m, n_parms + m:] using LAPACK dgesv -479 try: -480 deriv_y = -scipy.linalg.solve(hess, jac_jac_y[:n_parms, n_parms:]) -481 except np.linalg.LinAlgError: -482 raise Exception("Cannot invert hessian matrix.") -483 -484 result = [] -485 for i in range(n_parms): -486 result.append(derived_observable(lambda x_all, **kwargs: (x_all[0] + np.finfo(np.float64).eps) / (y_all[0].value + np.finfo(np.float64).eps) * fitp[i], list(y_all) + loc_priors, man_grad=list(deriv_y[i]))) -487 -488 output.fit_parameters = result +478 jac_jac_y = hessian(chisqfunc_compact)(np.concatenate((fitp, y_f, p_f))) +479 +480 # Compute hess^{-1} @ jac_jac_y[:n_parms + m, n_parms + m:] using LAPACK dgesv +481 try: +482 deriv_y = -scipy.linalg.solve(hess, jac_jac_y[:n_parms, n_parms:]) +483 except np.linalg.LinAlgError: +484 raise Exception("Cannot invert hessian matrix.") +485 +486 result = [] +487 for i in range(n_parms): +488 result.append(derived_observable(lambda x_all, **kwargs: (x_all[0] + np.finfo(np.float64).eps) / (y_all[0].value + np.finfo(np.float64).eps) * fitp[i], list(y_all) + loc_priors, man_grad=list(deriv_y[i]))) 489 -490 # Hotelling t-squared p-value for correlated fits. 
-491 if kwargs.get('correlated_fit') is True: -492 n_cov = np.min(np.vectorize(lambda x_all: x_all.N)(y_all)) -493 output.t2_p_value = 1 - scipy.stats.f.cdf((n_cov - output.dof) / (output.dof * (n_cov - 1)) * output.chisquare, -494 output.dof, n_cov - output.dof) -495 -496 if kwargs.get('resplot') is True: -497 for key in key_ls: -498 residual_plot(xd[key], yd[key], funcd[key], result, title=key) -499 -500 if kwargs.get('qqplot') is True: -501 for key in key_ls: -502 qqplot(xd[key], yd[key], funcd[key], result, title=key) -503 -504 return output +490 output.fit_parameters = result +491 +492 # Hotelling t-squared p-value for correlated fits. +493 if kwargs.get('correlated_fit') is True: +494 n_cov = np.min(np.vectorize(lambda x_all: x_all.N)(y_all)) +495 output.t2_p_value = 1 - scipy.stats.f.cdf((n_cov - output.dof) / (output.dof * (n_cov - 1)) * output.chisquare, +496 output.dof, n_cov - output.dof) +497 +498 if kwargs.get('resplot') is True: +499 for key in key_ls: +500 residual_plot(xd[key], yd[key], funcd[key], result, title=key) +501 +502 if kwargs.get('qqplot') is True: +503 for key in key_ls: +504 qqplot(xd[key], yd[key], funcd[key], result, title=key) +505 +506 return output @@ -1762,208 +1766,208 @@ Parameters and information on the fitted result. -
507def total_least_squares(x, y, func, silent=False, **kwargs):
-508    r'''Performs a non-linear fit to y = func(x) and returns a list of Obs corresponding to the fit parameters.
-509
-510    Parameters
-511    ----------
-512    x : list
-513        list of Obs, or a tuple of lists of Obs
-514    y : list
-515        list of Obs. The dvalues of the x and y Obs are used as x- and y-errors for the fit.
-516    func : object
-517        func has to be of the form
-518
-519        ```python
-520        import autograd.numpy as anp
-521
-522        def func(a, x):
-523            return a[0] + a[1] * x + a[2] * anp.sinh(x)
-524        ```
-525
-526        For multiple x values func can be of the form
+509def total_least_squares(x, y, func, silent=False, **kwargs):
+510    r'''Performs a non-linear fit to y = func(x) and returns a list of Obs corresponding to the fit parameters.
+511
+512    Parameters
+513    ----------
+514    x : list
+515        list of Obs, or a tuple of lists of Obs
+516    y : list
+517        list of Obs. The dvalues of the x and y Obs are used as x- and y-errors for the fit.
+518    func : object
+519        func has to be of the form
+520
+521        ```python
+522        import autograd.numpy as anp
+523
+524        def func(a, x):
+525            return a[0] + a[1] * x + a[2] * anp.sinh(x)
+526        ```
 527
-528        ```python
-529        def func(a, x):
-530            (x1, x2) = x
-531            return a[0] * x1 ** 2 + a[1] * x2
-532        ```
-533
-534        It is important that all numpy functions refer to autograd.numpy, otherwise the differentiation
-535        will not work.
-536    silent : bool, optional
-537        If true all output to the console is omitted (default False).
-538    initial_guess : list
-539        can provide an initial guess for the input parameters. Relevant for non-linear
-540        fits with many parameters.
-541    expected_chisquare : bool
-542        If true prints the expected chisquare which is
-543        corrected by effects caused by correlated input data.
-544        This can take a while as the full correlation matrix
-545        has to be calculated (default False).
-546    num_grad : bool
-547        Use numerical differentiation instead of automatic differentiation to perform the error propagation (default False).
-548
-549    Notes
-550    -----
-551    Based on the orthogonal distance regression module of scipy.
-552
-553    Returns
-554    -------
-555    output : Fit_result
-556        Parameters and information on the fitted result.
-557    '''
-558
-559    output = Fit_result()
+528        For multiple x values func can be of the form
+529
+530        ```python
+531        def func(a, x):
+532            (x1, x2) = x
+533            return a[0] * x1 ** 2 + a[1] * x2
+534        ```
+535
+536        It is important that all numpy functions refer to autograd.numpy, otherwise the differentiation
+537        will not work.
+538    silent : bool, optional
+539        If true all output to the console is omitted (default False).
+540    initial_guess : list
+541        can provide an initial guess for the input parameters. Relevant for non-linear
+542        fits with many parameters.
+543    expected_chisquare : bool
+544        If true prints the expected chisquare which is
+545        corrected by effects caused by correlated input data.
+546        This can take a while as the full correlation matrix
+547        has to be calculated (default False).
+548    num_grad : bool
+549        Use numerical differentiation instead of automatic differentiation to perform the error propagation (default False).
+550
+551    Notes
+552    -----
+553    Based on the orthogonal distance regression module of scipy.
+554
+555    Returns
+556    -------
+557    output : Fit_result
+558        Parameters and information on the fitted result.
+559    '''
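A minimal usage sketch for `total_least_squares` (not part of the generated listing above), assuming `pyerrors` is imported as `pe`; the `pseudo_Obs` toy data and the `initial_guess` values are purely illustrative:

```python
import pyerrors as pe


def func(a, x):
    # plain arithmetic works with autograd; any numpy calls inside func
    # would have to come from autograd.numpy, as noted in the docstring
    return a[0] + a[1] * x


# toy observables with artificial errors on both x and y (illustration only)
xs = [1.0, 2.0, 3.0, 4.0]
x = [pe.pseudo_Obs(v, 0.02, 'toy_ensemble') for v in xs]
y = [pe.pseudo_Obs(0.5 + 0.3 * v, 0.05, 'toy_ensemble') for v in xs]
for o in x + y:
    o.gamma_method()  # x and y errors have to be available before fitting

fit_res = pe.fits.total_least_squares(x, y, func, initial_guess=[0.4, 0.2])
print(fit_res.fit_parameters)  # two Obs: intercept a[0] and slope a[1]
```

Since both x and y carry errors, the minimization is carried out with scipy's orthogonal distance regression rather than an ordinary least-squares fit, as noted in the docstring above.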
 560
-561    output.fit_function = func
+561    output = Fit_result()
 562
-563    x = np.array(x)
+563    output.fit_function = func
 564
-565    x_shape = x.shape
+565    x = np.array(x)
 566
-567    if kwargs.get('num_grad') is True:
-568        jacobian = num_jacobian
-569        hessian = num_hessian
-570    else:
-571        jacobian = auto_jacobian
-572        hessian = auto_hessian
-573
-574    if not callable(func):
-575        raise TypeError('func has to be a function.')
-576
-577    for i in range(42):
-578        try:
-579            func(np.arange(i), x.T[0])
-580        except TypeError:
-581            continue
-582        except IndexError:
+567    x_shape = x.shape
+568
+569    if kwargs.get('num_grad') is True:
+570        jacobian = num_jacobian
+571        hessian = num_hessian
+572    else:
+573        jacobian = auto_jacobian
+574        hessian = auto_hessian
+575
+576    if not callable(func):
+577        raise TypeError('func has to be a function.')
+578
+579    for i in range(42):
+580        try:
+581            func(np.arange(i), x.T[0])
+582        except TypeError:
 583            continue
-584        else:
-585            break
-586    else:
-587        raise RuntimeError("Fit function is not valid.")
-588
-589    n_parms = i
-590    if not silent:
-591        print('Fit with', n_parms, 'parameter' + 's' * (n_parms > 1))
-592
-593    x_f = np.vectorize(lambda o: o.value)(x)
-594    dx_f = np.vectorize(lambda o: o.dvalue)(x)
-595    y_f = np.array([o.value for o in y])
-596    dy_f = np.array([o.dvalue for o in y])
-597
-598    if np.any(np.asarray(dx_f) <= 0.0):
-599        raise Exception('No x errors available, run the gamma method first.')
-600
-601    if np.any(np.asarray(dy_f) <= 0.0):
-602        raise Exception('No y errors available, run the gamma method first.')
-603
-604    if 'initial_guess' in kwargs:
-605        x0 = kwargs.get('initial_guess')
-606        if len(x0) != n_parms:
-607            raise Exception('Initial guess does not have the correct length: %d vs. %d' % (len(x0), n_parms))
-608    else:
-609        x0 = [1] * n_parms
-610
-611    data = RealData(x_f, y_f, sx=dx_f, sy=dy_f)
-612    model = Model(func)
-613    odr = ODR(data, model, x0, partol=np.finfo(np.float64).eps)
-614    odr.set_job(fit_type=0, deriv=1)
-615    out = odr.run()
-616
-617    output.residual_variance = out.res_var
+584        except IndexError:
+585            continue
+586        else:
+587            break
+588    else:
+589        raise RuntimeError("Fit function is not valid.")
+590
+591    n_parms = i
+592    if not silent:
+593        print('Fit with', n_parms, 'parameter' + 's' * (n_parms > 1))
+594
+595    x_f = np.vectorize(lambda o: o.value)(x)
+596    dx_f = np.vectorize(lambda o: o.dvalue)(x)
+597    y_f = np.array([o.value for o in y])
+598    dy_f = np.array([o.dvalue for o in y])
+599
+600    if np.any(np.asarray(dx_f) <= 0.0):
+601        raise Exception('No x errors available, run the gamma method first.')
+602
+603    if np.any(np.asarray(dy_f) <= 0.0):
+604        raise Exception('No y errors available, run the gamma method first.')
+605
+606    if 'initial_guess' in kwargs:
+607        x0 = kwargs.get('initial_guess')
+608        if len(x0) != n_parms:
+609            raise Exception('Initial guess does not have the correct length: %d vs. %d' % (len(x0), n_parms))
+610    else:
+611        x0 = [1] * n_parms
+612
+613    data = RealData(x_f, y_f, sx=dx_f, sy=dy_f)
+614    model = Model(func)
+615    odr = ODR(data, model, x0, partol=np.finfo(np.float64).eps)
+616    odr.set_job(fit_type=0, deriv=1)
+617    out = odr.run()
 618
-619    output.method = 'ODR'
+619    output.residual_variance = out.res_var
 620
-621    output.message = out.stopreason
+621    output.method = 'ODR'
 622
-623    output.xplus = out.xplus
+623    output.message = out.stopreason
 624
-625    if not silent:
-626        print('Method: ODR')
-627        print(*out.stopreason)
-628        print('Residual variance:', output.residual_variance)
-629
-630    if out.info > 3:
-631        raise Exception('The minimization procedure did not converge.')
-632
-633    m = x_f.size
+625    output.xplus = out.xplus
+626
+627    if not silent:
+628        print('Method: ODR')
+629        print(*out.stopreason)
+630        print('Residual variance:', output.residual_variance)
+631
+632    if out.info > 3:
+633        raise Exception('The minimization procedure did not converge.')
 634
-635    def odr_chisquare(p):
-636        model = func(p[:n_parms], p[n_parms:].reshape(x_shape))
-637        chisq = anp.sum(((y_f - model) / dy_f) ** 2) + anp.sum(((x_f - p[n_parms:].reshape(x_shape)) / dx_f) ** 2)
-638        return chisq
-639
-640    if kwargs.get('expected_chisquare') is True:
-641        W = np.diag(1 / np.asarray(np.concatenate((dy_f.ravel(), dx_f.ravel()))))
-642
-643        if kwargs.get('covariance') is not None:
-644            cov = kwargs.get('covariance')
-645        else:
-646            cov = covariance(np.concatenate((y, x.ravel())))
-647
-648        number_of_x_parameters = int(m / x_f.shape[-1])
+635    m = x_f.size
+636
+637    def odr_chisquare(p):
+638        model = func(p[:n_parms], p[n_parms:].reshape(x_shape))
+639        chisq = anp.sum(((y_f - model) / dy_f) ** 2) + anp.sum(((x_f - p[n_parms:].reshape(x_shape)) / dx_f) ** 2)
+640        return chisq
+641
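Written out, the objective reconstructed here (with the shifted positions x' that ODR optimizes alongside the parameters p) is the usual total-least-squares chi-square:

$$\chi^2(p, x') = \sum_i \left( \frac{y_i - f(p, x'_i)}{\Delta y_i} \right)^2 + \sum_i \left( \frac{x_i - x'_i}{\Delta x_i} \right)^2$$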
+642    if kwargs.get('expected_chisquare') is True:
+643        W = np.diag(1 / np.asarray(np.concatenate((dy_f.ravel(), dx_f.ravel()))))
+644
+645        if kwargs.get('covariance') is not None:
+646            cov = kwargs.get('covariance')
+647        else:
+648            cov = covariance(np.concatenate((y, x.ravel())))
 649
-650        old_jac = jacobian(func)(out.beta, out.xplus)
-651        fused_row1 = np.concatenate((old_jac, np.concatenate((number_of_x_parameters * [np.zeros(old_jac.shape)]), axis=0)))
-652        fused_row2 = np.concatenate((jacobian(lambda x, y: func(y, x))(out.xplus, out.beta).reshape(x_f.shape[-1], x_f.shape[-1] * number_of_x_parameters), np.identity(number_of_x_parameters * old_jac.shape[0])))
-653        new_jac = np.concatenate((fused_row1, fused_row2), axis=1)
-654
-655        A = W @ new_jac
-656        P_phi = A @ np.linalg.pinv(A.T @ A) @ A.T
-657        expected_chisquare = np.trace((np.identity(P_phi.shape[0]) - P_phi) @ W @ cov @ W)
-658        if expected_chisquare <= 0.0:
-659            warnings.warn("Negative expected_chisquare.", RuntimeWarning)
-660            expected_chisquare = np.abs(expected_chisquare)
-661        output.chisquare_by_expected_chisquare = odr_chisquare(np.concatenate((out.beta, out.xplus.ravel()))) / expected_chisquare
-662        if not silent:
-663            print('chisquare/expected_chisquare:',
-664                  output.chisquare_by_expected_chisquare)
-665
-666    fitp = out.beta
-667    try:
-668        hess = hessian(odr_chisquare)(np.concatenate((fitp, out.xplus.ravel())))
-669    except TypeError:
-670        raise Exception("It is required to use autograd.numpy instead of numpy within fit functions, see the documentation for details.") from None
-671
-672    def odr_chisquare_compact_x(d):
-673        model = func(d[:n_parms], d[n_parms:n_parms + m].reshape(x_shape))
-674        chisq = anp.sum(((y_f - model) / dy_f) ** 2) + anp.sum(((d[n_parms + m:].reshape(x_shape) - d[n_parms:n_parms + m].reshape(x_shape)) / dx_f) ** 2)
-675        return chisq
-676
-677    jac_jac_x = hessian(odr_chisquare_compact_x)(np.concatenate((fitp, out.xplus.ravel(), x_f.ravel())))
+650        number_of_x_parameters = int(m / x_f.shape[-1])
+651
+652        old_jac = jacobian(func)(out.beta, out.xplus)
+653        fused_row1 = np.concatenate((old_jac, np.concatenate((number_of_x_parameters * [np.zeros(old_jac.shape)]), axis=0)))
+654        fused_row2 = np.concatenate((jacobian(lambda x, y: func(y, x))(out.xplus, out.beta).reshape(x_f.shape[-1], x_f.shape[-1] * number_of_x_parameters), np.identity(number_of_x_parameters * old_jac.shape[0])))
+655        new_jac = np.concatenate((fused_row1, fused_row2), axis=1)
+656
+657        A = W @ new_jac
+658        P_phi = A @ np.linalg.pinv(A.T @ A) @ A.T
+659        expected_chisquare = np.trace((np.identity(P_phi.shape[0]) - P_phi) @ W @ cov @ W)
+660        if expected_chisquare <= 0.0:
+661            warnings.warn("Negative expected_chisquare.", RuntimeWarning)
+662            expected_chisquare = np.abs(expected_chisquare)
+663        output.chisquare_by_expected_chisquare = odr_chisquare(np.concatenate((out.beta, out.xplus.ravel()))) / expected_chisquare
+664        if not silent:
+665            print('chisquare/expected_chisquare:',
+666                  output.chisquare_by_expected_chisquare)
+667
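In matrix notation the quantity computed in this branch reads (W is the diagonal matrix of inverse errors, C the covariance matrix of the concatenated (y, x) data and J the combined Jacobian assembled above; a plus denotes the pseudo-inverse):

$$\langle \chi^2 \rangle = \operatorname{tr}\!\left[ \left( \mathbb{1} - P_\phi \right) W\, C\, W \right], \qquad P_\phi = A \left( A^{\mathsf{T}} A \right)^{+} A^{\mathsf{T}}, \quad A = W J .$$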
+668    fitp = out.beta
+669    try:
+670        hess = hessian(odr_chisquare)(np.concatenate((fitp, out.xplus.ravel())))
+671    except TypeError:
+672        raise Exception("It is required to use autograd.numpy instead of numpy within fit functions, see the documentation for details.") from None
+673
+674    def odr_chisquare_compact_x(d):
+675        model = func(d[:n_parms], d[n_parms:n_parms + m].reshape(x_shape))
+676        chisq = anp.sum(((y_f - model) / dy_f) ** 2) + anp.sum(((d[n_parms + m:].reshape(x_shape) - d[n_parms:n_parms + m].reshape(x_shape)) / dx_f) ** 2)
+677        return chisq
 678
-679    # Compute hess^{-1} @ jac_jac_x[:n_parms + m, n_parms + m:] using LAPACK dgesv
-680    try:
-681        deriv_x = -scipy.linalg.solve(hess, jac_jac_x[:n_parms + m, n_parms + m:])
-682    except np.linalg.LinAlgError:
-683        raise Exception("Cannot invert hessian matrix.")
-684
-685    def odr_chisquare_compact_y(d):
-686        model = func(d[:n_parms], d[n_parms:n_parms + m].reshape(x_shape))
-687        chisq = anp.sum(((d[n_parms + m:] - model) / dy_f) ** 2) + anp.sum(((x_f - d[n_parms:n_parms + m].reshape(x_shape)) / dx_f) ** 2)
-688        return chisq
-689
-690    jac_jac_y = hessian(odr_chisquare_compact_y)(np.concatenate((fitp, out.xplus.ravel(), y_f)))
+679    jac_jac_x = hessian(odr_chisquare_compact_x)(np.concatenate((fitp, out.xplus.ravel(), x_f.ravel())))
+680
+681    # Compute hess^{-1} @ jac_jac_x[:n_parms + m, n_parms + m:] using LAPACK dgesv
+682    try:
+683        deriv_x = -scipy.linalg.solve(hess, jac_jac_x[:n_parms + m, n_parms + m:])
+684    except np.linalg.LinAlgError:
+685        raise Exception("Cannot invert hessian matrix.")
+686
+687    def odr_chisquare_compact_y(d):
+688        model = func(d[:n_parms], d[n_parms:n_parms + m].reshape(x_shape))
+689        chisq = anp.sum(((d[n_parms + m:] - model) / dy_f) ** 2) + anp.sum(((x_f - d[n_parms:n_parms + m].reshape(x_shape)) / dx_f) ** 2)
+690        return chisq
 691
-692    # Compute hess^{-1} @ jac_jac_y[:n_parms + m, n_parms + m:] using LAPACK dgesv
-693    try:
-694        deriv_y = -scipy.linalg.solve(hess, jac_jac_y[:n_parms + m, n_parms + m:])
-695    except np.linalg.LinAlgError:
-696        raise Exception("Cannot invert hessian matrix.")
-697
-698    result = []
-699    for i in range(n_parms):
-700        result.append(derived_observable(lambda my_var, **kwargs: (my_var[0] + np.finfo(np.float64).eps) / (x.ravel()[0].value + np.finfo(np.float64).eps) * out.beta[i], list(x.ravel()) + list(y), man_grad=list(deriv_x[i]) + list(deriv_y[i])))
-701
-702    output.fit_parameters = result
+692    jac_jac_y = hessian(odr_chisquare_compact_y)(np.concatenate((fitp, out.xplus.ravel(), y_f)))
+693
+694    # Compute hess^{-1} @ jac_jac_y[:n_parms + m, n_parms + m:] using LAPACK dgesv
+695    try:
+696        deriv_y = -scipy.linalg.solve(hess, jac_jac_y[:n_parms + m, n_parms + m:])
+697    except np.linalg.LinAlgError:
+698        raise Exception("Cannot invert hessian matrix.")
+699
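The two linear solves above correspond to the standard propagation of data fluctuations into the fit parameters: schematically, with H the Hessian of the chi-square at the minimum and p standing for the concatenated vector of fit parameters and optimized x positions,

$$\frac{\partial \hat{p}}{\partial x} = - H^{-1} \left. \frac{\partial^2 \chi^2}{\partial p\, \partial x} \right|_{\hat{p}}, \qquad \frac{\partial \hat{p}}{\partial y} = - H^{-1} \left. \frac{\partial^2 \chi^2}{\partial p\, \partial y} \right|_{\hat{p}},$$

and these derivatives are handed to derived_observable as manual gradients in the loop below.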
+700    result = []
+701    for i in range(n_parms):
+702        result.append(derived_observable(lambda my_var, **kwargs: (my_var[0] + np.finfo(np.float64).eps) / (x.ravel()[0].value + np.finfo(np.float64).eps) * out.beta[i], list(x.ravel()) + list(y), man_grad=list(deriv_x[i]) + list(deriv_y[i])))
 703
-704    output.odr_chisquare = odr_chisquare(np.concatenate((out.beta, out.xplus.ravel())))
-705    output.dof = x.shape[-1] - n_parms
-706    output.p_value = 1 - scipy.stats.chi2.cdf(output.odr_chisquare, output.dof)
-707
-708    return output
+704    output.fit_parameters = result
+705
+706    output.odr_chisquare = odr_chisquare(np.concatenate((out.beta, out.xplus.ravel())))
+707    output.dof = x.shape[-1] - n_parms
+708    output.p_value = 1 - scipy.stats.chi2.cdf(output.odr_chisquare, output.dof)
+709
+710    return output
 
@@ -2037,35 +2041,35 @@ Parameters and information on the fitted result.
-
711def fit_lin(x, y, **kwargs):
-712    """Performs a linear fit to y = n + m * x and returns two Obs n, m.
-713
-714    Parameters
-715    ----------
-716    x : list
-717        Can either be a list of floats in which case no xerror is assumed, or
-718        a list of Obs, where the dvalues of the Obs are used as xerror for the fit.
-719    y : list
-720        List of Obs, the dvalues of the Obs are used as yerror for the fit.
-721
-722    Returns
-723    -------
-724    fit_parameters : list[Obs]
-725        LIist of fitted observables.
-726    """
-727
-728    def f(a, x):
-729        y = a[0] + a[1] * x
-730        return y
-731
-732    if all(isinstance(n, Obs) for n in x):
-733        out = total_least_squares(x, y, f, **kwargs)
-734        return out.fit_parameters
-735    elif all(isinstance(n, float) or isinstance(n, int) for n in x) or isinstance(x, np.ndarray):
-736        out = least_squares(x, y, f, **kwargs)
-737        return out.fit_parameters
-738    else:
-739        raise TypeError('Unsupported types for x')
+            
713def fit_lin(x, y, **kwargs):
+714    """Performs a linear fit to y = n + m * x and returns two Obs n, m.
+715
+716    Parameters
+717    ----------
+718    x : list
+719        Can either be a list of floats in which case no xerror is assumed, or
+720        a list of Obs, where the dvalues of the Obs are used as xerror for the fit.
+721    y : list
+722        List of Obs, the dvalues of the Obs are used as yerror for the fit.
+723
+724    Returns
+725    -------
+726    fit_parameters : list[Obs]
+727        List of fitted observables.
+728    """
+729
+730    def f(a, x):
+731        y = a[0] + a[1] * x
+732        return y
+733
+734    if all(isinstance(n, Obs) for n in x):
+735        out = total_least_squares(x, y, f, **kwargs)
+736        return out.fit_parameters
+737    elif all(isinstance(n, float) or isinstance(n, int) for n in x) or isinstance(x, np.ndarray):
+738        out = least_squares(x, y, f, **kwargs)
+739        return out.fit_parameters
+740    else:
+741        raise TypeError('Unsupported types for x')
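A brief, hypothetical usage sketch of `fit_lin` (the ensemble name `'lin_ens'` and the data values are made up):

```python
import pyerrors as pe

# Plain-float x (no x-error) routes to least_squares; a list of Obs x would use total_least_squares.
x = [1.0, 2.0, 3.0, 4.0]
y = [pe.pseudo_Obs(0.9 + 0.5 * xi, 0.05, 'lin_ens') for xi in x]
[o.gamma_method() for o in y]

n, m = pe.fits.fit_lin(x, y)  # intercept n and slope m as Obs
```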
 
@@ -2102,34 +2106,34 @@ LIist of fitted observables.
-
742def qqplot(x, o_y, func, p, title=""):
-743    """Generates a quantile-quantile plot of the fit result which can be used to
-744       check if the residuals of the fit are gaussian distributed.
-745
-746    Returns
-747    -------
-748    None
-749    """
-750
-751    residuals = []
-752    for i_x, i_y in zip(x, o_y):
-753        residuals.append((i_y - func(p, i_x)) / i_y.dvalue)
-754    residuals = sorted(residuals)
-755    my_y = [o.value for o in residuals]
-756    probplot = scipy.stats.probplot(my_y)
-757    my_x = probplot[0][0]
-758    plt.figure(figsize=(8, 8 / 1.618))
-759    plt.errorbar(my_x, my_y, fmt='o')
-760    fit_start = my_x[0]
-761    fit_stop = my_x[-1]
-762    samples = np.arange(fit_start, fit_stop, 0.01)
-763    plt.plot(samples, samples, 'k--', zorder=11, label='Standard normal distribution')
-764    plt.plot(samples, probplot[1][0] * samples + probplot[1][1], zorder=10, label='Least squares fit, r=' + str(np.around(probplot[1][2], 3)), marker='', ls='-')
-765
-766    plt.xlabel('Theoretical quantiles')
-767    plt.ylabel('Ordered Values')
-768    plt.legend(title=title)
-769    plt.draw()
+            
744def qqplot(x, o_y, func, p, title=""):
+745    """Generates a quantile-quantile plot of the fit result which can be used to
+746       check whether the residuals of the fit are Gaussian distributed.
+747
+748    Returns
+749    -------
+750    None
+751    """
+752
+753    residuals = []
+754    for i_x, i_y in zip(x, o_y):
+755        residuals.append((i_y - func(p, i_x)) / i_y.dvalue)
+756    residuals = sorted(residuals)
+757    my_y = [o.value for o in residuals]
+758    probplot = scipy.stats.probplot(my_y)
+759    my_x = probplot[0][0]
+760    plt.figure(figsize=(8, 8 / 1.618))
+761    plt.errorbar(my_x, my_y, fmt='o')
+762    fit_start = my_x[0]
+763    fit_stop = my_x[-1]
+764    samples = np.arange(fit_start, fit_stop, 0.01)
+765    plt.plot(samples, samples, 'k--', zorder=11, label='Standard normal distribution')
+766    plt.plot(samples, probplot[1][0] * samples + probplot[1][1], zorder=10, label='Least squares fit, r=' + str(np.around(probplot[1][2], 3)), marker='', ls='-')
+767
+768    plt.xlabel('Theoretical quantiles')
+769    plt.ylabel('Ordered Values')
+770    plt.legend(title=title)
+771    plt.draw()
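A hedged usage sketch of `qqplot`; `x`, `y` and `func` are assumed to be set up as in the fit examples above, and the parameter argument is the list of fitted Obs:

```python
import pyerrors as pe

# Assumes x, y and func as in the earlier sketches.
fit_res = pe.fits.least_squares(x, y, func)
pe.fits.qqplot(x, y, func, fit_res.fit_parameters, title='Q-Q plot of fit residuals')
```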
 
@@ -2156,41 +2160,41 @@ LIist of fitted observables.
-
772def residual_plot(x, y, func, fit_res, title=""):
-773    """Generates a plot which compares the fit to the data and displays the corresponding residuals
-774
-775    For uncorrelated data the residuals are expected to be distributed ~N(0,1).
+            
774def residual_plot(x, y, func, fit_res, title=""):
+775    """Generates a plot which compares the fit to the data and displays the corresponding residuals
 776
-777    Returns
-778    -------
-779    None
-780    """
-781    sorted_x = sorted(x)
-782    xstart = sorted_x[0] - 0.5 * (sorted_x[1] - sorted_x[0])
-783    xstop = sorted_x[-1] + 0.5 * (sorted_x[-1] - sorted_x[-2])
-784    x_samples = np.arange(xstart, xstop + 0.01, 0.01)
-785
-786    plt.figure(figsize=(8, 8 / 1.618))
-787    gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1], wspace=0.0, hspace=0.0)
-788    ax0 = plt.subplot(gs[0])
-789    ax0.errorbar(x, [o.value for o in y], yerr=[o.dvalue for o in y], ls='none', fmt='o', capsize=3, markersize=5, label='Data')
-790    ax0.plot(x_samples, func([o.value for o in fit_res], x_samples), label='Fit', zorder=10, ls='-', ms=0)
-791    ax0.set_xticklabels([])
-792    ax0.set_xlim([xstart, xstop])
+777    For uncorrelated data the residuals are expected to be distributed ~N(0,1).
+778
+779    Returns
+780    -------
+781    None
+782    """
+783    sorted_x = sorted(x)
+784    xstart = sorted_x[0] - 0.5 * (sorted_x[1] - sorted_x[0])
+785    xstop = sorted_x[-1] + 0.5 * (sorted_x[-1] - sorted_x[-2])
+786    x_samples = np.arange(xstart, xstop + 0.01, 0.01)
+787
+788    plt.figure(figsize=(8, 8 / 1.618))
+789    gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1], wspace=0.0, hspace=0.0)
+790    ax0 = plt.subplot(gs[0])
+791    ax0.errorbar(x, [o.value for o in y], yerr=[o.dvalue for o in y], ls='none', fmt='o', capsize=3, markersize=5, label='Data')
+792    ax0.plot(x_samples, func([o.value for o in fit_res], x_samples), label='Fit', zorder=10, ls='-', ms=0)
 793    ax0.set_xticklabels([])
-794    ax0.legend(title=title)
-795
-796    residuals = (np.asarray([o.value for o in y]) - func([o.value for o in fit_res], np.asarray(x))) / np.asarray([o.dvalue for o in y])
-797    ax1 = plt.subplot(gs[1])
-798    ax1.plot(x, residuals, 'ko', ls='none', markersize=5)
-799    ax1.tick_params(direction='out')
-800    ax1.tick_params(axis="x", bottom=True, top=True, labelbottom=True)
-801    ax1.axhline(y=0.0, ls='--', color='k', marker=" ")
-802    ax1.fill_between(x_samples, -1.0, 1.0, alpha=0.1, facecolor='k')
-803    ax1.set_xlim([xstart, xstop])
-804    ax1.set_ylabel('Residuals')
-805    plt.subplots_adjust(wspace=None, hspace=None)
-806    plt.draw()
+794    ax0.set_xlim([xstart, xstop])
+795    ax0.set_xticklabels([])
+796    ax0.legend(title=title)
+797
+798    residuals = (np.asarray([o.value for o in y]) - func([o.value for o in fit_res], np.asarray(x))) / np.asarray([o.dvalue for o in y])
+799    ax1 = plt.subplot(gs[1])
+800    ax1.plot(x, residuals, 'ko', ls='none', markersize=5)
+801    ax1.tick_params(direction='out')
+802    ax1.tick_params(axis="x", bottom=True, top=True, labelbottom=True)
+803    ax1.axhline(y=0.0, ls='--', color='k', marker=" ")
+804    ax1.fill_between(x_samples, -1.0, 1.0, alpha=0.1, facecolor='k')
+805    ax1.set_xlim([xstart, xstop])
+806    ax1.set_ylabel('Residuals')
+807    plt.subplots_adjust(wspace=None, hspace=None)
+808    plt.draw()
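Analogous to qqplot, a short sketch under the same assumptions; note that the `fit_res` argument is the list of fitted parameters (Obs), not the `Fit_result` object:

```python
import pyerrors as pe

# Assumes x, y and func as in the earlier sketches.
fit_res = pe.fits.least_squares(x, y, func)
pe.fits.residual_plot(x, y, func, fit_res.fit_parameters, title='fit vs. data')
```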
 
@@ -2218,28 +2222,28 @@ LIist of fitted observables.
-
809def error_band(x, func, beta):
-810    """Calculate the error band for an array of sample values x, for given fit function func with optimized parameters beta.
-811
-812    Returns
-813    -------
-814    err : np.array(Obs)
-815        Error band for an array of sample values x
-816    """
-817    cov = covariance(beta)
-818    if np.any(np.abs(cov - cov.T) > 1000 * np.finfo(np.float64).eps):
-819        warnings.warn("Covariance matrix is not symmetric within floating point precision", RuntimeWarning)
-820
-821    deriv = []
-822    for i, item in enumerate(x):
-823        deriv.append(np.array(egrad(func)([o.value for o in beta], item)))
-824
-825    err = []
-826    for i, item in enumerate(x):
-827        err.append(np.sqrt(deriv[i] @ cov @ deriv[i]))
-828    err = np.array(err)
-829
-830    return err
+            
811def error_band(x, func, beta):
+812    """Calculate the error band for an array of sample values x, for a given fit function func with optimized parameters beta.
+813
+814    Returns
+815    -------
+816    err : np.array(Obs)
+817        Error band for an array of sample values x
+818    """
+819    cov = covariance(beta)
+820    if np.any(np.abs(cov - cov.T) > 1000 * np.finfo(np.float64).eps):
+821        warnings.warn("Covariance matrix is not symmetric within floating point precision", RuntimeWarning)
+822
+823    deriv = []
+824    for i, item in enumerate(x):
+825        deriv.append(np.array(egrad(func)([o.value for o in beta], item)))
+826
+827    err = []
+828    for i, item in enumerate(x):
+829        err.append(np.sqrt(deriv[i] @ cov @ deriv[i]))
+830    err = np.array(err)
+831
+832    return err
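A minimal sketch of `error_band`, assuming a completed fit as above; the grid boundaries are arbitrary:

```python
import numpy as np
import pyerrors as pe

# Assumes func and fit_res (a Fit_result) from the earlier sketches.
[p.gamma_method() for p in fit_res.fit_parameters]  # needed for the covariance of the parameters
x_grid = np.linspace(1.0, 4.0, 50)
band = pe.fits.error_band(x_grid, func, fit_res.fit_parameters)
```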
 
@@ -2266,48 +2270,48 @@ Error band for an array of sample values x
-
833def ks_test(objects=None):
-834    """Performs a Kolmogorov–Smirnov test for the p-values of all fit object.
-835
-836    Parameters
-837    ----------
-838    objects : list
-839        List of fit results to include in the analysis (optional).
-840
-841    Returns
-842    -------
-843    None
-844    """
-845
-846    if objects is None:
-847        obs_list = []
-848        for obj in gc.get_objects():
-849            if isinstance(obj, Fit_result):
-850                obs_list.append(obj)
-851    else:
-852        obs_list = objects
-853
-854    p_values = [o.p_value for o in obs_list]
+            
835def ks_test(objects=None):
+836    """Performs a Kolmogorov–Smirnov test for the p-values of all fit objects.
+837
+838    Parameters
+839    ----------
+840    objects : list
+841        List of fit results to include in the analysis (optional).
+842
+843    Returns
+844    -------
+845    None
+846    """
+847
+848    if objects is None:
+849        obs_list = []
+850        for obj in gc.get_objects():
+851            if isinstance(obj, Fit_result):
+852                obs_list.append(obj)
+853    else:
+854        obs_list = objects
 855
-856    bins = len(p_values)
-857    x = np.arange(0, 1.001, 0.001)
-858    plt.plot(x, x, 'k', zorder=1)
-859    plt.xlim(0, 1)
-860    plt.ylim(0, 1)
-861    plt.xlabel('p-value')
-862    plt.ylabel('Cumulative probability')
-863    plt.title(str(bins) + ' p-values')
-864
-865    n = np.arange(1, bins + 1) / np.float64(bins)
-866    Xs = np.sort(p_values)
-867    plt.step(Xs, n)
-868    diffs = n - Xs
-869    loc_max_diff = np.argmax(np.abs(diffs))
-870    loc = Xs[loc_max_diff]
-871    plt.annotate('', xy=(loc, loc), xytext=(loc, loc + diffs[loc_max_diff]), arrowprops=dict(arrowstyle='<->', shrinkA=0, shrinkB=0))
-872    plt.draw()
-873
-874    print(scipy.stats.kstest(p_values, 'uniform'))
+856    p_values = [o.p_value for o in obs_list]
+857
+858    bins = len(p_values)
+859    x = np.arange(0, 1.001, 0.001)
+860    plt.plot(x, x, 'k', zorder=1)
+861    plt.xlim(0, 1)
+862    plt.ylim(0, 1)
+863    plt.xlabel('p-value')
+864    plt.ylabel('Cumulative probability')
+865    plt.title(str(bins) + ' p-values')
+866
+867    n = np.arange(1, bins + 1) / np.float64(bins)
+868    Xs = np.sort(p_values)
+869    plt.step(Xs, n)
+870    diffs = n - Xs
+871    loc_max_diff = np.argmax(np.abs(diffs))
+872    loc = Xs[loc_max_diff]
+873    plt.annotate('', xy=(loc, loc), xytext=(loc, loc + diffs[loc_max_diff]), arrowprops=dict(arrowstyle='<->', shrinkA=0, shrinkB=0))
+874    plt.draw()
+875
+876    print(scipy.stats.kstest(p_values, 'uniform'))
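Two hedged invocation sketches of `ks_test`; `fit_res_1` and `fit_res_2` stand for previously obtained `Fit_result` objects with at least one degree of freedom:

```python
import pyerrors as pe

# Collect all Fit_result objects currently tracked by the garbage collector ...
pe.fits.ks_test()
# ... or restrict the test to an explicit list of fit results.
pe.fits.ks_test(objects=[fit_res_1, fit_res_2])
```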