pyerrors.fits

   1import gc
   2from collections.abc import Sequence
   3import warnings
   4import numpy as np
   5import autograd.numpy as anp
   6import scipy.optimize
   7import scipy.stats
import scipy.linalg  # solve and solve_triangular are used below
   8import matplotlib.pyplot as plt
   9from matplotlib import gridspec
  10from scipy.odr import ODR, Model, RealData
  11import iminuit
  12from autograd import jacobian as auto_jacobian
  13from autograd import hessian as auto_hessian
  14from autograd import elementwise_grad as egrad
  15from numdifftools import Jacobian as num_jacobian
  16from numdifftools import Hessian as num_hessian
  17from .obs import Obs, derived_observable, covariance, cov_Obs
  18
  19
  20class Fit_result(Sequence):
  21    """Represents fit results.
  22
  23    Attributes
  24    ----------
  25    fit_parameters : list
  26        results for the individual fit parameters,
  27        also accessible via indices.
  28    chisquare_by_dof : float
  29        reduced chisquare.
  30    p_value : float
  31        p-value of the fit
  32    t2_p_value : float
  33        Hotelling t-squared p-value for correlated fits.
  34    """
  35
  36    def __init__(self):
  37        self.fit_parameters = None
  38
  39    def __getitem__(self, idx):
  40        return self.fit_parameters[idx]
  41
  42    def __len__(self):
  43        return len(self.fit_parameters)
  44
  45    def gamma_method(self, **kwargs):
  46        """Apply the gamma method to all fit parameters"""
  47        [o.gamma_method(**kwargs) for o in self.fit_parameters]
  48
  49    gm = gamma_method
  50
  51    def __str__(self):
  52        my_str = 'Goodness of fit:\n'
  53        if hasattr(self, 'chisquare_by_dof'):
  54            my_str += '\u03C7\u00b2/d.o.f. = ' + f'{self.chisquare_by_dof:2.6f}' + '\n'
  55        elif hasattr(self, 'residual_variance'):
  56            my_str += 'residual variance = ' + f'{self.residual_variance:2.6f}' + '\n'
  57        if hasattr(self, 'chisquare_by_expected_chisquare'):
  58            my_str += '\u03C7\u00b2/\u03C7\u00b2exp  = ' + f'{self.chisquare_by_expected_chisquare:2.6f}' + '\n'
  59        if hasattr(self, 'p_value'):
  60            my_str += 'p-value   = ' + f'{self.p_value:2.4f}' + '\n'
  61        if hasattr(self, 't2_p_value'):
  62            my_str += 't\u00B2p-value = ' + f'{self.t2_p_value:2.4f}' + '\n'
  63        my_str += 'Fit parameters:\n'
  64        for i_par, par in enumerate(self.fit_parameters):
  65            my_str += str(i_par) + '\t' + ' ' * int(par >= 0) + str(par).rjust(int(par < 0.0)) + '\n'
  66        return my_str
  67
  68    def __repr__(self):
  69        m = max(map(len, list(self.__dict__.keys()))) + 1
  70        return '\n'.join([key.rjust(m) + ': ' + repr(value) for key, value in sorted(self.__dict__.items())])
  71
  72
  73def least_squares(x, y, func, priors=None, silent=False, **kwargs):
  74    r'''Performs a non-linear fit to y = func(x).
  75
  76
  77    Parameters
  78    ----------
  79    For an uncombined fit:
  80
  81    x : list
  82        list of floats.
  83    y : list
  84        list of Obs.
  85    func : object
  86        fit function, has to be of the form
  87
  88        ```python
  89        import autograd.numpy as anp
  90
  91        def func(a, x):
  92            return a[0] + a[1] * x + a[2] * anp.sinh(x)
  93        ```
  94
  95        For multiple x values func can be of the form
  96
  97        ```python
  98        def func(a, x):
  99            (x1, x2) = x
 100            return a[0] * x1 ** 2 + a[1] * x2
 101        ```
 102        It is important that all numpy functions refer to autograd.numpy, otherwise the differentiation
 103        will not work.
 104
 105    OR For a combined fit:
 106
 107    x : dict
 108        dict of lists.
 109    y : dict
 110        dict of lists of Obs.
 111    funcs : dict
 112        dict of fit functions.
 113        The fit functions have to be of the form (here a[0] is the common fit parameter):
 114        ```python
 115        import autograd.numpy as anp
 116        funcs = {"a": func_a,
 117                "b": func_b}
 118
 119        def func_a(a, x):
 120            return a[1] * anp.exp(-a[0] * x)
 121
 122        def func_b(a, x):
 123            return a[2] * anp.exp(-a[0] * x)
 124        ```
 125        It is important that all numpy functions refer to autograd.numpy, otherwise the differentiation
 126        will not work.
 127
 128    priors : list, optional
 129        priors has to be a list with an entry for every parameter in the fit. The entries can either be
 130        Obs (e.g. results from a previous fit) or strings containing a value and an error formatted like
 131        0.548(23), 500(40) or 0.5(0.4)
 132    silent : bool, optional
 133        If true all output to the console is omitted (default False).
 134    initial_guess : list
 135        can provide an initial guess for the input parameters. Relevant for
 136        non-linear fits with many parameters. In case of correlated fits the guess is used to perform
 137        an uncorrelated fit which then serves as guess for the correlated fit.
 138    method : str, optional
 139        can be used to choose an alternative method for the minimization of chisquare.
 140        The possible methods are the ones which can be used for scipy.optimize.minimize and
 141        migrad of iminuit. If no method is specified, Levenberg-Marquardt is used.
 142        Reliable alternatives are migrad, Powell and Nelder-Mead.
 143    tol: float, optional
 144        can be used (only for combined fits and methods other than Levenberg-Marquardt) to set the tolerance for convergence
 145        to a different value to either speed up convergence at the cost of a larger error on the fitted parameters (and possibly
 146        invalid estimates for parameter uncertainties) or to a smaller value to get more accurate parameter values.
 147        The stopping criterion depends on the method, e.g. migrad: edm_max = 0.002 * tol * errordef (EDM criterion: edm < edm_max).
 148    correlated_fit : bool
 149        If True, use the full inverse covariance matrix in the definition of the chisquare cost function.
 150        For details about how the covariance matrix is estimated see `pyerrors.obs.covariance`.
 151        In practice the correlation matrix is Cholesky decomposed and inverted (instead of the covariance matrix).
 152        This procedure should be numerically more stable as the correlation matrix is typically better conditioned (Jacobi preconditioning).
 153        At the moment this option only works for `priors==None` and when no `method` is given.
 154    expected_chisquare : bool
 155        If True estimates the expected chisquare which is
 156        corrected by effects caused by correlated input data (default False).
 157    resplot : bool
 158        If True, a plot which displays fit, data and residuals is generated (default False).
 159    qqplot : bool
 160        If True, a quantile-quantile plot of the fit result is generated (default False).
 161    num_grad : bool
 162        Use numerical differentiation instead of automatic differentiation to perform the error propagation (default False).
 163
 164    Returns
 165    -------
 166    output : Fit_result
 167        Parameters and information on the fitted result.
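    A minimal usage sketch for the uncombined case, assuming `pyerrors` is imported as `pe`;
    the toy data, the ensemble name and the exponential model are purely illustrative:

    ```python
    import autograd.numpy as anp
    import pyerrors as pe

    def func(a, x):
        return a[0] * anp.exp(-a[1] * x)

    x = [1., 2., 3., 4.]
    y = [pe.pseudo_Obs(anp.exp(-0.7 * xi), 0.01, 'toy_ensemble') for xi in x]
    [o.gamma_method() for o in y]  # y errors have to be available before fitting

    fit_res = pe.fits.least_squares(x, y, func)
    fit_res.gamma_method()  # propagate the errors to the fit parameters
    print(fit_res)
    ```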
 168    '''
 169    if priors is not None:
 170        return _prior_fit(x, y, func, priors, silent=silent, **kwargs)
 171
 172    elif (type(x) == dict and type(y) == dict and type(func) == dict):
 173        return _combined_fit(x, y, func, silent=silent, **kwargs)
 174    elif (type(x) == dict or type(y) == dict or type(func) == dict):
 175        raise TypeError("All arguments have to be dictionaries in order to perform a combined fit.")
 176    else:
 177        return _standard_fit(x, y, func, silent=silent, **kwargs)
 178
 179
 180def total_least_squares(x, y, func, silent=False, **kwargs):
 181    r'''Performs a non-linear fit to y = func(x) and returns a list of Obs corresponding to the fit parameters.
 182
 183    Parameters
 184    ----------
 185    x : list
 186        list of Obs, or a tuple of lists of Obs
 187    y : list
 188        list of Obs. The dvalues of the Obs are used as x- and yerror for the fit.
 189    func : object
 190        func has to be of the form
 191
 192        ```python
 193        import autograd.numpy as anp
 194
 195        def func(a, x):
 196            return a[0] + a[1] * x + a[2] * anp.sinh(x)
 197        ```
 198
 199        For multiple x values func can be of the form
 200
 201        ```python
 202        def func(a, x):
 203            (x1, x2) = x
 204            return a[0] * x1 ** 2 + a[1] * x2
 205        ```
 206
 207        It is important that all numpy functions refer to autograd.numpy, otherwise the differentiation
 208        will not work.
 209    silent : bool, optional
 210        If true all output to the console is omitted (default False).
 211    initial_guess : list
 212        can provide an initial guess for the input parameters. Relevant for non-linear
 213        fits with many parameters.
 214    expected_chisquare : bool
 215        If true prints the expected chisquare which is
 216        corrected by effects caused by correlated input data.
 217        This can take a while as the full correlation matrix
 218        has to be calculated (default False).
 219    num_grad : bool
 220        Use numerical differentiation instead of automatic differentiation to perform the error propagation (default False).
 221
 222    Notes
 223    -----
 224    Based on the orthogonal distance regression module of scipy.
 225
 226    Returns
 227    -------
 228    output : Fit_result
 229        Parameters and information on the fitted result.
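    A minimal usage sketch, assuming `pyerrors` is imported as `pe`; the toy data and the
    ensemble name are illustrative. Both x and y have to carry errors for the ODR fit:

    ```python
    import pyerrors as pe

    def func(a, x):
        return a[0] + a[1] * x

    vals = [1., 2., 3., 4.]
    ox = [pe.pseudo_Obs(v, 0.01, 'toy_ensemble') for v in vals]
    oy = [pe.pseudo_Obs(0.5 + 0.3 * v, 0.02, 'toy_ensemble') for v in vals]
    [o.gamma_method() for o in ox + oy]  # x and y errors are required

    odr_res = pe.fits.total_least_squares(ox, oy, func)
    odr_res.gamma_method()
    print(odr_res)
    ```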
 230    '''
 231
 232    output = Fit_result()
 233
 234    output.fit_function = func
 235
 236    x = np.array(x)
 237
 238    x_shape = x.shape
 239
 240    if kwargs.get('num_grad') is True:
 241        jacobian = num_jacobian
 242        hessian = num_hessian
 243    else:
 244        jacobian = auto_jacobian
 245        hessian = auto_hessian
 246
 247    if not callable(func):
 248        raise TypeError('func has to be a function.')
 249
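    # Determine the number of fit parameters empirically: call func with 0, 1, 2, ... dummy
    # parameters until the call no longer raises a TypeError or IndexError.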
 250    for i in range(42):
 251        try:
 252            func(np.arange(i), x.T[0])
 253        except TypeError:
 254            continue
 255        except IndexError:
 256            continue
 257        else:
 258            break
 259    else:
 260        raise RuntimeError("Fit function is not valid.")
 261
 262    n_parms = i
 263    if not silent:
 264        print('Fit with', n_parms, 'parameter' + 's' * (n_parms > 1))
 265
 266    x_f = np.vectorize(lambda o: o.value)(x)
 267    dx_f = np.vectorize(lambda o: o.dvalue)(x)
 268    y_f = np.array([o.value for o in y])
 269    dy_f = np.array([o.dvalue for o in y])
 270
 271    if np.any(np.asarray(dx_f) <= 0.0):
 272        raise Exception('No x errors available, run the gamma method first.')
 273
 274    if np.any(np.asarray(dy_f) <= 0.0):
 275        raise Exception('No y errors available, run the gamma method first.')
 276
 277    if 'initial_guess' in kwargs:
 278        x0 = kwargs.get('initial_guess')
 279        if len(x0) != n_parms:
 280            raise Exception('Initial guess does not have the correct length: %d vs. %d' % (len(x0), n_parms))
 281    else:
 282        x0 = [1] * n_parms
 283
 284    data = RealData(x_f, y_f, sx=dx_f, sy=dy_f)
 285    model = Model(func)
 286    odr = ODR(data, model, x0, partol=np.finfo(np.float64).eps)
 287    odr.set_job(fit_type=0, deriv=1)
 288    out = odr.run()
 289
 290    output.residual_variance = out.res_var
 291
 292    output.method = 'ODR'
 293
 294    output.message = out.stopreason
 295
 296    output.xplus = out.xplus
 297
 298    if not silent:
 299        print('Method: ODR')
 300        print(*out.stopreason)
 301        print('Residual variance:', output.residual_variance)
 302
 303    if out.info > 3:
 304        raise Exception('The minimization procedure did not converge.')
 305
 306    m = x_f.size
 307
 308    def odr_chisquare(p):
 309        model = func(p[:n_parms], p[n_parms:].reshape(x_shape))
 310        chisq = anp.sum(((y_f - model) / dy_f) ** 2) + anp.sum(((x_f - p[n_parms:].reshape(x_shape)) / dx_f) ** 2)
 311        return chisq
 312
 313    if kwargs.get('expected_chisquare') is True:
 314        W = np.diag(1 / np.asarray(np.concatenate((dy_f.ravel(), dx_f.ravel()))))
 315
 316        if kwargs.get('covariance') is not None:
 317            cov = kwargs.get('covariance')
 318        else:
 319            cov = covariance(np.concatenate((y, x.ravel())))
 320
 321        number_of_x_parameters = int(m / x_f.shape[-1])
 322
 323        old_jac = jacobian(func)(out.beta, out.xplus)
 324        fused_row1 = np.concatenate((old_jac, np.concatenate((number_of_x_parameters * [np.zeros(old_jac.shape)]), axis=0)))
 325        fused_row2 = np.concatenate((jacobian(lambda x, y: func(y, x))(out.xplus, out.beta).reshape(x_f.shape[-1], x_f.shape[-1] * number_of_x_parameters), np.identity(number_of_x_parameters * old_jac.shape[0])))
 326        new_jac = np.concatenate((fused_row1, fused_row2), axis=1)
 327
 328        A = W @ new_jac
 329        P_phi = A @ np.linalg.pinv(A.T @ A) @ A.T
 330        expected_chisquare = np.trace((np.identity(P_phi.shape[0]) - P_phi) @ W @ cov @ W)
 331        if expected_chisquare <= 0.0:
 332            warnings.warn("Negative expected_chisquare.", RuntimeWarning)
 333            expected_chisquare = np.abs(expected_chisquare)
 334        output.chisquare_by_expected_chisquare = odr_chisquare(np.concatenate((out.beta, out.xplus.ravel()))) / expected_chisquare
 335        if not silent:
 336            print('chisquare/expected_chisquare:',
 337                  output.chisquare_by_expected_chisquare)
 338
 339    fitp = out.beta
 340    try:
 341        hess = hessian(odr_chisquare)(np.concatenate((fitp, out.xplus.ravel())))
 342    except TypeError:
 343        raise Exception("It is required to use autograd.numpy instead of numpy within fit functions, see the documentation for details.") from None
 344
 345    def odr_chisquare_compact_x(d):
 346        model = func(d[:n_parms], d[n_parms:n_parms + m].reshape(x_shape))
 347        chisq = anp.sum(((y_f - model) / dy_f) ** 2) + anp.sum(((d[n_parms + m:].reshape(x_shape) - d[n_parms:n_parms + m].reshape(x_shape)) / dx_f) ** 2)
 348        return chisq
 349
 350    jac_jac_x = hessian(odr_chisquare_compact_x)(np.concatenate((fitp, out.xplus.ravel(), x_f.ravel())))
 351
 352    # Compute hess^{-1} @ jac_jac_x[:n_parms + m, n_parms + m:] using LAPACK dgesv
 353    try:
 354        deriv_x = -scipy.linalg.solve(hess, jac_jac_x[:n_parms + m, n_parms + m:])
 355    except np.linalg.LinAlgError:
 356        raise Exception("Cannot invert hessian matrix.")
 357
 358    def odr_chisquare_compact_y(d):
 359        model = func(d[:n_parms], d[n_parms:n_parms + m].reshape(x_shape))
 360        chisq = anp.sum(((d[n_parms + m:] - model) / dy_f) ** 2) + anp.sum(((x_f - d[n_parms:n_parms + m].reshape(x_shape)) / dx_f) ** 2)
 361        return chisq
 362
 363    jac_jac_y = hessian(odr_chisquare_compact_y)(np.concatenate((fitp, out.xplus.ravel(), y_f)))
 364
 365    # Compute hess^{-1} @ jac_jac_y[:n_parms + m, n_parms + m:] using LAPACK dgesv
 366    try:
 367        deriv_y = -scipy.linalg.solve(hess, jac_jac_y[:n_parms + m, n_parms + m:])
 368    except np.linalg.LinAlgError:
 369        raise Exception("Cannot invert hessian matrix.")
 370
 371    result = []
 372    for i in range(n_parms):
 373        result.append(derived_observable(lambda my_var, **kwargs: (my_var[0] + np.finfo(np.float64).eps) / (x.ravel()[0].value + np.finfo(np.float64).eps) * out.beta[i], list(x.ravel()) + list(y), man_grad=list(deriv_x[i]) + list(deriv_y[i])))
 374
 375    output.fit_parameters = result
 376
 377    output.odr_chisquare = odr_chisquare(np.concatenate((out.beta, out.xplus.ravel())))
 378    output.dof = x.shape[-1] - n_parms
 379    output.p_value = 1 - scipy.stats.chi2.cdf(output.odr_chisquare, output.dof)
 380
 381    return output
 382
 383
 384def _prior_fit(x, y, func, priors, silent=False, **kwargs):
 385    output = Fit_result()
 386
 387    output.fit_function = func
 388
 389    x = np.asarray(x)
 390
 391    if kwargs.get('num_grad') is True:
 392        hessian = num_hessian
 393    else:
 394        hessian = auto_hessian
 395
 396    if not callable(func):
 397        raise TypeError('func has to be a function.')
 398
 399    for i in range(100):
 400        try:
 401            func(np.arange(i), 0)
 402        except TypeError:
 403            continue
 404        except IndexError:
 405            continue
 406        else:
 407            break
 408    else:
 409        raise RuntimeError("Fit function is not valid.")
 410
 411    n_parms = i
 412
 413    if n_parms != len(priors):
 414        raise Exception('Priors do not have the correct length.')
 415
 416    def extract_val_and_dval(string):
 417        split_string = string.split('(')
 418        if '.' in split_string[0] and '.' not in split_string[1][:-1]:
 419            factor = 10 ** -len(split_string[0].partition('.')[2])
 420        else:
 421            factor = 1
 422        return float(split_string[0]), float(split_string[1][:-1]) * factor
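    # Worked examples for the string format parsed by extract_val_and_dval:
    #   '0.548(23)' -> (0.548, 0.023)   error given in units of the last digits
    #   '500(40)'   -> (500.0, 40.0)
    #   '0.5(0.4)'  -> (0.5, 0.4)       error given explicitly with a decimal point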
 423
 424    loc_priors = []
 425    for i_n, i_prior in enumerate(priors):
 426        if isinstance(i_prior, Obs):
 427            loc_priors.append(i_prior)
 428        else:
 429            loc_val, loc_dval = extract_val_and_dval(i_prior)
 430            loc_priors.append(cov_Obs(loc_val, loc_dval ** 2, '#prior' + str(i_n) + f"_{np.random.randint(2147483647):010d}"))
 431
 432    output.priors = loc_priors
 433
 434    if not silent:
 435        print('Fit with', n_parms, 'parameter' + 's' * (n_parms > 1))
 436
 437    y_f = [o.value for o in y]
 438    dy_f = [o.dvalue for o in y]
 439
 440    if np.any(np.asarray(dy_f) <= 0.0):
 441        raise Exception('No y errors available, run the gamma method first.')
 442
 443    p_f = [o.value for o in loc_priors]
 444    dp_f = [o.dvalue for o in loc_priors]
 445
 446    if np.any(np.asarray(dp_f) <= 0.0):
 447        raise Exception('No prior errors available, run the gamma method first.')
 448
 449    if 'initial_guess' in kwargs:
 450        x0 = kwargs.get('initial_guess')
 451        if len(x0) != n_parms:
 452            raise Exception('Initial guess does not have the correct length.')
 453    else:
 454        x0 = p_f
 455
 456    def chisqfunc(p):
 457        model = func(p, x)
 458        chisq = anp.sum(((y_f - model) / dy_f) ** 2) + anp.sum(((p_f - p) / dp_f) ** 2)
 459        return chisq
 460
 461    if not silent:
 462        print('Method: migrad')
 463
 464    m = iminuit.Minuit(chisqfunc, x0)
 465    m.errordef = 1
 466    m.print_level = 0
 467    if 'tol' in kwargs:
 468        m.tol = kwargs.get('tol')
 469    else:
 470        m.tol = 1e-4
 471    m.migrad()
 472    params = np.asarray(m.values)
 473
 474    output.chisquare_by_dof = m.fval / len(x)
 475
 476    output.method = 'migrad'
 477
 478    if not silent:
 479        print('chisquare/d.o.f.:', output.chisquare_by_dof)
 480
 481    if not m.fmin.is_valid:
 482        raise Exception('The minimization procedure did not converge.')
 483
 484    hess = hessian(chisqfunc)(params)
 485    hess_inv = np.linalg.pinv(hess)
 486
 487    def chisqfunc_compact(d):
 488        model = func(d[:n_parms], x)
 489        chisq = anp.sum(((d[n_parms: n_parms + len(x)] - model) / dy_f) ** 2) + anp.sum(((d[n_parms + len(x):] - d[:n_parms]) / dp_f) ** 2)
 490        return chisq
 491
 492    jac_jac = hessian(chisqfunc_compact)(np.concatenate((params, y_f, p_f)))
 493
 494    deriv = -hess_inv @ jac_jac[:n_parms, n_parms:]
 495
 496    result = []
 497    for i in range(n_parms):
 498        result.append(derived_observable(lambda x, **kwargs: (x[0] + np.finfo(np.float64).eps) / (y[0].value + np.finfo(np.float64).eps) * params[i], list(y) + list(loc_priors), man_grad=list(deriv[i])))
 499
 500    output.fit_parameters = result
 501    output.chisquare = chisqfunc(np.asarray(params))
 502
 503    if kwargs.get('resplot') is True:
 504        residual_plot(x, y, func, result)
 505
 506    if kwargs.get('qqplot') is True:
 507        qqplot(x, y, func, result)
 508
 509    return output
 510
 511
 512def _standard_fit(x, y, func, silent=False, **kwargs):
 513    output = Fit_result()
 514
 515    output.fit_function = func
 516
 517    x = np.asarray(x)
 518
 519    if kwargs.get('num_grad') is True:
 520        jacobian = num_jacobian
 521        hessian = num_hessian
 522    else:
 523        jacobian = auto_jacobian
 524        hessian = auto_hessian
 525
 526    if x.shape[-1] != len(y):
 527        raise Exception('x and y input have to have the same length')
 528
 529    if len(x.shape) > 2:
 530        raise Exception('Unknown format for x values')
 531
 532    if not callable(func):
 533        raise TypeError('func has to be a function.')
 534
 535    for i in range(42):
 536        try:
 537            func(np.arange(i), x.T[0])
 538        except TypeError:
 539            continue
 540        except IndexError:
 541            continue
 542        else:
 543            break
 544    else:
 545        raise RuntimeError("Fit function is not valid.")
 546
 547    n_parms = i
 548
 549    if not silent:
 550        print('Fit with', n_parms, 'parameter' + 's' * (n_parms > 1))
 551
 552    y_f = [o.value for o in y]
 553    dy_f = [o.dvalue for o in y]
 554
 555    if np.any(np.asarray(dy_f) <= 0.0):
 556        raise Exception('No y errors available, run the gamma method first.')
 557
 558    if 'initial_guess' in kwargs:
 559        x0 = kwargs.get('initial_guess')
 560        if len(x0) != n_parms:
 561            raise Exception('Initial guess does not have the correct length: %d vs. %d' % (len(x0), n_parms))
 562    else:
 563        x0 = [0.1] * n_parms
 564
 565    if kwargs.get('correlated_fit') is True:
 566        corr = covariance(y, correlation=True, **kwargs)
 567        covdiag = np.diag(1 / np.asarray(dy_f))
 568        condn = np.linalg.cond(corr)
 569        if condn > 0.1 / np.finfo(float).eps:
 570            raise Exception(f"Cannot invert correlation matrix as its condition number exceeds machine precision ({condn:1.2e})")
 571        if condn > 1e13:
 572            warnings.warn("Correlation matrix may be ill-conditioned, condition number: %1.2e" % (condn), RuntimeWarning)
 573        chol = np.linalg.cholesky(corr)
 574        chol_inv = scipy.linalg.solve_triangular(chol, covdiag, lower=True)
 575
 576        def chisqfunc_corr(p):
 577            model = func(p, x)
 578            chisq = anp.sum(anp.dot(chol_inv, (y_f - model)) ** 2)
 579            return chisq
 580
 581    def chisqfunc(p):
 582        model = func(p, x)
 583        chisq = anp.sum(((y_f - model) / dy_f) ** 2)
 584        return chisq
 585
 586    output.method = kwargs.get('method', 'Levenberg-Marquardt')
 587    if not silent:
 588        print('Method:', output.method)
 589
 590    if output.method != 'Levenberg-Marquardt':
 591        if output.method == 'migrad':
 592            fit_result = iminuit.minimize(chisqfunc, x0, tol=1e-4)  # Stopping criterion 0.002 * tol * errordef
 593            if kwargs.get('correlated_fit') is True:
 594                fit_result = iminuit.minimize(chisqfunc_corr, fit_result.x, tol=1e-4)  # Stopping criterion 0.002 * tol * errordef
 595            output.iterations = fit_result.nfev
 596        else:
 597            fit_result = scipy.optimize.minimize(chisqfunc, x0, method=kwargs.get('method'), tol=1e-12)
 598            if kwargs.get('correlated_fit') is True:
 599                fit_result = scipy.optimize.minimize(chisqfunc_corr, fit_result.x, method=kwargs.get('method'), tol=1e-12)
 600            output.iterations = fit_result.nit
 601
 602        chisquare = fit_result.fun
 603
 604    else:
 605        if kwargs.get('correlated_fit') is True:
 606            def chisqfunc_residuals_corr(p):
 607                model = func(p, x)
 608                chisq = anp.dot(chol_inv, (y_f - model))
 609                return chisq
 610
 611        def chisqfunc_residuals(p):
 612            model = func(p, x)
 613            chisq = ((y_f - model) / dy_f)
 614            return chisq
 615
 616        fit_result = scipy.optimize.least_squares(chisqfunc_residuals, x0, method='lm', ftol=1e-15, gtol=1e-15, xtol=1e-15)
 617        if kwargs.get('correlated_fit') is True:
 618            fit_result = scipy.optimize.least_squares(chisqfunc_residuals_corr, fit_result.x, method='lm', ftol=1e-15, gtol=1e-15, xtol=1e-15)
 619
 620        chisquare = np.sum(fit_result.fun ** 2)
 621        if kwargs.get('correlated_fit') is True:
 622            assert np.isclose(chisquare, chisqfunc_corr(fit_result.x), atol=1e-14)
 623        else:
 624            assert np.isclose(chisquare, chisqfunc(fit_result.x), atol=1e-14)
 625
 626        output.iterations = fit_result.nfev
 627
 628    if not fit_result.success:
 629        raise Exception('The minimization procedure did not converge.')
 630
 631    if x.shape[-1] - n_parms > 0:
 632        output.chisquare_by_dof = chisquare / (x.shape[-1] - n_parms)
 633    else:
 634        output.chisquare_by_dof = float('nan')
 635
 636    output.message = fit_result.message
 637    if not silent:
 638        print(fit_result.message)
 639        print('chisquare/d.o.f.:', output.chisquare_by_dof)
 640
 641    if kwargs.get('expected_chisquare') is True:
 642        if kwargs.get('correlated_fit') is not True:
 643            W = np.diag(1 / np.asarray(dy_f))
 644            cov = covariance(y)
 645            A = W @ jacobian(func)(fit_result.x, x)
 646            P_phi = A @ np.linalg.pinv(A.T @ A) @ A.T
 647            expected_chisquare = np.trace((np.identity(x.shape[-1]) - P_phi) @ W @ cov @ W)
 648            output.chisquare_by_expected_chisquare = chisquare / expected_chisquare
 649            if not silent:
 650                print('chisquare/expected_chisquare:',
 651                      output.chisquare_by_expected_chisquare)
 652
 653    fitp = fit_result.x
 654    try:
 655        if kwargs.get('correlated_fit') is True:
 656            hess = hessian(chisqfunc_corr)(fitp)
 657        else:
 658            hess = hessian(chisqfunc)(fitp)
 659    except TypeError:
 660        raise Exception("It is required to use autograd.numpy instead of numpy within fit functions, see the documentation for details.") from None
 661
 662    if kwargs.get('correlated_fit') is True:
 663        def chisqfunc_compact(d):
 664            model = func(d[:n_parms], x)
 665            chisq = anp.sum(anp.dot(chol_inv, (d[n_parms:] - model)) ** 2)
 666            return chisq
 667
 668    else:
 669        def chisqfunc_compact(d):
 670            model = func(d[:n_parms], x)
 671            chisq = anp.sum(((d[n_parms:] - model) / dy_f) ** 2)
 672            return chisq
 673
 674    jac_jac = hessian(chisqfunc_compact)(np.concatenate((fitp, y_f)))
 675
 676    # Compute hess^{-1} @ jac_jac[:n_parms, n_parms:] using LAPACK dgesv
 677    try:
 678        deriv = -scipy.linalg.solve(hess, jac_jac[:n_parms, n_parms:])
 679    except np.linalg.LinAlgError:
 680        raise Exception("Cannot invert hessian matrix.")
 681
 682    result = []
 683    for i in range(n_parms):
 684        result.append(derived_observable(lambda x, **kwargs: (x[0] + np.finfo(np.float64).eps) / (y[0].value + np.finfo(np.float64).eps) * fit_result.x[i], list(y), man_grad=list(deriv[i])))
 685
 686    output.fit_parameters = result
 687
 688    output.chisquare = chisquare
 689    output.dof = x.shape[-1] - n_parms
 690    output.p_value = 1 - scipy.stats.chi2.cdf(output.chisquare, output.dof)
 691    # Hotelling t-squared p-value for correlated fits.
 692    if kwargs.get('correlated_fit') is True:
 693        n_cov = np.min(np.vectorize(lambda x: x.N)(y))
 694        output.t2_p_value = 1 - scipy.stats.f.cdf((n_cov - output.dof) / (output.dof * (n_cov - 1)) * output.chisquare,
 695                                                  output.dof, n_cov - output.dof)
 696
 697    if kwargs.get('resplot') is True:
 698        residual_plot(x, y, func, result)
 699
 700    if kwargs.get('qqplot') is True:
 701        qqplot(x, y, func, result)
 702
 703    return output
 704
 705
 706def _combined_fit(x, y, func, silent=False, **kwargs):
 707
 708    output = Fit_result()
 709    output.fit_function = func
 710
 711    if kwargs.get('num_grad') is True:
 712        jacobian = num_jacobian
 713        hessian = num_hessian
 714    else:
 715        jacobian = auto_jacobian
 716        hessian = auto_hessian
 717
 718    key_ls = sorted(list(x.keys()))
 719
 720    if sorted(list(y.keys())) != key_ls:
 721        raise Exception('x and y dictionaries do not contain the same keys.')
 722
 723    if sorted(list(func.keys())) != key_ls:
 724        raise Exception('x and func dictionaries do not contain the same keys.')
 725
 726    x_all = np.concatenate([np.array(x[key]) for key in key_ls])
 727    y_all = np.concatenate([np.array(y[key]) for key in key_ls])
 728
 729    y_f = [o.value for o in y_all]
 730    dy_f = [o.dvalue for o in y_all]
 731
 732    if len(x_all.shape) > 2:
 733        raise Exception('Unknown format for x values')
 734
 735    if np.any(np.asarray(dy_f) <= 0.0):
 736        raise Exception('No y errors available, run the gamma method first.')
 737
 738    # number of fit parameters
 739    n_parms_ls = []
 740    for key in key_ls:
 741        if not callable(func[key]):
 742            raise TypeError('func (key=' + key + ') is not a function.')
 743        if len(x[key]) != len(y[key]):
 744            raise Exception('x and y input (key=' + key + ') do not have the same length')
 745        for i in range(100):
 746            try:
 747                func[key](np.arange(i), x_all.T[0])
 748            except TypeError:
 749                continue
 750            except IndexError:
 751                continue
 752            else:
 753                break
 754        else:
 755            raise RuntimeError("Fit function (key=" + key + ") is not valid.")
 756        n_parms = i
 757        n_parms_ls.append(n_parms)
 758    n_parms = max(n_parms_ls)
 759    if not silent:
 760        print('Fit with', n_parms, 'parameter' + 's' * (n_parms > 1))
 761
 762    if 'initial_guess' in kwargs:
 763        x0 = kwargs.get('initial_guess')
 764        if len(x0) != n_parms:
 765            raise Exception('Initial guess does not have the correct length: %d vs. %d' % (len(x0), n_parms))
 766    else:
 767        x0 = [0.1] * n_parms
 768
 769    if kwargs.get('correlated_fit') is True:
 770        corr = covariance(y_all, correlation=True, **kwargs)
 771        covdiag = np.diag(1 / np.asarray(dy_f))
 772        condn = np.linalg.cond(corr)
 773        if condn > 0.1 / np.finfo(float).eps:
 774            raise Exception(f"Cannot invert correlation matrix as its condition number exceeds machine precision ({condn:1.2e})")
 775        if condn > 1e13:
 776            warnings.warn("Correlation matrix may be ill-conditioned, condition number: %1.2e" % (condn), RuntimeWarning)
 777        chol = np.linalg.cholesky(corr)
 778        chol_inv = scipy.linalg.solve_triangular(chol, covdiag, lower=True)
 779
 780        def chisqfunc_corr(p):
 781            model = np.concatenate([np.array(func[key](p, np.asarray(x[key]))) for key in key_ls])
 782            chisq = anp.sum(anp.dot(chol_inv, (y_f - model)) ** 2)
 783            return chisq
 784
 785    def chisqfunc(p):
 786        func_list = np.concatenate([[func[k]] * len(x[k]) for k in key_ls])
 787        model = anp.array([func_list[i](p, x_all[i]) for i in range(len(x_all))])
 788        chisq = anp.sum(((y_f - model) / dy_f) ** 2)
 789        return chisq
 790
 791    output.method = kwargs.get('method', 'Levenberg-Marquardt')
 792    if not silent:
 793        print('Method:', output.method)
 794
 795    if output.method != 'Levenberg-Marquardt':
 796        if output.method == 'migrad':
 797            tolerance = 1e-4  # default value of 1e-1 set by iminuit can be problematic
 798            if 'tol' in kwargs:
 799                tolerance = kwargs.get('tol')
 800            fit_result = iminuit.minimize(chisqfunc, x0, tol=tolerance)  # Stopping criterion 0.002 * tol * errordef
 801            if kwargs.get('correlated_fit') is True:
 802                fit_result = iminuit.minimize(chisqfunc_corr, fit_result.x, tol=tolerance)
 803            output.iterations = fit_result.nfev
 804        else:
 805            tolerance = 1e-12
 806            if 'tol' in kwargs:
 807                tolerance = kwargs.get('tol')
 808            fit_result = scipy.optimize.minimize(chisqfunc, x0, method=kwargs.get('method'), tol=tolerance)
 809            if kwargs.get('correlated_fit') is True:
 810                fit_result = scipy.optimize.minimize(chisqfunc_corr, fit_result.x, method=kwargs.get('method'), tol=tolerance)
 811            output.iterations = fit_result.nit
 812
 813        chisquare = fit_result.fun
 814
 815    else:
 816        if kwargs.get('correlated_fit') is True:
 817            def chisqfunc_residuals_corr(p):
 818                model = np.concatenate([np.array(func[key](p, np.asarray(x[key]))) for key in key_ls])
 819                chisq = anp.dot(chol_inv, (y_f - model))
 820                return chisq
 821
 822        def chisqfunc_residuals(p):
 823            model = np.concatenate([np.array(func[key](p, np.asarray(x[key]))) for key in key_ls])
 824            chisq = ((y_f - model) / dy_f)
 825            return chisq
 826
 827        if 'tol' in kwargs:
 828            print('tol cannot be set for Levenberg-Marquardt')
 829
 830        fit_result = scipy.optimize.least_squares(chisqfunc_residuals, x0, method='lm', ftol=1e-15, gtol=1e-15, xtol=1e-15)
 831        if kwargs.get('correlated_fit') is True:
 832            fit_result = scipy.optimize.least_squares(chisqfunc_residuals_corr, fit_result.x, method='lm', ftol=1e-15, gtol=1e-15, xtol=1e-15)
 833
 834        chisquare = np.sum(fit_result.fun ** 2)
 835        if kwargs.get('correlated_fit') is True:
 836            assert np.isclose(chisquare, chisqfunc_corr(fit_result.x), atol=1e-14)
 837        else:
 838            assert np.isclose(chisquare, chisqfunc(fit_result.x), atol=1e-14)
 839
 840        output.iterations = fit_result.nfev
 841
 842    if not fit_result.success:
 843        raise Exception('The minimization procedure did not converge.')
 844
 845    if x_all.shape[-1] - n_parms > 0:
 846        output.chisquare = chisquare
 847        output.dof = x_all.shape[-1] - n_parms
 848        output.chisquare_by_dof = output.chisquare / output.dof
 849        output.p_value = 1 - scipy.stats.chi2.cdf(output.chisquare, output.dof)
 850    else:
 851        output.chisquare_by_dof = float('nan')
 852
 853    output.message = fit_result.message
 854    if not silent:
 855        print(fit_result.message)
 856        print('chisquare/d.o.f.:', output.chisquare_by_dof)
 857        print('fit parameters', fit_result.x)
 858
 859    def prepare_hat_matrix():
 860        hat_vector = []
 861        for key in key_ls:
 862            x_array = np.asarray(x[key])
 863            if (len(x_array) != 0):
 864                hat_vector.append(jacobian(func[key])(fit_result.x, x_array))
 865        hat_vector = [item for sublist in hat_vector for item in sublist]
 866        return hat_vector
 867
 868    if kwargs.get('expected_chisquare') is True:
 869        if kwargs.get('correlated_fit') is not True:
 870            W = np.diag(1 / np.asarray(dy_f))
 871            cov = covariance(y_all)
 872            hat_vector = prepare_hat_matrix()
 873            A = W @ hat_vector  # hat_vector = 'jacobian(func)(fit_result.x, x)'
 874            P_phi = A @ np.linalg.pinv(A.T @ A) @ A.T
 875            expected_chisquare = np.trace((np.identity(x_all.shape[-1]) - P_phi) @ W @ cov @ W)
 876            output.chisquare_by_expected_chisquare = output.chisquare / expected_chisquare
 877            if not silent:
 878                print('chisquare/expected_chisquare:', output.chisquare_by_expected_chisquare)
 879
 880    fitp = fit_result.x
 881    if np.any(np.asarray(dy_f) <= 0.0):
 882        raise Exception('No y errors available, run the gamma method first.')
 883
 884    try:
 885        if kwargs.get('correlated_fit') is True:
 886            hess = hessian(chisqfunc_corr)(fitp)
 887        else:
 888            hess = hessian(chisqfunc)(fitp)
 889    except TypeError:
 890        raise Exception("It is required to use autograd.numpy instead of numpy within fit functions, see the documentation for details.") from None
 891
 892    if kwargs.get('correlated_fit') is True:
 893        def chisqfunc_compact(d):
 894            func_list = np.concatenate([[func[k]] * len(x[k]) for k in key_ls])
 895            model = anp.array([func_list[i](d[:n_parms], x_all[i]) for i in range(len(x_all))])
 896            chisq = anp.sum(anp.dot(chol_inv, (d[n_parms:] - model)) ** 2)
 897            return chisq
 898    else:
 899        def chisqfunc_compact(d):
 900            func_list = np.concatenate([[func[k]] * len(x[k]) for k in key_ls])
 901            model = anp.array([func_list[i](d[:n_parms], x_all[i]) for i in range(len(x_all))])
 902            chisq = anp.sum(((d[n_parms:] - model) / dy_f) ** 2)
 903            return chisq
 904
 905    jac_jac_y = hessian(chisqfunc_compact)(np.concatenate((fitp, y_f)))
 906
 907    # Compute hess^{-1} @ jac_jac_y[:n_parms, n_parms:] using LAPACK dgesv
 908    try:
 909        deriv_y = -scipy.linalg.solve(hess, jac_jac_y[:n_parms, n_parms:])
 910    except np.linalg.LinAlgError:
 911        raise Exception("Cannot invert hessian matrix.")
 912
 913    result = []
 914    for i in range(n_parms):
 915        result.append(derived_observable(lambda x_all, **kwargs: (x_all[0] + np.finfo(np.float64).eps) / (y_all[0].value + np.finfo(np.float64).eps) * fitp[i], list(y_all), man_grad=list(deriv_y[i])))
 916
 917    output.fit_parameters = result
 918
 919    if kwargs.get('correlated_fit') is True:
 920        n_cov = np.min(np.vectorize(lambda x_all: x_all.N)(y_all))
 921        output.t2_p_value = 1 - scipy.stats.f.cdf((n_cov - output.dof) / (output.dof * (n_cov - 1)) * output.chisquare,
 922                                                  output.dof, n_cov - output.dof)
 923
 924    return output
 925
 926
 927def fit_lin(x, y, **kwargs):
 928    """Performs a linear fit to y = n + m * x and returns two Obs n, m.
 929
 930    Parameters
 931    ----------
 932    x : list
 933        Can either be a list of floats in which case no xerror is assumed, or
 934        a list of Obs, where the dvalues of the Obs are used as xerror for the fit.
 935    y : list
 936        List of Obs, the dvalues of the Obs are used as yerror for the fit.
 937
 938    Returns
 939    -------
 940    fit_parameters : list[Obs]
 941        List of fitted observables.
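    A short sketch, assuming `pyerrors` is imported as `pe`; the toy data and the ensemble
    name are illustrative:

    ```python
    import pyerrors as pe

    x = [1., 2., 3., 4.]
    y = [pe.pseudo_Obs(0.5 + 0.3 * xi, 0.02, 'toy_ensemble') for xi in x]
    [o.gamma_method() for o in y]

    n, m = pe.fits.fit_lin(x, y)  # intercept n and slope m as Obs
    n.gamma_method()
    m.gamma_method()
    ```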
 942    """
 943
 944    def f(a, x):
 945        y = a[0] + a[1] * x
 946        return y
 947
 948    if all(isinstance(n, Obs) for n in x):
 949        out = total_least_squares(x, y, f, **kwargs)
 950        return out.fit_parameters
 951    elif all(isinstance(n, float) or isinstance(n, int) for n in x) or isinstance(x, np.ndarray):
 952        out = least_squares(x, y, f, **kwargs)
 953        return out.fit_parameters
 954    else:
 955        raise Exception('Unsupported types for x')
 956
 957
 958def qqplot(x, o_y, func, p):
 959    """Generates a quantile-quantile plot of the fit result which can be used to
 960       check if the residuals of the fit are gaussian distributed.
 961
 962    Returns
 963    -------
 964    None
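    Typically invoked via `least_squares(..., qqplot=True)`. A direct call could look like
    the following sketch (assuming `pyerrors` imported as `pe`; `x`, `y` and `func` as in
    `least_squares`):

    ```python
    fit_res = pe.fits.least_squares(x, y, func)
    pe.fits.qqplot(x, y, func, fit_res.fit_parameters)
    ```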
 965    """
 966
 967    residuals = []
 968    for i_x, i_y in zip(x, o_y):
 969        residuals.append((i_y - func(p, i_x)) / i_y.dvalue)
 970    residuals = sorted(residuals)
 971    my_y = [o.value for o in residuals]
 972    probplot = scipy.stats.probplot(my_y)
 973    my_x = probplot[0][0]
 974    plt.figure(figsize=(8, 8 / 1.618))
 975    plt.errorbar(my_x, my_y, fmt='o')
 976    fit_start = my_x[0]
 977    fit_stop = my_x[-1]
 978    samples = np.arange(fit_start, fit_stop, 0.01)
 979    plt.plot(samples, samples, 'k--', zorder=11, label='Standard normal distribution')
 980    plt.plot(samples, probplot[1][0] * samples + probplot[1][1], zorder=10, label='Least squares fit, r=' + str(np.around(probplot[1][2], 3)), marker='', ls='-')
 981
 982    plt.xlabel('Theoretical quantiles')
 983    plt.ylabel('Ordered Values')
 984    plt.legend()
 985    plt.draw()
 986
 987
 988def residual_plot(x, y, func, fit_res):
 989    """Generates a plot which compares the fit to the data and displays the corresponding residuals
 990
 991    Returns
 992    -------
 993    None
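    Usually generated via `least_squares(..., resplot=True)`. A direct call could look like
    this sketch (assuming `pyerrors` imported as `pe` and a previous fit result `fit_res`
    obtained from `x`, `y` and `func`):

    ```python
    pe.fits.residual_plot(x, y, func, fit_res.fit_parameters)
    ```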
 994    """
 995    sorted_x = sorted(x)
 996    xstart = sorted_x[0] - 0.5 * (sorted_x[1] - sorted_x[0])
 997    xstop = sorted_x[-1] + 0.5 * (sorted_x[-1] - sorted_x[-2])
 998    x_samples = np.arange(xstart, xstop + 0.01, 0.01)
 999
1000    plt.figure(figsize=(8, 8 / 1.618))
1001    gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1], wspace=0.0, hspace=0.0)
1002    ax0 = plt.subplot(gs[0])
1003    ax0.errorbar(x, [o.value for o in y], yerr=[o.dvalue for o in y], ls='none', fmt='o', capsize=3, markersize=5, label='Data')
1004    ax0.plot(x_samples, func([o.value for o in fit_res], x_samples), label='Fit', zorder=10, ls='-', ms=0)
1005    ax0.set_xticklabels([])
1006    ax0.set_xlim([xstart, xstop])
1007    ax0.set_xticklabels([])
1008    ax0.legend()
1009
1010    residuals = (np.asarray([o.value for o in y]) - func([o.value for o in fit_res], x)) / np.asarray([o.dvalue for o in y])
1011    ax1 = plt.subplot(gs[1])
1012    ax1.plot(x, residuals, 'ko', ls='none', markersize=5)
1013    ax1.tick_params(direction='out')
1014    ax1.tick_params(axis="x", bottom=True, top=True, labelbottom=True)
1015    ax1.axhline(y=0.0, ls='--', color='k', marker=" ")
1016    ax1.fill_between(x_samples, -1.0, 1.0, alpha=0.1, facecolor='k')
1017    ax1.set_xlim([xstart, xstop])
1018    ax1.set_ylabel('Residuals')
1019    plt.subplots_adjust(wspace=None, hspace=None)
1020    plt.draw()
1021
1022
1023def error_band(x, func, beta):
1024    """Calculate the error band for an array of sample values x, for given fit function func with optimized parameters beta.
1025
1026    Returns
1027    -------
1028    err : np.array(Obs)
1029        Error band for an array of sample values x
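    A minimal sketch, assuming `pyerrors` is imported as `pe` and that `func` and the
    `Fit_result` `fit_res` stem from a previous `least_squares` call whose parameters have
    had their errors computed via `gamma_method`:

    ```python
    import numpy as np

    x_samples = np.linspace(1., 4., 100)
    band = pe.fits.error_band(x_samples, func, fit_res.fit_parameters)
    ```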
1030    """
1031    cov = covariance(beta)
1032    if np.any(np.abs(cov - cov.T) > 1000 * np.finfo(np.float64).eps):
1033        warnings.warn("Covariance matrix is not symmetric within floating point precision", RuntimeWarning)
1034
1035    deriv = []
1036    for i, item in enumerate(x):
1037        deriv.append(np.array(egrad(func)([o.value for o in beta], item)))
1038
1039    err = []
1040    for i, item in enumerate(x):
1041        err.append(np.sqrt(deriv[i] @ cov @ deriv[i]))
1042    err = np.array(err)
1043
1044    return err
1045
1046
1047def ks_test(objects=None):
1048    """Performs a Kolmogorov–Smirnov test for the p-values of all fit objects.
1049
1050    Parameters
1051    ----------
1052    objects : list
1053        List of fit results to include in the analysis (optional).
1054
1055    Returns
1056    -------
1057    None
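    A minimal sketch, assuming `pyerrors` is imported as `pe` and `fit_results` is a
    hypothetical list of `Fit_result` objects from uncorrelated fits:

    ```python
    pe.fits.ks_test(objects=fit_results)
    ```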
1058    """
1059
1060    if objects is None:
1061        obs_list = []
1062        for obj in gc.get_objects():
1063            if isinstance(obj, Fit_result):
1064                obs_list.append(obj)
1065    else:
1066        obs_list = objects
1067
1068    p_values = [o.p_value for o in obs_list]
1069
1070    bins = len(p_values)
1071    x = np.arange(0, 1.001, 0.001)
1072    plt.plot(x, x, 'k', zorder=1)
1073    plt.xlim(0, 1)
1074    plt.ylim(0, 1)
1075    plt.xlabel('p-value')
1076    plt.ylabel('Cumulative probability')
1077    plt.title(str(bins) + ' p-values')
1078
1079    n = np.arange(1, bins + 1) / np.float64(bins)
1080    Xs = np.sort(p_values)
1081    plt.step(Xs, n)
1082    diffs = n - Xs
1083    loc_max_diff = np.argmax(np.abs(diffs))
1084    loc = Xs[loc_max_diff]
1085    plt.annotate('', xy=(loc, loc), xytext=(loc, loc + diffs[loc_max_diff]), arrowprops=dict(arrowstyle='<->', shrinkA=0, shrinkB=0))
1086    plt.draw()
1087
1088    print(scipy.stats.kstest(p_values, 'uniform'))
class Fit_result(collections.abc.Sequence):
21class Fit_result(Sequence):
22    """Represents fit results.
23
24    Attributes
25    ----------
26    fit_parameters : list
27        results for the individual fit parameters,
28        also accessible via indices.
29    chisquare_by_dof : float
30        reduced chisquare.
31    p_value : float
32        p-value of the fit
33    t2_p_value : float
34        Hotelling t-squared p-value for correlated fits.
35    """
36
37    def __init__(self):
38        self.fit_parameters = None
39
40    def __getitem__(self, idx):
41        return self.fit_parameters[idx]
42
43    def __len__(self):
44        return len(self.fit_parameters)
45
46    def gamma_method(self, **kwargs):
47        """Apply the gamma method to all fit parameters"""
48        [o.gamma_method(**kwargs) for o in self.fit_parameters]
49
50    gm = gamma_method
51
52    def __str__(self):
53        my_str = 'Goodness of fit:\n'
54        if hasattr(self, 'chisquare_by_dof'):
55            my_str += '\u03C7\u00b2/d.o.f. = ' + f'{self.chisquare_by_dof:2.6f}' + '\n'
56        elif hasattr(self, 'residual_variance'):
57            my_str += 'residual variance = ' + f'{self.residual_variance:2.6f}' + '\n'
58        if hasattr(self, 'chisquare_by_expected_chisquare'):
59            my_str += '\u03C7\u00b2/\u03C7\u00b2exp  = ' + f'{self.chisquare_by_expected_chisquare:2.6f}' + '\n'
60        if hasattr(self, 'p_value'):
61            my_str += 'p-value   = ' + f'{self.p_value:2.4f}' + '\n'
62        if hasattr(self, 't2_p_value'):
63            my_str += 't\u00B2p-value = ' + f'{self.t2_p_value:2.4f}' + '\n'
64        my_str += 'Fit parameters:\n'
65        for i_par, par in enumerate(self.fit_parameters):
66            my_str += str(i_par) + '\t' + ' ' * int(par >= 0) + str(par).rjust(int(par < 0.0)) + '\n'
67        return my_str
68
69    def __repr__(self):
70        m = max(map(len, list(self.__dict__.keys()))) + 1
71        return '\n'.join([key.rjust(m) + ': ' + repr(value) for key, value in sorted(self.__dict__.items())])

Represents fit results.

Attributes
  • fit_parameters (list): results for the individual fit parameters, also accessible via indices.
  • chisquare_by_dof (float): reduced chisquare.
  • p_value (float): p-value of the fit
  • t2_p_value (float): Hotelling t-squared p-value for correlated fits.
def gamma_method(self, **kwargs):
46    def gamma_method(self, **kwargs):
47        """Apply the gamma method to all fit parameters"""
48        [o.gamma_method(**kwargs) for o in self.fit_parameters]

Apply the gamma method to all fit parameters

def gm(self, **kwargs):
46    def gamma_method(self, **kwargs):
47        """Apply the gamma method to all fit parameters"""
48        [o.gamma_method(**kwargs) for o in self.fit_parameters]

Apply the gamma method to all fit parameters

Inherited Members
collections.abc.Sequence
index
count
def least_squares(x, y, func, priors=None, silent=False, **kwargs):
 74def least_squares(x, y, func, priors=None, silent=False, **kwargs):
 75    r'''Performs a non-linear fit to y = func(x).
 76        ```
 77
 78    Parameters
 79    ----------
 80    For an uncombined fit:
 81
 82    x : list
 83        list of floats.
 84    y : list
 85        list of Obs.
 86    func : object
 87        fit function, has to be of the form
 88
 89        ```python
 90        import autograd.numpy as anp
 91
 92        def func(a, x):
 93            return a[0] + a[1] * x + a[2] * anp.sinh(x)
 94        ```
 95
 96        For multiple x values func can be of the form
 97
 98        ```python
 99        def func(a, x):
100            (x1, x2) = x
101            return a[0] * x1 ** 2 + a[1] * x2
102        ```
103        It is important that all numpy functions refer to autograd.numpy, otherwise the differentiation
104        will not work.
105
106    OR For a combined fit:
107
108    x : dict
109        dict of lists.
110    y : dict
111        dict of lists of Obs.
112    funcs : dict
113        dict of objects
114        fit functions have to be of the form (here a[0] is the common fit parameter)
115        ```python
116        import autograd.numpy as anp
117        funcs = {"a": func_a,
118                "b": func_b}
119
120        def func_a(a, x):
121            return a[1] * anp.exp(-a[0] * x)
122
123        def func_b(a, x):
124            return a[2] * anp.exp(-a[0] * x)
125
126        It is important that all numpy functions refer to autograd.numpy, otherwise the differentiation
127        will not work.
128
129    priors : list, optional
130        priors has to be a list with an entry for every parameter in the fit. The entries can either be
131        Obs (e.g. results from a previous fit) or strings containing a value and an error formatted like
132        0.548(23), 500(40) or 0.5(0.4)
133    silent : bool, optional
134        If true all output to the console is omitted (default False).
135    initial_guess : list
136        can provide an initial guess for the input parameters. Relevant for
137        non-linear fits with many parameters. In case of correlated fits the guess is used to perform
138        an uncorrelated fit which then serves as guess for the correlated fit.
139    method : str, optional
140        can be used to choose an alternative method for the minimization of chisquare.
141        The possible methods are the ones which can be used for scipy.optimize.minimize and
142        migrad of iminuit. If no method is specified, Levenberg-Marquard is used.
143        Reliable alternatives are migrad, Powell and Nelder-Mead.
144    tol: float, optional
145        can be used (only for combined fits and methods other than Levenberg-Marquard) to set the tolerance for convergence
146        to a different value to either speed up convergence at the cost of a larger error on the fitted parameters (and possibly
147        invalid estimates for parameter uncertainties) or smaller values to get more accurate parameter values
148        The stopping criterion depends on the method, e.g. migrad: edm_max = 0.002 * tol * errordef (EDM criterion: edm < edm_max)
149    correlated_fit : bool
150        If True, use the full inverse covariance matrix in the definition of the chisquare cost function.
151        For details about how the covariance matrix is estimated see `pyerrors.obs.covariance`.
152        In practice the correlation matrix is Cholesky decomposed and inverted (instead of the covariance matrix).
153        This procedure should be numerically more stable as the correlation matrix is typically better conditioned (Jacobi preconditioning).
154        At the moment this option only works for `prior==None` and when no `method` is given.
155    expected_chisquare : bool
156        If True estimates the expected chisquare which is
157        corrected by effects caused by correlated input data (default False).
158    resplot : bool
159        If True, a plot which displays fit, data and residuals is generated (default False).
160    qqplot : bool
161        If True, a quantile-quantile plot of the fit result is generated (default False).
162    num_grad : bool
163        Use numerical differentation instead of automatic differentiation to perform the error propagation (default False).
164
165    Returns
166    -------
167    output : Fit_result
168        Parameters and information on the fitted result.
169    '''
170    if priors is not None:
171        return _prior_fit(x, y, func, priors, silent=silent, **kwargs)
172
173    elif (type(x) == dict and type(y) == dict and type(func) == dict):
174        return _combined_fit(x, y, func, silent=silent, **kwargs)
175    elif (type(x) == dict or type(y) == dict or type(func) == dict):
176        raise TypeError("All arguments have to be dictionaries in order to perform a combined fit.")
177    else:
178        return _standard_fit(x, y, func, silent=silent, **kwargs)

Performs a non-linear fit to y = func(x). ```

Parameters
  • For an uncombined fit:
  • x (list): list of floats.
  • y (list): list of Obs.
  • func (object): fit function, has to be of the form

    import autograd.numpy as anp
    
    def func(a, x):
       return a[0] + a[1] * x + a[2] * anp.sinh(x)
    

    For multiple x values func can be of the form

    def func(a, x):
       (x1, x2) = x
       return a[0] * x1 ** 2 + a[1] * x2
    

    It is important that all numpy functions refer to autograd.numpy, otherwise the differentiation will not work.

  • OR For a combined fit:
  • x (dict): dict of lists.
  • y (dict): dict of lists of Obs.
  • funcs (dict): dict of objects fit functions have to be of the form (here a[0] is the common fit parameter) ```python import autograd.numpy as anp funcs = {"a": func_a, "b": func_b}

    def func_a(a, x): return a[1] * anp.exp(-a[0] * x)

    def func_b(a, x): return a[2] * anp.exp(-a[0] * x)

    It is important that all numpy functions refer to autograd.numpy, otherwise the differentiation will not work.

  • priors (list, optional): priors has to be a list with an entry for every parameter in the fit. The entries can either be Obs (e.g. results from a previous fit) or strings containing a value and an error formatted like 0.548(23), 500(40) or 0.5(0.4)
  • silent (bool, optional): If true all output to the console is omitted (default False).
  • initial_guess (list): can provide an initial guess for the input parameters. Relevant for non-linear fits with many parameters. In case of correlated fits the guess is used to perform an uncorrelated fit which then serves as guess for the correlated fit.
  • method (str, optional): can be used to choose an alternative method for the minimization of chisquare. The possible methods are the ones which can be used for scipy.optimize.minimize and migrad of iminuit. If no method is specified, Levenberg-Marquard is used. Reliable alternatives are migrad, Powell and Nelder-Mead.
  • tol (float, optional): can be used (only for combined fits and methods other than Levenberg-Marquard) to set the tolerance for convergence to a different value to either speed up convergence at the cost of a larger error on the fitted parameters (and possibly invalid estimates for parameter uncertainties) or smaller values to get more accurate parameter values The stopping criterion depends on the method, e.g. migrad: edm_max = 0.002 * tol * errordef (EDM criterion: edm < edm_max)
  • correlated_fit (bool): If True, use the full inverse covariance matrix in the definition of the chisquare cost function. For details about how the covariance matrix is estimated see pyerrors.obs.covariance. In practice the correlation matrix is Cholesky decomposed and inverted (instead of the covariance matrix). This procedure should be numerically more stable as the correlation matrix is typically better conditioned (Jacobi preconditioning). At the moment this option only works for prior==None and when no method is given.
  • expected_chisquare (bool): If True estimates the expected chisquare which is corrected by effects caused by correlated input data (default False).
  • resplot (bool): If True, a plot which displays fit, data and residuals is generated (default False).
  • qqplot (bool): If True, a quantile-quantile plot of the fit result is generated (default False).
  • num_grad (bool): Use numerical differentation instead of automatic differentiation to perform the error propagation (default False).
Returns
  • output (Fit_result): Parameters and information on the fitted result.
def total_least_squares(x, y, func, silent=False, **kwargs):
181def total_least_squares(x, y, func, silent=False, **kwargs):
182    r'''Performs a non-linear fit to y = func(x) and returns a list of Obs corresponding to the fit parameters.
183
184    Parameters
185    ----------
186    x : list
187        list of Obs, or a tuple of lists of Obs
188    y : list
189        list of Obs. The dvalues of the Obs are used as x- and yerror for the fit.
190    func : object
191        func has to be of the form
192
193        ```python
194        import autograd.numpy as anp
195
196        def func(a, x):
197            return a[0] + a[1] * x + a[2] * anp.sinh(x)
198        ```
199
200        For multiple x values func can be of the form
201
202        ```python
203        def func(a, x):
204            (x1, x2) = x
205            return a[0] * x1 ** 2 + a[1] * x2
206        ```
207
208        It is important that all numpy functions refer to autograd.numpy, otherwise the differentiation
209        will not work.
210    silent : bool, optional
211        If true all output to the console is omitted (default False).
212    initial_guess : list
213        can provide an initial guess for the input parameters. Relevant for non-linear
214        fits with many parameters.
215    expected_chisquare : bool
216        If true prints the expected chisquare which is
217        corrected by effects caused by correlated input data.
218        This can take a while as the full correlation matrix
219        has to be calculated (default False).
220    num_grad : bool
221        Use numerical differentiation instead of automatic differentiation to perform the error propagation (default False).
222
223    Notes
224    -----
225    Based on the orthogonal distance regression module of scipy.
226
227    Returns
228    -------
229    output : Fit_result
230        Parameters and information on the fitted result.
231    '''
232
233    output = Fit_result()
234
235    output.fit_function = func
236
237    x = np.array(x)
238
239    x_shape = x.shape
240
241    if kwargs.get('num_grad') is True:
242        jacobian = num_jacobian
243        hessian = num_hessian
244    else:
245        jacobian = auto_jacobian
246        hessian = auto_hessian
247
248    if not callable(func):
249        raise TypeError('func has to be a function.')
250
251    for i in range(42):
252        try:
253            func(np.arange(i), x.T[0])
254        except TypeError:
255            continue
256        except IndexError:
257            continue
258        else:
259            break
260    else:
261        raise RuntimeError("Fit function is not valid.")
262
263    n_parms = i
264    if not silent:
265        print('Fit with', n_parms, 'parameter' + 's' * (n_parms > 1))
266
267    x_f = np.vectorize(lambda o: o.value)(x)
268    dx_f = np.vectorize(lambda o: o.dvalue)(x)
269    y_f = np.array([o.value for o in y])
270    dy_f = np.array([o.dvalue for o in y])
271
272    if np.any(np.asarray(dx_f) <= 0.0):
273        raise Exception('No x errors available, run the gamma method first.')
274
275    if np.any(np.asarray(dy_f) <= 0.0):
276        raise Exception('No y errors available, run the gamma method first.')
277
278    if 'initial_guess' in kwargs:
279        x0 = kwargs.get('initial_guess')
280        if len(x0) != n_parms:
281            raise Exception('Initial guess does not have the correct length: %d vs. %d' % (len(x0), n_parms))
282    else:
283        x0 = [1] * n_parms
284
285    data = RealData(x_f, y_f, sx=dx_f, sy=dy_f)
286    model = Model(func)
287    odr = ODR(data, model, x0, partol=np.finfo(np.float64).eps)
288    odr.set_job(fit_type=0, deriv=1)
289    out = odr.run()
290
291    output.residual_variance = out.res_var
292
293    output.method = 'ODR'
294
295    output.message = out.stopreason
296
297    output.xplus = out.xplus
298
299    if not silent:
300        print('Method: ODR')
301        print(*out.stopreason)
302        print('Residual variance:', output.residual_variance)
303
304    if out.info > 3:
305        raise Exception('The minimization procedure did not converge.')
306
307    m = x_f.size
308
309    def odr_chisquare(p):
310        model = func(p[:n_parms], p[n_parms:].reshape(x_shape))
311        chisq = anp.sum(((y_f - model) / dy_f) ** 2) + anp.sum(((x_f - p[n_parms:].reshape(x_shape)) / dx_f) ** 2)
312        return chisq
313
314    if kwargs.get('expected_chisquare') is True:
315        W = np.diag(1 / np.asarray(np.concatenate((dy_f.ravel(), dx_f.ravel()))))
316
317        if kwargs.get('covariance') is not None:
318            cov = kwargs.get('covariance')
319        else:
320            cov = covariance(np.concatenate((y, x.ravel())))
321
322        number_of_x_parameters = int(m / x_f.shape[-1])
323
324        old_jac = jacobian(func)(out.beta, out.xplus)
325        fused_row1 = np.concatenate((old_jac, np.concatenate((number_of_x_parameters * [np.zeros(old_jac.shape)]), axis=0)))
326        fused_row2 = np.concatenate((jacobian(lambda x, y: func(y, x))(out.xplus, out.beta).reshape(x_f.shape[-1], x_f.shape[-1] * number_of_x_parameters), np.identity(number_of_x_parameters * old_jac.shape[0])))
327        new_jac = np.concatenate((fused_row1, fused_row2), axis=1)
328
329        A = W @ new_jac
330        P_phi = A @ np.linalg.pinv(A.T @ A) @ A.T
331        expected_chisquare = np.trace((np.identity(P_phi.shape[0]) - P_phi) @ W @ cov @ W)
332        if expected_chisquare <= 0.0:
333            warnings.warn("Negative expected_chisquare.", RuntimeWarning)
334            expected_chisquare = np.abs(expected_chisquare)
335        output.chisquare_by_expected_chisquare = odr_chisquare(np.concatenate((out.beta, out.xplus.ravel()))) / expected_chisquare
336        if not silent:
337            print('chisquare/expected_chisquare:',
338                  output.chisquare_by_expected_chisquare)
339
340    fitp = out.beta
341    try:
342        hess = hessian(odr_chisquare)(np.concatenate((fitp, out.xplus.ravel())))
343    except TypeError:
344        raise Exception("It is required to use autograd.numpy instead of numpy within fit functions, see the documentation for details.") from None
345
346    def odr_chisquare_compact_x(d):
347        model = func(d[:n_parms], d[n_parms:n_parms + m].reshape(x_shape))
348        chisq = anp.sum(((y_f - model) / dy_f) ** 2) + anp.sum(((d[n_parms + m:].reshape(x_shape) - d[n_parms:n_parms + m].reshape(x_shape)) / dx_f) ** 2)
349        return chisq
350
351    jac_jac_x = hessian(odr_chisquare_compact_x)(np.concatenate((fitp, out.xplus.ravel(), x_f.ravel())))
352
353    # Compute hess^{-1} @ jac_jac_x[:n_parms + m, n_parms + m:] using LAPACK dgesv
354    try:
355        deriv_x = -scipy.linalg.solve(hess, jac_jac_x[:n_parms + m, n_parms + m:])
356    except np.linalg.LinAlgError:
357        raise Exception("Cannot invert hessian matrix.")
358
359    def odr_chisquare_compact_y(d):
360        model = func(d[:n_parms], d[n_parms:n_parms + m].reshape(x_shape))
361        chisq = anp.sum(((d[n_parms + m:] - model) / dy_f) ** 2) + anp.sum(((x_f - d[n_parms:n_parms + m].reshape(x_shape)) / dx_f) ** 2)
362        return chisq
363
364    jac_jac_y = hessian(odr_chisquare_compact_y)(np.concatenate((fitp, out.xplus.ravel(), y_f)))
365
366    # Compute hess^{-1} @ jac_jac_y[:n_parms + m, n_parms + m:] using LAPACK dgesv
367    try:
368        deriv_y = -scipy.linalg.solve(hess, jac_jac_y[:n_parms + m, n_parms + m:])
369    except np.linalg.LinAlgError:
370        raise Exception("Cannot invert hessian matrix.")
371
372    result = []
373    for i in range(n_parms):
374        result.append(derived_observable(lambda my_var, **kwargs: (my_var[0] + np.finfo(np.float64).eps) / (x.ravel()[0].value + np.finfo(np.float64).eps) * out.beta[i], list(x.ravel()) + list(y), man_grad=list(deriv_x[i]) + list(deriv_y[i])))
375
376    output.fit_parameters = result
377
378    output.odr_chisquare = odr_chisquare(np.concatenate((out.beta, out.xplus.ravel())))
379    output.dof = x.shape[-1] - n_parms
380    output.p_value = 1 - scipy.stats.chi2.cdf(output.odr_chisquare, output.dof)
381
382    return output

Performs a non-linear fit to y = func(x) and returns a list of Obs corresponding to the fit parameters.

Parameters
  • x (list): list of Obs, or a tuple of lists of Obs
  • y (list): list of Obs. The dvalues of the Obs are used as x- and yerror for the fit.
  • func (object): func has to be of the form

    import autograd.numpy as anp
    
    def func(a, x):
       return a[0] + a[1] * x + a[2] * anp.sinh(x)
    

    For multiple x values func can be of the form

    def func(a, x):
       (x1, x2) = x
       return a[0] * x1 ** 2 + a[1] * x2
    

    It is important that all numpy functions refer to autograd.numpy, otherwise the differentiation will not work.

  • silent (bool, optional): If true all output to the console is omitted (default False).
  • initial_guess (list): can provide an initial guess for the input parameters. Relevant for non-linear fits with many parameters.
  • expected_chisquare (bool): If true prints the expected chisquare which is corrected by effects caused by correlated input data. This can take a while as the full correlation matrix has to be calculated (default False).
  • num_grad (bool): Use numerical differentiation instead of automatic differentiation to perform the error propagation (default False).
Notes

Based on the orthogonal distance regression module of scipy.

Returns
  • output (Fit_result): Parameters and information on the fitted result.
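
A short usage sketch with invented toy data (the ensemble name "ens1" is a placeholder); both x and y carry errors, which is the situation total_least_squares is meant for:

```python
import numpy as np
import pyerrors as pe

x = [pe.pseudo_Obs(xv, 0.02, "ens1") for xv in np.arange(1., 6.)]
y = [pe.pseudo_Obs(0.5 + 1.2 * xv, 0.05, "ens1") for xv in np.arange(1., 6.)]
[o.gamma_method() for o in x + y]  # both x- and y-errors are required

def func(a, x):
    return a[0] + a[1] * x

out = pe.fits.total_least_squares(x, y, func)
out.gamma_method()
print(out)
```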
def fit_lin(x, y, **kwargs):
928def fit_lin(x, y, **kwargs):
929    """Performs a linear fit to y = n + m * x and returns two Obs n, m.
930
931    Parameters
932    ----------
933    x : list
934        Can either be a list of floats in which case no xerror is assumed, or
935        a list of Obs, where the dvalues of the Obs are used as xerror for the fit.
936    y : list
937        List of Obs, the dvalues of the Obs are used as yerror for the fit.
938
939    Returns
940    -------
941    fit_parameters : list[Obs]
942        List of fitted observables.
943    """
944
945    def f(a, x):
946        y = a[0] + a[1] * x
947        return y
948
949    if all(isinstance(n, Obs) for n in x):
950        out = total_least_squares(x, y, f, **kwargs)
951        return out.fit_parameters
952    elif all(isinstance(n, float) or isinstance(n, int) for n in x) or isinstance(x, np.ndarray):
953        out = least_squares(x, y, f, **kwargs)
954        return out.fit_parameters
955    else:
956        raise Exception('Unsupported types for x')

Performs a linear fit to y = n + m * x and returns two Obs n, m.

Parameters
  • x (list): Can either be a list of floats in which case no xerror is assumed, or a list of Obs, where the dvalues of the Obs are used as xerror for the fit.
  • y (list): List of Obs, the dvalues of the Obs are used as yerror for the fit.
Returns
  • fit_parameters (list[Obs]): List of fitted observables.
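
A brief sketch with invented toy data; here x is a plain list of floats, so the fit is dispatched to least_squares:

```python
import pyerrors as pe

x = [1., 2., 3., 4.]
y = [pe.pseudo_Obs(0.3 + 0.8 * xv, 0.05, "ens1") for xv in x]
[o.gamma_method() for o in y]

n, m = pe.fits.fit_lin(x, y)  # intercept n and slope m as Obs
n.gamma_method()
m.gamma_method()
print(n, m)
```

If x were a list of Obs instead, the same call would dispatch to total_least_squares and propagate the x-errors as well.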
def qqplot(x, o_y, func, p):
959def qqplot(x, o_y, func, p):
960    """Generates a quantile-quantile plot of the fit result which can be used to
961       check if the residuals of the fit are gaussian distributed.
962
963    Returns
964    -------
965    None
966    """
967
968    residuals = []
969    for i_x, i_y in zip(x, o_y):
970        residuals.append((i_y - func(p, i_x)) / i_y.dvalue)
971    residuals = sorted(residuals)
972    my_y = [o.value for o in residuals]
973    probplot = scipy.stats.probplot(my_y)
974    my_x = probplot[0][0]
975    plt.figure(figsize=(8, 8 / 1.618))
976    plt.errorbar(my_x, my_y, fmt='o')
977    fit_start = my_x[0]
978    fit_stop = my_x[-1]
979    samples = np.arange(fit_start, fit_stop, 0.01)
980    plt.plot(samples, samples, 'k--', zorder=11, label='Standard normal distribution')
981    plt.plot(samples, probplot[1][0] * samples + probplot[1][1], zorder=10, label='Least squares fit, r=' + str(np.around(probplot[1][2], 3)), marker='', ls='-')
982
983    plt.xlabel('Theoretical quantiles')
984    plt.ylabel('Ordered Values')
985    plt.legend()
986    plt.draw()

Generates a quantile-quantile plot of the fit result which can be used to check if the residuals of the fit are Gaussian distributed.

Returns
  • None
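
The plot is usually requested via least_squares(..., qqplot=True), but it can also be produced directly from the data, the fit function and the fitted parameters. A self-contained sketch with invented toy data:

```python
import numpy as np
import autograd.numpy as anp
import matplotlib.pyplot as plt
import pyerrors as pe

x = np.arange(1., 9.)
y = [pe.pseudo_Obs(1.1 * np.exp(-0.25 * xv), 0.05, "ens1") for xv in x]
[o.gamma_method() for o in y]

def func(a, x):
    return a[0] * anp.exp(-a[1] * x)

fit_res = pe.fits.least_squares(x, y, func)
fit_res.gamma_method()
pe.fits.qqplot(x, y, func, fit_res.fit_parameters)
plt.show()
```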
def residual_plot(x, y, func, fit_res):
 989def residual_plot(x, y, func, fit_res):
 990    """Generates a plot which compares the fit to the data and displays the corresponding residuals
 991
 992    Returns
 993    -------
 994    None
 995    """
 996    sorted_x = sorted(x)
 997    xstart = sorted_x[0] - 0.5 * (sorted_x[1] - sorted_x[0])
 998    xstop = sorted_x[-1] + 0.5 * (sorted_x[-1] - sorted_x[-2])
 999    x_samples = np.arange(xstart, xstop + 0.01, 0.01)
1000
1001    plt.figure(figsize=(8, 8 / 1.618))
1002    gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1], wspace=0.0, hspace=0.0)
1003    ax0 = plt.subplot(gs[0])
1004    ax0.errorbar(x, [o.value for o in y], yerr=[o.dvalue for o in y], ls='none', fmt='o', capsize=3, markersize=5, label='Data')
1005    ax0.plot(x_samples, func([o.value for o in fit_res], x_samples), label='Fit', zorder=10, ls='-', ms=0)
1006    ax0.set_xticklabels([])
1007    ax0.set_xlim([xstart, xstop])
1008    ax0.set_xticklabels([])
1009    ax0.legend()
1010
1011    residuals = (np.asarray([o.value for o in y]) - func([o.value for o in fit_res], x)) / np.asarray([o.dvalue for o in y])
1012    ax1 = plt.subplot(gs[1])
1013    ax1.plot(x, residuals, 'ko', ls='none', markersize=5)
1014    ax1.tick_params(direction='out')
1015    ax1.tick_params(axis="x", bottom=True, top=True, labelbottom=True)
1016    ax1.axhline(y=0.0, ls='--', color='k', marker=" ")
1017    ax1.fill_between(x_samples, -1.0, 1.0, alpha=0.1, facecolor='k')
1018    ax1.set_xlim([xstart, xstop])
1019    ax1.set_ylabel('Residuals')
1020    plt.subplots_adjust(wspace=None, hspace=None)
1021    plt.draw()

Generates a plot which compares the fit to the data and displays the corresponding residuals

Returns
  • None
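
Like the quantile-quantile plot, this figure is most easily obtained via least_squares(..., resplot=True); called directly it expects the data, the fit function and the list of fitted parameters. A sketch with invented toy data:

```python
import numpy as np
import matplotlib.pyplot as plt
import pyerrors as pe

x = np.arange(1., 9.)
y = [pe.pseudo_Obs(1.5 - 0.2 * xv, 0.05, "ens1") for xv in x]
[o.gamma_method() for o in y]

def func(a, x):
    return a[0] + a[1] * x

fit_res = pe.fits.least_squares(x, y, func)
fit_res.gamma_method()
pe.fits.residual_plot(x, y, func, fit_res.fit_parameters)
plt.show()
```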
def error_band(x, func, beta):
1024def error_band(x, func, beta):
1025    """Calculate the error band for an array of sample values x, for given fit function func with optimized parameters beta.
1026
1027    Returns
1028    -------
1029    err : np.array(Obs)
1030        Error band for an array of sample values x
1031    """
1032    cov = covariance(beta)
1033    if np.any(np.abs(cov - cov.T) > 1000 * np.finfo(np.float64).eps):
1034        warnings.warn("Covariance matrix is not symmetric within floating point precision", RuntimeWarning)
1035
1036    deriv = []
1037    for i, item in enumerate(x):
1038        deriv.append(np.array(egrad(func)([o.value for o in beta], item)))
1039
1040    err = []
1041    for i, item in enumerate(x):
1042        err.append(np.sqrt(deriv[i] @ cov @ deriv[i]))
1043    err = np.array(err)
1044
1045    return err

Calculate the error band for an array of sample values x, for given fit function func with optimized parameters beta.

Returns
  • err (np.array(Obs)): Error band for an array of sample values x
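
A sketch of how the band might be evaluated on a dense grid of points after a fit; the data and names are invented for illustration:

```python
import numpy as np
import pyerrors as pe

x = np.arange(1., 9.)
y = [pe.pseudo_Obs(0.4 + 1.1 * xv, 0.05, "ens1") for xv in x]
[o.gamma_method() for o in y]

def func(a, x):
    return a[0] + a[1] * x

fit_res = pe.fits.least_squares(x, y, func)
fit_res.gamma_method()  # the parameter covariance is needed below

x_dense = np.linspace(1., 8., 50)
band = pe.fits.error_band(x_dense, func, fit_res.fit_parameters)
```

The returned array can then be used, e.g., with matplotlib's fill_between to draw a one-sigma band around the fitted curve.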
def ks_test(objects=None):
1048def ks_test(objects=None):
1049    """Performs a Kolmogorov–Smirnov test for the p-values of all fit objects.
1050
1051    Parameters
1052    ----------
1053    objects : list
1054        List of fit results to include in the analysis (optional).
1055
1056    Returns
1057    -------
1058    None
1059    """
1060
1061    if objects is None:
1062        obs_list = []
1063        for obj in gc.get_objects():
1064            if isinstance(obj, Fit_result):
1065                obs_list.append(obj)
1066    else:
1067        obs_list = objects
1068
1069    p_values = [o.p_value for o in obs_list]
1070
1071    bins = len(p_values)
1072    x = np.arange(0, 1.001, 0.001)
1073    plt.plot(x, x, 'k', zorder=1)
1074    plt.xlim(0, 1)
1075    plt.ylim(0, 1)
1076    plt.xlabel('p-value')
1077    plt.ylabel('Cumulative probability')
1078    plt.title(str(bins) + ' p-values')
1079
1080    n = np.arange(1, bins + 1) / np.float64(bins)
1081    Xs = np.sort(p_values)
1082    plt.step(Xs, n)
1083    diffs = n - Xs
1084    loc_max_diff = np.argmax(np.abs(diffs))
1085    loc = Xs[loc_max_diff]
1086    plt.annotate('', xy=(loc, loc), xytext=(loc, loc + diffs[loc_max_diff]), arrowprops=dict(arrowstyle='<->', shrinkA=0, shrinkB=0))
1087    plt.draw()
1088
1089    print(scipy.stats.kstest(p_values, 'uniform'))

Performs a Kolmogorov–Smirnov test for the p-values of all fit objects.

Parameters
  • objects (list): List of fit results to include in the analysis (optional).
Returns
  • None
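
A minimal sketch that builds a few toy fits and passes the resulting Fit_result objects explicitly (with objects=None the function instead picks up all Fit_result instances it can find via the garbage collector); data and ensemble names are invented:

```python
import numpy as np
import matplotlib.pyplot as plt
import pyerrors as pe

def func(a, x):
    return a[0] + a[1] * x

rng = np.random.default_rng(1)
fit_results = []
for seed in range(5):
    x = np.arange(1., 9.)
    y = [pe.pseudo_Obs(0.4 + 1.1 * xv + rng.normal(0.0, 0.05), 0.05, f"ens{seed}") for xv in x]
    [o.gamma_method() for o in y]
    fit_results.append(pe.fits.least_squares(x, y, func, silent=True))

pe.fits.ks_test(fit_results)
plt.show()
```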