diff --git a/docs/index.html b/docs/index.html new file mode 100644 index 00000000..34e05776 --- /dev/null +++ b/docs/index.html @@ -0,0 +1,260 @@ + + + + + + + Module List – pdoc 8.0.1 + + + + + + + + + + + + + +
+ + pdoc logo + + +
+
+
+ + \ No newline at end of file diff --git a/docs/pyerrors.html b/docs/pyerrors.html new file mode 100644 index 00000000..eeb88eb9 --- /dev/null +++ b/docs/pyerrors.html @@ -0,0 +1,411 @@ + + + + + + + pyerrors API documentation + + + + + + + + + + + +
+
+

+pyerrors

+ +

What is pyerrors?

+ +

pyerrors is a Python package for error computation and propagation of Markov chain Monte Carlo data.

+ +

Getting started

+ +
import numpy as np
+import pyerrors as pe
+
+my_obs = pe.Obs([samples], ['ensemble_name'])
+my_new_obs = 2 * np.log(my_obs) / my_obs
+my_new_obs.gamma_method()
+my_new_obs.details()
+print(my_new_obs)
+
+ +

The Obs class

+ +

pyerrors.obs.Obs

+ +
import pyerrors as pe
+
+my_obs = pe.Obs([samples], ['ensemble_name'])
+
+ +

Multiple ensembles/replica

+ +

Irregular Monte Carlo chains

+ +

Error propagation

+ +

Error propagation is based on automatic differentiation (see A. Ramos, arXiv:1809.01289).

+ +

The standard numpy functions are overloaded and act directly on Obs objects:

+ +
import numpy as np
+import pyerrors as pe
+
+my_obs = pe.Obs([samples], ['ensemble_name'])
+my_new_obs = 2 * np.log(my_obs) / my_obs
+my_new_obs.gamma_method()
+my_new_obs.details()
+
+ +

Error estimation

+ +

pyerrors.obs.Obs.gamma_method

+ +

$\delta_i\delta_j$

+ +

Exponential tails

+ +

Covariance

+ +

Optimization / fits / roots

+ +

Complex observables

+ +

Matrix operations

+ +

Input

+
+ +
+ View Source +
r'''
+# What is pyerrors?
+`pyerrors` is a Python package for error computation and propagation of Markov chain Monte Carlo data.
+
+## Getting started
+
+```python
+import numpy as np
+import pyerrors as pe
+
+my_obs = pe.Obs([samples], ['ensemble_name'])
+my_new_obs = 2 * np.log(my_obs) / my_obs
+my_new_obs.gamma_method()
+my_new_obs.details()
+print(my_new_obs)
+```
+# The `Obs` class
+`pyerrors.obs.Obs`
+```python
+import pyerrors as pe
+
+my_obs = pe.Obs([samples], ['ensemble_name'])
+```
+
+## Multiple ensembles/replica
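+
+A hedged sketch (the sample arrays `samples1` and `samples2` are assumed to exist; replica of the same ensemble are assumed to be distinguished via a `|` in the name, e.g. `'ensemble1|r01'`):
+```python
+import pyerrors as pe
+
+my_obs = pe.Obs([samples1, samples2], ['ensemble1', 'ensemble2'])
+```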
+
+## Irregular Monte Carlo chains
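+
+A minimal sketch, assuming the `idl` keyword of `pe.Obs`, which specifies the configuration numbers on which the samples were measured (here every second configuration):
+```python
+import pyerrors as pe
+
+my_obs = pe.Obs([samples], ['ensemble_name'], idl=[range(1, 2 * len(samples), 2)])
+```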
+
+# Error propagation
+Error propagation is based on automatic differentiation (see A. Ramos, arXiv:1809.01289).
+
+The standard numpy functions are overloaded and act directly on Obs objects:
+```python
+import numpy as np
+import pyerrors as pe
+
+my_obs = pe.Obs([samples], ['ensemble_name'])
+my_new_obs = 2 * np.log(my_obs) / my_obs
+my_new_obs.gamma_method()
+my_new_obs.details()
+```
+
+# Error estimation
+`pyerrors.obs.Obs.gamma_method`
+
+$\delta_i\delta_j$
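+
+As an illustration, the analysis parameter `S` of `gamma_method` (assumed to steer the automatic windowing procedure) can be varied:
+```python
+my_obs.gamma_method(S=3.0)
+my_obs.details()
+```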
+
+## Exponential tails
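+
+A sketch using the `tau_exp` keyword of `gamma_method` (assumed to attach an exponential tail to the autocorrelation function):
+```python
+my_obs.gamma_method(tau_exp=10.0)
+my_obs.details()
+```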
+
+## Covariance
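+
+A minimal sketch, assuming `pe.covariance` and two Obs `obs1` and `obs2` defined on the same ensemble:
+```python
+my_covariance = pe.covariance(obs1, obs2)
+```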
+
+# Optimization / fits / roots
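+
+A sketch of a least-squares fit (the lists `x` and `y` of fit points are assumed to exist, with `y` containing Obs; fit functions use `autograd.numpy` so they can be differentiated automatically):
+```python
+import autograd.numpy as anp
+import pyerrors as pe
+
+def func(a, x):
+    return a[0] * anp.exp(-a[1] * x)
+
+fit_result = pe.fits.least_squares(x, y, func)
+```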
+
+# Complex observables
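+
+A minimal sketch, assuming the `CObs` class, which wraps a real and an imaginary Obs (`my_real_obs` and `my_imag_obs` are assumed to exist):
+```python
+my_cobs = pe.CObs(my_real_obs, my_imag_obs)
+my_product = my_cobs * my_cobs.conjugate()
+```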
+
+# Matrix operations
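+
+A sketch using `pyerrors.linalg`, whose routines (e.g. `inv`, `eigh`, `cholesky`) act on matrices of Obs (`my_matrix`, a square numpy array of Obs, is assumed to exist):
+```python
+my_inverse = pe.linalg.inv(my_matrix)
+eigenvalues, eigenvectors = pe.linalg.eigh(my_matrix)
+```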
+
+# Input
+'''
+from .obs import *
+from .correlators import *
+from .fits import *
+from . import dirac
+from . import linalg
+from . import misc
+from . import mpm
+from . import npr
+from . import roots
+
+from .version import __version__
+
+ +
+ +
+
+ + \ No newline at end of file diff --git a/docs/pyerrors/correlators.html b/docs/pyerrors/correlators.html new file mode 100644 index 00000000..5d2d2551 --- /dev/null +++ b/docs/pyerrors/correlators.html @@ -0,0 +1,3351 @@ + + + + + + + pyerrors.correlators API documentation + + + + + + + + + + + +
+
+

+pyerrors.correlators

+ + +
+ View Source +
import warnings
+import numpy as np
+import autograd.numpy as anp
+import matplotlib.pyplot as plt
+import scipy.linalg
+from .obs import Obs, dump_object, reweight, correlate
+from .fits import least_squares
+from .linalg import eigh, inv, cholesky
+from .roots import find_root
+
+
+class Corr:
+    """The class for a correlator (time dependent sequence of pe.Obs).
+
+    Everything this class does can be achieved using lists or arrays of Obs.
+    But it is simply more convenient to have a dedicated object for correlators.
+    One often wants to add or multiply correlators of the same length at every timeslice and it is inconvenient
+    to iterate over all timeslices for every operation. This is especially true when dealing with smearing matrices.
+
+    The correlator can have two types of content: an Obs at every timeslice OR a GEVP
+    smearing matrix at every timeslice. Other dependencies (e.g. spatial) are not supported.
+
+    """
+
+    def __init__(self, data_input, padding_front=0, padding_back=0, prange=None):
+        # All data_input should be a list of things at different timeslices. This needs to be verified
+
+        if not isinstance(data_input, list):
+            raise TypeError('Corr.__init__ expects a list of timeslices.')
+        # data_input can have multiple shapes. The simplest one is a list of Obs.
+        # We check, if this is the case
+        if all([isinstance(item, Obs) for item in data_input]):
+            self.content = [np.asarray([item]) for item in data_input]
+            # Wrapping the Obs in an array ensures that the data structure is consistent with smearing matrices.
+            self.N = 1  # number of smearings
+
+        # data_input in the form [np.array(Obs,NxN)]
+        elif all([isinstance(item, np.ndarray) or item is None for item in data_input]) and any([isinstance(item, np.ndarray) for item in data_input]):
+            self.content = data_input
+
+            noNull = [a for a in self.content if not (a is None)]  # Collect the non-None entries to check that all defined matrices are consistent
+            self.N = noNull[0].shape[0]
+            # The checks are now identical to the case above
+            if self.N > 1 and noNull[0].shape[0] != noNull[0].shape[1]:
+                raise Exception("Smearing matrices are not NxN")
+            if (not all([item.shape == noNull[0].shape for item in noNull])):
+                raise Exception("Items in data_input are not of identical shape." + str(noNull))
+        else:  # In case it's a list of something else.
+            raise Exception("data_input contains item of wrong type")
+
+        self.tag = None
+
+        # We now apply some padding to our list, in case it represents a correlator of length T that is not defined at every timeslice.
+        # An undefined timeslice is represented by the None object
+        self.content = [None] * padding_front + self.content + [None] * padding_back
+        self.T = len(self.content)  # for convenience: will be used a lot
+
+        # The attribute "range" [start,end] marks a range of two timeslices.
+        # This is useful for keeping track of plateaus and fitranges.
+        # The range can be inherited from other Corrs, if the operation should not alter a chosen range eg. multiplication with a constant.
+        self.prange = prange
+
+        self.gamma_method()
+
+    def __getitem__(self, idx):
+        """Return the content of timeslice idx"""
+        if len(self.content[idx]) == 1:
+            return self.content[idx][0]
+        else:
+            return self.content[idx]
+
+    @property
+    def reweighted(self):
+        bool_array = np.array([list(map(lambda x: x.reweighted, o)) for o in self.content])
+        if np.all(bool_array == 1):
+            return True
+        elif np.all(bool_array == 0):
+            return False
+        else:
+            raise Exception("Reweighting status of correlator corrupted.")
+
+    def gamma_method(self):
+        """Apply the gamma method to the content of the Corr."""
+        for item in self.content:
+            if not(item is None):
+                if self.N == 1:
+                    item[0].gamma_method()
+                else:
+                    for i in range(self.N):
+                        for j in range(self.N):
+                            item[i, j].gamma_method()
+
+    # We need to project the Correlator with a Vector to get a single value at each timeslice.
+    # The method can use one or two vectors.
+    # If two are specified it returns v1@G@v2 (the order might be very important.)
+    # By default it will return the lowest source, which usually means unsmeared-unsmeared (0,0), but it does not have to be.
+    def projected(self, vector_l=None, vector_r=None):
+        if self.N == 1:
+            raise Exception("Trying to project a Corr, that already has N=1.")
+            # This Exception is in no way necessary. One could just return self
+            # But there is no scenario, where a user would want that to happen and the error message might be more informative.
+
+        self.gamma_method()
+
+        if vector_l is None:
+            vector_l, vector_r = np.asarray([1.] + (self.N - 1) * [0.]), np.asarray([1.] + (self.N - 1) * [0.])
+        elif(vector_r is None):
+            vector_r = vector_l
+
+        if not vector_l.shape == vector_r.shape == (self.N,):
+            raise Exception("Vectors are of wrong shape!")
+
+        # We always normalize before projecting! But we only print a warning when it is clear that the vectors were not meant to be normalized.
+        if (not (0.95 < vector_r @ vector_r < 1.05)) or (not (0.95 < vector_l @ vector_l < 1.05)):
+            print("Vectors are normalized before projection!")
+
+        vector_l, vector_r = vector_l / np.sqrt((vector_l @ vector_l)), vector_r / np.sqrt(vector_r @ vector_r)
+
+        newcontent = [None if (item is None) else np.asarray([vector_l.T @ item @ vector_r]) for item in self.content]
+        return Corr(newcontent)
+
+    def sum(self):
+        return np.sqrt(self.N) * self.projected(np.ones(self.N))
+
+    # For purposes of debugging and verification, one might want to see a single smearing level. smearing will return a Corr at the specified i,j, where both are integers 0 <= i,j < N.
+    def smearing(self, i, j):
+        if self.N == 1:
+            raise Exception("Trying to pick smearing from projected Corr")
+        newcontent = [None if(item is None) else item[i, j] for item in self.content]
+        return Corr(newcontent)
+
+    # Obs and Matplotlib do not play nicely
+    # We often want to retrieve x,y,y_err as lists to pass them to something like pyplot.errorbar
+    def plottable(self):
+        """Outputs the correlator in a plotable format.
+
+        Outputs three lists containing the timeslice index, the value on each
+        timeslice and the error on each timeslice.
+        """
+        if self.N != 1:
+            raise Exception("Can only make Corr[N=1] plottable")  # We could also autoproject to the groundstate or expect vectors, but this is supposed to be a super simple function.
+        x_list = [x for x in range(self.T) if not self.content[x] is None]
+        y_list = [y[0].value for y in self.content if y is not None]
+        y_err_list = [y[0].dvalue for y in self.content if y is not None]
+
+        return x_list, y_list, y_err_list
+
+    # symmetric returns a Corr, that has been symmetrized.
+    # A symmetry checker is still to be implemented
+    # The method will not delete any redundant timeslices (Bad for memory, Great for convenience)
+    def symmetric(self):
+        """ Symmetrize the correlator around x0=0."""
+        if self.T % 2 != 0:
+            raise Exception("Can not symmetrize odd T")
+
+        if np.argmax(np.abs(self.content)) != 0:
+            warnings.warn("Correlator does not seem to be symmetric around x0=0.", RuntimeWarning)
+
+        newcontent = [self.content[0]]
+        for t in range(1, self.T):
+            if (self.content[t] is None) or (self.content[self.T - t] is None):
+                newcontent.append(None)
+            else:
+                newcontent.append(0.5 * (self.content[t] + self.content[self.T - t]))
+        if(all([x is None for x in newcontent])):
+            raise Exception("Corr could not be symmetrized: No redundant values")
+        return Corr(newcontent, prange=self.prange)
+
+    def anti_symmetric(self):
+        """Anti-symmetrize the correlator around x0=0."""
+        if self.T % 2 != 0:
+            raise Exception("Can not symmetrize odd T")
+
+        if not all([o.is_zero_within_error() for o in self.content[0]]):
+            warnings.warn("Correlator does not seem to be anti-symmetric around x0=0.", RuntimeWarning)
+
+        newcontent = [self.content[0]]
+        for t in range(1, self.T):
+            if (self.content[t] is None) or (self.content[self.T - t] is None):
+                newcontent.append(None)
+            else:
+                newcontent.append(0.5 * (self.content[t] - self.content[self.T - t]))
+        if(all([x is None for x in newcontent])):
+            raise Exception("Corr could not be symmetrized: No redundant values")
+        return Corr(newcontent, prange=self.prange)
+
+    # This method will symmetrize the matrices and therefore make them positive definite.
+    def smearing_symmetric(self):
+        if self.N > 1:
+            transposed = [None if (G is None) else G.T for G in self.content]
+            return 0.5 * (Corr(transposed) + self)
+        if self.N == 1:
+            raise Exception("Trying to symmetrize a smearing matrix, that already has N=1.")
+
+    # We also include a simple GEVP method based on Scipy.linalg
+    def GEVP(self, t0, ts, state=1):
+        if (self.content[t0] is None) or (self.content[ts] is None):
+            raise Exception("Corr not defined at t0/ts")
+        G0, Gt = np.empty([self.N, self.N], dtype="double"), np.empty([self.N, self.N], dtype="double")
+        for i in range(self.N):
+            for j in range(self.N):
+                G0[i, j] = self.content[t0][i, j].value
+                Gt[i, j] = self.content[ts][i, j].value
+
+        sp_val, sp_vec = scipy.linalg.eig(Gt, G0)
+        sp_vec = sp_vec[:, np.argsort(sp_val)[-state]]  # We only want the eigenvector belonging to the selected state
+        sp_vec = sp_vec / np.sqrt(sp_vec @ sp_vec)
+        return sp_vec
+
+    def Eigenvalue(self, t0, state=1):
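+        # Solve the GEVP G(t) v = lambda G(t0) v via the Cholesky decomposition G(t0) = L L^T:
+        # the ordinary eigenvalues of M(t) = L^{-1} G(t) L^{-T} coincide with the generalized eigenvalues.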
+        G = self.smearing_symmetric()
+        G0 = G.content[t0]
+        L = cholesky(G0)
+        Li = inv(L)
+        LT = L.T
+        LTi = inv(LT)
+        newcontent = []
+        for t in range(self.T):
+            Gt = G.content[t]
+            M = Li @ Gt @ LTi
+            eigenvalues = eigh(M)[0]
+            eigenvalue = eigenvalues[-state]
+            newcontent.append(eigenvalue)
+        return Corr(newcontent)
+
+    def roll(self, dt):
+        """Periodically shift the correlator by dt timeslices
+
+        Parameters
+        ----------
+        dt : int
+            number of timeslices
+        """
+        return Corr(list(np.roll(np.array(self.content, dtype=object), dt)))
+
+    def reverse(self):
+        """Reverse the time ordering of the Corr"""
+        return Corr(self.content[::-1])
+
+    def correlate(self, partner):
+        """Correlate the correlator with another correlator or Obs"""
+        new_content = []
+        for x0, t_slice in enumerate(self.content):
+            if t_slice is None:
+                new_content.append(None)
+            else:
+                if isinstance(partner, Corr):
+                    if partner.content[x0] is None:
+                        new_content.append(None)
+                    else:
+                        new_content.append(np.array([correlate(o, partner.content[x0][0]) for o in t_slice]))
+                elif isinstance(partner, Obs):
+                    new_content.append(np.array([correlate(o, partner) for o in t_slice]))
+                else:
+                    raise Exception("Can only correlate with an Obs or a Corr.")
+
+        return Corr(new_content)
+
+    def reweight(self, weight, **kwargs):
+        """Reweight the correlator.
+
+        Parameters
+        ----------
+        weight : Obs
+            Reweighting factor. An Observable that has to be defined on a superset of the
+            configurations in obs[i].idl for all i.
+
+        Keyword arguments
+        -----------------
+        all_configs : bool
+            if True, the reweighted observables are normalized by the average of
+            the reweighting factor on all configurations in weight.idl and not
+            on the configurations in obs[i].idl.
+        """
+        new_content = []
+        for t_slice in self.content:
+            if t_slice is None:
+                new_content.append(None)
+            else:
+                new_content.append(np.array(reweight(weight, t_slice, **kwargs)))
+        return Corr(new_content)
+
+    def T_symmetry(self, partner, parity=+1):
+        """Return the time symmetry average of the correlator and its partner
+
+        Parameters
+        ----------
+        partner : Corr
+            Time symmetry partner of the Corr
+        parity : int
+            Parity quantum number of the correlator, can be +1 or -1
+        """
+        if not isinstance(partner, Corr):
+            raise Exception("T partner has to be a Corr object.")
+        if parity not in [+1, -1]:
+            raise Exception("Parity has to be +1 or -1.")
+        T_partner = parity * partner.reverse()
+
+        t_slices = []
+        for x0, t_slice in enumerate((self - T_partner).content):
+            if t_slice is not None:
+                if not t_slice[0].is_zero_within_error(5):
+                    t_slices.append(x0)
+        if t_slices:
+            warnings.warn("T symmetry partners do not agree within 5 sigma on time slices " + str(t_slices) + ".", RuntimeWarning)
+
+        return (self + T_partner) / 2
+
+    def deriv(self, symmetric=True):
+        """Return the first derivative of the correlator with respect to x0.
+
+        Parameters
+        ----------
+        symmetric : bool
+            decides whether symmetric or simple finite differences are used. Default: True
+        """
+        if not symmetric:
+            newcontent = []
+            for t in range(self.T - 1):
+                if (self.content[t] is None) or (self.content[t + 1] is None):
+                    newcontent.append(None)
+                else:
+                    newcontent.append(self.content[t + 1] - self.content[t])
+            if(all([x is None for x in newcontent])):
+                raise Exception("Derivative is undefined at all timeslices")
+            return Corr(newcontent, padding_back=1)
+        if symmetric:
+            newcontent = []
+            for t in range(1, self.T - 1):
+                if (self.content[t - 1] is None) or (self.content[t + 1] is None):
+                    newcontent.append(None)
+                else:
+                    newcontent.append(0.5 * (self.content[t + 1] - self.content[t - 1]))
+            if(all([x is None for x in newcontent])):
+                raise Exception('Derivative is undefined at all timeslices')
+            return Corr(newcontent, padding_back=1, padding_front=1)
+
+    def second_deriv(self):
+        """Return the second derivative of the correlator with respect to x0."""
+        newcontent = []
+        for t in range(1, self.T - 1):
+            if (self.content[t - 1] is None) or (self.content[t + 1] is None):
+                newcontent.append(None)
+            else:
+                newcontent.append((self.content[t + 1] - 2 * self.content[t] + self.content[t - 1]))
+        if(all([x is None for x in newcontent])):
+            raise Exception("Derivative is undefined at all timeslices")
+        return Corr(newcontent, padding_back=1, padding_front=1)
+
+    def m_eff(self, variant='log', guess=1.0):
+        """Returns the effective mass of the correlator as correlator object
+
+        Parameters
+        ----------
+        variant : str
+            log: uses the standard effective mass log(C(t) / C(t+1))
+            cosh: Use periodicity of the correlator by solving C(t) / C(t+1) = cosh(m * (t - T/2)) / cosh(m * (t + 1 - T/2)) for m.
+            sinh: Use anti-periodicity of the correlator by solving C(t) / C(t+1) = sinh(m * (t - T/2)) / sinh(m * (t + 1 - T/2)) for m.
+            See, e.g., arXiv:1205.5380
+        guess : float
+            guess for the root finder, only relevant for the 'cosh', 'sinh' and 'periodic' variants, which use a root search
+        """
+        if self.N != 1:
+            raise Exception('Correlator must be projected before getting m_eff')
+        if variant == 'log':
+            newcontent = []
+            for t in range(self.T - 1):
+                if (self.content[t] is None) or (self.content[t + 1] is None):
+                    newcontent.append(None)
+                else:
+                    newcontent.append(self.content[t] / self.content[t + 1])
+            if(all([x is None for x in newcontent])):
+                raise Exception('m_eff is undefined at all timeslices')
+
+            return np.log(Corr(newcontent, padding_back=1))
+
+        elif variant in ['periodic', 'cosh', 'sinh']:
+            if variant in ['periodic', 'cosh']:
+                func = anp.cosh
+            else:
+                func = anp.sinh
+
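+            # Note: root_function closes over the loop variable t defined in the loop below;
+            # find_root only evaluates it inside the loop, so each timeslice sees its own t.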
+            def root_function(x, d):
+                return func(x * (t - self.T / 2)) / func(x * (t + 1 - self.T / 2)) - d
+
+            newcontent = []
+            for t in range(self.T - 1):
+                if (self.content[t] is None) or (self.content[t + 1] is None):
+                    newcontent.append(None)
+                # Fill the two timeslices in the middle of the lattice with their predecessors
+                elif variant == 'sinh' and t in [self.T / 2, self.T / 2 - 1]:
+                    newcontent.append(newcontent[-1])
+                else:
+                    newcontent.append(np.abs(find_root(self.content[t][0] / self.content[t + 1][0], root_function, guess=guess)))
+            if(all([x is None for x in newcontent])):
+                raise Exception('m_eff is undefined at all timeslices')
+
+            return Corr(newcontent, padding_back=1)
+
+        elif variant == 'arccosh':
+            newcontent = []
+            for t in range(1, self.T - 1):
+                if (self.content[t] is None) or (self.content[t + 1] is None) or (self.content[t - 1] is None):
+                    newcontent.append(None)
+                else:
+                    newcontent.append((self.content[t + 1] + self.content[t - 1]) / (2 * self.content[t]))
+            if(all([x is None for x in newcontent])):
+                raise Exception("m_eff is undefined at all timeslices")
+            return np.arccosh(Corr(newcontent, padding_back=1, padding_front=1))
+
+        else:
+            raise Exception('Unknown variant.')
+
+    def fit(self, function, fitrange=None, silent=False, **kwargs):
+        """Fits function to the data
+
+        Parameters
+        ----------
+        function : obj
+            function to fit to the data. See fits.least_squares for details.
+        fitrange : list
+            Range in which the function is to be fitted to the data.
+            If not specified, self.prange or all timeslices are used.
+        silent : bool
+            Decides whether output is printed to the standard output.
+        """
+        if self.N != 1:
+            raise Exception("Correlator must be projected before fitting")
+
+        # The default behaviour is:
+        # 1 use explicit fitrange
+        # if none is provided, use the range of the corr
+        # if this is also not set, use the whole length of the corr (This could come with a warning!)
+
+        if fitrange is None:
+            if self.prange:
+                fitrange = self.prange
+            else:
+                fitrange = [0, self.T]
+
+        xs = [x for x in range(fitrange[0], fitrange[1] + 1) if not self.content[x] is None]
+        ys = [self.content[x][0] for x in range(fitrange[0], fitrange[1] + 1) if not self.content[x] is None]
+        result = least_squares(xs, ys, function, silent=silent, **kwargs)
+        result.gamma_method()
+        return result
+
+    def plateau(self, plateau_range=None, method="fit"):
+        """ Extract a plateu value from a Corr object
+
+        Attributes:
+        -----------
+        plateau_range : list
+            list with two entries, indicating the first and the last timeslice
+            of the plateau region.
+        method : str
+            method to extract the plateau.
+                'fit' fits a constant to the plateau region
+                'avg', 'average' or 'mean' just average over the given timeslices.
+        """
+        if not plateau_range:
+            if self.prange:
+                plateau_range = self.prange
+            else:
+                raise Exception("no plateau range provided")
+        if self.N != 1:
+            raise Exception("Correlator must be projected before getting a plateau.")
+        if(all([self.content[t] is None for t in range(plateau_range[0], plateau_range[1] + 1)])):
+            raise Exception("plateau is undefined at all timeslices in plateaurange.")
+        if method == "fit":
+            def const_func(a, t):
+                return a[0]
+            return self.fit(const_func, plateau_range)[0]
+        elif method in ["avg", "average", "mean"]:
+            returnvalue = np.mean([item[0] for item in self.content[plateau_range[0]:plateau_range[1] + 1] if item is not None])
+            returnvalue.gamma_method()
+            return returnvalue
+
+        else:
+            raise Exception("Unsupported plateau method: " + method)
+
+    def set_prange(self, prange):
+        """Sets the attribute prange of the Corr object."""
+        if not len(prange) == 2:
+            raise Exception("prange must be a list or array with two values")
+        if not ((isinstance(prange[0], int)) and (isinstance(prange[1], int))):
+            raise Exception("Start and end point must be integers")
+        if not (0 <= prange[0] <= self.T and 0 <= prange[1] <= self.T and prange[0] < prange[1]):
+            raise Exception("Start and end point must define a range in the interval 0,T")
+
+        self.prange = prange
+        return
+
+    def show(self, x_range=None, comp=None, y_range=None, logscale=False, plateau=None, fit_res=None, ylabel=None, save=None):
+        """Plots the correlator, uses tag as label if available.
+
+        Parameters
+        ----------
+        x_range : list
+            list of two values, determining the range of the x-axis e.g. [4, 8]
+        comp : Corr or list of Corr
+            Correlator or list of correlators which are plotted for comparison.
+        logscale : bool
+            Sets y-axis to logscale
+        plateau : Obs
+            plateau to be visualized in the figure
+        fit_res : Fit_result
+            Fit_result object to be visualized
+        ylabel : str
+            Label for the y-axis
+        save : str
+            path to file in which the figure should be saved
+        """
+        if self.N != 1:
+            raise Exception("Correlator must be projected before plotting")
+        if x_range is None:
+            x_range = [0, self.T]
+
+        fig = plt.figure()
+        ax1 = fig.add_subplot(111)
+
+        x, y, y_err = self.plottable()
+        ax1.errorbar(x, y, y_err, label=self.tag)
+        if logscale:
+            ax1.set_yscale('log')
+        else:
+            # we generate ylim instead of using autoscaling.
+            if y_range is None:
+                try:
+                    y_min = min([(x[0].value - x[0].dvalue) for x in self.content[x_range[0]: x_range[1] + 1] if (x is not None) and x[0].dvalue < 2 * np.abs(x[0].value)])
+                    y_max = max([(x[0].value + x[0].dvalue) for x in self.content[x_range[0]: x_range[1] + 1] if (x is not None) and x[0].dvalue < 2 * np.abs(x[0].value)])
+                    ax1.set_ylim([y_min - 0.1 * (y_max - y_min), y_max + 0.1 * (y_max - y_min)])
+                except Exception:
+                    pass
+            else:
+                ax1.set_ylim(y_range)
+        if comp:
+            if isinstance(comp, Corr) or isinstance(comp, list):
+                for corr in comp if isinstance(comp, list) else [comp]:
+                    x, y, y_err = corr.plottable()
+                    plt.errorbar(x, y, y_err, label=corr.tag, mfc=plt.rcParams['axes.facecolor'])
+            else:
+                raise Exception('comp must be a correlator or a list of correlators.')
+
+        if plateau:
+            if isinstance(plateau, Obs):
+                ax1.axhline(y=plateau.value, linewidth=2, color=plt.rcParams['text.color'], alpha=0.6, marker=',', ls='--', label=str(plateau))
+                ax1.axhspan(plateau.value - plateau.dvalue, plateau.value + plateau.dvalue, alpha=0.25, color=plt.rcParams['text.color'], ls='-')
+            else:
+                raise Exception('plateau must be an Obs')
+        if self.prange:
+            ax1.axvline(self.prange[0], 0, 1, ls='-', marker=',')
+            ax1.axvline(self.prange[1], 0, 1, ls='-', marker=',')
+
+        if fit_res:
+            x_samples = np.arange(x_range[0], x_range[1] + 1, 0.05)
+            ax1.plot(x_samples,
+                     fit_res.fit_function([o.value for o in fit_res.fit_parameters], x_samples),
+                     ls='-', marker=',', lw=2)
+
+        ax1.set_xlabel(r'$x_0 / a$')
+        if ylabel:
+            ax1.set_ylabel(ylabel)
+        ax1.set_xlim([x_range[0] - 0.5, x_range[1] + 0.5])
+
+        handles, labels = ax1.get_legend_handles_labels()
+        if labels:
+            ax1.legend()
+        plt.draw()
+
+        if save:
+            if isinstance(save, str):
+                fig.savefig(save)
+            else:
+                raise Exception("Safe has to be a string.")
+
+        return
+
+    def dump(self, filename):
+        """Dumps the Corr into a pickel file
+
+        Attributes:
+        -----------
+        filename : str
+            Name of the file
+        """
+        dump_object(self, filename)
+        return
+
+    def print(self, range=[0, None]):
+        print(self.__repr__(range))
+
+    def __repr__(self, range=[0, None]):
+        content_string = ""
+        if self.tag is not None:
+            content_string += "Description: " + self.tag + "\n"
+        if range[1]:
+            range[1] += 1
+        content_string += 'x0/a\tCorr(x0/a)\n------------------\n'
+        for i, sub_corr in enumerate(self.content[range[0]:range[1]]):
+            if sub_corr is None:
+                content_string += str(i + range[0]) + '\n'
+            else:
+                content_string += str(i + range[0])
+                for element in sub_corr:
+                    content_string += '\t' + ' ' * int(element >= 0) + str(element)
+                content_string += '\n'
+        return content_string
+
+    def __str__(self):
+        return self.__repr__()
+
+    # We define the basic operations, that can be performed with correlators.
+    # While */+- get defined here, they only work for Corr*Obs and not Obs*Corr.
+    # This is because Obs*Corr checks Obs.__mul__ first and does not catch an exception.
+    # One could try and tell Obs to check if the y in __mul__ is a Corr and defer the operation to Corr.__rmul__.
+
+    def __add__(self, y):
+        if isinstance(y, Corr):
+            if ((self.N != y.N) or (self.T != y.T)):
+                raise Exception("Addition of Corrs with different shape")
+            newcontent = []
+            for t in range(self.T):
+                if (self.content[t] is None) or (y.content[t] is None):
+                    newcontent.append(None)
+                else:
+                    newcontent.append(self.content[t] + y.content[t])
+            return Corr(newcontent)
+
+        elif isinstance(y, Obs) or isinstance(y, int) or isinstance(y, float):
+            newcontent = []
+            for t in range(self.T):
+                if (self.content[t] is None):
+                    newcontent.append(None)
+                else:
+                    newcontent.append(self.content[t] + y)
+            return Corr(newcontent, prange=self.prange)
+        else:
+            raise TypeError("Corr + wrong type")
+
+    def __mul__(self, y):
+        if isinstance(y, Corr):
+            if not((self.N == 1 or y.N == 1 or self.N == y.N) and self.T == y.T):
+                raise Exception("Multiplication of Corr object requires N=N or N=1 and T=T")
+            newcontent = []
+            for t in range(self.T):
+                if (self.content[t] is None) or (y.content[t] is None):
+                    newcontent.append(None)
+                else:
+                    newcontent.append(self.content[t] * y.content[t])
+            return Corr(newcontent)
+
+        elif isinstance(y, Obs) or isinstance(y, int) or isinstance(y, float):
+            newcontent = []
+            for t in range(self.T):
+                if (self.content[t] is None):
+                    newcontent.append(None)
+                else:
+                    newcontent.append(self.content[t] * y)
+            return Corr(newcontent, prange=self.prange)
+        else:
+            raise TypeError("Corr * wrong type")
+
+    def __truediv__(self, y):
+        if isinstance(y, Corr):
+            if not((self.N == 1 or y.N == 1 or self.N == y.N) and self.T == y.T):
+                raise Exception("Multiplication of Corr object requires N=N or N=1 and T=T")
+            newcontent = []
+            for t in range(self.T):
+                if (self.content[t] is None) or (y.content[t] is None):
+                    newcontent.append(None)
+                else:
+                    newcontent.append(self.content[t] / y.content[t])
+            # Here we set the entire timeslice to undefined if one of the smearings has encountered a division by zero.
+            # While this might throw away perfectly good values in other smearings, we never have to check whether all values in our matrix are defined.
+            for t in range(self.T):
+                if newcontent[t] is None:
+                    continue
+                if np.isnan(np.sum(newcontent[t]).value):
+                    newcontent[t] = None
+
+            if all([item is None for item in newcontent]):
+                raise Exception("Division returns completely undefined correlator")
+            return Corr(newcontent)
+
+        elif isinstance(y, Obs):
+            if y.value == 0:
+                raise Exception('Division by zero will return undefined correlator')
+            newcontent = []
+            for t in range(self.T):
+                if (self.content[t] is None):
+                    newcontent.append(None)
+                else:
+                    newcontent.append(self.content[t] / y)
+            return Corr(newcontent, prange=self.prange)
+
+        elif isinstance(y, int) or isinstance(y, float):
+            if y == 0:
+                raise Exception('Division by zero will return undefined correlator')
+            newcontent = []
+            for t in range(self.T):
+                if (self.content[t] is None):
+                    newcontent.append(None)
+                else:
+                    newcontent.append(self.content[t] / y)
+            return Corr(newcontent, prange=self.prange)
+        else:
+            raise TypeError('Corr / wrong type')
+
+    def __neg__(self):
+        newcontent = [None if (item is None) else -1. * item for item in self.content]
+        return Corr(newcontent, prange=self.prange)
+
+    def __sub__(self, y):
+        return self + (-y)
+
+    def __pow__(self, y):
+        if isinstance(y, Obs) or isinstance(y, int) or isinstance(y, float):
+            newcontent = [None if (item is None) else item**y for item in self.content]
+            return Corr(newcontent, prange=self.prange)
+        else:
+            raise TypeError('Type of exponent not supported')
+
+    def __abs__(self):
+        newcontent = [None if (item is None) else np.abs(item) for item in self.content]
+        return Corr(newcontent, prange=self.prange)
+
+    # The numpy functions:
+    def sqrt(self):
+        return self**0.5
+
+    def log(self):
+        newcontent = [None if (item is None) else np.log(item) for item in self.content]
+        return Corr(newcontent, prange=self.prange)
+
+    def exp(self):
+        newcontent = [None if (item is None) else np.exp(item) for item in self.content]
+        return Corr(newcontent, prange=self.prange)
+
+    def _apply_func_to_corr(self, func):
+        newcontent = [None if (item is None) else func(item) for item in self.content]
+        for t in range(self.T):
+            if newcontent[t] is None:
+                continue
+            if np.isnan(np.sum(newcontent[t]).value):
+                newcontent[t] = None
+        if all([item is None for item in newcontent]):
+            raise Exception('Operation returns undefined correlator')
+        return Corr(newcontent)
+
+    def sin(self):
+        return self._apply_func_to_corr(np.sin)
+
+    def cos(self):
+        return self._apply_func_to_corr(np.cos)
+
+    def tan(self):
+        return self._apply_func_to_corr(np.tan)
+
+    def sinh(self):
+        return self._apply_func_to_corr(np.sinh)
+
+    def cosh(self):
+        return self._apply_func_to_corr(np.cosh)
+
+    def tanh(self):
+        return self._apply_func_to_corr(np.tanh)
+
+    def arcsin(self):
+        return self._apply_func_to_corr(np.arcsin)
+
+    def arccos(self):
+        return self._apply_func_to_corr(np.arccos)
+
+    def arctan(self):
+        return self._apply_func_to_corr(np.arctan)
+
+    def arcsinh(self):
+        return self._apply_func_to_corr(np.arcsinh)
+
+    def arccosh(self):
+        return self._apply_func_to_corr(np.arccosh)
+
+    def arctanh(self):
+        return self._apply_func_to_corr(np.arctanh)
+
+    # Right hand side operations (require tweak in main module to work)
+    def __radd__(self, y):
+        return self + y
+
+    def __rsub__(self, y):
+        return -self + y
+
+    def __rmul__(self, y):
+        return self * y
+
+    def __rtruediv__(self, y):
+        return (self / y) ** (-1)
+
+ +
+ +
+
+
+ #   + + + class + Corr: +
+ +
+ View Source +
class Corr:
+    """The class for a correlator (time dependent sequence of pe.Obs).
+
+    Everything this class does can be achieved using lists or arrays of Obs.
+    But it is simply more convenient to have a dedicated object for correlators.
+    One often wants to add or multiply correlators of the same length at every timeslice and it is inconvenient
+    to iterate over all timeslices for every operation. This is especially true when dealing with smearing matrices.
+
+    The correlator can have two types of content: an Obs at every timeslice OR a GEVP
+    smearing matrix at every timeslice. Other dependencies (e.g. spatial) are not supported.
+
+    """
+
+    def __init__(self, data_input, padding_front=0, padding_back=0, prange=None):
+        # All data_input should be a list of things at different timeslices. This needs to be verified
+
+        if not isinstance(data_input, list):
+            raise TypeError('Corr.__init__ expects a list of timeslices.')
+        # data_input can have multiple shapes. The simplest one is a list of Obs.
+        # We check, if this is the case
+        if all([isinstance(item, Obs) for item in data_input]):
+            self.content = [np.asarray([item]) for item in data_input]
+            # Wrapping the Obs in an array ensures that the data structure is consistent with smearing matrices.
+            self.N = 1  # number of smearings
+
+        # data_input in the form [np.array(Obs,NxN)]
+        elif all([isinstance(item, np.ndarray) or item is None for item in data_input]) and any([isinstance(item, np.ndarray) for item in data_input]):
+            self.content = data_input
+
+            noNull = [a for a in self.content if not (a is None)]  # Collect the non-None entries to check that all defined matrices are consistent
+            self.N = noNull[0].shape[0]
+            # The checks are now identical to the case above
+            if self.N > 1 and noNull[0].shape[0] != noNull[0].shape[1]:
+                raise Exception("Smearing matrices are not NxN")
+            if (not all([item.shape == noNull[0].shape for item in noNull])):
+                raise Exception("Items in data_input are not of identical shape." + str(noNull))
+        else:  # In case it's a list of something else.
+            raise Exception("data_input contains item of wrong type")
+
+        self.tag = None
+
+        # We now apply some padding to our list, in case it represents a correlator of length T that is not defined at every timeslice.
+        # An undefined timeslice is represented by the None object
+        self.content = [None] * padding_front + self.content + [None] * padding_back
+        self.T = len(self.content)  # for convenience: will be used a lot
+
+        # The attribute "range" [start,end] marks a range of two timeslices.
+        # This is useful for keeping track of plateaus and fitranges.
+        # The range can be inherited from other Corrs, if the operation should not alter a chosen range eg. multiplication with a constant.
+        self.prange = prange
+
+        self.gamma_method()
+
+    def __getitem__(self, idx):
+        """Return the content of timeslice idx"""
+        if len(self.content[idx]) == 1:
+            return self.content[idx][0]
+        else:
+            return self.content[idx]
+
+    @property
+    def reweighted(self):
+        bool_array = np.array([list(map(lambda x: x.reweighted, o)) for o in self.content])
+        if np.all(bool_array == 1):
+            return True
+        elif np.all(bool_array == 0):
+            return False
+        else:
+            raise Exception("Reweighting status of correlator corrupted.")
+
+    def gamma_method(self):
+        """Apply the gamma method to the content of the Corr."""
+        for item in self.content:
+            if not(item is None):
+                if self.N == 1:
+                    item[0].gamma_method()
+                else:
+                    for i in range(self.N):
+                        for j in range(self.N):
+                            item[i, j].gamma_method()
+
+    # We need to project the Correlator with a Vector to get a single value at each timeslice.
+    # The method can use one or two vectors.
+    # If two are specified it returns v1@G@v2 (the order might be very important.)
+    # By default it will return the lowest source, which usually means unsmeared-unsmeared (0,0), but it does not have to be.
+    def projected(self, vector_l=None, vector_r=None):
+        if self.N == 1:
+            raise Exception("Trying to project a Corr, that already has N=1.")
+            # This Exception is in no way necessary. One could just return self
+            # But there is no scenario, where a user would want that to happen and the error message might be more informative.
+
+        self.gamma_method()
+
+        if vector_l is None:
+            vector_l, vector_r = np.asarray([1.] + (self.N - 1) * [0.]), np.asarray([1.] + (self.N - 1) * [0.])
+        elif(vector_r is None):
+            vector_r = vector_l
+
+        if not vector_l.shape == vector_r.shape == (self.N,):
+            raise Exception("Vectors are of wrong shape!")
+
+        # We always normalize before projecting! But we only print a warning when it is clear that the vectors were not meant to be normalized.
+        if (not (0.95 < vector_r @ vector_r < 1.05)) or (not (0.95 < vector_l @ vector_l < 1.05)):
+            print("Vectors are normalized before projection!")
+
+        vector_l, vector_r = vector_l / np.sqrt((vector_l @ vector_l)), vector_r / np.sqrt(vector_r @ vector_r)
+
+        newcontent = [None if (item is None) else np.asarray([vector_l.T @ item @ vector_r]) for item in self.content]
+        return Corr(newcontent)
+
+    def sum(self):
+        return np.sqrt(self.N) * self.projected(np.ones(self.N))
+
+    # For purposes of debugging and verification, one might want to see a single smearing level. smearing will return a Corr at the specified i,j, where both are integers 0 <= i,j < N.
+    def smearing(self, i, j):
+        if self.N == 1:
+            raise Exception("Trying to pick smearing from projected Corr")
+        newcontent = [None if(item is None) else item[i, j] for item in self.content]
+        return Corr(newcontent)
+
+    # Obs and Matplotlib do not play nicely
+    # We often want to retrieve x,y,y_err as lists to pass them to something like pyplot.errorbar
+    def plottable(self):
+        """Outputs the correlator in a plotable format.
+
+        Outputs three lists containing the timeslice index, the value on each
+        timeslice and the error on each timeslice.
+        """
+        if self.N != 1:
+            raise Exception("Can only make Corr[N=1] plottable")  # We could also autoproject to the groundstate or expect vectors, but this is supposed to be a super simple function.
+        x_list = [x for x in range(self.T) if not self.content[x] is None]
+        y_list = [y[0].value for y in self.content if y is not None]
+        y_err_list = [y[0].dvalue for y in self.content if y is not None]
+
+        return x_list, y_list, y_err_list
+
+    # symmetric returns a Corr, that has been symmetrized.
+    # A symmetry checker is still to be implemented
+    # The method will not delete any redundant timeslices (Bad for memory, Great for convenience)
+    def symmetric(self):
+        """ Symmetrize the correlator around x0=0."""
+        if self.T % 2 != 0:
+            raise Exception("Can not symmetrize odd T")
+
+        if np.argmax(np.abs(self.content)) != 0:
+            warnings.warn("Correlator does not seem to be symmetric around x0=0.", RuntimeWarning)
+
+        newcontent = [self.content[0]]
+        for t in range(1, self.T):
+            if (self.content[t] is None) or (self.content[self.T - t] is None):
+                newcontent.append(None)
+            else:
+                newcontent.append(0.5 * (self.content[t] + self.content[self.T - t]))
+        if(all([x is None for x in newcontent])):
+            raise Exception("Corr could not be symmetrized: No redundant values")
+        return Corr(newcontent, prange=self.prange)
+
+    def anti_symmetric(self):
+        """Anti-symmetrize the correlator around x0=0."""
+        if self.T % 2 != 0:
+            raise Exception("Can not symmetrize odd T")
+
+        if not all([o.is_zero_within_error() for o in self.content[0]]):
+            warnings.warn("Correlator does not seem to be anti-symmetric around x0=0.", RuntimeWarning)
+
+        newcontent = [self.content[0]]
+        for t in range(1, self.T):
+            if (self.content[t] is None) or (self.content[self.T - t] is None):
+                newcontent.append(None)
+            else:
+                newcontent.append(0.5 * (self.content[t] - self.content[self.T - t]))
+        if(all([x is None for x in newcontent])):
+            raise Exception("Corr could not be symmetrized: No redundant values")
+        return Corr(newcontent, prange=self.prange)
+
+    # This method will symmetrize the matrices and therefore make them positive definite.
+    def smearing_symmetric(self):
+        if self.N > 1:
+            transposed = [None if (G is None) else G.T for G in self.content]
+            return 0.5 * (Corr(transposed) + self)
+        if self.N == 1:
+            raise Exception("Trying to symmetrize a smearing matrix, that already has N=1.")
+
+    # We also include a simple GEVP method based on Scipy.linalg
+    def GEVP(self, t0, ts, state=1):
+        if (self.content[t0] is None) or (self.content[ts] is None):
+            raise Exception("Corr not defined at t0/ts")
+        G0, Gt = np.empty([self.N, self.N], dtype="double"), np.empty([self.N, self.N], dtype="double")
+        for i in range(self.N):
+            for j in range(self.N):
+                G0[i, j] = self.content[t0][i, j].value
+                Gt[i, j] = self.content[ts][i, j].value
+
+        sp_val, sp_vec = scipy.linalg.eig(Gt, G0)
+        sp_vec = sp_vec[:, np.argsort(sp_val)[-state]]  # We only want the eigenvector belonging to the selected state
+        sp_vec = sp_vec / np.sqrt(sp_vec @ sp_vec)
+        return sp_vec
+
+    def Eigenvalue(self, t0, state=1):
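+        # Solve the GEVP G(t) v = lambda G(t0) v via the Cholesky decomposition G(t0) = L L^T:
+        # the ordinary eigenvalues of M(t) = L^{-1} G(t) L^{-T} coincide with the generalized eigenvalues.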
+        G = self.smearing_symmetric()
+        G0 = G.content[t0]
+        L = cholesky(G0)
+        Li = inv(L)
+        LT = L.T
+        LTi = inv(LT)
+        newcontent = []
+        for t in range(self.T):
+            Gt = G.content[t]
+            M = Li @ Gt @ LTi
+            eigenvalues = eigh(M)[0]
+            eigenvalue = eigenvalues[-state]
+            newcontent.append(eigenvalue)
+        return Corr(newcontent)
+
+    def roll(self, dt):
+        """Periodically shift the correlator by dt timeslices
+
+        Parameters
+        ----------
+        dt : int
+            number of timeslices
+        """
+        return Corr(list(np.roll(np.array(self.content, dtype=object), dt)))
+
+    def reverse(self):
+        """Reverse the time ordering of the Corr"""
+        return Corr(self.content[::-1])
+
+    def correlate(self, partner):
+        """Correlate the correlator with another correlator or Obs"""
+        new_content = []
+        for x0, t_slice in enumerate(self.content):
+            if t_slice is None:
+                new_content.append(None)
+            else:
+                if isinstance(partner, Corr):
+                    if partner.content[x0] is None:
+                        new_content.append(None)
+                    else:
+                        new_content.append(np.array([correlate(o, partner.content[x0][0]) for o in t_slice]))
+                elif isinstance(partner, Obs):
+                    new_content.append(np.array([correlate(o, partner) for o in t_slice]))
+                else:
+                    raise Exception("Can only correlate with an Obs or a Corr.")
+
+        return Corr(new_content)
+
+    def reweight(self, weight, **kwargs):
+        """Reweight the correlator.
+
+        Parameters
+        ----------
+        weight : Obs
+            Reweighting factor. An Observable that has to be defined on a superset of the
+            configurations in obs[i].idl for all i.
+
+        Keyword arguments
+        -----------------
+        all_configs : bool
+            if True, the reweighted observables are normalized by the average of
+            the reweighting factor on all configurations in weight.idl and not
+            on the configurations in obs[i].idl.
+        """
+        new_content = []
+        for t_slice in self.content:
+            if t_slice is None:
+                new_content.append(None)
+            else:
+                new_content.append(np.array(reweight(weight, t_slice, **kwargs)))
+        return Corr(new_content)
+
+    def T_symmetry(self, partner, parity=+1):
+        """Return the time symmetry average of the correlator and its partner
+
+        Parameters
+        ----------
+        partner : Corr
+            Time symmetry partner of the Corr
+        parity : int
+            Parity quantum number of the correlator, can be +1 or -1
+        """
+        if not isinstance(partner, Corr):
+            raise Exception("T partner has to be a Corr object.")
+        if parity not in [+1, -1]:
+            raise Exception("Parity has to be +1 or -1.")
+        T_partner = parity * partner.reverse()
+
+        t_slices = []
+        for x0, t_slice in enumerate((self - T_partner).content):
+            if t_slice is not None:
+                if not t_slice[0].is_zero_within_error(5):
+                    t_slices.append(x0)
+        if t_slices:
+            warnings.warn("T symmetry partners do not agree within 5 sigma on time slices " + str(t_slices) + ".", RuntimeWarning)
+
+        return (self + T_partner) / 2
+
+    def deriv(self, symmetric=True):
+        """Return the first derivative of the correlator with respect to x0.
+
+        Parameters
+        ----------
+        symmetric : bool
+            decides whether symmetric or simple finite differences are used. Default: True
+        """
+        if not symmetric:
+            newcontent = []
+            for t in range(self.T - 1):
+                if (self.content[t] is None) or (self.content[t + 1] is None):
+                    newcontent.append(None)
+                else:
+                    newcontent.append(self.content[t + 1] - self.content[t])
+            if(all([x is None for x in newcontent])):
+                raise Exception("Derivative is undefined at all timeslices")
+            return Corr(newcontent, padding_back=1)
+        if symmetric:
+            newcontent = []
+            for t in range(1, self.T - 1):
+                if (self.content[t - 1] is None) or (self.content[t + 1] is None):
+                    newcontent.append(None)
+                else:
+                    newcontent.append(0.5 * (self.content[t + 1] - self.content[t - 1]))
+            if(all([x is None for x in newcontent])):
+                raise Exception('Derivative is undefined at all timeslices')
+            return Corr(newcontent, padding_back=1, padding_front=1)
+
+    def second_deriv(self):
+        """Return the second derivative of the correlator with respect to x0."""
+        newcontent = []
+        for t in range(1, self.T - 1):
+            if (self.content[t - 1] is None) or (self.content[t + 1] is None):
+                newcontent.append(None)
+            else:
+                newcontent.append((self.content[t + 1] - 2 * self.content[t] + self.content[t - 1]))
+        if(all([x is None for x in newcontent])):
+            raise Exception("Derivative is undefined at all timeslices")
+        return Corr(newcontent, padding_back=1, padding_front=1)
+
+    def m_eff(self, variant='log', guess=1.0):
+        """Returns the effective mass of the correlator as correlator object
+
+        Parameters
+        ----------
+        variant : str
+            log: uses the standard effective mass log(C(t) / C(t+1))
+            cosh: Use periodicity of the correlator by solving C(t) / C(t+1) = cosh(m * (t - T/2)) / cosh(m * (t + 1 - T/2)) for m.
+            sinh: Use anti-periodicity of the correlator by solving C(t) / C(t+1) = sinh(m * (t - T/2)) / sinh(m * (t + 1 - T/2)) for m.
+            See, e.g., arXiv:1205.5380
+        guess : float
+            guess for the root finder, only relevant for the 'cosh', 'sinh' and 'periodic' variants, which use a root search
+        """
+        if self.N != 1:
+            raise Exception('Correlator must be projected before getting m_eff')
+        if variant == 'log':
+            newcontent = []
+            for t in range(self.T - 1):
+                if (self.content[t] is None) or (self.content[t + 1] is None):
+                    newcontent.append(None)
+                else:
+                    newcontent.append(self.content[t] / self.content[t + 1])
+            if(all([x is None for x in newcontent])):
+                raise Exception('m_eff is undefined at all timeslices')
+
+            return np.log(Corr(newcontent, padding_back=1))
+
+        elif variant in ['periodic', 'cosh', 'sinh']:
+            if variant in ['periodic', 'cosh']:
+                func = anp.cosh
+            else:
+                func = anp.sinh
+
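+            # Note: root_function closes over the loop variable t defined in the loop below;
+            # find_root only evaluates it inside the loop, so each timeslice sees its own t.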
+            def root_function(x, d):
+                return func(x * (t - self.T / 2)) / func(x * (t + 1 - self.T / 2)) - d
+
+            newcontent = []
+            for t in range(self.T - 1):
+                if (self.content[t] is None) or (self.content[t + 1] is None):
+                    newcontent.append(None)
+                # Fill the two timeslices in the middle of the lattice with their predecessors
+                elif variant == 'sinh' and t in [self.T / 2, self.T / 2 - 1]:
+                    newcontent.append(newcontent[-1])
+                else:
+                    newcontent.append(np.abs(find_root(self.content[t][0] / self.content[t + 1][0], root_function, guess=guess)))
+            if(all([x is None for x in newcontent])):
+                raise Exception('m_eff is undefined at all timeslices')
+
+            return Corr(newcontent, padding_back=1)
+
+        elif variant == 'arccosh':
+            newcontent = []
+            for t in range(1, self.T - 1):
+                if (self.content[t] is None) or (self.content[t + 1] is None) or (self.content[t - 1] is None):
+                    newcontent.append(None)
+                else:
+                    newcontent.append((self.content[t + 1] + self.content[t - 1]) / (2 * self.content[t]))
+            if(all([x is None for x in newcontent])):
+                raise Exception("m_eff is undefined at all timeslices")
+            return np.arccosh(Corr(newcontent, padding_back=1, padding_front=1))
+
+        else:
+            raise Exception('Unknown variant.')
+
+    def fit(self, function, fitrange=None, silent=False, **kwargs):
+        """Fits function to the data
+
+        Parameters
+        ----------
+        function : obj
+            function to fit to the data. See fits.least_squares for details.
+        fitrange : list
+            Range in which the function is to be fitted to the data.
+            If not specified, self.prange or all timeslices are used.
+        silent : bool
+            Decides whether output is printed to the standard output.
+        """
+        if self.N != 1:
+            raise Exception("Correlator must be projected before fitting")
+
+        # The default behaviour is:
+        # 1 use explicit fitrange
+        # if none is provided, use the range of the corr
+        # if this is also not set, use the whole length of the corr (This could come with a warning!)
+
+        if fitrange is None:
+            if self.prange:
+                fitrange = self.prange
+            else:
+                fitrange = [0, self.T]
+
+        xs = [x for x in range(fitrange[0], fitrange[1] + 1) if not self.content[x] is None]
+        ys = [self.content[x][0] for x in range(fitrange[0], fitrange[1] + 1) if not self.content[x] is None]
+        result = least_squares(xs, ys, function, silent=silent, **kwargs)
+        result.gamma_method()
+        return result
+
+    def plateau(self, plateau_range=None, method="fit"):
+        """ Extract a plateu value from a Corr object
+
+        Attributes:
+        -----------
+        plateau_range : list
+            list with two entries, indicating the first and the last timeslice
+            of the plateau region.
+        method : str
+            method to extract the plateau.
+                'fit' fits a constant to the plateau region
+                'avg', 'average' or 'mean' just average over the given timeslices.
+        """
+        if not plateau_range:
+            if self.prange:
+                plateau_range = self.prange
+            else:
+                raise Exception("no plateau range provided")
+        if self.N != 1:
+            raise Exception("Correlator must be projected before getting a plateau.")
+        if(all([self.content[t] is None for t in range(plateau_range[0], plateau_range[1] + 1)])):
+            raise Exception("plateau is undefined at all timeslices in plateaurange.")
+        if method == "fit":
+            def const_func(a, t):
+                return a[0]
+            return self.fit(const_func, plateau_range)[0]
+        elif method in ["avg", "average", "mean"]:
+            returnvalue = np.mean([item[0] for item in self.content[plateau_range[0]:plateau_range[1] + 1] if item is not None])
+            returnvalue.gamma_method()
+            return returnvalue
+
+        else:
+            raise Exception("Unsupported plateau method: " + method)
+
+    def set_prange(self, prange):
+        """Sets the attribute prange of the Corr object."""
+        if not len(prange) == 2:
+            raise Exception("prange must be a list or array with two values")
+        if not ((isinstance(prange[0], int)) and (isinstance(prange[1], int))):
+            raise Exception("Start and end point must be integers")
+        if not (0 <= prange[0] <= self.T and 0 <= prange[1] <= self.T and prange[0] < prange[1]):
+            raise Exception("Start and end point must define a range in the interval 0,T")
+
+        self.prange = prange
+        return
+
+    def show(self, x_range=None, comp=None, y_range=None, logscale=False, plateau=None, fit_res=None, ylabel=None, save=None):
+        """Plots the correlator, uses tag as label if available.
+
+        Parameters
+        ----------
+        x_range : list
+            list of two values, determining the range of the x-axis e.g. [4, 8]
+        comp : Corr or list of Corr
+            Correlator or list of correlators which are plotted for comparison.
+        logscale : bool
+            Sets y-axis to logscale
+        plateau : Obs
+            plateau to be visualized in the figure
+        fit_res : Fit_result
+            Fit_result object to be visualized
+        ylabel : str
+            Label for the y-axis
+        save : str
+            path to file in which the figure should be saved
+        """
+        if self.N != 1:
+            raise Exception("Correlator must be projected before plotting")
+        if x_range is None:
+            x_range = [0, self.T]
+
+        fig = plt.figure()
+        ax1 = fig.add_subplot(111)
+
+        x, y, y_err = self.plottable()
+        ax1.errorbar(x, y, y_err, label=self.tag)
+        if logscale:
+            ax1.set_yscale('log')
+        else:
+            # we generate ylim instead of using autoscaling.
+            if y_range is None:
+                try:
+                    y_min = min([(x[0].value - x[0].dvalue) for x in self.content[x_range[0]: x_range[1] + 1] if (x is not None) and x[0].dvalue < 2 * np.abs(x[0].value)])
+                    y_max = max([(x[0].value + x[0].dvalue) for x in self.content[x_range[0]: x_range[1] + 1] if (x is not None) and x[0].dvalue < 2 * np.abs(x[0].value)])
+                    ax1.set_ylim([y_min - 0.1 * (y_max - y_min), y_max + 0.1 * (y_max - y_min)])
+                except Exception:
+                    pass
+            else:
+                ax1.set_ylim(y_range)
+        if comp:
+            if isinstance(comp, Corr) or isinstance(comp, list):
+                for corr in comp if isinstance(comp, list) else [comp]:
+                    x, y, y_err = corr.plottable()
+                    plt.errorbar(x, y, y_err, label=corr.tag, mfc=plt.rcParams['axes.facecolor'])
+            else:
+                raise Exception('comp must be a correlator or a list of correlators.')
+
+        if plateau:
+            if isinstance(plateau, Obs):
+                ax1.axhline(y=plateau.value, linewidth=2, color=plt.rcParams['text.color'], alpha=0.6, marker=',', ls='--', label=str(plateau))
+                ax1.axhspan(plateau.value - plateau.dvalue, plateau.value + plateau.dvalue, alpha=0.25, color=plt.rcParams['text.color'], ls='-')
+            else:
+                raise Exception('plateau must be an Obs')
+        if self.prange:
+            ax1.axvline(self.prange[0], 0, 1, ls='-', marker=',')
+            ax1.axvline(self.prange[1], 0, 1, ls='-', marker=',')
+
+        if fit_res:
+            x_samples = np.arange(x_range[0], x_range[1] + 1, 0.05)
+            ax1.plot(x_samples,
+                     fit_res.fit_function([o.value for o in fit_res.fit_parameters], x_samples),
+                     ls='-', marker=',', lw=2)
+
+        ax1.set_xlabel(r'$x_0 / a$')
+        if ylabel:
+            ax1.set_ylabel(ylabel)
+        ax1.set_xlim([x_range[0] - 0.5, x_range[1] + 0.5])
+
+        handles, labels = ax1.get_legend_handles_labels()
+        if labels:
+            ax1.legend()
+        plt.draw()
+
+        if save:
+            if isinstance(save, str):
+                fig.savefig(save)
+            else:
+                raise Exception("Safe has to be a string.")
+
+        return
+
+    def dump(self, filename):
+        """Dumps the Corr into a pickel file
+
+        Attributes:
+        -----------
+        filename : str
+            Name of the file
+        """
+        dump_object(self, filename)
+        return
+
+    def print(self, range=[0, None]):
+        print(self.__repr__(range))
+
+    def __repr__(self, range=[0, None]):
+        content_string = ""
+        if self.tag is not None:
+            content_string += "Description: " + self.tag + "\n"
+        # Compute the slice end locally to avoid mutating the (shared) default argument.
+        end = range[1] + 1 if range[1] is not None else None
+        content_string += 'x0/a\tCorr(x0/a)\n------------------\n'
+        for i, sub_corr in enumerate(self.content[range[0]:end]):
+            if sub_corr is None:
+                content_string += str(i + range[0]) + '\n'
+            else:
+                content_string += str(i + range[0])
+                for element in sub_corr:
+                    content_string += '\t' + ' ' * int(element >= 0) + str(element)
+                content_string += '\n'
+        return content_string
+
+    def __str__(self):
+        return self.__repr__()
+
+    # We define the basic operations, that can be performed with correlators.
+    # While */+- get defined here, they only work for Corr*Obs and not Obs*Corr.
+    # This is because Obs*Corr checks Obs.__mul__ first and does not catch an exception.
+    # One could try and tell Obs to check if the y in __mul__ is a Corr and return NotImplemented, so that Python falls back to Corr.__rmul__.
+
+    def __add__(self, y):
+        if isinstance(y, Corr):
+            if ((self.N != y.N) or (self.T != y.T)):
+                raise Exception("Addition of Corrs with different shape")
+            newcontent = []
+            for t in range(self.T):
+                if (self.content[t] is None) or (y.content[t] is None):
+                    newcontent.append(None)
+                else:
+                    newcontent.append(self.content[t] + y.content[t])
+            return Corr(newcontent)
+
+        elif isinstance(y, Obs) or isinstance(y, int) or isinstance(y, float):
+            newcontent = []
+            for t in range(self.T):
+                if (self.content[t] is None):
+                    newcontent.append(None)
+                else:
+                    newcontent.append(self.content[t] + y)
+            return Corr(newcontent, prange=self.prange)
+        else:
+            raise TypeError("Corr + wrong type")
+
+    def __mul__(self, y):
+        if isinstance(y, Corr):
+            if not((self.N == 1 or y.N == 1 or self.N == y.N) and self.T == y.T):
+                raise Exception("Multiplication of Corr object requires N=N or N=1 and T=T")
+            newcontent = []
+            for t in range(self.T):
+                if (self.content[t] is None) or (y.content[t] is None):
+                    newcontent.append(None)
+                else:
+                    newcontent.append(self.content[t] * y.content[t])
+            return Corr(newcontent)
+
+        elif isinstance(y, Obs) or isinstance(y, int) or isinstance(y, float):
+            newcontent = []
+            for t in range(self.T):
+                if (self.content[t] is None):
+                    newcontent.append(None)
+                else:
+                    newcontent.append(self.content[t] * y)
+            return Corr(newcontent, prange=self.prange)
+        else:
+            raise TypeError("Corr * wrong type")
+
+    def __truediv__(self, y):
+        if isinstance(y, Corr):
+            if not((self.N == 1 or y.N == 1 or self.N == y.N) and self.T == y.T):
+                raise Exception("Multiplication of Corr object requires N=N or N=1 and T=T")
+            newcontent = []
+            for t in range(self.T):
+                if (self.content[t] is None) or (y.content[t] is None):
+                    newcontent.append(None)
+                else:
+                    newcontent.append(self.content[t] / y.content[t])
+            # Here we set the entire timeslice to undefined if one of the smearings has encountered a division by zero.
+            # While this might throw away perfectly good values in other smearings, we never have to check whether all values in our matrix are defined.
+            for t in range(self.T):
+                if newcontent[t] is None:
+                    continue
+                if np.isnan(np.sum(newcontent[t]).value):
+                    newcontent[t] = None
+
+            if all([item is None for item in newcontent]):
+                raise Exception("Division returns completely undefined correlator")
+            return Corr(newcontent)
+
+        elif isinstance(y, Obs):
+            if y.value == 0:
+                raise Exception('Division by zero will return undefined correlator')
+            newcontent = []
+            for t in range(self.T):
+                if (self.content[t] is None):
+                    newcontent.append(None)
+                else:
+                    newcontent.append(self.content[t] / y)
+            return Corr(newcontent, prange=self.prange)
+
+        elif isinstance(y, int) or isinstance(y, float):
+            if y == 0:
+                raise Exception('Division by zero will return undefined correlator')
+            newcontent = []
+            for t in range(self.T):
+                if (self.content[t] is None):
+                    newcontent.append(None)
+                else:
+                    newcontent.append(self.content[t] / y)
+            return Corr(newcontent, prange=self.prange)
+        else:
+            raise TypeError('Corr / wrong type')
+
+    def __neg__(self):
+        newcontent = [None if (item is None) else -1. * item for item in self.content]
+        return Corr(newcontent, prange=self.prange)
+
+    def __sub__(self, y):
+        return self + (-y)
+
+    def __pow__(self, y):
+        if isinstance(y, Obs) or isinstance(y, int) or isinstance(y, float):
+            newcontent = [None if (item is None) else item**y for item in self.content]
+            return Corr(newcontent, prange=self.prange)
+        else:
+            raise TypeError('Type of exponent not supported')
+
+    def __abs__(self):
+        newcontent = [None if (item is None) else np.abs(item) for item in self.content]
+        return Corr(newcontent, prange=self.prange)
+
+    # The numpy functions:
+    def sqrt(self):
+        return self**0.5
+
+    def log(self):
+        newcontent = [None if (item is None) else np.log(item) for item in self.content]
+        return Corr(newcontent, prange=self.prange)
+
+    def exp(self):
+        newcontent = [None if (item is None) else np.exp(item) for item in self.content]
+        return Corr(newcontent, prange=self.prange)
+
+    def _apply_func_to_corr(self, func):
+        newcontent = [None if (item is None) else func(item) for item in self.content]
+        for t in range(self.T):
+            if newcontent[t] is None:
+                continue
+            if np.isnan(np.sum(newcontent[t]).value):
+                newcontent[t] = None
+        if all([item is None for item in newcontent]):
+            raise Exception('Operation returns undefined correlator')
+        return Corr(newcontent)
+
+    def sin(self):
+        return self._apply_func_to_corr(np.sin)
+
+    def cos(self):
+        return self._apply_func_to_corr(np.cos)
+
+    def tan(self):
+        return self._apply_func_to_corr(np.tan)
+
+    def sinh(self):
+        return self._apply_func_to_corr(np.sinh)
+
+    def cosh(self):
+        return self._apply_func_to_corr(np.cosh)
+
+    def tanh(self):
+        return self._apply_func_to_corr(np.tanh)
+
+    def arcsin(self):
+        return self._apply_func_to_corr(np.arcsin)
+
+    def arccos(self):
+        return self._apply_func_to_corr(np.arccos)
+
+    def arctan(self):
+        return self._apply_func_to_corr(np.arctan)
+
+    def arcsinh(self):
+        return self._apply_func_to_corr(np.arcsinh)
+
+    def arccosh(self):
+        return self._apply_func_to_corr(np.arccosh)
+
+    def arctanh(self):
+        return self._apply_func_to_corr(np.arctanh)
+
+    # Right hand side operations (require tweak in main module to work)
+    def __radd__(self, y):
+        return self + y
+
+    def __rsub__(self, y):
+        return -self + y
+
+    def __rmul__(self, y):
+        return self * y
+
+    def __rtruediv__(self, y):
+        return (self / y) ** (-1)
+
+ +
+ +

The class for a correlator (time dependent sequence of pe.Obs).

+ +

Everything this class does can be achieved using lists or arrays of Obs, but it is simply more convenient to have a dedicated object for correlators. One often wants to add or multiply correlators of the same length at every timeslice, and it is inconvenient to iterate over all timeslices for every operation. This is especially true when dealing with smearing matrices.

+ +

The correlator can have two types of content: an Obs at every timeslice OR a GEVP smearing matrix at every timeslice. Other dependencies (e.g. spatial) are not supported.
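A minimal construction sketch (hedged: `samples` is a placeholder list whose entry `samples[t]` holds the Monte Carlo history of the correlator at timeslice `t`, as in the Getting started example):

```python
import pyerrors as pe

# One Obs per timeslice; wrapping them in a Corr gives timeslice-wise arithmetic.
corr_content = [pe.Obs([samples[t]], ['ensemble_name']) for t in range(len(samples))]
my_corr = pe.Corr(corr_content)
```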

+
+ + +
+
#   + + + Corr(data_input, padding_front=0, padding_back=0, prange=None) +
+ +
+ View Source +
    def __init__(self, data_input, padding_front=0, padding_back=0, prange=None):
+        # All data_input should be a list of things at different timeslices. This needs to be verified
+
+        if not isinstance(data_input, list):
+            raise TypeError('Corr.__init__ expects a list of timeslices.')
+        # data_input can have multiple shapes. The simplest one is a list of Obs.
+        # We check, if this is the case
+        if all([isinstance(item, Obs) for item in data_input]):
+            self.content = [np.asarray([item]) for item in data_input]
+            # Wrapping the Obs in an array ensures that the data structure is consistent with smearing matrices.
+            self.N = 1  # number of smearings
+
+        # data_input in the form [np.array(Obs,NxN)]
+        elif all([isinstance(item, np.ndarray) or item is None for item in data_input]) and any([isinstance(item, np.ndarray) for item in data_input]):
+            self.content = data_input
+
+            noNull = [a for a in self.content if not (a is None)]  # Non-None entries, used to check that all defined matrices have the correct shape
+            self.N = noNull[0].shape[0]
+            # The checks are now identical to the case above
+            if self.N > 1 and noNull[0].shape[0] != noNull[0].shape[1]:
+                raise Exception("Smearing matrices are not NxN")
+            if (not all([item.shape == noNull[0].shape for item in noNull])):
+                raise Exception("Items in data_input are not of identical shape." + str(noNull))
+        else:  # In case it's a list of something else.
+            raise Exception("data_input contains item of wrong type")
+
+        self.tag = None
+
+        # We now apply some padding to our list, in case it represents a correlator of length T that is not defined at every timeslice.
+        # An undefined timeslice is represented by the None object
+        self.content = [None] * padding_front + self.content + [None] * padding_back
+        self.T = len(self.content)  # for convenience: will be used a lot
+
+        # The attribute "range" [start,end] marks a range of two timeslices.
+        # This is useful for keeping track of plateaus and fitranges.
+        # The range can be inherited from other Corrs if the operation should not alter a chosen range, e.g. multiplication with a constant.
+        self.prange = prange
+
+        self.gamma_method()
+
+ +
+ + + +
+
+
#   + + reweighted +
+ + + +
+
+
#   + + + def + gamma_method(self): +
+ +
+ View Source +
    def gamma_method(self):
+        """Apply the gamma method to the content of the Corr."""
+        for item in self.content:
+            if not(item is None):
+                if self.N == 1:
+                    item[0].gamma_method()
+                else:
+                    for i in range(self.N):
+                        for j in range(self.N):
+                            item[i, j].gamma_method()
+
+ +
+ +

Apply the gamma method to the content of the Corr.

+
+ + +
+
+
#   + + + def + projected(self, vector_l=None, vector_r=None): +
+ +
+ View Source +
    def projected(self, vector_l=None, vector_r=None):
+        if self.N == 1:
+            raise Exception("Trying to project a Corr, that already has N=1.")
+            # This Exception is in no way necessary. One could just return self
+            # But there is no scenario, where a user would want that to happen and the error message might be more informative.
+
+        self.gamma_method()
+
+        if vector_l is None:
+            vector_l, vector_r = np.asarray([1.] + (self.N - 1) * [0.]), np.asarray([1.] + (self.N - 1) * [0.])
+        elif(vector_r is None):
+            vector_r = vector_l
+
+        if not vector_l.shape == vector_r.shape == (self.N,):
+            raise Exception("Vectors are of wrong shape!")
+
+        # We always normalize before projecting! But we only raise a warning when it is clear they were not meant to be normalized.
+        if (not (0.95 < vector_r @ vector_r < 1.05)) or (not (0.95 < vector_l @ vector_l < 1.05)):
+            print("Vectors are normalized before projection!")
+
+        vector_l, vector_r = vector_l / np.sqrt((vector_l @ vector_l)), vector_r / np.sqrt(vector_r @ vector_r)
+
+        newcontent = [None if (item is None) else np.asarray([vector_l.T @ item @ vector_r]) for item in self.content]
+        return Corr(newcontent)
+
+ +
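A hedged sketch of projecting a smearing-matrix correlator (`matrix_corr` is an assumed Corr with N=2):

```python
import numpy as np

# Project onto the first smearing; vectors are normalized before projection.
vec = np.asarray([1., 0.])
projected_corr = matrix_corr.projected(vec, vec)  # returns a Corr with N=1
```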
+ + + +
+
+
#   + + + def + sum(self): +
+ +
+ View Source +
    def sum(self):
+        return np.sqrt(self.N) * self.projected(np.ones(self.N))
+
+ +
+ + + +
+
+
#   + + + def + smearing(self, i, j): +
+ +
+ View Source +
    def smearing(self, i, j):
+        if self.N == 1:
+            raise Exception("Trying to pick smearing from projected Corr")
+        newcontent = [None if(item is None) else item[i, j] for item in self.content]
+        return Corr(newcontent)
+
+ +
+ + + +
+
+
#   + + + def + plottable(self): +
+ +
+ View Source +
    def plottable(self):
+        """Outputs the correlator in a plotable format.
+
+        Outputs three lists containing the timeslice index, the value on each
+        timeslice and the error on each timeslice.
+        """
+        if self.N != 1:
+            raise Exception("Can only make Corr[N=1] plottable")  # We could also autoproject to the groundstate or expect vectors, but this is supposed to be a super simple function.
+        x_list = [x for x in range(self.T) if not self.content[x] is None]
+        y_list = [y[0].value for y in self.content if y is not None]
+        y_err_list = [y[0].dvalue for y in self.content if y is not None]
+
+        return x_list, y_list, y_err_list
+
+ +
+ +

Outputs the correlator in a plottable format.

+ +

Outputs three lists containing the timeslice index, the value on each timeslice and the error on each timeslice.

+
+ + +
+
+
#   + + + def + symmetric(self): +
+ +
+ View Source +
    def symmetric(self):
+        """ Symmetrize the correlator around x0=0."""
+        if self.T % 2 != 0:
+            raise Exception("Can not symmetrize odd T")
+
+        if np.argmax(np.abs(self.content)) != 0:
+            warnings.warn("Correlator does not seem to be symmetric around x0=0.", RuntimeWarning)
+
+        newcontent = [self.content[0]]
+        for t in range(1, self.T):
+            if (self.content[t] is None) or (self.content[self.T - t] is None):
+                newcontent.append(None)
+            else:
+                newcontent.append(0.5 * (self.content[t] + self.content[self.T - t]))
+        if(all([x is None for x in newcontent])):
+            raise Exception("Corr could not be symmetrized: No redundant values")
+        return Corr(newcontent, prange=self.prange)
+
+ +
+ +

Symmetrize the correlator around x0=0.

+
+ + +
+
+
#   + + + def + anti_symmetric(self): +
+ +
+ View Source +
    def anti_symmetric(self):
+        """Anti-symmetrize the correlator around x0=0."""
+        if self.T % 2 != 0:
+            raise Exception("Can not symmetrize odd T")
+
+        if not all([o.is_zero_within_error() for o in self.content[0]]):
+            warnings.warn("Correlator does not seem to be anti-symmetric around x0=0.", RuntimeWarning)
+
+        newcontent = [self.content[0]]
+        for t in range(1, self.T):
+            if (self.content[t] is None) or (self.content[self.T - t] is None):
+                newcontent.append(None)
+            else:
+                newcontent.append(0.5 * (self.content[t] - self.content[self.T - t]))
+        if(all([x is None for x in newcontent])):
+            raise Exception("Corr could not be symmetrized: No redundant values")
+        return Corr(newcontent, prange=self.prange)
+
+ +
+ +

Anti-symmetrize the correlator around x0=0.

+
+ + +
+
+
#   + + + def + smearing_symmetric(self): +
+ +
+ View Source +
    def smearing_symmetric(self):
+        if self.N > 1:
+            transposed = [None if (G is None) else G.T for G in self.content]
+            return 0.5 * (Corr(transposed) + self)
+        if self.N == 1:
+            raise Exception("Trying to symmetrize a smearing matrix, that already has N=1.")
+
+ +
+ + + +
+
+
#   + + + def + GEVP(self, t0, ts, state=1): +
+ +
+ View Source +
    def GEVP(self, t0, ts, state=1):
+        if (self.content[t0] is None) or (self.content[ts] is None):
+            raise Exception("Corr not defined at t0/ts")
+        G0, Gt = np.empty([self.N, self.N], dtype="double"), np.empty([self.N, self.N], dtype="double")
+        for i in range(self.N):
+            for j in range(self.N):
+                G0[i, j] = self.content[t0][i, j].value
+                Gt[i, j] = self.content[ts][i, j].value
+
+        sp_val, sp_vec = scipy.linalg.eig(Gt, G0)
+        sp_vec = sp_vec[:, np.argsort(sp_val)[-state]]  # We only want the eigenvector belonging to the selected state
+        sp_vec = sp_vec / np.sqrt(sp_vec @ sp_vec)
+        return sp_vec
+
+ +
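A sketch of the intended workflow (assumed names and reference times): solve the generalized eigenvalue problem C(ts) v = lambda C(t0) v and project onto the resulting eigenvector.

```python
# matrix_corr is an assumed NxN smearing-matrix Corr.
vec = matrix_corr.GEVP(t0=2, ts=8, state=1)  # normalized eigenvector of the ground state
ground_state_corr = matrix_corr.projected(vec)
```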
+ + + +
+
+
#   + + + def + Eigenvalue(self, t0, state=1): +
+ +
+ View Source +
    def Eigenvalue(self, t0, state=1):
+        G = self.smearing_symmetric()
+        G0 = G.content[t0]
+        L = cholesky(G0)
+        Li = inv(L)
+        LT = L.T
+        LTi = inv(LT)
+        newcontent = []
+        for t in range(self.T):
+            Gt = G.content[t]
+            M = Li @ Gt @ LTi
+            eigenvalues = eigh(M)[0]
+            eigenvalue = eigenvalues[-state]
+            newcontent.append(eigenvalue)
+        return Corr(newcontent)
+
+ +
+ + + +
+
+
#   + + + def + roll(self, dt): +
+ +
+ View Source +
    def roll(self, dt):
+        """Periodically shift the correlator by dt timeslices
+
+        Attributes:
+        -----------
+        dt : int
+            number of timeslices
+        """
+        return Corr(list(np.roll(np.array(self.content, dtype=object), dt)))
+
+ +
+ +

Periodically shift the correlator by dt timeslices

+ +

Attributes:

+ +

dt : int
    number of timeslices

+
+ + +
+
+
#   + + + def + reverse(self): +
+ +
+ View Source +
    def reverse(self):
+        """Reverse the time ordering of the Corr"""
+        return Corr(self.content[::-1])
+
+ +
+ +

Reverse the time ordering of the Corr

+
+ + +
+
+
#   + + + def + correlate(self, partner): +
+ +
+ View Source +
    def correlate(self, partner):
+        """Correlate the correlator with another correlator or Obs"""
+        new_content = []
+        for x0, t_slice in enumerate(self.content):
+            if t_slice is None:
+                new_content.append(None)
+            else:
+                if isinstance(partner, Corr):
+                    if partner.content[x0] is None:
+                        new_content.append(None)
+                    else:
+                        new_content.append(np.array([correlate(o, partner.content[x0][0]) for o in t_slice]))
+                elif isinstance(partner, Obs):
+                    new_content.append(np.array([correlate(o, partner) for o in t_slice]))
+                else:
+                    raise Exception("Can only correlate with an Obs or a Corr.")
+
+        return Corr(new_content)
+
+ +
+ +

Correlate the correlator with another correlator or Obs

+
+ + +
+
+
#   + + + def + reweight(self, weight, **kwargs): +
+ +
+ View Source +
    def reweight(self, weight, **kwargs):
+        """Reweight the correlator.
+
+        Parameters
+        ----------
+        weight : Obs
+            Reweighting factor. An Observable that has to be defined on a superset of the
+            configurations in obs[i].idl for all i.
+
+        Keyword arguments
+        -----------------
+        all_configs : bool
+            if True, the reweighted observables are normalized by the average of
+            the reweighting factor on all configurations in weight.idl and not
+            on the configurations in obs[i].idl.
+        """
+        new_content = []
+        for t_slice in self.content:
+            if t_slice is None:
+                new_content.append(None)
+            else:
+                new_content.append(np.array(reweight(weight, t_slice, **kwargs)))
+        return Corr(new_content)
+
+ +
+ +

Reweight the correlator.

+ +
Parameters
+ +
    +
  • weight (Obs): Reweighting factor. An Observable that has to be defined on a superset of the configurations in obs[i].idl for all i.
  • +
+ +
Keyword arguments
+ +

all_configs : bool
    if True, the reweighted observables are normalized by the average of
    the reweighting factor on all configurations in weight.idl and not
    on the configurations in obs[i].idl.
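A hedged usage sketch (`rw` is an assumed Obs holding the reweighting factor on a superset of the relevant configurations):

```python
reweighted_corr = my_corr.reweight(rw)
# Normalize by the mean of rw on all configurations in rw.idl instead:
reweighted_all = my_corr.reweight(rw, all_configs=True)
```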

+
+ + +
+
+
#   + + + def + T_symmetry(self, partner, parity=1): +
+ +
+ View Source +
    def T_symmetry(self, partner, parity=+1):
+        """Return the time symmetry average of the correlator and its partner
+
+        Attributes:
+        -----------
+        partner : Corr
+            Time symmetry partner of the Corr
+        parity : int
+            Parity quantum number of the correlator, can be +1 or -1
+        """
+        if not isinstance(partner, Corr):
+            raise Exception("T partner has to be a Corr object.")
+        if parity not in [+1, -1]:
+            raise Exception("Parity has to be +1 or -1.")
+        T_partner = parity * partner.reverse()
+
+        t_slices = []
+        for x0, t_slice in enumerate((self - T_partner).content):
+            if t_slice is not None:
+                if not t_slice[0].is_zero_within_error(5):
+                    t_slices.append(x0)
+        if t_slices:
+            warnings.warn("T symmetry partners do not agree within 5 sigma on time slices " + str(t_slices) + ".", RuntimeWarning)
+
+        return (self + T_partner) / 2
+
+ +
+ +

Return the time symmetry average of the correlator and its partner

+ +

Attributes:

+ +

partner : Corr
    Time symmetry partner of the Corr
parity : int
    Parity quantum number of the correlator, can be +1 or -1
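For example (a sketch with assumed names; the two correlators are taken to be time-symmetry partners with positive parity):

```python
averaged_corr = corr_pp.T_symmetry(corr_pp_partner, parity=+1)
```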

+
+ + +
+
+
#   + + + def + deriv(self, symmetric=True): +
+ +
+ View Source +
    def deriv(self, symmetric=True):
+        """Return the first derivative of the correlator with respect to x0.
+
+        Attributes:
+        -----------
+        symmetric : bool
+            decides whether symmetric or simple finite differences are used. Default: True
+        """
+        if not symmetric:
+            newcontent = []
+            for t in range(self.T - 1):
+                if (self.content[t] is None) or (self.content[t + 1] is None):
+                    newcontent.append(None)
+                else:
+                    newcontent.append(self.content[t + 1] - self.content[t])
+            if(all([x is None for x in newcontent])):
+                raise Exception("Derivative is undefined at all timeslices")
+            return Corr(newcontent, padding_back=1)
+        if symmetric:
+            newcontent = []
+            for t in range(1, self.T - 1):
+                if (self.content[t - 1] is None) or (self.content[t + 1] is None):
+                    newcontent.append(None)
+                else:
+                    newcontent.append(0.5 * (self.content[t + 1] - self.content[t - 1]))
+            if(all([x is None for x in newcontent])):
+                raise Exception('Derivative is undefined at all timeslices')
+            return Corr(newcontent, padding_back=1, padding_front=1)
+
+ +
+ +

Return the first derivative of the correlator with respect to x0.

+ +

Attributes:

+ +

symmetric : bool
    decides whether symmetric or simple finite differences are used. Default: True
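A usage sketch (assuming `my_corr` is a projected Corr):

```python
dcorr = my_corr.deriv()                      # symmetric difference: 0.5 * (C(t+1) - C(t-1))
dcorr_fwd = my_corr.deriv(symmetric=False)   # forward difference: C(t+1) - C(t)
ddcorr = my_corr.second_deriv()              # C(t+1) - 2 * C(t) + C(t-1)
```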

+
+ + +
+
+
#   + + + def + second_deriv(self): +
+ +
+ View Source +
    def second_deriv(self):
+        """Return the second derivative of the correlator with respect to x0."""
+        newcontent = []
+        for t in range(1, self.T - 1):
+            if (self.content[t - 1] is None) or (self.content[t + 1] is None):
+                newcontent.append(None)
+            else:
+                newcontent.append((self.content[t + 1] - 2 * self.content[t] + self.content[t - 1]))
+        if(all([x is None for x in newcontent])):
+            raise Exception("Derivative is undefined at all timeslices")
+        return Corr(newcontent, padding_back=1, padding_front=1)
+
+ +
+ +

Return the second derivative of the correlator with respect to x0.

+
+ + +
+
+
#   + + + def + m_eff(self, variant='log', guess=1.0): +
+ +
+ View Source +
    def m_eff(self, variant='log', guess=1.0):
+        """Returns the effective mass of the correlator as correlator object
+
+        Parameters
+        ----------
+        variant : str
+            log: uses the standard effective mass log(C(t) / C(t+1))
+            cosh : Use periodicity of the correlator by solving C(t) / C(t+1) = cosh(m * (t - T/2)) / cosh(m * (t + 1 - T/2)) for m.
+            sinh : Use anti-periodicity of the correlator by solving C(t) / C(t+1) = sinh(m * (t - T/2)) / sinh(m * (t + 1 - T/2)) for m.
+            See, e.g., arXiv:1205.5380
+        guess : float
+            guess for the root finder, only relevant for the root variant
+        """
+        if self.N != 1:
+            raise Exception('Correlator must be projected before getting m_eff')
+        if variant == 'log':
+            newcontent = []
+            for t in range(self.T - 1):
+                if (self.content[t] is None) or (self.content[t + 1] is None):
+                    newcontent.append(None)
+                else:
+                    newcontent.append(self.content[t] / self.content[t + 1])
+            if(all([x is None for x in newcontent])):
+                raise Exception('m_eff is undefined at all timeslices')
+
+            return np.log(Corr(newcontent, padding_back=1))
+
+        elif variant in ['periodic', 'cosh', 'sinh']:
+            if variant in ['periodic', 'cosh']:
+                func = anp.cosh
+            else:
+                func = anp.sinh
+
+            def root_function(x, d):
+                return func(x * (t - self.T / 2)) / func(x * (t + 1 - self.T / 2)) - d
+
+            newcontent = []
+            for t in range(self.T - 1):
+                if (self.content[t] is None) or (self.content[t + 1] is None):
+                    newcontent.append(None)
+                # Fill the two timeslices in the middle of the lattice with their predecessors
+                elif variant == 'sinh' and t in [self.T / 2, self.T / 2 - 1]:
+                    newcontent.append(newcontent[-1])
+                else:
+                    newcontent.append(np.abs(find_root(self.content[t][0] / self.content[t + 1][0], root_function, guess=guess)))
+            if(all([x is None for x in newcontent])):
+                raise Exception('m_eff is undefined at all timeslices')
+
+            return Corr(newcontent, padding_back=1)
+
+        elif variant == 'arccosh':
+            newcontent = []
+            for t in range(1, self.T - 1):
+                if (self.content[t] is None) or (self.content[t + 1] is None) or (self.content[t - 1] is None):
+                    newcontent.append(None)
+                else:
+                    newcontent.append((self.content[t + 1] + self.content[t - 1]) / (2 * self.content[t]))
+            if(all([x is None for x in newcontent])):
+                raise Exception("m_eff is undefined at all timeslices")
+            return np.arccosh(Corr(newcontent, padding_back=1, padding_front=1))
+
+        else:
+            raise Exception('Unknown variant.')
+
+ +
+ +

Returns the effective mass of the correlator as correlator object

+ +
Parameters
+ +
    +
  • variant (str): log: uses the standard effective mass log(C(t) / C(t+1)); cosh: use periodicity of the correlator by solving C(t) / C(t+1) = cosh(m * (t - T/2)) / cosh(m * (t + 1 - T/2)) for m; sinh: use anti-periodicity of the correlator by solving C(t) / C(t+1) = sinh(m * (t - T/2)) / sinh(m * (t + 1 - T/2)) for m. See, e.g., arXiv:1205.5380
  • guess (float): guess for the root finder, only relevant for the root variant
  • +
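An illustrative sketch (assuming `my_corr` is a projected Corr of a correlator that is periodic in time):

```python
m_log = my_corr.m_eff()                  # log(C(t) / C(t+1))
m_cosh = my_corr.m_eff(variant='cosh')   # root of the periodic ratio, starting from guess
m_cosh.gamma_method()
```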
+
+ + +
+
+
#   + + + def + fit(self, function, fitrange=None, silent=False, **kwargs): +
+ +
+ View Source +
    def fit(self, function, fitrange=None, silent=False, **kwargs):
+        """Fits function to the data
+
+        Attributes:
+        -----------
+        function : obj
+            function to fit to the data. See fits.least_squares for details.
+        fitrange : list
+            Range in which the function is to be fitted to the data.
+            If not specified, self.prange or all timeslices are used.
+        silent : bool
+            Decides whether output is printed to the standard output.
+        """
+        if self.N != 1:
+            raise Exception("Correlator must be projected before fitting")
+
+        # The default behaviour is:
+        # 1 use explicit fitrange
+        # if none is provided, use the range of the corr
+        # if this is also not set, use the whole length of the corr (This could come with a warning!)
+
+        if fitrange is None:
+            if self.prange:
+                fitrange = self.prange
+            else:
+                fitrange = [0, self.T]
+
+        xs = [x for x in range(fitrange[0], fitrange[1] + 1) if not self.content[x] is None]
+        ys = [self.content[x][0] for x in range(fitrange[0], fitrange[1] + 1) if not self.content[x] is None]
+        result = least_squares(xs, ys, function, silent=silent, **kwargs)
+        result.gamma_method()
+        return result
+
+ +
+ +

Fits function to the data

+ +

Attributes:

+ +

function : obj
    function to fit to the data. See fits.least_squares for details.
fitrange : list
    Range in which the function is to be fitted to the data.
    If not specified, self.prange or all timeslices are used.
silent : bool
    Decides whether output is printed to the standard output.
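A hedged sketch of a single-exponential fit (assumed fit range; note that the fit function has to use autograd.numpy so that the derivatives can be computed):

```python
import autograd.numpy as anp

def exp_func(a, x):
    # a[0]: amplitude, a[1]: mass
    return a[0] * anp.exp(-a[1] * x)

fit_res = my_corr.fit(exp_func, fitrange=[4, 12])
print(fit_res)
```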

+
+ + +
+
+
#   + + + def + plateau(self, plateau_range=None, method='fit'): +
+ +
+ View Source +
    def plateau(self, plateau_range=None, method="fit"):
+        """ Extract a plateu value from a Corr object
+
+        Attributes:
+        -----------
+        plateau_range : list
+            list with two entries, indicating the first and the last timeslice
+            of the plateau region.
+        method : str
+            method to extract the plateau.
+                'fit' fits a constant to the plateau region
+                'avg', 'average' or 'mean' just average over the given timeslices.
+        """
+        if not plateau_range:
+            if self.prange:
+                plateau_range = self.prange
+            else:
+                raise Exception("no plateau range provided")
+        if self.N != 1:
+            raise Exception("Correlator must be projected before getting a plateau.")
+        if(all([self.content[t] is None for t in range(plateau_range[0], plateau_range[1] + 1)])):
+            raise Exception("plateau is undefined at all timeslices in plateaurange.")
+        if method == "fit":
+            def const_func(a, t):
+                return a[0]
+            return self.fit(const_func, plateau_range)[0]
+        elif method in ["avg", "average", "mean"]:
+            returnvalue = np.mean([item[0] for item in self.content[plateau_range[0]:plateau_range[1] + 1] if item is not None])
+            returnvalue.gamma_method()
+            return returnvalue
+
+        else:
+            raise Exception("Unsupported plateau method: " + method)
+
+ +
+ +

Extract a plateau value from a Corr object

+ +

Attributes:

+ +

plateau_range : list
    list with two entries, indicating the first and the last timeslice
    of the plateau region.
method : str
    method to extract the plateau.
        'fit' fits a constant to the plateau region
        'avg', 'average' or 'mean' just average over the given timeslices.
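For example (a sketch with an assumed plateau region):

```python
plat_fit = my_corr.plateau([10, 20], method="fit")  # constant fit in [10, 20]
plat_avg = my_corr.plateau([10, 20], method="avg")  # plain average over the timeslices
```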

+
+ + +
+
+
#   + + + def + set_prange(self, prange): +
+ +
+ View Source +
    def set_prange(self, prange):
+        """Sets the attribute prange of the Corr object."""
+        if not len(prange) == 2:
+            raise Exception("prange must be a list or array with two values")
+        if not ((isinstance(prange[0], int)) and (isinstance(prange[1], int))):
+            raise Exception("Start and end point must be integers")
+        if not (0 <= prange[0] <= self.T and 0 <= prange[1] <= self.T and prange[0] < prange[1]):
+            raise Exception("Start and end point must define a range in the interval 0,T")
+
+        self.prange = prange
+        return
+
+ +
+ +

Sets the attribute prange of the Corr object.
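For example:

```python
my_corr.set_prange([10, 20])  # subsequent fit() and plateau() calls default to this range
```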

+
+ + +
+
+
#   + + + def + show( + self, + x_range=None, + comp=None, + y_range=None, + logscale=False, + plateau=None, + fit_res=None, + ylabel=None, + save=None +): +
+ +
+ View Source +
    def show(self, x_range=None, comp=None, y_range=None, logscale=False, plateau=None, fit_res=None, ylabel=None, save=None):
+        """Plots the correlator, uses tag as label if available.
+
+        Parameters
+        ----------
+        x_range : list
+            list of two values, determining the range of the x-axis e.g. [4, 8]
+        comp : Corr or list of Corr
+            Correlator or list of correlators which are plotted for comparison.
+        logscale : bool
+            Sets y-axis to logscale
+        plateau : Obs
+            plateau to be visualized in the figure
+        fit_res : Fit_result
+            Fit_result object to be visualized
+        ylabel : str
+            Label for the y-axis
+        save : str
+            path to file in which the figure should be saved
+        """
+        if self.N != 1:
+            raise Exception("Correlator must be projected before plotting")
+        if x_range is None:
+            x_range = [0, self.T]
+
+        fig = plt.figure()
+        ax1 = fig.add_subplot(111)
+
+        x, y, y_err = self.plottable()
+        ax1.errorbar(x, y, y_err, label=self.tag)
+        if logscale:
+            ax1.set_yscale('log')
+        else:
+            # we generate ylim instead of using autoscaling.
+            if y_range is None:
+                try:
+                    y_min = min([(x[0].value - x[0].dvalue) for x in self.content[x_range[0]: x_range[1] + 1] if (x is not None) and x[0].dvalue < 2 * np.abs(x[0].value)])
+                    y_max = max([(x[0].value + x[0].dvalue) for x in self.content[x_range[0]: x_range[1] + 1] if (x is not None) and x[0].dvalue < 2 * np.abs(x[0].value)])
+                    ax1.set_ylim([y_min - 0.1 * (y_max - y_min), y_max + 0.1 * (y_max - y_min)])
+                except Exception:
+                    pass
+            else:
+                ax1.set_ylim(y_range)
+        if comp:
+            if isinstance(comp, Corr) or isinstance(comp, list):
+                for corr in comp if isinstance(comp, list) else [comp]:
+                    x, y, y_err = corr.plottable()
+                    plt.errorbar(x, y, y_err, label=corr.tag, mfc=plt.rcParams['axes.facecolor'])
+            else:
+                raise Exception('comp must be a correlator or a list of correlators.')
+
+        if plateau:
+            if isinstance(plateau, Obs):
+                ax1.axhline(y=plateau.value, linewidth=2, color=plt.rcParams['text.color'], alpha=0.6, marker=',', ls='--', label=str(plateau))
+                ax1.axhspan(plateau.value - plateau.dvalue, plateau.value + plateau.dvalue, alpha=0.25, color=plt.rcParams['text.color'], ls='-')
+            else:
+                raise Exception('plateau must be an Obs')
+        if self.prange:
+            ax1.axvline(self.prange[0], 0, 1, ls='-', marker=',')
+            ax1.axvline(self.prange[1], 0, 1, ls='-', marker=',')
+
+        if fit_res:
+            x_samples = np.arange(x_range[0], x_range[1] + 1, 0.05)
+            ax1.plot(x_samples,
+                     fit_res.fit_function([o.value for o in fit_res.fit_parameters], x_samples),
+                     ls='-', marker=',', lw=2)
+
+        ax1.set_xlabel(r'$x_0 / a$')
+        if ylabel:
+            ax1.set_ylabel(ylabel)
+        ax1.set_xlim([x_range[0] - 0.5, x_range[1] + 0.5])
+
+        handles, labels = ax1.get_legend_handles_labels()
+        if labels:
+            ax1.legend()
+        plt.draw()
+
+        if save:
+            if isinstance(save, str):
+                fig.savefig(save)
+            else:
+                raise Exception("Safe has to be a string.")
+
+        return
+
+ +
+ +

Plots the correlator, uses tag as label if available.

+ +
Parameters
+ +
    +
  • x_range (list): list of two values, determining the range of the x-axis e.g. [4, 8]
  • comp (Corr or list of Corr): Correlator or list of correlators which are plotted for comparison.
  • logscale (bool): Sets y-axis to logscale
  • plateau (Obs): plateau to be visualized in the figure
  • fit_res (Fit_result): Fit_result object to be visualized
  • ylabel (str): Label for the y-axis
  • save (str): path to file in which the figure should be saved
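A hedged plotting sketch (`plat` is an assumed plateau Obs, e.g. obtained from plateau()):

```python
my_corr.show(x_range=[0, 16], logscale=True, plateau=plat,
             ylabel=r'$C(x_0)$', save='correlator.pdf')
```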
+
+ + +
+
+
#   + + + def + dump(self, filename): +
+ +
+ View Source +
    def dump(self, filename):
+        """Dumps the Corr into a pickel file
+
+        Attributes:
+        -----------
+        filename : str
+            Name of the file
+        """
+        dump_object(self, filename)
+        return
+
+ +
+ +

Dumps the Corr into a pickle file

+ +

Attributes:

+ +

filename : str
    Name of the file

+
+ + +
+
+
#   + + + def + print(self, range=[0, None]): +
+ +
+ View Source +
    def print(self, range=[0, None]):
+        print(self.__repr__(range))
+
+ +
+ + + +
+
+
#   + + + def + sqrt(self): +
+ +
+ View Source +
    def sqrt(self):
+        return self**0.5
+
+ +
+ + + +
+
+
#   + + + def + log(self): +
+ +
+ View Source +
    def log(self):
+        newcontent = [None if (item is None) else np.log(item) for item in self.content]
+        return Corr(newcontent, prange=self.prange)
+
+ +
+ + + +
+
+
#   + + + def + exp(self): +
+ +
+ View Source +
    def exp(self):
+        newcontent = [None if (item is None) else np.exp(item) for item in self.content]
+        return Corr(newcontent, prange=self.prange)
+
+ +
+ + + +
+
+
#   + + + def + sin(self): +
+ +
+ View Source +
    def sin(self):
+        return self._apply_func_to_corr(np.sin)
+
+ +
+ + + +
+
+
#   + + + def + cos(self): +
+ +
+ View Source +
    def cos(self):
+        return self._apply_func_to_corr(np.cos)
+
+ +
+ + + +
+
+
#   + + + def + tan(self): +
+ +
+ View Source +
    def tan(self):
+        return self._apply_func_to_corr(np.tan)
+
+ +
+ + + +
+
+
#   + + + def + sinh(self): +
+ +
+ View Source +
    def sinh(self):
+        return self._apply_func_to_corr(np.sinh)
+
+ +
+ + + +
+
+
#   + + + def + cosh(self): +
+ +
+ View Source +
    def cosh(self):
+        return self._apply_func_to_corr(np.cosh)
+
+ +
+ + + +
+
+
#   + + + def + tanh(self): +
+ +
+ View Source +
    def tanh(self):
+        return self._apply_func_to_corr(np.tanh)
+
+ +
+ + + +
+
+
#   + + + def + arcsin(self): +
+ +
+ View Source +
    def arcsin(self):
+        return self._apply_func_to_corr(np.arcsin)
+
+ +
+ + + +
+
+
#   + + + def + arccos(self): +
+ +
+ View Source +
    def arccos(self):
+        return self._apply_func_to_corr(np.arccos)
+
+ +
+ + + +
+
+
#   + + + def + arctan(self): +
+ +
+ View Source +
    def arctan(self):
+        return self._apply_func_to_corr(np.arctan)
+
+ +
+ + + +
+
+
#   + + + def + arcsinh(self): +
+ +
+ View Source +
    def arcsinh(self):
+        return self._apply_func_to_corr(np.arcsinh)
+
+ +
+ + + +
+
+
#   + + + def + arccosh(self): +
+ +
+ View Source +
    def arccosh(self):
+        return self._apply_func_to_corr(np.arccosh)
+
+ +
+ + + +
+
+
#   + + + def + arctanh(self): +
+ +
+ View Source +
    def arctanh(self):
+        return self._apply_func_to_corr(np.arctanh)
+
+ +
+ + + +
+
+
+ + \ No newline at end of file diff --git a/docs/pyerrors/dirac.html b/docs/pyerrors/dirac.html new file mode 100644 index 00000000..dad16599 --- /dev/null +++ b/docs/pyerrors/dirac.html @@ -0,0 +1,363 @@ + + + + + + + pyerrors.dirac API documentation + + + + + + + + + + + +
+
+

+pyerrors.dirac

+ + +
+ View Source +
import numpy as np
+
+
+gammaX = np.array(
+    [[0, 0, 0, 1j], [0, 0, 1j, 0], [0, -1j, 0, 0], [-1j, 0, 0, 0]],
+    dtype=complex)
+gammaY = np.array(
+    [[0, 0, 0, -1], [0, 0, 1, 0], [0, 1, 0, 0], [-1, 0, 0, 0]],
+    dtype=complex)
+gammaZ = np.array(
+    [[0, 0, 1j, 0], [0, 0, 0, -1j], [-1j, 0, 0, 0], [0, 1j, 0, 0]],
+    dtype=complex)
+gammaT = np.array(
+    [[0, 0, 1, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 1, 0, 0]],
+    dtype=complex)
+gamma = np.array([gammaX, gammaY, gammaZ, gammaT])
+gamma5 = np.array(
+    [[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, -1, 0], [0, 0, 0, -1]],
+    dtype=complex)
+identity = np.array(
+    [[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]],
+    dtype=complex)
+
+
+def Grid_gamma(gamma_tag):
+    """Returns gamma matrix in Grid labeling."""
+    if gamma_tag == 'Identity':
+        g = identity
+    elif gamma_tag == 'Gamma5':
+        g = gamma5
+    elif gamma_tag == 'GammaX':
+        g = gamma[0]
+    elif gamma_tag == 'GammaY':
+        g = gamma[1]
+    elif gamma_tag == 'GammaZ':
+        g = gamma[2]
+    elif gamma_tag == 'GammaT':
+        g = gamma[3]
+    elif gamma_tag == 'GammaXGamma5':
+        g = gamma[0] @ gamma5
+    elif gamma_tag == 'GammaYGamma5':
+        g = gamma[1] @ gamma5
+    elif gamma_tag == 'GammaZGamma5':
+        g = gamma[2] @ gamma5
+    elif gamma_tag == 'GammaTGamma5':
+        g = gamma[3] @ gamma5
+    elif gamma_tag == 'SigmaXT':
+        g = 0.5 * (gamma[0] @ gamma[3] - gamma[3] @ gamma[0])
+    elif gamma_tag == 'SigmaXY':
+        g = 0.5 * (gamma[0] @ gamma[1] - gamma[1] @ gamma[0])
+    elif gamma_tag == 'SigmaXZ':
+        g = 0.5 * (gamma[0] @ gamma[2] - gamma[2] @ gamma[0])
+    elif gamma_tag == 'SigmaYT':
+        g = 0.5 * (gamma[1] @ gamma[3] - gamma[3] @ gamma[1])
+    elif gamma_tag == 'SigmaYZ':
+        g = 0.5 * (gamma[1] @ gamma[2] - gamma[2] @ gamma[1])
+    elif gamma_tag == 'SigmaZT':
+        g = 0.5 * (gamma[2] @ gamma[3] - gamma[3] @ gamma[2])
+    else:
+        raise Exception('Unknown gamma structure', gamma_tag)
+    return g
+
+ +
+ +
+
+
#   + + + def + Grid_gamma(gamma_tag): +
+ +
+ View Source +
def Grid_gamma(gamma_tag):
+    """Returns gamma matrix in Grid labeling."""
+    if gamma_tag == 'Identity':
+        g = identity
+    elif gamma_tag == 'Gamma5':
+        g = gamma5
+    elif gamma_tag == 'GammaX':
+        g = gamma[0]
+    elif gamma_tag == 'GammaY':
+        g = gamma[1]
+    elif gamma_tag == 'GammaZ':
+        g = gamma[2]
+    elif gamma_tag == 'GammaT':
+        g = gamma[3]
+    elif gamma_tag == 'GammaXGamma5':
+        g = gamma[0] @ gamma5
+    elif gamma_tag == 'GammaYGamma5':
+        g = gamma[1] @ gamma5
+    elif gamma_tag == 'GammaZGamma5':
+        g = gamma[2] @ gamma5
+    elif gamma_tag == 'GammaTGamma5':
+        g = gamma[3] @ gamma5
+    elif gamma_tag == 'SigmaXT':
+        g = 0.5 * (gamma[0] @ gamma[3] - gamma[3] @ gamma[0])
+    elif gamma_tag == 'SigmaXY':
+        g = 0.5 * (gamma[0] @ gamma[1] - gamma[1] @ gamma[0])
+    elif gamma_tag == 'SigmaXZ':
+        g = 0.5 * (gamma[0] @ gamma[2] - gamma[2] @ gamma[0])
+    elif gamma_tag == 'SigmaYT':
+        g = 0.5 * (gamma[1] @ gamma[3] - gamma[3] @ gamma[1])
+    elif gamma_tag == 'SigmaYZ':
+        g = 0.5 * (gamma[1] @ gamma[2] - gamma[2] @ gamma[1])
+    elif gamma_tag == 'SigmaZT':
+        g = 0.5 * (gamma[2] @ gamma[3] - gamma[3] @ gamma[2])
+    else:
+        raise Exception('Unknown gamma structure', gamma_tag)
+    return g
+
+ +
+ +

Returns gamma matrix in Grid labeling.
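For example:

```python
import numpy as np
from pyerrors.dirac import Grid_gamma, gamma5

g5 = Grid_gamma('Gamma5')
assert np.allclose(g5, gamma5)
```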

+
+ + +
+
+ + \ No newline at end of file diff --git a/docs/pyerrors/fits.html b/docs/pyerrors/fits.html new file mode 100644 index 00000000..1f62ce8d --- /dev/null +++ b/docs/pyerrors/fits.html @@ -0,0 +1,1997 @@ + + + + + + + pyerrors.fits API documentation + + + + + + + + + + + +
+
+

+pyerrors.fits

+ + +
+ View Source +
import gc
+from collections.abc import Sequence
+import warnings
+import numpy as np
+import autograd.numpy as anp
+import scipy.optimize
+import scipy.stats
+import matplotlib.pyplot as plt
+from matplotlib import gridspec
+from scipy.odr import ODR, Model, RealData
+import iminuit
+from autograd import jacobian
+from autograd import elementwise_grad as egrad
+from .obs import Obs, derived_observable, covariance, pseudo_Obs
+
+
+class Fit_result(Sequence):
+    """Represents fit results.
+
+    Attributes
+    ----------
+    fit_parameters : list
+        results for the individual fit parameters,
+        also accessible via indices.
+    """
+
+    def __init__(self):
+        self.fit_parameters = None
+
+    def __getitem__(self, idx):
+        return self.fit_parameters[idx]
+
+    def __len__(self):
+        return len(self.fit_parameters)
+
+    def gamma_method(self):
+        """Apply the gamma method to all fit parameters"""
+        [o.gamma_method() for o in self.fit_parameters]
+
+    def __str__(self):
+        self.gamma_method()
+        my_str = 'Goodness of fit:\n'
+        if hasattr(self, 'chisquare_by_dof'):
+            my_str += '\u03C7\u00b2/d.o.f. = ' + f'{self.chisquare_by_dof:2.6f}' + '\n'
+        elif hasattr(self, 'residual_variance'):
+            my_str += 'residual variance = ' + f'{self.residual_variance:2.6f}' + '\n'
+        if hasattr(self, 'chisquare_by_expected_chisquare'):
+            my_str += '\u03C7\u00b2/\u03C7\u00b2exp  = ' + f'{self.chisquare_by_expected_chisquare:2.6f}' + '\n'
+        my_str += 'Fit parameters:\n'
+        for i_par, par in enumerate(self.fit_parameters):
+            my_str += str(i_par) + '\t' + ' ' * int(par >= 0) + str(par).rjust(int(par < 0.0)) + '\n'
+        return my_str
+
+    def __repr__(self):
+        m = max(map(len, list(self.__dict__.keys()))) + 1
+        return '\n'.join([key.rjust(m) + ': ' + repr(value) for key, value in sorted(self.__dict__.items())])
+
+
+def least_squares(x, y, func, priors=None, silent=False, **kwargs):
+    """Performs a non-linear fit to y = func(x).
+
+    Arguments:
+    ----------
+    x : list
+        list of floats.
+    y : list
+        list of Obs.
+    func : object
+        fit function, has to be of the form
+
+        def func(a, x):
+            return a[0] + a[1] * x + a[2] * anp.sinh(x)
+
+        For multiple x values func can be of the form
+
+        def func(a, x):
+            (x1, x2) = x
+            return a[0] * x1 ** 2 + a[1] * x2
+
+        It is important that all numpy functions refer to autograd.numpy, otherwise the differentiation
+        will not work
+    priors : list, optional
+        priors has to be a list with an entry for every parameter in the fit. The entries can either be
+        Obs (e.g. results from a previous fit) or strings containing a value and an error formatted like
+        0.548(23), 500(40) or 0.5(0.4)
+        It is important for the subsequent error estimation that the e_tag for the gamma method is large
+        enough.
+    silent : bool, optional
+        If true all output to the console is omitted (default False).
+
+
+    Keyword arguments
+    -----------------
+    initial_guess -- can provide an initial guess for the input parameters. Relevant for
+                     non-linear fits with many parameters.
+    method -- can be used to choose an alternative method for the minimization of chisquare.
+              The possible methods are the ones which can be used for scipy.optimize.minimize and
+              migrad of iminuit. If no method is specified, Levenberg-Marquardt is used.
+              Reliable alternatives are migrad, Powell and Nelder-Mead.
+    resplot -- If true, a plot which displays fit, data and residuals is generated (default False).
+    qqplot -- If true, a quantile-quantile plot of the fit result is generated (default False).
+    expected_chisquare -- If true prints the expected chisquare which is
+                          corrected by effects caused by correlated input data.
+                          This can take a while as the full correlation matrix
+                          has to be calculated (default False).
+    """
+    if priors is not None:
+        return _prior_fit(x, y, func, priors, silent=silent, **kwargs)
+    else:
+        return _standard_fit(x, y, func, silent=silent, **kwargs)
+
+
+def standard_fit(x, y, func, silent=False, **kwargs):
+    warnings.warn("standard_fit renamed to least_squares", DeprecationWarning)
+    return least_squares(x, y, func, silent=silent, **kwargs)
+
+
+def _standard_fit(x, y, func, silent=False, **kwargs):
+
+    output = Fit_result()
+
+    output.fit_function = func
+
+    x = np.asarray(x)
+
+    if x.shape[-1] != len(y):
+        raise Exception('x and y input have to have the same length')
+
+    if len(x.shape) > 2:
+        raise Exception('Unknown format for x values')
+
+    if not callable(func):
+        raise TypeError('func has to be a function.')
+
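+    # Determine the number of fit parameters by calling func with arrays of
+    # increasing length until the call no longer raises an exception.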
+    for i in range(25):
+        try:
+            func(np.arange(i), x.T[0])
+        except Exception:
+            pass
+        else:
+            break
+
+    n_parms = i
+
+    if not silent:
+        print('Fit with', n_parms, 'parameters')
+
+    y_f = [o.value for o in y]
+    dy_f = [o.dvalue for o in y]
+
+    if np.any(np.asarray(dy_f) <= 0.0):
+        raise Exception('No y errors available, run the gamma method first.')
+
+    if 'initial_guess' in kwargs:
+        x0 = kwargs.get('initial_guess')
+        if len(x0) != n_parms:
+            raise Exception('Initial guess does not have the correct length.')
+    else:
+        x0 = [0.1] * n_parms
+
+    def chisqfunc(p):
+        model = func(p, x)
+        chisq = anp.sum(((y_f - model) / dy_f) ** 2)
+        return chisq
+
+    if 'method' in kwargs:
+        output.method = kwargs.get('method')
+        if not silent:
+            print('Method:', kwargs.get('method'))
+        if kwargs.get('method') == 'migrad':
+            fit_result = iminuit.minimize(chisqfunc, x0)
+            fit_result = iminuit.minimize(chisqfunc, fit_result.x)
+        else:
+            fit_result = scipy.optimize.minimize(chisqfunc, x0, method=kwargs.get('method'))
+            fit_result = scipy.optimize.minimize(chisqfunc, fit_result.x, method=kwargs.get('method'), tol=1e-12)
+
+        chisquare = fit_result.fun
+
+        output.iterations = fit_result.nit
+    else:
+        output.method = 'Levenberg-Marquardt'
+        if not silent:
+            print('Method: Levenberg-Marquardt')
+
+        def chisqfunc_residuals(p):
+            model = func(p, x)
+            chisq = ((y_f - model) / dy_f)
+            return chisq
+
+        fit_result = scipy.optimize.least_squares(chisqfunc_residuals, x0, method='lm', ftol=1e-15, gtol=1e-15, xtol=1e-15)
+
+        chisquare = np.sum(fit_result.fun ** 2)
+
+        output.iterations = fit_result.nfev
+
+    if not fit_result.success:
+        raise Exception('The minimization procedure did not converge.')
+
+    if x.shape[-1] - n_parms > 0:
+        output.chisquare_by_dof = chisquare / (x.shape[-1] - n_parms)
+    else:
+        output.chisquare_by_dof = float('nan')
+
+    output.message = fit_result.message
+    if not silent:
+        print(fit_result.message)
+        print('chisquare/d.o.f.:', output.chisquare_by_dof)
+
+    if kwargs.get('expected_chisquare') is True:
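+        # Expected chi^2 corrected for correlated input data:
+        # <chi^2> = tr[(1 - P) W C W], where P projects onto the column space of
+        # the weighted Jacobian A = W J and C is the covariance matrix of y.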
+        W = np.diag(1 / np.asarray(dy_f))
+        cov = covariance_matrix(y)
+        A = W @ jacobian(func)(fit_result.x, x)
+        P_phi = A @ np.linalg.inv(A.T @ A) @ A.T
+        expected_chisquare = np.trace((np.identity(x.shape[-1]) - P_phi) @ W @ cov @ W)
+        output.chisquare_by_expected_chisquare = chisquare / expected_chisquare
+        if not silent:
+            print('chisquare/expected_chisquare:',
+                  output.chisquare_by_expected_chisquare)
+
+    hess_inv = np.linalg.pinv(jacobian(jacobian(chisqfunc))(fit_result.x))
+
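+    # Error propagation: at the minimum grad_p chi^2 = 0 holds, so the implicit
+    # function theorem gives dp/dy = -H^{-1} * d^2chi^2/(dp dy). The mixed second
+    # derivatives are obtained from chisqfunc_compact below via autograd.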
+    def chisqfunc_compact(d):
+        model = func(d[:n_parms], x)
+        chisq = anp.sum(((d[n_parms:] - model) / dy_f) ** 2)
+        return chisq
+
+    jac_jac = jacobian(jacobian(chisqfunc_compact))(np.concatenate((fit_result.x, y_f)))
+
+    deriv = -hess_inv @ jac_jac[:n_parms, n_parms:]
+
+    result = []
+    for i in range(n_parms):
+        result.append(derived_observable(lambda x, **kwargs: x[0], [pseudo_Obs(fit_result.x[i], 0.0, y[0].names[0], y[0].shape[y[0].names[0]])] + list(y), man_grad=[0] + list(deriv[i])))
+
+    output.fit_parameters = result
+
+    output.chisquare = chisqfunc(fit_result.x)
+    output.dof = x.shape[-1] - n_parms
+
+    if kwargs.get('resplot') is True:
+        residual_plot(x, y, func, result)
+
+    if kwargs.get('qqplot') is True:
+        qqplot(x, y, func, result)
+
+    return output
+
+
+def odr_fit(x, y, func, silent=False, **kwargs):
+    warnings.warn("odr_fit renamed to total_least_squares", DeprecationWarning)
+    return total_least_squares(x, y, func, silent=silent, **kwargs)
+
+
+def total_least_squares(x, y, func, silent=False, **kwargs):
+    """Performs a non-linear fit to y = func(x) and returns a list of Obs corresponding to the fit parameters.
+
+    x : list
+        list of Obs, or a tuple of lists of Obs
+    y : list
+        list of Obs. The dvalues of the Obs are used as x- and y-error for the fit.
+    func : object
+        func has to be of the form
+
+        def func(a, x):
+            y = a[0] + a[1] * x + a[2] * anp.sinh(x)
+            return y
+
+        For multiple x values func can be of the form
+
+        def func(a, x):
+            (x1, x2) = x
+            return a[0] * x1 ** 2 + a[1] * x2
+
+        It is important that all numpy functions refer to autograd.numpy, otherwise the differentiation
+        will not work.
+    silent : bool, optional
+        If true all output to the console is omitted (default False).
+    Based on the orthogonal distance regression module of scipy.
+
+    Keyword arguments
+    -----------------
+    initial_guess -- can provide an initial guess for the input parameters. Relevant for non-linear
+                     fits with many parameters.
+    expected_chisquare -- If true prints the expected chisquare which is
+                          corrected by effects caused by correlated input data.
+                          This can take a while as the full correlation matrix
+                          has to be calculated (default False).
+    """
+
+    output = Fit_result()
+
+    output.fit_function = func
+
+    x = np.array(x)
+
+    x_shape = x.shape
+
+    if not callable(func):
+        raise TypeError('func has to be a function.')
+
+    for i in range(25):
+        try:
+            func(np.arange(i), x.T[0])
+        except Exception:
+            pass
+        else:
+            break
+
+    n_parms = i
+    if not silent:
+        print('Fit with', n_parms, 'parameters')
+
+    x_f = np.vectorize(lambda o: o.value)(x)
+    dx_f = np.vectorize(lambda o: o.dvalue)(x)
+    y_f = np.array([o.value for o in y])
+    dy_f = np.array([o.dvalue for o in y])
+
+    if np.any(np.asarray(dx_f) <= 0.0):
+        raise Exception('No x errors available, run the gamma method first.')
+
+    if np.any(np.asarray(dy_f) <= 0.0):
+        raise Exception('No y errors available, run the gamma method first.')
+
+    if 'initial_guess' in kwargs:
+        x0 = kwargs.get('initial_guess')
+        if len(x0) != n_parms:
+            raise Exception('Initial guess does not have the correct length.')
+    else:
+        x0 = [1] * n_parms
+
+    data = RealData(x_f, y_f, sx=dx_f, sy=dy_f)
+    model = Model(func)
+    odr = ODR(data, model, x0, partol=np.finfo(np.float64).eps)
+    odr.set_job(fit_type=0, deriv=1)
+    out = odr.run()
+
+    output.residual_variance = out.res_var
+
+    output.method = 'ODR'
+
+    output.message = out.stopreason
+
+    output.xplus = out.xplus
+
+    if not silent:
+        print('Method: ODR')
+        print(*out.stopreason)
+        print('Residual variance:', output.residual_variance)
+
+    if out.info > 3:
+        raise Exception('The minimization procedure did not converge.')
+
+    m = x_f.size
+
+    def odr_chisquare(p):
+        model = func(p[:n_parms], p[n_parms:].reshape(x_shape))
+        chisq = anp.sum(((y_f - model) / dy_f) ** 2) + anp.sum(((x_f - p[n_parms:].reshape(x_shape)) / dx_f) ** 2)
+        return chisq
+
+    if kwargs.get('expected_chisquare') is True:
+        W = np.diag(1 / np.asarray(np.concatenate((dy_f.ravel(), dx_f.ravel()))))
+
+        if kwargs.get('covariance') is not None:
+            cov = kwargs.get('covariance')
+        else:
+            cov = covariance_matrix(np.concatenate((y, x.ravel())))
+
+        number_of_x_parameters = int(m / x_f.shape[-1])
+
+        old_jac = jacobian(func)(out.beta, out.xplus)
+        fused_row1 = np.concatenate((old_jac, np.concatenate((number_of_x_parameters * [np.zeros(old_jac.shape)]), axis=0)))
+        fused_row2 = np.concatenate((jacobian(lambda x, y: func(y, x))(out.xplus, out.beta).reshape(x_f.shape[-1], x_f.shape[-1] * number_of_x_parameters), np.identity(number_of_x_parameters * old_jac.shape[0])))
+        new_jac = np.concatenate((fused_row1, fused_row2), axis=1)
+
+        A = W @ new_jac
+        P_phi = A @ np.linalg.inv(A.T @ A) @ A.T
+        expected_chisquare = np.trace((np.identity(P_phi.shape[0]) - P_phi) @ W @ cov @ W)
+        if expected_chisquare <= 0.0:
+            warnings.warn("Negative expected_chisquare.", RuntimeWarning)
+            expected_chisquare = np.abs(expected_chisquare)
+        output.chisquare_by_expected_chisquare = odr_chisquare(np.concatenate((out.beta, out.xplus.ravel()))) / expected_chisquare
+        if not silent:
+            print('chisquare/expected_chisquare:',
+                  output.chisquare_by_expected_chisquare)
+
+    hess_inv = np.linalg.pinv(jacobian(jacobian(odr_chisquare))(np.concatenate((out.beta, out.xplus.ravel()))))
+
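+    # Propagate the uncertainties of both the x and the y data into the fit
+    # parameters via the implicit function theorem, dp/d(data) = -H^{-1} *
+    # d^2chi^2/(dp d(data)), evaluated separately for x and y below.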
+    def odr_chisquare_compact_x(d):
+        model = func(d[:n_parms], d[n_parms:n_parms + m].reshape(x_shape))
+        chisq = anp.sum(((y_f - model) / dy_f) ** 2) + anp.sum(((d[n_parms + m:].reshape(x_shape) - d[n_parms:n_parms + m].reshape(x_shape)) / dx_f) ** 2)
+        return chisq
+
+    jac_jac_x = jacobian(jacobian(odr_chisquare_compact_x))(np.concatenate((out.beta, out.xplus.ravel(), x_f.ravel())))
+
+    deriv_x = -hess_inv @ jac_jac_x[:n_parms + m, n_parms + m:]
+
+    def odr_chisquare_compact_y(d):
+        model = func(d[:n_parms], d[n_parms:n_parms + m].reshape(x_shape))
+        chisq = anp.sum(((d[n_parms + m:] - model) / dy_f) ** 2) + anp.sum(((x_f - d[n_parms:n_parms + m].reshape(x_shape)) / dx_f) ** 2)
+        return chisq
+
+    jac_jac_y = jacobian(jacobian(odr_chisquare_compact_y))(np.concatenate((out.beta, out.xplus.ravel(), y_f)))
+
+    deriv_y = -hess_inv @ jac_jac_y[:n_parms + m, n_parms + m:]
+
+    result = []
+    for i in range(n_parms):
+        result.append(derived_observable(lambda x, **kwargs: x[0], [pseudo_Obs(out.beta[i], 0.0, y[0].names[0], y[0].shape[y[0].names[0]])] + list(x.ravel()) + list(y), man_grad=[0] + list(deriv_x[i]) + list(deriv_y[i])))
+
+    output.fit_parameters = result
+
+    output.odr_chisquare = odr_chisquare(np.concatenate((out.beta, out.xplus.ravel())))
+    output.dof = x.shape[-1] - n_parms
+
+    return output
+
+
+def prior_fit(x, y, func, priors, silent=False, **kwargs):
+    warnings.warn("prior_fit renamed to least_squares", DeprecationWarning)
+    return least_squares(x, y, func, priors=priors, silent=silent, **kwargs)
+
+
+def _prior_fit(x, y, func, priors, silent=False, **kwargs):
+    output = Fit_result()
+
+    output.fit_function = func
+
+    if Obs.e_tag_global < 4:
+        warnings.warn("e_tag_global is smaller than 4, this can cause problems when calculating errors from fits with priors", RuntimeWarning)
+
+    x = np.asarray(x)
+
+    if not callable(func):
+        raise TypeError('func has to be a function.')
+
+    for i in range(100):
+        try:
+            func(np.arange(i), 0)
+        except Exception:
+            pass
+        else:
+            break
+
+    n_parms = i
+
+    if n_parms != len(priors):
+        raise Exception('Priors do not have the correct length.')
+
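+    # Parse prior strings like '0.548(23)' or '500(40)': the number in parentheses
+    # refers to the last digits of the value unless it contains a decimal point itself.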
+    def extract_val_and_dval(string):
+        split_string = string.split('(')
+        if '.' in split_string[0] and '.' not in split_string[1][:-1]:
+            factor = 10 ** -len(split_string[0].partition('.')[2])
+        else:
+            factor = 1
+        return float(split_string[0]), float(split_string[1][:-1]) * factor
+
+    loc_priors = []
+    for i_n, i_prior in enumerate(priors):
+        if isinstance(i_prior, Obs):
+            loc_priors.append(i_prior)
+        else:
+            loc_val, loc_dval = extract_val_and_dval(i_prior)
+            loc_priors.append(pseudo_Obs(loc_val, loc_dval, 'p' + str(i_n)))
+
+    output.priors = loc_priors
+
+    if not silent:
+        print('Fit with', n_parms, 'parameters')
+
+    y_f = [o.value for o in y]
+    dy_f = [o.dvalue for o in y]
+
+    if np.any(np.asarray(dy_f) <= 0.0):
+        raise Exception('No y errors available, run the gamma method first.')
+
+    p_f = [o.value for o in loc_priors]
+    dp_f = [o.dvalue for o in loc_priors]
+
+    if np.any(np.asarray(dp_f) <= 0.0):
+        raise Exception('No prior errors available, run the gamma method first.')
+
+    if 'initial_guess' in kwargs:
+        x0 = kwargs.get('initial_guess')
+        if len(x0) != n_parms:
+            raise Exception('Initial guess does not have the correct length.')
+    else:
+        x0 = p_f
+
+    def chisqfunc(p):
+        model = func(p, x)
+        chisq = anp.sum(((y_f - model) / dy_f) ** 2) + anp.sum(((p_f - p) / dp_f) ** 2)
+        return chisq
+
+    if not silent:
+        print('Method: migrad')
+
+    m = iminuit.Minuit.from_array_func(chisqfunc, x0, error=np.asarray(x0) * 0.01, errordef=1, print_level=0)
+    if 'tol' in kwargs:
+        m.tol = kwargs.get('tol')
+    else:
+        m.tol = 1e-4
+    m.migrad()
+    params = np.asarray(m.values.values())
+
+    output.chisquare_by_dof = m.fval / len(x)
+
+    output.method = 'migrad'
+
+    if not silent:
+        print('chisquare/d.o.f.:', output.chisquare_by_dof)
+
+    if not m.get_fmin().is_valid:
+        raise Exception('The minimization procedure did not converge.')
+
+    hess_inv = np.linalg.pinv(jacobian(jacobian(chisqfunc))(params))
+
+    def chisqfunc_compact(d):
+        model = func(d[:n_parms], x)
+        chisq = anp.sum(((d[n_parms: n_parms + len(x)] - model) / dy_f) ** 2) + anp.sum(((d[n_parms + len(x):] - d[:n_parms]) / dp_f) ** 2)
+        return chisq
+
+    jac_jac = jacobian(jacobian(chisqfunc_compact))(np.concatenate((params, y_f, p_f)))
+
+    deriv = -hess_inv @ jac_jac[:n_parms, n_parms:]
+
+    result = []
+    for i in range(n_parms):
+        result.append(derived_observable(lambda x, **kwargs: x[0], [pseudo_Obs(params[i], 0.0, y[0].names[0], y[0].shape[y[0].names[0]])] + list(y) + list(loc_priors), man_grad=[0] + list(deriv[i])))
+
+    output.fit_parameters = result
+    output.chisquare = chisqfunc(np.asarray(params))
+
+    if kwargs.get('resplot') is True:
+        residual_plot(x, y, func, result)
+
+    if kwargs.get('qqplot') is True:
+        qqplot(x, y, func, result)
+
+    return output
+
+
+def fit_lin(x, y, **kwargs):
+    """Performs a linear fit to y = n + m * x and returns two Obs n, m.
+
+    y has to be a list of Obs, the dvalues of the Obs are used as yerror for the fit.
+    x can either be a list of floats in which case no xerror is assumed, or
+    a list of Obs, where the dvalues of the Obs are used as xerror for the fit.
+    """
+
+    def f(a, x):
+        y = a[0] + a[1] * x
+        return y
+
+    if all(isinstance(n, Obs) for n in x):
+        out = odr_fit(x, y, f, **kwargs)
+        return out.fit_parameters
+    elif all(isinstance(n, float) or isinstance(n, int) for n in x) or isinstance(x, np.ndarray):
+        out = standard_fit(x, y, f, **kwargs)
+        return out.fit_parameters
+    else:
+        raise Exception('Unsupported types for x')
+
+
+def qqplot(x, o_y, func, p):
+    """ Generates a quantile-quantile plot of the fit result which can be used to
+        check if the residuals of the fit are Gaussian distributed.
+    """
+
+    residuals = []
+    for i_x, i_y in zip(x, o_y):
+        residuals.append((i_y - func(p, i_x)) / i_y.dvalue)
+    residuals = sorted(residuals)
+    my_y = [o.value for o in residuals]
+    probplot = scipy.stats.probplot(my_y)
+    my_x = probplot[0][0]
+    plt.figure(figsize=(8, 8 / 1.618))
+    plt.errorbar(my_x, my_y, fmt='o')
+    fit_start = my_x[0]
+    fit_stop = my_x[-1]
+    samples = np.arange(fit_start, fit_stop, 0.01)
+    plt.plot(samples, samples, 'k--', zorder=11, label='Standard normal distribution')
+    plt.plot(samples, probplot[1][0] * samples + probplot[1][1], zorder=10, label='Least squares fit, r=' + str(np.around(probplot[1][2], 3)))
+
+    plt.xlabel('Theoretical quantiles')
+    plt.ylabel('Ordered Values')
+    plt.legend()
+    plt.show()
+
+
+def residual_plot(x, y, func, fit_res):
+    """ Generates a plot which compares the fit to the data and displays the corresponding residuals"""
+    xstart = x[0] - 0.5
+    xstop = x[-1] + 0.5
+    x_samples = np.arange(xstart, xstop, 0.01)
+
+    plt.figure(figsize=(8, 8 / 1.618))
+    gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1], wspace=0.0, hspace=0.0)
+    ax0 = plt.subplot(gs[0])
+    ax0.errorbar(x, [o.value for o in y], yerr=[o.dvalue for o in y], ls='none', fmt='o', capsize=3, markersize=5, label='Data')
+    ax0.plot(x_samples, func([o.value for o in fit_res], x_samples), label='Fit', zorder=10, ls='-', ms=0)
+    ax0.set_xticklabels([])
+    ax0.set_xlim([xstart, xstop])
+    ax0.legend()
+
+    residuals = (np.asarray([o.value for o in y]) - func([o.value for o in fit_res], x)) / np.asarray([o.dvalue for o in y])
+    ax1 = plt.subplot(gs[1])
+    ax1.plot(x, residuals, 'ko', ls='none', markersize=5)
+    ax1.tick_params(direction='out')
+    ax1.tick_params(axis="x", bottom=True, top=True, labelbottom=True)
+    ax1.axhline(y=0.0, ls='--', color='k')
+    ax1.fill_between(x_samples, -1.0, 1.0, alpha=0.1, facecolor='k')
+    ax1.set_xlim([xstart, xstop])
+    ax1.set_ylabel('Residuals')
+    plt.subplots_adjust(wspace=None, hspace=None)
+    plt.show()
+
+
+def covariance_matrix(y):
+    """Returns the covariance matrix of y."""
+    length = len(y)
+    cov = np.zeros((length, length))
+    for i, item in enumerate(y):
+        for j, jtem in enumerate(y[:i + 1]):
+            if i == j:
+                cov[i, j] = item.dvalue ** 2
+            else:
+                cov[i, j] = covariance(item, jtem)
+    return cov + cov.T - np.diag(np.diag(cov))
+
+
+def error_band(x, func, beta):
+    """Returns the error band for an array of sample values x, for given fit function func with optimized parameters beta."""
+    cov = covariance_matrix(beta)
+    if np.any(np.abs(cov - cov.T) > 1000 * np.finfo(np.float64).eps):
+        warnings.warn("Covariance matrix is not symmetric within floating point precision", RuntimeWarning)
+
+    deriv = []
+    for i, item in enumerate(x):
+        deriv.append(np.array(egrad(func)([o.value for o in beta], item)))
+
+    err = []
+    for i, item in enumerate(x):
+        err.append(np.sqrt(deriv[i] @ cov @ deriv[i]))
+    err = np.array(err)
+
+    return err
+
+
+def ks_test(obs=None):
+    """Performs a Kolmogorov–Smirnov test for the Q-values of all fit object.
+
+    If no list is given all Obs in memory are used.
+
+    Disclaimer: The determination of the individual Q-values as well as this function have not been tested yet.
+    """
+
+    raise Exception('Not yet implemented')
+
+    if obs is None:
+        obs_list = []
+        for obj in gc.get_objects():
+            if isinstance(obj, Obs):
+                obs_list.append(obj)
+    else:
+        obs_list = obs
+
+    # TODO: Rework to apply to Q-values of all fits in memory
+    Qs = []
+    for obs_i in obs_list:
+        for ens in obs_i.e_names:
+            if obs_i.e_Q[ens] is not None:
+                Qs.append(obs_i.e_Q[ens])
+
+    bins = len(Qs)
+    x = np.arange(0, 1.001, 0.001)
+    plt.plot(x, x, 'k', zorder=1)
+    plt.xlim(0, 1)
+    plt.ylim(0, 1)
+    plt.xlabel('Q value')
+    plt.ylabel('Cumulative probability')
+    plt.title(str(bins) + ' Q values')
+
+    n = np.arange(1, bins + 1) / np.float64(bins)
+    Xs = np.sort(Qs)
+    plt.step(Xs, n)
+    diffs = n - Xs
+    loc_max_diff = np.argmax(np.abs(diffs))
+    loc = Xs[loc_max_diff]
+    plt.annotate(s='', xy=(loc, loc), xytext=(loc, loc + diffs[loc_max_diff]), arrowprops=dict(arrowstyle='<->', shrinkA=0, shrinkB=0))
+    plt.show()
+
+    print(scipy.stats.kstest(Qs, 'uniform'))
+
+
+def fit_general(x, y, func, silent=False, **kwargs):
+    """Performs a non-linear fit to y = func(x) and returns a list of Obs corresponding to the fit parameters.
+
+    Plausibility of the results should be checked. To control the numerical differentiation
+    the kwargs of numdifftools.step_generators.MaxStepGenerator can be used.
+
+    func has to be of the form
+
+    def func(a, x):
+        y = a[0] + a[1] * x + a[2] * np.sinh(x)
+        return y
+
+    y has to be a list of Obs, the dvalues of the Obs are used as yerror for the fit.
+    x can either be a list of floats in which case no xerror is assumed, or
+    a list of Obs, where the dvalues of the Obs are used as xerror for the fit.
+
+    Keyword arguments
+    -----------------
+    silent -- If true all output to the console is omitted (default False).
+    initial_guess -- can provide an initial guess for the input parameters. Relevant for non-linear fits
+                     with many parameters.
+    """
+
+    warnings.warn("New fit functions with exact error propagation are now available as alternative.", DeprecationWarning)
+
+    if not callable(func):
+        raise TypeError('func has to be a function.')
+
+    for i in range(10):
+        try:
+            func(np.arange(i), 0)
+        except Exception:
+            pass
+        else:
+            break
+    n_parms = i
+    if not silent:
+        print('Fit with', n_parms, 'parameters')
+
+    global print_output, beta0
+    print_output = 1
+    if 'initial_guess' in kwargs:
+        beta0 = kwargs.get('initial_guess')
+        if len(beta0) != n_parms:
+            raise Exception('Initial guess does not have the correct length.')
+    else:
+        beta0 = np.arange(n_parms)
+
+    if len(x) != len(y):
+        raise Exception('x and y have to have the same length')
+
+    if all(isinstance(n, Obs) for n in x):
+        obs = x + y
+        x_constants = None
+        xerr = [o.dvalue for o in x]
+        yerr = [o.dvalue for o in y]
+    elif all(isinstance(n, float) or isinstance(n, int) for n in x) or isinstance(x, np.ndarray):
+        obs = y
+        x_constants = x
+        xerr = None
+        yerr = [o.dvalue for o in y]
+    else:
+        raise Exception('Unsupported types for x')
+
+    def do_the_fit(obs, **kwargs):
+
+        global print_output, beta0
+
+        func = kwargs.get('function')
+        yerr = kwargs.get('yerr')
+        length = len(yerr)
+
+        xerr = kwargs.get('xerr')
+
+        if length == len(obs):
+            assert 'x_constants' in kwargs
+            data = RealData(kwargs.get('x_constants'), obs, sy=yerr)
+            fit_type = 2
+        elif length == len(obs) // 2:
+            data = RealData(obs[:length], obs[length:], sx=xerr, sy=yerr)
+            fit_type = 0
+        else:
+            raise Exception('x and y do not fit together.')
+
+        model = Model(func)
+
+        odr = ODR(data, model, beta0, partol=np.finfo(np.float64).eps)
+        odr.set_job(fit_type=fit_type, deriv=1)
+        output = odr.run()
+        if print_output and not silent:
+            print(*output.stopreason)
+            print('chisquare/d.o.f.:', output.res_var)
+            print_output = 0
+        beta0 = output.beta
+        return output.beta[kwargs.get('n')]
+    res = []
+    for n in range(n_parms):
+        res.append(derived_observable(do_the_fit, obs, function=func, xerr=xerr, yerr=yerr, x_constants=x_constants, num_grad=True, n=n, **kwargs))
+    return res
+
+ +
+ +
+
+
#   class Fit_result(collections.abc.Sequence):
+ +
+ View Source +
class Fit_result(Sequence):
+    """Represents fit results.
+
+    Attributes
+    ----------
+    fit_parameters : list
+        results for the individual fit parameters,
+        also accessible via indices.
+    """
+
+    def __init__(self):
+        self.fit_parameters = None
+
+    def __getitem__(self, idx):
+        return self.fit_parameters[idx]
+
+    def __len__(self):
+        return len(self.fit_parameters)
+
+    def gamma_method(self):
+        """Apply the gamma method to all fit parameters"""
+        [o.gamma_method() for o in self.fit_parameters]
+
+    def __str__(self):
+        self.gamma_method()
+        my_str = 'Goodness of fit:\n'
+        if hasattr(self, 'chisquare_by_dof'):
+            my_str += '\u03C7\u00b2/d.o.f. = ' + f'{self.chisquare_by_dof:2.6f}' + '\n'
+        elif hasattr(self, 'residual_variance'):
+            my_str += 'residual variance = ' + f'{self.residual_variance:2.6f}' + '\n'
+        if hasattr(self, 'chisquare_by_expected_chisquare'):
+            my_str += '\u03C7\u00b2/\u03C7\u00b2exp  = ' + f'{self.chisquare_by_expected_chisquare:2.6f}' + '\n'
+        my_str += 'Fit parameters:\n'
+        for i_par, par in enumerate(self.fit_parameters):
+            my_str += str(i_par) + '\t' + ' ' * int(par >= 0) + str(par).rjust(int(par < 0.0)) + '\n'
+        return my_str
+
+    def __repr__(self):
+        m = max(map(len, list(self.__dict__.keys()))) + 1
+        return '\n'.join([key.rjust(m) + ': ' + repr(value) for key, value in sorted(self.__dict__.items())])
+
+ +
+ +

Represents fit results.

+ +
Attributes
+ +
    +
  • fit_parameters (list): results for the individual fit parameters, also accessible via indices.
+
+ + +
+
#   Fit_result()
+ +
+ View Source +
    def __init__(self):
+        self.fit_parameters = None
+
+ +
+ + + +
+
+
#   def gamma_method(self):
+ +
+ View Source +
    def gamma_method(self):
+        """Apply the gamma method to all fit parameters"""
+        [o.gamma_method() for o in self.fit_parameters]
+
+ +
+ +

Apply the gamma method to all fit parameters

+
+ + +
+
+
Inherited Members
+
+
collections.abc.Sequence
+
index
+
count
+ +
+
+
+
+
+
#   def least_squares(x, y, func, priors=None, silent=False, **kwargs):
+ +
+ View Source +
def least_squares(x, y, func, priors=None, silent=False, **kwargs):
+    """Performs a non-linear fit to y = func(x).
+
+    Arguments:
+    ----------
+    x : list
+        list of floats.
+    y : list
+        list of Obs.
+    func : object
+        fit function, has to be of the form
+
+        def func(a, x):
+            return a[0] + a[1] * x + a[2] * anp.sinh(x)
+
+        For multiple x values func can be of the form
+
+        def func(a, x):
+            (x1, x2) = x
+            return a[0] * x1 ** 2 + a[1] * x2
+
+        It is important that all numpy functions refer to autograd.numpy, otherwise the differentiation
+        will not work.
+    priors : list, optional
+        priors has to be a list with an entry for every parameter in the fit. The entries can either be
+        Obs (e.g. results from a previous fit) or strings containing a value and an error formatted like
+        0.548(23), 500(40) or 0.5(0.4)
+        It is important for the subsequent error estimation that the e_tag for the gamma method is large
+        enough.
+    silent : bool, optional
+        If true all output to the console is omitted (default False).
+
+
+    Keyword arguments
+    -----------------
+    initial_guess -- can provide an initial guess for the input parameters. Relevant for
+                     non-linear fits with many parameters.
+    method -- can be used to choose an alternative method for the minimization of chisquare.
+              The possible methods are the ones which can be used for scipy.optimize.minimize and
+              migrad of iminuit. If no method is specified, Levenberg-Marquardt is used.
+              Reliable alternatives are migrad, Powell and Nelder-Mead.
+    resplot -- If true, a plot which displays fit, data and residuals is generated (default False).
+    qqplot -- If true, a quantile-quantile plot of the fit result is generated (default False).
+    expected_chisquare -- If true prints the expected chisquare which is
+                          corrected by effects caused by correlated input data.
+                          This can take a while as the full correlation matrix
+                          has to be calculated (default False).
+    """
+    if priors is not None:
+        return _prior_fit(x, y, func, priors, silent=silent, **kwargs)
+    else:
+        return _standard_fit(x, y, func, silent=silent, **kwargs)
+
+ +
+ +

Performs a non-linear fit to y = func(x).

+ +

Arguments:

+ +

x : list
+    list of floats.
+y : list
+    list of Obs.
+func : object
+    fit function, has to be of the form

+ +
def func(a, x):
+    return a[0] + a[1] * x + a[2] * anp.sinh(x)
+
+For multiple x values func can be of the form
+
+def func(a, x):
+    (x1, x2) = x
+    return a[0] * x1 ** 2 + a[1] * x2
+
+It is important that all numpy functions refer to autograd.numpy, otherwise the differentiation
+will not work.
+
+ +

priors : list, optional
+    priors has to be a list with an entry for every parameter in the fit. The entries can either be
+    Obs (e.g. results from a previous fit) or strings containing a value and an error formatted like
+    0.548(23), 500(40) or 0.5(0.4)
+    It is important for the subsequent error estimation that the e_tag for the gamma method is large
+    enough.
+silent : bool, optional
+    If true all output to the console is omitted (default False).

+ +
Keyword arguments
+ +

initial_guess -- can provide an initial guess for the input parameters. Relevant for
+                 non-linear fits with many parameters.
+method -- can be used to choose an alternative method for the minimization of chisquare.
+          The possible methods are the ones which can be used for scipy.optimize.minimize and
+          migrad of iminuit. If no method is specified, Levenberg-Marquardt is used.
+          Reliable alternatives are migrad, Powell and Nelder-Mead.
+resplot -- If true, a plot which displays fit, data and residuals is generated (default False).
+qqplot -- If true, a quantile-quantile plot of the fit result is generated (default False).
+expected_chisquare -- If true prints the expected chisquare which is
+                      corrected by effects caused by correlated input data.
+                      This can take a while as the full correlation matrix
+                      has to be calculated (default False).

+
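+A minimal usage sketch (assuming hypothetical input data: samples_list stands
+for a list of Monte Carlo sample lists, one Obs per x value):
+
+import numpy as np
+import autograd.numpy as anp
+import pyerrors as pe
+
+x = np.arange(len(samples_list))
+y = [pe.Obs([samples], ['ensemble_name']) for samples in samples_list]
+[o.gamma_method() for o in y]  # y errors are required before fitting
+
+def func(a, x):
+    return a[0] * anp.exp(-a[1] * x)  # use autograd.numpy inside func
+
+fit_res = pe.fits.least_squares(x, y, func)
+print(fit_res)
+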
+ + +
+
+
#   def standard_fit(x, y, func, silent=False, **kwargs):
+ +
+ View Source +
def standard_fit(x, y, func, silent=False, **kwargs):
+    warnings.warn("standard_fit renamed to least_squares", DeprecationWarning)
+    return least_squares(x, y, func, silent=silent, **kwargs)
+
+ +
+ + + +
+
+
#   def odr_fit(x, y, func, silent=False, **kwargs):
+ +
+ View Source +
def odr_fit(x, y, func, silent=False, **kwargs):
+    warnings.warn("odr_fit renamed to total_least_squares", DeprecationWarning)
+    return total_least_squares(x, y, func, silent=silent, **kwargs)
+
+ +
+ + + +
+
+
#   def total_least_squares(x, y, func, silent=False, **kwargs):
+ +
+ View Source +
def total_least_squares(x, y, func, silent=False, **kwargs):
+    """Performs a non-linear fit to y = func(x) and returns a list of Obs corresponding to the fit parameters.
+
+    x : list
+        list of Obs, or a tuple of lists of Obs
+    y : list
+        list of Obs. The dvalues of the Obs are used as x- and y-error for the fit.
+    func : object
+        func has to be of the form
+
+        def func(a, x):
+            y = a[0] + a[1] * x + a[2] * anp.sinh(x)
+            return y
+
+        For multiple x values func can be of the form
+
+        def func(a, x):
+            (x1, x2) = x
+            return a[0] * x1 ** 2 + a[1] * x2
+
+        It is important that all numpy functions refer to autograd.numpy, otherwise the differentiation
+        will not work.
+    silent : bool, optional
+        If true all output to the console is omitted (default False).
+    Based on the orthogonal distance regression module of scipy.
+
+    Keyword arguments
+    -----------------
+    initial_guess -- can provide an initial guess for the input parameters. Relevant for non-linear
+                     fits with many parameters.
+    expected_chisquare -- If true prints the expected chisquare which is
+                          corrected by effects caused by correlated input data.
+                          This can take a while as the full correlation matrix
+                          has to be calculated (default False).
+    """
+
+    output = Fit_result()
+
+    output.fit_function = func
+
+    x = np.array(x)
+
+    x_shape = x.shape
+
+    if not callable(func):
+        raise TypeError('func has to be a function.')
+
+    for i in range(25):
+        try:
+            func(np.arange(i), x.T[0])
+        except Exception:
+            pass
+        else:
+            break
+
+    n_parms = i
+    if not silent:
+        print('Fit with', n_parms, 'parameters')
+
+    x_f = np.vectorize(lambda o: o.value)(x)
+    dx_f = np.vectorize(lambda o: o.dvalue)(x)
+    y_f = np.array([o.value for o in y])
+    dy_f = np.array([o.dvalue for o in y])
+
+    if np.any(np.asarray(dx_f) <= 0.0):
+        raise Exception('No x errors available, run the gamma method first.')
+
+    if np.any(np.asarray(dy_f) <= 0.0):
+        raise Exception('No y errors available, run the gamma method first.')
+
+    if 'initial_guess' in kwargs:
+        x0 = kwargs.get('initial_guess')
+        if len(x0) != n_parms:
+            raise Exception('Initial guess does not have the correct length.')
+    else:
+        x0 = [1] * n_parms
+
+    data = RealData(x_f, y_f, sx=dx_f, sy=dy_f)
+    model = Model(func)
+    odr = ODR(data, model, x0, partol=np.finfo(np.float64).eps)
+    odr.set_job(fit_type=0, deriv=1)
+    out = odr.run()
+
+    output.residual_variance = out.res_var
+
+    output.method = 'ODR'
+
+    output.message = out.stopreason
+
+    output.xplus = out.xplus
+
+    if not silent:
+        print('Method: ODR')
+        print(*out.stopreason)
+        print('Residual variance:', output.residual_variance)
+
+    if out.info > 3:
+        raise Exception('The minimization procedure did not converge.')
+
+    m = x_f.size
+
+    def odr_chisquare(p):
+        model = func(p[:n_parms], p[n_parms:].reshape(x_shape))
+        chisq = anp.sum(((y_f - model) / dy_f) ** 2) + anp.sum(((x_f - p[n_parms:].reshape(x_shape)) / dx_f) ** 2)
+        return chisq
+
+    if kwargs.get('expected_chisquare') is True:
+        W = np.diag(1 / np.asarray(np.concatenate((dy_f.ravel(), dx_f.ravel()))))
+
+        if kwargs.get('covariance') is not None:
+            cov = kwargs.get('covariance')
+        else:
+            cov = covariance_matrix(np.concatenate((y, x.ravel())))
+
+        number_of_x_parameters = int(m / x_f.shape[-1])
+
+        old_jac = jacobian(func)(out.beta, out.xplus)
+        fused_row1 = np.concatenate((old_jac, np.concatenate((number_of_x_parameters * [np.zeros(old_jac.shape)]), axis=0)))
+        fused_row2 = np.concatenate((jacobian(lambda x, y: func(y, x))(out.xplus, out.beta).reshape(x_f.shape[-1], x_f.shape[-1] * number_of_x_parameters), np.identity(number_of_x_parameters * old_jac.shape[0])))
+        new_jac = np.concatenate((fused_row1, fused_row2), axis=1)
+
+        A = W @ new_jac
+        P_phi = A @ np.linalg.inv(A.T @ A) @ A.T
+        expected_chisquare = np.trace((np.identity(P_phi.shape[0]) - P_phi) @ W @ cov @ W)
+        if expected_chisquare <= 0.0:
+            warnings.warn("Negative expected_chisquare.", RuntimeWarning)
+            expected_chisquare = np.abs(expected_chisquare)
+        output.chisquare_by_expected_chisquare = odr_chisquare(np.concatenate((out.beta, out.xplus.ravel()))) / expected_chisquare
+        if not silent:
+            print('chisquare/expected_chisquare:',
+                  output.chisquare_by_expected_chisquare)
+
+    hess_inv = np.linalg.pinv(jacobian(jacobian(odr_chisquare))(np.concatenate((out.beta, out.xplus.ravel()))))
+
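+    # Propagate the uncertainties of both the x and the y data into the fit
+    # parameters via the implicit function theorem, dp/d(data) = -H^{-1} *
+    # d^2chi^2/(dp d(data)), evaluated separately for x and y below.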
+    def odr_chisquare_compact_x(d):
+        model = func(d[:n_parms], d[n_parms:n_parms + m].reshape(x_shape))
+        chisq = anp.sum(((y_f - model) / dy_f) ** 2) + anp.sum(((d[n_parms + m:].reshape(x_shape) - d[n_parms:n_parms + m].reshape(x_shape)) / dx_f) ** 2)
+        return chisq
+
+    jac_jac_x = jacobian(jacobian(odr_chisquare_compact_x))(np.concatenate((out.beta, out.xplus.ravel(), x_f.ravel())))
+
+    deriv_x = -hess_inv @ jac_jac_x[:n_parms + m, n_parms + m:]
+
+    def odr_chisquare_compact_y(d):
+        model = func(d[:n_parms], d[n_parms:n_parms + m].reshape(x_shape))
+        chisq = anp.sum(((d[n_parms + m:] - model) / dy_f) ** 2) + anp.sum(((x_f - d[n_parms:n_parms + m].reshape(x_shape)) / dx_f) ** 2)
+        return chisq
+
+    jac_jac_y = jacobian(jacobian(odr_chisquare_compact_y))(np.concatenate((out.beta, out.xplus.ravel(), y_f)))
+
+    deriv_y = -hess_inv @ jac_jac_y[:n_parms + m, n_parms + m:]
+
+    result = []
+    for i in range(n_parms):
+        result.append(derived_observable(lambda x, **kwargs: x[0], [pseudo_Obs(out.beta[i], 0.0, y[0].names[0], y[0].shape[y[0].names[0]])] + list(x.ravel()) + list(y), man_grad=[0] + list(deriv_x[i]) + list(deriv_y[i])))
+
+    output.fit_parameters = result
+
+    output.odr_chisquare = odr_chisquare(np.concatenate((out.beta, out.xplus.ravel())))
+    output.dof = x.shape[-1] - n_parms
+
+    return output
+
+ +
+ +

Performs a non-linear fit to y = func(x) and returns a list of Obs corresponding to the fit parameters.

+ +

x : list
+    list of Obs, or a tuple of lists of Obs
+y : list
+    list of Obs. The dvalues of the Obs are used as x- and y-error for the fit.
+func : object
+    func has to be of the form

+ +
def func(a, x):
+    y = a[0] + a[1] * x + a[2] * anp.sinh(x)
+    return y
+
+For multiple x values func can be of the form
+
+def func(a, x):
+    (x1, x2) = x
+    return a[0] * x1 ** 2 + a[1] * x2
+
+It is important that all numpy functions refer to autograd.numpy, otherwise the differentiation
+will not work.
+
+ +

silent : bool, optional
+    If true all output to the console is omitted (default False).
+Based on the orthogonal distance regression module of scipy.

+ +
Keyword arguments
+ +

initial_guess -- can provide an initial guess for the input parameters. Relevant for non-linear
+                 fits with many parameters.
+expected_chisquare -- If true prints the expected chisquare which is
+                      corrected by effects caused by correlated input data.
+                      This can take a while as the full correlation matrix
+                      has to be calculated (default False).

+
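+A minimal usage sketch (hypothetical values; both x and y need errors, so Obs
+are generated here via pseudo_Obs):
+
+import pyerrors as pe
+
+x = [pe.pseudo_Obs(v, 0.05, 'test_ensemble') for v in [1.0, 2.0, 3.0]]
+y = [pe.pseudo_Obs(v, 0.2, 'test_ensemble') for v in [2.1, 3.9, 6.2]]
+
+def func(a, x):
+    return a[0] + a[1] * x
+
+fit_res = pe.fits.total_least_squares(x, y, func)
+print(fit_res)
+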
+ + +
+
+
#   def prior_fit(x, y, func, priors, silent=False, **kwargs):
+ +
+ View Source +
def prior_fit(x, y, func, priors, silent=False, **kwargs):
+    warnings.warn("prior_fit renamed to least_squares", DeprecationWarning)
+    return least_squares(x, y, func, priors=priors, silent=silent, **kwargs)
+
+ +
+ + + +
+
+
#   def fit_lin(x, y, **kwargs):
+ +
+ View Source +
def fit_lin(x, y, **kwargs):
+    """Performs a linear fit to y = n + m * x and returns two Obs n, m.
+
+    y has to be a list of Obs, the dvalues of the Obs are used as yerror for the fit.
+    x can either be a list of floats in which case no xerror is assumed, or
+    a list of Obs, where the dvalues of the Obs are used as xerror for the fit.
+    """
+
+    def f(a, x):
+        y = a[0] + a[1] * x
+        return y
+
+    if all(isinstance(n, Obs) for n in x):
+        out = odr_fit(x, y, f, **kwargs)
+        return out.fit_parameters
+    elif all(isinstance(n, float) or isinstance(n, int) for n in x) or isinstance(x, np.ndarray):
+        out = standard_fit(x, y, f, **kwargs)
+        return out.fit_parameters
+    else:
+        raise Exception('Unsupported types for x')
+
+ +
+ +

Performs a linear fit to y = n + m * x and returns two Obs n, m.

+ +

y has to be a list of Obs, the dvalues of the Obs are used as yerror for the fit.
+x can either be a list of floats in which case no xerror is assumed, or
+a list of Obs, where the dvalues of the Obs are used as xerror for the fit.

+
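+For example (hypothetical values):
+
+import pyerrors as pe
+
+x = [1.0, 2.0, 3.0]  # plain floats: no x-error is assumed
+y = [pe.pseudo_Obs(v, 0.1, 'test_ensemble') for v in [1.9, 4.1, 5.8]]
+n, m = pe.fits.fit_lin(x, y)  # intercept n and slope m as Obs
+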
+ + +
+
+
#   def qqplot(x, o_y, func, p):
+ +
+ View Source +
def qqplot(x, o_y, func, p):
+    """ Generates a quantile-quantile plot of the fit result which can be used to
+        check if the residuals of the fit are Gaussian distributed.
+    """
+
+    residuals = []
+    for i_x, i_y in zip(x, o_y):
+        residuals.append((i_y - func(p, i_x)) / i_y.dvalue)
+    residuals = sorted(residuals)
+    my_y = [o.value for o in residuals]
+    probplot = scipy.stats.probplot(my_y)
+    my_x = probplot[0][0]
+    plt.figure(figsize=(8, 8 / 1.618))
+    plt.errorbar(my_x, my_y, fmt='o')
+    fit_start = my_x[0]
+    fit_stop = my_x[-1]
+    samples = np.arange(fit_start, fit_stop, 0.01)
+    plt.plot(samples, samples, 'k--', zorder=11, label='Standard normal distribution')
+    plt.plot(samples, probplot[1][0] * samples + probplot[1][1], zorder=10, label='Least squares fit, r=' + str(np.around(probplot[1][2], 3)))
+
+    plt.xlabel('Theoretical quantiles')
+    plt.ylabel('Ordered Values')
+    plt.legend()
+    plt.show()
+
+ +
+ +

Generates a quantile-quantile plot of the fit result which can be used to
+check if the residuals of the fit are Gaussian distributed.

+
+ + +
+
+
#   def residual_plot(x, y, func, fit_res):
+ +
+ View Source +
def residual_plot(x, y, func, fit_res):
+    """ Generates a plot which compares the fit to the data and displays the corresponding residuals"""
+    xstart = x[0] - 0.5
+    xstop = x[-1] + 0.5
+    x_samples = np.arange(xstart, xstop, 0.01)
+
+    plt.figure(figsize=(8, 8 / 1.618))
+    gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1], wspace=0.0, hspace=0.0)
+    ax0 = plt.subplot(gs[0])
+    ax0.errorbar(x, [o.value for o in y], yerr=[o.dvalue for o in y], ls='none', fmt='o', capsize=3, markersize=5, label='Data')
+    ax0.plot(x_samples, func([o.value for o in fit_res], x_samples), label='Fit', zorder=10, ls='-', ms=0)
+    ax0.set_xticklabels([])
+    ax0.set_xlim([xstart, xstop])
+    ax0.legend()
+
+    residuals = (np.asarray([o.value for o in y]) - func([o.value for o in fit_res], x)) / np.asarray([o.dvalue for o in y])
+    ax1 = plt.subplot(gs[1])
+    ax1.plot(x, residuals, 'ko', ls='none', markersize=5)
+    ax1.tick_params(direction='out')
+    ax1.tick_params(axis="x", bottom=True, top=True, labelbottom=True)
+    ax1.axhline(y=0.0, ls='--', color='k')
+    ax1.fill_between(x_samples, -1.0, 1.0, alpha=0.1, facecolor='k')
+    ax1.set_xlim([xstart, xstop])
+    ax1.set_ylabel('Residuals')
+    plt.subplots_adjust(wspace=None, hspace=None)
+    plt.show()
+
+ +
+ +

Generates a plot which compares the fit to the data and displays the corresponding residuals

+
+ + +
+
+
#   def covariance_matrix(y):
+ +
+ View Source +
def covariance_matrix(y):
+    """Returns the covariance matrix of y."""
+    length = len(y)
+    cov = np.zeros((length, length))
+    for i, item in enumerate(y):
+        for j, jtem in enumerate(y[:i + 1]):
+            if i == j:
+                cov[i, j] = item.dvalue ** 2
+            else:
+                cov[i, j] = covariance(item, jtem)
+    return cov + cov.T - np.diag(np.diag(cov))
+
+ +
+ +

Returns the covariance matrix of y.

+
+ + +
+
+
#   def error_band(x, func, beta):
+ +
+ View Source +
def error_band(x, func, beta):
+    """Returns the error band for an array of sample values x, for given fit function func with optimized parameters beta."""
+    cov = covariance_matrix(beta)
+    if np.any(np.abs(cov - cov.T) > 1000 * np.finfo(np.float64).eps):
+        warnings.warn("Covariance matrix is not symmetric within floating point precision", RuntimeWarning)
+
+    deriv = []
+    for i, item in enumerate(x):
+        deriv.append(np.array(egrad(func)([o.value for o in beta], item)))
+
+    err = []
+    for i, item in enumerate(x):
+        err.append(np.sqrt(deriv[i] @ cov @ deriv[i]))
+    err = np.array(err)
+
+    return err
+
+ +
+ +

Returns the error band for an array of sample values x, for given fit function func with optimized parameters beta.

+
+ + +
+
+
#   def ks_test(obs=None):
+ +
+ View Source +
def ks_test(obs=None):
+    """Performs a Kolmogorov–Smirnov test for the Q-values of all fit object.
+
+    If no list is given all Obs in memory are used.
+
+    Disclaimer: The determination of the individual Q-values as well as this function have not been tested yet.
+    """
+
+    raise Exception('Not yet implemented')
+
+    if obs is None:
+        obs_list = []
+        for obj in gc.get_objects():
+            if isinstance(obj, Obs):
+                obs_list.append(obj)
+    else:
+        obs_list = obs
+
+    # TODO: Rework to apply to Q-values of all fits in memory
+    Qs = []
+    for obs_i in obs_list:
+        for ens in obs_i.e_names:
+            if obs_i.e_Q[ens] is not None:
+                Qs.append(obs_i.e_Q[ens])
+
+    bins = len(Qs)
+    x = np.arange(0, 1.001, 0.001)
+    plt.plot(x, x, 'k', zorder=1)
+    plt.xlim(0, 1)
+    plt.ylim(0, 1)
+    plt.xlabel('Q value')
+    plt.ylabel('Cumulative probability')
+    plt.title(str(bins) + ' Q values')
+
+    n = np.arange(1, bins + 1) / np.float64(bins)
+    Xs = np.sort(Qs)
+    plt.step(Xs, n)
+    diffs = n - Xs
+    loc_max_diff = np.argmax(np.abs(diffs))
+    loc = Xs[loc_max_diff]
+    plt.annotate(s='', xy=(loc, loc), xytext=(loc, loc + diffs[loc_max_diff]), arrowprops=dict(arrowstyle='<->', shrinkA=0, shrinkB=0))
+    plt.show()
+
+    print(scipy.stats.kstest(Qs, 'uniform'))
+
+ +
+ +

Performs a Kolmogorov–Smirnov test for the Q-values of all fit objects.

+ +

If no list is given all Obs in memory are used.

+ +

Disclaimer: The determination of the individual Q-values as well as this function have not been tested yet.

+
+ + +
+
+
#   def fit_general(x, y, func, silent=False, **kwargs):
+ +
+ View Source +
def fit_general(x, y, func, silent=False, **kwargs):
+    """Performs a non-linear fit to y = func(x) and returns a list of Obs corresponding to the fit parameters.
+
+    Plausibility of the results should be checked. To control the numerical differentiation
+    the kwargs of numdifftools.step_generators.MaxStepGenerator can be used.
+
+    func has to be of the form
+
+    def func(a, x):
+        y = a[0] + a[1] * x + a[2] * np.sinh(x)
+        return y
+
+    y has to be a list of Obs, the dvalues of the Obs are used as yerror for the fit.
+    x can either be a list of floats in which case no xerror is assumed, or
+    a list of Obs, where the dvalues of the Obs are used as xerror for the fit.
+
+    Keyword arguments
+    -----------------
+    silent -- If true all output to the console is omitted (default False).
+    initial_guess -- can provide an initial guess for the input parameters. Relevant for non-linear fits
+                     with many parameters.
+    """
+
+    warnings.warn("New fit functions with exact error propagation are now available as alternative.", DeprecationWarning)
+
+    if not callable(func):
+        raise TypeError('func has to be a function.')
+
+    for i in range(10):
+        try:
+            func(np.arange(i), 0)
+        except Exception:
+            pass
+        else:
+            break
+    n_parms = i
+    if not silent:
+        print('Fit with', n_parms, 'parameters')
+
+    global print_output, beta0
+    print_output = 1
+    if 'initial_guess' in kwargs:
+        beta0 = kwargs.get('initial_guess')
+        if len(beta0) != n_parms:
+            raise Exception('Initial guess does not have the correct length.')
+    else:
+        beta0 = np.arange(n_parms)
+
+    if len(x) != len(y):
+        raise Exception('x and y have to have the same length')
+
+    if all(isinstance(n, Obs) for n in x):
+        obs = x + y
+        x_constants = None
+        xerr = [o.dvalue for o in x]
+        yerr = [o.dvalue for o in y]
+    elif all(isinstance(n, float) or isinstance(n, int) for n in x) or isinstance(x, np.ndarray):
+        obs = y
+        x_constants = x
+        xerr = None
+        yerr = [o.dvalue for o in y]
+    else:
+        raise Exception('Unsupported types for x')
+
+    def do_the_fit(obs, **kwargs):
+
+        global print_output, beta0
+
+        func = kwargs.get('function')
+        yerr = kwargs.get('yerr')
+        length = len(yerr)
+
+        xerr = kwargs.get('xerr')
+
+        if length == len(obs):
+            assert 'x_constants' in kwargs
+            data = RealData(kwargs.get('x_constants'), obs, sy=yerr)
+            fit_type = 2
+        elif length == len(obs) // 2:
+            data = RealData(obs[:length], obs[length:], sx=xerr, sy=yerr)
+            fit_type = 0
+        else:
+            raise Exception('x and y do not fit together.')
+
+        model = Model(func)
+
+        odr = ODR(data, model, beta0, partol=np.finfo(np.float64).eps)
+        odr.set_job(fit_type=fit_type, deriv=1)
+        output = odr.run()
+        if print_output and not silent:
+            print(*output.stopreason)
+            print('chisquare/d.o.f.:', output.res_var)
+            print_output = 0
+        beta0 = output.beta
+        return output.beta[kwargs.get('n')]
+    res = []
+    for n in range(n_parms):
+        res.append(derived_observable(do_the_fit, obs, function=func, xerr=xerr, yerr=yerr, x_constants=x_constants, num_grad=True, n=n, **kwargs))
+    return res
+
+ +
+ +

Performs a non-linear fit to y = func(x) and returns a list of Obs corresponding to the fit parameters.

+ +

Plausibility of the results should be checked. To control the numerical differentiation
+the kwargs of numdifftools.step_generators.MaxStepGenerator can be used.

+ +

func has to be of the form

+ +

def func(a, x):
+    y = a[0] + a[1] * x + a[2] * np.sinh(x)
+    return y

+ +

y has to be a list of Obs, the dvalues of the Obs are used as yerror for the fit.
+x can either be a list of floats in which case no xerror is assumed, or
+a list of Obs, where the dvalues of the Obs are used as xerror for the fit.

+ +
Keyword arguments
+ +

silent -- If true all output to the console is omitted (default False).
+initial_guess -- can provide an initial guess for the input parameters. Relevant for non-linear fits
+                 with many parameters.

+
+ + +
+
+ + \ No newline at end of file diff --git a/docs/pyerrors/input.html b/docs/pyerrors/input.html new file mode 100644 index 00000000..5fb35eea --- /dev/null +++ b/docs/pyerrors/input.html @@ -0,0 +1,252 @@ + + + + + + + pyerrors.input API documentation + + + + + + + + + + + +
+
+

+pyerrors.input

+ + +
+ View Source +
from . import bdio
+from . import hadrons
+from . import sfcf
+from . import openQCD
+from . import misc
+
+ +
+ +
+
+ + \ No newline at end of file diff --git a/docs/pyerrors/input/bdio.html b/docs/pyerrors/input/bdio.html new file mode 100644 index 00000000..055691ab --- /dev/null +++ b/docs/pyerrors/input/bdio.html @@ -0,0 +1,1623 @@ + + + + + + + pyerrors.input.bdio API documentation + + + + + + + + + + + +
+
+

+pyerrors.input.bdio

+ + +
+ View Source +
#!/usr/bin/env python
+# coding: utf-8
+
+import ctypes
+import hashlib
+import autograd.numpy as np  # Thinly-wrapped numpy
+from ..obs import Obs
+
+
+def read_ADerrors(file_path, bdio_path='./libbdio.so', **kwargs):
+    """ Extract generic MCMC data from a bdio file
+
+    read_ADerrors requires bdio to be compiled into a shared library. This can be achieved by
+    adding the flag -fPIC to CC and changing the all target to
+
+    all:		bdio.o $(LIBDIR)
+                gcc -shared -Wl,-soname,libbdio.so -o $(BUILDDIR)/libbdio.so $(BUILDDIR)/bdio.o
+                cp $(BUILDDIR)/libbdio.so $(LIBDIR)/
+
+    Parameters
+    ----------
+    file_path -- path to the bdio file
+    bdio_path -- path to the shared bdio library libbdio.so (default ./libbdio.so)
+    """
+    bdio = ctypes.cdll.LoadLibrary(bdio_path)
+
+    bdio_open = bdio.bdio_open
+    bdio_open.restype = ctypes.c_void_p
+
+    bdio_close = bdio.bdio_close
+    bdio_close.restype = ctypes.c_int
+    bdio_close.argtypes = [ctypes.c_void_p]
+
+    bdio_seek_record = bdio.bdio_seek_record
+    bdio_seek_record.restype = ctypes.c_int
+    bdio_seek_record.argtypes = [ctypes.c_void_p]
+
+    bdio_get_rlen = bdio.bdio_get_rlen
+    bdio_get_rlen.restype = ctypes.c_int
+    bdio_get_rlen.argtypes = [ctypes.c_void_p]
+
+    bdio_get_ruinfo = bdio.bdio_get_ruinfo
+    bdio_get_ruinfo.restype = ctypes.c_int
+    bdio_get_ruinfo.argtypes = [ctypes.c_void_p]
+
+    bdio_read = bdio.bdio_read
+    bdio_read.restype = ctypes.c_size_t
+    bdio_read.argtypes = [ctypes.c_char_p, ctypes.c_size_t, ctypes.c_void_p]
+
+    bdio_read_f64 = bdio.bdio_read_f64
+    bdio_read_f64.restype = ctypes.c_size_t
+    bdio_read_f64.argtypes = [ctypes.c_void_p, ctypes.c_size_t, ctypes.c_void_p]
+
+    bdio_read_int32 = bdio.bdio_read_int32
+    bdio_read_int32.restype = ctypes.c_size_t
+    bdio_read_int32.argtypes = [ctypes.c_void_p, ctypes.c_size_t, ctypes.c_void_p]
+
+    b_path = file_path.encode('utf-8')
+    read = 'r'
+    b_read = read.encode('utf-8')
+
+    fbdio = bdio_open(ctypes.c_char_p(b_path), ctypes.c_char_p(b_read), None)
+
+    return_list = []
+
+    print('Reading of bdio file started')
+    while True:
+        bdio_seek_record(fbdio)
+        ruinfo = bdio_get_ruinfo(fbdio)
+
+        if ruinfo == 7:
+            print('MD5sum found')  # For now we just ignore these entries and do not perform any checks on them
+            continue
+
+        if ruinfo < 0:
+            # EOF reached
+            break
+        bdio_get_rlen(fbdio)
+
+        def read_c_double():
+            d_buf = ctypes.c_double
+            pd_buf = d_buf()
+            ppd_buf = ctypes.c_void_p(ctypes.addressof(pd_buf))
+            bdio_read_f64(ppd_buf, ctypes.c_size_t(8), ctypes.c_void_p(fbdio))
+            return pd_buf.value
+
+        mean = read_c_double()
+        print('mean', mean)
+
+        def read_c_size_t():
+            d_buf = ctypes.c_size_t
+            pd_buf = d_buf()
+            ppd_buf = ctypes.c_void_p(ctypes.addressof(pd_buf))
+            bdio_read_int32(ppd_buf, ctypes.c_size_t(4), ctypes.c_void_p(fbdio))
+            return pd_buf.value
+
+        neid = read_c_size_t()
+        print('neid', neid)
+
+        ndata = []
+        for index in range(neid):
+            ndata.append(read_c_size_t())
+        print('ndata', ndata)
+
+        nrep = []
+        for index in range(neid):
+            nrep.append(read_c_size_t())
+        print('nrep', nrep)
+
+        vrep = []
+        for index in range(neid):
+            vrep.append([])
+            for jndex in range(nrep[index]):
+                vrep[-1].append(read_c_size_t())
+        print('vrep', vrep)
+
+        ids = []
+        for index in range(neid):
+            ids.append(read_c_size_t())
+        print('ids', ids)
+
+        nt = []
+        for index in range(neid):
+            nt.append(read_c_size_t())
+        print('nt', nt)
+
+        zero = []
+        for index in range(neid):
+            zero.append(read_c_double())
+        print('zero', zero)
+
+        four = []
+        for index in range(neid):
+            four.append(read_c_double())
+        print('four', four)
+
+        d_buf = ctypes.c_double * np.sum(ndata)
+        pd_buf = d_buf()
+        ppd_buf = ctypes.c_void_p(ctypes.addressof(pd_buf))
+        bdio_read_f64(ppd_buf, ctypes.c_size_t(8 * np.sum(ndata)), ctypes.c_void_p(fbdio))
+        delta = pd_buf[:]
+
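+        # The record stores the deviations from the mean for every configuration;
+        # add the mean back and split the flat array at the replica boundaries
+        # encoded in vrep.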
+        samples = np.split(np.asarray(delta) + mean, np.cumsum([a for su in vrep for a in su])[:-1])
+        no_reps = [len(o) for o in vrep]
+        assert len(ids) == len(no_reps)
+        tmp_names = []
+        ens_length = max([len(str(o)) for o in ids])
+        for loc_id, reps in zip(ids, no_reps):
+            for index in range(reps):
+                missing_chars = ens_length - len(str(loc_id))
+                tmp_names.append(str(loc_id) + ' ' * missing_chars + '|r' + '{0:03d}'.format(index))
+
+        return_list.append(Obs(samples, tmp_names))
+
+    bdio_close(fbdio)
+    print()
+    print(len(return_list), 'observable(s) extracted.')
+    return return_list
+
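+# A minimal usage sketch (hypothetical file name):
+#
+#   from pyerrors.input import bdio
+#   obs_list = bdio.read_ADerrors('measurements.bdio', bdio_path='./libbdio.so')
+#   print(obs_list[0])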
+
+def write_ADerrors(obs_list, file_path, bdio_path='./libbdio.so', **kwargs):
+    """ Write Obs to a bdio file according to ADerrors conventions
+
+    write_ADerrors requires bdio to be compiled into a shared library. This can be achieved by
+    adding the flag -fPIC to CC and changing the all target to
+
+    all:		bdio.o $(LIBDIR)
+                gcc -shared -Wl,-soname,libbdio.so -o $(BUILDDIR)/libbdio.so $(BUILDDIR)/bdio.o
+                cp $(BUILDDIR)/libbdio.so $(LIBDIR)/
+
+    Parameters
+    ----------
+    file_path -- path to the bdio file
+    bdio_path -- path to the shared bdio library libbdio.so (default ./libbdio.so)
+    """
+
+    for obs in obs_list:
+        if not hasattr(obs, 'e_names'):
+            raise Exception('Run the gamma method first for all obs.')
+
+    bdio = ctypes.cdll.LoadLibrary(bdio_path)
+
+    bdio_open = bdio.bdio_open
+    bdio_open.restype = ctypes.c_void_p
+
+    bdio_close = bdio.bdio_close
+    bdio_close.restype = ctypes.c_int
+    bdio_close.argtypes = [ctypes.c_void_p]
+
+    bdio_start_record = bdio.bdio_start_record
+    bdio_start_record.restype = ctypes.c_int
+    bdio_start_record.argtypes = [ctypes.c_size_t, ctypes.c_size_t, ctypes.c_void_p]
+
+    bdio_flush_record = bdio.bdio_flush_record
+    bdio_flush_record.restype = ctypes.c_int
+    bdio_flush_record.argtypes = [ctypes.c_void_p]
+
+    bdio_write_f64 = bdio.bdio_write_f64
+    bdio_write_f64.restype = ctypes.c_size_t
+    bdio_write_f64.argtypes = [ctypes.c_void_p, ctypes.c_size_t, ctypes.c_void_p]
+
+    bdio_write_int32 = bdio.bdio_write_int32
+    bdio_write_int32.restype = ctypes.c_size_t
+    bdio_write_int32.argtypes = [ctypes.c_void_p, ctypes.c_size_t, ctypes.c_void_p]
+
+    b_path = file_path.encode('utf-8')
+    write = 'w'
+    b_write = write.encode('utf-8')
+    form = 'pyerrors ADerror export'
+    b_form = form.encode('utf-8')
+
+    fbdio = bdio_open(ctypes.c_char_p(b_path), ctypes.c_char_p(b_write), ctypes.c_char_p(b_form))
+
+    for obs in obs_list:
+        # mean = obs.value
+        neid = len(obs.e_names)
+        vrep = [[obs.shape[o] for o in sl] for sl in list(obs.e_content.values())]
+        vrep_write = [item for sublist in vrep for item in sublist]
+        ndata = [np.sum(o) for o in vrep]
+        nrep = [len(o) for o in vrep]
+        print('ndata', ndata)
+        print('nrep', nrep)
+        print('vrep', vrep)
+        keys = list(obs.e_content.keys())
+        ids = []
+        for key in keys:
+            try:  # Try to convert key to integer
+                ids.append(int(key))
+            except ValueError:  # If not possible, construct a hash from the key
+                ids.append(int(hashlib.sha256(key.encode('utf-8')).hexdigest(), 16) % 10 ** 8)
+        print('ids', ids)
+        nt = []
+        for e, e_name in enumerate(obs.e_names):
+
+            r_length = []
+            for r_name in obs.e_content[e_name]:
+                r_length.append(len(obs.deltas[r_name]))
+
+            # e_N = np.sum(r_length)
+            nt.append(max(r_length) // 2)
+        print('nt', nt)
+        zero = neid * [0.0]
+        four = neid * [4.0]
+        print('zero', zero)
+        print('four', four)
+        delta = np.concatenate([item for sublist in [[obs.deltas[o] for o in sl] for sl in list(obs.e_content.values())] for item in sublist])
+
+        bdio_start_record(0x00, 8, fbdio)
+
+        def write_c_double(double):
+            pd_buf = ctypes.c_double(double)
+            ppd_buf = ctypes.c_void_p(ctypes.addressof(pd_buf))
+            bdio_write_f64(ppd_buf, ctypes.c_size_t(8), ctypes.c_void_p(fbdio))
+
+        def write_c_size_t(int32):
+            pd_buf = ctypes.c_size_t(int32)
+            ppd_buf = ctypes.c_void_p(ctypes.addressof(pd_buf))
+            bdio_write_int32(ppd_buf, ctypes.c_size_t(4), ctypes.c_void_p(fbdio))
+
+        write_c_double(obs.value)
+        write_c_size_t(neid)
+
+        for element in ndata:
+            write_c_size_t(element)
+        for element in nrep:
+            write_c_size_t(element)
+        for element in vrep_write:
+            write_c_size_t(element)
+        for element in ids:
+            write_c_size_t(element)
+        for element in nt:
+            write_c_size_t(element)
+
+        for element in zero:
+            write_c_double(element)
+        for element in four:
+            write_c_double(element)
+
+        for element in delta:
+            write_c_double(element)
+
+    bdio_close(fbdio)
+    return 0
+
+
+def _get_kwd(string, key):
+    return (string.split(key, 1)[1]).split(" ", 1)[0]
+
+
+def _get_corr_name(string, key):
+    return (string.split(key, 1)[1]).split(' NDIM=', 1)[0]
+
+
+def read_mesons(file_path, bdio_path='./libbdio.so', **kwargs):
+    """ Extract mesons data from a bdio file and return it as a dictionary
+
+    The dictionary can be accessed with a tuple consisting of (type, source_position, kappa1, kappa2)
+
+    read_mesons requires bdio to be compiled into a shared library. This can be achieved by
+    adding the flag -fPIC to CC and changing the all target to
+
+    all:		bdio.o $(LIBDIR)
+                gcc -shared -Wl,-soname,libbdio.so -o $(BUILDDIR)/libbdio.so $(BUILDDIR)/bdio.o
+                cp $(BUILDDIR)/libbdio.so $(LIBDIR)/
+
+    Parameters
+    ----------
+    file_path -- path to the bdio file
+    bdio_path -- path to the shared bdio library libbdio.so (default ./libbdio.so)
+    stop -- stops reading at given configuration number (default None)
+    alternative_ensemble_name -- Manually overwrite ensemble name
+    """
+    bdio = ctypes.cdll.LoadLibrary(bdio_path)
+
+    bdio_open = bdio.bdio_open
+    bdio_open.restype = ctypes.c_void_p
+
+    bdio_close = bdio.bdio_close
+    bdio_close.restype = ctypes.c_int
+    bdio_close.argtypes = [ctypes.c_void_p]
+
+    bdio_seek_record = bdio.bdio_seek_record
+    bdio_seek_record.restype = ctypes.c_int
+    bdio_seek_record.argtypes = [ctypes.c_void_p]
+
+    bdio_get_rlen = bdio.bdio_get_rlen
+    bdio_get_rlen.restype = ctypes.c_int
+    bdio_get_rlen.argtypes = [ctypes.c_void_p]
+
+    bdio_get_ruinfo = bdio.bdio_get_ruinfo
+    bdio_get_ruinfo.restype = ctypes.c_int
+    bdio_get_ruinfo.argtypes = [ctypes.c_void_p]
+
+    bdio_read = bdio.bdio_read
+    bdio_read.restype = ctypes.c_size_t
+    bdio_read.argtypes = [ctypes.c_char_p, ctypes.c_size_t, ctypes.c_void_p]
+
+    bdio_read_f64 = bdio.bdio_read_f64
+    bdio_read_f64.restype = ctypes.c_size_t
+    bdio_read_f64.argtypes = [ctypes.c_void_p, ctypes.c_size_t, ctypes.c_void_p]
+
+    b_path = file_path.encode('utf-8')
+    read = 'r'
+    b_read = read.encode('utf-8')
+    form = 'Generic Correlator Format 1.0'
+    b_form = form.encode('utf-8')
+
+    ensemble_name = ''
+    volume = []  # lattice volume
+    boundary_conditions = []
+    corr_name = []  # Contains correlator names
+    corr_type = []  # Contains correlator data type (important for reading out numerical data)
+    corr_props = []  # Contains propagator types (Component of corr_kappa)
+    d0 = 0  # tvals
+    d1 = 0  # nnoise
+    prop_kappa = []  # Contains propagator kappas (Component of corr_kappa)
+    prop_source = []  # Contains propagator source positions
+    # Check noise type for multiple replica?
+    cnfg_no = -1
+    corr_no = -1
+    data = []
+
+    fbdio = bdio_open(ctypes.c_char_p(b_path), ctypes.c_char_p(b_read), ctypes.c_char_p(b_form))
+
+    print('Reading of bdio file started')
+    while True:
+        bdio_seek_record(fbdio)
+        ruinfo = bdio_get_ruinfo(fbdio)
+        if ruinfo < 0:
+            # EOF reached
+            break
+        rlen = bdio_get_rlen(fbdio)
+        if ruinfo == 5:
+            d_buf = ctypes.c_double * (2 + d0 * d1 * 2)
+            pd_buf = d_buf()
+            ppd_buf = ctypes.c_void_p(ctypes.addressof(pd_buf))
+            iread = bdio_read_f64(ppd_buf, ctypes.c_size_t(rlen), ctypes.c_void_p(fbdio))
+            if corr_type[corr_no] == 'complex':
+                tmp_mean = np.mean(np.asarray(np.split(np.asarray(pd_buf[2 + 2 * d1:-2 * d1:2]), d0 - 2)), axis=1)
+            else:
+                tmp_mean = np.mean(np.asarray(np.split(np.asarray(pd_buf[2 + d1:-d0 * d1 - d1]), d0 - 2)), axis=1)
+
+            data[corr_no].append(tmp_mean)
+            corr_no += 1
+        else:
+            alt_buf = ctypes.create_string_buffer(1024)
+            palt_buf = ctypes.c_char_p(ctypes.addressof(alt_buf))
+            iread = bdio_read(palt_buf, ctypes.c_size_t(rlen), ctypes.c_void_p(fbdio))
+            if rlen != iread:
+                print('Error')
+            for i, item in enumerate(alt_buf):
+                if item == b'\x00':
+                    alt_buf[i] = b' '
+            tmp_string = (alt_buf[:].decode("utf-8")).rstrip()
+            if ruinfo == 0:
+                ensemble_name = _get_kwd(tmp_string, 'ENSEMBLE=')
+                volume.append(int(_get_kwd(tmp_string, 'L0=')))
+                volume.append(int(_get_kwd(tmp_string, 'L1=')))
+                volume.append(int(_get_kwd(tmp_string, 'L2=')))
+                volume.append(int(_get_kwd(tmp_string, 'L3=')))
+                boundary_conditions.append(_get_kwd(tmp_string, 'BC0='))
+                boundary_conditions.append(_get_kwd(tmp_string, 'BC1='))
+                boundary_conditions.append(_get_kwd(tmp_string, 'BC2='))
+                boundary_conditions.append(_get_kwd(tmp_string, 'BC3='))
+
+            if ruinfo == 1:
+                corr_name.append(_get_corr_name(tmp_string, 'CORR_NAME='))
+                corr_type.append(_get_kwd(tmp_string, 'DATATYPE='))
+                corr_props.append([_get_kwd(tmp_string, 'PROP0='), _get_kwd(tmp_string, 'PROP1=')])
+                if d0 == 0:
+                    d0 = int(_get_kwd(tmp_string, 'D0='))
+                else:
+                    if d0 != int(_get_kwd(tmp_string, 'D0=')):
+                        print('Error: Varying number of time values')
+                if d1 == 0:
+                    d1 = int(_get_kwd(tmp_string, 'D1='))
+                else:
+                    if d1 != int(_get_kwd(tmp_string, 'D1=')):
+                        print('Error: Varying number of random sources')
+            if ruinfo == 2:
+                prop_kappa.append(_get_kwd(tmp_string, 'KAPPA='))
+                prop_source.append(_get_kwd(tmp_string, 'x0='))
+            if ruinfo == 4:
+                if 'stop' in kwargs:
+                    if cnfg_no >= kwargs.get('stop') - 1:
+                        break
+                cnfg_no += 1
+                print('\r%s %i' % ('Reading configuration', cnfg_no + 1), end='\r')
+                if cnfg_no == 0:
+                    no_corrs = len(corr_name)
+                    data = []
+                    for c in range(no_corrs):
+                        data.append([])
+
+                corr_no = 0
+    bdio_close(fbdio)
+
+    print('\nEnsemble: ', ensemble_name)
+    if 'alternative_ensemble_name' in kwargs:
+        ensemble_name = kwargs.get('alternative_ensemble_name')
+        print('Ensemble name overwritten to', ensemble_name)
+    print('Lattice volume: ', volume)
+    print('Boundary conditions: ', boundary_conditions)
+    print('Number of time values: ', d0)
+    print('Number of random sources: ', d1)
+    print('Number of corrs: ', len(corr_name))
+    print('Number of configurations: ', cnfg_no + 1)
+
+    corr_kappa = []  # Contains kappa values for both propagators of given correlation function
+    corr_source = []
+    for item in corr_props:
+        corr_kappa.append([float(prop_kappa[int(item[0])]), float(prop_kappa[int(item[1])])])
+        if prop_source[int(item[0])] != prop_source[int(item[1])]:
+            raise Exception('Source positions do not match for correlator ' + str(item))
+        else:
+            corr_source.append(int(prop_source[int(item[0])]))
+
+    result = {}
+    for c in range(no_corrs):
+        tmp_corr = []
+        for t in range(d0 - 2):
+            tmp_corr.append(Obs([np.asarray(data[c])[:, t]], [ensemble_name]))
+        result[(corr_name[c], corr_source[c]) + tuple(sorted(corr_kappa[c]))] = tmp_corr
+
+    # Check that all data entries have the same number of configurations
+    if len(set([o[0].N for o in list(result.values())])) != 1:
+        raise Exception('Error: Not all correlators have the same number of configurations. bdio file is possibly corrupted.')
+
+    return result
+
+
+def read_dSdm(file_path, bdio_path='./libbdio.so', **kwargs):
+    """ Extract dSdm data from a bdio file and return it as a dictionary
+
+    The dictionary can be accessed with a tuple consisting of (type, kappa)
+
+    read_dSdm requires bdio to be compiled into a shared library. This can be achieved by
+    adding the flag -fPIC to CC and changing the all target to
+
+    all:		bdio.o $(LIBDIR)
+                gcc -shared -Wl,-soname,libbdio.so -o $(BUILDDIR)/libbdio.so $(BUILDDIR)/bdio.o
+                cp $(BUILDDIR)/libbdio.so $(LIBDIR)/
+
+    Parameters
+    ----------
+    file_path -- path to the bdio file
+    bdio_path -- path to the shared bdio library libbdio.so (default ./libbdio.so)
+    stop -- stops reading at given configuration number (default None)
+    """
+    bdio = ctypes.cdll.LoadLibrary(bdio_path)
+
+    bdio_open = bdio.bdio_open
+    bdio_open.restype = ctypes.c_void_p
+
+    bdio_close = bdio.bdio_close
+    bdio_close.restype = ctypes.c_int
+    bdio_close.argtypes = [ctypes.c_void_p]
+
+    bdio_seek_record = bdio.bdio_seek_record
+    bdio_seek_record.restype = ctypes.c_int
+    bdio_seek_record.argtypes = [ctypes.c_void_p]
+
+    bdio_get_rlen = bdio.bdio_get_rlen
+    bdio_get_rlen.restype = ctypes.c_int
+    bdio_get_rlen.argtypes = [ctypes.c_void_p]
+
+    bdio_get_ruinfo = bdio.bdio_get_ruinfo
+    bdio_get_ruinfo.restype = ctypes.c_int
+    bdio_get_ruinfo.argtypes = [ctypes.c_void_p]
+
+    bdio_read = bdio.bdio_read
+    bdio_read.restype = ctypes.c_size_t
+    bdio_read.argtypes = [ctypes.c_char_p, ctypes.c_size_t, ctypes.c_void_p]
+
+    bdio_read_f64 = bdio.bdio_read_f64
+    bdio_read_f64.restype = ctypes.c_size_t
+    bdio_read_f64.argtypes = [ctypes.c_void_p, ctypes.c_size_t, ctypes.c_void_p]
+
+    b_path = file_path.encode('utf-8')
+    read = 'r'
+    b_read = read.encode('utf-8')
+    form = 'Generic Correlator Format 1.0'
+    b_form = form.encode('utf-8')
+
+    ensemble_name = ''
+    volume = []  # lattice volume
+    boundary_conditions = []
+    corr_name = []  # Contains correlator names
+    corr_type = []  # Contains correlator data type (important for reading out numerical data)
+    corr_props = []  # Contains propagator types (Component of corr_kappa)
+    d0 = 0  # tvals
+    # d1 = 0  # nnoise
+    prop_kappa = []  # Contains propagator kappas (Component of corr_kappa)
+    # Check noise type for multiple replica?
+    cnfg_no = -1
+    corr_no = -1
+    data = []
+
+    fbdio = bdio_open(ctypes.c_char_p(b_path), ctypes.c_char_p(b_read), ctypes.c_char_p(b_form))
+
+    print('Reading of bdio file started')
+    while True:
+        bdio_seek_record(fbdio)
+        ruinfo = bdio_get_ruinfo(fbdio)
+        if ruinfo < 0:
+            # EOF reached
+            break
+        rlen = bdio_get_rlen(fbdio)
+        if ruinfo == 5:
+            d_buf = ctypes.c_double * (2 + d0)
+            pd_buf = d_buf()
+            ppd_buf = ctypes.c_void_p(ctypes.addressof(pd_buf))
+            iread = bdio_read_f64(ppd_buf, ctypes.c_size_t(rlen), ctypes.c_void_p(fbdio))
+            tmp_mean = np.mean(np.asarray(pd_buf[2:]))
+
+            data[corr_no].append(tmp_mean)
+            corr_no += 1
+        else:
+            alt_buf = ctypes.create_string_buffer(1024)
+            palt_buf = ctypes.c_char_p(ctypes.addressof(alt_buf))
+            iread = bdio_read(palt_buf, ctypes.c_size_t(rlen), ctypes.c_void_p(fbdio))
+            if rlen != iread:
+                print('Error')
+            for i, item in enumerate(alt_buf):
+                if item == b'\x00':
+                    alt_buf[i] = b' '
+            tmp_string = (alt_buf[:].decode("utf-8")).rstrip()
+            if ruinfo == 0:
+                creator = _get_kwd(tmp_string, 'CREATOR=')
+                ensemble_name = _get_kwd(tmp_string, 'ENSEMBLE=')
+                volume.append(int(_get_kwd(tmp_string, 'L0=')))
+                volume.append(int(_get_kwd(tmp_string, 'L1=')))
+                volume.append(int(_get_kwd(tmp_string, 'L2=')))
+                volume.append(int(_get_kwd(tmp_string, 'L3=')))
+                boundary_conditions.append(_get_kwd(tmp_string, 'BC0='))
+                boundary_conditions.append(_get_kwd(tmp_string, 'BC1='))
+                boundary_conditions.append(_get_kwd(tmp_string, 'BC2='))
+                boundary_conditions.append(_get_kwd(tmp_string, 'BC3='))
+
+            if ruinfo == 1:
+                corr_name.append(_get_corr_name(tmp_string, 'CORR_NAME='))
+                corr_type.append(_get_kwd(tmp_string, 'DATATYPE='))
+                corr_props.append(_get_kwd(tmp_string, 'PROP0='))
+                if d0 == 0:
+                    d0 = int(_get_kwd(tmp_string, 'D0='))
+                else:
+                    if d0 != int(_get_kwd(tmp_string, 'D0=')):
+                        print('Error: Varying number of time values')
+            if ruinfo == 2:
+                prop_kappa.append(_get_kwd(tmp_string, 'KAPPA='))
+            if ruinfo == 4:
+                if 'stop' in kwargs:
+                    if cnfg_no >= kwargs.get('stop') - 1:
+                        break
+                cnfg_no += 1
+                print('\r%s %i' % ('Reading configuration', cnfg_no + 1), end='\r')
+                if cnfg_no == 0:
+                    no_corrs = len(corr_name)
+                    data = []
+                    for c in range(no_corrs):
+                        data.append([])
+
+                corr_no = 0
+    bdio_close(fbdio)
+
+    print('\nCreator: ', creator)
+    print('Ensemble: ', ensemble_name)
+    print('Lattice volume: ', volume)
+    print('Boundary conditions: ', boundary_conditions)
+    print('Number of random sources: ', d0)
+    print('Number of corrs: ', len(corr_name))
+    print('Number of configurations: ', cnfg_no + 1)
+
+    corr_kappa = []  # Contains kappa values for both propagators of given correlation function
+    for item in corr_props:
+        corr_kappa.append(float(prop_kappa[int(item)]))
+
+    result = {}
+    for c in range(no_corrs):
+        result[(corr_name[c], str(corr_kappa[c]))] = Obs([np.asarray(data[c])], [ensemble_name])
+
+    # Check that all data entries have the same number of configurations
+    if len(set([o.N for o in list(result.values())])) != 1:
+        raise Exception('Error: Not all correlators have the same number of configurations. bdio file is possibly corrupted.')
+
+    return result
+
+ +
+ +
+
+
def read_ADerrors(file_path, bdio_path='./libbdio.so', **kwargs):
+ +
+ View Source +
def read_ADerrors(file_path, bdio_path='./libbdio.so', **kwargs):
+    """ Extract generic MCMC data from a bdio file
+
+    read_ADerrors requires bdio to be compiled into a shared library. This can be achieved by
+    adding the flag -fPIC to CC and changing the all target to
+
+    all:		bdio.o $(LIBDIR)
+                gcc -shared -Wl,-soname,libbdio.so -o $(BUILDDIR)/libbdio.so $(BUILDDIR)/bdio.o
+                cp $(BUILDDIR)/libbdio.so $(LIBDIR)/
+
+    Parameters
+    ----------
+    file_path -- path to the bdio file
+    bdio_path -- path to the shared bdio library libbdio.so (default ./libbdio.so)
+    """
+    bdio = ctypes.cdll.LoadLibrary(bdio_path)
+
+    bdio_open = bdio.bdio_open
+    bdio_open.restype = ctypes.c_void_p
+
+    bdio_close = bdio.bdio_close
+    bdio_close.restype = ctypes.c_int
+    bdio_close.argtypes = [ctypes.c_void_p]
+
+    bdio_seek_record = bdio.bdio_seek_record
+    bdio_seek_record.restype = ctypes.c_int
+    bdio_seek_record.argtypes = [ctypes.c_void_p]
+
+    bdio_get_rlen = bdio.bdio_get_rlen
+    bdio_get_rlen.restype = ctypes.c_int
+    bdio_get_rlen.argtypes = [ctypes.c_void_p]
+
+    bdio_get_ruinfo = bdio.bdio_get_ruinfo
+    bdio_get_ruinfo.restype = ctypes.c_int
+    bdio_get_ruinfo.argtypes = [ctypes.c_void_p]
+
+    bdio_read = bdio.bdio_read
+    bdio_read.restype = ctypes.c_size_t
+    bdio_read.argtypes = [ctypes.c_char_p, ctypes.c_size_t, ctypes.c_void_p]
+
+    bdio_read_f64 = bdio.bdio_read_f64
+    bdio_read_f64.restype = ctypes.c_size_t
+    bdio_read_f64.argtypes = [ctypes.c_void_p, ctypes.c_size_t, ctypes.c_void_p]
+
+    bdio_read_int32 = bdio.bdio_read_int32
+    bdio_read_int32.restype = ctypes.c_size_t
+    bdio_read_int32.argtypes = [ctypes.c_void_p, ctypes.c_size_t, ctypes.c_void_p]
+
+    b_path = file_path.encode('utf-8')
+    read = 'r'
+    b_read = read.encode('utf-8')
+
+    fbdio = bdio_open(ctypes.c_char_p(b_path), ctypes.c_char_p(b_read), None)
+
+    return_list = []
+
+    print('Reading of bdio file started')
+    while True:
+        bdio_seek_record(fbdio)
+        ruinfo = bdio_get_ruinfo(fbdio)
+
+        if ruinfo == 7:
+            print('MD5sum found')  # For now we just ignore these entries and do not perform any checks on them
+            continue
+
+        if ruinfo < 0:
+            # EOF reached
+            break
+        bdio_get_rlen(fbdio)
+
+        def read_c_double():
+            d_buf = ctypes.c_double
+            pd_buf = d_buf()
+            ppd_buf = ctypes.c_void_p(ctypes.addressof(pd_buf))
+            bdio_read_f64(ppd_buf, ctypes.c_size_t(8), ctypes.c_void_p(fbdio))
+            return pd_buf.value
+
+        mean = read_c_double()
+        print('mean', mean)
+
+        def read_c_size_t():
+            d_buf = ctypes.c_size_t
+            pd_buf = d_buf()
+            ppd_buf = ctypes.c_void_p(ctypes.addressof(pd_buf))
+            bdio_read_int32(ppd_buf, ctypes.c_size_t(4), ctypes.c_void_p(fbdio))
+            return pd_buf.value
+
+        neid = read_c_size_t()
+        print('neid', neid)
+
+        ndata = []
+        for index in range(neid):
+            ndata.append(read_c_size_t())
+        print('ndata', ndata)
+
+        nrep = []
+        for index in range(neid):
+            nrep.append(read_c_size_t())
+        print('nrep', nrep)
+
+        vrep = []
+        for index in range(neid):
+            vrep.append([])
+            for jndex in range(nrep[index]):
+                vrep[-1].append(read_c_size_t())
+        print('vrep', vrep)
+
+        ids = []
+        for index in range(neid):
+            ids.append(read_c_size_t())
+        print('ids', ids)
+
+        nt = []
+        for index in range(neid):
+            nt.append(read_c_size_t())
+        print('nt', nt)
+
+        zero = []
+        for index in range(neid):
+            zero.append(read_c_double())
+        print('zero', zero)
+
+        four = []
+        for index in range(neid):
+            four.append(read_c_double())
+        print('four', four)
+
+        d_buf = ctypes.c_double * np.sum(ndata)
+        pd_buf = d_buf()
+        ppd_buf = ctypes.c_void_p(ctypes.addressof(pd_buf))
+        bdio_read_f64(ppd_buf, ctypes.c_size_t(8 * np.sum(ndata)), ctypes.c_void_p(fbdio))
+        delta = pd_buf[:]
+
+        samples = np.split(np.asarray(delta) + mean, np.cumsum([a for su in vrep for a in su])[:-1])
+        no_reps = [len(o) for o in vrep]
+        assert len(ids) == len(no_reps)
+        tmp_names = []
+        ens_length = max([len(str(o)) for o in ids])
+        for loc_id, reps in zip(ids, no_reps):
+            for index in range(reps):
+                missing_chars = ens_length - len(str(loc_id))
+                tmp_names.append(str(loc_id) + ' ' * missing_chars + '|r' + '{0:03d}'.format(index))
+
+        return_list.append(Obs(samples, tmp_names))
+
+    bdio_close(fbdio)
+    print()
+    print(len(return_list), 'observable(s) extracted.')
+    return return_list
+
+ +
+ +

Extract generic MCMC data from a bdio file

read_ADerrors requires bdio to be compiled into a shared library. This can be achieved by
adding the flag -fPIC to CC and changing the all target to

all:        bdio.o $(LIBDIR)
            gcc -shared -Wl,-soname,libbdio.so -o $(BUILDDIR)/libbdio.so $(BUILDDIR)/bdio.o
            cp $(BUILDDIR)/libbdio.so $(LIBDIR)/

Parameters
  • file_path -- path to the bdio file
  • bdio_path -- path to the shared bdio library libbdio.so (default ./libbdio.so)
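A minimal usage sketch (the file name is a placeholder, and the module path pyerrors.input.bdio is an assumption based on this package's input layout):

```python
from pyerrors.input import bdio  # assumed module path

# 'measurement.bdio' is a hypothetical file name.
obs_list = bdio.read_ADerrors('measurement.bdio', bdio_path='./libbdio.so')
for obs in obs_list:
    obs.gamma_method()
    print(obs)
```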
+ + +
+
+
def write_ADerrors(obs_list, file_path, bdio_path='./libbdio.so', **kwargs):
+ +
+ View Source +
def write_ADerrors(obs_list, file_path, bdio_path='./libbdio.so', **kwargs):
+    """ Write Obs to a bdio file according to ADerrors conventions
+
+    write_ADerrors requires bdio to be compiled into a shared library. This can be achieved by
+    adding the flag -fPIC to CC and changing the all target to
+
+    all:		bdio.o $(LIBDIR)
+                gcc -shared -Wl,-soname,libbdio.so -o $(BUILDDIR)/libbdio.so $(BUILDDIR)/bdio.o
+                cp $(BUILDDIR)/libbdio.so $(LIBDIR)/
+
+    Parameters
+    ----------
+    obs_list -- list of Obs objects to be written to the file
+    file_path -- path to the bdio file
+    bdio_path -- path to the shared bdio library libbdio.so (default ./libbdio.so)
+    """
+
+    for obs in obs_list:
+        if not hasattr(obs, 'e_names'):
+            raise Exception('Run the gamma method first for all obs.')
+
+    bdio = ctypes.cdll.LoadLibrary(bdio_path)
+
+    bdio_open = bdio.bdio_open
+    bdio_open.restype = ctypes.c_void_p
+
+    bdio_close = bdio.bdio_close
+    bdio_close.restype = ctypes.c_int
+    bdio_close.argtypes = [ctypes.c_void_p]
+
+    bdio_start_record = bdio.bdio_start_record
+    bdio_start_record.restype = ctypes.c_int
+    bdio_start_record.argtypes = [ctypes.c_size_t, ctypes.c_size_t, ctypes.c_void_p]
+
+    bdio_flush_record = bdio.bdio_flush_record
+    bdio_flush_record.restype = ctypes.c_int
+    bdio_flush_record.argtypes = [ctypes.c_void_p]
+
+    bdio_write_f64 = bdio.bdio_write_f64
+    bdio_write_f64.restype = ctypes.c_size_t
+    bdio_write_f64.argtypes = [ctypes.c_void_p, ctypes.c_size_t, ctypes.c_void_p]
+
+    bdio_write_int32 = bdio.bdio_write_int32
+    bdio_write_int32.restype = ctypes.c_size_t
+    bdio_write_int32.argtypes = [ctypes.c_void_p, ctypes.c_size_t, ctypes.c_void_p]
+
+    b_path = file_path.encode('utf-8')
+    write = 'w'
+    b_write = write.encode('utf-8')
+    form = 'pyerrors ADerror export'
+    b_form = form.encode('utf-8')
+
+    fbdio = bdio_open(ctypes.c_char_p(b_path), ctypes.c_char_p(b_write), ctypes.c_char_p(b_form))
+
+    for obs in obs_list:
+        # mean = obs.value
+        neid = len(obs.e_names)
+        vrep = [[obs.shape[o] for o in sl] for sl in list(obs.e_content.values())]
+        vrep_write = [item for sublist in vrep for item in sublist]
+        ndata = [np.sum(o) for o in vrep]
+        nrep = [len(o) for o in vrep]
+        print('ndata', ndata)
+        print('nrep', nrep)
+        print('vrep', vrep)
+        keys = list(obs.e_content.keys())
+        ids = []
+        for key in keys:
+            try:  # Try to convert key to integer
+                ids.append(int(key))
+            except ValueError:  # If not possible, construct a hash from the key
+                ids.append(int(hashlib.sha256(key.encode('utf-8')).hexdigest(), 16) % 10 ** 8)
+        print('ids', ids)
+        nt = []
+        for e, e_name in enumerate(obs.e_names):
+
+            r_length = []
+            for r_name in obs.e_content[e_name]:
+                r_length.append(len(obs.deltas[r_name]))
+
+            # e_N = np.sum(r_length)
+            nt.append(max(r_length) // 2)
+        print('nt', nt)
+        zero = neid * [0.0]
+        four = neid * [4.0]
+        print('zero', zero)
+        print('four', four)
+        delta = np.concatenate([item for sublist in [[obs.deltas[o] for o in sl] for sl in list(obs.e_content.values())] for item in sublist])
+
+        bdio_start_record(0x00, 8, fbdio)
+
+        def write_c_double(double):
+            pd_buf = ctypes.c_double(double)
+            ppd_buf = ctypes.c_void_p(ctypes.addressof(pd_buf))
+            bdio_write_f64(ppd_buf, ctypes.c_size_t(8), ctypes.c_void_p(fbdio))
+
+        def write_c_size_t(int32):
+            pd_buf = ctypes.c_size_t(int32)
+            ppd_buf = ctypes.c_void_p(ctypes.addressof(pd_buf))
+            bdio_write_int32(ppd_buf, ctypes.c_size_t(4), ctypes.c_void_p(fbdio))
+
+        write_c_double(obs.value)
+        write_c_size_t(neid)
+
+        for element in ndata:
+            write_c_size_t(element)
+        for element in nrep:
+            write_c_size_t(element)
+        for element in vrep_write:
+            write_c_size_t(element)
+        for element in ids:
+            write_c_size_t(element)
+        for element in nt:
+            write_c_size_t(element)
+
+        for element in zero:
+            write_c_double(element)
+        for element in four:
+            write_c_double(element)
+
+        for element in delta:
+            write_c_double(element)
+
+    bdio_close(fbdio)
+    return 0
+
+ +
+ +

Write Obs to a bdio file according to ADerrors conventions

write_ADerrors requires bdio to be compiled into a shared library. This can be achieved by
adding the flag -fPIC to CC and changing the all target to

all:        bdio.o $(LIBDIR)
            gcc -shared -Wl,-soname,libbdio.so -o $(BUILDDIR)/libbdio.so $(BUILDDIR)/bdio.o
            cp $(BUILDDIR)/libbdio.so $(LIBDIR)/

Parameters
  • obs_list -- list of Obs objects to be written to the file
  • file_path -- path to the bdio file
  • bdio_path -- path to the shared bdio library libbdio.so (default ./libbdio.so)
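A minimal usage sketch (samples and 'ensemble_name' follow the conventions used elsewhere in these docs; the output file name and module path are assumptions):

```python
import pyerrors as pe
from pyerrors.input import bdio  # assumed module path

my_obs = pe.Obs([samples], ['ensemble_name'])
my_obs.gamma_method()  # required: write_ADerrors checks for e_names
bdio.write_ADerrors([my_obs], 'export.bdio', bdio_path='./libbdio.so')
```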
+ + +
+
+
def read_mesons(file_path, bdio_path='./libbdio.so', **kwargs):
+ +
+ View Source +
def read_mesons(file_path, bdio_path='./libbdio.so', **kwargs):
+    """ Extract mesons data from a bdio file and return it as a dictionary
+
+    The dictionary can be accessed with a tuple consisting of (type, source_position, kappa1, kappa2)
+
+    read_mesons requires bdio to be compiled into a shared library. This can be achieved by
+    adding the flag -fPIC to CC and changing the all target to
+
+    all:		bdio.o $(LIBDIR)
+                gcc -shared -Wl,-soname,libbdio.so -o $(BUILDDIR)/libbdio.so $(BUILDDIR)/bdio.o
+                cp $(BUILDDIR)/libbdio.so $(LIBDIR)/
+
+    Parameters
+    ----------
+    file_path -- path to the bdio file
+    bdio_path -- path to the shared bdio library libbdio.so (default ./libbdio.so)
+    stop -- stops reading at given configuration number (default None)
+    alternative_ensemble_name -- Manually overwrite ensemble name
+    """
+    bdio = ctypes.cdll.LoadLibrary(bdio_path)
+
+    bdio_open = bdio.bdio_open
+    bdio_open.restype = ctypes.c_void_p
+
+    bdio_close = bdio.bdio_close
+    bdio_close.restype = ctypes.c_int
+    bdio_close.argtypes = [ctypes.c_void_p]
+
+    bdio_seek_record = bdio.bdio_seek_record
+    bdio_seek_record.restype = ctypes.c_int
+    bdio_seek_record.argtypes = [ctypes.c_void_p]
+
+    bdio_get_rlen = bdio.bdio_get_rlen
+    bdio_get_rlen.restype = ctypes.c_int
+    bdio_get_rlen.argtypes = [ctypes.c_void_p]
+
+    bdio_get_ruinfo = bdio.bdio_get_ruinfo
+    bdio_get_ruinfo.restype = ctypes.c_int
+    bdio_get_ruinfo.argtypes = [ctypes.c_void_p]
+
+    bdio_read = bdio.bdio_read
+    bdio_read.restype = ctypes.c_size_t
+    bdio_read.argtypes = [ctypes.c_char_p, ctypes.c_size_t, ctypes.c_void_p]
+
+    bdio_read_f64 = bdio.bdio_read_f64
+    bdio_read_f64.restype = ctypes.c_size_t
+    bdio_read_f64.argtypes = [ctypes.c_void_p, ctypes.c_size_t, ctypes.c_void_p]
+
+    b_path = file_path.encode('utf-8')
+    read = 'r'
+    b_read = read.encode('utf-8')
+    form = 'Generic Correlator Format 1.0'
+    b_form = form.encode('utf-8')
+
+    ensemble_name = ''
+    volume = []  # lattice volume
+    boundary_conditions = []
+    corr_name = []  # Contains correlator names
+    corr_type = []  # Contains correlator data type (important for reading out numerical data)
+    corr_props = []  # Contains propagator types (Component of corr_kappa)
+    d0 = 0  # tvals
+    d1 = 0  # nnoise
+    prop_kappa = []  # Contains propagator kappas (Component of corr_kappa)
+    prop_source = []  # Contains propagator source positions
+    # Check noise type for multiple replica?
+    cnfg_no = -1
+    corr_no = -1
+    data = []
+
+    fbdio = bdio_open(ctypes.c_char_p(b_path), ctypes.c_char_p(b_read), ctypes.c_char_p(b_form))
+
+    print('Reading of bdio file started')
+    while True:
+        bdio_seek_record(fbdio)
+        ruinfo = bdio_get_ruinfo(fbdio)
+        if ruinfo < 0:
+            # EOF reached
+            break
+        rlen = bdio_get_rlen(fbdio)
+        if ruinfo == 5:
+            d_buf = ctypes.c_double * (2 + d0 * d1 * 2)
+            pd_buf = d_buf()
+            ppd_buf = ctypes.c_void_p(ctypes.addressof(pd_buf))
+            iread = bdio_read_f64(ppd_buf, ctypes.c_size_t(rlen), ctypes.c_void_p(fbdio))
+            if corr_type[corr_no] == 'complex':
+                tmp_mean = np.mean(np.asarray(np.split(np.asarray(pd_buf[2 + 2 * d1:-2 * d1:2]), d0 - 2)), axis=1)
+            else:
+                tmp_mean = np.mean(np.asarray(np.split(np.asarray(pd_buf[2 + d1:-d0 * d1 - d1]), d0 - 2)), axis=1)
+
+            data[corr_no].append(tmp_mean)
+            corr_no += 1
+        else:
+            alt_buf = ctypes.create_string_buffer(1024)
+            palt_buf = ctypes.c_char_p(ctypes.addressof(alt_buf))
+            iread = bdio_read(palt_buf, ctypes.c_size_t(rlen), ctypes.c_void_p(fbdio))
+            if rlen != iread:
+                print('Error')
+            for i, item in enumerate(alt_buf):
+                if item == b'\x00':
+                    alt_buf[i] = b' '
+            tmp_string = (alt_buf[:].decode("utf-8")).rstrip()
+            if ruinfo == 0:
+                ensemble_name = _get_kwd(tmp_string, 'ENSEMBLE=')
+                volume.append(int(_get_kwd(tmp_string, 'L0=')))
+                volume.append(int(_get_kwd(tmp_string, 'L1=')))
+                volume.append(int(_get_kwd(tmp_string, 'L2=')))
+                volume.append(int(_get_kwd(tmp_string, 'L3=')))
+                boundary_conditions.append(_get_kwd(tmp_string, 'BC0='))
+                boundary_conditions.append(_get_kwd(tmp_string, 'BC1='))
+                boundary_conditions.append(_get_kwd(tmp_string, 'BC2='))
+                boundary_conditions.append(_get_kwd(tmp_string, 'BC3='))
+
+            if ruinfo == 1:
+                corr_name.append(_get_corr_name(tmp_string, 'CORR_NAME='))
+                corr_type.append(_get_kwd(tmp_string, 'DATATYPE='))
+                corr_props.append([_get_kwd(tmp_string, 'PROP0='), _get_kwd(tmp_string, 'PROP1=')])
+                if d0 == 0:
+                    d0 = int(_get_kwd(tmp_string, 'D0='))
+                else:
+                    if d0 != int(_get_kwd(tmp_string, 'D0=')):
+                        print('Error: Varying number of time values')
+                if d1 == 0:
+                    d1 = int(_get_kwd(tmp_string, 'D1='))
+                else:
+                    if d1 != int(_get_kwd(tmp_string, 'D1=')):
+                        print('Error: Varying number of random sources')
+            if ruinfo == 2:
+                prop_kappa.append(_get_kwd(tmp_string, 'KAPPA='))
+                prop_source.append(_get_kwd(tmp_string, 'x0='))
+            if ruinfo == 4:
+                if 'stop' in kwargs:
+                    if cnfg_no >= kwargs.get('stop') - 1:
+                        break
+                cnfg_no += 1
+                print('\r%s %i' % ('Reading configuration', cnfg_no + 1), end='\r')
+                if cnfg_no == 0:
+                    no_corrs = len(corr_name)
+                    data = []
+                    for c in range(no_corrs):
+                        data.append([])
+
+                corr_no = 0
+    bdio_close(fbdio)
+
+    print('\nEnsemble: ', ensemble_name)
+    if 'alternative_ensemble_name' in kwargs:
+        ensemble_name = kwargs.get('alternative_ensemble_name')
+        print('Ensemble name overwritten to', ensemble_name)
+    print('Lattice volume: ', volume)
+    print('Boundary conditions: ', boundary_conditions)
+    print('Number of time values: ', d0)
+    print('Number of random sources: ', d1)
+    print('Number of corrs: ', len(corr_name))
+    print('Number of configurations: ', cnfg_no + 1)
+
+    corr_kappa = []  # Contains kappa values for both propagators of given correlation function
+    corr_source = []
+    for item in corr_props:
+        corr_kappa.append([float(prop_kappa[int(item[0])]), float(prop_kappa[int(item[1])])])
+        if prop_source[int(item[0])] != prop_source[int(item[1])]:
+            raise Exception('Source positions do not match for correlator ' + str(item))
+        else:
+            corr_source.append(int(prop_source[int(item[0])]))
+
+    result = {}
+    for c in range(no_corrs):
+        tmp_corr = []
+        for t in range(d0 - 2):
+            tmp_corr.append(Obs([np.asarray(data[c])[:, t]], [ensemble_name]))
+        result[(corr_name[c], corr_source[c]) + tuple(sorted(corr_kappa[c]))] = tmp_corr
+
+    # Check that all data entries have the same number of configurations
+    if len(set([o[0].N for o in list(result.values())])) != 1:
+        raise Exception('Error: Not all correlators have the same number of configurations. bdio file is possibly corrupted.')
+
+    return result
+
+ +
+ +

Extract mesons data from a bdio file and return it as a dictionary

The dictionary can be accessed with a tuple consisting of (type, source_position, kappa1, kappa2)

read_mesons requires bdio to be compiled into a shared library. This can be achieved by
adding the flag -fPIC to CC and changing the all target to

all:        bdio.o $(LIBDIR)
            gcc -shared -Wl,-soname,libbdio.so -o $(BUILDDIR)/libbdio.so $(BUILDDIR)/bdio.o
            cp $(BUILDDIR)/libbdio.so $(LIBDIR)/

Parameters
  • file_path -- path to the bdio file
  • bdio_path -- path to the shared bdio library libbdio.so (default ./libbdio.so)
  • stop -- stops reading at given configuration number (default None)
  • alternative_ensemble_name -- Manually overwrite ensemble name
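A usage sketch; the file name, correlator name and kappa values are illustrative:

```python
from pyerrors.input import bdio  # assumed module path

mesons = bdio.read_mesons('correlators.bdio', bdio_path='./libbdio.so', stop=500)
# Keys are (correlator name, source position, kappa1, kappa2); values are
# lists of Obs, one per timeslice.
pp_corr = mesons[('PP', 0, 0.13729, 0.13729)]
```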
+ + +
+
+
def read_dSdm(file_path, bdio_path='./libbdio.so', **kwargs):
+ +
+ View Source +
def read_dSdm(file_path, bdio_path='./libbdio.so', **kwargs):
+    """ Extract dSdm data from a bdio file and return it as a dictionary
+
+    The dictionary can be accessed with a tuple consisting of (type, kappa)
+
+    read_dSdm requires bdio to be compiled into a shared library. This can be achieved by
+    adding the flag -fPIC to CC and changing the all target to
+
+    all:		bdio.o $(LIBDIR)
+                gcc -shared -Wl,-soname,libbdio.so -o $(BUILDDIR)/libbdio.so $(BUILDDIR)/bdio.o
+                cp $(BUILDDIR)/libbdio.so $(LIBDIR)/
+
+    Parameters
+    ----------
+    file_path -- path to the bdio file
+    bdio_path -- path to the shared bdio library libbdio.so (default ./libbdio.so)
+    stop -- stops reading at given configuration number (default None)
+    """
+    bdio = ctypes.cdll.LoadLibrary(bdio_path)
+
+    bdio_open = bdio.bdio_open
+    bdio_open.restype = ctypes.c_void_p
+
+    bdio_close = bdio.bdio_close
+    bdio_close.restype = ctypes.c_int
+    bdio_close.argtypes = [ctypes.c_void_p]
+
+    bdio_seek_record = bdio.bdio_seek_record
+    bdio_seek_record.restype = ctypes.c_int
+    bdio_seek_record.argtypes = [ctypes.c_void_p]
+
+    bdio_get_rlen = bdio.bdio_get_rlen
+    bdio_get_rlen.restype = ctypes.c_int
+    bdio_get_rlen.argtypes = [ctypes.c_void_p]
+
+    bdio_get_ruinfo = bdio.bdio_get_ruinfo
+    bdio_get_ruinfo.restype = ctypes.c_int
+    bdio_get_ruinfo.argtypes = [ctypes.c_void_p]
+
+    bdio_read = bdio.bdio_read
+    bdio_read.restype = ctypes.c_size_t
+    bdio_read.argtypes = [ctypes.c_char_p, ctypes.c_size_t, ctypes.c_void_p]
+
+    bdio_read_f64 = bdio.bdio_read_f64
+    bdio_read_f64.restype = ctypes.c_size_t
+    bdio_read_f64.argtypes = [ctypes.c_void_p, ctypes.c_size_t, ctypes.c_void_p]
+
+    b_path = file_path.encode('utf-8')
+    read = 'r'
+    b_read = read.encode('utf-8')
+    form = 'Generic Correlator Format 1.0'
+    b_form = form.encode('utf-8')
+
+    ensemble_name = ''
+    volume = []  # lattice volume
+    boundary_conditions = []
+    corr_name = []  # Contains correlator names
+    corr_type = []  # Contains correlator data type (important for reading out numerical data)
+    corr_props = []  # Contains propagator types (Component of corr_kappa)
+    d0 = 0  # tvals
+    # d1 = 0  # nnoise
+    prop_kappa = []  # Contains propagator kappas (Component of corr_kappa)
+    # Check noise type for multiple replica?
+    cnfg_no = -1
+    corr_no = -1
+    data = []
+
+    fbdio = bdio_open(ctypes.c_char_p(b_path), ctypes.c_char_p(b_read), ctypes.c_char_p(b_form))
+
+    print('Reading of bdio file started')
+    while True:
+        bdio_seek_record(fbdio)
+        ruinfo = bdio_get_ruinfo(fbdio)
+        if ruinfo < 0:
+            # EOF reached
+            break
+        rlen = bdio_get_rlen(fbdio)
+        if ruinfo == 5:
+            d_buf = ctypes.c_double * (2 + d0)
+            pd_buf = d_buf()
+            ppd_buf = ctypes.c_void_p(ctypes.addressof(pd_buf))
+            iread = bdio_read_f64(ppd_buf, ctypes.c_size_t(rlen), ctypes.c_void_p(fbdio))
+            tmp_mean = np.mean(np.asarray(pd_buf[2:]))
+
+            data[corr_no].append(tmp_mean)
+            corr_no += 1
+        else:
+            alt_buf = ctypes.create_string_buffer(1024)
+            palt_buf = ctypes.c_char_p(ctypes.addressof(alt_buf))
+            iread = bdio_read(palt_buf, ctypes.c_size_t(rlen), ctypes.c_void_p(fbdio))
+            if rlen != iread:
+                print('Error')
+            for i, item in enumerate(alt_buf):
+                if item == b'\x00':
+                    alt_buf[i] = b' '
+            tmp_string = (alt_buf[:].decode("utf-8")).rstrip()
+            if ruinfo == 0:
+                creator = _get_kwd(tmp_string, 'CREATOR=')
+                ensemble_name = _get_kwd(tmp_string, 'ENSEMBLE=')
+                volume.append(int(_get_kwd(tmp_string, 'L0=')))
+                volume.append(int(_get_kwd(tmp_string, 'L1=')))
+                volume.append(int(_get_kwd(tmp_string, 'L2=')))
+                volume.append(int(_get_kwd(tmp_string, 'L3=')))
+                boundary_conditions.append(_get_kwd(tmp_string, 'BC0='))
+                boundary_conditions.append(_get_kwd(tmp_string, 'BC1='))
+                boundary_conditions.append(_get_kwd(tmp_string, 'BC2='))
+                boundary_conditions.append(_get_kwd(tmp_string, 'BC3='))
+
+            if ruinfo == 1:
+                corr_name.append(_get_corr_name(tmp_string, 'CORR_NAME='))
+                corr_type.append(_get_kwd(tmp_string, 'DATATYPE='))
+                corr_props.append(_get_kwd(tmp_string, 'PROP0='))
+                if d0 == 0:
+                    d0 = int(_get_kwd(tmp_string, 'D0='))
+                else:
+                    if d0 != int(_get_kwd(tmp_string, 'D0=')):
+                        print('Error: Varying number of time values')
+            if ruinfo == 2:
+                prop_kappa.append(_get_kwd(tmp_string, 'KAPPA='))
+            if ruinfo == 4:
+                if 'stop' in kwargs:
+                    if cnfg_no >= kwargs.get('stop') - 1:
+                        break
+                cnfg_no += 1
+                print('\r%s %i' % ('Reading configuration', cnfg_no + 1), end='\r')
+                if cnfg_no == 0:
+                    no_corrs = len(corr_name)
+                    data = []
+                    for c in range(no_corrs):
+                        data.append([])
+
+                corr_no = 0
+    bdio_close(fbdio)
+
+    print('\nCreator: ', creator)
+    print('Ensemble: ', ensemble_name)
+    print('Lattice volume: ', volume)
+    print('Boundary conditions: ', boundary_conditions)
+    print('Number of random sources: ', d0)
+    print('Number of corrs: ', len(corr_name))
+    print('Number of configurations: ', cnfg_no + 1)
+
+    corr_kappa = []  # Contains kappa values for both propagators of given correlation function
+    for item in corr_props:
+        corr_kappa.append(float(prop_kappa[int(item)]))
+
+    result = {}
+    for c in range(no_corrs):
+        result[(corr_name[c], str(corr_kappa[c]))] = Obs([np.asarray(data[c])], [ensemble_name])
+
+    # Check that all data entries have the same number of configurations
+    if len(set([o.N for o in list(result.values())])) != 1:
+        raise Exception('Error: Not all correlators have the same number of configurations. bdio file is possibly corrupted.')
+
+    return result
+
+ +
+ +

Extract dSdm data from a bdio file and return it as a dictionary

The dictionary can be accessed with a tuple consisting of (type, kappa)

read_dSdm requires bdio to be compiled into a shared library. This can be achieved by
adding the flag -fPIC to CC and changing the all target to

all:        bdio.o $(LIBDIR)
            gcc -shared -Wl,-soname,libbdio.so -o $(BUILDDIR)/libbdio.so $(BUILDDIR)/bdio.o
            cp $(BUILDDIR)/libbdio.so $(LIBDIR)/

Parameters
  • file_path -- path to the bdio file
  • bdio_path -- path to the shared bdio library libbdio.so (default ./libbdio.so)
  • stop -- stops reading at given configuration number (default None)
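A usage sketch with an illustrative file name and kappa value (the kappa part of the key is stored as a string):

```python
from pyerrors.input import bdio  # assumed module path

dSdm = bdio.read_dSdm('dSdm.bdio', bdio_path='./libbdio.so')
obs = dSdm[('dSdm', '0.13729')]  # key: (name, kappa)
```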
+ + +
+
+ + \ No newline at end of file diff --git a/docs/pyerrors/input/hadrons.html b/docs/pyerrors/input/hadrons.html new file mode 100644 index 00000000..3ed3dd36 --- /dev/null +++ b/docs/pyerrors/input/hadrons.html @@ -0,0 +1,666 @@ + + + + + + + pyerrors.input.hadrons API documentation + + + + + + + + + + + +
+
+

+pyerrors.input.hadrons

+ + +
+ View Source +
#!/usr/bin/env python
+# coding: utf-8
+
+import os
+import h5py
+import numpy as np
+from ..obs import Obs, CObs
+from ..correlators import Corr
+from ..npr import Npr_matrix
+
+
+def _get_files(path, filestem):
+    ls = []
+    for (dirpath, dirnames, filenames) in os.walk(path):
+        ls.extend(filenames)
+        break
+
+    # Clean up file list
+    files = []
+    for line in ls:
+        if line.startswith(filestem):
+            files.append(line)
+
+    if not files:
+        raise Exception('No files starting with ' + filestem + ' in folder ' + path)
+
+    def get_cnfg_number(n):
+        return int(n[len(filestem) + 1:-3])
+
+    # Sort according to configuration number
+    files.sort(key=get_cnfg_number)
+
+    # Check that configurations are evenly spaced
+    cnfg_numbers = []
+    for line in files:
+        cnfg_numbers.append(get_cnfg_number(line))
+
+    if not all(np.diff(cnfg_numbers) == np.diff(cnfg_numbers)[0]):
+        raise Exception('Configurations are not evenly spaced.')
+
+    return files
+
+
+def read_meson_hd5(path, filestem, ens_id, meson='meson_0', tree='meson'):
+    """Read hadrons meson hdf5 file and extract the meson labeled 'meson'
+
+    Parameters
+    -----------------
+    path : str
+        path to the files to read
+    filestem : str
+        namestem of the files to read
+    ens_id : str
+        name of the ensemble, required for internal bookkeeping
+    meson : str
+        label of the meson to be extracted, standard value meson_0 which
+        corresponds to the pseudoscalar pseudoscalar two-point function.
+    tree : str
+        Label of the upmost directory in the hdf5 file, default 'meson'
+        for outputs of the Meson module. Can be altered to read input
+        from other modules with similar structures.
+    """
+
+    files = _get_files(path, filestem)
+
+    corr_data = []
+    infos = []
+    for hd5_file in files:
+        file = h5py.File(path + '/' + hd5_file, "r")
+        raw_data = list(file[tree + '/' + meson + '/corr'])
+        real_data = [o[0] for o in raw_data]
+        corr_data.append(real_data)
+        if not infos:
+            for k, i in file[tree + '/' + meson].attrs.items():
+                infos.append(k + ': ' + i[0].decode())
+        file.close()
+    corr_data = np.array(corr_data)
+
+    l_obs = []
+    for c in corr_data.T:
+        l_obs.append(Obs([c], [ens_id]))
+
+    corr = Corr(l_obs)
+    corr.tag = r", ".join(infos)
+    return corr
+
+
+def read_ExternalLeg_hd5(path, filestem, ens_id, order='F'):
+    """Read hadrons ExternalLeg hdf5 file and output an array of CObs
+
+    Parameters
+    -----------------
+    path -- path to the files to read
+    filestem -- namestem of the files to read
+    ens_id -- name of the ensemble, required for internal bookkeeping
+    order -- order in which the array is to be reshaped;
+             'F' for the first index changing fastest (9 4x4 matrices, default),
+             'C' for the last index changing fastest (16 3x3 matrices).
+    """
+
+    files = _get_files(path, filestem)
+
+    mom = None
+
+    corr_data = []
+    for hd5_file in files:
+        file = h5py.File(path + '/' + hd5_file, "r")
+        raw_data = file['ExternalLeg/corr'][0][0].view('complex')
+        corr_data.append(raw_data)
+        if mom is not None:
+            assert np.allclose(mom, np.array(str(file['ExternalLeg/info'].attrs['pIn'])[3:-2].strip().split(' '), dtype=int))
+        else:
+            mom = np.array(str(file['ExternalLeg/info'].attrs['pIn'])[3:-2].strip().split(' '), dtype=int)
+        file.close()
+    corr_data = np.array(corr_data)
+
+    rolled_array = np.rollaxis(corr_data, 0, 5)
+
+    matrix = np.empty((rolled_array.shape[:-1]), dtype=object)
+    for si, sj, ci, cj in np.ndindex(rolled_array.shape[:-1]):
+        real = Obs([rolled_array[si, sj, ci, cj].real], [ens_id])
+        imag = Obs([rolled_array[si, sj, ci, cj].imag], [ens_id])
+        matrix[si, sj, ci, cj] = CObs(real, imag)
+        matrix[si, sj, ci, cj].gamma_method()
+
+    return Npr_matrix(matrix.swapaxes(1, 2).reshape((12, 12), order=order), mom_in=mom)
+
+
+def read_Bilinear_hd5(path, filestem, ens_id, order='F'):
+    """Read hadrons Bilinear hdf5 file and output an array of CObs
+
+    Parameters
+    -----------------
+    path -- path to the files to read
+    filestem -- namestem of the files to read
+    ens_id -- name of the ensemble, required for internal bookkeeping
+    order -- order in which the array is to be reshaped;
+             'F' for the first index changing fastest (9 4x4 matrices, default),
+             'C' for the last index changing fastest (16 3x3 matrices).
+    """
+
+    files = _get_files(path, filestem)
+
+    mom_in = None
+    mom_out = None
+
+    corr_data = {}
+    for hd5_file in files:
+        file = h5py.File(path + '/' + hd5_file, "r")
+        for i in range(16):
+            name = file['Bilinear/Bilinear_' + str(i) + '/info'].attrs['gamma'][0].decode('UTF-8')
+            if name not in corr_data:
+                corr_data[name] = []
+            raw_data = file['Bilinear/Bilinear_' + str(i) + '/corr'][0][0].view('complex')
+            corr_data[name].append(raw_data)
+            if mom_in is not None:
+                assert np.allclose(mom_in, np.array(str(file['Bilinear/Bilinear_' + str(i) + '/info'].attrs['pIn'])[3:-2].strip().split(' '), dtype=int))
+            else:
+                mom_in = np.array(str(file['Bilinear/Bilinear_' + str(i) + '/info'].attrs['pIn'])[3:-2].strip().split(' '), dtype=int)
+            if mom_out is not None:
+                assert np.allclose(mom_out, np.array(str(file['Bilinear/Bilinear_' + str(i) + '/info'].attrs['pOut'])[3:-2].strip().split(' '), dtype=int))
+            else:
+                mom_out = np.array(str(file['Bilinear/Bilinear_' + str(i) + '/info'].attrs['pOut'])[3:-2].strip().split(' '), dtype=int)
+
+        file.close()
+
+    result_dict = {}
+
+    for key, data in corr_data.items():
+        local_data = np.array(data)
+
+        rolled_array = np.rollaxis(local_data, 0, 5)
+
+        matrix = np.empty((rolled_array.shape[:-1]), dtype=object)
+        for si, sj, ci, cj in np.ndindex(rolled_array.shape[:-1]):
+            real = Obs([rolled_array[si, sj, ci, cj].real], [ens_id])
+            imag = Obs([rolled_array[si, sj, ci, cj].imag], [ens_id])
+            matrix[si, sj, ci, cj] = CObs(real, imag)
+            matrix[si, sj, ci, cj].gamma_method()
+
+        result_dict[key] = Npr_matrix(matrix.swapaxes(1, 2).reshape((12, 12), order=order), mom_in=mom_in, mom_out=mom_out)
+
+    return result_dict
+
+ +
+ +
+
+
def read_meson_hd5(path, filestem, ens_id, meson='meson_0', tree='meson'):
+ +
+ View Source +
def read_meson_hd5(path, filestem, ens_id, meson='meson_0', tree='meson'):
+    """Read hadrons meson hdf5 file and extract the meson labeled 'meson'
+
+    Parameters
+    -----------------
+    path : str
+        path to the files to read
+    filestem : str
+        namestem of the files to read
+    ens_id : str
+        name of the ensemble, required for internal bookkeeping
+    meson : str
+        label of the meson to be extracted, standard value meson_0 which
+        corresponds to the pseudoscalar pseudoscalar two-point function.
+    tree : str
+        Label of the upmost directory in the hdf5 file, default 'meson'
+        for outputs of the Meson module. Can be altered to read input
+        from other modules with similar structures.
+    """
+
+    files = _get_files(path, filestem)
+
+    corr_data = []
+    infos = []
+    for hd5_file in files:
+        file = h5py.File(path + '/' + hd5_file, "r")
+        raw_data = list(file[tree + '/' + meson + '/corr'])
+        real_data = [o[0] for o in raw_data]
+        corr_data.append(real_data)
+        if not infos:
+            for k, i in file[tree + '/' + meson].attrs.items():
+                infos.append(k + ': ' + i[0].decode())
+        file.close()
+    corr_data = np.array(corr_data)
+
+    l_obs = []
+    for c in corr_data.T:
+        l_obs.append(Obs([c], [ens_id]))
+
+    corr = Corr(l_obs)
+    corr.tag = r", ".join(infos)
+    return corr
+
+ +
+ +

Read hadrons meson hdf5 file and extract the meson labeled 'meson'

Parameters
  • path (str): path to the files to read
  • filestem (str): namestem of the files to read
  • ens_id (str): name of the ensemble, required for internal bookkeeping
  • meson (str): label of the meson to be extracted, standard value meson_0 which corresponds to the pseudoscalar pseudoscalar two-point function.
  • tree (str): Label of the upmost directory in the hdf5 file, default 'meson' for outputs of the Meson module. Can be altered to read input from other modules with similar structures.
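A usage sketch; the directory, file stem and ensemble id are placeholders. The configuration number is parsed from each file name, so the files must follow the naming scheme produced by hadrons:

```python
from pyerrors.input import hadrons

corr = hadrons.read_meson_hd5('./data', 'meson_prop', ens_id='A653')
corr.gamma_method()
print(corr.tag)  # metadata collected from the hdf5 attributes
```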
+ + +
+
+
def read_ExternalLeg_hd5(path, filestem, ens_id, order='F'):
+ +
+ View Source +
def read_ExternalLeg_hd5(path, filestem, ens_id, order='F'):
+    """Read hadrons ExternalLeg hdf5 file and output an array of CObs
+
+    Parameters
+    -----------------
+    path -- path to the files to read
+    filestem -- namestem of the files to read
+    ens_id -- name of the ensemble, required for internal bookkeeping
+    order -- order in which the array is to be reshaped;
+             'F' for the first index changing fastest (9 4x4 matrices, default),
+             'C' for the last index changing fastest (16 3x3 matrices).
+    """
+
+    files = _get_files(path, filestem)
+
+    mom = None
+
+    corr_data = []
+    for hd5_file in files:
+        file = h5py.File(path + '/' + hd5_file, "r")
+        raw_data = file['ExternalLeg/corr'][0][0].view('complex')
+        corr_data.append(raw_data)
+        if mom is not None:
+            assert np.allclose(mom, np.array(str(file['ExternalLeg/info'].attrs['pIn'])[3:-2].strip().split(' '), dtype=int))
+        else:
+            mom = np.array(str(file['ExternalLeg/info'].attrs['pIn'])[3:-2].strip().split(' '), dtype=int)
+        file.close()
+    corr_data = np.array(corr_data)
+
+    rolled_array = np.rollaxis(corr_data, 0, 5)
+
+    matrix = np.empty((rolled_array.shape[:-1]), dtype=object)
+    for si, sj, ci, cj in np.ndindex(rolled_array.shape[:-1]):
+        real = Obs([rolled_array[si, sj, ci, cj].real], [ens_id])
+        imag = Obs([rolled_array[si, sj, ci, cj].imag], [ens_id])
+        matrix[si, sj, ci, cj] = CObs(real, imag)
+        matrix[si, sj, ci, cj].gamma_method()
+
+    return Npr_matrix(matrix.swapaxes(1, 2).reshape((12, 12), order=order), mom_in=mom)
+
+ +
+ +

Read hadrons ExternalLeg hdf5 file and output an array of CObs

Parameters
  • path -- path to the files to read
  • filestem -- namestem of the files to read
  • ens_id -- name of the ensemble, required for internal bookkeeping
  • order -- order in which the array is to be reshaped; 'F' for the first index changing fastest (9 4x4 matrices, default), 'C' for the last index changing fastest (16 3x3 matrices).
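A usage sketch with placeholder path, file stem and ensemble id; the function returns a 12x12 Npr_matrix of CObs (spin and colour indices combined) with the incoming momentum attached:

```python
from pyerrors.input import hadrons

leg = hadrons.read_ExternalLeg_hd5('./data', 'ExternalLeg', ens_id='A653')
print(leg.shape, leg.mom_in)  # (12, 12) and the momentum from the file metadata
```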
+ + +
+
+
def read_Bilinear_hd5(path, filestem, ens_id, order='F'):
+ +
+ View Source +
def read_Bilinear_hd5(path, filestem, ens_id, order='F'):
+    """Read hadrons Bilinear hdf5 file and output an array of CObs
+
+    Parameters
+    -----------------
+    path -- path to the files to read
+    filestem -- namestem of the files to read
+    ens_id -- name of the ensemble, required for internal bookkeeping
+    order -- order in which the array is to be reshaped;
+             'F' for the first index changing fastest (9 4x4 matrices, default),
+             'C' for the last index changing fastest (16 3x3 matrices).
+    """
+
+    files = _get_files(path, filestem)
+
+    mom_in = None
+    mom_out = None
+
+    corr_data = {}
+    for hd5_file in files:
+        file = h5py.File(path + '/' + hd5_file, "r")
+        for i in range(16):
+            name = file['Bilinear/Bilinear_' + str(i) + '/info'].attrs['gamma'][0].decode('UTF-8')
+            if name not in corr_data:
+                corr_data[name] = []
+            raw_data = file['Bilinear/Bilinear_' + str(i) + '/corr'][0][0].view('complex')
+            corr_data[name].append(raw_data)
+            if mom_in is not None:
+                assert np.allclose(mom_in, np.array(str(file['Bilinear/Bilinear_' + str(i) + '/info'].attrs['pIn'])[3:-2].strip().split(' '), dtype=int))
+            else:
+                mom_in = np.array(str(file['Bilinear/Bilinear_' + str(i) + '/info'].attrs['pIn'])[3:-2].strip().split(' '), dtype=int)
+            if mom_out is not None:
+                assert np.allclose(mom_out, np.array(str(file['Bilinear/Bilinear_' + str(i) + '/info'].attrs['pOut'])[3:-2].strip().split(' '), dtype=int))
+            else:
+                mom_out = np.array(str(file['Bilinear/Bilinear_' + str(i) + '/info'].attrs['pOut'])[3:-2].strip().split(' '), dtype=int)
+
+        file.close()
+
+    result_dict = {}
+
+    for key, data in corr_data.items():
+        local_data = np.array(data)
+
+        rolled_array = np.rollaxis(local_data, 0, 5)
+
+        matrix = np.empty((rolled_array.shape[:-1]), dtype=object)
+        for si, sj, ci, cj in np.ndindex(rolled_array.shape[:-1]):
+            real = Obs([rolled_array[si, sj, ci, cj].real], [ens_id])
+            imag = Obs([rolled_array[si, sj, ci, cj].imag], [ens_id])
+            matrix[si, sj, ci, cj] = CObs(real, imag)
+            matrix[si, sj, ci, cj].gamma_method()
+
+        result_dict[key] = Npr_matrix(matrix.swapaxes(1, 2).reshape((12, 12), order=order), mom_in=mom_in, mom_out=mom_out)
+
+    return result_dict
+
+ +
+ +

Read hadrons Bilinear hdf5 file and output an array of CObs

+ +
Parameters
+ +
    +
  • path -- path to the files to read
  • +
  • filestem -- namestem of the files to read
  • +
  • ens_id -- name of the ensemble, required for internal bookkeeping
  • +
  • order -- order in which the array is to be reshaped: 'F' for the first index changing fastest (9 4x4 matrices), the default; +'C' for the last index changing fastest (16 3x3 matrices).
  • +
+
+ + +
+
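A minimal usage sketch (paths, names and the gamma label are hypothetical):

```python
import pyerrors.input.hadrons as hadrons  # assumed module for the hadrons readers

# returns a dict mapping the gamma label of each of the 16 bilinears
# to a 12x12 Npr_matrix of CObs
bilinears = hadrons.read_Bilinear_hd5('/data/runA', 'Bilinear_stem', 'ensembleA')
vertex = bilinears['Gamma5']  # hypothetical gamma label
```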
+ + \ No newline at end of file diff --git a/docs/pyerrors/input/misc.html b/docs/pyerrors/input/misc.html new file mode 100644 index 00000000..d07702a3 --- /dev/null +++ b/docs/pyerrors/input/misc.html @@ -0,0 +1,521 @@ + + + + + + + pyerrors.input.misc API documentation + + + + + + + + + + + +
+
+

+pyerrors.input.misc

+ + +
+ View Source +
#!/usr/bin/env python
+# coding: utf-8
+
+import os
+import fnmatch
+import re
+import struct
+import numpy as np
+from ..obs import Obs
+
+
+def read_pbp(path, prefix, **kwargs):
+    """Read pbp format from given folder structure. Returns a list of length nrw
+
+    Keyword arguments
+    -----------------
+    r_start -- list which contains the first config to be read for each replicum
+    r_stop -- list which contains the last config to be read for each replicum
+
+    """
+
+    extract_nfct = 1
+
+    ls = []
+    for (dirpath, dirnames, filenames) in os.walk(path):
+        ls.extend(filenames)
+        break
+
+    if not ls:
+        raise Exception('Error, directory not found')
+
+    # Exclude files with different names
+    for exc in ls:
+        if not fnmatch.fnmatch(exc, prefix + '*.dat'):
+            ls = list(set(ls) - set([exc]))
+    if len(ls) > 1:
+        ls.sort(key=lambda x: int(re.findall(r'\d+', x[len(prefix):])[0]))
+    replica = len(ls)
+
+    if 'r_start' in kwargs:
+        r_start = kwargs.get('r_start')
+        if len(r_start) != replica:
+            raise Exception('r_start does not match number of replicas')
+        # Adjust Configuration numbering to python index
+        r_start = [o - 1 if o else None for o in r_start]
+    else:
+        r_start = [None] * replica
+
+    if 'r_stop' in kwargs:
+        r_stop = kwargs.get('r_stop')
+        if len(r_stop) != replica:
+            raise Exception('r_stop does not match number of replicas')
+    else:
+        r_stop = [None] * replica
+
+    print(r'Read <\bar{\psi}\psi> from', prefix[:-1], ',', replica, 'replica', end='')
+
+    print_err = 0
+    if 'print_err' in kwargs:
+        print_err = 1
+        print()
+
+    deltas = []
+
+    for rep in range(replica):
+        tmp_array = []
+        with open(path + '/' + ls[rep], 'rb') as fp:
+
+            # header
+            t = fp.read(4)  # number of reweighting factors
+            if rep == 0:
+                nrw = struct.unpack('i', t)[0]
+                for k in range(nrw):
+                    deltas.append([])
+            else:
+                if nrw != struct.unpack('i', t)[0]:
+                    raise Exception('Error: different number of reweighting factors for replicum', rep)
+
+            for k in range(nrw):
+                tmp_array.append([])
+
+            # This block is necessary for openQCD1.6 ms1 files
+            nfct = []
+            if extract_nfct == 1:
+                for i in range(nrw):
+                    t = fp.read(4)
+                    nfct.append(struct.unpack('i', t)[0])
+                print('nfct: ', nfct)  # Hasenbusch factor, 1 for rat reweighting
+            else:
+                for i in range(nrw):
+                    nfct.append(1)
+
+            nsrc = []
+            for i in range(nrw):
+                t = fp.read(4)
+                nsrc.append(struct.unpack('i', t)[0])
+
+            # body
+            while True:
+                t = fp.read(4)
+                if len(t) < 4:
+                    break
+                if print_err:
+                    config_no = struct.unpack('i', t)
+                for i in range(nrw):
+                    tmp_nfct = 1.0
+                    for j in range(nfct[i]):
+                        t = fp.read(8 * nsrc[i])
+                        t = fp.read(8 * nsrc[i])
+                        tmp_rw = struct.unpack('d' * nsrc[i], t)
+                        tmp_nfct *= np.mean(np.asarray(tmp_rw))
+                        if print_err:
+                            print(config_no, i, j, np.mean(np.asarray(tmp_rw)), np.std(np.asarray(tmp_rw)))
+                            print('Sources:', np.asarray(tmp_rw))
+                            print('Partial factor:', tmp_nfct)
+                    tmp_array[i].append(tmp_nfct)
+
+            for k in range(nrw):
+                deltas[k].append(tmp_array[k][r_start[rep]:r_stop[rep]])
+
+    rep_names = []
+    for entry in ls:
+        truncated_entry = entry.split('.')[0]
+        idx = truncated_entry.index('r')
+        rep_names.append(truncated_entry[:idx] + '|' + truncated_entry[idx:])
+    print(',', nrw, r'<\bar{\psi}\psi> with', nsrc, 'sources')
+    result = []
+    for t in range(nrw):
+        result.append(Obs(deltas[t], rep_names))
+
+    return result
+
+ +
+ +
+
+
#   + + + def + read_pbp(path, prefix, **kwargs): +
+ +
+ View Source +
def read_pbp(path, prefix, **kwargs):
+    """Read pbp format from given folder structure. Returns a list of length nrw
+
+    Keyword arguments
+    -----------------
+    r_start -- list which contains the first config to be read for each replicum
+    r_stop -- list which contains the last config to be read for each replicum
+
+    """
+
+    extract_nfct = 1
+
+    ls = []
+    for (dirpath, dirnames, filenames) in os.walk(path):
+        ls.extend(filenames)
+        break
+
+    if not ls:
+        raise Exception('Error, directory not found')
+
+    # Exclude files with different names
+    for exc in ls:
+        if not fnmatch.fnmatch(exc, prefix + '*.dat'):
+            ls = list(set(ls) - set([exc]))
+    if len(ls) > 1:
+        ls.sort(key=lambda x: int(re.findall(r'\d+', x[len(prefix):])[0]))
+    replica = len(ls)
+
+    if 'r_start' in kwargs:
+        r_start = kwargs.get('r_start')
+        if len(r_start) != replica:
+            raise Exception('r_start does not match number of replicas')
+        # Adjust Configuration numbering to python index
+        r_start = [o - 1 if o else None for o in r_start]
+    else:
+        r_start = [None] * replica
+
+    if 'r_stop' in kwargs:
+        r_stop = kwargs.get('r_stop')
+        if len(r_stop) != replica:
+            raise Exception('r_stop does not match number of replicas')
+    else:
+        r_stop = [None] * replica
+
+    print(r'Read <\bar{\psi}\psi> from', prefix[:-1], ',', replica, 'replica', end='')
+
+    print_err = 0
+    if 'print_err' in kwargs:
+        print_err = 1
+        print()
+
+    deltas = []
+
+    for rep in range(replica):
+        tmp_array = []
+        with open(path + '/' + ls[rep], 'rb') as fp:
+
+            # header
+            t = fp.read(4)  # number of reweighting factors
+            if rep == 0:
+                nrw = struct.unpack('i', t)[0]
+                for k in range(nrw):
+                    deltas.append([])
+            else:
+                if nrw != struct.unpack('i', t)[0]:
+                    raise Exception('Error: different number of reweighting factors for replicum', rep)
+
+            for k in range(nrw):
+                tmp_array.append([])
+
+            # This block is necessary for openQCD1.6 ms1 files
+            nfct = []
+            if extract_nfct == 1:
+                for i in range(nrw):
+                    t = fp.read(4)
+                    nfct.append(struct.unpack('i', t)[0])
+                print('nfct: ', nfct)  # Hasenbusch factor, 1 for rat reweighting
+            else:
+                for i in range(nrw):
+                    nfct.append(1)
+
+            nsrc = []
+            for i in range(nrw):
+                t = fp.read(4)
+                nsrc.append(struct.unpack('i', t)[0])
+
+            # body
+            while True:
+                t = fp.read(4)
+                if len(t) < 4:
+                    break
+                if print_err:
+                    config_no = struct.unpack('i', t)
+                for i in range(nrw):
+                    tmp_nfct = 1.0
+                    for j in range(nfct[i]):
+                        t = fp.read(8 * nsrc[i])
+                        t = fp.read(8 * nsrc[i])
+                        tmp_rw = struct.unpack('d' * nsrc[i], t)
+                        tmp_nfct *= np.mean(np.asarray(tmp_rw))
+                        if print_err:
+                            print(config_no, i, j, np.mean(np.asarray(tmp_rw)), np.std(np.asarray(tmp_rw)))
+                            print('Sources:', np.asarray(tmp_rw))
+                            print('Partial factor:', tmp_nfct)
+                    tmp_array[i].append(tmp_nfct)
+
+            for k in range(nrw):
+                deltas[k].append(tmp_array[k][r_start[rep]:r_stop[rep]])
+
+    rep_names = []
+    for entry in ls:
+        truncated_entry = entry.split('.')[0]
+        idx = truncated_entry.index('r')
+        rep_names.append(truncated_entry[:idx] + '|' + truncated_entry[idx:])
+    print(',', nrw, r'<\bar{\psi}\psi> with', nsrc, 'sources')
+    result = []
+    for t in range(nrw):
+        result.append(Obs(deltas[t], rep_names))
+
+    return result
+
+ +
+ +

Read pbp format from given folder structure. Returns a list of length nrw

+ +
Keyword arguments
+ +

r_start -- list which contains the first config to be read for each replicum +r_stop -- list which contains the last config to be read for each replicum

+
+ + +
+
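A minimal usage sketch (folder layout and configuration ranges are hypothetical):

```python
from pyerrors.input.misc import read_pbp  # assumed import path

# reads pbp_r0.dat, pbp_r1.dat, ... from /data/runA; r_start/r_stop select
# a configuration range per replicum (1-indexed)
pbp = read_pbp('/data/runA', 'pbp_', r_start=[1, 1], r_stop=[500, 500])
for obs in pbp:  # one Obs per reweighting factor
    obs.gamma_method()
    print(obs)
```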
+ + \ No newline at end of file diff --git a/docs/pyerrors/input/openQCD.html b/docs/pyerrors/input/openQCD.html new file mode 100644 index 00000000..e3ceb04c --- /dev/null +++ b/docs/pyerrors/input/openQCD.html @@ -0,0 +1,934 @@ + + + + + + + pyerrors.input.openQCD API documentation + + + + + + + + + + + +
+
+

+pyerrors.input.openQCD

+ + +
+ View Source +
#!/usr/bin/env python
+# coding: utf-8
+
+import os
+import fnmatch
+import re
+import struct
+import numpy as np
+from ..obs import Obs
+from ..fits import fit_lin
+
+
+def read_rwms(path, prefix, version='2.0', names=None, **kwargs):
+    """Read rwms format from given folder structure. Returns a list of length nrw
+
+    Parameters
+    -----------------
+    version -- version of openQCD, default 2.0
+
+    Keyword arguments
+    -----------------
+    r_start -- list which contains the first config to be read for each replicum
+    r_stop -- list which contains the last config to be read for each replicum
+    postfix -- postfix of the file to read, e.g. '.ms1' for openQCD-files
+    """
+    known_oqcd_versions = ['1.4', '1.6', '2.0']
+    if not (version in known_oqcd_versions):
+        raise Exception('Unknown openQCD version defined!')
+    print("Working with openQCD version " + version)
+    if 'postfix' in kwargs:
+        postfix = kwargs.get('postfix')
+    else:
+        postfix = ''
+    ls = []
+    for (dirpath, dirnames, filenames) in os.walk(path):
+        ls.extend(filenames)
+        break
+
+    if not ls:
+        raise Exception('Error, directory not found')
+
+    # Exclude files with different names
+    for exc in ls:
+        if not fnmatch.fnmatch(exc, prefix + '*' + postfix + '.dat'):
+            ls = list(set(ls) - set([exc]))
+    if len(ls) > 1:
+        ls.sort(key=lambda x: int(re.findall(r'\d+', x[len(prefix):])[0]))
+    replica = len(ls)
+
+    if 'r_start' in kwargs:
+        r_start = kwargs.get('r_start')
+        if len(r_start) != replica:
+            raise Exception('r_start does not match number of replicas')
+        # Adjust Configuration numbering to python index
+        r_start = [o - 1 if o else None for o in r_start]
+    else:
+        r_start = [None] * replica
+
+    if 'r_stop' in kwargs:
+        r_stop = kwargs.get('r_stop')
+        if len(r_stop) != replica:
+            raise Exception('r_stop does not match number of replicas')
+    else:
+        r_stop = [None] * replica
+
+    print('Read reweighting factors from', prefix[:-1], ',', replica, 'replica', end='')
+
+    # Adjust replica names to new bookmarking system
+    if names is None:
+        rep_names = []
+        for entry in ls:
+            truncated_entry = entry.split('.')[0]
+            idx = truncated_entry.index('r')
+            rep_names.append(truncated_entry[:idx] + '|' + truncated_entry[idx:])
+
+    print_err = 0
+    if 'print_err' in kwargs:
+        print_err = 1
+        print()
+
+    deltas = []
+
+    for rep in range(replica):
+        tmp_array = []
+        with open(path + '/' + ls[rep], 'rb') as fp:
+
+            # header
+            t = fp.read(4)  # number of reweighting factors
+            if rep == 0:
+                nrw = struct.unpack('i', t)[0]
+                if version == '2.0':
+                    nrw = int(nrw / 2)
+                for k in range(nrw):
+                    deltas.append([])
+            else:
+                if ((nrw != struct.unpack('i', t)[0] and (not version == '2.0')) or (nrw != struct.unpack('i', t)[0] / 2 and version == '2.0')):  # little weird if-clause due to the /2 operation needed.
+                    raise Exception('Error: different number of reweighting factors for replicum', rep)
+
+            for k in range(nrw):
+                tmp_array.append([])
+
+            # This block is necessary for openQCD1.6 and openQCD2.0 ms1 files
+            nfct = []
+            if version in ['1.6', '2.0']:
+                for i in range(nrw):
+                    t = fp.read(4)
+                    nfct.append(struct.unpack('i', t)[0])
+                # print('nfct: ', nfct) # Hasenbusch factor, 1 for rat reweighting
+            else:
+                for i in range(nrw):
+                    nfct.append(1)
+
+            nsrc = []
+            for i in range(nrw):
+                t = fp.read(4)
+                nsrc.append(struct.unpack('i', t)[0])
+            if version == '2.0':
+                if not struct.unpack('i', fp.read(4))[0] == 0:
+                    print('something is wrong!')
+
+            # body
+            while True:
+                t = fp.read(4)
+                if len(t) < 4:
+                    break
+                if print_err:
+                    config_no = struct.unpack('i', t)
+                for i in range(nrw):
+                    if(version == '2.0'):
+                        tmpd = _read_array_openQCD2(fp)
+                        tmpd = _read_array_openQCD2(fp)
+                        tmp_rw = tmpd['arr']
+                        tmp_nfct = 1.0
+                        for j in range(tmpd['n'][0]):
+                            tmp_nfct *= np.mean(np.exp(-np.asarray(tmp_rw[j])))
+                            if print_err:
+                                print(config_no, i, j, np.mean(np.exp(-np.asarray(tmp_rw[j]))), np.std(np.exp(-np.asarray(tmp_rw[j]))))
+                                print('Sources:', np.exp(-np.asarray(tmp_rw[j])))
+                                print('Partial factor:', tmp_nfct)
+                    elif version == '1.6' or version == '1.4':
+                        tmp_nfct = 1.0
+                        for j in range(nfct[i]):
+                            t = fp.read(8 * nsrc[i])
+                            t = fp.read(8 * nsrc[i])
+                            tmp_rw = struct.unpack('d' * nsrc[i], t)
+                            tmp_nfct *= np.mean(np.exp(-np.asarray(tmp_rw)))
+                            if print_err:
+                                print(config_no, i, j, np.mean(np.exp(-np.asarray(tmp_rw))), np.std(np.exp(-np.asarray(tmp_rw))))
+                                print('Sources:', np.exp(-np.asarray(tmp_rw)))
+                                print('Partial factor:', tmp_nfct)
+                    tmp_array[i].append(tmp_nfct)
+
+            for k in range(nrw):
+                deltas[k].append(tmp_array[k][r_start[rep]:r_stop[rep]])
+
+    print(',', nrw, 'reweighting factors with', nsrc, 'sources')
+    result = []
+    for t in range(nrw):
+        if names is None:
+            result.append(Obs(deltas[t], rep_names))
+        else:
+            print(names)
+            result.append(Obs(deltas[t], names))
+    return result
+
+
+def extract_t0(path, prefix, dtr_read, xmin, spatial_extent, fit_range=5, **kwargs):
+    """Extract t0 from given .ms.dat files. Returns t0 as Obs.
+
+    It is assumed that all boundary effects have sufficiently decayed at x0=xmin.
+    The data around the zero crossing of t^2<E> - 0.3 is fitted with a linear function
+    from which the exact root is extracted.
+    Only works with openQCD v 1.2.
+
+    Parameters
+    ----------
+    path -- Path to .ms.dat files
+    prefix -- Ensemble prefix
+    dtr_read -- Determines how many trajectories should be skipped when reading the ms.dat files.
+                Corresponds to dtr_cnfg / dtr_ms in the openQCD input file.
+    xmin -- First timeslice where the boundary effects have sufficiently decayed.
+    spatial_extent -- spatial extent of the lattice, required for normalization.
+    fit_range -- Number of data points left and right of the zero crossing to be included in the linear fit. (Default: 5)
+
+    Keyword arguments
+    -----------------
+    r_start -- list which contains the first config to be read for each replicum.
+    r_stop -- list which contains the last config to be read for each replicum.
+    plaquette -- If true extract the plaquette estimate of t0 instead.
+    """
+
+    ls = []
+    for (dirpath, dirnames, filenames) in os.walk(path):
+        ls.extend(filenames)
+        break
+
+    if not ls:
+        raise Exception('Error, directory not found')
+
+    # Exclude files with different names
+    for exc in ls:
+        if not fnmatch.fnmatch(exc, prefix + '*.ms.dat'):
+            ls = list(set(ls) - set([exc]))
+    if len(ls) > 1:
+        ls.sort(key=lambda x: int(re.findall(r'\d+', x[len(prefix):])[0]))
+    replica = len(ls)
+
+    if 'r_start' in kwargs:
+        r_start = kwargs.get('r_start')
+        if len(r_start) != replica:
+            raise Exception('r_start does not match number of replicas')
+        # Adjust Configuration numbering to python index
+        r_start = [o - 1 if o else None for o in r_start]
+    else:
+        r_start = [None] * replica
+
+    if 'r_stop' in kwargs:
+        r_stop = kwargs.get('r_stop')
+        if len(r_stop) != replica:
+            raise Exception('r_stop does not match number of replicas')
+    else:
+        r_stop = [None] * replica
+
+    print('Extract t0 from', prefix, ',', replica, 'replica')
+
+    Ysum = []
+
+    for rep in range(replica):
+
+        with open(path + '/' + ls[rep], 'rb') as fp:
+            # Read header
+            t = fp.read(12)
+            header = struct.unpack('iii', t)
+            if rep == 0:
+                dn = header[0]
+                nn = header[1]
+                tmax = header[2]
+            elif dn != header[0] or nn != header[1] or tmax != header[2]:
+                raise Exception('Replica parameters do not match.')
+
+            t = fp.read(8)
+            if rep == 0:
+                eps = struct.unpack('d', t)[0]
+                print('Step size:', eps, ', Maximal t value:', dn * (nn) * eps)
+            elif eps != struct.unpack('d', t)[0]:
+                raise Exception('Values for eps do not match among replica.')
+
+            Ysl = []
+
+            # Read body
+            while True:
+                t = fp.read(4)
+                if(len(t) < 4):
+                    break
+                nc = struct.unpack('i', t)[0]
+
+                t = fp.read(8 * tmax * (nn + 1))
+                if kwargs.get('plaquette'):
+                    if nc % dtr_read == 0:
+                        Ysl.append(struct.unpack('d' * tmax * (nn + 1), t))
+                t = fp.read(8 * tmax * (nn + 1))
+                if not kwargs.get('plaquette'):
+                    if nc % dtr_read == 0:
+                        Ysl.append(struct.unpack('d' * tmax * (nn + 1), t))
+                t = fp.read(8 * tmax * (nn + 1))
+
+        Ysum.append([])
+        for i, item in enumerate(Ysl):
+            Ysum[-1].append([np.mean(item[current + xmin:current + tmax - xmin]) for current in range(0, len(item), tmax)])
+
+    t2E_dict = {}
+    for n in range(nn + 1):
+        samples = []
+        for nrep, rep in enumerate(Ysum):
+            samples.append([])
+            for cnfg in rep:
+                samples[-1].append(cnfg[n])
+            samples[-1] = samples[-1][r_start[nrep]:r_stop[nrep]]
+        new_obs = Obs(samples, [(w.split('.'))[0] for w in ls])
+        t2E_dict[n * dn * eps] = (n * dn * eps) ** 2 * new_obs / (spatial_extent ** 3) - 0.3
+
+    zero_crossing = np.argmax(np.array([o.value for o in t2E_dict.values()]) > 0.0)
+
+    x = list(t2E_dict.keys())[zero_crossing - fit_range: zero_crossing + fit_range]
+    y = list(t2E_dict.values())[zero_crossing - fit_range: zero_crossing + fit_range]
+    [o.gamma_method() for o in y]
+
+    fit_result = fit_lin(x, y)
+    return -fit_result[0] / fit_result[1]
+
+
+def _parse_array_openQCD2(d, n, size, wa, quadrupel=False):
+    arr = []
+    if d == 2:
+        tot = 0
+        # split the flat array wa into n[0] rows of length n[1]
+        for i in range(n[0]):
+            if quadrupel:
+                # keep every second entry of the current row
+                tmp = wa[tot:tot + n[1]]
+                tmp2 = []
+                for j in range(len(tmp)):
+                    if j % 2 == 0:
+                        tmp2.append(tmp[j])
+                arr.append(tmp2)
+            else:
+                arr.append(np.asarray(wa[tot:tot + n[1]]))
+            tot += n[1]
+    return arr
+
+
+# mimic the read_array routine of openQCD-2.0.
+# fp is the opened file handle
+# returns the dict array
+# at this point we only parse a 2d array
+# d = 2
+# n = [nfct[irw], 2*nsrc[irw]]
+def _read_array_openQCD2(fp):
+    t = fp.read(4)
+    d = struct.unpack('i', t)[0]
+    t = fp.read(4 * d)
+    n = struct.unpack('%di' % (d), t)
+    t = fp.read(4)
+    size = struct.unpack('i', t)[0]
+    if size == 4:
+        types = 'i'
+    elif size == 8:
+        types = 'd'
+    elif size == 16:
+        types = 'dd'
+    else:
+        print('Type not known!')
+    m = n[0]
+    for i in range(1, d):
+        m *= n[i]
+
+    t = fp.read(m * size)
+    tmp = struct.unpack('%d%s' % (m, types), t)
+
+    arr = _parse_array_openQCD2(d, n, size, tmp, quadrupel=True)
+    return {'d': d, 'n': n, 'size': size, 'arr': arr}
+
+ +
+ +
+
+
#   + + + def + read_rwms(path, prefix, version='2.0', names=None, **kwargs): +
+ +
+ View Source +
def read_rwms(path, prefix, version='2.0', names=None, **kwargs):
+    """Read rwms format from given folder structure. Returns a list of length nrw
+
+    Parameters
+    -----------------
+    version -- version of openQCD, default 2.0
+
+    Keyword arguments
+    -----------------
+    r_start -- list which contains the first config to be read for each replicum
+    r_stop -- list which contains the last config to be read for each replicum
+    postfix -- postfix of the file to read, e.g. '.ms1' for openQCD-files
+    """
+    known_oqcd_versions = ['1.4', '1.6', '2.0']
+    if not (version in known_oqcd_versions):
+        raise Exception('Unknown openQCD version defined!')
+    print("Working with openQCD version " + version)
+    if 'postfix' in kwargs:
+        postfix = kwargs.get('postfix')
+    else:
+        postfix = ''
+    ls = []
+    for (dirpath, dirnames, filenames) in os.walk(path):
+        ls.extend(filenames)
+        break
+
+    if not ls:
+        raise Exception('Error, directory not found')
+
+    # Exclude files with different names
+    for exc in ls:
+        if not fnmatch.fnmatch(exc, prefix + '*' + postfix + '.dat'):
+            ls = list(set(ls) - set([exc]))
+    if len(ls) > 1:
+        ls.sort(key=lambda x: int(re.findall(r'\d+', x[len(prefix):])[0]))
+    replica = len(ls)
+
+    if 'r_start' in kwargs:
+        r_start = kwargs.get('r_start')
+        if len(r_start) != replica:
+            raise Exception('r_start does not match number of replicas')
+        # Adjust Configuration numbering to python index
+        r_start = [o - 1 if o else None for o in r_start]
+    else:
+        r_start = [None] * replica
+
+    if 'r_stop' in kwargs:
+        r_stop = kwargs.get('r_stop')
+        if len(r_stop) != replica:
+            raise Exception('r_stop does not match number of replicas')
+    else:
+        r_stop = [None] * replica
+
+    print('Read reweighting factors from', prefix[:-1], ',', replica, 'replica', end='')
+
+    # Adjust replica names to new bookmarking system
+    if names is None:
+        rep_names = []
+        for entry in ls:
+            truncated_entry = entry.split('.')[0]
+            idx = truncated_entry.index('r')
+            rep_names.append(truncated_entry[:idx] + '|' + truncated_entry[idx:])
+
+    print_err = 0
+    if 'print_err' in kwargs:
+        print_err = 1
+        print()
+
+    deltas = []
+
+    for rep in range(replica):
+        tmp_array = []
+        with open(path + '/' + ls[rep], 'rb') as fp:
+
+            # header
+            t = fp.read(4)  # number of reweighting factors
+            if rep == 0:
+                nrw = struct.unpack('i', t)[0]
+                if version == '2.0':
+                    nrw = int(nrw / 2)
+                for k in range(nrw):
+                    deltas.append([])
+            else:
+                if ((nrw != struct.unpack('i', t)[0] and (not version == '2.0')) or (nrw != struct.unpack('i', t)[0] / 2 and version == '2.0')):  # little weird if-clause due to the /2 operation needed.
+                    raise Exception('Error: different number of reweighting factors for replicum', rep)
+
+            for k in range(nrw):
+                tmp_array.append([])
+
+            # This block is necessary for openQCD1.6 and openQCD2.0 ms1 files
+            nfct = []
+            if version in ['1.6', '2.0']:
+                for i in range(nrw):
+                    t = fp.read(4)
+                    nfct.append(struct.unpack('i', t)[0])
+                # print('nfct: ', nfct) # Hasenbusch factor, 1 for rat reweighting
+            else:
+                for i in range(nrw):
+                    nfct.append(1)
+
+            nsrc = []
+            for i in range(nrw):
+                t = fp.read(4)
+                nsrc.append(struct.unpack('i', t)[0])
+            if version == '2.0':
+                if not struct.unpack('i', fp.read(4))[0] == 0:
+                    print('something is wrong!')
+
+            # body
+            while True:
+                t = fp.read(4)
+                if len(t) < 4:
+                    break
+                if print_err:
+                    config_no = struct.unpack('i', t)
+                for i in range(nrw):
+                    if(version == '2.0'):
+                        tmpd = _read_array_openQCD2(fp)
+                        tmpd = _read_array_openQCD2(fp)
+                        tmp_rw = tmpd['arr']
+                        tmp_nfct = 1.0
+                        for j in range(tmpd['n'][0]):
+                            tmp_nfct *= np.mean(np.exp(-np.asarray(tmp_rw[j])))
+                            if print_err:
+                                print(config_no, i, j, np.mean(np.exp(-np.asarray(tmp_rw[j]))), np.std(np.exp(-np.asarray(tmp_rw[j]))))
+                                print('Sources:', np.exp(-np.asarray(tmp_rw[j])))
+                                print('Partial factor:', tmp_nfct)
+                    elif version == '1.6' or version == '1.4':
+                        tmp_nfct = 1.0
+                        for j in range(nfct[i]):
+                            t = fp.read(8 * nsrc[i])
+                            t = fp.read(8 * nsrc[i])
+                            tmp_rw = struct.unpack('d' * nsrc[i], t)
+                            tmp_nfct *= np.mean(np.exp(-np.asarray(tmp_rw)))
+                            if print_err:
+                                print(config_no, i, j, np.mean(np.exp(-np.asarray(tmp_rw))), np.std(np.exp(-np.asarray(tmp_rw))))
+                                print('Sources:', np.exp(-np.asarray(tmp_rw)))
+                                print('Partial factor:', tmp_nfct)
+                    tmp_array[i].append(tmp_nfct)
+
+            for k in range(nrw):
+                deltas[k].append(tmp_array[k][r_start[rep]:r_stop[rep]])
+
+    print(',', nrw, 'reweighting factors with', nsrc, 'sources')
+    result = []
+    for t in range(nrw):
+        if names is None:
+            result.append(Obs(deltas[t], rep_names))
+        else:
+            print(names)
+            result.append(Obs(deltas[t], names))
+    return result
+
+ +
+ +

Read rwms format from given folder structure. Returns a list of length nrw

+ +
Parameters
+ +
    +
  • version -- version of openQCD, default 2.0
  • +
+ +
Keyword arguments
+ +

r_start -- list which contains the first config to be read for each replicum +r_stop -- list which contains the last config to be read for each replicum +postfix -- postfix of the file to read, e.g. '.ms1' for openQCD-files

+
+ + +
+
+
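A minimal usage sketch (file locations and ranges are hypothetical):

```python
from pyerrors.input.openQCD import read_rwms  # assumed import path

# read ms1 reweighting factors written by openQCD 2.0 from runA_r0.ms1.dat, ...
rw = read_rwms('/data/runA', 'runA_', version='2.0', postfix='.ms1',
               r_start=[1], r_stop=[1000])
rw[0].gamma_method()  # one Obs per reweighting factor
```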
#   + + + def + extract_t0(path, prefix, dtr_read, xmin, spatial_extent, fit_range=5, **kwargs): +
+ +
+ View Source +
def extract_t0(path, prefix, dtr_read, xmin, spatial_extent, fit_range=5, **kwargs):
+    """Extract t0 from given .ms.dat files. Returns t0 as Obs.
+
+    It is assumed that all boundary effects have sufficiently decayed at x0=xmin.
+    The data around the zero crossing of t^2<E> - 0.3 is fitted with a linear function
+    from which the exact root is extracted.
+    Only works with openQCD v 1.2.
+
+    Parameters
+    ----------
+    path -- Path to .ms.dat files
+    prefix -- Ensemble prefix
+    dtr_read -- Determines how many trajectories should be skipped when reading the ms.dat files.
+                Corresponds to dtr_cnfg / dtr_ms in the openQCD input file.
+    xmin -- First timeslice where the boundary effects have sufficiently decayed.
+    spatial_extent -- spatial extent of the lattice, required for normalization.
+    fit_range -- Number of data points left and right of the zero crossing to be included in the linear fit. (Default: 5)
+
+    Keyword arguments
+    -----------------
+    r_start -- list which contains the first config to be read for each replicum.
+    r_stop -- list which contains the last config to be read for each replicum.
+    plaquette -- If true extract the plaquette estimate of t0 instead.
+    """
+
+    ls = []
+    for (dirpath, dirnames, filenames) in os.walk(path):
+        ls.extend(filenames)
+        break
+
+    if not ls:
+        raise Exception('Error, directory not found')
+
+    # Exclude files with different names
+    for exc in ls:
+        if not fnmatch.fnmatch(exc, prefix + '*.ms.dat'):
+            ls = list(set(ls) - set([exc]))
+    if len(ls) > 1:
+        ls.sort(key=lambda x: int(re.findall(r'\d+', x[len(prefix):])[0]))
+    replica = len(ls)
+
+    if 'r_start' in kwargs:
+        r_start = kwargs.get('r_start')
+        if len(r_start) != replica:
+            raise Exception('r_start does not match number of replicas')
+        # Adjust Configuration numbering to python index
+        r_start = [o - 1 if o else None for o in r_start]
+    else:
+        r_start = [None] * replica
+
+    if 'r_stop' in kwargs:
+        r_stop = kwargs.get('r_stop')
+        if len(r_stop) != replica:
+            raise Exception('r_stop does not match number of replicas')
+    else:
+        r_stop = [None] * replica
+
+    print('Extract t0 from', prefix, ',', replica, 'replica')
+
+    Ysum = []
+
+    for rep in range(replica):
+
+        with open(path + '/' + ls[rep], 'rb') as fp:
+            # Read header
+            t = fp.read(12)
+            header = struct.unpack('iii', t)
+            if rep == 0:
+                dn = header[0]
+                nn = header[1]
+                tmax = header[2]
+            elif dn != header[0] or nn != header[1] or tmax != header[2]:
+                raise Exception('Replica parameters do not match.')
+
+            t = fp.read(8)
+            if rep == 0:
+                eps = struct.unpack('d', t)[0]
+                print('Step size:', eps, ', Maximal t value:', dn * (nn) * eps)
+            elif eps != struct.unpack('d', t)[0]:
+                raise Exception('Values for eps do not match among replica.')
+
+            Ysl = []
+
+            # Read body
+            while True:
+                t = fp.read(4)
+                if(len(t) < 4):
+                    break
+                nc = struct.unpack('i', t)[0]
+
+                t = fp.read(8 * tmax * (nn + 1))
+                if kwargs.get('plaquette'):
+                    if nc % dtr_read == 0:
+                        Ysl.append(struct.unpack('d' * tmax * (nn + 1), t))
+                t = fp.read(8 * tmax * (nn + 1))
+                if not kwargs.get('plaquette'):
+                    if nc % dtr_read == 0:
+                        Ysl.append(struct.unpack('d' * tmax * (nn + 1), t))
+                t = fp.read(8 * tmax * (nn + 1))
+
+        Ysum.append([])
+        for i, item in enumerate(Ysl):
+            Ysum[-1].append([np.mean(item[current + xmin:current + tmax - xmin]) for current in range(0, len(item), tmax)])
+
+    t2E_dict = {}
+    for n in range(nn + 1):
+        samples = []
+        for nrep, rep in enumerate(Ysum):
+            samples.append([])
+            for cnfg in rep:
+                samples[-1].append(cnfg[n])
+            samples[-1] = samples[-1][r_start[nrep]:r_stop[nrep]]
+        new_obs = Obs(samples, [(w.split('.'))[0] for w in ls])
+        t2E_dict[n * dn * eps] = (n * dn * eps) ** 2 * new_obs / (spatial_extent ** 3) - 0.3
+
+    zero_crossing = np.argmax(np.array([o.value for o in t2E_dict.values()]) > 0.0)
+
+    x = list(t2E_dict.keys())[zero_crossing - fit_range: zero_crossing + fit_range]
+    y = list(t2E_dict.values())[zero_crossing - fit_range: zero_crossing + fit_range]
+    [o.gamma_method() for o in y]
+
+    fit_result = fit_lin(x, y)
+    return -fit_result[0] / fit_result[1]
+
+ +
+ +

Extract t0 from given .ms.dat files. Returns t0 as Obs.

+ +

It is assumed that all boundary effects have sufficiently decayed at x0=xmin. +The data around the zero crossing of t^2<E> - 0.3 is fitted with a linear function +from which the exact root is extracted. +Only works with openQCD v 1.2.

+ +
Parameters
+ +
    +
  • path -- Path to .ms.dat files
  • +
  • prefix -- Ensemble prefix
  • +
  • dtr_read -- Determines how many trajectories should be skipped when reading the ms.dat files. Corresponds to dtr_cnfg / dtr_ms in the openQCD input file.
  • +
  • xmin -- First timeslice where the boundary effects have sufficiently decayed.
  • +
  • spatial_extent -- spatial extent of the lattice, required for normalization.
  • +
  • fit_range -- Number of data points left and right of the zero crossing to be included in the linear fit (default: 5).
  • +
+ +
Keyword arguments
+ +

r_start -- list which contains the first config to be read for each replicum. +r_stop -- list which contains the last config to be read for each replicum. +plaquette -- If true extract the plaquette estimate of t0 instead.

+
+ + +
+
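A minimal usage sketch (the lattice parameters below are hypothetical):

```python
from pyerrors.input.openQCD import extract_t0  # assumed import path

# 32^3 spatial volume, flow observable stored every dtr_cnfg / dtr_ms = 4
# trajectories, boundary effects assumed to have decayed for x0 >= 4
t0 = extract_t0('/data/runA', 'runA', dtr_read=4, xmin=4, spatial_extent=32)
t0.gamma_method()
print(t0)
```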
+ + \ No newline at end of file diff --git a/docs/pyerrors/input/sfcf.html b/docs/pyerrors/input/sfcf.html new file mode 100644 index 00000000..accf40c0 --- /dev/null +++ b/docs/pyerrors/input/sfcf.html @@ -0,0 +1,916 @@ + + + + + + + pyerrors.input.sfcf API documentation + + + + + + + + + + + +
+
+

+pyerrors.input.sfcf

+ + +
+ View Source +
#!/usr/bin/env python
+# coding: utf-8
+
+import os
+import fnmatch
+import re
+import numpy as np
+from ..obs import Obs
+
+
+def read_sfcf(path, prefix, name, **kwargs):
+    """Read sfcf C format from given folder structure.
+
+    Keyword arguments
+    -----------------
+    im -- if True, read imaginary instead of real part of the correlation function.
+    single -- if True, read a boundary-to-boundary correlation function with a single value
+    b2b -- if True, read a time-dependent boundary-to-boundary correlation function
+    names -- Alternative labeling for replicas/ensembles. Has to have the appropriate length
+    """
+    if kwargs.get('im'):
+        im = 1
+        part = 'imaginary'
+    else:
+        im = 0
+        part = 'real'
+
+    if kwargs.get('single'):
+        b2b = 1
+        single = 1
+    else:
+        b2b = 0
+        single = 0
+
+    if kwargs.get('b2b'):
+        b2b = 1
+
+    read = 0
+    T = 0
+    start = 0
+    ls = []
+    for (dirpath, dirnames, filenames) in os.walk(path):
+        ls.extend(dirnames)
+        break
+    if not ls:
+        raise Exception('Error, directory not found')
+    # Exclude folders with different names
+    for exc in ls:
+        if not fnmatch.fnmatch(exc, prefix + '*'):
+            ls = list(set(ls) - set([exc]))
+    if len(ls) > 1:
+        ls.sort(key=lambda x: int(re.findall(r'\d+', x[len(prefix):])[0]))
+    replica = len(ls)
+    print('Read', part, 'part of', name, 'from', prefix, ',', replica, 'replica')
+    if 'names' in kwargs:
+        new_names = kwargs.get('names')
+        if len(new_names) != replica:
+            raise Exception('Names does not have the required length', replica)
+    else:
+        # Adjust replica names to new bookmarking system
+        new_names = []
+        for entry in ls:
+            idx = entry.index('r')
+            new_names.append(entry[:idx] + '|' + entry[idx:])
+
+    print(replica, 'replica')
+    for i, item in enumerate(ls):
+        print(item)
+        sub_ls = []
+        for (dirpath, dirnames, filenames) in os.walk(path + '/' + item):
+            sub_ls.extend(dirnames)
+            break
+        # Keep only the cfg* subdirectories
+        for exc in sub_ls:
+            if not fnmatch.fnmatch(exc, 'cfg*'):
+                sub_ls = list(set(sub_ls) - set([exc]))
+        sub_ls.sort(key=lambda x: int(x[3:]))
+        no_cfg = len(sub_ls)
+        print(no_cfg, 'configurations')
+
+        if i == 0:
+            with open(path + '/' + item + '/' + sub_ls[0] + '/' + name) as fp:
+                for k, line in enumerate(fp):
+                    if read == 1 and not line.strip() and k > start + 1:
+                        break
+                    if read == 1 and k >= start:
+                        T += 1
+                    if '[correlator]' in line:
+                        read = 1
+                        start = k + 7 + b2b
+                        T -= b2b
+
+            deltas = []
+            for j in range(T):
+                deltas.append([])
+
+        sublength = len(sub_ls)
+        for j in range(T):
+            deltas[j].append(np.zeros(sublength))
+
+        for cnfg, subitem in enumerate(sub_ls):
+            with open(path + '/' + item + '/' + subitem + '/' + name) as fp:
+                for k, line in enumerate(fp):
+                    if(k >= start and k < start + T):
+                        floats = list(map(float, line.split()))
+                        deltas[k - start][i][cnfg] = floats[1 + im - single]
+
+    result = []
+    for t in range(T):
+        result.append(Obs(deltas[t], new_names))
+
+    return result
+
+
+def read_sfcf_c(path, prefix, name, quarks='.*', noffset=0, wf=0, wf2=0, **kwargs):
+    """Read sfcf c format from given folder structure.
+
+    Arguments
+    -----------------
+    quarks -- Label of the quarks used in the sfcf input file
+    noffset -- Offset of the source (only relevant when wavefunctions are used)
+    wf -- ID of wave function
+    wf2 -- ID of the second wavefunction (only relevant for boundary-to-boundary correlation functions)
+
+    Keyword arguments
+    -----------------
+    im -- if True, read imaginary instead of real part of the correlation function.
+    b2b -- if True, read a time-dependent boundary-to-boundary correlation function
+    names -- Alternative labeling for replicas/ensembles. Has to have the appropriate length
+    ens_name -- replaces the name of the ensemble
+    """
+
+    if kwargs.get('im'):
+        im = 1
+        part = 'imaginary'
+    else:
+        im = 0
+        part = 'real'
+
+    if kwargs.get('b2b'):
+        b2b = 1
+    else:
+        b2b = 0
+
+    T = 0
+    ls = []
+    for (dirpath, dirnames, filenames) in os.walk(path):
+        ls.extend(dirnames)
+        break
+    if not ls:
+        raise Exception('Error, directory not found')
+    # Exclude folders with different names
+    for exc in ls:
+        if not fnmatch.fnmatch(exc, prefix + '*'):
+            ls = list(set(ls) - set([exc]))
+    if len(ls) > 1:
+        ls.sort(key=lambda x: int(re.findall(r'\d+', x[len(prefix):])[0]))  # New version, to cope with ids, etc.
+    replica = len(ls)
+    if 'names' in kwargs:
+        new_names = kwargs.get('names')
+        if len(new_names) != replica:
+            raise Exception('Names does not have the required length', replica)
+    else:
+        # Adjust replica names to new bookmarking system
+        new_names = []
+        for entry in ls:
+            idx = entry.index('r')
+            if 'ens_name' in kwargs:
+                new_names.append(kwargs.get('ens_name') + '|' + entry[idx:])
+            else:
+                new_names.append(entry[:idx] + '|' + entry[idx:])
+
+    print('Read', part, 'part of', name, 'from', prefix[:-1], ',', replica, 'replica')
+    for i, item in enumerate(ls):
+        sub_ls = []
+        for (dirpath, dirnames, filenames) in os.walk(path + '/' + item):
+            sub_ls.extend(filenames)
+            break
+        for exc in sub_ls:
+            if not fnmatch.fnmatch(exc, prefix + '*'):
+                sub_ls = list(set(sub_ls) - set([exc]))
+        sub_ls.sort(key=lambda x: int(re.findall(r'\d+', x)[-1]))
+
+        first_cfg = int(re.findall(r'\d+', sub_ls[0])[-1])
+
+        last_cfg = len(sub_ls) + first_cfg - 1
+
+        for cfg in range(1, len(sub_ls)):
+            if int(re.findall(r'\d+', sub_ls[cfg])[-1]) != first_cfg + cfg:
+                last_cfg = cfg + first_cfg - 1
+                break
+
+        no_cfg = last_cfg - first_cfg + 1
+        print(item, ':', no_cfg, 'evenly spaced configurations (', first_cfg, '-', last_cfg, ') ,', len(sub_ls) - no_cfg, 'configs omitted\n')
+
+        if i == 0:
+            pattern = 'name      ' + name + '\nquarks    ' + quarks + '\noffset    ' + str(noffset) + '\nwf        ' + str(wf)
+            if b2b:
+                pattern += '\nwf_2      ' + str(wf2)
+
+            with open(path + '/' + item + '/' + sub_ls[0], 'r') as file:
+                content = file.read()
+                match = re.search(pattern, content)
+                if match:
+                    start_read = content.count('\n', 0, match.start()) + 5 + b2b
+                    end_match = re.search(r'\n\s*\n', content[match.start():])
+                    T = content[match.start():].count('\n', 0, end_match.start()) - 4 - b2b
+                    assert T > 0
+                    print(T, 'entries, starting to read in line', start_read)
+                else:
+                    raise Exception('Correlator with pattern\n' + pattern + '\nnot found.')
+
+            deltas = []
+            for j in range(T):
+                deltas.append([])
+
+        sublength = no_cfg
+        for j in range(T):
+            deltas[j].append(np.zeros(sublength))
+
+        for cfg in range(no_cfg):
+            with open(path + '/' + item + '/' + sub_ls[cfg]) as fp:
+                for k, line in enumerate(fp):
+                    if k == start_read - 5 - b2b:
+                        if line.strip() != 'name      ' + name:
+                            raise Exception('Wrong format', sub_ls[cfg])
+                    if(k >= start_read and k < start_read + T):
+                        floats = list(map(float, line.split()))
+                        deltas[k - start_read][i][cfg] = floats[-2:][im]
+
+    result = []
+    for t in range(T):
+        result.append(Obs(deltas[t], new_names))
+    return result
+
+
+def read_qtop(path, prefix, **kwargs):
+    """Read qtop format from given folder structure.
+
+    Keyword arguments
+    -----------------
+    target -- specifies the topological sector to be reweighted to (default 0)
+    full -- if True, read the charge instead of the reweighting factor.
+    """
+
+    if 'target' in kwargs:
+        target = kwargs.get('target')
+    else:
+        target = 0
+
+    if kwargs.get('full'):
+        full = 1
+    else:
+        full = 0
+
+    ls = []
+    for (dirpath, dirnames, filenames) in os.walk(path):
+        ls.extend(filenames)
+        break
+
+    if not ls:
+        raise Exception('Error, directory not found')
+
+    # Exclude files with different names
+    for exc in ls:
+        if not fnmatch.fnmatch(exc, prefix + '*'):
+            ls = list(set(ls) - set([exc]))
+    if len(ls) > 1:
+        ls.sort(key=lambda x: int(re.findall(r'\d+', x[len(prefix):])[0]))  # New version, to cope with ids, etc.
+    replica = len(ls)
+    print('Read Q_top from', prefix[:-1], ',', replica, 'replica')
+
+    deltas = []
+
+    for rep in range(replica):
+        tmp = []
+        with open(path + '/' + ls[rep]) as fp:
+            for k, line in enumerate(fp):
+                floats = list(map(float, line.split()))
+                if full == 1:
+                    tmp.append(floats[1])
+                else:
+                    if int(floats[1]) == target:
+                        tmp.append(1.0)
+                    else:
+                        tmp.append(0.0)
+
+        deltas.append(np.array(tmp))
+
+    rep_names = []
+    for entry in ls:
+        truncated_entry = entry.split('.')[0]
+        idx = truncated_entry.index('r')
+        rep_names.append(truncated_entry[:idx] + '|' + truncated_entry[idx:])
+
+    result = Obs(deltas, rep_names)
+
+    return result
+
+ +
+ +
+
+
#   + + + def + read_sfcf(path, prefix, name, **kwargs): +
+ +
+ View Source +
def read_sfcf(path, prefix, name, **kwargs):
+    """Read sfcf C format from given folder structure.
+
+    Keyword arguments
+    -----------------
+    im -- if True, read imaginary instead of real part of the correlation function.
+    single -- if True, read a boundary-to-boundary correlation function with a single value
+    b2b -- if True, read a time-dependent boundary-to-boundary correlation function
+    names -- Alternative labeling for replicas/ensembles. Has to have the appropriate length
+    """
+    if kwargs.get('im'):
+        im = 1
+        part = 'imaginary'
+    else:
+        im = 0
+        part = 'real'
+
+    if kwargs.get('single'):
+        b2b = 1
+        single = 1
+    else:
+        b2b = 0
+        single = 0
+
+    if kwargs.get('b2b'):
+        b2b = 1
+
+    read = 0
+    T = 0
+    start = 0
+    ls = []
+    for (dirpath, dirnames, filenames) in os.walk(path):
+        ls.extend(dirnames)
+        break
+    if not ls:
+        raise Exception('Error, directory not found')
+    # Exclude folders with different names
+    for exc in ls:
+        if not fnmatch.fnmatch(exc, prefix + '*'):
+            ls = list(set(ls) - set([exc]))
+    if len(ls) > 1:
+        ls.sort(key=lambda x: int(re.findall(r'\d+', x[len(prefix):])[0]))
+    replica = len(ls)
+    print('Read', part, 'part of', name, 'from', prefix, ',', replica, 'replica')
+    if 'names' in kwargs:
+        new_names = kwargs.get('names')
+        if len(new_names) != replica:
+            raise Exception('Names does not have the required length', replica)
+    else:
+        # Adjust replica names to new bookmarking system
+        new_names = []
+        for entry in ls:
+            idx = entry.index('r')
+            new_names.append(entry[:idx] + '|' + entry[idx:])
+
+    print(replica, 'replica')
+    for i, item in enumerate(ls):
+        print(item)
+        sub_ls = []
+        for (dirpath, dirnames, filenames) in os.walk(path + '/' + item):
+            sub_ls.extend(dirnames)
+            break
+        # Keep only the cfg* subdirectories
+        for exc in sub_ls:
+            if not fnmatch.fnmatch(exc, 'cfg*'):
+                sub_ls = list(set(sub_ls) - set([exc]))
+        sub_ls.sort(key=lambda x: int(x[3:]))
+        no_cfg = len(sub_ls)
+        print(no_cfg, 'configurations')
+
+        if i == 0:
+            with open(path + '/' + item + '/' + sub_ls[0] + '/' + name) as fp:
+                for k, line in enumerate(fp):
+                    if read == 1 and not line.strip() and k > start + 1:
+                        break
+                    if read == 1 and k >= start:
+                        T += 1
+                    if '[correlator]' in line:
+                        read = 1
+                        start = k + 7 + b2b
+                        T -= b2b
+
+            deltas = []
+            for j in range(T):
+                deltas.append([])
+
+        sublength = len(sub_ls)
+        for j in range(T):
+            deltas[j].append(np.zeros(sublength))
+
+        for cnfg, subitem in enumerate(sub_ls):
+            with open(path + '/' + item + '/' + subitem + '/' + name) as fp:
+                for k, line in enumerate(fp):
+                    if(k >= start and k < start + T):
+                        floats = list(map(float, line.split()))
+                        deltas[k - start][i][cnfg] = floats[1 + im - single]
+
+    result = []
+    for t in range(T):
+        result.append(Obs(deltas[t], new_names))
+
+    return result
+
+ +
+ +

Read sfcf C format from given folder structure.

+ +
Keyword arguments
+ +

im -- if True, read imaginary instead of real part of the correlation function. +single -- if True, read a boundary-to-boundary correlation function with a single value +b2b -- if True, read a time-dependent boundary-to-boundary correlation function +names -- Alternative labeling for replicas/ensembles. Has to have the appropriate length

+
+ + +
+
+
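A minimal usage sketch (directory layout and correlator name are hypothetical):

```python
from pyerrors.input.sfcf import read_sfcf  # assumed import path

# read the real part of f_A from /data/runA/runA_r0/cfg*/f_A,
# returning one Obs per timeslice
f_A = read_sfcf('/data/runA', 'runA_', 'f_A')
f_A[0].gamma_method()
```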
#   + + + def + read_sfcf_c(path, prefix, name, quarks='.*', noffset=0, wf=0, wf2=0, **kwargs): +
+ +
+ View Source +
def read_sfcf_c(path, prefix, name, quarks='.*', noffset=0, wf=0, wf2=0, **kwargs):
+    """Read sfcf c format from given folder structure.
+
+    Arguments
+    -----------------
+    quarks -- Label of the quarks used in the sfcf input file
+    noffset -- Offset of the source (only relevant when wavefunctions are used)
+    wf -- ID of wave function
+    wf2 -- ID of the second wavefunction (only relevant for boundary-to-boundary correlation functions)
+
+    Keyword arguments
+    -----------------
+    im -- if True, read imaginary instead of real part of the correlation function.
+    b2b -- if True, read a time-dependent boundary-to-boundary correlation function
+    names -- Alternative labeling for replicas/ensembles. Has to have the appropriate length
+    ens_name -- replaces the name of the ensemble
+    """
+
+    if kwargs.get('im'):
+        im = 1
+        part = 'imaginary'
+    else:
+        im = 0
+        part = 'real'
+
+    if kwargs.get('b2b'):
+        b2b = 1
+    else:
+        b2b = 0
+
+    T = 0
+    ls = []
+    for (dirpath, dirnames, filenames) in os.walk(path):
+        ls.extend(dirnames)
+        break
+    if not ls:
+        raise Exception('Error, directory not found')
+    # Exclude folders with different names
+    for exc in ls:
+        if not fnmatch.fnmatch(exc, prefix + '*'):
+            ls = list(set(ls) - set([exc]))
+    if len(ls) > 1:
+        ls.sort(key=lambda x: int(re.findall(r'\d+', x[len(prefix):])[0]))  # New version, to cope with ids, etc.
+    replica = len(ls)
+    if 'names' in kwargs:
+        new_names = kwargs.get('names')
+        if len(new_names) != replica:
+            raise Exception('Names does not have the required length', replica)
+    else:
+        # Adjust replica names to new bookmarking system
+        new_names = []
+        for entry in ls:
+            idx = entry.index('r')
+            if 'ens_name' in kwargs:
+                new_names.append(kwargs.get('ens_name') + '|' + entry[idx:])
+            else:
+                new_names.append(entry[:idx] + '|' + entry[idx:])
+
+    print('Read', part, 'part of', name, 'from', prefix[:-1], ',', replica, 'replica')
+    for i, item in enumerate(ls):
+        sub_ls = []
+        for (dirpath, dirnames, filenames) in os.walk(path + '/' + item):
+            sub_ls.extend(filenames)
+            break
+        for exc in sub_ls:
+            if not fnmatch.fnmatch(exc, prefix + '*'):
+                sub_ls = list(set(sub_ls) - set([exc]))
+        sub_ls.sort(key=lambda x: int(re.findall(r'\d+', x)[-1]))
+
+        first_cfg = int(re.findall(r'\d+', sub_ls[0])[-1])
+
+        last_cfg = len(sub_ls) + first_cfg - 1
+
+        for cfg in range(1, len(sub_ls)):
+            if int(re.findall(r'\d+', sub_ls[cfg])[-1]) != first_cfg + cfg:
+                last_cfg = cfg + first_cfg - 1
+                break
+
+        no_cfg = last_cfg - first_cfg + 1
+        print(item, ':', no_cfg, 'evenly spaced configurations (', first_cfg, '-', last_cfg, ') ,', len(sub_ls) - no_cfg, 'configs omitted\n')
+
+        if i == 0:
+            pattern = 'name      ' + name + '\nquarks    ' + quarks + '\noffset    ' + str(noffset) + '\nwf        ' + str(wf)
+            if b2b:
+                pattern += '\nwf_2      ' + str(wf2)
+
+            with open(path + '/' + item + '/' + sub_ls[0], 'r') as file:
+                content = file.read()
+                match = re.search(pattern, content)
+                if match:
+                    start_read = content.count('\n', 0, match.start()) + 5 + b2b
+                    end_match = re.search(r'\n\s*\n', content[match.start():])
+                    T = content[match.start():].count('\n', 0, end_match.start()) - 4 - b2b
+                    assert T > 0
+                    print(T, 'entries, starting to read in line', start_read)
+                else:
+                    raise Exception('Correlator with pattern\n' + pattern + '\nnot found.')
+
+            deltas = []
+            for j in range(T):
+                deltas.append([])
+
+        sublength = no_cfg
+        for j in range(T):
+            deltas[j].append(np.zeros(sublength))
+
+        for cfg in range(no_cfg):
+            with open(path + '/' + item + '/' + sub_ls[cfg]) as fp:
+                for k, line in enumerate(fp):
+                    if k == start_read - 5 - b2b:
+                        if line.strip() != 'name      ' + name:
+                            raise Exception('Wrong format', sub_ls[cfg])
+                    if(k >= start_read and k < start_read + T):
+                        floats = list(map(float, line.split()))
+                        deltas[k - start_read][i][cfg] = floats[-2:][im]
+
+    result = []
+    for t in range(T):
+        result.append(Obs(deltas[t], new_names))
+    return result
+
Read sfcf c format from given folder structure.

Arguments
quarks -- Label of the quarks used in the sfcf input file
noffset -- Offset of the source (only relevant when wavefunctions are used)
wf -- ID of wave function
wf2 -- ID of the second wavefunction (only relevant for boundary-to-boundary correlation functions)

Keyword arguments
im -- if True, read imaginary instead of real part of the correlation function.
b2b -- if True, read a time-dependent boundary-to-boundary correlation function
names -- Alternative labeling for replicas/ensembles. Has to have the appropriate length
ens_name : str
    replaces the name of the ensemble

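A rough usage sketch (hypothetical path and labels; read_sfcf_c is assumed to be in scope, e.g. imported from the pyerrors input module, and path, prefix and name are assumed to be its first positional arguments, matching the variables used in the source above):

# read the real part of the correlator 'f_A' from folders ./data/myensr0, ./data/myensr1, ...
corr = read_sfcf_c('./data', 'myens', 'f_A', quarks='lquark lquark', noffset=0, wf=0)
# the result is a list of Obs, one entry per timeslice t = 0, ..., T - 1
corr[0].gamma_method()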
+
+ + +
+
+
def read_qtop(path, prefix, **kwargs):
+ +
+ View Source +
def read_qtop(path, prefix, **kwargs):
+    """Read qtop format from given folder structure.
+
+    Keyword arguments
+    -----------------
+    target -- specifies the topological sector to be reweighted to (default 0)
+    full -- if true read the charge instead of the reweighting factor.
+    """
+
+    if 'target' in kwargs:
+        target = kwargs.get('target')
+    else:
+        target = 0
+
+    if kwargs.get('full'):
+        full = 1
+    else:
+        full = 0
+
+    ls = []
+    for (dirpath, dirnames, filenames) in os.walk(path):
+        ls.extend(filenames)
+        break
+
+    if not ls:
+        raise Exception('Error, directory not found')
+
+    # Exclude files with different names
+    for exc in ls:
+        if not fnmatch.fnmatch(exc, prefix + '*'):
+            ls = list(set(ls) - set([exc]))
+    if len(ls) > 1:
+        ls.sort(key=lambda x: int(re.findall(r'\d+', x[len(prefix):])[0]))  # New version, to cope with ids, etc.
+    replica = len(ls)
+    print('Read Q_top from', prefix[:-1], ',', replica, 'replica')
+
+    deltas = []
+
+    for rep in range(replica):
+        tmp = []
+        with open(path + '/' + ls[rep]) as fp:
+            for k, line in enumerate(fp):
+                floats = list(map(float, line.split()))
+                if full == 1:
+                    tmp.append(floats[1])
+                else:
+                    if int(floats[1]) == target:
+                        tmp.append(1.0)
+                    else:
+                        tmp.append(0.0)
+
+        deltas.append(np.array(tmp))
+
+    rep_names = []
+    for entry in ls:
+        truncated_entry = entry.split('.')[0]
+        idx = truncated_entry.index('r')
+        rep_names.append(truncated_entry[:idx] + '|' + truncated_entry[idx:])
+
+    result = Obs(deltas, rep_names)
+
+    return result
+
+ +
+ +

Read qtop format from given folder structure.

Keyword arguments
target -- specifies the topological sector to be reweighted to (default 0)
full -- if true read the charge instead of the reweighting factor.

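A short sketch (hypothetical path and prefix):

# reweighting factors projecting onto the topological sector Q = 0, combined into one Obs
rw = read_qtop('./data', 'myens', target=0)
# the topological charge itself instead of the reweighting factor
q = read_qtop('./data', 'myens', full=True)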
+
+ + +
+
+ + \ No newline at end of file diff --git a/docs/pyerrors/jackknifing.html b/docs/pyerrors/jackknifing.html new file mode 100644 index 00000000..d47c0780 --- /dev/null +++ b/docs/pyerrors/jackknifing.html @@ -0,0 +1,769 @@ + + + + + + + pyerrors.jackknifing API documentation + + + + + + + + + + + +
+
+

+pyerrors.jackknifing

+ + +
+ View Source +
#!/usr/bin/env python
+# coding: utf-8
+
+import pickle
+import matplotlib.pyplot as plt
+import numpy as np
+
+
+def _jack_error(jack):
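+    # Standard jackknife error: sqrt((n - 1) / n * sum_i (jack_i - mean)**2)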
+    n = jack.size
+    mean = np.mean(jack)
+    error = 0
+    for i in range(n):
+        error += (jack[i] - mean) ** 2
+
+    return np.sqrt((n - 1) / n * error)
+
+
+class Jack:
+
+    def __init__(self, value, jacks):
+        self.jacks = jacks
+        self.N = list(map(np.size, self.jacks))
+        self.max_binsize = len(self.N)
+        self.value = value  # list(map(np.mean, self.jacks))
+        self.dvalue = list(map(_jack_error, self.jacks))
+
+    def print(self, **kwargs):
+        """Print basic properties of the Jack."""
+
+        if 'binsize' in kwargs:
+            b = kwargs.get('binsize') - 1
+            if b == -1:
+                b = 0
+            if not isinstance(b, int):
+                raise TypeError('binsize has to be integer')
+            if b + 1 > self.max_binsize:
+                raise Exception('Chosen binsize not calculated')
+        else:
+            b = 0
+
+        print('Result:\t %3.8e +/- %3.8e +/- %3.8e (%3.3f%%)' % (self.value, self.dvalue[b], self.dvalue[b] * np.sqrt(2 * b / self.N[0]), np.abs(self.dvalue[b] / self.value * 100)))
+
+    def plot_tauint(self):
+        plt.xlabel('binsize')
+        plt.ylabel('tauint')
+        length = self.max_binsize
+        x = np.arange(length) + 1
+        # self.dvalue is a plain list; convert it to an array before the elementwise arithmetic
+        dval = np.asarray(self.dvalue)
+        tauint = (dval / dval[0]) ** 2 / 2
+        yerr = np.sqrt(((2 * (dval / dval[0]) ** 2 * np.sqrt(2 * x / self.N[0])) / 2) ** 2 + ((2 * (dval / dval[0]) ** 2 * np.sqrt(2 / self.N[0])) / 2) ** 2)
+        plt.errorbar(x, tauint, yerr=yerr, linewidth=1, capsize=2)
+        plt.xlim(0.5, length + 0.5)
+        plt.title('Tauint')
+        plt.show()
+
+    def plot_history(self):
+        n = self.N[0]
+        x = np.arange(n)
+        # Reconstruct the original (unbinned) samples from the binsize-1 jackknife replicas
+        mean = np.mean(self.jacks[0])
+        y = n * mean - (n - 1) * self.jacks[0]
+        plt.errorbar(x, y, fmt='.', markersize=3)
+        plt.xlim(-0.5, n - 0.5)
+        plt.show()
+
+    def dump(self, name, **kwargs):
+        """Dump the Jack to a pickle file 'name'.
+
+        Keyword arguments:
+        path -- specifies a custom path for the file (default '.')
+        """
+        if 'path' in kwargs:
+            file_name = kwargs.get('path') + '/' + name + '.p'
+        else:
+            file_name = name + '.p'
+        with open(file_name, 'wb') as fb:
+            pickle.dump(self, fb)
+
+
+def generate_jack(obs, **kwargs):
+    full_data = []
+    for r, name in enumerate(obs.names):
+        if r == 0:
+            full_data = obs.deltas[name] + obs.r_values[name]
+        else:
+            full_data = np.append(full_data, obs.deltas[name] + obs.r_values[name])
+
+    jacks = []
+    if 'max_binsize' in kwargs:
+        max_b = kwargs.get('max_binsize')
+        if not isinstance(max_b, int):
+            raise TypeError('max_binsize has to be integer')
+    else:
+        max_b = 1
+
+    for b in range(max_b):
+        # binning if necessary
+        if b > 0:
+            n = full_data.size // (b + 1)
+            binned_data = np.zeros(n)
+            for i in range(n):
+                for j in range(b + 1):
+                    binned_data[i] += full_data[i * (b + 1) + j]
+                binned_data[i] /= (b + 1)
+        else:
+            binned_data = full_data
+            n = binned_data.size
+        # generate jacks from data
+        mean = np.mean(binned_data)
+        tmp_jacks = np.zeros(n)
+        for i in range(n):
+            tmp_jacks[i] = (n * mean - binned_data[i]) / (n - 1)
+        jacks.append(tmp_jacks)
+
+        # Value is not correctly reproduced here
+    return Jack(obs.value, jacks)
+
+
+def derived_jack(func, data, **kwargs):
+    """Construct a derived Jack according to func(data, **kwargs).
+
+    Parameters
+    ----------
+    func -- arbitrary function of the form func(data, **kwargs). For the automatic differentiation to work,
+            all numpy functions have to have the autograd wrapper (use 'import autograd.numpy as np').
+    data -- list of Jacks, e.g. [jack1, jack2, jack3].
+
+    Notes
+    -----
+    For simple mathematical operations it can be practical to use anonymous functions.
+    For the ratio of two jacks one can e.g. use
+
+    new_jack = derived_jack(lambda x : x[0] / x[1], [jack1, jack2])
+
+    """
+
+    # Check shapes of data
+    if not all(x.N == data[0].N for x in data):
+        raise Exception('Error: Shape of data does not fit')
+
+    values = np.zeros(len(data))
+    for j, item in enumerate(data):
+        values[j] = item.value
+    new_value = func(values, **kwargs)
+
+    jacks = []
+    for b in range(data[0].max_binsize):
+        tmp_jacks = np.zeros(data[0].N[b])
+        for i in range(data[0].N[b]):
+            values = np.zeros(len(data))
+            for j, item in enumerate(data):
+                values[j] = item.jacks[b][i]
+            tmp_jacks[i] = func(values, **kwargs)
+        jacks.append(tmp_jacks)
+
+    return Jack(new_value, jacks)
+
+ +
+ +
+
+
+ #   + + + class + Jack: +
+ +
+ View Source +
class Jack:
+
+    def __init__(self, value, jacks):
+        self.jacks = jacks
+        self.N = list(map(np.size, self.jacks))
+        self.max_binsize = len(self.N)
+        self.value = value  # list(map(np.mean, self.jacks))
+        self.dvalue = list(map(_jack_error, self.jacks))
+
+    def print(self, **kwargs):
+        """Print basic properties of the Jack."""
+
+        if 'binsize' in kwargs:
+            b = kwargs.get('binsize') - 1
+            if b == -1:
+                b = 0
+            if not isinstance(b, int):
+                raise TypeError('binsize has to be integer')
+            if b + 1 > self.max_binsize:
+                raise Exception('Chosen binsize not calculated')
+        else:
+            b = 0
+
+        print('Result:\t %3.8e +/- %3.8e +/- %3.8e (%3.3f%%)' % (self.value, self.dvalue[b], self.dvalue[b] * np.sqrt(2 * b / self.N[0]), np.abs(self.dvalue[b] / self.value * 100)))
+
+    def plot_tauint(self):
+        plt.xlabel('binsize')
+        plt.ylabel('tauint')
+        length = self.max_binsize
+        x = np.arange(length) + 1
+        # self.dvalue is a plain list; convert it to an array before the elementwise arithmetic
+        dval = np.asarray(self.dvalue)
+        tauint = (dval / dval[0]) ** 2 / 2
+        yerr = np.sqrt(((2 * (dval / dval[0]) ** 2 * np.sqrt(2 * x / self.N[0])) / 2) ** 2 + ((2 * (dval / dval[0]) ** 2 * np.sqrt(2 / self.N[0])) / 2) ** 2)
+        plt.errorbar(x, tauint, yerr=yerr, linewidth=1, capsize=2)
+        plt.xlim(0.5, length + 0.5)
+        plt.title('Tauint')
+        plt.show()
+
+    def plot_history(self):
+        n = self.N[0]
+        x = np.arange(n)
+        # Reconstruct the original (unbinned) samples from the binsize-1 jackknife replicas
+        mean = np.mean(self.jacks[0])
+        y = n * mean - (n - 1) * self.jacks[0]
+        plt.errorbar(x, y, fmt='.', markersize=3)
+        plt.xlim(-0.5, n - 0.5)
+        plt.show()
+
+    def dump(self, name, **kwargs):
+        """Dump the Jack to a pickle file 'name'.
+
+        Keyword arguments:
+        path -- specifies a custom path for the file (default '.')
+        """
+        if 'path' in kwargs:
+            file_name = kwargs.get('path') + '/' + name + '.p'
+        else:
+            file_name = name + '.p'
+        with open(file_name, 'wb') as fb:
+            pickle.dump(self, fb)
+
+ +
+ + + +
+
Jack(value, jacks)
+ +
+ View Source +
    def __init__(self, value, jacks):
+        self.jacks = jacks
+        self.N = list(map(np.size, self.jacks))
+        self.max_binsize = len(self.N)
+        self.value = value  # list(map(np.mean, self.jacks))
+        self.dvalue = list(map(_jack_error, self.jacks))
+
+ +
+ + + +
+
+
def print(self, **kwargs):
+ +
+ View Source +
    def print(self, **kwargs):
+        """Print basic properties of the Jack."""
+
+        if 'binsize' in kwargs:
+            b = kwargs.get('binsize') - 1
+            if b == -1:
+                b = 0
+            if not isinstance(b, int):
+                raise TypeError('binsize has to be integer')
+            if b + 1 > self.max_binsize:
+                raise Exception('Chosen binsize not calculated')
+        else:
+            b = 0
+
+        print('Result:\t %3.8e +/- %3.8e +/- %3.8e (%3.3f%%)' % (self.value, self.dvalue[b], self.dvalue[b] * np.sqrt(2 * b / self.N[0]), np.abs(self.dvalue[b] / self.value * 100)))
+
+ +
+ +

Print basic properties of the Jack.

+
+ + +
+
+
def plot_tauint(self):
+ +
+ View Source +
    def plot_tauint(self):
+        plt.xlabel('binsize')
+        plt.ylabel('tauint')
+        length = self.max_binsize
+        x = np.arange(length) + 1
+        # self.dvalue is a plain list; convert it to an array before the elementwise arithmetic
+        dval = np.asarray(self.dvalue)
+        tauint = (dval / dval[0]) ** 2 / 2
+        yerr = np.sqrt(((2 * (dval / dval[0]) ** 2 * np.sqrt(2 * x / self.N[0])) / 2) ** 2 + ((2 * (dval / dval[0]) ** 2 * np.sqrt(2 / self.N[0])) / 2) ** 2)
+        plt.errorbar(x, tauint, yerr=yerr, linewidth=1, capsize=2)
+        plt.xlim(0.5, length + 0.5)
+        plt.title('Tauint')
+        plt.show()
+
+ +
+ + + +
+
+
def plot_history(self):
+ +
+ View Source +
    def plot_history(self):
+        n = self.N[0]
+        x = np.arange(n)
+        # Reconstruct the original (unbinned) samples from the binsize-1 jackknife replicas
+        mean = np.mean(self.jacks[0])
+        y = n * mean - (n - 1) * self.jacks[0]
+        plt.errorbar(x, y, fmt='.', markersize=3)
+        plt.xlim(-0.5, n - 0.5)
+        plt.show()
+
+ +
+ + + +
+
+
def dump(self, name, **kwargs):
+ +
+ View Source +
    def dump(self, name, **kwargs):
+        """Dump the Jack to a pickle file 'name'.
+
+        Keyword arguments:
+        path -- specifies a custom path for the file (default '.')
+        """
+        if 'path' in kwargs:
+            file_name = kwargs.get('path') + '/' + name + '.p'
+        else:
+            file_name = name + '.p'
+        with open(file_name, 'wb') as fb:
+            pickle.dump(self, fb)
+
+ +
+ +

Dump the Jack to a pickle file 'name'.

Keyword arguments:
path -- specifies a custom path for the file (default '.')

+
+ + +
+
+
+
def generate_jack(obs, **kwargs):
+ +
+ View Source +
def generate_jack(obs, **kwargs):
+    full_data = []
+    for r, name in enumerate(obs.names):
+        if r == 0:
+            full_data = obs.deltas[name] + obs.r_values[name]
+        else:
+            full_data = np.append(full_data, obs.deltas[name] + obs.r_values[name])
+
+    jacks = []
+    if 'max_binsize' in kwargs:
+        max_b = kwargs.get('max_binsize')
+        if not isinstance(max_b, int):
+            raise TypeError('max_binsize has to be integer')
+    else:
+        max_b = 1
+
+    for b in range(max_b):
+        # binning if necessary
+        if b > 0:
+            n = full_data.size // (b + 1)
+            binned_data = np.zeros(n)
+            for i in range(n):
+                for j in range(b + 1):
+                    binned_data[i] += full_data[i * (b + 1) + j]
+                binned_data[i] /= (b + 1)
+        else:
+            binned_data = full_data
+            n = binned_data.size
+        # generate jacks from data
+        mean = np.mean(binned_data)
+        tmp_jacks = np.zeros(n)
+        for i in range(n):
+            tmp_jacks[i] = (n * mean - binned_data[i]) / (n - 1)
+        jacks.append(tmp_jacks)
+
+        # Value is not correctly reproduced here
+    return Jack(obs.value, jacks)
+
+ +
+ + + +
+
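generate_jack carries no docstring; a minimal sketch of the intended workflow (my_obs stands for any existing Obs):

jack = generate_jack(my_obs, max_binsize=10)
jack.print(binsize=4)   # error estimate from jackknife replicas at binsize 4
jack.plot_tauint()      # scaling of the error estimate with the binsize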
+
def derived_jack(func, data, **kwargs):
+ +
+ View Source +
def derived_jack(func, data, **kwargs):
+    """Construct a derived Jack according to func(data, **kwargs).
+
+    Parameters
+    ----------
+    func -- arbitrary function of the form func(data, **kwargs). For the automatic differentiation to work,
+            all numpy functions have to have the autograd wrapper (use 'import autograd.numpy as np').
+    data -- list of Jacks, e.g. [jack1, jack2, jack3].
+
+    Notes
+    -----
+    For simple mathematical operations it can be practical to use anonymous functions.
+    For the ratio of two jacks one can e.g. use
+
+    new_jack = derived_jack(lambda x : x[0] / x[1], [jack1, jack2])
+
+    """
+
+    # Check shapes of data
+    if not all(x.N == data[0].N for x in data):
+        raise Exception('Error: Shape of data does not fit')
+
+    values = np.zeros(len(data))
+    for j, item in enumerate(data):
+        values[j] = item.value
+    new_value = func(values, **kwargs)
+
+    jacks = []
+    for b in range(data[0].max_binsize):
+        tmp_jacks = np.zeros(data[0].N[b])
+        for i in range(data[0].N[b]):
+            values = np.zeros(len(data))
+            for j, item in enumerate(data):
+                values[j] = item.jacks[b][i]
+            tmp_jacks[i] = func(values, **kwargs)
+        jacks.append(tmp_jacks)
+
+    return Jack(new_value, jacks)
+
+ +
+ +

Construct a derived Jack according to func(data, **kwargs).

Parameters
  • func -- arbitrary function of the form func(data, **kwargs). For the automatic differentiation to work, all numpy functions have to have the autograd wrapper (use 'import autograd.numpy as np').
  • data -- list of Jacks, e.g. [jack1, jack2, jack3].

Notes
For simple mathematical operations it can be practical to use anonymous functions.
For the ratio of two jacks one can e.g. use

new_jack = derived_jack(lambda x : x[0] / x[1], [jack1, jack2])

+
+ + +
+
+ + \ No newline at end of file diff --git a/docs/pyerrors/linalg.html b/docs/pyerrors/linalg.html new file mode 100644 index 00000000..f514df08 --- /dev/null +++ b/docs/pyerrors/linalg.html @@ -0,0 +1,1294 @@ + + + + + + + pyerrors.linalg API documentation + + + + + + + + + + + +
+
+

+pyerrors.linalg

+ + +
+ View Source +
#!/usr/bin/env python
+# coding: utf-8
+
+import numpy as np
+from autograd import jacobian
+import autograd.numpy as anp  # Thinly-wrapped numpy
+from .obs import derived_observable, CObs, Obs
+
+from functools import partial
+from autograd.extend import defvjp
+
+
+def derived_array(func, data, **kwargs):
+    """Construct a derived Obs according to func(data, **kwargs) of matrix value data
+    using automatic differentiation.
+
+    Parameters
+    ----------
+    func -- arbitrary function of the form func(data, **kwargs). For the
+            automatic differentiation to work, all numpy functions have to have
+            the autograd wrapper (use 'import autograd.numpy as anp').
+    data -- list of Obs, e.g. [obs1, obs2, obs3].
+
+    Keyword arguments
+    -----------------
+    man_grad -- manually supply a list or an array which contains the jacobian
+                of func. Use cautiously, supplying the wrong derivative will
+                not be intercepted.
+    """
+
+    data = np.asarray(data)
+    raveled_data = data.ravel()
+
+    # Workaround for matrix operations containing non Obs data
+    for i_data in raveled_data:
+        if isinstance(i_data, Obs):
+            first_name = i_data.names[0]
+            first_shape = i_data.shape[first_name]
+            break
+
+    for i in range(len(raveled_data)):
+        if isinstance(raveled_data[i], (int, float)):
+            raveled_data[i] = Obs([raveled_data[i] + np.zeros(first_shape)], [first_name])
+
+    n_obs = len(raveled_data)
+    new_names = sorted(set([y for x in [o.names for o in raveled_data] for y in x]))
+
+    new_shape = {}
+    for i_data in raveled_data:
+        for name in new_names:
+            tmp = i_data.shape.get(name)
+            if tmp is not None:
+                if new_shape.get(name) is None:
+                    new_shape[name] = tmp
+                else:
+                    if new_shape[name] != tmp:
+                        raise Exception('Shapes of ensemble', name, 'do not match.')
+    if data.ndim == 1:
+        values = np.array([o.value for o in data])
+    else:
+        values = np.vectorize(lambda x: x.value)(data)
+
+    new_values = func(values, **kwargs)
+
+    new_r_values = {}
+    for name in new_names:
+        tmp_values = np.zeros(n_obs)
+        for i, item in enumerate(raveled_data):
+            tmp = item.r_values.get(name)
+            if tmp is None:
+                tmp = item.value
+            tmp_values[i] = tmp
+        tmp_values = np.array(tmp_values).reshape(data.shape)
+        new_r_values[name] = func(tmp_values, **kwargs)
+
+    if 'man_grad' in kwargs:
+        deriv = np.asarray(kwargs.get('man_grad'))
+        if new_values.shape + data.shape != deriv.shape:
+            raise Exception('Manual derivative does not have correct shape.')
+    elif kwargs.get('num_grad') is True:
+        raise Exception('Multi mode currently not supported for numerical derivative')
+    else:
+        deriv = jacobian(func)(values, **kwargs)
+
+    final_result = np.zeros(new_values.shape, dtype=object)
+
+    d_extracted = {}
+    for name in new_names:
+        d_extracted[name] = []
+        for i_dat, dat in enumerate(data):
+            ens_length = dat.ravel()[0].shape[name]
+            d_extracted[name].append(np.array([o.deltas[name] for o in dat.reshape(np.prod(dat.shape))]).reshape(dat.shape + (ens_length, )))
+
+    for i_val, new_val in np.ndenumerate(new_values):
+        new_deltas = {}
+        for name in new_names:
+            ens_length = d_extracted[name][0].shape[-1]
+            new_deltas[name] = np.zeros(ens_length)
+            for i_dat, dat in enumerate(d_extracted[name]):
+                new_deltas[name] += np.tensordot(deriv[i_val + (i_dat, )], dat)
+
+        new_samples = []
+        new_means = []
+        for name in new_names:
+            new_samples.append(new_deltas[name])
+            new_means.append(new_r_values[name][i_val])
+
+        final_result[i_val] = Obs(new_samples, new_names, means=new_means)
+        final_result[i_val]._value = new_val
+
+    return final_result
+
+
+def matmul(*operands):
+    """Matrix multiply all operands.
+
+       Supports real and complex valued matrices and is faster compared to
+       standard multiplication via the @ operator.
+    """
+    if any(isinstance(o[0, 0], CObs) for o in operands):
+        extended_operands = []
+        for op in operands:
+            tmp = np.vectorize(lambda x: (np.real(x), np.imag(x)))(op)
+            extended_operands.append(tmp[0])
+            extended_operands.append(tmp[1])
+
+        def multi_dot(operands, part):
+            stack_r = operands[0]
+            stack_i = operands[1]
+            for op_r, op_i in zip(operands[2::2], operands[3::2]):
+                tmp_r = stack_r @ op_r - stack_i @ op_i
+                tmp_i = stack_r @ op_i + stack_i @ op_r
+
+                stack_r = tmp_r
+                stack_i = tmp_i
+
+            if part == 'Real':
+                return stack_r
+            else:
+                return stack_i
+
+        def multi_dot_r(operands):
+            return multi_dot(operands, 'Real')
+
+        def multi_dot_i(operands):
+            return multi_dot(operands, 'Imag')
+
+        Nr = derived_array(multi_dot_r, extended_operands)
+        Ni = derived_array(multi_dot_i, extended_operands)
+
+        res = np.empty_like(Nr)
+        for (n, m), entry in np.ndenumerate(Nr):
+            res[n, m] = CObs(Nr[n, m], Ni[n, m])
+
+        return res
+    else:
+        def multi_dot(operands):
+            stack = operands[0]
+            for op in operands[1:]:
+                stack = stack @ op
+            return stack
+        return derived_array(multi_dot, operands)
+
+
+def inv(x):
+    """Inverse of Obs or CObs valued matrices."""
+    return _mat_mat_op(anp.linalg.inv, x)
+
+
+def cholesky(x):
+    """Cholesky decompostion of Obs or CObs valued matrices."""
+    return _mat_mat_op(anp.linalg.cholesky, x)
+
+
+def scalar_mat_op(op, obs, **kwargs):
+    """Computes the matrix to scalar operation op to a given matrix of Obs."""
+    def _mat(x, **kwargs):
+        dim = int(np.sqrt(len(x)))
+        if np.sqrt(len(x)) != dim:
+            raise Exception('Input has to have dim**2 entries')
+
+        mat = []
+        for i in range(dim):
+            row = []
+            for j in range(dim):
+                row.append(x[j + dim * i])
+            mat.append(row)
+
+        return op(anp.array(mat))
+
+    if isinstance(obs, np.ndarray):
+        raveled_obs = (1 * (obs.ravel())).tolist()
+    elif isinstance(obs, list):
+        raveled_obs = obs
+    else:
+        raise TypeError('Improper type of input.')
+    return derived_observable(_mat, raveled_obs, **kwargs)
+
+
+def _mat_mat_op(op, obs, **kwargs):
+    """Computes the matrix to matrix operation op to a given matrix of Obs."""
+    # Use real representation to calculate matrix operations for complex matrices
+    if isinstance(obs.ravel()[0], CObs):
+        A = np.empty_like(obs)
+        B = np.empty_like(obs)
+        for (n, m), entry in np.ndenumerate(obs):
+            if hasattr(entry, 'real') and hasattr(entry, 'imag'):
+                A[n, m] = entry.real
+                B[n, m] = entry.imag
+            else:
+                A[n, m] = entry
+                B[n, m] = 0.0
+        big_matrix = np.block([[A, -B], [B, A]])
+        if kwargs.get('num_grad') is True:
+            op_big_matrix = _num_diff_mat_mat_op(op, big_matrix, **kwargs)
+        else:
+            op_big_matrix = derived_array(lambda x, **kwargs: op(x), [big_matrix])[0]
+        dim = op_big_matrix.shape[0]
+        op_A = op_big_matrix[0: dim // 2, 0: dim // 2]
+        op_B = op_big_matrix[dim // 2:, 0: dim // 2]
+        res = np.empty_like(op_A)
+        for (n, m), entry in np.ndenumerate(op_A):
+            res[n, m] = CObs(op_A[n, m], op_B[n, m])
+        return res
+    else:
+        if kwargs.get('num_grad') is True:
+            return _num_diff_mat_mat_op(op, obs, **kwargs)
+        return derived_array(lambda x, **kwargs: op(x), [obs])[0]
+
+
+def eigh(obs, **kwargs):
+    """Computes the eigenvalues and eigenvectors of a given hermitian matrix of Obs according to np.linalg.eigh."""
+    if kwargs.get('num_grad') is True:
+        return _num_diff_eigh(obs, **kwargs)
+    w = derived_observable(lambda x, **kwargs: anp.linalg.eigh(x)[0], obs)
+    v = derived_observable(lambda x, **kwargs: anp.linalg.eigh(x)[1], obs)
+    return w, v
+
+
+def eig(obs, **kwargs):
+    """Computes the eigenvalues of a given matrix of Obs according to np.linalg.eig."""
+    if kwargs.get('num_grad') is True:
+        return _num_diff_eig(obs, **kwargs)
+        # Note: Automatic differentiation of eig is implemented in the git of autograd
+        # but not yet released to PyPi (1.3)
+    w = derived_observable(lambda x, **kwargs: anp.real(anp.linalg.eig(x)[0]), obs)
+    return w
+
+
+def pinv(obs, **kwargs):
+    """Computes the Moore-Penrose pseudoinverse of a matrix of Obs."""
+    if kwargs.get('num_grad') is True:
+        return _num_diff_pinv(obs, **kwargs)
+    return derived_observable(lambda x, **kwargs: anp.linalg.pinv(x), obs)
+
+
+def svd(obs, **kwargs):
+    """Computes the singular value decomposition of a matrix of Obs."""
+    if kwargs.get('num_grad') is True:
+        return _num_diff_svd(obs, **kwargs)
+    u = derived_observable(lambda x, **kwargs: anp.linalg.svd(x, full_matrices=False)[0], obs)
+    s = derived_observable(lambda x, **kwargs: anp.linalg.svd(x, full_matrices=False)[1], obs)
+    vh = derived_observable(lambda x, **kwargs: anp.linalg.svd(x, full_matrices=False)[2], obs)
+    return (u, s, vh)
+
+
+def slogdet(obs, **kwargs):
+    """Computes the determinant of a matrix of Obs via np.linalg.slogdet."""
+    def _mat(x):
+        dim = int(np.sqrt(len(x)))
+        if np.sqrt(len(x)) != dim:
+            raise Exception('Input has to have dim**2 entries')
+
+        mat = []
+        for i in range(dim):
+            row = []
+            for j in range(dim):
+                row.append(x[j + dim * i])
+            mat.append(row)
+
+        (sign, logdet) = anp.linalg.slogdet(np.array(mat))
+        return sign * anp.exp(logdet)
+
+    if isinstance(obs, np.ndarray):
+        return derived_observable(_mat, (1 * (obs.ravel())).tolist(), **kwargs)
+    elif isinstance(obs, list):
+        return derived_observable(_mat, obs, **kwargs)
+    else:
+        raise TypeError('Improper type of input.')
+
+
+# Variants for numerical differentiation
+
+def _num_diff_mat_mat_op(op, obs, **kwargs):
+    """Computes the matrix to matrix operation op to a given matrix of Obs elementwise
+       which is suitable for numerical differentiation."""
+    def _mat(x, **kwargs):
+        dim = int(np.sqrt(len(x)))
+        if np.sqrt(len(x)) != dim:
+            raise Exception('Input has to have dim**2 entries')
+
+        mat = []
+        for i in range(dim):
+            row = []
+            for j in range(dim):
+                row.append(x[j + dim * i])
+            mat.append(row)
+
+        return op(np.array(mat))[kwargs.get('i')][kwargs.get('j')]
+
+    if isinstance(obs, np.ndarray):
+        raveled_obs = (1 * (obs.ravel())).tolist()
+    elif isinstance(obs, list):
+        raveled_obs = obs
+    else:
+        raise TypeError('Improper type of input.')
+
+    dim = int(np.sqrt(len(raveled_obs)))
+
+    res_mat = []
+    for i in range(dim):
+        row = []
+        for j in range(dim):
+            row.append(derived_observable(_mat, raveled_obs, i=i, j=j, **kwargs))
+        res_mat.append(row)
+
+    return np.array(res_mat) @ np.identity(dim)
+
+
+def _num_diff_eigh(obs, **kwargs):
+    """Computes the eigenvalues and eigenvectors of a given hermitian matrix of Obs according to np.linalg.eigh
+       elementwise which is suitable for numerical differentiation."""
+    def _mat(x, **kwargs):
+        dim = int(np.sqrt(len(x)))
+        if np.sqrt(len(x)) != dim:
+            raise Exception('Input has to have dim**2 entries')
+
+        mat = []
+        for i in range(dim):
+            row = []
+            for j in range(dim):
+                row.append(x[j + dim * i])
+            mat.append(row)
+
+        n = kwargs.get('n')
+        res = np.linalg.eigh(np.array(mat))[n]
+
+        if n == 0:
+            return res[kwargs.get('i')]
+        else:
+            return res[kwargs.get('i')][kwargs.get('j')]
+
+    if isinstance(obs, np.ndarray):
+        raveled_obs = (1 * (obs.ravel())).tolist()
+    elif isinstance(obs, list):
+        raveled_obs = obs
+    else:
+        raise TypeError('Improper type of input.')
+
+    dim = int(np.sqrt(len(raveled_obs)))
+
+    res_vec = []
+    for i in range(dim):
+        res_vec.append(derived_observable(_mat, raveled_obs, n=0, i=i, **kwargs))
+
+    res_mat = []
+    for i in range(dim):
+        row = []
+        for j in range(dim):
+            row.append(derived_observable(_mat, raveled_obs, n=1, i=i, j=j, **kwargs))
+        res_mat.append(row)
+
+    return (np.array(res_vec) @ np.identity(dim), np.array(res_mat) @ np.identity(dim))
+
+
+def _num_diff_eig(obs, **kwargs):
+    """Computes the eigenvalues of a given matrix of Obs according to np.linalg.eig
+       elementwise which is suitable for numerical differentiation."""
+    def _mat(x, **kwargs):
+        dim = int(np.sqrt(len(x)))
+        if np.sqrt(len(x)) != dim:
+            raise Exception('Input has to have dim**2 entries')
+
+        mat = []
+        for i in range(dim):
+            row = []
+            for j in range(dim):
+                row.append(x[j + dim * i])
+            mat.append(row)
+
+        n = kwargs.get('n')
+        res = np.linalg.eig(np.array(mat))[n]
+
+        if n == 0:
+            # Discard imaginary part of eigenvalue here
+            return np.real(res[kwargs.get('i')])
+        else:
+            return res[kwargs.get('i')][kwargs.get('j')]
+
+    if isinstance(obs, np.ndarray):
+        raveled_obs = (1 * (obs.ravel())).tolist()
+    elif isinstance(obs, list):
+        raveled_obs = obs
+    else:
+        raise TypeError('Improper type of input.')
+
+    dim = int(np.sqrt(len(raveled_obs)))
+
+    res_vec = []
+    for i in range(dim):
+        # Note: Automatic differentiation of eig is implemented in the git of autograd
+        # but not yet released to PyPi (1.3)
+        res_vec.append(derived_observable(_mat, raveled_obs, n=0, i=i, **kwargs))
+
+    return np.array(res_vec) @ np.identity(dim)
+
+
+def _num_diff_pinv(obs, **kwargs):
+    """Computes the Moore-Penrose pseudoinverse of a matrix of Obs elementwise which is suitable
+       for numerical differentiation."""
+    def _mat(x, **kwargs):
+        shape = kwargs.get('shape')
+
+        mat = []
+        for i in range(shape[0]):
+            row = []
+            for j in range(shape[1]):
+                row.append(x[j + shape[1] * i])
+            mat.append(row)
+
+        return np.linalg.pinv(np.array(mat))[kwargs.get('i')][kwargs.get('j')]
+
+    if isinstance(obs, np.ndarray):
+        shape = obs.shape
+        raveled_obs = (1 * (obs.ravel())).tolist()
+    else:
+        raise TypeError('Improper type of input.')
+
+    res_mat = []
+    for i in range(shape[1]):
+        row = []
+        for j in range(shape[0]):
+            row.append(derived_observable(_mat, raveled_obs, shape=shape, i=i, j=j, **kwargs))
+        res_mat.append(row)
+
+    return np.array(res_mat) @ np.identity(shape[0])
+
+
+def _num_diff_svd(obs, **kwargs):
+    """Computes the singular value decomposition of a matrix of Obs elementwise which
+       is suitable for numerical differentiation."""
+    def _mat(x, **kwargs):
+        shape = kwargs.get('shape')
+
+        mat = []
+        for i in range(shape[0]):
+            row = []
+            for j in range(shape[1]):
+                row.append(x[j + shape[1] * i])
+            mat.append(row)
+
+        res = np.linalg.svd(np.array(mat), full_matrices=False)
+
+        if kwargs.get('n') == 1:
+            return res[1][kwargs.get('i')]
+        else:
+            return res[kwargs.get('n')][kwargs.get('i')][kwargs.get('j')]
+
+    if isinstance(obs, np.ndarray):
+        shape = obs.shape
+        raveled_obs = (1 * (obs.ravel())).tolist()
+    else:
+        raise TypeError('Improper type of input.')
+
+    mid_index = min(shape[0], shape[1])
+
+    res_mat0 = []
+    for i in range(shape[0]):
+        row = []
+        for j in range(mid_index):
+            row.append(derived_observable(_mat, raveled_obs, shape=shape, n=0, i=i, j=j, **kwargs))
+        res_mat0.append(row)
+
+    res_mat1 = []
+    for i in range(mid_index):
+        res_mat1.append(derived_observable(_mat, raveled_obs, shape=shape, n=1, i=i, **kwargs))
+
+    res_mat2 = []
+    for i in range(mid_index):
+        row = []
+        for j in range(shape[1]):
+            row.append(derived_observable(_mat, raveled_obs, shape=shape, n=2, i=i, j=j, **kwargs))
+        res_mat2.append(row)
+
+    return (np.array(res_mat0) @ np.identity(mid_index), np.array(res_mat1) @ np.identity(mid_index), np.array(res_mat2) @ np.identity(shape[1]))
+
+
+# This code block is directly taken from the current master branch of autograd and remains
+# only until the new version is released on PyPi
+_dot = partial(anp.einsum, '...ij,...jk->...ik')
+
+
+# batched diag
+def _diag(a):
+    return anp.eye(a.shape[-1]) * a
+
+
+# batched diagonal, similar to matrix_diag in tensorflow
+def _matrix_diag(a):
+    reps = anp.array(a.shape)
+    reps[:-1] = 1
+    reps[-1] = a.shape[-1]
+    newshape = list(a.shape) + [a.shape[-1]]
+    return _diag(anp.tile(a, reps).reshape(newshape))
+
+# https://arxiv.org/pdf/1701.00392.pdf Eq(4.77)
+# Note the formula from Sec3.1 in https://people.maths.ox.ac.uk/gilesm/files/NA-08-01.pdf is incomplete
+
+
+def grad_eig(ans, x):
+    """Gradient of a general square (complex valued) matrix"""
+    e, u = ans  # eigenvalues as 1d array, eigenvectors in columns
+    n = e.shape[-1]
+
+    def vjp(g):
+        ge, gu = g
+        ge = _matrix_diag(ge)
+        f = 1 / (e[..., anp.newaxis, :] - e[..., :, anp.newaxis] + 1.e-20)
+        f -= _diag(f)
+        ut = anp.swapaxes(u, -1, -2)
+        r1 = f * _dot(ut, gu)
+        r2 = -f * (_dot(_dot(ut, anp.conj(u)), anp.real(_dot(ut, gu)) * anp.eye(n)))
+        r = _dot(_dot(anp.linalg.inv(ut), ge + r1 + r2), ut)
+        if not anp.iscomplexobj(x):
+            r = anp.real(r)
+            # the derivative is still complex for real input (imaginary delta is allowed), real output
+            # but the derivative should be real in real input case when imaginary delta is forbidden
+        return r
+    return vjp
+
+
+defvjp(anp.linalg.eig, grad_eig)
+# End of the code block from autograd.master
+
+ +
+ +
+
+
def derived_array(func, data, **kwargs):
+ +
+ View Source +
def derived_array(func, data, **kwargs):
+    """Construct a derived Obs according to func(data, **kwargs) of matrix value data
+    using automatic differentiation.
+
+    Parameters
+    ----------
+    func -- arbitrary function of the form func(data, **kwargs). For the
+            automatic differentiation to work, all numpy functions have to have
+            the autograd wrapper (use 'import autograd.numpy as anp').
+    data -- list of Obs, e.g. [obs1, obs2, obs3].
+
+    Keyword arguments
+    -----------------
+    man_grad -- manually supply a list or an array which contains the jacobian
+                of func. Use cautiously, supplying the wrong derivative will
+                not be intercepted.
+    """
+
+    data = np.asarray(data)
+    raveled_data = data.ravel()
+
+    # Workaround for matrix operations containing non Obs data
+    for i_data in raveled_data:
+        if isinstance(i_data, Obs):
+            first_name = i_data.names[0]
+            first_shape = i_data.shape[first_name]
+            break
+
+    for i in range(len(raveled_data)):
+        if isinstance(raveled_data[i], (int, float)):
+            raveled_data[i] = Obs([raveled_data[i] + np.zeros(first_shape)], [first_name])
+
+    n_obs = len(raveled_data)
+    new_names = sorted(set([y for x in [o.names for o in raveled_data] for y in x]))
+
+    new_shape = {}
+    for i_data in raveled_data:
+        for name in new_names:
+            tmp = i_data.shape.get(name)
+            if tmp is not None:
+                if new_shape.get(name) is None:
+                    new_shape[name] = tmp
+                else:
+                    if new_shape[name] != tmp:
+                        raise Exception('Shapes of ensemble', name, 'do not match.')
+    if data.ndim == 1:
+        values = np.array([o.value for o in data])
+    else:
+        values = np.vectorize(lambda x: x.value)(data)
+
+    new_values = func(values, **kwargs)
+
+    new_r_values = {}
+    for name in new_names:
+        tmp_values = np.zeros(n_obs)
+        for i, item in enumerate(raveled_data):
+            tmp = item.r_values.get(name)
+            if tmp is None:
+                tmp = item.value
+            tmp_values[i] = tmp
+        tmp_values = np.array(tmp_values).reshape(data.shape)
+        new_r_values[name] = func(tmp_values, **kwargs)
+
+    if 'man_grad' in kwargs:
+        deriv = np.asarray(kwargs.get('man_grad'))
+        if new_values.shape + data.shape != deriv.shape:
+            raise Exception('Manual derivative does not have correct shape.')
+    elif kwargs.get('num_grad') is True:
+        raise Exception('Multi mode currently not supported for numerical derivative')
+    else:
+        deriv = jacobian(func)(values, **kwargs)
+
+    final_result = np.zeros(new_values.shape, dtype=object)
+
+    d_extracted = {}
+    for name in new_names:
+        d_extracted[name] = []
+        for i_dat, dat in enumerate(data):
+            ens_length = dat.ravel()[0].shape[name]
+            d_extracted[name].append(np.array([o.deltas[name] for o in dat.reshape(np.prod(dat.shape))]).reshape(dat.shape + (ens_length, )))
+
+    for i_val, new_val in np.ndenumerate(new_values):
+        new_deltas = {}
+        for name in new_names:
+            ens_length = d_extracted[name][0].shape[-1]
+            new_deltas[name] = np.zeros(ens_length)
+            for i_dat, dat in enumerate(d_extracted[name]):
+                new_deltas[name] += np.tensordot(deriv[i_val + (i_dat, )], dat)
+
+        new_samples = []
+        new_means = []
+        for name in new_names:
+            new_samples.append(new_deltas[name])
+            new_means.append(new_r_values[name][i_val])
+
+        final_result[i_val] = Obs(new_samples, new_names, means=new_means)
+        final_result[i_val]._value = new_val
+
+    return final_result
+
+ +
+ +

Construct a derived Obs according to func(data, **kwargs) of matrix valued data
using automatic differentiation.

Parameters
  • func -- arbitrary function of the form func(data, **kwargs). For the automatic differentiation to work, all numpy functions have to have the autograd wrapper (use 'import autograd.numpy as anp').
  • data -- list of Obs, e.g. [obs1, obs2, obs3].

Keyword arguments
man_grad -- manually supply a list or an array which contains the jacobian
            of func. Use cautiously, supplying the wrong derivative will
            not be intercepted.

+
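A minimal sketch that mirrors how this module uses derived_array internally (mat stands for a square numpy object array of Obs):

import autograd.numpy as anp

# propagate the errors of all matrix elements through the matrix inverse
mat_inv = derived_array(lambda x, **kwargs: anp.linalg.inv(x), [mat])[0]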
+ + +
+
+
def matmul(*operands):
+ +
+ View Source +
def matmul(*operands):
+    """Matrix multiply all operands.
+
+       Supports real and complex valued matrices and is faster compared to
+       standard multiplication via the @ operator.
+    """
+    if any(isinstance(o[0, 0], CObs) for o in operands):
+        extended_operands = []
+        for op in operands:
+            tmp = np.vectorize(lambda x: (np.real(x), np.imag(x)))(op)
+            extended_operands.append(tmp[0])
+            extended_operands.append(tmp[1])
+
+        def multi_dot(operands, part):
+            stack_r = operands[0]
+            stack_i = operands[1]
+            for op_r, op_i in zip(operands[2::2], operands[3::2]):
+                tmp_r = stack_r @ op_r - stack_i @ op_i
+                tmp_i = stack_r @ op_i + stack_i @ op_r
+
+                stack_r = tmp_r
+                stack_i = tmp_i
+
+            if part == 'Real':
+                return stack_r
+            else:
+                return stack_i
+
+        def multi_dot_r(operands):
+            return multi_dot(operands, 'Real')
+
+        def multi_dot_i(operands):
+            return multi_dot(operands, 'Imag')
+
+        Nr = derived_array(multi_dot_r, extended_operands)
+        Ni = derived_array(multi_dot_i, extended_operands)
+
+        res = np.empty_like(Nr)
+        for (n, m), entry in np.ndenumerate(Nr):
+            res[n, m] = CObs(Nr[n, m], Ni[n, m])
+
+        return res
+    else:
+        def multi_dot(operands):
+            stack = operands[0]
+            for op in operands[1:]:
+                stack = stack @ op
+            return stack
+        return derived_array(multi_dot, operands)
+
+ +
+ +

Matrix multiply all operands.

Supports real and complex valued matrices and is faster compared to
standard multiplication via the @ operator.

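A usage sketch (a and b stand for numpy object arrays of Obs or CObs with matching shapes):

prod = matmul(a, b)      # one derived_array call instead of nested @ products
chain = matmul(a, b, a)  # longer chains are multiplied in a single pass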
+
+ + +
+
+
def inv(x):
+ +
+ View Source +
def inv(x):
+    """Inverse of Obs or CObs valued matrices."""
+    return _mat_mat_op(anp.linalg.inv, x)
+
+ +
+ +

Inverse of Obs or CObs valued matrices.

+
+ + +
+
+
def cholesky(x):
+ +
+ View Source +
def cholesky(x):
+    """Cholesky decompostion of Obs or CObs valued matrices."""
+    return _mat_mat_op(anp.linalg.cholesky, x)
+
+ +
+ +

Cholesky decomposition of Obs or CObs valued matrices.

+
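A short sketch covering both operations (mat stands for a hermitian, positive-definite matrix of Obs):

mat_inv = inv(mat)    # matrix of Obs with propagated errors
chol = cholesky(mat)  # lower-triangular Cholesky factor as a matrix of Obs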
+ + +
+
+
def scalar_mat_op(op, obs, **kwargs):
+ +
+ View Source +
def scalar_mat_op(op, obs, **kwargs):
+    """Computes the matrix to scalar operation op to a given matrix of Obs."""
+    def _mat(x, **kwargs):
+        dim = int(np.sqrt(len(x)))
+        if np.sqrt(len(x)) != dim:
+            raise Exception('Input has to have dim**2 entries')
+
+        mat = []
+        for i in range(dim):
+            row = []
+            for j in range(dim):
+                row.append(x[j + dim * i])
+            mat.append(row)
+
+        return op(anp.array(mat))
+
+    if isinstance(obs, np.ndarray):
+        raveled_obs = (1 * (obs.ravel())).tolist()
+    elif isinstance(obs, list):
+        raveled_obs = obs
+    else:
+        raise TypeError('Improper type of input.')
+    return derived_observable(_mat, raveled_obs, **kwargs)
+
+ +
+ +

Applies the matrix-to-scalar operation op to a given matrix of Obs.

+
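A sketch (mat stands for a square matrix of Obs; anp.linalg.det carries an autograd gradient):

import autograd.numpy as anp

det = scalar_mat_op(anp.linalg.det, mat)  # determinant as a single Obs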
+ + +
+
+
def eigh(obs, **kwargs):
+ +
+ View Source +
def eigh(obs, **kwargs):
+    """Computes the eigenvalues and eigenvectors of a given hermitian matrix of Obs according to np.linalg.eigh."""
+    if kwargs.get('num_grad') is True:
+        return _num_diff_eigh(obs, **kwargs)
+    w = derived_observable(lambda x, **kwargs: anp.linalg.eigh(x)[0], obs)
+    v = derived_observable(lambda x, **kwargs: anp.linalg.eigh(x)[1], obs)
+    return w, v
+
+ +
+ +

Computes the eigenvalues and eigenvectors of a given hermitian matrix of Obs according to np.linalg.eigh.

+
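A sketch (mat stands for a hermitian matrix of Obs):

w, v = eigh(mat)     # eigenvalues w (ascending) and eigenvectors v, both carrying errors
w[0].gamma_method()  # error analysis of the lowest eigenvalue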
+ + +
+
+
def eig(obs, **kwargs):
+ +
+ View Source +
def eig(obs, **kwargs):
+    """Computes the eigenvalues of a given matrix of Obs according to np.linalg.eig."""
+    if kwargs.get('num_grad') is True:
+        return _num_diff_eig(obs, **kwargs)
+        # Note: Automatic differentiation of eig is implemented in the git of autograd
+        # but not yet released to PyPi (1.3)
+    w = derived_observable(lambda x, **kwargs: anp.real(anp.linalg.eig(x)[0]), obs)
+    return w
+
+ +
+ +

Computes the eigenvalues of a given matrix of Obs according to np.linalg.eig.

+
+ + +
+
+
def pinv(obs, **kwargs):
+ +
+ View Source +
def pinv(obs, **kwargs):
+    """Computes the Moore-Penrose pseudoinverse of a matrix of Obs."""
+    if kwargs.get('num_grad') is True:
+        return _num_diff_pinv(obs, **kwargs)
+    return derived_observable(lambda x, **kwargs: anp.linalg.pinv(x), obs)
+
+ +
+ +

Computes the Moore-Penrose pseudoinverse of a matrix of Obs.

+
+ + +
+
+
def svd(obs, **kwargs):
+ +
+ View Source +
def svd(obs, **kwargs):
+    """Computes the singular value decomposition of a matrix of Obs."""
+    if kwargs.get('num_grad') is True:
+        return _num_diff_svd(obs, **kwargs)
+    u = derived_observable(lambda x, **kwargs: anp.linalg.svd(x, full_matrices=False)[0], obs)
+    s = derived_observable(lambda x, **kwargs: anp.linalg.svd(x, full_matrices=False)[1], obs)
+    vh = derived_observable(lambda x, **kwargs: anp.linalg.svd(x, full_matrices=False)[2], obs)
+    return (u, s, vh)
+
+ +
+ +

Computes the singular value decomposition of a matrix of Obs.

+
+ + +
+
+
def slogdet(obs, **kwargs):
+ +
+ View Source +
def slogdet(obs, **kwargs):
+    """Computes the determinant of a matrix of Obs via np.linalg.slogdet."""
+    def _mat(x):
+        dim = int(np.sqrt(len(x)))
+        if np.sqrt(len(x)) != dim:
+            raise Exception('Input has to have dim**2 entries')
+
+        mat = []
+        for i in range(dim):
+            row = []
+            for j in range(dim):
+                row.append(x[j + dim * i])
+            mat.append(row)
+
+        (sign, logdet) = anp.linalg.slogdet(np.array(mat))
+        return sign * anp.exp(logdet)
+
+    if isinstance(obs, np.ndarray):
+        return derived_observable(_mat, (1 * (obs.ravel())).tolist(), **kwargs)
+    elif isinstance(obs, list):
+        return derived_observable(_mat, obs, **kwargs)
+    else:
+        raise TypeError('Improper type of input.')
+
+ +
+ +

Computes the determinant of a matrix of Obs via np.linalg.slogdet.

+
+ + +
+
+
def grad_eig(ans, x):
+ +
+ View Source +
def grad_eig(ans, x):
+    """Gradient of a general square (complex valued) matrix"""
+    e, u = ans  # eigenvalues as 1d array, eigenvectors in columns
+    n = e.shape[-1]
+
+    def vjp(g):
+        ge, gu = g
+        ge = _matrix_diag(ge)
+        f = 1 / (e[..., anp.newaxis, :] - e[..., :, anp.newaxis] + 1.e-20)
+        f -= _diag(f)
+        ut = anp.swapaxes(u, -1, -2)
+        r1 = f * _dot(ut, gu)
+        r2 = -f * (_dot(_dot(ut, anp.conj(u)), anp.real(_dot(ut, gu)) * anp.eye(n)))
+        r = _dot(_dot(anp.linalg.inv(ut), ge + r1 + r2), ut)
+        if not anp.iscomplexobj(x):
+            r = anp.real(r)
+            # the derivative is still complex for real input (imaginary delta is allowed), real output
+            # but the derivative should be real in real input case when imaginary delta is forbidden
+        return r
+    return vjp
+
+ +
+ +

Gradient of a general square (complex valued) matrix

+
+ + +
+
+ + \ No newline at end of file diff --git a/docs/pyerrors/misc.html b/docs/pyerrors/misc.html new file mode 100644 index 00000000..40db85ac --- /dev/null +++ b/docs/pyerrors/misc.html @@ -0,0 +1,341 @@ + + + + + + + pyerrors.misc API documentation + + + + + + + + + + + +
+
+

+pyerrors.misc

+ + +
+ View Source +
#!/usr/bin/env python
+# coding: utf-8
+
+import numpy as np
+from .obs import Obs
+
+
+def gen_correlated_data(means, cov, name, tau=0.5, samples=1000):
+    """ Generate observables with given covariance and autocorrelation times.
+
+    Arguments
+    -----------------
+    means -- list containing the mean value of each observable.
+    cov -- covariance matrix for the data to be generated.
+    name -- ensemble name for the data to be generated.
+    tau -- can either be a real number or a list with an entry for
+           every dataset.
+    samples -- number of samples to be generated for each observable.
+    """
+
+    assert len(means) == cov.shape[-1]
+    tau = np.asarray(tau)
+    if np.min(tau) < 0.5:
+        raise Exception('All integrated autocorrelations have to be >= 0.5.')
+
+    a = (2 * tau - 1) / (2 * tau + 1)
+    rand = np.random.multivariate_normal(np.zeros_like(means), cov * samples, samples)
+
+    # Normalize samples such that sample variance matches input
+    norm = np.array([np.var(o, ddof=1) / samples for o in rand.T])
+    rand = rand @ np.diag(np.sqrt(np.diag(cov))) @ np.diag(1 / np.sqrt(norm))
+
+    data = [rand[0]]
+    for i in range(1, samples):
+        data.append(np.sqrt(1 - a ** 2) * rand[i] + a * data[-1])
+    corr_data = np.array(data) - np.mean(data, axis=0) + means
+    return [Obs([dat], [name]) for dat in corr_data.T]
+
+ +
+ +
+
+
def gen_correlated_data(means, cov, name, tau=0.5, samples=1000):
+ +
+ View Source +
def gen_correlated_data(means, cov, name, tau=0.5, samples=1000):
+    """ Generate observables with given covariance and autocorrelation times.
+
+    Arguments
+    -----------------
+    means -- list containing the mean value of each observable.
+    cov -- covariance matrix for the data to be generated.
+    name -- ensemble name for the data to be generated.
+    tau -- can either be a real number or a list with an entry for
+           every dataset.
+    samples -- number of samples to be generated for each observable.
+    """
+
+    assert len(means) == cov.shape[-1]
+    tau = np.asarray(tau)
+    if np.min(tau) < 0.5:
+        raise Exception('All integrated autocorrelations have to be >= 0.5.')
+
+    a = (2 * tau - 1) / (2 * tau + 1)
+    rand = np.random.multivariate_normal(np.zeros_like(means), cov * samples, samples)
+
+    # Normalize samples such that sample variance matches input
+    norm = np.array([np.var(o, ddof=1) / samples for o in rand.T])
+    rand = rand @ np.diag(np.sqrt(np.diag(cov))) @ np.diag(1 / np.sqrt(norm))
+
+    data = [rand[0]]
+    for i in range(1, samples):
+        data.append(np.sqrt(1 - a ** 2) * rand[i] + a * data[-1])
+    corr_data = np.array(data) - np.mean(data, axis=0) + means
+    return [Obs([dat], [name]) for dat in corr_data.T]
+
+ +
+ +

Generate observables with given covariance and autocorrelation times.

Arguments
means -- list containing the mean value of each observable.
cov -- covariance matrix for the data to be generated.
name -- ensemble name for the data to be generated.
tau -- can either be a real number or a list with an entry for
       every dataset.
samples -- number of samples to be generated for each observable.

+
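A usage sketch with hypothetical numbers:

import numpy as np

cov = np.array([[0.5, 0.1], [0.1, 0.25]])
obs1, obs2 = gen_correlated_data([1.0, 2.0], cov, 'test_ens', tau=[2.0, 4.0], samples=1000)
obs1.gamma_method()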
+ + +
+
+ + \ No newline at end of file diff --git a/docs/pyerrors/mpm.html b/docs/pyerrors/mpm.html new file mode 100644 index 00000000..bdbb53de --- /dev/null +++ b/docs/pyerrors/mpm.html @@ -0,0 +1,519 @@ + + + + + + + pyerrors.mpm API documentation + + + + + + + + + + + +
+
+

+pyerrors.mpm

+ + +
+ View Source +
#!/usr/bin/env python
+# coding: utf-8
+
+import numpy as np
+import scipy.linalg
+from .obs import Obs
+from .linalg import svd, eig, pinv
+
+
+def matrix_pencil_method(corrs, k=1, p=None, **kwargs):
+    """ Matrix pencil method to extract k energy levels from data
+
+    Implementation of the matrix pencil method based on
+    eq. (2.17) of Y. Hua, T. K. Sarkar, IEEE Trans. Acoust. 38, 814-824 (1990)
+
+    Parameters
+    ----------
+    data -- can be a list of Obs for the analysis of a single correlator, or a list of lists
+            of Obs if several correlators are to be analyzed at once.
+    k -- Number of states to extract (default 1).
+    p -- matrix pencil parameter which filters noise. The optimal value is expected between
+         len(data)/3 and 2*len(data)/3. The computation is more expensive the closer p is
+         to len(data)/2 but could possibly suppress more noise (default len(data)//2).
+    """
+    if isinstance(corrs[0], Obs):
+        data = [corrs]
+    else:
+        data = corrs
+
+    lengths = [len(d) for d in data]
+    if lengths.count(lengths[0]) != len(lengths):
+        raise Exception('All datasets have to have the same length.')
+
+    data_sets = len(data)
+    n_data = len(data[0])
+
+    if p is None:
+        p = max(n_data // 2, k)
+    if n_data <= p:
+        raise Exception('The pencil p has to be smaller than the number of data samples.')
+    if p < k or n_data - p < k:
+        raise Exception('Cannot extract', k, 'energy levels with p=', p, 'and N-p=', n_data - p)
+
+    # Construct the hankel matrices
+    matrix = []
+    for n in range(data_sets):
+        matrix.append(scipy.linalg.hankel(data[n][:n_data - p], data[n][n_data - p - 1:]))
+    matrix = np.array(matrix)
+    # Construct y1 and y2
+    y1 = np.concatenate(matrix[:, :, :p])
+    y2 = np.concatenate(matrix[:, :, 1:])
+    # Apply SVD to y2
+    u, s, vh = svd(y2, **kwargs)
+    # Construct z from y1 and SVD of y2, setting all singular values beyond the kth to zero
+    z = np.diag(1. / s[:k]) @ u[:, :k].T @ y1 @ vh.T[:, :k]
+    # Return the sorted logarithms of the real eigenvalues as Obs
+    energy_levels = np.log(np.abs(eig(z, **kwargs)))
+    return sorted(energy_levels, key=lambda x: abs(x.value))
+
+
+def matrix_pencil_method_old(data, p, noise_level=None, verbose=1, **kwargs):
+    """ Older impleentation of the matrix pencil method with pencil p on given data to
+        extract energy levels.
+
+    Parameters
+    ----------
+    data -- lists of Obs, where the nth entry is considered to be the correlation function
+            at x0=n+offset.
+    p -- matrix pencil parameter which corresponds to the number of energy levels to extract.
+         higher values for p can help decreasing noise.
+    noise_level -- If this argument is not None an additional prefiltering via singular
+                   value decomposition is performed in which all singular values below 10^(-noise_level)
+                   times the largest singular value are discarded. This increases the computation time.
+    verbose -- if larger than zero details about the noise filtering are printed to stdout
+               (default 1)
+
+    """
+    n_data = len(data)
+    if n_data <= p:
+        raise Exception('The pencil p has to be smaller than the number of data samples.')
+
+    matrix = scipy.linalg.hankel(data[:n_data - p], data[n_data - p - 1:]) @ np.identity(p + 1)
+
+    if noise_level is not None:
+        u, s, vh = svd(matrix)
+
+        s_values = np.vectorize(lambda x: x.value)(s)
+        if verbose > 0:
+            print('Singular values: ', s_values)
+        digit = np.argwhere(s_values / s_values[0] < 10.0**(-noise_level))
+        if digit.size == 0:
+            digit = len(s_values)
+        else:
+            digit = int(digit[0])
+        if verbose > 0:
+            print('Consider only', digit, 'out of', len(s), 'singular values')
+
+        new_matrix = u[:, :digit] * s[:digit] @ vh[:digit, :]
+        y1 = new_matrix[:, :-1]
+        y2 = new_matrix[:, 1:]
+    else:
+        y1 = matrix[:, :-1]
+        y2 = matrix[:, 1:]
+
+    # Moore–Penrose pseudoinverse
+    pinv_y1 = pinv(y1)
+
+    e = eig((pinv_y1 @ y2), **kwargs)
+    energy_levels = -np.log(np.abs(e))
+    return sorted(energy_levels, key=lambda x: abs(x.value))
+
+ +
+ +
+
+
def matrix_pencil_method(corrs, k=1, p=None, **kwargs):
+ +
+ View Source +
def matrix_pencil_method(corrs, k=1, p=None, **kwargs):
+    """ Matrix pencil method to extract k energy levels from data
+
+    Implementation of the matrix pencil method based on
+    eq. (2.17) of Y. Hua, T. K. Sarkar, IEEE Trans. Acoust. 38, 814-824 (1990)
+
+    Parameters
+    ----------
+    data -- can be a list of Obs for the analysis of a single correlator, or a list of lists
+            of Obs if several correlators are to be analyzed at once.
+    k -- Number of states to extract (default 1).
+    p -- matrix pencil parameter which filters noise. The optimal value is expected between
+         len(data)/3 and 2*len(data)/3. The computation is more expensive the closer p is
+         to len(data)/2 but could possibly suppress more noise (default len(data)//2).
+    """
+    if isinstance(corrs[0], Obs):
+        data = [corrs]
+    else:
+        data = corrs
+
+    lengths = [len(d) for d in data]
+    if lengths.count(lengths[0]) != len(lengths):
+        raise Exception('All datasets have to have the same length.')
+
+    data_sets = len(data)
+    n_data = len(data[0])
+
+    if p is None:
+        p = max(n_data // 2, k)
+    if n_data <= p:
+        raise Exception('The pencil p has to be smaller than the number of data samples.')
+    if p < k or n_data - p < k:
+        raise Exception('Cannot extract', k, 'energy levels with p=', p, 'and N-p=', n_data - p)
+
+    # Construct the Hankel matrices
+    matrix = []
+    for n in range(data_sets):
+        matrix.append(scipy.linalg.hankel(data[n][:n_data - p], data[n][n_data - p - 1:]))
+    matrix = np.array(matrix)
+    # Construct y1 and y2
+    y1 = np.concatenate(matrix[:, :, :p])
+    y2 = np.concatenate(matrix[:, :, 1:])
+    # Apply SVD to y2
+    u, s, vh = svd(y2, **kwargs)
+    # Construct z from y1 and SVD of y2, setting all singular values beyond the kth to zero
+    z = np.diag(1. / s[:k]) @ u[:, :k].T @ y1 @ vh.T[:, :k]
+    # Return the sorted logarithms of the real eigenvalues as Obs
+    energy_levels = np.log(np.abs(eig(z, **kwargs)))
+    return sorted(energy_levels, key=lambda x: abs(x.value))
+
+ +
+ +

Matrix pencil method to extract k energy levels from data

+ +

Implementation of the matrix pencil method based on eq. (2.17) of Y. Hua, T. K. Sarkar, IEEE Trans. Acoust. 38, 814-824 (1990)

+ +
Parameters
+ +
    +
  • corrs -- can be a list of Obs for the analysis of a single correlator, or a list of lists of Obs if several correlators are to be analyzed at once.
  • +
  • k -- Number of states to extract (default 1).
  • +
  • p -- matrix pencil parameter which filters noise. The optimal value is expected between len(data)/3 and 2*len(data)/3. The computation is more expensive the closer p is to len(data)/2 but could possibly suppress more noise (default len(data)//2).
  • +
+
+ + +
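
A minimal usage sketch (corr is a placeholder for a list of Obs representing a single correlator, obtained beforehand, e.g. via one of the input routines):

+ +
>>> import pyerrors as pe
+>>> energy_levels = pe.correlators.matrix_pencil_method(corr, k=2)  # corr: placeholder data
+>>> for level in energy_levels:
+...     level.gamma_method()  # error estimation for each extracted level
+...     print(level)
+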
+
+
#   + + + def + matrix_pencil_method_old(data, p, noise_level=None, verbose=1, **kwargs): +
+ +
+ View Source +
def matrix_pencil_method_old(data, p, noise_level=None, verbose=1, **kwargs):
+    """ Older impleentation of the matrix pencil method with pencil p on given data to
+        extract energy levels.
+
+    Parameters
+    ----------
+    data -- lists of Obs, where the nth entry is considered to be the correlation function
+            at x0=n+offset.
+    p -- matrix pencil parameter which corresponds to the number of energy levels to extract.
+         Higher values for p can help to reduce noise.
+    noise_level -- If this argument is not None an additional prefiltering via singular
+                   value decomposition is performed in which all singular values below 10^(-noise_level)
+                   times the largest singular value are discarded. This increases the computation time.
+    verbose -- if larger than zero details about the noise filtering are printed to stdout
+               (default 1)
+
+    """
+    n_data = len(data)
+    if n_data <= p:
+        raise Exception('The pencil p has to be smaller than the number of data samples.')
+
+    matrix = scipy.linalg.hankel(data[:n_data - p], data[n_data - p - 1:]) @ np.identity(p + 1)
+
+    if noise_level is not None:
+        u, s, vh = svd(matrix)
+
+        s_values = np.vectorize(lambda x: x.value)(s)
+        if verbose > 0:
+            print('Singular values: ', s_values)
+        digit = np.argwhere(s_values / s_values[0] < 10.0**(-noise_level))
+        if digit.size == 0:
+            digit = len(s_values)
+        else:
+            digit = int(digit[0])
+        if verbose > 0:
+            print('Consider only', digit, 'out of', len(s), 'singular values')
+
+        new_matrix = u[:, :digit] * s[:digit] @ vh[:digit, :]
+        y1 = new_matrix[:, :-1]
+        y2 = new_matrix[:, 1:]
+    else:
+        y1 = matrix[:, :-1]
+        y2 = matrix[:, 1:]
+
+    # Moore–Penrose pseudoinverse
+    pinv_y1 = pinv(y1)
+
+    e = eig((pinv_y1 @ y2), **kwargs)
+    energy_levels = -np.log(np.abs(e))
+    return sorted(energy_levels, key=lambda x: abs(x.value))
+
+ +
+ +

Older implementation of the matrix pencil method with pencil p on given data to extract energy levels.

+ +
Parameters
+ +
    +
  • data -- lists of Obs, where the nth entry is considered to be the correlation function at x0=n+offset.
  • +
  • p -- matrix pencil parameter which corresponds to the number of energy levels to extract. Higher values for p can help to reduce noise.
  • +
  • noise_level -- If this argument is not None an additional prefiltering via singular value decomposition is performed in which all singular values below 10^(-noise_level) times the largest singular value are discarded. This increases the computation time.
  • +
  • verbose -- if larger than zero details about the noise filtering are printed to stdout (default 1)
  • +
+
+ + +
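
The older interface can be used analogously; a sketch with the SVD prefiltering enabled (corr is again a hypothetical list of Obs):

+ +
>>> E = pe.correlators.matrix_pencil_method_old(corr, p=4, noise_level=3, verbose=0)
+>>> E[0].gamma_method()  # error estimate for the lowest extracted level
+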
+
+ + \ No newline at end of file diff --git a/docs/pyerrors/npr.html b/docs/pyerrors/npr.html new file mode 100644 index 00000000..0fb8a698 --- /dev/null +++ b/docs/pyerrors/npr.html @@ -0,0 +1,736 @@ + + + + + + + pyerrors.npr API documentation + + + + + + + + + + + +
+
+

+pyerrors.npr

+ + +
+ View Source +
import warnings
+import numpy as np
+from .linalg import inv, matmul
+from .dirac import gamma, gamma5
+
+
+L = None
+T = None
+
+
+class Npr_matrix(np.ndarray):
+
+    def __new__(cls, input_array, mom_in=None, mom_out=None):
+        obj = np.asarray(input_array).view(cls)
+        obj.mom_in = mom_in
+        obj.mom_out = mom_out
+        return obj
+
+    @property
+    def g5H(self):
+        """Gamma_5 hermitean conjugate
+
+        Returns gamma_5 @ M.T.conj() @ gamma_5 and exchanges in and out going
+        momenta. Works only for 12x12 matrices.
+        """
+        if self.shape != (12, 12):
+            raise Exception('g5H only works for 12x12 matrices.')
+        extended_g5 = np.kron(np.eye(3, dtype=int), gamma5)
+        return Npr_matrix(matmul(extended_g5, self.conj().T, extended_g5),
+                          mom_in=self.mom_out,
+                          mom_out=self.mom_in)
+
+    def _propagate_mom(self, other, name):
+        s_mom = getattr(self, name, None)
+        o_mom = getattr(other, name, None)
+        if s_mom is not None and o_mom is not None:
+            if not np.allclose(s_mom, o_mom):
+                raise Exception(name + ' does not match.')
+        return o_mom if o_mom is not None else s_mom
+
+    def __matmul__(self, other):
+        return self.__new__(Npr_matrix,
+                            super().__matmul__(other),
+                            self._propagate_mom(other, 'mom_in'),
+                            self._propagate_mom(other, 'mom_out'))
+
+    def __array_finalize__(self, obj):
+        if obj is None:
+            return
+        self.mom_in = getattr(obj, 'mom_in', None)
+        self.mom_out = getattr(obj, 'mom_out', None)
+
+
+def _check_geometry():
+    if L is None:
+        raise Exception("Spatial extent 'L' not set.")
+    else:
+        if not isinstance(L, int):
+            raise Exception("Spatial extent 'L' must be an integer.")
+    if T is None:
+        raise Exception("Temporal extent 'T' not set.")
+    else:
+        if not isinstance(T, int):
+            raise Exception("Temporal extent 'T' must be an integer.")
+
+
+def inv_propagator(prop):
+    """ Inverts a 12x12 quark propagator"""
+    if prop.shape != (12, 12):
+        raise Exception("Only 12x12 propagators can be inverted.")
+    return Npr_matrix(inv(prop), prop.mom_in)
+
+
+def Zq(inv_prop, fermion='Wilson'):
+    """ Calculates the quark field renormalization constant Zq
+
+        Parameters:
+        inv_prop -- Inverted 12x12 quark propagator
+        fermion -- Fermion type for which the tree-level propagator is used
+                   in the calculation of Zq. Default Wilson.
+    """
+    _check_geometry()
+    mom = np.copy(inv_prop.mom_in)
+    mom[3] /= T / L
+    sin_mom = np.sin(2 * np.pi / L * mom)
+
+    if fermion == 'Wilson':
+        p_slash = -1j * (sin_mom[0] * gamma[0] + sin_mom[1] * gamma[1] + sin_mom[2] * gamma[2] + sin_mom[3] * gamma[3]) / np.sum(sin_mom ** 2)
+    elif fermion == 'Continuum':
+        p_mom = 2 * np.pi / L * mom
+        p_slash = -1j * (p_mom[0] * gamma[0] + p_mom[1] * gamma[1] + p_mom[2] * gamma[2] + p_mom[3] * gamma[3]) / np.sum(p_mom ** 2)
+    elif fermion == 'DWF':
+        W = np.sum(1 - np.cos(2 * np.pi / L * mom))
+        s2 = np.sum(sin_mom ** 2)
+        p_slash = -1j * (sin_mom[0] * gamma[0] + sin_mom[1] * gamma[1] + sin_mom[2] * gamma[2] + sin_mom[3] * gamma[3])
+        p_slash /= 2 * (W - 1 + np.sqrt((1 - W) ** 2 + s2))
+    else:
+        raise Exception("Fermion type '" + fermion + "' not implemented")
+
+    res = 1 / 12. * np.trace(matmul(inv_prop, np.kron(np.eye(3, dtype=int), p_slash)))
+    res.gamma_method()
+
+    if not res.imag.is_zero_within_error(5):
+        warnings.warn("Imaginary part of Zq is not zero within 5 sigma")
+        return res
+    return res.real
+
+ +
+ +
+
+
+ #   + + + class + Npr_matrix(numpy.ndarray): +
+ +
+ View Source +
class Npr_matrix(np.ndarray):
+
+    def __new__(cls, input_array, mom_in=None, mom_out=None):
+        obj = np.asarray(input_array).view(cls)
+        obj.mom_in = mom_in
+        obj.mom_out = mom_out
+        return obj
+
+    @property
+    def g5H(self):
+        """Gamma_5 hermitean conjugate
+
+        Returns gamma_5 @ M.T.conj() @ gamma_5 and exchanges in and out going
+        momenta. Works only for 12x12 matrices.
+        """
+        if self.shape != (12, 12):
+            raise Exception('g5H only works for 12x12 matrices.')
+        extended_g5 = np.kron(np.eye(3, dtype=int), gamma5)
+        return Npr_matrix(matmul(extended_g5, self.conj().T, extended_g5),
+                          mom_in=self.mom_out,
+                          mom_out=self.mom_in)
+
+    def _propagate_mom(self, other, name):
+        s_mom = getattr(self, name, None)
+        o_mom = getattr(other, name, None)
+        if s_mom is not None and o_mom is not None:
+            if not np.allclose(s_mom, o_mom):
+                raise Exception(name + ' does not match.')
+        return o_mom if o_mom is not None else s_mom
+
+    def __matmul__(self, other):
+        return self.__new__(Npr_matrix,
+                            super().__matmul__(other),
+                            self._propagate_mom(other, 'mom_in'),
+                            self._propagate_mom(other, 'mom_out'))
+
+    def __array_finalize__(self, obj):
+        if obj is None:
+            return
+        self.mom_in = getattr(obj, 'mom_in', None)
+        self.mom_out = getattr(obj, 'mom_out', None)
+
+ +
+ +

ndarray(shape, dtype=float, buffer=None, offset=0, + strides=None, order=None)

+ +

An array object represents a multidimensional, homogeneous array +of fixed-size items. An associated data-type object describes the +format of each element in the array (its byte-order, how many bytes it +occupies in memory, whether it is an integer, a floating point number, +or something else, etc.)

+ +

Arrays should be constructed using array, zeros or empty (refer +to the See Also section below). The parameters given here refer to +a low-level method (ndarray(...)) for instantiating an array.

+ +

For more information, refer to the numpy module and examine the +methods and attributes of an array.

+ +
Parameters
+ +
    +
  • (for the __new__ method; see Notes below)
  • +
  • shape (tuple of ints): +Shape of created array.
  • +
  • dtype (data-type, optional): +Any object that can be interpreted as a numpy data type.
  • +
  • buffer (object exposing buffer interface, optional): +Used to fill the array with data.
  • +
  • offset (int, optional): +Offset of array data in buffer.
  • +
  • strides (tuple of ints, optional): +Strides of data in memory.
  • +
  • order ({'C', 'F'}, optional): +Row-major (C-style) or column-major (Fortran-style) order.
  • +
+ +
Attributes
+ +
    +
  • T (ndarray): +Transpose of the array.
  • +
  • data (buffer): +The array's elements, in memory.
  • +
  • dtype (dtype object): +Describes the format of the elements in the array.
  • +
  • flags (dict): +Dictionary containing information related to memory use, e.g., +'C_CONTIGUOUS', 'OWNDATA', 'WRITEABLE', etc.
  • +
  • flat (numpy.flatiter object): +Flattened version of the array as an iterator. The iterator +allows assignments, e.g., x.flat = 3 (See ndarray.flat for +assignment examples; TODO).
  • +
  • imag (ndarray): +Imaginary part of the array.
  • +
  • real (ndarray): +Real part of the array.
  • +
  • size (int): +Number of elements in the array.
  • +
  • itemsize (int): +The memory use of each array element in bytes.
  • +
  • nbytes (int): +The total number of bytes required to store the array data, +i.e., itemsize * size.
  • +
  • ndim (int): +The array's number of dimensions.
  • +
  • shape (tuple of ints): +Shape of the array.
  • +
  • strides (tuple of ints): +The step-size required to move from one element to the next in +memory. For example, a contiguous (3, 4) array of type +int16 in C-order has strides (8, 2). This implies that +to move from element to element in memory requires jumps of 2 bytes. +To move from row-to-row, one needs to jump 8 bytes at a time +(2 * 4).
  • +
  • ctypes (ctypes object): +Class containing properties of the array needed for interaction +with ctypes.
  • +
  • base (ndarray): +If the array is a view into another array, that array is its base +(unless that array is also a view). The base array is where the +array data is actually stored.
  • +
+ +
See Also
+ +

array: Construct an array.
+zeros: Create an array, each element of which is zero.
+empty: Create an array, but leave its allocated memory unchanged (i.e., +it contains "garbage").
+dtype: Create a data-type.
+numpy.typing.NDArray: A :term:generic <generic type> version +of ndarray.

+ +
Notes
+ +

There are two modes of creating an array using __new__:

+ +
    +
  1. If buffer is None, then only shape, dtype, and order +are used.
  2. +
  3. If buffer is an object exposing the buffer interface, then +all keywords are interpreted.
  4. +
+ +

No __init__ method is needed because the array is fully initialized +after the __new__ method.

+ +
Examples
+ +

These examples illustrate the low-level ndarray constructor. Refer +to the See Also section above for easier ways of constructing an +ndarray.

+ +

First mode, buffer is None:

+ +
>>> np.ndarray(shape=(2,2), dtype=float, order='F')
+array([[0.0e+000, 0.0e+000], # random
+       [     nan, 2.5e-323]])
+
+ +

Second mode:

+ +
>>> np.ndarray((2,), buffer=np.array([1,2,3]),
+...            offset=np.int_().itemsize,
+...            dtype=int) # offset = 1*itemsize, i.e. skip first element
+array([2, 3])
+
+
+ + +
+
#   + + + Npr_matrix() +
+ + + + +
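
A short construction sketch (the momentum components are hypothetical example values):

+ +
>>> import numpy as np
+>>> from pyerrors.npr import Npr_matrix
+>>> mat = Npr_matrix(np.zeros((12, 12)), mom_in=np.array([1, 0, 0, 1]), mom_out=np.array([1, 0, 0, 1]))
+>>> mat.mom_in  # the attached momenta travel with the matrix
+array([1, 0, 0, 1])
+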
+
+
#   + + g5H +
+ +

Gamma_5 Hermitian conjugate

+ +

Returns gamma_5 @ M.T.conj() @ gamma_5 and exchanges the incoming and outgoing momenta. Works only for 12x12 matrices.

+
+ + +
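
A hedged sketch of the property (prop stands for a 12x12 Npr_matrix, e.g. a quark propagator prepared beforehand with both momenta set):

+ +
>>> g5h_prop = prop.g5H  # gamma_5 @ prop.conj().T @ gamma_5
+>>> g5h_prop.mom_in is prop.mom_out  # incoming and outgoing momenta are exchanged
+True
+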
+
+
Inherited Members
+
+
numpy.ndarray
+
dumps
+
dump
+
all
+
any
+
argmax
+
argmin
+
argpartition
+
argsort
+
astype
+
byteswap
+
choose
+
clip
+
compress
+
conj
+
conjugate
+
copy
+
cumprod
+
cumsum
+
diagonal
+
dot
+
fill
+
flatten
+
getfield
+
item
+
itemset
+
max
+
mean
+
min
+
newbyteorder
+
nonzero
+
partition
+
prod
+
ptp
+
put
+
ravel
+
repeat
+
reshape
+
resize
+
round
+
searchsorted
+
setfield
+
setflags
+
sort
+
squeeze
+
std
+
sum
+
swapaxes
+
take
+
tobytes
+
tofile
+
tolist
+
tostring
+
trace
+
transpose
+
var
+
view
+
ndim
+
flags
+
shape
+
strides
+
data
+
itemsize
+
size
+
nbytes
+
base
+
dtype
+
real
+
imag
+
flat
+
ctypes
+
T
+ +
+
+
+
+
+
#   + + + def + inv_propagator(prop): +
+ +
+ View Source +
def inv_propagator(prop):
+    """ Inverts a 12x12 quark propagator"""
+    if prop.shape != (12, 12):
+        raise Exception("Only 12x12 propagators can be inverted.")
+    return Npr_matrix(inv(prop), prop.mom_in)
+
+ +
+ +

Inverts a 12x12 quark propagator

+
+ + +
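
A minimal sketch (prop is a placeholder for a 12x12 quark propagator of type Npr_matrix):

+ +
>>> import pyerrors as pe
+>>> inv_prop = pe.npr.inv_propagator(prop)  # prop: hypothetical 12x12 Npr_matrix
+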
+
+
#   + + + def + Zq(inv_prop, fermion='Wilson'): +
+ +
+ View Source +
def Zq(inv_prop, fermion='Wilson'):
+    """ Calculates the quark field renormalization constant Zq
+
+        Parameters:
+        inv_prop -- Inverted 12x12 quark propagator
+        fermion -- Fermion type for which the tree-level propagator is used
+                   in the calculation of Zq. Default Wilson.
+    """
+    _check_geometry()
+    mom = np.copy(inv_prop.mom_in)
+    mom[3] /= T / L
+    sin_mom = np.sin(2 * np.pi / L * mom)
+
+    if fermion == 'Wilson':
+        p_slash = -1j * (sin_mom[0] * gamma[0] + sin_mom[1] * gamma[1] + sin_mom[2] * gamma[2] + sin_mom[3] * gamma[3]) / np.sum(sin_mom ** 2)
+    elif fermion == 'Continuum':
+        p_mom = 2 * np.pi / L * mom
+        p_slash = -1j * (p_mom[0] * gamma[0] + p_mom[1] * gamma[1] + p_mom[2] * gamma[2] + p_mom[3] * gamma[3]) / np.sum(p_mom ** 2)
+    elif fermion == 'DWF':
+        W = np.sum(1 - np.cos(2 * np.pi / L * mom))
+        s2 = np.sum(sin_mom ** 2)
+        p_slash = -1j * (sin_mom[0] * gamma[0] + sin_mom[1] * gamma[1] + sin_mom[2] * gamma[2] + sin_mom[3] * gamma[3])
+        p_slash /= 2 * (W - 1 + np.sqrt((1 - W) ** 2 + s2))
+    else:
+        raise Exception("Fermion type '" + fermion + "' not implemented")
+
+    res = 1 / 12. * np.trace(matmul(inv_prop, np.kron(np.eye(3, dtype=int), p_slash)))
+    res.gamma_method()
+
+    if not res.imag.is_zero_within_error(5):
+        warnings.warn("Imaginary part of Zq is not zero within 5 sigma")
+        return res
+    return res.real
+
+ +
+ +

Calculates the quark field renormalization constant Zq

+ +

Parameters:
+inv_prop -- Inverted 12x12 quark propagator
+fermion -- Fermion type for which the tree-level propagator is used
+           in the calculation of Zq. Default Wilson.

+
+ + +
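
An end-to-end sketch under stated assumptions: the lattice extents below are example values, prop is a hypothetical 12x12 quark propagator (an Npr_matrix of Obs with mom_in set), and the module-level L and T have to be integers before Zq is called:

+ +
>>> import pyerrors as pe
+>>> pe.npr.L = 32  # spatial extent (example value)
+>>> pe.npr.T = 64  # temporal extent (example value)
+>>> inv_prop = pe.npr.inv_propagator(prop)  # prop: placeholder propagator
+>>> zq = pe.npr.Zq(inv_prop, fermion='Wilson')
+>>> print(zq)  # Zq as an Obs with error estimate (gamma_method is run internally)
+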
+
+ + \ No newline at end of file diff --git a/docs/pyerrors/obs.html b/docs/pyerrors/obs.html new file mode 100644 index 00000000..40754f5c --- /dev/null +++ b/docs/pyerrors/obs.html @@ -0,0 +1,5413 @@ + + + + + + + pyerrors.obs API documentation + + + + + + + + + + + +
+
+

+pyerrors.obs

+ + +
+ View Source +
#!/usr/bin/env python
+# coding: utf-8
+
+import warnings
+import pickle
+import numpy as np
+import autograd.numpy as anp  # Thinly-wrapped numpy
+from autograd import jacobian
+import matplotlib.pyplot as plt
+import numdifftools as nd
+from itertools import groupby
+
+
+class Obs:
+    """Class for a general observable.
+
+    Instances of Obs are the basic objects of a pyerrors error analysis.
+    They are initialized with a list which contains arrays of samples for
+    different ensembles/replica and another list of same length which contains
+    the names of the ensembles/replica. Mathematical operations can be
+    performed on instances. The result is another instance of Obs. The error of
+    an instance can be computed with the gamma_method. Also contains additional
+    methods for output and visualization of the error calculation.
+
+    Attributes
+    ----------
+    S_global : float
+        Standard value for S (default 2.0)
+    S_dict : dict
+        Dictionary for S values. If an entry for a given ensemble
+        exists this overwrites the standard value for that ensemble.
+    tau_exp_global : float
+        Standard value for tau_exp (default 0.0)
+    tau_exp_dict : dict
+        Dictionary for tau_exp values. If an entry for a given ensemble exists
+        this overwrites the standard value for that ensemble.
+    N_sigma_global : float
+        Standard value for N_sigma (default 1.0)
+    """
+    __slots__ = ['names', 'shape', 'r_values', 'deltas', 'N', '_value', '_dvalue',
+                 'ddvalue', 'reweighted', 'S', 'tau_exp', 'N_sigma',
+                 'e_dvalue', 'e_ddvalue', 'e_tauint', 'e_dtauint',
+                 'e_windowsize', 'e_rho', 'e_drho', 'e_n_tauint', 'e_n_dtauint',
+                 'idl', 'is_merged', 'tag', '__dict__']
+
+    S_global = 2.0
+    S_dict = {}
+    tau_exp_global = 0.0
+    tau_exp_dict = {}
+    N_sigma_global = 1.0
+    filter_eps = 1e-10
+
+    def __init__(self, samples, names, idl=None, means=None, **kwargs):
+        """ Initialize Obs object.
+
+        Parameters
+        ----------
+        samples : list
+            list of numpy arrays containing the Monte Carlo samples
+        names : list
+            list of strings labeling the individual samples
+        idl : list, optional
+            list of ranges or lists on which the samples are defined
+        means : list, optional
+            list of mean values for the case that the mean values were
+            already subtracted from the samples
+        """
+
+        if means is None:
+            if len(samples) != len(names):
+                raise Exception('Length of samples and names incompatible.')
+            if len(names) != len(set(names)):
+                raise Exception('Names are not unique.')
+            if not all(isinstance(x, str) for x in names):
+                raise TypeError('All names have to be strings.')
+            if min(len(x) for x in samples) <= 4:
+                raise Exception('Samples have to have at least 5 entries.')
+
+        self.names = sorted(names)
+        self.shape = {}
+        self.r_values = {}
+        self.deltas = {}
+
+        self.idl = {}
+        if idl is not None:
+            for name, idx in sorted(zip(names, idl)):
+                if isinstance(idx, range):
+                    self.idl[name] = idx
+                elif isinstance(idx, (list, np.ndarray)):
+                    dc = np.unique(np.diff(idx))
+                    if np.any(dc < 0):
+                        raise Exception("Unsorted idx for idl[%s]" % (name))
+                    if len(dc) == 1:
+                        self.idl[name] = range(idx[0], idx[-1] + dc[0], dc[0])
+                    else:
+                        self.idl[name] = list(idx)
+                else:
+                    raise Exception('incompatible type for idl[%s].' % (name))
+        else:
+            for name, sample in sorted(zip(names, samples)):
+                self.idl[name] = range(1, len(sample) + 1)
+
+        if means is not None:
+            for name, sample, mean in sorted(zip(names, samples, means)):
+                self.shape[name] = len(self.idl[name])
+                if len(sample) != self.shape[name]:
+                    raise Exception('Incompatible samples and idx for %s: %d vs. %d' % (name, len(sample), self.shape[name]))
+                self.r_values[name] = mean
+                self.deltas[name] = sample
+        else:
+            for name, sample in sorted(zip(names, samples)):
+                self.shape[name] = len(self.idl[name])
+                if len(sample) != self.shape[name]:
+                    raise Exception('Incompatible samples and idx for %s: %d vs. %d' % (name, len(sample), self.shape[name]))
+                self.r_values[name] = np.mean(sample)
+                self.deltas[name] = sample - self.r_values[name]
+        self.is_merged = False
+        self.N = sum(list(self.shape.values()))
+
+        self._value = 0
+        if means is None:
+            for name in self.names:
+                self._value += self.shape[name] * self.r_values[name]
+            self._value /= self.N
+
+        self._dvalue = 0.0
+        self.ddvalue = 0.0
+        self.reweighted = False
+
+        self.tag = None
+
+    @property
+    def value(self):
+        return self._value
+
+    @property
+    def dvalue(self):
+        return self._dvalue
+
+    @property
+    def e_names(self):
+        return sorted(set([o.split('|')[0] for o in self.names]))
+
+    @property
+    def e_content(self):
+        res = {}
+        for e, e_name in enumerate(self.e_names):
+            res[e_name] = sorted(filter(lambda x: x.startswith(e_name + '|'), self.names))
+            if e_name in self.names:
+                res[e_name].append(e_name)
+        return res
+
+    def expand_deltas(self, deltas, idx, shape):
+        """Expand deltas defined on idx to a regular, contiguous range, where holes are filled by 0.
+           If idx is of type range, the deltas are not changed
+
+        Parameters
+        ----------
+        deltas  -- List of fluctuations
+        idx     -- List or range of configs on which the deltas are defined.
+        shape   -- Number of configs in idx.
+        """
+        if type(idx) is range:
+            return deltas
+        else:
+            ret = np.zeros(idx[-1] - idx[0] + 1)
+            for i in range(shape):
+                ret[idx[i] - idx[0]] = deltas[i]
+            return ret
+
+    def calc_gamma(self, deltas, idx, shape, w_max, fft):
+        """Calculate Gamma_{AA} from the deltas, which are defined on idx.
+           idx is assumed to be a contiguous range (possibly with a stepsize != 1)
+
+        Parameters
+        ----------
+        deltas  -- List of fluctuations
+        idx     -- List or range of configs on which the deltas are defined.
+        shape   -- Number of configs in idx.
+        w_max   -- Upper bound for the summation window
+        fft     -- boolean, which determines whether the fft algorithm is used for
+                   the computation of the autocorrelation function
+        """
+        gamma = np.zeros(w_max)
+        deltas = self.expand_deltas(deltas, idx, shape)
+        new_shape = len(deltas)
+        if fft:
+            max_gamma = min(new_shape, w_max)
+            # The padding for the fft has to be even
+            padding = new_shape + max_gamma + (new_shape + max_gamma) % 2
+            gamma[:max_gamma] += np.fft.irfft(np.abs(np.fft.rfft(deltas, padding)) ** 2)[:max_gamma]
+        else:
+            for n in range(w_max):
+                if new_shape - n >= 0:
+                    gamma[n] += deltas[0:new_shape - n].dot(deltas[n:new_shape])
+
+        return gamma
+
+    def gamma_method(self, **kwargs):
+        """Calculate the error and related properties of the Obs.
+
+        Keyword arguments
+        -----------------
+        S : float
+            specifies a custom value for the parameter S (default 2.0), can be
+            a float or an array of floats for different ensembles
+        tau_exp : float
+            positive value triggers the critical slowing down analysis
+            (default 0.0), can be a float or an array of floats for different
+            ensembles
+        N_sigma : float
+            number of standard deviations from zero until the tail is
+            attached to the autocorrelation function (default 1)
+        fft : bool
+            determines whether the fft algorithm is used for the computation
+            of the autocorrelation function (default True)
+        """
+
+        e_content = self.e_content
+        self.e_dvalue = {}
+        self.e_ddvalue = {}
+        self.e_tauint = {}
+        self.e_dtauint = {}
+        self.e_windowsize = {}
+        self.e_n_tauint = {}
+        self.e_n_dtauint = {}
+        e_gamma = {}
+        self.e_rho = {}
+        self.e_drho = {}
+        self._dvalue = 0
+        self.ddvalue = 0
+
+        self.S = {}
+        self.tau_exp = {}
+
+        if kwargs.get('fft') is False:
+            fft = False
+        else:
+            fft = True
+
+        if 'S' in kwargs:
+            tmp = kwargs.get('S')
+            if isinstance(tmp, list):
+                if len(tmp) != len(self.e_names):
+                    raise Exception('Length of S array does not match ensembles.')
+                for e, e_name in enumerate(self.e_names):
+                    if tmp[e] <= 0:
+                        raise Exception('S has to be larger than 0.')
+                    self.S[e_name] = tmp[e]
+            else:
+                if isinstance(tmp, (int, float)):
+                    if tmp <= 0:
+                        raise Exception('S has to be larger than 0.')
+                    for e, e_name in enumerate(self.e_names):
+                        self.S[e_name] = tmp
+                else:
+                    raise TypeError('S is not in proper format.')
+        else:
+            for e, e_name in enumerate(self.e_names):
+                if e_name in Obs.S_dict:
+                    self.S[e_name] = Obs.S_dict[e_name]
+                else:
+                    self.S[e_name] = Obs.S_global
+
+        if 'tau_exp' in kwargs:
+            tmp = kwargs.get('tau_exp')
+            if isinstance(tmp, list):
+                if len(tmp) != len(self.e_names):
+                    raise Exception('Length of tau_exp array does not match ensembles.')
+                for e, e_name in enumerate(self.e_names):
+                    if tmp[e] < 0:
+                        raise Exception('tau_exp smaller than 0.')
+                    self.tau_exp[e_name] = tmp[e]
+            else:
+                if isinstance(tmp, (int, float)):
+                    if tmp < 0:
+                        raise Exception('tau_exp smaller than 0.')
+                    for e, e_name in enumerate(self.e_names):
+                        self.tau_exp[e_name] = tmp
+                else:
+                    raise TypeError('tau_exp is not in proper format.')
+        else:
+            for e, e_name in enumerate(self.e_names):
+                if e_name in Obs.tau_exp_dict:
+                    self.tau_exp[e_name] = Obs.tau_exp_dict[e_name]
+                else:
+                    self.tau_exp[e_name] = Obs.tau_exp_global
+
+        if 'N_sigma' in kwargs:
+            self.N_sigma = kwargs.get('N_sigma')
+            if not isinstance(self.N_sigma, (int, float)):
+                raise TypeError('N_sigma is not a number.')
+        else:
+            self.N_sigma = Obs.N_sigma_global
+
+        for e, e_name in enumerate(self.e_names):
+
+            r_length = []
+            for r_name in e_content[e_name]:
+                if isinstance(self.idl[r_name], range):
+                    r_length.append(len(self.idl[r_name]))
+                else:
+                    r_length.append((self.idl[r_name][-1] - self.idl[r_name][0] + 1))
+
+            e_N = np.sum([self.shape[r_name] for r_name in e_content[e_name]])
+            w_max = max(r_length) // 2
+            e_gamma[e_name] = np.zeros(w_max)
+            self.e_rho[e_name] = np.zeros(w_max)
+            self.e_drho[e_name] = np.zeros(w_max)
+
+            for r_name in e_content[e_name]:
+                e_gamma[e_name] += self.calc_gamma(self.deltas[r_name], self.idl[r_name], self.shape[r_name], w_max, fft)
+
+            gamma_div = np.zeros(w_max)
+            for r_name in e_content[e_name]:
+                gamma_div += self.calc_gamma(np.ones((self.shape[r_name])), self.idl[r_name], self.shape[r_name], w_max, fft)
+            e_gamma[e_name] /= gamma_div[:w_max]
+
+            if np.abs(e_gamma[e_name][0]) < 10 * np.finfo(float).tiny:  # Prevent division by zero
+                self.e_tauint[e_name] = 0.5
+                self.e_dtauint[e_name] = 0.0
+                self.e_dvalue[e_name] = 0.0
+                self.e_ddvalue[e_name] = 0.0
+                self.e_windowsize[e_name] = 0
+                continue
+
+            self.e_rho[e_name] = e_gamma[e_name][:w_max] / e_gamma[e_name][0]
+            self.e_n_tauint[e_name] = np.cumsum(np.concatenate(([0.5], self.e_rho[e_name][1:])))
+            # Make sure no entry of tauint is smaller than 0.5
+            self.e_n_tauint[e_name][self.e_n_tauint[e_name] <= 0.5] = 0.5 + np.finfo(np.float64).eps
+            # hep-lat/0306017 eq. (42)
+            self.e_n_dtauint[e_name] = self.e_n_tauint[e_name] * 2 * np.sqrt(np.abs(np.arange(w_max) + 0.5 - self.e_n_tauint[e_name]) / e_N)
+            self.e_n_dtauint[e_name][0] = 0.0
+
+            def _compute_drho(i):
+                tmp = self.e_rho[e_name][i + 1:w_max] + np.concatenate([self.e_rho[e_name][i - 1::-1], self.e_rho[e_name][1:w_max - 2 * i]]) - 2 * self.e_rho[e_name][i] * self.e_rho[e_name][1:w_max - i]
+                self.e_drho[e_name][i] = np.sqrt(np.sum(tmp ** 2) / e_N)
+
+            _compute_drho(1)
+            if self.tau_exp[e_name] > 0:
+                texp = self.tau_exp[e_name]
+                # if type(self.idl[e_name]) is range: # scale tau_exp according to step size
+                #    texp /= self.idl[e_name].step
+                # Critical slowing down analysis
+                for n in range(1, w_max // 2):
+                    _compute_drho(n + 1)
+                    if (self.e_rho[e_name][n] - self.N_sigma * self.e_drho[e_name][n]) < 0 or n >= w_max // 2 - 2:
+                        # Bias correction hep-lat/0306017 eq. (49) included
+                        self.e_tauint[e_name] = self.e_n_tauint[e_name][n] * (1 + (2 * n + 1) / e_N) / (1 + 1 / e_N) + texp * np.abs(self.e_rho[e_name][n + 1])  # The absolute value makes sure that the tail contribution is always positive
+                        self.e_dtauint[e_name] = np.sqrt(self.e_n_dtauint[e_name][n] ** 2 + texp ** 2 * self.e_drho[e_name][n + 1] ** 2)
+                        # Error of tau_exp neglected so far, missing term: self.e_rho[e_name][n + 1] ** 2 * d_tau_exp ** 2
+                        self.e_dvalue[e_name] = np.sqrt(2 * self.e_tauint[e_name] * e_gamma[e_name][0] * (1 + 1 / e_N) / e_N)
+                        self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt((n + 0.5) / e_N)
+                        self.e_windowsize[e_name] = n
+                        break
+            else:
+                # Standard automatic windowing procedure
+                g_w = self.S[e_name] / np.log((2 * self.e_n_tauint[e_name][1:] + 1) / (2 * self.e_n_tauint[e_name][1:] - 1))
+                g_w = np.exp(- np.arange(1, w_max) / g_w) - g_w / np.sqrt(np.arange(1, w_max) * e_N)
+                for n in range(1, w_max):
+                    if n < w_max // 2 - 2:
+                        _compute_drho(n + 1)
+                    if g_w[n - 1] < 0 or n >= w_max - 1:
+                        self.e_tauint[e_name] = self.e_n_tauint[e_name][n] * (1 + (2 * n + 1) / e_N) / (1 + 1 / e_N)  # Bias correction hep-lat/0306017 eq. (49)
+                        self.e_dtauint[e_name] = self.e_n_dtauint[e_name][n]
+                        self.e_dvalue[e_name] = np.sqrt(2 * self.e_tauint[e_name] * e_gamma[e_name][0] * (1 + 1 / e_N) / e_N)
+                        self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt((n + 0.5) / e_N)
+                        self.e_windowsize[e_name] = n
+                        break
+
+            self._dvalue += self.e_dvalue[e_name] ** 2
+            self.ddvalue += (self.e_dvalue[e_name] * self.e_ddvalue[e_name]) ** 2
+
+        self._dvalue = np.sqrt(self.dvalue)
+        if self._dvalue == 0.0:
+            self.ddvalue = 0.0
+        else:
+            self.ddvalue = np.sqrt(self.ddvalue) / self.dvalue
+        return
+
+    def print(self, level=1):
+        warnings.warn("Method 'print' renamed to 'details'", DeprecationWarning)
+        self.details(level > 1)
+
+    def details(self, ens_content=True):
+        """Output detailed properties of the Obs."""
+        if self.value == 0.0:
+            percentage = np.nan
+        else:
+            percentage = np.abs(self.dvalue / self.value) * 100
+        print('Result\t %3.8e +/- %3.8e +/- %3.8e (%3.3f%%)' % (self.value, self.dvalue, self.ddvalue, percentage))
+        if hasattr(self, 'e_dvalue'):
+            if len(self.e_names) > 1:
+                print(' Ensemble errors:')
+            for e_name in self.e_names:
+                if len(self.e_names) > 1:
+                    print('', e_name, '\t %3.8e +/- %3.8e' % (self.e_dvalue[e_name], self.e_ddvalue[e_name]))
+                if self.tau_exp[e_name] > 0:
+                    print(' t_int\t %3.8e +/- %3.8e tau_exp = %3.2f,  N_sigma = %1.0i' % (self.e_tauint[e_name], self.e_dtauint[e_name], self.tau_exp[e_name], self.N_sigma))
+                else:
+                    print(' t_int\t %3.8e +/- %3.8e S = %3.2f' % (self.e_tauint[e_name], self.e_dtauint[e_name], self.S[e_name]))
+        if self.tag is not None:
+            print("Description:", self.tag)
+        if ens_content is True:
+            if len(self.e_names) == 1:
+                print(self.N, 'samples in', len(self.e_names), 'ensemble:')
+            else:
+                print(self.N, 'samples in', len(self.e_names), 'ensembles:')
+            m = max(map(len, list(self.e_content.keys()))) + 1
+            print('\n'.join(['  ' + key.rjust(m) + ': ' + str(value) for key, value in sorted(self.e_content.items())]))
+
+    def is_zero_within_error(self, sigma=1):
+        """Checks whether the observable is zero within 'sigma' standard errors.
+
+        Only works properly after the gamma method has been run.
+        """
+        return self.is_zero() or np.abs(self.value) <= sigma * self.dvalue
+
+    def is_zero(self):
+        """Checks whether the observable is zero within machine precision."""
+        return np.isclose(0.0, self.value) and all(np.allclose(0.0, delta) for delta in self.deltas.values())
+
+    def plot_tauint(self, save=None):
+        """Plot integrated autocorrelation time for each ensemble."""
+        if not hasattr(self, 'e_names'):
+            raise Exception('Run the gamma method first.')
+
+        fig = plt.figure()
+        for e, e_name in enumerate(self.e_names):
+            plt.xlabel(r'$W$')
+            plt.ylabel(r'$\tau_\mathrm{int}$')
+            length = int(len(self.e_n_tauint[e_name]))
+            if self.tau_exp[e_name] > 0:
+                base = self.e_n_tauint[e_name][self.e_windowsize[e_name]]
+                x_help = np.arange(2 * self.tau_exp[e_name])
+                y_help = (x_help + 1) * np.abs(self.e_rho[e_name][self.e_windowsize[e_name] + 1]) * (1 - x_help / (2 * (2 * self.tau_exp[e_name] - 1))) + base
+                x_arr = np.arange(self.e_windowsize[e_name] + 1, self.e_windowsize[e_name] + 1 + 2 * self.tau_exp[e_name])
+                plt.plot(x_arr, y_help, 'C' + str(e), linewidth=1, ls='--', marker=',')
+                plt.errorbar([self.e_windowsize[e_name] + 2 * self.tau_exp[e_name]], [self.e_tauint[e_name]],
+                             yerr=[self.e_dtauint[e_name]], fmt='C' + str(e), linewidth=1, capsize=2, marker='o', mfc=plt.rcParams['axes.facecolor'])
+                xmax = self.e_windowsize[e_name] + 2 * self.tau_exp[e_name] + 1.5
+                label = e_name + r', $\tau_\mathrm{exp}$=' + str(np.around(self.tau_exp[e_name], decimals=2))
+            else:
+                label = e_name + ', S=' + str(np.around(self.S[e_name], decimals=2))
+                xmax = max(10.5, 2 * self.e_windowsize[e_name] - 0.5)
+
+            plt.errorbar(np.arange(length), self.e_n_tauint[e_name][:], yerr=self.e_n_dtauint[e_name][:], linewidth=1, capsize=2, label=label)
+            plt.axvline(x=self.e_windowsize[e_name], color='C' + str(e), alpha=0.5, marker=',', ls='--')
+            plt.legend()
+            plt.xlim(-0.5, xmax)
+            plt.ylim(bottom=0.0)
+            plt.draw()
+            if save:
+                fig.savefig(save)
+
+    def plot_rho(self):
+        """Plot normalized autocorrelation function time for each ensemble."""
+        if not hasattr(self, 'e_names'):
+            raise Exception('Run the gamma method first.')
+        for e, e_name in enumerate(self.e_names):
+            plt.xlabel('W')
+            plt.ylabel('rho')
+            length = int(len(self.e_drho[e_name]))
+            plt.errorbar(np.arange(length), self.e_rho[e_name][:length], yerr=self.e_drho[e_name][:], linewidth=1, capsize=2)
+            plt.axvline(x=self.e_windowsize[e_name], color='r', alpha=0.25, ls='--', marker=',')
+            if self.tau_exp[e_name] > 0:
+                plt.plot([self.e_windowsize[e_name] + 1, self.e_windowsize[e_name] + 1 + 2 * self.tau_exp[e_name]],
+                         [self.e_rho[e_name][self.e_windowsize[e_name] + 1], 0], 'k-', lw=1)
+                xmax = self.e_windowsize[e_name] + 2 * self.tau_exp[e_name] + 1.5
+                plt.title('Rho ' + e_name + r', tau\_exp=' + str(np.around(self.tau_exp[e_name], decimals=2)))
+            else:
+                xmax = max(10.5, 2 * self.e_windowsize[e_name] - 0.5)
+                plt.title('Rho ' + e_name + ', S=' + str(np.around(self.S[e_name], decimals=2)))
+            plt.plot([-0.5, xmax], [0, 0], 'k--', lw=1)
+            plt.xlim(-0.5, xmax)
+            plt.draw()
+
+    def plot_rep_dist(self):
+        """Plot replica distribution for each ensemble with more than one replicum."""
+        if not hasattr(self, 'e_names'):
+            raise Exception('Run the gamma method first.')
+        for e, e_name in enumerate(self.e_names):
+            if len(self.e_content[e_name]) == 1:
+                print('No replica distribution for a single replicum (', e_name, ')')
+                continue
+            r_length = []
+            sub_r_mean = 0
+            for r, r_name in enumerate(self.e_content[e_name]):
+                r_length.append(len(self.deltas[r_name]))
+                sub_r_mean += self.shape[r_name] * self.r_values[r_name]
+            e_N = np.sum(r_length)
+            sub_r_mean /= e_N
+            arr = np.zeros(len(self.e_content[e_name]))
+            for r, r_name in enumerate(self.e_content[e_name]):
+                arr[r] = (self.r_values[r_name] - sub_r_mean) / (self.e_dvalue[e_name] * np.sqrt(e_N / self.shape[r_name] - 1))
+            plt.hist(arr, rwidth=0.8, bins=len(self.e_content[e_name]))
+            plt.title('Replica distribution ' + e_name + ' (mean=0, var=1)')
+            plt.draw()
+
+    def plot_history(self, expand=True):
+        """Plot derived Monte Carlo history for each ensemble."""
+        if not hasattr(self, 'e_names'):
+            raise Exception('Run the gamma method first.')
+
+        for e, e_name in enumerate(self.e_names):
+            plt.figure()
+            r_length = []
+            tmp = []
+            for r, r_name in enumerate(self.e_content[e_name]):
+                if expand:
+                    tmp.append(self.expand_deltas(self.deltas[r_name], self.idl[r_name], self.shape[r_name]) + self.r_values[r_name])
+                else:
+                    tmp.append(self.deltas[r_name] + self.r_values[r_name])
+                r_length.append(len(tmp[-1]))
+            e_N = np.sum(r_length)
+            x = np.arange(e_N)
+            y = np.concatenate(tmp, axis=0)
+            plt.errorbar(x, y, fmt='.', markersize=3)
+            plt.xlim(-0.5, e_N - 0.5)
+            plt.title(e_name)
+            plt.draw()
+
+    def plot_piechart(self):
+        """Plot piechart which shows the fractional contribution of each
+        ensemble to the error and returns a dictionary containing the fractions."""
+        if not hasattr(self, 'e_names'):
+            raise Exception('Run the gamma method first.')
+        if self.dvalue == 0.0:
+            raise Exception('Error is 0.0')
+        labels = self.e_names
+        sizes = [i ** 2 for i in list(self.e_dvalue.values())] / self.dvalue ** 2
+        fig1, ax1 = plt.subplots()
+        ax1.pie(sizes, labels=labels, startangle=90, normalize=True)
+        ax1.axis('equal')
+        plt.draw()
+
+        return dict(zip(self.e_names, sizes))
+
+    def dump(self, name, **kwargs):
+        """Dump the Obs to a pickle file 'name'.
+
+        Keyword arguments
+        -----------------
+        path -- specifies a custom path for the file (default '.')
+        """
+        if 'path' in kwargs:
+            file_name = kwargs.get('path') + '/' + name + '.p'
+        else:
+            file_name = name + '.p'
+        with open(file_name, 'wb') as fb:
+            pickle.dump(self, fb)
+
+    def __float__(self):
+        return float(self.value)
+
+    def __repr__(self):
+        return 'Obs[' + str(self) + ']'
+
+    def __str__(self):
+        if self.dvalue == 0.0:
+            return str(self.value)
+        fexp = np.floor(np.log10(self.dvalue))
+        if fexp < 0.0:
+            return '{:{form}}({:2.0f})'.format(self.value, self.dvalue * 10 ** (-fexp + 1), form='.' + str(-int(fexp) + 1) + 'f')
+        elif fexp == 0.0:
+            return '{:.1f}({:1.1f})'.format(self.value, self.dvalue)
+        else:
+            return '{:.0f}({:2.0f})'.format(self.value, self.dvalue)
+
+    # Overload comparisons
+    def __lt__(self, other):
+        return self.value < other
+
+    def __le__(self, other):
+        return self.value <= other
+
+    def __gt__(self, other):
+        return self.value > other
+
+    def __ge__(self, other):
+        return self.value >= other
+
+    def __eq__(self, other):
+        return (self - other).is_zero()
+
+    def __ne__(self, other):
+        return not (self - other).is_zero()
+
+    # Overload math operations
+    def __add__(self, y):
+        if isinstance(y, Obs):
+            return derived_observable(lambda x, **kwargs: x[0] + x[1], [self, y], man_grad=[1, 1])
+        else:
+            if isinstance(y, np.ndarray):
+                return np.array([self + o for o in y])
+            elif y.__class__.__name__ == 'Corr':
+                return NotImplemented
+            else:
+                return derived_observable(lambda x, **kwargs: x[0] + y, [self], man_grad=[1])
+
+    def __radd__(self, y):
+        return self + y
+
+    def __mul__(self, y):
+        if isinstance(y, Obs):
+            return derived_observable(lambda x, **kwargs: x[0] * x[1], [self, y], man_grad=[y.value, self.value])
+        else:
+            if isinstance(y, np.ndarray):
+                return np.array([self * o for o in y])
+            elif isinstance(y, complex):
+                return CObs(self * y.real, self * y.imag)
+            elif y.__class__.__name__ == 'Corr':
+                return NotImplemented
+            else:
+                return derived_observable(lambda x, **kwargs: x[0] * y, [self], man_grad=[y])
+
+    def __rmul__(self, y):
+        return self * y
+
+    def __sub__(self, y):
+        if isinstance(y, Obs):
+            return derived_observable(lambda x, **kwargs: x[0] - x[1], [self, y], man_grad=[1, -1])
+        else:
+            if isinstance(y, np.ndarray):
+                return np.array([self - o for o in y])
+
+            elif y.__class__.__name__ == 'Corr':
+                return NotImplemented
+
+            else:
+                return derived_observable(lambda x, **kwargs: x[0] - y, [self], man_grad=[1])
+
+    def __rsub__(self, y):
+        return -1 * (self - y)
+
+    def __neg__(self):
+        return -1 * self
+
+    def __truediv__(self, y):
+        if isinstance(y, Obs):
+            return derived_observable(lambda x, **kwargs: x[0] / x[1], [self, y], man_grad=[1 / y.value, - self.value / y.value ** 2])
+        else:
+            if isinstance(y, np.ndarray):
+                return np.array([self / o for o in y])
+            elif y.__class__.__name__ == 'Corr':
+                return NotImplemented
+            else:
+                return derived_observable(lambda x, **kwargs: x[0] / y, [self], man_grad=[1 / y])
+
+    def __rtruediv__(self, y):
+        if isinstance(y, Obs):
+            return derived_observable(lambda x, **kwargs: x[0] / x[1], [y, self], man_grad=[1 / self.value, - y.value / self.value ** 2])
+        else:
+            if isinstance(y, np.ndarray):
+                return np.array([o / self for o in y])
+            elif y.__class__.__name__ == 'Corr':
+                return NotImplemented
+            else:
+                return derived_observable(lambda x, **kwargs: y / x[0], [self], man_grad=[-y / self.value ** 2])
+
+    def __pow__(self, y):
+        if isinstance(y, Obs):
+            return derived_observable(lambda x: x[0] ** x[1], [self, y])
+        else:
+            return derived_observable(lambda x: x[0] ** y, [self])
+
+    def __rpow__(self, y):
+        if isinstance(y, Obs):
+            return derived_observable(lambda x: x[0] ** x[1], [y, self])
+        else:
+            return derived_observable(lambda x: y ** x[0], [self])
+
+    def __abs__(self):
+        return derived_observable(lambda x: anp.abs(x[0]), [self])
+
+    # Overload numpy functions
+    def sqrt(self):
+        return derived_observable(lambda x, **kwargs: np.sqrt(x[0]), [self], man_grad=[1 / 2 / np.sqrt(self.value)])
+
+    def log(self):
+        return derived_observable(lambda x, **kwargs: np.log(x[0]), [self], man_grad=[1 / self.value])
+
+    def exp(self):
+        return derived_observable(lambda x, **kwargs: np.exp(x[0]), [self], man_grad=[np.exp(self.value)])
+
+    def sin(self):
+        return derived_observable(lambda x, **kwargs: np.sin(x[0]), [self], man_grad=[np.cos(self.value)])
+
+    def cos(self):
+        return derived_observable(lambda x, **kwargs: np.cos(x[0]), [self], man_grad=[-np.sin(self.value)])
+
+    def tan(self):
+        return derived_observable(lambda x, **kwargs: np.tan(x[0]), [self], man_grad=[1 / np.cos(self.value) ** 2])
+
+    def arcsin(self):
+        return derived_observable(lambda x: anp.arcsin(x[0]), [self])
+
+    def arccos(self):
+        return derived_observable(lambda x: anp.arccos(x[0]), [self])
+
+    def arctan(self):
+        return derived_observable(lambda x: anp.arctan(x[0]), [self])
+
+    def sinh(self):
+        return derived_observable(lambda x, **kwargs: np.sinh(x[0]), [self], man_grad=[np.cosh(self.value)])
+
+    def cosh(self):
+        return derived_observable(lambda x, **kwargs: np.cosh(x[0]), [self], man_grad=[np.sinh(self.value)])
+
+    def tanh(self):
+        return derived_observable(lambda x, **kwargs: np.tanh(x[0]), [self], man_grad=[1 / np.cosh(self.value) ** 2])
+
+    def arcsinh(self):
+        return derived_observable(lambda x: anp.arcsinh(x[0]), [self])
+
+    def arccosh(self):
+        return derived_observable(lambda x: anp.arccosh(x[0]), [self])
+
+    def arctanh(self):
+        return derived_observable(lambda x: anp.arctanh(x[0]), [self])
+
+    def sinc(self):
+        return derived_observable(lambda x: anp.sinc(x[0]), [self])
+
+
+class CObs:
+    """Class for a complex valued observable."""
+    __slots__ = ['_real', '_imag', 'tag']
+
+    def __init__(self, real, imag=0.0):
+        self._real = real
+        self._imag = imag
+        self.tag = None
+
+    @property
+    def real(self):
+        return self._real
+
+    @property
+    def imag(self):
+        return self._imag
+
+    def gamma_method(self, **kwargs):
+        """Executes the gamma_method for the real and the imaginary part."""
+        if isinstance(self.real, Obs):
+            self.real.gamma_method(**kwargs)
+        if isinstance(self.imag, Obs):
+            self.imag.gamma_method(**kwargs)
+
+    def is_zero(self):
+        """Checks whether both real and imaginary part are zero within machine precision."""
+        return self.real == 0.0 and self.imag == 0.0
+
+    def conjugate(self):
+        return CObs(self.real, -self.imag)
+
+    def __add__(self, other):
+        if isinstance(other, np.ndarray):
+            return other + self
+        elif hasattr(other, 'real') and hasattr(other, 'imag'):
+            return CObs(self.real + other.real,
+                        self.imag + other.imag)
+        else:
+            return CObs(self.real + other, self.imag)
+
+    def __radd__(self, y):
+        return self + y
+
+    def __sub__(self, other):
+        if isinstance(other, np.ndarray):
+            return -1 * (other - self)
+        elif hasattr(other, 'real') and hasattr(other, 'imag'):
+            return CObs(self.real - other.real, self.imag - other.imag)
+        else:
+            return CObs(self.real - other, self.imag)
+
+    def __rsub__(self, other):
+        return -1 * (self - other)
+
+    def __mul__(self, other):
+        if isinstance(other, np.ndarray):
+            return other * self
+        elif hasattr(other, 'real') and hasattr(other, 'imag'):
+            if all(isinstance(i, Obs) for i in [self.real, self.imag, other.real, other.imag]):
+                return CObs(derived_observable(lambda x, **kwargs: x[0] * x[1] - x[2] * x[3],
+                                               [self.real, other.real, self.imag, other.imag],
+                                               man_grad=[other.real.value, self.real.value, -other.imag.value, -self.imag.value]),
+                            derived_observable(lambda x, **kwargs: x[2] * x[1] + x[0] * x[3],
+                                               [self.real, other.real, self.imag, other.imag],
+                                               man_grad=[other.imag.value, self.imag.value, other.real.value, self.real.value]))
+            elif getattr(other, 'imag', 0) != 0:
+                return CObs(self.real * other.real - self.imag * other.imag,
+                            self.imag * other.real + self.real * other.imag)
+            else:
+                return CObs(self.real * other.real, self.imag * other.real)
+        else:
+            return CObs(self.real * other, self.imag * other)
+
+    def __rmul__(self, other):
+        return self * other
+
+    def __truediv__(self, other):
+        if isinstance(other, np.ndarray):
+            return 1 / (other / self)
+        elif hasattr(other, 'real') and hasattr(other, 'imag'):
+            r = other.real ** 2 + other.imag ** 2
+            return CObs((self.real * other.real + self.imag * other.imag) / r, (self.imag * other.real - self.real * other.imag) / r)
+        else:
+            return CObs(self.real / other, self.imag / other)
+
+    def __rtruediv__(self, other):
+        r = self.real ** 2 + self.imag ** 2
+        if hasattr(other, 'real') and hasattr(other, 'imag'):
+            return CObs((self.real * other.real + self.imag * other.imag) / r, (self.real * other.imag - self.imag * other.real) / r)
+        else:
+            return CObs(self.real * other / r, -self.imag * other / r)
+
+    def __abs__(self):
+        return np.sqrt(self.real**2 + self.imag**2)
+
+    def __neg__(self):
+        return -1 * self
+
+    def __eq__(self, other):
+        return self.real == other.real and self.imag == other.imag
+
+    def __str__(self):
+        return '(' + str(self.real) + int(self.imag >= 0.0) * '+' + str(self.imag) + 'j)'
+
+    def __repr__(self):
+        return 'CObs[' + str(self) + ']'
+
+
+def merge_idx(idl):
+    """Returns the union of all lists in idl
+
+    Parameters
+    ----------
+    idl  -- List of lists or ranges.
+    """
+
+    # Use groupby to efficiently check whether all elements of idl are identical
+    try:
+        g = groupby(idl)
+        if next(g, True) and not next(g, False):
+            return idl[0]
+    except Exception:
+        pass
+
+    if np.all([type(idx) is range for idx in idl]):
+        if len(set([idx[0] for idx in idl])) == 1:
+            idstart = min([idx.start for idx in idl])
+            idstop = max([idx.stop for idx in idl])
+            idstep = min([idx.step for idx in idl])
+            return range(idstart, idstop, idstep)
+
+    return list(set().union(*idl))
+
+
+def expand_deltas_for_merge(deltas, idx, shape, new_idx):
+    """Expand deltas defined on idx to the list of configs that is defined by new_idx.
+       New, empty entries are filled by 0. If idx and new_idx are of type range, the minimum
+       of the step sizes is used as the new step size.
+
+    Parameters
+    ----------
+    deltas : list
+        List of fluctuations
+    idx : list
+        List or range of configs on which the deltas are defined.
+        Has to be a subset of new_idx.
+    shape : list
+        Number of configs in idx.
+    new_idx : list
+        List of configs that defines the new range.
+    """
+
+    if type(idx) is range and type(new_idx) is range:
+        if idx == new_idx:
+            return deltas
+    ret = np.zeros(new_idx[-1] - new_idx[0] + 1)
+    for i in range(shape):
+        ret[idx[i] - new_idx[0]] = deltas[i]
+    return np.array([ret[new_idx[i] - new_idx[0]] for i in range(len(new_idx))])
+
+
+def filter_zeroes(names, deltas, idl, eps=Obs.filter_eps):
+    """Filter out all configurations with vanishing fluctuation such that they do not
+       contribute to the error estimate anymore. Returns the new names, deltas and
+       idl according to the filtering.
+       A fluctuation is considered to be vanishing if it is smaller than eps times
+       the mean of the absolute values of all deltas in one list.
+
+    Parameters
+    ----------
+    names  -- List of names
+    deltas -- Dict of lists of fluctuations
+    idl    -- Dict of lists or ranges of configs on which the deltas are defined.
+
+    Optional parameters
+    ----------
+    eps    -- Prefactor that enters the filter criterion.
+    """
+    new_names = []
+    new_deltas = {}
+    new_idl = {}
+    for name in names:
+        nd = []
+        ni = []
+        maxd = np.mean(np.fabs(deltas[name]))
+        for i in range(len(deltas[name])):
+            if not np.isclose(0.0, deltas[name][i], atol=eps * maxd):
+                nd.append(deltas[name][i])
+                ni.append(idl[name][i])
+        if nd:
+            new_names.append(name)
+            new_deltas[name] = np.array(nd)
+            new_idl[name] = ni
+    return (new_names, new_deltas, new_idl)
+
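+# Example: the exactly vanishing fluctuation on config 2 is filtered out,
+# together with its entry in idl:
+#
+#     >>> filter_zeroes(['A'], {'A': np.array([1., 0., -1.])}, {'A': [1, 2, 3]})
+#     (['A'], {'A': array([ 1., -1.])}, {'A': [1, 3]})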
+
+def derived_observable(func, data, **kwargs):
+    """Construct a derived Obs according to func(data, **kwargs) using automatic differentiation.
+
+    Parameters
+    ----------
+    func : object
+        arbitrary function of the form func(data, **kwargs). For the
+        automatic differentiation to work, all numpy functions have to have
+        the autograd wrapper (use 'import autograd.numpy as anp').
+    data : list
+        list of Obs, e.g. [obs1, obs2, obs3].
+
+    Keyword arguments
+    -----------------
+    num_grad : bool
+        if True, numerical derivatives are used instead of autograd
+        (default False). To control the numerical differentiation the
+        kwargs of numdifftools.step_generators.MaxStepGenerator
+        can be used.
+    man_grad : list
+        manually supply a list or an array which contains the jacobian
+        of func. Use cautiously, supplying the wrong derivative will
+        not be intercepted.
+
+    Notes
+    -----
+    For simple mathematical operations it can be practical to use anonymous
+    functions. For the ratio of two observables one can e.g. use
+
+    new_obs = derived_observable(lambda x: x[0] / x[1], [obs1, obs2])
+    """
+
+    data = np.asarray(data)
+    raveled_data = data.ravel()
+
+    # Workaround for matrix operations containing non Obs data
+    for i_data in raveled_data:
+        if isinstance(i_data, Obs):
+            first_name = i_data.names[0]
+            first_shape = i_data.shape[first_name]
+            first_idl = i_data.idl[first_name]
+            break
+
+    for i in range(len(raveled_data)):
+        if isinstance(raveled_data[i], (int, float)):
+            raveled_data[i] = Obs([raveled_data[i] + np.zeros(first_shape)], [first_name], idl=[first_idl])
+
+    n_obs = len(raveled_data)
+    new_names = sorted(set([y for x in [o.names for o in raveled_data] for y in x]))
+
+    is_merged = any(o.is_merged for o in raveled_data)
+    reweighted = any(o.reweighted for o in raveled_data)
+    new_idl_d = {}
+    for name in new_names:
+        idl = []
+        for i_data in raveled_data:
+            tmp = i_data.idl.get(name)
+            if tmp is not None:
+                idl.append(tmp)
+        new_idl_d[name] = merge_idx(idl)
+        if not is_merged:
+            is_merged = (1 != len(set([len(idx) for idx in [*idl, new_idl_d[name]]])))
+
+    if data.ndim == 1:
+        values = np.array([o.value for o in data])
+    else:
+        values = np.vectorize(lambda x: x.value)(data)
+
+    new_values = func(values, **kwargs)
+
+    multi = 0
+    if isinstance(new_values, np.ndarray):
+        multi = 1
+
+    new_r_values = {}
+    for name in new_names:
+        tmp_values = np.zeros(n_obs)
+        for i, item in enumerate(raveled_data):
+            tmp = item.r_values.get(name)
+            if tmp is None:
+                tmp = item.value
+            tmp_values[i] = tmp
+        if multi > 0:
+            tmp_values = np.array(tmp_values).reshape(data.shape)
+        new_r_values[name] = func(tmp_values, **kwargs)
+
+    if 'man_grad' in kwargs:
+        deriv = np.asarray(kwargs.get('man_grad'))
+        if new_values.shape + data.shape != deriv.shape:
+            raise Exception('Manual derivative does not have correct shape.')
+    elif kwargs.get('num_grad') is True:
+        if multi > 0:
+            raise Exception('Multi mode currently not supported for numerical derivative')
+        options = {
+            'base_step': 0.1,
+            'step_ratio': 2.5,
+            'num_steps': None,
+            'step_nom': None,
+            'offset': None,
+            'num_extrap': None,
+            'use_exact_steps': None,
+            'check_num_steps': None,
+            'scale': None}
+        for key in options.keys():
+            kwarg = kwargs.get(key)
+            if kwarg is not None:
+                options[key] = kwarg
+        tmp_df = nd.Gradient(func, order=4, **{k: v for k, v in options.items() if v is not None})(values, **kwargs)
+        if tmp_df.size == 1:
+            deriv = np.array([tmp_df.real])
+        else:
+            deriv = tmp_df.real
+    else:
+        deriv = jacobian(func)(values, **kwargs)
+
+    final_result = np.zeros(new_values.shape, dtype=object)
+
+    for i_val, new_val in np.ndenumerate(new_values):
+        new_deltas = {}
+        for j_obs, obs in np.ndenumerate(data):
+            for name in obs.names:
+                new_deltas[name] = new_deltas.get(name, 0) + deriv[i_val + j_obs] * expand_deltas_for_merge(obs.deltas[name], obs.idl[name], obs.shape[name], new_idl_d[name])
+
+        new_samples = []
+        new_means = []
+        new_idl = []
+        if is_merged:
+            filtered_names, filtered_deltas, filtered_idl_d = filter_zeroes(new_names, new_deltas, new_idl_d)
+        else:
+            filtered_names = new_names
+            filtered_deltas = new_deltas
+            filtered_idl_d = new_idl_d
+        for name in filtered_names:
+            new_samples.append(filtered_deltas[name])
+            new_means.append(new_r_values[name][i_val])
+            new_idl.append(filtered_idl_d[name])
+        final_result[i_val] = Obs(new_samples, filtered_names, means=new_means, idl=new_idl)
+        final_result[i_val]._value = new_val
+        final_result[i_val].is_merged = is_merged
+        final_result[i_val].reweighted = reweighted
+
+    if multi == 0:
+        final_result = final_result.item()
+
+    return final_result
+
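+# Usage sketch (obs1, obs2 stand for existing Obs instances). Nontrivial
+# functions have to be written with the autograd wrappers (anp); simple
+# ratios work with a plain lambda as in the docstring above:
+#
+#     ratio = derived_observable(lambda x: x[0] / x[1], [obs1, obs2])
+#     angle = derived_observable(lambda x: anp.arctan2(x[1], x[0]), [obs1, obs2])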
+
+def reduce_deltas(deltas, idx_old, idx_new):
+    """Extract deltas defined on idx_old on all configs of idx_new.
+
+    Parameters
+    ----------
+    deltas  -- List of fluctuations
+    idx_old -- List or range of configs on which the deltas are defined
+    idx_new -- List of configs for which we want to extract the deltas.
+               Has to be a subset of idx_old.
+    """
+    if not len(deltas) == len(idx_old):
+        raise Exception('Length of deltas and idx_old have to be the same: %d != %d' % (len(deltas), len(idx_old)))
+    if type(idx_old) is range and type(idx_new) is range:
+        if idx_old == idx_new:
+            return deltas
+    shape = len(idx_new)
+    ret = np.zeros(shape)
+    oldpos = 0
+    for i in range(shape):
+        if oldpos == idx_old[i]:
+            raise Exception('idx_old and idx_new do not match!')
+        pos = -1
+        for j in range(oldpos, len(idx_old)):
+            if idx_old[j] == idx_new[i]:
+                pos = j
+                break
+        if pos < 0:
+            raise Exception('Error in reduce_deltas: Config %d not in idx_old' % (idx_new[i]))
+        ret[i] = deltas[pos]
+    return np.array(ret)
+
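+# Example: extract the deltas belonging to the configs [1, 3] from a series
+# defined on [1, 2, 3]:
+#
+#     >>> reduce_deltas([1., 2., 3.], [1, 2, 3], [1, 3])
+#     array([1., 3.])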
+
+def reweight(weight, obs, **kwargs):
+    """Reweight a list of observables.
+
+    Parameters
+    ----------
+    weight : Obs
+        Reweighting factor. An Observable that has to be defined on a superset of the
+        configurations in obs[i].idl for all i.
+    obs : list
+        list of Obs, e.g. [obs1, obs2, obs3].
+
+    Keyword arguments
+    -----------------
+    all_configs : bool
+        if True, the reweighted observables are normalized by the average of
+        the reweighting factor on all configurations in weight.idl and not
+        on the configurations in obs[i].idl.
+    """
+    result = []
+    for i in range(len(obs)):
+        if sorted(weight.names) != sorted(obs[i].names):
+            raise Exception('Error: Ensembles do not fit')
+        for name in weight.names:
+            if not set(obs[i].idl[name]).issubset(weight.idl[name]):
+                raise Exception('obs[%d] has to be defined on a subset of the configs in weight.idl[%s]!' % (i, name))
+        new_samples = []
+        w_deltas = {}
+        for name in sorted(weight.names):
+            w_deltas[name] = reduce_deltas(weight.deltas[name], weight.idl[name], obs[i].idl[name])
+            new_samples.append((w_deltas[name] + weight.r_values[name]) * (obs[i].deltas[name] + obs[i].r_values[name]))
+        tmp_obs = Obs(new_samples, sorted(weight.names), idl=[obs[i].idl[name] for name in sorted(weight.names)])
+
+        if kwargs.get('all_configs'):
+            new_weight = weight
+        else:
+            new_weight = Obs([w_deltas[name] + weight.r_values[name] for name in sorted(weight.names)], sorted(weight.names), idl=[obs[i].idl[name] for name in sorted(weight.names)])
+
+        result.append(derived_observable(lambda x, **kwargs: x[0] / x[1], [tmp_obs, new_weight], **kwargs))
+        result[-1].reweighted = True
+        result[-1].is_merged = obs[i].is_merged
+
+    return result
+
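+# Usage sketch ('rw_factor' and 'my_obs' are placeholders for a reweighting
+# factor Obs and an observable defined on a subset of its configs):
+#
+#     rw_obs = reweight(rw_factor, [my_obs])[0]                    # <w O> / <w>
+#     rw_all = reweight(rw_factor, [my_obs], all_configs=True)[0]  # normalize with all configs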
+
+def correlate(obs_a, obs_b):
+    """Correlate two observables.
+
+    Parameters
+    ----------
+    obs_a : Obs
+        First observable
+    obs_b : Obs
+        Second observable
+
+    Keep in mind to only correlate primary observables which have not been reweighted
+    yet. The reweighting has to be applied after correlating the observables.
+    Currently only works if the ensembles are identical; this restriction is
+    not strictly necessary and could be lifted in the future.
+    """
+
+    if sorted(obs_a.names) != sorted(obs_b.names):
+        raise Exception('Ensembles do not fit')
+    for name in obs_a.names:
+        if obs_a.shape[name] != obs_b.shape[name]:
+            raise Exception('Shapes of ensemble', name, 'do not fit')
+        if obs_a.idl[name] != obs_b.idl[name]:
+            raise Exception('idl of ensemble', name, 'do not fit')
+
+    if obs_a.reweighted is True:
+        warnings.warn("The first observable is already reweighted.", RuntimeWarning)
+    if obs_b.reweighted is True:
+        warnings.warn("The second observable is already reweighted.", RuntimeWarning)
+
+    new_samples = []
+    new_idl = []
+    for name in sorted(obs_a.names):
+        new_samples.append((obs_a.deltas[name] + obs_a.r_values[name]) * (obs_b.deltas[name] + obs_b.r_values[name]))
+        new_idl.append(obs_a.idl[name])
+
+    o = Obs(new_samples, sorted(obs_a.names), idl=new_idl)
+    o.is_merged = obs_a.is_merged or obs_b.is_merged
+    o.reweighted = obs_a.reweighted or obs_b.reweighted
+    return o
+
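+# Usage sketch (obs_a and obs_b have to be defined on identical configs):
+#
+#     ab = correlate(obs_a, obs_b)   # Obs built from the samples of a * b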
+
+def covariance(obs1, obs2, correlation=False, **kwargs):
+    """Calculates the covariance of two observables.
+
+    covariance(obs, obs) is equal to obs.dvalue ** 2
+    The gamma method has to be applied first to both observables.
+
+    If abs(covariance(obs1, obs2)) > obs1.dvalue * obs2.dvalue, the covariance
+    is constrained to the maximum value in order to make sure that covariance
+    matrices are positive semidefinite.
+
+    Keyword arguments
+    -----------------
+    correlation -- if true the correlation instead of the covariance is
+                   returned (default False)
+    """
+
+    for name in sorted(set(obs1.names + obs2.names)):
+        if (obs1.shape.get(name) != obs2.shape.get(name)) and (obs1.shape.get(name) is not None) and (obs2.shape.get(name) is not None):
+            raise Exception('Shapes of ensemble', name, 'do not fit')
+        if (1 != len(set([len(idx) for idx in [obs1.idl[name], obs2.idl[name], merge_idx([obs1.idl[name], obs2.idl[name]])]]))):
+            raise Exception('Shapes of ensemble', name, 'do not fit')
+
+    if not hasattr(obs1, 'e_names') or not hasattr(obs2, 'e_names'):
+        raise Exception('The gamma method has to be applied to both Obs first.')
+
+    dvalue = 0
+
+    for e_name in obs1.e_names:
+
+        if e_name not in obs2.e_names:
+            continue
+
+        gamma = 0
+        r_length = []
+        for r_name in obs1.e_content[e_name]:
+            if r_name not in obs2.e_content[e_name]:
+                continue
+
+            r_length.append(len(obs1.deltas[r_name]))
+
+            gamma += np.sum(obs1.deltas[r_name] * obs2.deltas[r_name])
+
+        e_N = np.sum(r_length)
+
+        tau_combined = (obs1.e_tauint[e_name] + obs2.e_tauint[e_name]) / 2
+        dvalue += gamma / e_N * (1 + 1 / e_N) / e_N * 2 * tau_combined
+
+    if np.abs(dvalue / obs1.dvalue / obs2.dvalue) > 1.0:
+        dvalue = np.sign(dvalue) * obs1.dvalue * obs2.dvalue
+
+    if correlation:
+        dvalue = dvalue / obs1.dvalue / obs2.dvalue
+
+    return dvalue
+
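+# Usage sketch:
+#
+#     obs1.gamma_method()
+#     obs2.gamma_method()
+#     cov = covariance(obs1, obs2)                     # covariance estimate
+#     rho = covariance(obs1, obs2, correlation=True)   # constrained to [-1, 1]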
+
+def covariance2(obs1, obs2, correlation=False, **kwargs):
+    """Alternative implementation of the covariance of two observables.
+
+    covariance(obs, obs) is equal to obs.dvalue ** 2
+    The gamma method has to be applied first to both observables.
+
+    If abs(covariance(obs1, obs2)) > obs1.dvalue * obs2.dvalue, the covariance
+    is constrained to the maximum value in order to make sure that covariance
+    matrices are positive semidefinite.
+
+    Keyword arguments
+    -----------------
+    correlation -- if true the correlation instead of the covariance is
+                   returned (default False)
+    """
+
+    def expand_deltas(deltas, idx, shape, new_idx):
+        """Expand deltas defined on idx to a contiguous range [new_idx[0], new_idx[-1]].
+           New, empty entries are filled by 0. If idx and new_idx are of type range, the smallest
+           common divisor of the step sizes is used as new step size.
+
+        Parameters
+        ----------
+        deltas  -- List of fluctuations
+        idx     -- List or range of configs on which the deltas are defined.
+                   Has to be a subset of new_idx.
+        shape   -- Number of configs in idx.
+        new_idx -- List of configs that defines the new range.
+        """
+
+        if type(idx) is range and type(new_idx) is range:
+            if idx == new_idx:
+                return deltas
+        ret = np.zeros(new_idx[-1] - new_idx[0] + 1)
+        for i in range(shape):
+            ret[idx[i] - new_idx[0]] = deltas[i]
+        return ret
+
+    def calc_gamma(deltas1, deltas2, idx1, idx2, new_idx, w_max):
+        gamma = np.zeros(w_max)
+        deltas1 = expand_deltas(deltas1, idx1, len(idx1), new_idx)
+        deltas2 = expand_deltas(deltas2, idx2, len(idx2), new_idx)
+        new_shape = len(deltas1)
+        max_gamma = min(new_shape, w_max)
+        # The padding for the fft has to be even
+        padding = new_shape + max_gamma + (new_shape + max_gamma) % 2
+        gamma[:max_gamma] += (np.fft.irfft(np.fft.rfft(deltas1, padding) * np.conjugate(np.fft.rfft(deltas2, padding)))[:max_gamma] + np.fft.irfft(np.fft.rfft(deltas2, padding) * np.conjugate(np.fft.rfft(deltas1, padding)))[:max_gamma]) / 2.0
+
+        return gamma
+
+    if not hasattr(obs1, 'e_names') or not hasattr(obs2, 'e_names'):
+        raise Exception('The gamma method has to be applied to both Obs first.')
+
+    dvalue = 0
+    e_gamma = {}
+    e_dvalue = {}
+    e_n_tauint = {}
+    e_rho = {}
+
+    for e_name in obs1.e_names:
+
+        if e_name not in obs2.e_names:
+            continue
+
+        idl_d = {}
+        r_length = []
+        for r_name in obs1.e_content[e_name]:
+            if r_name not in obs2.e_content[e_name]:
+                continue
+            idl_d[r_name] = merge_idx([obs1.idl[r_name], obs2.idl[r_name]])
+            if isinstance(idl_d[r_name], range):
+                r_length.append(len(idl_d[r_name]))
+            else:
+                r_length.append((idl_d[r_name][-1] - idl_d[r_name][0] + 1))
+
+        if not r_length:
+            return 0.
+
+        w_max = max(r_length) // 2
+        e_gamma[e_name] = np.zeros(w_max)
+
+        for r_name in obs1.e_content[e_name]:
+            if r_name not in obs2.e_content[e_name]:
+                continue
+            e_gamma[e_name] += calc_gamma(obs1.deltas[r_name], obs2.deltas[r_name], obs1.idl[r_name], obs2.idl[r_name], idl_d[r_name], w_max)
+
+        if np.all(e_gamma[e_name] == 0.0):
+            continue
+
+        e_shapes = []
+        for r_name in obs1.e_content[e_name]:
+            e_shapes.append(obs1.shape[r_name])
+        gamma_div = np.zeros(w_max)
+        e_N = 0
+        for r_name in obs1.e_content[e_name]:
+            if r_name not in obs2.e_content[e_name]:
+                continue
+            gamma_div += calc_gamma(np.ones(obs1.shape[r_name]), np.ones(obs2.shape[r_name]), obs1.idl[r_name], obs2.idl[r_name], idl_d[r_name], w_max)
+            e_N += np.sum(np.ones_like(idl_d[r_name]))
+        e_gamma[e_name] /= gamma_div[:w_max]
+
+        e_rho[e_name] = e_gamma[e_name][:w_max] / e_gamma[e_name][0]
+        e_n_tauint[e_name] = np.cumsum(np.concatenate(([0.5], e_rho[e_name][1:])))
+        # Make sure no entry of tauint is smaller than 0.5
+        e_n_tauint[e_name][e_n_tauint[e_name] < 0.5] = 0.500000000001
+
+        window = max(obs1.e_windowsize[e_name], obs2.e_windowsize[e_name])
+        # Bias correction hep-lat/0306017 eq. (49)
+        e_dvalue[e_name] = 2 * (e_n_tauint[e_name][window] + obs1.tau_exp[e_name] * np.abs(e_rho[e_name][window + 1])) * (1 + (2 * window + 1) / e_N) * e_gamma[e_name][0] / e_N
+
+        dvalue += e_dvalue[e_name]
+
+    if np.abs(dvalue / obs1.dvalue / obs2.dvalue) > 1.0:
+        dvalue = np.sign(dvalue) * obs1.dvalue * obs2.dvalue
+
+    if correlation:
+        dvalue = dvalue / obs1.dvalue / obs2.dvalue
+
+    return dvalue
+
+
+def covariance3(obs1, obs2, correlation=False, **kwargs):
+    """Another alternative implementation of the covariance of two observables.
+
+    covariance3(obs, obs) is equal to obs.dvalue ** 2
+    Currently only works if ensembles are identical.
+    The gamma method has to be applied first to both observables.
+
+    If abs(covariance3(obs1, obs2)) > obs1.dvalue * obs2.dvalue, the covariance
+    is constrained to the maximum value in order to make sure that covariance
+    matrices are positive semidefinite.
+
+    Keyword arguments
+    -----------------
+    correlation -- if true the correlation instead of the covariance is
+                   returned (default False)
+    plot -- if true, the integrated autocorrelation time for each ensemble is
+            plotted.
+    """
+
+    for name in sorted(set(obs1.names + obs2.names)):
+        if (obs1.shape.get(name) != obs2.shape.get(name)) and (obs1.shape.get(name) is not None) and (obs2.shape.get(name) is not None):
+            raise Exception('Shapes of ensemble', name, 'do not fit')
+        if (1 != len(set([len(idx) for idx in [obs1.idl[name], obs2.idl[name], merge_idx([obs1.idl[name], obs2.idl[name]])]]))):
+            raise Exception('Shapes of ensemble', name, 'do not fit')
+
+    if not hasattr(obs1, 'e_names') or not hasattr(obs2, 'e_names'):
+        raise Exception('The gamma method has to be applied to both Obs first.')
+
+    tau_exp = []
+    S = []
+    for e_name in sorted(set(obs1.e_names + obs2.e_names)):
+        t_1 = obs1.tau_exp.get(e_name)
+        t_2 = obs2.tau_exp.get(e_name)
+        if t_1 is None:
+            t_1 = 0
+        if t_2 is None:
+            t_2 = 0
+        tau_exp.append(max(t_1, t_2))
+        S_1 = obs1.S.get(e_name)
+        S_2 = obs2.S.get(e_name)
+        if S_1 is None:
+            S_1 = Obs.S_global
+        if S_2 is None:
+            S_2 = Obs.S_global
+        S.append(max(S_1, S_2))
+
+    check_obs = obs1 + obs2
+    check_obs.gamma_method(tau_exp=tau_exp, S=S)
+
+    if kwargs.get('plot'):
+        check_obs.plot_tauint()
+        check_obs.plot_rho()
+
+    cov = (check_obs.dvalue ** 2 - obs1.dvalue ** 2 - obs2.dvalue ** 2) / 2
+
+    if np.abs(cov / obs1.dvalue / obs2.dvalue) > 1.0:
+        cov = np.sign(cov) * obs1.dvalue * obs2.dvalue
+
+    if correlation:
+        cov = cov / obs1.dvalue / obs2.dvalue
+
+    return cov
+
+
+def pseudo_Obs(value, dvalue, name, samples=1000):
+    """Generate a pseudo Obs with given value, dvalue and name
+
+    The standard number of samples is 1000. This can be adjusted via the
+    keyword argument samples.
+    """
+    if dvalue <= 0.0:
+        return Obs([np.zeros(samples) + value], [name])
+    else:
+        for _ in range(100):
+            deltas = [np.random.normal(0.0, dvalue * np.sqrt(samples), samples)]
+            deltas -= np.mean(deltas)
+            deltas *= dvalue / np.sqrt((np.var(deltas) / samples)) / np.sqrt(1 + 3 / samples)
+            deltas += value
+            res = Obs(deltas, [name])
+            res.gamma_method(S=2, tau_exp=0)
+            if abs(res.dvalue - dvalue) < 1e-10 * dvalue:
+                break
+
+        res._value = float(value)
+
+        return res
+
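+# Example: a Gaussian pseudo observable with value 1.2 and error 0.05,
+# printed in the compact error notation (output approximate):
+#
+#     >>> o = pseudo_Obs(1.2, 0.05, 'test_ensemble')
+#     >>> o.gamma_method()
+#     >>> print(o)
+#     1.200(50)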
+
+def dump_object(obj, name, **kwargs):
+    """Dump object into pickle file.
+
+    Keyword arguments
+    -----------------
+    path -- specifies a custom path for the file (default '.')
+    """
+    if 'path' in kwargs:
+        file_name = kwargs.get('path') + '/' + name + '.p'
+    else:
+        file_name = name + '.p'
+    with open(file_name, 'wb') as fb:
+        pickle.dump(obj, fb)
+
+
+def load_object(path):
+    """Load object from pickle file. """
+    with open(path, 'rb') as file:
+        return pickle.load(file)
+
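+# Round-trip sketch (the path is illustrative):
+#
+#     dump_object(my_obs, 'my_obs', path='/tmp')   # writes /tmp/my_obs.p
+#     my_obs2 = load_object('/tmp/my_obs.p')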
+
+def merge_obs(list_of_obs):
+    """Combine all observables in list_of_obs into one new observable
+
+    It is not possible to combine obs which are based on the same replicum
+    """
+    replist = [item for obs in list_of_obs for item in obs.names]
+    if len(replist) != len(set(replist)):
+        raise Exception('list_of_obs contains duplicate replica: %s' % (str(replist)))
+    new_dict = {}
+    idl_dict = {}
+    for o in list_of_obs:
+        new_dict.update({key: o.deltas.get(key, 0) + o.r_values.get(key, 0)
+                        for key in set(o.deltas) | set(o.r_values)})
+        idl_dict.update({key: o.idl.get(key, 0) for key in set(o.deltas)})
+
+    names = sorted(new_dict.keys())
+    o = Obs([new_dict[name] for name in names], names, idl=[idl_dict[name] for name in names])
+    o.is_merged = np.any([oi.is_merged for oi in list_of_obs])
+    o.reweighted = np.any([oi.reweighted for oi in list_of_obs])
+    return o
+
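+# Usage sketch: combine two replica of the same ensemble into a single Obs
+# ('samples1', 'samples2' are placeholders; replica names follow the
+# 'ensemble|replicum' convention):
+#
+#     r1 = Obs([samples1], ['ens|r01'])
+#     r2 = Obs([samples2], ['ens|r02'])
+#     full = merge_obs([r1, r2])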
+ +
+ +
+
+
+ #   + + + class + Obs: +
+ +
+ View Source +
class Obs:
+    """Class for a general observable.
+
+    Instances of Obs are the basic objects of a pyerrors error analysis.
+    They are initialized with a list which contains arrays of samples for
+    different ensembles/replica and another list of same length which contains
+    the names of the ensembles/replica. Mathematical operations can be
+    performed on instances. The result is another instance of Obs. The error of
+    an instance can be computed with the gamma_method. Also contains additional
+    methods for output and visualization of the error calculation.
+
+    Attributes
+    ----------
+    S_global : float
+        Standard value for S (default 2.0)
+    S_dict : dict
+        Dictionary for S values. If an entry for a given ensemble
+        exists this overwrites the standard value for that ensemble.
+    tau_exp_global : float
+        Standard value for tau_exp (default 0.0)
+    tau_exp_dict : dict
+        Dictionary for tau_exp values. If an entry for a given ensemble exists
+        this overwrites the standard value for that ensemble.
+    N_sigma_global : float
+        Standard value for N_sigma (default 1.0)
+    """
+    __slots__ = ['names', 'shape', 'r_values', 'deltas', 'N', '_value', '_dvalue',
+                 'ddvalue', 'reweighted', 'S', 'tau_exp', 'N_sigma',
+                 'e_dvalue', 'e_ddvalue', 'e_tauint', 'e_dtauint',
+                 'e_windowsize', 'e_rho', 'e_drho', 'e_n_tauint', 'e_n_dtauint',
+                 'idl', 'is_merged', 'tag', '__dict__']
+
+    S_global = 2.0
+    S_dict = {}
+    tau_exp_global = 0.0
+    tau_exp_dict = {}
+    N_sigma_global = 1.0
+    filter_eps = 1e-10
+
+    def __init__(self, samples, names, idl=None, means=None, **kwargs):
+        """ Initialize Obs object.
+
+        Attributes
+        ----------
+        samples : list
+            list of numpy arrays containing the Monte Carlo samples
+        names : list
+            list of strings labeling the individual samples
+        idl : list, optional
+            list of ranges or lists on which the samples are defined
+        means : list, optional
+            list of mean values for the case that the mean values were
+            already subtracted from the samples
+        """
+
+        if means is None:
+            if len(samples) != len(names):
+                raise Exception('Length of samples and names incompatible.')
+            if len(names) != len(set(names)):
+                raise Exception('Names are not unique.')
+            if not all(isinstance(x, str) for x in names):
+                raise TypeError('All names have to be strings.')
+            if min(len(x) for x in samples) <= 4:
+                raise Exception('Samples have to have at least 5 entries.')
+
+        self.names = sorted(names)
+        self.shape = {}
+        self.r_values = {}
+        self.deltas = {}
+
+        self.idl = {}
+        if idl is not None:
+            for name, idx in sorted(zip(names, idl)):
+                if isinstance(idx, range):
+                    self.idl[name] = idx
+                elif isinstance(idx, (list, np.ndarray)):
+                    dc = np.unique(np.diff(idx))
+                    if np.any(dc < 0):
+                        raise Exception("Unsorted idx for idl[%s]" % (name))
+                    if len(dc) == 1:
+                        self.idl[name] = range(idx[0], idx[-1] + dc[0], dc[0])
+                    else:
+                        self.idl[name] = list(idx)
+                else:
+                    raise Exception('incompatible type for idl[%s].' % (name))
+        else:
+            for name, sample in sorted(zip(names, samples)):
+                self.idl[name] = range(1, len(sample) + 1)
+
+        if means is not None:
+            for name, sample, mean in sorted(zip(names, samples, means)):
+                self.shape[name] = len(self.idl[name])
+                if len(sample) != self.shape[name]:
+                    raise Exception('Incompatible samples and idx for %s: %d vs. %d' % (name, len(sample), self.shape[name]))
+                self.r_values[name] = mean
+                self.deltas[name] = sample
+        else:
+            for name, sample in sorted(zip(names, samples)):
+                self.shape[name] = len(self.idl[name])
+                if len(sample) != self.shape[name]:
+                    raise Exception('Incompatible samples and idx for %s: %d vs. %d' % (name, len(sample), self.shape[name]))
+                self.r_values[name] = np.mean(sample)
+                self.deltas[name] = sample - self.r_values[name]
+        self.is_merged = False
+        self.N = sum(list(self.shape.values()))
+
+        self._value = 0
+        if means is None:
+            for name in self.names:
+                self._value += self.shape[name] * self.r_values[name]
+            self._value /= self.N
+
+        self._dvalue = 0.0
+        self.ddvalue = 0.0
+        self.reweighted = False
+
+        self.tag = None
+
+    @property
+    def value(self):
+        return self._value
+
+    @property
+    def dvalue(self):
+        return self._dvalue
+
+    @property
+    def e_names(self):
+        return sorted(set([o.split('|')[0] for o in self.names]))
+
+    @property
+    def e_content(self):
+        res = {}
+        for e, e_name in enumerate(self.e_names):
+            res[e_name] = sorted(filter(lambda x: x.startswith(e_name + '|'), self.names))
+            if e_name in self.names:
+                res[e_name].append(e_name)
+        return res
+
+    def expand_deltas(self, deltas, idx, shape):
+        """Expand deltas defined on idx to a regular, contiguous range, where holes are filled by 0.
+           If idx is of type range, the deltas are not changed
+
+        Parameters
+        ----------
+        deltas  -- List of fluctuations
+        idx     -- List or range of configs on which the deltas are defined.
+        shape   -- Number of configs in idx.
+        """
+        if type(idx) is range:
+            return deltas
+        else:
+            ret = np.zeros(idx[-1] - idx[0] + 1)
+            for i in range(shape):
+                ret[idx[i] - idx[0]] = deltas[i]
+            return ret
+
+    def calc_gamma(self, deltas, idx, shape, w_max, fft):
+        """Calculate Gamma_{AA} from the deltas, which are defined on idx.
+           idx is assumed to be a contiguous range (possibly with a stepsize != 1)
+
+        Parameters
+        ----------
+        deltas  -- List of fluctuations
+        idx     -- List or range of configs on which the deltas are defined.
+        shape   -- Number of configs in idx.
+        w_max   -- Upper bound for the summation window
+        fft     -- boolean, which determines whether the fft algorithm is used for
+                   the computation of the autocorrelation function
+        """
+        gamma = np.zeros(w_max)
+        deltas = self.expand_deltas(deltas, idx, shape)
+        new_shape = len(deltas)
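+        # The fft branch exploits the Wiener-Khinchin theorem: the
+        # autocorrelation is obtained as the inverse FFT of the power
+        # spectrum of the (zero-padded) deltas, costing O(N log N)
+        # instead of O(N * w_max) for the explicit loop below.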
+        if fft:
+            max_gamma = min(new_shape, w_max)
+            # The padding for the fft has to be even
+            padding = new_shape + max_gamma + (new_shape + max_gamma) % 2
+            gamma[:max_gamma] += np.fft.irfft(np.abs(np.fft.rfft(deltas, padding)) ** 2)[:max_gamma]
+        else:
+            for n in range(w_max):
+                if new_shape - n >= 0:
+                    gamma[n] += deltas[0:new_shape - n].dot(deltas[n:new_shape])
+
+        return gamma
+
+    def gamma_method(self, **kwargs):
+        """Calculate the error and related properties of the Obs.
+
+        Keyword arguments
+        -----------------
+        S : float
+            specifies a custom value for the parameter S (default 2.0), can be
+            a float or an array of floats for different ensembles
+        tau_exp : float
+            positive value triggers the critical slowing down analysis
+            (default 0.0), can be a float or an array of floats for different
+            ensembles
+        N_sigma : float
+            number of standard deviations from zero until the tail is
+            attached to the autocorrelation function (default 1)
+        fft : bool
+            determines whether the fft algorithm is used for the computation
+            of the autocorrelation function (default True)
+        """
+
+        e_content = self.e_content
+        self.e_dvalue = {}
+        self.e_ddvalue = {}
+        self.e_tauint = {}
+        self.e_dtauint = {}
+        self.e_windowsize = {}
+        self.e_n_tauint = {}
+        self.e_n_dtauint = {}
+        e_gamma = {}
+        self.e_rho = {}
+        self.e_drho = {}
+        self._dvalue = 0
+        self.ddvalue = 0
+
+        self.S = {}
+        self.tau_exp = {}
+
+        if kwargs.get('fft') is False:
+            fft = False
+        else:
+            fft = True
+
+        if 'S' in kwargs:
+            tmp = kwargs.get('S')
+            if isinstance(tmp, list):
+                if len(tmp) != len(self.e_names):
+                    raise Exception('Length of S array does not match ensembles.')
+                for e, e_name in enumerate(self.e_names):
+                    if tmp[e] <= 0:
+                        raise Exception('S has to be larger than 0.')
+                    self.S[e_name] = tmp[e]
+            else:
+                if isinstance(tmp, (int, float)):
+                    if tmp <= 0:
+                        raise Exception('S has to be larger than 0.')
+                    for e, e_name in enumerate(self.e_names):
+                        self.S[e_name] = tmp
+                else:
+                    raise TypeError('S is not in proper format.')
+        else:
+            for e, e_name in enumerate(self.e_names):
+                if e_name in Obs.S_dict:
+                    self.S[e_name] = Obs.S_dict[e_name]
+                else:
+                    self.S[e_name] = Obs.S_global
+
+        if 'tau_exp' in kwargs:
+            tmp = kwargs.get('tau_exp')
+            if isinstance(tmp, list):
+                if len(tmp) != len(self.e_names):
+                    raise Exception('Length of tau_exp array does not match ensembles.')
+                for e, e_name in enumerate(self.e_names):
+                    if tmp[e] < 0:
+                        raise Exception('tau_exp smaller than 0.')
+                    self.tau_exp[e_name] = tmp[e]
+            else:
+                if isinstance(tmp, (int, float)):
+                    if tmp < 0:
+                        raise Exception('tau_exp smaller than 0.')
+                    for e, e_name in enumerate(self.e_names):
+                        self.tau_exp[e_name] = tmp
+                else:
+                    raise TypeError('tau_exp is not in proper format.')
+        else:
+            for e, e_name in enumerate(self.e_names):
+                if e_name in Obs.tau_exp_dict:
+                    self.tau_exp[e_name] = Obs.tau_exp_dict[e_name]
+                else:
+                    self.tau_exp[e_name] = Obs.tau_exp_global
+
+        if 'N_sigma' in kwargs:
+            self.N_sigma = kwargs.get('N_sigma')
+            if not isinstance(self.N_sigma, (int, float)):
+                raise TypeError('N_sigma is not a number.')
+        else:
+            self.N_sigma = Obs.N_sigma_global
+
+        for e, e_name in enumerate(self.e_names):
+
+            r_length = []
+            for r_name in e_content[e_name]:
+                if isinstance(self.idl[r_name], range):
+                    r_length.append(len(self.idl[r_name]))
+                else:
+                    r_length.append((self.idl[r_name][-1] - self.idl[r_name][0] + 1))
+
+            e_N = np.sum([self.shape[r_name] for r_name in e_content[e_name]])
+            w_max = max(r_length) // 2
+            e_gamma[e_name] = np.zeros(w_max)
+            self.e_rho[e_name] = np.zeros(w_max)
+            self.e_drho[e_name] = np.zeros(w_max)
+
+            for r_name in e_content[e_name]:
+                e_gamma[e_name] += self.calc_gamma(self.deltas[r_name], self.idl[r_name], self.shape[r_name], w_max, fft)
+
+            gamma_div = np.zeros(w_max)
+            for r_name in e_content[e_name]:
+                gamma_div += self.calc_gamma(np.ones((self.shape[r_name])), self.idl[r_name], self.shape[r_name], w_max, fft)
+            e_gamma[e_name] /= gamma_div[:w_max]
+
+            if np.abs(e_gamma[e_name][0]) < 10 * np.finfo(float).tiny:  # Prevent division by zero
+                self.e_tauint[e_name] = 0.5
+                self.e_dtauint[e_name] = 0.0
+                self.e_dvalue[e_name] = 0.0
+                self.e_ddvalue[e_name] = 0.0
+                self.e_windowsize[e_name] = 0
+                continue
+
+            self.e_rho[e_name] = e_gamma[e_name][:w_max] / e_gamma[e_name][0]
+            self.e_n_tauint[e_name] = np.cumsum(np.concatenate(([0.5], self.e_rho[e_name][1:])))
+            # Make sure no entry of tauint is smaller than 0.5
+            self.e_n_tauint[e_name][self.e_n_tauint[e_name] <= 0.5] = 0.5 + np.finfo(np.float64).eps
+            # hep-lat/0306017 eq. (42)
+            self.e_n_dtauint[e_name] = self.e_n_tauint[e_name] * 2 * np.sqrt(np.abs(np.arange(w_max) + 0.5 - self.e_n_tauint[e_name]) / e_N)
+            self.e_n_dtauint[e_name][0] = 0.0
+
+            def _compute_drho(i):
+                tmp = self.e_rho[e_name][i + 1:w_max] + np.concatenate([self.e_rho[e_name][i - 1::-1], self.e_rho[e_name][1:w_max - 2 * i]]) - 2 * self.e_rho[e_name][i] * self.e_rho[e_name][1:w_max - i]
+                self.e_drho[e_name][i] = np.sqrt(np.sum(tmp ** 2) / e_N)
+
+            _compute_drho(1)
+            if self.tau_exp[e_name] > 0:
+                texp = self.tau_exp[e_name]
+                # if type(self.idl[e_name]) is range: # scale tau_exp according to step size
+                #    texp /= self.idl[e_name].step
+                # Critical slowing down analysis
+                for n in range(1, w_max // 2):
+                    _compute_drho(n + 1)
+                    if (self.e_rho[e_name][n] - self.N_sigma * self.e_drho[e_name][n]) < 0 or n >= w_max // 2 - 2:
+                        # Bias correction hep-lat/0306017 eq. (49) included
+                        self.e_tauint[e_name] = self.e_n_tauint[e_name][n] * (1 + (2 * n + 1) / e_N) / (1 + 1 / e_N) + texp * np.abs(self.e_rho[e_name][n + 1])  # The absolute makes sure, that the tail contribution is always positive
+                        self.e_dtauint[e_name] = np.sqrt(self.e_n_dtauint[e_name][n] ** 2 + texp ** 2 * self.e_drho[e_name][n + 1] ** 2)
+                        # Error of tau_exp neglected so far, missing term: self.e_rho[e_name][n + 1] ** 2 * d_tau_exp ** 2
+                        self.e_dvalue[e_name] = np.sqrt(2 * self.e_tauint[e_name] * e_gamma[e_name][0] * (1 + 1 / e_N) / e_N)
+                        self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt((n + 0.5) / e_N)
+                        self.e_windowsize[e_name] = n
+                        break
+            else:
+                # Standard automatic windowing procedure
+                g_w = self.S[e_name] / np.log((2 * self.e_n_tauint[e_name][1:] + 1) / (2 * self.e_n_tauint[e_name][1:] - 1))
+                g_w = np.exp(- np.arange(1, w_max) / g_w) - g_w / np.sqrt(np.arange(1, w_max) * e_N)
+                for n in range(1, w_max):
+                    if n < w_max // 2 - 2:
+                        _compute_drho(n + 1)
+                    if g_w[n - 1] < 0 or n >= w_max - 1:
+                        self.e_tauint[e_name] = self.e_n_tauint[e_name][n] * (1 + (2 * n + 1) / e_N) / (1 + 1 / e_N)  # Bias correction hep-lat/0306017 eq. (49)
+                        self.e_dtauint[e_name] = self.e_n_dtauint[e_name][n]
+                        self.e_dvalue[e_name] = np.sqrt(2 * self.e_tauint[e_name] * e_gamma[e_name][0] * (1 + 1 / e_N) / e_N)
+                        self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt((n + 0.5) / e_N)
+                        self.e_windowsize[e_name] = n
+                        break
+
+            self._dvalue += self.e_dvalue[e_name] ** 2
+            self.ddvalue += (self.e_dvalue[e_name] * self.e_ddvalue[e_name]) ** 2
+
+        self._dvalue = np.sqrt(self.dvalue)
+        if self._dvalue == 0.0:
+            self.ddvalue = 0.0
+        else:
+            self.ddvalue = np.sqrt(self.ddvalue) / self.dvalue
+        return
+
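+    # Usage sketch (illustrative values):
+    #
+    #     my_obs.gamma_method()                       # automatic windowing, S=2.0
+    #     my_obs.gamma_method(S=3.0)                  # larger summation window
+    #     my_obs.gamma_method(tau_exp=10, N_sigma=2)  # attach an exponential tail
+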
+    def print(self, level=1):
+        warnings.warn("Method 'print' renamed to 'details'", DeprecationWarning)
+        self.details(level > 1)
+
+    def details(self, ens_content=True):
+        """Output detailed properties of the Obs."""
+        if self.value == 0.0:
+            percentage = np.nan
+        else:
+            percentage = np.abs(self.dvalue / self.value) * 100
+        print('Result\t %3.8e +/- %3.8e +/- %3.8e (%3.3f%%)' % (self.value, self.dvalue, self.ddvalue, percentage))
+        if hasattr(self, 'e_dvalue'):
+            if len(self.e_names) > 1:
+                print(' Ensemble errors:')
+            for e_name in self.e_names:
+                if len(self.e_names) > 1:
+                    print('', e_name, '\t %3.8e +/- %3.8e' % (self.e_dvalue[e_name], self.e_ddvalue[e_name]))
+                if self.tau_exp[e_name] > 0:
+                    print(' t_int\t %3.8e +/- %3.8e tau_exp = %3.2f,  N_sigma = %1.0i' % (self.e_tauint[e_name], self.e_dtauint[e_name], self.tau_exp[e_name], self.N_sigma))
+                else:
+                    print(' t_int\t %3.8e +/- %3.8e S = %3.2f' % (self.e_tauint[e_name], self.e_dtauint[e_name], self.S[e_name]))
+        if self.tag is not None:
+            print("Description:", self.tag)
+        if ens_content is True:
+            if len(self.e_names) == 1:
+                print(self.N, 'samples in', len(self.e_names), 'ensemble:')
+            else:
+                print(self.N, 'samples in', len(self.e_names), 'ensembles:')
+            m = max(map(len, list(self.e_content.keys()))) + 1
+            print('\n'.join(['  ' + key.rjust(m) + ': ' + str(value) for key, value in sorted(self.e_content.items())]))
+
+    def is_zero_within_error(self, sigma=1):
+        """Checks whether the observable is zero within 'sigma' standard errors.
+
+        Only works properly after the gamma method has been run.
+        """
+        return self.is_zero() or np.abs(self.value) <= sigma * self.dvalue
+
+    def is_zero(self):
+        """Checks whether the observable is zero within machine precision."""
+        return np.isclose(0.0, self.value) and all(np.allclose(0.0, delta) for delta in self.deltas.values())
+
+    def plot_tauint(self, save=None):
+        """Plot integrated autocorrelation time for each ensemble."""
+        if not hasattr(self, 'e_names'):
+            raise Exception('Run the gamma method first.')
+
+        fig = plt.figure()
+        for e, e_name in enumerate(self.e_names):
+            plt.xlabel(r'$W$')
+            plt.ylabel(r'$\tau_\mathrm{int}$')
+            length = int(len(self.e_n_tauint[e_name]))
+            if self.tau_exp[e_name] > 0:
+                base = self.e_n_tauint[e_name][self.e_windowsize[e_name]]
+                x_help = np.arange(2 * self.tau_exp[e_name])
+                y_help = (x_help + 1) * np.abs(self.e_rho[e_name][self.e_windowsize[e_name] + 1]) * (1 - x_help / (2 * (2 * self.tau_exp[e_name] - 1))) + base
+                x_arr = np.arange(self.e_windowsize[e_name] + 1, self.e_windowsize[e_name] + 1 + 2 * self.tau_exp[e_name])
+                plt.plot(x_arr, y_help, 'C' + str(e), linewidth=1, ls='--', marker=',')
+                plt.errorbar([self.e_windowsize[e_name] + 2 * self.tau_exp[e_name]], [self.e_tauint[e_name]],
+                             yerr=[self.e_dtauint[e_name]], fmt='C' + str(e), linewidth=1, capsize=2, marker='o', mfc=plt.rcParams['axes.facecolor'])
+                xmax = self.e_windowsize[e_name] + 2 * self.tau_exp[e_name] + 1.5
+                label = e_name + r', $\tau_\mathrm{exp}$=' + str(np.around(self.tau_exp[e_name], decimals=2))
+            else:
+                label = e_name + ', S=' + str(np.around(self.S[e_name], decimals=2))
+                xmax = max(10.5, 2 * self.e_windowsize[e_name] - 0.5)
+
+            plt.errorbar(np.arange(length), self.e_n_tauint[e_name][:], yerr=self.e_n_dtauint[e_name][:], linewidth=1, capsize=2, label=label)
+            plt.axvline(x=self.e_windowsize[e_name], color='C' + str(e), alpha=0.5, marker=',', ls='--')
+            plt.legend()
+            plt.xlim(-0.5, xmax)
+            plt.ylim(bottom=0.0)
+            plt.draw()
+            if save:
+                fig.savefig(save)
+
+    def plot_rho(self):
+        """Plot normalized autocorrelation function time for each ensemble."""
+        if not hasattr(self, 'e_names'):
+            raise Exception('Run the gamma method first.')
+        for e, e_name in enumerate(self.e_names):
+            plt.xlabel('W')
+            plt.ylabel('rho')
+            length = int(len(self.e_drho[e_name]))
+            plt.errorbar(np.arange(length), self.e_rho[e_name][:length], yerr=self.e_drho[e_name][:], linewidth=1, capsize=2)
+            plt.axvline(x=self.e_windowsize[e_name], color='r', alpha=0.25, ls='--', marker=',')
+            if self.tau_exp[e_name] > 0:
+                plt.plot([self.e_windowsize[e_name] + 1, self.e_windowsize[e_name] + 1 + 2 * self.tau_exp[e_name]],
+                         [self.e_rho[e_name][self.e_windowsize[e_name] + 1], 0], 'k-', lw=1)
+                xmax = self.e_windowsize[e_name] + 2 * self.tau_exp[e_name] + 1.5
+                plt.title('Rho ' + e_name + r', tau\_exp=' + str(np.around(self.tau_exp[e_name], decimals=2)))
+            else:
+                xmax = max(10.5, 2 * self.e_windowsize[e_name] - 0.5)
+                plt.title('Rho ' + e_name + ', S=' + str(np.around(self.S[e_name], decimals=2)))
+            plt.plot([-0.5, xmax], [0, 0], 'k--', lw=1)
+            plt.xlim(-0.5, xmax)
+            plt.draw()
+
+    def plot_rep_dist(self):
+        """Plot replica distribution for each ensemble with more than one replicum."""
+        if not hasattr(self, 'e_names'):
+            raise Exception('Run the gamma method first.')
+        for e, e_name in enumerate(self.e_names):
+            if len(self.e_content[e_name]) == 1:
+                print('No replica distribution for a single replicum (', e_name, ')')
+                continue
+            r_length = []
+            sub_r_mean = 0
+            for r, r_name in enumerate(self.e_content[e_name]):
+                r_length.append(len(self.deltas[r_name]))
+                sub_r_mean += self.shape[r_name] * self.r_values[r_name]
+            e_N = np.sum(r_length)
+            sub_r_mean /= e_N
+            arr = np.zeros(len(self.e_content[e_name]))
+            for r, r_name in enumerate(self.e_content[e_name]):
+                arr[r] = (self.r_values[r_name] - sub_r_mean) / (self.e_dvalue[e_name] * np.sqrt(e_N / self.shape[r_name] - 1))
+            plt.hist(arr, rwidth=0.8, bins=len(self.e_content[e_name]))
+            plt.title('Replica distribution ' + e_name + ' (mean=0, var=1)')
+            plt.draw()
+
+    def plot_history(self, expand=True):
+        """Plot derived Monte Carlo history for each ensemble."""
+        if not hasattr(self, 'e_names'):
+            raise Exception('Run the gamma method first.')
+
+        for e, e_name in enumerate(self.e_names):
+            plt.figure()
+            r_length = []
+            tmp = []
+            for r, r_name in enumerate(self.e_content[e_name]):
+                if expand:
+                    tmp.append(self.expand_deltas(self.deltas[r_name], self.idl[r_name], self.shape[r_name]) + self.r_values[r_name])
+                else:
+                    tmp.append(self.deltas[r_name] + self.r_values[r_name])
+                r_length.append(len(tmp[-1]))
+            e_N = np.sum(r_length)
+            x = np.arange(e_N)
+            y = np.concatenate(tmp, axis=0)
+            plt.errorbar(x, y, fmt='.', markersize=3)
+            plt.xlim(-0.5, e_N - 0.5)
+            plt.title(e_name)
+            plt.draw()
+
+    def plot_piechart(self):
+        """Plot piechart which shows the fractional contribution of each
+        ensemble to the error and returns a dictionary containing the fractions."""
+        if not hasattr(self, 'e_names'):
+            raise Exception('Run the gamma method first.')
+        if self.dvalue == 0.0:
+            raise Exception('Error is 0.0')
+        labels = self.e_names
+        sizes = np.array([i ** 2 for i in list(self.e_dvalue.values())]) / self.dvalue ** 2
+        fig1, ax1 = plt.subplots()
+        ax1.pie(sizes, labels=labels, startangle=90, normalize=True)
+        ax1.axis('equal')
+        plt.draw()
+
+        return dict(zip(self.e_names, sizes))
+
+    def dump(self, name, **kwargs):
+        """Dump the Obs to a pickle file 'name'.
+
+        Keyword arguments
+        -----------------
+        path -- specifies a custom path for the file (default '.')
+        """
+        if 'path' in kwargs:
+            file_name = kwargs.get('path') + '/' + name + '.p'
+        else:
+            file_name = name + '.p'
+        with open(file_name, 'wb') as fb:
+            pickle.dump(self, fb)
+
+    def __float__(self):
+        return float(self.value)
+
+    def __repr__(self):
+        return 'Obs[' + str(self) + ']'
+
+    def __str__(self):
+        if self.dvalue == 0.0:
+            return str(self.value)
+        fexp = np.floor(np.log10(self.dvalue))
+        if fexp < 0.0:
+            return '{:{form}}({:2.0f})'.format(self.value, self.dvalue * 10 ** (-fexp + 1), form='.' + str(-int(fexp) + 1) + 'f')
+        elif fexp == 0.0:
+            return '{:.1f}({:1.1f})'.format(self.value, self.dvalue)
+        else:
+            return '{:.0f}({:2.0f})'.format(self.value, self.dvalue)
+
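+    # Examples of the compact notation produced by __str__ (illustrative):
+    #     value=0.1234, dvalue=0.0012  ->  0.1234(12)
+    #     value=3.4,    dvalue=1.2     ->  3.4(1.2)
+    #     value=123.0,  dvalue=12.0    ->  123(12)
+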
+    # Overload comparisons
+    def __lt__(self, other):
+        return self.value < other
+
+    def __le__(self, other):
+        return self.value <= other
+
+    def __gt__(self, other):
+        return self.value > other
+
+    def __ge__(self, other):
+        return self.value >= other
+
+    def __eq__(self, other):
+        return (self - other).is_zero()
+
+    def __ne__(self, other):
+        return not (self - other).is_zero()
+
+    # Overload math operations
+    def __add__(self, y):
+        if isinstance(y, Obs):
+            return derived_observable(lambda x, **kwargs: x[0] + x[1], [self, y], man_grad=[1, 1])
+        else:
+            if isinstance(y, np.ndarray):
+                return np.array([self + o for o in y])
+            elif y.__class__.__name__ == 'Corr':
+                return NotImplemented
+            else:
+                return derived_observable(lambda x, **kwargs: x[0] + y, [self], man_grad=[1])
+
+    def __radd__(self, y):
+        return self + y
+
+    def __mul__(self, y):
+        if isinstance(y, Obs):
+            return derived_observable(lambda x, **kwargs: x[0] * x[1], [self, y], man_grad=[y.value, self.value])
+        else:
+            if isinstance(y, np.ndarray):
+                return np.array([self * o for o in y])
+            elif isinstance(y, complex):
+                return CObs(self * y.real, self * y.imag)
+            elif y.__class__.__name__ == 'Corr':
+                return NotImplemented
+            else:
+                return derived_observable(lambda x, **kwargs: x[0] * y, [self], man_grad=[y])
+
+    def __rmul__(self, y):
+        return self * y
+
+    def __sub__(self, y):
+        if isinstance(y, Obs):
+            return derived_observable(lambda x, **kwargs: x[0] - x[1], [self, y], man_grad=[1, -1])
+        else:
+            if isinstance(y, np.ndarray):
+                return np.array([self - o for o in y])
+
+            elif y.__class__.__name__ == 'Corr':
+                return NotImplemented
+
+            else:
+                return derived_observable(lambda x, **kwargs: x[0] - y, [self], man_grad=[1])
+
+    def __rsub__(self, y):
+        return -1 * (self - y)
+
+    def __neg__(self):
+        return -1 * self
+
+    def __truediv__(self, y):
+        if isinstance(y, Obs):
+            return derived_observable(lambda x, **kwargs: x[0] / x[1], [self, y], man_grad=[1 / y.value, - self.value / y.value ** 2])
+        else:
+            if isinstance(y, np.ndarray):
+                return np.array([self / o for o in y])
+            elif y.__class__.__name__ == 'Corr':
+                return NotImplemented
+            else:
+                return derived_observable(lambda x, **kwargs: x[0] / y, [self], man_grad=[1 / y])
+
+    def __rtruediv__(self, y):
+        if isinstance(y, Obs):
+            return derived_observable(lambda x, **kwargs: x[0] / x[1], [y, self], man_grad=[1 / self.value, - y.value / self.value ** 2])
+        else:
+            if isinstance(y, np.ndarray):
+                return np.array([o / self for o in y])
+            elif y.__class__.__name__ == 'Corr':
+                return NotImplemented
+            else:
+                return derived_observable(lambda x, **kwargs: y / x[0], [self], man_grad=[-y / self.value ** 2])
+
+    def __pow__(self, y):
+        if isinstance(y, Obs):
+            return derived_observable(lambda x: x[0] ** x[1], [self, y])
+        else:
+            return derived_observable(lambda x: x[0] ** y, [self])
+
+    def __rpow__(self, y):
+        if isinstance(y, Obs):
+            return derived_observable(lambda x: x[0] ** x[1], [y, self])
+        else:
+            return derived_observable(lambda x: y ** x[0], [self])
+
+    def __abs__(self):
+        return derived_observable(lambda x: anp.abs(x[0]), [self])
+
+    # Overload numpy functions
+    def sqrt(self):
+        return derived_observable(lambda x, **kwargs: np.sqrt(x[0]), [self], man_grad=[1 / 2 / np.sqrt(self.value)])
+
+    def log(self):
+        return derived_observable(lambda x, **kwargs: np.log(x[0]), [self], man_grad=[1 / self.value])
+
+    def exp(self):
+        return derived_observable(lambda x, **kwargs: np.exp(x[0]), [self], man_grad=[np.exp(self.value)])
+
+    def sin(self):
+        return derived_observable(lambda x, **kwargs: np.sin(x[0]), [self], man_grad=[np.cos(self.value)])
+
+    def cos(self):
+        return derived_observable(lambda x, **kwargs: np.cos(x[0]), [self], man_grad=[-np.sin(self.value)])
+
+    def tan(self):
+        return derived_observable(lambda x, **kwargs: np.tan(x[0]), [self], man_grad=[1 / np.cos(self.value) ** 2])
+
+    def arcsin(self):
+        return derived_observable(lambda x: anp.arcsin(x[0]), [self])
+
+    def arccos(self):
+        return derived_observable(lambda x: anp.arccos(x[0]), [self])
+
+    def arctan(self):
+        return derived_observable(lambda x: anp.arctan(x[0]), [self])
+
+    def sinh(self):
+        return derived_observable(lambda x, **kwargs: np.sinh(x[0]), [self], man_grad=[np.cosh(self.value)])
+
+    def cosh(self):
+        return derived_observable(lambda x, **kwargs: np.cosh(x[0]), [self], man_grad=[np.sinh(self.value)])
+
+    def tanh(self):
+        return derived_observable(lambda x, **kwargs: np.tanh(x[0]), [self], man_grad=[1 / np.cosh(self.value) ** 2])
+
+    def arcsinh(self):
+        return derived_observable(lambda x: anp.arcsinh(x[0]), [self])
+
+    def arccosh(self):
+        return derived_observable(lambda x: anp.arccosh(x[0]), [self])
+
+    def arctanh(self):
+        return derived_observable(lambda x: anp.arctanh(x[0]), [self])
+
+    def sinc(self):
+        return derived_observable(lambda x: anp.sinc(x[0]), [self])
+
+ +
+ +

Class for a general observable.

+ +

Instances of Obs are the basic objects of a pyerrors error analysis. +They are initialized with a list which contains arrays of samples for +different ensembles/replica and another list of same length which contains +the names of the ensembles/replica. Mathematical operations can be +performed on instances. The result is another instance of Obs. The error of +an instance can be computed with the gamma_method. Also contains additional +methods for output and visualization of the error calculation.

+ +
Attributes
+ +
    +
  • S_global (float): +Standard value for S (default 2.0)
  • +
  • S_dict (dict): +Dictionary for S values. If an entry for a given ensemble +exists this overwrites the standard value for that ensemble.
  • +
  • tau_exp_global (float): +Standard value for tau_exp (default 0.0)
  • +
  • tau_exp_dict (dict): +Dictionary for tau_exp values. If an entry for a given ensemble exists +this overwrites the standard value for that ensemble.
  • +
  • N_sigma_global (float): +Standard value for N_sigma (default 1.0)
  • +
+
+ + +
+
#   + + + Obs(samples, names, idl=None, means=None, **kwargs) +
+ +
+ View Source +
    def __init__(self, samples, names, idl=None, means=None, **kwargs):
+        """ Initialize Obs object.
+
+        Attributes
+        ----------
+        samples : list
+            list of numpy arrays containing the Monte Carlo samples
+        names : list
+            list of strings labeling the individual samples
+        idl : list, optional
+            list of ranges or lists on which the samples are defined
+        means : list, optional
+            list of mean values for the case that the mean values were
+            already subtracted from the samples
+        """
+
+        if means is None:
+            if len(samples) != len(names):
+                raise Exception('Length of samples and names incompatible.')
+            if len(names) != len(set(names)):
+                raise Exception('Names are not unique.')
+            if not all(isinstance(x, str) for x in names):
+                raise TypeError('All names have to be strings.')
+            if min(len(x) for x in samples) <= 4:
+                raise Exception('Samples have to have at least 5 entries.')
+
+        self.names = sorted(names)
+        self.shape = {}
+        self.r_values = {}
+        self.deltas = {}
+
+        self.idl = {}
+        if idl is not None:
+            for name, idx in sorted(zip(names, idl)):
+                if isinstance(idx, range):
+                    self.idl[name] = idx
+                elif isinstance(idx, (list, np.ndarray)):
+                    dc = np.unique(np.diff(idx))
+                    if np.any(dc < 0):
+                        raise Exception("Unsorted idx for idl[%s]" % (name))
+                    if len(dc) == 1:
+                        self.idl[name] = range(idx[0], idx[-1] + dc[0], dc[0])
+                    else:
+                        self.idl[name] = list(idx)
+                else:
+                    raise Exception('incompatible type for idl[%s].' % (name))
+        else:
+            for name, sample in sorted(zip(names, samples)):
+                self.idl[name] = range(1, len(sample) + 1)
+
+        if means is not None:
+            for name, sample, mean in sorted(zip(names, samples, means)):
+                self.shape[name] = len(self.idl[name])
+                if len(sample) != self.shape[name]:
+                    raise Exception('Incompatible samples and idx for %s: %d vs. %d' % (name, len(sample), self.shape[name]))
+                self.r_values[name] = mean
+                self.deltas[name] = sample
+        else:
+            for name, sample in sorted(zip(names, samples)):
+                self.shape[name] = len(self.idl[name])
+                if len(sample) != self.shape[name]:
+                    raise Exception('Incompatible samples and idx for %s: %d vs. %d' % (name, len(sample), self.shape[name]))
+                self.r_values[name] = np.mean(sample)
+                self.deltas[name] = sample - self.r_values[name]
+        self.is_merged = False
+        self.N = sum(list(self.shape.values()))
+
+        self._value = 0
+        if means is None:
+            for name in self.names:
+                self._value += self.shape[name] * self.r_values[name]
+            self._value /= self.N
+
+        self._dvalue = 0.0
+        self.ddvalue = 0.0
+        self.reweighted = False
+
+        self.tag = None
+
+ +
+ +

Initialize Obs object.

+ +
Parameters
+ +
    +
  • samples (list): list of numpy arrays containing the Monte Carlo samples
  • names (list): list of strings labeling the individual samples
  • idl (list, optional): list of ranges or lists on which the samples are defined
  • means (list, optional): list of mean values for the case that the mean values were already subtracted from the samples
+
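A sketch of the idl argument for an irregular Monte Carlo chain (the configuration numbers are illustrative): samples measured only on every other configuration can be registered like this:

```python
import numpy as np
import pyerrors as pe

# Measurements exist only on configs 1, 3, 5, ..., 199.
configs = range(1, 200, 2)
samples = np.random.normal(0.5, 0.05, len(configs))

obs = pe.Obs([samples], ['ens_A'], idl=[configs])
obs.gamma_method()
obs.details()
```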
+ + +
+
+
#   + + S_global = 2.0 +
+ + + +
+
+
#   + + S_dict = {} +
+ + + +
+
+
#   + + tau_exp_global = 0.0 +
+ + + +
+
+
#   + + tau_exp_dict = {} +
+ + + +
+
+
#   + + N_sigma_global = 1.0 +
+ + + +
+
+
#   + + filter_eps = 1e-10 +
+ + + +
+
+
#   + + names +
+ + + +
+
+
#   + + shape +
+ + + +
+
+
#   + + r_values +
+ + + +
+
+
#   + + deltas +
+ + + +
+
+
#   + + idl +
+ + + +
+
+
#   + + is_merged +
+ + + +
+
+
#   + + N +
+ + + +
+
+
#   + + ddvalue +
+ + + +
+
+
#   + + reweighted +
+ + + +
+
+
#   + + tag +
+ + + +
+
+
#   + + value +
+ + + +
+
+
#   + + dvalue +
+ + + +
+
+
#   + + e_names +
+ + + +
+
+
#   + + e_content +
+ + + +
+
+
#   + + + def + expand_deltas(self, deltas, idx, shape): +
+ +
+ View Source +
    def expand_deltas(self, deltas, idx, shape):
+        """Expand deltas defined on idx to a regular, contiguous range, where holes are filled by 0.
+           If idx is of type range, the deltas are not changed
+
+        Parameters
+        ----------
+        deltas  -- List of fluctuations
+        idx     -- List or range of configs on which the deltas are defined.
+        shape   -- Number of configs in idx.
+        """
+        if type(idx) is range:
+            return deltas
+        else:
+            ret = np.zeros(idx[-1] - idx[0] + 1)
+            for i in range(shape):
+                ret[idx[i] - idx[0]] = deltas[i]
+            return ret
+
+ +
+ +

Expand deltas defined on idx to a regular, contiguous range, where holes are filled by 0. If idx is of type range, the deltas are not changed.

+ +
Parameters
+ +
    +
  • deltas -- List of fluctuations
  • +
  • idx -- List or range of configs on which the deltas are defined.
  • +
  • shape -- Number of configs in idx.
  • +
+
+ + +
+
+
#   + + + def + calc_gamma(self, deltas, idx, shape, w_max, fft): +
+ +
+ View Source +
    def calc_gamma(self, deltas, idx, shape, w_max, fft):
+        """Calculate Gamma_{AA} from the deltas, which are defined on idx.
+           idx is assumed to be a contiguous range (possibly with a stepsize != 1)
+
+        Parameters
+        ----------
+        deltas  -- List of fluctuations
+        idx     -- List or range of configs on which the deltas are defined.
+        shape   -- Number of configs in idx.
+        w_max   -- Upper bound for the summation window
+        fft     -- boolean, which determines whether the fft algorithm is used for
+                   the computation of the autocorrelation function
+        """
+        gamma = np.zeros(w_max)
+        deltas = self.expand_deltas(deltas, idx, shape)
+        new_shape = len(deltas)
+        if fft:
+            max_gamma = min(new_shape, w_max)
+            # The padding for the fft has to be even
+            padding = new_shape + max_gamma + (new_shape + max_gamma) % 2
+            gamma[:max_gamma] += np.fft.irfft(np.abs(np.fft.rfft(deltas, padding)) ** 2)[:max_gamma]
+        else:
+            for n in range(w_max):
+                if new_shape - n >= 0:
+                    gamma[n] += deltas[0:new_shape - n].dot(deltas[n:new_shape])
+
+        return gamma
+
+ +
+ +

Calculate Gamma_{AA} from the deltas, which are defined on idx. idx is assumed to be a contiguous range (possibly with a stepsize != 1).

+ +
Parameters
+ +
    +
  • deltas -- List of fluctuations
  • +
  • idx -- List or range of configs on which the deltas are defined.
  • +
  • shape -- Number of configs in idx.
  • +
  • w_max -- Upper bound for the summation window
  • +
  • fft -- boolean, which determines whether the fft algorithm is used for the computation of the autocorrelation function
  • +
+
+ + +
+
+
#   + + + def + gamma_method(self, **kwargs): +
+ +
+ View Source +
    def gamma_method(self, **kwargs):
+        """Calculate the error and related properties of the Obs.
+
+        Keyword arguments
+        -----------------
+        S : float
+            specifies a custom value for the parameter S (default 2.0), can be
+            a float or an array of floats for different ensembles
+        tau_exp : float
+            positive value triggers the critical slowing down analysis
+            (default 0.0), can be a float or an array of floats for different
+            ensembles
+        N_sigma : float
+            number of standard deviations from zero until the tail is
+            attached to the autocorrelation function (default 1)
+        fft : bool
+            determines whether the fft algorithm is used for the computation
+            of the autocorrelation function (default True)
+        """
+
+        e_content = self.e_content
+        self.e_dvalue = {}
+        self.e_ddvalue = {}
+        self.e_tauint = {}
+        self.e_dtauint = {}
+        self.e_windowsize = {}
+        self.e_n_tauint = {}
+        self.e_n_dtauint = {}
+        e_gamma = {}
+        self.e_rho = {}
+        self.e_drho = {}
+        self._dvalue = 0
+        self.ddvalue = 0
+
+        self.S = {}
+        self.tau_exp = {}
+
+        if kwargs.get('fft') is False:
+            fft = False
+        else:
+            fft = True
+
+        if 'S' in kwargs:
+            tmp = kwargs.get('S')
+            if isinstance(tmp, list):
+                if len(tmp) != len(self.e_names):
+                    raise Exception('Length of S array does not match ensembles.')
+                for e, e_name in enumerate(self.e_names):
+                    if tmp[e] <= 0:
+                        raise Exception('S has to be larger than 0.')
+                    self.S[e_name] = tmp[e]
+            else:
+                if isinstance(tmp, (int, float)):
+                    if tmp <= 0:
+                        raise Exception('S has to be larger than 0.')
+                    for e, e_name in enumerate(self.e_names):
+                        self.S[e_name] = tmp
+                else:
+                    raise TypeError('S is not in proper format.')
+        else:
+            for e, e_name in enumerate(self.e_names):
+                if e_name in Obs.S_dict:
+                    self.S[e_name] = Obs.S_dict[e_name]
+                else:
+                    self.S[e_name] = Obs.S_global
+
+        if 'tau_exp' in kwargs:
+            tmp = kwargs.get('tau_exp')
+            if isinstance(tmp, list):
+                if len(tmp) != len(self.e_names):
+                    raise Exception('Length of tau_exp array does not match ensembles.')
+                for e, e_name in enumerate(self.e_names):
+                    if tmp[e] < 0:
+                        raise Exception('tau_exp smaller than 0.')
+                    self.tau_exp[e_name] = tmp[e]
+            else:
+                if isinstance(tmp, (int, float)):
+                    if tmp < 0:
+                        raise Exception('tau_exp smaller than 0.')
+                    for e, e_name in enumerate(self.e_names):
+                        self.tau_exp[e_name] = tmp
+                else:
+                    raise TypeError('tau_exp is not in proper format.')
+        else:
+            for e, e_name in enumerate(self.e_names):
+                if e_name in Obs.tau_exp_dict:
+                    self.tau_exp[e_name] = Obs.tau_exp_dict[e_name]
+                else:
+                    self.tau_exp[e_name] = Obs.tau_exp_global
+
+        if 'N_sigma' in kwargs:
+            self.N_sigma = kwargs.get('N_sigma')
+            if not isinstance(self.N_sigma, (int, float)):
+                raise TypeError('N_sigma is not a number.')
+        else:
+            self.N_sigma = Obs.N_sigma_global
+
+        for e, e_name in enumerate(self.e_names):
+
+            r_length = []
+            for r_name in e_content[e_name]:
+                if isinstance(self.idl[r_name], range):
+                    r_length.append(len(self.idl[r_name]))
+                else:
+                    r_length.append((self.idl[r_name][-1] - self.idl[r_name][0] + 1))
+
+            e_N = np.sum([self.shape[r_name] for r_name in e_content[e_name]])
+            w_max = max(r_length) // 2
+            e_gamma[e_name] = np.zeros(w_max)
+            self.e_rho[e_name] = np.zeros(w_max)
+            self.e_drho[e_name] = np.zeros(w_max)
+
+            for r_name in e_content[e_name]:
+                e_gamma[e_name] += self.calc_gamma(self.deltas[r_name], self.idl[r_name], self.shape[r_name], w_max, fft)
+
+            gamma_div = np.zeros(w_max)
+            for r_name in e_content[e_name]:
+                gamma_div += self.calc_gamma(np.ones((self.shape[r_name])), self.idl[r_name], self.shape[r_name], w_max, fft)
+            e_gamma[e_name] /= gamma_div[:w_max]
+
+            if np.abs(e_gamma[e_name][0]) < 10 * np.finfo(float).tiny:  # Prevent division by zero
+                self.e_tauint[e_name] = 0.5
+                self.e_dtauint[e_name] = 0.0
+                self.e_dvalue[e_name] = 0.0
+                self.e_ddvalue[e_name] = 0.0
+                self.e_windowsize[e_name] = 0
+                continue
+
+            self.e_rho[e_name] = e_gamma[e_name][:w_max] / e_gamma[e_name][0]
+            self.e_n_tauint[e_name] = np.cumsum(np.concatenate(([0.5], self.e_rho[e_name][1:])))
+            # Make sure no entry of tauint is smaller than 0.5
+            self.e_n_tauint[e_name][self.e_n_tauint[e_name] <= 0.5] = 0.5 + np.finfo(np.float64).eps
+            # hep-lat/0306017 eq. (42)
+            self.e_n_dtauint[e_name] = self.e_n_tauint[e_name] * 2 * np.sqrt(np.abs(np.arange(w_max) + 0.5 - self.e_n_tauint[e_name]) / e_N)
+            self.e_n_dtauint[e_name][0] = 0.0
+
+            def _compute_drho(i):
+                tmp = self.e_rho[e_name][i + 1:w_max] + np.concatenate([self.e_rho[e_name][i - 1::-1], self.e_rho[e_name][1:w_max - 2 * i]]) - 2 * self.e_rho[e_name][i] * self.e_rho[e_name][1:w_max - i]
+                self.e_drho[e_name][i] = np.sqrt(np.sum(tmp ** 2) / e_N)
+
+            _compute_drho(1)
+            if self.tau_exp[e_name] > 0:
+                texp = self.tau_exp[e_name]
+                # if type(self.idl[e_name]) is range: # scale tau_exp according to step size
+                #    texp /= self.idl[e_name].step
+                # Critical slowing down analysis
+                for n in range(1, w_max // 2):
+                    _compute_drho(n + 1)
+                    if (self.e_rho[e_name][n] - self.N_sigma * self.e_drho[e_name][n]) < 0 or n >= w_max // 2 - 2:
+                        # Bias correction hep-lat/0306017 eq. (49) included
+                        self.e_tauint[e_name] = self.e_n_tauint[e_name][n] * (1 + (2 * n + 1) / e_N) / (1 + 1 / e_N) + texp * np.abs(self.e_rho[e_name][n + 1])  # The absolute makes sure, that the tail contribution is always positive
+                        self.e_dtauint[e_name] = np.sqrt(self.e_n_dtauint[e_name][n] ** 2 + texp ** 2 * self.e_drho[e_name][n + 1] ** 2)
+                        # Error of tau_exp neglected so far, missing term: self.e_rho[e_name][n + 1] ** 2 * d_tau_exp ** 2
+                        self.e_dvalue[e_name] = np.sqrt(2 * self.e_tauint[e_name] * e_gamma[e_name][0] * (1 + 1 / e_N) / e_N)
+                        self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt((n + 0.5) / e_N)
+                        self.e_windowsize[e_name] = n
+                        break
+            else:
+                # Standard automatic windowing procedure
+                g_w = self.S[e_name] / np.log((2 * self.e_n_tauint[e_name][1:] + 1) / (2 * self.e_n_tauint[e_name][1:] - 1))
+                g_w = np.exp(- np.arange(1, w_max) / g_w) - g_w / np.sqrt(np.arange(1, w_max) * e_N)
+                for n in range(1, w_max):
+                    if n < w_max // 2 - 2:
+                        _compute_drho(n + 1)
+                    if g_w[n - 1] < 0 or n >= w_max - 1:
+                        self.e_tauint[e_name] = self.e_n_tauint[e_name][n] * (1 + (2 * n + 1) / e_N) / (1 + 1 / e_N)  # Bias correction hep-lat/0306017 eq. (49)
+                        self.e_dtauint[e_name] = self.e_n_dtauint[e_name][n]
+                        self.e_dvalue[e_name] = np.sqrt(2 * self.e_tauint[e_name] * e_gamma[e_name][0] * (1 + 1 / e_N) / e_N)
+                        self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt((n + 0.5) / e_N)
+                        self.e_windowsize[e_name] = n
+                        break
+
+            self._dvalue += self.e_dvalue[e_name] ** 2
+            self.ddvalue += (self.e_dvalue[e_name] * self.e_ddvalue[e_name]) ** 2
+
+        self._dvalue = np.sqrt(self.dvalue)
+        if self._dvalue == 0.0:
+            self.ddvalue = 0.0
+        else:
+            self.ddvalue = np.sqrt(self.ddvalue) / self.dvalue
+        return
+
+ +
+ +

Calculate the error and related properties of the Obs.

+ +
Keyword arguments
+ +

  • S (float): specifies a custom value for the parameter S (default 2.0), can be a float or an array of floats for different ensembles
  • tau_exp (float): positive value triggers the critical slowing down analysis (default 0.0), can be a float or an array of floats for different ensembles
  • N_sigma (float): number of standard deviations from zero until the tail is attached to the autocorrelation function (default 1)
  • fft (bool): determines whether the fft algorithm is used for the computation of the autocorrelation function (default True)

+
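A short sketch of the keyword arguments listed above (the parameter values are illustrative, not recommendations):

```python
import numpy as np
import pyerrors as pe

obs = pe.Obs([np.random.normal(1.0, 0.1, 1000)], ['ens_A'])

obs.gamma_method(S=3.0)                   # custom windowing parameter S
obs.gamma_method(tau_exp=10, N_sigma=2)   # critical slowing down analysis
obs.gamma_method(fft=False)               # sum the autocorrelation explicitly
print(obs.e_tauint['ens_A'], obs.e_windowsize['ens_A'])
```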
+ + +
+
+
#   + + + def + print(self, level=1): +
+ +
+ View Source +
    def print(self, level=1):
+        warnings.warn("Method 'print' renamed to 'details'", DeprecationWarning)
+        self.details(level > 1)
+
+ +
+ + + +
+
+
#   + + + def + details(self, ens_content=True): +
+ +
+ View Source +
    def details(self, ens_content=True):
+        """Output detailed properties of the Obs."""
+        if self.value == 0.0:
+            percentage = np.nan
+        else:
+            percentage = np.abs(self.dvalue / self.value) * 100
+        print('Result\t %3.8e +/- %3.8e +/- %3.8e (%3.3f%%)' % (self.value, self.dvalue, self.ddvalue, percentage))
+        if hasattr(self, 'e_dvalue'):
+            if len(self.e_names) > 1:
+                print(' Ensemble errors:')
+            for e_name in self.e_names:
+                if len(self.e_names) > 1:
+                    print('', e_name, '\t %3.8e +/- %3.8e' % (self.e_dvalue[e_name], self.e_ddvalue[e_name]))
+                if self.tau_exp[e_name] > 0:
+                    print(' t_int\t %3.8e +/- %3.8e tau_exp = %3.2f,  N_sigma = %1.0i' % (self.e_tauint[e_name], self.e_dtauint[e_name], self.tau_exp[e_name], self.N_sigma))
+                else:
+                    print(' t_int\t %3.8e +/- %3.8e S = %3.2f' % (self.e_tauint[e_name], self.e_dtauint[e_name], self.S[e_name]))
+        if self.tag is not None:
+            print("Description:", self.tag)
+        if ens_content is True:
+            if len(self.e_names) == 1:
+                print(self.N, 'samples in', len(self.e_names), 'ensemble:')
+            else:
+                print(self.N, 'samples in', len(self.e_names), 'ensembles:')
+            m = max(map(len, list(self.e_content.keys()))) + 1
+            print('\n'.join(['  ' + key.rjust(m) + ': ' + str(value) for key, value in sorted(self.e_content.items())]))
+
+ +
+ +

Output detailed properties of the Obs.

+
+ + +
+
+
#   + + + def + is_zero_within_error(self, sigma=1): +
+ +
+ View Source +
    def is_zero_within_error(self, sigma=1):
+        """Checks whether the observable is zero within 'sigma' standard errors.
+
+        Only works properly when the gamma method was run.
+        """
+        return self.is_zero() or np.abs(self.value) <= sigma * self.dvalue
+
+ +
+ +

Checks whether the observable is zero within 'sigma' standard errors.

+ +

Only works properly when the gamma method was run.

+
+ + +
+
+
#   + + + def + is_zero(self): +
+ +
+ View Source +
    def is_zero(self):
+        """Checks whether the observable is zero within machine precision."""
+        return np.isclose(0.0, self.value) and all(np.allclose(0.0, delta) for delta in self.deltas.values())
+
+ +
+ +

Checks whether the observable is zero within machine precision.

+
+ + +
+
+
#   + + + def + plot_tauint(self, save=None): +
+ +
+ View Source +
    def plot_tauint(self, save=None):
+        """Plot integrated autocorrelation time for each ensemble."""
+        if not hasattr(self, 'e_names'):
+            raise Exception('Run the gamma method first.')
+
+        fig = plt.figure()
+        for e, e_name in enumerate(self.e_names):
+            plt.xlabel(r'$W$')
+            plt.ylabel(r'$\tau_\mathrm{int}$')
+            length = int(len(self.e_n_tauint[e_name]))
+            if self.tau_exp[e_name] > 0:
+                base = self.e_n_tauint[e_name][self.e_windowsize[e_name]]
+                x_help = np.arange(2 * self.tau_exp[e_name])
+                y_help = (x_help + 1) * np.abs(self.e_rho[e_name][self.e_windowsize[e_name] + 1]) * (1 - x_help / (2 * (2 * self.tau_exp[e_name] - 1))) + base
+                x_arr = np.arange(self.e_windowsize[e_name] + 1, self.e_windowsize[e_name] + 1 + 2 * self.tau_exp[e_name])
+                plt.plot(x_arr, y_help, 'C' + str(e), linewidth=1, ls='--', marker=',')
+                plt.errorbar([self.e_windowsize[e_name] + 2 * self.tau_exp[e_name]], [self.e_tauint[e_name]],
+                             yerr=[self.e_dtauint[e_name]], fmt='C' + str(e), linewidth=1, capsize=2, marker='o', mfc=plt.rcParams['axes.facecolor'])
+                xmax = self.e_windowsize[e_name] + 2 * self.tau_exp[e_name] + 1.5
+                label = e_name + r', $\tau_\mathrm{exp}$=' + str(np.around(self.tau_exp[e_name], decimals=2))
+            else:
+                label = e_name + ', S=' + str(np.around(self.S[e_name], decimals=2))
+                xmax = max(10.5, 2 * self.e_windowsize[e_name] - 0.5)
+
+            plt.errorbar(np.arange(length), self.e_n_tauint[e_name][:], yerr=self.e_n_dtauint[e_name][:], linewidth=1, capsize=2, label=label)
+            plt.axvline(x=self.e_windowsize[e_name], color='C' + str(e), alpha=0.5, marker=',', ls='--')
+            plt.legend()
+            plt.xlim(-0.5, xmax)
+            plt.ylim(bottom=0.0)
+            plt.draw()
+            if save:
+                fig.savefig(save)
+
+ +
+ +

Plot integrated autocorrelation time for each ensemble.

+
+ + +
+
+
#   + + + def + plot_rho(self): +
+ +
+ View Source +
    def plot_rho(self):
+        """Plot normalized autocorrelation function time for each ensemble."""
+        if not hasattr(self, 'e_names'):
+            raise Exception('Run the gamma method first.')
+        for e, e_name in enumerate(self.e_names):
+            plt.xlabel('W')
+            plt.ylabel('rho')
+            length = int(len(self.e_drho[e_name]))
+            plt.errorbar(np.arange(length), self.e_rho[e_name][:length], yerr=self.e_drho[e_name][:], linewidth=1, capsize=2)
+            plt.axvline(x=self.e_windowsize[e_name], color='r', alpha=0.25, ls='--', marker=',')
+            if self.tau_exp[e_name] > 0:
+                plt.plot([self.e_windowsize[e_name] + 1, self.e_windowsize[e_name] + 1 + 2 * self.tau_exp[e_name]],
+                         [self.e_rho[e_name][self.e_windowsize[e_name] + 1], 0], 'k-', lw=1)
+                xmax = self.e_windowsize[e_name] + 2 * self.tau_exp[e_name] + 1.5
+                plt.title('Rho ' + e_name + r', tau\_exp=' + str(np.around(self.tau_exp[e_name], decimals=2)))
+            else:
+                xmax = max(10.5, 2 * self.e_windowsize[e_name] - 0.5)
+                plt.title('Rho ' + e_name + ', S=' + str(np.around(self.S[e_name], decimals=2)))
+            plt.plot([-0.5, xmax], [0, 0], 'k--', lw=1)
+            plt.xlim(-0.5, xmax)
+            plt.draw()
+
+ +
+ +

Plot normalized autocorrelation function time for each ensemble.

+
+ + +
+
+
#   + + + def + plot_rep_dist(self): +
+ +
+ View Source +
    def plot_rep_dist(self):
+        """Plot replica distribution for each ensemble with more than one replicum."""
+        if not hasattr(self, 'e_names'):
+            raise Exception('Run the gamma method first.')
+        for e, e_name in enumerate(self.e_names):
+            if len(self.e_content[e_name]) == 1:
+                print('No replica distribution for a single replicum (', e_name, ')')
+                continue
+            r_length = []
+            sub_r_mean = 0
+            for r, r_name in enumerate(self.e_content[e_name]):
+                r_length.append(len(self.deltas[r_name]))
+                sub_r_mean += self.shape[r_name] * self.r_values[r_name]
+            e_N = np.sum(r_length)
+            sub_r_mean /= e_N
+            arr = np.zeros(len(self.e_content[e_name]))
+            for r, r_name in enumerate(self.e_content[e_name]):
+                arr[r] = (self.r_values[r_name] - sub_r_mean) / (self.e_dvalue[e_name] * np.sqrt(e_N / self.shape[r_name] - 1))
+            plt.hist(arr, rwidth=0.8, bins=len(self.e_content[e_name]))
+            plt.title('Replica distribution ' + e_name + ' (mean=0, var=1)')
+            plt.draw()
+
+ +
+ +

Plot replica distribution for each ensemble with more than one replicum.

+
+ + +
+
+
#   + + + def + plot_history(self, expand=True): +
+ +
+ View Source +
    def plot_history(self, expand=True):
+        """Plot derived Monte Carlo history for each ensemble."""
+        if not hasattr(self, 'e_names'):
+            raise Exception('Run the gamma method first.')
+
+        for e, e_name in enumerate(self.e_names):
+            plt.figure()
+            r_length = []
+            tmp = []
+            for r, r_name in enumerate(self.e_content[e_name]):
+                if expand:
+                    tmp.append(self.expand_deltas(self.deltas[r_name], self.idl[r_name], self.shape[r_name]) + self.r_values[r_name])
+                else:
+                    tmp.append(self.deltas[r_name] + self.r_values[r_name])
+                r_length.append(len(tmp[-1]))
+            e_N = np.sum(r_length)
+            x = np.arange(e_N)
+            y = np.concatenate(tmp, axis=0)
+            plt.errorbar(x, y, fmt='.', markersize=3)
+            plt.xlim(-0.5, e_N - 0.5)
+            plt.title(e_name)
+            plt.draw()
+
+ +
+ +

Plot derived Monte Carlo history for each ensemble.

+
+ + +
+
+
#   + + + def + plot_piechart(self): +
+ +
+ View Source +
    def plot_piechart(self):
+        """Plot piechart which shows the fractional contribution of each
+        ensemble to the error and returns a dictionary containing the fractions."""
+        if not hasattr(self, 'e_names'):
+            raise Exception('Run the gamma method first.')
+        if self.dvalue == 0.0:
+            raise Exception('Error is 0.0')
+        labels = self.e_names
+        sizes = np.array([i ** 2 for i in list(self.e_dvalue.values())]) / self.dvalue ** 2  # np.array needed, a plain list cannot be divided by a float
+        fig1, ax1 = plt.subplots()
+        ax1.pie(sizes, labels=labels, startangle=90, normalize=True)
+        ax1.axis('equal')
+        plt.draw()
+
+        return dict(zip(self.e_names, sizes))
+
+ +
+ +

Plot piechart which shows the fractional contribution of each ensemble to the error and returns a dictionary containing the fractions.

+
+ + +
+
+
#   + + + def + dump(self, name, **kwargs): +
+ +
+ View Source +
    def dump(self, name, **kwargs):
+        """Dump the Obs to a pickle file 'name'.
+
+        Keyword arguments
+        -----------------
+        path -- specifies a custom path for the file (default '.')
+        """
+        if 'path' in kwargs:
+            file_name = kwargs.get('path') + '/' + name + '.p'
+        else:
+            file_name = name + '.p'
+        with open(file_name, 'wb') as fb:
+            pickle.dump(self, fb)
+
+ +
+ +

Dump the Obs to a pickle file 'name'.

+ +
Keyword arguments
+ +

path -- specifies a custom path for the file (default '.')

+
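Since dump writes a plain pickle file (see the source above), the object can be read back with the standard library. A minimal sketch with an illustrative file name:

```python
import pickle
import numpy as np
import pyerrors as pe

obs = pe.Obs([np.random.normal(1.0, 0.1, 500)], ['ens_A'])
obs.dump('my_obs', path='.')        # writes ./my_obs.p

with open('my_obs.p', 'rb') as f:   # read it back via pickle
    obs_loaded = pickle.load(f)
```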
+ + +
+
+
#   + + + def + sqrt(self): +
+ +
+ View Source +
    def sqrt(self):
+        return derived_observable(lambda x, **kwargs: np.sqrt(x[0]), [self], man_grad=[1 / 2 / np.sqrt(self.value)])
+
+ +
+ + + +
+
+
#   + + + def + log(self): +
+ +
+ View Source +
    def log(self):
+        return derived_observable(lambda x, **kwargs: np.log(x[0]), [self], man_grad=[1 / self.value])
+
+ +
+ + + +
+
+
#   + + + def + exp(self): +
+ +
+ View Source +
    def exp(self):
+        return derived_observable(lambda x, **kwargs: np.exp(x[0]), [self], man_grad=[np.exp(self.value)])
+
+ +
+ + + +
+
+
#   + + + def + sin(self): +
+ +
+ View Source +
    def sin(self):
+        return derived_observable(lambda x, **kwargs: np.sin(x[0]), [self], man_grad=[np.cos(self.value)])
+
+ +
+ + + +
+
+
#   + + + def + cos(self): +
+ +
+ View Source +
    def cos(self):
+        return derived_observable(lambda x, **kwargs: np.cos(x[0]), [self], man_grad=[-np.sin(self.value)])
+
+ +
+ + + +
+
+
#   + + + def + tan(self): +
+ +
+ View Source +
    def tan(self):
+        return derived_observable(lambda x, **kwargs: np.tan(x[0]), [self], man_grad=[1 / np.cos(self.value) ** 2])
+
+ +
+ + + +
+
+
#   + + + def + arcsin(self): +
+ +
+ View Source +
    def arcsin(self):
+        return derived_observable(lambda x: anp.arcsin(x[0]), [self])
+
+ +
+ + + +
+
+
#   + + + def + arccos(self): +
+ +
+ View Source +
    def arccos(self):
+        return derived_observable(lambda x: anp.arccos(x[0]), [self])
+
+ +
+ + + +
+
+
#   + + + def + arctan(self): +
+ +
+ View Source +
    def arctan(self):
+        return derived_observable(lambda x: anp.arctan(x[0]), [self])
+
+ +
+ + + +
+
+
#   + + + def + sinh(self): +
+ +
+ View Source +
    def sinh(self):
+        return derived_observable(lambda x, **kwargs: np.sinh(x[0]), [self], man_grad=[np.cosh(self.value)])
+
+ +
+ + + +
+
+
#   + + + def + cosh(self): +
+ +
+ View Source +
    def cosh(self):
+        return derived_observable(lambda x, **kwargs: np.cosh(x[0]), [self], man_grad=[np.sinh(self.value)])
+
+ +
+ + + +
+
+
#   + + + def + tanh(self): +
+ +
+ View Source +
    def tanh(self):
+        return derived_observable(lambda x, **kwargs: np.tanh(x[0]), [self], man_grad=[1 / np.cosh(self.value) ** 2])
+
+ +
+ + + +
+
+
#   + + + def + arcsinh(self): +
+ +
+ View Source +
    def arcsinh(self):
+        return derived_observable(lambda x: anp.arcsinh(x[0]), [self])
+
+ +
+ + + +
+
+
#   + + + def + arccosh(self): +
+ +
+ View Source +
    def arccosh(self):
+        return derived_observable(lambda x: anp.arccosh(x[0]), [self])
+
+ +
+ + + +
+
+
#   + + + def + arctanh(self): +
+ +
+ View Source +
    def arctanh(self):
+        return derived_observable(lambda x: anp.arctanh(x[0]), [self])
+
+ +
+ + + +
+
+
#   + + + def + sinc(self): +
+ +
+ View Source +
    def sinc(self):
+        return derived_observable(lambda x: anp.sinc(x[0]), [self])
+
+ +
+ + + +
+
+
#   + + N_sigma +
+ + + +
+
+
#   + + S +
+ + + +
+
+
#   + + e_ddvalue +
+ + + +
+
+
#   + + e_drho +
+ + + +
+
+
#   + + e_dtauint +
+ + + +
+
+
#   + + e_dvalue +
+ + + +
+
+
#   + + e_n_dtauint +
+ + + +
+
+
#   + + e_n_tauint +
+ + + +
+
+
#   + + e_rho +
+ + + +
+
+
#   + + e_tauint +
+ + + +
+
+
#   + + e_windowsize +
+ + + +
+
+
#   + + tau_exp +
+ + + +
+
+
+
+ #   + + + class + CObs: +
+ +
+ View Source +
class CObs:
+    """Class for a complex valued observable."""
+    __slots__ = ['_real', '_imag', 'tag']
+
+    def __init__(self, real, imag=0.0):
+        self._real = real
+        self._imag = imag
+        self.tag = None
+
+    @property
+    def real(self):
+        return self._real
+
+    @property
+    def imag(self):
+        return self._imag
+
+    def gamma_method(self, **kwargs):
+        """Executes the gamma_method for the real and the imaginary part."""
+        if isinstance(self.real, Obs):
+            self.real.gamma_method(**kwargs)
+        if isinstance(self.imag, Obs):
+            self.imag.gamma_method(**kwargs)
+
+    def is_zero(self):
+        """Checks whether both real and imaginary part are zero within machine precision."""
+        return self.real == 0.0 and self.imag == 0.0
+
+    def conjugate(self):
+        return CObs(self.real, -self.imag)
+
+    def __add__(self, other):
+        if isinstance(other, np.ndarray):
+            return other + self
+        elif hasattr(other, 'real') and hasattr(other, 'imag'):
+            return CObs(self.real + other.real,
+                        self.imag + other.imag)
+        else:
+            return CObs(self.real + other, self.imag)
+
+    def __radd__(self, y):
+        return self + y
+
+    def __sub__(self, other):
+        if isinstance(other, np.ndarray):
+            return -1 * (other - self)
+        elif hasattr(other, 'real') and hasattr(other, 'imag'):
+            return CObs(self.real - other.real, self.imag - other.imag)
+        else:
+            return CObs(self.real - other, self.imag)
+
+    def __rsub__(self, other):
+        return -1 * (self - other)
+
+    def __mul__(self, other):
+        if isinstance(other, np.ndarray):
+            return other * self
+        elif hasattr(other, 'real') and hasattr(other, 'imag'):
+            if all(isinstance(i, Obs) for i in [self.real, self.imag, other.real, other.imag]):
+                return CObs(derived_observable(lambda x, **kwargs: x[0] * x[1] - x[2] * x[3],
+                                               [self.real, other.real, self.imag, other.imag],
+                                               man_grad=[other.real.value, self.real.value, -other.imag.value, -self.imag.value]),
+                            derived_observable(lambda x, **kwargs: x[2] * x[1] + x[0] * x[3],
+                                               [self.real, other.real, self.imag, other.imag],
+                                               man_grad=[other.imag.value, self.imag.value, other.real.value, self.real.value]))
+            elif getattr(other, 'imag', 0) != 0:
+                return CObs(self.real * other.real - self.imag * other.imag,
+                            self.imag * other.real + self.real * other.imag)
+            else:
+                return CObs(self.real * other.real, self.imag * other.real)
+        else:
+            return CObs(self.real * other, self.imag * other)
+
+    def __rmul__(self, other):
+        return self * other
+
+    def __truediv__(self, other):
+        if isinstance(other, np.ndarray):
+            return 1 / (other / self)
+        elif hasattr(other, 'real') and hasattr(other, 'imag'):
+            r = other.real ** 2 + other.imag ** 2
+            return CObs((self.real * other.real + self.imag * other.imag) / r, (self.imag * other.real - self.real * other.imag) / r)
+        else:
+            return CObs(self.real / other, self.imag / other)
+
+    def __rtruediv__(self, other):
+        r = self.real ** 2 + self.imag ** 2
+        if hasattr(other, 'real') and hasattr(other, 'imag'):
+            return CObs((self.real * other.real + self.imag * other.imag) / r, (self.real * other.imag - self.imag * other.real) / r)
+        else:
+            return CObs(self.real * other / r, -self.imag * other / r)
+
+    def __abs__(self):
+        return np.sqrt(self.real**2 + self.imag**2)
+
+    def __neg__(self):
+        return -1 * self
+
+    def __eq__(self, other):
+        return self.real == other.real and self.imag == other.imag
+
+    def __str__(self):
+        return '(' + str(self.real) + int(self.imag >= 0.0) * '+' + str(self.imag) + 'j)'
+
+    def __repr__(self):
+        return 'CObs[' + str(self) + ']'
+
+ +
+ +

Class for a complex valued observable.

+
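A minimal sketch of working with a complex observable built from two real Obs (the sample data are synthetic):

```python
import numpy as np
import pyerrors as pe

re_part = pe.Obs([np.random.normal(1.0, 0.1, 500)], ['ens_A'])
im_part = pe.Obs([np.random.normal(0.5, 0.1, 500)], ['ens_A'])

c = pe.CObs(re_part, im_part)
mod_sq = c * c.conjugate()     # |c|^2, again a CObs
mod_sq.gamma_method()          # errors for real and imaginary part
print(mod_sq.real, mod_sq.imag)
```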
+ + +
+
#   + + + CObs(real, imag=0.0) +
+ +
+ View Source +
    def __init__(self, real, imag=0.0):
+        self._real = real
+        self._imag = imag
+        self.tag = None
+
+ +
+ + + +
+
+
#   + + tag +
+ + + +
+
+
#   + + real +
+ + + +
+
+
#   + + imag +
+ + + +
+
+
#   + + + def + gamma_method(self, **kwargs): +
+ +
+ View Source +
    def gamma_method(self, **kwargs):
+        """Executes the gamma_method for the real and the imaginary part."""
+        if isinstance(self.real, Obs):
+            self.real.gamma_method(**kwargs)
+        if isinstance(self.imag, Obs):
+            self.imag.gamma_method(**kwargs)
+
+ +
+ +

Executes the gamma_method for the real and the imaginary part.

+
+ + +
+
+
#   + + + def + is_zero(self): +
+ +
+ View Source +
    def is_zero(self):
+        """Checks whether both real and imaginary part are zero within machine precision."""
+        return self.real == 0.0 and self.imag == 0.0
+
+ +
+ +

Checks whether both real and imaginary part are zero within machine precision.

+
+ + +
+
+
#   + + + def + conjugate(self): +
+ +
+ View Source +
    def conjugate(self):
+        return CObs(self.real, -self.imag)
+
+ +
+ + + +
+
+
+
#   + + + def + merge_idx(idl): +
+ +
+ View Source +
def merge_idx(idl):
+    """Returns the union of all lists in idl
+
+    Parameters
+    ----------
+    idl  -- List of lists or ranges.
+    """
+
+    # Use groupby to efficiently check whether all elements of idl are identical
+    try:
+        g = groupby(idl)
+        if next(g, True) and not next(g, False):
+            return idl[0]
+    except Exception:
+        pass
+
+    if np.all([type(idx) is range for idx in idl]):
+        if len(set([idx[0] for idx in idl])) == 1:
+            idstart = min([idx.start for idx in idl])
+            idstop = max([idx.stop for idx in idl])
+            idstep = min([idx.step for idx in idl])
+            return range(idstart, idstop, idstep)
+
+    return list(set().union(*idl))
+
+ +
+ +

Returns the union of all lists in idl

+ +
Parameters
+ +
    +
  • idl -- List of lists or ranges.
  • +
+
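Two illustrative calls (the function lives in pyerrors.obs):

```python
from pyerrors.obs import merge_idx

print(merge_idx([range(1, 11), range(1, 11)]))  # identical inputs -> range(1, 11)
print(merge_idx([range(1, 11, 2), [2, 4, 6]]))  # otherwise -> union as an (unordered) list
```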
+ + +
+
+
#   + + + def + expand_deltas_for_merge(deltas, idx, shape, new_idx): +
+ +
+ View Source +
def expand_deltas_for_merge(deltas, idx, shape, new_idx):
+    """Expand deltas defined on idx to the list of configs that is defined by new_idx.
+       New, empty entries are filled by 0. If idx and new_idx are of type range, the smallest
+       common divisor of the step sizes is used as new step size.
+
+    Parameters
+    ----------
+    deltas : list
+        List of fluctuations
+    idx : list
+        List or range of configs on which the deltas are defined.
+        Has to be a subset of new_idx.
+    shape : int
+        Number of configs in idx.
+    new_idx : list
+        List of configs that defines the new range.
+    """
+
+    if type(idx) is range and type(new_idx) is range:
+        if idx == new_idx:
+            return deltas
+    ret = np.zeros(new_idx[-1] - new_idx[0] + 1)
+    for i in range(shape):
+        ret[idx[i] - new_idx[0]] = deltas[i]
+    return np.array([ret[new_idx[i] - new_idx[0]] for i in range(len(new_idx))])
+
+ +
+ +

Expand deltas defined on idx to the list of configs that is defined by new_idx. New, empty entries are filled by 0. If idx and new_idx are of type range, the smallest common divisor of the step sizes is used as the new step size.

+ +
Parameters
+ +
    +
  • deltas (list): List of fluctuations
  • idx (list): List or range of configs on which the deltas are defined. Has to be a subset of new_idx.
  • shape (int): Number of configs in idx.
  • new_idx (list): List of configs that defines the new range.
+
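A small worked example of the hole-filling (illustrative numbers):

```python
import numpy as np
from pyerrors.obs import expand_deltas_for_merge

deltas = np.array([0.1, -0.2, 0.3])
idx = [1, 3, 5]           # configs on which the deltas are defined
new_idx = range(1, 6)     # target configs; missing entries become 0

print(expand_deltas_for_merge(deltas, idx, len(idx), new_idx))
# [ 0.1  0.  -0.2  0.   0.3]
```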
+ + +
+
+
#   + + + def + filter_zeroes(names, deltas, idl, eps=1e-10): +
+ +
+ View Source +
def filter_zeroes(names, deltas, idl, eps=Obs.filter_eps):
+    """Filter out all configurations with vanishing fluctuation such that they do not
+       contribute to the error estimate anymore. Returns the new names, deltas and
+       idl according to the filtering.
+       A fluctuation is considered to be vanishing, if it is smaller than eps times
+       the mean of the absolute values of all deltas in one list.
+
+    Parameters
+    ----------
+    names  -- List of names
+    deltas -- Dict of lists of fluctuations
+    idl    -- Dict of lists or ranges of configs on which the deltas are defined.
+
+    Optional parameters
+    ----------
+    eps    -- Prefactor that enters the filter criterion.
+    """
+    new_names = []
+    new_deltas = {}
+    new_idl = {}
+    for name in names:
+        nd = []
+        ni = []
+        maxd = np.mean(np.fabs(deltas[name]))
+        for i in range(len(deltas[name])):
+            if not np.isclose(0.0, deltas[name][i], atol=eps * maxd):
+                nd.append(deltas[name][i])
+                ni.append(idl[name][i])
+        if nd:
+            new_names.append(name)
+            new_deltas[name] = np.array(nd)
+            new_idl[name] = ni
+    return (new_names, new_deltas, new_idl)
+
+ +
+ +

Filter out all configurations with vanishing fluctuation such that they do not contribute to the error estimate anymore. Returns the new names, deltas and idl according to the filtering. A fluctuation is considered to be vanishing if it is smaller than eps times the mean of the absolute values of all deltas in one list.

+ +
Parameters
+ +
    +
  • names -- List of names
  • deltas -- Dict of lists of fluctuations
  • idl -- Dict of lists or ranges of configs on which the deltas are defined.
+ +
Optional parameters
+ +

eps -- Prefactor that enters the filter criterion.

+
+ + +
+
+
#   + + + def + derived_observable(func, data, **kwargs): +
+ +
+ View Source +
def derived_observable(func, data, **kwargs):
+    """Construct a derived Obs according to func(data, **kwargs) using automatic differentiation.
+
+    Parameters
+    ----------
+    func : object
+        arbitrary function of the form func(data, **kwargs). For the
+        automatic differentiation to work, all numpy functions have to have
+        the autograd wrapper (use 'import autograd.numpy as anp').
+    data : list
+        list of Obs, e.g. [obs1, obs2, obs3].
+
+    Keyword arguments
+    -----------------
+    num_grad : bool
+        if True, numerical derivatives are used instead of autograd
+        (default False). To control the numerical differentiation the
+        kwargs of numdifftools.step_generators.MaxStepGenerator
+        can be used.
+    man_grad : list
+        manually supply a list or an array which contains the jacobian
+        of func. Use cautiously, supplying the wrong derivative will
+        not be intercepted.
+
+    Notes
+    -----
+    For simple mathematical operations it can be practical to use anonymous
+    functions. For the ratio of two observables one can e.g. use
+
+    new_obs = derived_observable(lambda x: x[0] / x[1], [obs1, obs2])
+    """
+
+    data = np.asarray(data)
+    raveled_data = data.ravel()
+
+    # Workaround for matrix operations containing non Obs data
+    for i_data in raveled_data:
+        if isinstance(i_data, Obs):
+            first_name = i_data.names[0]
+            first_shape = i_data.shape[first_name]
+            first_idl = i_data.idl[first_name]
+            break
+
+    for i in range(len(raveled_data)):
+        if isinstance(raveled_data[i], (int, float)):
+            raveled_data[i] = Obs([raveled_data[i] + np.zeros(first_shape)], [first_name], idl=[first_idl])
+
+    n_obs = len(raveled_data)
+    new_names = sorted(set([y for x in [o.names for o in raveled_data] for y in x]))
+
+    is_merged = len(list(filter(lambda o: o.is_merged is True, raveled_data))) > 0
+    reweighted = len(list(filter(lambda o: o.reweighted is True, raveled_data))) > 0
+    new_idl_d = {}
+    for name in new_names:
+        idl = []
+        for i_data in raveled_data:
+            tmp = i_data.idl.get(name)
+            if tmp is not None:
+                idl.append(tmp)
+        new_idl_d[name] = merge_idx(idl)
+        if not is_merged:
+            is_merged = (1 != len(set([len(idx) for idx in [*idl, new_idl_d[name]]])))
+
+    if data.ndim == 1:
+        values = np.array([o.value for o in data])
+    else:
+        values = np.vectorize(lambda x: x.value)(data)
+
+    new_values = func(values, **kwargs)
+
+    multi = 0
+    if isinstance(new_values, np.ndarray):
+        multi = 1
+
+    new_r_values = {}
+    for name in new_names:
+        tmp_values = np.zeros(n_obs)
+        for i, item in enumerate(raveled_data):
+            tmp = item.r_values.get(name)
+            if tmp is None:
+                tmp = item.value
+            tmp_values[i] = tmp
+        if multi > 0:
+            tmp_values = np.array(tmp_values).reshape(data.shape)
+        new_r_values[name] = func(tmp_values, **kwargs)
+
+    if 'man_grad' in kwargs:
+        deriv = np.asarray(kwargs.get('man_grad'))
+        if new_values.shape + data.shape != deriv.shape:
+            raise Exception('Manual derivative does not have correct shape.')
+    elif kwargs.get('num_grad') is True:
+        if multi > 0:
+            raise Exception('Multi mode currently not supported for numerical derivative')
+        options = {
+            'base_step': 0.1,
+            'step_ratio': 2.5,
+            'num_steps': None,
+            'step_nom': None,
+            'offset': None,
+            'num_extrap': None,
+            'use_exact_steps': None,
+            'check_num_steps': None,
+            'scale': None}
+        for key in options.keys():
+            kwarg = kwargs.get(key)
+            if kwarg is not None:
+                options[key] = kwarg
+        tmp_df = nd.Gradient(func, order=4, **{k: v for k, v in options.items() if v is not None})(values, **kwargs)
+        if tmp_df.size == 1:
+            deriv = np.array([tmp_df.real])
+        else:
+            deriv = tmp_df.real
+    else:
+        deriv = jacobian(func)(values, **kwargs)
+
+    final_result = np.zeros(new_values.shape, dtype=object)
+
+    for i_val, new_val in np.ndenumerate(new_values):
+        new_deltas = {}
+        for j_obs, obs in np.ndenumerate(data):
+            for name in obs.names:
+                new_deltas[name] = new_deltas.get(name, 0) + deriv[i_val + j_obs] * expand_deltas_for_merge(obs.deltas[name], obs.idl[name], obs.shape[name], new_idl_d[name])
+
+        new_samples = []
+        new_means = []
+        new_idl = []
+        if is_merged:
+            filtered_names, filtered_deltas, filtered_idl_d = filter_zeroes(new_names, new_deltas, new_idl_d)
+        else:
+            filtered_names = new_names
+            filtered_deltas = new_deltas
+            filtered_idl_d = new_idl_d
+        for name in filtered_names:
+            new_samples.append(filtered_deltas[name])
+            new_means.append(new_r_values[name][i_val])
+            new_idl.append(filtered_idl_d[name])
+        final_result[i_val] = Obs(new_samples, filtered_names, means=new_means, idl=new_idl)
+        final_result[i_val]._value = new_val
+        final_result[i_val].is_merged = is_merged
+        final_result[i_val].reweighted = reweighted
+
+    if multi == 0:
+        final_result = final_result.item()
+
+    return final_result
+
+ +
+ +

Construct a derived Obs according to func(data, **kwargs) using automatic differentiation.

+ +
Parameters
+ +
    +
  • func (object): arbitrary function of the form func(data, **kwargs). For the automatic differentiation to work, all numpy functions have to have the autograd wrapper (use 'import autograd.numpy as anp').
  • data (list): list of Obs, e.g. [obs1, obs2, obs3].
+ +
Keyword arguments
+ +

  • num_grad (bool): if True, numerical derivatives are used instead of autograd (default False). To control the numerical differentiation the kwargs of numdifftools.step_generators.MaxStepGenerator can be used.
  • man_grad (list): manually supply a list or an array which contains the jacobian of func. Use cautiously, supplying the wrong derivative will not be intercepted.

+ +
Notes
+ +

For simple mathematical operations it can be practical to use anonymous functions. For the ratio of two observables one can e.g. use

+ +

new_obs = derived_observable(lambda x: x[0] / x[1], [obs1, obs2])

+
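Building on the note above, a sketch with an autograd-differentiable function (the observables are synthetic):

```python
import numpy as np
import autograd.numpy as anp   # autograd wrapper, as required above
import pyerrors as pe

obs1 = pe.Obs([np.random.normal(2.0, 0.1, 500)], ['ens_A'])
obs2 = pe.Obs([np.random.normal(1.0, 0.1, 500)], ['ens_A'])

ratio = pe.derived_observable(lambda x: x[0] / x[1], [obs1, obs2])
combined = pe.derived_observable(lambda x: anp.log(x[0]) * anp.sqrt(x[1]), [obs1, obs2])
combined.gamma_method()
print(combined)
```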
+ + +
+
+
#   + + + def + reduce_deltas(deltas, idx_old, idx_new): +
+ +
+ View Source +
def reduce_deltas(deltas, idx_old, idx_new):
+    """Extract deltas defined on idx_old on all configs of idx_new.
+
+    Parameters
+    ----------
+    deltas  -- List of fluctuations
+    idx_old -- List or range of configs on which the deltas are defined
+    idx_new -- List of configs for which we want to extract the deltas.
+               Has to be a subset of idx_old.
+    """
+    if not len(deltas) == len(idx_old):
+        raise Exception('Lengths of deltas and idx_old have to be the same: %d != %d' % (len(deltas), len(idx_old)))
+    if type(idx_old) is range and type(idx_new) is range:
+        if idx_old == idx_new:
+            return deltas
+    shape = len(idx_new)
+    ret = np.zeros(shape)
+    oldpos = 0
+    for i in range(shape):
+        if oldpos == idx_old[i]:
+            raise Exception('idx_old and idx_new do not match!')
+        pos = -1
+        for j in range(oldpos, len(idx_old)):
+            if idx_old[j] == idx_new[i]:
+                pos = j
+                break
+        if pos < 0:
+            raise Exception('Error in reduce_deltas: Config %d not in idx_old' % (idx_new[i]))
+        ret[i] = deltas[pos]
+    return np.array(ret)
+
+ +
+ +

Extract deltas defined on idx_old on all configs of idx_new.

+ +
Parameters
+ +
    +
  • deltas -- List of fluctuations
  • idx_old -- List or range of configs on which the deltas are defined
  • idx_new -- List of configs for which we want to extract the deltas. Has to be a subset of idx_old.
+
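A small worked example (illustrative numbers):

```python
import numpy as np
from pyerrors.obs import reduce_deltas

deltas = np.array([0.1, -0.2, 0.3, -0.4, 0.5])
idx_old = range(1, 6)
idx_new = [1, 3, 5]       # subset of idx_old

print(reduce_deltas(deltas, idx_old, idx_new))   # [0.1 0.3 0.5]
```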
+ + +
+
+
#   + + + def + reweight(weight, obs, **kwargs): +
+ +
+ View Source +
def reweight(weight, obs, **kwargs):
+    """Reweight a list of observables.
+
+    Parameters
+    ----------
+    weight : Obs
+        Reweighting factor. An Observable that has to be defined on a superset of the
+        configurations in obs[i].idl for all i.
+    obs : list
+        list of Obs, e.g. [obs1, obs2, obs3].
+
+    Keyword arguments
+    -----------------
+    all_configs : bool
+        if True, the reweighted observables are normalized by the average of
+        the reweighting factor on all configurations in weight.idl and not
+        on the configurations in obs[i].idl.
+    """
+    result = []
+    for i in range(len(obs)):
+        if sorted(weight.names) != sorted(obs[i].names):
+            raise Exception('Error: Ensembles do not fit')
+        for name in weight.names:
+            if not set(obs[i].idl[name]).issubset(weight.idl[name]):
+                raise Exception('obs[%d] has to be defined on a subset of the configs in weight.idl[%s]!' % (i, name))
+        new_samples = []
+        w_deltas = {}
+        for name in sorted(weight.names):
+            w_deltas[name] = reduce_deltas(weight.deltas[name], weight.idl[name], obs[i].idl[name])
+            new_samples.append((w_deltas[name] + weight.r_values[name]) * (obs[i].deltas[name] + obs[i].r_values[name]))
+        tmp_obs = Obs(new_samples, sorted(weight.names), idl=[obs[i].idl[name] for name in sorted(weight.names)])
+
+        if kwargs.get('all_configs'):
+            new_weight = weight
+        else:
+            new_weight = Obs([w_deltas[name] + weight.r_values[name] for name in sorted(weight.names)], sorted(weight.names), idl=[obs[i].idl[name] for name in sorted(weight.names)])
+
+        result.append(derived_observable(lambda x, **kwargs: x[0] / x[1], [tmp_obs, new_weight], **kwargs))
+        result[-1].reweighted = True
+        result[-1].is_merged = obs[i].is_merged
+
+    return result
+
+ +
+ +

Reweight a list of observables.

+ +
Parameters
+ +
    +
  • weight (Obs): Reweighting factor. An Observable that has to be defined on a superset of the configurations in obs[i].idl for all i.
  • obs (list): list of Obs, e.g. [obs1, obs2, obs3].
+ +
Keyword arguments
+ +

  • all_configs (bool): if True, the reweighted observables are normalized by the average of the reweighting factor on all configurations in weight.idl and not on the configurations in obs[i].idl.

+
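A minimal sketch (a synthetic weight and observable defined on the same configurations):

```python
import numpy as np
import pyerrors as pe

obs = pe.Obs([np.random.normal(1.0, 0.1, 500)], ['ens_A'])
weight = pe.Obs([np.random.normal(1.0, 0.01, 500)], ['ens_A'])

[obs_rw] = pe.reweight(weight, [obs])
obs_rw.gamma_method()
print(obs_rw.reweighted)   # True
```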
+ + +
+
+
#   + + + def + correlate(obs_a, obs_b): +
+ +
+ View Source +
def correlate(obs_a, obs_b):
+    """Correlate two observables.
+
+    Parameters
+    ----------
+    obs_a : Obs
+        First observable
+    obs_b : Obs
+        Second observable
+
+    Keep in mind to only correlate primary observables which have not been reweighted
+    yet. The reweighting has to be applied after correlating the observables.
+    Currently only works if ensembles are identical. This is not really necessary.
+    """
+
+    if sorted(obs_a.names) != sorted(obs_b.names):
+        raise Exception('Ensembles do not fit')
+    for name in obs_a.names:
+        if obs_a.shape[name] != obs_b.shape[name]:
+            raise Exception('Shapes of ensemble', name, 'do not fit')
+        if obs_a.idl[name] != obs_b.idl[name]:
+            raise Exception('idl of ensemble', name, 'do not fit')
+
+    if obs_a.reweighted is True:
+        warnings.warn("The first observable is already reweighted.", RuntimeWarning)
+    if obs_b.reweighted is True:
+        warnings.warn("The second observable is already reweighted.", RuntimeWarning)
+
+    new_samples = []
+    new_idl = []
+    for name in sorted(obs_a.names):
+        new_samples.append((obs_a.deltas[name] + obs_a.r_values[name]) * (obs_b.deltas[name] + obs_b.r_values[name]))
+        new_idl.append(obs_a.idl[name])
+
+    o = Obs(new_samples, sorted(obs_a.names), idl=new_idl)
+    o.is_merged = obs_a.is_merged or obs_b.is_merged
+    o.reweighted = obs_a.reweighted or obs_b.reweighted
+    return o
+
+ +
+ +

Correlate two observables.

+ +

Parameters

+ +

  • obs_a (Obs): First observable
  • obs_b (Obs): Second observable

+ +

Keep in mind to only correlate primary observables which have not been reweighted yet. The reweighting has to be applied after correlating the observables. Currently this only works if the ensembles are identical; this restriction is not strictly necessary.

+
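A minimal sketch correlating two synthetic primary observables defined on the same ensemble and configurations:

```python
import numpy as np
import pyerrors as pe

obs_a = pe.Obs([np.random.normal(1.0, 0.1, 500)], ['ens_A'])
obs_b = pe.Obs([np.random.normal(0.8, 0.1, 500)], ['ens_A'])

ab = pe.correlate(obs_a, obs_b)   # samplewise product a*b as a new Obs
ab.gamma_method()
```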
+ + +
+
+
#   + + + def + covariance(obs1, obs2, correlation=False, **kwargs): +
+ +
+ View Source +
def covariance(obs1, obs2, correlation=False, **kwargs):
+    """Calculates the covariance of two observables.
+
+    covariance(obs, obs) is equal to obs.dvalue ** 2
+    The gamma method has to be applied first to both observables.
+
+    If abs(covariance(obs1, obs2)) > obs1.dvalue * obs2.dvalue, the covariance
+    is constrained to the maximum value in order to make sure that covariance
+    matrices are positive semidefinite.
+
+    Keyword arguments
+    -----------------
+    correlation -- if True the correlation instead of the covariance is
+                   returned (default False)
+    """
+
+    for name in sorted(set(obs1.names + obs2.names)):
+        if (obs1.shape.get(name) != obs2.shape.get(name)) and (obs1.shape.get(name) is not None) and (obs2.shape.get(name) is not None):
+            raise Exception('Shapes of ensemble', name, 'do not fit')
+        if (1 != len(set([len(idx) for idx in [obs1.idl[name], obs2.idl[name], merge_idx([obs1.idl[name], obs2.idl[name]])]]))):
+            raise Exception('Shapes of ensemble', name, 'do not fit')
+
+    if not hasattr(obs1, 'e_names') or not hasattr(obs2, 'e_names'):
+        raise Exception('The gamma method has to be applied to both Obs first.')
+
+    dvalue = 0
+
+    for e_name in obs1.e_names:
+
+        if e_name not in obs2.e_names:
+            continue
+
+        gamma = 0
+        r_length = []
+        for r_name in obs1.e_content[e_name]:
+            if r_name not in obs2.e_content[e_name]:
+                continue
+
+            r_length.append(len(obs1.deltas[r_name]))
+
+            gamma += np.sum(obs1.deltas[r_name] * obs2.deltas[r_name])
+
+        e_N = np.sum(r_length)
+
+        tau_combined = (obs1.e_tauint[e_name] + obs2.e_tauint[e_name]) / 2
+        dvalue += gamma / e_N * (1 + 1 / e_N) / e_N * 2 * tau_combined
+
+    if np.abs(dvalue / obs1.dvalue / obs2.dvalue) > 1.0:
+        dvalue = np.sign(dvalue) * obs1.dvalue * obs2.dvalue
+
+    if correlation:
+        dvalue = dvalue / obs1.dvalue / obs2.dvalue
+
+    return dvalue
+
+ +
+ +

Calculates the covariance of two observables.

+ +

covariance(obs, obs) is equal to obs.dvalue ** 2. The gamma method has to be applied first to both observables.

+ +

If abs(covariance(obs1, obs2)) > obs1.dvalue * obs2.dvalue, the covariance is constrained to the maximum value in order to make sure that covariance matrices are positive semidefinite.

+ +
Keyword arguments
+ +

  • correlation -- if True, the correlation instead of the covariance is returned (default False)

+
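A minimal sketch with two synthetic, strongly correlated observables (the gamma method must run first, as stated above):

```python
import numpy as np
import pyerrors as pe

base = np.random.normal(1.0, 0.1, 500)
obs1 = pe.Obs([base], ['ens_A'])
obs2 = pe.Obs([base + np.random.normal(0.0, 0.05, 500)], ['ens_A'])

obs1.gamma_method()
obs2.gamma_method()
print(pe.covariance(obs1, obs2))
print(pe.covariance(obs1, obs2, correlation=True))
```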
+ + +
+
+
#   + + + def + covariance2(obs1, obs2, correlation=False, **kwargs): +
+ +
+ View Source +
def covariance2(obs1, obs2, correlation=False, **kwargs):
+    """Alternative implementation of the covariance of two observables.
+
+    covariance(obs, obs) is equal to obs.dvalue ** 2
+    The gamma method has to be applied first to both observables.
+
+    If abs(covariance(obs1, obs2)) > obs1.dvalue * obs2.dvalue, the covariance
+    is constrained to the maximum value in order to make sure that covariance
+    matrices are positive semidefinite.
+
+    Keyword arguments
+    -----------------
+    correlation -- if true the correlation instead of the covariance is
+                   returned (default False)
+    """
+
+    def expand_deltas(deltas, idx, shape, new_idx):
+        """Expand deltas defined on idx to a contiguous range [new_idx[0], new_idx[-1]].
+           New, empty entries are filled by 0. If idx and new_idx are of type range, the smallest
+           common divisor of the step sizes is used as new step size.
+
+        Parameters
+        ----------
+        deltas  -- List of fluctuations
+        idx     -- List or range of configs on which the deltas are defined.
+                   Has to be a subset of new_idx.
+        shape   -- Number of configs in idx.
+        new_idx -- List of configs that defines the new range.
+        """
+
+        if type(idx) is range and type(new_idx) is range:
+            if idx == new_idx:
+                return deltas
+        ret = np.zeros(new_idx[-1] - new_idx[0] + 1)
+        for i in range(shape):
+            ret[idx[i] - new_idx[0]] = deltas[i]
+        return ret
+
+    def calc_gamma(deltas1, deltas2, idx1, idx2, new_idx, w_max):
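+        # Estimate the symmetrized cross-correlation Gamma(t) of the two
+        # delta series on the merged index range via FFT convolution.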
+        gamma = np.zeros(w_max)
+        deltas1 = expand_deltas(deltas1, idx1, len(idx1), new_idx)
+        deltas2 = expand_deltas(deltas2, idx2, len(idx2), new_idx)
+        new_shape = len(deltas1)
+        max_gamma = min(new_shape, w_max)
+        # The padding for the fft has to be even
+        padding = new_shape + max_gamma + (new_shape + max_gamma) % 2
+        gamma[:max_gamma] += (np.fft.irfft(np.fft.rfft(deltas1, padding) * np.conjugate(np.fft.rfft(deltas2, padding)))[:max_gamma] + np.fft.irfft(np.fft.rfft(deltas2, padding) * np.conjugate(np.fft.rfft(deltas1, padding)))[:max_gamma]) / 2.0
+
+        return gamma
+
+    if not hasattr(obs1, 'e_names') or not hasattr(obs2, 'e_names'):
+        raise Exception('The gamma method has to be applied to both Obs first.')
+
+    dvalue = 0
+    e_gamma = {}
+    e_dvalue = {}
+    e_n_tauint = {}
+    e_rho = {}
+
+    for e_name in obs1.e_names:
+
+        if e_name not in obs2.e_names:
+            continue
+
+        idl_d = {}
+        r_length = []
+        for r_name in obs1.e_content[e_name]:
+            if r_name not in obs2.e_content[e_name]:
+                continue
+            idl_d[r_name] = merge_idx([obs1.idl[r_name], obs2.idl[r_name]])
+            if isinstance(idl_d[r_name], range):
+                r_length.append(len(idl_d[r_name]))
+            else:
+                r_length.append((idl_d[r_name][-1] - idl_d[r_name][0] + 1))
+
+        if not r_length:
+            return 0.
+
+        w_max = max(r_length) // 2
+        e_gamma[e_name] = np.zeros(w_max)
+
+        for r_name in obs1.e_content[e_name]:
+            if r_name not in obs2.e_content[e_name]:
+                continue
+            e_gamma[e_name] += calc_gamma(obs1.deltas[r_name], obs2.deltas[r_name], obs1.idl[r_name], obs2.idl[r_name], idl_d[r_name], w_max)
+
+        if np.all(e_gamma[e_name] == 0.0):
+            continue
+
+        e_shapes = []
+        for r_name in obs1.e_content[e_name]:
+            e_shapes.append(obs1.shape[r_name])
+        gamma_div = np.zeros(w_max)
+        e_N = 0
+        for r_name in obs1.e_content[e_name]:
+            if r_name not in obs2.e_content[e_name]:
+                continue
+            gamma_div += calc_gamma(np.ones(obs1.shape[r_name]), np.ones(obs2.shape[r_name]), obs1.idl[r_name], obs2.idl[r_name], idl_d[r_name], w_max)
+            e_N += np.sum(np.ones_like(idl_d[r_name]))
+        e_gamma[e_name] /= gamma_div[:w_max]
+
+        e_rho[e_name] = e_gamma[e_name][:w_max] / e_gamma[e_name][0]
+        e_n_tauint[e_name] = np.cumsum(np.concatenate(([0.5], e_rho[e_name][1:])))
+        # Make sure no entry of tauint is smaller than 0.5
+        e_n_tauint[e_name][e_n_tauint[e_name] < 0.5] = 0.500000000001
+
+        window = max(obs1.e_windowsize[e_name], obs2.e_windowsize[e_name])
+        # Bias correction hep-lat/0306017 eq. (49)
+        e_dvalue[e_name] = 2 * (e_n_tauint[e_name][window] + obs1.tau_exp[e_name] * np.abs(e_rho[e_name][window + 1])) * (1 + (2 * window + 1) / e_N) * e_gamma[e_name][0] / e_N
+
+        dvalue += e_dvalue[e_name]
+
+    if np.abs(dvalue / obs1.dvalue / obs2.dvalue) > 1.0:
+        dvalue = np.sign(dvalue) * obs1.dvalue * obs2.dvalue
+
+    if correlation:
+        dvalue = dvalue / obs1.dvalue / obs2.dvalue
+
+    return dvalue
+
+ +
+ +

Alternative implementation of the covariance of two observables.

+ +

covariance2(obs, obs) is equal to obs.dvalue ** 2
+The gamma method has to be applied first to both observables.

+ +

If abs(covariance2(obs1, obs2)) > obs1.dvalue * obs2.dvalue, the covariance
+is constrained to the maximum value in order to make sure that covariance
+matrices are positive semidefinite.

+ +
Keyword arguments
+ +

correlation -- if true the correlation instead of the covariance is
+               returned (default False)

+
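<p>Since covariance2 shares the signature of covariance, the FFT-based estimate can be compared directly with the plain one. A sketch, reusing obs1 and obs2 from the covariance example above (it assumes covariance2 is exported at package level like covariance):</p>

<pre>
cov_plain = pe.covariance(obs1, obs2)   # windowed estimator documented above
cov_fft = pe.covariance2(obs1, obs2)    # FFT-based estimator documented here
# Both estimates are clipped to at most obs1.dvalue * obs2.dvalue, so the
# derived correlation always lies in [-1, 1].
print(cov_plain, cov_fft)
</pre>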
+ + +
+
+
#   + + + def + covariance3(obs1, obs2, correlation=False, **kwargs): +
+ +
+ View Source +
def covariance3(obs1, obs2, correlation=False, **kwargs):
+    """Another alternative implementation of the covariance of two observables.
+
+    covariance3(obs, obs) is equal to obs.dvalue ** 2
+    Currently only works if ensembles are identical.
+    The gamma method has to be applied first to both observables.
+
+    If abs(covariance3(obs1, obs2)) > obs1.dvalue * obs2.dvalue, the covariance
+    is constrained to the maximum value in order to make sure that covariance
+    matrices are positive semidefinite.
+
+    Keyword arguments
+    -----------------
+    correlation -- if true the correlation instead of the covariance is
+                   returned (default False)
+    plot -- if true, the integrated autocorrelation time for each ensemble is
+            plotted.
+    """
+
+    for name in sorted(set(obs1.names + obs2.names)):
+        if (obs1.shape.get(name) != obs2.shape.get(name)) and (obs1.shape.get(name) is not None) and (obs2.shape.get(name) is not None):
+            raise Exception('Shapes of ensemble ' + name + ' do not fit')
+        if (1 != len(set([len(idx) for idx in [obs1.idl[name], obs2.idl[name], merge_idx([obs1.idl[name], obs2.idl[name]])]]))):
+            raise Exception('Shapes of ensemble ' + name + ' do not fit')
+
+    if not hasattr(obs1, 'e_names') or not hasattr(obs2, 'e_names'):
+        raise Exception('The gamma method has to be applied to both Obs first.')
+
+    tau_exp = []
+    S = []
+    for e_name in sorted(set(obs1.e_names + obs2.e_names)):
+        t_1 = obs1.tau_exp.get(e_name)
+        t_2 = obs2.tau_exp.get(e_name)
+        if t_1 is None:
+            t_1 = 0
+        if t_2 is None:
+            t_2 = 0
+        tau_exp.append(max(t_1, t_2))
+        S_1 = obs1.S.get(e_name)
+        S_2 = obs2.S.get(e_name)
+        if S_1 is None:
+            S_1 = Obs.S_global
+        if S_2 is None:
+            S_2 = Obs.S_global
+        S.append(max(S_1, S_2))
+
+    check_obs = obs1 + obs2
+    check_obs.gamma_method(tau_exp=tau_exp, S=S)
+
+    if kwargs.get('plot'):
+        check_obs.plot_tauint()
+        check_obs.plot_rho()
+
+    cov = (check_obs.dvalue ** 2 - obs1.dvalue ** 2 - obs2.dvalue ** 2) / 2
+
+    if np.abs(cov / obs1.dvalue / obs2.dvalue) > 1.0:
+        cov = np.sign(cov) * obs1.dvalue * obs2.dvalue
+
+    if correlation:
+        cov = cov / obs1.dvalue / obs2.dvalue
+
+    return cov
+
+ +
+ +

Another alternative implementation of the covariance of two observables.

+ +

covariance3(obs, obs) is equal to obs.dvalue ** 2
+Currently only works if ensembles are identical.
+The gamma method has to be applied first to both observables.

+ +

If abs(covariance3(obs1, obs2)) > obs1.dvalue * obs2.dvalue, the covariance
+is constrained to the maximum value in order to make sure that covariance
+matrices are positive semidefinite.

+ +
Keyword arguments
+ +

correlation -- if true the correlation instead of the covariance is
+               returned (default False)
+plot -- if true, the integrated autocorrelation time for each ensemble is
+        plotted.

+
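<p>The estimate follows from Var(obs1 + obs2) = Var(obs1) + Var(obs2) + 2 Cov(obs1, obs2): the covariance is read off from the gamma-method error of the sum. A sketch, reusing obs1 and obs2 from the covariance example above (it assumes covariance3 is exported at package level):</p>

<pre>
# plot=True additionally shows tau_int and rho of the combined observable
cov3 = pe.covariance3(obs1, obs2, plot=True)
rho3 = pe.covariance3(obs1, obs2, correlation=True)
</pre>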
+ + +
+
+
#   + + + def + pseudo_Obs(value, dvalue, name, samples=1000): +
+ +
+ View Source +
def pseudo_Obs(value, dvalue, name, samples=1000):
+    """Generate a pseudo Obs with given value, dvalue and name
+
+    The standard number of samples is 1000. This can be adjusted.
+    """
+    if dvalue <= 0.0:
+        return Obs([np.zeros(samples) + value], [name])
+    else:
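+        # Draw Gaussian fluctuations and rescale them iteratively until the
+        # gamma-method error estimate reproduces the requested dvalue.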
+        for _ in range(100):
+            deltas = [np.random.normal(0.0, dvalue * np.sqrt(samples), samples)]
+            deltas -= np.mean(deltas)
+            deltas *= dvalue / np.sqrt((np.var(deltas) / samples)) / np.sqrt(1 + 3 / samples)
+            deltas += value
+            res = Obs(deltas, [name])
+            res.gamma_method(S=2, tau_exp=0)
+            if abs(res.dvalue - dvalue) < 1e-10 * dvalue:
+                break
+
+        res._value = float(value)
+
+        return res
+
+ +
+ +

Generate a pseudo Obs with given value, dvalue and name

+ +

The standard number of samples is 1000. This can be adjusted.

+
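<p>A short sketch (value, error and name are illustrative):</p>

<pre>
import pyerrors as pe

p = pe.pseudo_Obs(0.52, 0.003, 'ensemble_name', samples=500)
p.gamma_method()
print(p)  # value close to 0.52 with an error close to 0.003
</pre>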
+ + +
+
+
#   + + + def + dump_object(obj, name, **kwargs): +
+ +
+ View Source +
def dump_object(obj, name, **kwargs):
+    """Dump object into pickle file.
+
+    Keyword arguments
+    -----------------
+    path -- specifies a custom path for the file (default '.')
+    """
+    if 'path' in kwargs:
+        file_name = kwargs.get('path') + '/' + name + '.p'
+    else:
+        file_name = name + '.p'
+    with open(file_name, 'wb') as fb:
+        pickle.dump(obj, fb)
+
+ +
+ +

Dump object into pickle file.

+ +
Keyword arguments
+ +

path -- specifies a custom path for the file (default '.')

+
+ + +
+
+
#   + + + def + load_object(path): +
+ +
+ View Source +
def load_object(path):
+    """Load object from pickle file. """
+    with open(path, 'rb') as file:
+        return pickle.load(file)
+
+ +
+ +

Load object from pickle file.

+
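<p>dump_object and load_object form a simple pickle round trip. A sketch (paths are illustrative; note that dump_object appends '.p' to the name while load_object expects the full file path):</p>

<pre>
import pyerrors as pe

my_obs = pe.pseudo_Obs(0.52, 0.003, 'ensemble_name')
pe.dump_object(my_obs, 'my_obs', path='.')  # writes ./my_obs.p
restored = pe.load_object('./my_obs.p')
print(restored)
</pre>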
+ + +
+
+
#   + + + def + merge_obs(list_of_obs): +
+ +
+ View Source +
def merge_obs(list_of_obs):
+    """Combine all observables in list_of_obs into one new observable
+
+    It is not possible to combine obs which are based on the same replicum
+    """
+    replist = [item for obs in list_of_obs for item in obs.names]
+    if len(replist) != len(set(replist)):
+        raise Exception('list_of_obs contains duplicate replica: %s' % (str(replist)))
+    new_dict = {}
+    idl_dict = {}
+    for o in list_of_obs:
+        new_dict.update({key: o.deltas.get(key, 0) + o.r_values.get(key, 0)
+                        for key in set(o.deltas) | set(o.r_values)})
+        idl_dict.update({key: o.idl.get(key, 0) for key in set(o.deltas)})
+
+    names = sorted(new_dict.keys())
+    o = Obs([new_dict[name] for name in names], names, idl=[idl_dict[name] for name in names])
+    o.is_merged = np.any([oi.is_merged for oi in list_of_obs])
+    o.reweighted = np.max([oi.reweighted for oi in list_of_obs])
+    return o
+
+ +
+ +

Combine all observables in list_of_obs into one new observable

+ +

It is not possible to combine obs which are based on the same replicum

+
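<p>A sketch merging two observables defined on distinct replica, using the ensemble|replicum naming convention (toy data, names are illustrative):</p>

<pre>
import numpy as np
import pyerrors as pe

obs_r1 = pe.Obs([np.random.normal(1.0, 0.1, 500)], ['ensemble_name|r01'])
obs_r2 = pe.Obs([np.random.normal(1.0, 0.1, 500)], ['ensemble_name|r02'])

merged = pe.merge_obs([obs_r1, obs_r2])  # one Obs carrying both replica
merged.gamma_method()
merged.details()
</pre>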
+ + +
+
+ + \ No newline at end of file diff --git a/docs/pyerrors/roots.html b/docs/pyerrors/roots.html new file mode 100644 index 00000000..b326a234 --- /dev/null +++ b/docs/pyerrors/roots.html @@ -0,0 +1,315 @@ + + + + + + + pyerrors.roots API documentation + + + + + + + + + + + +
+
+

+pyerrors.roots

+ + +
+ View Source +
#!/usr/bin/env python
+# coding: utf-8
+
+import scipy.optimize
+from autograd import jacobian
+from .obs import derived_observable, pseudo_Obs
+
+
+def find_root(d, func, guess=1.0, **kwargs):
+    """Finds the root of the function func(x, d) where d is an Obs.
+
+    Parameters
+    ----------
+    d -- Obs passed to the function.
+    func -- Function whose root should be found. Any numpy functions have to use the autograd.numpy wrapper.
+    guess -- Initial guess for the root search.
+    """
+    root = scipy.optimize.fsolve(func, guess, d.value)
+
+    # Error propagation as detailed in arXiv:1809.01289
+    dx = jacobian(func)(root[0], d.value)
+    da = jacobian(lambda u, v: func(v, u))(d.value, root[0])
+    deriv = - da / dx
+
+    return derived_observable(lambda x, **kwargs: x[0], [pseudo_Obs(root, 0.0, d.names[0], d.shape[d.names[0]]), d], man_grad=[0, deriv])
+
+ +
+ +
+
+
#   + + + def + find_root(d, func, guess=1.0, **kwargs): +
+ +
+ View Source +
def find_root(d, func, guess=1.0, **kwargs):
+    """Finds the root of the function func(x, d) where d is an Obs.
+
+    Parameters
+    ----------
+    d -- Obs passed to the function.
+    func -- Function whose root should be found. Any numpy functions have to use the autograd.numpy wrapper.
+    guess -- Initial guess for the root search.
+    """
+    root = scipy.optimize.fsolve(func, guess, d.value)
+
+    # Error propagation as detailed in arXiv:1809.01289
+    dx = jacobian(func)(root[0], d.value)
+    da = jacobian(lambda u, v: func(v, u))(d.value, root[0])
+    deriv = - da / dx
+
+    return derived_observable(lambda x, **kwargs: x[0], [pseudo_Obs(root, 0.0, d.names[0], d.shape[d.names[0]]), d], man_grad=[0, deriv])
+
+ +
+ +

Finds the root of the function func(x, d) where d is an Obs.

+ +
Parameters
+ +
    +
  • d -- Obs passed to the function.
  • +
  • func -- Function whose root should be found. Any numpy functions have to use the autograd.numpy wrapper
  • +
  • guess -- Initial guess for the root search.
  • +
+
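<p>A minimal sketch: the root of f(x, d) = x ** 2 - d is sqrt(d), and the error of d is propagated to the root (find_root is reached through the roots submodule; any numpy calls inside func would have to go through autograd.numpy):</p>

<pre>
import pyerrors as pe

d = pe.pseudo_Obs(2.0, 0.1, 'ensemble_name')
my_root = pe.roots.find_root(d, lambda x, d: x ** 2 - d, guess=1.5)
my_root.gamma_method()
print(my_root)  # close to sqrt(2) with a propagated error
</pre>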
+ + +
+
+ + \ No newline at end of file diff --git a/docs/pyerrors/version.html b/docs/pyerrors/version.html new file mode 100644 index 00000000..9b19e14a --- /dev/null +++ b/docs/pyerrors/version.html @@ -0,0 +1,240 @@ + + + + + + + pyerrors.version API documentation + + + + + + + + + + + +
+
+

+pyerrors.version

+ + +
+ View Source +
__version__ = "2.0.0"
+
+ +
+ +
+
+ + \ No newline at end of file diff --git a/docs/search.js b/docs/search.js new file mode 100644 index 00000000..be3a7ef1 --- /dev/null +++ b/docs/search.js @@ -0,0 +1,36 @@ +window.pdocSearch = (function(){ +/** elasticlunr - http://weixsong.github.io * Copyright (C) 2017 Oliver Nightingale * Copyright (C) 2017 Wei Song * MIT Licensed */!function(){function e(e){if(null===e||"object"!=typeof e)return e;var t=e.constructor();for(var n in e)e.hasOwnProperty(n)&&(t[n]=e[n]);return t}var t=function(e){var n=new t.Index;return n.pipeline.add(t.trimmer,t.stopWordFilter,t.stemmer),e&&e.call(n,n),n};t.version="0.9.5",lunr=t,t.utils={},t.utils.warn=function(e){return function(t){e.console&&console.warn&&console.warn(t)}}(this),t.utils.toString=function(e){return void 0===e||null===e?"":e.toString()},t.EventEmitter=function(){this.events={}},t.EventEmitter.prototype.addListener=function(){var e=Array.prototype.slice.call(arguments),t=e.pop(),n=e;if("function"!=typeof t)throw new TypeError("last argument must be a function");n.forEach(function(e){this.hasHandler(e)||(this.events[e]=[]),this.events[e].push(t)},this)},t.EventEmitter.prototype.removeListener=function(e,t){if(this.hasHandler(e)){var n=this.events[e].indexOf(t);-1!==n&&(this.events[e].splice(n,1),0==this.events[e].length&&delete this.events[e])}},t.EventEmitter.prototype.emit=function(e){if(this.hasHandler(e)){var t=Array.prototype.slice.call(arguments,1);this.events[e].forEach(function(e){e.apply(void 0,t)},this)}},t.EventEmitter.prototype.hasHandler=function(e){return e in this.events},t.tokenizer=function(e){if(!arguments.length||null===e||void 0===e)return[];if(Array.isArray(e)){var n=e.filter(function(e){return null===e||void 0===e?!1:!0});n=n.map(function(e){return t.utils.toString(e).toLowerCase()});var i=[];return n.forEach(function(e){var n=e.split(t.tokenizer.seperator);i=i.concat(n)},this),i}return e.toString().trim().toLowerCase().split(t.tokenizer.seperator)},t.tokenizer.defaultSeperator=/[\s\-]+/,t.tokenizer.seperator=t.tokenizer.defaultSeperator,t.tokenizer.setSeperator=function(e){null!==e&&void 0!==e&&"object"==typeof e&&(t.tokenizer.seperator=e)},t.tokenizer.resetSeperator=function(){t.tokenizer.seperator=t.tokenizer.defaultSeperator},t.tokenizer.getSeperator=function(){return t.tokenizer.seperator},t.Pipeline=function(){this._queue=[]},t.Pipeline.registeredFunctions={},t.Pipeline.registerFunction=function(e,n){n in t.Pipeline.registeredFunctions&&t.utils.warn("Overwriting existing registered function: "+n),e.label=n,t.Pipeline.registeredFunctions[n]=e},t.Pipeline.getRegisteredFunction=function(e){return e in t.Pipeline.registeredFunctions!=!0?null:t.Pipeline.registeredFunctions[e]},t.Pipeline.warnIfFunctionNotRegistered=function(e){var n=e.label&&e.label in this.registeredFunctions;n||t.utils.warn("Function is not registered with pipeline. 
This may cause problems when serialising the index.\n",e)},t.Pipeline.load=function(e){var n=new t.Pipeline;return e.forEach(function(e){var i=t.Pipeline.getRegisteredFunction(e);if(!i)throw new Error("Cannot load un-registered function: "+e);n.add(i)}),n},t.Pipeline.prototype.add=function(){var e=Array.prototype.slice.call(arguments);e.forEach(function(e){t.Pipeline.warnIfFunctionNotRegistered(e),this._queue.push(e)},this)},t.Pipeline.prototype.after=function(e,n){t.Pipeline.warnIfFunctionNotRegistered(n);var i=this._queue.indexOf(e);if(-1===i)throw new Error("Cannot find existingFn");this._queue.splice(i+1,0,n)},t.Pipeline.prototype.before=function(e,n){t.Pipeline.warnIfFunctionNotRegistered(n);var i=this._queue.indexOf(e);if(-1===i)throw new Error("Cannot find existingFn");this._queue.splice(i,0,n)},t.Pipeline.prototype.remove=function(e){var t=this._queue.indexOf(e);-1!==t&&this._queue.splice(t,1)},t.Pipeline.prototype.run=function(e){for(var t=[],n=e.length,i=this._queue.length,o=0;n>o;o++){for(var r=e[o],s=0;i>s&&(r=this._queue[s](r,o,e),void 0!==r&&null!==r);s++);void 0!==r&&null!==r&&t.push(r)}return t},t.Pipeline.prototype.reset=function(){this._queue=[]},t.Pipeline.prototype.get=function(){return this._queue},t.Pipeline.prototype.toJSON=function(){return this._queue.map(function(e){return t.Pipeline.warnIfFunctionNotRegistered(e),e.label})},t.Index=function(){this._fields=[],this._ref="id",this.pipeline=new t.Pipeline,this.documentStore=new t.DocumentStore,this.index={},this.eventEmitter=new t.EventEmitter,this._idfCache={},this.on("add","remove","update",function(){this._idfCache={}}.bind(this))},t.Index.prototype.on=function(){var e=Array.prototype.slice.call(arguments);return this.eventEmitter.addListener.apply(this.eventEmitter,e)},t.Index.prototype.off=function(e,t){return this.eventEmitter.removeListener(e,t)},t.Index.load=function(e){e.version!==t.version&&t.utils.warn("version mismatch: current "+t.version+" importing "+e.version);var n=new this;n._fields=e.fields,n._ref=e.ref,n.documentStore=t.DocumentStore.load(e.documentStore),n.pipeline=t.Pipeline.load(e.pipeline),n.index={};for(var i in e.index)n.index[i]=t.InvertedIndex.load(e.index[i]);return n},t.Index.prototype.addField=function(e){return this._fields.push(e),this.index[e]=new t.InvertedIndex,this},t.Index.prototype.setRef=function(e){return this._ref=e,this},t.Index.prototype.saveDocument=function(e){return this.documentStore=new t.DocumentStore(e),this},t.Index.prototype.addDoc=function(e,n){if(e){var n=void 0===n?!0:n,i=e[this._ref];this.documentStore.addDoc(i,e),this._fields.forEach(function(n){var o=this.pipeline.run(t.tokenizer(e[n]));this.documentStore.addFieldLength(i,n,o.length);var r={};o.forEach(function(e){e in r?r[e]+=1:r[e]=1},this);for(var s in r){var u=r[s];u=Math.sqrt(u),this.index[n].addToken(s,{ref:i,tf:u})}},this),n&&this.eventEmitter.emit("add",e,this)}},t.Index.prototype.removeDocByRef=function(e){if(e&&this.documentStore.isDocStored()!==!1&&this.documentStore.hasDoc(e)){var t=this.documentStore.getDoc(e);this.removeDoc(t,!1)}},t.Index.prototype.removeDoc=function(e,n){if(e){var n=void 0===n?!0:n,i=e[this._ref];this.documentStore.hasDoc(i)&&(this.documentStore.removeDoc(i),this._fields.forEach(function(n){var o=this.pipeline.run(t.tokenizer(e[n]));o.forEach(function(e){this.index[n].removeToken(e,i)},this)},this),n&&this.eventEmitter.emit("remove",e,this))}},t.Index.prototype.updateDoc=function(e,t){var t=void 
0===t?!0:t;this.removeDocByRef(e[this._ref],!1),this.addDoc(e,!1),t&&this.eventEmitter.emit("update",e,this)},t.Index.prototype.idf=function(e,t){var n="@"+t+"/"+e;if(Object.prototype.hasOwnProperty.call(this._idfCache,n))return this._idfCache[n];var i=this.index[t].getDocFreq(e),o=1+Math.log(this.documentStore.length/(i+1));return this._idfCache[n]=o,o},t.Index.prototype.getFields=function(){return this._fields.slice()},t.Index.prototype.search=function(e,n){if(!e)return[];e="string"==typeof e?{any:e}:JSON.parse(JSON.stringify(e));var i=null;null!=n&&(i=JSON.stringify(n));for(var o=new t.Configuration(i,this.getFields()).get(),r={},s=Object.keys(e),u=0;u0&&t.push(e);for(var i in n)"docs"!==i&&"df"!==i&&this.expandToken(e+i,t,n[i]);return t},t.InvertedIndex.prototype.toJSON=function(){return{root:this.root}},t.Configuration=function(e,n){var e=e||"";if(void 0==n||null==n)throw new Error("fields should not be null");this.config={};var i;try{i=JSON.parse(e),this.buildUserConfig(i,n)}catch(o){t.utils.warn("user configuration parse failed, will use default configuration"),this.buildDefaultConfig(n)}},t.Configuration.prototype.buildDefaultConfig=function(e){this.reset(),e.forEach(function(e){this.config[e]={boost:1,bool:"OR",expand:!1}},this)},t.Configuration.prototype.buildUserConfig=function(e,n){var i="OR",o=!1;if(this.reset(),"bool"in e&&(i=e.bool||i),"expand"in e&&(o=e.expand||o),"fields"in e)for(var r in e.fields)if(n.indexOf(r)>-1){var s=e.fields[r],u=o;void 0!=s.expand&&(u=s.expand),this.config[r]={boost:s.boost||0===s.boost?s.boost:1,bool:s.bool||i,expand:u}}else t.utils.warn("field name in user configuration not found in index instance fields");else this.addAllFields2UserConfig(i,o,n)},t.Configuration.prototype.addAllFields2UserConfig=function(e,t,n){n.forEach(function(n){this.config[n]={boost:1,bool:e,expand:t}},this)},t.Configuration.prototype.get=function(){return this.config},t.Configuration.prototype.reset=function(){this.config={}},lunr.SortedSet=function(){this.length=0,this.elements=[]},lunr.SortedSet.load=function(e){var t=new this;return t.elements=e,t.length=e.length,t},lunr.SortedSet.prototype.add=function(){var e,t;for(e=0;e1;){if(r===e)return o;e>r&&(t=o),r>e&&(n=o),i=n-t,o=t+Math.floor(i/2),r=this.elements[o]}return r===e?o:-1},lunr.SortedSet.prototype.locationFor=function(e){for(var t=0,n=this.elements.length,i=n-t,o=t+Math.floor(i/2),r=this.elements[o];i>1;)e>r&&(t=o),r>e&&(n=o),i=n-t,o=t+Math.floor(i/2),r=this.elements[o];return r>e?o:e>r?o+1:void 0},lunr.SortedSet.prototype.intersect=function(e){for(var t=new lunr.SortedSet,n=0,i=0,o=this.length,r=e.length,s=this.elements,u=e.elements;;){if(n>o-1||i>r-1)break;s[n]!==u[i]?s[n]u[i]&&i++:(t.add(s[n]),n++,i++)}return t},lunr.SortedSet.prototype.clone=function(){var e=new lunr.SortedSet;return e.elements=this.toArray(),e.length=e.elements.length,e},lunr.SortedSet.prototype.union=function(e){var t,n,i;this.length>=e.length?(t=this,n=e):(t=e,n=this),i=t.clone();for(var o=0,r=n.toArray();oWhat is pyerrors?\n\n

pyerrors is a python package for error computation and propagation of Markov chain Monte Carlo data.

\n\n

Getting started

\n\n
import numpy as np\nimport pyerrors as pe\n\nmy_obs = pe.Obs([samples], ['ensemble_name'])\nmy_new_obs = 2 * np.log(my_obs) / my_obs\nmy_new_obs.gamma_method()\nmy_new_obs.details()\nprint(my_new_obs)\n
\n\n

The Obs class

\n\n

pyerrors.obs.Obs

\n\n
import pyerrors as pe\n\nmy_obs = pe.Obs([samples], ['ensemble_name'])\n
\n\n

Multiple ensembles/replica

\n\n

Irregular Monte Carlo chains

\n\n

Error propagation

\n\n

Automatic differentiation, cite Alberto,

\n\n

numpy overloaded

\n\n
import numpy as np\nimport pyerrors as pe\n\nmy_obs = pe.Obs([samples], ['ensemble_name'])\nmy_new_obs = 2 * np.log(my_obs) / my_obs\nmy_new_obs.gamma_method()\nmy_new_obs.details()\n
\n\n

Error estimation

\n\n

pyerrors.obs.Obs.gamma_method

\n\n

$\\delta_i\\delta_j$

\n\n

Exponential tails

\n\n

Covariance

\n\n

Optimization / fits / roots

\n\n

Complex observables

\n\n

Matrix operations

\n\n

Input

\n"}, "pyerrors.correlators": {"fullname": "pyerrors.correlators", "modulename": "pyerrors.correlators", "qualname": "", "type": "module", "doc": "

\n"}, "pyerrors.correlators.Corr": {"fullname": "pyerrors.correlators.Corr", "modulename": "pyerrors.correlators", "qualname": "Corr", "type": "class", "doc": "

The class for a correlator (time dependent sequence of pe.Obs).

\n\n

Everything this class does can be achieved using lists or arrays of Obs.\nBut it is simply more convenient to have a dedicated object for correlators.\nOne often wants to add or multiply correlators of the same length at every timeslice and it is inconvenient\nto iterate over all timeslices for every operation. This is especially true when dealing with smearing matrices.

\n\n

The correlator can have two types of content: An Obs at every timeslice OR a GEVP\nsmearing matrix at every timeslice. Other dependencies (e.g. spatial) are not supported.

\n"}, "pyerrors.correlators.Corr.__init__": {"fullname": "pyerrors.correlators.Corr.__init__", "modulename": "pyerrors.correlators", "qualname": "Corr.__init__", "type": "function", "doc": "

\n", "parameters": ["self", "data_input", "padding_front", "padding_back", "prange"], "funcdef": "def"}, "pyerrors.correlators.Corr.reweighted": {"fullname": "pyerrors.correlators.Corr.reweighted", "modulename": "pyerrors.correlators", "qualname": "Corr.reweighted", "type": "variable", "doc": "

\n"}, "pyerrors.correlators.Corr.gamma_method": {"fullname": "pyerrors.correlators.Corr.gamma_method", "modulename": "pyerrors.correlators", "qualname": "Corr.gamma_method", "type": "function", "doc": "

Apply the gamma method to the content of the Corr.

\n", "parameters": ["self"], "funcdef": "def"}, "pyerrors.correlators.Corr.projected": {"fullname": "pyerrors.correlators.Corr.projected", "modulename": "pyerrors.correlators", "qualname": "Corr.projected", "type": "function", "doc": "

\n", "parameters": ["self", "vector_l", "vector_r"], "funcdef": "def"}, "pyerrors.correlators.Corr.sum": {"fullname": "pyerrors.correlators.Corr.sum", "modulename": "pyerrors.correlators", "qualname": "Corr.sum", "type": "function", "doc": "

\n", "parameters": ["self"], "funcdef": "def"}, "pyerrors.correlators.Corr.smearing": {"fullname": "pyerrors.correlators.Corr.smearing", "modulename": "pyerrors.correlators", "qualname": "Corr.smearing", "type": "function", "doc": "

\n", "parameters": ["self", "i", "j"], "funcdef": "def"}, "pyerrors.correlators.Corr.plottable": {"fullname": "pyerrors.correlators.Corr.plottable", "modulename": "pyerrors.correlators", "qualname": "Corr.plottable", "type": "function", "doc": "

Outputs the correlator in a plottable format.

\n\n

Outputs three lists containing the timeslice index, the value on each\ntimeslice and the error on each timeslice.

\n", "parameters": ["self"], "funcdef": "def"}, "pyerrors.correlators.Corr.symmetric": {"fullname": "pyerrors.correlators.Corr.symmetric", "modulename": "pyerrors.correlators", "qualname": "Corr.symmetric", "type": "function", "doc": "

Symmetrize the correlator around x0=0.

\n", "parameters": ["self"], "funcdef": "def"}, "pyerrors.correlators.Corr.anti_symmetric": {"fullname": "pyerrors.correlators.Corr.anti_symmetric", "modulename": "pyerrors.correlators", "qualname": "Corr.anti_symmetric", "type": "function", "doc": "

Anti-symmetrize the correlator around x0=0.

\n", "parameters": ["self"], "funcdef": "def"}, "pyerrors.correlators.Corr.smearing_symmetric": {"fullname": "pyerrors.correlators.Corr.smearing_symmetric", "modulename": "pyerrors.correlators", "qualname": "Corr.smearing_symmetric", "type": "function", "doc": "

\n", "parameters": ["self"], "funcdef": "def"}, "pyerrors.correlators.Corr.GEVP": {"fullname": "pyerrors.correlators.Corr.GEVP", "modulename": "pyerrors.correlators", "qualname": "Corr.GEVP", "type": "function", "doc": "

\n", "parameters": ["self", "t0", "ts", "state"], "funcdef": "def"}, "pyerrors.correlators.Corr.Eigenvalue": {"fullname": "pyerrors.correlators.Corr.Eigenvalue", "modulename": "pyerrors.correlators", "qualname": "Corr.Eigenvalue", "type": "function", "doc": "

\n", "parameters": ["self", "t0", "state"], "funcdef": "def"}, "pyerrors.correlators.Corr.roll": {"fullname": "pyerrors.correlators.Corr.roll", "modulename": "pyerrors.correlators", "qualname": "Corr.roll", "type": "function", "doc": "

Periodically shift the correlator by dt timeslices

\n\n

Attributes:

\n\n

dt : int\n number of timeslices

\n", "parameters": ["self", "dt"], "funcdef": "def"}, "pyerrors.correlators.Corr.reverse": {"fullname": "pyerrors.correlators.Corr.reverse", "modulename": "pyerrors.correlators", "qualname": "Corr.reverse", "type": "function", "doc": "

Reverse the time ordering of the Corr

\n", "parameters": ["self"], "funcdef": "def"}, "pyerrors.correlators.Corr.correlate": {"fullname": "pyerrors.correlators.Corr.correlate", "modulename": "pyerrors.correlators", "qualname": "Corr.correlate", "type": "function", "doc": "

Correlate the correlator with another correlator or Obs

\n", "parameters": ["self", "partner"], "funcdef": "def"}, "pyerrors.correlators.Corr.reweight": {"fullname": "pyerrors.correlators.Corr.reweight", "modulename": "pyerrors.correlators", "qualname": "Corr.reweight", "type": "function", "doc": "

Reweight the correlator.

\n\n
Parameters
\n\n
    \n
  • weight (Obs):\nReweighting factor. An Observable that has to be defined on a superset of the\nconfigurations in obs[i].idl for all i.
  • \n
\n\n
Keyword arguments
\n\n

all_configs : bool\n if True, the reweighted observables are normalized by the average of\n the reweighting factor on all configurations in weight.idl and not\n on the configurations in obs[i].idl.

\n", "parameters": ["self", "weight", "kwargs"], "funcdef": "def"}, "pyerrors.correlators.Corr.T_symmetry": {"fullname": "pyerrors.correlators.Corr.T_symmetry", "modulename": "pyerrors.correlators", "qualname": "Corr.T_symmetry", "type": "function", "doc": "

Return the time symmetry average of the correlator and its partner

\n\n

Attributes:

\n\n

partner : Corr\n Time symmetry partner of the Corr\nparity : int\n Parity quantum number of the correlator, can be +1 or -1

\n", "parameters": ["self", "partner", "parity"], "funcdef": "def"}, "pyerrors.correlators.Corr.deriv": {"fullname": "pyerrors.correlators.Corr.deriv", "modulename": "pyerrors.correlators", "qualname": "Corr.deriv", "type": "function", "doc": "

Return the first derivative of the correlator with respect to x0.

\n\n

Attributes:

\n\n

symmetric : bool\n decides whether symmetric or simple finite differences are used. Default: True

\n", "parameters": ["self", "symmetric"], "funcdef": "def"}, "pyerrors.correlators.Corr.second_deriv": {"fullname": "pyerrors.correlators.Corr.second_deriv", "modulename": "pyerrors.correlators", "qualname": "Corr.second_deriv", "type": "function", "doc": "

Return the second derivative of the correlator with respect to x0.

\n", "parameters": ["self"], "funcdef": "def"}, "pyerrors.correlators.Corr.m_eff": {"fullname": "pyerrors.correlators.Corr.m_eff", "modulename": "pyerrors.correlators", "qualname": "Corr.m_eff", "type": "function", "doc": "

Returns the effective mass of the correlator as correlator object

\n\n
Parameters
\n\n
    \n
  • variant (str):\nlog: uses the standard effective mass log(C(t) / C(t+1))\ncosh : Use periodicity of the correlator by solving C(t) / C(t+1) = cosh(m * (t - T/2)) / cosh(m * (t + 1 - T/2)) for m.\nsinh : Use anti-periodicity of the correlator by solving C(t) / C(t+1) = sinh(m * (t - T/2)) / sinh(m * (t + 1 - T/2)) for m.\nSee, e.g., arXiv:1205.5380
  • \n
  • guess (float):\nguess for the root finder, only relevant for the root variant
  • \n
\n", "parameters": ["self", "variant", "guess"], "funcdef": "def"}, "pyerrors.correlators.Corr.fit": {"fullname": "pyerrors.correlators.Corr.fit", "modulename": "pyerrors.correlators", "qualname": "Corr.fit", "type": "function", "doc": "

Fits function to the data

\n\n

Attributes:

\n\n

function : obj\n function to fit to the data. See fits.least_squares for details.\nfitrange : list\n Range in which the function is to be fitted to the data.\n If not specified, self.prange or all timeslices are used.\nsilent : bool\n Decides whether output is printed to the standard output.

\n", "parameters": ["self", "function", "fitrange", "silent", "kwargs"], "funcdef": "def"}, "pyerrors.correlators.Corr.plateau": {"fullname": "pyerrors.correlators.Corr.plateau", "modulename": "pyerrors.correlators", "qualname": "Corr.plateau", "type": "function", "doc": "

Extract a plateau value from a Corr object

\n\n

Attributes:

\n\n

plateau_range : list\n list with two entries, indicating the first and the last timeslice\n of the plateau region.\nmethod : str\n method to extract the plateau.\n 'fit' fits a constant to the plateau region\n 'avg', 'average' or 'mean' just average over the given timeslices.

\n", "parameters": ["self", "plateau_range", "method"], "funcdef": "def"}, "pyerrors.correlators.Corr.set_prange": {"fullname": "pyerrors.correlators.Corr.set_prange", "modulename": "pyerrors.correlators", "qualname": "Corr.set_prange", "type": "function", "doc": "

Sets the attribute prange of the Corr object.

\n", "parameters": ["self", "prange"], "funcdef": "def"}, "pyerrors.correlators.Corr.show": {"fullname": "pyerrors.correlators.Corr.show", "modulename": "pyerrors.correlators", "qualname": "Corr.show", "type": "function", "doc": "

Plots the correlator, uses tag as label if available.

\n\n
Parameters
\n\n
    \n
  • x_range (list):\nlist of two values, determining the range of the x-axis e.g. [4, 8]
  • \n
  • comp (Corr or list of Corr):\nCorrelator or list of correlators which are plotted for comparison.
  • \n
  • logscale (bool):\nSets y-axis to logscale
  • \n
  • plateau (Obs):\nplateau to be visualized in the figure
  • \n
  • fit_res (Fit_result):\nFit_result object to be visualized
  • \n
  • ylabel (str):\nLabel for the y-axis
  • \n
  • save (str):\npath to file in which the figure should be saved
  • \n
\n", "parameters": ["self", "x_range", "comp", "y_range", "logscale", "plateau", "fit_res", "ylabel", "save"], "funcdef": "def"}, "pyerrors.correlators.Corr.dump": {"fullname": "pyerrors.correlators.Corr.dump", "modulename": "pyerrors.correlators", "qualname": "Corr.dump", "type": "function", "doc": "

Dumps the Corr into a pickle file

\n\n

Attributes:

\n\n

filename : str\n Name of the file

\n", "parameters": ["self", "filename"], "funcdef": "def"}, "pyerrors.correlators.Corr.print": {"fullname": "pyerrors.correlators.Corr.print", "modulename": "pyerrors.correlators", "qualname": "Corr.print", "type": "function", "doc": "

\n", "parameters": ["self", "range"], "funcdef": "def"}, "pyerrors.correlators.Corr.sqrt": {"fullname": "pyerrors.correlators.Corr.sqrt", "modulename": "pyerrors.correlators", "qualname": "Corr.sqrt", "type": "function", "doc": "

\n", "parameters": ["self"], "funcdef": "def"}, "pyerrors.correlators.Corr.log": {"fullname": "pyerrors.correlators.Corr.log", "modulename": "pyerrors.correlators", "qualname": "Corr.log", "type": "function", "doc": "

\n", "parameters": ["self"], "funcdef": "def"}, "pyerrors.correlators.Corr.exp": {"fullname": "pyerrors.correlators.Corr.exp", "modulename": "pyerrors.correlators", "qualname": "Corr.exp", "type": "function", "doc": "

\n", "parameters": ["self"], "funcdef": "def"}, "pyerrors.correlators.Corr.sin": {"fullname": "pyerrors.correlators.Corr.sin", "modulename": "pyerrors.correlators", "qualname": "Corr.sin", "type": "function", "doc": "

\n", "parameters": ["self"], "funcdef": "def"}, "pyerrors.correlators.Corr.cos": {"fullname": "pyerrors.correlators.Corr.cos", "modulename": "pyerrors.correlators", "qualname": "Corr.cos", "type": "function", "doc": "

\n", "parameters": ["self"], "funcdef": "def"}, "pyerrors.correlators.Corr.tan": {"fullname": "pyerrors.correlators.Corr.tan", "modulename": "pyerrors.correlators", "qualname": "Corr.tan", "type": "function", "doc": "

\n", "parameters": ["self"], "funcdef": "def"}, "pyerrors.correlators.Corr.sinh": {"fullname": "pyerrors.correlators.Corr.sinh", "modulename": "pyerrors.correlators", "qualname": "Corr.sinh", "type": "function", "doc": "

\n", "parameters": ["self"], "funcdef": "def"}, "pyerrors.correlators.Corr.cosh": {"fullname": "pyerrors.correlators.Corr.cosh", "modulename": "pyerrors.correlators", "qualname": "Corr.cosh", "type": "function", "doc": "

\n", "parameters": ["self"], "funcdef": "def"}, "pyerrors.correlators.Corr.tanh": {"fullname": "pyerrors.correlators.Corr.tanh", "modulename": "pyerrors.correlators", "qualname": "Corr.tanh", "type": "function", "doc": "

\n", "parameters": ["self"], "funcdef": "def"}, "pyerrors.correlators.Corr.arcsin": {"fullname": "pyerrors.correlators.Corr.arcsin", "modulename": "pyerrors.correlators", "qualname": "Corr.arcsin", "type": "function", "doc": "

\n", "parameters": ["self"], "funcdef": "def"}, "pyerrors.correlators.Corr.arccos": {"fullname": "pyerrors.correlators.Corr.arccos", "modulename": "pyerrors.correlators", "qualname": "Corr.arccos", "type": "function", "doc": "

\n", "parameters": ["self"], "funcdef": "def"}, "pyerrors.correlators.Corr.arctan": {"fullname": "pyerrors.correlators.Corr.arctan", "modulename": "pyerrors.correlators", "qualname": "Corr.arctan", "type": "function", "doc": "

\n", "parameters": ["self"], "funcdef": "def"}, "pyerrors.correlators.Corr.arcsinh": {"fullname": "pyerrors.correlators.Corr.arcsinh", "modulename": "pyerrors.correlators", "qualname": "Corr.arcsinh", "type": "function", "doc": "

\n", "parameters": ["self"], "funcdef": "def"}, "pyerrors.correlators.Corr.arccosh": {"fullname": "pyerrors.correlators.Corr.arccosh", "modulename": "pyerrors.correlators", "qualname": "Corr.arccosh", "type": "function", "doc": "

\n", "parameters": ["self"], "funcdef": "def"}, "pyerrors.correlators.Corr.arctanh": {"fullname": "pyerrors.correlators.Corr.arctanh", "modulename": "pyerrors.correlators", "qualname": "Corr.arctanh", "type": "function", "doc": "

\n", "parameters": ["self"], "funcdef": "def"}, "pyerrors.dirac": {"fullname": "pyerrors.dirac", "modulename": "pyerrors.dirac", "qualname": "", "type": "module", "doc": "

\n"}, "pyerrors.dirac.Grid_gamma": {"fullname": "pyerrors.dirac.Grid_gamma", "modulename": "pyerrors.dirac", "qualname": "Grid_gamma", "type": "function", "doc": "

Returns gamma matrix in Grid labeling.

\n", "parameters": ["gamma_tag"], "funcdef": "def"}, "pyerrors.fits": {"fullname": "pyerrors.fits", "modulename": "pyerrors.fits", "qualname": "", "type": "module", "doc": "

\n"}, "pyerrors.fits.Fit_result": {"fullname": "pyerrors.fits.Fit_result", "modulename": "pyerrors.fits", "qualname": "Fit_result", "type": "class", "doc": "

Represents fit results.

\n\n
Attributes
\n\n
    \n
  • fit_parameters (list):\nresults for the individual fit parameters,\nalso accessible via indices.
  • \n
\n"}, "pyerrors.fits.Fit_result.__init__": {"fullname": "pyerrors.fits.Fit_result.__init__", "modulename": "pyerrors.fits", "qualname": "Fit_result.__init__", "type": "function", "doc": "

\n", "parameters": ["self"], "funcdef": "def"}, "pyerrors.fits.Fit_result.gamma_method": {"fullname": "pyerrors.fits.Fit_result.gamma_method", "modulename": "pyerrors.fits", "qualname": "Fit_result.gamma_method", "type": "function", "doc": "

Apply the gamma method to all fit parameters

\n", "parameters": ["self"], "funcdef": "def"}, "pyerrors.fits.least_squares": {"fullname": "pyerrors.fits.least_squares", "modulename": "pyerrors.fits", "qualname": "least_squares", "type": "function", "doc": "

Performs a non-linear fit to y = func(x).

\n\n

Arguments:

\n\n

x : list\n list of floats.\ny : list\n list of Obs.\nfunc : object\n fit function, has to be of the form

\n\n
def func(a, x):\n    return a[0] + a[1] * x + a[2] * anp.sinh(x)\n\nFor multiple x values func can be of the form\n\ndef func(a, x):\n    (x1, x2) = x\n    return a[0] * x1 ** 2 + a[1] * x2\n\nIt is important that all numpy functions refer to autograd.numpy, otherwise the differentiation\nwill not work\n
\n\n

priors : list, optional\n priors has to be a list with an entry for every parameter in the fit. The entries can either be\n Obs (e.g. results from a previous fit) or strings containing a value and an error formatted like\n 0.548(23), 500(40) or 0.5(0.4)\n It is important for the subsequent error estimation that the e_tag for the gamma method is large\n enough.\nsilent : bool, optional\n If true all output to the console is omitted (default False).

\n\n
Keyword arguments
\n\n

initial_guess -- can provide an initial guess for the input parameters. Relevant for\n non-linear fits with many parameters.\nmethod -- can be used to choose an alternative method for the minimization of chisquare.\n The possible methods are the ones which can be used for scipy.optimize.minimize and\n migrad of iminuit. If no method is specified, Levenberg-Marquard is used.\n Reliable alternatives are migrad, Powell and Nelder-Mead.\nresplot -- If true, a plot which displays fit, data and residuals is generated (default False).\nqqplot -- If true, a quantile-quantile plot of the fit result is generated (default False).\nexpected_chisquare -- If true prints the expected chisquare which is\n corrected by effects caused by correlated input data.\n This can take a while as the full correlation matrix\n has to be calculated (default False).

\n", "parameters": ["x", "y", "func", "priors", "silent", "kwargs"], "funcdef": "def"}, "pyerrors.fits.standard_fit": {"fullname": "pyerrors.fits.standard_fit", "modulename": "pyerrors.fits", "qualname": "standard_fit", "type": "function", "doc": "

\n", "parameters": ["x", "y", "func", "silent", "kwargs"], "funcdef": "def"}, "pyerrors.fits.odr_fit": {"fullname": "pyerrors.fits.odr_fit", "modulename": "pyerrors.fits", "qualname": "odr_fit", "type": "function", "doc": "

\n", "parameters": ["x", "y", "func", "silent", "kwargs"], "funcdef": "def"}, "pyerrors.fits.total_least_squares": {"fullname": "pyerrors.fits.total_least_squares", "modulename": "pyerrors.fits", "qualname": "total_least_squares", "type": "function", "doc": "

Performs a non-linear fit to y = func(x) and returns a list of Obs corresponding to the fit parameters.

\n\n

x : list\n list of Obs, or a tuple of lists of Obs\ny : list\n list of Obs. The dvalues of the Obs are used as x- and yerror for the fit.\nfunc : object\n func has to be of the form

\n\n
def func(a, x):\n    y = a[0] + a[1] * x + a[2] * anp.sinh(x)\n    return y\n\nFor multiple x values func can be of the form\n\ndef func(a, x):\n    (x1, x2) = x\n    return a[0] * x1 ** 2 + a[1] * x2\n\nIt is important that all numpy functions refer to autograd.numpy, otherwise the differentiation\nwill not work.\n
\n\n

silent : bool, optional\n If true all output to the console is omitted (default False).\nBased on the orthogonal distance regression module of scipy

\n\n
Keyword arguments
\n\n

initial_guess -- can provide an initial guess for the input parameters. Relevant for non-linear\n fits with many parameters.\nexpected_chisquare -- If true prints the expected chisquare which is\n corrected by effects caused by correlated input data.\n This can take a while as the full correlation matrix\n has to be calculated (default False).

\n", "parameters": ["x", "y", "func", "silent", "kwargs"], "funcdef": "def"}, "pyerrors.fits.prior_fit": {"fullname": "pyerrors.fits.prior_fit", "modulename": "pyerrors.fits", "qualname": "prior_fit", "type": "function", "doc": "

\n", "parameters": ["x", "y", "func", "priors", "silent", "kwargs"], "funcdef": "def"}, "pyerrors.fits.fit_lin": {"fullname": "pyerrors.fits.fit_lin", "modulename": "pyerrors.fits", "qualname": "fit_lin", "type": "function", "doc": "

Performs a linear fit to y = n + m * x and returns two Obs n, m.

\n\n

y has to be a list of Obs, the dvalues of the Obs are used as yerror for the fit.\nx can either be a list of floats in which case no xerror is assumed, or\na list of Obs, where the dvalues of the Obs are used as xerror for the fit.

\n", "parameters": ["x", "y", "kwargs"], "funcdef": "def"}, "pyerrors.fits.qqplot": {"fullname": "pyerrors.fits.qqplot", "modulename": "pyerrors.fits", "qualname": "qqplot", "type": "function", "doc": "

Generates a quantile-quantile plot of the fit result which can be used to\ncheck if the residuals of the fit are gaussian distributed.

\n", "parameters": ["x", "o_y", "func", "p"], "funcdef": "def"}, "pyerrors.fits.residual_plot": {"fullname": "pyerrors.fits.residual_plot", "modulename": "pyerrors.fits", "qualname": "residual_plot", "type": "function", "doc": "

Generates a plot which compares the fit to the data and displays the corresponding residuals

\n", "parameters": ["x", "y", "func", "fit_res"], "funcdef": "def"}, "pyerrors.fits.covariance_matrix": {"fullname": "pyerrors.fits.covariance_matrix", "modulename": "pyerrors.fits", "qualname": "covariance_matrix", "type": "function", "doc": "

Returns the covariance matrix of y.

\n", "parameters": ["y"], "funcdef": "def"}, "pyerrors.fits.error_band": {"fullname": "pyerrors.fits.error_band", "modulename": "pyerrors.fits", "qualname": "error_band", "type": "function", "doc": "

Returns the error band for an array of sample values x, for given fit function func with optimized parameters beta.

\n", "parameters": ["x", "func", "beta"], "funcdef": "def"}, "pyerrors.fits.ks_test": {"fullname": "pyerrors.fits.ks_test", "modulename": "pyerrors.fits", "qualname": "ks_test", "type": "function", "doc": "

Performs a Kolmogorov\u2013Smirnov test for the Q-values of all fit objects.

\n\n

If no list is given all Obs in memory are used.

\n\n

Disclaimer: The determination of the individual Q-values as well as this function have not been tested yet.

\n", "parameters": ["obs"], "funcdef": "def"}, "pyerrors.fits.fit_general": {"fullname": "pyerrors.fits.fit_general", "modulename": "pyerrors.fits", "qualname": "fit_general", "type": "function", "doc": "

Performs a non-linear fit to y = func(x) and returns a list of Obs corresponding to the fit parameters.

\n\n

Plausibility of the results should be checked. To control the numerical differentiation\nthe kwargs of numdifftools.step_generators.MaxStepGenerator can be used.

\n\n

func has to be of the form

\n\n

def func(a, x):\n y = a[0] + a[1] * x + a[2] * np.sinh(x)\n return y

\n\n

y has to be a list of Obs, the dvalues of the Obs are used as yerror for the fit.\nx can either be a list of floats in which case no xerror is assumed, or\na list of Obs, where the dvalues of the Obs are used as xerror for the fit.

\n\n
Keyword arguments
\n\n

silent -- If true all output to the console is omitted (default False).\ninitial_guess -- can provide an initial guess for the input parameters. Relevant for non-linear fits\n with many parameters.

\n", "parameters": ["x", "y", "func", "silent", "kwargs"], "funcdef": "def"}, "pyerrors.input": {"fullname": "pyerrors.input", "modulename": "pyerrors.input", "qualname": "", "type": "module", "doc": "

\n"}, "pyerrors.input.bdio": {"fullname": "pyerrors.input.bdio", "modulename": "pyerrors.input.bdio", "qualname": "", "type": "module", "doc": "

\n"}, "pyerrors.input.bdio.read_ADerrors": {"fullname": "pyerrors.input.bdio.read_ADerrors", "modulename": "pyerrors.input.bdio", "qualname": "read_ADerrors", "type": "function", "doc": "

Extract generic MCMC data from a bdio file

\n\n

read_ADerrors requires bdio to be compiled into a shared library. This can be achieved by\nadding the flag -fPIC to CC and changing the all target to

\n\n

all: bdio.o $(LIBDIR)\n gcc -shared -Wl,-soname,libbdio.so -o $(BUILDDIR)/libbdio.so $(BUILDDIR)/bdio.o\n cp $(BUILDDIR)/libbdio.so $(LIBDIR)/

\n\n
Parameters
\n\n
    \n
  • file_path -- path to the bdio file
  • \n
  • bdio_path -- path to the shared bdio library libbdio.so (default ./libbdio.so)
  • \n
\n", "parameters": ["file_path", "bdio_path", "kwargs"], "funcdef": "def"}, "pyerrors.input.bdio.write_ADerrors": {"fullname": "pyerrors.input.bdio.write_ADerrors", "modulename": "pyerrors.input.bdio", "qualname": "write_ADerrors", "type": "function", "doc": "

Write Obs to a bdio file according to ADerrors conventions

\n\n

read_mesons requires bdio to be compiled into a shared library. This can be achieved by\nadding the flag -fPIC to CC and changing the all target to

\n\n

all: bdio.o $(LIBDIR)\n gcc -shared -Wl,-soname,libbdio.so -o $(BUILDDIR)/libbdio.so $(BUILDDIR)/bdio.o\n cp $(BUILDDIR)/libbdio.so $(LIBDIR)/

\n\n
Parameters
\n\n
    \n
  • file_path -- path to the bdio file
  • \n
  • bdio_path -- path to the shared bdio library libbdio.so (default ./libbdio.so)
  • \n
\n", "parameters": ["obs_list", "file_path", "bdio_path", "kwargs"], "funcdef": "def"}, "pyerrors.input.bdio.read_mesons": {"fullname": "pyerrors.input.bdio.read_mesons", "modulename": "pyerrors.input.bdio", "qualname": "read_mesons", "type": "function", "doc": "

Extract mesons data from a bdio file and return it as a dictionary

\n\n

The dictionary can be accessed with a tuple consisting of (type, source_position, kappa1, kappa2)

\n\n

read_mesons requires bdio to be compiled into a shared library. This can be achieved by\nadding the flag -fPIC to CC and changing the all target to

\n\n

all: bdio.o $(LIBDIR)\n gcc -shared -Wl,-soname,libbdio.so -o $(BUILDDIR)/libbdio.so $(BUILDDIR)/bdio.o\n cp $(BUILDDIR)/libbdio.so $(LIBDIR)/

\n\n
Parameters
\n\n
    \n
  • file_path -- path to the bdio file
  • \n
  • bdio_path -- path to the shared bdio library libbdio.so (default ./libbdio.so)
  • \n
  • stop -- stops reading at given configuration number (default None)
  • \n
  • alternative_ensemble_name -- Manually overwrite ensemble name
  • \n
\n", "parameters": ["file_path", "bdio_path", "kwargs"], "funcdef": "def"}, "pyerrors.input.bdio.read_dSdm": {"fullname": "pyerrors.input.bdio.read_dSdm", "modulename": "pyerrors.input.bdio", "qualname": "read_dSdm", "type": "function", "doc": "

Extract dSdm data from a bdio file and return it as a dictionary

\n\n

The dictionary can be accessed with a tuple consisting of (type, kappa)

\n\n

read_dSdm requires bdio to be compiled into a shared library. This can be achieved by\nadding the flag -fPIC to CC and changing the all target to

\n\n

all: bdio.o $(LIBDIR)\n gcc -shared -Wl,-soname,libbdio.so -o $(BUILDDIR)/libbdio.so $(BUILDDIR)/bdio.o\n cp $(BUILDDIR)/libbdio.so $(LIBDIR)/

\n\n
Parameters
\n\n
    \n
  • file_path -- path to the bdio file
  • \n
  • bdio_path -- path to the shared bdio library libbdio.so (default ./libbdio.so)
  • \n
  • stop -- stops reading at given configuration number (default None)
  • \n
\n", "parameters": ["file_path", "bdio_path", "kwargs"], "funcdef": "def"}, "pyerrors.input.hadrons": {"fullname": "pyerrors.input.hadrons", "modulename": "pyerrors.input.hadrons", "qualname": "", "type": "module", "doc": "

\n"}, "pyerrors.input.hadrons.read_meson_hd5": {"fullname": "pyerrors.input.hadrons.read_meson_hd5", "modulename": "pyerrors.input.hadrons", "qualname": "read_meson_hd5", "type": "function", "doc": "

Read hadrons meson hdf5 file and extract the meson labeled 'meson'

\n\n
Parameters
\n\n
    \n
  • path (str):\npath to the files to read
  • \n
  • filestem (str):\nnamestem of the files to read
  • \n
  • ens_id (str):\nname of the ensemble, required for internal bookkeeping
  • \n
  • meson (str):\nlabel of the meson to be extracted, standard value meson_0 which\ncorresponds to the pseudoscalar pseudoscalar two-point function.
  • \n
  • tree (str):\nLabel of the upmost directory in the hdf5 file, default 'meson'\nfor outputs of the Meson module. Can be altered to read input\nfrom other modules with similar structures.
  • \n
\n", "parameters": ["path", "filestem", "ens_id", "meson", "tree"], "funcdef": "def"}, "pyerrors.input.hadrons.read_ExternalLeg_hd5": {"fullname": "pyerrors.input.hadrons.read_ExternalLeg_hd5", "modulename": "pyerrors.input.hadrons", "qualname": "read_ExternalLeg_hd5", "type": "function", "doc": "

Read hadrons ExternalLeg hdf5 file and output an array of CObs

\n\n
Parameters
\n\n
    \n
  • path -- path to the files to read
  • \n
  • filestem -- namestem of the files to read
  • \n
  • ens_id -- name of the ensemble, required for internal bookkeeping
  • \n
  • order -- order in which the array is to be reshaped: 'F' for the first index changing fastest (9 4x4 matrices, default),\n'C' for the last index changing fastest (16 3x3 matrices)
  • \n
\n", "parameters": ["path", "filestem", "ens_id", "order"], "funcdef": "def"}, "pyerrors.input.hadrons.read_Bilinear_hd5": {"fullname": "pyerrors.input.hadrons.read_Bilinear_hd5", "modulename": "pyerrors.input.hadrons", "qualname": "read_Bilinear_hd5", "type": "function", "doc": "

Read hadrons Bilinear hdf5 file and output an array of CObs

\n\n
Parameters
\n\n
    \n
  • path -- path to the files to read
  • \n
  • filestem -- namestem of the files to read
  • \n
  • ens_id -- name of the ensemble, required for internal bookkeeping
  • \n
  • order -- order in which the array is to be reshaped: 'F' for the first index changing fastest (9 4x4 matrices, default),\n'C' for the last index changing fastest (16 3x3 matrices)
  • \n
\n", "parameters": ["path", "filestem", "ens_id", "order"], "funcdef": "def"}, "pyerrors.input.misc": {"fullname": "pyerrors.input.misc", "modulename": "pyerrors.input.misc", "qualname": "", "type": "module", "doc": "

\n"}, "pyerrors.input.misc.read_pbp": {"fullname": "pyerrors.input.misc.read_pbp", "modulename": "pyerrors.input.misc", "qualname": "read_pbp", "type": "function", "doc": "

Read pbp format from given folder structure. Returns a list of length nrw

\n\n
Keyword arguments
\n\n

r_start -- list which contains the first config to be read for each replicum\nr_stop -- list which contains the last config to be read for each replicum

\n", "parameters": ["path", "prefix", "kwargs"], "funcdef": "def"}, "pyerrors.input.openQCD": {"fullname": "pyerrors.input.openQCD", "modulename": "pyerrors.input.openQCD", "qualname": "", "type": "module", "doc": "

\n"}, "pyerrors.input.openQCD.read_rwms": {"fullname": "pyerrors.input.openQCD.read_rwms", "modulename": "pyerrors.input.openQCD", "qualname": "read_rwms", "type": "function", "doc": "

Read rwms format from given folder structure. Returns a list of length nrw

\n\n
Attributes
\n\n
    \n
  • version -- version of openQCD, default 2.0
  • \n
\n\n
Keyword arguments
\n\n

r_start -- list which contains the first config to be read for each replicum\nr_stop -- list which contains the last config to be read for each replicum\npostfix -- postfix of the file to read, e.g. '.ms1' for openQCD-files

\n", "parameters": ["path", "prefix", "version", "names", "kwargs"], "funcdef": "def"}, "pyerrors.input.openQCD.extract_t0": {"fullname": "pyerrors.input.openQCD.extract_t0", "modulename": "pyerrors.input.openQCD", "qualname": "extract_t0", "type": "function", "doc": "

Extract t0 from given .ms.dat files. Returns t0 as Obs.

\n\n

It is assumed that all boundary effects have sufficiently decayed at x0=xmin.\nThe data around the zero crossing of t^2 - 0.3 is fitted with a linear function\nfrom which the exact root is extracted.\nOnly works with openQCD v 1.2.

\n\n
Parameters
\n\n
    \n
  • path -- Path to .ms.dat files
  • \n
  • prefix -- Ensemble prefix
  • \n
  • dtr_read -- Determines how many trajectories should be skipped when reading the ms.dat files. Corresponds to dtr_cnfg / dtr_ms in the openQCD input file.
  • \n
  • xmin -- First timeslice where the boundary effects have sufficiently decayed.
  • \n
  • spatial_extent -- spatial extent of the lattice, required for normalization.
  • \n
  • fit_range -- Number of data points left and right of the zero crossing to be included in the linear fit (default 5).
  • \n
\n\n
Keyword arguments
\n\n

r_start -- list which contains the first config to be read for each replicum.\nr_stop -- list which contains the last config to be read for each replicum.\nplaquette -- If true extract the plaquette estimate of t0 instead.

\n", "parameters": ["path", "prefix", "dtr_read", "xmin", "spatial_extent", "fit_range", "kwargs"], "funcdef": "def"}, "pyerrors.input.sfcf": {"fullname": "pyerrors.input.sfcf", "modulename": "pyerrors.input.sfcf", "qualname": "", "type": "module", "doc": "

\n"}, "pyerrors.input.sfcf.read_sfcf": {"fullname": "pyerrors.input.sfcf.read_sfcf", "modulename": "pyerrors.input.sfcf", "qualname": "read_sfcf", "type": "function", "doc": "

Read sfcf C format from given folder structure.

\n\n
Keyword arguments
\n\n

im -- if True, read imaginary instead of real part of the correlation function.\nsingle -- if True, read a boundary-to-boundary correlation function with a single value\nb2b -- if True, read a time-dependent boundary-to-boundary correlation function\nnames -- Alternative labeling for replicas/ensembles. Has to have the appropriate length

\n", "parameters": ["path", "prefix", "name", "kwargs"], "funcdef": "def"}, "pyerrors.input.sfcf.read_sfcf_c": {"fullname": "pyerrors.input.sfcf.read_sfcf_c", "modulename": "pyerrors.input.sfcf", "qualname": "read_sfcf_c", "type": "function", "doc": "

Read sfcf c format from given folder structure.

\n\n
Arguments
\n\n

quarks -- Label of the quarks used in the sfcf input file\nnoffset -- Offset of the source (only relevant when wavefunctions are used)\nwf -- ID of wave function\nwf2 -- ID of the second wavefunction (only relevant for boundary-to-boundary correlation functions)

\n\n
Keyword arguments
\n\n

im -- if True, read imaginary instead of real part of the correlation function.\nb2b -- if True, read a time-dependent boundary-to-boundary correlation function\nnames -- Alternative labeling for replicas/ensembles. Has to have the appropriate length\nens_name : str\n replaces the name of the ensemble

\n", "parameters": ["path", "prefix", "name", "quarks", "noffset", "wf", "wf2", "kwargs"], "funcdef": "def"}, "pyerrors.input.sfcf.read_qtop": {"fullname": "pyerrors.input.sfcf.read_qtop", "modulename": "pyerrors.input.sfcf", "qualname": "read_qtop", "type": "function", "doc": "

Read qtop format from given folder structure.

\n\n
Keyword arguments
\n\n

target -- specifies the topological sector to be reweighted to (default 0)\nfull -- if true read the charge instead of the reweighting factor.

\n", "parameters": ["path", "prefix", "kwargs"], "funcdef": "def"}, "pyerrors.jackknifing": {"fullname": "pyerrors.jackknifing", "modulename": "pyerrors.jackknifing", "qualname": "", "type": "module", "doc": "

\n"}, "pyerrors.jackknifing.Jack": {"fullname": "pyerrors.jackknifing.Jack", "modulename": "pyerrors.jackknifing", "qualname": "Jack", "type": "class", "doc": "

\n"}, "pyerrors.jackknifing.Jack.__init__": {"fullname": "pyerrors.jackknifing.Jack.__init__", "modulename": "pyerrors.jackknifing", "qualname": "Jack.__init__", "type": "function", "doc": "

\n", "parameters": ["self", "value", "jacks"], "funcdef": "def"}, "pyerrors.jackknifing.Jack.print": {"fullname": "pyerrors.jackknifing.Jack.print", "modulename": "pyerrors.jackknifing", "qualname": "Jack.print", "type": "function", "doc": "

Print basic properties of the Jack.

\n", "parameters": ["self", "kwargs"], "funcdef": "def"}, "pyerrors.jackknifing.Jack.plot_tauint": {"fullname": "pyerrors.jackknifing.Jack.plot_tauint", "modulename": "pyerrors.jackknifing", "qualname": "Jack.plot_tauint", "type": "function", "doc": "

\n", "parameters": ["self"], "funcdef": "def"}, "pyerrors.jackknifing.Jack.plot_history": {"fullname": "pyerrors.jackknifing.Jack.plot_history", "modulename": "pyerrors.jackknifing", "qualname": "Jack.plot_history", "type": "function", "doc": "

\n", "parameters": ["self"], "funcdef": "def"}, "pyerrors.jackknifing.Jack.dump": {"fullname": "pyerrors.jackknifing.Jack.dump", "modulename": "pyerrors.jackknifing", "qualname": "Jack.dump", "type": "function", "doc": "

Dump the Jack to a pickle file 'name'.

\n\n

Keyword arguments:\npath -- specifies a custom path for the file (default '.')

\n", "parameters": ["self", "name", "kwargs"], "funcdef": "def"}, "pyerrors.jackknifing.generate_jack": {"fullname": "pyerrors.jackknifing.generate_jack", "modulename": "pyerrors.jackknifing", "qualname": "generate_jack", "type": "function", "doc": "

\n", "parameters": ["obs", "kwargs"], "funcdef": "def"}, "pyerrors.jackknifing.derived_jack": {"fullname": "pyerrors.jackknifing.derived_jack", "modulename": "pyerrors.jackknifing", "qualname": "derived_jack", "type": "function", "doc": "

Construct a derived Jack according to func(data, **kwargs).

\n\n
Parameters
\n\n
    \n
  • func -- arbitrary function of the form func(data, **kwargs). For the automatic differentiation to work, all numpy functions have to have the autograd wrapper (use 'import autograd.numpy as np').
  • \n
  • data -- list of Jacks, e.g. [jack1, jack2, jack3].
  • \n
\n\n
Notes
\n\n

For simple mathematical operations it can be practical to use anonymous functions.\nFor the ratio of two jacks one can e.g. use

\n\n

new_jack = derived_jack(lambda x : x[0] / x[1], [jack1, jack2])

\n", "parameters": ["func", "data", "kwargs"], "funcdef": "def"}, "pyerrors.linalg": {"fullname": "pyerrors.linalg", "modulename": "pyerrors.linalg", "qualname": "", "type": "module", "doc": "

\n"}, "pyerrors.linalg.derived_array": {"fullname": "pyerrors.linalg.derived_array", "modulename": "pyerrors.linalg", "qualname": "derived_array", "type": "function", "doc": "

Construct a derived Obs according to func(data, **kwargs) of matrix-valued data\nusing automatic differentiation.

\n\n
Parameters
\n\n
    \n
  • func -- arbitrary function of the form func(data, **kwargs). For the automatic differentiation to work, all numpy functions have to have\nthe autograd wrapper (use 'import autograd.numpy as anp').
  • \n
  • data -- list of Obs, e.g. [obs1, obs2, obs3].
  • \n
\n\n
Keyword arguments
\n\n

man_grad -- manually supply a list or an array which contains the jacobian\n of func. Use cautiously, supplying the wrong derivative will\n not be intercepted.

\n", "parameters": ["func", "data", "kwargs"], "funcdef": "def"}, "pyerrors.linalg.matmul": {"fullname": "pyerrors.linalg.matmul", "modulename": "pyerrors.linalg", "qualname": "matmul", "type": "function", "doc": "

Matrix multiply all operands.

\n\n

Supports real and complex valued matrices and is faster compared to\nstandard multiplication via the @ operator.
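As a sketch, assuming mat_a and mat_b are numpy object arrays filled with Obs entries (both hypothetical):

import pyerrors as pe

product = pe.linalg.matmul(mat_a, mat_b)  # same result as mat_a @ mat_b, but faster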

\n", "parameters": ["operands"], "funcdef": "def"}, "pyerrors.linalg.inv": {"fullname": "pyerrors.linalg.inv", "modulename": "pyerrors.linalg", "qualname": "inv", "type": "function", "doc": "

Inverse of Obs or CObs valued matrices.

\n", "parameters": ["x"], "funcdef": "def"}, "pyerrors.linalg.cholesky": {"fullname": "pyerrors.linalg.cholesky", "modulename": "pyerrors.linalg", "qualname": "cholesky", "type": "function", "doc": "

Cholesky decomposition of Obs or CObs valued matrices.

\n", "parameters": ["x"], "funcdef": "def"}, "pyerrors.linalg.scalar_mat_op": {"fullname": "pyerrors.linalg.scalar_mat_op", "modulename": "pyerrors.linalg", "qualname": "scalar_mat_op", "type": "function", "doc": "

Applies the matrix to scalar operation op to a given matrix of Obs.

\n", "parameters": ["op", "obs", "kwargs"], "funcdef": "def"}, "pyerrors.linalg.eigh": {"fullname": "pyerrors.linalg.eigh", "modulename": "pyerrors.linalg", "qualname": "eigh", "type": "function", "doc": "

Computes the eigenvalues and eigenvectors of a given hermitian matrix of Obs according to np.linalg.eigh.

\n", "parameters": ["obs", "kwargs"], "funcdef": "def"}, "pyerrors.linalg.eig": {"fullname": "pyerrors.linalg.eig", "modulename": "pyerrors.linalg", "qualname": "eig", "type": "function", "doc": "

Computes the eigenvalues of a given matrix of Obs according to np.linalg.eig.

\n", "parameters": ["obs", "kwargs"], "funcdef": "def"}, "pyerrors.linalg.pinv": {"fullname": "pyerrors.linalg.pinv", "modulename": "pyerrors.linalg", "qualname": "pinv", "type": "function", "doc": "

Computes the Moore-Penrose pseudoinverse of a matrix of Obs.

\n", "parameters": ["obs", "kwargs"], "funcdef": "def"}, "pyerrors.linalg.svd": {"fullname": "pyerrors.linalg.svd", "modulename": "pyerrors.linalg", "qualname": "svd", "type": "function", "doc": "

Computes the singular value decomposition of a matrix of Obs.

\n", "parameters": ["obs", "kwargs"], "funcdef": "def"}, "pyerrors.linalg.slogdet": {"fullname": "pyerrors.linalg.slogdet", "modulename": "pyerrors.linalg", "qualname": "slogdet", "type": "function", "doc": "

Computes the determinant of a matrix of Obs via np.linalg.slogdet.

\n", "parameters": ["obs", "kwargs"], "funcdef": "def"}, "pyerrors.linalg.grad_eig": {"fullname": "pyerrors.linalg.grad_eig", "modulename": "pyerrors.linalg", "qualname": "grad_eig", "type": "function", "doc": "

Gradient of a general square (complex valued) matrix

\n", "parameters": ["ans", "x"], "funcdef": "def"}, "pyerrors.misc": {"fullname": "pyerrors.misc", "modulename": "pyerrors.misc", "qualname": "", "type": "module", "doc": "

\n"}, "pyerrors.misc.gen_correlated_data": {"fullname": "pyerrors.misc.gen_correlated_data", "modulename": "pyerrors.misc", "qualname": "gen_correlated_data", "type": "function", "doc": "

Generate observables with given covariance and autocorrelation times.

\n\n
Arguments
\n\n

means -- list containing the mean value of each observable.\ncov -- covariance matrix for the data to be generated.\nname -- ensemble name for the data to be generated.\ntau -- can either be a real number or a list with an entry for\n every dataset.\nsamples -- number of samples to be generated for each observable.
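For example, two correlated synthetic observables could be generated like this (all numbers are arbitrary illustrations):

import numpy as np
import pyerrors as pe

cov = np.array([[0.5, -0.2], [-0.2, 0.3]])  # desired covariance matrix
obs_list = pe.misc.gen_correlated_data([1.0, 2.0], cov, 'synthetic_ens', tau=3.0, samples=1000)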

\n", "parameters": ["means", "cov", "name", "tau", "samples"], "funcdef": "def"}, "pyerrors.mpm": {"fullname": "pyerrors.mpm", "modulename": "pyerrors.mpm", "qualname": "", "type": "module", "doc": "

\n"}, "pyerrors.mpm.matrix_pencil_method": {"fullname": "pyerrors.mpm.matrix_pencil_method", "modulename": "pyerrors.mpm", "qualname": "matrix_pencil_method", "type": "function", "doc": "

Matrix pencil method to extract k energy levels from data

\n\n

Implementation of the matrix pencil method based on\neq. (2.17) of Y. Hua, T. K. Sarkar, IEEE Trans. Acoust. 38, 814-824 (1990)

\n\n
Parameters
\n\n
    \n
  • data -- can be a list of Obs for the analysis of a single correlator, or a list of lists of Obs if several correlators are to be analyzed at once.
  • \n
  • k -- Number of states to extract (default 1).
  • \n
  • p -- matrix pencil parameter which filters noise. The optimal value is expected between len(data)/3 and 2*len(data)/3. The computation is more expensive the closer p is\nto len(data)/2 but could possibly suppress more noise (default len(data)//2).
  • \n
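As a sketch, extracting the two lowest energy levels from a list of Obs called corr_data (hypothetical):

import pyerrors as pe

energies = pe.mpm.matrix_pencil_method(corr_data, k=2)  # p defaults to len(data)//2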
\n", "parameters": ["corrs", "k", "p", "kwargs"], "funcdef": "def"}, "pyerrors.mpm.matrix_pencil_method_old": {"fullname": "pyerrors.mpm.matrix_pencil_method_old", "modulename": "pyerrors.mpm", "qualname": "matrix_pencil_method_old", "type": "function", "doc": "

Older implementation of the matrix pencil method with pencil p on given data to\n extract energy levels.

\n\n
Parameters
\n\n
    \n
  • data -- lists of Obs, where the nth entry is considered to be the correlation function at x0=n+offset.
  • \n
  • p -- matrix pencil parameter which corresponds to the number of energy levels to extract. Higher values for p can help decrease noise.
  • \n
  • noise_level -- If this argument is not None an additional prefiltering via singular value decomposition is performed in which all singular values below 10^(-noise_level)\ntimes the largest singular value are discarded. This increases the computation time.
  • \n
  • verbose -- if larger than zero details about the noise filtering are printed to stdout (default 1).
  • \n
\n", "parameters": ["data", "p", "noise_level", "verbose", "kwargs"], "funcdef": "def"}, "pyerrors.npr": {"fullname": "pyerrors.npr", "modulename": "pyerrors.npr", "qualname": "", "type": "module", "doc": "

\n"}, "pyerrors.npr.Npr_matrix": {"fullname": "pyerrors.npr.Npr_matrix", "modulename": "pyerrors.npr", "qualname": "Npr_matrix", "type": "class", "doc": "

ndarray(shape, dtype=float, buffer=None, offset=0,\n strides=None, order=None)

\n\n

An array object represents a multidimensional, homogeneous array\nof fixed-size items. An associated data-type object describes the\nformat of each element in the array (its byte-order, how many bytes it\noccupies in memory, whether it is an integer, a floating point number,\nor something else, etc.)

\n\n

Arrays should be constructed using array, zeros or empty (refer\nto the See Also section below). The parameters given here refer to\na low-level method (ndarray(...)) for instantiating an array.

\n\n

For more information, refer to the numpy module and examine the\nmethods and attributes of an array.

\n\n
Parameters
\n\n
    \n
  • (for the __new__ method; see Notes below)
  • \n
  • shape (tuple of ints):\nShape of created array.
  • \n
  • dtype (data-type, optional):\nAny object that can be interpreted as a numpy data type.
  • \n
  • buffer (object exposing buffer interface, optional):\nUsed to fill the array with data.
  • \n
  • offset (int, optional):\nOffset of array data in buffer.
  • \n
  • strides (tuple of ints, optional):\nStrides of data in memory.
  • \n
  • order ({'C', 'F'}, optional):\nRow-major (C-style) or column-major (Fortran-style) order.
  • \n
\n\n
Attributes
\n\n
    \n
  • T (ndarray):\nTranspose of the array.
  • \n
  • data (buffer):\nThe array's elements, in memory.
  • \n
  • dtype (dtype object):\nDescribes the format of the elements in the array.
  • \n
  • flags (dict):\nDictionary containing information related to memory use, e.g.,\n'C_CONTIGUOUS', 'OWNDATA', 'WRITEABLE', etc.
  • \n
  • flat (numpy.flatiter object):\nFlattened version of the array as an iterator. The iterator\nallows assignments, e.g., x.flat = 3 (See ndarray.flat for\nassignment examples; TODO).
  • \n
  • imag (ndarray):\nImaginary part of the array.
  • \n
  • real (ndarray):\nReal part of the array.
  • \n
  • size (int):\nNumber of elements in the array.
  • \n
  • itemsize (int):\nThe memory use of each array element in bytes.
  • \n
  • nbytes (int):\nThe total number of bytes required to store the array data,\ni.e., itemsize * size.
  • \n
  • ndim (int):\nThe array's number of dimensions.
  • \n
  • shape (tuple of ints):\nShape of the array.
  • \n
  • strides (tuple of ints):\nThe step-size required to move from one element to the next in\nmemory. For example, a contiguous (3, 4) array of type\nint16 in C-order has strides (8, 2). This implies that\nto move from element to element in memory requires jumps of 2 bytes.\nTo move from row-to-row, one needs to jump 8 bytes at a time\n(2 * 4).
  • \n
  • ctypes (ctypes object):\nClass containing properties of the array needed for interaction\nwith ctypes.
  • \n
  • base (ndarray):\nIf the array is a view into another array, that array is its base\n(unless that array is also a view). The base array is where the\narray data is actually stored.
  • \n
\n\n
See Also
\n\n

array: Construct an array.
\nzeros: Create an array, each element of which is zero.
\nempty: Create an array, but leave its allocated memory unchanged (i.e.,\nit contains \"garbage\").
\ndtype: Create a data-type.
\nnumpy.typing.NDArray: A :term:generic <generic type> version\nof ndarray.

\n\n
Notes
\n\n

There are two modes of creating an array using __new__:

\n\n
    \n
  1. If buffer is None, then only shape, dtype, and order\nare used.
  2. If buffer is an object exposing the buffer interface, then\nall keywords are interpreted.
\n\n

No __init__ method is needed because the array is fully initialized\nafter the __new__ method.

\n\n
Examples
\n\n

These examples illustrate the low-level ndarray constructor. Refer\nto the See Also section above for easier ways of constructing an\nndarray.

\n\n

First mode, buffer is None:

\n\n
>>> np.ndarray(shape=(2,2), dtype=float, order='F')\narray([[0.0e+000, 0.0e+000], # random\n       [     nan, 2.5e-323]])\n
\n\n

Second mode:

\n\n
>>> np.ndarray((2,), buffer=np.array([1,2,3]),\n...            offset=np.int_().itemsize,\n...            dtype=int) # offset = 1*itemsize, i.e. skip first element\narray([2, 3])\n
\n"}, "pyerrors.npr.Npr_matrix.__init__": {"fullname": "pyerrors.npr.Npr_matrix.__init__", "modulename": "pyerrors.npr", "qualname": "Npr_matrix.__init__", "type": "function", "doc": "

\n", "parameters": [], "funcdef": "def"}, "pyerrors.npr.Npr_matrix.g5H": {"fullname": "pyerrors.npr.Npr_matrix.g5H", "modulename": "pyerrors.npr", "qualname": "Npr_matrix.g5H", "type": "variable", "doc": "

Gamma_5 hermitean conjugate

\n\n

Returns gamma_5 @ M.T.conj() @ gamma_5 and exchanges in and out going\nmomenta. Works only for 12x12 matrices.

\n"}, "pyerrors.npr.inv_propagator": {"fullname": "pyerrors.npr.inv_propagator", "modulename": "pyerrors.npr", "qualname": "inv_propagator", "type": "function", "doc": "

Inverts a 12x12 quark propagator

\n", "parameters": ["prop"], "funcdef": "def"}, "pyerrors.npr.Zq": {"fullname": "pyerrors.npr.Zq", "modulename": "pyerrors.npr", "qualname": "Zq", "type": "function", "doc": "

Calculates the quark field renormalization constant Zq

\n\n

Attributes:\ninv_prop -- Inverted 12x12 quark propagator\nfermion -- Fermion type for which the tree-level propagator is used\n in the calculation of Zq. Default Wilson.
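A hedged usage sketch, assuming prop is a 12x12 quark propagator as returned by one of the npr input routines (hypothetical):

import pyerrors as pe

inv_prop = pe.npr.inv_propagator(prop)
zq = pe.npr.Zq(inv_prop)  # uses the Wilson tree-level propagator by default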

\n", "parameters": ["inv_prop", "fermion"], "funcdef": "def"}, "pyerrors.obs": {"fullname": "pyerrors.obs", "modulename": "pyerrors.obs", "qualname": "", "type": "module", "doc": "

\n"}, "pyerrors.obs.Obs": {"fullname": "pyerrors.obs.Obs", "modulename": "pyerrors.obs", "qualname": "Obs", "type": "class", "doc": "

Class for a general observable.

\n\n

Instances of Obs are the basic objects of a pyerrors error analysis.\nThey are initialized with a list which contains arrays of samples for\ndifferent ensembles/replica and another list of same length which contains\nthe names of the ensembles/replica. Mathematical operations can be\nperformed on instances. The result is another instance of Obs. The error of\nan instance can be computed with the gamma_method. Also contains additional\nmethods for output and visualization of the error calculation.

\n\n
Attributes
\n\n
    \n
  • S_global (float):\nStandard value for S (default 2.0)
  • \n
  • S_dict (dict):\nDictionary for S values. If an entry for a given ensemble\nexists this overwrites the standard value for that ensemble.
  • \n
  • tau_exp_global (float):\nStandard value for tau_exp (default 0.0)
  • \n
  • tau_exp_dict (dict):\nDictionary for tau_exp values. If an entry for a given ensemble exists\nthis overwrites the standard value for that ensemble.
  • \n
  • N_sigma_global (float):\nStandard value for N_sigma (default 1.0)
  • \n
\n"}, "pyerrors.obs.Obs.__init__": {"fullname": "pyerrors.obs.Obs.__init__", "modulename": "pyerrors.obs", "qualname": "Obs.__init__", "type": "function", "doc": "

Initialize Obs object.

\n\n
Attributes
\n\n
    \n
  • samples (list):\nlist of numpy arrays containing the Monte Carlo samples
  • \n
  • names (list):\nlist of strings labeling the individual samples
  • \n
  • idl (list, optional):\nlist of ranges or lists on which the samples are defined
  • \n
  • means (list, optional):\nlist of mean values for the case that the mean values were\nalready subtracted from the samples
  • \n
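A sketch of constructing an Obs on an irregular Monte Carlo chain via the optional idl argument (samples and configuration numbers are made up):

import numpy as np
import pyerrors as pe

samples = np.random.normal(1.0, 0.1, 500)
my_obs = pe.Obs([samples], ['ens_A'], idl=[range(20, 1020, 2)])  # 500 configs with spacing 2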
\n", "parameters": ["self", "samples", "names", "idl", "means", "kwargs"], "funcdef": "def"}, "pyerrors.obs.Obs.S_global": {"fullname": "pyerrors.obs.Obs.S_global", "modulename": "pyerrors.obs", "qualname": "Obs.S_global", "type": "variable", "doc": "

\n"}, "pyerrors.obs.Obs.S_dict": {"fullname": "pyerrors.obs.Obs.S_dict", "modulename": "pyerrors.obs", "qualname": "Obs.S_dict", "type": "variable", "doc": "

\n"}, "pyerrors.obs.Obs.tau_exp_global": {"fullname": "pyerrors.obs.Obs.tau_exp_global", "modulename": "pyerrors.obs", "qualname": "Obs.tau_exp_global", "type": "variable", "doc": "

\n"}, "pyerrors.obs.Obs.tau_exp_dict": {"fullname": "pyerrors.obs.Obs.tau_exp_dict", "modulename": "pyerrors.obs", "qualname": "Obs.tau_exp_dict", "type": "variable", "doc": "

\n"}, "pyerrors.obs.Obs.N_sigma_global": {"fullname": "pyerrors.obs.Obs.N_sigma_global", "modulename": "pyerrors.obs", "qualname": "Obs.N_sigma_global", "type": "variable", "doc": "

\n"}, "pyerrors.obs.Obs.filter_eps": {"fullname": "pyerrors.obs.Obs.filter_eps", "modulename": "pyerrors.obs", "qualname": "Obs.filter_eps", "type": "variable", "doc": "

\n"}, "pyerrors.obs.Obs.names": {"fullname": "pyerrors.obs.Obs.names", "modulename": "pyerrors.obs", "qualname": "Obs.names", "type": "variable", "doc": "

\n"}, "pyerrors.obs.Obs.shape": {"fullname": "pyerrors.obs.Obs.shape", "modulename": "pyerrors.obs", "qualname": "Obs.shape", "type": "variable", "doc": "

\n"}, "pyerrors.obs.Obs.r_values": {"fullname": "pyerrors.obs.Obs.r_values", "modulename": "pyerrors.obs", "qualname": "Obs.r_values", "type": "variable", "doc": "

\n"}, "pyerrors.obs.Obs.deltas": {"fullname": "pyerrors.obs.Obs.deltas", "modulename": "pyerrors.obs", "qualname": "Obs.deltas", "type": "variable", "doc": "

\n"}, "pyerrors.obs.Obs.idl": {"fullname": "pyerrors.obs.Obs.idl", "modulename": "pyerrors.obs", "qualname": "Obs.idl", "type": "variable", "doc": "

\n"}, "pyerrors.obs.Obs.is_merged": {"fullname": "pyerrors.obs.Obs.is_merged", "modulename": "pyerrors.obs", "qualname": "Obs.is_merged", "type": "variable", "doc": "

\n"}, "pyerrors.obs.Obs.N": {"fullname": "pyerrors.obs.Obs.N", "modulename": "pyerrors.obs", "qualname": "Obs.N", "type": "variable", "doc": "

\n"}, "pyerrors.obs.Obs.ddvalue": {"fullname": "pyerrors.obs.Obs.ddvalue", "modulename": "pyerrors.obs", "qualname": "Obs.ddvalue", "type": "variable", "doc": "

\n"}, "pyerrors.obs.Obs.reweighted": {"fullname": "pyerrors.obs.Obs.reweighted", "modulename": "pyerrors.obs", "qualname": "Obs.reweighted", "type": "variable", "doc": "

\n"}, "pyerrors.obs.Obs.tag": {"fullname": "pyerrors.obs.Obs.tag", "modulename": "pyerrors.obs", "qualname": "Obs.tag", "type": "variable", "doc": "

\n"}, "pyerrors.obs.Obs.value": {"fullname": "pyerrors.obs.Obs.value", "modulename": "pyerrors.obs", "qualname": "Obs.value", "type": "variable", "doc": "

\n"}, "pyerrors.obs.Obs.dvalue": {"fullname": "pyerrors.obs.Obs.dvalue", "modulename": "pyerrors.obs", "qualname": "Obs.dvalue", "type": "variable", "doc": "

\n"}, "pyerrors.obs.Obs.e_names": {"fullname": "pyerrors.obs.Obs.e_names", "modulename": "pyerrors.obs", "qualname": "Obs.e_names", "type": "variable", "doc": "

\n"}, "pyerrors.obs.Obs.e_content": {"fullname": "pyerrors.obs.Obs.e_content", "modulename": "pyerrors.obs", "qualname": "Obs.e_content", "type": "variable", "doc": "

\n"}, "pyerrors.obs.Obs.expand_deltas": {"fullname": "pyerrors.obs.Obs.expand_deltas", "modulename": "pyerrors.obs", "qualname": "Obs.expand_deltas", "type": "function", "doc": "

Expand deltas defined on idx to a regular, contiguous range, where holes are filled by 0.\n If idx is of type range, the deltas are not changed

\n\n
Parameters
\n\n
    \n
  • deltas -- List of fluctuations
  • \n
  • idx -- List or range of configs on which the deltas are defined.
  • \n
  • shape -- Number of configs in idx.
  • \n
\n", "parameters": ["self", "deltas", "idx", "shape"], "funcdef": "def"}, "pyerrors.obs.Obs.calc_gamma": {"fullname": "pyerrors.obs.Obs.calc_gamma", "modulename": "pyerrors.obs", "qualname": "Obs.calc_gamma", "type": "function", "doc": "

Calculate Gamma_{AA} from the deltas, which are defined on idx.\n idx is assumed to be a contiguous range (possibly with a stepsize != 1)

\n\n
Parameters
\n\n
    \n
  • deltas -- List of fluctuations
  • \n
  • idx -- List or range of configs on which the deltas are defined.
  • \n
  • shape -- Number of configs in idx.
  • \n
  • w_max -- Upper bound for the summation window
  • \n
  • fft -- boolean, which determines whether the fft algorithm is used for the computation of the autocorrelation function
  • \n
\n", "parameters": ["self", "deltas", "idx", "shape", "w_max", "fft"], "funcdef": "def"}, "pyerrors.obs.Obs.gamma_method": {"fullname": "pyerrors.obs.Obs.gamma_method", "modulename": "pyerrors.obs", "qualname": "Obs.gamma_method", "type": "function", "doc": "

Calculate the error and related properties of the Obs.

\n\n
Keyword arguments
\n\n

S : float\n specifies a custom value for the parameter S (default 2.0), can be\n a float or an array of floats for different ensembles\ntau_exp : float\n positive value triggers the critical slowing down analysis\n (default 0.0), can be a float or an array of floats for different\n ensembles\nN_sigma : float\n number of standard deviations from zero until the tail is\n attached to the autocorrelation function (default 1)\nfft : bool\n determines whether the fft algorithm is used for the computation\n of the autocorrelation function (default True)
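For example, to rerun the error analysis with a larger S and attached exponential tails (my_obs stands for any Obs instance, the values are purely illustrative):

my_obs.gamma_method(S=3.0, tau_exp=10.0, N_sigma=2)
my_obs.details()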

\n", "parameters": ["self", "kwargs"], "funcdef": "def"}, "pyerrors.obs.Obs.print": {"fullname": "pyerrors.obs.Obs.print", "modulename": "pyerrors.obs", "qualname": "Obs.print", "type": "function", "doc": "

\n", "parameters": ["self", "level"], "funcdef": "def"}, "pyerrors.obs.Obs.details": {"fullname": "pyerrors.obs.Obs.details", "modulename": "pyerrors.obs", "qualname": "Obs.details", "type": "function", "doc": "

Output detailed properties of the Obs.

\n", "parameters": ["self", "ens_content"], "funcdef": "def"}, "pyerrors.obs.Obs.is_zero_within_error": {"fullname": "pyerrors.obs.Obs.is_zero_within_error", "modulename": "pyerrors.obs", "qualname": "Obs.is_zero_within_error", "type": "function", "doc": "

Checks whether the observable is zero within 'sigma' standard errors.

\n\n

Only works properly when the gamma method was run.

\n", "parameters": ["self", "sigma"], "funcdef": "def"}, "pyerrors.obs.Obs.is_zero": {"fullname": "pyerrors.obs.Obs.is_zero", "modulename": "pyerrors.obs", "qualname": "Obs.is_zero", "type": "function", "doc": "

Checks whether the observable is zero within machine precision.

\n", "parameters": ["self"], "funcdef": "def"}, "pyerrors.obs.Obs.plot_tauint": {"fullname": "pyerrors.obs.Obs.plot_tauint", "modulename": "pyerrors.obs", "qualname": "Obs.plot_tauint", "type": "function", "doc": "

Plot integrated autocorrelation time for each ensemble.

\n", "parameters": ["self", "save"], "funcdef": "def"}, "pyerrors.obs.Obs.plot_rho": {"fullname": "pyerrors.obs.Obs.plot_rho", "modulename": "pyerrors.obs", "qualname": "Obs.plot_rho", "type": "function", "doc": "

Plot normalized autocorrelation function time for each ensemble.

\n", "parameters": ["self"], "funcdef": "def"}, "pyerrors.obs.Obs.plot_rep_dist": {"fullname": "pyerrors.obs.Obs.plot_rep_dist", "modulename": "pyerrors.obs", "qualname": "Obs.plot_rep_dist", "type": "function", "doc": "

Plot replica distribution for each ensemble with more than one replicum.

\n", "parameters": ["self"], "funcdef": "def"}, "pyerrors.obs.Obs.plot_history": {"fullname": "pyerrors.obs.Obs.plot_history", "modulename": "pyerrors.obs", "qualname": "Obs.plot_history", "type": "function", "doc": "

Plot derived Monte Carlo history for each ensemble.

\n", "parameters": ["self", "expand"], "funcdef": "def"}, "pyerrors.obs.Obs.plot_piechart": {"fullname": "pyerrors.obs.Obs.plot_piechart", "modulename": "pyerrors.obs", "qualname": "Obs.plot_piechart", "type": "function", "doc": "

Plot piechart which shows the fractional contribution of each\nensemble to the error and returns a dictionary containing the fractions.

\n", "parameters": ["self"], "funcdef": "def"}, "pyerrors.obs.Obs.dump": {"fullname": "pyerrors.obs.Obs.dump", "modulename": "pyerrors.obs", "qualname": "Obs.dump", "type": "function", "doc": "

Dump the Obs to a pickle file 'name'.

\n\n
Keyword arguments
\n\n

path -- specifies a custom path for the file (default '.')

\n", "parameters": ["self", "name", "kwargs"], "funcdef": "def"}, "pyerrors.obs.Obs.sqrt": {"fullname": "pyerrors.obs.Obs.sqrt", "modulename": "pyerrors.obs", "qualname": "Obs.sqrt", "type": "function", "doc": "

\n", "parameters": ["self"], "funcdef": "def"}, "pyerrors.obs.Obs.log": {"fullname": "pyerrors.obs.Obs.log", "modulename": "pyerrors.obs", "qualname": "Obs.log", "type": "function", "doc": "

\n", "parameters": ["self"], "funcdef": "def"}, "pyerrors.obs.Obs.exp": {"fullname": "pyerrors.obs.Obs.exp", "modulename": "pyerrors.obs", "qualname": "Obs.exp", "type": "function", "doc": "

\n", "parameters": ["self"], "funcdef": "def"}, "pyerrors.obs.Obs.sin": {"fullname": "pyerrors.obs.Obs.sin", "modulename": "pyerrors.obs", "qualname": "Obs.sin", "type": "function", "doc": "

\n", "parameters": ["self"], "funcdef": "def"}, "pyerrors.obs.Obs.cos": {"fullname": "pyerrors.obs.Obs.cos", "modulename": "pyerrors.obs", "qualname": "Obs.cos", "type": "function", "doc": "

\n", "parameters": ["self"], "funcdef": "def"}, "pyerrors.obs.Obs.tan": {"fullname": "pyerrors.obs.Obs.tan", "modulename": "pyerrors.obs", "qualname": "Obs.tan", "type": "function", "doc": "

\n", "parameters": ["self"], "funcdef": "def"}, "pyerrors.obs.Obs.arcsin": {"fullname": "pyerrors.obs.Obs.arcsin", "modulename": "pyerrors.obs", "qualname": "Obs.arcsin", "type": "function", "doc": "

\n", "parameters": ["self"], "funcdef": "def"}, "pyerrors.obs.Obs.arccos": {"fullname": "pyerrors.obs.Obs.arccos", "modulename": "pyerrors.obs", "qualname": "Obs.arccos", "type": "function", "doc": "

\n", "parameters": ["self"], "funcdef": "def"}, "pyerrors.obs.Obs.arctan": {"fullname": "pyerrors.obs.Obs.arctan", "modulename": "pyerrors.obs", "qualname": "Obs.arctan", "type": "function", "doc": "

\n", "parameters": ["self"], "funcdef": "def"}, "pyerrors.obs.Obs.sinh": {"fullname": "pyerrors.obs.Obs.sinh", "modulename": "pyerrors.obs", "qualname": "Obs.sinh", "type": "function", "doc": "

\n", "parameters": ["self"], "funcdef": "def"}, "pyerrors.obs.Obs.cosh": {"fullname": "pyerrors.obs.Obs.cosh", "modulename": "pyerrors.obs", "qualname": "Obs.cosh", "type": "function", "doc": "

\n", "parameters": ["self"], "funcdef": "def"}, "pyerrors.obs.Obs.tanh": {"fullname": "pyerrors.obs.Obs.tanh", "modulename": "pyerrors.obs", "qualname": "Obs.tanh", "type": "function", "doc": "

\n", "parameters": ["self"], "funcdef": "def"}, "pyerrors.obs.Obs.arcsinh": {"fullname": "pyerrors.obs.Obs.arcsinh", "modulename": "pyerrors.obs", "qualname": "Obs.arcsinh", "type": "function", "doc": "

\n", "parameters": ["self"], "funcdef": "def"}, "pyerrors.obs.Obs.arccosh": {"fullname": "pyerrors.obs.Obs.arccosh", "modulename": "pyerrors.obs", "qualname": "Obs.arccosh", "type": "function", "doc": "

\n", "parameters": ["self"], "funcdef": "def"}, "pyerrors.obs.Obs.arctanh": {"fullname": "pyerrors.obs.Obs.arctanh", "modulename": "pyerrors.obs", "qualname": "Obs.arctanh", "type": "function", "doc": "

\n", "parameters": ["self"], "funcdef": "def"}, "pyerrors.obs.Obs.sinc": {"fullname": "pyerrors.obs.Obs.sinc", "modulename": "pyerrors.obs", "qualname": "Obs.sinc", "type": "function", "doc": "

\n", "parameters": ["self"], "funcdef": "def"}, "pyerrors.obs.Obs.N_sigma": {"fullname": "pyerrors.obs.Obs.N_sigma", "modulename": "pyerrors.obs", "qualname": "Obs.N_sigma", "type": "variable", "doc": "

\n"}, "pyerrors.obs.Obs.S": {"fullname": "pyerrors.obs.Obs.S", "modulename": "pyerrors.obs", "qualname": "Obs.S", "type": "variable", "doc": "

\n"}, "pyerrors.obs.Obs.e_ddvalue": {"fullname": "pyerrors.obs.Obs.e_ddvalue", "modulename": "pyerrors.obs", "qualname": "Obs.e_ddvalue", "type": "variable", "doc": "

\n"}, "pyerrors.obs.Obs.e_drho": {"fullname": "pyerrors.obs.Obs.e_drho", "modulename": "pyerrors.obs", "qualname": "Obs.e_drho", "type": "variable", "doc": "

\n"}, "pyerrors.obs.Obs.e_dtauint": {"fullname": "pyerrors.obs.Obs.e_dtauint", "modulename": "pyerrors.obs", "qualname": "Obs.e_dtauint", "type": "variable", "doc": "

\n"}, "pyerrors.obs.Obs.e_dvalue": {"fullname": "pyerrors.obs.Obs.e_dvalue", "modulename": "pyerrors.obs", "qualname": "Obs.e_dvalue", "type": "variable", "doc": "

\n"}, "pyerrors.obs.Obs.e_n_dtauint": {"fullname": "pyerrors.obs.Obs.e_n_dtauint", "modulename": "pyerrors.obs", "qualname": "Obs.e_n_dtauint", "type": "variable", "doc": "

\n"}, "pyerrors.obs.Obs.e_n_tauint": {"fullname": "pyerrors.obs.Obs.e_n_tauint", "modulename": "pyerrors.obs", "qualname": "Obs.e_n_tauint", "type": "variable", "doc": "

\n"}, "pyerrors.obs.Obs.e_rho": {"fullname": "pyerrors.obs.Obs.e_rho", "modulename": "pyerrors.obs", "qualname": "Obs.e_rho", "type": "variable", "doc": "

\n"}, "pyerrors.obs.Obs.e_tauint": {"fullname": "pyerrors.obs.Obs.e_tauint", "modulename": "pyerrors.obs", "qualname": "Obs.e_tauint", "type": "variable", "doc": "

\n"}, "pyerrors.obs.Obs.e_windowsize": {"fullname": "pyerrors.obs.Obs.e_windowsize", "modulename": "pyerrors.obs", "qualname": "Obs.e_windowsize", "type": "variable", "doc": "

\n"}, "pyerrors.obs.Obs.tau_exp": {"fullname": "pyerrors.obs.Obs.tau_exp", "modulename": "pyerrors.obs", "qualname": "Obs.tau_exp", "type": "variable", "doc": "

\n"}, "pyerrors.obs.CObs": {"fullname": "pyerrors.obs.CObs", "modulename": "pyerrors.obs", "qualname": "CObs", "type": "class", "doc": "

Class for a complex valued observable.

\n"}, "pyerrors.obs.CObs.__init__": {"fullname": "pyerrors.obs.CObs.__init__", "modulename": "pyerrors.obs", "qualname": "CObs.__init__", "type": "function", "doc": "

\n", "parameters": ["self", "real", "imag"], "funcdef": "def"}, "pyerrors.obs.CObs.tag": {"fullname": "pyerrors.obs.CObs.tag", "modulename": "pyerrors.obs", "qualname": "CObs.tag", "type": "variable", "doc": "

\n"}, "pyerrors.obs.CObs.real": {"fullname": "pyerrors.obs.CObs.real", "modulename": "pyerrors.obs", "qualname": "CObs.real", "type": "variable", "doc": "

\n"}, "pyerrors.obs.CObs.imag": {"fullname": "pyerrors.obs.CObs.imag", "modulename": "pyerrors.obs", "qualname": "CObs.imag", "type": "variable", "doc": "

\n"}, "pyerrors.obs.CObs.gamma_method": {"fullname": "pyerrors.obs.CObs.gamma_method", "modulename": "pyerrors.obs", "qualname": "CObs.gamma_method", "type": "function", "doc": "

Executes the gamma_method for the real and the imaginary part.

\n", "parameters": ["self", "kwargs"], "funcdef": "def"}, "pyerrors.obs.CObs.is_zero": {"fullname": "pyerrors.obs.CObs.is_zero", "modulename": "pyerrors.obs", "qualname": "CObs.is_zero", "type": "function", "doc": "

Checks whether both real and imaginary part are zero within machine precision.

\n", "parameters": ["self"], "funcdef": "def"}, "pyerrors.obs.CObs.conjugate": {"fullname": "pyerrors.obs.CObs.conjugate", "modulename": "pyerrors.obs", "qualname": "CObs.conjugate", "type": "function", "doc": "

\n", "parameters": ["self"], "funcdef": "def"}, "pyerrors.obs.merge_idx": {"fullname": "pyerrors.obs.merge_idx", "modulename": "pyerrors.obs", "qualname": "merge_idx", "type": "function", "doc": "

Returns the union of all lists in idl

\n\n
Parameters
\n\n
    \n
  • idl -- List of lists or ranges.
  • \n
\n", "parameters": ["idl"], "funcdef": "def"}, "pyerrors.obs.expand_deltas_for_merge": {"fullname": "pyerrors.obs.expand_deltas_for_merge", "modulename": "pyerrors.obs", "qualname": "expand_deltas_for_merge", "type": "function", "doc": "

Expand deltas defined on idx to the list of configs that is defined by new_idx.\n New, empty entries are filled by 0. If idx and new_idx are of type range, the greatest\n common divisor of the step sizes is used as new step size.

\n\n
Parameters
\n\n
    \n
  • deltas (list):\nList of fluctuations
  • \n
  • idx (list):\nList or range of configs on which the deltas are defined.\nHas to be a subset of new_idx.
  • \n
  • shape (list):\nNumber of configs in idx.
  • \n
  • new_idx (list):\nList of configs that defines the new range.
  • \n
\n", "parameters": ["deltas", "idx", "shape", "new_idx"], "funcdef": "def"}, "pyerrors.obs.filter_zeroes": {"fullname": "pyerrors.obs.filter_zeroes", "modulename": "pyerrors.obs", "qualname": "filter_zeroes", "type": "function", "doc": "

Filter out all configurations with vanishing fluctuation such that they do not\n contribute to the error estimate anymore. Returns the new names, deltas and\n idl according to the filtering.\n A fluctuation is considered to be vanishing, if it is smaller than eps times\n the mean of the absolute values of all deltas in one list.

\n\n
Parameters
\n\n
    \n
  • names -- List of names
  • \n
  • deltas -- Dict lists of fluctuations
  • \n
  • idx -- Dict of lists or ranges of configs on which the deltas are defined.
  • \n
\n\n
Optional parameters
\n\n

eps -- Prefactor that enters the filter criterion.

\n", "parameters": ["names", "deltas", "idl", "eps"], "funcdef": "def"}, "pyerrors.obs.derived_observable": {"fullname": "pyerrors.obs.derived_observable", "modulename": "pyerrors.obs", "qualname": "derived_observable", "type": "function", "doc": "

Construct a derived Obs according to func(data, **kwargs) using automatic differentiation.

\n\n
Parameters
\n\n
    \n
  • func (object):\narbitrary function of the form func(data, **kwargs). For the\nautomatic differentiation to work, all numpy functions have to have\nthe autograd wrapper (use 'import autograd.numpy as anp').
  • \n
  • data (list):\nlist of Obs, e.g. [obs1, obs2, obs3].
  • \n
\n\n
Keyword arguments
\n\n

num_grad : bool\n if True, numerical derivatives are used instead of autograd\n (default False). To control the numerical differentiation the\n kwargs of numdifftools.step_generators.MaxStepGenerator\n can be used.\nman_grad : list\n manually supply a list or an array which contains the jacobian\n of func. Use cautiously, supplying the wrong derivative will\n not be intercepted.

\n\n
Notes
\n\n

For simple mathematical operations it can be practical to use anonymous\nfunctions. For the ratio of two observables one can e.g. use

\n\n

new_obs = derived_observable(lambda x: x[0] / x[1], [obs1, obs2])

\n", "parameters": ["func", "data", "kwargs"], "funcdef": "def"}, "pyerrors.obs.reduce_deltas": {"fullname": "pyerrors.obs.reduce_deltas", "modulename": "pyerrors.obs", "qualname": "reduce_deltas", "type": "function", "doc": "

Extract deltas defined on idx_old on all configs of idx_new.

\n\n
Parameters
\n\n
    \n
  • deltas -- List of fluctuations
  • \n
  • idx_old -- List or range of configs on which the deltas are defined
  • \n
  • idx_new -- List of configs for which we want to extract the deltas. Has to be a subset of idx_old.
  • \n
\n", "parameters": ["deltas", "idx_old", "idx_new"], "funcdef": "def"}, "pyerrors.obs.reweight": {"fullname": "pyerrors.obs.reweight", "modulename": "pyerrors.obs", "qualname": "reweight", "type": "function", "doc": "

Reweight a list of observables.

\n\n
Parameters
\n\n
    \n
  • weight (Obs):\nReweighting factor. An Observable that has to be defined on a superset of the\nconfigurations in obs[i].idl for all i.
  • \n
  • obs (list):\nlist of Obs, e.g. [obs1, obs2, obs3].
  • \n
\n\n
Keyword arguments
\n\n

all_configs : bool\n if True, the reweighted observables are normalized by the average of\n the reweighting factor on all configurations in weight.idl and not\n on the configurations in obs[i].idl.
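A minimal sketch; rw_factor, obs1 and obs2 are hypothetical, and rw_factor has to be defined on a superset of their configurations:

import pyerrors as pe

reweighted_list = pe.reweight(rw_factor, [obs1, obs2])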

\n", "parameters": ["weight", "obs", "kwargs"], "funcdef": "def"}, "pyerrors.obs.correlate": {"fullname": "pyerrors.obs.correlate", "modulename": "pyerrors.obs", "qualname": "correlate", "type": "function", "doc": "

Correlate two observables.

\n\n

Attributes:

\n\n

obs_a : Obs\n First observable\nobs_b : Obs\n Second observable

\n\n

Keep in mind to only correlate primary observables which have not been reweighted\nyet. The reweighting has to be applied after correlating the observables.\nCurrently this only works if the ensembles are identical; this restriction is not fundamental.

\n", "parameters": ["obs_a", "obs_b"], "funcdef": "def"}, "pyerrors.obs.covariance": {"fullname": "pyerrors.obs.covariance", "modulename": "pyerrors.obs", "qualname": "covariance", "type": "function", "doc": "

Calculates the covariance of two observables.

\n\n

covariance(obs, obs) is equal to obs.dvalue ** 2\nThe gamma method has to be applied first to both observables.

\n\n

If abs(covariance(obs1, obs2)) > obs1.dvalue * obs2.dvalue, the covariance\nis constrained to the maximum value in order to make sure that covariance\nmatrices are positive semidefinite.

\n\n
Keyword arguments
\n\n

correlation -- if true the correlation instead of the covariance is\n returned (default False)
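For example (obs1 and obs2 stand for any two Obs; the gamma method has to be run first):

import pyerrors as pe

obs1.gamma_method()
obs2.gamma_method()
cov = pe.covariance(obs1, obs2)
corr = pe.covariance(obs1, obs2, correlation=True)  # correlation coefficient instead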

\n", "parameters": ["obs1", "obs2", "correlation", "kwargs"], "funcdef": "def"}, "pyerrors.obs.covariance2": {"fullname": "pyerrors.obs.covariance2", "modulename": "pyerrors.obs", "qualname": "covariance2", "type": "function", "doc": "

Alternative implementation of the covariance of two observables.

\n\n

covariance(obs, obs) is equal to obs.dvalue ** 2\nThe gamma method has to be applied first to both observables.

\n\n

If abs(covariance(obs1, obs2)) > obs1.dvalue * obs2.dvalue, the covariance\nis constrained to the maximum value in order to make sure that covariance\nmatrices are positive semidefinite.

\n\n
Keyword arguments
\n\n

correlation -- if true the correlation instead of the covariance is\n returned (default False)

\n", "parameters": ["obs1", "obs2", "correlation", "kwargs"], "funcdef": "def"}, "pyerrors.obs.covariance3": {"fullname": "pyerrors.obs.covariance3", "modulename": "pyerrors.obs", "qualname": "covariance3", "type": "function", "doc": "

Another alternative implementation of the covariance of two observables.

\n\n

covariance2(obs, obs) is equal to obs.dvalue ** 2\nCurrently only works if ensembles are identical.\nThe gamma method has to be applied first to both observables.

\n\n

If abs(covariance2(obs1, obs2)) > obs1.dvalue * obs2.dvalue, the covariance\nis constrained to the maximum value in order to make sure that covariance\nmatrices are positive semidefinite.

\n\n
Keyword arguments
\n\n

correlation -- if true the correlation instead of the covariance is\n returned (default False)\nplot -- if true, the integrated autocorrelation time for each ensemble is\n plotted.

\n", "parameters": ["obs1", "obs2", "correlation", "kwargs"], "funcdef": "def"}, "pyerrors.obs.pseudo_Obs": {"fullname": "pyerrors.obs.pseudo_Obs", "modulename": "pyerrors.obs", "qualname": "pseudo_Obs", "type": "function", "doc": "

Generate a pseudo Obs with given value, dvalue and name

\n\n

The standard number of samples is 1000. This can be adjusted.
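For example, a pseudo Obs with central value 1.2 and error 0.05 on a fictitious ensemble:

import pyerrors as pe

test_obs = pe.pseudo_Obs(1.2, 0.05, 'test_ensemble')
test_obs.gamma_method()
print(test_obs)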

\n", "parameters": ["value", "dvalue", "name", "samples"], "funcdef": "def"}, "pyerrors.obs.dump_object": {"fullname": "pyerrors.obs.dump_object", "modulename": "pyerrors.obs", "qualname": "dump_object", "type": "function", "doc": "

Dump object into pickle file.

\n\n
Keyword arguments
\n\n

path -- specifies a custom path for the file (default '.')

\n", "parameters": ["obj", "name", "kwargs"], "funcdef": "def"}, "pyerrors.obs.load_object": {"fullname": "pyerrors.obs.load_object", "modulename": "pyerrors.obs", "qualname": "load_object", "type": "function", "doc": "

Load object from pickle file.

\n", "parameters": ["path"], "funcdef": "def"}, "pyerrors.obs.merge_obs": {"fullname": "pyerrors.obs.merge_obs", "modulename": "pyerrors.obs", "qualname": "merge_obs", "type": "function", "doc": "

Combine all observables in list_of_obs into one new observable

\n\n

It is not possible to combine obs which are based on the same replicum

\n", "parameters": ["list_of_obs"], "funcdef": "def"}, "pyerrors.roots": {"fullname": "pyerrors.roots", "modulename": "pyerrors.roots", "qualname": "", "type": "module", "doc": "

\n"}, "pyerrors.roots.find_root": {"fullname": "pyerrors.roots.find_root", "modulename": "pyerrors.roots", "qualname": "find_root", "type": "function", "doc": "

Finds the root of the function func(x, d) where d is an Obs.

\n\n
Parameters
\n\n
    \n
  • d -- Obs passed to the function.
  • \n
  • func -- Function whose root is to be found. Any numpy functions have to use the autograd.numpy wrapper
  • \n
  • guess -- Initial guess for the root search.
  • \n
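As a sketch, solving x ** 2 - d = 0 for an Obs d (input values are illustrative):

import pyerrors as pe

d = pe.pseudo_Obs(2.0, 0.1, 'test_ensemble')
root = pe.roots.find_root(d, lambda x, d: x ** 2 - d, guess=1.0)  # Obs carrying the propagated error of d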
\n", "parameters": ["d", "func", "guess", "kwargs"], "funcdef": "def"}, "pyerrors.version": {"fullname": "pyerrors.version", "modulename": "pyerrors.version", "qualname": "", "type": "module", "doc": "

\n"}}, "docInfo": {"pyerrors": {"qualname": 0, "fullname": 1, "doc": 109}, "pyerrors.correlators": {"qualname": 0, "fullname": 2, "doc": 0}, "pyerrors.correlators.Corr": {"qualname": 1, "fullname": 3, "doc": 51}, "pyerrors.correlators.Corr.__init__": {"qualname": 2, "fullname": 4, "doc": 0}, "pyerrors.correlators.Corr.reweighted": {"qualname": 2, "fullname": 4, "doc": 0}, "pyerrors.correlators.Corr.gamma_method": {"qualname": 2, "fullname": 4, "doc": 5}, "pyerrors.correlators.Corr.projected": {"qualname": 2, "fullname": 4, "doc": 0}, "pyerrors.correlators.Corr.sum": {"qualname": 2, "fullname": 4, "doc": 0}, "pyerrors.correlators.Corr.smearing": {"qualname": 2, "fullname": 4, "doc": 0}, "pyerrors.correlators.Corr.plottable": {"qualname": 2, "fullname": 4, "doc": 16}, "pyerrors.correlators.Corr.symmetric": {"qualname": 2, "fullname": 4, "doc": 4}, "pyerrors.correlators.Corr.anti_symmetric": {"qualname": 2, "fullname": 4, "doc": 5}, "pyerrors.correlators.Corr.smearing_symmetric": {"qualname": 2, "fullname": 4, "doc": 0}, "pyerrors.correlators.Corr.GEVP": {"qualname": 2, "fullname": 4, "doc": 0}, "pyerrors.correlators.Corr.Eigenvalue": {"qualname": 2, "fullname": 4, "doc": 0}, "pyerrors.correlators.Corr.roll": {"qualname": 2, "fullname": 4, "doc": 10}, "pyerrors.correlators.Corr.reverse": {"qualname": 2, "fullname": 4, "doc": 4}, "pyerrors.correlators.Corr.correlate": {"qualname": 2, "fullname": 4, "doc": 5}, "pyerrors.correlators.Corr.reweight": {"qualname": 2, "fullname": 4, "doc": 30}, "pyerrors.correlators.Corr.T_symmetry": {"qualname": 2, "fullname": 4, "doc": 21}, "pyerrors.correlators.Corr.deriv": {"qualname": 2, "fullname": 4, "doc": 18}, "pyerrors.correlators.Corr.second_deriv": {"qualname": 2, "fullname": 4, "doc": 6}, "pyerrors.correlators.Corr.m_eff": {"qualname": 2, "fullname": 4, "doc": 60}, "pyerrors.correlators.Corr.fit": {"qualname": 2, "fullname": 4, "doc": 32}, "pyerrors.correlators.Corr.plateau": {"qualname": 2, "fullname": 4, "doc": 34}, "pyerrors.correlators.Corr.set_prange": {"qualname": 2, "fullname": 4, "doc": 5}, "pyerrors.correlators.Corr.show": {"qualname": 2, "fullname": 4, "doc": 56}, "pyerrors.correlators.Corr.dump": {"qualname": 2, "fullname": 4, "doc": 9}, "pyerrors.correlators.Corr.print": {"qualname": 2, "fullname": 4, "doc": 0}, "pyerrors.correlators.Corr.sqrt": {"qualname": 2, "fullname": 4, "doc": 0}, "pyerrors.correlators.Corr.log": {"qualname": 2, "fullname": 4, "doc": 0}, "pyerrors.correlators.Corr.exp": {"qualname": 2, "fullname": 4, "doc": 0}, "pyerrors.correlators.Corr.sin": {"qualname": 2, "fullname": 4, "doc": 0}, "pyerrors.correlators.Corr.cos": {"qualname": 2, "fullname": 4, "doc": 0}, "pyerrors.correlators.Corr.tan": {"qualname": 2, "fullname": 4, "doc": 0}, "pyerrors.correlators.Corr.sinh": {"qualname": 2, "fullname": 4, "doc": 0}, "pyerrors.correlators.Corr.cosh": {"qualname": 2, "fullname": 4, "doc": 0}, "pyerrors.correlators.Corr.tanh": {"qualname": 2, "fullname": 4, "doc": 0}, "pyerrors.correlators.Corr.arcsin": {"qualname": 2, "fullname": 4, "doc": 0}, "pyerrors.correlators.Corr.arccos": {"qualname": 2, "fullname": 4, "doc": 0}, "pyerrors.correlators.Corr.arctan": {"qualname": 2, "fullname": 4, "doc": 0}, "pyerrors.correlators.Corr.arcsinh": {"qualname": 2, "fullname": 4, "doc": 0}, "pyerrors.correlators.Corr.arccosh": {"qualname": 2, "fullname": 4, "doc": 0}, "pyerrors.correlators.Corr.arctanh": {"qualname": 2, "fullname": 4, "doc": 0}, "pyerrors.dirac": {"qualname": 0, "fullname": 2, "doc": 0}, "pyerrors.dirac.Grid_gamma": {"qualname": 
1, "fullname": 3, "doc": 5}, "pyerrors.fits": {"qualname": 0, "fullname": 2, "doc": 0}, "pyerrors.fits.Fit_result": {"qualname": 1, "fullname": 3, "doc": 13}, "pyerrors.fits.Fit_result.__init__": {"qualname": 2, "fullname": 4, "doc": 0}, "pyerrors.fits.Fit_result.gamma_method": {"qualname": 2, "fullname": 4, "doc": 5}, "pyerrors.fits.least_squares": {"qualname": 1, "fullname": 3, "doc": 179}, "pyerrors.fits.standard_fit": {"qualname": 1, "fullname": 3, "doc": 0}, "pyerrors.fits.odr_fit": {"qualname": 1, "fullname": 3, "doc": 0}, "pyerrors.fits.total_least_squares": {"qualname": 1, "fullname": 3, "doc": 118}, "pyerrors.fits.prior_fit": {"qualname": 1, "fullname": 3, "doc": 0}, "pyerrors.fits.fit_lin": {"qualname": 1, "fullname": 3, "doc": 33}, "pyerrors.fits.qqplot": {"qualname": 1, "fullname": 3, "doc": 12}, "pyerrors.fits.residual_plot": {"qualname": 1, "fullname": 3, "doc": 8}, "pyerrors.fits.covariance_matrix": {"qualname": 1, "fullname": 3, "doc": 4}, "pyerrors.fits.error_band": {"qualname": 1, "fullname": 3, "doc": 14}, "pyerrors.fits.ks_test": {"qualname": 1, "fullname": 3, "doc": 20}, "pyerrors.fits.fit_general": {"qualname": 1, "fullname": 3, "doc": 79}, "pyerrors.input": {"qualname": 0, "fullname": 2, "doc": 0}, "pyerrors.input.bdio": {"qualname": 0, "fullname": 3, "doc": 0}, "pyerrors.input.bdio.read_ADerrors": {"qualname": 1, "fullname": 4, "doc": 46}, "pyerrors.input.bdio.write_ADerrors": {"qualname": 1, "fullname": 4, "doc": 47}, "pyerrors.input.bdio.read_mesons": {"qualname": 1, "fullname": 4, "doc": 68}, "pyerrors.input.bdio.read_dSdm": {"qualname": 1, "fullname": 4, "doc": 61}, "pyerrors.input.hadrons": {"qualname": 0, "fullname": 3, "doc": 0}, "pyerrors.input.hadrons.read_meson_hd5": {"qualname": 1, "fullname": 4, "doc": 59}, "pyerrors.input.hadrons.read_ExternalLeg_hd5": {"qualname": 1, "fullname": 4, "doc": 44}, "pyerrors.input.hadrons.read_Bilinear_hd5": {"qualname": 1, "fullname": 4, "doc": 44}, "pyerrors.input.misc": {"qualname": 0, "fullname": 3, "doc": 0}, "pyerrors.input.misc.read_pbp": {"qualname": 1, "fullname": 4, "doc": 28}, "pyerrors.input.openQCD": {"qualname": 0, "fullname": 3, "doc": 0}, "pyerrors.input.openQCD.read_rwms": {"qualname": 1, "fullname": 4, "doc": 44}, "pyerrors.input.openQCD.extract_t0": {"qualname": 1, "fullname": 4, "doc": 108}, "pyerrors.input.sfcf": {"qualname": 0, "fullname": 3, "doc": 0}, "pyerrors.input.sfcf.read_sfcf": {"qualname": 1, "fullname": 4, "doc": 42}, "pyerrors.input.sfcf.read_sfcf_c": {"qualname": 1, "fullname": 4, "doc": 65}, "pyerrors.input.sfcf.read_qtop": {"qualname": 1, "fullname": 4, "doc": 22}, "pyerrors.jackknifing": {"qualname": 0, "fullname": 2, "doc": 0}, "pyerrors.jackknifing.Jack": {"qualname": 1, "fullname": 3, "doc": 0}, "pyerrors.jackknifing.Jack.__init__": {"qualname": 2, "fullname": 4, "doc": 0}, "pyerrors.jackknifing.Jack.print": {"qualname": 2, "fullname": 4, "doc": 4}, "pyerrors.jackknifing.Jack.plot_tauint": {"qualname": 2, "fullname": 4, "doc": 0}, "pyerrors.jackknifing.Jack.plot_history": {"qualname": 2, "fullname": 4, "doc": 0}, "pyerrors.jackknifing.Jack.dump": {"qualname": 2, "fullname": 4, "doc": 13}, "pyerrors.jackknifing.generate_jack": {"qualname": 1, "fullname": 3, "doc": 0}, "pyerrors.jackknifing.derived_jack": {"qualname": 1, "fullname": 3, "doc": 55}, "pyerrors.linalg": {"qualname": 0, "fullname": 2, "doc": 0}, "pyerrors.linalg.derived_array": {"qualname": 1, "fullname": 3, "doc": 55}, "pyerrors.linalg.matmul": {"qualname": 1, "fullname": 3, "doc": 14}, "pyerrors.linalg.inv": {"qualname": 
1, "fullname": 3, "doc": 5}, "pyerrors.linalg.cholesky": {"qualname": 1, "fullname": 3, "doc": 6}, "pyerrors.linalg.scalar_mat_op": {"qualname": 1, "fullname": 3, "doc": 8}, "pyerrors.linalg.eigh": {"qualname": 1, "fullname": 3, "doc": 11}, "pyerrors.linalg.eig": {"qualname": 1, "fullname": 3, "doc": 9}, "pyerrors.linalg.pinv": {"qualname": 1, "fullname": 3, "doc": 6}, "pyerrors.linalg.svd": {"qualname": 1, "fullname": 3, "doc": 6}, "pyerrors.linalg.slogdet": {"qualname": 1, "fullname": 3, "doc": 8}, "pyerrors.linalg.grad_eig": {"qualname": 1, "fullname": 3, "doc": 6}, "pyerrors.misc": {"qualname": 0, "fullname": 2, "doc": 0}, "pyerrors.misc.gen_correlated_data": {"qualname": 1, "fullname": 3, "doc": 36}, "pyerrors.mpm": {"qualname": 0, "fullname": 2, "doc": 0}, "pyerrors.mpm.matrix_pencil_method": {"qualname": 1, "fullname": 3, "doc": 72}, "pyerrors.mpm.matrix_pencil_method_old": {"qualname": 1, "fullname": 3, "doc": 70}, "pyerrors.npr": {"qualname": 0, "fullname": 2, "doc": 0}, "pyerrors.npr.Npr_matrix": {"qualname": 1, "fullname": 3, "doc": 425}, "pyerrors.npr.Npr_matrix.__init__": {"qualname": 2, "fullname": 4, "doc": 0}, "pyerrors.npr.Npr_matrix.g5H": {"qualname": 2, "fullname": 4, "doc": 16}, "pyerrors.npr.inv_propagator": {"qualname": 1, "fullname": 3, "doc": 4}, "pyerrors.npr.Zq": {"qualname": 1, "fullname": 3, "doc": 23}, "pyerrors.obs": {"qualname": 0, "fullname": 2, "doc": 0}, "pyerrors.obs.Obs": {"qualname": 1, "fullname": 3, "doc": 94}, "pyerrors.obs.Obs.__init__": {"qualname": 2, "fullname": 4, "doc": 40}, "pyerrors.obs.Obs.S_global": {"qualname": 2, "fullname": 4, "doc": 0}, "pyerrors.obs.Obs.S_dict": {"qualname": 2, "fullname": 4, "doc": 0}, "pyerrors.obs.Obs.tau_exp_global": {"qualname": 2, "fullname": 4, "doc": 0}, "pyerrors.obs.Obs.tau_exp_dict": {"qualname": 2, "fullname": 4, "doc": 0}, "pyerrors.obs.Obs.N_sigma_global": {"qualname": 2, "fullname": 4, "doc": 0}, "pyerrors.obs.Obs.filter_eps": {"qualname": 2, "fullname": 4, "doc": 0}, "pyerrors.obs.Obs.names": {"qualname": 2, "fullname": 4, "doc": 0}, "pyerrors.obs.Obs.shape": {"qualname": 2, "fullname": 4, "doc": 0}, "pyerrors.obs.Obs.r_values": {"qualname": 2, "fullname": 4, "doc": 0}, "pyerrors.obs.Obs.deltas": {"qualname": 2, "fullname": 4, "doc": 0}, "pyerrors.obs.Obs.idl": {"qualname": 2, "fullname": 4, "doc": 0}, "pyerrors.obs.Obs.is_merged": {"qualname": 2, "fullname": 4, "doc": 0}, "pyerrors.obs.Obs.N": {"qualname": 2, "fullname": 4, "doc": 0}, "pyerrors.obs.Obs.ddvalue": {"qualname": 2, "fullname": 4, "doc": 0}, "pyerrors.obs.Obs.reweighted": {"qualname": 2, "fullname": 4, "doc": 0}, "pyerrors.obs.Obs.tag": {"qualname": 2, "fullname": 4, "doc": 0}, "pyerrors.obs.Obs.value": {"qualname": 2, "fullname": 4, "doc": 0}, "pyerrors.obs.Obs.dvalue": {"qualname": 2, "fullname": 4, "doc": 0}, "pyerrors.obs.Obs.e_names": {"qualname": 2, "fullname": 4, "doc": 0}, "pyerrors.obs.Obs.e_content": {"qualname": 2, "fullname": 4, "doc": 0}, "pyerrors.obs.Obs.expand_deltas": {"qualname": 2, "fullname": 4, "doc": 29}, "pyerrors.obs.Obs.calc_gamma": {"qualname": 2, "fullname": 4, "doc": 41}, "pyerrors.obs.Obs.gamma_method": {"qualname": 2, "fullname": 4, "doc": 64}, "pyerrors.obs.Obs.print": {"qualname": 2, "fullname": 4, "doc": 0}, "pyerrors.obs.Obs.details": {"qualname": 2, "fullname": 4, "doc": 4}, "pyerrors.obs.Obs.is_zero_within_error": {"qualname": 2, "fullname": 4, "doc": 13}, "pyerrors.obs.Obs.is_zero": {"qualname": 2, "fullname": 4, "doc": 7}, "pyerrors.obs.Obs.plot_tauint": {"qualname": 2, "fullname": 4, "doc": 6}, 
"pyerrors.obs.Obs.plot_rho": {"qualname": 2, "fullname": 4, "doc": 7}, "pyerrors.obs.Obs.plot_rep_dist": {"qualname": 2, "fullname": 4, "doc": 8}, "pyerrors.obs.Obs.plot_history": {"qualname": 2, "fullname": 4, "doc": 7}, "pyerrors.obs.Obs.plot_piechart": {"qualname": 2, "fullname": 4, "doc": 12}, "pyerrors.obs.Obs.dump": {"qualname": 2, "fullname": 4, "doc": 13}, "pyerrors.obs.Obs.sqrt": {"qualname": 2, "fullname": 4, "doc": 0}, "pyerrors.obs.Obs.log": {"qualname": 2, "fullname": 4, "doc": 0}, "pyerrors.obs.Obs.exp": {"qualname": 2, "fullname": 4, "doc": 0}, "pyerrors.obs.Obs.sin": {"qualname": 2, "fullname": 4, "doc": 0}, "pyerrors.obs.Obs.cos": {"qualname": 2, "fullname": 4, "doc": 0}, "pyerrors.obs.Obs.tan": {"qualname": 2, "fullname": 4, "doc": 0}, "pyerrors.obs.Obs.arcsin": {"qualname": 2, "fullname": 4, "doc": 0}, "pyerrors.obs.Obs.arccos": {"qualname": 2, "fullname": 4, "doc": 0}, "pyerrors.obs.Obs.arctan": {"qualname": 2, "fullname": 4, "doc": 0}, "pyerrors.obs.Obs.sinh": {"qualname": 2, "fullname": 4, "doc": 0}, "pyerrors.obs.Obs.cosh": {"qualname": 2, "fullname": 4, "doc": 0}, "pyerrors.obs.Obs.tanh": {"qualname": 2, "fullname": 4, "doc": 0}, "pyerrors.obs.Obs.arcsinh": {"qualname": 2, "fullname": 4, "doc": 0}, "pyerrors.obs.Obs.arccosh": {"qualname": 2, "fullname": 4, "doc": 0}, "pyerrors.obs.Obs.arctanh": {"qualname": 2, "fullname": 4, "doc": 0}, "pyerrors.obs.Obs.sinc": {"qualname": 2, "fullname": 4, "doc": 0}, "pyerrors.obs.Obs.N_sigma": {"qualname": 2, "fullname": 4, "doc": 0}, "pyerrors.obs.Obs.S": {"qualname": 2, "fullname": 4, "doc": 0}, "pyerrors.obs.Obs.e_ddvalue": {"qualname": 2, "fullname": 4, "doc": 0}, "pyerrors.obs.Obs.e_drho": {"qualname": 2, "fullname": 4, "doc": 0}, "pyerrors.obs.Obs.e_dtauint": {"qualname": 2, "fullname": 4, "doc": 0}, "pyerrors.obs.Obs.e_dvalue": {"qualname": 2, "fullname": 4, "doc": 0}, "pyerrors.obs.Obs.e_n_dtauint": {"qualname": 2, "fullname": 4, "doc": 0}, "pyerrors.obs.Obs.e_n_tauint": {"qualname": 2, "fullname": 4, "doc": 0}, "pyerrors.obs.Obs.e_rho": {"qualname": 2, "fullname": 4, "doc": 0}, "pyerrors.obs.Obs.e_tauint": {"qualname": 2, "fullname": 4, "doc": 0}, "pyerrors.obs.Obs.e_windowsize": {"qualname": 2, "fullname": 4, "doc": 0}, "pyerrors.obs.Obs.tau_exp": {"qualname": 2, "fullname": 4, "doc": 0}, "pyerrors.obs.CObs": {"qualname": 1, "fullname": 3, "doc": 4}, "pyerrors.obs.CObs.__init__": {"qualname": 2, "fullname": 4, "doc": 0}, "pyerrors.obs.CObs.tag": {"qualname": 2, "fullname": 4, "doc": 0}, "pyerrors.obs.CObs.real": {"qualname": 2, "fullname": 4, "doc": 0}, "pyerrors.obs.CObs.imag": {"qualname": 2, "fullname": 4, "doc": 0}, "pyerrors.obs.CObs.gamma_method": {"qualname": 2, "fullname": 4, "doc": 5}, "pyerrors.obs.CObs.is_zero": {"qualname": 2, "fullname": 4, "doc": 10}, "pyerrors.obs.CObs.conjugate": {"qualname": 2, "fullname": 4, "doc": 0}, "pyerrors.obs.merge_idx": {"qualname": 1, "fullname": 3, "doc": 9}, "pyerrors.obs.expand_deltas_for_merge": {"qualname": 1, "fullname": 3, "doc": 52}, "pyerrors.obs.filter_zeroes": {"qualname": 1, "fullname": 3, "doc": 53}, "pyerrors.obs.derived_observable": {"qualname": 1, "fullname": 3, "doc": 95}, "pyerrors.obs.reduce_deltas": {"qualname": 1, "fullname": 3, "doc": 24}, "pyerrors.obs.reweight": {"qualname": 1, "fullname": 3, "doc": 40}, "pyerrors.obs.correlate": {"qualname": 1, "fullname": 3, "doc": 28}, "pyerrors.obs.covariance": {"qualname": 1, "fullname": 3, "doc": 44}, "pyerrors.obs.covariance2": {"qualname": 1, "fullname": 3, "doc": 45}, "pyerrors.obs.covariance3": {"qualname": 1, 
"fullname": 3, "doc": 58}, "pyerrors.obs.pseudo_Obs": {"qualname": 1, "fullname": 3, "doc": 12}, "pyerrors.obs.dump_object": {"qualname": 1, "fullname": 3, "doc": 12}, "pyerrors.obs.load_object": {"qualname": 1, "fullname": 3, "doc": 4}, "pyerrors.obs.merge_obs": {"qualname": 1, "fullname": 3, "doc": 12}, "pyerrors.roots": {"qualname": 0, "fullname": 2, "doc": 0}, "pyerrors.roots.find_root": {"qualname": 1, "fullname": 3, "doc": 25}, "pyerrors.version": {"qualname": 0, "fullname": 2, "doc": 0}}, "length": 202, "save": true}, "index": {"qualname": {"root": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {"pyerrors.correlators.Corr.cos": {"tf": 1}, "pyerrors.obs.Obs.cos": {"tf": 1}}, "df": 2, "r": {"docs": {}, "df": 0, "r": {"docs": {"pyerrors.correlators.Corr": {"tf": 1}, "pyerrors.correlators.Corr.__init__": {"tf": 1}, "pyerrors.correlators.Corr.reweighted": {"tf": 1}, "pyerrors.correlators.Corr.gamma_method": {"tf": 1}, "pyerrors.correlators.Corr.projected": {"tf": 1}, "pyerrors.correlators.Corr.sum": {"tf": 1}, "pyerrors.correlators.Corr.smearing": {"tf": 1}, "pyerrors.correlators.Corr.plottable": {"tf": 1}, "pyerrors.correlators.Corr.symmetric": {"tf": 1}, "pyerrors.correlators.Corr.anti_symmetric": {"tf": 1}, "pyerrors.correlators.Corr.smearing_symmetric": {"tf": 1}, "pyerrors.correlators.Corr.GEVP": {"tf": 1}, "pyerrors.correlators.Corr.Eigenvalue": {"tf": 1}, "pyerrors.correlators.Corr.roll": {"tf": 1}, "pyerrors.correlators.Corr.reverse": {"tf": 1}, "pyerrors.correlators.Corr.correlate": {"tf": 1}, "pyerrors.correlators.Corr.reweight": {"tf": 1}, "pyerrors.correlators.Corr.T_symmetry": {"tf": 1}, "pyerrors.correlators.Corr.deriv": {"tf": 1}, "pyerrors.correlators.Corr.second_deriv": {"tf": 1}, "pyerrors.correlators.Corr.m_eff": {"tf": 1}, "pyerrors.correlators.Corr.fit": {"tf": 1}, "pyerrors.correlators.Corr.plateau": {"tf": 1}, "pyerrors.correlators.Corr.set_prange": {"tf": 1}, "pyerrors.correlators.Corr.show": {"tf": 1}, "pyerrors.correlators.Corr.dump": {"tf": 1}, "pyerrors.correlators.Corr.print": {"tf": 1}, "pyerrors.correlators.Corr.sqrt": {"tf": 1}, "pyerrors.correlators.Corr.log": {"tf": 1}, "pyerrors.correlators.Corr.exp": {"tf": 1}, "pyerrors.correlators.Corr.sin": {"tf": 1}, "pyerrors.correlators.Corr.cos": {"tf": 1}, "pyerrors.correlators.Corr.tan": {"tf": 1}, "pyerrors.correlators.Corr.sinh": {"tf": 1}, "pyerrors.correlators.Corr.cosh": {"tf": 1}, "pyerrors.correlators.Corr.tanh": {"tf": 1}, "pyerrors.correlators.Corr.arcsin": {"tf": 1}, "pyerrors.correlators.Corr.arccos": {"tf": 1}, "pyerrors.correlators.Corr.arctan": {"tf": 1}, "pyerrors.correlators.Corr.arcsinh": {"tf": 1}, "pyerrors.correlators.Corr.arccosh": {"tf": 1}, "pyerrors.correlators.Corr.arctanh": {"tf": 1}}, "df": 42, "e": {"docs": {}, "df": 0, "l": {"docs": {"pyerrors.correlators.Corr.correlate": {"tf": 1}, "pyerrors.obs.correlate": {"tf": 1}}, "df": 2}}}}, "s": {"docs": {}, "df": 0, "h": {"docs": {"pyerrors.correlators.Corr.cosh": {"tf": 1}, "pyerrors.obs.Obs.cosh": {"tf": 1}}, "df": 2}}, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {"pyerrors.obs.covariance": {"tf": 1}}, "df": 1, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"2": {"docs": {"pyerrors.obs.covariance2": {"tf": 1}}, "df": 1}, "3": {"docs": {"pyerrors.obs.covariance3": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, 
"df": 0, "i": {"docs": {}, "df": 0, "x": {"docs": {"pyerrors.fits.covariance_matrix": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}, "b": {"docs": {"pyerrors.obs.CObs": {"tf": 1}, "pyerrors.obs.CObs.__init__": {"tf": 1}, "pyerrors.obs.CObs.tag": {"tf": 1}, "pyerrors.obs.CObs.real": {"tf": 1}, "pyerrors.obs.CObs.imag": {"tf": 1}, "pyerrors.obs.CObs.gamma_method": {"tf": 1}, "pyerrors.obs.CObs.is_zero": {"tf": 1}, "pyerrors.obs.CObs.conjugate": {"tf": 1}}, "df": 8}, "n": {"docs": {}, "df": 0, "j": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "g": {"docs": {"pyerrors.obs.CObs.conjugate": {"tf": 1}}, "df": 1}}}}}, "h": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "i": {"docs": {"pyerrors.linalg.cholesky": {"tf": 1}}, "df": 1}}}}}}}, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {"pyerrors.obs.Obs.calc_gamma": {"tf": 1}}, "df": 1}}}}}}}}}}, "_": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "_": {"docs": {"pyerrors.correlators.Corr.__init__": {"tf": 1}, "pyerrors.fits.Fit_result.__init__": {"tf": 1}, "pyerrors.jackknifing.Jack.__init__": {"tf": 1}, "pyerrors.npr.Npr_matrix.__init__": {"tf": 1}, "pyerrors.obs.Obs.__init__": {"tf": 1}, "pyerrors.obs.CObs.__init__": {"tf": 1}}, "df": 6}}}}}}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.correlators.Corr.reweighted": {"tf": 1}, "pyerrors.correlators.Corr.reweight": {"tf": 1}, "pyerrors.obs.Obs.reweighted": {"tf": 1}, "pyerrors.obs.reweight": {"tf": 1}}, "df": 4}}}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {"pyerrors.correlators.Corr.reverse": {"tf": 1}}, "df": 1}}}}, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.fits.residual_plot": {"tf": 1}}, "df": 1}}}}}}}}}}}, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"pyerrors.input.bdio.read_ADerrors": {"tf": 1}}, "df": 1}}}}}}}, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"pyerrors.input.bdio.read_mesons": {"tf": 1}}, "df": 1, "_": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "d": {"5": {"docs": {"pyerrors.input.hadrons.read_meson_hd5": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}}}}}}}}, "d": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "m": {"docs": {"pyerrors.input.bdio.read_dSdm": {"tf": 1}}, "df": 1}}}}, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": 
{"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "d": {"5": {"docs": {"pyerrors.input.hadrons.read_ExternalLeg_hd5": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}}}}}}}}}}}}}}, "b": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "d": {"5": {"docs": {"pyerrors.input.hadrons.read_Bilinear_hd5": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}}}}}}}}}}}, "p": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "p": {"docs": {"pyerrors.input.misc.read_pbp": {"tf": 1}}, "df": 1}}}, "r": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "m": {"docs": {"pyerrors.input.openQCD.read_rwms": {"tf": 1}}, "df": 1}}}, "s": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "f": {"docs": {"pyerrors.input.sfcf.read_sfcf": {"tf": 1}}, "df": 1, "_": {"docs": {}, "df": 0, "c": {"docs": {"pyerrors.input.sfcf.read_sfcf_c": {"tf": 1}}, "df": 1}}}}}}, "q": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {"pyerrors.input.sfcf.read_qtop": {"tf": 1}}, "df": 1}}}}}}, "l": {"docs": {"pyerrors.obs.CObs.real": {"tf": 1}}, "df": 1}}, "d": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {"pyerrors.obs.reduce_deltas": {"tf": 1}}, "df": 1}}}}}}}}}}}, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {"pyerrors.correlators.Corr.roll": {"tf": 1}}, "df": 1}}}, "_": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {"pyerrors.obs.Obs.r_values": {"tf": 1}}, "df": 1}}}}}}, "g": {"5": {"docs": {}, "df": 0, "h": {"docs": {"pyerrors.npr.Npr_matrix.g5H": {"tf": 1}}, "df": 1}}, "docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {"pyerrors.correlators.Corr.gamma_method": {"tf": 1}, "pyerrors.fits.Fit_result.gamma_method": {"tf": 1}, "pyerrors.obs.Obs.gamma_method": {"tf": 1}, "pyerrors.obs.CObs.gamma_method": {"tf": 1}}, "df": 4}}}}}}}}}}}, "e": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "p": {"docs": {"pyerrors.correlators.Corr.GEVP": {"tf": 1}}, "df": 1}}, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "j": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {"pyerrors.jackknifing.generate_jack": {"tf": 1}}, "df": 1}}}}}}}}}}, "_": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": 
{"pyerrors.misc.gen_correlated_data": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {"pyerrors.dirac.Grid_gamma": {"tf": 1}}, "df": 1}}}}}}}}, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "g": {"docs": {"pyerrors.linalg.grad_eig": {"tf": 1}}, "df": 1}}}}}}}}, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "j": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.correlators.Corr.projected": {"tf": 1}}, "df": 1}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.correlators.Corr.print": {"tf": 1}, "pyerrors.jackknifing.Jack.print": {"tf": 1}, "pyerrors.obs.Obs.print": {"tf": 1}}, "df": 3}}, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.fits.prior_fit": {"tf": 1}}, "df": 1}}}}}}}}, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {"pyerrors.correlators.Corr.plottable": {"tf": 1}}, "df": 1}}}}, "_": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.jackknifing.Jack.plot_tauint": {"tf": 1}, "pyerrors.obs.Obs.plot_tauint": {"tf": 1}}, "df": 2}}}}}}, "h": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {"pyerrors.jackknifing.Jack.plot_history": {"tf": 1}, "pyerrors.obs.Obs.plot_history": {"tf": 1}}, "df": 2}}}}}}}, "r": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "o": {"docs": {"pyerrors.obs.Obs.plot_rho": {"tf": 1}}, "df": 1}}, "e": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.obs.Obs.plot_rep_dist": {"tf": 1}}, "df": 1}}}}}}}}, "p": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.obs.Obs.plot_piechart": {"tf": 1}}, "df": 1}}}}}}}}}}}, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "u": {"docs": {"pyerrors.correlators.Corr.plateau": {"tf": 1}}, "df": 1}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "v": {"docs": {"pyerrors.linalg.pinv": {"tf": 1}}, "df": 1}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "b": {"docs": {"pyerrors.obs.pseudo_Obs": {"tf": 1}}, "df": 1}}}}}}}}}, "s": {"docs": {"pyerrors.obs.Obs.S": {"tf": 1}}, "df": 1, "u": {"docs": {}, "df": 0, "m": {"docs": {"pyerrors.correlators.Corr.sum": {"tf": 1}}, "df": 1}}, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"pyerrors.correlators.Corr.smearing": {"tf": 1}}, "df": 
1, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {"pyerrors.correlators.Corr.smearing_symmetric": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}, "y": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {"pyerrors.correlators.Corr.symmetric": {"tf": 1}}, "df": 1}}}}}}, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {"pyerrors.correlators.Corr.second_deriv": {"tf": 1}}, "df": 1}}}}}}}}}}, "t": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"pyerrors.correlators.Corr.set_prange": {"tf": 1}}, "df": 1}}}}}}}}, "h": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {"pyerrors.correlators.Corr.show": {"tf": 1}}, "df": 1}}, "a": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"pyerrors.obs.Obs.shape": {"tf": 1}}, "df": 1}}}}, "q": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.correlators.Corr.sqrt": {"tf": 1}, "pyerrors.obs.Obs.sqrt": {"tf": 1}}, "df": 2}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {"pyerrors.correlators.Corr.sin": {"tf": 1}, "pyerrors.obs.Obs.sin": {"tf": 1}}, "df": 2, "h": {"docs": {"pyerrors.correlators.Corr.sinh": {"tf": 1}, "pyerrors.obs.Obs.sinh": {"tf": 1}}, "df": 2}, "c": {"docs": {"pyerrors.obs.Obs.sinc": {"tf": 1}}, "df": 1}}}, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.fits.standard_fit": {"tf": 1}}, "df": 1}}}}}}}}}}}, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {"pyerrors.linalg.scalar_mat_op": {"tf": 1}}, "df": 1}}}}}}}}}}}}, "v": {"docs": {}, "df": 0, "d": {"docs": {"pyerrors.linalg.svd": {"tf": 1}}, "df": 1}}, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.linalg.slogdet": {"tf": 1}}, "df": 1}}}}}}, "_": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"pyerrors.obs.Obs.S_global": {"tf": 1}}, "df": 1}}}}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.obs.Obs.S_dict": {"tf": 1}}, "df": 1}}}}}}, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": 
{"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {"pyerrors.correlators.Corr.anti_symmetric": {"tf": 1}}, "df": 1}}}}}}}}}}}, "r": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {"pyerrors.correlators.Corr.arcsin": {"tf": 1}, "pyerrors.obs.Obs.arcsin": {"tf": 1}}, "df": 2, "h": {"docs": {"pyerrors.correlators.Corr.arcsinh": {"tf": 1}, "pyerrors.obs.Obs.arcsinh": {"tf": 1}}, "df": 2}}}}, "c": {"docs": {}, "df": 0, "o": {"docs": {"pyerrors.correlators.Corr.arccos": {"tf": 1}, "pyerrors.obs.Obs.arccos": {"tf": 1}}, "df": 2, "s": {"docs": {}, "df": 0, "h": {"docs": {"pyerrors.correlators.Corr.arccosh": {"tf": 1}, "pyerrors.obs.Obs.arccosh": {"tf": 1}}, "df": 2}}}}, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {"pyerrors.correlators.Corr.arctan": {"tf": 1}, "pyerrors.obs.Obs.arctan": {"tf": 1}}, "df": 2, "h": {"docs": {"pyerrors.correlators.Corr.arctanh": {"tf": 1}, "pyerrors.obs.Obs.arctanh": {"tf": 1}}, "df": 2}}}}}}}, "e": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "g": {"docs": {"pyerrors.linalg.eig": {"tf": 1}}, "df": 1, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {"pyerrors.correlators.Corr.Eigenvalue": {"tf": 1}}, "df": 1}}}}}}, "h": {"docs": {"pyerrors.linalg.eigh": {"tf": 1}}, "df": 1}}}, "x": {"docs": {}, "df": 0, "p": {"docs": {"pyerrors.correlators.Corr.exp": {"tf": 1}, "pyerrors.obs.Obs.exp": {"tf": 1}}, "df": 2, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {"pyerrors.obs.Obs.expand_deltas": {"tf": 1}}, "df": 1, "s": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "g": {"docs": {"pyerrors.obs.expand_deltas_for_merge": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}}, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "t": {"0": {"docs": {"pyerrors.input.openQCD.extract_t0": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}}}}}}}}, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"pyerrors.fits.error_band": {"tf": 1}}, "df": 1}}}}}}}}}, "_": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {"pyerrors.obs.Obs.e_names": {"tf": 1}}, "df": 1}}, "_": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.obs.Obs.e_n_dtauint": {"tf": 1}}, "df": 1}}}}}}}, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.obs.Obs.e_n_tauint": {"tf": 1}}, "df": 1}}}}}}}}, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.obs.Obs.e_content": {"tf": 1}}, "df": 1}}}}, "d": {"docs": {}, "df": 0, "d": 
{"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {"pyerrors.obs.Obs.e_ddvalue": {"tf": 1}}, "df": 1}}}}}, "r": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "o": {"docs": {"pyerrors.obs.Obs.e_drho": {"tf": 1}}, "df": 1}}}, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.obs.Obs.e_dtauint": {"tf": 1}}, "df": 1}}}}}}, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {"pyerrors.obs.Obs.e_dvalue": {"tf": 1}}, "df": 1}}}}}, "r": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "o": {"docs": {"pyerrors.obs.Obs.e_rho": {"tf": 1}}, "df": 1}}}, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.obs.Obs.e_tauint": {"tf": 1}}, "df": 1}}}}}}, "w": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "s": {"docs": {"pyerrors.obs.Obs.e_windowsize": {"tf": 1}}, "df": 1}}}}}}}}}, "t": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {"pyerrors.correlators.Corr.T_symmetry": {"tf": 1}}, "df": 1}}}}}}}}}, "a": {"docs": {}, "df": 0, "n": {"docs": {"pyerrors.correlators.Corr.tan": {"tf": 1}, "pyerrors.obs.Obs.tan": {"tf": 1}}, "df": 2, "h": {"docs": {"pyerrors.correlators.Corr.tanh": {"tf": 1}, "pyerrors.obs.Obs.tanh": {"tf": 1}}, "df": 2}}, "u": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "p": {"docs": {"pyerrors.obs.Obs.tau_exp": {"tf": 1}}, "df": 1, "_": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "b": {"docs": {"pyerrors.obs.Obs.tau_exp_global": {"tf": 1}}, "df": 1}}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.obs.Obs.tau_exp_dict": {"tf": 1}}, "df": 1}}}}}}}}}}, "g": {"docs": {"pyerrors.obs.Obs.tag": {"tf": 1}, "pyerrors.obs.CObs.tag": {"tf": 1}}, "df": 2}}, "o": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"pyerrors.fits.total_least_squares": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {"pyerrors.correlators.Corr.deriv": {"tf": 1}}, "df": 1, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "j": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {"pyerrors.jackknifing.derived_jack": {"tf": 1}}, "df": 1}}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "y": {"docs": {"pyerrors.linalg.derived_array": {"tf": 1}}, "df": 1}}}}}, "o": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "s": 
{"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "v": {"docs": {"pyerrors.obs.derived_observable": {"tf": 1}}, "df": 1}}}}}}}}}}}}, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {"pyerrors.obs.Obs.deltas": {"tf": 1}}, "df": 1}}}, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {"pyerrors.obs.Obs.details": {"tf": 1}}, "df": 1}}}}}, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": {"pyerrors.correlators.Corr.dump": {"tf": 1}, "pyerrors.jackknifing.Jack.dump": {"tf": 1}, "pyerrors.obs.Obs.dump": {"tf": 1}}, "df": 3, "_": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "j": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.obs.dump_object": {"tf": 1}}, "df": 1}}}}}}}}}}, "d": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {"pyerrors.obs.Obs.ddvalue": {"tf": 1}}, "df": 1}}}}}, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {"pyerrors.obs.Obs.dvalue": {"tf": 1}}, "df": 1}}}}}, "m": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "f": {"docs": {"pyerrors.correlators.Corr.m_eff": {"tf": 1}}, "df": 1}}}}, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {"pyerrors.linalg.matmul": {"tf": 1}}, "df": 1}}}, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {"pyerrors.mpm.matrix_pencil_method": {"tf": 1}}, "df": 1, "_": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "d": {"docs": {"pyerrors.mpm.matrix_pencil_method_old": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "x": {"docs": {"pyerrors.obs.merge_idx": {"tf": 1}}, "df": 1}}}, "o": {"docs": {}, "df": 0, "b": {"docs": {"pyerrors.obs.merge_obs": {"tf": 1}}, "df": 1}}}}}}}}, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.correlators.Corr.fit": {"tf": 1}}, "df": 1, "_": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.fits.Fit_result": {"tf": 1}, "pyerrors.fits.Fit_result.__init__": {"tf": 1}, "pyerrors.fits.Fit_result.gamma_method": {"tf": 1}}, "df": 3}}}}}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {"pyerrors.fits.fit_lin": {"tf": 1}}, "df": 1}}}, "g": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"pyerrors.fits.fit_general": {"tf": 1}}, "df": 1}}}}}}}, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "p": {"docs": {"pyerrors.obs.Obs.filter_eps": 
{"tf": 1}}, "df": 1}}, "z": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {"pyerrors.obs.filter_zeroes": {"tf": 1}}, "df": 1}}}}}}}}}, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.roots.find_root": {"tf": 1}}, "df": 1}}}}}}}}}, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "g": {"docs": {"pyerrors.correlators.Corr.log": {"tf": 1}, "pyerrors.obs.Obs.log": {"tf": 1}}, "df": 2}, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "j": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.obs.load_object": {"tf": 1}}, "df": 1}}}}}}}}}}, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"pyerrors.fits.least_squares": {"tf": 1}}, "df": 1}}}}}}}}}}}, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.fits.odr_fit": {"tf": 1}}, "df": 1}}}}}}, "b": {"docs": {"pyerrors.obs.Obs": {"tf": 1}, "pyerrors.obs.Obs.__init__": {"tf": 1}, "pyerrors.obs.Obs.S_global": {"tf": 1}, "pyerrors.obs.Obs.S_dict": {"tf": 1}, "pyerrors.obs.Obs.tau_exp_global": {"tf": 1}, "pyerrors.obs.Obs.tau_exp_dict": {"tf": 1}, "pyerrors.obs.Obs.N_sigma_global": {"tf": 1}, "pyerrors.obs.Obs.filter_eps": {"tf": 1}, "pyerrors.obs.Obs.names": {"tf": 1}, "pyerrors.obs.Obs.shape": {"tf": 1}, "pyerrors.obs.Obs.r_values": {"tf": 1}, "pyerrors.obs.Obs.deltas": {"tf": 1}, "pyerrors.obs.Obs.idl": {"tf": 1}, "pyerrors.obs.Obs.is_merged": {"tf": 1}, "pyerrors.obs.Obs.N": {"tf": 1}, "pyerrors.obs.Obs.ddvalue": {"tf": 1}, "pyerrors.obs.Obs.reweighted": {"tf": 1}, "pyerrors.obs.Obs.tag": {"tf": 1}, "pyerrors.obs.Obs.value": {"tf": 1}, "pyerrors.obs.Obs.dvalue": {"tf": 1}, "pyerrors.obs.Obs.e_names": {"tf": 1}, "pyerrors.obs.Obs.e_content": {"tf": 1}, "pyerrors.obs.Obs.expand_deltas": {"tf": 1}, "pyerrors.obs.Obs.calc_gamma": {"tf": 1}, "pyerrors.obs.Obs.gamma_method": {"tf": 1}, "pyerrors.obs.Obs.print": {"tf": 1}, "pyerrors.obs.Obs.details": {"tf": 1}, "pyerrors.obs.Obs.is_zero_within_error": {"tf": 1}, "pyerrors.obs.Obs.is_zero": {"tf": 1}, "pyerrors.obs.Obs.plot_tauint": {"tf": 1}, "pyerrors.obs.Obs.plot_rho": {"tf": 1}, "pyerrors.obs.Obs.plot_rep_dist": {"tf": 1}, "pyerrors.obs.Obs.plot_history": {"tf": 1}, "pyerrors.obs.Obs.plot_piechart": {"tf": 1}, "pyerrors.obs.Obs.dump": {"tf": 1}, "pyerrors.obs.Obs.sqrt": {"tf": 1}, "pyerrors.obs.Obs.log": {"tf": 1}, "pyerrors.obs.Obs.exp": {"tf": 1}, "pyerrors.obs.Obs.sin": {"tf": 1}, "pyerrors.obs.Obs.cos": {"tf": 1}, "pyerrors.obs.Obs.tan": {"tf": 1}, "pyerrors.obs.Obs.arcsin": {"tf": 1}, "pyerrors.obs.Obs.arccos": {"tf": 1}, "pyerrors.obs.Obs.arctan": {"tf": 1}, "pyerrors.obs.Obs.sinh": {"tf": 1}, "pyerrors.obs.Obs.cosh": {"tf": 1}, "pyerrors.obs.Obs.tanh": {"tf": 1}, "pyerrors.obs.Obs.arcsinh": {"tf": 1}, "pyerrors.obs.Obs.arccosh": {"tf": 1}, "pyerrors.obs.Obs.arctanh": {"tf": 1}, "pyerrors.obs.Obs.sinc": {"tf": 1}, "pyerrors.obs.Obs.N_sigma": {"tf": 1}, "pyerrors.obs.Obs.S": {"tf": 1}, "pyerrors.obs.Obs.e_ddvalue": {"tf": 1}, 
"pyerrors.obs.Obs.e_drho": {"tf": 1}, "pyerrors.obs.Obs.e_dtauint": {"tf": 1}, "pyerrors.obs.Obs.e_dvalue": {"tf": 1}, "pyerrors.obs.Obs.e_n_dtauint": {"tf": 1}, "pyerrors.obs.Obs.e_n_tauint": {"tf": 1}, "pyerrors.obs.Obs.e_rho": {"tf": 1}, "pyerrors.obs.Obs.e_tauint": {"tf": 1}, "pyerrors.obs.Obs.e_windowsize": {"tf": 1}, "pyerrors.obs.Obs.tau_exp": {"tf": 1}}, "df": 63}}, "q": {"docs": {}, "df": 0, "q": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.fits.qqplot": {"tf": 1}}, "df": 1}}}}}}, "k": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.fits.ks_test": {"tf": 1}}, "df": 1}}}}}}}, "w": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"pyerrors.input.bdio.write_ADerrors": {"tf": 1}}, "df": 1}}}}}}}}}}}}}, "j": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {"pyerrors.jackknifing.Jack": {"tf": 1}, "pyerrors.jackknifing.Jack.__init__": {"tf": 1}, "pyerrors.jackknifing.Jack.print": {"tf": 1}, "pyerrors.jackknifing.Jack.plot_tauint": {"tf": 1}, "pyerrors.jackknifing.Jack.plot_history": {"tf": 1}, "pyerrors.jackknifing.Jack.dump": {"tf": 1}}, "df": 6}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "v": {"docs": {"pyerrors.linalg.inv": {"tf": 1}}, "df": 1, "_": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "g": {"docs": {"pyerrors.npr.inv_propagator": {"tf": 1}}, "df": 1}}}}}}}}}, "d": {"docs": {}, "df": 0, "l": {"docs": {"pyerrors.obs.Obs.idl": {"tf": 1}}, "df": 1}}, "s": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "g": {"docs": {"pyerrors.obs.Obs.is_merged": {"tf": 1}}, "df": 1}}}}, "z": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {"pyerrors.obs.Obs.is_zero": {"tf": 1}, "pyerrors.obs.CObs.is_zero": {"tf": 1}}, "df": 2, "_": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"pyerrors.obs.Obs.is_zero_within_error": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "g": {"docs": {"pyerrors.obs.CObs.imag": {"tf": 1}}, "df": 1}}}}, "n": {"docs": {"pyerrors.obs.Obs.N": {"tf": 1}}, "df": 1, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "x": {"docs": {"pyerrors.npr.Npr_matrix": {"tf": 1}, "pyerrors.npr.Npr_matrix.__init__": {"tf": 1}, "pyerrors.npr.Npr_matrix.g5H": {"tf": 1}}, "df": 3}}}}}}}}}, "_": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, 
"a": {"docs": {"pyerrors.obs.Obs.N_sigma": {"tf": 1}}, "df": 1, "_": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "b": {"docs": {"pyerrors.obs.Obs.N_sigma_global": {"tf": 1}}, "df": 1}}}}}}}}}}}, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"pyerrors.obs.Obs.names": {"tf": 1}}, "df": 1}}}}, "z": {"docs": {}, "df": 0, "q": {"docs": {"pyerrors.npr.Zq": {"tf": 1}}, "df": 1}}, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {"pyerrors.obs.Obs.value": {"tf": 1}}, "df": 1}}}}}}, "fullname": {"root": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"pyerrors": {"tf": 1}, "pyerrors.correlators": {"tf": 1}, "pyerrors.correlators.Corr": {"tf": 1}, "pyerrors.correlators.Corr.__init__": {"tf": 1}, "pyerrors.correlators.Corr.reweighted": {"tf": 1}, "pyerrors.correlators.Corr.gamma_method": {"tf": 1}, "pyerrors.correlators.Corr.projected": {"tf": 1}, "pyerrors.correlators.Corr.sum": {"tf": 1}, "pyerrors.correlators.Corr.smearing": {"tf": 1}, "pyerrors.correlators.Corr.plottable": {"tf": 1}, "pyerrors.correlators.Corr.symmetric": {"tf": 1}, "pyerrors.correlators.Corr.anti_symmetric": {"tf": 1}, "pyerrors.correlators.Corr.smearing_symmetric": {"tf": 1}, "pyerrors.correlators.Corr.GEVP": {"tf": 1}, "pyerrors.correlators.Corr.Eigenvalue": {"tf": 1}, "pyerrors.correlators.Corr.roll": {"tf": 1}, "pyerrors.correlators.Corr.reverse": {"tf": 1}, "pyerrors.correlators.Corr.correlate": {"tf": 1}, "pyerrors.correlators.Corr.reweight": {"tf": 1}, "pyerrors.correlators.Corr.T_symmetry": {"tf": 1}, "pyerrors.correlators.Corr.deriv": {"tf": 1}, "pyerrors.correlators.Corr.second_deriv": {"tf": 1}, "pyerrors.correlators.Corr.m_eff": {"tf": 1}, "pyerrors.correlators.Corr.fit": {"tf": 1}, "pyerrors.correlators.Corr.plateau": {"tf": 1}, "pyerrors.correlators.Corr.set_prange": {"tf": 1}, "pyerrors.correlators.Corr.show": {"tf": 1}, "pyerrors.correlators.Corr.dump": {"tf": 1}, "pyerrors.correlators.Corr.print": {"tf": 1}, "pyerrors.correlators.Corr.sqrt": {"tf": 1}, "pyerrors.correlators.Corr.log": {"tf": 1}, "pyerrors.correlators.Corr.exp": {"tf": 1}, "pyerrors.correlators.Corr.sin": {"tf": 1}, "pyerrors.correlators.Corr.cos": {"tf": 1}, "pyerrors.correlators.Corr.tan": {"tf": 1}, "pyerrors.correlators.Corr.sinh": {"tf": 1}, "pyerrors.correlators.Corr.cosh": {"tf": 1}, "pyerrors.correlators.Corr.tanh": {"tf": 1}, "pyerrors.correlators.Corr.arcsin": {"tf": 1}, "pyerrors.correlators.Corr.arccos": {"tf": 1}, "pyerrors.correlators.Corr.arctan": {"tf": 1}, "pyerrors.correlators.Corr.arcsinh": {"tf": 1}, "pyerrors.correlators.Corr.arccosh": {"tf": 1}, "pyerrors.correlators.Corr.arctanh": {"tf": 1}, "pyerrors.dirac": {"tf": 1}, "pyerrors.dirac.Grid_gamma": {"tf": 1}, "pyerrors.fits": {"tf": 1}, "pyerrors.fits.Fit_result": {"tf": 1}, "pyerrors.fits.Fit_result.__init__": {"tf": 1}, "pyerrors.fits.Fit_result.gamma_method": {"tf": 1}, "pyerrors.fits.least_squares": {"tf": 1}, "pyerrors.fits.standard_fit": {"tf": 1}, "pyerrors.fits.odr_fit": {"tf": 1}, "pyerrors.fits.total_least_squares": {"tf": 1}, "pyerrors.fits.prior_fit": {"tf": 1}, "pyerrors.fits.fit_lin": {"tf": 1}, "pyerrors.fits.qqplot": {"tf": 1}, "pyerrors.fits.residual_plot": {"tf": 1}, "pyerrors.fits.covariance_matrix": {"tf": 1}, "pyerrors.fits.error_band": {"tf": 1}, "pyerrors.fits.ks_test": {"tf": 
1}, "pyerrors.fits.fit_general": {"tf": 1}, "pyerrors.input": {"tf": 1}, "pyerrors.input.bdio": {"tf": 1}, "pyerrors.input.bdio.read_ADerrors": {"tf": 1}, "pyerrors.input.bdio.write_ADerrors": {"tf": 1}, "pyerrors.input.bdio.read_mesons": {"tf": 1}, "pyerrors.input.bdio.read_dSdm": {"tf": 1}, "pyerrors.input.hadrons": {"tf": 1}, "pyerrors.input.hadrons.read_meson_hd5": {"tf": 1}, "pyerrors.input.hadrons.read_ExternalLeg_hd5": {"tf": 1}, "pyerrors.input.hadrons.read_Bilinear_hd5": {"tf": 1}, "pyerrors.input.misc": {"tf": 1}, "pyerrors.input.misc.read_pbp": {"tf": 1}, "pyerrors.input.openQCD": {"tf": 1}, "pyerrors.input.openQCD.read_rwms": {"tf": 1}, "pyerrors.input.openQCD.extract_t0": {"tf": 1}, "pyerrors.input.sfcf": {"tf": 1}, "pyerrors.input.sfcf.read_sfcf": {"tf": 1}, "pyerrors.input.sfcf.read_sfcf_c": {"tf": 1}, "pyerrors.input.sfcf.read_qtop": {"tf": 1}, "pyerrors.jackknifing": {"tf": 1}, "pyerrors.jackknifing.Jack": {"tf": 1}, "pyerrors.jackknifing.Jack.__init__": {"tf": 1}, "pyerrors.jackknifing.Jack.print": {"tf": 1}, "pyerrors.jackknifing.Jack.plot_tauint": {"tf": 1}, "pyerrors.jackknifing.Jack.plot_history": {"tf": 1}, "pyerrors.jackknifing.Jack.dump": {"tf": 1}, "pyerrors.jackknifing.generate_jack": {"tf": 1}, "pyerrors.jackknifing.derived_jack": {"tf": 1}, "pyerrors.linalg": {"tf": 1}, "pyerrors.linalg.derived_array": {"tf": 1}, "pyerrors.linalg.matmul": {"tf": 1}, "pyerrors.linalg.inv": {"tf": 1}, "pyerrors.linalg.cholesky": {"tf": 1}, "pyerrors.linalg.scalar_mat_op": {"tf": 1}, "pyerrors.linalg.eigh": {"tf": 1}, "pyerrors.linalg.eig": {"tf": 1}, "pyerrors.linalg.pinv": {"tf": 1}, "pyerrors.linalg.svd": {"tf": 1}, "pyerrors.linalg.slogdet": {"tf": 1}, "pyerrors.linalg.grad_eig": {"tf": 1}, "pyerrors.misc": {"tf": 1}, "pyerrors.misc.gen_correlated_data": {"tf": 1}, "pyerrors.mpm": {"tf": 1}, "pyerrors.mpm.matrix_pencil_method": {"tf": 1}, "pyerrors.mpm.matrix_pencil_method_old": {"tf": 1}, "pyerrors.npr": {"tf": 1}, "pyerrors.npr.Npr_matrix": {"tf": 1}, "pyerrors.npr.Npr_matrix.__init__": {"tf": 1}, "pyerrors.npr.Npr_matrix.g5H": {"tf": 1}, "pyerrors.npr.inv_propagator": {"tf": 1}, "pyerrors.npr.Zq": {"tf": 1}, "pyerrors.obs": {"tf": 1}, "pyerrors.obs.Obs": {"tf": 1}, "pyerrors.obs.Obs.__init__": {"tf": 1}, "pyerrors.obs.Obs.S_global": {"tf": 1}, "pyerrors.obs.Obs.S_dict": {"tf": 1}, "pyerrors.obs.Obs.tau_exp_global": {"tf": 1}, "pyerrors.obs.Obs.tau_exp_dict": {"tf": 1}, "pyerrors.obs.Obs.N_sigma_global": {"tf": 1}, "pyerrors.obs.Obs.filter_eps": {"tf": 1}, "pyerrors.obs.Obs.names": {"tf": 1}, "pyerrors.obs.Obs.shape": {"tf": 1}, "pyerrors.obs.Obs.r_values": {"tf": 1}, "pyerrors.obs.Obs.deltas": {"tf": 1}, "pyerrors.obs.Obs.idl": {"tf": 1}, "pyerrors.obs.Obs.is_merged": {"tf": 1}, "pyerrors.obs.Obs.N": {"tf": 1}, "pyerrors.obs.Obs.ddvalue": {"tf": 1}, "pyerrors.obs.Obs.reweighted": {"tf": 1}, "pyerrors.obs.Obs.tag": {"tf": 1}, "pyerrors.obs.Obs.value": {"tf": 1}, "pyerrors.obs.Obs.dvalue": {"tf": 1}, "pyerrors.obs.Obs.e_names": {"tf": 1}, "pyerrors.obs.Obs.e_content": {"tf": 1}, "pyerrors.obs.Obs.expand_deltas": {"tf": 1}, "pyerrors.obs.Obs.calc_gamma": {"tf": 1}, "pyerrors.obs.Obs.gamma_method": {"tf": 1}, "pyerrors.obs.Obs.print": {"tf": 1}, "pyerrors.obs.Obs.details": {"tf": 1}, "pyerrors.obs.Obs.is_zero_within_error": {"tf": 1}, "pyerrors.obs.Obs.is_zero": {"tf": 1}, "pyerrors.obs.Obs.plot_tauint": {"tf": 1}, "pyerrors.obs.Obs.plot_rho": {"tf": 1}, "pyerrors.obs.Obs.plot_rep_dist": {"tf": 1}, "pyerrors.obs.Obs.plot_history": {"tf": 1}, "pyerrors.obs.Obs.plot_piechart": 
{"tf": 1}, "pyerrors.obs.Obs.dump": {"tf": 1}, "pyerrors.obs.Obs.sqrt": {"tf": 1}, "pyerrors.obs.Obs.log": {"tf": 1}, "pyerrors.obs.Obs.exp": {"tf": 1}, "pyerrors.obs.Obs.sin": {"tf": 1}, "pyerrors.obs.Obs.cos": {"tf": 1}, "pyerrors.obs.Obs.tan": {"tf": 1}, "pyerrors.obs.Obs.arcsin": {"tf": 1}, "pyerrors.obs.Obs.arccos": {"tf": 1}, "pyerrors.obs.Obs.arctan": {"tf": 1}, "pyerrors.obs.Obs.sinh": {"tf": 1}, "pyerrors.obs.Obs.cosh": {"tf": 1}, "pyerrors.obs.Obs.tanh": {"tf": 1}, "pyerrors.obs.Obs.arcsinh": {"tf": 1}, "pyerrors.obs.Obs.arccosh": {"tf": 1}, "pyerrors.obs.Obs.arctanh": {"tf": 1}, "pyerrors.obs.Obs.sinc": {"tf": 1}, "pyerrors.obs.Obs.N_sigma": {"tf": 1}, "pyerrors.obs.Obs.S": {"tf": 1}, "pyerrors.obs.Obs.e_ddvalue": {"tf": 1}, "pyerrors.obs.Obs.e_drho": {"tf": 1}, "pyerrors.obs.Obs.e_dtauint": {"tf": 1}, "pyerrors.obs.Obs.e_dvalue": {"tf": 1}, "pyerrors.obs.Obs.e_n_dtauint": {"tf": 1}, "pyerrors.obs.Obs.e_n_tauint": {"tf": 1}, "pyerrors.obs.Obs.e_rho": {"tf": 1}, "pyerrors.obs.Obs.e_tauint": {"tf": 1}, "pyerrors.obs.Obs.e_windowsize": {"tf": 1}, "pyerrors.obs.Obs.tau_exp": {"tf": 1}, "pyerrors.obs.CObs": {"tf": 1}, "pyerrors.obs.CObs.__init__": {"tf": 1}, "pyerrors.obs.CObs.tag": {"tf": 1}, "pyerrors.obs.CObs.real": {"tf": 1}, "pyerrors.obs.CObs.imag": {"tf": 1}, "pyerrors.obs.CObs.gamma_method": {"tf": 1}, "pyerrors.obs.CObs.is_zero": {"tf": 1}, "pyerrors.obs.CObs.conjugate": {"tf": 1}, "pyerrors.obs.merge_idx": {"tf": 1}, "pyerrors.obs.expand_deltas_for_merge": {"tf": 1}, "pyerrors.obs.filter_zeroes": {"tf": 1}, "pyerrors.obs.derived_observable": {"tf": 1}, "pyerrors.obs.reduce_deltas": {"tf": 1}, "pyerrors.obs.reweight": {"tf": 1}, "pyerrors.obs.correlate": {"tf": 1}, "pyerrors.obs.covariance": {"tf": 1}, "pyerrors.obs.covariance2": {"tf": 1}, "pyerrors.obs.covariance3": {"tf": 1}, "pyerrors.obs.pseudo_Obs": {"tf": 1}, "pyerrors.obs.dump_object": {"tf": 1}, "pyerrors.obs.load_object": {"tf": 1}, "pyerrors.obs.merge_obs": {"tf": 1}, "pyerrors.roots": {"tf": 1}, "pyerrors.roots.find_root": {"tf": 1}, "pyerrors.version": {"tf": 1}}, "df": 202}}}}}}, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "j": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.correlators.Corr.projected": {"tf": 1}}, "df": 1}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.correlators.Corr.print": {"tf": 1}, "pyerrors.jackknifing.Jack.print": {"tf": 1}, "pyerrors.obs.Obs.print": {"tf": 1}}, "df": 3}}, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.fits.prior_fit": {"tf": 1}}, "df": 1}}}}}}}}, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {"pyerrors.correlators.Corr.plottable": {"tf": 1}}, "df": 1}}}}, "_": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.jackknifing.Jack.plot_tauint": {"tf": 1}, "pyerrors.obs.Obs.plot_tauint": {"tf": 1}}, "df": 2}}}}}}, "h": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {"pyerrors.jackknifing.Jack.plot_history": {"tf": 1}, "pyerrors.obs.Obs.plot_history": {"tf": 1}}, "df": 2}}}}}}}, "r": 
{"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "o": {"docs": {"pyerrors.obs.Obs.plot_rho": {"tf": 1}}, "df": 1}}, "e": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.obs.Obs.plot_rep_dist": {"tf": 1}}, "df": 1}}}}}}}}, "p": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.obs.Obs.plot_piechart": {"tf": 1}}, "df": 1}}}}}}}}}}}, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "u": {"docs": {"pyerrors.correlators.Corr.plateau": {"tf": 1}}, "df": 1}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "v": {"docs": {"pyerrors.linalg.pinv": {"tf": 1}}, "df": 1}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "b": {"docs": {"pyerrors.obs.pseudo_Obs": {"tf": 1}}, "df": 1}}}}}}}}}, "c": {"docs": {}, "df": 0, "o": {"docs": {"pyerrors.correlators.Corr.cos": {"tf": 1}, "pyerrors.obs.Obs.cos": {"tf": 1}}, "df": 2, "r": {"docs": {}, "df": 0, "r": {"docs": {"pyerrors.correlators.Corr": {"tf": 1}, "pyerrors.correlators.Corr.__init__": {"tf": 1}, "pyerrors.correlators.Corr.reweighted": {"tf": 1}, "pyerrors.correlators.Corr.gamma_method": {"tf": 1}, "pyerrors.correlators.Corr.projected": {"tf": 1}, "pyerrors.correlators.Corr.sum": {"tf": 1}, "pyerrors.correlators.Corr.smearing": {"tf": 1}, "pyerrors.correlators.Corr.plottable": {"tf": 1}, "pyerrors.correlators.Corr.symmetric": {"tf": 1}, "pyerrors.correlators.Corr.anti_symmetric": {"tf": 1}, "pyerrors.correlators.Corr.smearing_symmetric": {"tf": 1}, "pyerrors.correlators.Corr.GEVP": {"tf": 1}, "pyerrors.correlators.Corr.Eigenvalue": {"tf": 1}, "pyerrors.correlators.Corr.roll": {"tf": 1}, "pyerrors.correlators.Corr.reverse": {"tf": 1}, "pyerrors.correlators.Corr.correlate": {"tf": 1}, "pyerrors.correlators.Corr.reweight": {"tf": 1}, "pyerrors.correlators.Corr.T_symmetry": {"tf": 1}, "pyerrors.correlators.Corr.deriv": {"tf": 1}, "pyerrors.correlators.Corr.second_deriv": {"tf": 1}, "pyerrors.correlators.Corr.m_eff": {"tf": 1}, "pyerrors.correlators.Corr.fit": {"tf": 1}, "pyerrors.correlators.Corr.plateau": {"tf": 1}, "pyerrors.correlators.Corr.set_prange": {"tf": 1}, "pyerrors.correlators.Corr.show": {"tf": 1}, "pyerrors.correlators.Corr.dump": {"tf": 1}, "pyerrors.correlators.Corr.print": {"tf": 1}, "pyerrors.correlators.Corr.sqrt": {"tf": 1}, "pyerrors.correlators.Corr.log": {"tf": 1}, "pyerrors.correlators.Corr.exp": {"tf": 1}, "pyerrors.correlators.Corr.sin": {"tf": 1}, "pyerrors.correlators.Corr.cos": {"tf": 1}, "pyerrors.correlators.Corr.tan": {"tf": 1}, "pyerrors.correlators.Corr.sinh": {"tf": 1}, "pyerrors.correlators.Corr.cosh": {"tf": 1}, "pyerrors.correlators.Corr.tanh": {"tf": 1}, "pyerrors.correlators.Corr.arcsin": {"tf": 1}, "pyerrors.correlators.Corr.arccos": {"tf": 1}, "pyerrors.correlators.Corr.arctan": {"tf": 1}, "pyerrors.correlators.Corr.arcsinh": {"tf": 1}, "pyerrors.correlators.Corr.arccosh": {"tf": 1}, "pyerrors.correlators.Corr.arctanh": {"tf": 1}}, "df": 42, "e": {"docs": {}, "df": 0, "l": {"docs": {"pyerrors.correlators": {"tf": 1}, "pyerrors.correlators.Corr": {"tf": 1}, "pyerrors.correlators.Corr.__init__": {"tf": 1}, 
"pyerrors.correlators.Corr.reweighted": {"tf": 1}, "pyerrors.correlators.Corr.gamma_method": {"tf": 1}, "pyerrors.correlators.Corr.projected": {"tf": 1}, "pyerrors.correlators.Corr.sum": {"tf": 1}, "pyerrors.correlators.Corr.smearing": {"tf": 1}, "pyerrors.correlators.Corr.plottable": {"tf": 1}, "pyerrors.correlators.Corr.symmetric": {"tf": 1}, "pyerrors.correlators.Corr.anti_symmetric": {"tf": 1}, "pyerrors.correlators.Corr.smearing_symmetric": {"tf": 1}, "pyerrors.correlators.Corr.GEVP": {"tf": 1}, "pyerrors.correlators.Corr.Eigenvalue": {"tf": 1}, "pyerrors.correlators.Corr.roll": {"tf": 1}, "pyerrors.correlators.Corr.reverse": {"tf": 1}, "pyerrors.correlators.Corr.correlate": {"tf": 1.4142135623730951}, "pyerrors.correlators.Corr.reweight": {"tf": 1}, "pyerrors.correlators.Corr.T_symmetry": {"tf": 1}, "pyerrors.correlators.Corr.deriv": {"tf": 1}, "pyerrors.correlators.Corr.second_deriv": {"tf": 1}, "pyerrors.correlators.Corr.m_eff": {"tf": 1}, "pyerrors.correlators.Corr.fit": {"tf": 1}, "pyerrors.correlators.Corr.plateau": {"tf": 1}, "pyerrors.correlators.Corr.set_prange": {"tf": 1}, "pyerrors.correlators.Corr.show": {"tf": 1}, "pyerrors.correlators.Corr.dump": {"tf": 1}, "pyerrors.correlators.Corr.print": {"tf": 1}, "pyerrors.correlators.Corr.sqrt": {"tf": 1}, "pyerrors.correlators.Corr.log": {"tf": 1}, "pyerrors.correlators.Corr.exp": {"tf": 1}, "pyerrors.correlators.Corr.sin": {"tf": 1}, "pyerrors.correlators.Corr.cos": {"tf": 1}, "pyerrors.correlators.Corr.tan": {"tf": 1}, "pyerrors.correlators.Corr.sinh": {"tf": 1}, "pyerrors.correlators.Corr.cosh": {"tf": 1}, "pyerrors.correlators.Corr.tanh": {"tf": 1}, "pyerrors.correlators.Corr.arcsin": {"tf": 1}, "pyerrors.correlators.Corr.arccos": {"tf": 1}, "pyerrors.correlators.Corr.arctan": {"tf": 1}, "pyerrors.correlators.Corr.arcsinh": {"tf": 1}, "pyerrors.correlators.Corr.arccosh": {"tf": 1}, "pyerrors.correlators.Corr.arctanh": {"tf": 1}, "pyerrors.obs.correlate": {"tf": 1}}, "df": 44}}}}, "s": {"docs": {}, "df": 0, "h": {"docs": {"pyerrors.correlators.Corr.cosh": {"tf": 1}, "pyerrors.obs.Obs.cosh": {"tf": 1}}, "df": 2}}, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {"pyerrors.obs.covariance": {"tf": 1}}, "df": 1, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"2": {"docs": {"pyerrors.obs.covariance2": {"tf": 1}}, "df": 1}, "3": {"docs": {"pyerrors.obs.covariance3": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "x": {"docs": {"pyerrors.fits.covariance_matrix": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}, "b": {"docs": {"pyerrors.obs.CObs": {"tf": 1}, "pyerrors.obs.CObs.__init__": {"tf": 1}, "pyerrors.obs.CObs.tag": {"tf": 1}, "pyerrors.obs.CObs.real": {"tf": 1}, "pyerrors.obs.CObs.imag": {"tf": 1}, "pyerrors.obs.CObs.gamma_method": {"tf": 1}, "pyerrors.obs.CObs.is_zero": {"tf": 1}, "pyerrors.obs.CObs.conjugate": {"tf": 1}}, "df": 8}, "n": {"docs": {}, "df": 0, "j": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "g": {"docs": {"pyerrors.obs.CObs.conjugate": {"tf": 1}}, "df": 1}}}}}, "h": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "i": {"docs": {"pyerrors.linalg.cholesky": {"tf": 1}}, "df": 1}}}}}}}, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "_": 
{"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {"pyerrors.obs.Obs.calc_gamma": {"tf": 1}}, "df": 1}}}}}}}}}}, "_": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "_": {"docs": {"pyerrors.correlators.Corr.__init__": {"tf": 1}, "pyerrors.fits.Fit_result.__init__": {"tf": 1}, "pyerrors.jackknifing.Jack.__init__": {"tf": 1}, "pyerrors.npr.Npr_matrix.__init__": {"tf": 1}, "pyerrors.obs.Obs.__init__": {"tf": 1}, "pyerrors.obs.CObs.__init__": {"tf": 1}}, "df": 6}}}}}}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.correlators.Corr.reweighted": {"tf": 1}, "pyerrors.correlators.Corr.reweight": {"tf": 1}, "pyerrors.obs.Obs.reweighted": {"tf": 1}, "pyerrors.obs.reweight": {"tf": 1}}, "df": 4}}}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {"pyerrors.correlators.Corr.reverse": {"tf": 1}}, "df": 1}}}}, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.fits.residual_plot": {"tf": 1}}, "df": 1}}}}}}}}}}}, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"pyerrors.input.bdio.read_ADerrors": {"tf": 1}}, "df": 1}}}}}}}, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"pyerrors.input.bdio.read_mesons": {"tf": 1}}, "df": 1, "_": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "d": {"5": {"docs": {"pyerrors.input.hadrons.read_meson_hd5": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}}}}}}}}, "d": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "m": {"docs": {"pyerrors.input.bdio.read_dSdm": {"tf": 1}}, "df": 1}}}}, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "d": {"5": {"docs": {"pyerrors.input.hadrons.read_ExternalLeg_hd5": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}}}}}}}}}}}}}}, "b": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "d": {"5": {"docs": {"pyerrors.input.hadrons.read_Bilinear_hd5": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}}}}}}}}}}}, "p": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "p": {"docs": {"pyerrors.input.misc.read_pbp": {"tf": 1}}, "df": 1}}}, "r": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "m": {"docs": {"pyerrors.input.openQCD.read_rwms": {"tf": 1}}, "df": 1}}}, "s": 
{"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "f": {"docs": {"pyerrors.input.sfcf.read_sfcf": {"tf": 1}}, "df": 1, "_": {"docs": {}, "df": 0, "c": {"docs": {"pyerrors.input.sfcf.read_sfcf_c": {"tf": 1}}, "df": 1}}}}}}, "q": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {"pyerrors.input.sfcf.read_qtop": {"tf": 1}}, "df": 1}}}}}}, "l": {"docs": {"pyerrors.obs.CObs.real": {"tf": 1}}, "df": 1}}, "d": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {"pyerrors.obs.reduce_deltas": {"tf": 1}}, "df": 1}}}}}}}}}}}, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {"pyerrors.correlators.Corr.roll": {"tf": 1}}, "df": 1}}, "o": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.roots": {"tf": 1}, "pyerrors.roots.find_root": {"tf": 1}}, "df": 2}}}, "_": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {"pyerrors.obs.Obs.r_values": {"tf": 1}}, "df": 1}}}}}}, "g": {"5": {"docs": {}, "df": 0, "h": {"docs": {"pyerrors.npr.Npr_matrix.g5H": {"tf": 1}}, "df": 1}}, "docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {"pyerrors.correlators.Corr.gamma_method": {"tf": 1}, "pyerrors.fits.Fit_result.gamma_method": {"tf": 1}, "pyerrors.obs.Obs.gamma_method": {"tf": 1}, "pyerrors.obs.CObs.gamma_method": {"tf": 1}}, "df": 4}}}}}}}}}}}, "e": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "p": {"docs": {"pyerrors.correlators.Corr.GEVP": {"tf": 1}}, "df": 1}}, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "j": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {"pyerrors.jackknifing.generate_jack": {"tf": 1}}, "df": 1}}}}}}}}}}, "_": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {"pyerrors.misc.gen_correlated_data": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {"pyerrors.dirac.Grid_gamma": {"tf": 1}}, "df": 1}}}}}}}}, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "g": {"docs": {"pyerrors.linalg.grad_eig": {"tf": 1}}, "df": 1}}}}}}}}, "s": {"docs": {"pyerrors.obs.Obs.S": {"tf": 1}}, "df": 1, "u": {"docs": {}, "df": 0, "m": {"docs": {"pyerrors.correlators.Corr.sum": {"tf": 1}}, "df": 1}}, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": 
{"docs": {"pyerrors.correlators.Corr.smearing": {"tf": 1}}, "df": 1, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {"pyerrors.correlators.Corr.smearing_symmetric": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}, "y": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {"pyerrors.correlators.Corr.symmetric": {"tf": 1}}, "df": 1}}}}}}, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {"pyerrors.correlators.Corr.second_deriv": {"tf": 1}}, "df": 1}}}}}}}}}}, "t": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"pyerrors.correlators.Corr.set_prange": {"tf": 1}}, "df": 1}}}}}}}}, "h": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {"pyerrors.correlators.Corr.show": {"tf": 1}}, "df": 1}}, "a": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"pyerrors.obs.Obs.shape": {"tf": 1}}, "df": 1}}}}, "q": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.correlators.Corr.sqrt": {"tf": 1}, "pyerrors.obs.Obs.sqrt": {"tf": 1}}, "df": 2}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {"pyerrors.correlators.Corr.sin": {"tf": 1}, "pyerrors.obs.Obs.sin": {"tf": 1}}, "df": 2, "h": {"docs": {"pyerrors.correlators.Corr.sinh": {"tf": 1}, "pyerrors.obs.Obs.sinh": {"tf": 1}}, "df": 2}, "c": {"docs": {"pyerrors.obs.Obs.sinc": {"tf": 1}}, "df": 1}}}, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.fits.standard_fit": {"tf": 1}}, "df": 1}}}}}}}}}}}, "f": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "f": {"docs": {"pyerrors.input.sfcf": {"tf": 1}, "pyerrors.input.sfcf.read_sfcf": {"tf": 1}, "pyerrors.input.sfcf.read_sfcf_c": {"tf": 1}, "pyerrors.input.sfcf.read_qtop": {"tf": 1}}, "df": 4}}}, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {"pyerrors.linalg.scalar_mat_op": {"tf": 1}}, "df": 1}}}}}}}}}}}}, "v": {"docs": {}, "df": 0, "d": {"docs": {"pyerrors.linalg.svd": {"tf": 1}}, "df": 1}}, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.linalg.slogdet": {"tf": 1}}, "df": 1}}}}}}, "_": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"pyerrors.obs.Obs.S_global": {"tf": 1}}, "df": 1}}}}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": 
{"docs": {"pyerrors.obs.Obs.S_dict": {"tf": 1}}, "df": 1}}}}}}, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {"pyerrors.correlators.Corr.anti_symmetric": {"tf": 1}}, "df": 1}}}}}}}}}}}, "r": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {"pyerrors.correlators.Corr.arcsin": {"tf": 1}, "pyerrors.obs.Obs.arcsin": {"tf": 1}}, "df": 2, "h": {"docs": {"pyerrors.correlators.Corr.arcsinh": {"tf": 1}, "pyerrors.obs.Obs.arcsinh": {"tf": 1}}, "df": 2}}}}, "c": {"docs": {}, "df": 0, "o": {"docs": {"pyerrors.correlators.Corr.arccos": {"tf": 1}, "pyerrors.obs.Obs.arccos": {"tf": 1}}, "df": 2, "s": {"docs": {}, "df": 0, "h": {"docs": {"pyerrors.correlators.Corr.arccosh": {"tf": 1}, "pyerrors.obs.Obs.arccosh": {"tf": 1}}, "df": 2}}}}, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {"pyerrors.correlators.Corr.arctan": {"tf": 1}, "pyerrors.obs.Obs.arctan": {"tf": 1}}, "df": 2, "h": {"docs": {"pyerrors.correlators.Corr.arctanh": {"tf": 1}, "pyerrors.obs.Obs.arctanh": {"tf": 1}}, "df": 2}}}}}}}, "e": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "g": {"docs": {"pyerrors.linalg.eig": {"tf": 1}}, "df": 1, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {"pyerrors.correlators.Corr.Eigenvalue": {"tf": 1}}, "df": 1}}}}}}, "h": {"docs": {"pyerrors.linalg.eigh": {"tf": 1}}, "df": 1}}}, "x": {"docs": {}, "df": 0, "p": {"docs": {"pyerrors.correlators.Corr.exp": {"tf": 1}, "pyerrors.obs.Obs.exp": {"tf": 1}}, "df": 2, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {"pyerrors.obs.Obs.expand_deltas": {"tf": 1}}, "df": 1, "s": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "g": {"docs": {"pyerrors.obs.expand_deltas_for_merge": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}}, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "t": {"0": {"docs": {"pyerrors.input.openQCD.extract_t0": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}}}}}}}}, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"pyerrors.fits.error_band": {"tf": 1}}, "df": 1}}}}}}}}}, "_": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {"pyerrors.obs.Obs.e_names": {"tf": 1}}, "df": 1}}, "_": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.obs.Obs.e_n_dtauint": {"tf": 1}}, "df": 1}}}}}}}, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "i": 
{"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.obs.Obs.e_n_tauint": {"tf": 1}}, "df": 1}}}}}}}}, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.obs.Obs.e_content": {"tf": 1}}, "df": 1}}}}, "d": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {"pyerrors.obs.Obs.e_ddvalue": {"tf": 1}}, "df": 1}}}}}, "r": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "o": {"docs": {"pyerrors.obs.Obs.e_drho": {"tf": 1}}, "df": 1}}}, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.obs.Obs.e_dtauint": {"tf": 1}}, "df": 1}}}}}}, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {"pyerrors.obs.Obs.e_dvalue": {"tf": 1}}, "df": 1}}}}}, "r": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "o": {"docs": {"pyerrors.obs.Obs.e_rho": {"tf": 1}}, "df": 1}}}, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.obs.Obs.e_tauint": {"tf": 1}}, "df": 1}}}}}}, "w": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "s": {"docs": {"pyerrors.obs.Obs.e_windowsize": {"tf": 1}}, "df": 1}}}}}}}}}, "t": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {"pyerrors.correlators.Corr.T_symmetry": {"tf": 1}}, "df": 1}}}}}}}}}, "a": {"docs": {}, "df": 0, "n": {"docs": {"pyerrors.correlators.Corr.tan": {"tf": 1}, "pyerrors.obs.Obs.tan": {"tf": 1}}, "df": 2, "h": {"docs": {"pyerrors.correlators.Corr.tanh": {"tf": 1}, "pyerrors.obs.Obs.tanh": {"tf": 1}}, "df": 2}}, "u": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "p": {"docs": {"pyerrors.obs.Obs.tau_exp": {"tf": 1}}, "df": 1, "_": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "b": {"docs": {"pyerrors.obs.Obs.tau_exp_global": {"tf": 1}}, "df": 1}}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.obs.Obs.tau_exp_dict": {"tf": 1}}, "df": 1}}}}}}}}}}, "g": {"docs": {"pyerrors.obs.Obs.tag": {"tf": 1}, "pyerrors.obs.CObs.tag": {"tf": 1}}, "df": 2}}, "o": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"pyerrors.fits.total_least_squares": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {"pyerrors.correlators.Corr.deriv": {"tf": 1}}, "df": 1, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "j": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": 
{"pyerrors.jackknifing.derived_jack": {"tf": 1}}, "df": 1}}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "y": {"docs": {"pyerrors.linalg.derived_array": {"tf": 1}}, "df": 1}}}}}, "o": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "v": {"docs": {"pyerrors.obs.derived_observable": {"tf": 1}}, "df": 1}}}}}}}}}}}}, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {"pyerrors.obs.Obs.deltas": {"tf": 1}}, "df": 1}}}, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {"pyerrors.obs.Obs.details": {"tf": 1}}, "df": 1}}}}}, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": {"pyerrors.correlators.Corr.dump": {"tf": 1}, "pyerrors.jackknifing.Jack.dump": {"tf": 1}, "pyerrors.obs.Obs.dump": {"tf": 1}}, "df": 3, "_": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "j": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.obs.dump_object": {"tf": 1}}, "df": 1}}}}}}}}}}, "i": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {"pyerrors.dirac": {"tf": 1}, "pyerrors.dirac.Grid_gamma": {"tf": 1}}, "df": 2}}}}, "d": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {"pyerrors.obs.Obs.ddvalue": {"tf": 1}}, "df": 1}}}}}, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {"pyerrors.obs.Obs.dvalue": {"tf": 1}}, "df": 1}}}}}, "m": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "f": {"docs": {"pyerrors.correlators.Corr.m_eff": {"tf": 1}}, "df": 1}}}}, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "c": {"docs": {"pyerrors.input.misc": {"tf": 1}, "pyerrors.input.misc.read_pbp": {"tf": 1}, "pyerrors.misc": {"tf": 1}, "pyerrors.misc.gen_correlated_data": {"tf": 1}}, "df": 4}}}, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {"pyerrors.linalg.matmul": {"tf": 1}}, "df": 1}}}, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {"pyerrors.mpm.matrix_pencil_method": {"tf": 1}}, "df": 1, "_": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "d": {"docs": {"pyerrors.mpm.matrix_pencil_method_old": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}}}}}, "p": {"docs": {}, "df": 0, "m": {"docs": {"pyerrors.mpm": {"tf": 1}, "pyerrors.mpm.matrix_pencil_method": {"tf": 1}, "pyerrors.mpm.matrix_pencil_method_old": {"tf": 1}}, "df": 3}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "x": {"docs": {"pyerrors.obs.merge_idx": {"tf": 1}}, "df": 1}}}, "o": {"docs": {}, "df": 0, "b": {"docs": {"pyerrors.obs.merge_obs": {"tf": 1}}, "df": 1}}}}}}}}, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": 
{"docs": {"pyerrors.correlators.Corr.fit": {"tf": 1}, "pyerrors.fits": {"tf": 1}, "pyerrors.fits.Fit_result": {"tf": 1}, "pyerrors.fits.Fit_result.__init__": {"tf": 1}, "pyerrors.fits.Fit_result.gamma_method": {"tf": 1}, "pyerrors.fits.least_squares": {"tf": 1}, "pyerrors.fits.standard_fit": {"tf": 1}, "pyerrors.fits.odr_fit": {"tf": 1}, "pyerrors.fits.total_least_squares": {"tf": 1}, "pyerrors.fits.prior_fit": {"tf": 1}, "pyerrors.fits.fit_lin": {"tf": 1}, "pyerrors.fits.qqplot": {"tf": 1}, "pyerrors.fits.residual_plot": {"tf": 1}, "pyerrors.fits.covariance_matrix": {"tf": 1}, "pyerrors.fits.error_band": {"tf": 1}, "pyerrors.fits.ks_test": {"tf": 1}, "pyerrors.fits.fit_general": {"tf": 1}}, "df": 17, "_": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.fits.Fit_result": {"tf": 1}, "pyerrors.fits.Fit_result.__init__": {"tf": 1}, "pyerrors.fits.Fit_result.gamma_method": {"tf": 1}}, "df": 3}}}}}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {"pyerrors.fits.fit_lin": {"tf": 1}}, "df": 1}}}, "g": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"pyerrors.fits.fit_general": {"tf": 1}}, "df": 1}}}}}}}, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "p": {"docs": {"pyerrors.obs.Obs.filter_eps": {"tf": 1}}, "df": 1}}, "z": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {"pyerrors.obs.filter_zeroes": {"tf": 1}}, "df": 1}}}}}}}}}, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.roots.find_root": {"tf": 1}}, "df": 1}}}}}}}}}, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "g": {"docs": {"pyerrors.correlators.Corr.log": {"tf": 1}, "pyerrors.obs.Obs.log": {"tf": 1}}, "df": 2}, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "j": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.obs.load_object": {"tf": 1}}, "df": 1}}}}}}}}}}, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"pyerrors.fits.least_squares": {"tf": 1}}, "df": 1}}}}}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "g": {"docs": {"pyerrors.linalg": {"tf": 1}, "pyerrors.linalg.derived_array": {"tf": 1}, "pyerrors.linalg.matmul": {"tf": 1}, "pyerrors.linalg.inv": {"tf": 1}, "pyerrors.linalg.cholesky": {"tf": 1}, "pyerrors.linalg.scalar_mat_op": {"tf": 1}, "pyerrors.linalg.eigh": {"tf": 1}, "pyerrors.linalg.eig": {"tf": 1}, "pyerrors.linalg.pinv": {"tf": 1}, "pyerrors.linalg.svd": {"tf": 1}, "pyerrors.linalg.slogdet": {"tf": 1}, "pyerrors.linalg.grad_eig": {"tf": 1}}, "df": 12}}}}}}, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.fits.odr_fit": {"tf": 1}}, "df": 1}}}}}}, "p": 
{"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "q": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "d": {"docs": {"pyerrors.input.openQCD": {"tf": 1}, "pyerrors.input.openQCD.read_rwms": {"tf": 1}, "pyerrors.input.openQCD.extract_t0": {"tf": 1}}, "df": 3}}}}}}, "b": {"docs": {"pyerrors.obs": {"tf": 1}, "pyerrors.obs.Obs": {"tf": 1.4142135623730951}, "pyerrors.obs.Obs.__init__": {"tf": 1.4142135623730951}, "pyerrors.obs.Obs.S_global": {"tf": 1.4142135623730951}, "pyerrors.obs.Obs.S_dict": {"tf": 1.4142135623730951}, "pyerrors.obs.Obs.tau_exp_global": {"tf": 1.4142135623730951}, "pyerrors.obs.Obs.tau_exp_dict": {"tf": 1.4142135623730951}, "pyerrors.obs.Obs.N_sigma_global": {"tf": 1.4142135623730951}, "pyerrors.obs.Obs.filter_eps": {"tf": 1.4142135623730951}, "pyerrors.obs.Obs.names": {"tf": 1.4142135623730951}, "pyerrors.obs.Obs.shape": {"tf": 1.4142135623730951}, "pyerrors.obs.Obs.r_values": {"tf": 1.4142135623730951}, "pyerrors.obs.Obs.deltas": {"tf": 1.4142135623730951}, "pyerrors.obs.Obs.idl": {"tf": 1.4142135623730951}, "pyerrors.obs.Obs.is_merged": {"tf": 1.4142135623730951}, "pyerrors.obs.Obs.N": {"tf": 1.4142135623730951}, "pyerrors.obs.Obs.ddvalue": {"tf": 1.4142135623730951}, "pyerrors.obs.Obs.reweighted": {"tf": 1.4142135623730951}, "pyerrors.obs.Obs.tag": {"tf": 1.4142135623730951}, "pyerrors.obs.Obs.value": {"tf": 1.4142135623730951}, "pyerrors.obs.Obs.dvalue": {"tf": 1.4142135623730951}, "pyerrors.obs.Obs.e_names": {"tf": 1.4142135623730951}, "pyerrors.obs.Obs.e_content": {"tf": 1.4142135623730951}, "pyerrors.obs.Obs.expand_deltas": {"tf": 1.4142135623730951}, "pyerrors.obs.Obs.calc_gamma": {"tf": 1.4142135623730951}, "pyerrors.obs.Obs.gamma_method": {"tf": 1.4142135623730951}, "pyerrors.obs.Obs.print": {"tf": 1.4142135623730951}, "pyerrors.obs.Obs.details": {"tf": 1.4142135623730951}, "pyerrors.obs.Obs.is_zero_within_error": {"tf": 1.4142135623730951}, "pyerrors.obs.Obs.is_zero": {"tf": 1.4142135623730951}, "pyerrors.obs.Obs.plot_tauint": {"tf": 1.4142135623730951}, "pyerrors.obs.Obs.plot_rho": {"tf": 1.4142135623730951}, "pyerrors.obs.Obs.plot_rep_dist": {"tf": 1.4142135623730951}, "pyerrors.obs.Obs.plot_history": {"tf": 1.4142135623730951}, "pyerrors.obs.Obs.plot_piechart": {"tf": 1.4142135623730951}, "pyerrors.obs.Obs.dump": {"tf": 1.4142135623730951}, "pyerrors.obs.Obs.sqrt": {"tf": 1.4142135623730951}, "pyerrors.obs.Obs.log": {"tf": 1.4142135623730951}, "pyerrors.obs.Obs.exp": {"tf": 1.4142135623730951}, "pyerrors.obs.Obs.sin": {"tf": 1.4142135623730951}, "pyerrors.obs.Obs.cos": {"tf": 1.4142135623730951}, "pyerrors.obs.Obs.tan": {"tf": 1.4142135623730951}, "pyerrors.obs.Obs.arcsin": {"tf": 1.4142135623730951}, "pyerrors.obs.Obs.arccos": {"tf": 1.4142135623730951}, "pyerrors.obs.Obs.arctan": {"tf": 1.4142135623730951}, "pyerrors.obs.Obs.sinh": {"tf": 1.4142135623730951}, "pyerrors.obs.Obs.cosh": {"tf": 1.4142135623730951}, "pyerrors.obs.Obs.tanh": {"tf": 1.4142135623730951}, "pyerrors.obs.Obs.arcsinh": {"tf": 1.4142135623730951}, "pyerrors.obs.Obs.arccosh": {"tf": 1.4142135623730951}, "pyerrors.obs.Obs.arctanh": {"tf": 1.4142135623730951}, "pyerrors.obs.Obs.sinc": {"tf": 1.4142135623730951}, "pyerrors.obs.Obs.N_sigma": {"tf": 1.4142135623730951}, "pyerrors.obs.Obs.S": {"tf": 1.4142135623730951}, "pyerrors.obs.Obs.e_ddvalue": {"tf": 1.4142135623730951}, "pyerrors.obs.Obs.e_drho": {"tf": 1.4142135623730951}, "pyerrors.obs.Obs.e_dtauint": {"tf": 1.4142135623730951}, "pyerrors.obs.Obs.e_dvalue": {"tf": 1.4142135623730951}, 
"pyerrors.obs.Obs.e_n_dtauint": {"tf": 1.4142135623730951}, "pyerrors.obs.Obs.e_n_tauint": {"tf": 1.4142135623730951}, "pyerrors.obs.Obs.e_rho": {"tf": 1.4142135623730951}, "pyerrors.obs.Obs.e_tauint": {"tf": 1.4142135623730951}, "pyerrors.obs.Obs.e_windowsize": {"tf": 1.4142135623730951}, "pyerrors.obs.Obs.tau_exp": {"tf": 1.4142135623730951}, "pyerrors.obs.CObs": {"tf": 1}, "pyerrors.obs.CObs.__init__": {"tf": 1}, "pyerrors.obs.CObs.tag": {"tf": 1}, "pyerrors.obs.CObs.real": {"tf": 1}, "pyerrors.obs.CObs.imag": {"tf": 1}, "pyerrors.obs.CObs.gamma_method": {"tf": 1}, "pyerrors.obs.CObs.is_zero": {"tf": 1}, "pyerrors.obs.CObs.conjugate": {"tf": 1}, "pyerrors.obs.merge_idx": {"tf": 1}, "pyerrors.obs.expand_deltas_for_merge": {"tf": 1}, "pyerrors.obs.filter_zeroes": {"tf": 1}, "pyerrors.obs.derived_observable": {"tf": 1}, "pyerrors.obs.reduce_deltas": {"tf": 1}, "pyerrors.obs.reweight": {"tf": 1}, "pyerrors.obs.correlate": {"tf": 1}, "pyerrors.obs.covariance": {"tf": 1}, "pyerrors.obs.covariance2": {"tf": 1}, "pyerrors.obs.covariance3": {"tf": 1}, "pyerrors.obs.pseudo_Obs": {"tf": 1}, "pyerrors.obs.dump_object": {"tf": 1}, "pyerrors.obs.load_object": {"tf": 1}, "pyerrors.obs.merge_obs": {"tf": 1}}, "df": 86}}, "q": {"docs": {}, "df": 0, "q": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.fits.qqplot": {"tf": 1}}, "df": 1}}}}}}, "k": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.fits.ks_test": {"tf": 1}}, "df": 1}}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.input": {"tf": 1}, "pyerrors.input.bdio": {"tf": 1}, "pyerrors.input.bdio.read_ADerrors": {"tf": 1}, "pyerrors.input.bdio.write_ADerrors": {"tf": 1}, "pyerrors.input.bdio.read_mesons": {"tf": 1}, "pyerrors.input.bdio.read_dSdm": {"tf": 1}, "pyerrors.input.hadrons": {"tf": 1}, "pyerrors.input.hadrons.read_meson_hd5": {"tf": 1}, "pyerrors.input.hadrons.read_ExternalLeg_hd5": {"tf": 1}, "pyerrors.input.hadrons.read_Bilinear_hd5": {"tf": 1}, "pyerrors.input.misc": {"tf": 1}, "pyerrors.input.misc.read_pbp": {"tf": 1}, "pyerrors.input.openQCD": {"tf": 1}, "pyerrors.input.openQCD.read_rwms": {"tf": 1}, "pyerrors.input.openQCD.extract_t0": {"tf": 1}, "pyerrors.input.sfcf": {"tf": 1}, "pyerrors.input.sfcf.read_sfcf": {"tf": 1}, "pyerrors.input.sfcf.read_sfcf_c": {"tf": 1}, "pyerrors.input.sfcf.read_qtop": {"tf": 1}}, "df": 19}}}, "v": {"docs": {"pyerrors.linalg.inv": {"tf": 1}}, "df": 1, "_": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "g": {"docs": {"pyerrors.npr.inv_propagator": {"tf": 1}}, "df": 1}}}}}}}}}, "d": {"docs": {}, "df": 0, "l": {"docs": {"pyerrors.obs.Obs.idl": {"tf": 1}}, "df": 1}}, "s": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "g": {"docs": {"pyerrors.obs.Obs.is_merged": {"tf": 1}}, "df": 1}}}}, "z": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {"pyerrors.obs.Obs.is_zero": {"tf": 1}, "pyerrors.obs.CObs.is_zero": {"tf": 1}}, "df": 2, "_": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "i": {"docs": {}, 
"df": 0, "n": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"pyerrors.obs.Obs.is_zero_within_error": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "g": {"docs": {"pyerrors.obs.CObs.imag": {"tf": 1}}, "df": 1}}}}, "b": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {"pyerrors.input.bdio": {"tf": 1}, "pyerrors.input.bdio.read_ADerrors": {"tf": 1}, "pyerrors.input.bdio.write_ADerrors": {"tf": 1}, "pyerrors.input.bdio.read_mesons": {"tf": 1}, "pyerrors.input.bdio.read_dSdm": {"tf": 1}}, "df": 5}}}}, "w": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"pyerrors.input.bdio.write_ADerrors": {"tf": 1}}, "df": 1}}}}}}}}}}}}}, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"pyerrors.input.hadrons": {"tf": 1}, "pyerrors.input.hadrons.read_meson_hd5": {"tf": 1}, "pyerrors.input.hadrons.read_ExternalLeg_hd5": {"tf": 1}, "pyerrors.input.hadrons.read_Bilinear_hd5": {"tf": 1}}, "df": 4}}}}}}, "j": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {"pyerrors.jackknifing.Jack": {"tf": 1}, "pyerrors.jackknifing.Jack.__init__": {"tf": 1}, "pyerrors.jackknifing.Jack.print": {"tf": 1}, "pyerrors.jackknifing.Jack.plot_tauint": {"tf": 1}, "pyerrors.jackknifing.Jack.plot_history": {"tf": 1}, "pyerrors.jackknifing.Jack.dump": {"tf": 1}}, "df": 6, "k": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {"pyerrors.jackknifing": {"tf": 1}, "pyerrors.jackknifing.Jack": {"tf": 1}, "pyerrors.jackknifing.Jack.__init__": {"tf": 1}, "pyerrors.jackknifing.Jack.print": {"tf": 1}, "pyerrors.jackknifing.Jack.plot_tauint": {"tf": 1}, "pyerrors.jackknifing.Jack.plot_history": {"tf": 1}, "pyerrors.jackknifing.Jack.dump": {"tf": 1}, "pyerrors.jackknifing.generate_jack": {"tf": 1}, "pyerrors.jackknifing.derived_jack": {"tf": 1}}, "df": 9}}}}}}}}, "n": {"docs": {"pyerrors.obs.Obs.N": {"tf": 1}}, "df": 1, "p": {"docs": {}, "df": 0, "r": {"docs": {"pyerrors.npr": {"tf": 1}, "pyerrors.npr.Npr_matrix": {"tf": 1}, "pyerrors.npr.Npr_matrix.__init__": {"tf": 1}, "pyerrors.npr.Npr_matrix.g5H": {"tf": 1}, "pyerrors.npr.inv_propagator": {"tf": 1}, "pyerrors.npr.Zq": {"tf": 1}}, "df": 6, "_": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "x": {"docs": {"pyerrors.npr.Npr_matrix": {"tf": 1}, "pyerrors.npr.Npr_matrix.__init__": {"tf": 1}, "pyerrors.npr.Npr_matrix.g5H": {"tf": 1}}, "df": 3}}}}}}}}}, "_": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {"pyerrors.obs.Obs.N_sigma": {"tf": 1}}, "df": 1, "_": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "b": {"docs": {"pyerrors.obs.Obs.N_sigma_global": {"tf": 1}}, "df": 1}}}}}}}}}}}, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": 
{"pyerrors.obs.Obs.names": {"tf": 1}}, "df": 1}}}}, "z": {"docs": {}, "df": 0, "q": {"docs": {"pyerrors.npr.Zq": {"tf": 1}}, "df": 1}}, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {"pyerrors.obs.Obs.value": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"pyerrors.version": {"tf": 1}}, "df": 1}}}}}}}}}, "doc": {"root": {"0": {"docs": {"pyerrors.fits.least_squares": {"tf": 1.4142135623730951}, "pyerrors.input.openQCD.read_rwms": {"tf": 1}, "pyerrors.input.openQCD.extract_t0": {"tf": 1}, "pyerrors.input.sfcf.read_qtop": {"tf": 1}, "pyerrors.npr.Npr_matrix": {"tf": 1}, "pyerrors.obs.Obs": {"tf": 2}, "pyerrors.obs.Obs.expand_deltas": {"tf": 1}, "pyerrors.obs.Obs.gamma_method": {"tf": 1.7320508075688772}, "pyerrors.obs.expand_deltas_for_merge": {"tf": 1}}, "df": 9, "e": {"docs": {}, "df": 0, "+": {"0": {"0": {"0": {"docs": {"pyerrors.npr.Npr_matrix": {"tf": 1.4142135623730951}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}}}, "1": {"0": {"0": {"0": {"docs": {"pyerrors.obs.pseudo_Obs": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {"pyerrors.mpm.matrix_pencil_method_old": {"tf": 1}}, "df": 1}, "2": {"docs": {}, "df": 0, "x": {"1": {"2": {"docs": {"pyerrors.npr.Npr_matrix.g5H": {"tf": 1}, "pyerrors.npr.inv_propagator": {"tf": 1}, "pyerrors.npr.Zq": {"tf": 1}}, "df": 3}, "docs": {}, "df": 0}, "docs": {}, "df": 0}}, "6": {"docs": {"pyerrors.input.hadrons.read_ExternalLeg_hd5": {"tf": 1}, "pyerrors.input.hadrons.read_Bilinear_hd5": {"tf": 1}}, "df": 2}, "7": {"docs": {"pyerrors.mpm.matrix_pencil_method": {"tf": 1}}, "df": 1}, "9": {"9": {"0": {"docs": {"pyerrors.mpm.matrix_pencil_method": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {"pyerrors.correlators.Corr.T_symmetry": {"tf": 1.4142135623730951}, "pyerrors.correlators.Corr.m_eff": {"tf": 1.4142135623730951}, "pyerrors.input.openQCD.extract_t0": {"tf": 1}, "pyerrors.mpm.matrix_pencil_method": {"tf": 1}, "pyerrors.mpm.matrix_pencil_method_old": {"tf": 1}, "pyerrors.npr.Npr_matrix": {"tf": 1}, "pyerrors.obs.Obs": {"tf": 1}, "pyerrors.obs.Obs.calc_gamma": {"tf": 1}, "pyerrors.obs.Obs.gamma_method": {"tf": 1}}, "df": 9, "*": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "s": {"docs": {"pyerrors.npr.Npr_matrix": {"tf": 1}}, "df": 1}}}}}}}, "2": {"docs": {"pyerrors": {"tf": 1.4142135623730951}, "pyerrors.fits.least_squares": {"tf": 1}, "pyerrors.fits.total_least_squares": {"tf": 1}, "pyerrors.input.openQCD.read_rwms": {"tf": 1}, "pyerrors.input.openQCD.extract_t0": {"tf": 1}, "pyerrors.mpm.matrix_pencil_method": {"tf": 1}, "pyerrors.npr.Npr_matrix": {"tf": 2.8284271247461903}, "pyerrors.obs.Obs": {"tf": 1}, "pyerrors.obs.Obs.gamma_method": {"tf": 1}, "pyerrors.obs.covariance": {"tf": 1}, "pyerrors.obs.covariance2": {"tf": 1}, "pyerrors.obs.covariance3": {"tf": 1}}, "df": 12, "*": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "(": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, ")": {"docs": {}, "df": 0, "/": {"3": {"docs": {"pyerrors.mpm.matrix_pencil_method": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}}}}}}}}}}}}, "3": {"2": {"3": {"docs": {"pyerrors.npr.Npr_matrix": {"tf": 1}}, "df": 1}, "docs": 
{}, "df": 0}, "8": {"docs": {"pyerrors.mpm.matrix_pencil_method": {"tf": 1}}, "df": 1}, "9": {"docs": {"pyerrors": {"tf": 2.449489742783178}, "pyerrors.npr.Npr_matrix": {"tf": 1.4142135623730951}}, "df": 2}, "docs": {"pyerrors.input.openQCD.extract_t0": {"tf": 1}, "pyerrors.npr.Npr_matrix": {"tf": 2}}, "df": 2, "x": {"3": {"docs": {"pyerrors.input.hadrons.read_ExternalLeg_hd5": {"tf": 1}, "pyerrors.input.hadrons.read_Bilinear_hd5": {"tf": 1}}, "df": 2}, "docs": {}, "df": 0}}, "4": {"docs": {"pyerrors.correlators.Corr.show": {"tf": 1}, "pyerrors.fits.least_squares": {"tf": 1}, "pyerrors.npr.Npr_matrix": {"tf": 1.4142135623730951}}, "df": 3, "x": {"4": {"docs": {"pyerrors.input.hadrons.read_ExternalLeg_hd5": {"tf": 1}, "pyerrors.input.hadrons.read_Bilinear_hd5": {"tf": 1}}, "df": 2}, "docs": {}, "df": 0}}, "5": {"0": {"0": {"docs": {}, "df": 0, "(": {"4": {"0": {"docs": {"pyerrors.fits.least_squares": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}}, "docs": {}, "df": 0}, "3": {"8": {"0": {"docs": {"pyerrors.correlators.Corr.m_eff": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "4": {"8": {"docs": {}, "df": 0, "(": {"2": {"3": {"docs": {"pyerrors.fits.least_squares": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}}, "docs": {}, "df": 0}, "docs": {"pyerrors.input.openQCD.extract_t0": {"tf": 1}}, "df": 1, "(": {"0": {"docs": {"pyerrors.fits.least_squares": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "e": {"docs": {"pyerrors.npr.Npr_matrix": {"tf": 1}}, "df": 1}}, "8": {"1": {"4": {"docs": {"pyerrors.mpm.matrix_pencil_method": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "2": {"4": {"docs": {"pyerrors.mpm.matrix_pencil_method": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {"pyerrors.correlators.Corr.show": {"tf": 1}, "pyerrors.npr.Npr_matrix": {"tf": 1.4142135623730951}}, "df": 2}, "9": {"docs": {"pyerrors.input.hadrons.read_ExternalLeg_hd5": {"tf": 1}, "pyerrors.input.hadrons.read_Bilinear_hd5": {"tf": 1}}, "df": 2}, "docs": {}, "df": 0, "p": {"docs": {"pyerrors.mpm.matrix_pencil_method": {"tf": 1.4142135623730951}, "pyerrors.mpm.matrix_pencil_method_old": {"tf": 1.7320508075688772}}, "df": 2, "y": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"pyerrors": {"tf": 2.6457513110645907}, "pyerrors.obs.Obs": {"tf": 1}}, "df": 2}}}}}, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"pyerrors": {"tf": 1}}, "df": 1}}}}}, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "g": {"docs": {"pyerrors": {"tf": 1}}, "df": 1}}}}, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.correlators.Corr.reweight": {"tf": 1}, "pyerrors.correlators.Corr.m_eff": {"tf": 1}, "pyerrors.correlators.Corr.show": {"tf": 1}, "pyerrors.fits.Fit_result": {"tf": 1}, "pyerrors.fits.Fit_result.gamma_method": {"tf": 1}, "pyerrors.fits.least_squares": {"tf": 1.7320508075688772}, "pyerrors.fits.total_least_squares": {"tf": 1.7320508075688772}, "pyerrors.fits.error_band": {"tf": 1}, "pyerrors.fits.fit_general": {"tf": 1.7320508075688772}, "pyerrors.input.bdio.read_ADerrors": {"tf": 1}, "pyerrors.input.bdio.write_ADerrors": {"tf": 1}, "pyerrors.input.bdio.read_mesons": {"tf": 1}, "pyerrors.input.bdio.read_dSdm": {"tf": 1}, "pyerrors.input.hadrons.read_meson_hd5": {"tf": 1}, 
"pyerrors.input.hadrons.read_ExternalLeg_hd5": {"tf": 1}, "pyerrors.input.hadrons.read_Bilinear_hd5": {"tf": 1}, "pyerrors.input.openQCD.extract_t0": {"tf": 1}, "pyerrors.jackknifing.derived_jack": {"tf": 1}, "pyerrors.linalg.derived_array": {"tf": 1}, "pyerrors.mpm.matrix_pencil_method": {"tf": 1.4142135623730951}, "pyerrors.mpm.matrix_pencil_method_old": {"tf": 1.4142135623730951}, "pyerrors.npr.Npr_matrix": {"tf": 1.4142135623730951}, "pyerrors.obs.Obs.expand_deltas": {"tf": 1}, "pyerrors.obs.Obs.calc_gamma": {"tf": 1}, "pyerrors.obs.Obs.gamma_method": {"tf": 1}, "pyerrors.obs.merge_idx": {"tf": 1}, "pyerrors.obs.expand_deltas_for_merge": {"tf": 1}, "pyerrors.obs.filter_zeroes": {"tf": 1.4142135623730951}, "pyerrors.obs.derived_observable": {"tf": 1}, "pyerrors.obs.reduce_deltas": {"tf": 1}, "pyerrors.obs.reweight": {"tf": 1}, "pyerrors.roots.find_root": {"tf": 1}}, "df": 32}}}}, "t": {"docs": {"pyerrors.input.sfcf.read_sfcf": {"tf": 1}, "pyerrors.input.sfcf.read_sfcf_c": {"tf": 1}, "pyerrors.npr.Npr_matrix": {"tf": 1.4142135623730951}, "pyerrors.obs.CObs.gamma_method": {"tf": 1}, "pyerrors.obs.CObs.is_zero": {"tf": 1}}, "df": 5, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"pyerrors.correlators.Corr.T_symmetry": {"tf": 1.7320508075688772}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {"pyerrors.correlators.Corr.T_symmetry": {"tf": 1}}, "df": 1}}}}, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {"pyerrors.correlators.Corr.T_symmetry": {"tf": 1}}, "df": 1}}}}, "t": {"docs": {}, "df": 0, "h": {"docs": {"pyerrors.correlators.Corr.show": {"tf": 1}, "pyerrors.input.bdio.read_ADerrors": {"tf": 1.4142135623730951}, "pyerrors.input.bdio.write_ADerrors": {"tf": 1.4142135623730951}, "pyerrors.input.bdio.read_mesons": {"tf": 1.4142135623730951}, "pyerrors.input.bdio.read_dSdm": {"tf": 1.4142135623730951}, "pyerrors.input.hadrons.read_meson_hd5": {"tf": 1.4142135623730951}, "pyerrors.input.hadrons.read_ExternalLeg_hd5": {"tf": 1.4142135623730951}, "pyerrors.input.hadrons.read_Bilinear_hd5": {"tf": 1.4142135623730951}, "pyerrors.input.openQCD.extract_t0": {"tf": 1.4142135623730951}, "pyerrors.jackknifing.Jack.dump": {"tf": 1.4142135623730951}, "pyerrors.obs.Obs.dump": {"tf": 1.4142135623730951}, "pyerrors.obs.dump_object": {"tf": 1.4142135623730951}}, "df": 12}}, "s": {"docs": {}, "df": 0, "s": {"docs": {"pyerrors.roots.find_root": {"tf": 1}}, "df": 1}}}, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "g": {"docs": {"pyerrors": {"tf": 1.4142135623730951}, "pyerrors.npr.inv_propagator": {"tf": 1}, "pyerrors.npr.Zq": {"tf": 1.4142135623730951}}, "df": 3}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {"pyerrors.jackknifing.Jack.print": {"tf": 1}, "pyerrors.npr.Npr_matrix": {"tf": 1}, "pyerrors.obs.Obs.gamma_method": {"tf": 1}, "pyerrors.obs.Obs.details": {"tf": 1}}, "df": 4}}, "l": {"docs": {}, "df": 0, "i": {"docs": {"pyerrors.obs.Obs.is_zero_within_error": {"tf": 1}}, "df": 1}}}}}, "v": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {"pyerrors.fits.least_squares": {"tf": 1}, "pyerrors.fits.total_least_squares": {"tf": 1}, "pyerrors.fits.fit_general": {"tf": 1}}, "df": 3}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors": {"tf": 1}, "pyerrors.correlators.Corr.fit": {"tf": 1}, "pyerrors.fits.least_squares": {"tf": 1}, "pyerrors.fits.total_least_squares": {"tf": 
1}, "pyerrors.jackknifing.Jack.print": {"tf": 1}, "pyerrors.mpm.matrix_pencil_method_old": {"tf": 1}}, "df": 6}}, "o": {"docs": {}, "df": 0, "r": {"docs": {"pyerrors.fits.least_squares": {"tf": 1.4142135623730951}}, "df": 1}}, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {"pyerrors.obs.correlate": {"tf": 1}}, "df": 1}}}}}, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"pyerrors.correlators.Corr.fit": {"tf": 1}, "pyerrors.correlators.Corr.set_prange": {"tf": 1}}, "df": 2}}, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {"pyerrors.jackknifing.derived_jack": {"tf": 1}, "pyerrors.obs.derived_observable": {"tf": 1}}, "df": 2}}}}}, "e": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {"pyerrors.fits.least_squares": {"tf": 1}}, "df": 1}}}}, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "x": {"docs": {"pyerrors.input.openQCD.extract_t0": {"tf": 1.4142135623730951}}, "df": 1}, "l": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.mpm.matrix_pencil_method_old": {"tf": 1}}, "df": 1}}}, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"pyerrors.obs.filter_zeroes": {"tf": 1}}, "df": 1}}}}}}, "c": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {"pyerrors.obs.Obs.is_zero": {"tf": 1}, "pyerrors.obs.CObs.is_zero": {"tf": 1}}, "df": 2}}}}}, "e": {"docs": {"pyerrors": {"tf": 2.449489742783178}, "pyerrors.correlators.Corr": {"tf": 1}}, "df": 2, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {"pyerrors.correlators.Corr.roll": {"tf": 1}}, "df": 1, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "y": {"docs": {"pyerrors.correlators.Corr.m_eff": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}}, "f": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "m": {"docs": {"pyerrors.fits.least_squares": {"tf": 1}, "pyerrors.fits.total_least_squares": {"tf": 1}, "pyerrors.fits.fit_lin": {"tf": 1}, "pyerrors.fits.ks_test": {"tf": 1}, "pyerrors.fits.fit_general": {"tf": 1}, "pyerrors.mpm.matrix_pencil_method_old": {"tf": 1}, "pyerrors.obs.Obs": {"tf": 1}}, "df": 7}}}}}, "n": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "s": {"docs": {"pyerrors.linalg.pinv": {"tf": 1}}, "df": 1}}}, "c": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {"pyerrors.mpm.matrix_pencil_method": {"tf": 1.7320508075688772}, "pyerrors.mpm.matrix_pencil_method_old": {"tf": 1.7320508075688772}}, "df": 2}}}}}, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.correlators.Corr.show": {"tf": 1.4142135623730951}, "pyerrors.fits.least_squares": {"tf": 1.4142135623730951}, "pyerrors.fits.qqplot": {"tf": 1}, "pyerrors.fits.residual_plot": {"tf": 1}, "pyerrors.obs.Obs.plot_tauint": {"tf": 1}, "pyerrors.obs.Obs.plot_rho": {"tf": 1}, "pyerrors.obs.Obs.plot_rep_dist": {"tf": 1}, "pyerrors.obs.Obs.plot_history": {"tf": 1}, "pyerrors.obs.Obs.plot_piechart": {"tf": 1}, "pyerrors.obs.covariance3": {"tf": 1.4142135623730951}}, "df": 10, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {"pyerrors.correlators.Corr.plottable": {"tf": 1}}, "df": 1}}}}}, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "u": {"docs": 
{"pyerrors.correlators.Corr.plateau": {"tf": 1}}, "df": 1}, "a": {"docs": {}, "df": 0, "u": {"docs": {"pyerrors.correlators.Corr.plateau": {"tf": 1.7320508075688772}, "pyerrors.correlators.Corr.show": {"tf": 1.4142135623730951}}, "df": 2, "_": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"pyerrors.correlators.Corr.plateau": {"tf": 1}}, "df": 1}}}}}}}}}, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {"pyerrors.fits.fit_general": {"tf": 1}}, "df": 1}}}}}, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.input.openQCD.extract_t0": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {"pyerrors.correlators.Corr.dump": {"tf": 1}}, "df": 1}}, "l": {"docs": {"pyerrors.jackknifing.Jack.dump": {"tf": 1}, "pyerrors.obs.Obs.dump": {"tf": 1}, "pyerrors.obs.dump_object": {"tf": 1}, "pyerrors.obs.load_object": {"tf": 1}}, "df": 4}}}, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.obs.Obs.plot_piechart": {"tf": 1}}, "df": 1}}}}}}}, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {"pyerrors.fits.least_squares": {"tf": 1}, "pyerrors.mpm.matrix_pencil_method": {"tf": 1}, "pyerrors.obs.Obs.calc_gamma": {"tf": 1}, "pyerrors.obs.merge_obs": {"tf": 1}}, "df": 4}}}}, "t": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "x": {"docs": {"pyerrors.input.openQCD.read_rwms": {"tf": 1.4142135623730951}}, "df": 1}}}}, "i": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.obs.Obs.gamma_method": {"tf": 1}, "pyerrors.obs.covariance": {"tf": 1}, "pyerrors.obs.covariance2": {"tf": 1}, "pyerrors.obs.covariance3": {"tf": 1}}, "df": 4}}}, "w": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {"pyerrors.fits.least_squares": {"tf": 1}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.input.hadrons.read_meson_hd5": {"tf": 1}, "pyerrors.input.openQCD.extract_t0": {"tf": 1}, "pyerrors.npr.Npr_matrix": {"tf": 1}}, "df": 3}}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "o": {"docs": {"pyerrors.obs.pseudo_Obs": {"tf": 1}}, "df": 1, "s": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"pyerrors.input.hadrons.read_meson_hd5": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {"pyerrors.linalg.pinv": {"tf": 1}}, "df": 1}}}}}}}}}}}, "b": {"docs": {}, "df": 0, "p": {"docs": {"pyerrors.input.misc.read_pbp": {"tf": 1}}, "df": 1}}}, "e": {"docs": {"pyerrors.correlators.Corr.m_eff": {"tf": 1}, "pyerrors.correlators.Corr.show": {"tf": 1}, "pyerrors.fits.least_squares": {"tf": 1}, "pyerrors.input.openQCD.read_rwms": {"tf": 1}, "pyerrors.jackknifing.derived_jack": {"tf": 1.4142135623730951}, "pyerrors.linalg.derived_array": {"tf": 1}, "pyerrors.npr.Npr_matrix": {"tf": 2.23606797749979}, 
"pyerrors.obs.derived_observable": {"tf": 1.4142135623730951}, "pyerrors.obs.reweight": {"tf": 1}}, "df": 9, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"pyerrors": {"tf": 1.7320508075688772}, "pyerrors.correlators.Corr.plottable": {"tf": 1}, "pyerrors.fits.least_squares": {"tf": 1.4142135623730951}, "pyerrors.fits.error_band": {"tf": 1}, "pyerrors.obs.Obs": {"tf": 1.7320508075688772}, "pyerrors.obs.Obs.gamma_method": {"tf": 1}, "pyerrors.obs.Obs.is_zero_within_error": {"tf": 1}, "pyerrors.obs.Obs.plot_piechart": {"tf": 1}, "pyerrors.obs.filter_zeroes": {"tf": 1}}, "df": 9}}}}, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {"pyerrors.input.bdio.read_mesons": {"tf": 1}, "pyerrors.input.hadrons.read_meson_hd5": {"tf": 1}, "pyerrors.input.hadrons.read_ExternalLeg_hd5": {"tf": 1}, "pyerrors.input.hadrons.read_Bilinear_hd5": {"tf": 1}, "pyerrors.input.openQCD.extract_t0": {"tf": 1}, "pyerrors.input.sfcf.read_sfcf_c": {"tf": 1}, "pyerrors.misc.gen_correlated_data": {"tf": 1}, "pyerrors.obs.Obs": {"tf": 2}, "pyerrors.obs.Obs.gamma_method": {"tf": 1.4142135623730951}, "pyerrors.obs.Obs.plot_tauint": {"tf": 1}, "pyerrors.obs.Obs.plot_rho": {"tf": 1}, "pyerrors.obs.Obs.plot_rep_dist": {"tf": 1}, "pyerrors.obs.Obs.plot_history": {"tf": 1}, "pyerrors.obs.Obs.plot_piechart": {"tf": 1}, "pyerrors.obs.correlate": {"tf": 1}, "pyerrors.obs.covariance3": {"tf": 1.4142135623730951}}, "df": 16, "e": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {"pyerrors": {"tf": 1.7320508075688772}}, "df": 1}}}}, "s": {"docs": {}, "df": 0, "/": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {"pyerrors": {"tf": 1}, "pyerrors.obs.Obs": {"tf": 1.4142135623730951}}, "df": 2}}}}}}}}}}}}}}, "_": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {"pyerrors.input.hadrons.read_meson_hd5": {"tf": 1}, "pyerrors.input.hadrons.read_ExternalLeg_hd5": {"tf": 1}, "pyerrors.input.hadrons.read_Bilinear_hd5": {"tf": 1}}, "df": 3}}, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {"pyerrors.input.sfcf.read_sfcf_c": {"tf": 1}}, "df": 1}}}}}, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {"pyerrors.correlators.Corr.plateau": {"tf": 1}, "pyerrors.fits.least_squares": {"tf": 1.4142135623730951}, "pyerrors.misc.gen_correlated_data": {"tf": 1}, "pyerrors.mpm.matrix_pencil_method_old": {"tf": 1}, "pyerrors.obs.Obs": {"tf": 1.4142135623730951}, "pyerrors.obs.expand_deltas_for_merge": {"tf": 1}}, "df": 6}}, "e": {"docs": {}, "df": 0, "r": {"docs": {"pyerrors.obs.filter_zeroes": {"tf": 1}}, "df": 1}}}, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "h": {"docs": {"pyerrors.fits.least_squares": {"tf": 1}}, "df": 1}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "i": {"docs": {"pyerrors.mpm.matrix_pencil_method": {"tf": 1}, "pyerrors.mpm.matrix_pencil_method_old": {"tf": 1.4142135623730951}}, "df": 2}}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {"pyerrors": {"tf": 1}, "pyerrors.fits.least_squares": {"tf": 1}, "pyerrors.input.openQCD.extract_t0": {"tf": 1}, "pyerrors.obs.filter_zeroes": {"tf": 1}}, "df": 
4}}}, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "i": {"docs": {"pyerrors.correlators.Corr": {"tf": 1}}, "df": 1}}}}}, "x": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {"pyerrors": {"tf": 1}}, "df": 1}}}}}, "s": {"docs": {"pyerrors.npr.Npr_matrix": {"tf": 1.4142135623730951}}, "df": 1}}, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.fits.least_squares": {"tf": 1}, "pyerrors.fits.total_least_squares": {"tf": 1}, "pyerrors.mpm.matrix_pencil_method": {"tf": 1}}, "df": 3, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"pyerrors.fits.least_squares": {"tf": 1}, "pyerrors.fits.total_least_squares": {"tf": 1}}, "df": 2}}}}}}}}}}}}}, "n": {"docs": {}, "df": 0, "s": {"docs": {"pyerrors.mpm.matrix_pencil_method": {"tf": 1}}, "df": 1}}}, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"pyerrors.obs.Obs.expand_deltas": {"tf": 1}, "pyerrors.obs.expand_deltas_for_merge": {"tf": 1}}, "df": 2}}}}, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.correlators.Corr.plateau": {"tf": 1.4142135623730951}, "pyerrors.input.bdio.read_ADerrors": {"tf": 1}, "pyerrors.input.bdio.read_mesons": {"tf": 1}, "pyerrors.input.bdio.read_dSdm": {"tf": 1}, "pyerrors.input.hadrons.read_meson_hd5": {"tf": 1.4142135623730951}, "pyerrors.input.openQCD.extract_t0": {"tf": 1.7320508075688772}, "pyerrors.mpm.matrix_pencil_method": {"tf": 1.4142135623730951}, "pyerrors.mpm.matrix_pencil_method_old": {"tf": 1.4142135623730951}, "pyerrors.obs.reduce_deltas": {"tf": 1.4142135623730951}}, "df": 9}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "g": {"docs": {"pyerrors.input.hadrons.read_ExternalLeg_hd5": {"tf": 1}}, "df": 1}}}}}}}, "n": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.input.openQCD.extract_t0": {"tf": 1}}, "df": 1}}}}, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.input.openQCD.extract_t0": {"tf": 1}}, "df": 1}}, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {"pyerrors.npr.Npr_matrix": {"tf": 1}}, "df": 1}}, "p": {"docs": {}, "df": 0, "l": {"docs": {"pyerrors.npr.Npr_matrix": {"tf": 2}}, "df": 1}}}}, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"pyerrors.npr.Npr_matrix.g5H": {"tf": 1}}, "df": 1}}}}}, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.obs.Obs": {"tf": 1.4142135623730951}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.obs.CObs.gamma_method": {"tf": 1}}, "df": 1}}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {"pyerrors.correlators.Corr": {"tf": 1}}, "df": 1}}}}}}, "g": {"docs": {"pyerrors.correlators.Corr": {"tf": 1}}, "df": 1}, "a": {"docs": {}, "df": 0, "c": {"docs": {}, 
"df": 0, "h": {"docs": {"pyerrors.correlators.Corr.plottable": {"tf": 1.4142135623730951}, "pyerrors.input.misc.read_pbp": {"tf": 1.4142135623730951}, "pyerrors.input.openQCD.read_rwms": {"tf": 1.4142135623730951}, "pyerrors.input.openQCD.extract_t0": {"tf": 1.4142135623730951}, "pyerrors.misc.gen_correlated_data": {"tf": 1.4142135623730951}, "pyerrors.npr.Npr_matrix": {"tf": 1.7320508075688772}, "pyerrors.obs.Obs.plot_tauint": {"tf": 1}, "pyerrors.obs.Obs.plot_rho": {"tf": 1}, "pyerrors.obs.Obs.plot_rep_dist": {"tf": 1}, "pyerrors.obs.Obs.plot_history": {"tf": 1}, "pyerrors.obs.Obs.plot_piechart": {"tf": 1}, "pyerrors.obs.covariance3": {"tf": 1}}, "df": 12}}, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"pyerrors.npr.Npr_matrix": {"tf": 1}}, "df": 1}}}}}, "f": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.correlators.Corr.m_eff": {"tf": 1.4142135623730951}, "pyerrors.fits.least_squares": {"tf": 1}, "pyerrors.fits.total_least_squares": {"tf": 1}, "pyerrors.input.openQCD.extract_t0": {"tf": 1.4142135623730951}}, "df": 4}}}}}, "_": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "g": {"docs": {"pyerrors.fits.least_squares": {"tf": 1}}, "df": 1}}}}, "i": {"docs": {}, "df": 0, "g": {"docs": {"pyerrors.linalg.eig": {"tf": 1}}, "df": 1, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {"pyerrors.linalg.eigh": {"tf": 1}, "pyerrors.linalg.eig": {"tf": 1}}, "df": 2}}}, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"pyerrors.linalg.eigh": {"tf": 1}}, "df": 1}}}}}}}}, "h": {"docs": {"pyerrors.linalg.eigh": {"tf": 1}}, "df": 1}}}, "q": {"docs": {"pyerrors.mpm.matrix_pencil_method": {"tf": 1}}, "df": 1, "u": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"pyerrors.obs.covariance": {"tf": 1}, "pyerrors.obs.covariance2": {"tf": 1}, "pyerrors.obs.covariance3": {"tf": 1}}, "df": 3}}}}, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.npr.Npr_matrix": {"tf": 3.1622776601683795}}, "df": 1}}}}}}, "t": {"docs": {}, "df": 0, "c": {"docs": {"pyerrors.npr.Npr_matrix": {"tf": 1.4142135623730951}}, "df": 1}}, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {"pyerrors.npr.Npr_matrix": {"tf": 1.4142135623730951}}, "df": 1}}, "i": {"docs": {"pyerrors.obs.expand_deltas_for_merge": {"tf": 1}}, "df": 1}}}, "p": {"docs": {"pyerrors.obs.filter_zeroes": {"tf": 1.4142135623730951}}, "df": 1}}, "c": {"docs": {"pyerrors.input.hadrons.read_ExternalLeg_hd5": {"tf": 1}, "pyerrors.input.hadrons.read_Bilinear_hd5": {"tf": 1}, "pyerrors.input.sfcf.read_sfcf": {"tf": 1}, "pyerrors.input.sfcf.read_sfcf_c": {"tf": 1}, "pyerrors.npr.Npr_matrix": {"tf": 1.7320508075688772}}, "df": 5, "o": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": {"pyerrors.correlators.Corr.show": {"tf": 1}}, "df": 1, "u": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors": {"tf": 1}, "pyerrors.linalg.scalar_mat_op": {"tf": 1}, "pyerrors.linalg.eigh": {"tf": 1}, "pyerrors.linalg.eig": {"tf": 1}, "pyerrors.linalg.pinv": {"tf": 1}, "pyerrors.linalg.svd": {"tf": 1}, "pyerrors.linalg.slogdet": {"tf": 1}, "pyerrors.mpm.matrix_pencil_method": {"tf": 1}, 
"pyerrors.mpm.matrix_pencil_method_old": {"tf": 1}, "pyerrors.obs.Obs": {"tf": 1}, "pyerrors.obs.Obs.calc_gamma": {"tf": 1}, "pyerrors.obs.Obs.gamma_method": {"tf": 1}}, "df": 12}}, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "x": {"docs": {"pyerrors": {"tf": 1}, "pyerrors.linalg.matmul": {"tf": 1}, "pyerrors.linalg.grad_eig": {"tf": 1}, "pyerrors.obs.CObs": {"tf": 1}}, "df": 4}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {"pyerrors.fits.residual_plot": {"tf": 1}, "pyerrors.linalg.matmul": {"tf": 1}}, "df": 2, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"pyerrors.correlators.Corr.show": {"tf": 1}}, "df": 1}}}}}}, "i": {"docs": {}, "df": 0, "l": {"docs": {"pyerrors.input.bdio.read_ADerrors": {"tf": 1}, "pyerrors.input.bdio.write_ADerrors": {"tf": 1}, "pyerrors.input.bdio.read_mesons": {"tf": 1}, "pyerrors.input.bdio.read_dSdm": {"tf": 1}}, "df": 4}}}, "m": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"pyerrors.obs.expand_deltas_for_merge": {"tf": 1}}, "df": 1}}}, "b": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {"pyerrors.obs.merge_obs": {"tf": 1.4142135623730951}}, "df": 1}}}}, "v": {"docs": {"pyerrors.misc.gen_correlated_data": {"tf": 1}}, "df": 1, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {"pyerrors": {"tf": 1}, "pyerrors.fits.covariance_matrix": {"tf": 1}, "pyerrors.misc.gen_correlated_data": {"tf": 1.4142135623730951}, "pyerrors.obs.covariance": {"tf": 2}, "pyerrors.obs.covariance2": {"tf": 2}, "pyerrors.obs.covariance3": {"tf": 2}}, "df": 6, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"2": {"docs": {}, "df": 0, "(": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "b": {"docs": {"pyerrors.obs.covariance3": {"tf": 1}}, "df": 1}}}}, "docs": {}, "df": 0, "(": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "b": {"docs": {"pyerrors.obs.covariance": {"tf": 1}, "pyerrors.obs.covariance2": {"tf": 1}}, "df": 2}}}}}}}}}}}, "r": {"docs": {}, "df": 0, "r": {"docs": {"pyerrors.correlators.Corr.gamma_method": {"tf": 1}, "pyerrors.correlators.Corr.reverse": {"tf": 1}, "pyerrors.correlators.Corr.T_symmetry": {"tf": 1.4142135623730951}, "pyerrors.correlators.Corr.plateau": {"tf": 1}, "pyerrors.correlators.Corr.set_prange": {"tf": 1}, "pyerrors.correlators.Corr.show": {"tf": 1.4142135623730951}, "pyerrors.correlators.Corr.dump": {"tf": 1}}, "df": 7, "e": {"docs": {}, "df": 0, "l": {"docs": {"pyerrors.correlators.Corr": {"tf": 2}, "pyerrors.correlators.Corr.plottable": {"tf": 1}, "pyerrors.correlators.Corr.symmetric": {"tf": 1}, "pyerrors.correlators.Corr.anti_symmetric": {"tf": 1}, "pyerrors.correlators.Corr.roll": {"tf": 1}, "pyerrors.correlators.Corr.correlate": {"tf": 1.7320508075688772}, "pyerrors.correlators.Corr.reweight": {"tf": 1}, "pyerrors.correlators.Corr.T_symmetry": {"tf": 1.4142135623730951}, "pyerrors.correlators.Corr.deriv": {"tf": 1}, "pyerrors.correlators.Corr.second_deriv": {"tf": 1}, "pyerrors.correlators.Corr.m_eff": {"tf": 2}, "pyerrors.correlators.Corr.show": {"tf": 1.7320508075688772}, "pyerrors.fits.least_squares": {"tf": 1.4142135623730951}, "pyerrors.fits.total_least_squares": {"tf": 1.4142135623730951}, "pyerrors.input.sfcf.read_sfcf": {"tf": 1.7320508075688772}, "pyerrors.input.sfcf.read_sfcf_c": {"tf": 1.7320508075688772}, "pyerrors.mpm.matrix_pencil_method": {"tf": 1.4142135623730951}, "pyerrors.mpm.matrix_pencil_method_old": {"tf": 1}, "pyerrors.obs.correlate": {"tf": 1.7320508075688772}, 
"pyerrors.obs.covariance": {"tf": 1.4142135623730951}, "pyerrors.obs.covariance2": {"tf": 1.4142135623730951}, "pyerrors.obs.covariance3": {"tf": 1.4142135623730951}}, "df": 22}, "c": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.fits.least_squares": {"tf": 1}, "pyerrors.fits.total_least_squares": {"tf": 1}}, "df": 2}}, "s": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"pyerrors.fits.total_least_squares": {"tf": 1}, "pyerrors.fits.residual_plot": {"tf": 1}, "pyerrors.fits.fit_general": {"tf": 1}, "pyerrors.input.hadrons.read_meson_hd5": {"tf": 1}, "pyerrors.input.openQCD.extract_t0": {"tf": 1}, "pyerrors.mpm.matrix_pencil_method_old": {"tf": 1}}, "df": 6}}}}}}}}, "n": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {"pyerrors.correlators.Corr": {"tf": 1}}, "df": 1}, "t": {"docs": {"pyerrors.input.bdio.write_ADerrors": {"tf": 1}}, "df": 1}}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.correlators.Corr": {"tf": 1}, "pyerrors.correlators.Corr.gamma_method": {"tf": 1}}, "df": 2}}}, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {"pyerrors.correlators.Corr.plottable": {"tf": 1}, "pyerrors.fits.least_squares": {"tf": 1}, "pyerrors.input.misc.read_pbp": {"tf": 1.4142135623730951}, "pyerrors.input.openQCD.read_rwms": {"tf": 1.4142135623730951}, "pyerrors.input.openQCD.extract_t0": {"tf": 1.4142135623730951}, "pyerrors.linalg.derived_array": {"tf": 1}, "pyerrors.misc.gen_correlated_data": {"tf": 1}, "pyerrors.npr.Npr_matrix": {"tf": 1.7320508075688772}, "pyerrors.obs.Obs": {"tf": 1.7320508075688772}, "pyerrors.obs.Obs.__init__": {"tf": 1}, "pyerrors.obs.Obs.plot_piechart": {"tf": 1}, "pyerrors.obs.derived_observable": {"tf": 1}}, "df": 12}}}, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {"pyerrors.fits.fit_general": {"tf": 1}, "pyerrors.obs.derived_observable": {"tf": 1}}, "df": 2}}, "i": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.obs.Obs.plot_piechart": {"tf": 1}, "pyerrors.obs.filter_zeroes": {"tf": 1}}, "df": 2}}}}}, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "u": {"docs": {"pyerrors.npr.Npr_matrix": {"tf": 1}, "pyerrors.obs.Obs.expand_deltas": {"tf": 1}, "pyerrors.obs.Obs.calc_gamma": {"tf": 1}}, "df": 3}}}}, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "g": {"docs": {"pyerrors.input.misc.read_pbp": {"tf": 1.4142135623730951}, "pyerrors.input.openQCD.read_rwms": {"tf": 1.4142135623730951}, "pyerrors.input.openQCD.extract_t0": {"tf": 1.4142135623730951}, "pyerrors.obs.Obs.expand_deltas": {"tf": 1.4142135623730951}, "pyerrors.obs.Obs.calc_gamma": {"tf": 1.4142135623730951}, "pyerrors.obs.expand_deltas_for_merge": {"tf": 2}, "pyerrors.obs.filter_zeroes": {"tf": 1}, "pyerrors.obs.reduce_deltas": {"tf": 1.7320508075688772}}, "df": 8, "u": {"docs": {}, "df": 0, "r": {"docs": {"pyerrors.correlators.Corr.reweight": {"tf": 1.7320508075688772}, "pyerrors.input.bdio.read_mesons": {"tf": 1}, "pyerrors.input.bdio.read_dSdm": {"tf": 1}, "pyerrors.obs.filter_zeroes": {"tf": 1}, "pyerrors.obs.reweight": {"tf": 1.7320508075688772}}, "df": 5}}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.correlators.Corr.plateau": {"tf": 1}, "pyerrors.npr.Zq": {"tf": 1}}, "df": 2}}}, "r": {"docs": {}, "df": 0, "u": 
{"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.jackknifing.derived_jack": {"tf": 1}, "pyerrors.linalg.derived_array": {"tf": 1}, "pyerrors.npr.Npr_matrix": {"tf": 1.7320508075688772}, "pyerrors.obs.derived_observable": {"tf": 1}}, "df": 4, "o": {"docs": {}, "df": 0, "r": {"docs": {"pyerrors.npr.Npr_matrix": {"tf": null}}, "df": 1}}}}}, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {"pyerrors.obs.covariance": {"tf": 1}, "pyerrors.obs.covariance2": {"tf": 1}, "pyerrors.obs.covariance3": {"tf": 1}}, "df": 3}}}}}, "o": {"docs": {}, "df": 0, "l": {"docs": {"pyerrors.fits.least_squares": {"tf": 1}, "pyerrors.fits.total_least_squares": {"tf": 1}, "pyerrors.fits.fit_general": {"tf": 1}}, "df": 3}}, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.input.bdio.read_mesons": {"tf": 1}, "pyerrors.input.bdio.read_dSdm": {"tf": 1}}, "df": 2}}, "d": {"docs": {"pyerrors.mpm.matrix_pencil_method_old": {"tf": 1}, "pyerrors.obs.filter_zeroes": {"tf": 1}}, "df": 2}}}, "j": {"docs": {"pyerrors.npr.Npr_matrix.g5H": {"tf": 1}}, "df": 1, "u": {"docs": {}, "df": 0, "g": {"docs": {"pyerrors.npr.Npr_matrix.g5H": {"tf": 1}}, "df": 1}}}}, "s": {"docs": {}, "df": 0, "h": {"docs": {"pyerrors.correlators.Corr.m_eff": {"tf": 1}}, "df": 1, "(": {"docs": {}, "df": 0, "m": {"docs": {"pyerrors.correlators.Corr.m_eff": {"tf": 1.4142135623730951}}, "df": 1}}}}, "b": {"docs": {"pyerrors.input.hadrons.read_ExternalLeg_hd5": {"tf": 1}, "pyerrors.input.hadrons.read_Bilinear_hd5": {"tf": 1}, "pyerrors.linalg.inv": {"tf": 1}, "pyerrors.linalg.cholesky": {"tf": 1}}, "df": 4}, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {"pyerrors.npr.Npr_matrix": {"tf": 1}}, "df": 1}}}}}, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {"pyerrors": {"tf": 1.4142135623730951}}, "df": 1}}, "n": {"docs": {}, "df": 0, "g": {"docs": {"pyerrors.input.bdio.read_ADerrors": {"tf": 1}, "pyerrors.input.bdio.write_ADerrors": {"tf": 1}, "pyerrors.input.bdio.read_mesons": {"tf": 1}, "pyerrors.input.bdio.read_dSdm": {"tf": 1}, "pyerrors.input.hadrons.read_ExternalLeg_hd5": {"tf": 1.4142135623730951}, "pyerrors.input.hadrons.read_Bilinear_hd5": {"tf": 1.4142135623730951}, "pyerrors.obs.Obs.expand_deltas": {"tf": 1}}, "df": 7}}, "r": {"docs": {}, "df": 0, "g": {"docs": {"pyerrors.input.sfcf.read_qtop": {"tf": 1}}, "df": 1}}}, "o": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "s": {"docs": {"pyerrors.fits.least_squares": {"tf": 1}}, "df": 1}}, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "i": {"docs": {"pyerrors.linalg.cholesky": {"tf": 1}}, "df": 1}}}}}}, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"pyerrors.fits.least_squares": {"tf": 1.4142135623730951}, "pyerrors.fits.total_least_squares": {"tf": 1}}, "df": 2}}}}}}, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {"pyerrors.fits.qqplot": {"tf": 1}, "pyerrors.fits.fit_general": {"tf": 1}, "pyerrors.obs.Obs.is_zero_within_error": {"tf": 1}, "pyerrors.obs.Obs.is_zero": {"tf": 1}, "pyerrors.obs.CObs.is_zero": {"tf": 1}}, "df": 5}}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {"pyerrors": {"tf": 1.4142135623730951}, "pyerrors.obs.Obs.__init__": {"tf": 1}, "pyerrors.obs.Obs.plot_history": {"tf": 1}}, "df": 
3}}}, "u": {"docs": {}, "df": 0, "s": {"docs": {"pyerrors.fits.least_squares": {"tf": 1}, "pyerrors.fits.total_least_squares": {"tf": 1}}, "df": 2}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "s": {"docs": {"pyerrors.linalg.derived_array": {"tf": 1}, "pyerrors.obs.derived_observable": {"tf": 1}}, "df": 2}}}}}}, "l": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {"pyerrors.fits.least_squares": {"tf": 1}, "pyerrors.fits.total_least_squares": {"tf": 1}, "pyerrors.npr.Zq": {"tf": 1.4142135623730951}, "pyerrors.obs.Obs": {"tf": 1}, "pyerrors.obs.Obs.calc_gamma": {"tf": 1}, "pyerrors.obs.Obs.gamma_method": {"tf": 1}, "pyerrors.obs.covariance": {"tf": 1}}, "df": 7}}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {"pyerrors.fits.fit_lin": {"tf": 1}, "pyerrors.fits.fit_general": {"tf": 1}, "pyerrors.obs.Obs.__init__": {"tf": 1}}, "df": 3}}}, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {"pyerrors": {"tf": 1}, "pyerrors.correlators.Corr": {"tf": 1.4142135623730951}, "pyerrors.npr.Npr_matrix": {"tf": 1}, "pyerrors.obs.Obs": {"tf": 1}, "pyerrors.obs.CObs": {"tf": 1}}, "df": 5}}}, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"pyerrors.mpm.matrix_pencil_method": {"tf": 1}}, "df": 1}}}}}, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"pyerrors": {"tf": 1}}, "df": 1}}}, "(": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.correlators.Corr.m_eff": {"tf": 1.4142135623730951}}, "df": 1, "+": {"1": {"docs": {"pyerrors.correlators.Corr.m_eff": {"tf": 1.7320508075688772}}, "df": 1}, "docs": {}, "df": 0}}}, "c": {"docs": {"pyerrors.input.bdio.read_ADerrors": {"tf": 1}, "pyerrors.input.bdio.write_ADerrors": {"tf": 1}, "pyerrors.input.bdio.read_mesons": {"tf": 1}, "pyerrors.input.bdio.read_dSdm": {"tf": 1}}, "df": 4}, "p": {"docs": {"pyerrors.input.bdio.read_ADerrors": {"tf": 1}, "pyerrors.input.bdio.write_ADerrors": {"tf": 1}, "pyerrors.input.bdio.read_mesons": {"tf": 1}, "pyerrors.input.bdio.read_dSdm": {"tf": 1}}, "df": 4}, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {"pyerrors.input.openQCD.extract_t0": {"tf": 1.4142135623730951}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.npr.Npr_matrix": {"tf": 2.23606797749979}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {"pyerrors.obs.Obs.gamma_method": {"tf": 1}}, "df": 1}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"pyerrors.obs.filter_zeroes": {"tf": 1}}, "df": 1}}}}}}}}, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "m": {"docs": {"pyerrors.jackknifing.Jack.dump": {"tf": 1}, "pyerrors.obs.Obs.gamma_method": {"tf": 1}, "pyerrors.obs.Obs.dump": {"tf": 1}, "pyerrors.obs.dump_object": {"tf": 1}}, "df": 4}}}}, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.obs.correlate": {"tf": 1}, "pyerrors.obs.covariance3": {"tf": 1}}, "df": 2}}}}}}, "_": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "u": {"docs": {"pyerrors.npr.Npr_matrix": 
{"tf": 1}}, "df": 1}}}}}}}}, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"pyerrors.npr.Npr_matrix": {"tf": 1.7320508075688772}}, "df": 1}}}}}, "m": {"docs": {"pyerrors.correlators.Corr.m_eff": {"tf": 1.4142135623730951}, "pyerrors.fits.fit_lin": {"tf": 1.4142135623730951}, "pyerrors.npr.Npr_matrix.g5H": {"tf": 1}}, "df": 3, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "v": {"docs": {"pyerrors": {"tf": 1}}, "df": 1}}}, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "d": {"docs": {"pyerrors.fits.least_squares": {"tf": 1}}, "df": 1}}}}}}, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "x": {"docs": {"pyerrors": {"tf": 1}, "pyerrors.correlators.Corr": {"tf": 1}, "pyerrors.dirac.Grid_gamma": {"tf": 1}, "pyerrors.fits.least_squares": {"tf": 1}, "pyerrors.fits.total_least_squares": {"tf": 1}, "pyerrors.fits.covariance_matrix": {"tf": 1}, "pyerrors.linalg.derived_array": {"tf": 1}, "pyerrors.linalg.matmul": {"tf": 1}, "pyerrors.linalg.scalar_mat_op": {"tf": 1.4142135623730951}, "pyerrors.linalg.eigh": {"tf": 1}, "pyerrors.linalg.eig": {"tf": 1}, "pyerrors.linalg.pinv": {"tf": 1}, "pyerrors.linalg.svd": {"tf": 1}, "pyerrors.linalg.slogdet": {"tf": 1}, "pyerrors.linalg.grad_eig": {"tf": 1}, "pyerrors.misc.gen_correlated_data": {"tf": 1}, "pyerrors.mpm.matrix_pencil_method": {"tf": 1.7320508075688772}, "pyerrors.mpm.matrix_pencil_method_old": {"tf": 1.4142135623730951}}, "df": 18}, "c": {"docs": {"pyerrors.correlators.Corr": {"tf": 1}, "pyerrors.input.hadrons.read_ExternalLeg_hd5": {"tf": 1.4142135623730951}, "pyerrors.input.hadrons.read_Bilinear_hd5": {"tf": 1.4142135623730951}, "pyerrors.linalg.matmul": {"tf": 1}, "pyerrors.linalg.inv": {"tf": 1}, "pyerrors.linalg.cholesky": {"tf": 1}, "pyerrors.npr.Npr_matrix.g5H": {"tf": 1}, "pyerrors.obs.covariance": {"tf": 1}, "pyerrors.obs.covariance2": {"tf": 1}, "pyerrors.obs.covariance3": {"tf": 1}}, "df": 10}}}, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.jackknifing.derived_jack": {"tf": 1}, "pyerrors.obs.Obs": {"tf": 1}, "pyerrors.obs.derived_observable": {"tf": 1}}, "df": 3}}}}}}, "s": {"docs": {}, "df": 0, "s": {"docs": {"pyerrors.correlators.Corr.m_eff": {"tf": 1.4142135623730951}}, "df": 1}}, "n": {"docs": {}, "df": 0, "i": {"docs": {"pyerrors.fits.least_squares": {"tf": 1}, "pyerrors.fits.total_least_squares": {"tf": 1}, "pyerrors.fits.fit_general": {"tf": 1}, "pyerrors.input.openQCD.extract_t0": {"tf": 1}, "pyerrors.npr.Npr_matrix": {"tf": 1}}, "df": 5}, "u": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"pyerrors.input.bdio.read_mesons": {"tf": 1}, "pyerrors.linalg.derived_array": {"tf": 1}, "pyerrors.obs.derived_observable": {"tf": 1}}, "df": 3}}}, "_": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "d": {"docs": {"pyerrors.linalg.derived_array": {"tf": 1}, "pyerrors.obs.derived_observable": {"tf": 1}}, "df": 2}}}}}}, "x": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"pyerrors.fits.fit_general": {"tf": 1}, "pyerrors.obs.derived_observable": {"tf": 1}}, "df": 2}}}}}}}}}, "i": 
{"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {"pyerrors.obs.covariance": {"tf": 1}, "pyerrors.obs.covariance2": {"tf": 1}, "pyerrors.obs.covariance3": {"tf": 1}}, "df": 3}}}}}, "j": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"pyerrors.npr.Npr_matrix": {"tf": 1.4142135623730951}}, "df": 1}}}, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {"pyerrors.obs.Obs.is_zero": {"tf": 1}, "pyerrors.obs.CObs.is_zero": {"tf": 1}}, "df": 2}}}}, "k": {"docs": {}, "df": 0, "e": {"docs": {"pyerrors.obs.covariance": {"tf": 1}, "pyerrors.obs.covariance2": {"tf": 1}, "pyerrors.obs.covariance3": {"tf": 1}}, "df": 3}}}, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors": {"tf": 1.4142135623730951}, "pyerrors.obs.Obs.__init__": {"tf": 1}, "pyerrors.obs.Obs.plot_history": {"tf": 1}}, "df": 3}}, "r": {"docs": {}, "df": 0, "e": {"docs": {"pyerrors.correlators.Corr": {"tf": 1}, "pyerrors.mpm.matrix_pencil_method": {"tf": 1.4142135623730951}, "pyerrors.npr.Npr_matrix": {"tf": 1}, "pyerrors.obs.Obs.plot_rep_dist": {"tf": 1}}, "df": 4}}, "d": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {"pyerrors.fits.total_least_squares": {"tf": 1}, "pyerrors.input.hadrons.read_meson_hd5": {"tf": 1.4142135623730951}, "pyerrors.npr.Npr_matrix": {"tf": 1}}, "df": 3}}, "e": {"docs": {"pyerrors.npr.Npr_matrix": {"tf": 1.7320508075688772}}, "df": 1}}, "o": {"docs": {}, "df": 0, "r": {"docs": {"pyerrors.linalg.pinv": {"tf": 1}}, "df": 1}}, "v": {"docs": {}, "df": 0, "e": {"docs": {"pyerrors.npr.Npr_matrix": {"tf": 1.7320508075688772}}, "df": 1}}, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {"pyerrors.npr.Npr_matrix.g5H": {"tf": 1}}, "df": 1}}}}}}, "y": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "b": {"docs": {"pyerrors": {"tf": 2.6457513110645907}}, "df": 1}}, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "b": {"docs": {"pyerrors": {"tf": 2.6457513110645907}}, "df": 1}}}}}}}}, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {"pyerrors": {"tf": 1}, "pyerrors.fits.least_squares": {"tf": 1}, "pyerrors.fits.total_least_squares": {"tf": 1}, "pyerrors.linalg.matmul": {"tf": 1}}, "df": 4, "i": {"docs": {"pyerrors.correlators.Corr": {"tf": 1}, "pyerrors.linalg.matmul": {"tf": 1}}, "df": 2}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"pyerrors.npr.Npr_matrix": {"tf": 1}}, "df": 1}}}}}}}}}}}}}, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {"pyerrors.correlators.Corr.gamma_method": {"tf": 1}, "pyerrors.correlators.Corr.plateau": {"tf": 1.4142135623730951}, "pyerrors.fits.Fit_result.gamma_method": {"tf": 1}, "pyerrors.fits.least_squares": {"tf": 2.23606797749979}, "pyerrors.mpm.matrix_pencil_method": {"tf": 1.4142135623730951}, "pyerrors.mpm.matrix_pencil_method_old": {"tf": 1}, "pyerrors.npr.Npr_matrix": {"tf": 2.23606797749979}, "pyerrors.obs.Obs": {"tf": 1}, "pyerrors.obs.Obs.is_zero_within_error": {"tf": 1}, 
"pyerrors.obs.covariance": {"tf": 1}, "pyerrors.obs.covariance2": {"tf": 1}, "pyerrors.obs.covariance3": {"tf": 1}}, "df": 12}}}}, "a": {"docs": {}, "df": 0, "n": {"docs": {"pyerrors.correlators.Corr.plateau": {"tf": 1}, "pyerrors.misc.gen_correlated_data": {"tf": 1.4142135623730951}, "pyerrors.obs.Obs.__init__": {"tf": 1.7320508075688772}, "pyerrors.obs.filter_zeroes": {"tf": 1}}, "df": 4}, "d": {"docs": {"pyerrors.fits.least_squares": {"tf": 1}}, "df": 1}}, "m": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {"pyerrors.fits.ks_test": {"tf": 1}, "pyerrors.npr.Npr_matrix": {"tf": 2.8284271247461903}}, "df": 2}}}}, "s": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"pyerrors.input.bdio.read_mesons": {"tf": 1}, "pyerrors.input.hadrons.read_meson_hd5": {"tf": 2.6457513110645907}}, "df": 2, "_": {"0": {"docs": {"pyerrors.input.hadrons.read_meson_hd5": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {"pyerrors.fits.least_squares": {"tf": 1.4142135623730951}, "pyerrors.roots.find_root": {"tf": 1.4142135623730951}}, "df": 2}}, "d": {"docs": {"pyerrors.obs.correlate": {"tf": 1}}, "df": 1}}, "g": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "d": {"docs": {"pyerrors.fits.least_squares": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "c": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "c": {"docs": {"pyerrors.input.bdio.read_ADerrors": {"tf": 1}}, "df": 1}}}, "s": {"1": {"docs": {"pyerrors.input.openQCD.read_rwms": {"tf": 1}}, "df": 1}, "docs": {"pyerrors.input.openQCD.extract_t0": {"tf": 1.7320508075688772}}, "df": 1}}, "d": {"docs": {"pyerrors.roots.find_root": {"tf": 1.7320508075688772}}, "df": 1, "a": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.input.openQCD.extract_t0": {"tf": 1.7320508075688772}}, "df": 1, "a": {"docs": {"pyerrors": {"tf": 1}, "pyerrors.correlators.Corr.fit": {"tf": 1.7320508075688772}, "pyerrors.fits.least_squares": {"tf": 1.4142135623730951}, "pyerrors.fits.total_least_squares": {"tf": 1}, "pyerrors.fits.residual_plot": {"tf": 1}, "pyerrors.input.bdio.read_ADerrors": {"tf": 1}, "pyerrors.input.bdio.read_mesons": {"tf": 1}, "pyerrors.input.bdio.read_dSdm": {"tf": 1}, "pyerrors.input.openQCD.extract_t0": {"tf": 1.4142135623730951}, "pyerrors.jackknifing.derived_jack": {"tf": 1}, "pyerrors.linalg.derived_array": {"tf": 1.4142135623730951}, "pyerrors.misc.gen_correlated_data": {"tf": 1.4142135623730951}, "pyerrors.mpm.matrix_pencil_method": {"tf": 1.4142135623730951}, "pyerrors.mpm.matrix_pencil_method_old": {"tf": 1.4142135623730951}, "pyerrors.npr.Npr_matrix": {"tf": 3.1622776601683795}, "pyerrors.obs.derived_observable": {"tf": 1}}, "df": 16, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.misc.gen_correlated_data": {"tf": 1}}, "df": 1}}}}}}, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {"pyerrors": {"tf": 1.4142135623730951}, "pyerrors.correlators.Corr.fit": {"tf": 1}, "pyerrors.mpm.matrix_pencil_method_old": {"tf": 1}, "pyerrors.obs.Obs.details": {"tf": 1}}, "df": 4}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {"pyerrors.correlators.Corr.show": {"tf": 1}, "pyerrors.fits.ks_test": {"tf": 1}, "pyerrors.input.openQCD.extract_t0": {"tf": 1}, "pyerrors.linalg.slogdet": {"tf": 1}, "pyerrors.obs.Obs.calc_gamma": 
{"tf": 1}, "pyerrors.obs.Obs.gamma_method": {"tf": 1}}, "df": 6}}}}}}, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {"pyerrors.obs.Obs.expand_deltas": {"tf": 2}, "pyerrors.obs.Obs.calc_gamma": {"tf": 1.7320508075688772}, "pyerrors.obs.expand_deltas_for_merge": {"tf": 1.7320508075688772}, "pyerrors.obs.filter_zeroes": {"tf": 2}, "pyerrors.obs.reduce_deltas": {"tf": 2}}, "df": 5, "_": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "\\": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "j": {"docs": {"pyerrors": {"tf": 1}}, "df": 1}}}}}}}}}}}}}, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"pyerrors.correlators.Corr": {"tf": 1.4142135623730951}, "pyerrors.input.sfcf.read_sfcf": {"tf": 1}, "pyerrors.input.sfcf.read_sfcf_c": {"tf": 1}}, "df": 3}}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {"pyerrors.correlators.Corr": {"tf": 1}}, "df": 1}}}, "a": {"docs": {}, "df": 0, "l": {"docs": {"pyerrors.correlators.Corr": {"tf": 1}}, "df": 1}}, "f": {"docs": {"pyerrors.fits.least_squares": {"tf": 1.4142135623730951}, "pyerrors.fits.total_least_squares": {"tf": 1.4142135623730951}, "pyerrors.fits.fit_general": {"tf": 1}}, "df": 3, "i": {"docs": {}, "df": 0, "n": {"docs": {"pyerrors.correlators.Corr.reweight": {"tf": 1}, "pyerrors.obs.Obs.__init__": {"tf": 1}, "pyerrors.obs.Obs.expand_deltas": {"tf": 1.4142135623730951}, "pyerrors.obs.Obs.calc_gamma": {"tf": 1.4142135623730951}, "pyerrors.obs.expand_deltas_for_merge": {"tf": 2}, "pyerrors.obs.filter_zeroes": {"tf": 1}, "pyerrors.obs.reduce_deltas": {"tf": 1.4142135623730951}, "pyerrors.obs.reweight": {"tf": 1}}, "df": 8}}, "a": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.correlators.Corr.deriv": {"tf": 1}, "pyerrors.fits.least_squares": {"tf": 2}, "pyerrors.fits.total_least_squares": {"tf": 1.4142135623730951}, "pyerrors.fits.fit_general": {"tf": 1}, "pyerrors.input.bdio.read_ADerrors": {"tf": 1}, "pyerrors.input.bdio.write_ADerrors": {"tf": 1}, "pyerrors.input.bdio.read_mesons": {"tf": 1.4142135623730951}, "pyerrors.input.bdio.read_dSdm": {"tf": 1.4142135623730951}, "pyerrors.input.hadrons.read_meson_hd5": {"tf": 1}, "pyerrors.input.hadrons.read_ExternalLeg_hd5": {"tf": 1}, "pyerrors.input.hadrons.read_Bilinear_hd5": {"tf": 1}, "pyerrors.input.openQCD.read_rwms": {"tf": 1}, "pyerrors.input.openQCD.extract_t0": {"tf": 1}, "pyerrors.input.sfcf.read_qtop": {"tf": 1}, "pyerrors.jackknifing.Jack.dump": {"tf": 1}, "pyerrors.mpm.matrix_pencil_method": {"tf": 1.4142135623730951}, "pyerrors.mpm.matrix_pencil_method_old": {"tf": 1}, "pyerrors.npr.Zq": {"tf": 1}, "pyerrors.obs.Obs": {"tf": 1.7320508075688772}, "pyerrors.obs.Obs.gamma_method": {"tf": 2}, "pyerrors.obs.Obs.dump": {"tf": 1}, "pyerrors.obs.derived_observable": {"tf": 1}, "pyerrors.obs.covariance": {"tf": 1}, "pyerrors.obs.covariance2": {"tf": 1}, "pyerrors.obs.covariance3": {"tf": 1}, "pyerrors.obs.dump_object": {"tf": 1}}, "df": 26}}}}}, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {"pyerrors.correlators.Corr.deriv": {"tf": 1}, "pyerrors.correlators.Corr.second_deriv": {"tf": 1}, "pyerrors.jackknifing.derived_jack": {"tf": 1}, "pyerrors.linalg.derived_array": {"tf": 1.4142135623730951}, "pyerrors.obs.Obs.plot_history": {"tf": 1}, "pyerrors.obs.derived_observable": {"tf": 
1.7320508075688772}}, "df": 6, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "j": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "(": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {"pyerrors.jackknifing.derived_jack": {"tf": 1}}, "df": 1}}}}}}}}}}}, "o": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "(": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {"pyerrors.obs.derived_observable": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}}}}}, "c": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {"pyerrors.correlators.Corr.deriv": {"tf": 1}, "pyerrors.correlators.Corr.fit": {"tf": 1}}, "df": 2}}, "a": {"docs": {}, "df": 0, "y": {"docs": {"pyerrors.input.openQCD.extract_t0": {"tf": 1.4142135623730951}}, "df": 1}}, "o": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.linalg.cholesky": {"tf": 1}}, "df": 1}, "i": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.linalg.svd": {"tf": 1}, "pyerrors.mpm.matrix_pencil_method_old": {"tf": 1}}, "df": 2}}}}}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {"pyerrors.mpm.matrix_pencil_method_old": {"tf": 1}}, "df": 1}}}}}, "s": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "b": {"docs": {"pyerrors.npr.Npr_matrix": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "v": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.obs.Obs.gamma_method": {"tf": 1}}, "df": 1}}}}}, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"pyerrors.correlators.Corr.deriv": {"tf": 1}, "pyerrors.obs.Obs": {"tf": 1}, "pyerrors.obs.Obs.gamma_method": {"tf": 1.4142135623730951}}, "df": 3, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {"pyerrors": {"tf": 1}, "pyerrors.fits.least_squares": {"tf": 1}, "pyerrors.fits.total_least_squares": {"tf": 1}, "pyerrors.fits.fit_general": {"tf": 1}, "pyerrors.jackknifing.derived_jack": {"tf": 1}, "pyerrors.linalg.derived_array": {"tf": 1.4142135623730951}, "pyerrors.obs.derived_observable": {"tf": 1.7320508075688772}}, "df": 7}}}}}}}}, "s": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "y": {"docs": {"pyerrors.fits.least_squares": {"tf": 1}, "pyerrors.fits.residual_plot": {"tf": 1}}, "df": 2}}}}, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {"pyerrors.fits.total_least_squares": {"tf": 1}}, "df": 1}}}, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.fits.qqplot": {"tf": 1}, "pyerrors.obs.Obs.plot_rep_dist": {"tf": 1}}, "df": 2}}}}}}, "c": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, 
"m": {"docs": {"pyerrors.fits.ks_test": {"tf": 1}}, "df": 1}}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "d": {"docs": {"pyerrors.mpm.matrix_pencil_method_old": {"tf": 1}}, "df": 1}}}}}, "c": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.npr.Npr_matrix": {"tf": 1}, "pyerrors.obs.Obs": {"tf": 1.4142135623730951}, "pyerrors.obs.filter_zeroes": {"tf": 1.4142135623730951}}, "df": 3, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {"pyerrors.input.bdio.read_mesons": {"tf": 1.4142135623730951}, "pyerrors.input.bdio.read_dSdm": {"tf": 1.4142135623730951}, "pyerrors.npr.Npr_matrix": {"tf": 1}, "pyerrors.obs.Obs": {"tf": 1.4142135623730951}, "pyerrors.obs.Obs.plot_piechart": {"tf": 1}}, "df": 5}}}}}}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {"pyerrors.input.hadrons.read_meson_hd5": {"tf": 1}}, "df": 1}}}}}}}, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {"pyerrors.npr.Npr_matrix": {"tf": 1}}, "df": 1}}}}, "v": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"pyerrors.obs.expand_deltas_for_merge": {"tf": 1}}, "df": 1}}}}}}, "t": {"docs": {"pyerrors.correlators.Corr.roll": {"tf": 1.4142135623730951}}, "df": 1, "r": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "d": {"docs": {"pyerrors.input.openQCD.extract_t0": {"tf": 1}}, "df": 1}}}}, "c": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "g": {"docs": {"pyerrors.input.openQCD.extract_t0": {"tf": 1}}, "df": 1}}}}, "m": {"docs": {"pyerrors.input.openQCD.extract_t0": {"tf": 1}}, "df": 1}}}, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"pyerrors.npr.Npr_matrix": {"tf": 2.6457513110645907}}, "df": 1, "=": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.npr.Npr_matrix": {"tf": 1}}, "df": 1}}}}}}}}}}, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": {"pyerrors.correlators.Corr.dump": {"tf": 1}, "pyerrors.jackknifing.Jack.dump": {"tf": 1}, "pyerrors.obs.Obs.dump": {"tf": 1}, "pyerrors.obs.dump_object": {"tf": 1}}, "df": 4}}}, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {"pyerrors.fits.total_least_squares": {"tf": 1}, "pyerrors.fits.fit_lin": {"tf": 1.4142135623730951}, "pyerrors.fits.fit_general": {"tf": 1.4142135623730951}, "pyerrors.obs.covariance": {"tf": 1.7320508075688772}, "pyerrors.obs.covariance2": {"tf": 1.7320508075688772}, "pyerrors.obs.covariance3": {"tf": 1.7320508075688772}, "pyerrors.obs.pseudo_Obs": {"tf": 1}}, "df": 7}}}}, "s": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "m": {"docs": {"pyerrors.input.bdio.read_dSdm": {"tf": 1}}, "df": 1}}}, "o": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "n": {"docs": {"pyerrors.obs.Obs.gamma_method": {"tf": 1}}, "df": 1}}}}, "g": {"docs": {"pyerrors.correlators.Corr.m_eff": {"tf": 1}, "pyerrors.correlators.Corr.show": {"tf": 1}, "pyerrors.fits.least_squares": {"tf": 1}, "pyerrors.input.openQCD.read_rwms": {"tf": 1}, "pyerrors.jackknifing.derived_jack": {"tf": 1.4142135623730951}, "pyerrors.linalg.derived_array": {"tf": 
1}, "pyerrors.npr.Npr_matrix": {"tf": 1.4142135623730951}, "pyerrors.obs.derived_observable": {"tf": 1.4142135623730951}, "pyerrors.obs.reweight": {"tf": 1}}, "df": 9, "e": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors": {"tf": 1}}, "df": 1}, "v": {"docs": {}, "df": 0, "p": {"docs": {"pyerrors.correlators.Corr": {"tf": 1}}, "df": 1}}, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"pyerrors.fits.least_squares": {"tf": 1.4142135623730951}, "pyerrors.fits.qqplot": {"tf": 1}, "pyerrors.fits.residual_plot": {"tf": 1}, "pyerrors.input.bdio.read_ADerrors": {"tf": 1}, "pyerrors.linalg.grad_eig": {"tf": 1}, "pyerrors.misc.gen_correlated_data": {"tf": 1.4142135623730951}, "pyerrors.npr.Npr_matrix": {"tf": 1.4142135623730951}, "pyerrors.obs.Obs": {"tf": 1}, "pyerrors.obs.pseudo_Obs": {"tf": 1}}, "df": 9}, "a": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.misc.gen_correlated_data": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {"pyerrors.correlators.Corr.gamma_method": {"tf": 1}, "pyerrors.dirac.Grid_gamma": {"tf": 1}, "pyerrors.fits.Fit_result.gamma_method": {"tf": 1}, "pyerrors.fits.least_squares": {"tf": 1}, "pyerrors.obs.Obs.is_zero_within_error": {"tf": 1}, "pyerrors.obs.covariance": {"tf": 1}, "pyerrors.obs.covariance2": {"tf": 1}, "pyerrors.obs.covariance3": {"tf": 1}}, "df": 8, "_": {"5": {"docs": {"pyerrors.npr.Npr_matrix.g5H": {"tf": 1.7320508075688772}}, "df": 1}, "docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {"pyerrors": {"tf": 1.7320508075688772}, "pyerrors.obs.Obs": {"tf": 1}, "pyerrors.obs.CObs.gamma_method": {"tf": 1}}, "df": 3}}}}}}, "{": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "a": {"docs": {"pyerrors.obs.Obs.calc_gamma": {"tf": 1}}, "df": 1}}}}}}}, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {"pyerrors.fits.qqplot": {"tf": 1}}, "df": 1}}}}}}, "r": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "g": {"docs": {"pyerrors.npr.Npr_matrix": {"tf": 1}}, "df": 1}}}}}, "u": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {"pyerrors.correlators.Corr.m_eff": {"tf": 1.4142135623730951}, "pyerrors.fits.least_squares": {"tf": 1}, "pyerrors.fits.total_least_squares": {"tf": 1}, "pyerrors.fits.fit_general": {"tf": 1}, "pyerrors.roots.find_root": {"tf": 1.4142135623730951}}, "df": 5}}}}, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {"pyerrors.correlators.Corr.plateau": {"tf": 1}, "pyerrors.fits.error_band": {"tf": 1}, "pyerrors.fits.ks_test": {"tf": 1}, "pyerrors.input.bdio.read_mesons": {"tf": 1}, "pyerrors.input.bdio.read_dSdm": {"tf": 1}, "pyerrors.input.misc.read_pbp": {"tf": 1}, "pyerrors.input.openQCD.read_rwms": {"tf": 1}, "pyerrors.input.openQCD.extract_t0": {"tf": 1}, "pyerrors.input.sfcf.read_sfcf": {"tf": 1}, "pyerrors.input.sfcf.read_sfcf_c": {"tf": 1}, "pyerrors.input.sfcf.read_qtop": {"tf": 1}, "pyerrors.linalg.scalar_mat_op": {"tf": 1}, "pyerrors.linalg.eigh": {"tf": 1}, "pyerrors.linalg.eig": {"tf": 1}, "pyerrors.misc.gen_correlated_data": {"tf": 1}, "pyerrors.mpm.matrix_pencil_method_old": {"tf": 1}, "pyerrors.npr.Npr_matrix": {"tf": 1}, "pyerrors.obs.Obs": {"tf": 1.4142135623730951}, "pyerrors.obs.pseudo_Obs": 
{"tf": 1}}, "df": 19}}}}, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {"pyerrors.dirac.Grid_gamma": {"tf": 1}}, "df": 1}}, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.linalg.grad_eig": {"tf": 1}}, "df": 1}}}}}}}, "c": {"docs": {}, "df": 0, "c": {"docs": {"pyerrors.input.bdio.read_ADerrors": {"tf": 1}, "pyerrors.input.bdio.write_ADerrors": {"tf": 1}, "pyerrors.input.bdio.read_mesons": {"tf": 1}, "pyerrors.input.bdio.read_dSdm": {"tf": 1}}, "df": 4}}, "t": {"docs": {"pyerrors.npr.Npr_matrix": {"tf": 2.6457513110645907}, "pyerrors.obs.covariance": {"tf": 1}, "pyerrors.obs.covariance2": {"tf": 1}, "pyerrors.obs.covariance3": {"tf": 1}}, "df": 4}, "o": {"docs": {"pyerrors.npr.Npr_matrix.g5H": {"tf": 1}}, "df": 1}}, "s": {"docs": {"pyerrors.obs.Obs": {"tf": 1.4142135623730951}, "pyerrors.obs.Obs.gamma_method": {"tf": 1.4142135623730951}}, "df": 2, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors": {"tf": 1}}, "df": 1}}, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "d": {"docs": {"pyerrors.correlators.Corr.m_eff": {"tf": 1}, "pyerrors.correlators.Corr.fit": {"tf": 1}, "pyerrors.input.hadrons.read_meson_hd5": {"tf": 1}, "pyerrors.linalg.matmul": {"tf": 1}, "pyerrors.obs.Obs": {"tf": 2.23606797749979}, "pyerrors.obs.Obs.gamma_method": {"tf": 1}, "pyerrors.obs.Obs.is_zero_within_error": {"tf": 1}, "pyerrors.obs.pseudo_Obs": {"tf": 1}}, "df": 8}}}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {"pyerrors.mpm.matrix_pencil_method": {"tf": 1}}, "df": 1}}}, "r": {"docs": {"pyerrors.correlators.Corr.m_eff": {"tf": 1}, "pyerrors.correlators.Corr.plateau": {"tf": 1}, "pyerrors.correlators.Corr.show": {"tf": 1.4142135623730951}, "pyerrors.correlators.Corr.dump": {"tf": 1}, "pyerrors.input.hadrons.read_meson_hd5": {"tf": 2.23606797749979}, "pyerrors.input.sfcf.read_sfcf_c": {"tf": 1}}, "df": 6, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"pyerrors.fits.least_squares": {"tf": 1}, "pyerrors.obs.Obs.__init__": {"tf": 1}}, "df": 2}}, "d": {"docs": {}, "df": 0, "e": {"docs": {"pyerrors.npr.Npr_matrix": {"tf": 2}}, "df": 1, "s": {"docs": {}, "df": 0, "=": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"pyerrors.npr.Npr_matrix": {"tf": 1}}, "df": 1}}}}}}}}, "u": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {"pyerrors.input.hadrons.read_meson_hd5": {"tf": 1}, "pyerrors.input.misc.read_pbp": {"tf": 1}, "pyerrors.input.openQCD.read_rwms": {"tf": 1}, "pyerrors.input.sfcf.read_sfcf": {"tf": 1}, "pyerrors.input.sfcf.read_sfcf_c": {"tf": 1}, "pyerrors.input.sfcf.read_qtop": {"tf": 1}}, "df": 6}}}}}}, "e": {"docs": {}, "df": 0, "p": {"docs": {"pyerrors.npr.Npr_matrix": {"tf": 1}, "pyerrors.obs.expand_deltas_for_merge": {"tf": 1.4142135623730951}}, "df": 2, "_": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"pyerrors.fits.fit_general": {"tf": 1}, "pyerrors.obs.derived_observable": {"tf": 1}}, "df": 2}}}}}}, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {"pyerrors.obs.Obs.calc_gamma": {"tf": 1}}, "df": 1}}}}}, "o": {"docs": {}, "df": 0, "p": {"docs": {"pyerrors.input.bdio.read_mesons": {"tf": 1.4142135623730951}, 
"pyerrors.input.bdio.read_dSdm": {"tf": 1.4142135623730951}}, "df": 2}, "r": {"docs": {}, "df": 0, "e": {"docs": {"pyerrors.npr.Npr_matrix": {"tf": 1.4142135623730951}}, "df": 1}}}, "d": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.mpm.matrix_pencil_method_old": {"tf": 1}}, "df": 1}}}}, "y": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"pyerrors.npr.Npr_matrix": {"tf": 1.4142135623730951}}, "df": 1}}}}, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {"pyerrors": {"tf": 1.7320508075688772}, "pyerrors.fits.error_band": {"tf": 1}, "pyerrors.misc.gen_correlated_data": {"tf": 1.4142135623730951}, "pyerrors.obs.Obs": {"tf": 1}, "pyerrors.obs.Obs.__init__": {"tf": 2.23606797749979}, "pyerrors.obs.pseudo_Obs": {"tf": 1}}, "df": 6}}, "e": {"docs": {"pyerrors.correlators.Corr": {"tf": 1}, "pyerrors.obs.Obs": {"tf": 1}, "pyerrors.obs.merge_obs": {"tf": 1}}, "df": 3}}, "v": {"docs": {}, "df": 0, "e": {"docs": {"pyerrors.correlators.Corr.show": {"tf": 1.4142135623730951}}, "df": 1}}, "r": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"pyerrors.mpm.matrix_pencil_method": {"tf": 1}}, "df": 1}}}}}, "e": {"docs": {}, "df": 0, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {"pyerrors.correlators.Corr": {"tf": 1}}, "df": 1}}}}}, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"pyerrors.correlators.Corr.second_deriv": {"tf": 1}, "pyerrors.input.sfcf.read_sfcf_c": {"tf": 1}, "pyerrors.npr.Npr_matrix": {"tf": 1}, "pyerrors.obs.correlate": {"tf": 1}}, "df": 4}}}, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"pyerrors.input.sfcf.read_qtop": {"tf": 1}}, "df": 1}}, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"pyerrors.npr.Npr_matrix": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "e": {"docs": {"pyerrors.correlators.Corr.m_eff": {"tf": 1}, "pyerrors.correlators.Corr.fit": {"tf": 1}, "pyerrors.npr.Npr_matrix": {"tf": 2.23606797749979}}, "df": 3}, "l": {"docs": {}, "df": 0, "f": {"docs": {"pyerrors.correlators.Corr.fit": {"tf": 1}}, "df": 1}}, "t": {"docs": {"pyerrors.correlators.Corr.set_prange": {"tf": 1}, "pyerrors.correlators.Corr.show": {"tf": 1}}, "df": 2}, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"pyerrors.mpm.matrix_pencil_method": {"tf": 1}}, "df": 1}}}, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.obs.covariance": {"tf": 1}, "pyerrors.obs.covariance2": {"tf": 1}, "pyerrors.obs.covariance3": {"tf": 1}}, "df": 3}}}}}}}}}}, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {"pyerrors.correlators.Corr.deriv": {"tf": 1}, "pyerrors.jackknifing.derived_jack": {"tf": 1}, "pyerrors.obs.derived_observable": {"tf": 1}}, "df": 3, "i": {"docs": {"pyerrors.correlators.Corr": {"tf": 1}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"pyerrors.input.hadrons.read_meson_hd5": {"tf": 1}}, "df": 1}}}}}, "n": {"docs": {}, "df": 0, "h": {"docs": {"pyerrors.correlators.Corr.m_eff": {"tf": 1}}, "df": 1, "(": {"docs": {}, "df": 0, "m": {"docs": 
{"pyerrors.correlators.Corr.m_eff": {"tf": 1.4142135623730951}}, "df": 1}, "x": {"docs": {"pyerrors.fits.least_squares": {"tf": 1}, "pyerrors.fits.total_least_squares": {"tf": 1}, "pyerrors.fits.fit_general": {"tf": 1}}, "df": 3}}}, "g": {"docs": {}, "df": 0, "l": {"docs": {"pyerrors.input.sfcf.read_sfcf": {"tf": 1.4142135623730951}, "pyerrors.mpm.matrix_pencil_method": {"tf": 1}}, "df": 2}, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"pyerrors.linalg.svd": {"tf": 1}, "pyerrors.mpm.matrix_pencil_method_old": {"tf": 1.7320508075688772}}, "df": 2}}}}}}, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.correlators.Corr.fit": {"tf": 1}, "pyerrors.fits.least_squares": {"tf": 1}, "pyerrors.fits.total_least_squares": {"tf": 1}, "pyerrors.fits.fit_general": {"tf": 1}}, "df": 4}}}}, "z": {"docs": {}, "df": 0, "e": {"docs": {"pyerrors.npr.Npr_matrix": {"tf": 2}, "pyerrors.obs.expand_deltas_for_merge": {"tf": 1.4142135623730951}}, "df": 2}}, "g": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {"pyerrors.obs.Obs.is_zero_within_error": {"tf": 1}}, "df": 1}}}}, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"pyerrors.correlators.Corr": {"tf": 1.4142135623730951}}, "df": 1}}}, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.obs.expand_deltas_for_merge": {"tf": 1}}, "df": 1}}, "r": {"docs": {"pyerrors.obs.filter_zeroes": {"tf": 1}}, "df": 1}}}}}}, "p": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"pyerrors.correlators.Corr": {"tf": 1}}, "df": 1}}}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"pyerrors.input.openQCD.extract_t0": {"tf": 1}}, "df": 1, "_": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.input.openQCD.extract_t0": {"tf": 1}}, "df": 1}}}}}}}}}, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "i": {"docs": {"pyerrors.correlators.Corr.fit": {"tf": 1}, "pyerrors.fits.least_squares": {"tf": 1}, "pyerrors.input.sfcf.read_qtop": {"tf": 1}, "pyerrors.jackknifing.Jack.dump": {"tf": 1}, "pyerrors.obs.Obs.gamma_method": {"tf": 1}, "pyerrors.obs.Obs.dump": {"tf": 1}, "pyerrors.obs.dump_object": {"tf": 1}}, "df": 7}}}}}}, "u": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.correlators.Corr": {"tf": 1}, "pyerrors.linalg.matmul": {"tf": 1}}, "df": 2}}}, "l": {"docs": {}, "df": 0, "i": {"docs": {"pyerrors.linalg.derived_array": {"tf": 1.4142135623730951}, "pyerrors.obs.derived_observable": {"tf": 1.4142135623730951}}, "df": 2}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {"pyerrors.mpm.matrix_pencil_method": {"tf": 1}}, "df": 1}}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.correlators.Corr.reweight": {"tf": 1}, "pyerrors.obs.reweight": {"tf": 1}}, "df": 2}}}}}}, "b": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "q": {"docs": {}, "df": 0, "u": {"docs": {"pyerrors.fits.least_squares": 
{"tf": 1}}, "df": 1}}, "t": {"docs": {"pyerrors.obs.expand_deltas_for_merge": {"tf": 1}, "pyerrors.obs.filter_zeroes": {"tf": 1}, "pyerrors.obs.reduce_deltas": {"tf": 1}}, "df": 3}}}, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.obs.Obs.__init__": {"tf": 1}}, "df": 1}}}}}}, "f": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "i": {"docs": {"pyerrors.input.openQCD.extract_t0": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "m": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.obs.Obs.calc_gamma": {"tf": 1}}, "df": 1}}}}, "c": {"docs": {}, "df": 0, "h": {"docs": {"pyerrors.obs.filter_zeroes": {"tf": 1}}, "df": 1}}, "r": {"docs": {}, "df": 0, "e": {"docs": {"pyerrors.obs.covariance": {"tf": 1}, "pyerrors.obs.covariance2": {"tf": 1}, "pyerrors.obs.covariance3": {"tf": 1}}, "df": 3}}}, "y": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {"pyerrors.correlators.Corr.symmetric": {"tf": 1}, "pyerrors.correlators.Corr.anti_symmetric": {"tf": 1}, "pyerrors.correlators.Corr.deriv": {"tf": 1}}, "df": 3, "i": {"docs": {"pyerrors.correlators.Corr.T_symmetry": {"tf": 1.4142135623730951}}, "df": 1}}}, "r": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.correlators.Corr.deriv": {"tf": 1}}, "df": 1}}}}}}, "h": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.correlators.Corr.roll": {"tf": 1}}, "df": 1}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"pyerrors.input.bdio.read_ADerrors": {"tf": 1.7320508075688772}, "pyerrors.input.bdio.write_ADerrors": {"tf": 1.7320508075688772}, "pyerrors.input.bdio.read_mesons": {"tf": 1.7320508075688772}, "pyerrors.input.bdio.read_dSdm": {"tf": 1.7320508075688772}}, "df": 4}}, "p": {"docs": {}, "df": 0, "e": {"docs": {"pyerrors.npr.Npr_matrix": {"tf": 2.449489742783178}, "pyerrors.obs.Obs.expand_deltas": {"tf": 1}, "pyerrors.obs.Obs.calc_gamma": {"tf": 1}, "pyerrors.obs.expand_deltas_for_merge": {"tf": 1}}, "df": 4}}}, "o": {"docs": {}, "df": 0, "w": {"docs": {"pyerrors.obs.Obs.plot_piechart": {"tf": 1}}, "df": 1}}}, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "v": {"docs": {"pyerrors.correlators.Corr.m_eff": {"tf": 1.4142135623730951}}, "df": 1}}, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, ",": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {"pyerrors.input.bdio.read_ADerrors": {"tf": 1}, "pyerrors.input.bdio.write_ADerrors": {"tf": 1}, "pyerrors.input.bdio.read_mesons": {"tf": 1}, "pyerrors.input.bdio.read_dSdm": {"tf": 1}}, "df": 4}}}}}}}}}}}}, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "c": {"docs": {"pyerrors.input.sfcf.read_sfcf_c": {"tf": 1}}, "df": 1, "e": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.input.bdio.read_mesons": {"tf": 1}}, "df": 1}}}}}}}}}}, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {"pyerrors.npr.Npr_matrix": {"tf": 1}}, "df": 1}}}}}, "c": {"docs": {}, "df": 0, "i": {"docs": 
{}, "df": 0, "p": {"docs": {}, "df": 0, "i": {"docs": {"pyerrors.fits.least_squares": {"tf": 1}, "pyerrors.fits.total_least_squares": {"tf": 1}}, "df": 2}}}, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"pyerrors.linalg.scalar_mat_op": {"tf": 1}}, "df": 1}}}}}, "k": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "p": {"docs": {"pyerrors.input.openQCD.extract_t0": {"tf": 1}, "pyerrors.npr.Npr_matrix": {"tf": 1}}, "df": 2}}}, "f": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "f": {"docs": {"pyerrors.input.sfcf.read_sfcf": {"tf": 1}, "pyerrors.input.sfcf.read_sfcf_c": {"tf": 1.4142135623730951}}, "df": 2}}}, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.linalg.slogdet": {"tf": 1}}, "df": 1}}}}, "w": {"docs": {"pyerrors.obs.Obs.gamma_method": {"tf": 1}}, "df": 1}}}, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"pyerrors.linalg.grad_eig": {"tf": 1}}, "df": 1}}}}, "_": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"pyerrors.obs.Obs": {"tf": 1}}, "df": 1}}}}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.obs.Obs": {"tf": 1}}, "df": 1}}}}}}, "i": {"docs": {}, "df": 0, "m": {"docs": {"pyerrors.input.sfcf.read_sfcf": {"tf": 1}, "pyerrors.input.sfcf.read_sfcf_c": {"tf": 1}}, "df": 2, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors": {"tf": 2.23606797749979}, "pyerrors.fits.least_squares": {"tf": 1.4142135623730951}, "pyerrors.fits.total_least_squares": {"tf": 1}, "pyerrors.jackknifing.derived_jack": {"tf": 1}, "pyerrors.linalg.derived_array": {"tf": 1}, "pyerrors.obs.derived_observable": {"tf": 1}}, "df": 6}}}, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.mpm.matrix_pencil_method": {"tf": 1}, "pyerrors.obs.covariance2": {"tf": 1}, "pyerrors.obs.covariance3": {"tf": 1}}, "df": 3}}}}, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.mpm.matrix_pencil_method_old": {"tf": 1}}, "df": 1}}}}, "i": {"docs": {"pyerrors.npr.Npr_matrix": {"tf": 1}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.fits.least_squares": {"tf": 1}}, "df": 1}}}}}, "a": {"docs": {}, "df": 0, "g": {"docs": {"pyerrors.npr.Npr_matrix": {"tf": 1}}, "df": 1, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {"pyerrors.input.sfcf.read_sfcf": {"tf": 1}, "pyerrors.input.sfcf.read_sfcf_c": {"tf": 1}, "pyerrors.npr.Npr_matrix": {"tf": 1}, "pyerrors.obs.CObs.gamma_method": {"tf": 1}, "pyerrors.obs.CObs.is_zero": {"tf": 1}}, "df": 5}}}}}}}}, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"pyerrors": {"tf": 1}}, "df": 1}}}}}}}}, "n": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors": {"tf": 1}, "pyerrors.fits.least_squares": {"tf": 1.4142135623730951}, 
"pyerrors.fits.total_least_squares": {"tf": 1.4142135623730951}, "pyerrors.fits.fit_general": {"tf": 1}, "pyerrors.input.hadrons.read_meson_hd5": {"tf": 1}, "pyerrors.input.openQCD.extract_t0": {"tf": 1}, "pyerrors.input.sfcf.read_sfcf_c": {"tf": 1}}, "df": 7}}}, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {"pyerrors.correlators.Corr": {"tf": 1}}, "df": 1}}}}}}, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "d": {"docs": {"pyerrors.input.openQCD.extract_t0": {"tf": 1}}, "df": 1}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {"pyerrors.mpm.matrix_pencil_method_old": {"tf": 1}}, "df": 1}}}}}, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "x": {"docs": {"pyerrors.correlators.Corr.plottable": {"tf": 1}, "pyerrors.input.hadrons.read_ExternalLeg_hd5": {"tf": 1.4142135623730951}, "pyerrors.input.hadrons.read_Bilinear_hd5": {"tf": 1.4142135623730951}}, "df": 3}}, "i": {"docs": {}, "df": 0, "c": {"docs": {"pyerrors.correlators.Corr.plateau": {"tf": 1}, "pyerrors.fits.Fit_result": {"tf": 1}}, "df": 2}, "v": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "u": {"docs": {"pyerrors.fits.Fit_result": {"tf": 1}, "pyerrors.fits.ks_test": {"tf": 1}}, "df": 2}}}, "d": {"docs": {}, "df": 0, "u": {"docs": {"pyerrors.obs.Obs.__init__": {"tf": 1}}, "df": 1}}}}}, "t": {"1": {"6": {"docs": {"pyerrors.npr.Npr_matrix": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {"pyerrors.correlators.Corr.roll": {"tf": 1}, "pyerrors.correlators.Corr.T_symmetry": {"tf": 1}, "pyerrors.npr.Npr_matrix": {"tf": 3.1622776601683795}}, "df": 3, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "n": {"docs": {"pyerrors.input.hadrons.read_meson_hd5": {"tf": 1}, "pyerrors.input.hadrons.read_ExternalLeg_hd5": {"tf": 1}, "pyerrors.input.hadrons.read_Bilinear_hd5": {"tf": 1}}, "df": 3}, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.linalg.derived_array": {"tf": 1}, "pyerrors.obs.derived_observable": {"tf": 1}}, "df": 2}}}}, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.npr.Npr_matrix": {"tf": 1.4142135623730951}}, "df": 1}}}}, "f": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {"pyerrors.npr.Npr_matrix": {"tf": 1.4142135623730951}}, "df": 1}}}, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.npr.Npr_matrix": {"tf": 1}}, "df": 1}}}}, "g": {"docs": {"pyerrors.npr.Npr_matrix": {"tf": 1}}, "df": 1, "r": {"docs": {"pyerrors.obs.Obs.plot_tauint": {"tf": 1}, "pyerrors.obs.covariance3": {"tf": 1}}, "df": 2}}}, "_": {"docs": {"pyerrors.npr.Npr_matrix": {"tf": 1}}, "df": 1}}, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {"pyerrors.fits.least_squares": {"tf": 1}, "pyerrors.fits.total_least_squares": {"tf": 1}, "pyerrors.fits.fit_general": {"tf": 1}, "pyerrors.npr.Npr_matrix": {"tf": 1}, "pyerrors.obs.Obs": {"tf": 1}, "pyerrors.obs.Obs.__init__": {"tf": 1}, "pyerrors.roots.find_root": {"tf": 1}}, "df": 7, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {"pyerrors.fits.least_squares": {"tf": 1}, "pyerrors.fits.total_least_squares": {"tf": 1}, "pyerrors.fits.fit_general": {"tf": 1}}, 
"df": 3}}}}}}}}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "d": {"docs": {"pyerrors.input.openQCD.extract_t0": {"tf": 1}, "pyerrors.input.sfcf.read_sfcf": {"tf": 1}, "pyerrors.input.sfcf.read_sfcf_c": {"tf": 1}, "pyerrors.input.sfcf.read_qtop": {"tf": 1}, "pyerrors.obs.derived_observable": {"tf": 1}, "pyerrors.obs.covariance": {"tf": 1}, "pyerrors.obs.covariance2": {"tf": 1}, "pyerrors.obs.covariance3": {"tf": 1}}, "df": 8}}}, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {"pyerrors.npr.Npr_matrix": {"tf": 1}}, "df": 1}}, "c": {"docs": {"pyerrors.obs.Obs": {"tf": 2}}, "df": 1}}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {"pyerrors.linalg.inv": {"tf": 1}}, "df": 1}, "t": {"docs": {"pyerrors.npr.inv_propagator": {"tf": 1}, "pyerrors.npr.Zq": {"tf": 1}}, "df": 2}}}, "_": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {"pyerrors.npr.Zq": {"tf": 1}}, "df": 1}}}}}}, "f": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "m": {"docs": {"pyerrors.npr.Npr_matrix": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"pyerrors.correlators.Corr": {"tf": 1}, "pyerrors.npr.Npr_matrix": {"tf": 1.4142135623730951}}, "df": 2}, "m": {"docs": {"pyerrors.npr.Npr_matrix": {"tf": 1}}, "df": 1, "s": {"docs": {"pyerrors.npr.Npr_matrix": {"tf": 1.7320508075688772}}, "df": 1}}}}, "d": {"docs": {"pyerrors.input.sfcf.read_sfcf_c": {"tf": 1.4142135623730951}}, "df": 1, "l": {"docs": {"pyerrors.correlators.Corr.reweight": {"tf": 1.7320508075688772}, "pyerrors.obs.Obs.__init__": {"tf": 1}, "pyerrors.obs.merge_idx": {"tf": 1.4142135623730951}, "pyerrors.obs.filter_zeroes": {"tf": 1}, "pyerrors.obs.reweight": {"tf": 1.7320508075688772}}, "df": 5}, "x": {"docs": {"pyerrors.obs.Obs.expand_deltas": {"tf": 2}, "pyerrors.obs.Obs.calc_gamma": {"tf": 2}, "pyerrors.obs.expand_deltas_for_merge": {"tf": 2}, "pyerrors.obs.filter_zeroes": {"tf": 1}}, "df": 4, "_": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "d": {"docs": {"pyerrors.obs.reduce_deltas": {"tf": 1.7320508075688772}}, "df": 1}}}, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "w": {"docs": {"pyerrors.obs.reduce_deltas": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.obs.correlate": {"tf": 1}, "pyerrors.obs.covariance3": {"tf": 1}}, "df": 2}}}}, "e": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "e": {"docs": {"pyerrors.mpm.matrix_pencil_method": {"tf": 1}}, "df": 1}}}, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {"pyerrors.npr.Npr_matrix": {"tf": 1}}, "df": 1}}}}}}}, "n": {"docs": {"pyerrors.fits.fit_lin": {"tf": 1.4142135623730951}}, "df": 1, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "i": {"docs": {"pyerrors": {"tf": 1.7320508075688772}, "pyerrors.fits.least_squares": {"tf": 1.4142135623730951}, "pyerrors.fits.total_least_squares": {"tf": 1.4142135623730951}, "pyerrors.jackknifing.derived_jack": {"tf": 1.4142135623730951}, "pyerrors.linalg.derived_array": {"tf": 1.4142135623730951}, "pyerrors.npr.Npr_matrix": {"tf": 2}, "pyerrors.obs.Obs.__init__": {"tf": 1}, 
"pyerrors.obs.derived_observable": {"tf": 1.4142135623730951}, "pyerrors.roots.find_root": {"tf": 1.4142135623730951}}, "df": 9}}, "b": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"pyerrors.correlators.Corr.roll": {"tf": 1}, "pyerrors.correlators.Corr.T_symmetry": {"tf": 1}, "pyerrors.input.bdio.read_mesons": {"tf": 1}, "pyerrors.input.bdio.read_dSdm": {"tf": 1}, "pyerrors.input.openQCD.extract_t0": {"tf": 1}, "pyerrors.misc.gen_correlated_data": {"tf": 1.4142135623730951}, "pyerrors.mpm.matrix_pencil_method": {"tf": 1}, "pyerrors.mpm.matrix_pencil_method_old": {"tf": 1}, "pyerrors.npr.Npr_matrix": {"tf": 2}, "pyerrors.obs.Obs.expand_deltas": {"tf": 1}, "pyerrors.obs.Obs.calc_gamma": {"tf": 1}, "pyerrors.obs.Obs.gamma_method": {"tf": 1}, "pyerrors.obs.expand_deltas_for_merge": {"tf": 1}, "pyerrors.obs.pseudo_Obs": {"tf": 1}}, "df": 14}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {"pyerrors.fits.fit_general": {"tf": 1}, "pyerrors.obs.derived_observable": {"tf": 1.4142135623730951}}, "df": 2}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {"pyerrors.fits.fit_general": {"tf": 1}, "pyerrors.obs.derived_observable": {"tf": 1}}, "df": 2}}}}}}}}, "_": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "d": {"docs": {"pyerrors.obs.derived_observable": {"tf": 1}}, "df": 1}}}}}}}, "p": {"docs": {"pyerrors": {"tf": 2}, "pyerrors.fits.fit_general": {"tf": 1}, "pyerrors.jackknifing.derived_jack": {"tf": 1}, "pyerrors.linalg.eigh": {"tf": 1}, "pyerrors.linalg.eig": {"tf": 1}, "pyerrors.linalg.slogdet": {"tf": 1}, "pyerrors.npr.Npr_matrix": {"tf": 2}}, "df": 7}, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"pyerrors.correlators.Corr.reweight": {"tf": 1}, "pyerrors.input.openQCD.extract_t0": {"tf": 1}, "pyerrors.obs.Obs.plot_rho": {"tf": 1}, "pyerrors.obs.reweight": {"tf": 1}}, "df": 4}}}}, "n": {"docs": {"pyerrors.fits.least_squares": {"tf": 1.4142135623730951}, "pyerrors.fits.total_least_squares": {"tf": 1.4142135623730951}, "pyerrors.fits.fit_general": {"tf": 1.4142135623730951}}, "df": 3, "e": {"docs": {"pyerrors.input.bdio.read_mesons": {"tf": 1}, "pyerrors.input.bdio.read_dSdm": {"tf": 1}, "pyerrors.mpm.matrix_pencil_method_old": {"tf": 1}, "pyerrors.npr.Npr_matrix": {"tf": 1.4142135623730951}}, "df": 4}}, "f": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.input.sfcf.read_sfcf_c": {"tf": 1}}, "df": 1}}}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {"pyerrors.jackknifing.derived_jack": {"tf": 1}, "pyerrors.npr.Npr_matrix": {"tf": 1.4142135623730951}, "pyerrors.obs.derived_observable": {"tf": 1}}, "df": 3}}, "i": {"docs": {}, "df": 0, "s": {"docs": {"pyerrors.mpm.matrix_pencil_method": {"tf": 1.4142135623730951}, "pyerrors.mpm.matrix_pencil_method_old": {"tf": 1.4142135623730951}}, "df": 2, "e": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {"pyerrors.mpm.matrix_pencil_method_old": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}}}, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"pyerrors.correlators.Corr.dump": {"tf": 1}, "pyerrors.input.bdio.read_mesons": {"tf": 1}, 
"pyerrors.input.hadrons.read_meson_hd5": {"tf": 1}, "pyerrors.input.hadrons.read_ExternalLeg_hd5": {"tf": 1}, "pyerrors.input.hadrons.read_Bilinear_hd5": {"tf": 1}, "pyerrors.input.sfcf.read_sfcf": {"tf": 1}, "pyerrors.input.sfcf.read_sfcf_c": {"tf": 1.4142135623730951}, "pyerrors.jackknifing.Jack.dump": {"tf": 1}, "pyerrors.misc.gen_correlated_data": {"tf": 1.4142135623730951}, "pyerrors.obs.Obs": {"tf": 1}, "pyerrors.obs.Obs.__init__": {"tf": 1}, "pyerrors.obs.Obs.dump": {"tf": 1}, "pyerrors.obs.filter_zeroes": {"tf": 1.7320508075688772}, "pyerrors.obs.pseudo_Obs": {"tf": 1}}, "df": 14, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {"pyerrors.input.hadrons.read_meson_hd5": {"tf": 1}, "pyerrors.input.hadrons.read_ExternalLeg_hd5": {"tf": 1}, "pyerrors.input.hadrons.read_Bilinear_hd5": {"tf": 1}}, "df": 3}}}}}}, "n": {"docs": {"pyerrors.npr.Npr_matrix": {"tf": 1}}, "df": 1}}, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"pyerrors.fits.least_squares": {"tf": 1}}, "df": 1}}}}, "w": {"docs": {"pyerrors.obs.expand_deltas_for_merge": {"tf": 1.7320508075688772}, "pyerrors.obs.filter_zeroes": {"tf": 1}, "pyerrors.obs.merge_obs": {"tf": 1}}, "df": 3, "_": {"docs": {}, "df": 0, "j": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {"pyerrors.jackknifing.derived_jack": {"tf": 1}}, "df": 1}}}}, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "x": {"docs": {"pyerrors.obs.expand_deltas_for_merge": {"tf": 2}, "pyerrors.obs.filter_zeroes": {"tf": 1}}, "df": 2}}}, "o": {"docs": {}, "df": 0, "b": {"docs": {"pyerrors.obs.derived_observable": {"tf": 1}}, "df": 1}}}}, "x": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.npr.Npr_matrix": {"tf": 1}}, "df": 1}}, "e": {"docs": {}, "df": 0, "d": {"docs": {"pyerrors.npr.Npr_matrix": {"tf": 1.7320508075688772}}, "df": 1}}, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {"pyerrors.obs.correlate": {"tf": 1}}, "df": 1}}}}}}}}, "r": {"docs": {}, "df": 0, "w": {"docs": {"pyerrors.input.misc.read_pbp": {"tf": 1}, "pyerrors.input.openQCD.read_rwms": {"tf": 1}}, "df": 2}}, "t": {"docs": {}, "df": 0, "h": {"docs": {"pyerrors.mpm.matrix_pencil_method_old": {"tf": 1}}, "df": 1}}, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "y": {"docs": {"pyerrors.npr.Npr_matrix": {"tf": 3.4641016151377544}}, "df": 1, "(": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "p": {"docs": {"pyerrors.npr.Npr_matrix": {"tf": 1}}, "df": 1}}}}}}}}}}, "i": {"docs": {}, "df": 0, "m": {"docs": {"pyerrors.npr.Npr_matrix": {"tf": 1}}, "df": 1}}}, "b": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"pyerrors.npr.Npr_matrix": {"tf": 1}}, "df": 1}}}}, "_": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {"pyerrors.obs.Obs": {"tf": 1}, "pyerrors.obs.Obs.gamma_method": {"tf": 1}}, "df": 2, "_": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "b": {"docs": {"pyerrors.obs.Obs": {"tf": 1}}, "df": 1}}}}}}}}}}}}, "o": {"docs": {"pyerrors.input.bdio.read_ADerrors": {"tf": 
1.7320508075688772}, "pyerrors.input.bdio.write_ADerrors": {"tf": 1.7320508075688772}, "pyerrors.input.bdio.read_mesons": {"tf": 1.7320508075688772}, "pyerrors.input.bdio.read_dSdm": {"tf": 1.7320508075688772}}, "df": 4, "b": {"docs": {"pyerrors": {"tf": 2.8284271247461903}, "pyerrors.correlators.Corr": {"tf": 1.7320508075688772}, "pyerrors.correlators.Corr.correlate": {"tf": 1}, "pyerrors.correlators.Corr.reweight": {"tf": 1}, "pyerrors.correlators.Corr.show": {"tf": 1}, "pyerrors.fits.least_squares": {"tf": 1.4142135623730951}, "pyerrors.fits.total_least_squares": {"tf": 2.23606797749979}, "pyerrors.fits.fit_lin": {"tf": 2.23606797749979}, "pyerrors.fits.ks_test": {"tf": 1}, "pyerrors.fits.fit_general": {"tf": 2.23606797749979}, "pyerrors.input.bdio.write_ADerrors": {"tf": 1}, "pyerrors.input.openQCD.extract_t0": {"tf": 1}, "pyerrors.linalg.derived_array": {"tf": 1.4142135623730951}, "pyerrors.linalg.inv": {"tf": 1}, "pyerrors.linalg.cholesky": {"tf": 1}, "pyerrors.linalg.scalar_mat_op": {"tf": 1}, "pyerrors.linalg.eigh": {"tf": 1}, "pyerrors.linalg.eig": {"tf": 1}, "pyerrors.linalg.pinv": {"tf": 1}, "pyerrors.linalg.svd": {"tf": 1}, "pyerrors.linalg.slogdet": {"tf": 1}, "pyerrors.mpm.matrix_pencil_method": {"tf": 1.4142135623730951}, "pyerrors.mpm.matrix_pencil_method_old": {"tf": 1}, "pyerrors.obs.Obs": {"tf": 1.4142135623730951}, "pyerrors.obs.Obs.__init__": {"tf": 1}, "pyerrors.obs.Obs.gamma_method": {"tf": 1}, "pyerrors.obs.Obs.details": {"tf": 1}, "pyerrors.obs.Obs.dump": {"tf": 1}, "pyerrors.obs.derived_observable": {"tf": 1.4142135623730951}, "pyerrors.obs.reweight": {"tf": 1.7320508075688772}, "pyerrors.obs.correlate": {"tf": 1.4142135623730951}, "pyerrors.obs.covariance": {"tf": 1.4142135623730951}, "pyerrors.obs.covariance2": {"tf": 1.4142135623730951}, "pyerrors.obs.covariance3": {"tf": 1.4142135623730951}, "pyerrors.obs.pseudo_Obs": {"tf": 1}, "pyerrors.obs.merge_obs": {"tf": 1}, "pyerrors.roots.find_root": {"tf": 1.4142135623730951}}, "df": 37, "s": {"1": {"docs": {"pyerrors.linalg.derived_array": {"tf": 1}, "pyerrors.obs.derived_observable": {"tf": 1.4142135623730951}, "pyerrors.obs.reweight": {"tf": 1}, "pyerrors.obs.covariance": {"tf": 1}, "pyerrors.obs.covariance2": {"tf": 1}, "pyerrors.obs.covariance3": {"tf": 1}}, "df": 6}, "2": {"docs": {"pyerrors.linalg.derived_array": {"tf": 1}, "pyerrors.obs.derived_observable": {"tf": 1.4142135623730951}, "pyerrors.obs.reweight": {"tf": 1}, "pyerrors.obs.covariance": {"tf": 1.4142135623730951}, "pyerrors.obs.covariance2": {"tf": 1.4142135623730951}, "pyerrors.obs.covariance3": {"tf": 1.4142135623730951}}, "df": 6}, "3": {"docs": {"pyerrors.linalg.derived_array": {"tf": 1}, "pyerrors.obs.derived_observable": {"tf": 1}, "pyerrors.obs.reweight": {"tf": 1}}, "df": 3}, "docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "v": {"docs": {"pyerrors": {"tf": 1}, "pyerrors.correlators.Corr.reweight": {"tf": 1.4142135623730951}, "pyerrors.misc.gen_correlated_data": {"tf": 1.7320508075688772}, "pyerrors.obs.Obs": {"tf": 1}, "pyerrors.obs.Obs.is_zero_within_error": {"tf": 1}, "pyerrors.obs.Obs.is_zero": {"tf": 1}, "pyerrors.obs.CObs": {"tf": 1}, "pyerrors.obs.derived_observable": {"tf": 1}, "pyerrors.obs.reweight": {"tf": 1.7320508075688772}, "pyerrors.obs.correlate": {"tf": 2.23606797749979}, "pyerrors.obs.covariance": {"tf": 1.4142135623730951}, "pyerrors.obs.covariance2": {"tf": 1.4142135623730951}, "pyerrors.obs.covariance3": {"tf": 1.4142135623730951}, "pyerrors.obs.merge_obs": {"tf": 1.4142135623730951}}, "df": 
14}}}, "[": {"docs": {}, "df": 0, "i": {"docs": {"pyerrors.correlators.Corr.reweight": {"tf": 1.4142135623730951}, "pyerrors.obs.reweight": {"tf": 1.4142135623730951}}, "df": 2}}, "_": {"docs": {}, "df": 0, "a": {"docs": {"pyerrors.obs.correlate": {"tf": 1}}, "df": 1}, "b": {"docs": {"pyerrors.obs.correlate": {"tf": 1}}, "df": 1}}}, "j": {"docs": {"pyerrors.correlators.Corr.fit": {"tf": 1}}, "df": 1, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.correlators.Corr": {"tf": 1}, "pyerrors.correlators.Corr.m_eff": {"tf": 1}, "pyerrors.correlators.Corr.plateau": {"tf": 1}, "pyerrors.correlators.Corr.set_prange": {"tf": 1}, "pyerrors.correlators.Corr.show": {"tf": 1}, "pyerrors.fits.least_squares": {"tf": 1}, "pyerrors.fits.total_least_squares": {"tf": 1}, "pyerrors.fits.ks_test": {"tf": 1}, "pyerrors.npr.Npr_matrix": {"tf": 2.8284271247461903}, "pyerrors.obs.Obs": {"tf": 1}, "pyerrors.obs.Obs.__init__": {"tf": 1}, "pyerrors.obs.derived_observable": {"tf": 1}, "pyerrors.obs.dump_object": {"tf": 1}, "pyerrors.obs.load_object": {"tf": 1}}, "df": 14}}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"pyerrors.correlators.Corr": {"tf": 1}, "pyerrors.correlators.Corr.plateau": {"tf": 1}}, "df": 2, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "d": {"docs": {"pyerrors": {"tf": 1}}, "df": 1}}}}, "w": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.input.bdio.read_mesons": {"tf": 1}, "pyerrors.obs.Obs": {"tf": 1.4142135623730951}}, "df": 2}}}}}}}, "p": {"docs": {"pyerrors.linalg.scalar_mat_op": {"tf": 1}}, "df": 1, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {"pyerrors": {"tf": 1}, "pyerrors.fits.least_squares": {"tf": 1}, "pyerrors.fits.error_band": {"tf": 1}, "pyerrors.mpm.matrix_pencil_method": {"tf": 1}}, "df": 4}, "o": {"docs": {}, "df": 0, "n": {"docs": {"pyerrors.fits.least_squares": {"tf": 1.4142135623730951}, "pyerrors.fits.total_least_squares": {"tf": 1}, "pyerrors.npr.Npr_matrix": {"tf": 2.23606797749979}, "pyerrors.obs.Obs.__init__": {"tf": 1.4142135623730951}, "pyerrors.obs.filter_zeroes": {"tf": 1}}, "df": 5}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {"pyerrors": {"tf": 1}, "pyerrors.correlators.Corr": {"tf": 1}, "pyerrors.jackknifing.derived_jack": {"tf": 1}, "pyerrors.linalg.matmul": {"tf": 1}, "pyerrors.linalg.scalar_mat_op": {"tf": 1}, "pyerrors.obs.Obs": {"tf": 1}, "pyerrors.obs.derived_observable": {"tf": 1}}, "df": 7, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"pyerrors.linalg.matmul": {"tf": 1}}, "df": 1}}}}, "n": {"docs": {}, "df": 0, "q": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "d": {"docs": {"pyerrors.input.openQCD.read_rwms": {"tf": 1.4142135623730951}, "pyerrors.input.openQCD.extract_t0": {"tf": 1.4142135623730951}}, "df": 2}}}}}}, "n": {"docs": {"pyerrors.correlators.Corr": {"tf": 1}, "pyerrors.fits.least_squares": {"tf": 1}, "pyerrors.jackknifing.derived_jack": {"tf": 1}, "pyerrors.npr.Npr_matrix": {"tf": 1.4142135623730951}, "pyerrors.obs.Obs.plot_rep_dist": {"tf": 1}, "pyerrors.obs.filter_zeroes": {"tf": 1}, "pyerrors.obs.derived_observable": {"tf": 1}, "pyerrors.obs.merge_obs": {"tf": 1}}, "df": 8, "c": {"docs": {"pyerrors.mpm.matrix_pencil_method": {"tf": 1}}, "df": 1}}, "u": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.npr.Npr_matrix.g5H": {"tf": 1}, "pyerrors.obs.filter_zeroes": {"tf": 1}}, "df": 2, "p": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": 
{"docs": {"pyerrors.correlators.Corr.plottable": {"tf": 1.4142135623730951}, "pyerrors.correlators.Corr.fit": {"tf": 1.4142135623730951}, "pyerrors.fits.least_squares": {"tf": 1}, "pyerrors.fits.total_least_squares": {"tf": 1}, "pyerrors.fits.fit_general": {"tf": 1}, "pyerrors.input.hadrons.read_meson_hd5": {"tf": 1}, "pyerrors.input.hadrons.read_ExternalLeg_hd5": {"tf": 1}, "pyerrors.input.hadrons.read_Bilinear_hd5": {"tf": 1}, "pyerrors.obs.Obs": {"tf": 1}, "pyerrors.obs.Obs.details": {"tf": 1}}, "df": 10}}}}}, "r": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"pyerrors.correlators.Corr.reverse": {"tf": 1}, "pyerrors.input.hadrons.read_ExternalLeg_hd5": {"tf": 1.4142135623730951}, "pyerrors.input.hadrons.read_Bilinear_hd5": {"tf": 1.4142135623730951}, "pyerrors.npr.Npr_matrix": {"tf": 2.449489742783178}, "pyerrors.obs.covariance": {"tf": 1}, "pyerrors.obs.covariance2": {"tf": 1}, "pyerrors.obs.covariance3": {"tf": 1}}, "df": 7, "=": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"pyerrors.npr.Npr_matrix": {"tf": 1}}, "df": 1}}}}}}}, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"pyerrors.fits.total_least_squares": {"tf": 1}}, "df": 1}}}}}}}, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {"pyerrors.fits.least_squares": {"tf": 1}, "pyerrors.fits.total_least_squares": {"tf": 1}}, "df": 2}}}}}}}, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.fits.least_squares": {"tf": 1}, "pyerrors.fits.total_least_squares": {"tf": 1}, "pyerrors.fits.fit_general": {"tf": 1}}, "df": 3}}}, "f": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.input.sfcf.read_sfcf_c": {"tf": 1}, "pyerrors.npr.Npr_matrix": {"tf": 2}}, "df": 2, "=": {"0": {"docs": {"pyerrors.npr.Npr_matrix": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}}}}}}, "l": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"pyerrors.mpm.matrix_pencil_method_old": {"tf": 1}}, "df": 1}}}}, "c": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "i": {"docs": {"pyerrors.npr.Npr_matrix": {"tf": 1}}, "df": 1}}}}}, "w": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {"pyerrors.npr.Npr_matrix": {"tf": 1}}, "df": 1}}}}}}}, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "g": {"docs": {"pyerrors": {"tf": 1.4142135623730951}, "pyerrors.correlators.Corr.m_eff": {"tf": 1}}, "df": 2, "(": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "(": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.correlators.Corr.m_eff": {"tf": 1}}, "df": 1}}}}, "s": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"pyerrors.correlators.Corr.show": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "w": {"docs": {"pyerrors.npr.Npr_matrix": {"tf": 1.4142135623730951}}, "df": 1}, "a": {"docs": {}, "df": 0, "d": {"docs": {"pyerrors.obs.load_object": {"tf": 1}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.correlators.Corr": {"tf": 1}, "pyerrors.correlators.Corr.plottable": {"tf": 1}, 
"pyerrors.correlators.Corr.fit": {"tf": 1}, "pyerrors.correlators.Corr.plateau": {"tf": 1.4142135623730951}, "pyerrors.correlators.Corr.show": {"tf": 2}, "pyerrors.fits.Fit_result": {"tf": 1}, "pyerrors.fits.least_squares": {"tf": 2.449489742783178}, "pyerrors.fits.total_least_squares": {"tf": 2.449489742783178}, "pyerrors.fits.fit_lin": {"tf": 1.7320508075688772}, "pyerrors.fits.ks_test": {"tf": 1}, "pyerrors.fits.fit_general": {"tf": 2}, "pyerrors.input.misc.read_pbp": {"tf": 1.7320508075688772}, "pyerrors.input.openQCD.read_rwms": {"tf": 1.7320508075688772}, "pyerrors.input.openQCD.extract_t0": {"tf": 1.4142135623730951}, "pyerrors.jackknifing.derived_jack": {"tf": 1}, "pyerrors.linalg.derived_array": {"tf": 1.4142135623730951}, "pyerrors.misc.gen_correlated_data": {"tf": 1.4142135623730951}, "pyerrors.mpm.matrix_pencil_method": {"tf": 1.7320508075688772}, "pyerrors.mpm.matrix_pencil_method_old": {"tf": 1}, "pyerrors.obs.Obs": {"tf": 1.4142135623730951}, "pyerrors.obs.Obs.__init__": {"tf": 3}, "pyerrors.obs.Obs.expand_deltas": {"tf": 1.4142135623730951}, "pyerrors.obs.Obs.calc_gamma": {"tf": 1.4142135623730951}, "pyerrors.obs.merge_idx": {"tf": 1.7320508075688772}, "pyerrors.obs.expand_deltas_for_merge": {"tf": 2.8284271247461903}, "pyerrors.obs.filter_zeroes": {"tf": 2}, "pyerrors.obs.derived_observable": {"tf": 2}, "pyerrors.obs.reduce_deltas": {"tf": 1.7320508075688772}, "pyerrors.obs.reweight": {"tf": 1.7320508075688772}}, "df": 29, "_": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "b": {"docs": {"pyerrors.obs.merge_obs": {"tf": 1}}, "df": 1}}}}}}}}, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"pyerrors.fits.least_squares": {"tf": 1.4142135623730951}, "pyerrors.fits.total_least_squares": {"tf": 1.4142135623730951}, "pyerrors.fits.fit_lin": {"tf": 1}, "pyerrors.fits.fit_general": {"tf": 1.4142135623730951}, "pyerrors.input.openQCD.extract_t0": {"tf": 1.4142135623730951}}, "df": 5}}}, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "g": {"docs": {"pyerrors.linalg.eigh": {"tf": 1}, "pyerrors.linalg.eig": {"tf": 1}, "pyerrors.linalg.slogdet": {"tf": 1}}, "df": 3}}}}, "b": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {"pyerrors.input.bdio.read_ADerrors": {"tf": 1.4142135623730951}, "pyerrors.input.bdio.write_ADerrors": {"tf": 1.4142135623730951}, "pyerrors.input.bdio.read_mesons": {"tf": 1.4142135623730951}, "pyerrors.input.bdio.read_dSdm": {"tf": 1.4142135623730951}}, "df": 4}}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "r": {"docs": {"pyerrors.input.bdio.read_ADerrors": {"tf": 1.4142135623730951}, "pyerrors.input.bdio.write_ADerrors": {"tf": 1.4142135623730951}, "pyerrors.input.bdio.read_mesons": {"tf": 1.4142135623730951}, "pyerrors.input.bdio.read_dSdm": {"tf": 1.4142135623730951}}, "df": 4}}}, "b": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {"pyerrors.input.bdio.read_ADerrors": {"tf": 1.4142135623730951}, "pyerrors.input.bdio.write_ADerrors": {"tf": 1.4142135623730951}, "pyerrors.input.bdio.read_mesons": {"tf": 1.4142135623730951}, "pyerrors.input.bdio.read_dSdm": {"tf": 1.4142135623730951}}, "df": 4}}}}}}, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {"pyerrors.correlators.Corr": {"tf": 1}, "pyerrors.input.misc.read_pbp": {"tf": 1}, 
"pyerrors.input.openQCD.read_rwms": {"tf": 1}, "pyerrors.input.sfcf.read_sfcf": {"tf": 1}, "pyerrors.input.sfcf.read_sfcf_c": {"tf": 1}, "pyerrors.obs.Obs": {"tf": 1}}, "df": 6}}}, "(": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, ")": {"docs": {}, "df": 0, "/": {"2": {"docs": {"pyerrors.mpm.matrix_pencil_method": {"tf": 1}}, "df": 1}, "3": {"docs": {"pyerrors.mpm.matrix_pencil_method": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0, "/": {"2": {"docs": {"pyerrors.mpm.matrix_pencil_method": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}}}}}}}}}, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"pyerrors.correlators.Corr.fit": {"tf": 1}}, "df": 1}}}}}}}}, "v": {"docs": {"pyerrors.npr.Npr_matrix": {"tf": 1}}, "df": 1}}, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "g": {"docs": {"pyerrors.fits.least_squares": {"tf": 1}}, "df": 1}}}}}, "l": {"docs": {"pyerrors.mpm.matrix_pencil_method": {"tf": 1}, "pyerrors.mpm.matrix_pencil_method_old": {"tf": 1.4142135623730951}, "pyerrors.npr.Npr_matrix": {"tf": 1.4142135623730951}, "pyerrors.npr.Zq": {"tf": 1}}, "df": 4}}}, "f": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.input.openQCD.extract_t0": {"tf": 1}}, "df": 1}}}, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.correlators.Corr.plateau": {"tf": 1}, "pyerrors.input.hadrons.read_ExternalLeg_hd5": {"tf": 1}, "pyerrors.input.hadrons.read_Bilinear_hd5": {"tf": 1}, "pyerrors.input.misc.read_pbp": {"tf": 1}, "pyerrors.input.openQCD.read_rwms": {"tf": 1}, "pyerrors.input.openQCD.extract_t0": {"tf": 1}}, "df": 6}}, "b": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {"pyerrors.correlators.Corr.show": {"tf": 1.4142135623730951}, "pyerrors.dirac.Grid_gamma": {"tf": 1}, "pyerrors.input.hadrons.read_meson_hd5": {"tf": 1.7320508075688772}, "pyerrors.input.sfcf.read_sfcf": {"tf": 1}, "pyerrors.input.sfcf.read_sfcf_c": {"tf": 1.4142135623730951}, "pyerrors.obs.Obs.__init__": {"tf": 1}}, "df": 6}}}, "r": {"docs": {}, "df": 0, "g": {"docs": {"pyerrors.fits.least_squares": {"tf": 1}}, "df": 1, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.mpm.matrix_pencil_method_old": {"tf": 1}}, "df": 1}}, "r": {"docs": {"pyerrors.mpm.matrix_pencil_method_old": {"tf": 1}}, "df": 1}}}}, "t": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {"pyerrors.input.openQCD.extract_t0": {"tf": 1}}, "df": 1}}}}}, "t": {"docs": {"pyerrors.npr.Npr_matrix": {"tf": 1}}, "df": 1}}, "a": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors": {"tf": 1}, "pyerrors.jackknifing.derived_jack": {"tf": 1}, "pyerrors.linalg.derived_array": {"tf": 1.4142135623730951}, "pyerrors.obs.derived_observable": {"tf": 1.4142135623730951}}, "df": 4}}}, "g": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "d": {"docs": {"pyerrors.fits.least_squares": {"tf": 1}, "pyerrors.fits.total_least_squares": {"tf": 1}, "pyerrors.jackknifing.derived_jack": {"tf": 1.4142135623730951}, "pyerrors.linalg.derived_array": {"tf": 
1.4142135623730951}, "pyerrors.obs.derived_observable": {"tf": 1.7320508075688772}, "pyerrors.roots.find_root": {"tf": 1}}, "df": 6}}}}, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {"pyerrors.misc.gen_correlated_data": {"tf": 1}, "pyerrors.obs.Obs.calc_gamma": {"tf": 1}, "pyerrors.obs.Obs.gamma_method": {"tf": 1.4142135623730951}, "pyerrors.obs.Obs.plot_tauint": {"tf": 1}, "pyerrors.obs.Obs.plot_rho": {"tf": 1}, "pyerrors.obs.covariance3": {"tf": 1}}, "df": 6}}}}}}}}}, "l": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {"pyerrors": {"tf": 1}}, "df": 1}}}}}, "l": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "g": {"docs": {"pyerrors.correlators.Corr.reweight": {"tf": 1}, "pyerrors.obs.reweight": {"tf": 1}}, "df": 2}}}}}}}, "o": {"docs": {}, "df": 0, "w": {"docs": {"pyerrors.npr.Npr_matrix": {"tf": 1}}, "df": 1}, "c": {"docs": {"pyerrors.npr.Npr_matrix": {"tf": 1}}, "df": 1}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"pyerrors.input.hadrons.read_meson_hd5": {"tf": 1}}, "df": 1, "n": {"docs": {"pyerrors.fits.least_squares": {"tf": 1.4142135623730951}, "pyerrors.input.sfcf.read_sfcf": {"tf": 1}, "pyerrors.input.sfcf.read_sfcf_c": {"tf": 1}, "pyerrors.obs.covariance2": {"tf": 1}, "pyerrors.obs.covariance3": {"tf": 1}}, "df": 5, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {"pyerrors.input.bdio.read_mesons": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {"pyerrors.obs.Obs.__init__": {"tf": 1}}, "df": 1}}}}}, "g": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "m": {"docs": {"pyerrors.obs.Obs.calc_gamma": {"tf": 1}, "pyerrors.obs.Obs.gamma_method": {"tf": 1}}, "df": 2}}}}}}}}, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "v": {"docs": {"pyerrors.correlators.Corr": {"tf": 1}, "pyerrors.input.bdio.read_ADerrors": {"tf": 1}, "pyerrors.input.bdio.write_ADerrors": {"tf": 1}, "pyerrors.input.bdio.read_mesons": {"tf": 1}, "pyerrors.input.bdio.read_dSdm": {"tf": 1}}, "df": 5}}}}, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"pyerrors.fits.Fit_result": {"tf": 1}}, "df": 1, "s": {"docs": {"pyerrors.input.bdio.read_mesons": {"tf": 1}, "pyerrors.input.bdio.read_dSdm": {"tf": 1}}, "df": 2}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "d": {"docs": {"pyerrors.input.bdio.write_ADerrors": {"tf": 1}, "pyerrors.jackknifing.derived_jack": {"tf": 1}, "pyerrors.linalg.derived_array": {"tf": 1}, "pyerrors.linalg.eigh": {"tf": 1}, "pyerrors.linalg.eig": {"tf": 1}, "pyerrors.obs.filter_zeroes": {"tf": 1}, 
"pyerrors.obs.derived_observable": {"tf": 1}}, "df": 7}}}}, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.mpm.matrix_pencil_method": {"tf": 1}}, "df": 1}}}}, "t": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"pyerrors.npr.Npr_matrix": {"tf": 1}}, "df": 1}}}}}, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "y": {"docs": {"pyerrors.correlators.Corr": {"tf": 1}, "pyerrors.fits.error_band": {"tf": 1}, "pyerrors.input.hadrons.read_ExternalLeg_hd5": {"tf": 1.4142135623730951}, "pyerrors.input.hadrons.read_Bilinear_hd5": {"tf": 1.4142135623730951}, "pyerrors.linalg.derived_array": {"tf": 1}, "pyerrors.npr.Npr_matrix": {"tf": 5.830951894845301}, "pyerrors.obs.Obs": {"tf": 1}, "pyerrors.obs.Obs.__init__": {"tf": 1}, "pyerrors.obs.Obs.gamma_method": {"tf": 1.4142135623730951}, "pyerrors.obs.derived_observable": {"tf": 1}}, "df": 10, "'": {"docs": {"pyerrors.npr.Npr_matrix": {"tf": 1.4142135623730951}}, "df": 1}, "(": {"docs": {}, "df": 0, "[": {"2": {"docs": {"pyerrors.npr.Npr_matrix": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0, "[": {"0": {"docs": {"pyerrors.npr.Npr_matrix": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}}}}}}, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"pyerrors.correlators.Corr.symmetric": {"tf": 1}, "pyerrors.correlators.Corr.anti_symmetric": {"tf": 1}, "pyerrors.input.openQCD.extract_t0": {"tf": 1}}, "df": 3}}}}, "g": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.correlators.Corr.reweight": {"tf": 1}, "pyerrors.fits.least_squares": {"tf": 1.4142135623730951}, "pyerrors.fits.total_least_squares": {"tf": 1}, "pyerrors.fits.fit_general": {"tf": 1}, "pyerrors.input.misc.read_pbp": {"tf": 1}, "pyerrors.input.openQCD.read_rwms": {"tf": 1}, "pyerrors.input.openQCD.extract_t0": {"tf": 1}, "pyerrors.input.sfcf.read_sfcf": {"tf": 1}, "pyerrors.input.sfcf.read_sfcf_c": {"tf": 1.4142135623730951}, "pyerrors.input.sfcf.read_qtop": {"tf": 1}, "pyerrors.jackknifing.Jack.dump": {"tf": 1}, "pyerrors.linalg.derived_array": {"tf": 1}, "pyerrors.misc.gen_correlated_data": {"tf": 1}, "pyerrors.mpm.matrix_pencil_method_old": {"tf": 1}, "pyerrors.obs.Obs.gamma_method": {"tf": 1}, "pyerrors.obs.Obs.dump": {"tf": 1}, "pyerrors.obs.derived_observable": {"tf": 1}, "pyerrors.obs.reweight": {"tf": 1}, "pyerrors.obs.covariance": {"tf": 1}, "pyerrors.obs.covariance2": {"tf": 1}, "pyerrors.obs.covariance3": {"tf": 1}, "pyerrors.obs.dump_object": {"tf": 1}}, "df": 22}}}}}}, "x": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, ":": {"1": {"2": {"0": {"5": {"docs": {"pyerrors.correlators.Corr.m_eff": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}}}}, "b": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {"pyerrors.jackknifing.derived_jack": {"tf": 1}, "pyerrors.linalg.derived_array": {"tf": 1}, "pyerrors.obs.derived_observable": {"tf": 1}}, "df": 3}}}}}}}}, "d": {"docs": {"pyerrors.input.bdio.read_ADerrors": {"tf": 1}, "pyerrors.input.bdio.write_ADerrors": {"tf": 1}, "pyerrors.input.bdio.read_mesons": {"tf": 1}, "pyerrors.input.bdio.read_dSdm": {"tf": 1}}, "df": 4, "d": {"docs": {"pyerrors.correlators.Corr": 
{"tf": 1}}, "df": 1, "i": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.mpm.matrix_pencil_method_old": {"tf": 1}, "pyerrors.obs.Obs": {"tf": 1}}, "df": 2}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"pyerrors.input.bdio.write_ADerrors": {"tf": 1}}, "df": 1}}}}}, "j": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.obs.pseudo_Obs": {"tf": 1}}, "df": 1}}}}}, "p": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {"pyerrors.correlators.Corr.gamma_method": {"tf": 1}, "pyerrors.fits.Fit_result.gamma_method": {"tf": 1}, "pyerrors.obs.correlate": {"tf": 1}, "pyerrors.obs.covariance": {"tf": 1}, "pyerrors.obs.covariance2": {"tf": 1}, "pyerrors.obs.covariance3": {"tf": 1}}, "df": 6}}, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {"pyerrors.input.sfcf.read_sfcf": {"tf": 1}, "pyerrors.input.sfcf.read_sfcf_c": {"tf": 1}}, "df": 2}}}}}}}, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {"pyerrors.correlators.Corr.anti_symmetric": {"tf": 1}, "pyerrors.correlators.Corr.m_eff": {"tf": 1}}, "df": 2}}, "o": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {"pyerrors.correlators.Corr.correlate": {"tf": 1}, "pyerrors.npr.Npr_matrix": {"tf": 1}, "pyerrors.obs.Obs": {"tf": 1.4142135623730951}, "pyerrors.obs.covariance3": {"tf": 1}}, "df": 4}}, "n": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "m": {"docs": {"pyerrors.jackknifing.derived_jack": {"tf": 1}, "pyerrors.obs.derived_observable": {"tf": 1}}, "df": 2}}}}, "p": {"docs": {"pyerrors.fits.least_squares": {"tf": 1}, "pyerrors.fits.total_least_squares": {"tf": 1}, "pyerrors.linalg.derived_array": {"tf": 1}, "pyerrors.obs.derived_observable": {"tf": 1}}, "df": 4}, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {"pyerrors.mpm.matrix_pencil_method": {"tf": 1}, "pyerrors.obs.Obs": {"tf": 1}, "pyerrors.obs.Obs.gamma_method": {"tf": 1}}, "df": 3}}, "z": {"docs": {"pyerrors.mpm.matrix_pencil_method": {"tf": 1}}, "df": 1}}}}, "y": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"pyerrors.obs.filter_zeroes": {"tf": 1}}, "df": 1}}}}}, "t": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.correlators.Corr.roll": {"tf": 1}, "pyerrors.correlators.Corr.T_symmetry": {"tf": 1}, "pyerrors.correlators.Corr.deriv": {"tf": 1}, "pyerrors.correlators.Corr.fit": {"tf": 1}, "pyerrors.correlators.Corr.plateau": {"tf": 1}, "pyerrors.correlators.Corr.set_prange": {"tf": 1}, "pyerrors.correlators.Corr.dump": {"tf": 1}, "pyerrors.fits.Fit_result": {"tf": 1}, "pyerrors.input.openQCD.read_rwms": {"tf": 1}, "pyerrors.npr.Npr_matrix": {"tf": 1.4142135623730951}, "pyerrors.npr.Zq": {"tf": 1}, "pyerrors.obs.Obs": {"tf": 1}, "pyerrors.obs.Obs.__init__": {"tf": 1}, "pyerrors.obs.correlate": {"tf": 1}}, "df": 14}}}}}, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {"pyerrors.obs.Obs.gamma_method": {"tf": 1}}, "df": 1}}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "g": {"docs": {"pyerrors.correlators.Corr.reweight": {"tf": 1}, "pyerrors.correlators.Corr.T_symmetry": {"tf": 1}, 
"pyerrors.correlators.Corr.plateau": {"tf": 1.4142135623730951}, "pyerrors.obs.reweight": {"tf": 1}}, "df": 4}}}}, "g": {"docs": {"pyerrors.correlators.Corr.plateau": {"tf": 1}}, "df": 1}, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {"pyerrors.correlators.Corr.show": {"tf": 1}}, "df": 1}}}}, "x": {"docs": {}, "df": 0, "i": {"docs": {"pyerrors.correlators.Corr.show": {"tf": 1.7320508075688772}}, "df": 1}}, "[": {"0": {"docs": {"pyerrors.fits.least_squares": {"tf": 1.4142135623730951}, "pyerrors.fits.total_least_squares": {"tf": 1.4142135623730951}, "pyerrors.fits.fit_general": {"tf": 1}}, "df": 3}, "1": {"docs": {"pyerrors.fits.least_squares": {"tf": 1.4142135623730951}, "pyerrors.fits.total_least_squares": {"tf": 1.4142135623730951}, "pyerrors.fits.fit_general": {"tf": 1}}, "df": 3}, "2": {"docs": {"pyerrors.fits.least_squares": {"tf": 1}, "pyerrors.fits.total_least_squares": {"tf": 1}, "pyerrors.fits.fit_general": {"tf": 1}}, "df": 3}, "docs": {}, "df": 0}, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {"pyerrors.fits.fit_lin": {"tf": 1}, "pyerrors.fits.fit_general": {"tf": 1}, "pyerrors.input.openQCD.extract_t0": {"tf": 1}, "pyerrors.obs.Obs.calc_gamma": {"tf": 1}}, "df": 4}}, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "i": {"docs": {"pyerrors.npr.Npr_matrix": {"tf": 1}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "n": {"docs": {"pyerrors.npr.Npr_matrix": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "b": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "v": {"docs": {"pyerrors.npr.Npr_matrix": {"tf": 1}}, "df": 1}}, "s": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.obs.filter_zeroes": {"tf": 1}}, "df": 1}}}}, "(": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"2": {"docs": {}, "df": 0, "(": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "s": {"1": {"docs": {"pyerrors.obs.covariance3": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}}}}}, "docs": {}, "df": 0, "(": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "s": {"1": {"docs": {"pyerrors.obs.covariance": {"tf": 1}, "pyerrors.obs.covariance2": {"tf": 1}}, "df": 2}, "docs": {}, "df": 0}}}}}}}}}}}}}}}}}}, "t": {"0": {"docs": {"pyerrors.input.openQCD.extract_t0": {"tf": 1.7320508075688772}}, "df": 1}, "docs": {"pyerrors.correlators.Corr.m_eff": {"tf": 2}, "pyerrors.mpm.matrix_pencil_method": {"tf": 1}, "pyerrors.npr.Npr_matrix": {"tf": 1}, "pyerrors.npr.Npr_matrix.g5H": {"tf": 1}}, "df": 4, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {"pyerrors": {"tf": 1}, "pyerrors.obs.Obs.gamma_method": {"tf": 1}}, "df": 2}}, "g": {"docs": {"pyerrors.correlators.Corr.show": {"tf": 1}}, "df": 1}, "k": {"docs": {}, "df": 0, "e": {"docs": {"pyerrors.fits.least_squares": {"tf": 1}, "pyerrors.fits.total_least_squares": {"tf": 1}}, "df": 2}}, "r": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.input.bdio.read_ADerrors": {"tf": 1}, "pyerrors.input.bdio.write_ADerrors": {"tf": 1}, "pyerrors.input.bdio.read_mesons": {"tf": 1}, "pyerrors.input.bdio.read_dSdm": {"tf": 1}, "pyerrors.input.sfcf.read_qtop": {"tf": 1}}, "df": 5}}}}, 
"u": {"docs": {"pyerrors.misc.gen_correlated_data": {"tf": 1}}, "df": 1, "_": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "p": {"docs": {"pyerrors.obs.Obs": {"tf": 1.4142135623730951}, "pyerrors.obs.Obs.gamma_method": {"tf": 1}}, "df": 2, "_": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "b": {"docs": {"pyerrors.obs.Obs": {"tf": 1}}, "df": 1}}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.obs.Obs": {"tf": 1}}, "df": 1}}}}}}}}}}}, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"pyerrors.correlators.Corr": {"tf": 1}, "pyerrors.correlators.Corr.reverse": {"tf": 1}, "pyerrors.correlators.Corr.T_symmetry": {"tf": 1.4142135623730951}, "pyerrors.input.sfcf.read_sfcf": {"tf": 1}, "pyerrors.input.sfcf.read_sfcf_c": {"tf": 1}, "pyerrors.misc.gen_correlated_data": {"tf": 1}, "pyerrors.mpm.matrix_pencil_method_old": {"tf": 1.4142135623730951}, "pyerrors.npr.Npr_matrix": {"tf": 1}, "pyerrors.obs.Obs.plot_tauint": {"tf": 1}, "pyerrors.obs.Obs.plot_rho": {"tf": 1}, "pyerrors.obs.filter_zeroes": {"tf": 1}, "pyerrors.obs.covariance3": {"tf": 1}}, "df": 12, "s": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {"pyerrors.correlators.Corr": {"tf": 2}, "pyerrors.correlators.Corr.plottable": {"tf": 1.7320508075688772}, "pyerrors.correlators.Corr.roll": {"tf": 1.4142135623730951}, "pyerrors.correlators.Corr.fit": {"tf": 1}, "pyerrors.correlators.Corr.plateau": {"tf": 1.4142135623730951}, "pyerrors.input.openQCD.extract_t0": {"tf": 1}}, "df": 6}}}}}}}, "r": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {"pyerrors.correlators.Corr": {"tf": 1}, "pyerrors.correlators.Corr.reweight": {"tf": 1}, "pyerrors.correlators.Corr.deriv": {"tf": 1}, "pyerrors.fits.least_squares": {"tf": 2}, "pyerrors.fits.total_least_squares": {"tf": 1.4142135623730951}, "pyerrors.fits.fit_general": {"tf": 1}, "pyerrors.input.openQCD.extract_t0": {"tf": 1}, "pyerrors.input.sfcf.read_sfcf": {"tf": 1.7320508075688772}, "pyerrors.input.sfcf.read_sfcf_c": {"tf": 1.4142135623730951}, "pyerrors.input.sfcf.read_qtop": {"tf": 1}, "pyerrors.obs.Obs.gamma_method": {"tf": 1}, "pyerrors.obs.derived_observable": {"tf": 1}, "pyerrors.obs.reweight": {"tf": 1}, "pyerrors.obs.covariance": {"tf": 1}, "pyerrors.obs.covariance2": {"tf": 1}, "pyerrors.obs.covariance3": {"tf": 1.4142135623730951}}, "df": 16}}, "e": {"docs": {}, "df": 0, "e": {"docs": {"pyerrors.input.hadrons.read_meson_hd5": {"tf": 1}, "pyerrors.npr.Zq": {"tf": 1}}, "df": 2}}, "a": {"docs": {}, "df": 0, "j": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {"pyerrors.input.openQCD.extract_t0": {"tf": 1}}, "df": 1}}}}}}}, "n": {"docs": {"pyerrors.mpm.matrix_pencil_method": {"tf": 1}}, "df": 1, "s": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "s": {"docs": {"pyerrors.npr.Npr_matrix": {"tf": 1}}, "df": 1}}}}}}, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"pyerrors.obs.Obs.gamma_method": {"tf": 1}}, "df": 1}}}}}}, "w": {"docs": {}, "df": 0, "o": {"docs": {"pyerrors.correlators.Corr": {"tf": 1}, "pyerrors.correlators.Corr.plateau": {"tf": 1}, "pyerrors.correlators.Corr.show": {"tf": 1}, "pyerrors.fits.fit_lin": {"tf": 1}, 
"pyerrors.input.hadrons.read_meson_hd5": {"tf": 1}, "pyerrors.jackknifing.derived_jack": {"tf": 1}, "pyerrors.npr.Npr_matrix": {"tf": 1}, "pyerrors.obs.derived_observable": {"tf": 1}, "pyerrors.obs.correlate": {"tf": 1}, "pyerrors.obs.covariance": {"tf": 1}, "pyerrors.obs.covariance2": {"tf": 1}, "pyerrors.obs.covariance3": {"tf": 1}}, "df": 12}}, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"pyerrors.correlators.Corr": {"tf": 1}, "pyerrors.input.bdio.read_mesons": {"tf": 1}, "pyerrors.input.bdio.read_dSdm": {"tf": 1}, "pyerrors.npr.Npr_matrix": {"tf": 2.6457513110645907}, "pyerrors.npr.Zq": {"tf": 1}, "pyerrors.obs.Obs.expand_deltas": {"tf": 1}, "pyerrors.obs.expand_deltas_for_merge": {"tf": 1}}, "df": 7}}}, "h": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "e": {"docs": {"pyerrors.correlators.Corr.plottable": {"tf": 1}}, "df": 1}}}}, "/": {"2": {"docs": {"pyerrors.correlators.Corr.m_eff": {"tf": 2}}, "df": 1}, "docs": {}, "df": 0}, "u": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {"pyerrors.fits.total_least_squares": {"tf": 1}, "pyerrors.input.bdio.read_mesons": {"tf": 1}, "pyerrors.input.bdio.read_dSdm": {"tf": 1}, "pyerrors.npr.Npr_matrix": {"tf": 2}}, "df": 4}}}, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.fits.ks_test": {"tf": 1.4142135623730951}}, "df": 1}}, "r": {"docs": {}, "df": 0, "m": {"docs": {"pyerrors.npr.Npr_matrix": {"tf": 1}}, "df": 1}}}, "^": {"2": {"docs": {"pyerrors.input.openQCD.extract_t0": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "g": {"docs": {"pyerrors.input.sfcf.read_qtop": {"tf": 1}}, "df": 1}}}}}, "d": {"docs": {}, "df": 0, "o": {"docs": {"pyerrors.npr.Npr_matrix": {"tf": 1}}, "df": 1}}, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"pyerrors.npr.Npr_matrix": {"tf": 1}}, "df": 1}}}}}, "f": {"docs": {"pyerrors.input.hadrons.read_ExternalLeg_hd5": {"tf": 1}, "pyerrors.input.hadrons.read_Bilinear_hd5": {"tf": 1}, "pyerrors.npr.Npr_matrix": {"tf": 1.4142135623730951}}, "df": 3, "i": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors": {"tf": 1}, "pyerrors.correlators.Corr.fit": {"tf": 2}, "pyerrors.correlators.Corr.plateau": {"tf": 1.4142135623730951}, "pyerrors.fits.Fit_result": {"tf": 1.4142135623730951}, "pyerrors.fits.Fit_result.gamma_method": {"tf": 1}, "pyerrors.fits.least_squares": {"tf": 2.6457513110645907}, "pyerrors.fits.total_least_squares": {"tf": 2}, "pyerrors.fits.fit_lin": {"tf": 1.7320508075688772}, "pyerrors.fits.qqplot": {"tf": 1.4142135623730951}, "pyerrors.fits.residual_plot": {"tf": 1}, "pyerrors.fits.error_band": {"tf": 1}, "pyerrors.fits.ks_test": {"tf": 1}, "pyerrors.fits.fit_general": {"tf": 2.23606797749979}, "pyerrors.input.openQCD.extract_t0": {"tf": 1.4142135623730951}}, "df": 14, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"pyerrors.correlators.Corr.fit": {"tf": 1}}, "df": 1}}}}, "_": {"docs": {}, "df": 0, "r": {"docs": {"pyerrors.correlators.Corr.show": {"tf": 1}}, "df": 1, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.correlators.Corr.show": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"pyerrors.input.openQCD.extract_t0": {"tf": 1}}, "df": 1}}}}, "p": {"docs": {}, "df": 
0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.fits.Fit_result": {"tf": 1}}, "df": 1}}}}}}}}}, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.correlators.Corr.deriv": {"tf": 1}, "pyerrors.correlators.Corr.plateau": {"tf": 1}, "pyerrors.input.hadrons.read_ExternalLeg_hd5": {"tf": 1}, "pyerrors.input.hadrons.read_Bilinear_hd5": {"tf": 1}, "pyerrors.input.misc.read_pbp": {"tf": 1}, "pyerrors.input.openQCD.read_rwms": {"tf": 1}, "pyerrors.input.openQCD.extract_t0": {"tf": 1.4142135623730951}, "pyerrors.npr.Npr_matrix": {"tf": 1.4142135623730951}, "pyerrors.obs.correlate": {"tf": 1}, "pyerrors.obs.covariance": {"tf": 1}, "pyerrors.obs.covariance2": {"tf": 1}, "pyerrors.obs.covariance3": {"tf": 1}}, "df": 12}}}, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.correlators.Corr.deriv": {"tf": 1}}, "df": 1}}, "d": {"docs": {"pyerrors.roots.find_root": {"tf": 1}}, "df": 1, "e": {"docs": {}, "df": 0, "r": {"docs": {"pyerrors.correlators.Corr.m_eff": {"tf": 1}}, "df": 1}}}}, "g": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {"pyerrors.correlators.Corr.show": {"tf": 1.4142135623730951}}, "df": 1}}}, "l": {"docs": {}, "df": 0, "e": {"docs": {"pyerrors.correlators.Corr.show": {"tf": 1}, "pyerrors.correlators.Corr.dump": {"tf": 1.4142135623730951}, "pyerrors.input.bdio.read_ADerrors": {"tf": 1.4142135623730951}, "pyerrors.input.bdio.write_ADerrors": {"tf": 1.4142135623730951}, "pyerrors.input.bdio.read_mesons": {"tf": 1.4142135623730951}, "pyerrors.input.bdio.read_dSdm": {"tf": 1.4142135623730951}, "pyerrors.input.hadrons.read_meson_hd5": {"tf": 2}, "pyerrors.input.hadrons.read_ExternalLeg_hd5": {"tf": 1.7320508075688772}, "pyerrors.input.hadrons.read_Bilinear_hd5": {"tf": 1.7320508075688772}, "pyerrors.input.openQCD.read_rwms": {"tf": 1.4142135623730951}, "pyerrors.input.openQCD.extract_t0": {"tf": 2}, "pyerrors.input.sfcf.read_sfcf_c": {"tf": 1}, "pyerrors.jackknifing.Jack.dump": {"tf": 1.4142135623730951}, "pyerrors.obs.Obs.dump": {"tf": 1.4142135623730951}, "pyerrors.obs.dump_object": {"tf": 1.4142135623730951}, "pyerrors.obs.load_object": {"tf": 1}}, "df": 16, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {"pyerrors.correlators.Corr.dump": {"tf": 1}}, "df": 1}}}, "_": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {"pyerrors.input.bdio.read_ADerrors": {"tf": 1}, "pyerrors.input.bdio.write_ADerrors": {"tf": 1}, "pyerrors.input.bdio.read_mesons": {"tf": 1}, "pyerrors.input.bdio.read_dSdm": {"tf": 1}}, "df": 4}}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {"pyerrors.input.hadrons.read_meson_hd5": {"tf": 1}, "pyerrors.input.hadrons.read_ExternalLeg_hd5": {"tf": 1}, "pyerrors.input.hadrons.read_Bilinear_hd5": {"tf": 1}}, "df": 3}}}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"pyerrors.mpm.matrix_pencil_method": {"tf": 1}, "pyerrors.mpm.matrix_pencil_method_old": {"tf": 1}, "pyerrors.obs.filter_zeroes": {"tf": 1.7320508075688772}}, "df": 3}}}, "l": {"docs": {"pyerrors.npr.Npr_matrix": {"tf": 1}, "pyerrors.obs.Obs.expand_deltas": {"tf": 1}, "pyerrors.obs.expand_deltas_for_merge": {"tf": 1}}, "df": 3}}, "x": {"docs": {"pyerrors.npr.Npr_matrix": {"tf": 1}}, "df": 1}, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "d": {"docs": 
{"pyerrors.npr.Zq": {"tf": 1}}, "df": 1}}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "m": {"docs": {"pyerrors.fits.least_squares": {"tf": 1.4142135623730951}, "pyerrors.fits.total_least_squares": {"tf": 1.4142135623730951}, "pyerrors.fits.fit_general": {"tf": 1}, "pyerrors.jackknifing.derived_jack": {"tf": 1}, "pyerrors.linalg.derived_array": {"tf": 1}, "pyerrors.obs.derived_observable": {"tf": 1}}, "df": 6, "a": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.correlators.Corr.plottable": {"tf": 1}, "pyerrors.fits.least_squares": {"tf": 1}, "pyerrors.input.misc.read_pbp": {"tf": 1}, "pyerrors.input.openQCD.read_rwms": {"tf": 1}, "pyerrors.input.sfcf.read_sfcf": {"tf": 1}, "pyerrors.input.sfcf.read_sfcf_c": {"tf": 1}, "pyerrors.input.sfcf.read_qtop": {"tf": 1}, "pyerrors.npr.Npr_matrix": {"tf": 1.4142135623730951}}, "df": 8}}}, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {"pyerrors.npr.Npr_matrix": {"tf": 1}}, "df": 1}}}}}, "l": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"pyerrors.input.misc.read_pbp": {"tf": 1}, "pyerrors.input.openQCD.read_rwms": {"tf": 1}, "pyerrors.input.sfcf.read_sfcf": {"tf": 1}, "pyerrors.input.sfcf.read_sfcf_c": {"tf": 1}, "pyerrors.input.sfcf.read_qtop": {"tf": 1}}, "df": 5}}}}}, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"pyerrors.correlators.Corr.reweight": {"tf": 1.4142135623730951}, "pyerrors.input.sfcf.read_qtop": {"tf": 1}, "pyerrors.obs.reweight": {"tf": 1.4142135623730951}}, "df": 3}}}}, "l": {"docs": {}, "df": 0, "s": {"docs": {"pyerrors.fits.least_squares": {"tf": 2}, "pyerrors.fits.total_least_squares": {"tf": 1.4142135623730951}, "pyerrors.fits.fit_general": {"tf": 1}, "pyerrors.obs.derived_observable": {"tf": 1}, "pyerrors.obs.covariance": {"tf": 1}, "pyerrors.obs.covariance2": {"tf": 1}, "pyerrors.obs.covariance3": {"tf": 1}}, "df": 7}}, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.input.hadrons.read_ExternalLeg_hd5": {"tf": 1.4142135623730951}, "pyerrors.input.hadrons.read_Bilinear_hd5": {"tf": 1.4142135623730951}}, "df": 2}}, "r": {"docs": {"pyerrors.linalg.matmul": {"tf": 1}}, "df": 1}}}}}, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.correlators.Corr.m_eff": {"tf": 1}, "pyerrors.fits.least_squares": {"tf": 1}, "pyerrors.fits.fit_lin": {"tf": 1}, "pyerrors.fits.fit_general": {"tf": 1}, "pyerrors.npr.Npr_matrix": {"tf": 1.4142135623730951}, "pyerrors.obs.Obs": {"tf": 1.7320508075688772}, "pyerrors.obs.Obs.gamma_method": {"tf": 2.6457513110645907}}, "df": 7}}}, "a": {"docs": {}, "df": 0, "g": {"docs": {"pyerrors.input.bdio.read_ADerrors": {"tf": 1}, "pyerrors.input.bdio.write_ADerrors": {"tf": 1}, "pyerrors.input.bdio.read_mesons": {"tf": 1}, "pyerrors.input.bdio.read_dSdm": {"tf": 1}, "pyerrors.npr.Npr_matrix": {"tf": 1}}, "df": 5}, "t": {"docs": {"pyerrors.npr.Npr_matrix": {"tf": 1.7320508075688772}}, "df": 1, "i": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.npr.Npr_matrix": {"tf": 1}}, "df": 1}}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {"pyerrors.npr.Npr_matrix": {"tf": 1}}, "df": 1}}}}}, "u": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.obs.Obs.expand_deltas": {"tf": 1}, 
"pyerrors.obs.Obs.calc_gamma": {"tf": 1}, "pyerrors.obs.expand_deltas_for_merge": {"tf": 1}, "pyerrors.obs.filter_zeroes": {"tf": 1.7320508075688772}, "pyerrors.obs.reduce_deltas": {"tf": 1}}, "df": 5}}}}}}}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {"pyerrors.fits.least_squares": {"tf": 1.4142135623730951}, "pyerrors.fits.total_least_squares": {"tf": 1.7320508075688772}, "pyerrors.fits.error_band": {"tf": 1}, "pyerrors.fits.fit_general": {"tf": 1}, "pyerrors.jackknifing.derived_jack": {"tf": 1}, "pyerrors.linalg.derived_array": {"tf": 1.4142135623730951}, "pyerrors.obs.derived_observable": {"tf": 1.4142135623730951}, "pyerrors.roots.find_root": {"tf": 1}}, "df": 8, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"pyerrors.correlators.Corr.fit": {"tf": 2}, "pyerrors.fits.least_squares": {"tf": 1.4142135623730951}, "pyerrors.fits.total_least_squares": {"tf": 1}, "pyerrors.fits.error_band": {"tf": 1}, "pyerrors.fits.ks_test": {"tf": 1}, "pyerrors.input.hadrons.read_meson_hd5": {"tf": 1}, "pyerrors.input.openQCD.extract_t0": {"tf": 1}, "pyerrors.input.sfcf.read_sfcf": {"tf": 1.7320508075688772}, "pyerrors.input.sfcf.read_sfcf_c": {"tf": 2}, "pyerrors.jackknifing.derived_jack": {"tf": 1.7320508075688772}, "pyerrors.linalg.derived_array": {"tf": 1.4142135623730951}, "pyerrors.mpm.matrix_pencil_method_old": {"tf": 1}, "pyerrors.obs.Obs.calc_gamma": {"tf": 1}, "pyerrors.obs.Obs.gamma_method": {"tf": 1.4142135623730951}, "pyerrors.obs.Obs.plot_rho": {"tf": 1}, "pyerrors.obs.derived_observable": {"tf": 1.7320508075688772}, "pyerrors.roots.find_root": {"tf": 2}}, "df": 17}}}}, "(": {"docs": {}, "df": 0, "x": {"docs": {"pyerrors.fits.least_squares": {"tf": 1}, "pyerrors.fits.total_least_squares": {"tf": 1}, "pyerrors.fits.fit_general": {"tf": 1}, "pyerrors.roots.find_root": {"tf": 1}}, "df": 4}, "a": {"docs": {"pyerrors.fits.least_squares": {"tf": 1.4142135623730951}, "pyerrors.fits.total_least_squares": {"tf": 1.4142135623730951}, "pyerrors.fits.fit_general": {"tf": 1}}, "df": 3}, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {"pyerrors.jackknifing.derived_jack": {"tf": 1.4142135623730951}, "pyerrors.linalg.derived_array": {"tf": 1.4142135623730951}, "pyerrors.obs.derived_observable": {"tf": 1.4142135623730951}}, "df": 3}}}}}}}, "l": {"docs": {}, "df": 0, "l": {"docs": {"pyerrors.fits.least_squares": {"tf": 1}, "pyerrors.fits.total_least_squares": {"tf": 1}, "pyerrors.input.sfcf.read_qtop": {"tf": 1}}, "df": 3, "i": {"docs": {"pyerrors.npr.Npr_matrix": {"tf": 1}}, "df": 1}}}}, "p": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {"pyerrors.input.bdio.read_ADerrors": {"tf": 1}, "pyerrors.input.bdio.write_ADerrors": {"tf": 1}, "pyerrors.input.bdio.read_mesons": {"tf": 1}, "pyerrors.input.bdio.read_dSdm": {"tf": 1}}, "df": 4}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"pyerrors.npr.Zq": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "f": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.obs.Obs.calc_gamma": {"tf": 1.4142135623730951}, "pyerrors.obs.Obs.gamma_method": {"tf": 1.4142135623730951}}, "df": 2}}, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"pyerrors.obs.Obs.plot_piechart": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}, "r": 
{"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors": {"tf": 1}, "pyerrors.correlators.Corr.m_eff": {"tf": 1.4142135623730951}, "pyerrors.input.openQCD.extract_t0": {"tf": 1}, "pyerrors.roots.find_root": {"tf": 1}}, "df": 4}}, "w": {"docs": {"pyerrors.npr.Npr_matrix": {"tf": 1.7320508075688772}}, "df": 1}}, "e": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {"pyerrors.correlators.Corr.reverse": {"tf": 1}}, "df": 1}}}}, "w": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.correlators.Corr.reweight": {"tf": 2}, "pyerrors.input.sfcf.read_qtop": {"tf": 1.4142135623730951}, "pyerrors.obs.reweight": {"tf": 2}, "pyerrors.obs.correlate": {"tf": 1.4142135623730951}}, "df": 4}}}}}}, "t": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "n": {"docs": {"pyerrors.correlators.Corr.T_symmetry": {"tf": 1}, "pyerrors.correlators.Corr.deriv": {"tf": 1}, "pyerrors.correlators.Corr.second_deriv": {"tf": 1}, "pyerrors.correlators.Corr.m_eff": {"tf": 1}, "pyerrors.dirac.Grid_gamma": {"tf": 1}, "pyerrors.fits.least_squares": {"tf": 1.4142135623730951}, "pyerrors.fits.total_least_squares": {"tf": 1.7320508075688772}, "pyerrors.fits.fit_lin": {"tf": 1}, "pyerrors.fits.covariance_matrix": {"tf": 1}, "pyerrors.fits.error_band": {"tf": 1}, "pyerrors.fits.fit_general": {"tf": 1.4142135623730951}, "pyerrors.input.bdio.read_mesons": {"tf": 1}, "pyerrors.input.bdio.read_dSdm": {"tf": 1}, "pyerrors.input.misc.read_pbp": {"tf": 1}, "pyerrors.input.openQCD.read_rwms": {"tf": 1}, "pyerrors.input.openQCD.extract_t0": {"tf": 1}, "pyerrors.npr.Npr_matrix.g5H": {"tf": 1}, "pyerrors.obs.Obs.plot_piechart": {"tf": 1}, "pyerrors.obs.merge_idx": {"tf": 1}, "pyerrors.obs.filter_zeroes": {"tf": 1}, "pyerrors.obs.covariance": {"tf": 1}, "pyerrors.obs.covariance2": {"tf": 1}, "pyerrors.obs.covariance3": {"tf": 1}}, "df": 23}}}}, "s": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.correlators.Corr.deriv": {"tf": 1}, "pyerrors.correlators.Corr.second_deriv": {"tf": 1}}, "df": 2}}}, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.fits.least_squares": {"tf": 1}}, "df": 1}}}}, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.fits.Fit_result": {"tf": 1.4142135623730951}, "pyerrors.fits.least_squares": {"tf": 1.4142135623730951}, "pyerrors.fits.qqplot": {"tf": 1}, "pyerrors.fits.fit_general": {"tf": 1}, "pyerrors.obs.Obs": {"tf": 1}}, "df": 5}}}, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "u": {"docs": {"pyerrors.fits.least_squares": {"tf": 1}, "pyerrors.fits.qqplot": {"tf": 1}, "pyerrors.fits.residual_plot": {"tf": 1}}, "df": 3}}}, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "p": {"docs": {"pyerrors.input.hadrons.read_ExternalLeg_hd5": {"tf": 1}, "pyerrors.input.hadrons.read_Bilinear_hd5": {"tf": 1}}, "df": 2}}}}, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "v": {"docs": {"pyerrors.correlators.Corr.m_eff": {"tf": 1}, "pyerrors.fits.least_squares": {"tf": 1}, "pyerrors.fits.total_least_squares": {"tf": 1}, "pyerrors.fits.fit_general": {"tf": 1}, "pyerrors.input.sfcf.read_sfcf_c": {"tf": 1.4142135623730951}}, "df": 5}}, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": 
{"pyerrors.fits.least_squares": {"tf": 1}}, "df": 1}}}}, "a": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.npr.Npr_matrix": {"tf": 1}, "pyerrors.obs.Obs.gamma_method": {"tf": 1}}, "df": 2}}}, "g": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"pyerrors.correlators.Corr.plateau": {"tf": 1.4142135623730951}}, "df": 1}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {"pyerrors.fits.total_least_squares": {"tf": 1}}, "df": 1}}}}, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"pyerrors.obs.Obs.expand_deltas": {"tf": 1}}, "df": 1}}}}}, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"pyerrors.fits.Fit_result": {"tf": 1}, "pyerrors.npr.Npr_matrix": {"tf": 1}}, "df": 2}}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {"pyerrors.input.misc.read_pbp": {"tf": 1.4142135623730951}, "pyerrors.input.openQCD.read_rwms": {"tf": 1.4142135623730951}, "pyerrors.input.openQCD.extract_t0": {"tf": 1.4142135623730951}, "pyerrors.obs.Obs.plot_rep_dist": {"tf": 1}, "pyerrors.obs.merge_obs": {"tf": 1}}, "df": 5}}, "a": {"docs": {"pyerrors.obs.Obs.plot_rep_dist": {"tf": 1}}, "df": 1, "s": {"docs": {}, "df": 0, "/": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {"pyerrors.input.sfcf.read_sfcf": {"tf": 1}, "pyerrors.input.sfcf.read_sfcf_c": {"tf": 1}}, "df": 2}}}}}}}}}}}}, "a": {"docs": {}, "df": 0, "c": {"docs": {"pyerrors.input.sfcf.read_sfcf_c": {"tf": 1}}, "df": 1}}}}, "f": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"pyerrors.fits.least_squares": {"tf": 1}, "pyerrors.fits.total_least_squares": {"tf": 1}, "pyerrors.npr.Npr_matrix": {"tf": 2}}, "df": 3}}}, "a": {"docs": {}, "df": 0, "d": {"docs": {"pyerrors.input.bdio.read_mesons": {"tf": 1}, "pyerrors.input.bdio.read_dSdm": {"tf": 1}, "pyerrors.input.hadrons.read_meson_hd5": {"tf": 2}, "pyerrors.input.hadrons.read_ExternalLeg_hd5": {"tf": 1.7320508075688772}, "pyerrors.input.hadrons.read_Bilinear_hd5": {"tf": 1.7320508075688772}, "pyerrors.input.misc.read_pbp": {"tf": 1.7320508075688772}, "pyerrors.input.openQCD.read_rwms": {"tf": 2}, "pyerrors.input.openQCD.extract_t0": {"tf": 1.7320508075688772}, "pyerrors.input.sfcf.read_sfcf": {"tf": 2}, "pyerrors.input.sfcf.read_sfcf_c": {"tf": 1.7320508075688772}, "pyerrors.input.sfcf.read_qtop": {"tf": 1.4142135623730951}}, "df": 11, "_": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"pyerrors.input.bdio.read_ADerrors": {"tf": 1}}, "df": 1}}}}}}}, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"pyerrors.input.bdio.write_ADerrors": {"tf": 1}, "pyerrors.input.bdio.read_mesons": {"tf": 1}}, "df": 2}}}}}, "d": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "m": {"docs": {"pyerrors.input.bdio.read_dSdm": {"tf": 1}}, "df": 1}}}}}}, "l": {"docs": {"pyerrors.input.sfcf.read_sfcf": {"tf": 1}, "pyerrors.input.sfcf.read_sfcf_c": {"tf": 1}, "pyerrors.linalg.matmul": {"tf": 1}, "pyerrors.misc.gen_correlated_data": {"tf": 1}, 
"pyerrors.npr.Npr_matrix": {"tf": 1.4142135623730951}, "pyerrors.obs.CObs.gamma_method": {"tf": 1}, "pyerrors.obs.CObs.is_zero": {"tf": 1}}, "df": 7, "l": {"docs": {}, "df": 0, "i": {"docs": {"pyerrors.obs.correlate": {"tf": 1}}, "df": 1}}}}, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "r": {"docs": {"pyerrors.input.bdio.read_ADerrors": {"tf": 1}, "pyerrors.input.bdio.write_ADerrors": {"tf": 1}, "pyerrors.input.bdio.read_mesons": {"tf": 1}, "pyerrors.input.bdio.read_dSdm": {"tf": 1}, "pyerrors.input.hadrons.read_meson_hd5": {"tf": 1}, "pyerrors.input.hadrons.read_ExternalLeg_hd5": {"tf": 1}, "pyerrors.input.hadrons.read_Bilinear_hd5": {"tf": 1}, "pyerrors.input.openQCD.extract_t0": {"tf": 1}, "pyerrors.npr.Npr_matrix": {"tf": 1.7320508075688772}}, "df": 9}}}}, "n": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "m": {"docs": {"pyerrors.npr.Zq": {"tf": 1}}, "df": 1}}}}}, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"pyerrors.correlators.Corr.fit": {"tf": 1}, "pyerrors.correlators.Corr.show": {"tf": 1}, "pyerrors.obs.Obs.__init__": {"tf": 1}, "pyerrors.obs.Obs.expand_deltas": {"tf": 1.7320508075688772}, "pyerrors.obs.Obs.calc_gamma": {"tf": 1.4142135623730951}, "pyerrors.obs.merge_idx": {"tf": 1}, "pyerrors.obs.expand_deltas_for_merge": {"tf": 1.7320508075688772}, "pyerrors.obs.filter_zeroes": {"tf": 1}, "pyerrors.obs.reduce_deltas": {"tf": 1}}, "df": 9}, "d": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "m": {"docs": {"pyerrors.npr.Npr_matrix": {"tf": 1}}, "df": 1}}}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {"pyerrors.jackknifing.derived_jack": {"tf": 1}, "pyerrors.obs.derived_observable": {"tf": 1}}, "df": 2}}}}, "_": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.input.misc.read_pbp": {"tf": 1}, "pyerrors.input.openQCD.read_rwms": {"tf": 1}, "pyerrors.input.openQCD.extract_t0": {"tf": 1}}, "df": 3}}}, "o": {"docs": {}, "df": 0, "p": {"docs": {"pyerrors.input.misc.read_pbp": {"tf": 1}, "pyerrors.input.openQCD.read_rwms": {"tf": 1}, "pyerrors.input.openQCD.extract_t0": {"tf": 1}}, "df": 3}}}}}, "w": {"docs": {}, "df": 0, "m": {"docs": {"pyerrors.input.openQCD.read_rwms": {"tf": 1}}, "df": 1}}, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.input.openQCD.extract_t0": {"tf": 1}}, "df": 1}}}}, "u": {"docs": {}, "df": 0, "n": {"docs": {"pyerrors.obs.Obs.is_zero_within_error": {"tf": 1}}, "df": 1}}}, "u": {"docs": {}, "df": 0, "s": {"docs": {"pyerrors.correlators.Corr": {"tf": 1}, "pyerrors.correlators.Corr.deriv": {"tf": 1}, "pyerrors.correlators.Corr.m_eff": {"tf": 1.7320508075688772}, "pyerrors.correlators.Corr.fit": {"tf": 1}, "pyerrors.correlators.Corr.show": {"tf": 1}, "pyerrors.fits.least_squares": {"tf": 1.7320508075688772}, "pyerrors.fits.total_least_squares": {"tf": 1}, "pyerrors.fits.fit_lin": {"tf": 1.4142135623730951}, "pyerrors.fits.qqplot": {"tf": 1}, "pyerrors.fits.ks_test": {"tf": 1}, "pyerrors.fits.fit_general": {"tf": 1.7320508075688772}, "pyerrors.input.sfcf.read_sfcf_c": {"tf": 1.4142135623730951}, "pyerrors.jackknifing.derived_jack": {"tf": 1.7320508075688772}, "pyerrors.linalg.derived_array": {"tf": 1.7320508075688772}, "pyerrors.npr.Npr_matrix": {"tf": 2.449489742783178}, "pyerrors.npr.Zq": {"tf": 1}, "pyerrors.obs.Obs.calc_gamma": {"tf": 1}, "pyerrors.obs.Obs.gamma_method": {"tf": 1}, 
"pyerrors.obs.expand_deltas_for_merge": {"tf": 1}, "pyerrors.obs.derived_observable": {"tf": 2.6457513110645907}, "pyerrors.roots.find_root": {"tf": 1}}, "df": 21}, "p": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.input.hadrons.read_meson_hd5": {"tf": 1}}, "df": 1}}}}, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"pyerrors.obs.Obs.calc_gamma": {"tf": 1}}, "df": 1}}}}, "n": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {"pyerrors.npr.Npr_matrix": {"tf": 1}}, "df": 1}}}}, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"pyerrors.npr.Npr_matrix": {"tf": 1}}, "df": 1}}}}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {"pyerrors.obs.Obs.gamma_method": {"tf": 1}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"pyerrors.obs.merge_idx": {"tf": 1}}, "df": 1}}}}}, "v": {"docs": {"pyerrors.input.openQCD.extract_t0": {"tf": 1}}, "df": 1, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {"pyerrors.correlators.Corr.plottable": {"tf": 1}, "pyerrors.correlators.Corr.plateau": {"tf": 1}, "pyerrors.correlators.Corr.show": {"tf": 1}, "pyerrors.fits.least_squares": {"tf": 1.4142135623730951}, "pyerrors.fits.total_least_squares": {"tf": 1}, "pyerrors.fits.error_band": {"tf": 1}, "pyerrors.fits.ks_test": {"tf": 1.4142135623730951}, "pyerrors.input.hadrons.read_meson_hd5": {"tf": 1}, "pyerrors.input.sfcf.read_sfcf": {"tf": 1}, "pyerrors.linalg.derived_array": {"tf": 1}, "pyerrors.linalg.matmul": {"tf": 1}, "pyerrors.linalg.inv": {"tf": 1}, "pyerrors.linalg.cholesky": {"tf": 1}, "pyerrors.linalg.svd": {"tf": 1}, "pyerrors.linalg.grad_eig": {"tf": 1}, "pyerrors.misc.gen_correlated_data": {"tf": 1}, "pyerrors.mpm.matrix_pencil_method": {"tf": 1}, "pyerrors.mpm.matrix_pencil_method_old": {"tf": 2}, "pyerrors.obs.Obs": {"tf": 2.6457513110645907}, "pyerrors.obs.Obs.__init__": {"tf": 1.4142135623730951}, "pyerrors.obs.Obs.gamma_method": {"tf": 1.4142135623730951}, "pyerrors.obs.CObs": {"tf": 1}, "pyerrors.obs.filter_zeroes": {"tf": 1}, "pyerrors.obs.covariance": {"tf": 1}, "pyerrors.obs.covariance2": {"tf": 1}, "pyerrors.obs.covariance3": {"tf": 1}, "pyerrors.obs.pseudo_Obs": {"tf": 1}}, "df": 27}}, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.correlators.Corr.m_eff": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "h": {"docs": {"pyerrors.obs.filter_zeroes": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"pyerrors.correlators.Corr.show": {"tf": 1.4142135623730951}, "pyerrors.obs.Obs": {"tf": 1}}, "df": 2}}}}, "a": {"docs": {"pyerrors.fits.Fit_result": {"tf": 1}, "pyerrors.linalg.matmul": {"tf": 1}, "pyerrors.linalg.slogdet": {"tf": 1}, "pyerrors.mpm.matrix_pencil_method_old": {"tf": 1}}, "df": 4}, "e": {"docs": {}, "df": 0, "w": {"docs": {"pyerrors.npr.Npr_matrix": {"tf": 1.4142135623730951}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"pyerrors.input.openQCD.read_rwms": {"tf": 1.4142135623730951}, 
"pyerrors.npr.Npr_matrix": {"tf": 1.4142135623730951}}, "df": 2}}}}, "b": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "s": {"docs": {"pyerrors.mpm.matrix_pencil_method_old": {"tf": 1}}, "df": 1}}}}}}, "x": {"0": {"docs": {"pyerrors.correlators.Corr.deriv": {"tf": 1}, "pyerrors.correlators.Corr.second_deriv": {"tf": 1}}, "df": 2, "=": {"0": {"docs": {"pyerrors.correlators.Corr.symmetric": {"tf": 1}, "pyerrors.correlators.Corr.anti_symmetric": {"tf": 1}}, "df": 2}, "docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {"pyerrors.input.openQCD.extract_t0": {"tf": 1}}, "df": 1}}}}, "n": {"docs": {}, "df": 0, "+": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.mpm.matrix_pencil_method_old": {"tf": 1}}, "df": 1}}}}}}}}}}, "1": {"docs": {"pyerrors.fits.least_squares": {"tf": 1.4142135623730951}, "pyerrors.fits.total_least_squares": {"tf": 1.4142135623730951}}, "df": 2}, "2": {"docs": {"pyerrors.fits.least_squares": {"tf": 1.4142135623730951}, "pyerrors.fits.total_least_squares": {"tf": 1.4142135623730951}}, "df": 2}, "docs": {"pyerrors.correlators.Corr.show": {"tf": 1}, "pyerrors.fits.least_squares": {"tf": 2.449489742783178}, "pyerrors.fits.total_least_squares": {"tf": 2.6457513110645907}, "pyerrors.fits.fit_lin": {"tf": 1.4142135623730951}, "pyerrors.fits.error_band": {"tf": 1}, "pyerrors.fits.fit_general": {"tf": 1.7320508075688772}, "pyerrors.jackknifing.derived_jack": {"tf": 1}, "pyerrors.npr.Npr_matrix": {"tf": 1}, "pyerrors.obs.derived_observable": {"tf": 1}}, "df": 9, "_": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"pyerrors.correlators.Corr.show": {"tf": 1}}, "df": 1}}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"pyerrors.fits.fit_lin": {"tf": 1.4142135623730951}, "pyerrors.fits.fit_general": {"tf": 1.4142135623730951}}, "df": 2}}}}}, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {"pyerrors.input.openQCD.extract_t0": {"tf": 1}}, "df": 1}}}, "[": {"0": {"docs": {"pyerrors.jackknifing.derived_jack": {"tf": 1}, "pyerrors.obs.derived_observable": {"tf": 1}}, "df": 2}, "1": {"docs": {"pyerrors.jackknifing.derived_jack": {"tf": 1}, "pyerrors.obs.derived_observable": {"tf": 1}}, "df": 2}, "docs": {}, "df": 0}}, "w": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.correlators.Corr.reweight": {"tf": 1.4142135623730951}, "pyerrors.obs.reweight": {"tf": 1.4142135623730951}}, "df": 2}}}}, "l": {"docs": {}, "df": 0, "l": {"docs": {"pyerrors.fits.ks_test": {"tf": 1}}, "df": 1}}}, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"pyerrors.correlators.Corr.deriv": {"tf": 1}, "pyerrors.correlators.Corr.fit": {"tf": 1}, "pyerrors.npr.Npr_matrix": {"tf": 1}, "pyerrors.obs.Obs.calc_gamma": {"tf": 1}, "pyerrors.obs.Obs.gamma_method": {"tf": 1}, "pyerrors.obs.Obs.is_zero_within_error": {"tf": 1}, "pyerrors.obs.Obs.is_zero": {"tf": 1}, "pyerrors.obs.CObs.is_zero": {"tf": 1}}, "df": 8}}}}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "k": {"docs": {"pyerrors.fits.least_squares": {"tf": 1}, 
"pyerrors.fits.total_least_squares": {"tf": 1}, "pyerrors.input.openQCD.extract_t0": {"tf": 1}, "pyerrors.jackknifing.derived_jack": {"tf": 1}, "pyerrors.linalg.derived_array": {"tf": 1}, "pyerrors.npr.Npr_matrix.g5H": {"tf": 1}, "pyerrors.obs.Obs.is_zero_within_error": {"tf": 1}, "pyerrors.obs.derived_observable": {"tf": 1}, "pyerrors.obs.correlate": {"tf": 1}, "pyerrors.obs.covariance3": {"tf": 1}}, "df": 10}}}, "l": {"docs": {"pyerrors.input.bdio.read_ADerrors": {"tf": 1}, "pyerrors.input.bdio.write_ADerrors": {"tf": 1}, "pyerrors.input.bdio.read_mesons": {"tf": 1}, "pyerrors.input.bdio.read_dSdm": {"tf": 1}}, "df": 4}, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"pyerrors.input.bdio.write_ADerrors": {"tf": 1}}, "df": 1, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {"pyerrors.npr.Npr_matrix": {"tf": 1}}, "df": 1}}}}}}, "a": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"pyerrors.jackknifing.derived_jack": {"tf": 1}, "pyerrors.linalg.derived_array": {"tf": 1}, "pyerrors.obs.derived_observable": {"tf": 1}, "pyerrors.roots.find_root": {"tf": 1}}, "df": 4}}}}}, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"pyerrors.linalg.derived_array": {"tf": 1}, "pyerrors.obs.derived_observable": {"tf": 1}}, "df": 2}}}}, "a": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"pyerrors.input.sfcf.read_sfcf_c": {"tf": 1}}, "df": 1, "f": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.input.sfcf.read_sfcf_c": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}, "y": {"docs": {"pyerrors.npr.Npr_matrix": {"tf": 1}}, "df": 1}, "n": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.obs.reduce_deltas": {"tf": 1}}, "df": 1}}}, "f": {"2": {"docs": {"pyerrors.input.sfcf.read_sfcf_c": {"tf": 1}}, "df": 1}, "docs": {"pyerrors.input.sfcf.read_sfcf_c": {"tf": 1}}, "df": 1}, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"pyerrors.npr.Zq": {"tf": 1}}, "df": 1}}}}, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {"pyerrors.obs.Obs.calc_gamma": {"tf": 1}}, "df": 1}}}}, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {"pyerrors.obs.Obs.is_zero_within_error": {"tf": 1}, "pyerrors.obs.Obs.is_zero": {"tf": 1}, "pyerrors.obs.CObs.is_zero": {"tf": 1}}, "df": 3}}}}}, "_": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "x": {"docs": {"pyerrors.obs.Obs.calc_gamma": {"tf": 1}}, "df": 1}}}}}, "k": {"docs": {"pyerrors.mpm.matrix_pencil_method": {"tf": 1.7320508075688772}}, "df": 1, "e": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "d": {"docs": {"pyerrors.correlators.Corr.reweight": {"tf": 1}, "pyerrors.fits.least_squares": {"tf": 1}, "pyerrors.fits.total_least_squares": {"tf": 1}, "pyerrors.fits.fit_general": {"tf": 1}, "pyerrors.input.misc.read_pbp": {"tf": 1}, "pyerrors.input.openQCD.read_rwms": {"tf": 1}, "pyerrors.input.openQCD.extract_t0": {"tf": 1}, "pyerrors.input.sfcf.read_sfcf": {"tf": 1}, "pyerrors.input.sfcf.read_sfcf_c": {"tf": 1}, "pyerrors.input.sfcf.read_qtop": {"tf": 1}, "pyerrors.jackknifing.Jack.dump": {"tf": 1}, "pyerrors.linalg.derived_array": {"tf": 1}, "pyerrors.npr.Npr_matrix": 
{"tf": 1}, "pyerrors.obs.Obs.gamma_method": {"tf": 1}, "pyerrors.obs.Obs.dump": {"tf": 1}, "pyerrors.obs.derived_observable": {"tf": 1}, "pyerrors.obs.reweight": {"tf": 1}, "pyerrors.obs.covariance": {"tf": 1}, "pyerrors.obs.covariance2": {"tf": 1}, "pyerrors.obs.covariance3": {"tf": 1}, "pyerrors.obs.dump_object": {"tf": 1}}, "df": 21}}}}}, "e": {"docs": {}, "df": 0, "p": {"docs": {"pyerrors.obs.correlate": {"tf": 1}}, "df": 1}}}, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "\u2013": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "v": {"docs": {"pyerrors.fits.ks_test": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}, "w": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "g": {"docs": {"pyerrors.fits.fit_general": {"tf": 1}, "pyerrors.jackknifing.derived_jack": {"tf": 1.4142135623730951}, "pyerrors.linalg.derived_array": {"tf": 1.4142135623730951}, "pyerrors.obs.derived_observable": {"tf": 1.7320508075688772}}, "df": 4}}}}, "a": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "a": {"1": {"docs": {"pyerrors.input.bdio.read_mesons": {"tf": 1}}, "df": 1}, "2": {"docs": {"pyerrors.input.bdio.read_mesons": {"tf": 1}}, "df": 1}, "docs": {"pyerrors.input.bdio.read_dSdm": {"tf": 1}}, "df": 1}}}}}, "b": {"2": {"docs": {}, "df": 0, "b": {"docs": {"pyerrors.input.sfcf.read_sfcf": {"tf": 1}, "pyerrors.input.sfcf.read_sfcf_c": {"tf": 1}}, "df": 2}}, "docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {"pyerrors.correlators.Corr.reweight": {"tf": 1}, "pyerrors.correlators.Corr.deriv": {"tf": 1}, "pyerrors.correlators.Corr.fit": {"tf": 1}, "pyerrors.correlators.Corr.show": {"tf": 1}, "pyerrors.fits.least_squares": {"tf": 1}, "pyerrors.fits.total_least_squares": {"tf": 1}, "pyerrors.obs.Obs.gamma_method": {"tf": 1}, "pyerrors.obs.derived_observable": {"tf": 1}, "pyerrors.obs.reweight": {"tf": 1}}, "df": 9, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {"pyerrors.obs.Obs.calc_gamma": {"tf": 1}}, "df": 1}}}}, "k": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "p": {"docs": {"pyerrors.input.hadrons.read_meson_hd5": {"tf": 1}, "pyerrors.input.hadrons.read_ExternalLeg_hd5": {"tf": 1}, "pyerrors.input.hadrons.read_Bilinear_hd5": {"tf": 1}}, "df": 3}}}}}}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"pyerrors.obs.Obs.calc_gamma": {"tf": 1}}, "df": 1, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {"pyerrors.input.openQCD.extract_t0": {"tf": 1.4142135623730951}, "pyerrors.input.sfcf.read_sfcf": {"tf": 2}, "pyerrors.input.sfcf.read_sfcf_c": {"tf": 2}}, "df": 3}}}}}}, "t": {"docs": {}, "df": 0, "h": {"docs": {"pyerrors.obs.CObs.is_zero": {"tf": 1}, "pyerrors.obs.covariance": {"tf": 1}, "pyerrors.obs.covariance2": {"tf": 1}, "pyerrors.obs.covariance3": {"tf": 1}}, "df": 4}}}, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"pyerrors.fits.total_least_squares": {"tf": 1}, "pyerrors.mpm.matrix_pencil_method": {"tf": 1}, "pyerrors.npr.Npr_matrix": {"tf": 1.7320508075688772}, "pyerrors.obs.merge_obs": {"tf": 1}}, "df": 4}, "i": {"docs": {}, "df": 0, "c": {"docs": 
{"pyerrors.jackknifing.Jack.print": {"tf": 1}, "pyerrors.obs.Obs": {"tf": 1}}, "df": 2}}}, "n": {"docs": {}, "df": 0, "d": {"docs": {"pyerrors.fits.error_band": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {"pyerrors.fits.error_band": {"tf": 1}}, "df": 1}, "w": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {"pyerrors.mpm.matrix_pencil_method": {"tf": 1}}, "df": 1}}}}}, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {"pyerrors.mpm.matrix_pencil_method_old": {"tf": 1}, "pyerrors.npr.Npr_matrix": {"tf": 1.4142135623730951}}, "df": 2}}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {"pyerrors.input.bdio.read_ADerrors": {"tf": 2.23606797749979}, "pyerrors.input.bdio.write_ADerrors": {"tf": 2.23606797749979}, "pyerrors.input.bdio.read_mesons": {"tf": 2.23606797749979}, "pyerrors.input.bdio.read_dSdm": {"tf": 2.23606797749979}}, "df": 4, "_": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {"pyerrors.input.bdio.read_ADerrors": {"tf": 1}, "pyerrors.input.bdio.write_ADerrors": {"tf": 1}, "pyerrors.input.bdio.read_mesons": {"tf": 1}, "pyerrors.input.bdio.read_dSdm": {"tf": 1}}, "df": 4}}}}}}}}, "u": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, ")": {"docs": {}, "df": 0, "/": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {"pyerrors.input.bdio.read_ADerrors": {"tf": 1.4142135623730951}, "pyerrors.input.bdio.write_ADerrors": {"tf": 1.4142135623730951}, "pyerrors.input.bdio.read_mesons": {"tf": 1.4142135623730951}, "pyerrors.input.bdio.read_dSdm": {"tf": 1.4142135623730951}}, "df": 4}}}}}}}, "b": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {"pyerrors.input.bdio.read_ADerrors": {"tf": 1}, "pyerrors.input.bdio.write_ADerrors": {"tf": 1}, "pyerrors.input.bdio.read_mesons": {"tf": 1}, "pyerrors.input.bdio.read_dSdm": {"tf": 1}}, "df": 4}}}}}}}}}}}}, "f": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"pyerrors.npr.Npr_matrix": {"tf": 3}}, "df": 1, "=": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"pyerrors.npr.Npr_matrix": {"tf": 1}}, "df": 1}}}}}}}}}, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"pyerrors.input.hadrons.read_Bilinear_hd5": {"tf": 1}}, "df": 1}}}}}}}, "y": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"pyerrors.npr.Npr_matrix": {"tf": 2.449489742783178}}, "df": 1}}}}, "q": {"docs": {"pyerrors.fits.ks_test": {"tf": 1.4142135623730951}}, "df": 1, "u": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {"pyerrors.correlators.Corr.T_symmetry": {"tf": 1}}, "df": 1}}, "i": {"docs": {}, "df": 0, "l": {"docs": {"pyerrors.fits.least_squares": {"tf": 1.4142135623730951}, "pyerrors.fits.qqplot": {"tf": 1.4142135623730951}}, "df": 2}}}}, "r": {"docs": {}, "df": 0, "k": {"docs": {"pyerrors.input.sfcf.read_sfcf_c": {"tf": 1.4142135623730951}, 
"pyerrors.npr.inv_propagator": {"tf": 1}, "pyerrors.npr.Zq": {"tf": 1.4142135623730951}}, "df": 3}}}}, "q": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {"pyerrors.fits.least_squares": {"tf": 1}}, "df": 1}}}}}, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {"pyerrors.input.sfcf.read_qtop": {"tf": 1}}, "df": 1}}}}, "y": {"docs": {"pyerrors.correlators.Corr.show": {"tf": 1.4142135623730951}, "pyerrors.fits.least_squares": {"tf": 1.4142135623730951}, "pyerrors.fits.total_least_squares": {"tf": 2}, "pyerrors.fits.fit_lin": {"tf": 1.4142135623730951}, "pyerrors.fits.covariance_matrix": {"tf": 1}, "pyerrors.fits.fit_general": {"tf": 2}, "pyerrors.mpm.matrix_pencil_method": {"tf": 1}}, "df": 7, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {"pyerrors.correlators.Corr.show": {"tf": 1}}, "df": 1}}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"pyerrors.fits.total_least_squares": {"tf": 1}, "pyerrors.fits.fit_lin": {"tf": 1}, "pyerrors.fits.fit_general": {"tf": 1}}, "df": 3}}}}}}, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"pyerrors.input.hadrons.read_meson_hd5": {"tf": 1}, "pyerrors.input.hadrons.read_ExternalLeg_hd5": {"tf": 1}, "pyerrors.input.hadrons.read_Bilinear_hd5": {"tf": 1}}, "df": 3}}}}}, "d": {"docs": {}, "df": 0, "f": {"5": {"docs": {"pyerrors.input.hadrons.read_meson_hd5": {"tf": 1.4142135623730951}, "pyerrors.input.hadrons.read_ExternalLeg_hd5": {"tf": 1}, "pyerrors.input.hadrons.read_Bilinear_hd5": {"tf": 1}}, "df": 3}, "docs": {}, "df": 0}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {"pyerrors.linalg.eigh": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {"pyerrors.npr.Npr_matrix.g5H": {"tf": 1}}, "df": 1}}}}}}, "e": {"docs": {"pyerrors.npr.Npr_matrix": {"tf": 1}}, "df": 1}}, "l": {"docs": {}, "df": 0, "p": {"docs": {"pyerrors.mpm.matrix_pencil_method_old": {"tf": 1}}, "df": 1}}}, "u": {"docs": {}, "df": 0, "a": {"docs": {"pyerrors.mpm.matrix_pencil_method": {"tf": 1}}, "df": 1}}, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"pyerrors.mpm.matrix_pencil_method_old": {"tf": 1}}, "df": 1}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {"pyerrors.obs.Obs.plot_history": {"tf": 1}}, "df": 1}}}}}}, "o": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {"pyerrors.npr.Npr_matrix": {"tf": 1}}, "df": 1}}}}}, "l": {"docs": {}, "df": 0, "e": {"docs": {"pyerrors.obs.Obs.expand_deltas": {"tf": 1}}, "df": 1}}}}, "z": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {"pyerrors.input.openQCD.extract_t0": {"tf": 1.4142135623730951}, "pyerrors.mpm.matrix_pencil_method_old": {"tf": 1}, "pyerrors.npr.Npr_matrix": {"tf": 1.7320508075688772}, "pyerrors.obs.Obs.gamma_method": {"tf": 1}, "pyerrors.obs.Obs.is_zero_within_error": {"tf": 1}, "pyerrors.obs.Obs.is_zero": 
{"tf": 1}, "pyerrors.obs.CObs.is_zero": {"tf": 1}}, "df": 7}}}, "q": {"docs": {"pyerrors.npr.Zq": {"tf": 1.4142135623730951}}, "df": 1}}, "j": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"1": {"docs": {"pyerrors.jackknifing.derived_jack": {"tf": 1.4142135623730951}}, "df": 1}, "2": {"docs": {"pyerrors.jackknifing.derived_jack": {"tf": 1.4142135623730951}}, "df": 1}, "3": {"docs": {"pyerrors.jackknifing.derived_jack": {"tf": 1}}, "df": 1}, "docs": {"pyerrors.jackknifing.Jack.print": {"tf": 1}, "pyerrors.jackknifing.Jack.dump": {"tf": 1}, "pyerrors.jackknifing.derived_jack": {"tf": 1.7320508075688772}}, "df": 3}, "o": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {"pyerrors.linalg.derived_array": {"tf": 1}, "pyerrors.obs.derived_observable": {"tf": 1}}, "df": 2}}}}}}}, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": {"pyerrors.npr.Npr_matrix": {"tf": 1.4142135623730951}}, "df": 1}}}}, "_": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "_": {"docs": {"pyerrors.npr.Npr_matrix": {"tf": 1.7320508075688772}}, "df": 1}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "_": {"docs": {}, "df": 0, "_": {"docs": {"pyerrors.npr.Npr_matrix": {"tf": 1}}, "df": 1}}}}}}}}}}}, "pipeline": ["trimmer", "stopWordFilter", "stemmer"], "_isPrebuiltIndex": true}; + + // mirrored in build-search-index.js (part 1) + // Also split on html tags. this is a cheap heuristic, but good enough. + elasticlunr.tokenizer.setSeperator(/[\s\-.;&]+|<[^>]*>/); + + let searchIndex; + if (docs._isPrebuiltIndex) { + console.info("using precompiled search index"); + searchIndex = elasticlunr.Index.load(docs); + } else { + console.time("building search index"); + // mirrored in build-search-index.js (part 2) + searchIndex = elasticlunr(function () { + this.addField("qualname"); + this.addField("fullname"); + this.addField("doc"); + this.setRef("fullname"); + }); + for (let doc of docs) { + searchIndex.addDoc(doc); + } + console.timeEnd("building search index"); + } + + return (term) => searchIndex.search(term, { + fields: { + qualname: {boost: 4}, + fullname: {boost: 2}, + doc: {boost: 1}, + }, + expand: true + }); +})(); \ No newline at end of file