@@ -109,7 +113,7 @@ It is based on the gamma method
Basic example
-import numpy as np
+import numpy as np
import pyerrors as pe
my_obs = pe.Obs([samples], ['ensemble_name']) # Initialize an Obs object
@@ -126,7 +130,7 @@ An Obs
object can be initialized with two arguments, the first is a list containing the samples for an observable.
The samples can either be provided as python list or as numpy array.
The second argument is a list containing the names of the respective Monte Carlo chains as strings. These strings uniquely identify a Monte Carlo chain/ensemble.
-import pyerrors as pe
+import pyerrors as pe
my_obs = pe.Obs([samples], ['ensemble_name'])
@@ -140,7 +144,7 @@ The required derivatives $\bar{f}_\alpha$ are evaluated up to machine precision
The Obs class is designed such that mathematical numpy functions can be used on Obs just as for regular floats.
-import numpy as np
+import numpy as np
import pyerrors as pe
my_obs1 = pe.Obs([samples1], ['ensemble_name'])
@@ -161,7 +165,7 @@ The required derivatives $\bar{f}_\alpha$ are evaluated up to machine precision
The error estimation within pyerrors is based on the gamma method introduced in arXiv:hep-lat/0306017.
After having arrived at the derived quantity of interest the gamma_method can be called as detailed in the following example.
-my_sum.gamma_method()
+my_sum.gamma_method()
print(my_sum)
> 1.70(57)
my_sum.details()
@@ -176,7 +180,7 @@ $$\tau_\mathrm{int}=\frac{1}{2}+\sum_{t=1}^{W}\rho(t)\geq \frac{1}{2}\,.$$
The window $W$ is determined via the automatic windowing procedure described in arXiv:hep-lat/0306017.
The standard value for the parameter $S$ of this automatic windowing procedure is $S=2$. Other values for $S$ can be passed to the gamma_method as a parameter.
-my_sum.gamma_method(S=3.0)
+my_sum.gamma_method(S=3.0)
my_sum.details()
> Result 1.70000000e+00 +/- 6.30675201e-01 +/- 1.04585650e-01 (37.099%)
> t_int 3.29909703e+00 +/- 9.77310102e-01 S = 3.00
@@ -193,7 +197,7 @@ In this case the error estimate is identical to the sample standard error.
Slow modes in the Monte Carlo history can be accounted for by attaching an exponential tail to the autocorrelation function $\rho$ as suggested in arXiv:1009.5228. The longest autocorrelation time in the history, $\tau_\mathrm{exp}$, can be passed to the gamma_method as a parameter. In this case the automatic windowing procedure is bypassed and the parameter $S$ does not affect the error estimate.
-my_sum.gamma_method(tau_exp=7.2)
+my_sum.gamma_method(tau_exp=7.2)
my_sum.details()
> Result 1.70000000e+00 +/- 6.28097762e-01 +/- 5.79077524e-02 (36.947%)
> t_int 3.27218667e+00 +/- 7.99583654e-01 tau_exp = 7.20, N_sigma = 1
@@ -207,7 +211,7 @@ In this case the error estimate is identical to the sample standard error.
Error propagation for multiple ensembles (Markov chains with different simulation parameters) is handled automatically. Ensembles are uniquely identified by their name.
-obs1 = pe.Obs([samples1], ['ensemble1'])
+obs1 = pe.Obs([samples1], ['ensemble1'])
obs2 = pe.Obs([samples2], ['ensemble2'])
my_sum = obs1 + obs2
@@ -220,7 +224,7 @@ In this case the error estimate is identical to the sample standard error.
pyerrors identifies multiple replica (independent Markov chains with identical simulation parameters) by the vertical bar | in the name of the data set.
-obs1 = pe.Obs([samples1], ['ensemble1|r01'])
+obs1 = pe.Obs([samples1], ['ensemble1|r01'])
obs2 = pe.Obs([samples2], ['ensemble1|r02'])
my_sum = obs1 + obs2
@@ -236,7 +240,7 @@ In this case the error estimate is identical to the sample standard error.
In order to keep track of different error analysis parameters for different ensembles one can make use of global dictionaries as detailed in the following example.
-pe.Obs.S_dict['ensemble1'] = 2.5
+pe.Obs.S_dict['ensemble1'] = 2.5
pe.Obs.tau_exp_dict['ensemble2'] = 8.0
pe.Obs.tau_exp_dict['ensemble3'] = 2.0
@@ -248,7 +252,7 @@ Passing arguments to the gamma_method
still dominates over the dictionaries.
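For example (a minimal sketch of the behaviour described above, reusing the dictionary entries and the observable my_sum from the earlier examples):
pe.Obs.S_dict['ensemble1'] = 2.5
my_sum.gamma_method()       # 'ensemble1' is analyzed with S=2.5 taken from S_dict
my_sum.gamma_method(S=3.0)  # the explicit argument S=3.0 dominates over the dictionary entry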
Obs objects defined on irregular Monte Carlo chains can be initialized with the parameter idl.
-# Observable defined on configurations 20 to 519
+# Observable defined on configurations 20 to 519
obs1 = pe.Obs([samples1], ['ensemble1'], idl=[range(20, 520)])
obs1.details()
> Result 9.98319881e-01
@@ -281,7 +285,7 @@ Make sure to check the autocorrelation time with e.g. pyerrors
offers the Corr class which simplifies the corresponding error propagation and provides the user with a set of standard methods. In order to initialize a Corr object one needs to arrange the data as a list of Obs.
-my_corr = pe.Corr([obs_0, obs_1, obs_2, obs_3])
+my_corr = pe.Corr([obs_0, obs_1, obs_2, obs_3])
print(my_corr)
> x0/a Corr(x0/a)
> ------------------
@@ -293,7 +297,7 @@ Make sure to check the autocorrelation time with e.g. my_corr = pe.Corr([obs_0, obs_1, obs_2, obs_3], padding=[1, 1])
+my_corr = pe.Corr([obs_0, obs_1, obs_2, obs_3], padding=[1, 1])
print(my_corr)
> x0/a Corr(x0/a)
> ------------------
@@ -307,13 +311,13 @@ Make sure to check the autocorrelation time with e.g. print(my_corr[3])
+print(my_corr[3])
> 0.3227(33)
Error propagation with the Corr class works very similarly to Obs objects. Mathematical operations are overloaded and Corr objects can be combined with other Corr objects, Obs objects or real numbers and integers.
-my_new_corr = 0.3 * my_corr[2] * my_corr * my_corr + 12 / my_corr
+my_new_corr = 0.3 * my_corr[2] * my_corr * my_corr + 12 / my_corr
pyerrors provides the user with a set of regularly used methods for the manipulation of correlator objects:
@@ -337,12 +341,12 @@ Make sure to check the autocorrelation time with e.g. pyerrors.correlators.Corr
.
-Complex observables
+Complex valued observables
pyerrors can handle complex valued observables via the class pyerrors.obs.CObs.
CObs are initialized with a real and an imaginary part which both can be Obs valued.
-my_real_part = pe.Obs([samples1], ['ensemble1'])
+my_real_part = pe.Obs([samples1], ['ensemble1'])
my_imag_part = pe.Obs([samples2], ['ensemble1'])
my_cobs = pe.CObs(my_real_part, my_imag_part)
@@ -353,16 +357,69 @@ Make sure to check the autocorrelation time with e.g. my_derived_cobs = (my_cobs + my_cobs.conjugate()) / np.abs(my_cobs)
+my_derived_cobs = (my_cobs + my_cobs.conjugate()) / np.abs(my_cobs)
my_derived_cobs.gamma_method()
print(my_derived_cobs)
> (1.668(23)+0.0j)
-Optimization / fits / roots
+Error propagation in iterative algorithms
-
+pyerrors supports exact linear error propagation for iterative algorithms like various variants of non-linear least squares fits or root finding. The derivatives required for the error propagation are calculated as described in arXiv:1809.01289.
+
+Least squares fits
+
+Standard non-linear least squares fits with errors on the dependent but not the independent variables can be performed with pyerrors.fits.least_squares. As default solver the Levenberg-Marquardt algorithm implemented in scipy is used.
+
+Fit functions have to be of the following form
+
+import autograd.numpy as anp
+
+def func(a, x):
+ return a[1] * anp.exp(-a[0] * x)
+
+
+It is important that numerical functions refer to autograd.numpy instead of numpy for the automatic differentiation to work properly.
+
+Fits can then be performed via
+
+fit_result = pe.fits.least_squares(x, y, func)
+print("\n", fit_result)
+> Fit with 2 parameters
+> Method: Levenberg-Marquardt
+> `ftol` termination condition is satisfied.
+> chisquare/d.o.f.: 0.9593035785160936
+
+> Goodness of fit:
+> χ²/d.o.f. = 0.959304
+> p-value = 0.5673
+> Fit parameters:
+> 0 0.0548(28)
+> 1 1.933(64)
+
+
+where x is a list or numpy.array of floats and y is a list or numpy.array of Obs.
+
+Data stored in Corr objects can be fitted directly using the Corr.fit method.
+
+my_corr = pe.Corr(y)
+fit_result = my_corr.fit(func, fitrange=[12, 25])
+
+
+This simplifies working with absolute fit ranges and automatically takes care of gaps in the data.
+
+For fit functions with multiple independent variables the fit function can be of the form
+
+def func(a, x):
+ (x1, x2) = x
+ return a[0] * x1 ** 2 + a[1] * x2
+
+
+Total least squares fits
+
+pyerrors can also fit data with errors on both the dependent and independent variables using the total least squares method, also referred to as orthogonal distance regression, as implemented in scipy, see pyerrors.fits.least_squares. The syntax is identical to the standard least squares case, the only difference being that x also has to be a list or numpy.array of Obs.
+
+For the full API see pyerrors.fits for fits and pyerrors.roots for finding roots of functions.
Matrix operations
@@ -392,11 +449,21 @@ See pyerrors.obs.Obs.expo
Input
pyerrors includes an input submodule in which input routines and parsers for the output of various numerical programs are contained. For details see pyerrors.input.
+
+Citing
+
+If you use pyerrors for research that leads to a publication, please consider citing:
+
+
+- Ulli Wolff, "Monte Carlo errors with less errors". Comput.Phys.Commun. 156 (2004) 143-153, Comput.Phys.Commun. 176 (2007) 383 (erratum).
+- Stefan Schaefer, Rainer Sommer, Francesco Virotta, "Critical slowing down and error analysis in lattice QCD simulations". Nucl.Phys.B 845 (2011) 93-119.
+- Alberto Ramos, "Automatic differentiation for error analysis of Monte Carlo data". Comput.Phys.Commun. 238 (2019) 19-35.
+
View Source
- r'''
+ r'''
# What is pyerrors?
`pyerrors` is a python package for error computation and propagation of Markov chain Monte Carlo data.
It is based on the gamma method [arXiv:hep-lat/0306017](https://arxiv.org/abs/hep-lat/0306017). Some of its features are:
@@ -645,7 +712,7 @@ See pyerrors.obs.Obs.expo
For the full API see `pyerrors.correlators.Corr`.
-# Complex observables
+# Complex valued observables
`pyerrors` can handle complex valued observables via the class `pyerrors.obs.CObs`.
`CObs` are initialized with a real and an imaginary part which both can be `Obs` valued.
@@ -668,9 +735,60 @@ See pyerrors.obs.Obs.expo
> (1.668(23)+0.0j)
```
-# Optimization / fits / roots
-`pyerrors.fits`
-`pyerrors.roots`
+# Error propagation in iterative algorithms
+
+`pyerrors` supports exact linear error propagation for iterative algorithms like various variants of non-linear least squares fits or root finding. The derivatives required for the error propagation are calculated as described in [arXiv:1809.01289](https://arxiv.org/abs/1809.01289).
+
+## Least squares fits
+
+Standard non-linear least squares fits with errors on the dependent but not the independent variables can be performed with `pyerrors.fits.least_squares`. As default solver the Levenberg-Marquardt algorithm implemented in [scipy](https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.least_squares.html) is used.
+
+Fit functions have to be of the following form
+```python
+import autograd.numpy as anp
+
+def func(a, x):
+ return a[1] * anp.exp(-a[0] * x)
+```
+**It is important that numerical functions refer to `autograd.numpy` instead of `numpy` for the automatic differentiation to work properly.**
+
+Fits can then be performed via
+```python
+fit_result = pe.fits.least_squares(x, y, func)
+print("\n", fit_result)
+> Fit with 2 parameters
+> Method: Levenberg-Marquardt
+> `ftol` termination condition is satisfied.
+> chisquare/d.o.f.: 0.9593035785160936
+
+> Goodness of fit:
+> χ²/d.o.f. = 0.959304
+> p-value = 0.5673
+> Fit parameters:
+> 0 0.0548(28)
+> 1 1.933(64)
+```
+where x is a `list` or `numpy.array` of `floats` and y is a `list` or `numpy.array` of `Obs`.
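As a usage sketch of assembling such data, one might generate synthetic `Obs` with `pe.pseudo_Obs(value, dvalue, name)`; this helper is used here purely for illustration and stands in for real Monte Carlo data:
```python
import numpy as np
import pyerrors as pe

x = [1.0, 2.0, 3.0, 4.0]
# synthetic Obs standing in for real Monte Carlo data
y = [pe.pseudo_Obs(1.9 * np.exp(-0.055 * xi), 0.05, 'ensemble1') for xi in x]

fit_result = pe.fits.least_squares(x, y, func)
fit_result.gamma_method()
print(fit_result)
```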
+
+Data stored in `Corr` objects can be fitted directly using the `Corr.fit` method.
+```python
+my_corr = pe.Corr(y)
+fit_result = my_corr.fit(func, fitrange=[12, 25])
+```
+This simplifies working with absolute fit ranges and automatically takes care of gaps in the data.
+
+For fit functions with multiple independent variables the fit function can be of the form
+
+```python
+def func(a, x):
+ (x1, x2) = x
+ return a[0] * x1 ** 2 + a[1] * x2
+```
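A minimal usage sketch for this case, assuming the independent variables are packed so that `(x1, x2) = x` unpacks into the two coordinate arrays (e.g. a numpy array of shape (2, N)) and that `y` is a matching list of `Obs`; the data values below are illustrative only:
```python
import numpy as np

x = np.array([[1.0, 2.0, 3.0, 4.0],    # x1 values
              [0.5, 1.0, 1.5, 2.0]])   # x2 values
fit_result = pe.fits.least_squares(x, y, func)
```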
+
+## Total least squares fits
+`pyerrors` can also fit data with errors on both the dependent and independent variables using the total least squares method, also referred to as orthogonal distance regression, as implemented in [scipy](https://docs.scipy.org/doc/scipy/reference/odr.html), see `pyerrors.fits.least_squares`. The syntax is identical to the standard least squares case, the only difference being that `x` also has to be a `list` or `numpy.array` of `Obs`.
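A sketch of the corresponding call, following the syntax described above where the entries of `x` are themselves `Obs` (again using `pe.pseudo_Obs` only to create example data):
```python
ox = [pe.pseudo_Obs(xi, 0.01, 'ensemble1') for xi in [1.0, 2.0, 3.0, 4.0]]
fit_result = pe.fits.least_squares(ox, y, func)
```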
+
+For the full API see `pyerrors.fits` for fits and `pyerrors.roots` for finding roots of functions.
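As a hedged sketch of the root-finding interface, assuming `pyerrors.roots.find_root(d, func, guess=...)` finds the root in the first argument of `func(x, d)` for an `Obs`-valued `d`; the same `autograd.numpy` rule as for fit functions applies:
```python
import autograd.numpy as anp
import pyerrors as pe

def root_func(x, d):
    # find x such that exp(-x) - d = 0, i.e. x = -log(d)
    return anp.exp(-x) - d

d_obs = pe.pseudo_Obs(0.8, 0.02, 'ensemble1')
my_root = pe.roots.find_root(d_obs, root_func, guess=1.0)
my_root.gamma_method()
print(my_root)
```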
# Matrix operations
`pyerrors` provides wrappers for `Obs`-valued matrix operations based on `numpy.linalg`. The supported functions include:
@@ -693,6 +811,12 @@ See pyerrors.obs.Obs.expo
# Input
`pyerrors` includes an `input` submodule in which input routines and parsers for the output of various numerical programs are contained. For details see `pyerrors.input`.
+
+# Citing
+If you use `pyerrors` for research that leads to a publication please consider citing:
+- Ulli Wolff, "Monte Carlo errors with less errors". Comput.Phys.Commun. 156 (2004) 143-153, Comput.Phys.Commun. 176 (2007) 383 (erratum).
+- Stefan Schaefer, Rainer Sommer, Francesco Virotta, "Critical slowing down and error analysis in lattice QCD simulations". Nucl.Phys.B 845 (2011) 93-119.
+- Alberto Ramos, "Automatic differentiation for error analysis of Monte Carlo data". Comput.Phys.Commun. 238 (2019) 19-35.
'''
from .obs import *
from .correlators import *
diff --git a/docs/pyerrors/correlators.html b/docs/pyerrors/correlators.html
index 3ec302c5..482b1137 100644
--- a/docs/pyerrors/correlators.html
+++ b/docs/pyerrors/correlators.html
@@ -3,11 +3,14 @@
-
+
pyerrors.correlators API documentation
@@ -207,7 +206,7 @@
View Source
- import warnings
+ import warnings
import numpy as np
import autograd.numpy as anp
import matplotlib.pyplot as plt
@@ -1308,9 +1307,9 @@
Corr:
-
+
View Source
- class Corr:
+ class Corr:
"""The class for a correlator (time dependent sequence of pe.Obs).
Everything, this class does, can be achieved using lists or arrays of Obs.
@@ -2361,9 +2360,9 @@ matrix at every timeslice. Other dependency (eg. spatial) are not supported.
Corr(data_input, padding=[0, 0], prange=None)
-
+
View Source
- def __init__(self, data_input, padding=[0, 0], prange=None):
+ def __init__(self, data_input, padding=[0, 0], prange=None):
""" Initialize a Corr object.
Parameters
@@ -2468,6 +2467,7 @@ region indentified for this correlator.
+
@@ -2478,9 +2478,9 @@ region indentified for this correlator.
gamma_method(self, **kwargs):
-
+
View Source
- def gamma_method(self, **kwargs):
+ def gamma_method(self, **kwargs):
"""Apply the gamma method to the content of the Corr."""
for item in self.content:
if not(item is None):
@@ -2507,9 +2507,9 @@ region indentified for this correlator.
projected(self, vector_l=None, vector_r=None, normalize=False):
-
+
View Source
- def projected(self, vector_l=None, vector_r=None, normalize=False):
+ def projected(self, vector_l=None, vector_r=None, normalize=False):
"""We need to project the Correlator with a Vector to get a single value at each timeslice.
The method can use one or two vectors.
@@ -2570,9 +2570,9 @@ By default it will return the lowest source, which usually means unsmeared-unsme
item(self, i, j):
-
+
View Source
- def item(self, i, j):
+ def item(self, i, j):
"""Picks the element [i,j] from every matrix and returns a correlator containing one Obs per timeslice.
Parameters
@@ -2612,9 +2612,9 @@ Second index to be picked.
plottable(self):
-
+
View Source
- def plottable(self):
+ def plottable(self):
"""Outputs the correlator in a plotable format.
Outputs three lists containing the timeslice index, the value on each
@@ -2647,9 +2647,9 @@ timeslice and the error on each timeslice.
symmetric(self):
-
+
View Source
- def symmetric(self):
+ def symmetric(self):
""" Symmetrize the correlator around x0=0."""
if self.T % 2 != 0:
raise Exception("Can not symmetrize odd T")
@@ -2683,9 +2683,9 @@ timeslice and the error on each timeslice.
anti_symmetric(self):
-
+
View Source
- def anti_symmetric(self):
+ def anti_symmetric(self):
"""Anti-symmetrize the correlator around x0=0."""
if self.T % 2 != 0:
raise Exception("Can not symmetrize odd T")
@@ -2719,9 +2719,9 @@ timeslice and the error on each timeslice.
matrix_symmetric(self):
-
+
View Source
- def matrix_symmetric(self):
+ def matrix_symmetric(self):
"""Symmetrizes the correlator matrices on every timeslice."""
if self.N > 1:
transposed = [None if (G is None) else G.T for G in self.content]
@@ -2745,9 +2745,9 @@ timeslice and the error on each timeslice.
GEVP(self, t0, ts=None, state=0, sorted_list=None):
-
+
View Source
- def GEVP(self, t0, ts=None, state=0, sorted_list=None):
+ def GEVP(self, t0, ts=None, state=0, sorted_list=None):
"""Solve the general eigenvalue problem on the current correlator
Parameters
@@ -2839,9 +2839,9 @@ if this argument is set, a list of vectors (len=self.T) is returned. If it is le
Eigenvalue(self, t0, state=1):
-
+
View Source
- def Eigenvalue(self, t0, state=1):
+ def Eigenvalue(self, t0, state=1):
G = self.matrix_symmetric()
G0 = G.content[t0]
L = cholesky(G0)
@@ -2874,9 +2874,9 @@ if this argument is set, a list of vectors (len=self.T) is returned. If it is le
Hankel(self, N, periodic=False):
-
+
View Source
- def Hankel(self, N, periodic=False):
+ def Hankel(self, N, periodic=False):
"""Constructs an NxN Hankel matrix
C(t) c(t+1) ... c(t+n-1)
@@ -2947,9 +2947,9 @@ determines whether the matrix is extended periodically
roll(self, dt):
-
+
View Source
- def roll(self, dt):
+ def roll(self, dt):
"""Periodically shift the correlator by dt timeslices
Parameters
@@ -2982,9 +2982,9 @@ number of timeslices
reverse(self):
-
+
View Source
- def reverse(self):
+ def reverse(self):
"""Reverse the time ordering of the Corr"""
return Corr(self.content[:: -1])
@@ -3004,9 +3004,9 @@ number of timeslices
thin(self, spacing=2, offset=0):
-
+
View Source
- def thin(self, spacing=2, offset=0):
+ def thin(self, spacing=2, offset=0):
"""Thin out a correlator to suppress correlations
Parameters
@@ -3049,9 +3049,9 @@ Offset the equal spacing
correlate(self, partner):
-
+
View Source
- def correlate(self, partner):
+ def correlate(self, partner):
"""Correlate the correlator with another correlator or Obs
Parameters
@@ -3103,9 +3103,9 @@ correlator or a Corr of same length.
reweight(self, weight, **kwargs):
-
+
View Source
- def reweight(self, weight, **kwargs):
+ def reweight(self, weight, **kwargs):
"""Reweight the correlator.
Parameters
@@ -3154,9 +3154,9 @@ on the configurations in obs[i].idl.
T_symmetry(self, partner, parity=1):
-
+
View Source
- def T_symmetry(self, partner, parity=+1):
+ def T_symmetry(self, partner, parity=+1):
"""Return the time symmetry average of the correlator and its partner
Parameters
@@ -3207,9 +3207,9 @@ Parity quantum number of the correlator, can be +1 or -1
deriv(self, variant='symmetric'):
-
+
View Source
- def deriv(self, variant="symmetric"):
+ def deriv(self, variant="symmetric"):
"""Return the first derivative of the correlator with respect to x0.
Parameters
@@ -3285,9 +3285,9 @@ Available choice: symmetric, forward, backward, improved, default: symmetricsecond_deriv(self, variant='symmetric'):
-
+
View Source
- def second_deriv(self, variant="symmetric"):
+ def second_deriv(self, variant="symmetric"):
"""Return the second derivative of the correlator with respect to x0.
Parameters
@@ -3343,9 +3343,9 @@ Available choice: symmetric, improved, default: symmetric
m_eff(self, variant='log', guess=1.0):
-
+
View Source
- def m_eff(self, variant='log', guess=1.0):
+ def m_eff(self, variant='log', guess=1.0):
"""Returns the effective mass of the correlator as correlator object
Parameters
@@ -3439,9 +3439,9 @@ guess for the root finder, only relevant for the root variant
fit(self, function, fitrange=None, silent=False, **kwargs):
-
+
View Source
- def fit(self, function, fitrange=None, silent=False, **kwargs):
+ def fit(self, function, fitrange=None, silent=False, **kwargs):
"""Fits function to the data
Parameters
@@ -3497,9 +3497,9 @@ Decides whether output is printed to the standard output.
plateau(self, plateau_range=None, method='fit'):
-
+
View Source
- def plateau(self, plateau_range=None, method="fit"):
+ def plateau(self, plateau_range=None, method="fit"):
""" Extract a plateau value from a Corr object
Parameters
@@ -3561,9 +3561,9 @@ method to extract the plateau.
set_prange(self, prange):
-
+
View Source
- def set_prange(self, prange):
+ def set_prange(self, prange):
"""Sets the attribute prange of the Corr object."""
if not len(prange) == 2:
raise Exception("prange must be a list or array with two values")
@@ -3601,9 +3601,9 @@ method to extract the plateau.
):
-
+
View Source
- def show(self, x_range=None, comp=None, y_range=None, logscale=False, plateau=None, fit_res=None, ylabel=None, save=None):
+ def show(self, x_range=None, comp=None, y_range=None, logscale=False, plateau=None, fit_res=None, ylabel=None, save=None):
"""Plots the correlator, uses tag as label if available.
Parameters
@@ -3723,9 +3723,9 @@ path to file in which the figure should be saved
dump(self, filename, datatype='json.gz', **kwargs):
-
+
View Source
- def dump(self, filename, datatype="json.gz", **kwargs):
+ def dump(self, filename, datatype="json.gz", **kwargs):
"""Dumps the Corr into a file of chosen type
Parameters
----------
@@ -3777,9 +3777,9 @@ specifies a custom path for the file (default '.')
print(self, range=[0, None]):
-
+
View Source
- def print(self, range=[0, None]):
+ def print(self, range=[0, None]):
print(self.__repr__(range))
@@ -3796,9 +3796,9 @@ specifies a custom path for the file (default '.')
sqrt(self):
-
+
View Source
- def sqrt(self):
+ def sqrt(self):
return self**0.5
@@ -3815,9 +3815,9 @@ specifies a custom path for the file (default '.')
log(self):
-
+
View Source
- def log(self):
+ def log(self):
newcontent = [None if (item is None) else np.log(item) for item in self.content]
return Corr(newcontent, prange=self.prange)
@@ -3835,9 +3835,9 @@ specifies a custom path for the file (default '.')
exp(self):
-
+
View Source
- def exp(self):
+ def exp(self):
newcontent = [None if (item is None) else np.exp(item) for item in self.content]
return Corr(newcontent, prange=self.prange)
@@ -3855,9 +3855,9 @@ specifies a custom path for the file (default '.')
sin(self):
-
+
View Source
- def sin(self):
+ def sin(self):
return self._apply_func_to_corr(np.sin)
@@ -3874,9 +3874,9 @@ specifies a custom path for the file (default '.')
cos(self):
-
+
View Source
- def cos(self):
+ def cos(self):
return self._apply_func_to_corr(np.cos)
@@ -3893,9 +3893,9 @@ specifies a custom path for the file (default '.')
tan(self):
-
+
View Source
- def tan(self):
+ def tan(self):
return self._apply_func_to_corr(np.tan)
@@ -3912,9 +3912,9 @@ specifies a custom path for the file (default '.')
sinh(self):
-
+
View Source
- def sinh(self):
+ def sinh(self):
return self._apply_func_to_corr(np.sinh)
@@ -3931,9 +3931,9 @@ specifies a custom path for the file (default '.')
cosh(self):
-
+
View Source
- def cosh(self):
+ def cosh(self):
return self._apply_func_to_corr(np.cosh)
@@ -3950,9 +3950,9 @@ specifies a custom path for the file (default '.')
tanh(self):
-
+
View Source
- def tanh(self):
+ def tanh(self):
return self._apply_func_to_corr(np.tanh)
@@ -3969,9 +3969,9 @@ specifies a custom path for the file (default '.')
arcsin(self):
-
+
View Source
- def arcsin(self):
+ def arcsin(self):
return self._apply_func_to_corr(np.arcsin)
@@ -3988,9 +3988,9 @@ specifies a custom path for the file (default '.')
arccos(self):
-
+
View Source
- def arccos(self):
+ def arccos(self):
return self._apply_func_to_corr(np.arccos)
@@ -4007,9 +4007,9 @@ specifies a custom path for the file (default '.')
arctan(self):
-
+
View Source
- def arctan(self):
+ def arctan(self):
return self._apply_func_to_corr(np.arctan)
@@ -4026,9 +4026,9 @@ specifies a custom path for the file (default '.')
arcsinh(self):
-
+
View Source
- def arcsinh(self):
+ def arcsinh(self):
return self._apply_func_to_corr(np.arcsinh)
@@ -4045,9 +4045,9 @@ specifies a custom path for the file (default '.')
arccosh(self):
-
+
View Source
- def arccosh(self):
+ def arccosh(self):
return self._apply_func_to_corr(np.arccosh)
@@ -4064,9 +4064,9 @@ specifies a custom path for the file (default '.')
arctanh(self):
-
+
View Source
- def arctanh(self):
+ def arctanh(self):
return self._apply_func_to_corr(np.arctanh)
@@ -4082,6 +4082,7 @@ specifies a custom path for the file (default '.')
+
@@ -4091,6 +4092,7 @@ specifies a custom path for the file (default '.')
+
@@ -4102,9 +4104,9 @@ specifies a custom path for the file (default '.')
permutation(lst):
-
+
View Source
- def permutation(lst): # Shamelessly copied
+ def permutation(lst): # Shamelessly copied
if len(lst) == 1:
return [lst]
ll = []
diff --git a/docs/pyerrors/covobs.html b/docs/pyerrors/covobs.html
index a0811bfe..98461908 100644
--- a/docs/pyerrors/covobs.html
+++ b/docs/pyerrors/covobs.html
@@ -3,11 +3,14 @@
-
+
pyerrors.covobs API documentation
@@ -84,7 +83,7 @@
View Source
- import numpy as np
+ import numpy as np
class Covobs:
@@ -173,9 +172,9 @@
Covobs:
-
+
View Source
- class Covobs:
+ class Covobs:
def __init__(self, mean, cov, name, pos=None, grad=None):
""" Initialize Covobs object.
@@ -260,9 +259,9 @@
Covobs(mean, cov, name, pos=None, grad=None)
-
+
View Source
- def __init__(self, mean, cov, name, pos=None, grad=None):
+ def __init__(self, mean, cov, name, pos=None, grad=None):
""" Initialize Covobs object.
Parameters
@@ -330,9 +329,9 @@ Gradient of the Covobs wrt. the means belonging to cov.
errsq(self):
-
+
View Source
- def errsq(self):
+ def errsq(self):
""" Return the variance (= square of the error) of the Covobs
"""
return float(np.dot(np.transpose(self.grad), np.dot(self.cov, self.grad)))
@@ -352,6 +351,7 @@ Gradient of the Covobs wrt. the means belonging to cov.
+
@@ -361,6 +361,7 @@ Gradient of the Covobs wrt. the means belonging to cov.
+
diff --git a/docs/pyerrors/dirac.html b/docs/pyerrors/dirac.html
index 6e2bf167..824e2fca 100644
--- a/docs/pyerrors/dirac.html
+++ b/docs/pyerrors/dirac.html
@@ -3,11 +3,14 @@
-
+
pyerrors.dirac API documentation
@@ -75,7 +74,7 @@
View Source
- import numpy as np
+ import numpy as np
gammaX = np.array(
@@ -173,9 +172,9 @@
epsilon_tensor(i, j, k):
-
+
View Source
- def epsilon_tensor(i, j, k):
+ def epsilon_tensor(i, j, k):
"""Rank-3 epsilon tensor
Based on https://codegolf.stackexchange.com/a/160375
@@ -204,9 +203,9 @@
epsilon_tensor_rank4(i, j, k, o):
-
+
View Source
- def epsilon_tensor_rank4(i, j, k, o):
+ def epsilon_tensor_rank4(i, j, k, o):
"""Rank-4 epsilon tensor
Extension of https://codegolf.stackexchange.com/a/160375
@@ -235,9 +234,9 @@
Grid_gamma(gamma_tag):
-
+
View Source
- def Grid_gamma(gamma_tag):
+ def Grid_gamma(gamma_tag):
"""Returns gamma matrix in Grid labeling."""
if gamma_tag == 'Identity':
g = identity
diff --git a/docs/pyerrors/fits.html b/docs/pyerrors/fits.html
index 88c95d04..ed4ae757 100644
--- a/docs/pyerrors/fits.html
+++ b/docs/pyerrors/fits.html
@@ -3,11 +3,14 @@
-
+
pyerrors.fits API documentation
@@ -102,7 +101,7 @@
View Source
- import gc
+ import gc
from collections.abc import Sequence
import warnings
import numpy as np
@@ -176,9 +175,10 @@
fit function, has to be of the form
```python
+ import autograd.numpy as anp
+
def func(a, x):
- y = a[0] + a[1] * x + a[2] * anp.sinh(x)
- return y
+ return a[0] + a[1] * x + a[2] * anp.sinh(x)
```
For multiple x values func can be of the form
@@ -237,9 +237,10 @@
func has to be of the form
```python
+ import autograd.numpy as anp
+
def func(a, x):
- y = a[0] + a[1] * x + a[2] * anp.sinh(x)
- return y
+ return a[0] + a[1] * x + a[2] * anp.sinh(x)
```
For multiple x values func can be of the form
@@ -837,9 +838,9 @@
Fit_result(collections.abc.Sequence):
-
+
View Source
- class Fit_result(Sequence):
+ class Fit_result(Sequence):
"""Represents fit results.
Attributes
@@ -904,9 +905,9 @@ also accessible via indices.
Fit_result()
-
+
View Source
- def __init__(self):
+ def __init__(self):
self.fit_parameters = None
@@ -923,9 +924,9 @@ also accessible via indices.
gamma_method(self):
-
+
View Source
- def gamma_method(self):
+ def gamma_method(self):
"""Apply the gamma method to all fit parameters"""
[o.gamma_method() for o in self.fit_parameters]
@@ -956,9 +957,9 @@ also accessible via indices.
least_squares(x, y, func, priors=None, silent=False, **kwargs):
-
+
View Source
- def least_squares(x, y, func, priors=None, silent=False, **kwargs):
+ def least_squares(x, y, func, priors=None, silent=False, **kwargs):
r'''Performs a non-linear fit to y = func(x).
Parameters
@@ -971,9 +972,10 @@ also accessible via indices.
fit function, has to be of the form
```python
+ import autograd.numpy as anp
+
def func(a, x):
- y = a[0] + a[1] * x + a[2] * anp.sinh(x)
- return y
+ return a[0] + a[1] * x + a[2] * anp.sinh(x)
```
For multiple x values func can be of the form
@@ -1033,14 +1035,15 @@ list of Obs.
func (object):
fit function, has to be of the form
-def func(a, x):
- y = a[0] + a[1] * x + a[2] * anp.sinh(x)
- return y
+import autograd.numpy as anp
+
+def func(a, x):
+ return a[0] + a[1] * x + a[2] * anp.sinh(x)
For multiple x values func can be of the form
-def func(a, x):
+