From 35df837bb42fcd938f921f317d544df8b478104d Mon Sep 17 00:00:00 2001
From: fjosw

The Basic example
import numpy as np
import pyerrors as pe

my_obs = pe.Obs([samples], ['ensemble_name']) # Initialize an Obs object
my_new_obs = 2 * np.log(my_obs) / my_obs ** 2 # Construct derived Obs object
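For readers who want to run this snippet as-is, here is a minimal, self-contained sketch; the randomly generated samples below are purely illustrative stand-ins for real Monte Carlo measurements and are not part of the original example.

import numpy as np
import pyerrors as pe

samples = list(np.random.normal(1.7, 0.1, 1000))  # illustrative stand-in data

my_obs = pe.Obs([samples], ['ensemble_name'])  # Initialize an Obs object
my_new_obs = 2 * np.log(my_obs) / my_obs ** 2  # Construct derived Obs object
my_new_obs.gamma_method()                      # Estimate the statistical error
print(my_new_obs)                              # Print value and error to stdout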
@@ -186,7 +186,7 @@ The samples can either be provided as python list or as numpy array.
The second argument is a list containing the names of the respective Monte Carlo chains as strings. These strings uniquely identify a Monte Carlo chain/ensemble. It is crucial for the correct error propagation that observations from the same Monte Carlo history are labeled with the same name. See Multiple ensembles/replica for details.
import pyerrors as pe
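As a brief, hedged illustration of why consistent naming matters (the names and sample lists here are hypothetical), observables from different Monte Carlo chains can be combined and pyerrors keeps the error contribution of each ensemble separate:

obs_a = pe.Obs([samples_a], ['ensembleA'])  # first Markov chain
obs_b = pe.Obs([samples_b], ['ensembleB'])  # independent second chain

my_sum = obs_a + obs_b       # derived quantity involving both ensembles
my_sum.gamma_method()
my_sum.details()             # reports 'ensembleA' and 'ensembleB' contributions separately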
@@ -202,8 +202,8 @@ The required derivatives $\bar{f}_\alpha$ are evaluated up to machine precision
import pyerrors as pe
my_obs = pe.Obs([samples], ['ensemble_name'])
The Obs class is designed such that mathematical numpy functions can be used on Obs just as for regular floats.

import numpy as np
import pyerrors as pe

my_obs1 = pe.Obs([samples1], ['ensemble_name'])
my_obs2 = pe.Obs([samples2], ['ensemble_name'])
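A short sketch of what this enables in practice, using the two observables just defined (samples1 and samples2 remain placeholders):

my_sum = my_obs1 + my_obs2
my_m_eff = np.log(my_obs1 / my_obs2)  # numpy functions act on Obs just like on floats

iamzero = my_m_eff - my_m_eff         # automatic differentiation tracks correlations,
iamzero.gamma_method()                # so this difference is exactly zero
print(iamzero == 0.0)                 # expected to print True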
@@ -232,7 +232,7 @@ After having arrived at the derived quantity of interest the
gamma_method
> Result 1.70000000e+00 +/- 5.72046658e-01 +/- 7.56746598e-02 (33.650%)
> t_int 2.71422900e+00 +/- 6.40320983e-01 S = 2.00
> 1000 samples in 1 ensemble:
> · Ensemble 'ensemble_name' : 1000 configurations (from 1 to 1000)
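The windowing parameter S reported above (S = 2.00) can be adjusted when calling gamma_method; a brief sketch:

my_new_obs.gamma_method(S=3.0)  # more conservative automatic windowing procedure
my_new_obs.details()            # error and tau_int are re-estimated with the new S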
gamma_method
still dominates over the dict
obs1.details()
> Result 9.98319881e-01
> 500 samples in 1 ensemble:
> · Ensemble 'ensemble1' : 500 configurations (from 20 to 519)
# Observable defined on every second configuration between 5 and 1003
obs2 = pe.Obs([samples2], ['ensemble1'], idl=[range(5, 1005, 2)])
obs2.details()
> Result 9.99100712e-01
> 500 samples in 1 ensemble:
> · Ensemble 'ensemble1' : 500 configurations (from 5 to 1003 in steps of 2)
# Observable defined on configurations 2, 9, 28, 29 and 501
obs3 = pe.Obs([samples3], ['ensemble1'], idl=[[2, 9, 28, 29, 501]])
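A hypothetical sketch of how such observables can be combined: even though obs1, obs2 and obs3 are defined on different subsets of configurations of 'ensemble1', standard arithmetic applies and pyerrors aligns the configurations via idl for the error propagation.

combined = obs1 + obs2 - obs3
combined.gamma_method()
combined.details()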
@@ -459,7 +459,7 @@ Make sure to check the autocorrelation time with e.g. pyerrors
In pyerrors, external input is defined by $M$ mean values, an $M\times M$ covariance matrix (where $M=1$ is permissible) and a name that uniquely identifies the covariance matrix. Below, we define the pion mass based on its mean value and error, 134.9768(5). Note that the square of the error enters cov_Obs, since the second argument of this function is the covariance matrix of the Covobs.
import pyerrors.obs as pe
mpi = pe.cov_Obs(134.9768, 0.0005**2, 'pi^0 mass')
mpi.gamma_method()
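A hedged sketch of how such a Covobs enters a calculation together with a Monte Carlo Obs (my_obs as in the earlier examples):

ratio = my_obs / mpi   # the 0.0005 uncertainty of 'pi^0 mass' is propagated as an independent contribution
ratio.gamma_method()
ratio.details()        # statistical and external error contributions are listed separately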
@@ -504,9 +504,9 @@ where the Jacobian is computed for each derived quantity via automatic different
Fit functions have to be of the following form
import autograd.numpy as anp
def func(a, x):
return a[1] * anp.exp(-a[0] * x)
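A hypothetical invocation of a least-squares fit with this function, assuming x (a list or array of floats) and y (a list of Obs of the same length) are already defined:

fit_result = pe.fits.least_squares(x, y, func)
fit_result.gamma_method()  # apply the gamma method to all fit parameters
print(fit_result)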
@@ -547,7 +547,7 @@ where the Jacobian is computed for each derived quantity via automatic different
For fits with multiple independent variables, the fit function can be of the form
def func(a, x):
(x1, x2) = x
return a[0] * x1 ** 2 + a[1] * x2
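A sketch of how the independent variables could be packed for such a fit; x1, x2 and y are assumed to exist with matching length, and numpy is imported as np:

x = np.array([x1, x2])                          # func unpacks this as (x1, x2) = x
fit_result = pe.fits.least_squares(x, y, func)
fit_result.gamma_method()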
@@ -1151,19 +1151,19 @@ The following entries are optional:
477
478Julia I/O routines for the json.gz format, compatible with [ADerrors.jl](https://gitlab.ift.uam-csic.es/alberto/aderrors.jl), can be found [here](https://github.com/fjosw/ADjson.jl).
479'''
480from .obs import *
481from .correlators import *
482from .fits import *
483from .misc import *
484from . import dirac as dirac
485from . import input as input
486from . import linalg as linalg
487from . import mpm as mpm
488from . import roots as roots
489from . import integrate as integrate
490from . import special as special
491
492from .version import __version__ as __version__
diff --git a/docs/pyerrors/correlators.html b/docs/pyerrors/correlators.html
index 602f733c..543600bd 100644
--- a/docs/pyerrors/correlators.html
+++ b/docs/pyerrors/correlators.html
@@ -241,20 +241,20 @@
- 1import warnings
- 2from itertools import permutations
- 3import numpy as np
- 4import autograd.numpy as anp
- 5import matplotlib.pyplot as plt
- 6import scipy.linalg
- 7from .obs import Obs, reweight, correlate, CObs
- 8from .misc import dump_object, _assert_equal_properties
- 9from .fits import least_squares
- 10from .roots import find_root
- 11from . import linalg
+ 1import warnings
+ 2from itertools import permutations
+ 3import numpy as np
+ 4import autograd.numpy as anp
+ 5import matplotlib.pyplot as plt
+ 6import scipy.linalg
+ 7from .obs import Obs, reweight, correlate, CObs
+ 8from .misc import dump_object, _assert_equal_properties
+ 9from .fits import least_squares
+ 10from .roots import find_root
+ 11from . import linalg
12
13
- 14class Corr:
+ 14class Corr:
15 r"""The class for a correlator (time dependent sequence of pe.Obs).
16
17 Everything, this class does, can be achieved using lists or arrays of Obs.
@@ -285,7 +285,7 @@
42
43 __slots__ = ["content", "N", "T", "tag", "prange"]
44
- 45 def __init__(self, data_input, padding=[0, 0], prange=None):
+ 45 def __init__(self, data_input, padding=[0, 0], prange=None):
46 """ Initialize a Corr object.
47
48 Parameters
@@ -362,7 +362,7 @@
119 self.T = len(self.content)
120 self.prange = prange
121
- 122 def __getitem__(self, idx):
+ 122 def __getitem__(self, idx):
123 """Return the content of timeslice idx"""
124 if self.content[idx] is None:
125 return None
@@ -372,7 +372,7 @@
129 return self.content[idx]
130
131 @property
- 132 def reweighted(self):
+ 132 def reweighted(self):
133 bool_array = np.array([list(map(lambda x: x.reweighted, o)) for o in [x for x in self.content if x is not None]])
134 if np.all(bool_array == 1):
135 return True
@@ -381,7 +381,7 @@
138 else:
139 raise Exception("Reweighting status of correlator corrupted.")
140
- 141 def gamma_method(self, **kwargs):
+ 141 def gamma_method(self, **kwargs):
142 """Apply the gamma method to the content of the Corr."""
143 for item in self.content:
144 if item is not None:
@@ -394,7 +394,7 @@
151
152 gm = gamma_method
153
- 154 def projected(self, vector_l=None, vector_r=None, normalize=False):
+ 154 def projected(self, vector_l=None, vector_r=None, normalize=False):
155 """We need to project the Correlator with a Vector to get a single value at each timeslice.
156
157 The method can use one or two vectors.
@@ -433,7 +433,7 @@
190 newcontent = [None if (_check_for_none(self, self.content[t]) or vector_l[t] is None or vector_r[t] is None) else np.asarray([vector_l[t].T @ self.content[t] @ vector_r[t]]) for t in range(self.T)]
191 return Corr(newcontent)
192
- 193 def item(self, i, j):
+ 193 def item(self, i, j):
194 """Picks the element [i,j] from every matrix and returns a correlator containing one Obs per timeslice.
195
196 Parameters
@@ -448,7 +448,7 @@
205 newcontent = [None if (item is None) else item[i, j] for item in self.content]
206 return Corr(newcontent)
207
- 208 def plottable(self):
+ 208 def plottable(self):
209 """Outputs the correlator in a plotable format.
210
211 Outputs three lists containing the timeslice index, the value on each
@@ -462,7 +462,7 @@
219
220 return x_list, y_list, y_err_list
221
- 222 def symmetric(self):
+ 222 def symmetric(self):
223 """ Symmetrize the correlator around x0=0."""
224 if self.N != 1:
225 raise ValueError('symmetric cannot be safely applied to multi-dimensional correlators.')
@@ -483,7 +483,7 @@
240 raise ValueError("Corr could not be symmetrized: No redundant values")
241 return Corr(newcontent, prange=self.prange)
242
- 243 def anti_symmetric(self):
+ 243 def anti_symmetric(self):
244 """Anti-symmetrize the correlator around x0=0."""
245 if self.N != 1:
246 raise TypeError('anti_symmetric cannot be safely applied to multi-dimensional correlators.')
@@ -505,7 +505,7 @@
262 raise ValueError("Corr could not be symmetrized: No redundant values")
263 return Corr(newcontent, prange=self.prange)
264
- 265 def is_matrix_symmetric(self):
+ 265 def is_matrix_symmetric(self):
266 """Checks whether a correlator matrices is symmetric on every timeslice."""
267 if self.N == 1:
268 raise TypeError("Only works for correlator matrices.")
@@ -520,7 +520,7 @@
277 return False
278 return True
279
- 280 def trace(self):
+ 280 def trace(self):
281 """Calculates the per-timeslice trace of a correlator matrix."""
282 if self.N == 1:
283 raise ValueError("Only works for correlator matrices.")
@@ -532,7 +532,7 @@
289 newcontent.append(np.trace(self.content[t]))
290 return Corr(newcontent)
291
- 292 def matrix_symmetric(self):
+ 292 def matrix_symmetric(self):
293 """Symmetrizes the correlator matrices on every timeslice."""
294 if self.N == 1:
295 raise ValueError("Trying to symmetrize a correlator matrix, that already has N=1.")
@@ -542,7 +542,7 @@
299 transposed = [None if _check_for_none(self, G) else G.T for G in self.content]
300 return 0.5 * (Corr(transposed) + self)
301
- 302 def GEVP(self, t0, ts=None, sort="Eigenvalue", vector_obs=False, **kwargs):
+ 302 def GEVP(self, t0, ts=None, sort="Eigenvalue", vector_obs=False, **kwargs):
303 r'''Solve the generalized eigenvalue problem on the correlator matrix and returns the corresponding eigenvectors.
304
305 The eigenvectors are sorted according to the descending eigenvalues, the zeroth eigenvector(s) correspond to the
@@ -593,7 +593,7 @@
350 else:
351 symmetric_corr = self.matrix_symmetric()
352
- 353 def _get_mat_at_t(t, vector_obs=vector_obs):
+ 353 def _get_mat_at_t(t, vector_obs=vector_obs):
354 if vector_obs:
355 return symmetric_corr[t]
356 else:
@@ -648,7 +648,7 @@
405 else:
406 return reordered_vecs
407
- 408 def Eigenvalue(self, t0, ts=None, state=0, sort="Eigenvalue", **kwargs):
+ 408 def Eigenvalue(self, t0, ts=None, state=0, sort="Eigenvalue", **kwargs):
409 """Determines the eigenvalue of the GEVP by solving and projecting the correlator
410
411 Parameters
@@ -661,7 +661,7 @@
418 vec = self.GEVP(t0, ts=ts, sort=sort, **kwargs)[state]
419 return self.projected(vec)
420
- 421 def Hankel(self, N, periodic=False):
+ 421 def Hankel(self, N, periodic=False):
422 """Constructs an NxN Hankel matrix
423
424 C(t) c(t+1) ... c(t+n-1)
@@ -685,7 +685,7 @@
442 for t in range(self.T):
443 new_content.append(array.copy())
444
- 445 def wrap(i):
+ 445 def wrap(i):
446 while i >= self.T:
447 i -= self.T
448 return i
@@ -702,7 +702,7 @@
459
460 return Corr(new_content)
461
- 462 def roll(self, dt):
+ 462 def roll(self, dt):
463 """Periodically shift the correlator by dt timeslices
464
465 Parameters
@@ -712,11 +712,11 @@
469 """
470 return Corr(list(np.roll(np.array(self.content, dtype=object), dt, axis=0)))
471
- 472 def reverse(self):
+ 472 def reverse(self):
473 """Reverse the time ordering of the Corr"""
474 return Corr(self.content[:: -1])
475
- 476 def thin(self, spacing=2, offset=0):
+ 476 def thin(self, spacing=2, offset=0):
477 """Thin out a correlator to suppress correlations
478
479 Parameters
@@ -734,7 +734,7 @@
491 new_content.append(self.content[t])
492 return Corr(new_content)
493
- 494 def correlate(self, partner):
+ 494 def correlate(self, partner):
495 """Correlate the correlator with another correlator or Obs
496
497 Parameters
@@ -763,7 +763,7 @@
520
521 return Corr(new_content)
522
- 523 def reweight(self, weight, **kwargs):
+ 523 def reweight(self, weight, **kwargs):
524 """Reweight the correlator.
525
526 Parameters
@@ -786,7 +786,7 @@
543 new_content.append(np.array(reweight(weight, t_slice, **kwargs)))
544 return Corr(new_content)
545
- 546 def T_symmetry(self, partner, parity=+1):
+ 546 def T_symmetry(self, partner, parity=+1):
547 """Return the time symmetry average of the correlator and its partner
548
549 Parameters
@@ -816,7 +816,7 @@
573
574 return (self + T_partner) / 2
575
- 576 def deriv(self, variant="symmetric"):
+ 576 def deriv(self, variant="symmetric"):
577 """Return the first derivative of the correlator with respect to x0.
578
579 Parameters
@@ -881,7 +881,7 @@
638 else:
639 raise ValueError("Unknown variant.")
640
- 641 def second_deriv(self, variant="symmetric"):
+ 641 def second_deriv(self, variant="symmetric"):
642 r"""Return the second derivative of the correlator with respect to x0.
643
644 Parameters
@@ -944,7 +944,7 @@
701 else:
702 raise ValueError("Unknown variant.")
703
- 704 def m_eff(self, variant='log', guess=1.0):
+ 704 def m_eff(self, variant='log', guess=1.0):
705 """Returns the effective mass of the correlator as correlator object
706
707 Parameters
@@ -995,7 +995,7 @@
752 else:
753 func = anp.sinh
754
- 755 def root_function(x, d):
+ 755 def root_function(x, d):
756 return func(x * (t - self.T / 2)) / func(x * (t + 1 - self.T / 2)) - d
757
758 newcontent = []
@@ -1028,7 +1028,7 @@
785 else:
786 raise ValueError('Unknown variant.')
787
- 788 def fit(self, function, fitrange=None, silent=False, **kwargs):
+ 788 def fit(self, function, fitrange=None, silent=False, **kwargs):
789 r'''Fits function to the data
790
791 Parameters
@@ -1062,7 +1062,7 @@
819 result = least_squares(xs, ys, function, silent=silent, **kwargs)
820 return result
821
- 822 def plateau(self, plateau_range=None, method="fit", auto_gamma=False):
+ 822 def plateau(self, plateau_range=None, method="fit", auto_gamma=False):
823 """ Extract a plateau value from a Corr object
824
825 Parameters
@@ -1089,7 +1089,7 @@
846 if auto_gamma:
847 self.gamma_method()
848 if method == "fit":
- 849 def const_func(a, t):
+ 849 def const_func(a, t):
850 return a[0]
851 return self.fit(const_func, plateau_range)[0]
852 elif method in ["avg", "average", "mean"]:
@@ -1099,7 +1099,7 @@
856 else:
857 raise ValueError("Unsupported plateau method: " + method)
858
- 859 def set_prange(self, prange):
+ 859 def set_prange(self, prange):
860 """Sets the attribute prange of the Corr object."""
861 if not len(prange) == 2:
862 raise ValueError("prange must be a list or array with two values")
@@ -1111,7 +1111,7 @@
868 self.prange = prange
869 return
870
- 871 def show(self, x_range=None, comp=None, y_range=None, logscale=False, plateau=None, fit_res=None, fit_key=None, ylabel=None, save=None, auto_gamma=False, hide_sigma=None, references=None, title=None):
+ 871 def show(self, x_range=None, comp=None, y_range=None, logscale=False, plateau=None, fit_res=None, fit_key=None, ylabel=None, save=None, auto_gamma=False, hide_sigma=None, references=None, title=None):
872 """Plots the correlator using the tag of the correlator as label if available.
873
874 Parameters
@@ -1236,7 +1236,7 @@
993 else:
994 raise TypeError("'save' has to be a string.")
995
- 996 def spaghetti_plot(self, logscale=True):
+ 996 def spaghetti_plot(self, logscale=True):
997 """Produces a spaghetti plot of the correlator suited to monitor exceptional configurations.
998
999 Parameters
@@ -1265,7 +1265,7 @@
1022 plt.title(name)
1023 plt.draw()
1024
-1025 def dump(self, filename, datatype="json.gz", **kwargs):
+1025 def dump(self, filename, datatype="json.gz", **kwargs):
1026 """Dumps the Corr into a file of chosen type
1027 Parameters
1028 ----------
@@ -1278,7 +1278,7 @@
1035 specifies a custom path for the file (default '.')
1036 """
1037 if datatype == "json.gz":
-1038 from .input.json import dump_to_json
+1038 from .input.json import dump_to_json
1039 if 'path' in kwargs:
1040 file_name = kwargs.get('path') + '/' + filename
1041 else:
@@ -1289,10 +1289,10 @@
1046 else:
1047 raise ValueError("Unknown datatype " + str(datatype))
1048
-1049 def print(self, print_range=None):
+1049 def print(self, print_range=None):
1050 print(self.__repr__(print_range))
1051
-1052 def __repr__(self, print_range=None):
+1052 def __repr__(self, print_range=None):
1053 if print_range is None:
1054 print_range = [0, None]
1055
@@ -1317,7 +1317,7 @@
1074 content_string += '\n'
1075 return content_string
1076
-1077 def __str__(self):
+1077 def __str__(self):
1078 return self.__repr__()
1079
1080 # We define the basic operations, that can be performed with correlators.
@@ -1327,14 +1327,14 @@
1084
1085 __array_priority__ = 10000
1086
-1087 def __eq__(self, y):
+1087 def __eq__(self, y):
1088 if isinstance(y, Corr):
1089 comp = np.asarray(y.content, dtype=object)
1090 else:
1091 comp = np.asarray(y)
1092 return np.asarray(self.content, dtype=object) == comp
1093
-1094 def __add__(self, y):
+1094 def __add__(self, y):
1095 if isinstance(y, Corr):
1096 if ((self.N != y.N) or (self.T != y.T)):
1097 raise ValueError("Addition of Corrs with different shape")
@@ -1362,7 +1362,7 @@
1119 else:
1120 raise TypeError("Corr + wrong type")
1121
-1122 def __mul__(self, y):
+1122 def __mul__(self, y):
1123 if isinstance(y, Corr):
1124 if not ((self.N == 1 or y.N == 1 or self.N == y.N) and self.T == y.T):
1125 raise ValueError("Multiplication of Corr object requires N=N or N=1 and T=T")
@@ -1390,7 +1390,7 @@
1147 else:
1148 raise TypeError("Corr * wrong type")
1149
-1150 def __matmul__(self, y):
+1150 def __matmul__(self, y):
1151 if isinstance(y, np.ndarray):
1152 if y.ndim != 2 or y.shape[0] != y.shape[1]:
1153 raise ValueError("Can only multiply correlators by square matrices.")
@@ -1417,7 +1417,7 @@
1174 else:
1175 return NotImplemented
1176
-1177 def __rmatmul__(self, y):
+1177 def __rmatmul__(self, y):
1178 if isinstance(y, np.ndarray):
1179 if y.ndim != 2 or y.shape[0] != y.shape[1]:
1180 raise ValueError("Can only multiply correlators by square matrices.")
@@ -1433,7 +1433,7 @@
1190 else:
1191 return NotImplemented
1192
-1193 def __truediv__(self, y):
+1193 def __truediv__(self, y):
1194 if isinstance(y, Corr):
1195 if not ((self.N == 1 or y.N == 1 or self.N == y.N) and self.T == y.T):
1196 raise ValueError("Multiplication of Corr object requires N=N or N=1 and T=T")
@@ -1487,37 +1487,37 @@
1244 else:
1245 raise TypeError('Corr / wrong type')
1246
-1247 def __neg__(self):
+1247 def __neg__(self):
1248 newcontent = [None if _check_for_none(self, item) else -1. * item for item in self.content]
1249 return Corr(newcontent, prange=self.prange)
1250
-1251 def __sub__(self, y):
+1251 def __sub__(self, y):
1252 return self + (-y)
1253
-1254 def __pow__(self, y):
+1254 def __pow__(self, y):
1255 if isinstance(y, (Obs, int, float, CObs)):
1256 newcontent = [None if _check_for_none(self, item) else item**y for item in self.content]
1257 return Corr(newcontent, prange=self.prange)
1258 else:
1259 raise TypeError('Type of exponent not supported')
1260
-1261 def __abs__(self):
+1261 def __abs__(self):
1262 newcontent = [None if _check_for_none(self, item) else np.abs(item) for item in self.content]
1263 return Corr(newcontent, prange=self.prange)
1264
1265 # The numpy functions:
-1266 def sqrt(self):
+1266 def sqrt(self):
1267 return self ** 0.5
1268
-1269 def log(self):
+1269 def log(self):
1270 newcontent = [None if _check_for_none(self, item) else np.log(item) for item in self.content]
1271 return Corr(newcontent, prange=self.prange)
1272
-1273 def exp(self):
+1273 def exp(self):
1274 newcontent = [None if _check_for_none(self, item) else np.exp(item) for item in self.content]
1275 return Corr(newcontent, prange=self.prange)
1276
-1277 def _apply_func_to_corr(self, func):
+1277 def _apply_func_to_corr(self, func):
1278 newcontent = [None if _check_for_none(self, item) else func(item) for item in self.content]
1279 for t in range(self.T):
1280 if _check_for_none(self, newcontent[t]):
@@ -1530,58 +1530,58 @@
1287 raise ValueError('Operation returns undefined correlator')
1288 return Corr(newcontent)
1289
-1290 def sin(self):
+1290 def sin(self):
1291 return self._apply_func_to_corr(np.sin)
1292
-1293 def cos(self):
+1293 def cos(self):
1294 return self._apply_func_to_corr(np.cos)
1295
-1296 def tan(self):
+1296 def tan(self):
1297 return self._apply_func_to_corr(np.tan)
1298
-1299 def sinh(self):
+1299 def sinh(self):
1300 return self._apply_func_to_corr(np.sinh)
1301
-1302 def cosh(self):
+1302 def cosh(self):
1303 return self._apply_func_to_corr(np.cosh)
1304
-1305 def tanh(self):
+1305 def tanh(self):
1306 return self._apply_func_to_corr(np.tanh)
1307
-1308 def arcsin(self):
+1308 def arcsin(self):
1309 return self._apply_func_to_corr(np.arcsin)
1310
-1311 def arccos(self):
+1311 def arccos(self):
1312 return self._apply_func_to_corr(np.arccos)
1313
-1314 def arctan(self):
+1314 def arctan(self):
1315 return self._apply_func_to_corr(np.arctan)
1316
-1317 def arcsinh(self):
+1317 def arcsinh(self):
1318 return self._apply_func_to_corr(np.arcsinh)
1319
-1320 def arccosh(self):
+1320 def arccosh(self):
1321 return self._apply_func_to_corr(np.arccosh)
1322
-1323 def arctanh(self):
+1323 def arctanh(self):
1324 return self._apply_func_to_corr(np.arctanh)
1325
1326 # Right hand side operations (require tweak in main module to work)
-1327 def __radd__(self, y):
+1327 def __radd__(self, y):
1328 return self + y
1329
-1330 def __rsub__(self, y):
+1330 def __rsub__(self, y):
1331 return -self + y
1332
-1333 def __rmul__(self, y):
+1333 def __rmul__(self, y):
1334 return self * y
1335
-1336 def __rtruediv__(self, y):
+1336 def __rtruediv__(self, y):
1337 return (self / y) ** (-1)
1338
1339 @property
-1340 def real(self):
-1341 def return_real(obs_OR_cobs):
+1340 def real(self):
+1341 def return_real(obs_OR_cobs):
1342 if isinstance(obs_OR_cobs.flatten()[0], CObs):
1343 return np.vectorize(lambda x: x.real)(obs_OR_cobs)
1344 else:
@@ -1590,8 +1590,8 @@
1347 return self._apply_func_to_corr(return_real)
1348
1349 @property
-1350 def imag(self):
-1351 def return_imag(obs_OR_cobs):
+1350 def imag(self):
+1351 def return_imag(obs_OR_cobs):
1352 if isinstance(obs_OR_cobs.flatten()[0], CObs):
1353 return np.vectorize(lambda x: x.imag)(obs_OR_cobs)
1354 else:
@@ -1599,7 +1599,7 @@
1356
1357 return self._apply_func_to_corr(return_imag)
1358
-1359 def prune(self, Ntrunc, tproj=3, t0proj=2, basematrix=None):
+1359 def prune(self, Ntrunc, tproj=3, t0proj=2, basematrix=None):
1360 r''' Project large correlation matrix to lowest states
1361
1362 This method can be used to reduce the size of an (N x N) correlation matrix
@@ -1657,7 +1657,7 @@
1414 return Corr(newcontent)
1415
1416
-1417def _sort_vectors(vec_set_in, ts):
+1417def _sort_vectors(vec_set_in, ts):
1418 """Helper function used to find a set of Eigenvectors consistent over all timeslices"""
1419
1420 if isinstance(vec_set_in[ts][0][0], Obs):
@@ -1689,12 +1689,12 @@
1446 return sorted_vec_set
1447
1448
-1449def _check_for_none(corr, entry):
+1449def _check_for_none(corr, entry):
1450 """Checks if entry for correlator corr is None"""
1451 return len(list(filter(None, np.asarray(entry).flatten()))) < corr.N ** 2
1452
1453
-1454def _GEVP_solver(Gt, G0, method='eigh', chol_inv=None):
+1454def _GEVP_solver(Gt, G0, method='eigh', chol_inv=None):
1455 r"""Helper function for solving the GEVP and sorting the eigenvectors.
1456
1457 Solves $G(t)v_i=\lambda_i G(t_0)v_i$ and returns the eigenvectors v_i
@@ -1732,10 +1732,10 @@
1489 cholesky = np.linalg.cholesky
1490 inv = np.linalg.inv
1491
-1492 def eigv(x, **kwargs):
+1492 def eigv(x, **kwargs):
1493 return np.linalg.eigh(x)[1]
1494
-1495 def matmul(*operands):
+1495 def matmul(*operands):
1496 return np.linalg.multi_dot(operands)
1497 N = Gt.shape[0]
1498 output = [[] for j in range(N)]
@@ -1769,7 +1769,7 @@
- 15class Corr:
+ 15class Corr:
16 r"""The class for a correlator (time dependent sequence of pe.Obs).
17
18 Everything, this class does, can be achieved using lists or arrays of Obs.
@@ -1800,7 +1800,7 @@
43
44 __slots__ = ["content", "N", "T", "tag", "prange"]
45
- 46 def __init__(self, data_input, padding=[0, 0], prange=None):
+ 46 def __init__(self, data_input, padding=[0, 0], prange=None):
47 """ Initialize a Corr object.
48
49 Parameters
@@ -1877,7 +1877,7 @@
120 self.T = len(self.content)
121 self.prange = prange
122
- 123 def __getitem__(self, idx):
+ 123 def __getitem__(self, idx):
124 """Return the content of timeslice idx"""
125 if self.content[idx] is None:
126 return None
@@ -1887,7 +1887,7 @@
130 return self.content[idx]
131
132 @property
- 133 def reweighted(self):
+ 133 def reweighted(self):
134 bool_array = np.array([list(map(lambda x: x.reweighted, o)) for o in [x for x in self.content if x is not None]])
135 if np.all(bool_array == 1):
136 return True
@@ -1896,7 +1896,7 @@
139 else:
140 raise Exception("Reweighting status of correlator corrupted.")
141
- 142 def gamma_method(self, **kwargs):
+ 142 def gamma_method(self, **kwargs):
143 """Apply the gamma method to the content of the Corr."""
144 for item in self.content:
145 if item is not None:
@@ -1909,7 +1909,7 @@
152
153 gm = gamma_method
154
- 155 def projected(self, vector_l=None, vector_r=None, normalize=False):
+ 155 def projected(self, vector_l=None, vector_r=None, normalize=False):
156 """We need to project the Correlator with a Vector to get a single value at each timeslice.
157
158 The method can use one or two vectors.
@@ -1948,7 +1948,7 @@
191 newcontent = [None if (_check_for_none(self, self.content[t]) or vector_l[t] is None or vector_r[t] is None) else np.asarray([vector_l[t].T @ self.content[t] @ vector_r[t]]) for t in range(self.T)]
192 return Corr(newcontent)
193
- 194 def item(self, i, j):
+ 194 def item(self, i, j):
195 """Picks the element [i,j] from every matrix and returns a correlator containing one Obs per timeslice.
196
197 Parameters
@@ -1963,7 +1963,7 @@
206 newcontent = [None if (item is None) else item[i, j] for item in self.content]
207 return Corr(newcontent)
208
- 209 def plottable(self):
+ 209 def plottable(self):
210 """Outputs the correlator in a plotable format.
211
212 Outputs three lists containing the timeslice index, the value on each
@@ -1977,7 +1977,7 @@
220
221 return x_list, y_list, y_err_list
222
- 223 def symmetric(self):
+ 223 def symmetric(self):
224 """ Symmetrize the correlator around x0=0."""
225 if self.N != 1:
226 raise ValueError('symmetric cannot be safely applied to multi-dimensional correlators.')
@@ -1998,7 +1998,7 @@
241 raise ValueError("Corr could not be symmetrized: No redundant values")
242 return Corr(newcontent, prange=self.prange)
243
- 244 def anti_symmetric(self):
+ 244 def anti_symmetric(self):
245 """Anti-symmetrize the correlator around x0=0."""
246 if self.N != 1:
247 raise TypeError('anti_symmetric cannot be safely applied to multi-dimensional correlators.')
@@ -2020,7 +2020,7 @@
263 raise ValueError("Corr could not be symmetrized: No redundant values")
264 return Corr(newcontent, prange=self.prange)
265
- 266 def is_matrix_symmetric(self):
+ 266 def is_matrix_symmetric(self):
267 """Checks whether a correlator matrices is symmetric on every timeslice."""
268 if self.N == 1:
269 raise TypeError("Only works for correlator matrices.")
@@ -2035,7 +2035,7 @@
278 return False
279 return True
280
- 281 def trace(self):
+ 281 def trace(self):
282 """Calculates the per-timeslice trace of a correlator matrix."""
283 if self.N == 1:
284 raise ValueError("Only works for correlator matrices.")
@@ -2047,7 +2047,7 @@
290 newcontent.append(np.trace(self.content[t]))
291 return Corr(newcontent)
292
- 293 def matrix_symmetric(self):
+ 293 def matrix_symmetric(self):
294 """Symmetrizes the correlator matrices on every timeslice."""
295 if self.N == 1:
296 raise ValueError("Trying to symmetrize a correlator matrix, that already has N=1.")
@@ -2057,7 +2057,7 @@
300 transposed = [None if _check_for_none(self, G) else G.T for G in self.content]
301 return 0.5 * (Corr(transposed) + self)
302
- 303 def GEVP(self, t0, ts=None, sort="Eigenvalue", vector_obs=False, **kwargs):
+ 303 def GEVP(self, t0, ts=None, sort="Eigenvalue", vector_obs=False, **kwargs):
304 r'''Solve the generalized eigenvalue problem on the correlator matrix and returns the corresponding eigenvectors.
305
306 The eigenvectors are sorted according to the descending eigenvalues, the zeroth eigenvector(s) correspond to the
@@ -2108,7 +2108,7 @@
351 else:
352 symmetric_corr = self.matrix_symmetric()
353
- 354 def _get_mat_at_t(t, vector_obs=vector_obs):
+ 354 def _get_mat_at_t(t, vector_obs=vector_obs):
355 if vector_obs:
356 return symmetric_corr[t]
357 else:
@@ -2163,7 +2163,7 @@
406 else:
407 return reordered_vecs
408
- 409 def Eigenvalue(self, t0, ts=None, state=0, sort="Eigenvalue", **kwargs):
+ 409 def Eigenvalue(self, t0, ts=None, state=0, sort="Eigenvalue", **kwargs):
410 """Determines the eigenvalue of the GEVP by solving and projecting the correlator
411
412 Parameters
@@ -2176,7 +2176,7 @@
419 vec = self.GEVP(t0, ts=ts, sort=sort, **kwargs)[state]
420 return self.projected(vec)
421
- 422 def Hankel(self, N, periodic=False):
+ 422 def Hankel(self, N, periodic=False):
423 """Constructs an NxN Hankel matrix
424
425 C(t) c(t+1) ... c(t+n-1)
@@ -2200,7 +2200,7 @@
443 for t in range(self.T):
444 new_content.append(array.copy())
445
- 446 def wrap(i):
+ 446 def wrap(i):
447 while i >= self.T:
448 i -= self.T
449 return i
@@ -2217,7 +2217,7 @@
460
461 return Corr(new_content)
462
- 463 def roll(self, dt):
+ 463 def roll(self, dt):
464 """Periodically shift the correlator by dt timeslices
465
466 Parameters
@@ -2227,11 +2227,11 @@
470 """
471 return Corr(list(np.roll(np.array(self.content, dtype=object), dt, axis=0)))
472
- 473 def reverse(self):
+ 473 def reverse(self):
474 """Reverse the time ordering of the Corr"""
475 return Corr(self.content[:: -1])
476
- 477 def thin(self, spacing=2, offset=0):
+ 477 def thin(self, spacing=2, offset=0):
478 """Thin out a correlator to suppress correlations
479
480 Parameters
@@ -2249,7 +2249,7 @@
492 new_content.append(self.content[t])
493 return Corr(new_content)
494
- 495 def correlate(self, partner):
+ 495 def correlate(self, partner):
496 """Correlate the correlator with another correlator or Obs
497
498 Parameters
@@ -2278,7 +2278,7 @@
521
522 return Corr(new_content)
523
- 524 def reweight(self, weight, **kwargs):
+ 524 def reweight(self, weight, **kwargs):
525 """Reweight the correlator.
526
527 Parameters
@@ -2301,7 +2301,7 @@
544 new_content.append(np.array(reweight(weight, t_slice, **kwargs)))
545 return Corr(new_content)
546
- 547 def T_symmetry(self, partner, parity=+1):
+ 547 def T_symmetry(self, partner, parity=+1):
548 """Return the time symmetry average of the correlator and its partner
549
550 Parameters
@@ -2331,7 +2331,7 @@
574
575 return (self + T_partner) / 2
576
- 577 def deriv(self, variant="symmetric"):
+ 577 def deriv(self, variant="symmetric"):
578 """Return the first derivative of the correlator with respect to x0.
579
580 Parameters
@@ -2396,7 +2396,7 @@
639 else:
640 raise ValueError("Unknown variant.")
641
- 642 def second_deriv(self, variant="symmetric"):
+ 642 def second_deriv(self, variant="symmetric"):
643 r"""Return the second derivative of the correlator with respect to x0.
644
645 Parameters
@@ -2459,7 +2459,7 @@
702 else:
703 raise ValueError("Unknown variant.")
704
- 705 def m_eff(self, variant='log', guess=1.0):
+ 705 def m_eff(self, variant='log', guess=1.0):
706 """Returns the effective mass of the correlator as correlator object
707
708 Parameters
@@ -2510,7 +2510,7 @@
753 else:
754 func = anp.sinh
755
- 756 def root_function(x, d):
+ 756 def root_function(x, d):
757 return func(x * (t - self.T / 2)) / func(x * (t + 1 - self.T / 2)) - d
758
759 newcontent = []
@@ -2543,7 +2543,7 @@
786 else:
787 raise ValueError('Unknown variant.')
788
- 789 def fit(self, function, fitrange=None, silent=False, **kwargs):
+ 789 def fit(self, function, fitrange=None, silent=False, **kwargs):
790 r'''Fits function to the data
791
792 Parameters
@@ -2577,7 +2577,7 @@
820 result = least_squares(xs, ys, function, silent=silent, **kwargs)
821 return result
822
- 823 def plateau(self, plateau_range=None, method="fit", auto_gamma=False):
+ 823 def plateau(self, plateau_range=None, method="fit", auto_gamma=False):
824 """ Extract a plateau value from a Corr object
825
826 Parameters
@@ -2604,7 +2604,7 @@
847 if auto_gamma:
848 self.gamma_method()
849 if method == "fit":
- 850 def const_func(a, t):
+ 850 def const_func(a, t):
851 return a[0]
852 return self.fit(const_func, plateau_range)[0]
853 elif method in ["avg", "average", "mean"]:
@@ -2614,7 +2614,7 @@
857 else:
858 raise ValueError("Unsupported plateau method: " + method)
859
- 860 def set_prange(self, prange):
+ 860 def set_prange(self, prange):
861 """Sets the attribute prange of the Corr object."""
862 if not len(prange) == 2:
863 raise ValueError("prange must be a list or array with two values")
@@ -2626,7 +2626,7 @@
869 self.prange = prange
870 return
871
- 872 def show(self, x_range=None, comp=None, y_range=None, logscale=False, plateau=None, fit_res=None, fit_key=None, ylabel=None, save=None, auto_gamma=False, hide_sigma=None, references=None, title=None):
+ 872 def show(self, x_range=None, comp=None, y_range=None, logscale=False, plateau=None, fit_res=None, fit_key=None, ylabel=None, save=None, auto_gamma=False, hide_sigma=None, references=None, title=None):
873 """Plots the correlator using the tag of the correlator as label if available.
874
875 Parameters
@@ -2751,7 +2751,7 @@
994 else:
995 raise TypeError("'save' has to be a string.")
996
- 997 def spaghetti_plot(self, logscale=True):
+ 997 def spaghetti_plot(self, logscale=True):
998 """Produces a spaghetti plot of the correlator suited to monitor exceptional configurations.
999
1000 Parameters
@@ -2780,7 +2780,7 @@
1023 plt.title(name)
1024 plt.draw()
1025
-1026 def dump(self, filename, datatype="json.gz", **kwargs):
+1026 def dump(self, filename, datatype="json.gz", **kwargs):
1027 """Dumps the Corr into a file of chosen type
1028 Parameters
1029 ----------
@@ -2793,7 +2793,7 @@
1036 specifies a custom path for the file (default '.')
1037 """
1038 if datatype == "json.gz":
-1039 from .input.json import dump_to_json
+1039 from .input.json import dump_to_json
1040 if 'path' in kwargs:
1041 file_name = kwargs.get('path') + '/' + filename
1042 else:
@@ -2804,10 +2804,10 @@
1047 else:
1048 raise ValueError("Unknown datatype " + str(datatype))
1049
-1050 def print(self, print_range=None):
+1050 def print(self, print_range=None):
1051 print(self.__repr__(print_range))
1052
-1053 def __repr__(self, print_range=None):
+1053 def __repr__(self, print_range=None):
1054 if print_range is None:
1055 print_range = [0, None]
1056
@@ -2832,7 +2832,7 @@
1075 content_string += '\n'
1076 return content_string
1077
-1078 def __str__(self):
+1078 def __str__(self):
1079 return self.__repr__()
1080
1081 # We define the basic operations, that can be performed with correlators.
@@ -2842,14 +2842,14 @@
1085
1086 __array_priority__ = 10000
1087
-1088 def __eq__(self, y):
+1088 def __eq__(self, y):
1089 if isinstance(y, Corr):
1090 comp = np.asarray(y.content, dtype=object)
1091 else:
1092 comp = np.asarray(y)
1093 return np.asarray(self.content, dtype=object) == comp
1094
-1095 def __add__(self, y):
+1095 def __add__(self, y):
1096 if isinstance(y, Corr):
1097 if ((self.N != y.N) or (self.T != y.T)):
1098 raise ValueError("Addition of Corrs with different shape")
@@ -2877,7 +2877,7 @@
1120 else:
1121 raise TypeError("Corr + wrong type")
1122
-1123 def __mul__(self, y):
+1123 def __mul__(self, y):
1124 if isinstance(y, Corr):
1125 if not ((self.N == 1 or y.N == 1 or self.N == y.N) and self.T == y.T):
1126 raise ValueError("Multiplication of Corr object requires N=N or N=1 and T=T")
@@ -2905,7 +2905,7 @@
1148 else:
1149 raise TypeError("Corr * wrong type")
1150
-1151 def __matmul__(self, y):
+1151 def __matmul__(self, y):
1152 if isinstance(y, np.ndarray):
1153 if y.ndim != 2 or y.shape[0] != y.shape[1]:
1154 raise ValueError("Can only multiply correlators by square matrices.")
@@ -2932,7 +2932,7 @@
1175 else:
1176 return NotImplemented
1177
-1178 def __rmatmul__(self, y):
+1178 def __rmatmul__(self, y):
1179 if isinstance(y, np.ndarray):
1180 if y.ndim != 2 or y.shape[0] != y.shape[1]:
1181 raise ValueError("Can only multiply correlators by square matrices.")
@@ -2948,7 +2948,7 @@
1191 else:
1192 return NotImplemented
1193
-1194 def __truediv__(self, y):
+1194 def __truediv__(self, y):
1195 if isinstance(y, Corr):
1196 if not ((self.N == 1 or y.N == 1 or self.N == y.N) and self.T == y.T):
1197 raise ValueError("Multiplication of Corr object requires N=N or N=1 and T=T")
@@ -3002,37 +3002,37 @@
1245 else:
1246 raise TypeError('Corr / wrong type')
1247
-1248 def __neg__(self):
+1248 def __neg__(self):
1249 newcontent = [None if _check_for_none(self, item) else -1. * item for item in self.content]
1250 return Corr(newcontent, prange=self.prange)
1251
-1252 def __sub__(self, y):
+1252 def __sub__(self, y):
1253 return self + (-y)
1254
-1255 def __pow__(self, y):
+1255 def __pow__(self, y):
1256 if isinstance(y, (Obs, int, float, CObs)):
1257 newcontent = [None if _check_for_none(self, item) else item**y for item in self.content]
1258 return Corr(newcontent, prange=self.prange)
1259 else:
1260 raise TypeError('Type of exponent not supported')
1261
-1262 def __abs__(self):
+1262 def __abs__(self):
1263 newcontent = [None if _check_for_none(self, item) else np.abs(item) for item in self.content]
1264 return Corr(newcontent, prange=self.prange)
1265
1266 # The numpy functions:
-1267 def sqrt(self):
+1267 def sqrt(self):
1268 return self ** 0.5
1269
-1270 def log(self):
+1270 def log(self):
1271 newcontent = [None if _check_for_none(self, item) else np.log(item) for item in self.content]
1272 return Corr(newcontent, prange=self.prange)
1273
-1274 def exp(self):
+1274 def exp(self):
1275 newcontent = [None if _check_for_none(self, item) else np.exp(item) for item in self.content]
1276 return Corr(newcontent, prange=self.prange)
1277
-1278 def _apply_func_to_corr(self, func):
+1278 def _apply_func_to_corr(self, func):
1279 newcontent = [None if _check_for_none(self, item) else func(item) for item in self.content]
1280 for t in range(self.T):
1281 if _check_for_none(self, newcontent[t]):
@@ -3045,58 +3045,58 @@
1288 raise ValueError('Operation returns undefined correlator')
1289 return Corr(newcontent)
1290
-1291 def sin(self):
+1291 def sin(self):
1292 return self._apply_func_to_corr(np.sin)
1293
-1294 def cos(self):
+1294 def cos(self):
1295 return self._apply_func_to_corr(np.cos)
1296
-1297 def tan(self):
+1297 def tan(self):
1298 return self._apply_func_to_corr(np.tan)
1299
-1300 def sinh(self):
+1300 def sinh(self):
1301 return self._apply_func_to_corr(np.sinh)
1302
-1303 def cosh(self):
+1303 def cosh(self):
1304 return self._apply_func_to_corr(np.cosh)
1305
-1306 def tanh(self):
+1306 def tanh(self):
1307 return self._apply_func_to_corr(np.tanh)
1308
-1309 def arcsin(self):
+1309 def arcsin(self):
1310 return self._apply_func_to_corr(np.arcsin)
1311
-1312 def arccos(self):
+1312 def arccos(self):
1313 return self._apply_func_to_corr(np.arccos)
1314
-1315 def arctan(self):
+1315 def arctan(self):
1316 return self._apply_func_to_corr(np.arctan)
1317
-1318 def arcsinh(self):
+1318 def arcsinh(self):
1319 return self._apply_func_to_corr(np.arcsinh)
1320
-1321 def arccosh(self):
+1321 def arccosh(self):
1322 return self._apply_func_to_corr(np.arccosh)
1323
-1324 def arctanh(self):
+1324 def arctanh(self):
1325 return self._apply_func_to_corr(np.arctanh)
1326
1327 # Right hand side operations (require tweak in main module to work)
-1328 def __radd__(self, y):
+1328 def __radd__(self, y):
1329 return self + y
1330
-1331 def __rsub__(self, y):
+1331 def __rsub__(self, y):
1332 return -self + y
1333
-1334 def __rmul__(self, y):
+1334 def __rmul__(self, y):
1335 return self * y
1336
-1337 def __rtruediv__(self, y):
+1337 def __rtruediv__(self, y):
1338 return (self / y) ** (-1)
1339
1340 @property
-1341 def real(self):
-1342 def return_real(obs_OR_cobs):
+1341 def real(self):
+1342 def return_real(obs_OR_cobs):
1343 if isinstance(obs_OR_cobs.flatten()[0], CObs):
1344 return np.vectorize(lambda x: x.real)(obs_OR_cobs)
1345 else:
@@ -3105,8 +3105,8 @@
1348 return self._apply_func_to_corr(return_real)
1349
1350 @property
-1351 def imag(self):
-1352 def return_imag(obs_OR_cobs):
+1351 def imag(self):
+1352 def return_imag(obs_OR_cobs):
1353 if isinstance(obs_OR_cobs.flatten()[0], CObs):
1354 return np.vectorize(lambda x: x.imag)(obs_OR_cobs)
1355 else:
@@ -3114,7 +3114,7 @@
1357
1358 return self._apply_func_to_corr(return_imag)
1359
-1360 def prune(self, Ntrunc, tproj=3, t0proj=2, basematrix=None):
+1360 def prune(self, Ntrunc, tproj=3, t0proj=2, basematrix=None):
1361 r''' Project large correlation matrix to lowest states
1362
1363 This method can be used to reduce the size of an (N x N) correlation matrix
@@ -3218,7 +3218,7 @@ the temporal extent of the correlator and N is the dimension of the matrix.
- 46 def __init__(self, data_input, padding=[0, 0], prange=None):
+ 46 def __init__(self, data_input, padding=[0, 0], prange=None):
47 """ Initialize a Corr object.
48
49 Parameters
@@ -3370,7 +3370,7 @@ region identified for this correlator.
132 @property
-133 def reweighted(self):
+133 def reweighted(self):
134 bool_array = np.array([list(map(lambda x: x.reweighted, o)) for o in [x for x in self.content if x is not None]])
135 if np.all(bool_array == 1):
136 return True
@@ -3395,7 +3395,7 @@ region identified for this correlator.
- 142 def gamma_method(self, **kwargs):
+ 142 def gamma_method(self, **kwargs):
143 """Apply the gamma method to the content of the Corr."""
144 for item in self.content:
145 if item is not None:
@@ -3424,7 +3424,7 @@ region identified for this correlator.
- 142 def gamma_method(self, **kwargs):
+ 142 def gamma_method(self, **kwargs):
143 """Apply the gamma method to the content of the Corr."""
144 for item in self.content:
145 if item is not None:
@@ -3453,7 +3453,7 @@ region identified for this correlator.
- 155 def projected(self, vector_l=None, vector_r=None, normalize=False):
+ 155 def projected(self, vector_l=None, vector_r=None, normalize=False):
156 """We need to project the Correlator with a Vector to get a single value at each timeslice.
157
158 The method can use one or two vectors.
@@ -3514,7 +3514,7 @@ By default it will return the lowest source, which usually means unsmeared-unsme
- 194 def item(self, i, j):
+ 194 def item(self, i, j):
195 """Picks the element [i,j] from every matrix and returns a correlator containing one Obs per timeslice.
196
197 Parameters
@@ -3556,7 +3556,7 @@ Second index to be picked.
- 209 def plottable(self):
+ 209 def plottable(self):
210 """Outputs the correlator in a plotable format.
211
212 Outputs three lists containing the timeslice index, the value on each
@@ -3591,7 +3591,7 @@ timeslice and the error on each timeslice.
- 223 def symmetric(self):
+ 223 def symmetric(self):
224 """ Symmetrize the correlator around x0=0."""
225 if self.N != 1:
226 raise ValueError('symmetric cannot be safely applied to multi-dimensional correlators.')
@@ -3630,7 +3630,7 @@ timeslice and the error on each timeslice.
- 244 def anti_symmetric(self):
+ 244 def anti_symmetric(self):
245 """Anti-symmetrize the correlator around x0=0."""
246 if self.N != 1:
247 raise TypeError('anti_symmetric cannot be safely applied to multi-dimensional correlators.')
@@ -3670,7 +3670,7 @@ timeslice and the error on each timeslice.
- 266 def is_matrix_symmetric(self):
+ 266 def is_matrix_symmetric(self):
267 """Checks whether a correlator matrices is symmetric on every timeslice."""
268 if self.N == 1:
269 raise TypeError("Only works for correlator matrices.")
@@ -3703,7 +3703,7 @@ timeslice and the error on each timeslice.
- 281 def trace(self):
+ 281 def trace(self):
282 """Calculates the per-timeslice trace of a correlator matrix."""
283 if self.N == 1:
284 raise ValueError("Only works for correlator matrices.")
@@ -3733,7 +3733,7 @@ timeslice and the error on each timeslice.
- 293 def matrix_symmetric(self):
+ 293 def matrix_symmetric(self):
294 """Symmetrizes the correlator matrices on every timeslice."""
295 if self.N == 1:
296 raise ValueError("Trying to symmetrize a correlator matrix, that already has N=1.")
@@ -3761,7 +3761,7 @@ timeslice and the error on each timeslice.
- 303 def GEVP(self, t0, ts=None, sort="Eigenvalue", vector_obs=False, **kwargs):
+ 303 def GEVP(self, t0, ts=None, sort="Eigenvalue", vector_obs=False, **kwargs):
304 r'''Solve the generalized eigenvalue problem on the correlator matrix and returns the corresponding eigenvectors.
305
306 The eigenvectors are sorted according to the descending eigenvalues, the zeroth eigenvector(s) correspond to the
@@ -3812,7 +3812,7 @@ timeslice and the error on each timeslice.
351 else:
352 symmetric_corr = self.matrix_symmetric()
353
-354 def _get_mat_at_t(t, vector_obs=vector_obs):
+354 def _get_mat_at_t(t, vector_obs=vector_obs):
355 if vector_obs:
356 return symmetric_corr[t]
357 else:
@@ -3927,7 +3927,7 @@ Method used to solve the GEVP.
- 409 def Eigenvalue(self, t0, ts=None, state=0, sort="Eigenvalue", **kwargs):
+ 409 def Eigenvalue(self, t0, ts=None, state=0, sort="Eigenvalue", **kwargs):
410 """Determines the eigenvalue of the GEVP by solving and projecting the correlator
411
412 Parameters
@@ -3966,7 +3966,7 @@ The state one is interested in ordered by energy. The lowest state is zero.
- 422 def Hankel(self, N, periodic=False):
+ 422 def Hankel(self, N, periodic=False):
423 """Constructs an NxN Hankel matrix
424
425 C(t) c(t+1) ... c(t+n-1)
@@ -3990,7 +3990,7 @@ The state one is interested in ordered by energy. The lowest state is zero.
443 for t in range(self.T):
444 new_content.append(array.copy())
445
-446 def wrap(i):
+446 def wrap(i):
447 while i >= self.T:
448 i -= self.T
449 return i
@@ -4039,7 +4039,7 @@ determines whether the matrix is extended periodically
- 463 def roll(self, dt):
+ 463 def roll(self, dt):
464 """Periodically shift the correlator by dt timeslices
465
466 Parameters
@@ -4074,7 +4074,7 @@ number of timeslices
- 473 def reverse(self):
+
@@ -4096,7 +4096,7 @@ number of timeslices
- 477 def thin(self, spacing=2, offset=0):
+ 477 def thin(self, spacing=2, offset=0):
478 """Thin out a correlator to suppress correlations
479
480 Parameters
@@ -4141,7 +4141,7 @@ Offset the equal spacing
- 495 def correlate(self, partner):
+ 495 def correlate(self, partner):
496 """Correlate the correlator with another correlator or Obs
497
498 Parameters
@@ -4197,7 +4197,7 @@ correlator or a Corr of same length.
- 524 def reweight(self, weight, **kwargs):
+ 524 def reweight(self, weight, **kwargs):
525 """Reweight the correlator.
526
527 Parameters
@@ -4250,7 +4250,7 @@ on the configurations in obs[i].idl.
- 547 def T_symmetry(self, partner, parity=+1):
+ 547 def T_symmetry(self, partner, parity=+1):
548 """Return the time symmetry average of the correlator and its partner
549
550 Parameters
@@ -4307,7 +4307,7 @@ Parity quantum number of the correlator, can be +1 or -1
- 577 def deriv(self, variant="symmetric"):
+ 577 def deriv(self, variant="symmetric"):
578 """Return the first derivative of the correlator with respect to x0.
579
580 Parameters
@@ -4398,7 +4398,7 @@ Available choice: symmetric, forward, backward, improved, log, default: symmetri
- 642 def second_deriv(self, variant="symmetric"):
+ 642 def second_deriv(self, variant="symmetric"):
643 r"""Return the second derivative of the correlator with respect to x0.
644
645 Parameters
@@ -4495,7 +4495,7 @@ Available choice:
- 705 def m_eff(self, variant='log', guess=1.0):
+ 705 def m_eff(self, variant='log', guess=1.0):
706 """Returns the effective mass of the correlator as correlator object
707
708 Parameters
@@ -4546,7 +4546,7 @@ Available choice:
753 else:
754 func = anp.sinh
755
-756 def root_function(x, d):
+756 def root_function(x, d):
757 return func(x * (t - self.T / 2)) / func(x * (t + 1 - self.T / 2)) - d
758
759 newcontent = []
@@ -4611,7 +4611,7 @@ guess for the root finder, only relevant for the root variant
- 789 def fit(self, function, fitrange=None, silent=False, **kwargs):
+ 789 def fit(self, function, fitrange=None, silent=False, **kwargs):
790 r'''Fits function to the data
791
792 Parameters
@@ -4677,7 +4677,7 @@ Decides whether output is printed to the standard output.
- 823 def plateau(self, plateau_range=None, method="fit", auto_gamma=False):
+ 823 def plateau(self, plateau_range=None, method="fit", auto_gamma=False):
824 """ Extract a plateau value from a Corr object
825
826 Parameters
@@ -4704,7 +4704,7 @@ Decides whether output is printed to the standard output.
847 if auto_gamma:
848 self.gamma_method()
849 if method == "fit":
-850 def const_func(a, t):
+850 def const_func(a, t):
851 return a[0]
852 return self.fit(const_func, plateau_range)[0]
853 elif method in ["avg", "average", "mean"]:
@@ -4746,7 +4746,7 @@ apply gamma_method with default parameters to the Corr. Defaults to None
- 860 def set_prange(self, prange):
+ 860 def set_prange(self, prange):
861 """Sets the attribute prange of the Corr object."""
862 if not len(prange) == 2:
863 raise ValueError("prange must be a list or array with two values")
@@ -4776,7 +4776,7 @@ apply gamma_method with default parameters to the Corr. Defaults to None
- 872 def show(self, x_range=None, comp=None, y_range=None, logscale=False, plateau=None, fit_res=None, fit_key=None, ylabel=None, save=None, auto_gamma=False, hide_sigma=None, references=None, title=None):
+ 872 def show(self, x_range=None, comp=None, y_range=None, logscale=False, plateau=None, fit_res=None, fit_key=None, ylabel=None, save=None, auto_gamma=False, hide_sigma=None, references=None, title=None):
873 """Plots the correlator using the tag of the correlator as label if available.
874
875 Parameters
@@ -4949,7 +4949,7 @@ Optional title of the figure.
- 997 def spaghetti_plot(self, logscale=True):
+ 997 def spaghetti_plot(self, logscale=True):
998 """Produces a spaghetti plot of the correlator suited to monitor exceptional configurations.
999
1000 Parameters
@@ -5003,7 +5003,7 @@ Determines whether the scale of the y-axis is logarithmic or standard.
- 1026 def dump(self, filename, datatype="json.gz", **kwargs):
+ 1026 def dump(self, filename, datatype="json.gz", **kwargs):
1027 """Dumps the Corr into a file of chosen type
1028 Parameters
1029 ----------
@@ -5016,7 +5016,7 @@ Determines whether the scale of the y-axis is logarithmic or standard.
1036 specifies a custom path for the file (default '.')
1037 """
1038 if datatype == "json.gz":
-1039 from .input.json import dump_to_json
+1039 from .input.json import dump_to_json
1040 if 'path' in kwargs:
1041 file_name = kwargs.get('path') + '/' + filename
1042 else:
@@ -5057,7 +5057,7 @@ specifies a custom path for the file (default '.')
- 1050 def print(self, print_range=None):
+
@@ -5076,7 +5076,7 @@ specifies a custom path for the file (default '.')
- 1267 def sqrt(self):
+
@@ -5095,7 +5095,7 @@ specifies a custom path for the file (default '.')
- 1270 def log(self):
+
@@ -5115,7 +5115,7 @@ specifies a custom path for the file (default '.')
- 1274 def exp(self):
+
@@ -5135,7 +5135,7 @@ specifies a custom path for the file (default '.')
- 1291 def sin(self):
+
@@ -5154,7 +5154,7 @@ specifies a custom path for the file (default '.')
- 1294 def cos(self):
+
@@ -5173,7 +5173,7 @@ specifies a custom path for the file (default '.')
- 1297 def tan(self):
+
@@ -5192,7 +5192,7 @@ specifies a custom path for the file (default '.')
- 1300 def sinh(self):
+
@@ -5211,7 +5211,7 @@ specifies a custom path for the file (default '.')
- 1303 def cosh(self):
+
@@ -5230,7 +5230,7 @@ specifies a custom path for the file (default '.')
- 1306 def tanh(self):
+
@@ -5249,7 +5249,7 @@ specifies a custom path for the file (default '.')
- 1309 def arcsin(self):
+
@@ -5268,7 +5268,7 @@ specifies a custom path for the file (default '.')
- 1312 def arccos(self):
+
@@ -5287,7 +5287,7 @@ specifies a custom path for the file (default '.')
- 1315 def arctan(self):
+
@@ -5306,7 +5306,7 @@ specifies a custom path for the file (default '.')
- 1318 def arcsinh(self):
+
@@ -5325,7 +5325,7 @@ specifies a custom path for the file (default '.')
- 1321 def arccosh(self):
+
@@ -5344,7 +5344,7 @@ specifies a custom path for the file (default '.')
- 1324 def arctanh(self):
+
@@ -5362,8 +5362,8 @@ specifies a custom path for the file (default '.')
1340 @property
-1341 def real(self):
-1342 def return_real(obs_OR_cobs):
+1341 def real(self):
+1342 def return_real(obs_OR_cobs):
1343 if isinstance(obs_OR_cobs.flatten()[0], CObs):
1344 return np.vectorize(lambda x: x.real)(obs_OR_cobs)
1345 else:
@@ -5386,8 +5386,8 @@ specifies a custom path for the file (default '.')
1350 @property
-1351 def imag(self):
-1352 def return_imag(obs_OR_cobs):
+1351 def imag(self):
+1352 def return_imag(obs_OR_cobs):
1353 if isinstance(obs_OR_cobs.flatten()[0], CObs):
1354 return np.vectorize(lambda x: x.imag)(obs_OR_cobs)
1355 else:
@@ -5411,7 +5411,7 @@ specifies a custom path for the file (default '.')
- 1360 def prune(self, Ntrunc, tproj=3, t0proj=2, basematrix=None):
+ 1360 def prune(self, Ntrunc, tproj=3, t0proj=2, basematrix=None):
1361 r''' Project large correlation matrix to lowest states
1362
1363 This method can be used to reduce the size of an (N x N) correlation matrix
diff --git a/docs/pyerrors/covobs.html b/docs/pyerrors/covobs.html
index 9b20b705..80a4ef5f 100644
--- a/docs/pyerrors/covobs.html
+++ b/docs/pyerrors/covobs.html
@@ -97,12 +97,12 @@
- 1import numpy as np
+ 1import numpy as np
2
3
- 4class Covobs:
+ 4class Covobs:
5
- 6 def __init__(self, mean, cov, name, pos=None, grad=None):
+ 6 def __init__(self, mean, cov, name, pos=None, grad=None):
7 """ Initialize Covobs object.
8
9 Parameters
@@ -138,12 +138,12 @@
39 self._set_grad(grad)
40 self.value = mean
41
- 42 def errsq(self):
+ 42 def errsq(self):
43 """ Return the variance (= square of the error) of the Covobs
44 """
45 return np.dot(np.transpose(self.grad), np.dot(self.cov, self.grad)).item()
46
- 47 def _set_cov(self, cov):
+ 47 def _set_cov(self, cov):
48 """ Set the covariance matrix of the covobs
49
50 Parameters
@@ -178,7 +178,7 @@
79 if ev < 0:
80 raise Exception('Covariance matrix is not positive-semidefinite!')
81
- 82 def _set_grad(self, grad):
+ 82 def _set_grad(self, grad):
83 """ Set the gradient of the covobs
84
85 Parameters
@@ -195,11 +195,11 @@
96 raise Exception('Invalid dimension of grad!')
97
98 @property
- 99 def cov(self):
+ 99 def cov(self):
100 return self._cov
101
102 @property
-103 def grad(self):
+103 def grad(self):
104 return self._grad
@@ -216,9 +216,9 @@
- 5class Covobs:
+ 5class Covobs:
6
- 7 def __init__(self, mean, cov, name, pos=None, grad=None):
+ 7 def __init__(self, mean, cov, name, pos=None, grad=None):
8 """ Initialize Covobs object.
9
10 Parameters
@@ -254,12 +254,12 @@
40 self._set_grad(grad)
41 self.value = mean
42
- 43 def errsq(self):
+ 43 def errsq(self):
44 """ Return the variance (= square of the error) of the Covobs
45 """
46 return np.dot(np.transpose(self.grad), np.dot(self.cov, self.grad)).item()
47
- 48 def _set_cov(self, cov):
+ 48 def _set_cov(self, cov):
49 """ Set the covariance matrix of the covobs
50
51 Parameters
@@ -294,7 +294,7 @@
80 if ev < 0:
81 raise Exception('Covariance matrix is not positive-semidefinite!')
82
- 83 def _set_grad(self, grad):
+ 83 def _set_grad(self, grad):
84 """ Set the gradient of the covobs
85
86 Parameters
@@ -311,11 +311,11 @@
97 raise Exception('Invalid dimension of grad!')
98
99 @property
-100 def cov(self):
+100 def cov(self):
101 return self._cov
102
103 @property
-104 def grad(self):
+104 def grad(self):
105 return self._grad
@@ -332,7 +332,7 @@
- 7 def __init__(self, mean, cov, name, pos=None, grad=None):
+ 7 def __init__(self, mean, cov, name, pos=None, grad=None):
8 """ Initialize Covobs object.
9
10 Parameters
@@ -424,7 +424,7 @@ Gradient of the Covobs wrt. the means belonging to cov.
- 43 def errsq(self):
+ 43 def errsq(self):
44 """ Return the variance (= square of the error) of the Covobs
45 """
46 return np.dot(np.transpose(self.grad), np.dot(self.cov, self.grad)).item()
@@ -446,7 +446,7 @@ Gradient of the Covobs wrt. the means belonging to cov.
@@ -464,7 +464,7 @@ Gradient of the Covobs wrt. the means belonging to cov.
diff --git a/docs/pyerrors/dirac.html b/docs/pyerrors/dirac.html
index 49b94a06..450c573a 100644
--- a/docs/pyerrors/dirac.html
+++ b/docs/pyerrors/dirac.html
@@ -103,7 +103,7 @@
- 1import numpy as np
+ 1import numpy as np
2
3
4gammaX = np.array(
@@ -127,7 +127,7 @@
22 dtype=complex)
23
24
-25def epsilon_tensor(i, j, k):
+25def epsilon_tensor(i, j, k):
26 """Rank-3 epsilon tensor
27
28 Based on https://codegolf.stackexchange.com/a/160375
@@ -144,7 +144,7 @@
39 return (i - j) * (j - k) * (k - i) / 2
40
41
-42def epsilon_tensor_rank4(i, j, k, o):
+42def epsilon_tensor_rank4(i, j, k, o):
43 """Rank-4 epsilon tensor
44
45 Extension of https://codegolf.stackexchange.com/a/160375
@@ -162,7 +162,7 @@
57 return (i - j) * (j - k) * (k - i) * (i - o) * (j - o) * (o - k) / 12
58
59
-60def Grid_gamma(gamma_tag):
+60def Grid_gamma(gamma_tag):
61 """Returns gamma matrix in Grid labeling."""
62 if gamma_tag == 'Identity':
63 g = identity
@@ -341,7 +341,7 @@
- 26def epsilon_tensor(i, j, k):
+ 26def epsilon_tensor(i, j, k):
27 """Rank-3 epsilon tensor
28
29 Based on https://codegolf.stackexchange.com/a/160375
@@ -384,7 +384,7 @@ Element (i,j,k) of the epsilon tensor of rank 3
- 43def epsilon_tensor_rank4(i, j, k, o):
+ 43def epsilon_tensor_rank4(i, j, k, o):
44 """Rank-4 epsilon tensor
45
46 Extension of https://codegolf.stackexchange.com/a/160375
@@ -428,7 +428,7 @@ Element (i,j,k,o) of the epsilon tensor of rank 4
- 61def Grid_gamma(gamma_tag):
+ 61def Grid_gamma(gamma_tag):
62 """Returns gamma matrix in Grid labeling."""
63 if gamma_tag == 'Identity':
64 g = identity
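As a quick sanity check of the closed-form expressions above, the following sketch evaluates the rank-3 epsilon tensor for a few index triples and requests one matrix by its Grid tag; only functions shown in this listing are used:

from pyerrors.dirac import epsilon_tensor, Grid_gamma

print(epsilon_tensor(0, 1, 2))   # +1.0, even permutation: (i-j)(j-k)(k-i)/2
print(epsilon_tensor(1, 0, 2))   # -1.0, odd permutation
print(epsilon_tensor(0, 0, 2))   #  0.0, repeated index

gamma_id = Grid_gamma('Identity')  # gamma matrix in Grid labeling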
diff --git a/docs/pyerrors/fits.html b/docs/pyerrors/fits.html
index 21620dbb..63d7aae4 100644
--- a/docs/pyerrors/fits.html
+++ b/docs/pyerrors/fits.html
@@ -109,26 +109,26 @@
- 1import gc
- 2from collections.abc import Sequence
- 3import warnings
- 4import numpy as np
- 5import autograd.numpy as anp
- 6import scipy.optimize
- 7import scipy.stats
- 8import matplotlib.pyplot as plt
- 9from matplotlib import gridspec
- 10from scipy.odr import ODR, Model, RealData
- 11import iminuit
- 12from autograd import jacobian as auto_jacobian
- 13from autograd import hessian as auto_hessian
- 14from autograd import elementwise_grad as egrad
- 15from numdifftools import Jacobian as num_jacobian
- 16from numdifftools import Hessian as num_hessian
- 17from .obs import Obs, derived_observable, covariance, cov_Obs, invert_corr_cov_cholesky
+ 1import gc
+ 2from collections.abc import Sequence
+ 3import warnings
+ 4import numpy as np
+ 5import autograd.numpy as anp
+ 6import scipy.optimize
+ 7import scipy.stats
+ 8import matplotlib.pyplot as plt
+ 9from matplotlib import gridspec
+ 10from scipy.odr import ODR, Model, RealData
+ 11import iminuit
+ 12from autograd import jacobian as auto_jacobian
+ 13from autograd import hessian as auto_hessian
+ 14from autograd import elementwise_grad as egrad
+ 15from numdifftools import Jacobian as num_jacobian
+ 16from numdifftools import Hessian as num_hessian
+ 17from .obs import Obs, derived_observable, covariance, cov_Obs, invert_corr_cov_cholesky
18
19
- 20class Fit_result(Sequence):
+ 20class Fit_result(Sequence):
21 """Represents fit results.
22
23 Attributes
@@ -144,22 +144,22 @@
33 Hotelling t-squared p-value for correlated fits.
34 """
35
- 36 def __init__(self):
+ 36 def __init__(self):
37 self.fit_parameters = None
38
- 39 def __getitem__(self, idx):
+ 39 def __getitem__(self, idx):
40 return self.fit_parameters[idx]
41
- 42 def __len__(self):
+ 42 def __len__(self):
43 return len(self.fit_parameters)
44
- 45 def gamma_method(self, **kwargs):
+ 45 def gamma_method(self, **kwargs):
46 """Apply the gamma method to all fit parameters"""
47 [o.gamma_method(**kwargs) for o in self.fit_parameters]
48
49 gm = gamma_method
50
- 51 def __str__(self):
+ 51 def __str__(self):
52 my_str = 'Goodness of fit:\n'
53 if hasattr(self, 'chisquare_by_dof'):
54 my_str += '\u03C7\u00b2/d.o.f. = ' + f'{self.chisquare_by_dof:2.6f}' + '\n'
@@ -176,12 +176,12 @@
65 my_str += str(i_par) + '\t' + ' ' * int(par >= 0) + str(par).rjust(int(par < 0.0)) + '\n'
66 return my_str
67
- 68 def __repr__(self):
+ 68 def __repr__(self):
69 m = max(map(len, list(self.__dict__.keys()))) + 1
70 return '\n'.join([key.rjust(m) + ': ' + repr(value) for key, value in sorted(self.__dict__.items())])
71
72
- 73def least_squares(x, y, func, priors=None, silent=False, **kwargs):
+ 73def least_squares(x, y, func, priors=None, silent=False, **kwargs):
74 r'''Performs a non-linear fit to y = func(x).
75 ```
76
@@ -455,15 +455,15 @@
344 x0 = [0.1] * n_parms
345
346 if priors is None:
-347 def general_chisqfunc_uncorr(p, ivars, pr):
+347 def general_chisqfunc_uncorr(p, ivars, pr):
348 model = anp.concatenate([anp.array(funcd[key](p, xd[key])).reshape(-1) for key in key_ls])
349 return (ivars - model) / dy_f
350 else:
-351 def general_chisqfunc_uncorr(p, ivars, pr):
+351 def general_chisqfunc_uncorr(p, ivars, pr):
352 model = anp.concatenate([anp.array(funcd[key](p, xd[key])).reshape(-1) for key in key_ls])
353 return anp.concatenate(((ivars - model) / dy_f, (p[prior_mask] - pr) / dp_f))
354
-355 def chisqfunc_uncorr(p):
+355 def chisqfunc_uncorr(p):
356 return anp.sum(general_chisqfunc_uncorr(p, y_f, p_f) ** 2)
357
358 if kwargs.get('correlated_fit') is True:
@@ -481,11 +481,11 @@
370 inverrdiag = np.diag(1 / np.asarray(dy_f))
371 chol_inv = invert_corr_cov_cholesky(corr, inverrdiag)
372
-373 def general_chisqfunc(p, ivars, pr):
+373 def general_chisqfunc(p, ivars, pr):
374 model = anp.concatenate([anp.array(funcd[key](p, xd[key])).reshape(-1) for key in key_ls])
375 return anp.concatenate((anp.dot(chol_inv, (ivars - model)), (p[prior_mask] - pr) / dp_f))
376
-377 def chisqfunc(p):
+377 def chisqfunc(p):
378 return anp.sum(general_chisqfunc(p, y_f, p_f) ** 2)
379 else:
380 general_chisqfunc = general_chisqfunc_uncorr
@@ -519,12 +519,12 @@
408 if 'tol' in kwargs:
409 print('tol cannot be set for Levenberg-Marquardt')
410
-411 def chisqfunc_residuals_uncorr(p):
+411 def chisqfunc_residuals_uncorr(p):
412 return general_chisqfunc_uncorr(p, y_f, p_f)
413
414 fit_result = scipy.optimize.least_squares(chisqfunc_residuals_uncorr, x0, method='lm', ftol=1e-15, gtol=1e-15, xtol=1e-15)
415 if kwargs.get('correlated_fit') is True:
-416 def chisqfunc_residuals(p):
+416 def chisqfunc_residuals(p):
417 return general_chisqfunc(p, y_f, p_f)
418
419 fit_result = scipy.optimize.least_squares(chisqfunc_residuals, fit_result.x, method='lm', ftol=1e-15, gtol=1e-15, xtol=1e-15)
@@ -551,7 +551,7 @@
440 print('chisquare/d.o.f.:', output.chisquare_by_dof)
441 print('fit parameters', fit_result.x)
442
-443 def prepare_hat_matrix():
+443 def prepare_hat_matrix():
444 hat_vector = []
445 for key in key_ls:
446 if (len(xd[key]) != 0):
@@ -576,11 +576,11 @@
465 try:
466 hess = hessian(chisqfunc)(fitp)
467 except TypeError:
-468 raise Exception("It is required to use autograd.numpy instead of numpy within fit functions, see the documentation for details.") from None
+468 raise Exception("It is required to use autograd.numpy instead of numpy within fit functions, see the documentation for details.") from None
469
470 len_y = len(y_f)
471
-472 def chisqfunc_compact(d):
+472 def chisqfunc_compact(d):
473 return anp.sum(general_chisqfunc(d[:n_parms], d[n_parms: n_parms + len_y], d[n_parms + len_y:]) ** 2)
474
475 jac_jac_y = hessian(chisqfunc_compact)(np.concatenate((fitp, y_f, p_f)))
@@ -614,7 +614,7 @@
503 return output
504
505
-506def total_least_squares(x, y, func, silent=False, **kwargs):
+506def total_least_squares(x, y, func, silent=False, **kwargs):
507 r'''Performs a non-linear fit to y = func(x) and returns a list of Obs corresponding to the fit parameters.
508
509 Parameters
@@ -742,7 +742,7 @@
631
632 m = x_f.size
633
-634 def odr_chisquare(p):
+634 def odr_chisquare(p):
635 model = func(p[:n_parms], p[n_parms:].reshape(x_shape))
636 chisq = anp.sum(((y_f - model) / dy_f) ** 2) + anp.sum(((x_f - p[n_parms:].reshape(x_shape)) / dx_f) ** 2)
637 return chisq
@@ -777,9 +777,9 @@
666 try:
667 hess = hessian(odr_chisquare)(np.concatenate((fitp, out.xplus.ravel())))
668 except TypeError:
-669 raise Exception("It is required to use autograd.numpy instead of numpy within fit functions, see the documentation for details.") from None
+669 raise Exception("It is required to use autograd.numpy instead of numpy within fit functions, see the documentation for details.") from None
670
-671 def odr_chisquare_compact_x(d):
+671 def odr_chisquare_compact_x(d):
672 model = func(d[:n_parms], d[n_parms:n_parms + m].reshape(x_shape))
673 chisq = anp.sum(((y_f - model) / dy_f) ** 2) + anp.sum(((d[n_parms + m:].reshape(x_shape) - d[n_parms:n_parms + m].reshape(x_shape)) / dx_f) ** 2)
674 return chisq
@@ -792,7 +792,7 @@
681 except np.linalg.LinAlgError:
682 raise Exception("Cannot invert hessian matrix.")
683
-684 def odr_chisquare_compact_y(d):
+684 def odr_chisquare_compact_y(d):
685 model = func(d[:n_parms], d[n_parms:n_parms + m].reshape(x_shape))
686 chisq = anp.sum(((d[n_parms + m:] - model) / dy_f) ** 2) + anp.sum(((x_f - d[n_parms:n_parms + m].reshape(x_shape)) / dx_f) ** 2)
687 return chisq
@@ -818,7 +818,7 @@
707 return output
708
709
-710def fit_lin(x, y, **kwargs):
+710def fit_lin(x, y, **kwargs):
711 """Performs a linear fit to y = n + m * x and returns two Obs n, m.
712
713 Parameters
@@ -835,7 +835,7 @@
724 List of fitted observables.
725 """
726
-727 def f(a, x):
+727 def f(a, x):
728 y = a[0] + a[1] * x
729 return y
730
@@ -849,7 +849,7 @@
738 raise TypeError('Unsupported types for x')
739
740
-741def qqplot(x, o_y, func, p, title=""):
+741def qqplot(x, o_y, func, p, title=""):
742 """Generates a quantile-quantile plot of the fit result which can be used to
743 check if the residuals of the fit are gaussian distributed.
744
@@ -879,7 +879,7 @@
768 plt.draw()
769
770
-771def residual_plot(x, y, func, fit_res, title=""):
+771def residual_plot(x, y, func, fit_res, title=""):
772 """Generates a plot which compares the fit to the data and displays the corresponding residuals
773
774 For uncorrelated data the residuals are expected to be distributed ~N(0,1).
@@ -916,7 +916,7 @@
805 plt.draw()
806
807
-808def error_band(x, func, beta):
+808def error_band(x, func, beta):
809 """Calculate the error band for an array of sample values x, for given fit function func with optimized parameters beta.
810
811 Returns
@@ -940,7 +940,7 @@
829 return err
830
831
-832def ks_test(objects=None):
+832def ks_test(objects=None):
833 """Performs a Kolmogorov–Smirnov test for the p-values of all fit objects.
834
835 Parameters
@@ -984,7 +984,7 @@
873 print(scipy.stats.kstest(p_values, 'uniform'))
874
875
-876def _extract_val_and_dval(string):
+876def _extract_val_and_dval(string):
877 split_string = string.split('(')
878 if '.' in split_string[0] and '.' not in split_string[1][:-1]:
879 factor = 10 ** -len(split_string[0].partition('.')[2])
@@ -993,7 +993,7 @@
882 return float(split_string[0]), float(split_string[1][:-1]) * factor
883
884
-885def _construct_prior_obs(i_prior, i_n):
+885def _construct_prior_obs(i_prior, i_n):
886 if isinstance(i_prior, Obs):
887 return i_prior
888 elif isinstance(i_prior, str):
@@ -1016,7 +1016,7 @@
- 21class Fit_result(Sequence):
+ 21class Fit_result(Sequence):
22 """Represents fit results.
23
24 Attributes
@@ -1032,22 +1032,22 @@
34 Hotelling t-squared p-value for correlated fits.
35 """
36
-37 def __init__(self):
+37 def __init__(self):
38 self.fit_parameters = None
39
-40 def __getitem__(self, idx):
+40 def __getitem__(self, idx):
41 return self.fit_parameters[idx]
42
-43 def __len__(self):
+43 def __len__(self):
44 return len(self.fit_parameters)
45
-46 def gamma_method(self, **kwargs):
+46 def gamma_method(self, **kwargs):
47 """Apply the gamma method to all fit parameters"""
48 [o.gamma_method(**kwargs) for o in self.fit_parameters]
49
50 gm = gamma_method
51
-52 def __str__(self):
+52 def __str__(self):
53 my_str = 'Goodness of fit:\n'
54 if hasattr(self, 'chisquare_by_dof'):
55 my_str += '\u03C7\u00b2/d.o.f. = ' + f'{self.chisquare_by_dof:2.6f}' + '\n'
@@ -1064,7 +1064,7 @@
66 my_str += str(i_par) + '\t' + ' ' * int(par >= 0) + str(par).rjust(int(par < 0.0)) + '\n'
67 return my_str
68
-69 def __repr__(self):
+69 def __repr__(self):
70 m = max(map(len, list(self.__dict__.keys()))) + 1
71 return '\n'.join([key.rjust(m) + ': ' + repr(value) for key, value in sorted(self.__dict__.items())])
@@ -1110,7 +1110,7 @@ Hotelling t-squared p-value for correlated fits.
- 46 def gamma_method(self, **kwargs):
+ 46 def gamma_method(self, **kwargs):
@@ -1132,7 +1132,7 @@ Hotelling t-squared p-value for correlated fits.
- 46 def gamma_method(self, **kwargs):
+ 46 def gamma_method(self, **kwargs):
@@ -1155,7 +1155,7 @@ Hotelling t-squared p-value for correlated fits.
- 74def least_squares(x, y, func, priors=None, silent=False, **kwargs):
+ 74def least_squares(x, y, func, priors=None, silent=False, **kwargs):
75 r'''Performs a non-linear fit to y = func(x).
76 ```
77
@@ -1429,15 +1429,15 @@ Hotelling t-squared p-value for correlated fits.
345 x0 = [0.1] * n_parms
346
347 if priors is None:
-348 def general_chisqfunc_uncorr(p, ivars, pr):
+348 def general_chisqfunc_uncorr(p, ivars, pr):
349 model = anp.concatenate([anp.array(funcd[key](p, xd[key])).reshape(-1) for key in key_ls])
350 return (ivars - model) / dy_f
351 else:
-352 def general_chisqfunc_uncorr(p, ivars, pr):
+352 def general_chisqfunc_uncorr(p, ivars, pr):
353 model = anp.concatenate([anp.array(funcd[key](p, xd[key])).reshape(-1) for key in key_ls])
354 return anp.concatenate(((ivars - model) / dy_f, (p[prior_mask] - pr) / dp_f))
355
-356 def chisqfunc_uncorr(p):
+356 def chisqfunc_uncorr(p):
357 return anp.sum(general_chisqfunc_uncorr(p, y_f, p_f) ** 2)
358
359 if kwargs.get('correlated_fit') is True:
@@ -1455,11 +1455,11 @@ Hotelling t-squared p-value for correlated fits.
371 inverrdiag = np.diag(1 / np.asarray(dy_f))
372 chol_inv = invert_corr_cov_cholesky(corr, inverrdiag)
373
-374 def general_chisqfunc(p, ivars, pr):
+374 def general_chisqfunc(p, ivars, pr):
375 model = anp.concatenate([anp.array(funcd[key](p, xd[key])).reshape(-1) for key in key_ls])
376 return anp.concatenate((anp.dot(chol_inv, (ivars - model)), (p[prior_mask] - pr) / dp_f))
377
-378 def chisqfunc(p):
+378 def chisqfunc(p):
379 return anp.sum(general_chisqfunc(p, y_f, p_f) ** 2)
380 else:
381 general_chisqfunc = general_chisqfunc_uncorr
@@ -1493,12 +1493,12 @@ Hotelling t-squared p-value for correlated fits.
409 if 'tol' in kwargs:
410 print('tol cannot be set for Levenberg-Marquardt')
411
-412 def chisqfunc_residuals_uncorr(p):
+412 def chisqfunc_residuals_uncorr(p):
413 return general_chisqfunc_uncorr(p, y_f, p_f)
414
415 fit_result = scipy.optimize.least_squares(chisqfunc_residuals_uncorr, x0, method='lm', ftol=1e-15, gtol=1e-15, xtol=1e-15)
416 if kwargs.get('correlated_fit') is True:
-417 def chisqfunc_residuals(p):
+417 def chisqfunc_residuals(p):
418 return general_chisqfunc(p, y_f, p_f)
419
420 fit_result = scipy.optimize.least_squares(chisqfunc_residuals, fit_result.x, method='lm', ftol=1e-15, gtol=1e-15, xtol=1e-15)
@@ -1525,7 +1525,7 @@ Hotelling t-squared p-value for correlated fits.
441 print('chisquare/d.o.f.:', output.chisquare_by_dof)
442 print('fit parameters', fit_result.x)
443
-444 def prepare_hat_matrix():
+444 def prepare_hat_matrix():
445 hat_vector = []
446 for key in key_ls:
447 if (len(xd[key]) != 0):
@@ -1550,11 +1550,11 @@ Hotelling t-squared p-value for correlated fits.
466 try:
467 hess = hessian(chisqfunc)(fitp)
468 except TypeError:
-469 raise Exception("It is required to use autograd.numpy instead of numpy within fit functions, see the documentation for details.") from None
+469 raise Exception("It is required to use autograd.numpy instead of numpy within fit functions, see the documentation for details.") from None
470
471 len_y = len(y_f)
472
-473 def chisqfunc_compact(d):
+473 def chisqfunc_compact(d):
474 return anp.sum(general_chisqfunc(d[:n_parms], d[n_parms: n_parms + len_y], d[n_parms + len_y:]) ** 2)
475
476 jac_jac_y = hessian(chisqfunc_compact)(np.concatenate((fitp, y_f, p_f)))
@@ -1604,9 +1604,9 @@ list of Obs.
fit function, has to be of the form
-import autograd.numpy as anp
+import autograd.numpy as anp
-def func(a, x):
+def func(a, x):
return a[0] + a[1] * x + a[2] * anp.sinh(x)
@@ -1614,7 +1614,7 @@ fit function, has to be of the form
For multiple x values func can be of the form
-def func(a, x):
+def func(a, x):
(x1, x2) = x
return a[0] * x1 ** 2 + a[1] * x2
@@ -1698,10 +1698,10 @@ Parameters and information on the fitted result.
>>> # Example of a correlated (correlated_fit = True, inv_chol_cov_matrix handed over) combined fit, based on a randomly generated data set
->>> import numpy as np
->>> from scipy.stats import norm
->>> from scipy.linalg import cholesky
->>> import pyerrors as pe
+>>> import numpy as np
+>>> from scipy.stats import norm
+>>> from scipy.linalg import cholesky
+>>> import pyerrors as pe
>>> # generating the random data set
>>> num_samples = 400
>>> N = 3
@@ -1734,9 +1734,9 @@ Parameters and information on the fitted result.
>>> chol_inv = pe.obs.invert_corr_cov_cholesky(corr, inverrdiag) # gives form of the inverse covariance matrix needed for the combined correlated fit below
>>> y_dict = {'a': data[:3], 'b': data[3:]}
>>> # common fit parameter p[0] in combined fit
->>> def fit1(p, x):
+>>> def fit1(p, x):
>>> return p[0] + p[1] * x
->>> def fit2(p, x):
+>>> def fit2(p, x):
>>> return p[0] + p[2] * x
>>> fitf_dict = {'a': fit1, 'b':fit2}
>>> fitp_inv_cov_combined_fit = pe.least_squares(x_dict,y_dict, fitf_dict, correlated_fit = True, inv_chol_cov_matrix = [chol_inv,['a','b']])
@@ -1762,7 +1762,7 @@ Parameters and information on the fitted result.
- 507def total_least_squares(x, y, func, silent=False, **kwargs):
+ 507def total_least_squares(x, y, func, silent=False, **kwargs):
508 r'''Performs a non-linear fit to y = func(x) and returns a list of Obs corresponding to the fit parameters.
509
510 Parameters
@@ -1890,7 +1890,7 @@ Parameters and information on the fitted result.
632
633 m = x_f.size
634
-635 def odr_chisquare(p):
+635 def odr_chisquare(p):
636 model = func(p[:n_parms], p[n_parms:].reshape(x_shape))
637 chisq = anp.sum(((y_f - model) / dy_f) ** 2) + anp.sum(((x_f - p[n_parms:].reshape(x_shape)) / dx_f) ** 2)
638 return chisq
@@ -1925,9 +1925,9 @@ Parameters and information on the fitted result.
667 try:
668 hess = hessian(odr_chisquare)(np.concatenate((fitp, out.xplus.ravel())))
669 except TypeError:
-670 raise Exception("It is required to use autograd.numpy instead of numpy within fit functions, see the documentation for details.") from None
+670 raise Exception("It is required to use autograd.numpy instead of numpy within fit functions, see the documentation for details.") from None
671
-672 def odr_chisquare_compact_x(d):
+672 def odr_chisquare_compact_x(d):
673 model = func(d[:n_parms], d[n_parms:n_parms + m].reshape(x_shape))
674 chisq = anp.sum(((y_f - model) / dy_f) ** 2) + anp.sum(((d[n_parms + m:].reshape(x_shape) - d[n_parms:n_parms + m].reshape(x_shape)) / dx_f) ** 2)
675 return chisq
@@ -1940,7 +1940,7 @@ Parameters and information on the fitted result.
682 except np.linalg.LinAlgError:
683 raise Exception("Cannot invert hessian matrix.")
684
-685 def odr_chisquare_compact_y(d):
+685 def odr_chisquare_compact_y(d):
686 model = func(d[:n_parms], d[n_parms:n_parms + m].reshape(x_shape))
687 chisq = anp.sum(((d[n_parms + m:] - model) / dy_f) ** 2) + anp.sum(((x_f - d[n_parms:n_parms + m].reshape(x_shape)) / dx_f) ** 2)
688 return chisq
@@ -1980,9 +1980,9 @@ list of Obs. The dvalues of the Obs are used as x- and yerror for the fit.
func has to be of the form
-import autograd.numpy as anp
+import autograd.numpy as anp
-def func(a, x):
+def func(a, x):
return a[0] + a[1] * x + a[2] * anp.sinh(x)
@@ -1990,7 +1990,7 @@ func has to be of the form
For multiple x values func can be of the form
-def func(a, x):
+def func(a, x):
(x1, x2) = x
return a[0] * x1 ** 2 + a[1] * x2
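Since total_least_squares propagates the dvalues of both x and y, a short usage sketch may help; pe.pseudo_Obs, the ensemble name and the numbers are illustrative assumptions, only the fit interface follows the listing above:

import pyerrors as pe
from pyerrors.fits import total_least_squares

# Toy data where both x and y carry uncertainties.
x = [pe.pseudo_Obs(v, 0.01, 'test_ens') for v in [1.0, 2.0, 3.0, 4.0]]
y = [pe.pseudo_Obs(0.3 + 0.7 * v.value, 0.02, 'test_ens') for v in x]
[o.gamma_method() for o in x + y]

def func(a, x):
    return a[0] + a[1] * x   # use autograd.numpy for non-trivial functions

odr_res = total_least_squares(x, y, func)
odr_res.gamma_method()       # error propagation for the fitted parameters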
@@ -2037,7 +2037,7 @@ Parameters and information on the fitted result.
- 711def fit_lin(x, y, **kwargs):
+ 711def fit_lin(x, y, **kwargs):
712 """Performs a linear fit to y = n + m * x and returns two Obs n, m.
713
714 Parameters
@@ -2054,7 +2054,7 @@ Parameters and information on the fitted result.
725 List of fitted observables.
726 """
727
-728 def f(a, x):
+728 def f(a, x):
729 y = a[0] + a[1] * x
730 return y
731
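fit_lin wraps the routines above for a straight line y = n + m * x; a minimal sketch, with pe.pseudo_Obs used to generate toy observables (an assumption, not part of this excerpt):

import pyerrors as pe
from pyerrors.fits import fit_lin

y = [pe.pseudo_Obs(0.5 + 0.25 * i, 0.01, 'test_ens') for i in range(4)]
[o.gamma_method() for o in y]

n, m = fit_lin([0, 1, 2, 3], y)   # intercept n and slope m as Obs
n.gamma_method(); m.gamma_method()
print(n, m)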
@@ -2102,7 +2102,7 @@ List of fitted observables.
- 742def qqplot(x, o_y, func, p, title=""):
+ 742def qqplot(x, o_y, func, p, title=""):
743 """Generates a quantile-quantile plot of the fit result which can be used to
744 check if the residuals of the fit are gaussian distributed.
745
@@ -2156,7 +2156,7 @@ List of fitted observables.
- 772def residual_plot(x, y, func, fit_res, title=""):
+ 772def residual_plot(x, y, func, fit_res, title=""):
773 """Generates a plot which compares the fit to the data and displays the corresponding residuals
774
775 For uncorrelated data the residuals are expected to be distributed ~N(0,1).
@@ -2218,7 +2218,7 @@ List of fitted observables.
- 809def error_band(x, func, beta):
+ 809def error_band(x, func, beta):
810 """Calculate the error band for an array of sample values x, for given fit function func with optimized parameters beta.
811
812 Returns
@@ -2266,7 +2266,7 @@ Error band for an array of sample values x
- 833def ks_test(objects=None):
+ 833def ks_test(objects=None):
834 """Performs a Kolmogorov–Smirnov test for the p-values of all fit objects.
835
836 Parameters
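Putting the pieces of this module together, a hedged end-to-end sketch of an uncorrelated least_squares fit followed by the diagnostic helpers documented above; pe.pseudo_Obs, the ensemble name and the data are illustrative:

import numpy as np
import autograd.numpy as anp   # required inside fit functions, as stated above
import pyerrors as pe

x = [1., 2., 3., 4., 5., 6., 7., 8.]
y = [pe.pseudo_Obs(2.0 * np.exp(-0.3 * xi), 0.05, 'test_ens') for xi in x]
[o.gamma_method() for o in y]

def func(a, x):
    return a[0] * anp.exp(-a[1] * x)

fit_res = pe.least_squares(x, y, func)
fit_res.gamma_method()          # or the gm alias of Fit_result
print(fit_res)

# Diagnostics (both open matplotlib figures):
# pe.fits.residual_plot(x, y, func, fit_res)
# pe.fits.qqplot(x, y, func, fit_res.fit_parameters)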
diff --git a/docs/pyerrors/input.html b/docs/pyerrors/input.html
index 966d6dfb..3fd783ff 100644
--- a/docs/pyerrors/input.html
+++ b/docs/pyerrors/input.html
@@ -100,14 +100,14 @@ See pyerrors.obs.Obs.export_jackkn
5For comparison with other analysis workflows `pyerrors` can also generate jackknife samples from an `Obs` object or import jackknife samples into an `Obs` object.
6See `pyerrors.obs.Obs.export_jackknife` and `pyerrors.obs.import_jackknife` for details.
7'''
- 8from . import bdio as bdio
- 9from . import dobs as dobs
-10from . import hadrons as hadrons
-11from . import json as json
-12from . import misc as misc
-13from . import openQCD as openQCD
-14from . import pandas as pandas
-15from . import sfcf as sfcf
+ 8from . import bdio as bdio
+ 9from . import dobs as dobs
+10from . import hadrons as hadrons
+11from . import json as json
+12from . import misc as misc
+13from . import openQCD as openQCD
+14from . import pandas as pandas
+15from . import sfcf as sfcf
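Because the package __init__ above re-exports its submodules, the readers listed here are reachable directly as attributes of pyerrors.input; a trivial sketch:

import pyerrors.input as pei

print(pei.json)      # <module 'pyerrors.input.json' ...>
print(pei.openQCD)   # <module 'pyerrors.input.openQCD' ...>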
diff --git a/docs/pyerrors/input/bdio.html b/docs/pyerrors/input/bdio.html
index 2a08f3b0..afaebcd8 100644
--- a/docs/pyerrors/input/bdio.html
+++ b/docs/pyerrors/input/bdio.html
@@ -85,13 +85,13 @@
- 1import ctypes
- 2import hashlib
- 3import autograd.numpy as np # Thinly-wrapped numpy
- 4from ..obs import Obs
+ 1import ctypes
+ 2import hashlib
+ 3import autograd.numpy as np # Thinly-wrapped numpy
+ 4from ..obs import Obs
5
6
- 7def read_ADerrors(file_path, bdio_path='./libbdio.so', **kwargs):
+ 7def read_ADerrors(file_path, bdio_path='./libbdio.so', **kwargs):
8 """ Extract generic MCMC data from a bdio file
9
10 read_ADerrors requires bdio to be compiled into a shared library. This can be achieved by
@@ -166,7 +166,7 @@
79 break
80 bdio_get_rlen(fbdio)
81
- 82 def read_c_double():
+ 82 def read_c_double():
83 d_buf = ctypes.c_double
84 pd_buf = d_buf()
85 ppd_buf = ctypes.c_void_p(ctypes.addressof(pd_buf))
@@ -176,7 +176,7 @@
89 mean = read_c_double()
90 print('mean', mean)
91
- 92 def read_c_size_t():
+ 92 def read_c_size_t():
93 d_buf = ctypes.c_size_t
94 pd_buf = d_buf()
95 ppd_buf = ctypes.c_void_p(ctypes.addressof(pd_buf))
@@ -247,7 +247,7 @@
160 return return_list
161
162
-163def write_ADerrors(obs_list, file_path, bdio_path='./libbdio.so', **kwargs):
+163def write_ADerrors(obs_list, file_path, bdio_path='./libbdio.so', **kwargs):
164 """ Write Obs to a bdio file according to ADerrors conventions
165
166 write_ADerrors requires bdio to be compiled into a shared library. This can be achieved by
@@ -341,12 +341,12 @@
254
255 bdio_start_record(0x00, 8, fbdio)
256
-257 def write_c_double(double):
+257 def write_c_double(double):
258 pd_buf = ctypes.c_double(double)
259 ppd_buf = ctypes.c_void_p(ctypes.addressof(pd_buf))
260 bdio_write_f64(ppd_buf, ctypes.c_size_t(8), ctypes.c_void_p(fbdio))
261
-262 def write_c_size_t(int32):
+262 def write_c_size_t(int32):
263 pd_buf = ctypes.c_size_t(int32)
264 ppd_buf = ctypes.c_void_p(ctypes.addressof(pd_buf))
265 bdio_write_int32(ppd_buf, ctypes.c_size_t(4), ctypes.c_void_p(fbdio))
@@ -377,15 +377,15 @@
290 return 0
291
292
-293def _get_kwd(string, key):
+293def _get_kwd(string, key):
294 return (string.split(key, 1)[1]).split(" ", 1)[0]
295
296
-297def _get_corr_name(string, key):
+297def _get_corr_name(string, key):
298 return (string.split(key, 1)[1]).split(' NDIM=', 1)[0]
299
300
-301def read_mesons(file_path, bdio_path='./libbdio.so', **kwargs):
+301def read_mesons(file_path, bdio_path='./libbdio.so', **kwargs):
302 """ Extract mesons data from a bdio file and return it as a dictionary
303
304 The dictionary can be accessed with a tuple consisting of (type, source_position, kappa1, kappa2)
@@ -600,7 +600,7 @@
513 return result
514
515
-516def read_dSdm(file_path, bdio_path='./libbdio.so', **kwargs):
+516def read_dSdm(file_path, bdio_path='./libbdio.so', **kwargs):
517 """ Extract dSdm data from a bdio file and return it as a dictionary
518
519 The dictionary can be accessed with a tuple consisting of (type, kappa)
@@ -794,7 +794,7 @@
- 8def read_ADerrors(file_path, bdio_path='./libbdio.so', **kwargs):
+ 8def read_ADerrors(file_path, bdio_path='./libbdio.so', **kwargs):
9 """ Extract generic MCMC data from a bdio file
10
11 read_ADerrors requires bdio to be compiled into a shared library. This can be achieved by
@@ -869,7 +869,7 @@
80 break
81 bdio_get_rlen(fbdio)
82
- 83 def read_c_double():
+ 83 def read_c_double():
84 d_buf = ctypes.c_double
85 pd_buf = d_buf()
86 ppd_buf = ctypes.c_void_p(ctypes.addressof(pd_buf))
@@ -879,7 +879,7 @@
90 mean = read_c_double()
91 print('mean', mean)
92
- 93 def read_c_size_t():
+ 93 def read_c_size_t():
94 d_buf = ctypes.c_size_t
95 pd_buf = d_buf()
96 ppd_buf = ctypes.c_void_p(ctypes.addressof(pd_buf))
@@ -988,7 +988,7 @@ Extracted data
- 164def write_ADerrors(obs_list, file_path, bdio_path='./libbdio.so', **kwargs):
+ 164def write_ADerrors(obs_list, file_path, bdio_path='./libbdio.so', **kwargs):
165 """ Write Obs to a bdio file according to ADerrors conventions
166
167 write_ADerrors requires bdio to be compiled into a shared library. This can be achieved by
@@ -1082,12 +1082,12 @@ Extracted data
255
256 bdio_start_record(0x00, 8, fbdio)
257
-258 def write_c_double(double):
+258 def write_c_double(double):
259 pd_buf = ctypes.c_double(double)
260 ppd_buf = ctypes.c_void_p(ctypes.addressof(pd_buf))
261 bdio_write_f64(ppd_buf, ctypes.c_size_t(8), ctypes.c_void_p(fbdio))
262
-263 def write_c_size_t(int32):
+263 def write_c_size_t(int32):
264 pd_buf = ctypes.c_size_t(int32)
265 ppd_buf = ctypes.c_void_p(ctypes.addressof(pd_buf))
266 bdio_write_int32(ppd_buf, ctypes.c_size_t(4), ctypes.c_void_p(fbdio))
@@ -1156,7 +1156,7 @@ returns 0 if successful
- 302def read_mesons(file_path, bdio_path='./libbdio.so', **kwargs):
+ 302def read_mesons(file_path, bdio_path='./libbdio.so', **kwargs):
303 """ Extract mesons data from a bdio file and return it as a dictionary
304
305 The dictionary can be accessed with a tuple consisting of (type, source_position, kappa1, kappa2)
@@ -1421,7 +1421,7 @@ Extracted meson data
- 517def read_dSdm(file_path, bdio_path='./libbdio.so', **kwargs):
+ 517def read_dSdm(file_path, bdio_path='./libbdio.so', **kwargs):
518 """ Extract dSdm data from a bdio file and return it as a dictionary
519
520 The dictionary can be accessed with a tuple consisting of (type, kappa)
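A placeholder sketch of the bdio interface shown above; the file names are hypothetical and, as the docstrings state, libbdio.so has to be compiled into a shared library first:

from pyerrors.input import bdio

# 'measurement.bdio' stands in for an actual ADerrors file.
obs_list = bdio.read_ADerrors('measurement.bdio', bdio_path='./libbdio.so')
for obs in obs_list:
    obs.gamma_method()

# Writing follows the same conventions:
# bdio.write_ADerrors(obs_list, 'measurement_out.bdio', bdio_path='./libbdio.so')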
diff --git a/docs/pyerrors/input/dobs.html b/docs/pyerrors/input/dobs.html
index b2a872be..622f2963 100644
--- a/docs/pyerrors/input/dobs.html
+++ b/docs/pyerrors/input/dobs.html
@@ -94,23 +94,23 @@
- 1from collections import defaultdict
- 2import gzip
- 3import lxml.etree as et
- 4import getpass
- 5import socket
- 6import datetime
- 7import json
- 8import warnings
- 9import numpy as np
- 10from ..obs import Obs
- 11from ..obs import _merge_idx
- 12from ..covobs import Covobs
- 13from .. import version as pyerrorsversion
+ 1from collections import defaultdict
+ 2import gzip
+ 3import lxml.etree as et
+ 4import getpass
+ 5import socket
+ 6import datetime
+ 7import json
+ 8import warnings
+ 9import numpy as np
+ 10from ..obs import Obs
+ 11from ..obs import _merge_idx
+ 12from ..covobs import Covobs
+ 13from .. import version as pyerrorsversion
14
15
16# Based on https://stackoverflow.com/a/10076823
- 17def _etree_to_dict(t):
+ 17def _etree_to_dict(t):
18 """ Convert the content of an XML file to a python dict"""
19 d = {t.tag: {} if t.attrib else None}
20 children = list(t)
@@ -134,7 +134,7 @@
38 return d
39
40
- 41def _dict_to_xmlstring(d):
+ 41def _dict_to_xmlstring(d):
42 if isinstance(d, dict):
43 iters = ''
44 for k in d:
@@ -162,7 +162,7 @@
66 return iters
67
68
- 69def _dict_to_xmlstring_spaces(d, space=' '):
+ 69def _dict_to_xmlstring_spaces(d, space=' '):
70 s = _dict_to_xmlstring(d)
71 o = ''
72 c = 0
@@ -181,7 +181,7 @@
85 return o
86
87
- 88def create_pobs_string(obsl, name, spec='', origin='', symbol=[], enstag=None):
+ 88def create_pobs_string(obsl, name, spec='', origin='', symbol=[], enstag=None):
89 """Export a list of Obs or structures containing Obs to an xml string
90 according to the Zeuthen pobs format.
91
@@ -272,7 +272,7 @@
176 return rs
177
178
-179def write_pobs(obsl, fname, name, spec='', origin='', symbol=[], enstag=None, gz=True):
+179def write_pobs(obsl, fname, name, spec='', origin='', symbol=[], enstag=None, gz=True):
180 """Export a list of Obs or structures containing Obs to a .xml.gz file
181 according to the Zeuthen pobs format.
182
@@ -319,30 +319,30 @@
223 fp.close()
224
225
-226def _import_data(string):
+226def _import_data(string):
227 return json.loads("[" + ",".join(string.replace(' +', ' ').split()) + "]")
228
229
-230def _check(condition):
+230def _check(condition):
231 if not condition:
232 raise Exception("XML file format not supported")
233
234
-235class _NoTagInDataError(Exception):
+235class _NoTagInDataError(Exception):
236 """Raised when tag is not in data"""
-237 def __init__(self, tag):
+237 def __init__(self, tag):
238 self.tag = tag
239 super().__init__('Tag %s not in data!' % (self.tag))
240
241
-242def _find_tag(dat, tag):
+242def _find_tag(dat, tag):
243 for i in range(len(dat)):
244 if dat[i].tag == tag:
245 return i
246 raise _NoTagInDataError(tag)
247
248
-249def _import_array(arr):
+249def _import_array(arr):
250 name = arr[_find_tag(arr, 'id')].text.strip()
251 index = _find_tag(arr, 'layout')
252 try:
@@ -380,12 +380,12 @@
284 _check(False)
285
286
-287def _import_rdata(rd):
+287def _import_rdata(rd):
288 name, idx, mask, deltas = _import_array(rd)
289 return deltas, name, idx
290
291
-292def _import_cdata(cd):
+292def _import_cdata(cd):
293 _check(cd[0].tag == "id")
294 _check(cd[1][0].text.strip() == "cov")
295 cov = _import_array(cd[1])
@@ -393,7 +393,7 @@
297 return cd[0].text.strip(), cov, grad
298
299
-300def read_pobs(fname, full_output=False, gz=True, separator_insertion=None):
+300def read_pobs(fname, full_output=False, gz=True, separator_insertion=None):
301 """Import a list of Obs from an xml.gz file in the Zeuthen pobs format.
302
303 Tags are not written or recovered automatically.
@@ -493,7 +493,7 @@
397
398
399# this is based on Mattia Bruno's implementation at https://github.com/mbruno46/pyobs/blob/master/pyobs/IO/xml.py
-400def import_dobs_string(content, full_output=False, separator_insertion=True):
+400def import_dobs_string(content, full_output=False, separator_insertion=True):
401 """Import a list of Obs from a string in the Zeuthen dobs format.
402
403 Tags are not written or recovered automatically.
@@ -667,7 +667,7 @@
571 return res
572
573
-574def read_dobs(fname, full_output=False, gz=True, separator_insertion=True):
+574def read_dobs(fname, full_output=False, gz=True, separator_insertion=True):
575 """Import a list of Obs from an xml.gz file in the Zeuthen dobs format.
576
577 Tags are not written or recovered automatically.
@@ -714,7 +714,7 @@
618 return import_dobs_string(content, full_output, separator_insertion=separator_insertion)
619
620
-621def _dobsdict_to_xmlstring(d):
+621def _dobsdict_to_xmlstring(d):
622 if isinstance(d, dict):
623 iters = ''
624 for k in d:
@@ -754,7 +754,7 @@
658 return iters
659
660
-661def _dobsdict_to_xmlstring_spaces(d, space=' '):
+661def _dobsdict_to_xmlstring_spaces(d, space=' '):
662 s = _dobsdict_to_xmlstring(d)
663 o = ''
664 c = 0
@@ -773,7 +773,7 @@
677 return o
678
679
-680def create_dobs_string(obsl, name, spec='dobs v1.0', origin='', symbol=[], who=None, enstags=None):
+680def create_dobs_string(obsl, name, spec='dobs v1.0', origin='', symbol=[], who=None, enstags=None):
681 """Generate the string for the export of a list of Obs or structures containing Obs
682 to a .xml.gz file according to the Zeuthen dobs format.
683
@@ -962,7 +962,7 @@
866 return rs
867
868
-869def write_dobs(obsl, fname, name, spec='dobs v1.0', origin='', symbol=[], who=None, enstags=None, gz=True):
+869def write_dobs(obsl, fname, name, spec='dobs v1.0', origin='', symbol=[], who=None, enstags=None, gz=True):
870 """Export a list of Obs or structures containing Obs to a .xml.gz file
871 according to the Zeuthen dobs format.
872
@@ -1029,7 +1029,7 @@
- 89def create_pobs_string(obsl, name, spec='', origin='', symbol=[], enstag=None):
+ 89def create_pobs_string(obsl, name, spec='', origin='', symbol=[], enstag=None):
90 """Export a list of Obs or structures containing Obs to an xml string
91 according to the Zeuthen pobs format.
92
@@ -1165,7 +1165,7 @@ XML formatted string of the input data
- 180def write_pobs(obsl, fname, name, spec='', origin='', symbol=[], enstag=None, gz=True):
+ 180def write_pobs(obsl, fname, name, spec='', origin='', symbol=[], enstag=None, gz=True):
181 """Export a list of Obs or structures containing Obs to a .xml.gz file
182 according to the Zeuthen pobs format.
183
@@ -1260,7 +1260,7 @@ If True, the output is a gzipped xml. If False, the output is an xml file.
- 301def read_pobs(fname, full_output=False, gz=True, separator_insertion=None):
+ 301def read_pobs(fname, full_output=False, gz=True, separator_insertion=None):
302 """Import a list of Obs from an xml.gz file in the Zeuthen pobs format.
303
304 Tags are not written or recovered automatically.
@@ -1403,7 +1403,7 @@ Imported data and meta-data
- 401def import_dobs_string(content, full_output=False, separator_insertion=True):
+ 401def import_dobs_string(content, full_output=False, separator_insertion=True):
402 """Import a list of Obs from a string in the Zeuthen dobs format.
403
404 Tags are not written or recovered automatically.
@@ -1623,7 +1623,7 @@ Imported data and meta-data
- 575def read_dobs(fname, full_output=False, gz=True, separator_insertion=True):
+ 575def read_dobs(fname, full_output=False, gz=True, separator_insertion=True):
576 """Import a list of Obs from an xml.gz file in the Zeuthen dobs format.
577
578 Tags are not written or recovered automatically.
@@ -1718,7 +1718,7 @@ Imported data and meta-data
- 681def create_dobs_string(obsl, name, spec='dobs v1.0', origin='', symbol=[], who=None, enstags=None):
+ 681def create_dobs_string(obsl, name, spec='dobs v1.0', origin='', symbol=[], who=None, enstags=None):
682 """Generate the string for the export of a list of Obs or structures containing Obs
683 to a .xml.gz file according to the Zeuthen dobs format.
684
@@ -1956,7 +1956,7 @@ XML string generated from the data
- 870def write_dobs(obsl, fname, name, spec='dobs v1.0', origin='', symbol=[], who=None, enstags=None, gz=True):
+ 870def write_dobs(obsl, fname, name, spec='dobs v1.0', origin='', symbol=[], who=None, enstags=None, gz=True):
871 """Export a list of Obs or structures containing Obs to a .xml.gz file
872 according to the Zeuthen dobs format.
873
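The pobs writer and reader above form a simple round trip; a minimal sketch in which pe.pseudo_Obs, the file name and all labels are illustrative assumptions:

import pyerrors as pe
from pyerrors.input import dobs

obs = pe.pseudo_Obs(0.50, 0.01, 'test_ens')
obs.gamma_method()

# Export a list of Obs to a gzipped xml file in the Zeuthen pobs format ...
dobs.write_pobs([obs], 'my_obs.xml.gz', 'my_obs', spec='example', origin='toy data')
# ... and read it back.
reread = dobs.read_pobs('my_obs.xml.gz')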
diff --git a/docs/pyerrors/input/hadrons.html b/docs/pyerrors/input/hadrons.html
index 7512c1f8..fcd3dec1 100644
--- a/docs/pyerrors/input/hadrons.html
+++ b/docs/pyerrors/input/hadrons.html
@@ -103,18 +103,18 @@
- 1import os
- 2from collections import Counter
- 3import h5py
- 4from pathlib import Path
- 5import numpy as np
- 6from ..obs import Obs, CObs
- 7from ..correlators import Corr
- 8from ..dirac import epsilon_tensor_rank4
- 9from .misc import fit_t0
+ 1import os
+ 2from collections import Counter
+ 3import h5py
+ 4from pathlib import Path
+ 5import numpy as np
+ 6from ..obs import Obs, CObs
+ 7from ..correlators import Corr
+ 8from ..dirac import epsilon_tensor_rank4
+ 9from .misc import fit_t0
10
11
- 12def _get_files(path, filestem, idl):
+ 12def _get_files(path, filestem, idl):
13 ls = os.listdir(path)
14
15 # Clean up file list
@@ -123,7 +123,7 @@
18 if not files:
19 raise Exception('No files starting with', filestem, 'in folder', path)
20
- 21 def get_cnfg_number(n):
+ 21 def get_cnfg_number(n):
22 return int(n.replace(".h5", "")[len(filestem) + 1:]) # From python 3.9 onward the safer 'removesuffix' method can be used.
23
24 # Sort according to configuration number
@@ -159,7 +159,7 @@
54 return filtered_files, idx
55
56
- 57def read_hd5(filestem, ens_id, group, attrs=None, idl=None, part="real"):
+ 57def read_hd5(filestem, ens_id, group, attrs=None, idl=None, part="real"):
58 r'''Read hadrons hdf5 file and extract entry based on attributes.
59
60 Parameters
@@ -245,7 +245,7 @@
140 return corr
141
142
-143def read_meson_hd5(path, filestem, ens_id, meson='meson_0', idl=None, gammas=None):
+143def read_meson_hd5(path, filestem, ens_id, meson='meson_0', idl=None, gammas=None):
144 r'''Read hadrons meson hdf5 file and extract the meson labeled 'meson'
145
146 Parameters
@@ -284,7 +284,7 @@
179 part="real")
180
181
-182def _extract_real_arrays(path, files, tree, keys):
+182def _extract_real_arrays(path, files, tree, keys):
183 corr_data = {}
184 for key in keys:
185 corr_data[key] = []
@@ -302,7 +302,7 @@
197 return corr_data
198
199
-200def extract_t0_hd5(path, filestem, ens_id, obs='Clover energy density', fit_range=5, idl=None, **kwargs):
+200def extract_t0_hd5(path, filestem, ens_id, obs='Clover energy density', fit_range=5, idl=None, **kwargs):
201 r'''Read hadrons FlowObservables hdf5 file and extract t0
202
203 Parameters
@@ -350,7 +350,7 @@
245 return fit_t0(t2E_dict, fit_range, plot_fit=kwargs.get('plot_fit'))
246
247
-248def read_DistillationContraction_hd5(path, ens_id, diagrams=["direct"], idl=None):
+248def read_DistillationContraction_hd5(path, ens_id, diagrams=["direct"], idl=None):
249 """Read hadrons DistillationContraction hdf5 files in given directory structure
250
251 Parameters
@@ -447,16 +447,16 @@
342 return res_dict
343
344
-345class Npr_matrix(np.ndarray):
+345class Npr_matrix(np.ndarray):
346
-347 def __new__(cls, input_array, mom_in=None, mom_out=None):
+347 def __new__(cls, input_array, mom_in=None, mom_out=None):
348 obj = np.asarray(input_array).view(cls)
349 obj.mom_in = mom_in
350 obj.mom_out = mom_out
351 return obj
352
353 @property
-354 def g5H(self):
+354 def g5H(self):
355 """Gamma_5 hermitean conjugate
356
357 Uses the fact that the propagator is gamma5 hermitean, so just the
@@ -466,7 +466,7 @@
361 mom_in=self.mom_out,
362 mom_out=self.mom_in)
363
-364 def _propagate_mom(self, other, name):
+364 def _propagate_mom(self, other, name):
365 s_mom = getattr(self, name, None)
366 o_mom = getattr(other, name, None)
367 if s_mom is not None and o_mom is not None:
@@ -474,20 +474,20 @@
369 raise Exception(name + ' does not match.')
370 return o_mom if o_mom is not None else s_mom
371
-372 def __matmul__(self, other):
+372 def __matmul__(self, other):
373 return self.__new__(Npr_matrix,
374 super().__matmul__(other),
375 self._propagate_mom(other, 'mom_in'),
376 self._propagate_mom(other, 'mom_out'))
377
-378 def __array_finalize__(self, obj):
+378 def __array_finalize__(self, obj):
379 if obj is None:
380 return
381 self.mom_in = getattr(obj, 'mom_in', None)
382 self.mom_out = getattr(obj, 'mom_out', None)
383
384
-385def read_ExternalLeg_hd5(path, filestem, ens_id, idl=None):
+385def read_ExternalLeg_hd5(path, filestem, ens_id, idl=None):
386 """Read hadrons ExternalLeg hdf5 file and output an array of CObs
387
388 Parameters
@@ -532,7 +532,7 @@
427 return Npr_matrix(matrix, mom_in=mom)
428
429
-430def read_Bilinear_hd5(path, filestem, ens_id, idl=None):
+430def read_Bilinear_hd5(path, filestem, ens_id, idl=None):
431 """Read hadrons Bilinear hdf5 file and output an array of CObs
432
433 Parameters
@@ -591,7 +591,7 @@
486 return result_dict
487
488
-489def read_Fourquark_hd5(path, filestem, ens_id, idl=None, vertices=["VA", "AV"]):
+489def read_Fourquark_hd5(path, filestem, ens_id, idl=None, vertices=["VA", "AV"]):
490 """Read hadrons FourquarkFullyConnected hdf5 file and output an array of CObs
491
492 Parameters
@@ -677,7 +677,7 @@
572 return result_dict
573
574
-575def _get_lorentz_names(name):
+575def _get_lorentz_names(name):
576 lorentz_index = ['X', 'Y', 'Z', 'T']
577
578 res = []
@@ -733,7 +733,7 @@
- 58def read_hd5(filestem, ens_id, group, attrs=None, idl=None, part="real"):
+ 58def read_hd5(filestem, ens_id, group, attrs=None, idl=None, part="real"):
59 r'''Read hadrons hdf5 file and extract entry based on attributes.
60
61 Parameters
@@ -871,7 +871,7 @@ Correlator of the source sink combination in question.
- 144def read_meson_hd5(path, filestem, ens_id, meson='meson_0', idl=None, gammas=None):
+ 144def read_meson_hd5(path, filestem, ens_id, meson='meson_0', idl=None, gammas=None):
145 r'''Read hadrons meson hdf5 file and extract the meson labeled 'meson'
146
147 Parameters
@@ -955,7 +955,7 @@ Correlator of the source sink combination in question.
- 201def extract_t0_hd5(path, filestem, ens_id, obs='Clover energy density', fit_range=5, idl=None, **kwargs):
+ 201def extract_t0_hd5(path, filestem, ens_id, obs='Clover energy density', fit_range=5, idl=None, **kwargs):
202 r'''Read hadrons FlowObservables hdf5 file and extract t0
203
204 Parameters
@@ -1041,7 +1041,7 @@ If true, the fit for the extraction of t0 is shown together with the data.
- 249def read_DistillationContraction_hd5(path, ens_id, diagrams=["direct"], idl=None):
+ 249def read_DistillationContraction_hd5(path, ens_id, diagrams=["direct"], idl=None):
250 """Read hadrons DistillationContraction hdf5 files in given directory structure
251
252 Parameters
@@ -1175,16 +1175,16 @@ extracted DistillationContraction data
- 346class Npr_matrix(np.ndarray):
+ 346class Npr_matrix(np.ndarray):
347
-348 def __new__(cls, input_array, mom_in=None, mom_out=None):
+348 def __new__(cls, input_array, mom_in=None, mom_out=None):
349 obj = np.asarray(input_array).view(cls)
350 obj.mom_in = mom_in
351 obj.mom_out = mom_out
352 return obj
353
354 @property
-355 def g5H(self):
+355 def g5H(self):
356 """Gamma_5 hermitean conjugate
357
358 Uses the fact that the propagator is gamma5 hermitean, so just the
@@ -1194,7 +1194,7 @@ extracted DistillationContraction data
362 mom_in=self.mom_out,
363 mom_out=self.mom_in)
364
-365 def _propagate_mom(self, other, name):
+365 def _propagate_mom(self, other, name):
366 s_mom = getattr(self, name, None)
367 o_mom = getattr(other, name, None)
368 if s_mom is not None and o_mom is not None:
@@ -1202,13 +1202,13 @@ extracted DistillationContraction data
370 raise Exception(name + ' does not match.')
371 return o_mom if o_mom is not None else s_mom
372
-373 def __matmul__(self, other):
+373 def __matmul__(self, other):
374 return self.__new__(Npr_matrix,
375 super().__matmul__(other),
376 self._propagate_mom(other, 'mom_in'),
377 self._propagate_mom(other, 'mom_out'))
378
-379 def __array_finalize__(self, obj):
+379 def __array_finalize__(self, obj):
380 if obj is None:
381 return
382 self.mom_in = getattr(obj, 'mom_in', None)
@@ -1330,7 +1330,7 @@ ndarray.
First mode, buffer is None:
->>> import numpy as np
+>>> import numpy as np
>>> np.ndarray(shape=(2,2), dtype=float, order='F')
array([[0.0e+000, 0.0e+000], # random
[ nan, 2.5e-323]])
@@ -1359,7 +1359,7 @@ ndarray.
354 @property
-355 def g5H(self):
+355 def g5H(self):
356 """Gamma_5 hermitean conjugate
357
358 Uses the fact that the propagator is gamma5 hermitean, so just the
@@ -1391,7 +1391,7 @@ in and out momenta of the propagator are exchanged.
- 386def read_ExternalLeg_hd5(path, filestem, ens_id, idl=None):
+ 386def read_ExternalLeg_hd5(path, filestem, ens_id, idl=None):
387 """Read hadrons ExternalLeg hdf5 file and output an array of CObs
388
389 Parameters
@@ -1473,7 +1473,7 @@ read Cobs-matrix
- 431def read_Bilinear_hd5(path, filestem, ens_id, idl=None):
+ 431def read_Bilinear_hd5(path, filestem, ens_id, idl=None):
432 """Read hadrons Bilinear hdf5 file and output an array of CObs
433
434 Parameters
@@ -1569,7 +1569,7 @@ extracted Bilinears
- 490def read_Fourquark_hd5(path, filestem, ens_id, idl=None, vertices=["VA", "AV"]):
+ 490def read_Fourquark_hd5(path, filestem, ens_id, idl=None, vertices=["VA", "AV"]):
491 """Read hadrons FourquarkFullyConnected hdf5 file and output an array of CObs
492
493 Parameters
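For the hdf5 readers above, a placeholder sketch of reading a meson two-point function produced by Hadrons; path, file stem and ensemble id are hypothetical, and the files are expected to be named like '<filestem>.<configuration>.h5':

from pyerrors.input import hadrons

corr = hadrons.read_meson_hd5('./hdf5_data', 'meson_prop', 'A654_ens', meson='meson_0')
corr.gamma_method()
# corr is a pyerrors Corr object; the usual correlator methods apply from here.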
diff --git a/docs/pyerrors/input/json.html b/docs/pyerrors/input/json.html
index f6677161..c6ad22a8 100644
--- a/docs/pyerrors/input/json.html
+++ b/docs/pyerrors/input/json.html
@@ -91,23 +91,23 @@
- 1import rapidjson as json
- 2import gzip
- 3import getpass
- 4import socket
- 5import datetime
- 6import platform
- 7import warnings
- 8import re
- 9import numpy as np
- 10from ..obs import Obs
- 11from ..covobs import Covobs
- 12from ..correlators import Corr
- 13from ..misc import _assert_equal_properties
- 14from .. import version as pyerrorsversion
+ 1import rapidjson as json
+ 2import gzip
+ 3import getpass
+ 4import socket
+ 5import datetime
+ 6import platform
+ 7import warnings
+ 8import re
+ 9import numpy as np
+ 10from ..obs import Obs
+ 11from ..covobs import Covobs
+ 12from ..correlators import Corr
+ 13from ..misc import _assert_equal_properties
+ 14from .. import version as pyerrorsversion
15
16
- 17def create_json_string(ol, description='', indent=1):
+ 17def create_json_string(ol, description='', indent=1):
18 """Generate the string for the export of a list of Obs or structures containing Obs
19 to a .json(.gz) file
20
@@ -129,7 +129,7 @@
36 String for export to .json(.gz) file
37 """
38
- 39 def _gen_data_d_from_list(ol):
+ 39 def _gen_data_d_from_list(ol):
40 dl = []
41 No = len(ol)
42 for name in ol[0].mc_names:
@@ -149,7 +149,7 @@
56 dl.append(ed)
57 return dl
58
- 59 def _gen_cdata_d_from_list(ol):
+ 59 def _gen_cdata_d_from_list(ol):
60 dl = []
61 for name in ol[0].cov_names:
62 ed = {}
@@ -165,7 +165,7 @@
72 dl.append(ed)
73 return dl
74
- 75 def write_Obs_to_dict(o):
+ 75 def write_Obs_to_dict(o):
76 d = {}
77 d['type'] = 'Obs'
78 d['layout'] = '1'
@@ -182,7 +182,7 @@
89 d['cdata'] = cdata
90 return d
91
- 92 def write_List_to_dict(ol):
+ 92 def write_List_to_dict(ol):
93 _assert_equal_properties(ol)
94 d = {}
95 d['type'] = 'List'
@@ -201,7 +201,7 @@
108 d['cdata'] = cdata
109 return d
110
-111 def write_Array_to_dict(oa):
+111 def write_Array_to_dict(oa):
112 ol = np.ravel(oa)
113 _assert_equal_properties(ol)
114 d = {}
@@ -221,7 +221,7 @@
128 d['cdata'] = cdata
129 return d
130
-131 def _nan_Obs_like(obs):
+131 def _nan_Obs_like(obs):
132 samples = []
133 names = []
134 idl = []
@@ -236,7 +236,7 @@
143 my_obs.reweighted = obs.reweighted
144 return my_obs
145
-146 def write_Corr_to_dict(my_corr):
+146 def write_Corr_to_dict(my_corr):
147 first_not_none = next(i for i, j in enumerate(my_corr.content) if np.all(j))
148 dummy_array = np.empty((my_corr.N, my_corr.N), dtype=object)
149 dummy_array[:] = _nan_Obs_like(my_corr.content[first_not_none].ravel()[0])
@@ -281,7 +281,7 @@
188 else:
189 raise Exception("Unknown datatype.")
190
-191 def _jsonifier(obj):
+191 def _jsonifier(obj):
192 if isinstance(obj, dict):
193 result = {}
194 for key in obj:
@@ -309,7 +309,7 @@
216 return json.dumps(d, indent=indent, ensure_ascii=False, default=_jsonifier, write_mode=json.WM_COMPACT)
217
218
-219def dump_to_json(ol, fname, description='', indent=1, gz=True):
+219def dump_to_json(ol, fname, description='', indent=1, gz=True):
220 """Export a list of Obs or structures containing Obs to a .json(.gz) file.
221 Dict keys that are not JSON-serializable such as floats are converted to strings.
222
@@ -351,7 +351,7 @@
258 fp.close()
259
260
-261def _parse_json_dict(json_dict, verbose=True, full_output=False):
+261def _parse_json_dict(json_dict, verbose=True, full_output=False):
262 """Reconstruct a list of Obs or structures containing Obs from a dict that
263 was built out of a json string.
264
@@ -380,7 +380,7 @@
287 if full_output=True
288 """
289
-290 def _gen_obsd_from_datad(d):
+290 def _gen_obsd_from_datad(d):
291 retd = {}
292 if d:
293 retd['names'] = []
@@ -399,7 +399,7 @@
306 retd['deltas'].append(np.array([di[1:] for di in rep['deltas']]))
307 return retd
308
-309 def _gen_covobsd_from_cdatad(d):
+309 def _gen_covobsd_from_cdatad(d):
310 retd = {}
311 for ens in d:
312 retl = []
@@ -414,7 +414,7 @@
321 retd[name] = retl
322 return retd
323
-324 def get_Obs_from_dict(o):
+324 def get_Obs_from_dict(o):
325 layouts = o.get('layout', '1').strip()
326 if layouts != '1':
327 raise Exception("layout is %s has to be 1 for type Obs." % (layouts), RuntimeWarning)
@@ -438,7 +438,7 @@
345 ret.tag = o.get('tag', [None])[0]
346 return ret
347
-348 def get_List_from_dict(o):
+348 def get_List_from_dict(o):
349 layouts = o.get('layout', '1').strip()
350 layout = int(layouts)
351 values = o['value']
@@ -464,7 +464,7 @@
371 ret[-1].tag = taglist[i]
372 return ret
373
-374 def get_Array_from_dict(o):
+374 def get_Array_from_dict(o):
375 layouts = o.get('layout', '1').strip()
376 layout = [int(ls.strip()) for ls in layouts.split(',') if len(ls) > 0]
377 N = np.prod(layout)
@@ -489,7 +489,7 @@
396 ret[-1].tag = taglist[i]
397 return np.reshape(ret, layout)
398
-399 def get_Corr_from_dict(o):
+399 def get_Corr_from_dict(o):
400 if isinstance(o.get('tag'), list): # supports the old way
401 taglist = o.get('tag') # This had to be modified to get the taglist from the dictionary
402 temp_prange = None
@@ -563,7 +563,7 @@
470 return ol
471
472
-473def import_json_string(json_string, verbose=True, full_output=False):
+473def import_json_string(json_string, verbose=True, full_output=False):
474 """Reconstruct a list of Obs or structures containing Obs from a json string.
475
476 The following structures are supported: Obs, list, numpy.ndarray, Corr
@@ -593,7 +593,7 @@
500 return _parse_json_dict(json.loads(json_string), verbose, full_output)
501
502
-503def load_json(fname, verbose=True, gz=True, full_output=False):
+503def load_json(fname, verbose=True, gz=True, full_output=False):
504 """Import a list of Obs or structures containing Obs from a .json(.gz) file.
505
506 The following structures are supported: Obs, list, numpy.ndarray, Corr
@@ -638,7 +638,7 @@
545 return _parse_json_dict(d, verbose, full_output)
546
547
-548def _ol_from_dict(ind, reps='DICTOBS'):
+548def _ol_from_dict(ind, reps='DICTOBS'):
549 """Convert a dictionary of Obs objects to a list and a dictionary that contains
550 placeholders instead of the Obs objects.
551
@@ -659,7 +659,7 @@
566 ol = []
567 counter = 0
568
-569 def dict_replace_obs(d):
+569 def dict_replace_obs(d):
570 nonlocal ol
571 nonlocal counter
572 x = {}
@@ -680,7 +680,7 @@
587 x[k] = v
588 return x
589
-590 def list_replace_obs(li):
+590 def list_replace_obs(li):
591 nonlocal ol
592 nonlocal counter
593 x = []
@@ -701,7 +701,7 @@
608 x.append(e)
609 return x
610
-611 def obslist_replace_obs(li):
+611 def obslist_replace_obs(li):
612 nonlocal ol
613 nonlocal counter
614 il = []
@@ -718,7 +718,7 @@
625 return ol, nd
626
627
-628def dump_dict_to_json(od, fname, description='', indent=1, reps='DICTOBS', gz=True):
+628def dump_dict_to_json(od, fname, description='', indent=1, reps='DICTOBS', gz=True):
629 """Export a dict of Obs or structures containing Obs to a .json(.gz) file
630
631 Parameters
@@ -758,7 +758,7 @@
665 dump_to_json(ol, fname, description=desc_dict, indent=indent, gz=gz)
666
667
-668def _od_from_list_and_dict(ol, ind, reps='DICTOBS'):
+668def _od_from_list_and_dict(ol, ind, reps='DICTOBS'):
669 """Parse a list of Obs or structures containing Obs and an accompanying
670 dict, where the structures have been replaced by placeholders to a
671 dict that contains the structures.
@@ -781,7 +781,7 @@
688
689 counter = 0
690
-691 def dict_replace_string(d):
+691 def dict_replace_string(d):
692 nonlocal counter
693 nonlocal ol
694 x = {}
@@ -797,7 +797,7 @@
704 x[k] = v
705 return x
706
-707 def list_replace_string(li):
+707 def list_replace_string(li):
708 nonlocal counter
709 nonlocal ol
710 x = []
@@ -821,7 +821,7 @@
728 return nd
729
730
-731def load_json_dict(fname, verbose=True, gz=True, full_output=False, reps='DICTOBS'):
+731def load_json_dict(fname, verbose=True, gz=True, full_output=False, reps='DICTOBS'):
732 """Import a dict of Obs or structures containing Obs from a .json(.gz) file.
733
734 The following structures are supported: Obs, list, numpy.ndarray, Corr
@@ -875,7 +875,7 @@
- 18def create_json_string(ol, description='', indent=1):
+ 18def create_json_string(ol, description='', indent=1):
19 """Generate the string for the export of a list of Obs or structures containing Obs
20 to a .json(.gz) file
21
@@ -897,7 +897,7 @@
37 String for export to .json(.gz) file
38 """
39
- 40 def _gen_data_d_from_list(ol):
+ 40 def _gen_data_d_from_list(ol):
41 dl = []
42 No = len(ol)
43 for name in ol[0].mc_names:
@@ -917,7 +917,7 @@
57 dl.append(ed)
58 return dl
59
- 60 def _gen_cdata_d_from_list(ol):
+ 60 def _gen_cdata_d_from_list(ol):
61 dl = []
62 for name in ol[0].cov_names:
63 ed = {}
@@ -933,7 +933,7 @@
73 dl.append(ed)
74 return dl
75
- 76 def write_Obs_to_dict(o):
+ 76 def write_Obs_to_dict(o):
77 d = {}
78 d['type'] = 'Obs'
79 d['layout'] = '1'
@@ -950,7 +950,7 @@
90 d['cdata'] = cdata
91 return d
92
- 93 def write_List_to_dict(ol):
+ 93 def write_List_to_dict(ol):
94 _assert_equal_properties(ol)
95 d = {}
96 d['type'] = 'List'
@@ -969,7 +969,7 @@
109 d['cdata'] = cdata
110 return d
111
-112 def write_Array_to_dict(oa):
+112 def write_Array_to_dict(oa):
113 ol = np.ravel(oa)
114 _assert_equal_properties(ol)
115 d = {}
@@ -989,7 +989,7 @@
129 d['cdata'] = cdata
130 return d
131
-132 def _nan_Obs_like(obs):
+132 def _nan_Obs_like(obs):
133 samples = []
134 names = []
135 idl = []
@@ -1004,7 +1004,7 @@
144 my_obs.reweighted = obs.reweighted
145 return my_obs
146
-147 def write_Corr_to_dict(my_corr):
+147 def write_Corr_to_dict(my_corr):
148 first_not_none = next(i for i, j in enumerate(my_corr.content) if np.all(j))
149 dummy_array = np.empty((my_corr.N, my_corr.N), dtype=object)
150 dummy_array[:] = _nan_Obs_like(my_corr.content[first_not_none].ravel()[0])
@@ -1049,7 +1049,7 @@
189 else:
190 raise Exception("Unknown datatype.")
191
-192 def _jsonifier(obj):
+192 def _jsonifier(obj):
193 if isinstance(obj, dict):
194 result = {}
195 for key in obj:
@@ -1116,7 +1116,7 @@ String for export to pyerrors.input.json(.gz) file
- 220def dump_to_json(ol, fname, description='', indent=1, gz=True):
+ 220def dump_to_json(ol, fname, description='', indent=1, gz=True):
221 """Export a list of Obs or structures containing Obs to a .json(.gz) file.
222 Dict keys that are not JSON-serializable such as floats are converted to strings.
223
@@ -1200,7 +1200,7 @@ If True, the output is a gzipped json. If False, the output is a json file.
- 474def import_json_string(json_string, verbose=True, full_output=False):
+ 474def import_json_string(json_string, verbose=True, full_output=False):
475 """Reconstruct a list of Obs or structures containing Obs from a json string.
476
477 The following structures are supported: Obs, list, numpy.ndarray, Corr
@@ -1275,7 +1275,7 @@ if full_output=True
- 504def load_json(fname, verbose=True, gz=True, full_output=False):
+ 504def load_json(fname, verbose=True, gz=True, full_output=False):
505 """Import a list of Obs or structures containing Obs from a .json(.gz) file.
506
507 The following structures are supported: Obs, list, numpy.ndarray, Corr
@@ -1367,7 +1367,7 @@ if full_output=True
- 629def dump_dict_to_json(od, fname, description='', indent=1, reps='DICTOBS', gz=True):
+ 629def dump_dict_to_json(od, fname, description='', indent=1, reps='DICTOBS', gz=True):
630 """Export a dict of Obs or structures containing Obs to a .json(.gz) file
631
632 Parameters
@@ -1450,7 +1450,7 @@ If True, the output is a gzipped json. If False, the output is a json file.
- 732def load_json_dict(fname, verbose=True, gz=True, full_output=False, reps='DICTOBS'):
+ 732def load_json_dict(fname, verbose=True, gz=True, full_output=False, reps='DICTOBS'):
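Finally, a round-trip sketch for the json routines documented above; pe.pseudo_Obs and the file name are illustrative, and with the default gz=True a gzipped .json.gz file is written:

import pyerrors as pe
from pyerrors.input import json as pejson

obs = pe.pseudo_Obs(1.7, 0.2, 'test_ens')
obs.gamma_method()

pejson.dump_to_json([obs], 'my_obs_file', description='toy example')
reread = pejson.load_json('my_obs_file')   # reconstructs the exported structures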