From 847317353c2f1e4547ab070a7f437f508b3da8d2 Mon Sep 17 00:00:00 2001 From: fjosw Date: Thu, 1 Dec 2022 16:01:38 +0000 Subject: [PATCH] Documentation updated --- docs/pyerrors/obs.html | 6955 ++++++++++++++++++++-------------------- 1 file changed, 3479 insertions(+), 3476 deletions(-) diff --git a/docs/pyerrors/obs.html b/docs/pyerrors/obs.html index d65897e6..e526141d 100644 --- a/docs/pyerrors/obs.html +++ b/docs/pyerrors/obs.html @@ -216,1660 +216,1663 @@ 12from itertools import groupby 13from .covobs import Covobs 14 - 15 - 16class Obs: - 17 """Class for a general observable. + 15# Improve print output of numpy.ndarrays containing Obs objects. + 16np.set_printoptions(formatter={'object': lambda x: str(x)}) + 17 18 - 19 Instances of Obs are the basic objects of a pyerrors error analysis. - 20 They are initialized with a list which contains arrays of samples for - 21 different ensembles/replica and another list of same length which contains - 22 the names of the ensembles/replica. Mathematical operations can be - 23 performed on instances. The result is another instance of Obs. The error of - 24 an instance can be computed with the gamma_method. Also contains additional - 25 methods for output and visualization of the error calculation. - 26 - 27 Attributes - 28 ---------- - 29 S_global : float - 30 Standard value for S (default 2.0) - 31 S_dict : dict - 32 Dictionary for S values. If an entry for a given ensemble - 33 exists this overwrites the standard value for that ensemble. - 34 tau_exp_global : float - 35 Standard value for tau_exp (default 0.0) - 36 tau_exp_dict : dict - 37 Dictionary for tau_exp values. If an entry for a given ensemble exists - 38 this overwrites the standard value for that ensemble. - 39 N_sigma_global : float - 40 Standard value for N_sigma (default 1.0) - 41 N_sigma_dict : dict - 42 Dictionary for N_sigma values. If an entry for a given ensemble exists - 43 this overwrites the standard value for that ensemble. - 44 """ - 45 __slots__ = ['names', 'shape', 'r_values', 'deltas', 'N', '_value', '_dvalue', - 46 'ddvalue', 'reweighted', 'S', 'tau_exp', 'N_sigma', - 47 'e_dvalue', 'e_ddvalue', 'e_tauint', 'e_dtauint', - 48 'e_windowsize', 'e_rho', 'e_drho', 'e_n_tauint', 'e_n_dtauint', - 49 'idl', 'is_merged', 'tag', '_covobs', '__dict__'] - 50 - 51 S_global = 2.0 - 52 S_dict = {} - 53 tau_exp_global = 0.0 - 54 tau_exp_dict = {} - 55 N_sigma_global = 1.0 - 56 N_sigma_dict = {} - 57 filter_eps = 1e-10 - 58 - 59 def __init__(self, samples, names, idl=None, **kwargs): - 60 """ Initialize Obs object. + 19class Obs: + 20 """Class for a general observable. + 21 + 22 Instances of Obs are the basic objects of a pyerrors error analysis. + 23 They are initialized with a list which contains arrays of samples for + 24 different ensembles/replica and another list of same length which contains + 25 the names of the ensembles/replica. Mathematical operations can be + 26 performed on instances. The result is another instance of Obs. The error of + 27 an instance can be computed with the gamma_method. Also contains additional + 28 methods for output and visualization of the error calculation. + 29 + 30 Attributes + 31 ---------- + 32 S_global : float + 33 Standard value for S (default 2.0) + 34 S_dict : dict + 35 Dictionary for S values. If an entry for a given ensemble + 36 exists this overwrites the standard value for that ensemble. + 37 tau_exp_global : float + 38 Standard value for tau_exp (default 0.0) + 39 tau_exp_dict : dict + 40 Dictionary for tau_exp values. 
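The only substantive change in this commit is the `np.set_printoptions` call added at the top of the module; the rest of the diff is the same source shifted down by three lines. A minimal sketch of what that formatter buys (the observables and the ensemble name 'ens_A' are made-up illustration data, built with `pe.pseudo_Obs`):

```python
import numpy as np
import pyerrors as pe

# Toy observables on a fictitious ensemble 'ens_A' (synthetic data only).
obs_a = pe.pseudo_Obs(0.50, 0.01, 'ens_A')
obs_b = pe.pseudo_Obs(0.75, 0.02, 'ens_A')
obs_a.gamma_method()
obs_b.gamma_method()

# With the formatter set as in the patch, an object array of Obs prints each
# entry through str(obs), i.e. as a value with its uncertainty.
np.set_printoptions(formatter={'object': lambda x: str(x)})
print(np.array([obs_a, obs_b], dtype=object))
```

Without the formatter, numpy falls back to each element's repr() when printing such arrays.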
If an entry for a given ensemble exists + 41 this overwrites the standard value for that ensemble. + 42 N_sigma_global : float + 43 Standard value for N_sigma (default 1.0) + 44 N_sigma_dict : dict + 45 Dictionary for N_sigma values. If an entry for a given ensemble exists + 46 this overwrites the standard value for that ensemble. + 47 """ + 48 __slots__ = ['names', 'shape', 'r_values', 'deltas', 'N', '_value', '_dvalue', + 49 'ddvalue', 'reweighted', 'S', 'tau_exp', 'N_sigma', + 50 'e_dvalue', 'e_ddvalue', 'e_tauint', 'e_dtauint', + 51 'e_windowsize', 'e_rho', 'e_drho', 'e_n_tauint', 'e_n_dtauint', + 52 'idl', 'is_merged', 'tag', '_covobs', '__dict__'] + 53 + 54 S_global = 2.0 + 55 S_dict = {} + 56 tau_exp_global = 0.0 + 57 tau_exp_dict = {} + 58 N_sigma_global = 1.0 + 59 N_sigma_dict = {} + 60 filter_eps = 1e-10 61 - 62 Parameters - 63 ---------- - 64 samples : list - 65 list of numpy arrays containing the Monte Carlo samples - 66 names : list - 67 list of strings labeling the individual samples - 68 idl : list, optional - 69 list of ranges or lists on which the samples are defined - 70 """ - 71 - 72 if kwargs.get("means") is None and len(samples): - 73 if len(samples) != len(names): - 74 raise Exception('Length of samples and names incompatible.') - 75 if idl is not None: - 76 if len(idl) != len(names): - 77 raise Exception('Length of idl incompatible with samples and names.') - 78 name_length = len(names) - 79 if name_length > 1: - 80 if name_length != len(set(names)): - 81 raise Exception('names are not unique.') - 82 if not all(isinstance(x, str) for x in names): - 83 raise TypeError('All names have to be strings.') - 84 else: - 85 if not isinstance(names[0], str): + 62 def __init__(self, samples, names, idl=None, **kwargs): + 63 """ Initialize Obs object. + 64 + 65 Parameters + 66 ---------- + 67 samples : list + 68 list of numpy arrays containing the Monte Carlo samples + 69 names : list + 70 list of strings labeling the individual samples + 71 idl : list, optional + 72 list of ranges or lists on which the samples are defined + 73 """ + 74 + 75 if kwargs.get("means") is None and len(samples): + 76 if len(samples) != len(names): + 77 raise Exception('Length of samples and names incompatible.') + 78 if idl is not None: + 79 if len(idl) != len(names): + 80 raise Exception('Length of idl incompatible with samples and names.') + 81 name_length = len(names) + 82 if name_length > 1: + 83 if name_length != len(set(names)): + 84 raise Exception('names are not unique.') + 85 if not all(isinstance(x, str) for x in names): 86 raise TypeError('All names have to be strings.') - 87 if min(len(x) for x in samples) <= 4: - 88 raise Exception('Samples have to have at least 5 entries.') - 89 - 90 self.names = sorted(names) - 91 self.shape = {} - 92 self.r_values = {} - 93 self.deltas = {} - 94 self._covobs = {} - 95 - 96 self._value = 0 - 97 self.N = 0 - 98 self.is_merged = {} - 99 self.idl = {} - 100 if idl is not None: - 101 for name, idx in sorted(zip(names, idl)): - 102 if isinstance(idx, range): - 103 self.idl[name] = idx - 104 elif isinstance(idx, (list, np.ndarray)): - 105 dc = np.unique(np.diff(idx)) - 106 if np.any(dc < 0): - 107 raise Exception("Unsorted idx for idl[%s]" % (name)) - 108 if len(dc) == 1: - 109 self.idl[name] = range(idx[0], idx[-1] + dc[0], dc[0]) - 110 else: - 111 self.idl[name] = list(idx) - 112 else: - 113 raise Exception('incompatible type for idl[%s].' 
% (name)) - 114 else: - 115 for name, sample in sorted(zip(names, samples)): - 116 self.idl[name] = range(1, len(sample) + 1) - 117 - 118 if kwargs.get("means") is not None: - 119 for name, sample, mean in sorted(zip(names, samples, kwargs.get("means"))): - 120 self.shape[name] = len(self.idl[name]) - 121 self.N += self.shape[name] - 122 self.r_values[name] = mean - 123 self.deltas[name] = sample - 124 else: - 125 for name, sample in sorted(zip(names, samples)): - 126 self.shape[name] = len(self.idl[name]) - 127 self.N += self.shape[name] - 128 if len(sample) != self.shape[name]: - 129 raise Exception('Incompatible samples and idx for %s: %d vs. %d' % (name, len(sample), self.shape[name])) - 130 self.r_values[name] = np.mean(sample) - 131 self.deltas[name] = sample - self.r_values[name] - 132 self._value += self.shape[name] * self.r_values[name] - 133 self._value /= self.N - 134 - 135 self._dvalue = 0.0 - 136 self.ddvalue = 0.0 - 137 self.reweighted = False - 138 - 139 self.tag = None - 140 - 141 @property - 142 def value(self): - 143 return self._value - 144 - 145 @property - 146 def dvalue(self): - 147 return self._dvalue - 148 - 149 @property - 150 def e_names(self): - 151 return sorted(set([o.split('|')[0] for o in self.names])) - 152 - 153 @property - 154 def cov_names(self): - 155 return sorted(set([o for o in self.covobs.keys()])) - 156 - 157 @property - 158 def mc_names(self): - 159 return sorted(set([o.split('|')[0] for o in self.names if o not in self.cov_names])) - 160 - 161 @property - 162 def e_content(self): - 163 res = {} - 164 for e, e_name in enumerate(self.e_names): - 165 res[e_name] = sorted(filter(lambda x: x.startswith(e_name + '|'), self.names)) - 166 if e_name in self.names: - 167 res[e_name].append(e_name) - 168 return res - 169 - 170 @property - 171 def covobs(self): - 172 return self._covobs - 173 - 174 def gamma_method(self, **kwargs): - 175 """Estimate the error and related properties of the Obs. + 87 else: + 88 if not isinstance(names[0], str): + 89 raise TypeError('All names have to be strings.') + 90 if min(len(x) for x in samples) <= 4: + 91 raise Exception('Samples have to have at least 5 entries.') + 92 + 93 self.names = sorted(names) + 94 self.shape = {} + 95 self.r_values = {} + 96 self.deltas = {} + 97 self._covobs = {} + 98 + 99 self._value = 0 + 100 self.N = 0 + 101 self.is_merged = {} + 102 self.idl = {} + 103 if idl is not None: + 104 for name, idx in sorted(zip(names, idl)): + 105 if isinstance(idx, range): + 106 self.idl[name] = idx + 107 elif isinstance(idx, (list, np.ndarray)): + 108 dc = np.unique(np.diff(idx)) + 109 if np.any(dc < 0): + 110 raise Exception("Unsorted idx for idl[%s]" % (name)) + 111 if len(dc) == 1: + 112 self.idl[name] = range(idx[0], idx[-1] + dc[0], dc[0]) + 113 else: + 114 self.idl[name] = list(idx) + 115 else: + 116 raise Exception('incompatible type for idl[%s].' % (name)) + 117 else: + 118 for name, sample in sorted(zip(names, samples)): + 119 self.idl[name] = range(1, len(sample) + 1) + 120 + 121 if kwargs.get("means") is not None: + 122 for name, sample, mean in sorted(zip(names, samples, kwargs.get("means"))): + 123 self.shape[name] = len(self.idl[name]) + 124 self.N += self.shape[name] + 125 self.r_values[name] = mean + 126 self.deltas[name] = sample + 127 else: + 128 for name, sample in sorted(zip(names, samples)): + 129 self.shape[name] = len(self.idl[name]) + 130 self.N += self.shape[name] + 131 if len(sample) != self.shape[name]: + 132 raise Exception('Incompatible samples and idx for %s: %d vs. 
%d' % (name, len(sample), self.shape[name])) + 133 self.r_values[name] = np.mean(sample) + 134 self.deltas[name] = sample - self.r_values[name] + 135 self._value += self.shape[name] * self.r_values[name] + 136 self._value /= self.N + 137 + 138 self._dvalue = 0.0 + 139 self.ddvalue = 0.0 + 140 self.reweighted = False + 141 + 142 self.tag = None + 143 + 144 @property + 145 def value(self): + 146 return self._value + 147 + 148 @property + 149 def dvalue(self): + 150 return self._dvalue + 151 + 152 @property + 153 def e_names(self): + 154 return sorted(set([o.split('|')[0] for o in self.names])) + 155 + 156 @property + 157 def cov_names(self): + 158 return sorted(set([o for o in self.covobs.keys()])) + 159 + 160 @property + 161 def mc_names(self): + 162 return sorted(set([o.split('|')[0] for o in self.names if o not in self.cov_names])) + 163 + 164 @property + 165 def e_content(self): + 166 res = {} + 167 for e, e_name in enumerate(self.e_names): + 168 res[e_name] = sorted(filter(lambda x: x.startswith(e_name + '|'), self.names)) + 169 if e_name in self.names: + 170 res[e_name].append(e_name) + 171 return res + 172 + 173 @property + 174 def covobs(self): + 175 return self._covobs 176 - 177 Parameters - 178 ---------- - 179 S : float - 180 specifies a custom value for the parameter S (default 2.0). - 181 If set to 0 it is assumed that the data exhibits no - 182 autocorrelation. In this case the error estimates coincides - 183 with the sample standard error. - 184 tau_exp : float - 185 positive value triggers the critical slowing down analysis - 186 (default 0.0). - 187 N_sigma : float - 188 number of standard deviations from zero until the tail is - 189 attached to the autocorrelation function (default 1). - 190 fft : bool - 191 determines whether the fft algorithm is used for the computation - 192 of the autocorrelation function (default True) - 193 """ - 194 - 195 e_content = self.e_content - 196 self.e_dvalue = {} - 197 self.e_ddvalue = {} - 198 self.e_tauint = {} - 199 self.e_dtauint = {} - 200 self.e_windowsize = {} - 201 self.e_n_tauint = {} - 202 self.e_n_dtauint = {} - 203 e_gamma = {} - 204 self.e_rho = {} - 205 self.e_drho = {} - 206 self._dvalue = 0 - 207 self.ddvalue = 0 - 208 - 209 self.S = {} - 210 self.tau_exp = {} - 211 self.N_sigma = {} - 212 - 213 if kwargs.get('fft') is False: - 214 fft = False - 215 else: - 216 fft = True - 217 - 218 def _parse_kwarg(kwarg_name): - 219 if kwarg_name in kwargs: - 220 tmp = kwargs.get(kwarg_name) - 221 if isinstance(tmp, (int, float)): - 222 if tmp < 0: - 223 raise Exception(kwarg_name + ' has to be larger or equal to 0.') - 224 for e, e_name in enumerate(self.e_names): - 225 getattr(self, kwarg_name)[e_name] = tmp - 226 else: - 227 raise TypeError(kwarg_name + ' is not in proper format.') - 228 else: - 229 for e, e_name in enumerate(self.e_names): - 230 if e_name in getattr(Obs, kwarg_name + '_dict'): - 231 getattr(self, kwarg_name)[e_name] = getattr(Obs, kwarg_name + '_dict')[e_name] - 232 else: - 233 getattr(self, kwarg_name)[e_name] = getattr(Obs, kwarg_name + '_global') - 234 - 235 _parse_kwarg('S') - 236 _parse_kwarg('tau_exp') - 237 _parse_kwarg('N_sigma') - 238 - 239 for e, e_name in enumerate(self.mc_names): - 240 r_length = [] - 241 for r_name in e_content[e_name]: - 242 if isinstance(self.idl[r_name], range): - 243 r_length.append(len(self.idl[r_name])) - 244 else: - 245 r_length.append((self.idl[r_name][-1] - self.idl[r_name][0] + 1)) - 246 - 247 e_N = np.sum([self.shape[r_name] for r_name in e_content[e_name]]) - 248 w_max = 
max(r_length) // 2 - 249 e_gamma[e_name] = np.zeros(w_max) - 250 self.e_rho[e_name] = np.zeros(w_max) - 251 self.e_drho[e_name] = np.zeros(w_max) - 252 - 253 for r_name in e_content[e_name]: - 254 e_gamma[e_name] += self._calc_gamma(self.deltas[r_name], self.idl[r_name], self.shape[r_name], w_max, fft) + 177 def gamma_method(self, **kwargs): + 178 """Estimate the error and related properties of the Obs. + 179 + 180 Parameters + 181 ---------- + 182 S : float + 183 specifies a custom value for the parameter S (default 2.0). + 184 If set to 0 it is assumed that the data exhibits no + 185 autocorrelation. In this case the error estimates coincides + 186 with the sample standard error. + 187 tau_exp : float + 188 positive value triggers the critical slowing down analysis + 189 (default 0.0). + 190 N_sigma : float + 191 number of standard deviations from zero until the tail is + 192 attached to the autocorrelation function (default 1). + 193 fft : bool + 194 determines whether the fft algorithm is used for the computation + 195 of the autocorrelation function (default True) + 196 """ + 197 + 198 e_content = self.e_content + 199 self.e_dvalue = {} + 200 self.e_ddvalue = {} + 201 self.e_tauint = {} + 202 self.e_dtauint = {} + 203 self.e_windowsize = {} + 204 self.e_n_tauint = {} + 205 self.e_n_dtauint = {} + 206 e_gamma = {} + 207 self.e_rho = {} + 208 self.e_drho = {} + 209 self._dvalue = 0 + 210 self.ddvalue = 0 + 211 + 212 self.S = {} + 213 self.tau_exp = {} + 214 self.N_sigma = {} + 215 + 216 if kwargs.get('fft') is False: + 217 fft = False + 218 else: + 219 fft = True + 220 + 221 def _parse_kwarg(kwarg_name): + 222 if kwarg_name in kwargs: + 223 tmp = kwargs.get(kwarg_name) + 224 if isinstance(tmp, (int, float)): + 225 if tmp < 0: + 226 raise Exception(kwarg_name + ' has to be larger or equal to 0.') + 227 for e, e_name in enumerate(self.e_names): + 228 getattr(self, kwarg_name)[e_name] = tmp + 229 else: + 230 raise TypeError(kwarg_name + ' is not in proper format.') + 231 else: + 232 for e, e_name in enumerate(self.e_names): + 233 if e_name in getattr(Obs, kwarg_name + '_dict'): + 234 getattr(self, kwarg_name)[e_name] = getattr(Obs, kwarg_name + '_dict')[e_name] + 235 else: + 236 getattr(self, kwarg_name)[e_name] = getattr(Obs, kwarg_name + '_global') + 237 + 238 _parse_kwarg('S') + 239 _parse_kwarg('tau_exp') + 240 _parse_kwarg('N_sigma') + 241 + 242 for e, e_name in enumerate(self.mc_names): + 243 r_length = [] + 244 for r_name in e_content[e_name]: + 245 if isinstance(self.idl[r_name], range): + 246 r_length.append(len(self.idl[r_name])) + 247 else: + 248 r_length.append((self.idl[r_name][-1] - self.idl[r_name][0] + 1)) + 249 + 250 e_N = np.sum([self.shape[r_name] for r_name in e_content[e_name]]) + 251 w_max = max(r_length) // 2 + 252 e_gamma[e_name] = np.zeros(w_max) + 253 self.e_rho[e_name] = np.zeros(w_max) + 254 self.e_drho[e_name] = np.zeros(w_max) 255 - 256 gamma_div = np.zeros(w_max) - 257 for r_name in e_content[e_name]: - 258 gamma_div += self._calc_gamma(np.ones((self.shape[r_name])), self.idl[r_name], self.shape[r_name], w_max, fft) - 259 gamma_div[gamma_div < 1] = 1.0 - 260 e_gamma[e_name] /= gamma_div[:w_max] - 261 - 262 if np.abs(e_gamma[e_name][0]) < 10 * np.finfo(float).tiny: # Prevent division by zero - 263 self.e_tauint[e_name] = 0.5 - 264 self.e_dtauint[e_name] = 0.0 - 265 self.e_dvalue[e_name] = 0.0 - 266 self.e_ddvalue[e_name] = 0.0 - 267 self.e_windowsize[e_name] = 0 - 268 continue - 269 - 270 gaps = [] - 271 for r_name in e_content[e_name]: - 272 if 
isinstance(self.idl[r_name], range): - 273 gaps.append(1) - 274 else: - 275 gaps.append(np.min(np.diff(self.idl[r_name]))) - 276 - 277 if not np.all([gi == gaps[0] for gi in gaps]): - 278 raise Exception(f"Replica for ensemble {e_name} are not equally spaced.", gaps) - 279 else: - 280 gapsize = gaps[0] - 281 - 282 self.e_rho[e_name] = e_gamma[e_name][:w_max] / e_gamma[e_name][0] - 283 self.e_n_tauint[e_name] = np.cumsum(np.concatenate(([0.5], self.e_rho[e_name][1:]))) - 284 # Make sure no entry of tauint is smaller than 0.5 - 285 self.e_n_tauint[e_name][self.e_n_tauint[e_name] <= 0.5] = 0.5 + np.finfo(np.float64).eps - 286 # hep-lat/0306017 eq. (42) - 287 self.e_n_dtauint[e_name] = self.e_n_tauint[e_name] * 2 * np.sqrt(np.abs(np.arange(w_max) / gapsize + 0.5 - self.e_n_tauint[e_name]) / e_N) - 288 self.e_n_dtauint[e_name][0] = 0.0 - 289 - 290 def _compute_drho(i): - 291 tmp = self.e_rho[e_name][i + 1:w_max] + np.concatenate([self.e_rho[e_name][i - 1::-1], self.e_rho[e_name][1:w_max - 2 * i]]) - 2 * self.e_rho[e_name][i] * self.e_rho[e_name][1:w_max - i] - 292 self.e_drho[e_name][i] = np.sqrt(np.sum(tmp ** 2) / e_N) - 293 - 294 _compute_drho(gapsize) - 295 if self.tau_exp[e_name] > 0: - 296 texp = self.tau_exp[e_name] - 297 # Critical slowing down analysis - 298 if w_max // 2 <= 1: - 299 raise Exception("Need at least 8 samples for tau_exp error analysis") - 300 for n in range(gapsize, w_max // 2, gapsize): - 301 _compute_drho(n + gapsize) - 302 if (self.e_rho[e_name][n] - self.N_sigma[e_name] * self.e_drho[e_name][n]) < 0 or n >= w_max // 2 - 2: - 303 # Bias correction hep-lat/0306017 eq. (49) included - 304 self.e_tauint[e_name] = self.e_n_tauint[e_name][n] * (1 + (2 * n / gapsize + 1) / e_N) / (1 + 1 / e_N) + texp * np.abs(self.e_rho[e_name][n + 1]) # The absolute makes sure, that the tail contribution is always positive - 305 self.e_dtauint[e_name] = np.sqrt(self.e_n_dtauint[e_name][n] ** 2 + texp ** 2 * self.e_drho[e_name][n + 1] ** 2) - 306 # Error of tau_exp neglected so far, missing term: self.e_rho[e_name][n + 1] ** 2 * d_tau_exp ** 2 - 307 self.e_dvalue[e_name] = np.sqrt(2 * self.e_tauint[e_name] * e_gamma[e_name][0] * (1 + 1 / e_N) / e_N) - 308 self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt((n / gapsize + 0.5) / e_N) - 309 self.e_windowsize[e_name] = n - 310 break - 311 else: - 312 if self.S[e_name] == 0.0: - 313 self.e_tauint[e_name] = 0.5 - 314 self.e_dtauint[e_name] = 0.0 - 315 self.e_dvalue[e_name] = np.sqrt(e_gamma[e_name][0] / (e_N - 1)) - 316 self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt(0.5 / e_N) - 317 self.e_windowsize[e_name] = 0 - 318 else: - 319 # Standard automatic windowing procedure - 320 tau = self.S[e_name] / np.log((2 * self.e_n_tauint[e_name][gapsize::gapsize] + 1) / (2 * self.e_n_tauint[e_name][gapsize::gapsize] - 1)) - 321 g_w = np.exp(- np.arange(1, len(tau) + 1) / tau) - tau / np.sqrt(np.arange(1, len(tau) + 1) * e_N) - 322 for n in range(1, w_max): - 323 if n < w_max // 2 - 2: - 324 _compute_drho(gapsize * n + gapsize) - 325 if g_w[n - 1] < 0 or n >= w_max - 1: - 326 n *= gapsize - 327 self.e_tauint[e_name] = self.e_n_tauint[e_name][n] * (1 + (2 * n / gapsize + 1) / e_N) / (1 + 1 / e_N) # Bias correction hep-lat/0306017 eq. 
(49) - 328 self.e_dtauint[e_name] = self.e_n_dtauint[e_name][n] - 329 self.e_dvalue[e_name] = np.sqrt(2 * self.e_tauint[e_name] * e_gamma[e_name][0] * (1 + 1 / e_N) / e_N) - 330 self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt((n / gapsize + 0.5) / e_N) - 331 self.e_windowsize[e_name] = n - 332 break - 333 - 334 self._dvalue += self.e_dvalue[e_name] ** 2 - 335 self.ddvalue += (self.e_dvalue[e_name] * self.e_ddvalue[e_name]) ** 2 + 256 for r_name in e_content[e_name]: + 257 e_gamma[e_name] += self._calc_gamma(self.deltas[r_name], self.idl[r_name], self.shape[r_name], w_max, fft) + 258 + 259 gamma_div = np.zeros(w_max) + 260 for r_name in e_content[e_name]: + 261 gamma_div += self._calc_gamma(np.ones((self.shape[r_name])), self.idl[r_name], self.shape[r_name], w_max, fft) + 262 gamma_div[gamma_div < 1] = 1.0 + 263 e_gamma[e_name] /= gamma_div[:w_max] + 264 + 265 if np.abs(e_gamma[e_name][0]) < 10 * np.finfo(float).tiny: # Prevent division by zero + 266 self.e_tauint[e_name] = 0.5 + 267 self.e_dtauint[e_name] = 0.0 + 268 self.e_dvalue[e_name] = 0.0 + 269 self.e_ddvalue[e_name] = 0.0 + 270 self.e_windowsize[e_name] = 0 + 271 continue + 272 + 273 gaps = [] + 274 for r_name in e_content[e_name]: + 275 if isinstance(self.idl[r_name], range): + 276 gaps.append(1) + 277 else: + 278 gaps.append(np.min(np.diff(self.idl[r_name]))) + 279 + 280 if not np.all([gi == gaps[0] for gi in gaps]): + 281 raise Exception(f"Replica for ensemble {e_name} are not equally spaced.", gaps) + 282 else: + 283 gapsize = gaps[0] + 284 + 285 self.e_rho[e_name] = e_gamma[e_name][:w_max] / e_gamma[e_name][0] + 286 self.e_n_tauint[e_name] = np.cumsum(np.concatenate(([0.5], self.e_rho[e_name][1:]))) + 287 # Make sure no entry of tauint is smaller than 0.5 + 288 self.e_n_tauint[e_name][self.e_n_tauint[e_name] <= 0.5] = 0.5 + np.finfo(np.float64).eps + 289 # hep-lat/0306017 eq. (42) + 290 self.e_n_dtauint[e_name] = self.e_n_tauint[e_name] * 2 * np.sqrt(np.abs(np.arange(w_max) / gapsize + 0.5 - self.e_n_tauint[e_name]) / e_N) + 291 self.e_n_dtauint[e_name][0] = 0.0 + 292 + 293 def _compute_drho(i): + 294 tmp = self.e_rho[e_name][i + 1:w_max] + np.concatenate([self.e_rho[e_name][i - 1::-1], self.e_rho[e_name][1:w_max - 2 * i]]) - 2 * self.e_rho[e_name][i] * self.e_rho[e_name][1:w_max - i] + 295 self.e_drho[e_name][i] = np.sqrt(np.sum(tmp ** 2) / e_N) + 296 + 297 _compute_drho(gapsize) + 298 if self.tau_exp[e_name] > 0: + 299 texp = self.tau_exp[e_name] + 300 # Critical slowing down analysis + 301 if w_max // 2 <= 1: + 302 raise Exception("Need at least 8 samples for tau_exp error analysis") + 303 for n in range(gapsize, w_max // 2, gapsize): + 304 _compute_drho(n + gapsize) + 305 if (self.e_rho[e_name][n] - self.N_sigma[e_name] * self.e_drho[e_name][n]) < 0 or n >= w_max // 2 - 2: + 306 # Bias correction hep-lat/0306017 eq. 
(49) included + 307 self.e_tauint[e_name] = self.e_n_tauint[e_name][n] * (1 + (2 * n / gapsize + 1) / e_N) / (1 + 1 / e_N) + texp * np.abs(self.e_rho[e_name][n + 1]) # The absolute makes sure, that the tail contribution is always positive + 308 self.e_dtauint[e_name] = np.sqrt(self.e_n_dtauint[e_name][n] ** 2 + texp ** 2 * self.e_drho[e_name][n + 1] ** 2) + 309 # Error of tau_exp neglected so far, missing term: self.e_rho[e_name][n + 1] ** 2 * d_tau_exp ** 2 + 310 self.e_dvalue[e_name] = np.sqrt(2 * self.e_tauint[e_name] * e_gamma[e_name][0] * (1 + 1 / e_N) / e_N) + 311 self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt((n / gapsize + 0.5) / e_N) + 312 self.e_windowsize[e_name] = n + 313 break + 314 else: + 315 if self.S[e_name] == 0.0: + 316 self.e_tauint[e_name] = 0.5 + 317 self.e_dtauint[e_name] = 0.0 + 318 self.e_dvalue[e_name] = np.sqrt(e_gamma[e_name][0] / (e_N - 1)) + 319 self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt(0.5 / e_N) + 320 self.e_windowsize[e_name] = 0 + 321 else: + 322 # Standard automatic windowing procedure + 323 tau = self.S[e_name] / np.log((2 * self.e_n_tauint[e_name][gapsize::gapsize] + 1) / (2 * self.e_n_tauint[e_name][gapsize::gapsize] - 1)) + 324 g_w = np.exp(- np.arange(1, len(tau) + 1) / tau) - tau / np.sqrt(np.arange(1, len(tau) + 1) * e_N) + 325 for n in range(1, w_max): + 326 if n < w_max // 2 - 2: + 327 _compute_drho(gapsize * n + gapsize) + 328 if g_w[n - 1] < 0 or n >= w_max - 1: + 329 n *= gapsize + 330 self.e_tauint[e_name] = self.e_n_tauint[e_name][n] * (1 + (2 * n / gapsize + 1) / e_N) / (1 + 1 / e_N) # Bias correction hep-lat/0306017 eq. (49) + 331 self.e_dtauint[e_name] = self.e_n_dtauint[e_name][n] + 332 self.e_dvalue[e_name] = np.sqrt(2 * self.e_tauint[e_name] * e_gamma[e_name][0] * (1 + 1 / e_N) / e_N) + 333 self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt((n / gapsize + 0.5) / e_N) + 334 self.e_windowsize[e_name] = n + 335 break 336 - 337 for e_name in self.cov_names: - 338 self.e_dvalue[e_name] = np.sqrt(self.covobs[e_name].errsq()) - 339 self.e_ddvalue[e_name] = 0 - 340 self._dvalue += self.e_dvalue[e_name]**2 - 341 - 342 self._dvalue = np.sqrt(self._dvalue) - 343 if self._dvalue == 0.0: - 344 self.ddvalue = 0.0 - 345 else: - 346 self.ddvalue = np.sqrt(self.ddvalue) / self._dvalue - 347 return - 348 - 349 def _calc_gamma(self, deltas, idx, shape, w_max, fft): - 350 """Calculate Gamma_{AA} from the deltas, which are defined on idx. - 351 idx is assumed to be a contiguous range (possibly with a stepsize != 1) - 352 - 353 Parameters - 354 ---------- - 355 deltas : list - 356 List of fluctuations - 357 idx : list - 358 List or range of configurations on which the deltas are defined. - 359 shape : int - 360 Number of configurations in idx. - 361 w_max : int - 362 Upper bound for the summation window. - 363 fft : bool - 364 determines whether the fft algorithm is used for the computation - 365 of the autocorrelation function. 
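The windowing logic above is dense, so here is a hedged usage sketch of gamma_method (synthetic, mildly autocorrelated data; the ensemble name 'ens_A' is arbitrary). S, tau_exp and N_sigma can be passed per call exactly as `_parse_kwarg` consumes them:

```python
import numpy as np
import pyerrors as pe

# Synthetic chain with some autocorrelation (illustration only).
rng = np.random.default_rng(1)
samples = np.convolve(rng.normal(0.0, 1.0, 1200), np.exp(-np.arange(20) / 4.0))[:1000] + 1.0

obs = pe.Obs([samples], ['ens_A'])
obs.gamma_method(S=3.0)            # custom S for the automatic windowing procedure
print(obs.value, obs.dvalue)
print(obs.e_tauint['ens_A'])       # integrated autocorrelation time per ensemble
obs.gamma_method(tau_exp=10.0)     # rerun with the critical-slowing-down tail attached
```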
- 366 """ - 367 gamma = np.zeros(w_max) - 368 deltas = _expand_deltas(deltas, idx, shape) - 369 new_shape = len(deltas) - 370 if fft: - 371 max_gamma = min(new_shape, w_max) - 372 # The padding for the fft has to be even - 373 padding = new_shape + max_gamma + (new_shape + max_gamma) % 2 - 374 gamma[:max_gamma] += np.fft.irfft(np.abs(np.fft.rfft(deltas, padding)) ** 2)[:max_gamma] - 375 else: - 376 for n in range(w_max): - 377 if new_shape - n >= 0: - 378 gamma[n] += deltas[0:new_shape - n].dot(deltas[n:new_shape]) - 379 - 380 return gamma - 381 - 382 def details(self, ens_content=True): - 383 """Output detailed properties of the Obs. + 337 self._dvalue += self.e_dvalue[e_name] ** 2 + 338 self.ddvalue += (self.e_dvalue[e_name] * self.e_ddvalue[e_name]) ** 2 + 339 + 340 for e_name in self.cov_names: + 341 self.e_dvalue[e_name] = np.sqrt(self.covobs[e_name].errsq()) + 342 self.e_ddvalue[e_name] = 0 + 343 self._dvalue += self.e_dvalue[e_name]**2 + 344 + 345 self._dvalue = np.sqrt(self._dvalue) + 346 if self._dvalue == 0.0: + 347 self.ddvalue = 0.0 + 348 else: + 349 self.ddvalue = np.sqrt(self.ddvalue) / self._dvalue + 350 return + 351 + 352 def _calc_gamma(self, deltas, idx, shape, w_max, fft): + 353 """Calculate Gamma_{AA} from the deltas, which are defined on idx. + 354 idx is assumed to be a contiguous range (possibly with a stepsize != 1) + 355 + 356 Parameters + 357 ---------- + 358 deltas : list + 359 List of fluctuations + 360 idx : list + 361 List or range of configurations on which the deltas are defined. + 362 shape : int + 363 Number of configurations in idx. + 364 w_max : int + 365 Upper bound for the summation window. + 366 fft : bool + 367 determines whether the fft algorithm is used for the computation + 368 of the autocorrelation function. + 369 """ + 370 gamma = np.zeros(w_max) + 371 deltas = _expand_deltas(deltas, idx, shape) + 372 new_shape = len(deltas) + 373 if fft: + 374 max_gamma = min(new_shape, w_max) + 375 # The padding for the fft has to be even + 376 padding = new_shape + max_gamma + (new_shape + max_gamma) % 2 + 377 gamma[:max_gamma] += np.fft.irfft(np.abs(np.fft.rfft(deltas, padding)) ** 2)[:max_gamma] + 378 else: + 379 for n in range(w_max): + 380 if new_shape - n >= 0: + 381 gamma[n] += deltas[0:new_shape - n].dot(deltas[n:new_shape]) + 382 + 383 return gamma 384 - 385 Parameters - 386 ---------- - 387 ens_content : bool - 388 print details about the ensembles and replica if true. 
- 389 """ - 390 if self.tag is not None: - 391 print("Description:", self.tag) - 392 if not hasattr(self, 'e_dvalue'): - 393 print('Result\t %3.8e' % (self.value)) - 394 else: - 395 if self.value == 0.0: - 396 percentage = np.nan - 397 else: - 398 percentage = np.abs(self._dvalue / self.value) * 100 - 399 print('Result\t %3.8e +/- %3.8e +/- %3.8e (%3.3f%%)' % (self.value, self._dvalue, self.ddvalue, percentage)) - 400 if len(self.e_names) > 1: - 401 print(' Ensemble errors:') - 402 e_content = self.e_content - 403 for e_name in self.mc_names: - 404 if isinstance(self.idl[e_content[e_name][0]], range): - 405 gap = self.idl[e_content[e_name][0]].step - 406 else: - 407 gap = np.min(np.diff(self.idl[e_content[e_name][0]])) - 408 - 409 if len(self.e_names) > 1: - 410 print('', e_name, '\t %3.6e +/- %3.6e' % (self.e_dvalue[e_name], self.e_ddvalue[e_name])) - 411 tau_string = " \N{GREEK SMALL LETTER TAU}_int\t " + _format_uncertainty(self.e_tauint[e_name], self.e_dtauint[e_name]) - 412 tau_string += f" in units of {gap} config" - 413 if gap > 1: - 414 tau_string += "s" - 415 if self.tau_exp[e_name] > 0: - 416 tau_string = f"{tau_string: <45}" + '\t(\N{GREEK SMALL LETTER TAU}_exp=%3.2f, N_\N{GREEK SMALL LETTER SIGMA}=%1.0i)' % (self.tau_exp[e_name], self.N_sigma[e_name]) - 417 else: - 418 tau_string = f"{tau_string: <45}" + '\t(S=%3.2f)' % (self.S[e_name]) - 419 print(tau_string) - 420 for e_name in self.cov_names: - 421 print('', e_name, '\t %3.8e' % (self.e_dvalue[e_name])) - 422 if ens_content is True: - 423 if len(self.e_names) == 1: - 424 print(self.N, 'samples in', len(self.e_names), 'ensemble:') - 425 else: - 426 print(self.N, 'samples in', len(self.e_names), 'ensembles:') - 427 my_string_list = [] - 428 for key, value in sorted(self.e_content.items()): - 429 if key not in self.covobs: - 430 my_string = ' ' + "\u00B7 Ensemble '" + key + "' " - 431 if len(value) == 1: - 432 my_string += f': {self.shape[value[0]]} configurations' - 433 if isinstance(self.idl[value[0]], range): - 434 my_string += f' (from {self.idl[value[0]].start} to {self.idl[value[0]][-1]}' + int(self.idl[value[0]].step != 1) * f' in steps of {self.idl[value[0]].step}' + ')' - 435 else: - 436 my_string += f' (irregular range from {self.idl[value[0]][0]} to {self.idl[value[0]][-1]})' - 437 else: - 438 sublist = [] - 439 for v in value: - 440 my_substring = ' ' + "\u00B7 Replicum '" + v[len(key) + 1:] + "' " - 441 my_substring += f': {self.shape[v]} configurations' - 442 if isinstance(self.idl[v], range): - 443 my_substring += f' (from {self.idl[v].start} to {self.idl[v][-1]}' + int(self.idl[v].step != 1) * f' in steps of {self.idl[v].step}' + ')' - 444 else: - 445 my_substring += f' (irregular range from {self.idl[v][0]} to {self.idl[v][-1]})' - 446 sublist.append(my_substring) - 447 - 448 my_string += '\n' + '\n'.join(sublist) - 449 else: - 450 my_string = ' ' + "\u00B7 Covobs '" + key + "' " - 451 my_string_list.append(my_string) - 452 print('\n'.join(my_string_list)) - 453 - 454 def reweight(self, weight): - 455 """Reweight the obs with given rewighting factors. + 385 def details(self, ens_content=True): + 386 """Output detailed properties of the Obs. + 387 + 388 Parameters + 389 ---------- + 390 ens_content : bool + 391 print details about the ensembles and replica if true. 
+ 392 """ + 393 if self.tag is not None: + 394 print("Description:", self.tag) + 395 if not hasattr(self, 'e_dvalue'): + 396 print('Result\t %3.8e' % (self.value)) + 397 else: + 398 if self.value == 0.0: + 399 percentage = np.nan + 400 else: + 401 percentage = np.abs(self._dvalue / self.value) * 100 + 402 print('Result\t %3.8e +/- %3.8e +/- %3.8e (%3.3f%%)' % (self.value, self._dvalue, self.ddvalue, percentage)) + 403 if len(self.e_names) > 1: + 404 print(' Ensemble errors:') + 405 e_content = self.e_content + 406 for e_name in self.mc_names: + 407 if isinstance(self.idl[e_content[e_name][0]], range): + 408 gap = self.idl[e_content[e_name][0]].step + 409 else: + 410 gap = np.min(np.diff(self.idl[e_content[e_name][0]])) + 411 + 412 if len(self.e_names) > 1: + 413 print('', e_name, '\t %3.6e +/- %3.6e' % (self.e_dvalue[e_name], self.e_ddvalue[e_name])) + 414 tau_string = " \N{GREEK SMALL LETTER TAU}_int\t " + _format_uncertainty(self.e_tauint[e_name], self.e_dtauint[e_name]) + 415 tau_string += f" in units of {gap} config" + 416 if gap > 1: + 417 tau_string += "s" + 418 if self.tau_exp[e_name] > 0: + 419 tau_string = f"{tau_string: <45}" + '\t(\N{GREEK SMALL LETTER TAU}_exp=%3.2f, N_\N{GREEK SMALL LETTER SIGMA}=%1.0i)' % (self.tau_exp[e_name], self.N_sigma[e_name]) + 420 else: + 421 tau_string = f"{tau_string: <45}" + '\t(S=%3.2f)' % (self.S[e_name]) + 422 print(tau_string) + 423 for e_name in self.cov_names: + 424 print('', e_name, '\t %3.8e' % (self.e_dvalue[e_name])) + 425 if ens_content is True: + 426 if len(self.e_names) == 1: + 427 print(self.N, 'samples in', len(self.e_names), 'ensemble:') + 428 else: + 429 print(self.N, 'samples in', len(self.e_names), 'ensembles:') + 430 my_string_list = [] + 431 for key, value in sorted(self.e_content.items()): + 432 if key not in self.covobs: + 433 my_string = ' ' + "\u00B7 Ensemble '" + key + "' " + 434 if len(value) == 1: + 435 my_string += f': {self.shape[value[0]]} configurations' + 436 if isinstance(self.idl[value[0]], range): + 437 my_string += f' (from {self.idl[value[0]].start} to {self.idl[value[0]][-1]}' + int(self.idl[value[0]].step != 1) * f' in steps of {self.idl[value[0]].step}' + ')' + 438 else: + 439 my_string += f' (irregular range from {self.idl[value[0]][0]} to {self.idl[value[0]][-1]})' + 440 else: + 441 sublist = [] + 442 for v in value: + 443 my_substring = ' ' + "\u00B7 Replicum '" + v[len(key) + 1:] + "' " + 444 my_substring += f': {self.shape[v]} configurations' + 445 if isinstance(self.idl[v], range): + 446 my_substring += f' (from {self.idl[v].start} to {self.idl[v][-1]}' + int(self.idl[v].step != 1) * f' in steps of {self.idl[v].step}' + ')' + 447 else: + 448 my_substring += f' (irregular range from {self.idl[v][0]} to {self.idl[v][-1]})' + 449 sublist.append(my_substring) + 450 + 451 my_string += '\n' + '\n'.join(sublist) + 452 else: + 453 my_string = ' ' + "\u00B7 Covobs '" + key + "' " + 454 my_string_list.append(my_string) + 455 print('\n'.join(my_string_list)) 456 - 457 Parameters - 458 ---------- - 459 weight : Obs - 460 Reweighting factor. An Observable that has to be defined on a superset of the - 461 configurations in obs[i].idl for all i. - 462 all_configs : bool - 463 if True, the reweighted observables are normalized by the average of - 464 the reweighting factor on all configurations in weight.idl and not - 465 on the configurations in obs[i].idl. Default False. 
- 466 """ - 467 return reweight(weight, [self])[0] - 468 - 469 def is_zero_within_error(self, sigma=1): - 470 """Checks whether the observable is zero within 'sigma' standard errors. + 457 def reweight(self, weight): + 458 """Reweight the obs with given rewighting factors. + 459 + 460 Parameters + 461 ---------- + 462 weight : Obs + 463 Reweighting factor. An Observable that has to be defined on a superset of the + 464 configurations in obs[i].idl for all i. + 465 all_configs : bool + 466 if True, the reweighted observables are normalized by the average of + 467 the reweighting factor on all configurations in weight.idl and not + 468 on the configurations in obs[i].idl. Default False. + 469 """ + 470 return reweight(weight, [self])[0] 471 - 472 Parameters - 473 ---------- - 474 sigma : int - 475 Number of standard errors used for the check. - 476 - 477 Works only properly when the gamma method was run. - 478 """ - 479 return self.is_zero() or np.abs(self.value) <= sigma * self._dvalue - 480 - 481 def is_zero(self, atol=1e-10): - 482 """Checks whether the observable is zero within a given tolerance. + 472 def is_zero_within_error(self, sigma=1): + 473 """Checks whether the observable is zero within 'sigma' standard errors. + 474 + 475 Parameters + 476 ---------- + 477 sigma : int + 478 Number of standard errors used for the check. + 479 + 480 Works only properly when the gamma method was run. + 481 """ + 482 return self.is_zero() or np.abs(self.value) <= sigma * self._dvalue 483 - 484 Parameters - 485 ---------- - 486 atol : float - 487 Absolute tolerance (for details see numpy documentation). - 488 """ - 489 return np.isclose(0.0, self.value, 1e-14, atol) and all(np.allclose(0.0, delta, 1e-14, atol) for delta in self.deltas.values()) and all(np.allclose(0.0, delta.errsq(), 1e-14, atol) for delta in self.covobs.values()) - 490 - 491 def plot_tauint(self, save=None): - 492 """Plot integrated autocorrelation time for each ensemble. + 484 def is_zero(self, atol=1e-10): + 485 """Checks whether the observable is zero within a given tolerance. + 486 + 487 Parameters + 488 ---------- + 489 atol : float + 490 Absolute tolerance (for details see numpy documentation). + 491 """ + 492 return np.isclose(0.0, self.value, 1e-14, atol) and all(np.allclose(0.0, delta, 1e-14, atol) for delta in self.deltas.values()) and all(np.allclose(0.0, delta.errsq(), 1e-14, atol) for delta in self.covobs.values()) 493 - 494 Parameters - 495 ---------- - 496 save : str - 497 saves the figure to a file named 'save' if. 
- 498 """ - 499 if not hasattr(self, 'e_dvalue'): - 500 raise Exception('Run the gamma method first.') - 501 - 502 for e, e_name in enumerate(self.mc_names): - 503 fig = plt.figure() - 504 plt.xlabel(r'$W$') - 505 plt.ylabel(r'$\tau_\mathrm{int}$') - 506 length = int(len(self.e_n_tauint[e_name])) - 507 if self.tau_exp[e_name] > 0: - 508 base = self.e_n_tauint[e_name][self.e_windowsize[e_name]] - 509 x_help = np.arange(2 * self.tau_exp[e_name]) - 510 y_help = (x_help + 1) * np.abs(self.e_rho[e_name][self.e_windowsize[e_name] + 1]) * (1 - x_help / (2 * (2 * self.tau_exp[e_name] - 1))) + base - 511 x_arr = np.arange(self.e_windowsize[e_name] + 1, self.e_windowsize[e_name] + 1 + 2 * self.tau_exp[e_name]) - 512 plt.plot(x_arr, y_help, 'C' + str(e), linewidth=1, ls='--', marker=',') - 513 plt.errorbar([self.e_windowsize[e_name] + 2 * self.tau_exp[e_name]], [self.e_tauint[e_name]], - 514 yerr=[self.e_dtauint[e_name]], fmt='C' + str(e), linewidth=1, capsize=2, marker='o', mfc=plt.rcParams['axes.facecolor']) - 515 xmax = self.e_windowsize[e_name] + 2 * self.tau_exp[e_name] + 1.5 - 516 label = e_name + r', $\tau_\mathrm{exp}$=' + str(np.around(self.tau_exp[e_name], decimals=2)) - 517 else: - 518 label = e_name + ', S=' + str(np.around(self.S[e_name], decimals=2)) - 519 xmax = max(10.5, 2 * self.e_windowsize[e_name] - 0.5) - 520 - 521 plt.errorbar(np.arange(length)[:int(xmax) + 1], self.e_n_tauint[e_name][:int(xmax) + 1], yerr=self.e_n_dtauint[e_name][:int(xmax) + 1], linewidth=1, capsize=2, label=label) - 522 plt.axvline(x=self.e_windowsize[e_name], color='C' + str(e), alpha=0.5, marker=',', ls='--') - 523 plt.legend() - 524 plt.xlim(-0.5, xmax) - 525 ylim = plt.ylim() - 526 plt.ylim(bottom=0.0, top=max(1.0, ylim[1])) - 527 plt.draw() - 528 if save: - 529 fig.savefig(save + "_" + str(e)) - 530 - 531 def plot_rho(self, save=None): - 532 """Plot normalized autocorrelation function time for each ensemble. + 494 def plot_tauint(self, save=None): + 495 """Plot integrated autocorrelation time for each ensemble. + 496 + 497 Parameters + 498 ---------- + 499 save : str + 500 saves the figure to a file named 'save' if. 
+ 501 """ + 502 if not hasattr(self, 'e_dvalue'): + 503 raise Exception('Run the gamma method first.') + 504 + 505 for e, e_name in enumerate(self.mc_names): + 506 fig = plt.figure() + 507 plt.xlabel(r'$W$') + 508 plt.ylabel(r'$\tau_\mathrm{int}$') + 509 length = int(len(self.e_n_tauint[e_name])) + 510 if self.tau_exp[e_name] > 0: + 511 base = self.e_n_tauint[e_name][self.e_windowsize[e_name]] + 512 x_help = np.arange(2 * self.tau_exp[e_name]) + 513 y_help = (x_help + 1) * np.abs(self.e_rho[e_name][self.e_windowsize[e_name] + 1]) * (1 - x_help / (2 * (2 * self.tau_exp[e_name] - 1))) + base + 514 x_arr = np.arange(self.e_windowsize[e_name] + 1, self.e_windowsize[e_name] + 1 + 2 * self.tau_exp[e_name]) + 515 plt.plot(x_arr, y_help, 'C' + str(e), linewidth=1, ls='--', marker=',') + 516 plt.errorbar([self.e_windowsize[e_name] + 2 * self.tau_exp[e_name]], [self.e_tauint[e_name]], + 517 yerr=[self.e_dtauint[e_name]], fmt='C' + str(e), linewidth=1, capsize=2, marker='o', mfc=plt.rcParams['axes.facecolor']) + 518 xmax = self.e_windowsize[e_name] + 2 * self.tau_exp[e_name] + 1.5 + 519 label = e_name + r', $\tau_\mathrm{exp}$=' + str(np.around(self.tau_exp[e_name], decimals=2)) + 520 else: + 521 label = e_name + ', S=' + str(np.around(self.S[e_name], decimals=2)) + 522 xmax = max(10.5, 2 * self.e_windowsize[e_name] - 0.5) + 523 + 524 plt.errorbar(np.arange(length)[:int(xmax) + 1], self.e_n_tauint[e_name][:int(xmax) + 1], yerr=self.e_n_dtauint[e_name][:int(xmax) + 1], linewidth=1, capsize=2, label=label) + 525 plt.axvline(x=self.e_windowsize[e_name], color='C' + str(e), alpha=0.5, marker=',', ls='--') + 526 plt.legend() + 527 plt.xlim(-0.5, xmax) + 528 ylim = plt.ylim() + 529 plt.ylim(bottom=0.0, top=max(1.0, ylim[1])) + 530 plt.draw() + 531 if save: + 532 fig.savefig(save + "_" + str(e)) 533 - 534 Parameters - 535 ---------- - 536 save : str - 537 saves the figure to a file named 'save' if. 
- 538 """ - 539 if not hasattr(self, 'e_dvalue'): - 540 raise Exception('Run the gamma method first.') - 541 for e, e_name in enumerate(self.mc_names): - 542 fig = plt.figure() - 543 plt.xlabel('W') - 544 plt.ylabel('rho') - 545 length = int(len(self.e_drho[e_name])) - 546 plt.errorbar(np.arange(length), self.e_rho[e_name][:length], yerr=self.e_drho[e_name][:], linewidth=1, capsize=2) - 547 plt.axvline(x=self.e_windowsize[e_name], color='r', alpha=0.25, ls='--', marker=',') - 548 if self.tau_exp[e_name] > 0: - 549 plt.plot([self.e_windowsize[e_name] + 1, self.e_windowsize[e_name] + 1 + 2 * self.tau_exp[e_name]], - 550 [self.e_rho[e_name][self.e_windowsize[e_name] + 1], 0], 'k-', lw=1) - 551 xmax = self.e_windowsize[e_name] + 2 * self.tau_exp[e_name] + 1.5 - 552 plt.title('Rho ' + e_name + r', tau\_exp=' + str(np.around(self.tau_exp[e_name], decimals=2))) - 553 else: - 554 xmax = max(10.5, 2 * self.e_windowsize[e_name] - 0.5) - 555 plt.title('Rho ' + e_name + ', S=' + str(np.around(self.S[e_name], decimals=2))) - 556 plt.plot([-0.5, xmax], [0, 0], 'k--', lw=1) - 557 plt.xlim(-0.5, xmax) - 558 plt.draw() - 559 if save: - 560 fig.savefig(save + "_" + str(e)) - 561 - 562 def plot_rep_dist(self): - 563 """Plot replica distribution for each ensemble with more than one replicum.""" - 564 if not hasattr(self, 'e_dvalue'): - 565 raise Exception('Run the gamma method first.') - 566 for e, e_name in enumerate(self.mc_names): - 567 if len(self.e_content[e_name]) == 1: - 568 print('No replica distribution for a single replicum (', e_name, ')') - 569 continue - 570 r_length = [] - 571 sub_r_mean = 0 - 572 for r, r_name in enumerate(self.e_content[e_name]): - 573 r_length.append(len(self.deltas[r_name])) - 574 sub_r_mean += self.shape[r_name] * self.r_values[r_name] - 575 e_N = np.sum(r_length) - 576 sub_r_mean /= e_N - 577 arr = np.zeros(len(self.e_content[e_name])) - 578 for r, r_name in enumerate(self.e_content[e_name]): - 579 arr[r] = (self.r_values[r_name] - sub_r_mean) / (self.e_dvalue[e_name] * np.sqrt(e_N / self.shape[r_name] - 1)) - 580 plt.hist(arr, rwidth=0.8, bins=len(self.e_content[e_name])) - 581 plt.title('Replica distribution' + e_name + ' (mean=0, var=1)') - 582 plt.draw() - 583 - 584 def plot_history(self, expand=True): - 585 """Plot derived Monte Carlo history for each ensemble + 534 def plot_rho(self, save=None): + 535 """Plot normalized autocorrelation function time for each ensemble. + 536 + 537 Parameters + 538 ---------- + 539 save : str + 540 saves the figure to a file named 'save' if. 
+ 541 """ + 542 if not hasattr(self, 'e_dvalue'): + 543 raise Exception('Run the gamma method first.') + 544 for e, e_name in enumerate(self.mc_names): + 545 fig = plt.figure() + 546 plt.xlabel('W') + 547 plt.ylabel('rho') + 548 length = int(len(self.e_drho[e_name])) + 549 plt.errorbar(np.arange(length), self.e_rho[e_name][:length], yerr=self.e_drho[e_name][:], linewidth=1, capsize=2) + 550 plt.axvline(x=self.e_windowsize[e_name], color='r', alpha=0.25, ls='--', marker=',') + 551 if self.tau_exp[e_name] > 0: + 552 plt.plot([self.e_windowsize[e_name] + 1, self.e_windowsize[e_name] + 1 + 2 * self.tau_exp[e_name]], + 553 [self.e_rho[e_name][self.e_windowsize[e_name] + 1], 0], 'k-', lw=1) + 554 xmax = self.e_windowsize[e_name] + 2 * self.tau_exp[e_name] + 1.5 + 555 plt.title('Rho ' + e_name + r', tau\_exp=' + str(np.around(self.tau_exp[e_name], decimals=2))) + 556 else: + 557 xmax = max(10.5, 2 * self.e_windowsize[e_name] - 0.5) + 558 plt.title('Rho ' + e_name + ', S=' + str(np.around(self.S[e_name], decimals=2))) + 559 plt.plot([-0.5, xmax], [0, 0], 'k--', lw=1) + 560 plt.xlim(-0.5, xmax) + 561 plt.draw() + 562 if save: + 563 fig.savefig(save + "_" + str(e)) + 564 + 565 def plot_rep_dist(self): + 566 """Plot replica distribution for each ensemble with more than one replicum.""" + 567 if not hasattr(self, 'e_dvalue'): + 568 raise Exception('Run the gamma method first.') + 569 for e, e_name in enumerate(self.mc_names): + 570 if len(self.e_content[e_name]) == 1: + 571 print('No replica distribution for a single replicum (', e_name, ')') + 572 continue + 573 r_length = [] + 574 sub_r_mean = 0 + 575 for r, r_name in enumerate(self.e_content[e_name]): + 576 r_length.append(len(self.deltas[r_name])) + 577 sub_r_mean += self.shape[r_name] * self.r_values[r_name] + 578 e_N = np.sum(r_length) + 579 sub_r_mean /= e_N + 580 arr = np.zeros(len(self.e_content[e_name])) + 581 for r, r_name in enumerate(self.e_content[e_name]): + 582 arr[r] = (self.r_values[r_name] - sub_r_mean) / (self.e_dvalue[e_name] * np.sqrt(e_N / self.shape[r_name] - 1)) + 583 plt.hist(arr, rwidth=0.8, bins=len(self.e_content[e_name])) + 584 plt.title('Replica distribution' + e_name + ' (mean=0, var=1)') + 585 plt.draw() 586 - 587 Parameters - 588 ---------- - 589 expand : bool - 590 show expanded history for irregular Monte Carlo chains (default: True). - 591 """ - 592 for e, e_name in enumerate(self.mc_names): - 593 plt.figure() - 594 r_length = [] - 595 tmp = [] - 596 tmp_expanded = [] - 597 for r, r_name in enumerate(self.e_content[e_name]): - 598 tmp.append(self.deltas[r_name] + self.r_values[r_name]) - 599 if expand: - 600 tmp_expanded.append(_expand_deltas(self.deltas[r_name], list(self.idl[r_name]), self.shape[r_name]) + self.r_values[r_name]) - 601 r_length.append(len(tmp_expanded[-1])) - 602 else: - 603 r_length.append(len(tmp[-1])) - 604 e_N = np.sum(r_length) - 605 x = np.arange(e_N) - 606 y_test = np.concatenate(tmp, axis=0) - 607 if expand: - 608 y = np.concatenate(tmp_expanded, axis=0) - 609 else: - 610 y = y_test - 611 plt.errorbar(x, y, fmt='.', markersize=3) - 612 plt.xlim(-0.5, e_N - 0.5) - 613 plt.title(e_name + f'\nskew: {skew(y_test):.3f} (p={skewtest(y_test).pvalue:.3f}), kurtosis: {kurtosis(y_test):.3f} (p={kurtosistest(y_test).pvalue:.3f})') - 614 plt.draw() - 615 - 616 def plot_piechart(self, save=None): - 617 """Plot piechart which shows the fractional contribution of each - 618 ensemble to the error and returns a dictionary containing the fractions. 
- 619 - 620 Parameters - 621 ---------- - 622 save : str - 623 saves the figure to a file named 'save' if. - 624 """ - 625 if not hasattr(self, 'e_dvalue'): - 626 raise Exception('Run the gamma method first.') - 627 if np.isclose(0.0, self._dvalue, atol=1e-15): - 628 raise Exception('Error is 0.0') - 629 labels = self.e_names - 630 sizes = [self.e_dvalue[name] ** 2 for name in labels] / self._dvalue ** 2 - 631 fig1, ax1 = plt.subplots() - 632 ax1.pie(sizes, labels=labels, startangle=90, normalize=True) - 633 ax1.axis('equal') - 634 plt.draw() - 635 if save: - 636 fig1.savefig(save) - 637 - 638 return dict(zip(self.e_names, sizes)) - 639 - 640 def dump(self, filename, datatype="json.gz", description="", **kwargs): - 641 """Dump the Obs to a file 'name' of chosen format. + 587 def plot_history(self, expand=True): + 588 """Plot derived Monte Carlo history for each ensemble + 589 + 590 Parameters + 591 ---------- + 592 expand : bool + 593 show expanded history for irregular Monte Carlo chains (default: True). + 594 """ + 595 for e, e_name in enumerate(self.mc_names): + 596 plt.figure() + 597 r_length = [] + 598 tmp = [] + 599 tmp_expanded = [] + 600 for r, r_name in enumerate(self.e_content[e_name]): + 601 tmp.append(self.deltas[r_name] + self.r_values[r_name]) + 602 if expand: + 603 tmp_expanded.append(_expand_deltas(self.deltas[r_name], list(self.idl[r_name]), self.shape[r_name]) + self.r_values[r_name]) + 604 r_length.append(len(tmp_expanded[-1])) + 605 else: + 606 r_length.append(len(tmp[-1])) + 607 e_N = np.sum(r_length) + 608 x = np.arange(e_N) + 609 y_test = np.concatenate(tmp, axis=0) + 610 if expand: + 611 y = np.concatenate(tmp_expanded, axis=0) + 612 else: + 613 y = y_test + 614 plt.errorbar(x, y, fmt='.', markersize=3) + 615 plt.xlim(-0.5, e_N - 0.5) + 616 plt.title(e_name + f'\nskew: {skew(y_test):.3f} (p={skewtest(y_test).pvalue:.3f}), kurtosis: {kurtosis(y_test):.3f} (p={kurtosistest(y_test).pvalue:.3f})') + 617 plt.draw() + 618 + 619 def plot_piechart(self, save=None): + 620 """Plot piechart which shows the fractional contribution of each + 621 ensemble to the error and returns a dictionary containing the fractions. + 622 + 623 Parameters + 624 ---------- + 625 save : str + 626 saves the figure to a file named 'save' if. + 627 """ + 628 if not hasattr(self, 'e_dvalue'): + 629 raise Exception('Run the gamma method first.') + 630 if np.isclose(0.0, self._dvalue, atol=1e-15): + 631 raise Exception('Error is 0.0') + 632 labels = self.e_names + 633 sizes = [self.e_dvalue[name] ** 2 for name in labels] / self._dvalue ** 2 + 634 fig1, ax1 = plt.subplots() + 635 ax1.pie(sizes, labels=labels, startangle=90, normalize=True) + 636 ax1.axis('equal') + 637 plt.draw() + 638 if save: + 639 fig1.savefig(save) + 640 + 641 return dict(zip(self.e_names, sizes)) 642 - 643 Parameters - 644 ---------- - 645 filename : str - 646 name of the file to be saved. - 647 datatype : str - 648 Format of the exported file. Supported formats include - 649 "json.gz" and "pickle" - 650 description : str - 651 Description for output file, only relevant for json.gz format. 
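A hedged sketch of both dump formats documented above ('my_obs' is an arbitrary file name; the json.gz branch delegates to dump_to_json from .input.json, the pickle branch appends '.p' as shown):

```python
import numpy as np
import pyerrors as pe

obs = pe.Obs([np.random.default_rng(7).normal(1.0, 0.5, 500)], ['ens_A'])
obs.gamma_method()
obs.dump('my_obs', description='toy observable')   # default json.gz format
obs.dump('my_obs', datatype='pickle', path='.')    # writes ./my_obs.p
```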
- 652 path : str - 653 specifies a custom path for the file (default '.') - 654 """ - 655 if 'path' in kwargs: - 656 file_name = kwargs.get('path') + '/' + filename - 657 else: - 658 file_name = filename - 659 - 660 if datatype == "json.gz": - 661 from .input.json import dump_to_json - 662 dump_to_json([self], file_name, description=description) - 663 elif datatype == "pickle": - 664 with open(file_name + '.p', 'wb') as fb: - 665 pickle.dump(self, fb) - 666 else: - 667 raise Exception("Unknown datatype " + str(datatype)) - 668 - 669 def export_jackknife(self): - 670 """Export jackknife samples from the Obs + 643 def dump(self, filename, datatype="json.gz", description="", **kwargs): + 644 """Dump the Obs to a file 'name' of chosen format. + 645 + 646 Parameters + 647 ---------- + 648 filename : str + 649 name of the file to be saved. + 650 datatype : str + 651 Format of the exported file. Supported formats include + 652 "json.gz" and "pickle" + 653 description : str + 654 Description for output file, only relevant for json.gz format. + 655 path : str + 656 specifies a custom path for the file (default '.') + 657 """ + 658 if 'path' in kwargs: + 659 file_name = kwargs.get('path') + '/' + filename + 660 else: + 661 file_name = filename + 662 + 663 if datatype == "json.gz": + 664 from .input.json import dump_to_json + 665 dump_to_json([self], file_name, description=description) + 666 elif datatype == "pickle": + 667 with open(file_name + '.p', 'wb') as fb: + 668 pickle.dump(self, fb) + 669 else: + 670 raise Exception("Unknown datatype " + str(datatype)) 671 - 672 Returns - 673 ------- - 674 numpy.ndarray - 675 Returns a numpy array of length N + 1 where N is the number of samples - 676 for the given ensemble and replicum. The zeroth entry of the array contains - 677 the mean value of the Obs, entries 1 to N contain the N jackknife samples - 678 derived from the Obs. The current implementation only works for observables - 679 defined on exactly one ensemble and replicum. The derived jackknife samples - 680 should agree with samples from a full jackknife analysis up to O(1/N). - 681 """ - 682 - 683 if len(self.names) != 1: - 684 raise Exception("'export_jackknife' is only implemented for Obs defined on one ensemble and replicum.") + 672 def export_jackknife(self): + 673 """Export jackknife samples from the Obs + 674 + 675 Returns + 676 ------- + 677 numpy.ndarray + 678 Returns a numpy array of length N + 1 where N is the number of samples + 679 for the given ensemble and replicum. The zeroth entry of the array contains + 680 the mean value of the Obs, entries 1 to N contain the N jackknife samples + 681 derived from the Obs. The current implementation only works for observables + 682 defined on exactly one ensemble and replicum. The derived jackknife samples + 683 should agree with samples from a full jackknife analysis up to O(1/N). 
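A sketch of the jackknife export above, together with the textbook jackknife error for cross-checking (toy single-ensemble data; the plain jackknife error ignores autocorrelation):

```python
import numpy as np
import pyerrors as pe

rng = np.random.default_rng(8)
obs = pe.Obs([rng.normal(5.0, 0.2, 100)], ['ens_A'])

jacks = obs.export_jackknife()
print(jacks[0])                  # entry 0 is the mean value of the Obs
n = len(jacks) - 1
# Standard jackknife error from the N samples in entries 1..N.
jack_err = np.sqrt((n - 1) * np.mean((jacks[1:] - jacks[1:].mean()) ** 2))
print(jack_err)
```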
+ 684 """ 685 - 686 name = self.names[0] - 687 full_data = self.deltas[name] + self.r_values[name] - 688 n = full_data.size - 689 mean = self.value - 690 tmp_jacks = np.zeros(n + 1) - 691 tmp_jacks[0] = mean - 692 tmp_jacks[1:] = (n * mean - full_data) / (n - 1) - 693 return tmp_jacks - 694 - 695 def __float__(self): - 696 return float(self.value) + 686 if len(self.names) != 1: + 687 raise Exception("'export_jackknife' is only implemented for Obs defined on one ensemble and replicum.") + 688 + 689 name = self.names[0] + 690 full_data = self.deltas[name] + self.r_values[name] + 691 n = full_data.size + 692 mean = self.value + 693 tmp_jacks = np.zeros(n + 1) + 694 tmp_jacks[0] = mean + 695 tmp_jacks[1:] = (n * mean - full_data) / (n - 1) + 696 return tmp_jacks 697 - 698 def __repr__(self): - 699 return 'Obs[' + str(self) + ']' + 698 def __float__(self): + 699 return float(self.value) 700 - 701 def __str__(self): - 702 return _format_uncertainty(self.value, self._dvalue) + 701 def __repr__(self): + 702 return 'Obs[' + str(self) + ']' 703 - 704 def __hash__(self): - 705 hash_tuple = (np.array([self.value]).astype(np.float32).data.tobytes(),) - 706 hash_tuple += tuple([o.astype(np.float32).data.tobytes() for o in self.deltas.values()]) - 707 hash_tuple += tuple([np.array([o.errsq()]).astype(np.float32).data.tobytes() for o in self.covobs.values()]) - 708 hash_tuple += tuple([o.encode() for o in self.names]) - 709 m = hashlib.md5() - 710 [m.update(o) for o in hash_tuple] - 711 return int(m.hexdigest(), 16) & 0xFFFFFFFF - 712 - 713 # Overload comparisons - 714 def __lt__(self, other): - 715 return self.value < other - 716 - 717 def __le__(self, other): - 718 return self.value <= other + 704 def __str__(self): + 705 return _format_uncertainty(self.value, self._dvalue) + 706 + 707 def __hash__(self): + 708 hash_tuple = (np.array([self.value]).astype(np.float32).data.tobytes(),) + 709 hash_tuple += tuple([o.astype(np.float32).data.tobytes() for o in self.deltas.values()]) + 710 hash_tuple += tuple([np.array([o.errsq()]).astype(np.float32).data.tobytes() for o in self.covobs.values()]) + 711 hash_tuple += tuple([o.encode() for o in self.names]) + 712 m = hashlib.md5() + 713 [m.update(o) for o in hash_tuple] + 714 return int(m.hexdigest(), 16) & 0xFFFFFFFF + 715 + 716 # Overload comparisons + 717 def __lt__(self, other): + 718 return self.value < other 719 - 720 def __gt__(self, other): - 721 return self.value > other + 720 def __le__(self, other): + 721 return self.value <= other 722 - 723 def __ge__(self, other): - 724 return self.value >= other + 723 def __gt__(self, other): + 724 return self.value > other 725 - 726 def __eq__(self, other): - 727 return (self - other).is_zero() + 726 def __ge__(self, other): + 727 return self.value >= other 728 - 729 def __ne__(self, other): - 730 return not (self - other).is_zero() + 729 def __eq__(self, other): + 730 return (self - other).is_zero() 731 - 732 # Overload math operations - 733 def __add__(self, y): - 734 if isinstance(y, Obs): - 735 return derived_observable(lambda x, **kwargs: x[0] + x[1], [self, y], man_grad=[1, 1]) - 736 else: - 737 if isinstance(y, np.ndarray): - 738 return np.array([self + o for o in y]) - 739 elif y.__class__.__name__ in ['Corr', 'CObs']: - 740 return NotImplemented - 741 else: - 742 return derived_observable(lambda x, **kwargs: x[0] + y, [self], man_grad=[1]) - 743 - 744 def __radd__(self, y): - 745 return self + y + 732 def __ne__(self, other): + 733 return not (self - other).is_zero() + 734 + 735 # Overload math operations 
+ 736 def __add__(self, y): + 737 if isinstance(y, Obs): + 738 return derived_observable(lambda x, **kwargs: x[0] + x[1], [self, y], man_grad=[1, 1]) + 739 else: + 740 if isinstance(y, np.ndarray): + 741 return np.array([self + o for o in y]) + 742 elif y.__class__.__name__ in ['Corr', 'CObs']: + 743 return NotImplemented + 744 else: + 745 return derived_observable(lambda x, **kwargs: x[0] + y, [self], man_grad=[1]) 746 - 747 def __mul__(self, y): - 748 if isinstance(y, Obs): - 749 return derived_observable(lambda x, **kwargs: x[0] * x[1], [self, y], man_grad=[y.value, self.value]) - 750 else: - 751 if isinstance(y, np.ndarray): - 752 return np.array([self * o for o in y]) - 753 elif isinstance(y, complex): - 754 return CObs(self * y.real, self * y.imag) - 755 elif y.__class__.__name__ in ['Corr', 'CObs']: - 756 return NotImplemented - 757 else: - 758 return derived_observable(lambda x, **kwargs: x[0] * y, [self], man_grad=[y]) - 759 - 760 def __rmul__(self, y): - 761 return self * y + 747 def __radd__(self, y): + 748 return self + y + 749 + 750 def __mul__(self, y): + 751 if isinstance(y, Obs): + 752 return derived_observable(lambda x, **kwargs: x[0] * x[1], [self, y], man_grad=[y.value, self.value]) + 753 else: + 754 if isinstance(y, np.ndarray): + 755 return np.array([self * o for o in y]) + 756 elif isinstance(y, complex): + 757 return CObs(self * y.real, self * y.imag) + 758 elif y.__class__.__name__ in ['Corr', 'CObs']: + 759 return NotImplemented + 760 else: + 761 return derived_observable(lambda x, **kwargs: x[0] * y, [self], man_grad=[y]) 762 - 763 def __sub__(self, y): - 764 if isinstance(y, Obs): - 765 return derived_observable(lambda x, **kwargs: x[0] - x[1], [self, y], man_grad=[1, -1]) - 766 else: - 767 if isinstance(y, np.ndarray): - 768 return np.array([self - o for o in y]) - 769 elif y.__class__.__name__ in ['Corr', 'CObs']: - 770 return NotImplemented - 771 else: - 772 return derived_observable(lambda x, **kwargs: x[0] - y, [self], man_grad=[1]) - 773 - 774 def __rsub__(self, y): - 775 return -1 * (self - y) + 763 def __rmul__(self, y): + 764 return self * y + 765 + 766 def __sub__(self, y): + 767 if isinstance(y, Obs): + 768 return derived_observable(lambda x, **kwargs: x[0] - x[1], [self, y], man_grad=[1, -1]) + 769 else: + 770 if isinstance(y, np.ndarray): + 771 return np.array([self - o for o in y]) + 772 elif y.__class__.__name__ in ['Corr', 'CObs']: + 773 return NotImplemented + 774 else: + 775 return derived_observable(lambda x, **kwargs: x[0] - y, [self], man_grad=[1]) 776 - 777 def __pos__(self): - 778 return self + 777 def __rsub__(self, y): + 778 return -1 * (self - y) 779 - 780 def __neg__(self): - 781 return -1 * self + 780 def __pos__(self): + 781 return self 782 - 783 def __truediv__(self, y): - 784 if isinstance(y, Obs): - 785 return derived_observable(lambda x, **kwargs: x[0] / x[1], [self, y], man_grad=[1 / y.value, - self.value / y.value ** 2]) - 786 else: - 787 if isinstance(y, np.ndarray): - 788 return np.array([self / o for o in y]) - 789 elif y.__class__.__name__ in ['Corr', 'CObs']: - 790 return NotImplemented - 791 else: - 792 return derived_observable(lambda x, **kwargs: x[0] / y, [self], man_grad=[1 / y]) - 793 - 794 def __rtruediv__(self, y): - 795 if isinstance(y, Obs): - 796 return derived_observable(lambda x, **kwargs: x[0] / x[1], [y, self], man_grad=[1 / self.value, - y.value / self.value ** 2]) - 797 else: - 798 if isinstance(y, np.ndarray): - 799 return np.array([o / self for o in y]) - 800 elif y.__class__.__name__ in ['Corr', 'CObs']: 
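[Editor's note: the overloaded operators above hand analytic gradients to derived_observable, e.g. man_grad=[y.value, self.value] for a product. A sketch of how such gradients enter linear error propagation, with invented numbers; pyerrors itself propagates per-configuration deltas instead, which additionally captures correlations and autocorrelations:]

import numpy as np

a, da = 2.0, 0.1   # toy value and error of a first observable
b, db = 3.0, 0.2   # toy value and error of a second, independent observable

# Product rule gradients [b, a], combined in quadrature for independent inputs.
f = a * b
df = np.sqrt((b * da) ** 2 + (a * db) ** 2)
assert np.isclose(df, 0.5)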
- 801 return NotImplemented - 802 else: - 803 return derived_observable(lambda x, **kwargs: y / x[0], [self], man_grad=[-y / self.value ** 2]) - 804 - 805 def __pow__(self, y): - 806 if isinstance(y, Obs): - 807 return derived_observable(lambda x: x[0] ** x[1], [self, y]) - 808 else: - 809 return derived_observable(lambda x: x[0] ** y, [self]) - 810 - 811 def __rpow__(self, y): - 812 if isinstance(y, Obs): - 813 return derived_observable(lambda x: x[0] ** x[1], [y, self]) - 814 else: - 815 return derived_observable(lambda x: y ** x[0], [self]) - 816 - 817 def __abs__(self): - 818 return derived_observable(lambda x: anp.abs(x[0]), [self]) + 783 def __neg__(self): + 784 return -1 * self + 785 + 786 def __truediv__(self, y): + 787 if isinstance(y, Obs): + 788 return derived_observable(lambda x, **kwargs: x[0] / x[1], [self, y], man_grad=[1 / y.value, - self.value / y.value ** 2]) + 789 else: + 790 if isinstance(y, np.ndarray): + 791 return np.array([self / o for o in y]) + 792 elif y.__class__.__name__ in ['Corr', 'CObs']: + 793 return NotImplemented + 794 else: + 795 return derived_observable(lambda x, **kwargs: x[0] / y, [self], man_grad=[1 / y]) + 796 + 797 def __rtruediv__(self, y): + 798 if isinstance(y, Obs): + 799 return derived_observable(lambda x, **kwargs: x[0] / x[1], [y, self], man_grad=[1 / self.value, - y.value / self.value ** 2]) + 800 else: + 801 if isinstance(y, np.ndarray): + 802 return np.array([o / self for o in y]) + 803 elif y.__class__.__name__ in ['Corr', 'CObs']: + 804 return NotImplemented + 805 else: + 806 return derived_observable(lambda x, **kwargs: y / x[0], [self], man_grad=[-y / self.value ** 2]) + 807 + 808 def __pow__(self, y): + 809 if isinstance(y, Obs): + 810 return derived_observable(lambda x: x[0] ** x[1], [self, y]) + 811 else: + 812 return derived_observable(lambda x: x[0] ** y, [self]) + 813 + 814 def __rpow__(self, y): + 815 if isinstance(y, Obs): + 816 return derived_observable(lambda x: x[0] ** x[1], [y, self]) + 817 else: + 818 return derived_observable(lambda x: y ** x[0], [self]) 819 - 820 # Overload numpy functions - 821 def sqrt(self): - 822 return derived_observable(lambda x, **kwargs: np.sqrt(x[0]), [self], man_grad=[1 / 2 / np.sqrt(self.value)]) - 823 - 824 def log(self): - 825 return derived_observable(lambda x, **kwargs: np.log(x[0]), [self], man_grad=[1 / self.value]) + 820 def __abs__(self): + 821 return derived_observable(lambda x: anp.abs(x[0]), [self]) + 822 + 823 # Overload numpy functions + 824 def sqrt(self): + 825 return derived_observable(lambda x, **kwargs: np.sqrt(x[0]), [self], man_grad=[1 / 2 / np.sqrt(self.value)]) 826 - 827 def exp(self): - 828 return derived_observable(lambda x, **kwargs: np.exp(x[0]), [self], man_grad=[np.exp(self.value)]) + 827 def log(self): + 828 return derived_observable(lambda x, **kwargs: np.log(x[0]), [self], man_grad=[1 / self.value]) 829 - 830 def sin(self): - 831 return derived_observable(lambda x, **kwargs: np.sin(x[0]), [self], man_grad=[np.cos(self.value)]) + 830 def exp(self): + 831 return derived_observable(lambda x, **kwargs: np.exp(x[0]), [self], man_grad=[np.exp(self.value)]) 832 - 833 def cos(self): - 834 return derived_observable(lambda x, **kwargs: np.cos(x[0]), [self], man_grad=[-np.sin(self.value)]) + 833 def sin(self): + 834 return derived_observable(lambda x, **kwargs: np.sin(x[0]), [self], man_grad=[np.cos(self.value)]) 835 - 836 def tan(self): - 837 return derived_observable(lambda x, **kwargs: np.tan(x[0]), [self], man_grad=[1 / np.cos(self.value) ** 2]) + 836 def cos(self): + 
837 return derived_observable(lambda x, **kwargs: np.cos(x[0]), [self], man_grad=[-np.sin(self.value)]) 838 - 839 def arcsin(self): - 840 return derived_observable(lambda x: anp.arcsin(x[0]), [self]) + 839 def tan(self): + 840 return derived_observable(lambda x, **kwargs: np.tan(x[0]), [self], man_grad=[1 / np.cos(self.value) ** 2]) 841 - 842 def arccos(self): - 843 return derived_observable(lambda x: anp.arccos(x[0]), [self]) + 842 def arcsin(self): + 843 return derived_observable(lambda x: anp.arcsin(x[0]), [self]) 844 - 845 def arctan(self): - 846 return derived_observable(lambda x: anp.arctan(x[0]), [self]) + 845 def arccos(self): + 846 return derived_observable(lambda x: anp.arccos(x[0]), [self]) 847 - 848 def sinh(self): - 849 return derived_observable(lambda x, **kwargs: np.sinh(x[0]), [self], man_grad=[np.cosh(self.value)]) + 848 def arctan(self): + 849 return derived_observable(lambda x: anp.arctan(x[0]), [self]) 850 - 851 def cosh(self): - 852 return derived_observable(lambda x, **kwargs: np.cosh(x[0]), [self], man_grad=[np.sinh(self.value)]) + 851 def sinh(self): + 852 return derived_observable(lambda x, **kwargs: np.sinh(x[0]), [self], man_grad=[np.cosh(self.value)]) 853 - 854 def tanh(self): - 855 return derived_observable(lambda x, **kwargs: np.tanh(x[0]), [self], man_grad=[1 / np.cosh(self.value) ** 2]) + 854 def cosh(self): + 855 return derived_observable(lambda x, **kwargs: np.cosh(x[0]), [self], man_grad=[np.sinh(self.value)]) 856 - 857 def arcsinh(self): - 858 return derived_observable(lambda x: anp.arcsinh(x[0]), [self]) + 857 def tanh(self): + 858 return derived_observable(lambda x, **kwargs: np.tanh(x[0]), [self], man_grad=[1 / np.cosh(self.value) ** 2]) 859 - 860 def arccosh(self): - 861 return derived_observable(lambda x: anp.arccosh(x[0]), [self]) + 860 def arcsinh(self): + 861 return derived_observable(lambda x: anp.arcsinh(x[0]), [self]) 862 - 863 def arctanh(self): - 864 return derived_observable(lambda x: anp.arctanh(x[0]), [self]) + 863 def arccosh(self): + 864 return derived_observable(lambda x: anp.arccosh(x[0]), [self]) 865 - 866 - 867class CObs: - 868 """Class for a complex valued observable.""" - 869 __slots__ = ['_real', '_imag', 'tag'] - 870 - 871 def __init__(self, real, imag=0.0): - 872 self._real = real - 873 self._imag = imag - 874 self.tag = None - 875 - 876 @property - 877 def real(self): - 878 return self._real - 879 - 880 @property - 881 def imag(self): - 882 return self._imag - 883 - 884 def gamma_method(self, **kwargs): - 885 """Executes the gamma_method for the real and the imaginary part.""" - 886 if isinstance(self.real, Obs): - 887 self.real.gamma_method(**kwargs) - 888 if isinstance(self.imag, Obs): - 889 self.imag.gamma_method(**kwargs) - 890 - 891 def is_zero(self): - 892 """Checks whether both real and imaginary part are zero within machine precision.""" - 893 return self.real == 0.0 and self.imag == 0.0 - 894 - 895 def conjugate(self): - 896 return CObs(self.real, -self.imag) + 866 def arctanh(self): + 867 return derived_observable(lambda x: anp.arctanh(x[0]), [self]) + 868 + 869 + 870class CObs: + 871 """Class for a complex valued observable.""" + 872 __slots__ = ['_real', '_imag', 'tag'] + 873 + 874 def __init__(self, real, imag=0.0): + 875 self._real = real + 876 self._imag = imag + 877 self.tag = None + 878 + 879 @property + 880 def real(self): + 881 return self._real + 882 + 883 @property + 884 def imag(self): + 885 return self._imag + 886 + 887 def gamma_method(self, **kwargs): + 888 """Executes the gamma_method for the real and 
the imaginary part.""" + 889 if isinstance(self.real, Obs): + 890 self.real.gamma_method(**kwargs) + 891 if isinstance(self.imag, Obs): + 892 self.imag.gamma_method(**kwargs) + 893 + 894 def is_zero(self): + 895 """Checks whether both real and imaginary part are zero within machine precision.""" + 896 return self.real == 0.0 and self.imag == 0.0 897 - 898 def __add__(self, other): - 899 if isinstance(other, np.ndarray): - 900 return other + self - 901 elif hasattr(other, 'real') and hasattr(other, 'imag'): - 902 return CObs(self.real + other.real, - 903 self.imag + other.imag) - 904 else: - 905 return CObs(self.real + other, self.imag) - 906 - 907 def __radd__(self, y): - 908 return self + y + 898 def conjugate(self): + 899 return CObs(self.real, -self.imag) + 900 + 901 def __add__(self, other): + 902 if isinstance(other, np.ndarray): + 903 return other + self + 904 elif hasattr(other, 'real') and hasattr(other, 'imag'): + 905 return CObs(self.real + other.real, + 906 self.imag + other.imag) + 907 else: + 908 return CObs(self.real + other, self.imag) 909 - 910 def __sub__(self, other): - 911 if isinstance(other, np.ndarray): - 912 return -1 * (other - self) - 913 elif hasattr(other, 'real') and hasattr(other, 'imag'): - 914 return CObs(self.real - other.real, self.imag - other.imag) - 915 else: - 916 return CObs(self.real - other, self.imag) - 917 - 918 def __rsub__(self, other): - 919 return -1 * (self - other) + 910 def __radd__(self, y): + 911 return self + y + 912 + 913 def __sub__(self, other): + 914 if isinstance(other, np.ndarray): + 915 return -1 * (other - self) + 916 elif hasattr(other, 'real') and hasattr(other, 'imag'): + 917 return CObs(self.real - other.real, self.imag - other.imag) + 918 else: + 919 return CObs(self.real - other, self.imag) 920 - 921 def __mul__(self, other): - 922 if isinstance(other, np.ndarray): - 923 return other * self - 924 elif hasattr(other, 'real') and hasattr(other, 'imag'): - 925 if all(isinstance(i, Obs) for i in [self.real, self.imag, other.real, other.imag]): - 926 return CObs(derived_observable(lambda x, **kwargs: x[0] * x[1] - x[2] * x[3], - 927 [self.real, other.real, self.imag, other.imag], - 928 man_grad=[other.real.value, self.real.value, -other.imag.value, -self.imag.value]), - 929 derived_observable(lambda x, **kwargs: x[2] * x[1] + x[0] * x[3], + 921 def __rsub__(self, other): + 922 return -1 * (self - other) + 923 + 924 def __mul__(self, other): + 925 if isinstance(other, np.ndarray): + 926 return other * self + 927 elif hasattr(other, 'real') and hasattr(other, 'imag'): + 928 if all(isinstance(i, Obs) for i in [self.real, self.imag, other.real, other.imag]): + 929 return CObs(derived_observable(lambda x, **kwargs: x[0] * x[1] - x[2] * x[3], 930 [self.real, other.real, self.imag, other.imag], - 931 man_grad=[other.imag.value, self.imag.value, other.real.value, self.real.value])) - 932 elif getattr(other, 'imag', 0) != 0: - 933 return CObs(self.real * other.real - self.imag * other.imag, - 934 self.imag * other.real + self.real * other.imag) - 935 else: - 936 return CObs(self.real * other.real, self.imag * other.real) - 937 else: - 938 return CObs(self.real * other, self.imag * other) - 939 - 940 def __rmul__(self, other): - 941 return self * other + 931 man_grad=[other.real.value, self.real.value, -other.imag.value, -self.imag.value]), + 932 derived_observable(lambda x, **kwargs: x[2] * x[1] + x[0] * x[3], + 933 [self.real, other.real, self.imag, other.imag], + 934 man_grad=[other.imag.value, self.imag.value, other.real.value, 
self.real.value])) + 935 elif getattr(other, 'imag', 0) != 0: + 936 return CObs(self.real * other.real - self.imag * other.imag, + 937 self.imag * other.real + self.real * other.imag) + 938 else: + 939 return CObs(self.real * other.real, self.imag * other.real) + 940 else: + 941 return CObs(self.real * other, self.imag * other) 942 - 943 def __truediv__(self, other): - 944 if isinstance(other, np.ndarray): - 945 return 1 / (other / self) - 946 elif hasattr(other, 'real') and hasattr(other, 'imag'): - 947 r = other.real ** 2 + other.imag ** 2 - 948 return CObs((self.real * other.real + self.imag * other.imag) / r, (self.imag * other.real - self.real * other.imag) / r) - 949 else: - 950 return CObs(self.real / other, self.imag / other) - 951 - 952 def __rtruediv__(self, other): - 953 r = self.real ** 2 + self.imag ** 2 - 954 if hasattr(other, 'real') and hasattr(other, 'imag'): - 955 return CObs((self.real * other.real + self.imag * other.imag) / r, (self.real * other.imag - self.imag * other.real) / r) - 956 else: - 957 return CObs(self.real * other / r, -self.imag * other / r) - 958 - 959 def __abs__(self): - 960 return np.sqrt(self.real**2 + self.imag**2) + 943 def __rmul__(self, other): + 944 return self * other + 945 + 946 def __truediv__(self, other): + 947 if isinstance(other, np.ndarray): + 948 return 1 / (other / self) + 949 elif hasattr(other, 'real') and hasattr(other, 'imag'): + 950 r = other.real ** 2 + other.imag ** 2 + 951 return CObs((self.real * other.real + self.imag * other.imag) / r, (self.imag * other.real - self.real * other.imag) / r) + 952 else: + 953 return CObs(self.real / other, self.imag / other) + 954 + 955 def __rtruediv__(self, other): + 956 r = self.real ** 2 + self.imag ** 2 + 957 if hasattr(other, 'real') and hasattr(other, 'imag'): + 958 return CObs((self.real * other.real + self.imag * other.imag) / r, (self.real * other.imag - self.imag * other.real) / r) + 959 else: + 960 return CObs(self.real * other / r, -self.imag * other / r) 961 - 962 def __pos__(self): - 963 return self + 962 def __abs__(self): + 963 return np.sqrt(self.real**2 + self.imag**2) 964 - 965 def __neg__(self): - 966 return -1 * self + 965 def __pos__(self): + 966 return self 967 - 968 def __eq__(self, other): - 969 return self.real == other.real and self.imag == other.imag + 968 def __neg__(self): + 969 return -1 * self 970 - 971 def __str__(self): - 972 return '(' + str(self.real) + int(self.imag >= 0.0) * '+' + str(self.imag) + 'j)' + 971 def __eq__(self, other): + 972 return self.real == other.real and self.imag == other.imag 973 - 974 def __repr__(self): - 975 return 'CObs[' + str(self) + ']' + 974 def __str__(self): + 975 return '(' + str(self.real) + int(self.imag >= 0.0) * '+' + str(self.imag) + 'j)' 976 - 977 - 978def _format_uncertainty(value, dvalue): - 979 """Creates a string of a value and its error in paranthesis notation, e.g., 13.02(45)""" - 980 if dvalue == 0.0: - 981 return str(value) - 982 fexp = np.floor(np.log10(dvalue)) - 983 if fexp < 0.0: - 984 return '{:{form}}({:2.0f})'.format(value, dvalue * 10 ** (-fexp + 1), form='.' + str(-int(fexp) + 1) + 'f') - 985 elif fexp == 0.0: - 986 return '{:.1f}({:1.1f})'.format(value, dvalue) - 987 else: - 988 return '{:.0f}({:2.0f})'.format(value, dvalue) - 989 - 990 - 991def _expand_deltas(deltas, idx, shape): - 992 """Expand deltas defined on idx to a regular, contiguous range, where holes are filled by 0. 
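[Editor's note: a self-contained sketch that mirrors the three branches of _format_uncertainty above, to show what the parenthesis notation produces; the sample values are invented:]

import numpy as np

def format_uncertainty(value, dvalue):
    # Same branch logic as _format_uncertainty: the number of digits shown
    # follows the order of magnitude of the error.
    if dvalue == 0.0:
        return str(value)
    fexp = np.floor(np.log10(dvalue))
    if fexp < 0.0:
        return '{:{form}}({:2.0f})'.format(value, dvalue * 10 ** (-fexp + 1), form='.' + str(-int(fexp) + 1) + 'f')
    elif fexp == 0.0:
        return '{:.1f}({:1.1f})'.format(value, dvalue)
    return '{:.0f}({:2.0f})'.format(value, dvalue)

print(format_uncertainty(13.021, 0.45))   # -> 13.02(45)
print(format_uncertainty(13.021, 4.5))    # -> 13.0(4.5)
print(format_uncertainty(13.021, 45.0))   # -> 13(45)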
- 993    If idx is of type range, the deltas are not changed
- 994
- 995    Parameters
- 996    ----------
- 997    deltas : list
- 998        List of fluctuations
- 999    idx : list
-1000        List or range of configs on which the deltas are defined, has to be sorted in ascending order.
-1001    shape : int
-1002        Number of configs in idx.
-1003    """
-1004    if isinstance(idx, range):
-1005        return deltas
-1006    else:
-1007        ret = np.zeros(idx[-1] - idx[0] + 1)
-1008        for i in range(shape):
-1009            ret[idx[i] - idx[0]] = deltas[i]
-1010        return ret
-1011
-1012
-1013def _merge_idx(idl):
-1014    """Returns the union of all lists in idl as sorted list
+ 977    def __repr__(self):
+ 978        return 'CObs[' + str(self) + ']'
+ 979
+ 980
+ 981def _format_uncertainty(value, dvalue):
+ 982    """Creates a string of a value and its error in parenthesis notation, e.g., 13.02(45)"""
+ 983    if dvalue == 0.0:
+ 984        return str(value)
+ 985    fexp = np.floor(np.log10(dvalue))
+ 986    if fexp < 0.0:
+ 987        return '{:{form}}({:2.0f})'.format(value, dvalue * 10 ** (-fexp + 1), form='.' + str(-int(fexp) + 1) + 'f')
+ 988    elif fexp == 0.0:
+ 989        return '{:.1f}({:1.1f})'.format(value, dvalue)
+ 990    else:
+ 991        return '{:.0f}({:2.0f})'.format(value, dvalue)
+ 992
+ 993
+ 994def _expand_deltas(deltas, idx, shape):
+ 995    """Expand deltas defined on idx to a regular, contiguous range, where holes are filled by 0.
+ 996    If idx is of type range, the deltas are not changed
+ 997
+ 998    Parameters
+ 999    ----------
+1000    deltas : list
+1001        List of fluctuations
+1002    idx : list
+1003        List or range of configs on which the deltas are defined, has to be sorted in ascending order.
+1004    shape : int
+1005        Number of configs in idx.
+1006    """
+1007    if isinstance(idx, range):
+1008        return deltas
+1009    else:
+1010        ret = np.zeros(idx[-1] - idx[0] + 1)
+1011        for i in range(shape):
+1012            ret[idx[i] - idx[0]] = deltas[i]
+1013        return ret
+1014
 1015
-1016    Parameters
-1017    ----------
-1018    idl : list
-1019        List of lists or ranges.
-1020    """
-1021
-1022    # Use groupby to efficiently check whether all elements of idl are identical
-1023    try:
-1024        g = groupby(idl)
-1025        if next(g, True) and not next(g, False):
-1026            return idl[0]
-1027    except Exception:
-1028        pass
-1029
-1030    if np.all([type(idx) is range for idx in idl]):
-1031        if len(set([idx[0] for idx in idl])) == 1:
-1032            idstart = min([idx.start for idx in idl])
-1033            idstop = max([idx.stop for idx in idl])
-1034            idstep = min([idx.step for idx in idl])
-1035            return range(idstart, idstop, idstep)
-1036
-1037    return sorted(set().union(*idl))
-1038
+1016def _merge_idx(idl):
+1017    """Returns the union of all lists in idl as sorted list
+1018
+1019    Parameters
+1020    ----------
+1021    idl : list
+1022        List of lists or ranges.
+1023    """
+1024
+1025    # Use groupby to efficiently check whether all elements of idl are identical
+1026    try:
+1027        g = groupby(idl)
+1028        if next(g, True) and not next(g, False):
+1029            return idl[0]
+1030    except Exception:
+1031        pass
+1032
+1033    if np.all([type(idx) is range for idx in idl]):
+1034        if len(set([idx[0] for idx in idl])) == 1:
+1035            idstart = min([idx.start for idx in idl])
+1036            idstop = max([idx.stop for idx in idl])
+1037            idstep = min([idx.step for idx in idl])
+1038            return range(idstart, idstop, idstep)
 1039
-1040def _intersection_idx(idl):
-1041    """Returns the intersection of all lists in idl as sorted list
+1040    return sorted(set().union(*idl))
+1041
 1042
-1043    Parameters
-1044    ----------
-1045    idl : list
-1046        List of lists or ranges.
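[Editor's note: a quick illustration of the union semantics of _merge_idx above, with invented idl entries; identical entries short-circuit via groupby, compatible ranges stay a range, and everything else falls through to the sorted union:]

# Two replica measured on interleaved configs plus an irregular list.
r1 = range(1, 101, 2)
r2 = range(2, 101, 2)
l3 = [1, 2, 3, 10, 50]

merged = sorted(set().union(r1, r2, l3))   # the generic fall-through branch
assert merged[:5] == [1, 2, 3, 4, 5]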
-1047 """ -1048 -1049 def _lcm(*args): -1050 """Returns the lowest common multiple of args. +1043def _intersection_idx(idl): +1044 """Returns the intersection of all lists in idl as sorted list +1045 +1046 Parameters +1047 ---------- +1048 idl : list +1049 List of lists or ranges. +1050 """ 1051 -1052 From python 3.9 onwards the math library contains an lcm function.""" -1053 return reduce(lambda a, b: a * b // gcd(a, b), args) +1052 def _lcm(*args): +1053 """Returns the lowest common multiple of args. 1054 -1055 # Use groupby to efficiently check whether all elements of idl are identical -1056 try: -1057 g = groupby(idl) -1058 if next(g, True) and not next(g, False): -1059 return idl[0] -1060 except Exception: -1061 pass -1062 -1063 if np.all([type(idx) is range for idx in idl]): -1064 if len(set([idx[0] for idx in idl])) == 1: -1065 idstart = max([idx.start for idx in idl]) -1066 idstop = min([idx.stop for idx in idl]) -1067 idstep = _lcm(*[idx.step for idx in idl]) -1068 return range(idstart, idstop, idstep) -1069 -1070 return sorted(set.intersection(*[set(o) for o in idl])) -1071 +1055 From python 3.9 onwards the math library contains an lcm function.""" +1056 return reduce(lambda a, b: a * b // gcd(a, b), args) +1057 +1058 # Use groupby to efficiently check whether all elements of idl are identical +1059 try: +1060 g = groupby(idl) +1061 if next(g, True) and not next(g, False): +1062 return idl[0] +1063 except Exception: +1064 pass +1065 +1066 if np.all([type(idx) is range for idx in idl]): +1067 if len(set([idx[0] for idx in idl])) == 1: +1068 idstart = max([idx.start for idx in idl]) +1069 idstop = min([idx.stop for idx in idl]) +1070 idstep = _lcm(*[idx.step for idx in idl]) +1071 return range(idstart, idstop, idstep) 1072 -1073def _expand_deltas_for_merge(deltas, idx, shape, new_idx): -1074 """Expand deltas defined on idx to the list of configs that is defined by new_idx. -1075 New, empty entries are filled by 0. If idx and new_idx are of type range, the smallest -1076 common divisor of the step sizes is used as new step size. -1077 -1078 Parameters -1079 ---------- -1080 deltas : list -1081 List of fluctuations -1082 idx : list -1083 List or range of configs on which the deltas are defined. -1084 Has to be a subset of new_idx and has to be sorted in ascending order. -1085 shape : list -1086 Number of configs in idx. -1087 new_idx : list -1088 List of configs that defines the new range, has to be sorted in ascending order. -1089 """ -1090 -1091 if type(idx) is range and type(new_idx) is range: -1092 if idx == new_idx: -1093 return deltas -1094 ret = np.zeros(new_idx[-1] - new_idx[0] + 1) -1095 for i in range(shape): -1096 ret[idx[i] - new_idx[0]] = deltas[i] -1097 return np.array([ret[new_idx[i] - new_idx[0]] for i in range(len(new_idx))]) -1098 -1099 -1100def _filter_zeroes(deltas, idx, eps=Obs.filter_eps): -1101 """Filter out all configurations with vanishing fluctuation such that they do not -1102 contribute to the error estimate anymore. Returns the new deltas and -1103 idx according to the filtering. -1104 A fluctuation is considered to be vanishing, if it is smaller than eps times -1105 the mean of the absolute values of all deltas in one list. -1106 -1107 Parameters -1108 ---------- -1109 deltas : list -1110 List of fluctuations -1111 idx : list -1112 List or ranges of configs on which the deltas are defined. -1113 eps : float -1114 Prefactor that enters the filter criterion. 
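[Editor's note: a numpy sketch of the delta expansion described above, for the case where idx is a subset of the merged index list new_idx; holes are filled with zeros and the numbers are invented:]

import numpy as np

deltas = np.array([0.5, -0.2, 0.1])   # fluctuations on configs 2, 4, 6
idx = range(2, 7, 2)
new_idx = range(1, 7)                 # merged, finer index list

ret = np.zeros(new_idx[-1] - new_idx[0] + 1)
for i in range(len(idx)):
    ret[idx[i] - new_idx[0]] = deltas[i]
expanded = np.array([ret[j - new_idx[0]] for j in new_idx])
# expanded -> [0., 0.5, 0., -0.2, 0., 0.1] on configs 1..6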
-1115    """
-1116    new_deltas = []
-1117    new_idx = []
-1118    maxd = np.mean(np.fabs(deltas))
-1119    for i in range(len(deltas)):
-1120        if abs(deltas[i]) > eps * maxd:
-1121            new_deltas.append(deltas[i])
-1122            new_idx.append(idx[i])
-1123    if new_idx:
-1124        return np.array(new_deltas), new_idx
-1125    else:
-1126        return deltas, idx
-1127
-1128
-1129def derived_observable(func, data, array_mode=False, **kwargs):
-1130    """Construct a derived Obs according to func(data, **kwargs) using automatic differentiation.
+1073    return sorted(set.intersection(*[set(o) for o in idl]))
+1074
+1075
+1076def _expand_deltas_for_merge(deltas, idx, shape, new_idx):
+1077    """Expand deltas defined on idx to the list of configs that is defined by new_idx.
+1078    New, empty entries are filled by 0. If idx and new_idx are of type range, the smallest
+1079    common divisor of the step sizes is used as new step size.
+1080
+1081    Parameters
+1082    ----------
+1083    deltas : list
+1084        List of fluctuations
+1085    idx : list
+1086        List or range of configs on which the deltas are defined.
+1087        Has to be a subset of new_idx and has to be sorted in ascending order.
+1088    shape : list
+1089        Number of configs in idx.
+1090    new_idx : list
+1091        List of configs that defines the new range, has to be sorted in ascending order.
+1092    """
+1093
+1094    if type(idx) is range and type(new_idx) is range:
+1095        if idx == new_idx:
+1096            return deltas
+1097    ret = np.zeros(new_idx[-1] - new_idx[0] + 1)
+1098    for i in range(shape):
+1099        ret[idx[i] - new_idx[0]] = deltas[i]
+1100    return np.array([ret[new_idx[i] - new_idx[0]] for i in range(len(new_idx))])
+1101
+1102
+1103def _filter_zeroes(deltas, idx, eps=Obs.filter_eps):
+1104    """Filter out all configurations with vanishing fluctuation such that they do not
+1105    contribute to the error estimate anymore. Returns the new deltas and
+1106    idx according to the filtering.
+1107    A fluctuation is considered to be vanishing if it is smaller than eps times
+1108    the mean of the absolute values of all deltas in one list.
+1109
+1110    Parameters
+1111    ----------
+1112    deltas : list
+1113        List of fluctuations
+1114    idx : list
+1115        List or ranges of configs on which the deltas are defined.
+1116    eps : float
+1117        Prefactor that enters the filter criterion.
+1118    """
+1119    new_deltas = []
+1120    new_idx = []
+1121    maxd = np.mean(np.fabs(deltas))
+1122    for i in range(len(deltas)):
+1123        if abs(deltas[i]) > eps * maxd:
+1124            new_deltas.append(deltas[i])
+1125            new_idx.append(idx[i])
+1126    if new_idx:
+1127        return np.array(new_deltas), new_idx
+1128    else:
+1129        return deltas, idx
+1130
 1131
-1132    Parameters
-1133    ----------
-1134    func : object
-1135        arbitrary function of the form func(data, **kwargs). For the
-1136        automatic differentiation to work, all numpy functions have to have
-1137        the autograd wrapper (use 'import autograd.numpy as anp').
-1138    data : list
-1139        list of Obs, e.g. [obs1, obs2, obs3].
-1140    num_grad : bool
-1141        if True, numerical derivatives are used instead of autograd
-1142        (default False). To control the numerical differentiation the
-1143        kwargs of numdifftools.step_generators.MaxStepGenerator
-1144        can be used.
-1145    man_grad : list
-1146        manually supply a list or an array which contains the jacobian
-1147        of func. Use cautiously, supplying the wrong derivative will
-1148        not be intercepted.
-1149
-1150    Notes
-1151    -----
-1152    For simple mathematical operations it can be practical to use anonymous
-1153    functions. For the ratio of two observables one can e.g.
use -1154 -1155 new_obs = derived_observable(lambda x: x[0] / x[1], [obs1, obs2]) -1156 """ +1132def derived_observable(func, data, array_mode=False, **kwargs): +1133 """Construct a derived Obs according to func(data, **kwargs) using automatic differentiation. +1134 +1135 Parameters +1136 ---------- +1137 func : object +1138 arbitrary function of the form func(data, **kwargs). For the +1139 automatic differentiation to work, all numpy functions have to have +1140 the autograd wrapper (use 'import autograd.numpy as anp'). +1141 data : list +1142 list of Obs, e.g. [obs1, obs2, obs3]. +1143 num_grad : bool +1144 if True, numerical derivatives are used instead of autograd +1145 (default False). To control the numerical differentiation the +1146 kwargs of numdifftools.step_generators.MaxStepGenerator +1147 can be used. +1148 man_grad : list +1149 manually supply a list or an array which contains the jacobian +1150 of func. Use cautiously, supplying the wrong derivative will +1151 not be intercepted. +1152 +1153 Notes +1154 ----- +1155 For simple mathematical operations it can be practical to use anonymous +1156 functions. For the ratio of two observables one can e.g. use 1157 -1158 data = np.asarray(data) -1159 raveled_data = data.ravel() +1158 new_obs = derived_observable(lambda x: x[0] / x[1], [obs1, obs2]) +1159 """ 1160 -1161 # Workaround for matrix operations containing non Obs data -1162 if not all(isinstance(x, Obs) for x in raveled_data): -1163 for i in range(len(raveled_data)): -1164 if isinstance(raveled_data[i], (int, float)): -1165 raveled_data[i] = cov_Obs(raveled_data[i], 0.0, "###dummy_covobs###") -1166 -1167 allcov = {} -1168 for o in raveled_data: -1169 for name in o.cov_names: -1170 if name in allcov: -1171 if not np.allclose(allcov[name], o.covobs[name].cov): -1172 raise Exception('Inconsistent covariance matrices for %s!' % (name)) -1173 else: -1174 allcov[name] = o.covobs[name].cov -1175 -1176 n_obs = len(raveled_data) -1177 new_names = sorted(set([y for x in [o.names for o in raveled_data] for y in x])) -1178 new_cov_names = sorted(set([y for x in [o.cov_names for o in raveled_data] for y in x])) -1179 new_sample_names = sorted(set(new_names) - set(new_cov_names)) -1180 -1181 is_merged = {name: (len(list(filter(lambda o: o.is_merged.get(name, False) is True, raveled_data))) > 0) for name in new_sample_names} -1182 reweighted = len(list(filter(lambda o: o.reweighted is True, raveled_data))) > 0 +1161 data = np.asarray(data) +1162 raveled_data = data.ravel() +1163 +1164 # Workaround for matrix operations containing non Obs data +1165 if not all(isinstance(x, Obs) for x in raveled_data): +1166 for i in range(len(raveled_data)): +1167 if isinstance(raveled_data[i], (int, float)): +1168 raveled_data[i] = cov_Obs(raveled_data[i], 0.0, "###dummy_covobs###") +1169 +1170 allcov = {} +1171 for o in raveled_data: +1172 for name in o.cov_names: +1173 if name in allcov: +1174 if not np.allclose(allcov[name], o.covobs[name].cov): +1175 raise Exception('Inconsistent covariance matrices for %s!' 
% (name)) +1176 else: +1177 allcov[name] = o.covobs[name].cov +1178 +1179 n_obs = len(raveled_data) +1180 new_names = sorted(set([y for x in [o.names for o in raveled_data] for y in x])) +1181 new_cov_names = sorted(set([y for x in [o.cov_names for o in raveled_data] for y in x])) +1182 new_sample_names = sorted(set(new_names) - set(new_cov_names)) 1183 -1184 if data.ndim == 1: -1185 values = np.array([o.value for o in data]) -1186 else: -1187 values = np.vectorize(lambda x: x.value)(data) -1188 -1189 new_values = func(values, **kwargs) -1190 -1191 multi = int(isinstance(new_values, np.ndarray)) -1192 -1193 new_r_values = {} -1194 new_idl_d = {} -1195 for name in new_sample_names: -1196 idl = [] -1197 tmp_values = np.zeros(n_obs) -1198 for i, item in enumerate(raveled_data): -1199 tmp_values[i] = item.r_values.get(name, item.value) -1200 tmp_idl = item.idl.get(name) -1201 if tmp_idl is not None: -1202 idl.append(tmp_idl) -1203 if multi > 0: -1204 tmp_values = np.array(tmp_values).reshape(data.shape) -1205 new_r_values[name] = func(tmp_values, **kwargs) -1206 new_idl_d[name] = _merge_idx(idl) -1207 if not is_merged[name]: -1208 is_merged[name] = (1 != len(set([len(idx) for idx in [*idl, new_idl_d[name]]]))) -1209 -1210 if 'man_grad' in kwargs: -1211 deriv = np.asarray(kwargs.get('man_grad')) -1212 if new_values.shape + data.shape != deriv.shape: -1213 raise Exception('Manual derivative does not have correct shape.') -1214 elif kwargs.get('num_grad') is True: -1215 if multi > 0: -1216 raise Exception('Multi mode currently not supported for numerical derivative') -1217 options = { -1218 'base_step': 0.1, -1219 'step_ratio': 2.5} -1220 for key in options.keys(): -1221 kwarg = kwargs.get(key) -1222 if kwarg is not None: -1223 options[key] = kwarg -1224 tmp_df = nd.Gradient(func, order=4, **{k: v for k, v in options.items() if v is not None})(values, **kwargs) -1225 if tmp_df.size == 1: -1226 deriv = np.array([tmp_df.real]) -1227 else: -1228 deriv = tmp_df.real -1229 else: -1230 deriv = jacobian(func)(values, **kwargs) -1231 -1232 final_result = np.zeros(new_values.shape, dtype=object) -1233 -1234 if array_mode is True: -1235 -1236 class _Zero_grad(): -1237 def __init__(self, N): -1238 self.grad = np.zeros((N, 1)) -1239 -1240 new_covobs_lengths = dict(set([y for x in [[(n, o.covobs[n].N) for n in o.cov_names] for o in raveled_data] for y in x])) -1241 d_extracted = {} -1242 g_extracted = {} -1243 for name in new_sample_names: -1244 d_extracted[name] = [] -1245 ens_length = len(new_idl_d[name]) -1246 for i_dat, dat in enumerate(data): -1247 d_extracted[name].append(np.array([_expand_deltas_for_merge(o.deltas.get(name, np.zeros(ens_length)), o.idl.get(name, new_idl_d[name]), o.shape.get(name, ens_length), new_idl_d[name]) for o in dat.reshape(np.prod(dat.shape))]).reshape(dat.shape + (ens_length, ))) -1248 for name in new_cov_names: -1249 g_extracted[name] = [] -1250 zero_grad = _Zero_grad(new_covobs_lengths[name]) -1251 for i_dat, dat in enumerate(data): -1252 g_extracted[name].append(np.array([o.covobs.get(name, zero_grad).grad for o in dat.reshape(np.prod(dat.shape))]).reshape(dat.shape + (new_covobs_lengths[name], 1))) -1253 -1254 for i_val, new_val in np.ndenumerate(new_values): -1255 new_deltas = {} -1256 new_grad = {} -1257 if array_mode is True: -1258 for name in new_sample_names: -1259 ens_length = d_extracted[name][0].shape[-1] -1260 new_deltas[name] = np.zeros(ens_length) -1261 for i_dat, dat in enumerate(d_extracted[name]): -1262 new_deltas[name] += np.tensordot(deriv[i_val + (i_dat, 
)], dat) -1263 for name in new_cov_names: -1264 new_grad[name] = 0 -1265 for i_dat, dat in enumerate(g_extracted[name]): -1266 new_grad[name] += np.tensordot(deriv[i_val + (i_dat, )], dat) -1267 else: -1268 for j_obs, obs in np.ndenumerate(data): -1269 for name in obs.names: -1270 if name in obs.cov_names: -1271 new_grad[name] = new_grad.get(name, 0) + deriv[i_val + j_obs] * obs.covobs[name].grad -1272 else: -1273 new_deltas[name] = new_deltas.get(name, 0) + deriv[i_val + j_obs] * _expand_deltas_for_merge(obs.deltas[name], obs.idl[name], obs.shape[name], new_idl_d[name]) -1274 -1275 new_covobs = {name: Covobs(0, allcov[name], name, grad=new_grad[name]) for name in new_grad} -1276 -1277 if not set(new_covobs.keys()).isdisjoint(new_deltas.keys()): -1278 raise Exception('The same name has been used for deltas and covobs!') -1279 new_samples = [] -1280 new_means = [] -1281 new_idl = [] -1282 new_names_obs = [] -1283 for name in new_names: -1284 if name not in new_covobs: -1285 if is_merged[name]: -1286 filtered_deltas, filtered_idl_d = _filter_zeroes(new_deltas[name], new_idl_d[name]) -1287 else: -1288 filtered_deltas = new_deltas[name] -1289 filtered_idl_d = new_idl_d[name] -1290 -1291 new_samples.append(filtered_deltas) -1292 new_idl.append(filtered_idl_d) -1293 new_means.append(new_r_values[name][i_val]) -1294 new_names_obs.append(name) -1295 final_result[i_val] = Obs(new_samples, new_names_obs, means=new_means, idl=new_idl) -1296 for name in new_covobs: -1297 final_result[i_val].names.append(name) -1298 final_result[i_val]._covobs = new_covobs -1299 final_result[i_val]._value = new_val -1300 final_result[i_val].is_merged = is_merged -1301 final_result[i_val].reweighted = reweighted -1302 -1303 if multi == 0: -1304 final_result = final_result.item() +1184 is_merged = {name: (len(list(filter(lambda o: o.is_merged.get(name, False) is True, raveled_data))) > 0) for name in new_sample_names} +1185 reweighted = len(list(filter(lambda o: o.reweighted is True, raveled_data))) > 0 +1186 +1187 if data.ndim == 1: +1188 values = np.array([o.value for o in data]) +1189 else: +1190 values = np.vectorize(lambda x: x.value)(data) +1191 +1192 new_values = func(values, **kwargs) +1193 +1194 multi = int(isinstance(new_values, np.ndarray)) +1195 +1196 new_r_values = {} +1197 new_idl_d = {} +1198 for name in new_sample_names: +1199 idl = [] +1200 tmp_values = np.zeros(n_obs) +1201 for i, item in enumerate(raveled_data): +1202 tmp_values[i] = item.r_values.get(name, item.value) +1203 tmp_idl = item.idl.get(name) +1204 if tmp_idl is not None: +1205 idl.append(tmp_idl) +1206 if multi > 0: +1207 tmp_values = np.array(tmp_values).reshape(data.shape) +1208 new_r_values[name] = func(tmp_values, **kwargs) +1209 new_idl_d[name] = _merge_idx(idl) +1210 if not is_merged[name]: +1211 is_merged[name] = (1 != len(set([len(idx) for idx in [*idl, new_idl_d[name]]]))) +1212 +1213 if 'man_grad' in kwargs: +1214 deriv = np.asarray(kwargs.get('man_grad')) +1215 if new_values.shape + data.shape != deriv.shape: +1216 raise Exception('Manual derivative does not have correct shape.') +1217 elif kwargs.get('num_grad') is True: +1218 if multi > 0: +1219 raise Exception('Multi mode currently not supported for numerical derivative') +1220 options = { +1221 'base_step': 0.1, +1222 'step_ratio': 2.5} +1223 for key in options.keys(): +1224 kwarg = kwargs.get(key) +1225 if kwarg is not None: +1226 options[key] = kwarg +1227 tmp_df = nd.Gradient(func, order=4, **{k: v for k, v in options.items() if v is not None})(values, **kwargs) +1228 if 
tmp_df.size == 1: +1229 deriv = np.array([tmp_df.real]) +1230 else: +1231 deriv = tmp_df.real +1232 else: +1233 deriv = jacobian(func)(values, **kwargs) +1234 +1235 final_result = np.zeros(new_values.shape, dtype=object) +1236 +1237 if array_mode is True: +1238 +1239 class _Zero_grad(): +1240 def __init__(self, N): +1241 self.grad = np.zeros((N, 1)) +1242 +1243 new_covobs_lengths = dict(set([y for x in [[(n, o.covobs[n].N) for n in o.cov_names] for o in raveled_data] for y in x])) +1244 d_extracted = {} +1245 g_extracted = {} +1246 for name in new_sample_names: +1247 d_extracted[name] = [] +1248 ens_length = len(new_idl_d[name]) +1249 for i_dat, dat in enumerate(data): +1250 d_extracted[name].append(np.array([_expand_deltas_for_merge(o.deltas.get(name, np.zeros(ens_length)), o.idl.get(name, new_idl_d[name]), o.shape.get(name, ens_length), new_idl_d[name]) for o in dat.reshape(np.prod(dat.shape))]).reshape(dat.shape + (ens_length, ))) +1251 for name in new_cov_names: +1252 g_extracted[name] = [] +1253 zero_grad = _Zero_grad(new_covobs_lengths[name]) +1254 for i_dat, dat in enumerate(data): +1255 g_extracted[name].append(np.array([o.covobs.get(name, zero_grad).grad for o in dat.reshape(np.prod(dat.shape))]).reshape(dat.shape + (new_covobs_lengths[name], 1))) +1256 +1257 for i_val, new_val in np.ndenumerate(new_values): +1258 new_deltas = {} +1259 new_grad = {} +1260 if array_mode is True: +1261 for name in new_sample_names: +1262 ens_length = d_extracted[name][0].shape[-1] +1263 new_deltas[name] = np.zeros(ens_length) +1264 for i_dat, dat in enumerate(d_extracted[name]): +1265 new_deltas[name] += np.tensordot(deriv[i_val + (i_dat, )], dat) +1266 for name in new_cov_names: +1267 new_grad[name] = 0 +1268 for i_dat, dat in enumerate(g_extracted[name]): +1269 new_grad[name] += np.tensordot(deriv[i_val + (i_dat, )], dat) +1270 else: +1271 for j_obs, obs in np.ndenumerate(data): +1272 for name in obs.names: +1273 if name in obs.cov_names: +1274 new_grad[name] = new_grad.get(name, 0) + deriv[i_val + j_obs] * obs.covobs[name].grad +1275 else: +1276 new_deltas[name] = new_deltas.get(name, 0) + deriv[i_val + j_obs] * _expand_deltas_for_merge(obs.deltas[name], obs.idl[name], obs.shape[name], new_idl_d[name]) +1277 +1278 new_covobs = {name: Covobs(0, allcov[name], name, grad=new_grad[name]) for name in new_grad} +1279 +1280 if not set(new_covobs.keys()).isdisjoint(new_deltas.keys()): +1281 raise Exception('The same name has been used for deltas and covobs!') +1282 new_samples = [] +1283 new_means = [] +1284 new_idl = [] +1285 new_names_obs = [] +1286 for name in new_names: +1287 if name not in new_covobs: +1288 if is_merged[name]: +1289 filtered_deltas, filtered_idl_d = _filter_zeroes(new_deltas[name], new_idl_d[name]) +1290 else: +1291 filtered_deltas = new_deltas[name] +1292 filtered_idl_d = new_idl_d[name] +1293 +1294 new_samples.append(filtered_deltas) +1295 new_idl.append(filtered_idl_d) +1296 new_means.append(new_r_values[name][i_val]) +1297 new_names_obs.append(name) +1298 final_result[i_val] = Obs(new_samples, new_names_obs, means=new_means, idl=new_idl) +1299 for name in new_covobs: +1300 final_result[i_val].names.append(name) +1301 final_result[i_val]._covobs = new_covobs +1302 final_result[i_val]._value = new_val +1303 final_result[i_val].is_merged = is_merged +1304 final_result[i_val].reweighted = reweighted 1305 -1306 return final_result -1307 +1306 if multi == 0: +1307 final_result = final_result.item() 1308 -1309def _reduce_deltas(deltas, idx_old, idx_new): -1310 """Extract deltas defined 
on idx_old on all configs of idx_new. +1309 return final_result +1310 1311 -1312 Assumes, that idx_old and idx_new are correctly defined idl, i.e., they -1313 are ordered in an ascending order. +1312def _reduce_deltas(deltas, idx_old, idx_new): +1313 """Extract deltas defined on idx_old on all configs of idx_new. 1314 -1315 Parameters -1316 ---------- -1317 deltas : list -1318 List of fluctuations -1319 idx_old : list -1320 List or range of configs on which the deltas are defined -1321 idx_new : list -1322 List of configs for which we want to extract the deltas. -1323 Has to be a subset of idx_old. -1324 """ -1325 if not len(deltas) == len(idx_old): -1326 raise Exception('Length of deltas and idx_old have to be the same: %d != %d' % (len(deltas), len(idx_old))) -1327 if type(idx_old) is range and type(idx_new) is range: -1328 if idx_old == idx_new: -1329 return deltas -1330 # Use groupby to efficiently check whether all elements of idx_old and idx_new are identical -1331 try: -1332 g = groupby([idx_old, idx_new]) -1333 if next(g, True) and not next(g, False): -1334 return deltas -1335 except Exception: -1336 pass -1337 indices = np.intersect1d(idx_old, idx_new, assume_unique=True, return_indices=True)[1] -1338 if len(indices) < len(idx_new): -1339 raise Exception('Error in _reduce_deltas: Config of idx_new not in idx_old') -1340 return np.array(deltas)[indices] -1341 -1342 -1343def reweight(weight, obs, **kwargs): -1344 """Reweight a list of observables. +1315 Assumes, that idx_old and idx_new are correctly defined idl, i.e., they +1316 are ordered in an ascending order. +1317 +1318 Parameters +1319 ---------- +1320 deltas : list +1321 List of fluctuations +1322 idx_old : list +1323 List or range of configs on which the deltas are defined +1324 idx_new : list +1325 List of configs for which we want to extract the deltas. +1326 Has to be a subset of idx_old. +1327 """ +1328 if not len(deltas) == len(idx_old): +1329 raise Exception('Length of deltas and idx_old have to be the same: %d != %d' % (len(deltas), len(idx_old))) +1330 if type(idx_old) is range and type(idx_new) is range: +1331 if idx_old == idx_new: +1332 return deltas +1333 # Use groupby to efficiently check whether all elements of idx_old and idx_new are identical +1334 try: +1335 g = groupby([idx_old, idx_new]) +1336 if next(g, True) and not next(g, False): +1337 return deltas +1338 except Exception: +1339 pass +1340 indices = np.intersect1d(idx_old, idx_new, assume_unique=True, return_indices=True)[1] +1341 if len(indices) < len(idx_new): +1342 raise Exception('Error in _reduce_deltas: Config of idx_new not in idx_old') +1343 return np.array(deltas)[indices] +1344 1345 -1346 Parameters -1347 ---------- -1348 weight : Obs -1349 Reweighting factor. An Observable that has to be defined on a superset of the -1350 configurations in obs[i].idl for all i. -1351 obs : list -1352 list of Obs, e.g. [obs1, obs2, obs3]. -1353 all_configs : bool -1354 if True, the reweighted observables are normalized by the average of -1355 the reweighting factor on all configurations in weight.idl and not -1356 on the configurations in obs[i].idl. Default False. 
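[Editor's note: the index bookkeeping of _reduce_deltas above in miniature; np.intersect1d with return_indices=True yields the positions of the surviving configs within idx_old. Toy numbers:]

import numpy as np

deltas = np.array([0.3, -0.1, 0.4, 0.0, -0.6])
idx_old = [2, 4, 6, 8, 10]
idx_new = [4, 8, 10]   # has to be a subset of idx_old

indices = np.intersect1d(idx_old, idx_new, assume_unique=True, return_indices=True)[1]
reduced = deltas[indices]
assert list(reduced) == [-0.1, 0.0, -0.6]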
-1357 """ -1358 result = [] -1359 for i in range(len(obs)): -1360 if len(obs[i].cov_names): -1361 raise Exception('Error: Not possible to reweight an Obs that contains covobs!') -1362 if not set(obs[i].names).issubset(weight.names): -1363 raise Exception('Error: Ensembles do not fit') -1364 for name in obs[i].names: -1365 if not set(obs[i].idl[name]).issubset(weight.idl[name]): -1366 raise Exception('obs[%d] has to be defined on a subset of the configs in weight.idl[%s]!' % (i, name)) -1367 new_samples = [] -1368 w_deltas = {} -1369 for name in sorted(obs[i].names): -1370 w_deltas[name] = _reduce_deltas(weight.deltas[name], weight.idl[name], obs[i].idl[name]) -1371 new_samples.append((w_deltas[name] + weight.r_values[name]) * (obs[i].deltas[name] + obs[i].r_values[name])) -1372 tmp_obs = Obs(new_samples, sorted(obs[i].names), idl=[obs[i].idl[name] for name in sorted(obs[i].names)]) -1373 -1374 if kwargs.get('all_configs'): -1375 new_weight = weight -1376 else: -1377 new_weight = Obs([w_deltas[name] + weight.r_values[name] for name in sorted(obs[i].names)], sorted(obs[i].names), idl=[obs[i].idl[name] for name in sorted(obs[i].names)]) -1378 -1379 result.append(tmp_obs / new_weight) -1380 result[-1].reweighted = True -1381 result[-1].is_merged = obs[i].is_merged -1382 -1383 return result -1384 +1346def reweight(weight, obs, **kwargs): +1347 """Reweight a list of observables. +1348 +1349 Parameters +1350 ---------- +1351 weight : Obs +1352 Reweighting factor. An Observable that has to be defined on a superset of the +1353 configurations in obs[i].idl for all i. +1354 obs : list +1355 list of Obs, e.g. [obs1, obs2, obs3]. +1356 all_configs : bool +1357 if True, the reweighted observables are normalized by the average of +1358 the reweighting factor on all configurations in weight.idl and not +1359 on the configurations in obs[i].idl. Default False. +1360 """ +1361 result = [] +1362 for i in range(len(obs)): +1363 if len(obs[i].cov_names): +1364 raise Exception('Error: Not possible to reweight an Obs that contains covobs!') +1365 if not set(obs[i].names).issubset(weight.names): +1366 raise Exception('Error: Ensembles do not fit') +1367 for name in obs[i].names: +1368 if not set(obs[i].idl[name]).issubset(weight.idl[name]): +1369 raise Exception('obs[%d] has to be defined on a subset of the configs in weight.idl[%s]!' % (i, name)) +1370 new_samples = [] +1371 w_deltas = {} +1372 for name in sorted(obs[i].names): +1373 w_deltas[name] = _reduce_deltas(weight.deltas[name], weight.idl[name], obs[i].idl[name]) +1374 new_samples.append((w_deltas[name] + weight.r_values[name]) * (obs[i].deltas[name] + obs[i].r_values[name])) +1375 tmp_obs = Obs(new_samples, sorted(obs[i].names), idl=[obs[i].idl[name] for name in sorted(obs[i].names)]) +1376 +1377 if kwargs.get('all_configs'): +1378 new_weight = weight +1379 else: +1380 new_weight = Obs([w_deltas[name] + weight.r_values[name] for name in sorted(obs[i].names)], sorted(obs[i].names), idl=[obs[i].idl[name] for name in sorted(obs[i].names)]) +1381 +1382 result.append(tmp_obs / new_weight) +1383 result[-1].reweighted = True +1384 result[-1].is_merged = obs[i].is_merged 1385 -1386def correlate(obs_a, obs_b): -1387 """Correlate two observables. +1386 return result +1387 1388 -1389 Parameters -1390 ---------- -1391 obs_a : Obs -1392 First observable -1393 obs_b : Obs -1394 Second observable -1395 -1396 Notes -1397 ----- -1398 Keep in mind to only correlate primary observables which have not been reweighted -1399 yet. 
The reweighting has to be applied after correlating the observables. -1400 Currently only works if ensembles are identical (this is not strictly necessary). -1401 """ -1402 -1403 if sorted(obs_a.names) != sorted(obs_b.names): -1404 raise Exception(f"Ensembles do not fit {set(sorted(obs_a.names)) ^ set(sorted(obs_b.names))}") -1405 if len(obs_a.cov_names) or len(obs_b.cov_names): -1406 raise Exception('Error: Not possible to correlate Obs that contain covobs!') -1407 for name in obs_a.names: -1408 if obs_a.shape[name] != obs_b.shape[name]: -1409 raise Exception('Shapes of ensemble', name, 'do not fit') -1410 if obs_a.idl[name] != obs_b.idl[name]: -1411 raise Exception('idl of ensemble', name, 'do not fit') -1412 -1413 if obs_a.reweighted is True: -1414 warnings.warn("The first observable is already reweighted.", RuntimeWarning) -1415 if obs_b.reweighted is True: -1416 warnings.warn("The second observable is already reweighted.", RuntimeWarning) -1417 -1418 new_samples = [] -1419 new_idl = [] -1420 for name in sorted(obs_a.names): -1421 new_samples.append((obs_a.deltas[name] + obs_a.r_values[name]) * (obs_b.deltas[name] + obs_b.r_values[name])) -1422 new_idl.append(obs_a.idl[name]) -1423 -1424 o = Obs(new_samples, sorted(obs_a.names), idl=new_idl) -1425 o.is_merged = {name: (obs_a.is_merged.get(name, False) or obs_b.is_merged.get(name, False)) for name in o.names} -1426 o.reweighted = obs_a.reweighted or obs_b.reweighted -1427 return o -1428 -1429 -1430def covariance(obs, visualize=False, correlation=False, smooth=None, **kwargs): -1431 r'''Calculates the error covariance matrix of a set of observables. +1389def correlate(obs_a, obs_b): +1390 """Correlate two observables. +1391 +1392 Parameters +1393 ---------- +1394 obs_a : Obs +1395 First observable +1396 obs_b : Obs +1397 Second observable +1398 +1399 Notes +1400 ----- +1401 Keep in mind to only correlate primary observables which have not been reweighted +1402 yet. The reweighting has to be applied after correlating the observables. +1403 Currently only works if ensembles are identical (this is not strictly necessary). 
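[Editor's note: the docstring's notes continue below. As a small numpy check of the windowsize-0 identity just quoted, cov(a,a) = Gamma_aa(0)/N = var(a)/N for per-configuration deltas in the absence of autocorrelation; the data here are invented:]

import numpy as np

rng = np.random.default_rng(1)
deltas = rng.normal(0.0, 0.5, 1000)
deltas -= deltas.mean()                # deltas average to zero by construction

N = deltas.size
gamma0 = np.sum(deltas * deltas) / N   # Gamma_aa(0)
cov_aa = gamma0 / N                    # the squared standard error sigma_a^2
assert np.isclose(cov_aa, deltas.var() / N)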
+1404 """ +1405 +1406 if sorted(obs_a.names) != sorted(obs_b.names): +1407 raise Exception(f"Ensembles do not fit {set(sorted(obs_a.names)) ^ set(sorted(obs_b.names))}") +1408 if len(obs_a.cov_names) or len(obs_b.cov_names): +1409 raise Exception('Error: Not possible to correlate Obs that contain covobs!') +1410 for name in obs_a.names: +1411 if obs_a.shape[name] != obs_b.shape[name]: +1412 raise Exception('Shapes of ensemble', name, 'do not fit') +1413 if obs_a.idl[name] != obs_b.idl[name]: +1414 raise Exception('idl of ensemble', name, 'do not fit') +1415 +1416 if obs_a.reweighted is True: +1417 warnings.warn("The first observable is already reweighted.", RuntimeWarning) +1418 if obs_b.reweighted is True: +1419 warnings.warn("The second observable is already reweighted.", RuntimeWarning) +1420 +1421 new_samples = [] +1422 new_idl = [] +1423 for name in sorted(obs_a.names): +1424 new_samples.append((obs_a.deltas[name] + obs_a.r_values[name]) * (obs_b.deltas[name] + obs_b.r_values[name])) +1425 new_idl.append(obs_a.idl[name]) +1426 +1427 o = Obs(new_samples, sorted(obs_a.names), idl=new_idl) +1428 o.is_merged = {name: (obs_a.is_merged.get(name, False) or obs_b.is_merged.get(name, False)) for name in o.names} +1429 o.reweighted = obs_a.reweighted or obs_b.reweighted +1430 return o +1431 1432 -1433 WARNING: This function should be used with care, especially for observables with support on multiple -1434 ensembles with differing autocorrelations. See the notes below for details. +1433def covariance(obs, visualize=False, correlation=False, smooth=None, **kwargs): +1434 r'''Calculates the error covariance matrix of a set of observables. 1435 -1436 The gamma method has to be applied first to all observables. -1437 -1438 Parameters -1439 ---------- -1440 obs : list or numpy.ndarray -1441 List or one dimensional array of Obs -1442 visualize : bool -1443 If True plots the corresponding normalized correlation matrix (default False). -1444 correlation : bool -1445 If True the correlation matrix instead of the error covariance matrix is returned (default False). -1446 smooth : None or int -1447 If smooth is an integer 'E' between 2 and the dimension of the matrix minus 1 the eigenvalue -1448 smoothing procedure of hep-lat/9412087 is applied to the correlation matrix which leaves the -1449 largest E eigenvalues essentially unchanged and smoothes the smaller eigenvalues to avoid extremely -1450 small ones. -1451 -1452 Notes -1453 ----- -1454 The error covariance is defined such that it agrees with the squared standard error for two identical observables -1455 $$\operatorname{cov}(a,a)=\sum_{s=1}^N\delta_a^s\delta_a^s/N^2=\Gamma_{aa}(0)/N=\operatorname{var}(a)/N=\sigma_a^2$$ -1456 in the absence of autocorrelation. -1457 The error covariance is estimated by calculating the correlation matrix assuming no autocorrelation and then rescaling the correlation matrix by the full errors including the previous gamma method estimate for the autocorrelation of the observables. The covariance at windowsize 0 is guaranteed to be positive semi-definite -1458 $$\sum_{i,j}v_i\Gamma_{ij}(0)v_j=\frac{1}{N}\sum_{s=1}^N\sum_{i,j}v_i\delta_i^s\delta_j^s v_j=\frac{1}{N}\sum_{s=1}^N\sum_{i}|v_i\delta_i^s|^2\geq 0\,,$$ for every $v\in\mathbb{R}^M$, while such an identity does not hold for larger windows/lags. 
-1459 For observables defined on a single ensemble our approximation is equivalent to assuming that the integrated autocorrelation time of an off-diagonal element is equal to the geometric mean of the integrated autocorrelation times of the corresponding diagonal elements. -1460 $$\tau_{\mathrm{int}, ij}=\sqrt{\tau_{\mathrm{int}, i}\times \tau_{\mathrm{int}, j}}$$ -1461 This construction ensures that the estimated covariance matrix is positive semi-definite (up to numerical rounding errors). -1462 ''' -1463 -1464 length = len(obs) -1465 -1466 max_samples = np.max([o.N for o in obs]) -1467 if max_samples <= length and not [item for sublist in [o.cov_names for o in obs] for item in sublist]: -1468 warnings.warn(f"The dimension of the covariance matrix ({length}) is larger or equal to the number of samples ({max_samples}). This will result in a rank deficient matrix.", RuntimeWarning) -1469 -1470 cov = np.zeros((length, length)) -1471 for i in range(length): -1472 for j in range(i, length): -1473 cov[i, j] = _covariance_element(obs[i], obs[j]) -1474 cov = cov + cov.T - np.diag(np.diag(cov)) -1475 -1476 corr = np.diag(1 / np.sqrt(np.diag(cov))) @ cov @ np.diag(1 / np.sqrt(np.diag(cov))) -1477 -1478 if isinstance(smooth, int): -1479 corr = _smooth_eigenvalues(corr, smooth) +1436 WARNING: This function should be used with care, especially for observables with support on multiple +1437 ensembles with differing autocorrelations. See the notes below for details. +1438 +1439 The gamma method has to be applied first to all observables. +1440 +1441 Parameters +1442 ---------- +1443 obs : list or numpy.ndarray +1444 List or one dimensional array of Obs +1445 visualize : bool +1446 If True plots the corresponding normalized correlation matrix (default False). +1447 correlation : bool +1448 If True the correlation matrix instead of the error covariance matrix is returned (default False). +1449 smooth : None or int +1450 If smooth is an integer 'E' between 2 and the dimension of the matrix minus 1 the eigenvalue +1451 smoothing procedure of hep-lat/9412087 is applied to the correlation matrix which leaves the +1452 largest E eigenvalues essentially unchanged and smoothes the smaller eigenvalues to avoid extremely +1453 small ones. +1454 +1455 Notes +1456 ----- +1457 The error covariance is defined such that it agrees with the squared standard error for two identical observables +1458 $$\operatorname{cov}(a,a)=\sum_{s=1}^N\delta_a^s\delta_a^s/N^2=\Gamma_{aa}(0)/N=\operatorname{var}(a)/N=\sigma_a^2$$ +1459 in the absence of autocorrelation. +1460 The error covariance is estimated by calculating the correlation matrix assuming no autocorrelation and then rescaling the correlation matrix by the full errors including the previous gamma method estimate for the autocorrelation of the observables. The covariance at windowsize 0 is guaranteed to be positive semi-definite +1461 $$\sum_{i,j}v_i\Gamma_{ij}(0)v_j=\frac{1}{N}\sum_{s=1}^N\sum_{i,j}v_i\delta_i^s\delta_j^s v_j=\frac{1}{N}\sum_{s=1}^N\sum_{i}|v_i\delta_i^s|^2\geq 0\,,$$ for every $v\in\mathbb{R}^M$, while such an identity does not hold for larger windows/lags. +1462 For observables defined on a single ensemble our approximation is equivalent to assuming that the integrated autocorrelation time of an off-diagonal element is equal to the geometric mean of the integrated autocorrelation times of the corresponding diagonal elements. 
+1463 $$\tau_{\mathrm{int}, ij}=\sqrt{\tau_{\mathrm{int}, i}\times \tau_{\mathrm{int}, j}}$$ +1464 This construction ensures that the estimated covariance matrix is positive semi-definite (up to numerical rounding errors). +1465 ''' +1466 +1467 length = len(obs) +1468 +1469 max_samples = np.max([o.N for o in obs]) +1470 if max_samples <= length and not [item for sublist in [o.cov_names for o in obs] for item in sublist]: +1471 warnings.warn(f"The dimension of the covariance matrix ({length}) is larger or equal to the number of samples ({max_samples}). This will result in a rank deficient matrix.", RuntimeWarning) +1472 +1473 cov = np.zeros((length, length)) +1474 for i in range(length): +1475 for j in range(i, length): +1476 cov[i, j] = _covariance_element(obs[i], obs[j]) +1477 cov = cov + cov.T - np.diag(np.diag(cov)) +1478 +1479 corr = np.diag(1 / np.sqrt(np.diag(cov))) @ cov @ np.diag(1 / np.sqrt(np.diag(cov))) 1480 -1481 if visualize: -1482 plt.matshow(corr, vmin=-1, vmax=1) -1483 plt.set_cmap('RdBu') -1484 plt.colorbar() -1485 plt.draw() -1486 -1487 if correlation is True: -1488 return corr +1481 if isinstance(smooth, int): +1482 corr = _smooth_eigenvalues(corr, smooth) +1483 +1484 if visualize: +1485 plt.matshow(corr, vmin=-1, vmax=1) +1486 plt.set_cmap('RdBu') +1487 plt.colorbar() +1488 plt.draw() 1489 -1490 errors = [o.dvalue for o in obs] -1491 cov = np.diag(errors) @ corr @ np.diag(errors) +1490 if correlation is True: +1491 return corr 1492 -1493 eigenvalues = np.linalg.eigh(cov)[0] -1494 if not np.all(eigenvalues >= 0): -1495 warnings.warn("Covariance matrix is not positive semi-definite (Eigenvalues: " + str(eigenvalues) + ")", RuntimeWarning) -1496 -1497 return cov -1498 +1493 errors = [o.dvalue for o in obs] +1494 cov = np.diag(errors) @ corr @ np.diag(errors) +1495 +1496 eigenvalues = np.linalg.eigh(cov)[0] +1497 if not np.all(eigenvalues >= 0): +1498 warnings.warn("Covariance matrix is not positive semi-definite (Eigenvalues: " + str(eigenvalues) + ")", RuntimeWarning) 1499 -1500def _smooth_eigenvalues(corr, E): -1501 """Eigenvalue smoothing as described in hep-lat/9412087 +1500 return cov +1501 1502 -1503 corr : np.ndarray -1504 correlation matrix -1505 E : integer -1506 Number of eigenvalues to be left substantially unchanged -1507 """ -1508 if not (2 < E < corr.shape[0] - 1): -1509 raise Exception(f"'E' has to be between 2 and the dimension of the correlation matrix minus 1 ({corr.shape[0] - 1}).") -1510 vals, vec = np.linalg.eigh(corr) -1511 lambda_min = np.mean(vals[:-E]) -1512 vals[vals < lambda_min] = lambda_min -1513 vals /= np.mean(vals) -1514 return vec @ np.diag(vals) @ vec.T -1515 -1516 -1517def _covariance_element(obs1, obs2): -1518 """Estimates the covariance of two Obs objects, neglecting autocorrelations.""" +1503def _smooth_eigenvalues(corr, E): +1504 """Eigenvalue smoothing as described in hep-lat/9412087 +1505 +1506 corr : np.ndarray +1507 correlation matrix +1508 E : integer +1509 Number of eigenvalues to be left substantially unchanged +1510 """ +1511 if not (2 < E < corr.shape[0] - 1): +1512 raise Exception(f"'E' has to be between 2 and the dimension of the correlation matrix minus 1 ({corr.shape[0] - 1}).") +1513 vals, vec = np.linalg.eigh(corr) +1514 lambda_min = np.mean(vals[:-E]) +1515 vals[vals < lambda_min] = lambda_min +1516 vals /= np.mean(vals) +1517 return vec @ np.diag(vals) @ vec.T +1518 1519 -1520 def calc_gamma(deltas1, deltas2, idx1, idx2, new_idx): -1521 deltas1 = _reduce_deltas(deltas1, idx1, new_idx) -1522 deltas2 = 
_reduce_deltas(deltas2, idx2, new_idx) -1523 return np.sum(deltas1 * deltas2) -1524 -1525 if set(obs1.names).isdisjoint(set(obs2.names)): -1526 return 0.0 +1520def _covariance_element(obs1, obs2): +1521 """Estimates the covariance of two Obs objects, neglecting autocorrelations.""" +1522 +1523 def calc_gamma(deltas1, deltas2, idx1, idx2, new_idx): +1524 deltas1 = _reduce_deltas(deltas1, idx1, new_idx) +1525 deltas2 = _reduce_deltas(deltas2, idx2, new_idx) +1526 return np.sum(deltas1 * deltas2) 1527 -1528 if not hasattr(obs1, 'e_dvalue') or not hasattr(obs2, 'e_dvalue'): -1529 raise Exception('The gamma method has to be applied to both Obs first.') +1528 if set(obs1.names).isdisjoint(set(obs2.names)): +1529 return 0.0 1530 -1531 dvalue = 0.0 -1532 -1533 for e_name in obs1.mc_names: -1534 -1535 if e_name not in obs2.mc_names: -1536 continue +1531 if not hasattr(obs1, 'e_dvalue') or not hasattr(obs2, 'e_dvalue'): +1532 raise Exception('The gamma method has to be applied to both Obs first.') +1533 +1534 dvalue = 0.0 +1535 +1536 for e_name in obs1.mc_names: 1537 -1538 idl_d = {} -1539 for r_name in obs1.e_content[e_name]: -1540 if r_name not in obs2.e_content[e_name]: -1541 continue -1542 idl_d[r_name] = _intersection_idx([obs1.idl[r_name], obs2.idl[r_name]]) -1543 -1544 gamma = 0.0 -1545 -1546 for r_name in obs1.e_content[e_name]: -1547 if r_name not in obs2.e_content[e_name]: -1548 continue -1549 if len(idl_d[r_name]) == 0: -1550 continue -1551 gamma += calc_gamma(obs1.deltas[r_name], obs2.deltas[r_name], obs1.idl[r_name], obs2.idl[r_name], idl_d[r_name]) -1552 -1553 if gamma == 0.0: -1554 continue +1538 if e_name not in obs2.mc_names: +1539 continue +1540 +1541 idl_d = {} +1542 for r_name in obs1.e_content[e_name]: +1543 if r_name not in obs2.e_content[e_name]: +1544 continue +1545 idl_d[r_name] = _intersection_idx([obs1.idl[r_name], obs2.idl[r_name]]) +1546 +1547 gamma = 0.0 +1548 +1549 for r_name in obs1.e_content[e_name]: +1550 if r_name not in obs2.e_content[e_name]: +1551 continue +1552 if len(idl_d[r_name]) == 0: +1553 continue +1554 gamma += calc_gamma(obs1.deltas[r_name], obs2.deltas[r_name], obs1.idl[r_name], obs2.idl[r_name], idl_d[r_name]) 1555 -1556 gamma_div = 0.0 -1557 for r_name in obs1.e_content[e_name]: -1558 if r_name not in obs2.e_content[e_name]: -1559 continue -1560 if len(idl_d[r_name]) == 0: -1561 continue -1562 gamma_div += np.sqrt(calc_gamma(obs1.deltas[r_name], obs1.deltas[r_name], obs1.idl[r_name], obs1.idl[r_name], idl_d[r_name]) * calc_gamma(obs2.deltas[r_name], obs2.deltas[r_name], obs2.idl[r_name], obs2.idl[r_name], idl_d[r_name])) -1563 gamma /= gamma_div -1564 -1565 dvalue += gamma -1566 -1567 for e_name in obs1.cov_names: -1568 -1569 if e_name not in obs2.cov_names: -1570 continue +1556 if gamma == 0.0: +1557 continue +1558 +1559 gamma_div = 0.0 +1560 for r_name in obs1.e_content[e_name]: +1561 if r_name not in obs2.e_content[e_name]: +1562 continue +1563 if len(idl_d[r_name]) == 0: +1564 continue +1565 gamma_div += np.sqrt(calc_gamma(obs1.deltas[r_name], obs1.deltas[r_name], obs1.idl[r_name], obs1.idl[r_name], idl_d[r_name]) * calc_gamma(obs2.deltas[r_name], obs2.deltas[r_name], obs2.idl[r_name], obs2.idl[r_name], idl_d[r_name])) +1566 gamma /= gamma_div +1567 +1568 dvalue += gamma +1569 +1570 for e_name in obs1.cov_names: 1571 -1572 dvalue += float(np.dot(np.transpose(obs1.covobs[e_name].grad), np.dot(obs1.covobs[e_name].cov, obs2.covobs[e_name].grad))) -1573 -1574 return dvalue -1575 +1572 if e_name not in obs2.cov_names: +1573 continue +1574 +1575 dvalue 
+= float(np.dot(np.transpose(obs1.covobs[e_name].grad), np.dot(obs1.covobs[e_name].cov, obs2.covobs[e_name].grad))) 1576 -1577def import_jackknife(jacks, name, idl=None): -1578 """Imports jackknife samples and returns an Obs +1577 return dvalue +1578 1579 -1580 Parameters -1581 ---------- -1582 jacks : numpy.ndarray -1583 numpy array containing the mean value as zeroth entry and -1584 the N jackknife samples as first to Nth entry. -1585 name : str -1586 name of the ensemble the samples are defined on. -1587 """ -1588 length = len(jacks) - 1 -1589 prj = (np.ones((length, length)) - (length - 1) * np.identity(length)) -1590 samples = jacks[1:] @ prj -1591 mean = np.mean(samples) -1592 new_obs = Obs([samples - mean], [name], idl=idl, means=[mean]) -1593 new_obs._value = jacks[0] -1594 return new_obs -1595 -1596 -1597def merge_obs(list_of_obs): -1598 """Combine all observables in list_of_obs into one new observable +1580def import_jackknife(jacks, name, idl=None): +1581 """Imports jackknife samples and returns an Obs +1582 +1583 Parameters +1584 ---------- +1585 jacks : numpy.ndarray +1586 numpy array containing the mean value as zeroth entry and +1587 the N jackknife samples as first to Nth entry. +1588 name : str +1589 name of the ensemble the samples are defined on. +1590 """ +1591 length = len(jacks) - 1 +1592 prj = (np.ones((length, length)) - (length - 1) * np.identity(length)) +1593 samples = jacks[1:] @ prj +1594 mean = np.mean(samples) +1595 new_obs = Obs([samples - mean], [name], idl=idl, means=[mean]) +1596 new_obs._value = jacks[0] +1597 return new_obs +1598 1599 -1600 Parameters -1601 ---------- -1602 list_of_obs : list -1603 list of the Obs object to be combined -1604 -1605 Notes -1606 ----- -1607 It is not possible to combine obs which are based on the same replicum -1608 """ -1609 replist = [item for obs in list_of_obs for item in obs.names] -1610 if (len(replist) == len(set(replist))) is False: -1611 raise Exception('list_of_obs contains duplicate replica: %s' % (str(replist))) -1612 if any([len(o.cov_names) for o in list_of_obs]): -1613 raise Exception('Not possible to merge data that contains covobs!') -1614 new_dict = {} -1615 idl_dict = {} -1616 for o in list_of_obs: -1617 new_dict.update({key: o.deltas.get(key, 0) + o.r_values.get(key, 0) -1618 for key in set(o.deltas) | set(o.r_values)}) -1619 idl_dict.update({key: o.idl.get(key, 0) for key in set(o.deltas)}) -1620 -1621 names = sorted(new_dict.keys()) -1622 o = Obs([new_dict[name] for name in names], names, idl=[idl_dict[name] for name in names]) -1623 o.is_merged = {name: np.any([oi.is_merged.get(name, False) for oi in list_of_obs]) for name in o.names} -1624 o.reweighted = np.max([oi.reweighted for oi in list_of_obs]) -1625 return o -1626 -1627 -1628def cov_Obs(means, cov, name, grad=None): -1629 """Create an Obs based on mean(s) and a covariance matrix +1600def merge_obs(list_of_obs): +1601 """Combine all observables in list_of_obs into one new observable +1602 +1603 Parameters +1604 ---------- +1605 list_of_obs : list +1606 list of the Obs object to be combined +1607 +1608 Notes +1609 ----- +1610 It is not possible to combine obs which are based on the same replicum +1611 """ +1612 replist = [item for obs in list_of_obs for item in obs.names] +1613 if (len(replist) == len(set(replist))) is False: +1614 raise Exception('list_of_obs contains duplicate replica: %s' % (str(replist))) +1615 if any([len(o.cov_names) for o in list_of_obs]): +1616 raise Exception('Not possible to merge data that contains covobs!') +1617 
new_dict = {} +1618 idl_dict = {} +1619 for o in list_of_obs: +1620 new_dict.update({key: o.deltas.get(key, 0) + o.r_values.get(key, 0) +1621 for key in set(o.deltas) | set(o.r_values)}) +1622 idl_dict.update({key: o.idl.get(key, 0) for key in set(o.deltas)}) +1623 +1624 names = sorted(new_dict.keys()) +1625 o = Obs([new_dict[name] for name in names], names, idl=[idl_dict[name] for name in names]) +1626 o.is_merged = {name: np.any([oi.is_merged.get(name, False) for oi in list_of_obs]) for name in o.names} +1627 o.reweighted = np.max([oi.reweighted for oi in list_of_obs]) +1628 return o +1629 1630 -1631 Parameters -1632 ---------- -1633 mean : list of floats or float -1634 N mean value(s) of the new Obs -1635 cov : list or array -1636 2d (NxN) Covariance matrix, 1d diagonal entries or 0d covariance -1637 name : str -1638 identifier for the covariance matrix -1639 grad : list or array -1640 Gradient of the Covobs wrt. the means belonging to cov. -1641 """ -1642 -1643 def covobs_to_obs(co): -1644 """Make an Obs out of a Covobs +1631def cov_Obs(means, cov, name, grad=None): +1632 """Create an Obs based on mean(s) and a covariance matrix +1633 +1634 Parameters +1635 ---------- +1636 mean : list of floats or float +1637 N mean value(s) of the new Obs +1638 cov : list or array +1639 2d (NxN) Covariance matrix, 1d diagonal entries or 0d covariance +1640 name : str +1641 identifier for the covariance matrix +1642 grad : list or array +1643 Gradient of the Covobs wrt. the means belonging to cov. +1644 """ 1645 -1646 Parameters -1647 ---------- -1648 co : Covobs -1649 Covobs to be embedded into the Obs -1650 """ -1651 o = Obs([], [], means=[]) -1652 o._value = co.value -1653 o.names.append(co.name) -1654 o._covobs[co.name] = co -1655 o._dvalue = np.sqrt(co.errsq()) -1656 return o -1657 -1658 ol = [] -1659 if isinstance(means, (float, int)): -1660 means = [means] -1661 -1662 for i in range(len(means)): -1663 ol.append(covobs_to_obs(Covobs(means[i], cov, name, pos=i, grad=grad))) -1664 if ol[0].covobs[name].N != len(means): -1665 raise Exception('You have to provide %d mean values!' % (ol[0].N)) -1666 if len(ol) == 1: -1667 return ol[0] -1668 return ol +1646 def covobs_to_obs(co): +1647 """Make an Obs out of a Covobs +1648 +1649 Parameters +1650 ---------- +1651 co : Covobs +1652 Covobs to be embedded into the Obs +1653 """ +1654 o = Obs([], [], means=[]) +1655 o._value = co.value +1656 o.names.append(co.name) +1657 o._covobs[co.name] = co +1658 o._dvalue = np.sqrt(co.errsq()) +1659 return o +1660 +1661 ol = [] +1662 if isinstance(means, (float, int)): +1663 means = [means] +1664 +1665 for i in range(len(means)): +1666 ol.append(covobs_to_obs(Covobs(means[i], cov, name, pos=i, grad=grad))) +1667 if ol[0].covobs[name].N != len(means): +1668 raise Exception('You have to provide %d mean values!' % (ol[0].N)) +1669 if len(ol) == 1: +1670 return ol[0] +1671 return ol @@ -1885,855 +1888,855 @@ -
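Before the class listing is repeated below, a hedged usage sketch for covariance and cov_Obs from the hunk above; the sample data, the ensemble name 'A' and the label 'Z-factor' are invented for illustration, and the gamma method has to be applied to all observables first.

import numpy as np
import pyerrors as pe

rng = np.random.default_rng(7)
common = rng.normal(0.0, 1.0, 1000)
# Two observables sharing fluctuations on the same ensemble 'A'.
o1 = pe.Obs([1.0 + 0.1 * common + 0.05 * rng.normal(0, 1, 1000)], ['A'])
o2 = pe.Obs([2.0 + 0.1 * common + 0.05 * rng.normal(0, 1, 1000)], ['A'])
o1.gamma_method()
o2.gamma_method()

cov = pe.covariance([o1, o2])                     # 2x2 error covariance matrix
corr = pe.covariance([o1, o2], correlation=True)  # normalized correlation matrix

# A systematic uncertainty enters as a Covobs-backed Obs (mean, 0d covariance, name).
z = pe.cov_Obs(1.02, 0.0004, 'Z-factor')
prod = o1 * z
prod.gamma_method()
print(prod.value, prod.dvalue)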
 17class Obs:
- 18    """Class for a general observable.
- 19
- 20    Instances of Obs are the basic objects of a pyerrors error analysis.
- 21    They are initialized with a list which contains arrays of samples for
- 22    different ensembles/replica and another list of same length which contains
- 23    the names of the ensembles/replica. Mathematical operations can be
- 24    performed on instances. The result is another instance of Obs. The error of
- 25    an instance can be computed with the gamma_method. The class also contains
- 26    additional methods for output and visualization of the error calculation.
- 27
- 28    Attributes
- 29    ----------
- 30    S_global : float
- 31        Standard value for S (default 2.0)
- 32    S_dict : dict
- 33        Dictionary for S values. If an entry for a given ensemble
- 34        exists this overwrites the standard value for that ensemble.
- 35    tau_exp_global : float
- 36        Standard value for tau_exp (default 0.0)
- 37    tau_exp_dict : dict
- 38        Dictionary for tau_exp values. If an entry for a given ensemble exists
- 39        this overwrites the standard value for that ensemble.
- 40    N_sigma_global : float
- 41        Standard value for N_sigma (default 1.0)
- 42    N_sigma_dict : dict
- 43        Dictionary for N_sigma values. If an entry for a given ensemble exists
- 44        this overwrites the standard value for that ensemble.
- 45    """
- 46    __slots__ = ['names', 'shape', 'r_values', 'deltas', 'N', '_value', '_dvalue',
- 47                 'ddvalue', 'reweighted', 'S', 'tau_exp', 'N_sigma',
- 48                 'e_dvalue', 'e_ddvalue', 'e_tauint', 'e_dtauint',
- 49                 'e_windowsize', 'e_rho', 'e_drho', 'e_n_tauint', 'e_n_dtauint',
- 50                 'idl', 'is_merged', 'tag', '_covobs', '__dict__']
- 51
- 52    S_global = 2.0
- 53    S_dict = {}
- 54    tau_exp_global = 0.0
- 55    tau_exp_dict = {}
- 56    N_sigma_global = 1.0
- 57    N_sigma_dict = {}
- 58    filter_eps = 1e-10
- 59
- 60    def __init__(self, samples, names, idl=None, **kwargs):
- 61        """ Initialize Obs object.
+            
 20class Obs:
+ 21    """Class for a general observable.
+ 22
+ 23    Instances of Obs are the basic objects of a pyerrors error analysis.
+ 24    They are initialized with a list which contains arrays of samples for
+ 25    different ensembles/replica and another list of same length which contains
+ 26    the names of the ensembles/replica. Mathematical operations can be
+ 27    performed on instances. The result is another instance of Obs. The error of
+ 28    an instance can be computed with the gamma_method. The class also contains
+ 29    additional methods for output and visualization of the error calculation.
+ 30
+ 31    Attributes
+ 32    ----------
+ 33    S_global : float
+ 34        Standard value for S (default 2.0)
+ 35    S_dict : dict
+ 36        Dictionary for S values. If an entry for a given ensemble
+ 37        exists this overwrites the standard value for that ensemble.
+ 38    tau_exp_global : float
+ 39        Standard value for tau_exp (default 0.0)
+ 40    tau_exp_dict : dict
+ 41        Dictionary for tau_exp values. If an entry for a given ensemble exists
+ 42        this overwrites the standard value for that ensemble.
+ 43    N_sigma_global : float
+ 44        Standard value for N_sigma (default 1.0)
+ 45    N_sigma_dict : dict
+ 46        Dictionary for N_sigma values. If an entry for a given ensemble exists
+ 47        this overwrites the standard value for that ensemble.
+ 48    """
+ 49    __slots__ = ['names', 'shape', 'r_values', 'deltas', 'N', '_value', '_dvalue',
+ 50                 'ddvalue', 'reweighted', 'S', 'tau_exp', 'N_sigma',
+ 51                 'e_dvalue', 'e_ddvalue', 'e_tauint', 'e_dtauint',
+ 52                 'e_windowsize', 'e_rho', 'e_drho', 'e_n_tauint', 'e_n_dtauint',
+ 53                 'idl', 'is_merged', 'tag', '_covobs', '__dict__']
+ 54
+ 55    S_global = 2.0
+ 56    S_dict = {}
+ 57    tau_exp_global = 0.0
+ 58    tau_exp_dict = {}
+ 59    N_sigma_global = 1.0
+ 60    N_sigma_dict = {}
+ 61    filter_eps = 1e-10
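A brief sketch (not part of the patch) of how these class-level defaults are intended to be used before calling gamma_method; the ensemble name 'A' is invented:

import pyerrors as pe

pe.Obs.S_global = 2.5           # new default S for all ensembles
pe.Obs.S_dict['A'] = 1.8        # overrides the global value for ensemble 'A' only
pe.Obs.tau_exp_dict['A'] = 4.0  # attaches an exponential tail for ensemble 'A'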
  62
- 63        Parameters
- 64        ----------
- 65        samples : list
- 66            list of numpy arrays containing the Monte Carlo samples
- 67        names : list
- 68            list of strings labeling the individual samples
- 69        idl : list, optional
- 70            list of ranges or lists on which the samples are defined
- 71        """
- 72
- 73        if kwargs.get("means") is None and len(samples):
- 74            if len(samples) != len(names):
- 75                raise Exception('Length of samples and names incompatible.')
- 76            if idl is not None:
- 77                if len(idl) != len(names):
- 78                    raise Exception('Length of idl incompatible with samples and names.')
- 79            name_length = len(names)
- 80            if name_length > 1:
- 81                if name_length != len(set(names)):
- 82                    raise Exception('names are not unique.')
- 83                if not all(isinstance(x, str) for x in names):
- 84                    raise TypeError('All names have to be strings.')
- 85            else:
- 86                if not isinstance(names[0], str):
+ 63    def __init__(self, samples, names, idl=None, **kwargs):
+ 64        """ Initialize Obs object.
+ 65
+ 66        Parameters
+ 67        ----------
+ 68        samples : list
+ 69            list of numpy arrays containing the Monte Carlo samples
+ 70        names : list
+ 71            list of strings labeling the individual samples
+ 72        idl : list, optional
+ 73            list of ranges or lists on which the samples are defined
+ 74        """
+ 75
+ 76        if kwargs.get("means") is None and len(samples):
+ 77            if len(samples) != len(names):
+ 78                raise Exception('Length of samples and names incompatible.')
+ 79            if idl is not None:
+ 80                if len(idl) != len(names):
+ 81                    raise Exception('Length of idl incompatible with samples and names.')
+ 82            name_length = len(names)
+ 83            if name_length > 1:
+ 84                if name_length != len(set(names)):
+ 85                    raise Exception('names are not unique.')
+ 86                if not all(isinstance(x, str) for x in names):
  87                    raise TypeError('All names have to be strings.')
- 88            if min(len(x) for x in samples) <= 4:
- 89                raise Exception('Samples have to have at least 5 entries.')
- 90
- 91        self.names = sorted(names)
- 92        self.shape = {}
- 93        self.r_values = {}
- 94        self.deltas = {}
- 95        self._covobs = {}
- 96
- 97        self._value = 0
- 98        self.N = 0
- 99        self.is_merged = {}
-100        self.idl = {}
-101        if idl is not None:
-102            for name, idx in sorted(zip(names, idl)):
-103                if isinstance(idx, range):
-104                    self.idl[name] = idx
-105                elif isinstance(idx, (list, np.ndarray)):
-106                    dc = np.unique(np.diff(idx))
-107                    if np.any(dc < 0):
-108                        raise Exception("Unsorted idx for idl[%s]" % (name))
-109                    if len(dc) == 1:
-110                        self.idl[name] = range(idx[0], idx[-1] + dc[0], dc[0])
-111                    else:
-112                        self.idl[name] = list(idx)
-113                else:
-114                    raise Exception('incompatible type for idl[%s].' % (name))
-115        else:
-116            for name, sample in sorted(zip(names, samples)):
-117                self.idl[name] = range(1, len(sample) + 1)
-118
-119        if kwargs.get("means") is not None:
-120            for name, sample, mean in sorted(zip(names, samples, kwargs.get("means"))):
-121                self.shape[name] = len(self.idl[name])
-122                self.N += self.shape[name]
-123                self.r_values[name] = mean
-124                self.deltas[name] = sample
-125        else:
-126            for name, sample in sorted(zip(names, samples)):
-127                self.shape[name] = len(self.idl[name])
-128                self.N += self.shape[name]
-129                if len(sample) != self.shape[name]:
-130                    raise Exception('Incompatible samples and idx for %s: %d vs. %d' % (name, len(sample), self.shape[name]))
-131                self.r_values[name] = np.mean(sample)
-132                self.deltas[name] = sample - self.r_values[name]
-133                self._value += self.shape[name] * self.r_values[name]
-134            self._value /= self.N
-135
-136        self._dvalue = 0.0
-137        self.ddvalue = 0.0
-138        self.reweighted = False
-139
-140        self.tag = None
-141
-142    @property
-143    def value(self):
-144        return self._value
-145
-146    @property
-147    def dvalue(self):
-148        return self._dvalue
-149
-150    @property
-151    def e_names(self):
-152        return sorted(set([o.split('|')[0] for o in self.names]))
-153
-154    @property
-155    def cov_names(self):
-156        return sorted(set([o for o in self.covobs.keys()]))
-157
-158    @property
-159    def mc_names(self):
-160        return sorted(set([o.split('|')[0] for o in self.names if o not in self.cov_names]))
-161
-162    @property
-163    def e_content(self):
-164        res = {}
-165        for e, e_name in enumerate(self.e_names):
-166            res[e_name] = sorted(filter(lambda x: x.startswith(e_name + '|'), self.names))
-167            if e_name in self.names:
-168                res[e_name].append(e_name)
-169        return res
-170
-171    @property
-172    def covobs(self):
-173        return self._covobs
-174
-175    def gamma_method(self, **kwargs):
-176        """Estimate the error and related properties of the Obs.
+ 88            else:
+ 89                if not isinstance(names[0], str):
+ 90                    raise TypeError('All names have to be strings.')
+ 91            if min(len(x) for x in samples) <= 4:
+ 92                raise Exception('Samples have to have at least 5 entries.')
+ 93
+ 94        self.names = sorted(names)
+ 95        self.shape = {}
+ 96        self.r_values = {}
+ 97        self.deltas = {}
+ 98        self._covobs = {}
+ 99
+100        self._value = 0
+101        self.N = 0
+102        self.is_merged = {}
+103        self.idl = {}
+104        if idl is not None:
+105            for name, idx in sorted(zip(names, idl)):
+106                if isinstance(idx, range):
+107                    self.idl[name] = idx
+108                elif isinstance(idx, (list, np.ndarray)):
+109                    dc = np.unique(np.diff(idx))
+110                    if np.any(dc < 0):
+111                        raise Exception("Unsorted idx for idl[%s]" % (name))
+112                    if len(dc) == 1:
+113                        self.idl[name] = range(idx[0], idx[-1] + dc[0], dc[0])
+114                    else:
+115                        self.idl[name] = list(idx)
+116                else:
+117                    raise Exception('incompatible type for idl[%s].' % (name))
+118        else:
+119            for name, sample in sorted(zip(names, samples)):
+120                self.idl[name] = range(1, len(sample) + 1)
+121
+122        if kwargs.get("means") is not None:
+123            for name, sample, mean in sorted(zip(names, samples, kwargs.get("means"))):
+124                self.shape[name] = len(self.idl[name])
+125                self.N += self.shape[name]
+126                self.r_values[name] = mean
+127                self.deltas[name] = sample
+128        else:
+129            for name, sample in sorted(zip(names, samples)):
+130                self.shape[name] = len(self.idl[name])
+131                self.N += self.shape[name]
+132                if len(sample) != self.shape[name]:
+133                    raise Exception('Incompatible samples and idx for %s: %d vs. %d' % (name, len(sample), self.shape[name]))
+134                self.r_values[name] = np.mean(sample)
+135                self.deltas[name] = sample - self.r_values[name]
+136                self._value += self.shape[name] * self.r_values[name]
+137            self._value /= self.N
+138
+139        self._dvalue = 0.0
+140        self.ddvalue = 0.0
+141        self.reweighted = False
+142
+143        self.tag = None
+144
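A minimal usage sketch of the constructor above; the sample data and names are invented, with two replica of one ensemble following the 'ensemble|replicum' naming convention exploited by the properties below:

import numpy as np
import pyerrors as pe

rng = np.random.default_rng(42)
samples_r0 = rng.normal(1.0, 0.1, 500)
samples_r1 = rng.normal(1.0, 0.1, 300)

# idl lists the configuration numbers; replicum r1 only contains every
# second configuration, expressed as a range with step 2.
obs = pe.Obs([samples_r0, samples_r1], ['A|r0', 'A|r1'],
             idl=[range(1, 501), range(1, 600, 2)])
print(obs.N)  # 800 samples in total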
+145    @property
+146    def value(self):
+147        return self._value
+148
+149    @property
+150    def dvalue(self):
+151        return self._dvalue
+152
+153    @property
+154    def e_names(self):
+155        return sorted(set([o.split('|')[0] for o in self.names]))
+156
+157    @property
+158    def cov_names(self):
+159        return sorted(set([o for o in self.covobs.keys()]))
+160
+161    @property
+162    def mc_names(self):
+163        return sorted(set([o.split('|')[0] for o in self.names if o not in self.cov_names]))
+164
+165    @property
+166    def e_content(self):
+167        res = {}
+168        for e, e_name in enumerate(self.e_names):
+169            res[e_name] = sorted(filter(lambda x: x.startswith(e_name + '|'), self.names))
+170            if e_name in self.names:
+171                res[e_name].append(e_name)
+172        return res
+173
+174    @property
+175    def covobs(self):
+176        return self._covobs
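Continuing the invented example from the constructor sketch, the bookkeeping of the properties above resolves as follows (the '|' separator splits ensemble and replicum names):

print(obs.e_names)    # ['A']
print(obs.mc_names)   # ['A']  (no covobs attached)
print(obs.e_content)  # {'A': ['A|r0', 'A|r1']}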
 177
-178        Parameters
-179        ----------
-180        S : float
-181            specifies a custom value for the parameter S (default 2.0).
-182            If set to 0 it is assumed that the data exhibits no
-183            autocorrelation. In this case the error estimate coincides
-184            with the sample standard error.
-185        tau_exp : float
-186            positive value triggers the critical slowing down analysis
-187            (default 0.0).
-188        N_sigma : float
-189            number of standard deviations from zero until the tail is
-190            attached to the autocorrelation function (default 1).
-191        fft : bool
-192            determines whether the fft algorithm is used for the computation
-193            of the autocorrelation function (default True)
-194        """
-195
-196        e_content = self.e_content
-197        self.e_dvalue = {}
-198        self.e_ddvalue = {}
-199        self.e_tauint = {}
-200        self.e_dtauint = {}
-201        self.e_windowsize = {}
-202        self.e_n_tauint = {}
-203        self.e_n_dtauint = {}
-204        e_gamma = {}
-205        self.e_rho = {}
-206        self.e_drho = {}
-207        self._dvalue = 0
-208        self.ddvalue = 0
-209
-210        self.S = {}
-211        self.tau_exp = {}
-212        self.N_sigma = {}
-213
-214        if kwargs.get('fft') is False:
-215            fft = False
-216        else:
-217            fft = True
-218
-219        def _parse_kwarg(kwarg_name):
-220            if kwarg_name in kwargs:
-221                tmp = kwargs.get(kwarg_name)
-222                if isinstance(tmp, (int, float)):
-223                    if tmp < 0:
-224                        raise Exception(kwarg_name + ' has to be greater than or equal to 0.')
-225                    for e, e_name in enumerate(self.e_names):
-226                        getattr(self, kwarg_name)[e_name] = tmp
-227                else:
-228                    raise TypeError(kwarg_name + ' is not in proper format.')
-229            else:
-230                for e, e_name in enumerate(self.e_names):
-231                    if e_name in getattr(Obs, kwarg_name + '_dict'):
-232                        getattr(self, kwarg_name)[e_name] = getattr(Obs, kwarg_name + '_dict')[e_name]
-233                    else:
-234                        getattr(self, kwarg_name)[e_name] = getattr(Obs, kwarg_name + '_global')
-235
-236        _parse_kwarg('S')
-237        _parse_kwarg('tau_exp')
-238        _parse_kwarg('N_sigma')
-239
-240        for e, e_name in enumerate(self.mc_names):
-241            r_length = []
-242            for r_name in e_content[e_name]:
-243                if isinstance(self.idl[r_name], range):
-244                    r_length.append(len(self.idl[r_name]))
-245                else:
-246                    r_length.append((self.idl[r_name][-1] - self.idl[r_name][0] + 1))
-247
-248            e_N = np.sum([self.shape[r_name] for r_name in e_content[e_name]])
-249            w_max = max(r_length) // 2
-250            e_gamma[e_name] = np.zeros(w_max)
-251            self.e_rho[e_name] = np.zeros(w_max)
-252            self.e_drho[e_name] = np.zeros(w_max)
-253
-254            for r_name in e_content[e_name]:
-255                e_gamma[e_name] += self._calc_gamma(self.deltas[r_name], self.idl[r_name], self.shape[r_name], w_max, fft)
+178    def gamma_method(self, **kwargs):
+179        """Estimate the error and related properties of the Obs.
+180
+181        Parameters
+182        ----------
+183        S : float
+184            specifies a custom value for the parameter S (default 2.0).
+185            If set to 0 it is assumed that the data exhibits no
+186            autocorrelation. In this case the error estimate coincides
+187            with the sample standard error.
+188        tau_exp : float
+189            positive value triggers the critical slowing down analysis
+190            (default 0.0).
+191        N_sigma : float
+192            number of standard deviations from zero until the tail is
+193            attached to the autocorrelation function (default 1).
+194        fft : bool
+195            determines whether the fft algorithm is used for the computation
+196            of the autocorrelation function (default True)
+197        """
+198
+199        e_content = self.e_content
+200        self.e_dvalue = {}
+201        self.e_ddvalue = {}
+202        self.e_tauint = {}
+203        self.e_dtauint = {}
+204        self.e_windowsize = {}
+205        self.e_n_tauint = {}
+206        self.e_n_dtauint = {}
+207        e_gamma = {}
+208        self.e_rho = {}
+209        self.e_drho = {}
+210        self._dvalue = 0
+211        self.ddvalue = 0
+212
+213        self.S = {}
+214        self.tau_exp = {}
+215        self.N_sigma = {}
+216
+217        if kwargs.get('fft') is False:
+218            fft = False
+219        else:
+220            fft = True
+221
+222        def _parse_kwarg(kwarg_name):
+223            if kwarg_name in kwargs:
+224                tmp = kwargs.get(kwarg_name)
+225                if isinstance(tmp, (int, float)):
+226                    if tmp < 0:
+227                        raise Exception(kwarg_name + ' has to be greater than or equal to 0.')
+228                    for e, e_name in enumerate(self.e_names):
+229                        getattr(self, kwarg_name)[e_name] = tmp
+230                else:
+231                    raise TypeError(kwarg_name + ' is not in proper format.')
+232            else:
+233                for e, e_name in enumerate(self.e_names):
+234                    if e_name in getattr(Obs, kwarg_name + '_dict'):
+235                        getattr(self, kwarg_name)[e_name] = getattr(Obs, kwarg_name + '_dict')[e_name]
+236                    else:
+237                        getattr(self, kwarg_name)[e_name] = getattr(Obs, kwarg_name + '_global')
+238
+239        _parse_kwarg('S')
+240        _parse_kwarg('tau_exp')
+241        _parse_kwarg('N_sigma')
+242
+243        for e, e_name in enumerate(self.mc_names):
+244            r_length = []
+245            for r_name in e_content[e_name]:
+246                if isinstance(self.idl[r_name], range):
+247                    r_length.append(len(self.idl[r_name]))
+248                else:
+249                    r_length.append((self.idl[r_name][-1] - self.idl[r_name][0] + 1))
+250
+251            e_N = np.sum([self.shape[r_name] for r_name in e_content[e_name]])
+252            w_max = max(r_length) // 2
+253            e_gamma[e_name] = np.zeros(w_max)
+254            self.e_rho[e_name] = np.zeros(w_max)
+255            self.e_drho[e_name] = np.zeros(w_max)
 256
-257            gamma_div = np.zeros(w_max)
-258            for r_name in e_content[e_name]:
-259                gamma_div += self._calc_gamma(np.ones((self.shape[r_name])), self.idl[r_name], self.shape[r_name], w_max, fft)
-260            gamma_div[gamma_div < 1] = 1.0
-261            e_gamma[e_name] /= gamma_div[:w_max]
-262
-263            if np.abs(e_gamma[e_name][0]) < 10 * np.finfo(float).tiny:  # Prevent division by zero
-264                self.e_tauint[e_name] = 0.5
-265                self.e_dtauint[e_name] = 0.0
-266                self.e_dvalue[e_name] = 0.0
-267                self.e_ddvalue[e_name] = 0.0
-268                self.e_windowsize[e_name] = 0
-269                continue
-270
-271            gaps = []
-272            for r_name in e_content[e_name]:
-273                if isinstance(self.idl[r_name], range):
-274                    gaps.append(1)
-275                else:
-276                    gaps.append(np.min(np.diff(self.idl[r_name])))
-277
-278            if not np.all([gi == gaps[0] for gi in gaps]):
-279                raise Exception(f"Replica for ensemble {e_name} are not equally spaced.", gaps)
-280            else:
-281                gapsize = gaps[0]
-282
-283            self.e_rho[e_name] = e_gamma[e_name][:w_max] / e_gamma[e_name][0]
-284            self.e_n_tauint[e_name] = np.cumsum(np.concatenate(([0.5], self.e_rho[e_name][1:])))
-285            # Make sure no entry of tauint is smaller than 0.5
-286            self.e_n_tauint[e_name][self.e_n_tauint[e_name] <= 0.5] = 0.5 + np.finfo(np.float64).eps
-287            # hep-lat/0306017 eq. (42)
-288            self.e_n_dtauint[e_name] = self.e_n_tauint[e_name] * 2 * np.sqrt(np.abs(np.arange(w_max) / gapsize + 0.5 - self.e_n_tauint[e_name]) / e_N)
-289            self.e_n_dtauint[e_name][0] = 0.0
-290
-291            def _compute_drho(i):
-292                tmp = self.e_rho[e_name][i + 1:w_max] + np.concatenate([self.e_rho[e_name][i - 1::-1], self.e_rho[e_name][1:w_max - 2 * i]]) - 2 * self.e_rho[e_name][i] * self.e_rho[e_name][1:w_max - i]
-293                self.e_drho[e_name][i] = np.sqrt(np.sum(tmp ** 2) / e_N)
-294
-295            _compute_drho(gapsize)
-296            if self.tau_exp[e_name] > 0:
-297                texp = self.tau_exp[e_name]
-298                # Critical slowing down analysis
-299                if w_max // 2 <= 1:
-300                    raise Exception("Need at least 8 samples for tau_exp error analysis")
-301                for n in range(gapsize, w_max // 2, gapsize):
-302                    _compute_drho(n + gapsize)
-303                    if (self.e_rho[e_name][n] - self.N_sigma[e_name] * self.e_drho[e_name][n]) < 0 or n >= w_max // 2 - 2:
-304                        # Bias correction hep-lat/0306017 eq. (49) included
-305                        self.e_tauint[e_name] = self.e_n_tauint[e_name][n] * (1 + (2 * n / gapsize + 1) / e_N) / (1 + 1 / e_N) + texp * np.abs(self.e_rho[e_name][n + 1])  # The absolute value makes sure that the tail contribution is always positive
-306                        self.e_dtauint[e_name] = np.sqrt(self.e_n_dtauint[e_name][n] ** 2 + texp ** 2 * self.e_drho[e_name][n + 1] ** 2)
-307                        # Error of tau_exp neglected so far, missing term: self.e_rho[e_name][n + 1] ** 2 * d_tau_exp ** 2
-308                        self.e_dvalue[e_name] = np.sqrt(2 * self.e_tauint[e_name] * e_gamma[e_name][0] * (1 + 1 / e_N) / e_N)
-309                        self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt((n / gapsize + 0.5) / e_N)
-310                        self.e_windowsize[e_name] = n
-311                        break
-312            else:
-313                if self.S[e_name] == 0.0:
-314                    self.e_tauint[e_name] = 0.5
-315                    self.e_dtauint[e_name] = 0.0
-316                    self.e_dvalue[e_name] = np.sqrt(e_gamma[e_name][0] / (e_N - 1))
-317                    self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt(0.5 / e_N)
-318                    self.e_windowsize[e_name] = 0
-319                else:
-320                    # Standard automatic windowing procedure
-321                    tau = self.S[e_name] / np.log((2 * self.e_n_tauint[e_name][gapsize::gapsize] + 1) / (2 * self.e_n_tauint[e_name][gapsize::gapsize] - 1))
-322                    g_w = np.exp(- np.arange(1, len(tau) + 1) / tau) - tau / np.sqrt(np.arange(1, len(tau) + 1) * e_N)
-323                    for n in range(1, w_max):
-324                        if n < w_max // 2 - 2:
-325                            _compute_drho(gapsize * n + gapsize)
-326                        if g_w[n - 1] < 0 or n >= w_max - 1:
-327                            n *= gapsize
-328                            self.e_tauint[e_name] = self.e_n_tauint[e_name][n] * (1 + (2 * n / gapsize + 1) / e_N) / (1 + 1 / e_N)  # Bias correction hep-lat/0306017 eq. (49)
-329                            self.e_dtauint[e_name] = self.e_n_dtauint[e_name][n]
-330                            self.e_dvalue[e_name] = np.sqrt(2 * self.e_tauint[e_name] * e_gamma[e_name][0] * (1 + 1 / e_N) / e_N)
-331                            self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt((n / gapsize + 0.5) / e_N)
-332                            self.e_windowsize[e_name] = n
-333                            break
-334
-335            self._dvalue += self.e_dvalue[e_name] ** 2
-336            self.ddvalue += (self.e_dvalue[e_name] * self.e_ddvalue[e_name]) ** 2
+257            for r_name in e_content[e_name]:
+258                e_gamma[e_name] += self._calc_gamma(self.deltas[r_name], self.idl[r_name], self.shape[r_name], w_max, fft)
+259
+260            gamma_div = np.zeros(w_max)
+261            for r_name in e_content[e_name]:
+262                gamma_div += self._calc_gamma(np.ones((self.shape[r_name])), self.idl[r_name], self.shape[r_name], w_max, fft)
+263            gamma_div[gamma_div < 1] = 1.0
+264            e_gamma[e_name] /= gamma_div[:w_max]
+265
+266            if np.abs(e_gamma[e_name][0]) < 10 * np.finfo(float).tiny:  # Prevent division by zero
+267                self.e_tauint[e_name] = 0.5
+268                self.e_dtauint[e_name] = 0.0
+269                self.e_dvalue[e_name] = 0.0
+270                self.e_ddvalue[e_name] = 0.0
+271                self.e_windowsize[e_name] = 0
+272                continue
+273
+274            gaps = []
+275            for r_name in e_content[e_name]:
+276                if isinstance(self.idl[r_name], range):
+277                    gaps.append(1)
+278                else:
+279                    gaps.append(np.min(np.diff(self.idl[r_name])))
+280
+281            if not np.all([gi == gaps[0] for gi in gaps]):
+282                raise Exception(f"Replica for ensemble {e_name} are not equally spaced.", gaps)
+283            else:
+284                gapsize = gaps[0]
+285
+286            self.e_rho[e_name] = e_gamma[e_name][:w_max] / e_gamma[e_name][0]
+287            self.e_n_tauint[e_name] = np.cumsum(np.concatenate(([0.5], self.e_rho[e_name][1:])))
+288            # Make sure no entry of tauint is smaller than 0.5
+289            self.e_n_tauint[e_name][self.e_n_tauint[e_name] <= 0.5] = 0.5 + np.finfo(np.float64).eps
+290            # hep-lat/0306017 eq. (42)
+291            self.e_n_dtauint[e_name] = self.e_n_tauint[e_name] * 2 * np.sqrt(np.abs(np.arange(w_max) / gapsize + 0.5 - self.e_n_tauint[e_name]) / e_N)
+292            self.e_n_dtauint[e_name][0] = 0.0
+293
+294            def _compute_drho(i):
+295                tmp = self.e_rho[e_name][i + 1:w_max] + np.concatenate([self.e_rho[e_name][i - 1::-1], self.e_rho[e_name][1:w_max - 2 * i]]) - 2 * self.e_rho[e_name][i] * self.e_rho[e_name][1:w_max - i]
+296                self.e_drho[e_name][i] = np.sqrt(np.sum(tmp ** 2) / e_N)
+297
+298            _compute_drho(gapsize)
+299            if self.tau_exp[e_name] > 0:
+300                texp = self.tau_exp[e_name]
+301                # Critical slowing down analysis
+302                if w_max // 2 <= 1:
+303                    raise Exception("Need at least 8 samples for tau_exp error analysis")
+304                for n in range(gapsize, w_max // 2, gapsize):
+305                    _compute_drho(n + gapsize)
+306                    if (self.e_rho[e_name][n] - self.N_sigma[e_name] * self.e_drho[e_name][n]) < 0 or n >= w_max // 2 - 2:
+307                        # Bias correction hep-lat/0306017 eq. (49) included
+308                        self.e_tauint[e_name] = self.e_n_tauint[e_name][n] * (1 + (2 * n / gapsize + 1) / e_N) / (1 + 1 / e_N) + texp * np.abs(self.e_rho[e_name][n + 1])  # The absolute value makes sure that the tail contribution is always positive
+309                        self.e_dtauint[e_name] = np.sqrt(self.e_n_dtauint[e_name][n] ** 2 + texp ** 2 * self.e_drho[e_name][n + 1] ** 2)
+310                        # Error of tau_exp neglected so far, missing term: self.e_rho[e_name][n + 1] ** 2 * d_tau_exp ** 2
+311                        self.e_dvalue[e_name] = np.sqrt(2 * self.e_tauint[e_name] * e_gamma[e_name][0] * (1 + 1 / e_N) / e_N)
+312                        self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt((n / gapsize + 0.5) / e_N)
+313                        self.e_windowsize[e_name] = n
+314                        break
+315            else:
+316                if self.S[e_name] == 0.0:
+317                    self.e_tauint[e_name] = 0.5
+318                    self.e_dtauint[e_name] = 0.0
+319                    self.e_dvalue[e_name] = np.sqrt(e_gamma[e_name][0] / (e_N - 1))
+320                    self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt(0.5 / e_N)
+321                    self.e_windowsize[e_name] = 0
+322                else:
+323                    # Standard automatic windowing procedure
+324                    tau = self.S[e_name] / np.log((2 * self.e_n_tauint[e_name][gapsize::gapsize] + 1) / (2 * self.e_n_tauint[e_name][gapsize::gapsize] - 1))
+325                    g_w = np.exp(- np.arange(1, len(tau) + 1) / tau) - tau / np.sqrt(np.arange(1, len(tau) + 1) * e_N)
+326                    for n in range(1, w_max):
+327                        if n < w_max // 2 - 2:
+328                            _compute_drho(gapsize * n + gapsize)
+329                        if g_w[n - 1] < 0 or n >= w_max - 1:
+330                            n *= gapsize
+331                            self.e_tauint[e_name] = self.e_n_tauint[e_name][n] * (1 + (2 * n / gapsize + 1) / e_N) / (1 + 1 / e_N)  # Bias correction hep-lat/0306017 eq. (49)
+332                            self.e_dtauint[e_name] = self.e_n_dtauint[e_name][n]
+333                            self.e_dvalue[e_name] = np.sqrt(2 * self.e_tauint[e_name] * e_gamma[e_name][0] * (1 + 1 / e_N) / e_N)
+334                            self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt((n / gapsize + 0.5) / e_N)
+335                            self.e_windowsize[e_name] = n
+336                            break
 337
-338        for e_name in self.cov_names:
-339            self.e_dvalue[e_name] = np.sqrt(self.covobs[e_name].errsq())
-340            self.e_ddvalue[e_name] = 0
-341            self._dvalue += self.e_dvalue[e_name]**2
-342
-343        self._dvalue = np.sqrt(self._dvalue)
-344        if self._dvalue == 0.0:
-345            self.ddvalue = 0.0
-346        else:
-347            self.ddvalue = np.sqrt(self.ddvalue) / self._dvalue
-348        return
-349
-350    def _calc_gamma(self, deltas, idx, shape, w_max, fft):
-351        """Calculate Gamma_{AA} from the deltas, which are defined on idx.
-352           idx is assumed to be a contiguous range (possibly with a stepsize != 1)
-353
-354        Parameters
-355        ----------
-356        deltas : list
-357            List of fluctuations
-358        idx : list
-359            List or range of configurations on which the deltas are defined.
-360        shape : int
-361            Number of configurations in idx.
-362        w_max : int
-363            Upper bound for the summation window.
-364        fft : bool
-365            determines whether the fft algorithm is used for the computation
-366            of the autocorrelation function.
-367        """
-368        gamma = np.zeros(w_max)
-369        deltas = _expand_deltas(deltas, idx, shape)
-370        new_shape = len(deltas)
-371        if fft:
-372            max_gamma = min(new_shape, w_max)
-373            # The padding for the fft has to be even
-374            padding = new_shape + max_gamma + (new_shape + max_gamma) % 2
-375            gamma[:max_gamma] += np.fft.irfft(np.abs(np.fft.rfft(deltas, padding)) ** 2)[:max_gamma]
-376        else:
-377            for n in range(w_max):
-378                if new_shape - n >= 0:
-379                    gamma[n] += deltas[0:new_shape - n].dot(deltas[n:new_shape])
-380
-381        return gamma
-382
-383    def details(self, ens_content=True):
-384        """Output detailed properties of the Obs.
+338            self._dvalue += self.e_dvalue[e_name] ** 2
+339            self.ddvalue += (self.e_dvalue[e_name] * self.e_ddvalue[e_name]) ** 2
+340
+341        for e_name in self.cov_names:
+342            self.e_dvalue[e_name] = np.sqrt(self.covobs[e_name].errsq())
+343            self.e_ddvalue[e_name] = 0
+344            self._dvalue += self.e_dvalue[e_name]**2
+345
+346        self._dvalue = np.sqrt(self._dvalue)
+347        if self._dvalue == 0.0:
+348            self.ddvalue = 0.0
+349        else:
+350            self.ddvalue = np.sqrt(self.ddvalue) / self._dvalue
+351        return
+352
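A hedged sketch of running the error analysis defined above on the invented obs from the constructor example (all parameter values are illustrative):

obs.gamma_method(S=1.5)
print(obs.value, obs.dvalue)  # mean value and statistical error
print(obs.e_tauint['A'])      # integrated autocorrelation time for ensemble 'A'

obs.gamma_method(tau_exp=4.0)  # critical slowing down analysis with attached tail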
+353    def _calc_gamma(self, deltas, idx, shape, w_max, fft):
+354        """Calculate Gamma_{AA} from the deltas, which are defined on idx.
+355           idx is assumed to be a contiguous range (possibly with a stepsize != 1)
+356
+357        Parameters
+358        ----------
+359        deltas : list
+360            List of fluctuations
+361        idx : list
+362            List or range of configurations on which the deltas are defined.
+363        shape : int
+364            Number of configurations in idx.
+365        w_max : int
+366            Upper bound for the summation window.
+367        fft : bool
+368            determines whether the fft algorithm is used for the computation
+369            of the autocorrelation function.
+370        """
+371        gamma = np.zeros(w_max)
+372        deltas = _expand_deltas(deltas, idx, shape)
+373        new_shape = len(deltas)
+374        if fft:
+375            max_gamma = min(new_shape, w_max)
+376            # The padding for the fft has to be even
+377            padding = new_shape + max_gamma + (new_shape + max_gamma) % 2
+378            gamma[:max_gamma] += np.fft.irfft(np.abs(np.fft.rfft(deltas, padding)) ** 2)[:max_gamma]
+379        else:
+380            for n in range(w_max):
+381                if new_shape - n >= 0:
+382                    gamma[n] += deltas[0:new_shape - n].dot(deltas[n:new_shape])
+383
+384        return gamma
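The fft branch above uses the fact that the linear autocorrelation of a zero-padded signal equals the inverse FFT of its power spectrum; a small self-contained check with invented data:

import numpy as np

deltas = np.random.default_rng(0).normal(0, 1, 100)
w_max = 50
# Even padding, as in _calc_gamma, prevents circular wrap-around for lags < w_max.
padding = len(deltas) + w_max + (len(deltas) + w_max) % 2
gamma_fft = np.fft.irfft(np.abs(np.fft.rfft(deltas, padding)) ** 2)[:w_max]
gamma_direct = np.array([deltas[:len(deltas) - n] @ deltas[n:] for n in range(w_max)])
assert np.allclose(gamma_fft, gamma_direct)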
 385
-386        Parameters
-387        ----------
-388        ens_content : bool
-389            print details about the ensembles and replica if True.
-390        """
-391        if self.tag is not None:
-392            print("Description:", self.tag)
-393        if not hasattr(self, 'e_dvalue'):
-394            print('Result\t %3.8e' % (self.value))
-395        else:
-396            if self.value == 0.0:
-397                percentage = np.nan
-398            else:
-399                percentage = np.abs(self._dvalue / self.value) * 100
-400            print('Result\t %3.8e +/- %3.8e +/- %3.8e (%3.3f%%)' % (self.value, self._dvalue, self.ddvalue, percentage))
-401            if len(self.e_names) > 1:
-402                print(' Ensemble errors:')
-403            e_content = self.e_content
-404            for e_name in self.mc_names:
-405                if isinstance(self.idl[e_content[e_name][0]], range):
-406                    gap = self.idl[e_content[e_name][0]].step
-407                else:
-408                    gap = np.min(np.diff(self.idl[e_content[e_name][0]]))
-409
-410                if len(self.e_names) > 1:
-411                    print('', e_name, '\t %3.6e +/- %3.6e' % (self.e_dvalue[e_name], self.e_ddvalue[e_name]))
-412                tau_string = " \N{GREEK SMALL LETTER TAU}_int\t " + _format_uncertainty(self.e_tauint[e_name], self.e_dtauint[e_name])
-413                tau_string += f" in units of {gap} config"
-414                if gap > 1:
-415                    tau_string += "s"
-416                if self.tau_exp[e_name] > 0:
-417                    tau_string = f"{tau_string: <45}" + '\t(\N{GREEK SMALL LETTER TAU}_exp=%3.2f, N_\N{GREEK SMALL LETTER SIGMA}=%1.0i)' % (self.tau_exp[e_name], self.N_sigma[e_name])
-418                else:
-419                    tau_string = f"{tau_string: <45}" + '\t(S=%3.2f)' % (self.S[e_name])
-420                print(tau_string)
-421            for e_name in self.cov_names:
-422                print('', e_name, '\t %3.8e' % (self.e_dvalue[e_name]))
-423        if ens_content is True:
-424            if len(self.e_names) == 1:
-425                print(self.N, 'samples in', len(self.e_names), 'ensemble:')
-426            else:
-427                print(self.N, 'samples in', len(self.e_names), 'ensembles:')
-428            my_string_list = []
-429            for key, value in sorted(self.e_content.items()):
-430                if key not in self.covobs:
-431                    my_string = '  ' + "\u00B7 Ensemble '" + key + "' "
-432                    if len(value) == 1:
-433                        my_string += f': {self.shape[value[0]]} configurations'
-434                        if isinstance(self.idl[value[0]], range):
-435                            my_string += f' (from {self.idl[value[0]].start} to {self.idl[value[0]][-1]}' + int(self.idl[value[0]].step != 1) * f' in steps of {self.idl[value[0]].step}' + ')'
-436                        else:
-437                            my_string += f' (irregular range from {self.idl[value[0]][0]} to {self.idl[value[0]][-1]})'
-438                    else:
-439                        sublist = []
-440                        for v in value:
-441                            my_substring = '    ' + "\u00B7 Replicum '" + v[len(key) + 1:] + "' "
-442                            my_substring += f': {self.shape[v]} configurations'
-443                            if isinstance(self.idl[v], range):
-444                                my_substring += f' (from {self.idl[v].start} to {self.idl[v][-1]}' + int(self.idl[v].step != 1) * f' in steps of {self.idl[v].step}' + ')'
-445                            else:
-446                                my_substring += f' (irregular range from {self.idl[v][0]} to {self.idl[v][-1]})'
-447                            sublist.append(my_substring)
-448
-449                        my_string += '\n' + '\n'.join(sublist)
-450                else:
-451                    my_string = '  ' + "\u00B7 Covobs   '" + key + "' "
-452                my_string_list.append(my_string)
-453            print('\n'.join(my_string_list))
-454
-455    def reweight(self, weight):
-456        """Reweight the obs with given reweighting factors.
+386    def details(self, ens_content=True):
+387        """Output detailed properties of the Obs.
+388
+389        Parameters
+390        ----------
+391        ens_content : bool
+392            print details about the ensembles and replica if True.
+393        """
+394        if self.tag is not None:
+395            print("Description:", self.tag)
+396        if not hasattr(self, 'e_dvalue'):
+397            print('Result\t %3.8e' % (self.value))
+398        else:
+399            if self.value == 0.0:
+400                percentage = np.nan
+401            else:
+402                percentage = np.abs(self._dvalue / self.value) * 100
+403            print('Result\t %3.8e +/- %3.8e +/- %3.8e (%3.3f%%)' % (self.value, self._dvalue, self.ddvalue, percentage))
+404            if len(self.e_names) > 1:
+405                print(' Ensemble errors:')
+406            e_content = self.e_content
+407            for e_name in self.mc_names:
+408                if isinstance(self.idl[e_content[e_name][0]], range):
+409                    gap = self.idl[e_content[e_name][0]].step
+410                else:
+411                    gap = np.min(np.diff(self.idl[e_content[e_name][0]]))
+412
+413                if len(self.e_names) > 1:
+414                    print('', e_name, '\t %3.6e +/- %3.6e' % (self.e_dvalue[e_name], self.e_ddvalue[e_name]))
+415                tau_string = " \N{GREEK SMALL LETTER TAU}_int\t " + _format_uncertainty(self.e_tauint[e_name], self.e_dtauint[e_name])
+416                tau_string += f" in units of {gap} config"
+417                if gap > 1:
+418                    tau_string += "s"
+419                if self.tau_exp[e_name] > 0:
+420                    tau_string = f"{tau_string: <45}" + '\t(\N{GREEK SMALL LETTER TAU}_exp=%3.2f, N_\N{GREEK SMALL LETTER SIGMA}=%1.0i)' % (self.tau_exp[e_name], self.N_sigma[e_name])
+421                else:
+422                    tau_string = f"{tau_string: <45}" + '\t(S=%3.2f)' % (self.S[e_name])
+423                print(tau_string)
+424            for e_name in self.cov_names:
+425                print('', e_name, '\t %3.8e' % (self.e_dvalue[e_name]))
+426        if ens_content is True:
+427            if len(self.e_names) == 1:
+428                print(self.N, 'samples in', len(self.e_names), 'ensemble:')
+429            else:
+430                print(self.N, 'samples in', len(self.e_names), 'ensembles:')
+431            my_string_list = []
+432            for key, value in sorted(self.e_content.items()):
+433                if key not in self.covobs:
+434                    my_string = '  ' + "\u00B7 Ensemble '" + key + "' "
+435                    if len(value) == 1:
+436                        my_string += f': {self.shape[value[0]]} configurations'
+437                        if isinstance(self.idl[value[0]], range):
+438                            my_string += f' (from {self.idl[value[0]].start} to {self.idl[value[0]][-1]}' + int(self.idl[value[0]].step != 1) * f' in steps of {self.idl[value[0]].step}' + ')'
+439                        else:
+440                            my_string += f' (irregular range from {self.idl[value[0]][0]} to {self.idl[value[0]][-1]})'
+441                    else:
+442                        sublist = []
+443                        for v in value:
+444                            my_substring = '    ' + "\u00B7 Replicum '" + v[len(key) + 1:] + "' "
+445                            my_substring += f': {self.shape[v]} configurations'
+446                            if isinstance(self.idl[v], range):
+447                                my_substring += f' (from {self.idl[v].start} to {self.idl[v][-1]}' + int(self.idl[v].step != 1) * f' in steps of {self.idl[v].step}' + ')'
+448                            else:
+449                                my_substring += f' (irregular range from {self.idl[v][0]} to {self.idl[v][-1]})'
+450                            sublist.append(my_substring)
+451
+452                        my_string += '\n' + '\n'.join(sublist)
+453                else:
+454                    my_string = '  ' + "\u00B7 Covobs   '" + key + "' "
+455                my_string_list.append(my_string)
+456            print('\n'.join(my_string_list))
 457
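The printing logic above appears to belong to Obs.details; a minimal usage sketch (ensemble name and numbers are illustrative, not taken from the patch):

    import numpy as np
    import pyerrors as pe

    obs = pe.Obs([np.random.normal(1.0, 0.1, 500)], ['ens1'])
    obs.gamma_method()
    obs.details()  # prints Result, tau_int and the ensemble content listed above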
-458        Parameters
-459        ----------
-460        weight : Obs
-461            Reweighting factor. An Observable that has to be defined on a superset of the
-462            configurations in obs[i].idl for all i.
-463        all_configs : bool
-464            if True, the reweighted observables are normalized by the average of
-465            the reweighting factor on all configurations in weight.idl and not
-466            on the configurations in obs[i].idl. Default False.
-467        """
-468        return reweight(weight, [self])[0]
-469
-470    def is_zero_within_error(self, sigma=1):
-471        """Checks whether the observable is zero within 'sigma' standard errors.
+458    def reweight(self, weight):
+459        """Reweight the Obs with a given reweighting factor.
+460
+461        Parameters
+462        ----------
+463        weight : Obs
+464            Reweighting factor. An Observable that has to be defined on a superset of the
+465            configurations in self.idl.
+466        all_configs : bool
+467            If True, the reweighted observable is normalized by the average of
+468            the reweighting factor on all configurations in weight.idl and not
+469            on the configurations in self.idl. Default False.
+470        """
+471        return reweight(weight, [self])[0]
 472
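A minimal sketch of the method just defined (observable and weight names are illustrative; both must be defined on the same configurations):

    import numpy as np
    import pyerrors as pe

    my_obs = pe.Obs([np.random.normal(1.0, 0.1, 500)], ['ens1'])
    w = pe.Obs([np.random.normal(1.0, 0.01, 500)], ['ens1'])

    rw_obs = my_obs.reweight(w)  # delegates to reweight(w, [my_obs])[0]
    rw_obs.gamma_method()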
-473        Parameters
-474        ----------
-475        sigma : int
-476            Number of standard errors used for the check.
-477
-478        Works only properly when the gamma method was run.
-479        """
-480        return self.is_zero() or np.abs(self.value) <= sigma * self._dvalue
-481
-482    def is_zero(self, atol=1e-10):
-483        """Checks whether the observable is zero within a given tolerance.
+473    def is_zero_within_error(self, sigma=1):
+474        """Checks whether the observable is zero within 'sigma' standard errors.
+475
+476        Parameters
+477        ----------
+478        sigma : int
+479            Number of standard errors used for the check.
+480
+481        Works properly only if the gamma method has been run.
+482        """
+483        return self.is_zero() or np.abs(self.value) <= sigma * self._dvalue
 484
-485        Parameters
-486        ----------
-487        atol : float
-488            Absolute tolerance (for details see numpy documentation).
-489        """
-490        return np.isclose(0.0, self.value, 1e-14, atol) and all(np.allclose(0.0, delta, 1e-14, atol) for delta in self.deltas.values()) and all(np.allclose(0.0, delta.errsq(), 1e-14, atol) for delta in self.covobs.values())
-491
-492    def plot_tauint(self, save=None):
-493        """Plot integrated autocorrelation time for each ensemble.
+485    def is_zero(self, atol=1e-10):
+486        """Checks whether the observable is zero within a given tolerance.
+487
+488        Parameters
+489        ----------
+490        atol : float
+491            Absolute tolerance (for details see numpy documentation).
+492        """
+493        return np.isclose(0.0, self.value, 1e-14, atol) and all(np.allclose(0.0, delta, 1e-14, atol) for delta in self.deltas.values()) and all(np.allclose(0.0, delta.errsq(), 1e-14, atol) for delta in self.covobs.values())
 494
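A short sketch of both zero checks (data is illustrative; gamma_method has to run first so that the error estimate used in the sigma comparison is set):

    import numpy as np
    import pyerrors as pe

    diff = pe.Obs([np.random.normal(0.0, 0.1, 1000)], ['ens1'])
    diff.gamma_method()

    print(diff.is_zero_within_error(sigma=2))  # |value| <= 2 * dvalue (or exactly zero)
    print(diff.is_zero(atol=1e-10))            # value and all deltas zero within atol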
-495        Parameters
-496        ----------
-497        save : str
-498            saves the figure to a file named 'save' if.
-499        """
-500        if not hasattr(self, 'e_dvalue'):
-501            raise Exception('Run the gamma method first.')
-502
-503        for e, e_name in enumerate(self.mc_names):
-504            fig = plt.figure()
-505            plt.xlabel(r'$W$')
-506            plt.ylabel(r'$\tau_\mathrm{int}$')
-507            length = int(len(self.e_n_tauint[e_name]))
-508            if self.tau_exp[e_name] > 0:
-509                base = self.e_n_tauint[e_name][self.e_windowsize[e_name]]
-510                x_help = np.arange(2 * self.tau_exp[e_name])
-511                y_help = (x_help + 1) * np.abs(self.e_rho[e_name][self.e_windowsize[e_name] + 1]) * (1 - x_help / (2 * (2 * self.tau_exp[e_name] - 1))) + base
-512                x_arr = np.arange(self.e_windowsize[e_name] + 1, self.e_windowsize[e_name] + 1 + 2 * self.tau_exp[e_name])
-513                plt.plot(x_arr, y_help, 'C' + str(e), linewidth=1, ls='--', marker=',')
-514                plt.errorbar([self.e_windowsize[e_name] + 2 * self.tau_exp[e_name]], [self.e_tauint[e_name]],
-515                             yerr=[self.e_dtauint[e_name]], fmt='C' + str(e), linewidth=1, capsize=2, marker='o', mfc=plt.rcParams['axes.facecolor'])
-516                xmax = self.e_windowsize[e_name] + 2 * self.tau_exp[e_name] + 1.5
-517                label = e_name + r', $\tau_\mathrm{exp}$=' + str(np.around(self.tau_exp[e_name], decimals=2))
-518            else:
-519                label = e_name + ', S=' + str(np.around(self.S[e_name], decimals=2))
-520                xmax = max(10.5, 2 * self.e_windowsize[e_name] - 0.5)
-521
-522            plt.errorbar(np.arange(length)[:int(xmax) + 1], self.e_n_tauint[e_name][:int(xmax) + 1], yerr=self.e_n_dtauint[e_name][:int(xmax) + 1], linewidth=1, capsize=2, label=label)
-523            plt.axvline(x=self.e_windowsize[e_name], color='C' + str(e), alpha=0.5, marker=',', ls='--')
-524            plt.legend()
-525            plt.xlim(-0.5, xmax)
-526            ylim = plt.ylim()
-527            plt.ylim(bottom=0.0, top=max(1.0, ylim[1]))
-528            plt.draw()
-529            if save:
-530                fig.savefig(save + "_" + str(e))
-531
-532    def plot_rho(self, save=None):
-533        """Plot normalized autocorrelation function time for each ensemble.
+495    def plot_tauint(self, save=None):
+496        """Plot integrated autocorrelation time for each ensemble.
+497
+498        Parameters
+499        ----------
+500        save : str
+501            saves the figure to a file named 'save' if specified.
+502        """
+503        if not hasattr(self, 'e_dvalue'):
+504            raise Exception('Run the gamma method first.')
+505
+506        for e, e_name in enumerate(self.mc_names):
+507            fig = plt.figure()
+508            plt.xlabel(r'$W$')
+509            plt.ylabel(r'$\tau_\mathrm{int}$')
+510            length = int(len(self.e_n_tauint[e_name]))
+511            if self.tau_exp[e_name] > 0:
+512                base = self.e_n_tauint[e_name][self.e_windowsize[e_name]]
+513                x_help = np.arange(2 * self.tau_exp[e_name])
+514                y_help = (x_help + 1) * np.abs(self.e_rho[e_name][self.e_windowsize[e_name] + 1]) * (1 - x_help / (2 * (2 * self.tau_exp[e_name] - 1))) + base
+515                x_arr = np.arange(self.e_windowsize[e_name] + 1, self.e_windowsize[e_name] + 1 + 2 * self.tau_exp[e_name])
+516                plt.plot(x_arr, y_help, 'C' + str(e), linewidth=1, ls='--', marker=',')
+517                plt.errorbar([self.e_windowsize[e_name] + 2 * self.tau_exp[e_name]], [self.e_tauint[e_name]],
+518                             yerr=[self.e_dtauint[e_name]], fmt='C' + str(e), linewidth=1, capsize=2, marker='o', mfc=plt.rcParams['axes.facecolor'])
+519                xmax = self.e_windowsize[e_name] + 2 * self.tau_exp[e_name] + 1.5
+520                label = e_name + r', $\tau_\mathrm{exp}$=' + str(np.around(self.tau_exp[e_name], decimals=2))
+521            else:
+522                label = e_name + ', S=' + str(np.around(self.S[e_name], decimals=2))
+523                xmax = max(10.5, 2 * self.e_windowsize[e_name] - 0.5)
+524
+525            plt.errorbar(np.arange(length)[:int(xmax) + 1], self.e_n_tauint[e_name][:int(xmax) + 1], yerr=self.e_n_dtauint[e_name][:int(xmax) + 1], linewidth=1, capsize=2, label=label)
+526            plt.axvline(x=self.e_windowsize[e_name], color='C' + str(e), alpha=0.5, marker=',', ls='--')
+527            plt.legend()
+528            plt.xlim(-0.5, xmax)
+529            ylim = plt.ylim()
+530            plt.ylim(bottom=0.0, top=max(1.0, ylim[1]))
+531            plt.draw()
+532            if save:
+533                fig.savefig(save + "_" + str(e))
 534
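A sketch of the plot just defined (ensemble name, tau_exp value and file prefix are illustrative):

    import numpy as np
    import pyerrors as pe

    obs = pe.Obs([np.random.normal(1.7, 0.5, 2000)], ['ens1'])
    obs.gamma_method(tau_exp=10)    # tau_exp > 0 draws the attached exponential tail
    obs.plot_tauint(save='tauint')  # one file per MC ensemble: tauint_0, tauint_1, ...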
-535        Parameters
-536        ----------
-537        save : str
-538            saves the figure to a file named 'save' if.
-539        """
-540        if not hasattr(self, 'e_dvalue'):
-541            raise Exception('Run the gamma method first.')
-542        for e, e_name in enumerate(self.mc_names):
-543            fig = plt.figure()
-544            plt.xlabel('W')
-545            plt.ylabel('rho')
-546            length = int(len(self.e_drho[e_name]))
-547            plt.errorbar(np.arange(length), self.e_rho[e_name][:length], yerr=self.e_drho[e_name][:], linewidth=1, capsize=2)
-548            plt.axvline(x=self.e_windowsize[e_name], color='r', alpha=0.25, ls='--', marker=',')
-549            if self.tau_exp[e_name] > 0:
-550                plt.plot([self.e_windowsize[e_name] + 1, self.e_windowsize[e_name] + 1 + 2 * self.tau_exp[e_name]],
-551                         [self.e_rho[e_name][self.e_windowsize[e_name] + 1], 0], 'k-', lw=1)
-552                xmax = self.e_windowsize[e_name] + 2 * self.tau_exp[e_name] + 1.5
-553                plt.title('Rho ' + e_name + r', tau\_exp=' + str(np.around(self.tau_exp[e_name], decimals=2)))
-554            else:
-555                xmax = max(10.5, 2 * self.e_windowsize[e_name] - 0.5)
-556                plt.title('Rho ' + e_name + ', S=' + str(np.around(self.S[e_name], decimals=2)))
-557            plt.plot([-0.5, xmax], [0, 0], 'k--', lw=1)
-558            plt.xlim(-0.5, xmax)
-559            plt.draw()
-560            if save:
-561                fig.savefig(save + "_" + str(e))
-562
-563    def plot_rep_dist(self):
-564        """Plot replica distribution for each ensemble with more than one replicum."""
-565        if not hasattr(self, 'e_dvalue'):
-566            raise Exception('Run the gamma method first.')
-567        for e, e_name in enumerate(self.mc_names):
-568            if len(self.e_content[e_name]) == 1:
-569                print('No replica distribution for a single replicum (', e_name, ')')
-570                continue
-571            r_length = []
-572            sub_r_mean = 0
-573            for r, r_name in enumerate(self.e_content[e_name]):
-574                r_length.append(len(self.deltas[r_name]))
-575                sub_r_mean += self.shape[r_name] * self.r_values[r_name]
-576            e_N = np.sum(r_length)
-577            sub_r_mean /= e_N
-578            arr = np.zeros(len(self.e_content[e_name]))
-579            for r, r_name in enumerate(self.e_content[e_name]):
-580                arr[r] = (self.r_values[r_name] - sub_r_mean) / (self.e_dvalue[e_name] * np.sqrt(e_N / self.shape[r_name] - 1))
-581            plt.hist(arr, rwidth=0.8, bins=len(self.e_content[e_name]))
-582            plt.title('Replica distribution' + e_name + ' (mean=0, var=1)')
-583            plt.draw()
-584
-585    def plot_history(self, expand=True):
-586        """Plot derived Monte Carlo history for each ensemble
+535    def plot_rho(self, save=None):
+536        """Plot normalized autocorrelation function for each ensemble.
+537
+538        Parameters
+539        ----------
+540        save : str
+541            saves the figure to a file named 'save' if specified.
+542        """
+543        if not hasattr(self, 'e_dvalue'):
+544            raise Exception('Run the gamma method first.')
+545        for e, e_name in enumerate(self.mc_names):
+546            fig = plt.figure()
+547            plt.xlabel('W')
+548            plt.ylabel('rho')
+549            length = int(len(self.e_drho[e_name]))
+550            plt.errorbar(np.arange(length), self.e_rho[e_name][:length], yerr=self.e_drho[e_name][:], linewidth=1, capsize=2)
+551            plt.axvline(x=self.e_windowsize[e_name], color='r', alpha=0.25, ls='--', marker=',')
+552            if self.tau_exp[e_name] > 0:
+553                plt.plot([self.e_windowsize[e_name] + 1, self.e_windowsize[e_name] + 1 + 2 * self.tau_exp[e_name]],
+554                         [self.e_rho[e_name][self.e_windowsize[e_name] + 1], 0], 'k-', lw=1)
+555                xmax = self.e_windowsize[e_name] + 2 * self.tau_exp[e_name] + 1.5
+556                plt.title('Rho ' + e_name + r', tau\_exp=' + str(np.around(self.tau_exp[e_name], decimals=2)))
+557            else:
+558                xmax = max(10.5, 2 * self.e_windowsize[e_name] - 0.5)
+559                plt.title('Rho ' + e_name + ', S=' + str(np.around(self.S[e_name], decimals=2)))
+560            plt.plot([-0.5, xmax], [0, 0], 'k--', lw=1)
+561            plt.xlim(-0.5, xmax)
+562            plt.draw()
+563            if save:
+564                fig.savefig(save + "_" + str(e))
+565
+566    def plot_rep_dist(self):
+567        """Plot replica distribution for each ensemble with more than one replicum."""
+568        if not hasattr(self, 'e_dvalue'):
+569            raise Exception('Run the gamma method first.')
+570        for e, e_name in enumerate(self.mc_names):
+571            if len(self.e_content[e_name]) == 1:
+572                print('No replica distribution for a single replicum (', e_name, ')')
+573                continue
+574            r_length = []
+575            sub_r_mean = 0
+576            for r, r_name in enumerate(self.e_content[e_name]):
+577                r_length.append(len(self.deltas[r_name]))
+578                sub_r_mean += self.shape[r_name] * self.r_values[r_name]
+579            e_N = np.sum(r_length)
+580            sub_r_mean /= e_N
+581            arr = np.zeros(len(self.e_content[e_name]))
+582            for r, r_name in enumerate(self.e_content[e_name]):
+583                arr[r] = (self.r_values[r_name] - sub_r_mean) / (self.e_dvalue[e_name] * np.sqrt(e_N / self.shape[r_name] - 1))
+584            plt.hist(arr, rwidth=0.8, bins=len(self.e_content[e_name]))
+585            plt.title('Replica distribution ' + e_name + ' (mean=0, var=1)')
+586            plt.draw()
 587
-588        Parameters
-589        ----------
-590        expand : bool
-591            show expanded history for irregular Monte Carlo chains (default: True).
-592        """
-593        for e, e_name in enumerate(self.mc_names):
-594            plt.figure()
-595            r_length = []
-596            tmp = []
-597            tmp_expanded = []
-598            for r, r_name in enumerate(self.e_content[e_name]):
-599                tmp.append(self.deltas[r_name] + self.r_values[r_name])
-600                if expand:
-601                    tmp_expanded.append(_expand_deltas(self.deltas[r_name], list(self.idl[r_name]), self.shape[r_name]) + self.r_values[r_name])
-602                    r_length.append(len(tmp_expanded[-1]))
-603                else:
-604                    r_length.append(len(tmp[-1]))
-605            e_N = np.sum(r_length)
-606            x = np.arange(e_N)
-607            y_test = np.concatenate(tmp, axis=0)
-608            if expand:
-609                y = np.concatenate(tmp_expanded, axis=0)
-610            else:
-611                y = y_test
-612            plt.errorbar(x, y, fmt='.', markersize=3)
-613            plt.xlim(-0.5, e_N - 0.5)
-614            plt.title(e_name + f'\nskew: {skew(y_test):.3f} (p={skewtest(y_test).pvalue:.3f}), kurtosis: {kurtosis(y_test):.3f} (p={kurtosistest(y_test).pvalue:.3f})')
-615            plt.draw()
-616
-617    def plot_piechart(self, save=None):
-618        """Plot piechart which shows the fractional contribution of each
-619        ensemble to the error and returns a dictionary containing the fractions.
-620
-621        Parameters
-622        ----------
-623        save : str
-624            saves the figure to a file named 'save' if.
-625        """
-626        if not hasattr(self, 'e_dvalue'):
-627            raise Exception('Run the gamma method first.')
-628        if np.isclose(0.0, self._dvalue, atol=1e-15):
-629            raise Exception('Error is 0.0')
-630        labels = self.e_names
-631        sizes = [self.e_dvalue[name] ** 2 for name in labels] / self._dvalue ** 2
-632        fig1, ax1 = plt.subplots()
-633        ax1.pie(sizes, labels=labels, startangle=90, normalize=True)
-634        ax1.axis('equal')
-635        plt.draw()
-636        if save:
-637            fig1.savefig(save)
-638
-639        return dict(zip(self.e_names, sizes))
-640
-641    def dump(self, filename, datatype="json.gz", description="", **kwargs):
-642        """Dump the Obs to a file 'name' of chosen format.
+588    def plot_history(self, expand=True):
+589        """Plot derived Monte Carlo history for each ensemble.
+590
+591        Parameters
+592        ----------
+593        expand : bool
+594            show expanded history for irregular Monte Carlo chains (default: True).
+595        """
+596        for e, e_name in enumerate(self.mc_names):
+597            plt.figure()
+598            r_length = []
+599            tmp = []
+600            tmp_expanded = []
+601            for r, r_name in enumerate(self.e_content[e_name]):
+602                tmp.append(self.deltas[r_name] + self.r_values[r_name])
+603                if expand:
+604                    tmp_expanded.append(_expand_deltas(self.deltas[r_name], list(self.idl[r_name]), self.shape[r_name]) + self.r_values[r_name])
+605                    r_length.append(len(tmp_expanded[-1]))
+606                else:
+607                    r_length.append(len(tmp[-1]))
+608            e_N = np.sum(r_length)
+609            x = np.arange(e_N)
+610            y_test = np.concatenate(tmp, axis=0)
+611            if expand:
+612                y = np.concatenate(tmp_expanded, axis=0)
+613            else:
+614                y = y_test
+615            plt.errorbar(x, y, fmt='.', markersize=3)
+616            plt.xlim(-0.5, e_N - 0.5)
+617            plt.title(e_name + f'\nskew: {skew(y_test):.3f} (p={skewtest(y_test).pvalue:.3f}), kurtosis: {kurtosis(y_test):.3f} (p={kurtosistest(y_test).pvalue:.3f})')
+618            plt.draw()
+619
+620    def plot_piechart(self, save=None):
+621        """Plot a pie chart which shows the fractional contribution of each
+622        ensemble to the error and return a dictionary containing the fractions.
+623
+624        Parameters
+625        ----------
+626        save : str
+627            saves the figure to a file named 'save' if specified.
+628        """
+629        if not hasattr(self, 'e_dvalue'):
+630            raise Exception('Run the gamma method first.')
+631        if np.isclose(0.0, self._dvalue, atol=1e-15):
+632            raise Exception('Error is 0.0')
+633        labels = self.e_names
+634        sizes = [self.e_dvalue[name] ** 2 for name in labels] / self._dvalue ** 2
+635        fig1, ax1 = plt.subplots()
+636        ax1.pie(sizes, labels=labels, startangle=90, normalize=True)
+637        ax1.axis('equal')
+638        plt.draw()
+639        if save:
+640            fig1.savefig(save)
+641
+642        return dict(zip(self.e_names, sizes))
 643
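Since plot_piechart returns the squared-error fractions, it can double as a programmatic error budget; a sketch with illustrative ensembles:

    import numpy as np
    import pyerrors as pe

    a = pe.Obs([np.random.normal(1.0, 0.1, 500)], ['ensA'])
    b = pe.Obs([np.random.normal(2.0, 0.2, 800)], ['ensB'])
    c = a + b
    c.gamma_method()
    fractions = c.plot_piechart()  # e.g. {'ensA': ..., 'ensB': ...}, summing to 1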
-644        Parameters
-645        ----------
-646        filename : str
-647            name of the file to be saved.
-648        datatype : str
-649            Format of the exported file. Supported formats include
-650            "json.gz" and "pickle"
-651        description : str
-652            Description for output file, only relevant for json.gz format.
-653        path : str
-654            specifies a custom path for the file (default '.')
-655        """
-656        if 'path' in kwargs:
-657            file_name = kwargs.get('path') + '/' + filename
-658        else:
-659            file_name = filename
-660
-661        if datatype == "json.gz":
-662            from .input.json import dump_to_json
-663            dump_to_json([self], file_name, description=description)
-664        elif datatype == "pickle":
-665            with open(file_name + '.p', 'wb') as fb:
-666                pickle.dump(self, fb)
-667        else:
-668            raise Exception("Unknown datatype " + str(datatype))
-669
-670    def export_jackknife(self):
-671        """Export jackknife samples from the Obs
+644    def dump(self, filename, datatype="json.gz", description="", **kwargs):
+645        """Dump the Obs to a file 'filename' of chosen format.
+646
+647        Parameters
+648        ----------
+649        filename : str
+650            name of the file to be saved.
+651        datatype : str
+652            Format of the exported file. Supported formats include
+653            "json.gz" and "pickle"
+654        description : str
+655            Description for output file, only relevant for json.gz format.
+656        path : str
+657            specifies a custom path for the file (default '.')
+658        """
+659        if 'path' in kwargs:
+660            file_name = kwargs.get('path') + '/' + filename
+661        else:
+662            file_name = filename
+663
+664        if datatype == "json.gz":
+665            from .input.json import dump_to_json
+666            dump_to_json([self], file_name, description=description)
+667        elif datatype == "pickle":
+668            with open(file_name + '.p', 'wb') as fb:
+669                pickle.dump(self, fb)
+670        else:
+671            raise Exception("Unknown datatype " + str(datatype))
 672
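A round-trip sketch for dump (the json.gz loader pyerrors.input.json.load_json is assumed; filename and description are illustrative):

    import numpy as np
    import pyerrors as pe

    obs = pe.Obs([np.random.normal(1.0, 0.1, 500)], ['ens1'])
    obs.dump('my_obs', datatype='json.gz', description='toy example')

    reloaded = pe.input.json.load_json('my_obs')  # single-Obs files are unpacked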
-673        Returns
-674        -------
-675        numpy.ndarray
-676            Returns a numpy array of length N + 1 where N is the number of samples
-677            for the given ensemble and replicum. The zeroth entry of the array contains
-678            the mean value of the Obs, entries 1 to N contain the N jackknife samples
-679            derived from the Obs. The current implementation only works for observables
-680            defined on exactly one ensemble and replicum. The derived jackknife samples
-681            should agree with samples from a full jackknife analysis up to O(1/N).
-682        """
-683
-684        if len(self.names) != 1:
-685            raise Exception("'export_jackknife' is only implemented for Obs defined on one ensemble and replicum.")
+673    def export_jackknife(self):
+674        """Export jackknife samples from the Obs
+675
+676        Returns
+677        -------
+678        numpy.ndarray
+679            Returns a numpy array of length N + 1 where N is the number of samples
+680            for the given ensemble and replicum. The zeroth entry of the array contains
+681            the mean value of the Obs, entries 1 to N contain the N jackknife samples
+682            derived from the Obs. The current implementation only works for observables
+683            defined on exactly one ensemble and replicum. The derived jackknife samples
+684            should agree with samples from a full jackknife analysis up to O(1/N).
+685        """
 686
-687        name = self.names[0]
-688        full_data = self.deltas[name] + self.r_values[name]
-689        n = full_data.size
-690        mean = self.value
-691        tmp_jacks = np.zeros(n + 1)
-692        tmp_jacks[0] = mean
-693        tmp_jacks[1:] = (n * mean - full_data) / (n - 1)
-694        return tmp_jacks
-695
-696    def __float__(self):
-697        return float(self.value)
+687        if len(self.names) != 1:
+688            raise Exception("'export_jackknife' is only implemented for Obs defined on one ensemble and replicum.")
+689
+690        name = self.names[0]
+691        full_data = self.deltas[name] + self.r_values[name]
+692        n = full_data.size
+693        mean = self.value
+694        tmp_jacks = np.zeros(n + 1)
+695        tmp_jacks[0] = mean
+696        tmp_jacks[1:] = (n * mean - full_data) / (n - 1)
+697        return tmp_jacks
 698
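A sketch of the layout documented above and of a naive jackknife error recovered from it (data illustrative; unlike the gamma method this estimate ignores autocorrelation):

    import numpy as np
    import pyerrors as pe

    obs = pe.Obs([np.random.normal(1.0, 0.1, 500)], ['ens1'])
    jacks = obs.export_jackknife()
    mean = jacks[0]                # entry 0 is the mean, entries 1..N the samples
    n = len(jacks) - 1
    err = np.sqrt((n - 1) / n * np.sum((jacks[1:] - mean) ** 2))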
-699    def __repr__(self):
-700        return 'Obs[' + str(self) + ']'
+699    def __float__(self):
+700        return float(self.value)
 701
-702    def __str__(self):
-703        return _format_uncertainty(self.value, self._dvalue)
+702    def __repr__(self):
+703        return 'Obs[' + str(self) + ']'
 704
-705    def __hash__(self):
-706        hash_tuple = (np.array([self.value]).astype(np.float32).data.tobytes(),)
-707        hash_tuple += tuple([o.astype(np.float32).data.tobytes() for o in self.deltas.values()])
-708        hash_tuple += tuple([np.array([o.errsq()]).astype(np.float32).data.tobytes() for o in self.covobs.values()])
-709        hash_tuple += tuple([o.encode() for o in self.names])
-710        m = hashlib.md5()
-711        [m.update(o) for o in hash_tuple]
-712        return int(m.hexdigest(), 16) & 0xFFFFFFFF
-713
-714    # Overload comparisons
-715    def __lt__(self, other):
-716        return self.value < other
-717
-718    def __le__(self, other):
-719        return self.value <= other
+705    def __str__(self):
+706        return _format_uncertainty(self.value, self._dvalue)
+707
+708    def __hash__(self):
+709        hash_tuple = (np.array([self.value]).astype(np.float32).data.tobytes(),)
+710        hash_tuple += tuple([o.astype(np.float32).data.tobytes() for o in self.deltas.values()])
+711        hash_tuple += tuple([np.array([o.errsq()]).astype(np.float32).data.tobytes() for o in self.covobs.values()])
+712        hash_tuple += tuple([o.encode() for o in self.names])
+713        m = hashlib.md5()
+714        [m.update(o) for o in hash_tuple]
+715        return int(m.hexdigest(), 16) & 0xFFFFFFFF
+716
+717    # Overload comparisons
+718    def __lt__(self, other):
+719        return self.value < other
 720
-721    def __gt__(self, other):
-722        return self.value > other
+721    def __le__(self, other):
+722        return self.value <= other
 723
-724    def __ge__(self, other):
-725        return self.value >= other
+724    def __gt__(self, other):
+725        return self.value > other
 726
-727    def __eq__(self, other):
-728        return (self - other).is_zero()
+727    def __ge__(self, other):
+728        return self.value >= other
 729
-730    def __ne__(self, other):
-731        return not (self - other).is_zero()
+730    def __eq__(self, other):
+731        return (self - other).is_zero()
 732
-733    # Overload math operations
-734    def __add__(self, y):
-735        if isinstance(y, Obs):
-736            return derived_observable(lambda x, **kwargs: x[0] + x[1], [self, y], man_grad=[1, 1])
-737        else:
-738            if isinstance(y, np.ndarray):
-739                return np.array([self + o for o in y])
-740            elif y.__class__.__name__ in ['Corr', 'CObs']:
-741                return NotImplemented
-742            else:
-743                return derived_observable(lambda x, **kwargs: x[0] + y, [self], man_grad=[1])
-744
-745    def __radd__(self, y):
-746        return self + y
+733    def __ne__(self, other):
+734        return not (self - other).is_zero()
+735
+736    # Overload math operations
+737    def __add__(self, y):
+738        if isinstance(y, Obs):
+739            return derived_observable(lambda x, **kwargs: x[0] + x[1], [self, y], man_grad=[1, 1])
+740        else:
+741            if isinstance(y, np.ndarray):
+742                return np.array([self + o for o in y])
+743            elif y.__class__.__name__ in ['Corr', 'CObs']:
+744                return NotImplemented
+745            else:
+746                return derived_observable(lambda x, **kwargs: x[0] + y, [self], man_grad=[1])
 747
-748    def __mul__(self, y):
-749        if isinstance(y, Obs):
-750            return derived_observable(lambda x, **kwargs: x[0] * x[1], [self, y], man_grad=[y.value, self.value])
-751        else:
-752            if isinstance(y, np.ndarray):
-753                return np.array([self * o for o in y])
-754            elif isinstance(y, complex):
-755                return CObs(self * y.real, self * y.imag)
-756            elif y.__class__.__name__ in ['Corr', 'CObs']:
-757                return NotImplemented
-758            else:
-759                return derived_observable(lambda x, **kwargs: x[0] * y, [self], man_grad=[y])
-760
-761    def __rmul__(self, y):
-762        return self * y
+748    def __radd__(self, y):
+749        return self + y
+750
+751    def __mul__(self, y):
+752        if isinstance(y, Obs):
+753            return derived_observable(lambda x, **kwargs: x[0] * x[1], [self, y], man_grad=[y.value, self.value])
+754        else:
+755            if isinstance(y, np.ndarray):
+756                return np.array([self * o for o in y])
+757            elif isinstance(y, complex):
+758                return CObs(self * y.real, self * y.imag)
+759            elif y.__class__.__name__ in ['Corr', 'CObs']:
+760                return NotImplemented
+761            else:
+762                return derived_observable(lambda x, **kwargs: x[0] * y, [self], man_grad=[y])
 763
-764    def __sub__(self, y):
-765        if isinstance(y, Obs):
-766            return derived_observable(lambda x, **kwargs: x[0] - x[1], [self, y], man_grad=[1, -1])
-767        else:
-768            if isinstance(y, np.ndarray):
-769                return np.array([self - o for o in y])
-770            elif y.__class__.__name__ in ['Corr', 'CObs']:
-771                return NotImplemented
-772            else:
-773                return derived_observable(lambda x, **kwargs: x[0] - y, [self], man_grad=[1])
-774
-775    def __rsub__(self, y):
-776        return -1 * (self - y)
+764    def __rmul__(self, y):
+765        return self * y
+766
+767    def __sub__(self, y):
+768        if isinstance(y, Obs):
+769            return derived_observable(lambda x, **kwargs: x[0] - x[1], [self, y], man_grad=[1, -1])
+770        else:
+771            if isinstance(y, np.ndarray):
+772                return np.array([self - o for o in y])
+773            elif y.__class__.__name__ in ['Corr', 'CObs']:
+774                return NotImplemented
+775            else:
+776                return derived_observable(lambda x, **kwargs: x[0] - y, [self], man_grad=[1])
 777
-778    def __pos__(self):
-779        return self
+778    def __rsub__(self, y):
+779        return -1 * (self - y)
 780
-781    def __neg__(self):
-782        return -1 * self
+781    def __pos__(self):
+782        return self
 783
-784    def __truediv__(self, y):
-785        if isinstance(y, Obs):
-786            return derived_observable(lambda x, **kwargs: x[0] / x[1], [self, y], man_grad=[1 / y.value, - self.value / y.value ** 2])
-787        else:
-788            if isinstance(y, np.ndarray):
-789                return np.array([self / o for o in y])
-790            elif y.__class__.__name__ in ['Corr', 'CObs']:
-791                return NotImplemented
-792            else:
-793                return derived_observable(lambda x, **kwargs: x[0] / y, [self], man_grad=[1 / y])
-794
-795    def __rtruediv__(self, y):
-796        if isinstance(y, Obs):
-797            return derived_observable(lambda x, **kwargs: x[0] / x[1], [y, self], man_grad=[1 / self.value, - y.value / self.value ** 2])
-798        else:
-799            if isinstance(y, np.ndarray):
-800                return np.array([o / self for o in y])
-801            elif y.__class__.__name__ in ['Corr', 'CObs']:
-802                return NotImplemented
-803            else:
-804                return derived_observable(lambda x, **kwargs: y / x[0], [self], man_grad=[-y / self.value ** 2])
-805
-806    def __pow__(self, y):
-807        if isinstance(y, Obs):
-808            return derived_observable(lambda x: x[0] ** x[1], [self, y])
-809        else:
-810            return derived_observable(lambda x: x[0] ** y, [self])
-811
-812    def __rpow__(self, y):
-813        if isinstance(y, Obs):
-814            return derived_observable(lambda x: x[0] ** x[1], [y, self])
-815        else:
-816            return derived_observable(lambda x: y ** x[0], [self])
-817
-818    def __abs__(self):
-819        return derived_observable(lambda x: anp.abs(x[0]), [self])
+784    def __neg__(self):
+785        return -1 * self
+786
+787    def __truediv__(self, y):
+788        if isinstance(y, Obs):
+789            return derived_observable(lambda x, **kwargs: x[0] / x[1], [self, y], man_grad=[1 / y.value, - self.value / y.value ** 2])
+790        else:
+791            if isinstance(y, np.ndarray):
+792                return np.array([self / o for o in y])
+793            elif y.__class__.__name__ in ['Corr', 'CObs']:
+794                return NotImplemented
+795            else:
+796                return derived_observable(lambda x, **kwargs: x[0] / y, [self], man_grad=[1 / y])
+797
+798    def __rtruediv__(self, y):
+799        if isinstance(y, Obs):
+800            return derived_observable(lambda x, **kwargs: x[0] / x[1], [y, self], man_grad=[1 / self.value, - y.value / self.value ** 2])
+801        else:
+802            if isinstance(y, np.ndarray):
+803                return np.array([o / self for o in y])
+804            elif y.__class__.__name__ in ['Corr', 'CObs']:
+805                return NotImplemented
+806            else:
+807                return derived_observable(lambda x, **kwargs: y / x[0], [self], man_grad=[-y / self.value ** 2])
+808
+809    def __pow__(self, y):
+810        if isinstance(y, Obs):
+811            return derived_observable(lambda x: x[0] ** x[1], [self, y])
+812        else:
+813            return derived_observable(lambda x: x[0] ** y, [self])
+814
+815    def __rpow__(self, y):
+816        if isinstance(y, Obs):
+817            return derived_observable(lambda x: x[0] ** x[1], [y, self])
+818        else:
+819            return derived_observable(lambda x: y ** x[0], [self])
 820
-821    # Overload numpy functions
-822    def sqrt(self):
-823        return derived_observable(lambda x, **kwargs: np.sqrt(x[0]), [self], man_grad=[1 / 2 / np.sqrt(self.value)])
-824
-825    def log(self):
-826        return derived_observable(lambda x, **kwargs: np.log(x[0]), [self], man_grad=[1 / self.value])
+821    def __abs__(self):
+822        return derived_observable(lambda x: anp.abs(x[0]), [self])
+823
+824    # Overload numpy functions
+825    def sqrt(self):
+826        return derived_observable(lambda x, **kwargs: np.sqrt(x[0]), [self], man_grad=[1 / 2 / np.sqrt(self.value)])
 827
-828    def exp(self):
-829        return derived_observable(lambda x, **kwargs: np.exp(x[0]), [self], man_grad=[np.exp(self.value)])
+828    def log(self):
+829        return derived_observable(lambda x, **kwargs: np.log(x[0]), [self], man_grad=[1 / self.value])
 830
-831    def sin(self):
-832        return derived_observable(lambda x, **kwargs: np.sin(x[0]), [self], man_grad=[np.cos(self.value)])
+831    def exp(self):
+832        return derived_observable(lambda x, **kwargs: np.exp(x[0]), [self], man_grad=[np.exp(self.value)])
 833
-834    def cos(self):
-835        return derived_observable(lambda x, **kwargs: np.cos(x[0]), [self], man_grad=[-np.sin(self.value)])
+834    def sin(self):
+835        return derived_observable(lambda x, **kwargs: np.sin(x[0]), [self], man_grad=[np.cos(self.value)])
 836
-837    def tan(self):
-838        return derived_observable(lambda x, **kwargs: np.tan(x[0]), [self], man_grad=[1 / np.cos(self.value) ** 2])
+837    def cos(self):
+838        return derived_observable(lambda x, **kwargs: np.cos(x[0]), [self], man_grad=[-np.sin(self.value)])
 839
-840    def arcsin(self):
-841        return derived_observable(lambda x: anp.arcsin(x[0]), [self])
+840    def tan(self):
+841        return derived_observable(lambda x, **kwargs: np.tan(x[0]), [self], man_grad=[1 / np.cos(self.value) ** 2])
 842
-843    def arccos(self):
-844        return derived_observable(lambda x: anp.arccos(x[0]), [self])
+843    def arcsin(self):
+844        return derived_observable(lambda x: anp.arcsin(x[0]), [self])
 845
-846    def arctan(self):
-847        return derived_observable(lambda x: anp.arctan(x[0]), [self])
+846    def arccos(self):
+847        return derived_observable(lambda x: anp.arccos(x[0]), [self])
 848
-849    def sinh(self):
-850        return derived_observable(lambda x, **kwargs: np.sinh(x[0]), [self], man_grad=[np.cosh(self.value)])
+849    def arctan(self):
+850        return derived_observable(lambda x: anp.arctan(x[0]), [self])
 851
-852    def cosh(self):
-853        return derived_observable(lambda x, **kwargs: np.cosh(x[0]), [self], man_grad=[np.sinh(self.value)])
+852    def sinh(self):
+853        return derived_observable(lambda x, **kwargs: np.sinh(x[0]), [self], man_grad=[np.cosh(self.value)])
 854
-855    def tanh(self):
-856        return derived_observable(lambda x, **kwargs: np.tanh(x[0]), [self], man_grad=[1 / np.cosh(self.value) ** 2])
+855    def cosh(self):
+856        return derived_observable(lambda x, **kwargs: np.cosh(x[0]), [self], man_grad=[np.sinh(self.value)])
 857
-858    def arcsinh(self):
-859        return derived_observable(lambda x: anp.arcsinh(x[0]), [self])
+858    def tanh(self):
+859        return derived_observable(lambda x, **kwargs: np.tanh(x[0]), [self], man_grad=[1 / np.cosh(self.value) ** 2])
 860
-861    def arccosh(self):
-862        return derived_observable(lambda x: anp.arccosh(x[0]), [self])
+861    def arcsinh(self):
+862        return derived_observable(lambda x: anp.arcsinh(x[0]), [self])
 863
-864    def arctanh(self):
-865        return derived_observable(lambda x: anp.arctanh(x[0]), [self])
+864    def arccosh(self):
+865        return derived_observable(lambda x: anp.arccosh(x[0]), [self])
+866
+867    def arctanh(self):
+868        return derived_observable(lambda x: anp.arctanh(x[0]), [self])
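With the overloads above, arithmetic on Obs objects propagates errors automatically; a short sketch (values illustrative):

    import numpy as np
    import pyerrors as pe

    x = pe.Obs([np.random.normal(1.0, 0.1, 500)], ['ens1'])
    y = pe.Obs([np.random.normal(2.0, 0.2, 500)], ['ens1'])

    z = (x + y) / x.sqrt() * 2 - 1  # every step returns a new Obs
    z.gamma_method()
    print(z)                        # __str__ formats value and uncertainty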
 
@@ -2779,87 +2782,87 @@ this overwrites the standard value for that ensemble.
-
 60    def __init__(self, samples, names, idl=None, **kwargs):
- 61        """ Initialize Obs object.
- 62
- 63        Parameters
- 64        ----------
- 65        samples : list
- 66            list of numpy arrays containing the Monte Carlo samples
- 67        names : list
- 68            list of strings labeling the individual samples
- 69        idl : list, optional
- 70            list of ranges or lists on which the samples are defined
- 71        """
- 72
- 73        if kwargs.get("means") is None and len(samples):
- 74            if len(samples) != len(names):
- 75                raise Exception('Length of samples and names incompatible.')
- 76            if idl is not None:
- 77                if len(idl) != len(names):
- 78                    raise Exception('Length of idl incompatible with samples and names.')
- 79            name_length = len(names)
- 80            if name_length > 1:
- 81                if name_length != len(set(names)):
- 82                    raise Exception('names are not unique.')
- 83                if not all(isinstance(x, str) for x in names):
- 84                    raise TypeError('All names have to be strings.')
- 85            else:
- 86                if not isinstance(names[0], str):
+            
 63    def __init__(self, samples, names, idl=None, **kwargs):
+ 64        """ Initialize Obs object.
+ 65
+ 66        Parameters
+ 67        ----------
+ 68        samples : list
+ 69            list of numpy arrays containing the Monte Carlo samples
+ 70        names : list
+ 71            list of strings labeling the individual samples
+ 72        idl : list, optional
+ 73            list of ranges or lists on which the samples are defined
+ 74        """
+ 75
+ 76        if kwargs.get("means") is None and len(samples):
+ 77            if len(samples) != len(names):
+ 78                raise Exception('Length of samples and names incompatible.')
+ 79            if idl is not None:
+ 80                if len(idl) != len(names):
+ 81                    raise Exception('Length of idl incompatible with samples and names.')
+ 82            name_length = len(names)
+ 83            if name_length > 1:
+ 84                if name_length != len(set(names)):
+ 85                    raise Exception('names are not unique.')
+ 86                if not all(isinstance(x, str) for x in names):
  87                    raise TypeError('All names have to be strings.')
- 88            if min(len(x) for x in samples) <= 4:
- 89                raise Exception('Samples have to have at least 5 entries.')
- 90
- 91        self.names = sorted(names)
- 92        self.shape = {}
- 93        self.r_values = {}
- 94        self.deltas = {}
- 95        self._covobs = {}
- 96
- 97        self._value = 0
- 98        self.N = 0
- 99        self.is_merged = {}
-100        self.idl = {}
-101        if idl is not None:
-102            for name, idx in sorted(zip(names, idl)):
-103                if isinstance(idx, range):
-104                    self.idl[name] = idx
-105                elif isinstance(idx, (list, np.ndarray)):
-106                    dc = np.unique(np.diff(idx))
-107                    if np.any(dc < 0):
-108                        raise Exception("Unsorted idx for idl[%s]" % (name))
-109                    if len(dc) == 1:
-110                        self.idl[name] = range(idx[0], idx[-1] + dc[0], dc[0])
-111                    else:
-112                        self.idl[name] = list(idx)
-113                else:
-114                    raise Exception('incompatible type for idl[%s].' % (name))
-115        else:
-116            for name, sample in sorted(zip(names, samples)):
-117                self.idl[name] = range(1, len(sample) + 1)
-118
-119        if kwargs.get("means") is not None:
-120            for name, sample, mean in sorted(zip(names, samples, kwargs.get("means"))):
-121                self.shape[name] = len(self.idl[name])
-122                self.N += self.shape[name]
-123                self.r_values[name] = mean
-124                self.deltas[name] = sample
-125        else:
-126            for name, sample in sorted(zip(names, samples)):
-127                self.shape[name] = len(self.idl[name])
-128                self.N += self.shape[name]
-129                if len(sample) != self.shape[name]:
-130                    raise Exception('Incompatible samples and idx for %s: %d vs. %d' % (name, len(sample), self.shape[name]))
-131                self.r_values[name] = np.mean(sample)
-132                self.deltas[name] = sample - self.r_values[name]
-133                self._value += self.shape[name] * self.r_values[name]
-134            self._value /= self.N
-135
-136        self._dvalue = 0.0
-137        self.ddvalue = 0.0
-138        self.reweighted = False
-139
-140        self.tag = None
+ 88            else:
+ 89                if not isinstance(names[0], str):
+ 90                    raise TypeError('All names have to be strings.')
+ 91            if min(len(x) for x in samples) <= 4:
+ 92                raise Exception('Samples have to have at least 5 entries.')
+ 93
+ 94        self.names = sorted(names)
+ 95        self.shape = {}
+ 96        self.r_values = {}
+ 97        self.deltas = {}
+ 98        self._covobs = {}
+ 99
+100        self._value = 0
+101        self.N = 0
+102        self.is_merged = {}
+103        self.idl = {}
+104        if idl is not None:
+105            for name, idx in sorted(zip(names, idl)):
+106                if isinstance(idx, range):
+107                    self.idl[name] = idx
+108                elif isinstance(idx, (list, np.ndarray)):
+109                    dc = np.unique(np.diff(idx))
+110                    if np.any(dc < 0):
+111                        raise Exception("Unsorted idx for idl[%s]" % (name))
+112                    if len(dc) == 1:
+113                        self.idl[name] = range(idx[0], idx[-1] + dc[0], dc[0])
+114                    else:
+115                        self.idl[name] = list(idx)
+116                else:
+117                    raise Exception('incompatible type for idl[%s].' % (name))
+118        else:
+119            for name, sample in sorted(zip(names, samples)):
+120                self.idl[name] = range(1, len(sample) + 1)
+121
+122        if kwargs.get("means") is not None:
+123            for name, sample, mean in sorted(zip(names, samples, kwargs.get("means"))):
+124                self.shape[name] = len(self.idl[name])
+125                self.N += self.shape[name]
+126                self.r_values[name] = mean
+127                self.deltas[name] = sample
+128        else:
+129            for name, sample in sorted(zip(names, samples)):
+130                self.shape[name] = len(self.idl[name])
+131                self.N += self.shape[name]
+132                if len(sample) != self.shape[name]:
+133                    raise Exception('Incompatible samples and idx for %s: %d vs. %d' % (name, len(sample), self.shape[name]))
+134                self.r_values[name] = np.mean(sample)
+135                self.deltas[name] = sample - self.r_values[name]
+136                self._value += self.shape[name] * self.r_values[name]
+137            self._value /= self.N
+138
+139        self._dvalue = 0.0
+140        self.ddvalue = 0.0
+141        self.reweighted = False
+142
+143        self.tag = None
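A construction sketch matching this initializer, with an explicit idl for a gapped Monte Carlo chain (names and numbers illustrative; '|' separates ensemble and replicum in pyerrors naming):

    import numpy as np
    import pyerrors as pe

    samples = np.random.normal(0.5, 0.05, 100)  # one value per measurement
    obs = pe.Obs([samples], ['ensA|r01'], idl=[range(4, 401, 4)])  # cfgs 4, 8, ..., 400
    obs.gamma_method()  # details() then reports tau_int in units of the 4-config gap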
 
@@ -2890,180 +2893,180 @@ list of ranges or lists on which the samples are defined
-
175    def gamma_method(self, **kwargs):
-176        """Estimate the error and related properties of the Obs.
-177
-178        Parameters
-179        ----------
-180        S : float
-181            specifies a custom value for the parameter S (default 2.0).
-182            If set to 0 it is assumed that the data exhibits no
-183            autocorrelation. In this case the error estimates coincides
-184            with the sample standard error.
-185        tau_exp : float
-186            positive value triggers the critical slowing down analysis
-187            (default 0.0).
-188        N_sigma : float
-189            number of standard deviations from zero until the tail is
-190            attached to the autocorrelation function (default 1).
-191        fft : bool
-192            determines whether the fft algorithm is used for the computation
-193            of the autocorrelation function (default True)
-194        """
-195
-196        e_content = self.e_content
-197        self.e_dvalue = {}
-198        self.e_ddvalue = {}
-199        self.e_tauint = {}
-200        self.e_dtauint = {}
-201        self.e_windowsize = {}
-202        self.e_n_tauint = {}
-203        self.e_n_dtauint = {}
-204        e_gamma = {}
-205        self.e_rho = {}
-206        self.e_drho = {}
-207        self._dvalue = 0
-208        self.ddvalue = 0
-209
-210        self.S = {}
-211        self.tau_exp = {}
-212        self.N_sigma = {}
-213
-214        if kwargs.get('fft') is False:
-215            fft = False
-216        else:
-217            fft = True
-218
-219        def _parse_kwarg(kwarg_name):
-220            if kwarg_name in kwargs:
-221                tmp = kwargs.get(kwarg_name)
-222                if isinstance(tmp, (int, float)):
-223                    if tmp < 0:
-224                        raise Exception(kwarg_name + ' has to be larger or equal to 0.')
-225                    for e, e_name in enumerate(self.e_names):
-226                        getattr(self, kwarg_name)[e_name] = tmp
-227                else:
-228                    raise TypeError(kwarg_name + ' is not in proper format.')
-229            else:
-230                for e, e_name in enumerate(self.e_names):
-231                    if e_name in getattr(Obs, kwarg_name + '_dict'):
-232                        getattr(self, kwarg_name)[e_name] = getattr(Obs, kwarg_name + '_dict')[e_name]
-233                    else:
-234                        getattr(self, kwarg_name)[e_name] = getattr(Obs, kwarg_name + '_global')
-235
-236        _parse_kwarg('S')
-237        _parse_kwarg('tau_exp')
-238        _parse_kwarg('N_sigma')
-239
-240        for e, e_name in enumerate(self.mc_names):
-241            r_length = []
-242            for r_name in e_content[e_name]:
-243                if isinstance(self.idl[r_name], range):
-244                    r_length.append(len(self.idl[r_name]))
-245                else:
-246                    r_length.append((self.idl[r_name][-1] - self.idl[r_name][0] + 1))
-247
-248            e_N = np.sum([self.shape[r_name] for r_name in e_content[e_name]])
-249            w_max = max(r_length) // 2
-250            e_gamma[e_name] = np.zeros(w_max)
-251            self.e_rho[e_name] = np.zeros(w_max)
-252            self.e_drho[e_name] = np.zeros(w_max)
-253
-254            for r_name in e_content[e_name]:
-255                e_gamma[e_name] += self._calc_gamma(self.deltas[r_name], self.idl[r_name], self.shape[r_name], w_max, fft)
+            
178    def gamma_method(self, **kwargs):
+179        """Estimate the error and related properties of the Obs.
+180
+181        Parameters
+182        ----------
+183        S : float
+184            specifies a custom value for the parameter S (default 2.0).
+185            If set to 0 it is assumed that the data exhibits no
+186            autocorrelation. In this case the error estimate coincides
+187            with the sample standard error.
+188        tau_exp : float
+189            positive value triggers the critical slowing down analysis
+190            (default 0.0).
+191        N_sigma : float
+192            number of standard deviations from zero until the tail is
+193            attached to the autocorrelation function (default 1).
+194        fft : bool
+195            determines whether the fft algorithm is used for the computation
+196            of the autocorrelation function (default True).
+197        """
+198
+199        e_content = self.e_content
+200        self.e_dvalue = {}
+201        self.e_ddvalue = {}
+202        self.e_tauint = {}
+203        self.e_dtauint = {}
+204        self.e_windowsize = {}
+205        self.e_n_tauint = {}
+206        self.e_n_dtauint = {}
+207        e_gamma = {}
+208        self.e_rho = {}
+209        self.e_drho = {}
+210        self._dvalue = 0
+211        self.ddvalue = 0
+212
+213        self.S = {}
+214        self.tau_exp = {}
+215        self.N_sigma = {}
+216
+217        if kwargs.get('fft') is False:
+218            fft = False
+219        else:
+220            fft = True
+221
+222        def _parse_kwarg(kwarg_name):
+223            if kwarg_name in kwargs:
+224                tmp = kwargs.get(kwarg_name)
+225                if isinstance(tmp, (int, float)):
+226                    if tmp < 0:
+227                        raise Exception(kwarg_name + ' has to be larger or equal to 0.')
+228                    for e, e_name in enumerate(self.e_names):
+229                        getattr(self, kwarg_name)[e_name] = tmp
+230                else:
+231                    raise TypeError(kwarg_name + ' is not in proper format.')
+232            else:
+233                for e, e_name in enumerate(self.e_names):
+234                    if e_name in getattr(Obs, kwarg_name + '_dict'):
+235                        getattr(self, kwarg_name)[e_name] = getattr(Obs, kwarg_name + '_dict')[e_name]
+236                    else:
+237                        getattr(self, kwarg_name)[e_name] = getattr(Obs, kwarg_name + '_global')
+238
+239        _parse_kwarg('S')
+240        _parse_kwarg('tau_exp')
+241        _parse_kwarg('N_sigma')
+242
+243        for e, e_name in enumerate(self.mc_names):
+244            r_length = []
+245            for r_name in e_content[e_name]:
+246                if isinstance(self.idl[r_name], range):
+247                    r_length.append(len(self.idl[r_name]))
+248                else:
+249                    r_length.append((self.idl[r_name][-1] - self.idl[r_name][0] + 1))
+250
+251            e_N = np.sum([self.shape[r_name] for r_name in e_content[e_name]])
+252            w_max = max(r_length) // 2
+253            e_gamma[e_name] = np.zeros(w_max)
+254            self.e_rho[e_name] = np.zeros(w_max)
+255            self.e_drho[e_name] = np.zeros(w_max)
 256
-257            gamma_div = np.zeros(w_max)
-258            for r_name in e_content[e_name]:
-259                gamma_div += self._calc_gamma(np.ones((self.shape[r_name])), self.idl[r_name], self.shape[r_name], w_max, fft)
-260            gamma_div[gamma_div < 1] = 1.0
-261            e_gamma[e_name] /= gamma_div[:w_max]
-262
-263            if np.abs(e_gamma[e_name][0]) < 10 * np.finfo(float).tiny:  # Prevent division by zero
-264                self.e_tauint[e_name] = 0.5
-265                self.e_dtauint[e_name] = 0.0
-266                self.e_dvalue[e_name] = 0.0
-267                self.e_ddvalue[e_name] = 0.0
-268                self.e_windowsize[e_name] = 0
-269                continue
-270
-271            gaps = []
-272            for r_name in e_content[e_name]:
-273                if isinstance(self.idl[r_name], range):
-274                    gaps.append(1)
-275                else:
-276                    gaps.append(np.min(np.diff(self.idl[r_name])))
-277
-278            if not np.all([gi == gaps[0] for gi in gaps]):
-279                raise Exception(f"Replica for ensemble {e_name} are not equally spaced.", gaps)
-280            else:
-281                gapsize = gaps[0]
-282
-283            self.e_rho[e_name] = e_gamma[e_name][:w_max] / e_gamma[e_name][0]
-284            self.e_n_tauint[e_name] = np.cumsum(np.concatenate(([0.5], self.e_rho[e_name][1:])))
-285            # Make sure no entry of tauint is smaller than 0.5
-286            self.e_n_tauint[e_name][self.e_n_tauint[e_name] <= 0.5] = 0.5 + np.finfo(np.float64).eps
-287            # hep-lat/0306017 eq. (42)
-288            self.e_n_dtauint[e_name] = self.e_n_tauint[e_name] * 2 * np.sqrt(np.abs(np.arange(w_max) / gapsize + 0.5 - self.e_n_tauint[e_name]) / e_N)
-289            self.e_n_dtauint[e_name][0] = 0.0
-290
-291            def _compute_drho(i):
-292                tmp = self.e_rho[e_name][i + 1:w_max] + np.concatenate([self.e_rho[e_name][i - 1::-1], self.e_rho[e_name][1:w_max - 2 * i]]) - 2 * self.e_rho[e_name][i] * self.e_rho[e_name][1:w_max - i]
-293                self.e_drho[e_name][i] = np.sqrt(np.sum(tmp ** 2) / e_N)
-294
-295            _compute_drho(gapsize)
-296            if self.tau_exp[e_name] > 0:
-297                texp = self.tau_exp[e_name]
-298                # Critical slowing down analysis
-299                if w_max // 2 <= 1:
-300                    raise Exception("Need at least 8 samples for tau_exp error analysis")
-301                for n in range(gapsize, w_max // 2, gapsize):
-302                    _compute_drho(n + gapsize)
-303                    if (self.e_rho[e_name][n] - self.N_sigma[e_name] * self.e_drho[e_name][n]) < 0 or n >= w_max // 2 - 2:
-304                        # Bias correction hep-lat/0306017 eq. (49) included
-305                        self.e_tauint[e_name] = self.e_n_tauint[e_name][n] * (1 + (2 * n / gapsize + 1) / e_N) / (1 + 1 / e_N) + texp * np.abs(self.e_rho[e_name][n + 1])  # The absolute makes sure, that the tail contribution is always positive
-306                        self.e_dtauint[e_name] = np.sqrt(self.e_n_dtauint[e_name][n] ** 2 + texp ** 2 * self.e_drho[e_name][n + 1] ** 2)
-307                        # Error of tau_exp neglected so far, missing term: self.e_rho[e_name][n + 1] ** 2 * d_tau_exp ** 2
-308                        self.e_dvalue[e_name] = np.sqrt(2 * self.e_tauint[e_name] * e_gamma[e_name][0] * (1 + 1 / e_N) / e_N)
-309                        self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt((n / gapsize + 0.5) / e_N)
-310                        self.e_windowsize[e_name] = n
-311                        break
-312            else:
-313                if self.S[e_name] == 0.0:
-314                    self.e_tauint[e_name] = 0.5
-315                    self.e_dtauint[e_name] = 0.0
-316                    self.e_dvalue[e_name] = np.sqrt(e_gamma[e_name][0] / (e_N - 1))
-317                    self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt(0.5 / e_N)
-318                    self.e_windowsize[e_name] = 0
-319                else:
-320                    # Standard automatic windowing procedure
-321                    tau = self.S[e_name] / np.log((2 * self.e_n_tauint[e_name][gapsize::gapsize] + 1) / (2 * self.e_n_tauint[e_name][gapsize::gapsize] - 1))
-322                    g_w = np.exp(- np.arange(1, len(tau) + 1) / tau) - tau / np.sqrt(np.arange(1, len(tau) + 1) * e_N)
-323                    for n in range(1, w_max):
-324                        if n < w_max // 2 - 2:
-325                            _compute_drho(gapsize * n + gapsize)
-326                        if g_w[n - 1] < 0 or n >= w_max - 1:
-327                            n *= gapsize
-328                            self.e_tauint[e_name] = self.e_n_tauint[e_name][n] * (1 + (2 * n / gapsize + 1) / e_N) / (1 + 1 / e_N)  # Bias correction hep-lat/0306017 eq. (49)
-329                            self.e_dtauint[e_name] = self.e_n_dtauint[e_name][n]
-330                            self.e_dvalue[e_name] = np.sqrt(2 * self.e_tauint[e_name] * e_gamma[e_name][0] * (1 + 1 / e_N) / e_N)
-331                            self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt((n / gapsize + 0.5) / e_N)
-332                            self.e_windowsize[e_name] = n
-333                            break
-334
-335            self._dvalue += self.e_dvalue[e_name] ** 2
-336            self.ddvalue += (self.e_dvalue[e_name] * self.e_ddvalue[e_name]) ** 2
+257            for r_name in e_content[e_name]:
+258                e_gamma[e_name] += self._calc_gamma(self.deltas[r_name], self.idl[r_name], self.shape[r_name], w_max, fft)
+259
+260            gamma_div = np.zeros(w_max)
+261            for r_name in e_content[e_name]:
+262                gamma_div += self._calc_gamma(np.ones((self.shape[r_name])), self.idl[r_name], self.shape[r_name], w_max, fft)
+263            gamma_div[gamma_div < 1] = 1.0
+264            e_gamma[e_name] /= gamma_div[:w_max]
+265
+266            if np.abs(e_gamma[e_name][0]) < 10 * np.finfo(float).tiny:  # Prevent division by zero
+267                self.e_tauint[e_name] = 0.5
+268                self.e_dtauint[e_name] = 0.0
+269                self.e_dvalue[e_name] = 0.0
+270                self.e_ddvalue[e_name] = 0.0
+271                self.e_windowsize[e_name] = 0
+272                continue
+273
+274            gaps = []
+275            for r_name in e_content[e_name]:
+276                if isinstance(self.idl[r_name], range):
+277                    gaps.append(1)
+278                else:
+279                    gaps.append(np.min(np.diff(self.idl[r_name])))
+280
+281            if not np.all([gi == gaps[0] for gi in gaps]):
+282                raise Exception(f"Replica for ensemble {e_name} are not equally spaced.", gaps)
+283            else:
+284                gapsize = gaps[0]
+285
+286            self.e_rho[e_name] = e_gamma[e_name][:w_max] / e_gamma[e_name][0]
+287            self.e_n_tauint[e_name] = np.cumsum(np.concatenate(([0.5], self.e_rho[e_name][1:])))
+288            # Make sure no entry of tauint is smaller than 0.5
+289            self.e_n_tauint[e_name][self.e_n_tauint[e_name] <= 0.5] = 0.5 + np.finfo(np.float64).eps
+290            # hep-lat/0306017 eq. (42)
+291            self.e_n_dtauint[e_name] = self.e_n_tauint[e_name] * 2 * np.sqrt(np.abs(np.arange(w_max) / gapsize + 0.5 - self.e_n_tauint[e_name]) / e_N)
+292            self.e_n_dtauint[e_name][0] = 0.0
+293
+294            def _compute_drho(i):
+295                tmp = self.e_rho[e_name][i + 1:w_max] + np.concatenate([self.e_rho[e_name][i - 1::-1], self.e_rho[e_name][1:w_max - 2 * i]]) - 2 * self.e_rho[e_name][i] * self.e_rho[e_name][1:w_max - i]
+296                self.e_drho[e_name][i] = np.sqrt(np.sum(tmp ** 2) / e_N)
+297
+298            _compute_drho(gapsize)
+299            if self.tau_exp[e_name] > 0:
+300                texp = self.tau_exp[e_name]
+301                # Critical slowing down analysis
+302                if w_max // 2 <= 1:
+303                    raise Exception("Need at least 8 samples for tau_exp error analysis")
+304                for n in range(gapsize, w_max // 2, gapsize):
+305                    _compute_drho(n + gapsize)
+306                    if (self.e_rho[e_name][n] - self.N_sigma[e_name] * self.e_drho[e_name][n]) < 0 or n >= w_max // 2 - 2:
+307                        # Bias correction hep-lat/0306017 eq. (49) included
+308                        self.e_tauint[e_name] = self.e_n_tauint[e_name][n] * (1 + (2 * n / gapsize + 1) / e_N) / (1 + 1 / e_N) + texp * np.abs(self.e_rho[e_name][n + 1])  # The absolute value makes sure that the tail contribution is always positive
+309                        self.e_dtauint[e_name] = np.sqrt(self.e_n_dtauint[e_name][n] ** 2 + texp ** 2 * self.e_drho[e_name][n + 1] ** 2)
+310                        # Error of tau_exp neglected so far, missing term: self.e_rho[e_name][n + 1] ** 2 * d_tau_exp ** 2
+311                        self.e_dvalue[e_name] = np.sqrt(2 * self.e_tauint[e_name] * e_gamma[e_name][0] * (1 + 1 / e_N) / e_N)
+312                        self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt((n / gapsize + 0.5) / e_N)
+313                        self.e_windowsize[e_name] = n
+314                        break
+315            else:
+316                if self.S[e_name] == 0.0:
+317                    self.e_tauint[e_name] = 0.5
+318                    self.e_dtauint[e_name] = 0.0
+319                    self.e_dvalue[e_name] = np.sqrt(e_gamma[e_name][0] / (e_N - 1))
+320                    self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt(0.5 / e_N)
+321                    self.e_windowsize[e_name] = 0
+322                else:
+323                    # Standard automatic windowing procedure
+324                    tau = self.S[e_name] / np.log((2 * self.e_n_tauint[e_name][gapsize::gapsize] + 1) / (2 * self.e_n_tauint[e_name][gapsize::gapsize] - 1))
+325                    g_w = np.exp(- np.arange(1, len(tau) + 1) / tau) - tau / np.sqrt(np.arange(1, len(tau) + 1) * e_N)
+326                    for n in range(1, w_max):
+327                        if n < w_max // 2 - 2:
+328                            _compute_drho(gapsize * n + gapsize)
+329                        if g_w[n - 1] < 0 or n >= w_max - 1:
+330                            n *= gapsize
+331                            self.e_tauint[e_name] = self.e_n_tauint[e_name][n] * (1 + (2 * n / gapsize + 1) / e_N) / (1 + 1 / e_N)  # Bias correction hep-lat/0306017 eq. (49)
+332                            self.e_dtauint[e_name] = self.e_n_dtauint[e_name][n]
+333                            self.e_dvalue[e_name] = np.sqrt(2 * self.e_tauint[e_name] * e_gamma[e_name][0] * (1 + 1 / e_N) / e_N)
+334                            self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt((n / gapsize + 0.5) / e_N)
+335                            self.e_windowsize[e_name] = n
+336                            break
 337
-338        for e_name in self.cov_names:
-339            self.e_dvalue[e_name] = np.sqrt(self.covobs[e_name].errsq())
-340            self.e_ddvalue[e_name] = 0
-341            self._dvalue += self.e_dvalue[e_name]**2
-342
-343        self._dvalue = np.sqrt(self._dvalue)
-344        if self._dvalue == 0.0:
-345            self.ddvalue = 0.0
-346        else:
-347            self.ddvalue = np.sqrt(self.ddvalue) / self._dvalue
-348        return
+338            self._dvalue += self.e_dvalue[e_name] ** 2
+339            self.ddvalue += (self.e_dvalue[e_name] * self.e_ddvalue[e_name]) ** 2
+340
+341        for e_name in self.cov_names:
+342            self.e_dvalue[e_name] = np.sqrt(self.covobs[e_name].errsq())
+343            self.e_ddvalue[e_name] = 0
+344            self._dvalue += self.e_dvalue[e_name]**2
+345
+346        self._dvalue = np.sqrt(self._dvalue)
+347        if self._dvalue == 0.0:
+348            self.ddvalue = 0.0
+349        else:
+350            self.ddvalue = np.sqrt(self.ddvalue) / self._dvalue
+351        return
 
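For orientation, a minimal usage sketch of the two windowing branches above, assuming gamma_method accepts the S, tau_exp and N_sigma keywords used in the code (ensemble name 'A' and the AR(1) data are made up):

    import numpy as np
    import pyerrors as pe

    # Synthetic autocorrelated chain (AR(1), tau_int of about 9.5) on ensemble 'A'
    rng = np.random.default_rng(0)
    samples = np.zeros(2000)
    for i in range(1, 2000):
        samples[i] = 0.9 * samples[i - 1] + rng.normal()

    obs = pe.Obs([samples], ['A'])
    obs.gamma_method(S=3.0)                  # automatic windowing (else-branch above)
    print(obs.e_tauint['A'], obs.e_windowsize['A'])
    obs.gamma_method(tau_exp=10, N_sigma=2)  # exponential-tail branch above
    print(obs.e_tauint['A'], obs.e_dtauint['A'])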
@@ -3102,77 +3105,77 @@ of the autocorrelation function (default True)
-
383    def details(self, ens_content=True):
-384        """Output detailed properties of the Obs.
-385
-386        Parameters
-387        ----------
-388        ens_content : bool
-389            print details about the ensembles and replica if true.
-390        """
-391        if self.tag is not None:
-392            print("Description:", self.tag)
-393        if not hasattr(self, 'e_dvalue'):
-394            print('Result\t %3.8e' % (self.value))
-395        else:
-396            if self.value == 0.0:
-397                percentage = np.nan
-398            else:
-399                percentage = np.abs(self._dvalue / self.value) * 100
-400            print('Result\t %3.8e +/- %3.8e +/- %3.8e (%3.3f%%)' % (self.value, self._dvalue, self.ddvalue, percentage))
-401            if len(self.e_names) > 1:
-402                print(' Ensemble errors:')
-403            e_content = self.e_content
-404            for e_name in self.mc_names:
-405                if isinstance(self.idl[e_content[e_name][0]], range):
-406                    gap = self.idl[e_content[e_name][0]].step
-407                else:
-408                    gap = np.min(np.diff(self.idl[e_content[e_name][0]]))
-409
-410                if len(self.e_names) > 1:
-411                    print('', e_name, '\t %3.6e +/- %3.6e' % (self.e_dvalue[e_name], self.e_ddvalue[e_name]))
-412                tau_string = " \N{GREEK SMALL LETTER TAU}_int\t " + _format_uncertainty(self.e_tauint[e_name], self.e_dtauint[e_name])
-413                tau_string += f" in units of {gap} config"
-414                if gap > 1:
-415                    tau_string += "s"
-416                if self.tau_exp[e_name] > 0:
-417                    tau_string = f"{tau_string: <45}" + '\t(\N{GREEK SMALL LETTER TAU}_exp=%3.2f, N_\N{GREEK SMALL LETTER SIGMA}=%1.0i)' % (self.tau_exp[e_name], self.N_sigma[e_name])
-418                else:
-419                    tau_string = f"{tau_string: <45}" + '\t(S=%3.2f)' % (self.S[e_name])
-420                print(tau_string)
-421            for e_name in self.cov_names:
-422                print('', e_name, '\t %3.8e' % (self.e_dvalue[e_name]))
-423        if ens_content is True:
-424            if len(self.e_names) == 1:
-425                print(self.N, 'samples in', len(self.e_names), 'ensemble:')
-426            else:
-427                print(self.N, 'samples in', len(self.e_names), 'ensembles:')
-428            my_string_list = []
-429            for key, value in sorted(self.e_content.items()):
-430                if key not in self.covobs:
-431                    my_string = '  ' + "\u00B7 Ensemble '" + key + "' "
-432                    if len(value) == 1:
-433                        my_string += f': {self.shape[value[0]]} configurations'
-434                        if isinstance(self.idl[value[0]], range):
-435                            my_string += f' (from {self.idl[value[0]].start} to {self.idl[value[0]][-1]}' + int(self.idl[value[0]].step != 1) * f' in steps of {self.idl[value[0]].step}' + ')'
-436                        else:
-437                            my_string += f' (irregular range from {self.idl[value[0]][0]} to {self.idl[value[0]][-1]})'
-438                    else:
-439                        sublist = []
-440                        for v in value:
-441                            my_substring = '    ' + "\u00B7 Replicum '" + v[len(key) + 1:] + "' "
-442                            my_substring += f': {self.shape[v]} configurations'
-443                            if isinstance(self.idl[v], range):
-444                                my_substring += f' (from {self.idl[v].start} to {self.idl[v][-1]}' + int(self.idl[v].step != 1) * f' in steps of {self.idl[v].step}' + ')'
-445                            else:
-446                                my_substring += f' (irregular range from {self.idl[v][0]} to {self.idl[v][-1]})'
-447                            sublist.append(my_substring)
-448
-449                        my_string += '\n' + '\n'.join(sublist)
-450                else:
-451                    my_string = '  ' + "\u00B7 Covobs   '" + key + "' "
-452                my_string_list.append(my_string)
-453            print('\n'.join(my_string_list))
+            
386    def details(self, ens_content=True):
+387        """Output detailed properties of the Obs.
+388
+389        Parameters
+390        ----------
+391        ens_content : bool
+392            print details about the ensembles and replica if true.
+393        """
+394        if self.tag is not None:
+395            print("Description:", self.tag)
+396        if not hasattr(self, 'e_dvalue'):
+397            print('Result\t %3.8e' % (self.value))
+398        else:
+399            if self.value == 0.0:
+400                percentage = np.nan
+401            else:
+402                percentage = np.abs(self._dvalue / self.value) * 100
+403            print('Result\t %3.8e +/- %3.8e +/- %3.8e (%3.3f%%)' % (self.value, self._dvalue, self.ddvalue, percentage))
+404            if len(self.e_names) > 1:
+405                print(' Ensemble errors:')
+406            e_content = self.e_content
+407            for e_name in self.mc_names:
+408                if isinstance(self.idl[e_content[e_name][0]], range):
+409                    gap = self.idl[e_content[e_name][0]].step
+410                else:
+411                    gap = np.min(np.diff(self.idl[e_content[e_name][0]]))
+412
+413                if len(self.e_names) > 1:
+414                    print('', e_name, '\t %3.6e +/- %3.6e' % (self.e_dvalue[e_name], self.e_ddvalue[e_name]))
+415                tau_string = " \N{GREEK SMALL LETTER TAU}_int\t " + _format_uncertainty(self.e_tauint[e_name], self.e_dtauint[e_name])
+416                tau_string += f" in units of {gap} config"
+417                if gap > 1:
+418                    tau_string += "s"
+419                if self.tau_exp[e_name] > 0:
+420                    tau_string = f"{tau_string: <45}" + '\t(\N{GREEK SMALL LETTER TAU}_exp=%3.2f, N_\N{GREEK SMALL LETTER SIGMA}=%1.0i)' % (self.tau_exp[e_name], self.N_sigma[e_name])
+421                else:
+422                    tau_string = f"{tau_string: <45}" + '\t(S=%3.2f)' % (self.S[e_name])
+423                print(tau_string)
+424            for e_name in self.cov_names:
+425                print('', e_name, '\t %3.8e' % (self.e_dvalue[e_name]))
+426        if ens_content is True:
+427            if len(self.e_names) == 1:
+428                print(self.N, 'samples in', len(self.e_names), 'ensemble:')
+429            else:
+430                print(self.N, 'samples in', len(self.e_names), 'ensembles:')
+431            my_string_list = []
+432            for key, value in sorted(self.e_content.items()):
+433                if key not in self.covobs:
+434                    my_string = '  ' + "\u00B7 Ensemble '" + key + "' "
+435                    if len(value) == 1:
+436                        my_string += f': {self.shape[value[0]]} configurations'
+437                        if isinstance(self.idl[value[0]], range):
+438                            my_string += f' (from {self.idl[value[0]].start} to {self.idl[value[0]][-1]}' + int(self.idl[value[0]].step != 1) * f' in steps of {self.idl[value[0]].step}' + ')'
+439                        else:
+440                            my_string += f' (irregular range from {self.idl[value[0]][0]} to {self.idl[value[0]][-1]})'
+441                    else:
+442                        sublist = []
+443                        for v in value:
+444                            my_substring = '    ' + "\u00B7 Replicum '" + v[len(key) + 1:] + "' "
+445                            my_substring += f': {self.shape[v]} configurations'
+446                            if isinstance(self.idl[v], range):
+447                                my_substring += f' (from {self.idl[v].start} to {self.idl[v][-1]}' + int(self.idl[v].step != 1) * f' in steps of {self.idl[v].step}' + ')'
+448                            else:
+449                                my_substring += f' (irregular range from {self.idl[v][0]} to {self.idl[v][-1]})'
+450                            sublist.append(my_substring)
+451
+452                        my_string += '\n' + '\n'.join(sublist)
+453                else:
+454                    my_string = '  ' + "\u00B7 Covobs   '" + key + "' "
+455                my_string_list.append(my_string)
+456            print('\n'.join(my_string_list))
 
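A short usage sketch for details (hypothetical ensemble 'A' with synthetic data; the gamma method has to be run first for the error breakdown to be available):

    import numpy as np
    import pyerrors as pe

    obs = pe.Obs([np.random.normal(1.0, 0.5, 500)], ['A'])
    obs.gamma_method()
    obs.details()                    # result, error, tau_int and ensemble listing
    obs.details(ens_content=False)   # suppress the per-ensemble configuration details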
@@ -3199,20 +3202,20 @@ print details about the ensembles and replica if true.
-
455    def reweight(self, weight):
-456        """Reweight the obs with given reweighting factors.
-457
-458        Parameters
-459        ----------
-460        weight : Obs
-461            Reweighting factor. An Observable that has to be defined on a superset of the
-462            configurations in obs[i].idl for all i.
-463        all_configs : bool
-464            if True, the reweighted observables are normalized by the average of
-465            the reweighting factor on all configurations in weight.idl and not
-466            on the configurations in obs[i].idl. Default False.
-467        """
-468        return reweight(weight, [self])[0]
+            
458    def reweight(self, weight):
+459        """Reweight the obs with given reweighting factors.
+460
+461        Parameters
+462        ----------
+463        weight : Obs
+464            Reweighting factor. An Observable that has to be defined on a superset of the
+465            configurations in obs[i].idl for all i.
+466        all_configs : bool
+467            if True, the reweighted observables are normalized by the average of
+468            the reweighting factor on all configurations in weight.idl and not
+469            on the configurations in obs[i].idl. Default False.
+470        """
+471        return reweight(weight, [self])[0]
 
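A sketch of reweighting, assuming a reweighting factor defined on the same configurations (ensemble name and data are made up):

    import numpy as np
    import pyerrors as pe

    obs = pe.Obs([np.random.normal(1.0, 0.1, 500)], ['A'])
    w = pe.Obs([np.random.uniform(0.9, 1.1, 500)], ['A'])  # hypothetical reweighting factor
    r = obs.reweight(w)      # delegates to the module-level reweight function
    r.gamma_method()
    print(r, r.reweighted)   # the reweighted flag is set on the result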
@@ -3244,17 +3247,17 @@ on the configurations in obs[i].idl. Default False.
-
470    def is_zero_within_error(self, sigma=1):
-471        """Checks whether the observable is zero within 'sigma' standard errors.
-472
-473        Parameters
-474        ----------
-475        sigma : int
-476            Number of standard errors used for the check.
-477
-478        Only works properly after the gamma method has been run.
-479        """
-480        return self.is_zero() or np.abs(self.value) <= sigma * self._dvalue
+            
473    def is_zero_within_error(self, sigma=1):
+474        """Checks whether the observable is zero within 'sigma' standard errors.
+475
+476        Parameters
+477        ----------
+478        sigma : int
+479            Number of standard errors used for the check.
+480
+481        Only works properly after the gamma method has been run.
+482        """
+483        return self.is_zero() or np.abs(self.value) <= sigma * self._dvalue
 
@@ -3282,15 +3285,15 @@ Number of standard errors used for the check.
-
482    def is_zero(self, atol=1e-10):
-483        """Checks whether the observable is zero within a given tolerance.
-484
-485        Parameters
-486        ----------
-487        atol : float
-488            Absolute tolerance (for details see numpy documentation).
-489        """
-490        return np.isclose(0.0, self.value, 1e-14, atol) and all(np.allclose(0.0, delta, 1e-14, atol) for delta in self.deltas.values()) and all(np.allclose(0.0, delta.errsq(), 1e-14, atol) for delta in self.covobs.values())
+            
485    def is_zero(self, atol=1e-10):
+486        """Checks whether the observable is zero within a given tolerance.
+487
+488        Parameters
+489        ----------
+490        atol : float
+491            Absolute tolerance (for details see numpy documentation).
+492        """
+493        return np.isclose(0.0, self.value, 1e-14, atol) and all(np.allclose(0.0, delta, 1e-14, atol) for delta in self.deltas.values()) and all(np.allclose(0.0, delta.errsq(), 1e-14, atol) for delta in self.covobs.values())
 
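The two zero checks in use (synthetic data; note that is_zero_within_error needs a prior gamma_method call for the error to be meaningful):

    import numpy as np
    import pyerrors as pe

    x = pe.Obs([np.random.normal(0.0, 1.0, 500)], ['A'])
    x.gamma_method()
    print(x.is_zero_within_error(sigma=2))  # |value| <= 2 * dvalue?
    print((x - x).is_zero())                # deltas vanish identically -> True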
@@ -3317,45 +3320,45 @@ Absolute tolerance (for details see numpy documentation).
-
492    def plot_tauint(self, save=None):
-493        """Plot integrated autocorrelation time for each ensemble.
-494
-495        Parameters
-496        ----------
-497        save : str
-498            saves the figure to a file named 'save' if given.
-499        """
-500        if not hasattr(self, 'e_dvalue'):
-501            raise Exception('Run the gamma method first.')
-502
-503        for e, e_name in enumerate(self.mc_names):
-504            fig = plt.figure()
-505            plt.xlabel(r'$W$')
-506            plt.ylabel(r'$\tau_\mathrm{int}$')
-507            length = int(len(self.e_n_tauint[e_name]))
-508            if self.tau_exp[e_name] > 0:
-509                base = self.e_n_tauint[e_name][self.e_windowsize[e_name]]
-510                x_help = np.arange(2 * self.tau_exp[e_name])
-511                y_help = (x_help + 1) * np.abs(self.e_rho[e_name][self.e_windowsize[e_name] + 1]) * (1 - x_help / (2 * (2 * self.tau_exp[e_name] - 1))) + base
-512                x_arr = np.arange(self.e_windowsize[e_name] + 1, self.e_windowsize[e_name] + 1 + 2 * self.tau_exp[e_name])
-513                plt.plot(x_arr, y_help, 'C' + str(e), linewidth=1, ls='--', marker=',')
-514                plt.errorbar([self.e_windowsize[e_name] + 2 * self.tau_exp[e_name]], [self.e_tauint[e_name]],
-515                             yerr=[self.e_dtauint[e_name]], fmt='C' + str(e), linewidth=1, capsize=2, marker='o', mfc=plt.rcParams['axes.facecolor'])
-516                xmax = self.e_windowsize[e_name] + 2 * self.tau_exp[e_name] + 1.5
-517                label = e_name + r', $\tau_\mathrm{exp}$=' + str(np.around(self.tau_exp[e_name], decimals=2))
-518            else:
-519                label = e_name + ', S=' + str(np.around(self.S[e_name], decimals=2))
-520                xmax = max(10.5, 2 * self.e_windowsize[e_name] - 0.5)
-521
-522            plt.errorbar(np.arange(length)[:int(xmax) + 1], self.e_n_tauint[e_name][:int(xmax) + 1], yerr=self.e_n_dtauint[e_name][:int(xmax) + 1], linewidth=1, capsize=2, label=label)
-523            plt.axvline(x=self.e_windowsize[e_name], color='C' + str(e), alpha=0.5, marker=',', ls='--')
-524            plt.legend()
-525            plt.xlim(-0.5, xmax)
-526            ylim = plt.ylim()
-527            plt.ylim(bottom=0.0, top=max(1.0, ylim[1]))
-528            plt.draw()
-529            if save:
-530                fig.savefig(save + "_" + str(e))
+            
495    def plot_tauint(self, save=None):
+496        """Plot integrated autocorrelation time for each ensemble.
+497
+498        Parameters
+499        ----------
+500        save : str
+501            saves the figure to a file named 'save' if given.
+502        """
+503        if not hasattr(self, 'e_dvalue'):
+504            raise Exception('Run the gamma method first.')
+505
+506        for e, e_name in enumerate(self.mc_names):
+507            fig = plt.figure()
+508            plt.xlabel(r'$W$')
+509            plt.ylabel(r'$\tau_\mathrm{int}$')
+510            length = int(len(self.e_n_tauint[e_name]))
+511            if self.tau_exp[e_name] > 0:
+512                base = self.e_n_tauint[e_name][self.e_windowsize[e_name]]
+513                x_help = np.arange(2 * self.tau_exp[e_name])
+514                y_help = (x_help + 1) * np.abs(self.e_rho[e_name][self.e_windowsize[e_name] + 1]) * (1 - x_help / (2 * (2 * self.tau_exp[e_name] - 1))) + base
+515                x_arr = np.arange(self.e_windowsize[e_name] + 1, self.e_windowsize[e_name] + 1 + 2 * self.tau_exp[e_name])
+516                plt.plot(x_arr, y_help, 'C' + str(e), linewidth=1, ls='--', marker=',')
+517                plt.errorbar([self.e_windowsize[e_name] + 2 * self.tau_exp[e_name]], [self.e_tauint[e_name]],
+518                             yerr=[self.e_dtauint[e_name]], fmt='C' + str(e), linewidth=1, capsize=2, marker='o', mfc=plt.rcParams['axes.facecolor'])
+519                xmax = self.e_windowsize[e_name] + 2 * self.tau_exp[e_name] + 1.5
+520                label = e_name + r', $\tau_\mathrm{exp}$=' + str(np.around(self.tau_exp[e_name], decimals=2))
+521            else:
+522                label = e_name + ', S=' + str(np.around(self.S[e_name], decimals=2))
+523                xmax = max(10.5, 2 * self.e_windowsize[e_name] - 0.5)
+524
+525            plt.errorbar(np.arange(length)[:int(xmax) + 1], self.e_n_tauint[e_name][:int(xmax) + 1], yerr=self.e_n_dtauint[e_name][:int(xmax) + 1], linewidth=1, capsize=2, label=label)
+526            plt.axvline(x=self.e_windowsize[e_name], color='C' + str(e), alpha=0.5, marker=',', ls='--')
+527            plt.legend()
+528            plt.xlim(-0.5, xmax)
+529            ylim = plt.ylim()
+530            plt.ylim(bottom=0.0, top=max(1.0, ylim[1]))
+531            plt.draw()
+532            if save:
+533                fig.savefig(save + "_" + str(e))
 
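Usage sketch; when save is given, one file per ensemble is written with the ensemble index appended, as in the savefig call above (synthetic data on a made-up ensemble 'A'):

    import numpy as np
    import pyerrors as pe

    obs = pe.Obs([np.random.normal(1.0, 0.1, 500)], ['A'])
    obs.gamma_method(S=3.0)
    obs.plot_tauint()                # dashed vertical line marks the chosen window
    obs.plot_tauint(save='tauint')   # writes 'tauint_0' (one file per ensemble)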
@@ -3382,36 +3385,36 @@ saves the figure to a file named 'save' if.
-
532    def plot_rho(self, save=None):
-533        """Plot the normalized autocorrelation function for each ensemble.
-534
-535        Parameters
-536        ----------
-537        save : str
-538            saves the figure to a file named 'save' if given.
-539        """
-540        if not hasattr(self, 'e_dvalue'):
-541            raise Exception('Run the gamma method first.')
-542        for e, e_name in enumerate(self.mc_names):
-543            fig = plt.figure()
-544            plt.xlabel('W')
-545            plt.ylabel('rho')
-546            length = int(len(self.e_drho[e_name]))
-547            plt.errorbar(np.arange(length), self.e_rho[e_name][:length], yerr=self.e_drho[e_name][:], linewidth=1, capsize=2)
-548            plt.axvline(x=self.e_windowsize[e_name], color='r', alpha=0.25, ls='--', marker=',')
-549            if self.tau_exp[e_name] > 0:
-550                plt.plot([self.e_windowsize[e_name] + 1, self.e_windowsize[e_name] + 1 + 2 * self.tau_exp[e_name]],
-551                         [self.e_rho[e_name][self.e_windowsize[e_name] + 1], 0], 'k-', lw=1)
-552                xmax = self.e_windowsize[e_name] + 2 * self.tau_exp[e_name] + 1.5
-553                plt.title('Rho ' + e_name + r', tau\_exp=' + str(np.around(self.tau_exp[e_name], decimals=2)))
-554            else:
-555                xmax = max(10.5, 2 * self.e_windowsize[e_name] - 0.5)
-556                plt.title('Rho ' + e_name + ', S=' + str(np.around(self.S[e_name], decimals=2)))
-557            plt.plot([-0.5, xmax], [0, 0], 'k--', lw=1)
-558            plt.xlim(-0.5, xmax)
-559            plt.draw()
-560            if save:
-561                fig.savefig(save + "_" + str(e))
+            
535    def plot_rho(self, save=None):
+536        """Plot the normalized autocorrelation function for each ensemble.
+537
+538        Parameters
+539        ----------
+540        save : str
+541            saves the figure to a file named 'save' if given.
+542        """
+543        if not hasattr(self, 'e_dvalue'):
+544            raise Exception('Run the gamma method first.')
+545        for e, e_name in enumerate(self.mc_names):
+546            fig = plt.figure()
+547            plt.xlabel('W')
+548            plt.ylabel('rho')
+549            length = int(len(self.e_drho[e_name]))
+550            plt.errorbar(np.arange(length), self.e_rho[e_name][:length], yerr=self.e_drho[e_name][:], linewidth=1, capsize=2)
+551            plt.axvline(x=self.e_windowsize[e_name], color='r', alpha=0.25, ls='--', marker=',')
+552            if self.tau_exp[e_name] > 0:
+553                plt.plot([self.e_windowsize[e_name] + 1, self.e_windowsize[e_name] + 1 + 2 * self.tau_exp[e_name]],
+554                         [self.e_rho[e_name][self.e_windowsize[e_name] + 1], 0], 'k-', lw=1)
+555                xmax = self.e_windowsize[e_name] + 2 * self.tau_exp[e_name] + 1.5
+556                plt.title('Rho ' + e_name + r', tau\_exp=' + str(np.around(self.tau_exp[e_name], decimals=2)))
+557            else:
+558                xmax = max(10.5, 2 * self.e_windowsize[e_name] - 0.5)
+559                plt.title('Rho ' + e_name + ', S=' + str(np.around(self.S[e_name], decimals=2)))
+560            plt.plot([-0.5, xmax], [0, 0], 'k--', lw=1)
+561            plt.xlim(-0.5, xmax)
+562            plt.draw()
+563            if save:
+564                fig.savefig(save + "_" + str(e))
 
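Analogous sketch for the normalized autocorrelation function (same assumptions as above; for tau_exp > 0 the linear tail attached at the window is drawn as well):

    import numpy as np
    import pyerrors as pe

    obs = pe.Obs([np.random.normal(1.0, 0.1, 500)], ['A'])
    obs.gamma_method(tau_exp=5)
    obs.plot_rho()              # window line plus linear tail for tau_exp > 0
    obs.plot_rho(save='rho')    # writes 'rho_0'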
@@ -3438,27 +3441,27 @@ saves the figure to a file named 'save' if.
-
563    def plot_rep_dist(self):
-564        """Plot replica distribution for each ensemble with more than one replicum."""
-565        if not hasattr(self, 'e_dvalue'):
-566            raise Exception('Run the gamma method first.')
-567        for e, e_name in enumerate(self.mc_names):
-568            if len(self.e_content[e_name]) == 1:
-569                print('No replica distribution for a single replicum (', e_name, ')')
-570                continue
-571            r_length = []
-572            sub_r_mean = 0
-573            for r, r_name in enumerate(self.e_content[e_name]):
-574                r_length.append(len(self.deltas[r_name]))
-575                sub_r_mean += self.shape[r_name] * self.r_values[r_name]
-576            e_N = np.sum(r_length)
-577            sub_r_mean /= e_N
-578            arr = np.zeros(len(self.e_content[e_name]))
-579            for r, r_name in enumerate(self.e_content[e_name]):
-580                arr[r] = (self.r_values[r_name] - sub_r_mean) / (self.e_dvalue[e_name] * np.sqrt(e_N / self.shape[r_name] - 1))
-581            plt.hist(arr, rwidth=0.8, bins=len(self.e_content[e_name]))
-582            plt.title('Replica distribution ' + e_name + ' (mean=0, var=1)')
-583            plt.draw()
+            
566    def plot_rep_dist(self):
+567        """Plot replica distribution for each ensemble with more than one replicum."""
+568        if not hasattr(self, 'e_dvalue'):
+569            raise Exception('Run the gamma method first.')
+570        for e, e_name in enumerate(self.mc_names):
+571            if len(self.e_content[e_name]) == 1:
+572                print('No replica distribution for a single replicum (', e_name, ')')
+573                continue
+574            r_length = []
+575            sub_r_mean = 0
+576            for r, r_name in enumerate(self.e_content[e_name]):
+577                r_length.append(len(self.deltas[r_name]))
+578                sub_r_mean += self.shape[r_name] * self.r_values[r_name]
+579            e_N = np.sum(r_length)
+580            sub_r_mean /= e_N
+581            arr = np.zeros(len(self.e_content[e_name]))
+582            for r, r_name in enumerate(self.e_content[e_name]):
+583                arr[r] = (self.r_values[r_name] - sub_r_mean) / (self.e_dvalue[e_name] * np.sqrt(e_N / self.shape[r_name] - 1))
+584            plt.hist(arr, rwidth=0.8, bins=len(self.e_content[e_name]))
+585            plt.title('Replica distribution ' + e_name + ' (mean=0, var=1)')
+586            plt.draw()
 
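A sketch with two replica; pyerrors separates ensemble and replicum in the name with '|' (names and data here are made up):

    import numpy as np
    import pyerrors as pe

    obs = pe.Obs([np.random.normal(1.0, 0.1, 500),
                  np.random.normal(1.0, 0.1, 400)], ['A|r01', 'A|r02'])
    obs.gamma_method()
    obs.plot_rep_dist()   # histogram of normalized per-replicum means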
@@ -3478,37 +3481,37 @@ saves the figure to a file named 'save' if.
-
585    def plot_history(self, expand=True):
-586        """Plot derived Monte Carlo history for each ensemble.
-587
-588        Parameters
-589        ----------
-590        expand : bool
-591            show expanded history for irregular Monte Carlo chains (default: True).
-592        """
-593        for e, e_name in enumerate(self.mc_names):
-594            plt.figure()
-595            r_length = []
-596            tmp = []
-597            tmp_expanded = []
-598            for r, r_name in enumerate(self.e_content[e_name]):
-599                tmp.append(self.deltas[r_name] + self.r_values[r_name])
-600                if expand:
-601                    tmp_expanded.append(_expand_deltas(self.deltas[r_name], list(self.idl[r_name]), self.shape[r_name]) + self.r_values[r_name])
-602                    r_length.append(len(tmp_expanded[-1]))
-603                else:
-604                    r_length.append(len(tmp[-1]))
-605            e_N = np.sum(r_length)
-606            x = np.arange(e_N)
-607            y_test = np.concatenate(tmp, axis=0)
-608            if expand:
-609                y = np.concatenate(tmp_expanded, axis=0)
-610            else:
-611                y = y_test
-612            plt.errorbar(x, y, fmt='.', markersize=3)
-613            plt.xlim(-0.5, e_N - 0.5)
-614            plt.title(e_name + f'\nskew: {skew(y_test):.3f} (p={skewtest(y_test).pvalue:.3f}), kurtosis: {kurtosis(y_test):.3f} (p={kurtosistest(y_test).pvalue:.3f})')
-615            plt.draw()
+            
588    def plot_history(self, expand=True):
+589        """Plot derived Monte Carlo history for each ensemble.
+590
+591        Parameters
+592        ----------
+593        expand : bool
+594            show expanded history for irregular Monte Carlo chains (default: True).
+595        """
+596        for e, e_name in enumerate(self.mc_names):
+597            plt.figure()
+598            r_length = []
+599            tmp = []
+600            tmp_expanded = []
+601            for r, r_name in enumerate(self.e_content[e_name]):
+602                tmp.append(self.deltas[r_name] + self.r_values[r_name])
+603                if expand:
+604                    tmp_expanded.append(_expand_deltas(self.deltas[r_name], list(self.idl[r_name]), self.shape[r_name]) + self.r_values[r_name])
+605                    r_length.append(len(tmp_expanded[-1]))
+606                else:
+607                    r_length.append(len(tmp[-1]))
+608            e_N = np.sum(r_length)
+609            x = np.arange(e_N)
+610            y_test = np.concatenate(tmp, axis=0)
+611            if expand:
+612                y = np.concatenate(tmp_expanded, axis=0)
+613            else:
+614                y = y_test
+615            plt.errorbar(x, y, fmt='.', markersize=3)
+616            plt.xlim(-0.5, e_N - 0.5)
+617            plt.title(e_name + f'\nskew: {skew(y_test):.3f} (p={skewtest(y_test).pvalue:.3f}), kurtosis: {kurtosis(y_test):.3f} (p={kurtosistest(y_test).pvalue:.3f})')
+618            plt.draw()
 
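Sketch for a gapped Monte Carlo chain, where expand=True pads the missing configurations via _expand_deltas as above (synthetic data, made-up ensemble name):

    import numpy as np
    import pyerrors as pe

    obs = pe.Obs([np.random.normal(1.0, 0.1, 500)], ['A'],
                 idl=[range(0, 1000, 2)])   # every second configuration only
    obs.plot_history()              # expanded history, gaps filled in
    obs.plot_history(expand=False)  # raw samples only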
@@ -3535,29 +3538,29 @@ show expanded history for irregular Monte Carlo chains (default: True).
-
617    def plot_piechart(self, save=None):
-618        """Plot a pie chart which shows the fractional contribution of each
-619        ensemble to the error and returns a dictionary containing the fractions.
-620
-621        Parameters
-622        ----------
-623        save : str
-624            saves the figure to a file named 'save' if given.
-625        """
-626        if not hasattr(self, 'e_dvalue'):
-627            raise Exception('Run the gamma method first.')
-628        if np.isclose(0.0, self._dvalue, atol=1e-15):
-629            raise Exception('Error is 0.0')
-630        labels = self.e_names
-631        sizes = [self.e_dvalue[name] ** 2 for name in labels] / self._dvalue ** 2
-632        fig1, ax1 = plt.subplots()
-633        ax1.pie(sizes, labels=labels, startangle=90, normalize=True)
-634        ax1.axis('equal')
-635        plt.draw()
-636        if save:
-637            fig1.savefig(save)
-638
-639        return dict(zip(self.e_names, sizes))
+            
620    def plot_piechart(self, save=None):
+621        """Plot a pie chart which shows the fractional contribution of each
+622        ensemble to the error and returns a dictionary containing the fractions.
+623
+624        Parameters
+625        ----------
+626        save : str
+627            saves the figure to a file named 'save' if given.
+628        """
+629        if not hasattr(self, 'e_dvalue'):
+630            raise Exception('Run the gamma method first.')
+631        if np.isclose(0.0, self._dvalue, atol=1e-15):
+632            raise Exception('Error is 0.0')
+633        labels = self.e_names
+634        sizes = [self.e_dvalue[name] ** 2 for name in labels] / self._dvalue ** 2
+635        fig1, ax1 = plt.subplots()
+636        ax1.pie(sizes, labels=labels, startangle=90, normalize=True)
+637        ax1.axis('equal')
+638        plt.draw()
+639        if save:
+640            fig1.savefig(save)
+641
+642        return dict(zip(self.e_names, sizes))
 
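Sketch with two independent ensembles; the returned dictionary holds the squared-error fractions computed above (names and data are made up):

    import numpy as np
    import pyerrors as pe

    a = pe.Obs([np.random.normal(1.0, 0.1, 500)], ['A'])
    b = pe.Obs([np.random.normal(1.0, 0.2, 500)], ['B'])
    s = a + b
    s.gamma_method()
    fractions = s.plot_piechart()
    print(fractions)   # e.g. {'A': 0.2, 'B': 0.8}, summing to 1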
@@ -3585,34 +3588,34 @@ saves the figure to a file named 'save' if.
-
641    def dump(self, filename, datatype="json.gz", description="", **kwargs):
-642        """Dump the Obs to a file 'filename' of chosen format.
-643
-644        Parameters
-645        ----------
-646        filename : str
-647            name of the file to be saved.
-648        datatype : str
-649            Format of the exported file. Supported formats include
-650            "json.gz" and "pickle"
-651        description : str
-652            Description for output file, only relevant for json.gz format.
-653        path : str
-654            specifies a custom path for the file (default '.')
-655        """
-656        if 'path' in kwargs:
-657            file_name = kwargs.get('path') + '/' + filename
-658        else:
-659            file_name = filename
-660
-661        if datatype == "json.gz":
-662            from .input.json import dump_to_json
-663            dump_to_json([self], file_name, description=description)
-664        elif datatype == "pickle":
-665            with open(file_name + '.p', 'wb') as fb:
-666                pickle.dump(self, fb)
-667        else:
-668            raise Exception("Unknown datatype " + str(datatype))
+            
644    def dump(self, filename, datatype="json.gz", description="", **kwargs):
+645        """Dump the Obs to a file 'filename' of chosen format.
+646
+647        Parameters
+648        ----------
+649        filename : str
+650            name of the file to be saved.
+651        datatype : str
+652            Format of the exported file. Supported formats include
+653            "json.gz" and "pickle"
+654        description : str
+655            Description for output file, only relevant for json.gz format.
+656        path : str
+657            specifies a custom path for the file (default '.')
+658        """
+659        if 'path' in kwargs:
+660            file_name = kwargs.get('path') + '/' + filename
+661        else:
+662            file_name = filename
+663
+664        if datatype == "json.gz":
+665            from .input.json import dump_to_json
+666            dump_to_json([self], file_name, description=description)
+667        elif datatype == "pickle":
+668            with open(file_name + '.p', 'wb') as fb:
+669                pickle.dump(self, fb)
+670        else:
+671            raise Exception("Unknown datatype " + str(datatype))
 
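File-naming sketch following the two branches above; reloading the json.gz file via pyerrors' json input module is an assumption here (load_json is not shown in this hunk):

    import numpy as np
    import pyerrors as pe

    obs = pe.Obs([np.random.normal(1.0, 0.1, 500)], ['A'])
    obs.dump('my_obs')                     # -> my_obs.json.gz
    obs.dump('my_obs', datatype='pickle')  # -> my_obs.p
    obs.dump('my_obs', path='results')     # -> results/my_obs.json.gz (directory must exist)
    reloaded = pe.input.json.load_json('my_obs')  # assumed reload path for json.gz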
@@ -3646,31 +3649,31 @@ specifies a custom path for the file (default '.')
-
670    def export_jackknife(self):
-671        """Export jackknife samples from the Obs
-672
-673        Returns
-674        -------
-675        numpy.ndarray
-676            Returns a numpy array of length N + 1 where N is the number of samples
-677            for the given ensemble and replicum. The zeroth entry of the array contains
-678            the mean value of the Obs, entries 1 to N contain the N jackknife samples
-679            derived from the Obs. The current implementation only works for observables
-680            defined on exactly one ensemble and replicum. The derived jackknife samples
-681            should agree with samples from a full jackknife analysis up to O(1/N).
-682        """
-683
-684        if len(self.names) != 1:
-685            raise Exception("'export_jackknife' is only implemented for Obs defined on one ensemble and replicum.")
+            
673    def export_jackknife(self):
+674        """Export jackknife samples from the Obs
+675
+676        Returns
+677        -------
+678        numpy.ndarray
+679            Returns a numpy array of length N + 1 where N is the number of samples
+680            for the given ensemble and replicum. The zeroth entry of the array contains
+681            the mean value of the Obs, entries 1 to N contain the N jackknife samples
+682            derived from the Obs. The current implementation only works for observables
+683            defined on exactly one ensemble and replicum. The derived jackknife samples
+684            should agree with samples from a full jackknife analysis up to O(1/N).
+685        """
 686
-687        name = self.names[0]
-688        full_data = self.deltas[name] + self.r_values[name]
-689        n = full_data.size
-690        mean = self.value
-691        tmp_jacks = np.zeros(n + 1)
-692        tmp_jacks[0] = mean
-693        tmp_jacks[1:] = (n * mean - full_data) / (n - 1)
-694        return tmp_jacks
+687        if len(self.names) != 1:
+688            raise Exception("'export_jackknife' is only implemented for Obs defined on one ensemble and replicum.")
+689
+690        name = self.names[0]
+691        full_data = self.deltas[name] + self.r_values[name]
+692        n = full_data.size
+693        mean = self.value
+694        tmp_jacks = np.zeros(n + 1)
+695        tmp_jacks[0] = mean
+696        tmp_jacks[1:] = (n * mean - full_data) / (n - 1)
+697        return tmp_jacks
 
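Consistency sketch: for uncorrelated data the naive jackknife error built from the exported samples should be close to the gamma-method error (standard jackknife variance formula assumed, ignoring autocorrelations):

    import numpy as np
    import pyerrors as pe

    obs = pe.Obs([np.random.normal(1.0, 0.1, 500)], ['A'])
    jacks = obs.export_jackknife()
    n = len(jacks) - 1
    # naive jackknife error estimate from the N samples in jacks[1:]
    jack_err = np.sqrt((n - 1) * np.mean((jacks[1:] - np.mean(jacks[1:])) ** 2))
    obs.gamma_method()
    print(jack_err, obs.dvalue)   # comparable for uncorrelated samples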
@@ -3701,8 +3704,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
822    def sqrt(self):
-823        return derived_observable(lambda x, **kwargs: np.sqrt(x[0]), [self], man_grad=[1 / 2 / np.sqrt(self.value)])
+            
825    def sqrt(self):
+826        return derived_observable(lambda x, **kwargs: np.sqrt(x[0]), [self], man_grad=[1 / 2 / np.sqrt(self.value)])
 
@@ -3720,8 +3723,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
825    def log(self):
-826        return derived_observable(lambda x, **kwargs: np.log(x[0]), [self], man_grad=[1 / self.value])
+            
828    def log(self):
+829        return derived_observable(lambda x, **kwargs: np.log(x[0]), [self], man_grad=[1 / self.value])
 
@@ -3739,8 +3742,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
828    def exp(self):
-829        return derived_observable(lambda x, **kwargs: np.exp(x[0]), [self], man_grad=[np.exp(self.value)])
+            
831    def exp(self):
+832        return derived_observable(lambda x, **kwargs: np.exp(x[0]), [self], man_grad=[np.exp(self.value)])
 
@@ -3758,8 +3761,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
831    def sin(self):
-832        return derived_observable(lambda x, **kwargs: np.sin(x[0]), [self], man_grad=[np.cos(self.value)])
+            
834    def sin(self):
+835        return derived_observable(lambda x, **kwargs: np.sin(x[0]), [self], man_grad=[np.cos(self.value)])
 
@@ -3777,8 +3780,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
834    def cos(self):
-835        return derived_observable(lambda x, **kwargs: np.cos(x[0]), [self], man_grad=[-np.sin(self.value)])
+            
837    def cos(self):
+838        return derived_observable(lambda x, **kwargs: np.cos(x[0]), [self], man_grad=[-np.sin(self.value)])
 
@@ -3796,8 +3799,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
837    def tan(self):
-838        return derived_observable(lambda x, **kwargs: np.tan(x[0]), [self], man_grad=[1 / np.cos(self.value) ** 2])
+            
840    def tan(self):
+841        return derived_observable(lambda x, **kwargs: np.tan(x[0]), [self], man_grad=[1 / np.cos(self.value) ** 2])
 
@@ -3815,8 +3818,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
840    def arcsin(self):
-841        return derived_observable(lambda x: anp.arcsin(x[0]), [self])
+            
843    def arcsin(self):
+844        return derived_observable(lambda x: anp.arcsin(x[0]), [self])
 
@@ -3834,8 +3837,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
843    def arccos(self):
-844        return derived_observable(lambda x: anp.arccos(x[0]), [self])
+            
846    def arccos(self):
+847        return derived_observable(lambda x: anp.arccos(x[0]), [self])
 
@@ -3853,8 +3856,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
846    def arctan(self):
-847        return derived_observable(lambda x: anp.arctan(x[0]), [self])
+            
849    def arctan(self):
+850        return derived_observable(lambda x: anp.arctan(x[0]), [self])
 
@@ -3872,8 +3875,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
849    def sinh(self):
-850        return derived_observable(lambda x, **kwargs: np.sinh(x[0]), [self], man_grad=[np.cosh(self.value)])
+            
852    def sinh(self):
+853        return derived_observable(lambda x, **kwargs: np.sinh(x[0]), [self], man_grad=[np.cosh(self.value)])
 
@@ -3891,8 +3894,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
852    def cosh(self):
-853        return derived_observable(lambda x, **kwargs: np.cosh(x[0]), [self], man_grad=[np.sinh(self.value)])
+            
855    def cosh(self):
+856        return derived_observable(lambda x, **kwargs: np.cosh(x[0]), [self], man_grad=[np.sinh(self.value)])
 
@@ -3910,8 +3913,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
855    def tanh(self):
-856        return derived_observable(lambda x, **kwargs: np.tanh(x[0]), [self], man_grad=[1 / np.cosh(self.value) ** 2])
+            
858    def tanh(self):
+859        return derived_observable(lambda x, **kwargs: np.tanh(x[0]), [self], man_grad=[1 / np.cosh(self.value) ** 2])
 
@@ -3929,8 +3932,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
858    def arcsinh(self):
-859        return derived_observable(lambda x: anp.arcsinh(x[0]), [self])
+            
861    def arcsinh(self):
+862        return derived_observable(lambda x: anp.arcsinh(x[0]), [self])
 
@@ -3948,8 +3951,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
861    def arccosh(self):
-862        return derived_observable(lambda x: anp.arccosh(x[0]), [self])
+            
864    def arccosh(self):
+865        return derived_observable(lambda x: anp.arccosh(x[0]), [self])
 
@@ -3967,8 +3970,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
864    def arctanh(self):
-865        return derived_observable(lambda x: anp.arctanh(x[0]), [self])
+            
867    def arctanh(self):
+868        return derived_observable(lambda x: anp.arctanh(x[0]), [self])
 
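These elementary functions compose; applying a numpy ufunc to an Obs should dispatch to the method of the same name, so both spellings below are expected to be equivalent (synthetic data, made-up ensemble name):

    import numpy as np
    import pyerrors as pe

    obs = pe.Obs([np.random.normal(2.0, 0.1, 500)], ['A'])
    r1 = obs.sqrt()
    r2 = np.sqrt(obs)    # dispatches to the sqrt method above
    combo = np.log(obs) + obs.sin()
    combo.gamma_method()
    print(combo)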
@@ -3987,115 +3990,115 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
868class CObs:
-869    """Class for a complex valued observable."""
-870    __slots__ = ['_real', '_imag', 'tag']
-871
-872    def __init__(self, real, imag=0.0):
-873        self._real = real
-874        self._imag = imag
-875        self.tag = None
-876
-877    @property
-878    def real(self):
-879        return self._real
-880
-881    @property
-882    def imag(self):
-883        return self._imag
-884
-885    def gamma_method(self, **kwargs):
-886        """Executes the gamma_method for the real and the imaginary part."""
-887        if isinstance(self.real, Obs):
-888            self.real.gamma_method(**kwargs)
-889        if isinstance(self.imag, Obs):
-890            self.imag.gamma_method(**kwargs)
-891
-892    def is_zero(self):
-893        """Checks whether both real and imaginary part are zero within machine precision."""
-894        return self.real == 0.0 and self.imag == 0.0
-895
-896    def conjugate(self):
-897        return CObs(self.real, -self.imag)
+            
871class CObs:
+872    """Class for a complex valued observable."""
+873    __slots__ = ['_real', '_imag', 'tag']
+874
+875    def __init__(self, real, imag=0.0):
+876        self._real = real
+877        self._imag = imag
+878        self.tag = None
+879
+880    @property
+881    def real(self):
+882        return self._real
+883
+884    @property
+885    def imag(self):
+886        return self._imag
+887
+888    def gamma_method(self, **kwargs):
+889        """Executes the gamma_method for the real and the imaginary part."""
+890        if isinstance(self.real, Obs):
+891            self.real.gamma_method(**kwargs)
+892        if isinstance(self.imag, Obs):
+893            self.imag.gamma_method(**kwargs)
+894
+895    def is_zero(self):
+896        """Checks whether both real and imaginary part are zero within machine precision."""
+897        return self.real == 0.0 and self.imag == 0.0
 898
-899    def __add__(self, other):
-900        if isinstance(other, np.ndarray):
-901            return other + self
-902        elif hasattr(other, 'real') and hasattr(other, 'imag'):
-903            return CObs(self.real + other.real,
-904                        self.imag + other.imag)
-905        else:
-906            return CObs(self.real + other, self.imag)
-907
-908    def __radd__(self, y):
-909        return self + y
+899    def conjugate(self):
+900        return CObs(self.real, -self.imag)
+901
+902    def __add__(self, other):
+903        if isinstance(other, np.ndarray):
+904            return other + self
+905        elif hasattr(other, 'real') and hasattr(other, 'imag'):
+906            return CObs(self.real + other.real,
+907                        self.imag + other.imag)
+908        else:
+909            return CObs(self.real + other, self.imag)
 910
-911    def __sub__(self, other):
-912        if isinstance(other, np.ndarray):
-913            return -1 * (other - self)
-914        elif hasattr(other, 'real') and hasattr(other, 'imag'):
-915            return CObs(self.real - other.real, self.imag - other.imag)
-916        else:
-917            return CObs(self.real - other, self.imag)
-918
-919    def __rsub__(self, other):
-920        return -1 * (self - other)
+911    def __radd__(self, y):
+912        return self + y
+913
+914    def __sub__(self, other):
+915        if isinstance(other, np.ndarray):
+916            return -1 * (other - self)
+917        elif hasattr(other, 'real') and hasattr(other, 'imag'):
+918            return CObs(self.real - other.real, self.imag - other.imag)
+919        else:
+920            return CObs(self.real - other, self.imag)
 921
-922    def __mul__(self, other):
-923        if isinstance(other, np.ndarray):
-924            return other * self
-925        elif hasattr(other, 'real') and hasattr(other, 'imag'):
-926            if all(isinstance(i, Obs) for i in [self.real, self.imag, other.real, other.imag]):
-927                return CObs(derived_observable(lambda x, **kwargs: x[0] * x[1] - x[2] * x[3],
-928                                               [self.real, other.real, self.imag, other.imag],
-929                                               man_grad=[other.real.value, self.real.value, -other.imag.value, -self.imag.value]),
-930                            derived_observable(lambda x, **kwargs: x[2] * x[1] + x[0] * x[3],
+922    def __rsub__(self, other):
+923        return -1 * (self - other)
+924
+925    def __mul__(self, other):
+926        if isinstance(other, np.ndarray):
+927            return other * self
+928        elif hasattr(other, 'real') and hasattr(other, 'imag'):
+929            if all(isinstance(i, Obs) for i in [self.real, self.imag, other.real, other.imag]):
+930                return CObs(derived_observable(lambda x, **kwargs: x[0] * x[1] - x[2] * x[3],
 931                                               [self.real, other.real, self.imag, other.imag],
-932                                               man_grad=[other.imag.value, self.imag.value, other.real.value, self.real.value]))
-933            elif getattr(other, 'imag', 0) != 0:
-934                return CObs(self.real * other.real - self.imag * other.imag,
-935                            self.imag * other.real + self.real * other.imag)
-936            else:
-937                return CObs(self.real * other.real, self.imag * other.real)
-938        else:
-939            return CObs(self.real * other, self.imag * other)
-940
-941    def __rmul__(self, other):
-942        return self * other
+932                                               man_grad=[other.real.value, self.real.value, -other.imag.value, -self.imag.value]),
+933                            derived_observable(lambda x, **kwargs: x[2] * x[1] + x[0] * x[3],
+934                                               [self.real, other.real, self.imag, other.imag],
+935                                               man_grad=[other.imag.value, self.imag.value, other.real.value, self.real.value]))
+936            elif getattr(other, 'imag', 0) != 0:
+937                return CObs(self.real * other.real - self.imag * other.imag,
+938                            self.imag * other.real + self.real * other.imag)
+939            else:
+940                return CObs(self.real * other.real, self.imag * other.real)
+941        else:
+942            return CObs(self.real * other, self.imag * other)
 943
-944    def __truediv__(self, other):
-945        if isinstance(other, np.ndarray):
-946            return 1 / (other / self)
-947        elif hasattr(other, 'real') and hasattr(other, 'imag'):
-948            r = other.real ** 2 + other.imag ** 2
-949            return CObs((self.real * other.real + self.imag * other.imag) / r, (self.imag * other.real - self.real * other.imag) / r)
-950        else:
-951            return CObs(self.real / other, self.imag / other)
-952
-953    def __rtruediv__(self, other):
-954        r = self.real ** 2 + self.imag ** 2
-955        if hasattr(other, 'real') and hasattr(other, 'imag'):
-956            return CObs((self.real * other.real + self.imag * other.imag) / r, (self.real * other.imag - self.imag * other.real) / r)
-957        else:
-958            return CObs(self.real * other / r, -self.imag * other / r)
-959
-960    def __abs__(self):
-961        return np.sqrt(self.real**2 + self.imag**2)
+944    def __rmul__(self, other):
+945        return self * other
+946
+947    def __truediv__(self, other):
+948        if isinstance(other, np.ndarray):
+949            return 1 / (other / self)
+950        elif hasattr(other, 'real') and hasattr(other, 'imag'):
+951            r = other.real ** 2 + other.imag ** 2
+952            return CObs((self.real * other.real + self.imag * other.imag) / r, (self.imag * other.real - self.real * other.imag) / r)
+953        else:
+954            return CObs(self.real / other, self.imag / other)
+955
+956    def __rtruediv__(self, other):
+957        r = self.real ** 2 + self.imag ** 2
+958        if hasattr(other, 'real') and hasattr(other, 'imag'):
+959            return CObs((self.real * other.real + self.imag * other.imag) / r, (self.real * other.imag - self.imag * other.real) / r)
+960        else:
+961            return CObs(self.real * other / r, -self.imag * other / r)
 962
-963    def __pos__(self):
-964        return self
+963    def __abs__(self):
+964        return np.sqrt(self.real**2 + self.imag**2)
 965
-966    def __neg__(self):
-967        return -1 * self
+966    def __pos__(self):
+967        return self
 968
-969    def __eq__(self, other):
-970        return self.real == other.real and self.imag == other.imag
+969    def __neg__(self):
+970        return -1 * self
 971
-972    def __str__(self):
-973        return '(' + str(self.real) + int(self.imag >= 0.0) * '+' + str(self.imag) + 'j)'
+972    def __eq__(self, other):
+973        return self.real == other.real and self.imag == other.imag
 974
-975    def __repr__(self):
-976        return 'CObs[' + str(self) + ']'
+975    def __str__(self):
+976        return '(' + str(self.real) + int(self.imag >= 0.0) * '+' + str(self.imag) + 'j)'
+977
+978    def __repr__(self):
+979        return 'CObs[' + str(self) + ']'
 
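A sketch of complex arithmetic with CObs; multiplying by the conjugate yields |c|^2 with a numerically vanishing imaginary part (synthetic data, made-up ensemble name):

    import numpy as np
    import pyerrors as pe

    re = pe.Obs([np.random.normal(1.0, 0.1, 500)], ['A'])
    im = pe.Obs([np.random.normal(0.5, 0.1, 500)], ['A'])
    c = pe.CObs(re, im)
    prod = c * c.conjugate()   # uses the all-Obs branch of __mul__ above
    prod.gamma_method()
    print(prod.real, abs(c))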
@@ -4113,10 +4116,10 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
872    def __init__(self, real, imag=0.0):
-873        self._real = real
-874        self._imag = imag
-875        self.tag = None
+            
875    def __init__(self, real, imag=0.0):
+876        self._real = real
+877        self._imag = imag
+878        self.tag = None
 
@@ -4134,12 +4137,12 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
885    def gamma_method(self, **kwargs):
-886        """Executes the gamma_method for the real and the imaginary part."""
-887        if isinstance(self.real, Obs):
-888            self.real.gamma_method(**kwargs)
-889        if isinstance(self.imag, Obs):
-890            self.imag.gamma_method(**kwargs)
+            
888    def gamma_method(self, **kwargs):
+889        """Executes the gamma_method for the real and the imaginary part."""
+890        if isinstance(self.real, Obs):
+891            self.real.gamma_method(**kwargs)
+892        if isinstance(self.imag, Obs):
+893            self.imag.gamma_method(**kwargs)
 
@@ -4159,9 +4162,9 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
892    def is_zero(self):
-893        """Checks whether both real and imaginary part are zero within machine precision."""
-894        return self.real == 0.0 and self.imag == 0.0
+            
895    def is_zero(self):
+896        """Checks whether both real and imaginary part are zero within machine precision."""
+897        return self.real == 0.0 and self.imag == 0.0
 
@@ -4181,8 +4184,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
896    def conjugate(self):
-897        return CObs(self.real, -self.imag)
+            
899    def conjugate(self):
+900        return CObs(self.real, -self.imag)
 
@@ -4201,184 +4204,184 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
1130def derived_observable(func, data, array_mode=False, **kwargs):
-1131    """Construct a derived Obs according to func(data, **kwargs) using automatic differentiation.
-1132
-1133    Parameters
-1134    ----------
-1135    func : object
-1136        arbitrary function of the form func(data, **kwargs). For the
-1137        automatic differentiation to work, all numpy functions have to have
-1138        the autograd wrapper (use 'import autograd.numpy as anp').
-1139    data : list
-1140        list of Obs, e.g. [obs1, obs2, obs3].
-1141    num_grad : bool
-1142        if True, numerical derivatives are used instead of autograd
-1143        (default False). To control the numerical differentiation the
-1144        kwargs of numdifftools.step_generators.MaxStepGenerator
-1145        can be used.
-1146    man_grad : list
-1147        manually supply a list or an array which contains the jacobian
-1148        of func. Use cautiously, supplying the wrong derivative will
-1149        not be intercepted.
-1150
-1151    Notes
-1152    -----
-1153    For simple mathematical operations it can be practical to use anonymous
-1154    functions. For the ratio of two observables one can e.g. use
-1155
-1156    new_obs = derived_observable(lambda x: x[0] / x[1], [obs1, obs2])
-1157    """
+            
1133def derived_observable(func, data, array_mode=False, **kwargs):
+1134    """Construct a derived Obs according to func(data, **kwargs) using automatic differentiation.
+1135
+1136    Parameters
+1137    ----------
+1138    func : object
+1139        arbitrary function of the form func(data, **kwargs). For the
+1140        automatic differentiation to work, all numpy functions have to have
+1141        the autograd wrapper (use 'import autograd.numpy as anp').
+1142    data : list
+1143        list of Obs, e.g. [obs1, obs2, obs3].
+1144    num_grad : bool
+1145        if True, numerical derivatives are used instead of autograd
+1146        (default False). To control the numerical differentiation the
+1147        kwargs of numdifftools.step_generators.MaxStepGenerator
+1148        can be used.
+1149    man_grad : list
+1150        manually supply a list or an array which contains the jacobian
+1151        of func. Use cautiously, supplying the wrong derivative will
+1152        not be intercepted.
+1153
+1154    Notes
+1155    -----
+1156    For simple mathematical operations it can be practical to use anonymous
+1157    functions. For the ratio of two observables one can e.g. use
 1158
-1159    data = np.asarray(data)
-1160    raveled_data = data.ravel()
+1159    new_obs = derived_observable(lambda x: x[0] / x[1], [obs1, obs2])
+1160    """
 1161
-1162    # Workaround for matrix operations containing non-Obs data
-1163    if not all(isinstance(x, Obs) for x in raveled_data):
-1164        for i in range(len(raveled_data)):
-1165            if isinstance(raveled_data[i], (int, float)):
-1166                raveled_data[i] = cov_Obs(raveled_data[i], 0.0, "###dummy_covobs###")
-1167
-1168    allcov = {}
-1169    for o in raveled_data:
-1170        for name in o.cov_names:
-1171            if name in allcov:
-1172                if not np.allclose(allcov[name], o.covobs[name].cov):
-1173                    raise Exception('Inconsistent covariance matrices for %s!' % (name))
-1174            else:
-1175                allcov[name] = o.covobs[name].cov
-1176
-1177    n_obs = len(raveled_data)
-1178    new_names = sorted(set([y for x in [o.names for o in raveled_data] for y in x]))
-1179    new_cov_names = sorted(set([y for x in [o.cov_names for o in raveled_data] for y in x]))
-1180    new_sample_names = sorted(set(new_names) - set(new_cov_names))
-1181
-1182    is_merged = {name: (len(list(filter(lambda o: o.is_merged.get(name, False) is True, raveled_data))) > 0) for name in new_sample_names}
-1183    reweighted = len(list(filter(lambda o: o.reweighted is True, raveled_data))) > 0
+1162    data = np.asarray(data)
+1163    raveled_data = data.ravel()
+1164
+1165    # Workaround for matrix operations containing non-Obs data
+1166    if not all(isinstance(x, Obs) for x in raveled_data):
+1167        for i in range(len(raveled_data)):
+1168            if isinstance(raveled_data[i], (int, float)):
+1169                raveled_data[i] = cov_Obs(raveled_data[i], 0.0, "###dummy_covobs###")
+1170
+1171    allcov = {}
+1172    for o in raveled_data:
+1173        for name in o.cov_names:
+1174            if name in allcov:
+1175                if not np.allclose(allcov[name], o.covobs[name].cov):
+1176                    raise Exception('Inconsistent covariance matrices for %s!' % (name))
+1177            else:
+1178                allcov[name] = o.covobs[name].cov
+1179
+1180    n_obs = len(raveled_data)
+1181    new_names = sorted(set([y for x in [o.names for o in raveled_data] for y in x]))
+1182    new_cov_names = sorted(set([y for x in [o.cov_names for o in raveled_data] for y in x]))
+1183    new_sample_names = sorted(set(new_names) - set(new_cov_names))
 1184
-1185    if data.ndim == 1:
-1186        values = np.array([o.value for o in data])
-1187    else:
-1188        values = np.vectorize(lambda x: x.value)(data)
-1189
-1190    new_values = func(values, **kwargs)
-1191
-1192    multi = int(isinstance(new_values, np.ndarray))
-1193
-1194    new_r_values = {}
-1195    new_idl_d = {}
-1196    for name in new_sample_names:
-1197        idl = []
-1198        tmp_values = np.zeros(n_obs)
-1199        for i, item in enumerate(raveled_data):
-1200            tmp_values[i] = item.r_values.get(name, item.value)
-1201            tmp_idl = item.idl.get(name)
-1202            if tmp_idl is not None:
-1203                idl.append(tmp_idl)
-1204        if multi > 0:
-1205            tmp_values = np.array(tmp_values).reshape(data.shape)
-1206        new_r_values[name] = func(tmp_values, **kwargs)
-1207        new_idl_d[name] = _merge_idx(idl)
-1208        if not is_merged[name]:
-1209            is_merged[name] = (1 != len(set([len(idx) for idx in [*idl, new_idl_d[name]]])))
-1210
-1211    if 'man_grad' in kwargs:
-1212        deriv = np.asarray(kwargs.get('man_grad'))
-1213        if new_values.shape + data.shape != deriv.shape:
-1214            raise Exception('Manual derivative does not have correct shape.')
-1215    elif kwargs.get('num_grad') is True:
-1216        if multi > 0:
-1217            raise Exception('Multi mode currently not supported for numerical derivative')
-1218        options = {
-1219            'base_step': 0.1,
-1220            'step_ratio': 2.5}
-1221        for key in options.keys():
-1222            kwarg = kwargs.get(key)
-1223            if kwarg is not None:
-1224                options[key] = kwarg
-1225        tmp_df = nd.Gradient(func, order=4, **{k: v for k, v in options.items() if v is not None})(values, **kwargs)
-1226        if tmp_df.size == 1:
-1227            deriv = np.array([tmp_df.real])
-1228        else:
-1229            deriv = tmp_df.real
-1230    else:
-1231        deriv = jacobian(func)(values, **kwargs)
-1232
-1233    final_result = np.zeros(new_values.shape, dtype=object)
-1234
-1235    if array_mode is True:
-1236
-1237        class _Zero_grad():
-1238            def __init__(self, N):
-1239                self.grad = np.zeros((N, 1))
-1240
-1241        new_covobs_lengths = dict(set([y for x in [[(n, o.covobs[n].N) for n in o.cov_names] for o in raveled_data] for y in x]))
-1242        d_extracted = {}
-1243        g_extracted = {}
-1244        for name in new_sample_names:
-1245            d_extracted[name] = []
-1246            ens_length = len(new_idl_d[name])
-1247            for i_dat, dat in enumerate(data):
-1248                d_extracted[name].append(np.array([_expand_deltas_for_merge(o.deltas.get(name, np.zeros(ens_length)), o.idl.get(name, new_idl_d[name]), o.shape.get(name, ens_length), new_idl_d[name]) for o in dat.reshape(np.prod(dat.shape))]).reshape(dat.shape + (ens_length, )))
-1249        for name in new_cov_names:
-1250            g_extracted[name] = []
-1251            zero_grad = _Zero_grad(new_covobs_lengths[name])
-1252            for i_dat, dat in enumerate(data):
-1253                g_extracted[name].append(np.array([o.covobs.get(name, zero_grad).grad for o in dat.reshape(np.prod(dat.shape))]).reshape(dat.shape + (new_covobs_lengths[name], 1)))
-1254
-1255    for i_val, new_val in np.ndenumerate(new_values):
-1256        new_deltas = {}
-1257        new_grad = {}
-1258        if array_mode is True:
-1259            for name in new_sample_names:
-1260                ens_length = d_extracted[name][0].shape[-1]
-1261                new_deltas[name] = np.zeros(ens_length)
-1262                for i_dat, dat in enumerate(d_extracted[name]):
-1263                    new_deltas[name] += np.tensordot(deriv[i_val + (i_dat, )], dat)
-1264            for name in new_cov_names:
-1265                new_grad[name] = 0
-1266                for i_dat, dat in enumerate(g_extracted[name]):
-1267                    new_grad[name] += np.tensordot(deriv[i_val + (i_dat, )], dat)
-1268        else:
-1269            for j_obs, obs in np.ndenumerate(data):
-1270                for name in obs.names:
-1271                    if name in obs.cov_names:
-1272                        new_grad[name] = new_grad.get(name, 0) + deriv[i_val + j_obs] * obs.covobs[name].grad
-1273                    else:
-1274                        new_deltas[name] = new_deltas.get(name, 0) + deriv[i_val + j_obs] * _expand_deltas_for_merge(obs.deltas[name], obs.idl[name], obs.shape[name], new_idl_d[name])
-1275
-1276        new_covobs = {name: Covobs(0, allcov[name], name, grad=new_grad[name]) for name in new_grad}
-1277
-1278        if not set(new_covobs.keys()).isdisjoint(new_deltas.keys()):
-1279            raise Exception('The same name has been used for deltas and covobs!')
-1280        new_samples = []
-1281        new_means = []
-1282        new_idl = []
-1283        new_names_obs = []
-1284        for name in new_names:
-1285            if name not in new_covobs:
-1286                if is_merged[name]:
-1287                    filtered_deltas, filtered_idl_d = _filter_zeroes(new_deltas[name], new_idl_d[name])
-1288                else:
-1289                    filtered_deltas = new_deltas[name]
-1290                    filtered_idl_d = new_idl_d[name]
-1291
-1292                new_samples.append(filtered_deltas)
-1293                new_idl.append(filtered_idl_d)
-1294                new_means.append(new_r_values[name][i_val])
-1295                new_names_obs.append(name)
-1296        final_result[i_val] = Obs(new_samples, new_names_obs, means=new_means, idl=new_idl)
-1297        for name in new_covobs:
-1298            final_result[i_val].names.append(name)
-1299        final_result[i_val]._covobs = new_covobs
-1300        final_result[i_val]._value = new_val
-1301        final_result[i_val].is_merged = is_merged
-1302        final_result[i_val].reweighted = reweighted
-1303
-1304    if multi == 0:
-1305        final_result = final_result.item()
+1185    is_merged = {name: (len(list(filter(lambda o: o.is_merged.get(name, False) is True, raveled_data))) > 0) for name in new_sample_names}
+1186    reweighted = len(list(filter(lambda o: o.reweighted is True, raveled_data))) > 0
+1187
+1188    if data.ndim == 1:
+1189        values = np.array([o.value for o in data])
+1190    else:
+1191        values = np.vectorize(lambda x: x.value)(data)
+1192
+1193    new_values = func(values, **kwargs)
+1194
+1195    multi = int(isinstance(new_values, np.ndarray))
+1196
+1197    new_r_values = {}
+1198    new_idl_d = {}
+1199    for name in new_sample_names:
+1200        idl = []
+1201        tmp_values = np.zeros(n_obs)
+1202        for i, item in enumerate(raveled_data):
+1203            tmp_values[i] = item.r_values.get(name, item.value)
+1204            tmp_idl = item.idl.get(name)
+1205            if tmp_idl is not None:
+1206                idl.append(tmp_idl)
+1207        if multi > 0:
+1208            tmp_values = np.array(tmp_values).reshape(data.shape)
+1209        new_r_values[name] = func(tmp_values, **kwargs)
+1210        new_idl_d[name] = _merge_idx(idl)
+1211        if not is_merged[name]:
+1212            is_merged[name] = (1 != len(set([len(idx) for idx in [*idl, new_idl_d[name]]])))
+1213
+1214    if 'man_grad' in kwargs:
+1215        deriv = np.asarray(kwargs.get('man_grad'))
+1216        if new_values.shape + data.shape != deriv.shape:
+1217            raise Exception('Manual derivative does not have correct shape.')
+1218    elif kwargs.get('num_grad') is True:
+1219        if multi > 0:
+1220            raise Exception('Multi mode currently not supported for numerical derivative')
+1221        options = {
+1222            'base_step': 0.1,
+1223            'step_ratio': 2.5}
+1224        for key in options.keys():
+1225            kwarg = kwargs.get(key)
+1226            if kwarg is not None:
+1227                options[key] = kwarg
+1228        tmp_df = nd.Gradient(func, order=4, **{k: v for k, v in options.items() if v is not None})(values, **kwargs)
+1229        if tmp_df.size == 1:
+1230            deriv = np.array([tmp_df.real])
+1231        else:
+1232            deriv = tmp_df.real
+1233    else:
+1234        deriv = jacobian(func)(values, **kwargs)
+1235
+1236    final_result = np.zeros(new_values.shape, dtype=object)
+1237
+1238    if array_mode is True:
+1239
+1240        class _Zero_grad():
+1241            def __init__(self, N):
+1242                self.grad = np.zeros((N, 1))
+1243
+1244        new_covobs_lengths = dict(set([y for x in [[(n, o.covobs[n].N) for n in o.cov_names] for o in raveled_data] for y in x]))
+1245        d_extracted = {}
+1246        g_extracted = {}
+1247        for name in new_sample_names:
+1248            d_extracted[name] = []
+1249            ens_length = len(new_idl_d[name])
+1250            for i_dat, dat in enumerate(data):
+1251                d_extracted[name].append(np.array([_expand_deltas_for_merge(o.deltas.get(name, np.zeros(ens_length)), o.idl.get(name, new_idl_d[name]), o.shape.get(name, ens_length), new_idl_d[name]) for o in dat.reshape(np.prod(dat.shape))]).reshape(dat.shape + (ens_length, )))
+1252        for name in new_cov_names:
+1253            g_extracted[name] = []
+1254            zero_grad = _Zero_grad(new_covobs_lengths[name])
+1255            for i_dat, dat in enumerate(data):
+1256                g_extracted[name].append(np.array([o.covobs.get(name, zero_grad).grad for o in dat.reshape(np.prod(dat.shape))]).reshape(dat.shape + (new_covobs_lengths[name], 1)))
+1257
+1258    for i_val, new_val in np.ndenumerate(new_values):
+1259        new_deltas = {}
+1260        new_grad = {}
+1261        if array_mode is True:
+1262            for name in new_sample_names:
+1263                ens_length = d_extracted[name][0].shape[-1]
+1264                new_deltas[name] = np.zeros(ens_length)
+1265                for i_dat, dat in enumerate(d_extracted[name]):
+1266                    new_deltas[name] += np.tensordot(deriv[i_val + (i_dat, )], dat)
+1267            for name in new_cov_names:
+1268                new_grad[name] = 0
+1269                for i_dat, dat in enumerate(g_extracted[name]):
+1270                    new_grad[name] += np.tensordot(deriv[i_val + (i_dat, )], dat)
+1271        else:
+1272            for j_obs, obs in np.ndenumerate(data):
+1273                for name in obs.names:
+1274                    if name in obs.cov_names:
+1275                        new_grad[name] = new_grad.get(name, 0) + deriv[i_val + j_obs] * obs.covobs[name].grad
+1276                    else:
+1277                        new_deltas[name] = new_deltas.get(name, 0) + deriv[i_val + j_obs] * _expand_deltas_for_merge(obs.deltas[name], obs.idl[name], obs.shape[name], new_idl_d[name])
+1278
+1279        new_covobs = {name: Covobs(0, allcov[name], name, grad=new_grad[name]) for name in new_grad}
+1280
+1281        if not set(new_covobs.keys()).isdisjoint(new_deltas.keys()):
+1282            raise Exception('The same name has been used for deltas and covobs!')
+1283        new_samples = []
+1284        new_means = []
+1285        new_idl = []
+1286        new_names_obs = []
+1287        for name in new_names:
+1288            if name not in new_covobs:
+1289                if is_merged[name]:
+1290                    filtered_deltas, filtered_idl_d = _filter_zeroes(new_deltas[name], new_idl_d[name])
+1291                else:
+1292                    filtered_deltas = new_deltas[name]
+1293                    filtered_idl_d = new_idl_d[name]
+1294
+1295                new_samples.append(filtered_deltas)
+1296                new_idl.append(filtered_idl_d)
+1297                new_means.append(new_r_values[name][i_val])
+1298                new_names_obs.append(name)
+1299        final_result[i_val] = Obs(new_samples, new_names_obs, means=new_means, idl=new_idl)
+1300        for name in new_covobs:
+1301            final_result[i_val].names.append(name)
+1302        final_result[i_val]._covobs = new_covobs
+1303        final_result[i_val]._value = new_val
+1304        final_result[i_val].is_merged = is_merged
+1305        final_result[i_val].reweighted = reweighted
 1306
-1307    return final_result
+1307    if multi == 0:
+1308        final_result = final_result.item()
+1309
+1310    return final_result
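A hedged usage sketch with toy data; note that any numpy call inside func has to go through the autograd wrapper for the automatic differentiation to work:

    import numpy as np
    import autograd.numpy as anp  # autograd-wrapped numpy, required inside func
    import pyerrors as pe

    rng = np.random.default_rng(0)
    obs1 = pe.Obs([rng.normal(2.0, 0.2, 1000)], ['ens_A'])  # illustrative ensemble name
    obs2 = pe.Obs([rng.normal(1.0, 0.1, 1000)], ['ens_A'])

    # Ratio of two observables, as in the docstring above
    ratio = pe.derived_observable(lambda x: x[0] / x[1], [obs1, obs2])

    # A non-elementary function: numpy operations must use anp
    log_ratio = pe.derived_observable(lambda x: anp.log(x[0] / x[1]), [obs1, obs2])

    for o in (ratio, log_ratio):
        o.gamma_method()  # error propagation is triggered explicitly
        print(o)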
 
@@ -4425,47 +4428,47 @@ functions. For the ratio of two observables one can e.g. use

-
1344def reweight(weight, obs, **kwargs):
-1345    """Reweight a list of observables.
-1346
-1347    Parameters
-1348    ----------
-1349    weight : Obs
-1350        Reweighting factor. An Observable that has to be defined on a superset of the
-1351        configurations in obs[i].idl for all i.
-1352    obs : list
-1353        list of Obs, e.g. [obs1, obs2, obs3].
-1354    all_configs : bool
-1355        if True, the reweighted observables are normalized by the average of
-1356        the reweighting factor on all configurations in weight.idl and not
-1357        on the configurations in obs[i].idl. Default False.
-1358    """
-1359    result = []
-1360    for i in range(len(obs)):
-1361        if len(obs[i].cov_names):
-1362            raise Exception('Error: Not possible to reweight an Obs that contains covobs!')
-1363        if not set(obs[i].names).issubset(weight.names):
-1364            raise Exception('Error: Ensembles do not fit')
-1365        for name in obs[i].names:
-1366            if not set(obs[i].idl[name]).issubset(weight.idl[name]):
-1367                raise Exception('obs[%d] has to be defined on a subset of the configs in weight.idl[%s]!' % (i, name))
-1368        new_samples = []
-1369        w_deltas = {}
-1370        for name in sorted(obs[i].names):
-1371            w_deltas[name] = _reduce_deltas(weight.deltas[name], weight.idl[name], obs[i].idl[name])
-1372            new_samples.append((w_deltas[name] + weight.r_values[name]) * (obs[i].deltas[name] + obs[i].r_values[name]))
-1373        tmp_obs = Obs(new_samples, sorted(obs[i].names), idl=[obs[i].idl[name] for name in sorted(obs[i].names)])
-1374
-1375        if kwargs.get('all_configs'):
-1376            new_weight = weight
-1377        else:
-1378            new_weight = Obs([w_deltas[name] + weight.r_values[name] for name in sorted(obs[i].names)], sorted(obs[i].names), idl=[obs[i].idl[name] for name in sorted(obs[i].names)])
-1379
-1380        result.append(tmp_obs / new_weight)
-1381        result[-1].reweighted = True
-1382        result[-1].is_merged = obs[i].is_merged
-1383
-1384    return result
+            
1347def reweight(weight, obs, **kwargs):
+1348    """Reweight a list of observables.
+1349
+1350    Parameters
+1351    ----------
+1352    weight : Obs
+1353        Reweighting factor. An Observable that has to be defined on a superset of the
+1354        configurations in obs[i].idl for all i.
+1355    obs : list
+1356        list of Obs, e.g. [obs1, obs2, obs3].
+1357    all_configs : bool
+1358        if True, the reweighted observables are normalized by the average of
+1359        the reweighting factor on all configurations in weight.idl and not
+1360        on the configurations in obs[i].idl. Default False.
+1361    """
+1362    result = []
+1363    for i in range(len(obs)):
+1364        if len(obs[i].cov_names):
+1365            raise Exception('Error: Not possible to reweight an Obs that contains covobs!')
+1366        if not set(obs[i].names).issubset(weight.names):
+1367            raise Exception('Error: Ensembles do not fit')
+1368        for name in obs[i].names:
+1369            if not set(obs[i].idl[name]).issubset(weight.idl[name]):
+1370                raise Exception('obs[%d] has to be defined on a subset of the configs in weight.idl[%s]!' % (i, name))
+1371        new_samples = []
+1372        w_deltas = {}
+1373        for name in sorted(obs[i].names):
+1374            w_deltas[name] = _reduce_deltas(weight.deltas[name], weight.idl[name], obs[i].idl[name])
+1375            new_samples.append((w_deltas[name] + weight.r_values[name]) * (obs[i].deltas[name] + obs[i].r_values[name]))
+1376        tmp_obs = Obs(new_samples, sorted(obs[i].names), idl=[obs[i].idl[name] for name in sorted(obs[i].names)])
+1377
+1378        if kwargs.get('all_configs'):
+1379            new_weight = weight
+1380        else:
+1381            new_weight = Obs([w_deltas[name] + weight.r_values[name] for name in sorted(obs[i].names)], sorted(obs[i].names), idl=[obs[i].idl[name] for name in sorted(obs[i].names)])
+1382
+1383        result.append(tmp_obs / new_weight)
+1384        result[-1].reweighted = True
+1385        result[-1].is_merged = obs[i].is_merged
+1386
+1387    return result
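A minimal sketch of the calling convention, assuming a strictly positive toy weight defined on the same configurations as the observable:

    import numpy as np
    import pyerrors as pe

    rng = np.random.default_rng(2)
    weight = pe.Obs([np.exp(rng.normal(0.0, 0.05, 1000))], ['ens_A'])  # positive weights
    obs = pe.Obs([rng.normal(0.5, 0.2, 1000)], ['ens_A'])

    obs_rw, = pe.reweight(weight, [obs])  # returns a list, one Obs per input
    obs_rw.gamma_method()
    print(obs_rw, obs_rw.reweighted)      # the reweighted flag is now True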
 
@@ -4499,48 +4502,48 @@ on the configurations in obs[i].idl. Default False.
-
1387def correlate(obs_a, obs_b):
-1388    """Correlate two observables.
-1389
-1390    Parameters
-1391    ----------
-1392    obs_a : Obs
-1393        First observable
-1394    obs_b : Obs
-1395        Second observable
-1396
-1397    Notes
-1398    -----
-1399    Keep in mind to only correlate primary observables which have not been reweighted
-1400    yet. The reweighting has to be applied after correlating the observables.
-1401    Currently only works if ensembles are identical (this is not strictly necessary).
-1402    """
-1403
-1404    if sorted(obs_a.names) != sorted(obs_b.names):
-1405        raise Exception(f"Ensembles do not fit {set(sorted(obs_a.names)) ^ set(sorted(obs_b.names))}")
-1406    if len(obs_a.cov_names) or len(obs_b.cov_names):
-1407        raise Exception('Error: Not possible to correlate Obs that contain covobs!')
-1408    for name in obs_a.names:
-1409        if obs_a.shape[name] != obs_b.shape[name]:
-1410            raise Exception('Shapes of ensemble', name, 'do not fit')
-1411        if obs_a.idl[name] != obs_b.idl[name]:
-1412            raise Exception('idl of ensemble', name, 'do not fit')
-1413
-1414    if obs_a.reweighted is True:
-1415        warnings.warn("The first observable is already reweighted.", RuntimeWarning)
-1416    if obs_b.reweighted is True:
-1417        warnings.warn("The second observable is already reweighted.", RuntimeWarning)
-1418
-1419    new_samples = []
-1420    new_idl = []
-1421    for name in sorted(obs_a.names):
-1422        new_samples.append((obs_a.deltas[name] + obs_a.r_values[name]) * (obs_b.deltas[name] + obs_b.r_values[name]))
-1423        new_idl.append(obs_a.idl[name])
-1424
-1425    o = Obs(new_samples, sorted(obs_a.names), idl=new_idl)
-1426    o.is_merged = {name: (obs_a.is_merged.get(name, False) or obs_b.is_merged.get(name, False)) for name in o.names}
-1427    o.reweighted = obs_a.reweighted or obs_b.reweighted
-1428    return o
+            
1390def correlate(obs_a, obs_b):
+1391    """Correlate two observables.
+1392
+1393    Parameters
+1394    ----------
+1395    obs_a : Obs
+1396        First observable
+1397    obs_b : Obs
+1398        Second observable
+1399
+1400    Notes
+1401    -----
+1402    Keep in mind to only correlate primary observables which have not been reweighted
+1403    yet. The reweighting has to be applied after correlating the observables.
+1404    Currently only works if ensembles are identical (this is not strictly necessary).
+1405    """
+1406
+1407    if sorted(obs_a.names) != sorted(obs_b.names):
+1408        raise Exception(f"Ensembles do not fit {set(sorted(obs_a.names)) ^ set(sorted(obs_b.names))}")
+1409    if len(obs_a.cov_names) or len(obs_b.cov_names):
+1410        raise Exception('Error: Not possible to correlate Obs that contain covobs!')
+1411    for name in obs_a.names:
+1412        if obs_a.shape[name] != obs_b.shape[name]:
+1413            raise Exception('Shapes of ensemble', name, 'do not fit')
+1414        if obs_a.idl[name] != obs_b.idl[name]:
+1415            raise Exception('idl of ensemble', name, 'do not fit')
+1416
+1417    if obs_a.reweighted is True:
+1418        warnings.warn("The first observable is already reweighted.", RuntimeWarning)
+1419    if obs_b.reweighted is True:
+1420        warnings.warn("The second observable is already reweighted.", RuntimeWarning)
+1421
+1422    new_samples = []
+1423    new_idl = []
+1424    for name in sorted(obs_a.names):
+1425        new_samples.append((obs_a.deltas[name] + obs_a.r_values[name]) * (obs_b.deltas[name] + obs_b.r_values[name]))
+1426        new_idl.append(obs_a.idl[name])
+1427
+1428    o = Obs(new_samples, sorted(obs_a.names), idl=new_idl)
+1429    o.is_merged = {name: (obs_a.is_merged.get(name, False) or obs_b.is_merged.get(name, False)) for name in o.names}
+1430    o.reweighted = obs_a.reweighted or obs_b.reweighted
+1431    return o
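For two observables defined on identical configurations the call reduces to the following sketch (toy samples, correlated by construction through a shared noise vector):

    import numpy as np
    import pyerrors as pe

    rng = np.random.default_rng(3)
    common = rng.normal(0.0, 1.0, 1000)
    obs_a = pe.Obs([1.0 + 0.1 * common], ['ens_A'])
    obs_b = pe.Obs([2.0 - 0.1 * common + rng.normal(0.0, 0.05, 1000)], ['ens_A'])

    ab = pe.correlate(obs_a, obs_b)  # sample-by-sample product of the two observables
    ab.gamma_method()
    print(ab)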
 
@@ -4575,74 +4578,74 @@ Currently only works if ensembles are identical (this is not strictly necessary)
-
1431def covariance(obs, visualize=False, correlation=False, smooth=None, **kwargs):
-1432    r'''Calculates the error covariance matrix of a set of observables.
-1433
-1434    WARNING: This function should be used with care, especially for observables with support on multiple
-1435             ensembles with differing autocorrelations. See the notes below for details.
+            
1434def covariance(obs, visualize=False, correlation=False, smooth=None, **kwargs):
+1435    r'''Calculates the error covariance matrix of a set of observables.
 1436
-1437    The gamma method has to be applied first to all observables.
-1438
-1439    Parameters
-1440    ----------
-1441    obs : list or numpy.ndarray
-1442        List or one dimensional array of Obs
-1443    visualize : bool
-1444        If True plots the corresponding normalized correlation matrix (default False).
-1445    correlation : bool
-1446        If True the correlation matrix instead of the error covariance matrix is returned (default False).
-1447    smooth : None or int
-1448        If smooth is an integer 'E' between 2 and the dimension of the matrix minus 1, the eigenvalue
-1449        smoothing procedure of hep-lat/9412087 is applied to the correlation matrix, which leaves the
-1450        largest E eigenvalues essentially unchanged and smooths the smaller eigenvalues to avoid extremely
-1451        small ones.
-1452
-1453    Notes
-1454    -----
-1455    The error covariance is defined such that it agrees with the squared standard error for two identical observables
-1456    $$\operatorname{cov}(a,a)=\sum_{s=1}^N\delta_a^s\delta_a^s/N^2=\Gamma_{aa}(0)/N=\operatorname{var}(a)/N=\sigma_a^2$$
-1457    in the absence of autocorrelation.
-1458    The error covariance is estimated by calculating the correlation matrix assuming no autocorrelation and then rescaling the correlation matrix by the full errors including the previous gamma method estimate for the autocorrelation of the observables. The covariance at windowsize 0 is guaranteed to be positive semi-definite
-1459    $$\sum_{i,j}v_i\Gamma_{ij}(0)v_j=\frac{1}{N}\sum_{s=1}^N\sum_{i,j}v_i\delta_i^s\delta_j^s v_j=\frac{1}{N}\sum_{s=1}^N\sum_{i}|v_i\delta_i^s|^2\geq 0\,,$$ for every $v\in\mathbb{R}^M$, while such an identity does not hold for larger windows/lags.
-1460    For observables defined on a single ensemble our approximation is equivalent to assuming that the integrated autocorrelation time of an off-diagonal element is equal to the geometric mean of the integrated autocorrelation times of the corresponding diagonal elements.
-1461    $$\tau_{\mathrm{int}, ij}=\sqrt{\tau_{\mathrm{int}, i}\times \tau_{\mathrm{int}, j}}$$
-1462    This construction ensures that the estimated covariance matrix is positive semi-definite (up to numerical rounding errors).
-1463    '''
-1464
-1465    length = len(obs)
-1466
-1467    max_samples = np.max([o.N for o in obs])
-1468    if max_samples <= length and not [item for sublist in [o.cov_names for o in obs] for item in sublist]:
-1469        warnings.warn(f"The dimension of the covariance matrix ({length}) is larger than or equal to the number of samples ({max_samples}). This will result in a rank-deficient matrix.", RuntimeWarning)
-1470
-1471    cov = np.zeros((length, length))
-1472    for i in range(length):
-1473        for j in range(i, length):
-1474            cov[i, j] = _covariance_element(obs[i], obs[j])
-1475    cov = cov + cov.T - np.diag(np.diag(cov))
-1476
-1477    corr = np.diag(1 / np.sqrt(np.diag(cov))) @ cov @ np.diag(1 / np.sqrt(np.diag(cov)))
-1478
-1479    if isinstance(smooth, int):
-1480        corr = _smooth_eigenvalues(corr, smooth)
+1437    WARNING: This function should be used with care, especially for observables with support on multiple
+1438             ensembles with differing autocorrelations. See the notes below for details.
+1439
+1440    The gamma method has to be applied first to all observables.
+1441
+1442    Parameters
+1443    ----------
+1444    obs : list or numpy.ndarray
+1445        List or one dimensional array of Obs
+1446    visualize : bool
+1447        If True plots the corresponding normalized correlation matrix (default False).
+1448    correlation : bool
+1449        If True the correlation matrix instead of the error covariance matrix is returned (default False).
+1450    smooth : None or int
+1451        If smooth is an integer 'E' between 2 and the dimension of the matrix minus 1, the eigenvalue
+1452        smoothing procedure of hep-lat/9412087 is applied to the correlation matrix, which leaves the
+1453        largest E eigenvalues essentially unchanged and smooths the smaller eigenvalues to avoid extremely
+1454        small ones.
+1455
+1456    Notes
+1457    -----
+1458    The error covariance is defined such that it agrees with the squared standard error for two identical observables
+1459    $$\operatorname{cov}(a,a)=\sum_{s=1}^N\delta_a^s\delta_a^s/N^2=\Gamma_{aa}(0)/N=\operatorname{var}(a)/N=\sigma_a^2$$
+1460    in the absence of autocorrelation.
+1461    The error covariance is estimated by calculating the correlation matrix assuming no autocorrelation and then rescaling the correlation matrix by the full errors including the previous gamma method estimate for the autocorrelation of the observables. The covariance at windowsize 0 is guaranteed to be positive semi-definite
+1462    $$\sum_{i,j}v_i\Gamma_{ij}(0)v_j=\frac{1}{N}\sum_{s=1}^N\sum_{i,j}v_i\delta_i^s\delta_j^s v_j=\frac{1}{N}\sum_{s=1}^N\sum_{i}|v_i\delta_i^s|^2\geq 0\,,$$ for every $v\in\mathbb{R}^M$, while such an identity does not hold for larger windows/lags.
+1463    For observables defined on a single ensemble our approximation is equivalent to assuming that the integrated autocorrelation time of an off-diagonal element is equal to the geometric mean of the integrated autocorrelation times of the corresponding diagonal elements.
+1464    $$\tau_{\mathrm{int}, ij}=\sqrt{\tau_{\mathrm{int}, i}\times \tau_{\mathrm{int}, j}}$$
+1465    This construction ensures that the estimated covariance matrix is positive semi-definite (up to numerical rounding errors).
+1466    '''
+1467
+1468    length = len(obs)
+1469
+1470    max_samples = np.max([o.N for o in obs])
+1471    if max_samples <= length and not [item for sublist in [o.cov_names for o in obs] for item in sublist]:
+1472        warnings.warn(f"The dimension of the covariance matrix ({length}) is larger than or equal to the number of samples ({max_samples}). This will result in a rank-deficient matrix.", RuntimeWarning)
+1473
+1474    cov = np.zeros((length, length))
+1475    for i in range(length):
+1476        for j in range(i, length):
+1477            cov[i, j] = _covariance_element(obs[i], obs[j])
+1478    cov = cov + cov.T - np.diag(np.diag(cov))
+1479
+1480    corr = np.diag(1 / np.sqrt(np.diag(cov))) @ cov @ np.diag(1 / np.sqrt(np.diag(cov)))
 1481
-1482    if visualize:
-1483        plt.matshow(corr, vmin=-1, vmax=1)
-1484        plt.set_cmap('RdBu')
-1485        plt.colorbar()
-1486        plt.draw()
-1487
-1488    if correlation is True:
-1489        return corr
+1482    if isinstance(smooth, int):
+1483        corr = _smooth_eigenvalues(corr, smooth)
+1484
+1485    if visualize:
+1486        plt.matshow(corr, vmin=-1, vmax=1)
+1487        plt.set_cmap('RdBu')
+1488        plt.colorbar()
+1489        plt.draw()
 1490
-1491    errors = [o.dvalue for o in obs]
-1492    cov = np.diag(errors) @ corr @ np.diag(errors)
+1491    if correlation is True:
+1492        return corr
 1493
-1494    eigenvalues = np.linalg.eigh(cov)[0]
-1495    if not np.all(eigenvalues >= 0):
-1496        warnings.warn("Covariance matrix is not positive semi-definite (Eigenvalues: " + str(eigenvalues) + ")", RuntimeWarning)
-1497
-1498    return cov
+1494    errors = [o.dvalue for o in obs]
+1495    cov = np.diag(errors) @ corr @ np.diag(errors)
+1496
+1497    eigenvalues = np.linalg.eigh(cov)[0]
+1498    if not np.all(eigenvalues >= 0):
+1499        warnings.warn("Covariance matrix is not positive semi-definite (Eigenvalues: " + str(eigenvalues) + ")", RuntimeWarning)
+1500
+1501    return cov
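A sketch of the intended workflow; the gamma method has to be run on every entry before the call (toy data, names illustrative):

    import numpy as np
    import pyerrors as pe

    rng = np.random.default_rng(4)
    common = rng.normal(0.0, 1.0, 1000)
    obs1 = pe.Obs([1.0 + 0.1 * common], ['ens_A'])
    obs2 = pe.Obs([0.5 - 0.05 * common + rng.normal(0.0, 0.02, 1000)], ['ens_A'])

    for o in (obs1, obs2):
        o.gamma_method()  # required before calling covariance

    cov = pe.covariance([obs1, obs2])                     # 2x2 error covariance matrix
    corr = pe.covariance([obs1, obs2], correlation=True)  # normalized correlation matrix
    print(cov)
    print(corr)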
 
@@ -4694,24 +4697,24 @@ This construction ensures that the estimated covariance matrix is positive semi-
-
1578def import_jackknife(jacks, name, idl=None):
-1579    """Imports jackknife samples and returns an Obs
-1580
-1581    Parameters
-1582    ----------
-1583    jacks : numpy.ndarray
-1584        numpy array containing the mean value as zeroth entry and
-1585        the N jackknife samples as first to Nth entry.
-1586    name : str
-1587        name of the ensemble the samples are defined on.
-1588    """
-1589    length = len(jacks) - 1
-1590    prj = (np.ones((length, length)) - (length - 1) * np.identity(length))
-1591    samples = jacks[1:] @ prj
-1592    mean = np.mean(samples)
-1593    new_obs = Obs([samples - mean], [name], idl=idl, means=[mean])
-1594    new_obs._value = jacks[0]
-1595    return new_obs
+            
1581def import_jackknife(jacks, name, idl=None):
+1582    """Imports jackknife samples and returns an Obs
+1583
+1584    Parameters
+1585    ----------
+1586    jacks : numpy.ndarray
+1587        numpy array containing the mean value as zeroth entry and
+1588        the N jackknife samples as first to Nth entry.
+1589    name : str
+1590        name of the ensemble the samples are defined on.
+1591    """
+1592    length = len(jacks) - 1
+1593    prj = (np.ones((length, length)) - (length - 1) * np.identity(length))
+1594    samples = jacks[1:] @ prj
+1595    mean = np.mean(samples)
+1596    new_obs = Obs([samples - mean], [name], idl=idl, means=[mean])
+1597    new_obs._value = jacks[0]
+1598    return new_obs
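The projection matrix prj undoes the jackknife averaging: with J_k = (sum_j x_j - x_k)/(N - 1) one finds sum_i J_i - (N - 1) J_k = x_k, so jacks[1:] @ prj recovers the original samples exactly. A round-trip sketch, using the companion Obs.export_jackknife method (toy data, name illustrative):

    import numpy as np
    import pyerrors as pe

    rng = np.random.default_rng(5)
    obs = pe.Obs([rng.normal(1.0, 0.1, 500)], ['ens_A'])

    jacks = obs.export_jackknife()           # mean first, then the N jackknife samples
    obs_back = pe.import_jackknife(jacks, 'ens_A')
    obs_back.gamma_method()
    print(obs_back)                          # agrees with obs up to jackknife accuracy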
 
@@ -4741,35 +4744,35 @@ name of the ensemble the samples are defined on.
-
1598def merge_obs(list_of_obs):
-1599    """Combine all observables in list_of_obs into one new observable
-1600
-1601    Parameters
-1602    ----------
-1603    list_of_obs : list
-1604        list of the Obs objects to be combined
-1605
-1606    Notes
-1607    -----
-1608    It is not possible to combine obs which are based on the same replicum
-1609    """
-1610    replist = [item for obs in list_of_obs for item in obs.names]
-1611    if (len(replist) == len(set(replist))) is False:
-1612        raise Exception('list_of_obs contains duplicate replica: %s' % (str(replist)))
-1613    if any([len(o.cov_names) for o in list_of_obs]):
-1614        raise Exception('Not possible to merge data that contains covobs!')
-1615    new_dict = {}
-1616    idl_dict = {}
-1617    for o in list_of_obs:
-1618        new_dict.update({key: o.deltas.get(key, 0) + o.r_values.get(key, 0)
-1619                        for key in set(o.deltas) | set(o.r_values)})
-1620        idl_dict.update({key: o.idl.get(key, 0) for key in set(o.deltas)})
-1621
-1622    names = sorted(new_dict.keys())
-1623    o = Obs([new_dict[name] for name in names], names, idl=[idl_dict[name] for name in names])
-1624    o.is_merged = {name: np.any([oi.is_merged.get(name, False) for oi in list_of_obs]) for name in o.names}
-1625    o.reweighted = np.max([oi.reweighted for oi in list_of_obs])
-1626    return o
+            
1601def merge_obs(list_of_obs):
+1602    """Combine all observables in list_of_obs into one new observable
+1603
+1604    Parameters
+1605    ----------
+1606    list_of_obs : list
+1607        list of the Obs objects to be combined
+1608
+1609    Notes
+1610    -----
+1611    It is not possible to combine obs which are based on the same replicum
+1612    """
+1613    replist = [item for obs in list_of_obs for item in obs.names]
+1614    if (len(replist) == len(set(replist))) is False:
+1615        raise Exception('list_of_obs contains duplicate replica: %s' % (str(replist)))
+1616    if any([len(o.cov_names) for o in list_of_obs]):
+1617        raise Exception('Not possible to merge data that contains covobs!')
+1618    new_dict = {}
+1619    idl_dict = {}
+1620    for o in list_of_obs:
+1621        new_dict.update({key: o.deltas.get(key, 0) + o.r_values.get(key, 0)
+1622                        for key in set(o.deltas) | set(o.r_values)})
+1623        idl_dict.update({key: o.idl.get(key, 0) for key in set(o.deltas)})
+1624
+1625    names = sorted(new_dict.keys())
+1626    o = Obs([new_dict[name] for name in names], names, idl=[idl_dict[name] for name in names])
+1627    o.is_merged = {name: np.any([oi.is_merged.get(name, False) for oi in list_of_obs]) for name in o.names}
+1628    o.reweighted = np.max([oi.reweighted for oi in list_of_obs])
+1629    return o
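A sketch for merging two replica of one ensemble; the 'ensemble|replicum' naming follows the pyerrors convention, but the concrete names and samples are made up for illustration:

    import numpy as np
    import pyerrors as pe

    rng = np.random.default_rng(6)
    rep1 = pe.Obs([rng.normal(1.0, 0.1, 500)], ['ens_A|r01'])
    rep2 = pe.Obs([rng.normal(1.0, 0.1, 500)], ['ens_A|r02'])

    merged = pe.merge_obs([rep1, rep2])  # raises if a replicum name appears twice
    merged.gamma_method()
    print(merged)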
 
@@ -4800,47 +4803,47 @@ list of the Obs object to be combined
-
1629def cov_Obs(means, cov, name, grad=None):
-1630    """Create an Obs based on mean(s) and a covariance matrix
-1631
-1632    Parameters
-1633    ----------
-1634    means : list of floats or float
-1635        N mean value(s) of the new Obs
-1636    cov : list or array
-1637        2d (NxN) Covariance matrix, 1d diagonal entries or 0d covariance
-1638    name : str
-1639        identifier for the covariance matrix
-1640    grad : list or array
-1641        Gradient of the Covobs wrt. the means belonging to cov.
-1642    """
-1643
-1644    def covobs_to_obs(co):
-1645        """Make an Obs out of a Covobs
+            
1632def cov_Obs(means, cov, name, grad=None):
+1633    """Create an Obs based on mean(s) and a covariance matrix
+1634
+1635    Parameters
+1636    ----------
+1637    means : list of floats or float
+1638        N mean value(s) of the new Obs
+1639    cov : list or array
+1640        2d (NxN) Covariance matrix, 1d diagonal entries or 0d covariance
+1641    name : str
+1642        identifier for the covariance matrix
+1643    grad : list or array
+1644        Gradient of the Covobs wrt. the means belonging to cov.
+1645    """
 1646
-1647        Parameters
-1648        ----------
-1649        co : Covobs
-1650            Covobs to be embedded into the Obs
-1651        """
-1652        o = Obs([], [], means=[])
-1653        o._value = co.value
-1654        o.names.append(co.name)
-1655        o._covobs[co.name] = co
-1656        o._dvalue = np.sqrt(co.errsq())
-1657        return o
-1658
-1659    ol = []
-1660    if isinstance(means, (float, int)):
-1661        means = [means]
-1662
-1663    for i in range(len(means)):
-1664        ol.append(covobs_to_obs(Covobs(means[i], cov, name, pos=i, grad=grad)))
-1665    if ol[0].covobs[name].N != len(means):
-1666        raise Exception('You have to provide %d mean values!' % (ol[0].covobs[name].N))
-1667    if len(ol) == 1:
-1668        return ol[0]
-1669    return ol
+1647    def covobs_to_obs(co):
+1648        """Make an Obs out of a Covobs
+1649
+1650        Parameters
+1651        ----------
+1652        co : Covobs
+1653            Covobs to be embedded into the Obs
+1654        """
+1655        o = Obs([], [], means=[])
+1656        o._value = co.value
+1657        o.names.append(co.name)
+1658        o._covobs[co.name] = co
+1659        o._dvalue = np.sqrt(co.errsq())
+1660        return o
+1661
+1662    ol = []
+1663    if isinstance(means, (float, int)):
+1664        means = [means]
+1665
+1666    for i in range(len(means)):
+1667        ol.append(covobs_to_obs(Covobs(means[i], cov, name, pos=i, grad=grad)))
+1668    if ol[0].covobs[name].N != len(means):
+1669        raise Exception('You have to provide %d mean values!' % (ol[0].covobs[name].N))
+1670    if len(ol) == 1:
+1671        return ol[0]
+1672    return ol
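A sketch covering both the 0d and the 2d case; the names and numbers are made up for illustration:

    import pyerrors as pe

    # Single input: mean value and a 0d covariance, i.e. the squared error
    z = pe.cov_Obs(0.3483, 0.0006**2, 'Z_input')

    # Two correlated inputs with a 2x2 covariance matrix; returns a list of Obs
    a, b = pe.cov_Obs([1.00, 2.00], [[1e-4, 5e-5], [5e-5, 4e-4]], 'syst_inputs')

    for o in (z, a, b):
        o.gamma_method()
    print(pe.covariance([a, b]))  # should reproduce the input covariance matrix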