diff --git a/docs/pyerrors/obs.html b/docs/pyerrors/obs.html index 2c9557c3..ad14f793 100644 --- a/docs/pyerrors/obs.html +++ b/docs/pyerrors/obs.html @@ -434,1647 +434,1649 @@ 104 elif isinstance(idx, (list, np.ndarray)): 105 dc = np.unique(np.diff(idx)) 106 if np.any(dc < 0): - 107 raise ValueError("Unsorted idx for idl[%s]" % (name)) - 108 if len(dc) == 1: - 109 self.idl[name] = range(idx[0], idx[-1] + dc[0], dc[0]) - 110 else: - 111 self.idl[name] = list(idx) - 112 else: - 113 raise TypeError('incompatible type for idl[%s].' % (name)) - 114 else: - 115 for name, sample in sorted(zip(names, samples)): - 116 self.idl[name] = range(1, len(sample) + 1) - 117 - 118 if kwargs.get("means") is not None: - 119 for name, sample, mean in sorted(zip(names, samples, kwargs.get("means"))): - 120 self.shape[name] = len(self.idl[name]) - 121 self.N += self.shape[name] - 122 self.r_values[name] = mean - 123 self.deltas[name] = sample - 124 else: - 125 for name, sample in sorted(zip(names, samples)): - 126 self.shape[name] = len(self.idl[name]) - 127 self.N += self.shape[name] - 128 if len(sample) != self.shape[name]: - 129 raise ValueError('Incompatible samples and idx for %s: %d vs. 
%d' % (name, len(sample), self.shape[name])) - 130 self.r_values[name] = np.mean(sample) - 131 self.deltas[name] = sample - self.r_values[name] - 132 self._value += self.shape[name] * self.r_values[name] - 133 self._value /= self.N - 134 - 135 self._dvalue = 0.0 - 136 self.ddvalue = 0.0 - 137 self.reweighted = False - 138 - 139 self.tag = None + 107 raise ValueError("Unsorted idx for idl[%s] at position %s" % (name, ' '.join(['%s' % (pos + 1) for pos in np.where(np.diff(idx) < 0)[0]]))) + 108 elif np.any(dc == 0): + 109 raise ValueError("Duplicate entries in idx for idl[%s] at position %s" % (name, ' '.join(['%s' % (pos + 1) for pos in np.where(np.diff(idx) == 0)[0]]))) + 110 if len(dc) == 1: + 111 self.idl[name] = range(idx[0], idx[-1] + dc[0], dc[0]) + 112 else: + 113 self.idl[name] = list(idx) + 114 else: + 115 raise TypeError('incompatible type for idl[%s].' % (name)) + 116 else: + 117 for name, sample in sorted(zip(names, samples)): + 118 self.idl[name] = range(1, len(sample) + 1) + 119 + 120 if kwargs.get("means") is not None: + 121 for name, sample, mean in sorted(zip(names, samples, kwargs.get("means"))): + 122 self.shape[name] = len(self.idl[name]) + 123 self.N += self.shape[name] + 124 self.r_values[name] = mean + 125 self.deltas[name] = sample + 126 else: + 127 for name, sample in sorted(zip(names, samples)): + 128 self.shape[name] = len(self.idl[name]) + 129 self.N += self.shape[name] + 130 if len(sample) != self.shape[name]: + 131 raise ValueError('Incompatible samples and idx for %s: %d vs. 
%d' % (name, len(sample), self.shape[name])) + 132 self.r_values[name] = np.mean(sample) + 133 self.deltas[name] = sample - self.r_values[name] + 134 self._value += self.shape[name] * self.r_values[name] + 135 self._value /= self.N + 136 + 137 self._dvalue = 0.0 + 138 self.ddvalue = 0.0 + 139 self.reweighted = False 140 - 141 @property - 142 def value(self): - 143 return self._value - 144 - 145 @property - 146 def dvalue(self): - 147 return self._dvalue - 148 - 149 @property - 150 def e_names(self): - 151 return sorted(set([o.split('|')[0] for o in self.names])) - 152 - 153 @property - 154 def cov_names(self): - 155 return sorted(set([o for o in self.covobs.keys()])) - 156 - 157 @property - 158 def mc_names(self): - 159 return sorted(set([o.split('|')[0] for o in self.names if o not in self.cov_names])) - 160 - 161 @property - 162 def e_content(self): - 163 res = {} - 164 for e, e_name in enumerate(self.e_names): - 165 res[e_name] = sorted(filter(lambda x: x.startswith(e_name + '|'), self.names)) - 166 if e_name in self.names: - 167 res[e_name].append(e_name) - 168 return res - 169 - 170 @property - 171 def covobs(self): - 172 return self._covobs - 173 - 174 def gamma_method(self, **kwargs): - 175 """Estimate the error and related properties of the Obs. - 176 - 177 Parameters - 178 ---------- - 179 S : float - 180 specifies a custom value for the parameter S (default 2.0). - 181 If set to 0 it is assumed that the data exhibits no - 182 autocorrelation. In this case the error estimates coincides - 183 with the sample standard error. - 184 tau_exp : float - 185 positive value triggers the critical slowing down analysis - 186 (default 0.0). - 187 N_sigma : float - 188 number of standard deviations from zero until the tail is - 189 attached to the autocorrelation function (default 1). 
- 190 fft : bool - 191 determines whether the fft algorithm is used for the computation - 192 of the autocorrelation function (default True) - 193 """ - 194 - 195 e_content = self.e_content - 196 self.e_dvalue = {} - 197 self.e_ddvalue = {} - 198 self.e_tauint = {} - 199 self.e_dtauint = {} - 200 self.e_windowsize = {} - 201 self.e_n_tauint = {} - 202 self.e_n_dtauint = {} - 203 e_gamma = {} - 204 self.e_rho = {} - 205 self.e_drho = {} - 206 self._dvalue = 0 - 207 self.ddvalue = 0 - 208 - 209 self.S = {} - 210 self.tau_exp = {} - 211 self.N_sigma = {} - 212 - 213 if kwargs.get('fft') is False: - 214 fft = False - 215 else: - 216 fft = True - 217 - 218 def _parse_kwarg(kwarg_name): - 219 if kwarg_name in kwargs: - 220 tmp = kwargs.get(kwarg_name) - 221 if isinstance(tmp, (int, float)): - 222 if tmp < 0: - 223 raise Exception(kwarg_name + ' has to be larger or equal to 0.') - 224 for e, e_name in enumerate(self.e_names): - 225 getattr(self, kwarg_name)[e_name] = tmp - 226 else: - 227 raise TypeError(kwarg_name + ' is not in proper format.') - 228 else: - 229 for e, e_name in enumerate(self.e_names): - 230 if e_name in getattr(Obs, kwarg_name + '_dict'): - 231 getattr(self, kwarg_name)[e_name] = getattr(Obs, kwarg_name + '_dict')[e_name] - 232 else: - 233 getattr(self, kwarg_name)[e_name] = getattr(Obs, kwarg_name + '_global') - 234 - 235 _parse_kwarg('S') - 236 _parse_kwarg('tau_exp') - 237 _parse_kwarg('N_sigma') - 238 - 239 for e, e_name in enumerate(self.mc_names): - 240 gapsize = _determine_gap(self, e_content, e_name) - 241 - 242 r_length = [] - 243 for r_name in e_content[e_name]: - 244 if isinstance(self.idl[r_name], range): - 245 r_length.append(len(self.idl[r_name]) * self.idl[r_name].step // gapsize) - 246 else: - 247 r_length.append((self.idl[r_name][-1] - self.idl[r_name][0] + 1) // gapsize) - 248 - 249 e_N = np.sum([self.shape[r_name] for r_name in e_content[e_name]]) - 250 w_max = max(r_length) // 2 - 251 e_gamma[e_name] = np.zeros(w_max) - 252 
self.e_rho[e_name] = np.zeros(w_max) - 253 self.e_drho[e_name] = np.zeros(w_max) - 254 - 255 for r_name in e_content[e_name]: - 256 e_gamma[e_name] += self._calc_gamma(self.deltas[r_name], self.idl[r_name], self.shape[r_name], w_max, fft, gapsize) - 257 - 258 gamma_div = np.zeros(w_max) - 259 for r_name in e_content[e_name]: - 260 gamma_div += self._calc_gamma(np.ones((self.shape[r_name])), self.idl[r_name], self.shape[r_name], w_max, fft, gapsize) - 261 gamma_div[gamma_div < 1] = 1.0 - 262 e_gamma[e_name] /= gamma_div[:w_max] - 263 - 264 if np.abs(e_gamma[e_name][0]) < 10 * np.finfo(float).tiny: # Prevent division by zero - 265 self.e_tauint[e_name] = 0.5 - 266 self.e_dtauint[e_name] = 0.0 - 267 self.e_dvalue[e_name] = 0.0 - 268 self.e_ddvalue[e_name] = 0.0 - 269 self.e_windowsize[e_name] = 0 - 270 continue - 271 - 272 self.e_rho[e_name] = e_gamma[e_name][:w_max] / e_gamma[e_name][0] - 273 self.e_n_tauint[e_name] = np.cumsum(np.concatenate(([0.5], self.e_rho[e_name][1:]))) - 274 # Make sure no entry of tauint is smaller than 0.5 - 275 self.e_n_tauint[e_name][self.e_n_tauint[e_name] <= 0.5] = 0.5 + np.finfo(np.float64).eps - 276 # hep-lat/0306017 eq. 
(42) - 277 self.e_n_dtauint[e_name] = self.e_n_tauint[e_name] * 2 * np.sqrt(np.abs(np.arange(w_max) + 0.5 - self.e_n_tauint[e_name]) / e_N) - 278 self.e_n_dtauint[e_name][0] = 0.0 - 279 - 280 def _compute_drho(i): - 281 tmp = (self.e_rho[e_name][i + 1:w_max] - 282 + np.concatenate([self.e_rho[e_name][i - 1:None if i - (w_max - 1) // 2 <= 0 else (2 * i - (2 * w_max) // 2):-1], - 283 self.e_rho[e_name][1:max(1, w_max - 2 * i)]]) - 284 - 2 * self.e_rho[e_name][i] * self.e_rho[e_name][1:w_max - i]) - 285 self.e_drho[e_name][i] = np.sqrt(np.sum(tmp ** 2) / e_N) - 286 - 287 if self.tau_exp[e_name] > 0: - 288 _compute_drho(1) - 289 texp = self.tau_exp[e_name] - 290 # Critical slowing down analysis - 291 if w_max // 2 <= 1: - 292 raise Exception("Need at least 8 samples for tau_exp error analysis") - 293 for n in range(1, w_max // 2): - 294 _compute_drho(n + 1) - 295 if (self.e_rho[e_name][n] - self.N_sigma[e_name] * self.e_drho[e_name][n]) < 0 or n >= w_max // 2 - 2: - 296 # Bias correction hep-lat/0306017 eq. 
(49) included - 297 self.e_tauint[e_name] = self.e_n_tauint[e_name][n] * (1 + (2 * n + 1) / e_N) / (1 + 1 / e_N) + texp * np.abs(self.e_rho[e_name][n + 1]) # The absolute makes sure, that the tail contribution is always positive - 298 self.e_dtauint[e_name] = np.sqrt(self.e_n_dtauint[e_name][n] ** 2 + texp ** 2 * self.e_drho[e_name][n + 1] ** 2) - 299 # Error of tau_exp neglected so far, missing term: self.e_rho[e_name][n + 1] ** 2 * d_tau_exp ** 2 - 300 self.e_dvalue[e_name] = np.sqrt(2 * self.e_tauint[e_name] * e_gamma[e_name][0] * (1 + 1 / e_N) / e_N) - 301 self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt((n + 0.5) / e_N) - 302 self.e_windowsize[e_name] = n - 303 break - 304 else: - 305 if self.S[e_name] == 0.0: - 306 self.e_tauint[e_name] = 0.5 - 307 self.e_dtauint[e_name] = 0.0 - 308 self.e_dvalue[e_name] = np.sqrt(e_gamma[e_name][0] / (e_N - 1)) - 309 self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt(0.5 / e_N) - 310 self.e_windowsize[e_name] = 0 - 311 else: - 312 # Standard automatic windowing procedure - 313 tau = self.S[e_name] / np.log((2 * self.e_n_tauint[e_name][1:] + 1) / (2 * self.e_n_tauint[e_name][1:] - 1)) - 314 g_w = np.exp(- np.arange(1, len(tau) + 1) / tau) - tau / np.sqrt(np.arange(1, len(tau) + 1) * e_N) - 315 for n in range(1, w_max): - 316 if g_w[n - 1] < 0 or n >= w_max - 1: - 317 _compute_drho(n) - 318 self.e_tauint[e_name] = self.e_n_tauint[e_name][n] * (1 + (2 * n + 1) / e_N) / (1 + 1 / e_N) # Bias correction hep-lat/0306017 eq. 
(49) - 319 self.e_dtauint[e_name] = self.e_n_dtauint[e_name][n] - 320 self.e_dvalue[e_name] = np.sqrt(2 * self.e_tauint[e_name] * e_gamma[e_name][0] * (1 + 1 / e_N) / e_N) - 321 self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt((n + 0.5) / e_N) - 322 self.e_windowsize[e_name] = n - 323 break - 324 - 325 self._dvalue += self.e_dvalue[e_name] ** 2 - 326 self.ddvalue += (self.e_dvalue[e_name] * self.e_ddvalue[e_name]) ** 2 - 327 - 328 for e_name in self.cov_names: - 329 self.e_dvalue[e_name] = np.sqrt(self.covobs[e_name].errsq()) - 330 self.e_ddvalue[e_name] = 0 - 331 self._dvalue += self.e_dvalue[e_name]**2 - 332 - 333 self._dvalue = np.sqrt(self._dvalue) - 334 if self._dvalue == 0.0: - 335 self.ddvalue = 0.0 - 336 else: - 337 self.ddvalue = np.sqrt(self.ddvalue) / self._dvalue - 338 return - 339 - 340 gm = gamma_method + 141 self.tag = None + 142 + 143 @property + 144 def value(self): + 145 return self._value + 146 + 147 @property + 148 def dvalue(self): + 149 return self._dvalue + 150 + 151 @property + 152 def e_names(self): + 153 return sorted(set([o.split('|')[0] for o in self.names])) + 154 + 155 @property + 156 def cov_names(self): + 157 return sorted(set([o for o in self.covobs.keys()])) + 158 + 159 @property + 160 def mc_names(self): + 161 return sorted(set([o.split('|')[0] for o in self.names if o not in self.cov_names])) + 162 + 163 @property + 164 def e_content(self): + 165 res = {} + 166 for e, e_name in enumerate(self.e_names): + 167 res[e_name] = sorted(filter(lambda x: x.startswith(e_name + '|'), self.names)) + 168 if e_name in self.names: + 169 res[e_name].append(e_name) + 170 return res + 171 + 172 @property + 173 def covobs(self): + 174 return self._covobs + 175 + 176 def gamma_method(self, **kwargs): + 177 """Estimate the error and related properties of the Obs. + 178 + 179 Parameters + 180 ---------- + 181 S : float + 182 specifies a custom value for the parameter S (default 2.0). 
+ 183 If set to 0 it is assumed that the data exhibits no + 184 autocorrelation. In this case the error estimates coincides + 185 with the sample standard error. + 186 tau_exp : float + 187 positive value triggers the critical slowing down analysis + 188 (default 0.0). + 189 N_sigma : float + 190 number of standard deviations from zero until the tail is + 191 attached to the autocorrelation function (default 1). + 192 fft : bool + 193 determines whether the fft algorithm is used for the computation + 194 of the autocorrelation function (default True) + 195 """ + 196 + 197 e_content = self.e_content + 198 self.e_dvalue = {} + 199 self.e_ddvalue = {} + 200 self.e_tauint = {} + 201 self.e_dtauint = {} + 202 self.e_windowsize = {} + 203 self.e_n_tauint = {} + 204 self.e_n_dtauint = {} + 205 e_gamma = {} + 206 self.e_rho = {} + 207 self.e_drho = {} + 208 self._dvalue = 0 + 209 self.ddvalue = 0 + 210 + 211 self.S = {} + 212 self.tau_exp = {} + 213 self.N_sigma = {} + 214 + 215 if kwargs.get('fft') is False: + 216 fft = False + 217 else: + 218 fft = True + 219 + 220 def _parse_kwarg(kwarg_name): + 221 if kwarg_name in kwargs: + 222 tmp = kwargs.get(kwarg_name) + 223 if isinstance(tmp, (int, float)): + 224 if tmp < 0: + 225 raise Exception(kwarg_name + ' has to be larger or equal to 0.') + 226 for e, e_name in enumerate(self.e_names): + 227 getattr(self, kwarg_name)[e_name] = tmp + 228 else: + 229 raise TypeError(kwarg_name + ' is not in proper format.') + 230 else: + 231 for e, e_name in enumerate(self.e_names): + 232 if e_name in getattr(Obs, kwarg_name + '_dict'): + 233 getattr(self, kwarg_name)[e_name] = getattr(Obs, kwarg_name + '_dict')[e_name] + 234 else: + 235 getattr(self, kwarg_name)[e_name] = getattr(Obs, kwarg_name + '_global') + 236 + 237 _parse_kwarg('S') + 238 _parse_kwarg('tau_exp') + 239 _parse_kwarg('N_sigma') + 240 + 241 for e, e_name in enumerate(self.mc_names): + 242 gapsize = _determine_gap(self, e_content, e_name) + 243 + 244 r_length = [] + 245 for 
r_name in e_content[e_name]: + 246 if isinstance(self.idl[r_name], range): + 247 r_length.append(len(self.idl[r_name]) * self.idl[r_name].step // gapsize) + 248 else: + 249 r_length.append((self.idl[r_name][-1] - self.idl[r_name][0] + 1) // gapsize) + 250 + 251 e_N = np.sum([self.shape[r_name] for r_name in e_content[e_name]]) + 252 w_max = max(r_length) // 2 + 253 e_gamma[e_name] = np.zeros(w_max) + 254 self.e_rho[e_name] = np.zeros(w_max) + 255 self.e_drho[e_name] = np.zeros(w_max) + 256 + 257 for r_name in e_content[e_name]: + 258 e_gamma[e_name] += self._calc_gamma(self.deltas[r_name], self.idl[r_name], self.shape[r_name], w_max, fft, gapsize) + 259 + 260 gamma_div = np.zeros(w_max) + 261 for r_name in e_content[e_name]: + 262 gamma_div += self._calc_gamma(np.ones((self.shape[r_name])), self.idl[r_name], self.shape[r_name], w_max, fft, gapsize) + 263 gamma_div[gamma_div < 1] = 1.0 + 264 e_gamma[e_name] /= gamma_div[:w_max] + 265 + 266 if np.abs(e_gamma[e_name][0]) < 10 * np.finfo(float).tiny: # Prevent division by zero + 267 self.e_tauint[e_name] = 0.5 + 268 self.e_dtauint[e_name] = 0.0 + 269 self.e_dvalue[e_name] = 0.0 + 270 self.e_ddvalue[e_name] = 0.0 + 271 self.e_windowsize[e_name] = 0 + 272 continue + 273 + 274 self.e_rho[e_name] = e_gamma[e_name][:w_max] / e_gamma[e_name][0] + 275 self.e_n_tauint[e_name] = np.cumsum(np.concatenate(([0.5], self.e_rho[e_name][1:]))) + 276 # Make sure no entry of tauint is smaller than 0.5 + 277 self.e_n_tauint[e_name][self.e_n_tauint[e_name] <= 0.5] = 0.5 + np.finfo(np.float64).eps + 278 # hep-lat/0306017 eq. 
(42) + 279 self.e_n_dtauint[e_name] = self.e_n_tauint[e_name] * 2 * np.sqrt(np.abs(np.arange(w_max) + 0.5 - self.e_n_tauint[e_name]) / e_N) + 280 self.e_n_dtauint[e_name][0] = 0.0 + 281 + 282 def _compute_drho(i): + 283 tmp = (self.e_rho[e_name][i + 1:w_max] + 284 + np.concatenate([self.e_rho[e_name][i - 1:None if i - (w_max - 1) // 2 <= 0 else (2 * i - (2 * w_max) // 2):-1], + 285 self.e_rho[e_name][1:max(1, w_max - 2 * i)]]) + 286 - 2 * self.e_rho[e_name][i] * self.e_rho[e_name][1:w_max - i]) + 287 self.e_drho[e_name][i] = np.sqrt(np.sum(tmp ** 2) / e_N) + 288 + 289 if self.tau_exp[e_name] > 0: + 290 _compute_drho(1) + 291 texp = self.tau_exp[e_name] + 292 # Critical slowing down analysis + 293 if w_max // 2 <= 1: + 294 raise Exception("Need at least 8 samples for tau_exp error analysis") + 295 for n in range(1, w_max // 2): + 296 _compute_drho(n + 1) + 297 if (self.e_rho[e_name][n] - self.N_sigma[e_name] * self.e_drho[e_name][n]) < 0 or n >= w_max // 2 - 2: + 298 # Bias correction hep-lat/0306017 eq. 
(49) included + 299 self.e_tauint[e_name] = self.e_n_tauint[e_name][n] * (1 + (2 * n + 1) / e_N) / (1 + 1 / e_N) + texp * np.abs(self.e_rho[e_name][n + 1]) # The absolute makes sure, that the tail contribution is always positive + 300 self.e_dtauint[e_name] = np.sqrt(self.e_n_dtauint[e_name][n] ** 2 + texp ** 2 * self.e_drho[e_name][n + 1] ** 2) + 301 # Error of tau_exp neglected so far, missing term: self.e_rho[e_name][n + 1] ** 2 * d_tau_exp ** 2 + 302 self.e_dvalue[e_name] = np.sqrt(2 * self.e_tauint[e_name] * e_gamma[e_name][0] * (1 + 1 / e_N) / e_N) + 303 self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt((n + 0.5) / e_N) + 304 self.e_windowsize[e_name] = n + 305 break + 306 else: + 307 if self.S[e_name] == 0.0: + 308 self.e_tauint[e_name] = 0.5 + 309 self.e_dtauint[e_name] = 0.0 + 310 self.e_dvalue[e_name] = np.sqrt(e_gamma[e_name][0] / (e_N - 1)) + 311 self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt(0.5 / e_N) + 312 self.e_windowsize[e_name] = 0 + 313 else: + 314 # Standard automatic windowing procedure + 315 tau = self.S[e_name] / np.log((2 * self.e_n_tauint[e_name][1:] + 1) / (2 * self.e_n_tauint[e_name][1:] - 1)) + 316 g_w = np.exp(- np.arange(1, len(tau) + 1) / tau) - tau / np.sqrt(np.arange(1, len(tau) + 1) * e_N) + 317 for n in range(1, w_max): + 318 if g_w[n - 1] < 0 or n >= w_max - 1: + 319 _compute_drho(n) + 320 self.e_tauint[e_name] = self.e_n_tauint[e_name][n] * (1 + (2 * n + 1) / e_N) / (1 + 1 / e_N) # Bias correction hep-lat/0306017 eq. 
(49) + 321 self.e_dtauint[e_name] = self.e_n_dtauint[e_name][n] + 322 self.e_dvalue[e_name] = np.sqrt(2 * self.e_tauint[e_name] * e_gamma[e_name][0] * (1 + 1 / e_N) / e_N) + 323 self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt((n + 0.5) / e_N) + 324 self.e_windowsize[e_name] = n + 325 break + 326 + 327 self._dvalue += self.e_dvalue[e_name] ** 2 + 328 self.ddvalue += (self.e_dvalue[e_name] * self.e_ddvalue[e_name]) ** 2 + 329 + 330 for e_name in self.cov_names: + 331 self.e_dvalue[e_name] = np.sqrt(self.covobs[e_name].errsq()) + 332 self.e_ddvalue[e_name] = 0 + 333 self._dvalue += self.e_dvalue[e_name]**2 + 334 + 335 self._dvalue = np.sqrt(self._dvalue) + 336 if self._dvalue == 0.0: + 337 self.ddvalue = 0.0 + 338 else: + 339 self.ddvalue = np.sqrt(self.ddvalue) / self._dvalue + 340 return 341 - 342 def _calc_gamma(self, deltas, idx, shape, w_max, fft, gapsize): - 343 """Calculate Gamma_{AA} from the deltas, which are defined on idx. - 344 idx is assumed to be a contiguous range (possibly with a stepsize != 1) - 345 - 346 Parameters - 347 ---------- - 348 deltas : list - 349 List of fluctuations - 350 idx : list - 351 List or range of configurations on which the deltas are defined. - 352 shape : int - 353 Number of configurations in idx. - 354 w_max : int - 355 Upper bound for the summation window. - 356 fft : bool - 357 determines whether the fft algorithm is used for the computation - 358 of the autocorrelation function. - 359 gapsize : int - 360 The target distance between two configurations. If longer distances - 361 are found in idx, the data is expanded. 
- 362 """ - 363 gamma = np.zeros(w_max) - 364 deltas = _expand_deltas(deltas, idx, shape, gapsize) - 365 new_shape = len(deltas) - 366 if fft: - 367 max_gamma = min(new_shape, w_max) - 368 # The padding for the fft has to be even - 369 padding = new_shape + max_gamma + (new_shape + max_gamma) % 2 - 370 gamma[:max_gamma] += np.fft.irfft(np.abs(np.fft.rfft(deltas, padding)) ** 2)[:max_gamma] - 371 else: - 372 for n in range(w_max): - 373 if new_shape - n >= 0: - 374 gamma[n] += deltas[0:new_shape - n].dot(deltas[n:new_shape]) - 375 - 376 return gamma + 342 gm = gamma_method + 343 + 344 def _calc_gamma(self, deltas, idx, shape, w_max, fft, gapsize): + 345 """Calculate Gamma_{AA} from the deltas, which are defined on idx. + 346 idx is assumed to be a contiguous range (possibly with a stepsize != 1) + 347 + 348 Parameters + 349 ---------- + 350 deltas : list + 351 List of fluctuations + 352 idx : list + 353 List or range of configurations on which the deltas are defined. + 354 shape : int + 355 Number of configurations in idx. + 356 w_max : int + 357 Upper bound for the summation window. + 358 fft : bool + 359 determines whether the fft algorithm is used for the computation + 360 of the autocorrelation function. + 361 gapsize : int + 362 The target distance between two configurations. If longer distances + 363 are found in idx, the data is expanded. + 364 """ + 365 gamma = np.zeros(w_max) + 366 deltas = _expand_deltas(deltas, idx, shape, gapsize) + 367 new_shape = len(deltas) + 368 if fft: + 369 max_gamma = min(new_shape, w_max) + 370 # The padding for the fft has to be even + 371 padding = new_shape + max_gamma + (new_shape + max_gamma) % 2 + 372 gamma[:max_gamma] += np.fft.irfft(np.abs(np.fft.rfft(deltas, padding)) ** 2)[:max_gamma] + 373 else: + 374 for n in range(w_max): + 375 if new_shape - n >= 0: + 376 gamma[n] += deltas[0:new_shape - n].dot(deltas[n:new_shape]) 377 - 378 def details(self, ens_content=True): - 379 """Output detailed properties of the Obs. 
- 380 - 381 Parameters - 382 ---------- - 383 ens_content : bool - 384 print details about the ensembles and replica if true. - 385 """ - 386 if self.tag is not None: - 387 print("Description:", self.tag) - 388 if not hasattr(self, 'e_dvalue'): - 389 print('Result\t %3.8e' % (self.value)) - 390 else: - 391 if self.value == 0.0: - 392 percentage = np.nan - 393 else: - 394 percentage = np.abs(self._dvalue / self.value) * 100 - 395 print('Result\t %3.8e +/- %3.8e +/- %3.8e (%3.3f%%)' % (self.value, self._dvalue, self.ddvalue, percentage)) - 396 if len(self.e_names) > 1: - 397 print(' Ensemble errors:') - 398 e_content = self.e_content - 399 for e_name in self.mc_names: - 400 gap = _determine_gap(self, e_content, e_name) - 401 - 402 if len(self.e_names) > 1: - 403 print('', e_name, '\t %3.6e +/- %3.6e' % (self.e_dvalue[e_name], self.e_ddvalue[e_name])) - 404 tau_string = " \N{GREEK SMALL LETTER TAU}_int\t " + _format_uncertainty(self.e_tauint[e_name], self.e_dtauint[e_name]) - 405 tau_string += f" in units of {gap} config" - 406 if gap > 1: - 407 tau_string += "s" - 408 if self.tau_exp[e_name] > 0: - 409 tau_string = f"{tau_string: <45}" + '\t(\N{GREEK SMALL LETTER TAU}_exp=%3.2f, N_\N{GREEK SMALL LETTER SIGMA}=%1.0i)' % (self.tau_exp[e_name], self.N_sigma[e_name]) - 410 else: - 411 tau_string = f"{tau_string: <45}" + '\t(S=%3.2f)' % (self.S[e_name]) - 412 print(tau_string) - 413 for e_name in self.cov_names: - 414 print('', e_name, '\t %3.8e' % (self.e_dvalue[e_name])) - 415 if ens_content is True: - 416 if len(self.e_names) == 1: - 417 print(self.N, 'samples in', len(self.e_names), 'ensemble:') - 418 else: - 419 print(self.N, 'samples in', len(self.e_names), 'ensembles:') - 420 my_string_list = [] - 421 for key, value in sorted(self.e_content.items()): - 422 if key not in self.covobs: - 423 my_string = ' ' + "\u00B7 Ensemble '" + key + "' " - 424 if len(value) == 1: - 425 my_string += f': {self.shape[value[0]]} configurations' - 426 if isinstance(self.idl[value[0]], 
range): - 427 my_string += f' (from {self.idl[value[0]].start} to {self.idl[value[0]][-1]}' + int(self.idl[value[0]].step != 1) * f' in steps of {self.idl[value[0]].step}' + ')' - 428 else: - 429 my_string += f' (irregular range from {self.idl[value[0]][0]} to {self.idl[value[0]][-1]})' - 430 else: - 431 sublist = [] - 432 for v in value: - 433 my_substring = ' ' + "\u00B7 Replicum '" + v[len(key) + 1:] + "' " - 434 my_substring += f': {self.shape[v]} configurations' - 435 if isinstance(self.idl[v], range): - 436 my_substring += f' (from {self.idl[v].start} to {self.idl[v][-1]}' + int(self.idl[v].step != 1) * f' in steps of {self.idl[v].step}' + ')' - 437 else: - 438 my_substring += f' (irregular range from {self.idl[v][0]} to {self.idl[v][-1]})' - 439 sublist.append(my_substring) - 440 - 441 my_string += '\n' + '\n'.join(sublist) - 442 else: - 443 my_string = ' ' + "\u00B7 Covobs '" + key + "' " - 444 my_string_list.append(my_string) - 445 print('\n'.join(my_string_list)) - 446 - 447 def reweight(self, weight): - 448 """Reweight the obs with given rewighting factors. - 449 - 450 Parameters - 451 ---------- - 452 weight : Obs - 453 Reweighting factor. An Observable that has to be defined on a superset of the - 454 configurations in obs[i].idl for all i. - 455 all_configs : bool - 456 if True, the reweighted observables are normalized by the average of - 457 the reweighting factor on all configurations in weight.idl and not - 458 on the configurations in obs[i].idl. Default False. - 459 """ - 460 return reweight(weight, [self])[0] - 461 - 462 def is_zero_within_error(self, sigma=1): - 463 """Checks whether the observable is zero within 'sigma' standard errors. - 464 - 465 Parameters - 466 ---------- - 467 sigma : int - 468 Number of standard errors used for the check. - 469 - 470 Works only properly when the gamma method was run. 
- 471 """ - 472 return self.is_zero() or np.abs(self.value) <= sigma * self._dvalue - 473 - 474 def is_zero(self, atol=1e-10): - 475 """Checks whether the observable is zero within a given tolerance. - 476 - 477 Parameters - 478 ---------- - 479 atol : float - 480 Absolute tolerance (for details see numpy documentation). - 481 """ - 482 return np.isclose(0.0, self.value, 1e-14, atol) and all(np.allclose(0.0, delta, 1e-14, atol) for delta in self.deltas.values()) and all(np.allclose(0.0, delta.errsq(), 1e-14, atol) for delta in self.covobs.values()) - 483 - 484 def plot_tauint(self, save=None): - 485 """Plot integrated autocorrelation time for each ensemble. - 486 - 487 Parameters - 488 ---------- - 489 save : str - 490 saves the figure to a file named 'save' if. - 491 """ - 492 if not hasattr(self, 'e_dvalue'): - 493 raise Exception('Run the gamma method first.') - 494 - 495 for e, e_name in enumerate(self.mc_names): - 496 fig = plt.figure() - 497 plt.xlabel(r'$W$') - 498 plt.ylabel(r'$\tau_\mathrm{int}$') - 499 length = int(len(self.e_n_tauint[e_name])) - 500 if self.tau_exp[e_name] > 0: - 501 base = self.e_n_tauint[e_name][self.e_windowsize[e_name]] - 502 x_help = np.arange(2 * self.tau_exp[e_name]) - 503 y_help = (x_help + 1) * np.abs(self.e_rho[e_name][self.e_windowsize[e_name] + 1]) * (1 - x_help / (2 * (2 * self.tau_exp[e_name] - 1))) + base - 504 x_arr = np.arange(self.e_windowsize[e_name] + 1, self.e_windowsize[e_name] + 1 + 2 * self.tau_exp[e_name]) - 505 plt.plot(x_arr, y_help, 'C' + str(e), linewidth=1, ls='--', marker=',') - 506 plt.errorbar([self.e_windowsize[e_name] + 2 * self.tau_exp[e_name]], [self.e_tauint[e_name]], - 507 yerr=[self.e_dtauint[e_name]], fmt='C' + str(e), linewidth=1, capsize=2, marker='o', mfc=plt.rcParams['axes.facecolor']) - 508 xmax = self.e_windowsize[e_name] + 2 * self.tau_exp[e_name] + 1.5 - 509 label = e_name + r', $\tau_\mathrm{exp}$=' + str(np.around(self.tau_exp[e_name], decimals=2)) - 510 else: - 511 label = e_name + ', 
S=' + str(np.around(self.S[e_name], decimals=2)) - 512 xmax = max(10.5, 2 * self.e_windowsize[e_name] - 0.5) - 513 - 514 plt.errorbar(np.arange(length)[:int(xmax) + 1], self.e_n_tauint[e_name][:int(xmax) + 1], yerr=self.e_n_dtauint[e_name][:int(xmax) + 1], linewidth=1, capsize=2, label=label) - 515 plt.axvline(x=self.e_windowsize[e_name], color='C' + str(e), alpha=0.5, marker=',', ls='--') - 516 plt.legend() - 517 plt.xlim(-0.5, xmax) - 518 ylim = plt.ylim() - 519 plt.ylim(bottom=0.0, top=max(1.0, ylim[1])) - 520 plt.draw() - 521 if save: - 522 fig.savefig(save + "_" + str(e)) - 523 - 524 def plot_rho(self, save=None): - 525 """Plot normalized autocorrelation function time for each ensemble. - 526 - 527 Parameters - 528 ---------- - 529 save : str - 530 saves the figure to a file named 'save' if. - 531 """ - 532 if not hasattr(self, 'e_dvalue'): - 533 raise Exception('Run the gamma method first.') - 534 for e, e_name in enumerate(self.mc_names): - 535 fig = plt.figure() - 536 plt.xlabel('W') - 537 plt.ylabel('rho') - 538 length = int(len(self.e_drho[e_name])) - 539 plt.errorbar(np.arange(length), self.e_rho[e_name][:length], yerr=self.e_drho[e_name][:], linewidth=1, capsize=2) - 540 plt.axvline(x=self.e_windowsize[e_name], color='r', alpha=0.25, ls='--', marker=',') - 541 if self.tau_exp[e_name] > 0: - 542 plt.plot([self.e_windowsize[e_name] + 1, self.e_windowsize[e_name] + 1 + 2 * self.tau_exp[e_name]], - 543 [self.e_rho[e_name][self.e_windowsize[e_name] + 1], 0], 'k-', lw=1) - 544 xmax = self.e_windowsize[e_name] + 2 * self.tau_exp[e_name] + 1.5 - 545 plt.title('Rho ' + e_name + r', tau\_exp=' + str(np.around(self.tau_exp[e_name], decimals=2))) - 546 else: - 547 xmax = max(10.5, 2 * self.e_windowsize[e_name] - 0.5) - 548 plt.title('Rho ' + e_name + ', S=' + str(np.around(self.S[e_name], decimals=2))) - 549 plt.plot([-0.5, xmax], [0, 0], 'k--', lw=1) - 550 plt.xlim(-0.5, xmax) - 551 plt.draw() - 552 if save: - 553 fig.savefig(save + "_" + str(e)) - 554 - 555 def 
plot_rep_dist(self): - 556 """Plot replica distribution for each ensemble with more than one replicum.""" - 557 if not hasattr(self, 'e_dvalue'): - 558 raise Exception('Run the gamma method first.') - 559 for e, e_name in enumerate(self.mc_names): - 560 if len(self.e_content[e_name]) == 1: - 561 print('No replica distribution for a single replicum (', e_name, ')') - 562 continue - 563 r_length = [] - 564 sub_r_mean = 0 - 565 for r, r_name in enumerate(self.e_content[e_name]): - 566 r_length.append(len(self.deltas[r_name])) - 567 sub_r_mean += self.shape[r_name] * self.r_values[r_name] - 568 e_N = np.sum(r_length) - 569 sub_r_mean /= e_N - 570 arr = np.zeros(len(self.e_content[e_name])) - 571 for r, r_name in enumerate(self.e_content[e_name]): - 572 arr[r] = (self.r_values[r_name] - sub_r_mean) / (self.e_dvalue[e_name] * np.sqrt(e_N / self.shape[r_name] - 1)) - 573 plt.hist(arr, rwidth=0.8, bins=len(self.e_content[e_name])) - 574 plt.title('Replica distribution' + e_name + ' (mean=0, var=1)') - 575 plt.draw() - 576 - 577 def plot_history(self, expand=True): - 578 """Plot derived Monte Carlo history for each ensemble - 579 - 580 Parameters - 581 ---------- - 582 expand : bool - 583 show expanded history for irregular Monte Carlo chains (default: True). 
- 584 """ - 585 for e, e_name in enumerate(self.mc_names): - 586 plt.figure() - 587 r_length = [] - 588 tmp = [] - 589 tmp_expanded = [] - 590 for r, r_name in enumerate(self.e_content[e_name]): - 591 tmp.append(self.deltas[r_name] + self.r_values[r_name]) - 592 if expand: - 593 tmp_expanded.append(_expand_deltas(self.deltas[r_name], list(self.idl[r_name]), self.shape[r_name], 1) + self.r_values[r_name]) - 594 r_length.append(len(tmp_expanded[-1])) - 595 else: - 596 r_length.append(len(tmp[-1])) - 597 e_N = np.sum(r_length) - 598 x = np.arange(e_N) - 599 y_test = np.concatenate(tmp, axis=0) - 600 if expand: - 601 y = np.concatenate(tmp_expanded, axis=0) - 602 else: - 603 y = y_test - 604 plt.errorbar(x, y, fmt='.', markersize=3) - 605 plt.xlim(-0.5, e_N - 0.5) - 606 plt.title(e_name + f'\nskew: {skew(y_test):.3f} (p={skewtest(y_test).pvalue:.3f}), kurtosis: {kurtosis(y_test):.3f} (p={kurtosistest(y_test).pvalue:.3f})') - 607 plt.draw() - 608 - 609 def plot_piechart(self, save=None): - 610 """Plot piechart which shows the fractional contribution of each - 611 ensemble to the error and returns a dictionary containing the fractions. - 612 - 613 Parameters - 614 ---------- - 615 save : str - 616 saves the figure to a file named 'save' if. - 617 """ - 618 if not hasattr(self, 'e_dvalue'): - 619 raise Exception('Run the gamma method first.') - 620 if np.isclose(0.0, self._dvalue, atol=1e-15): - 621 raise Exception('Error is 0.0') - 622 labels = self.e_names - 623 sizes = [self.e_dvalue[name] ** 2 for name in labels] / self._dvalue ** 2 - 624 fig1, ax1 = plt.subplots() - 625 ax1.pie(sizes, labels=labels, startangle=90, normalize=True) - 626 ax1.axis('equal') - 627 plt.draw() - 628 if save: - 629 fig1.savefig(save) - 630 - 631 return dict(zip(labels, sizes)) + 378 return gamma + 379 + 380 def details(self, ens_content=True): + 381 """Output detailed properties of the Obs. 
+ 382 + 383 Parameters + 384 ---------- + 385 ens_content : bool + 386 print details about the ensembles and replica if true. + 387 """ + 388 if self.tag is not None: + 389 print("Description:", self.tag) + 390 if not hasattr(self, 'e_dvalue'): + 391 print('Result\t %3.8e' % (self.value)) + 392 else: + 393 if self.value == 0.0: + 394 percentage = np.nan + 395 else: + 396 percentage = np.abs(self._dvalue / self.value) * 100 + 397 print('Result\t %3.8e +/- %3.8e +/- %3.8e (%3.3f%%)' % (self.value, self._dvalue, self.ddvalue, percentage)) + 398 if len(self.e_names) > 1: + 399 print(' Ensemble errors:') + 400 e_content = self.e_content + 401 for e_name in self.mc_names: + 402 gap = _determine_gap(self, e_content, e_name) + 403 + 404 if len(self.e_names) > 1: + 405 print('', e_name, '\t %3.6e +/- %3.6e' % (self.e_dvalue[e_name], self.e_ddvalue[e_name])) + 406 tau_string = " \N{GREEK SMALL LETTER TAU}_int\t " + _format_uncertainty(self.e_tauint[e_name], self.e_dtauint[e_name]) + 407 tau_string += f" in units of {gap} config" + 408 if gap > 1: + 409 tau_string += "s" + 410 if self.tau_exp[e_name] > 0: + 411 tau_string = f"{tau_string: <45}" + '\t(\N{GREEK SMALL LETTER TAU}_exp=%3.2f, N_\N{GREEK SMALL LETTER SIGMA}=%1.0i)' % (self.tau_exp[e_name], self.N_sigma[e_name]) + 412 else: + 413 tau_string = f"{tau_string: <45}" + '\t(S=%3.2f)' % (self.S[e_name]) + 414 print(tau_string) + 415 for e_name in self.cov_names: + 416 print('', e_name, '\t %3.8e' % (self.e_dvalue[e_name])) + 417 if ens_content is True: + 418 if len(self.e_names) == 1: + 419 print(self.N, 'samples in', len(self.e_names), 'ensemble:') + 420 else: + 421 print(self.N, 'samples in', len(self.e_names), 'ensembles:') + 422 my_string_list = [] + 423 for key, value in sorted(self.e_content.items()): + 424 if key not in self.covobs: + 425 my_string = ' ' + "\u00B7 Ensemble '" + key + "' " + 426 if len(value) == 1: + 427 my_string += f': {self.shape[value[0]]} configurations' + 428 if isinstance(self.idl[value[0]], 
range): + 429 my_string += f' (from {self.idl[value[0]].start} to {self.idl[value[0]][-1]}' + int(self.idl[value[0]].step != 1) * f' in steps of {self.idl[value[0]].step}' + ')' + 430 else: + 431 my_string += f' (irregular range from {self.idl[value[0]][0]} to {self.idl[value[0]][-1]})' + 432 else: + 433 sublist = [] + 434 for v in value: + 435 my_substring = ' ' + "\u00B7 Replicum '" + v[len(key) + 1:] + "' " + 436 my_substring += f': {self.shape[v]} configurations' + 437 if isinstance(self.idl[v], range): + 438 my_substring += f' (from {self.idl[v].start} to {self.idl[v][-1]}' + int(self.idl[v].step != 1) * f' in steps of {self.idl[v].step}' + ')' + 439 else: + 440 my_substring += f' (irregular range from {self.idl[v][0]} to {self.idl[v][-1]})' + 441 sublist.append(my_substring) + 442 + 443 my_string += '\n' + '\n'.join(sublist) + 444 else: + 445 my_string = ' ' + "\u00B7 Covobs '" + key + "' " + 446 my_string_list.append(my_string) + 447 print('\n'.join(my_string_list)) + 448 + 449 def reweight(self, weight): + 450 """Reweight the obs with given rewighting factors. + 451 + 452 Parameters + 453 ---------- + 454 weight : Obs + 455 Reweighting factor. An Observable that has to be defined on a superset of the + 456 configurations in obs[i].idl for all i. + 457 all_configs : bool + 458 if True, the reweighted observables are normalized by the average of + 459 the reweighting factor on all configurations in weight.idl and not + 460 on the configurations in obs[i].idl. Default False. + 461 """ + 462 return reweight(weight, [self])[0] + 463 + 464 def is_zero_within_error(self, sigma=1): + 465 """Checks whether the observable is zero within 'sigma' standard errors. + 466 + 467 Parameters + 468 ---------- + 469 sigma : int + 470 Number of standard errors used for the check. + 471 + 472 Works only properly when the gamma method was run. 
+ 473 """ + 474 return self.is_zero() or np.abs(self.value) <= sigma * self._dvalue + 475 + 476 def is_zero(self, atol=1e-10): + 477 """Checks whether the observable is zero within a given tolerance. + 478 + 479 Parameters + 480 ---------- + 481 atol : float + 482 Absolute tolerance (for details see numpy documentation). + 483 """ + 484 return np.isclose(0.0, self.value, 1e-14, atol) and all(np.allclose(0.0, delta, 1e-14, atol) for delta in self.deltas.values()) and all(np.allclose(0.0, delta.errsq(), 1e-14, atol) for delta in self.covobs.values()) + 485 + 486 def plot_tauint(self, save=None): + 487 """Plot integrated autocorrelation time for each ensemble. + 488 + 489 Parameters + 490 ---------- + 491 save : str + 492 saves the figure to a file named 'save' if. + 493 """ + 494 if not hasattr(self, 'e_dvalue'): + 495 raise Exception('Run the gamma method first.') + 496 + 497 for e, e_name in enumerate(self.mc_names): + 498 fig = plt.figure() + 499 plt.xlabel(r'$W$') + 500 plt.ylabel(r'$\tau_\mathrm{int}$') + 501 length = int(len(self.e_n_tauint[e_name])) + 502 if self.tau_exp[e_name] > 0: + 503 base = self.e_n_tauint[e_name][self.e_windowsize[e_name]] + 504 x_help = np.arange(2 * self.tau_exp[e_name]) + 505 y_help = (x_help + 1) * np.abs(self.e_rho[e_name][self.e_windowsize[e_name] + 1]) * (1 - x_help / (2 * (2 * self.tau_exp[e_name] - 1))) + base + 506 x_arr = np.arange(self.e_windowsize[e_name] + 1, self.e_windowsize[e_name] + 1 + 2 * self.tau_exp[e_name]) + 507 plt.plot(x_arr, y_help, 'C' + str(e), linewidth=1, ls='--', marker=',') + 508 plt.errorbar([self.e_windowsize[e_name] + 2 * self.tau_exp[e_name]], [self.e_tauint[e_name]], + 509 yerr=[self.e_dtauint[e_name]], fmt='C' + str(e), linewidth=1, capsize=2, marker='o', mfc=plt.rcParams['axes.facecolor']) + 510 xmax = self.e_windowsize[e_name] + 2 * self.tau_exp[e_name] + 1.5 + 511 label = e_name + r', $\tau_\mathrm{exp}$=' + str(np.around(self.tau_exp[e_name], decimals=2)) + 512 else: + 513 label = e_name + ', 
S=' + str(np.around(self.S[e_name], decimals=2)) + 514 xmax = max(10.5, 2 * self.e_windowsize[e_name] - 0.5) + 515 + 516 plt.errorbar(np.arange(length)[:int(xmax) + 1], self.e_n_tauint[e_name][:int(xmax) + 1], yerr=self.e_n_dtauint[e_name][:int(xmax) + 1], linewidth=1, capsize=2, label=label) + 517 plt.axvline(x=self.e_windowsize[e_name], color='C' + str(e), alpha=0.5, marker=',', ls='--') + 518 plt.legend() + 519 plt.xlim(-0.5, xmax) + 520 ylim = plt.ylim() + 521 plt.ylim(bottom=0.0, top=max(1.0, ylim[1])) + 522 plt.draw() + 523 if save: + 524 fig.savefig(save + "_" + str(e)) + 525 + 526 def plot_rho(self, save=None): + 527 """Plot normalized autocorrelation function time for each ensemble. + 528 + 529 Parameters + 530 ---------- + 531 save : str + 532 saves the figure to a file named 'save' if. + 533 """ + 534 if not hasattr(self, 'e_dvalue'): + 535 raise Exception('Run the gamma method first.') + 536 for e, e_name in enumerate(self.mc_names): + 537 fig = plt.figure() + 538 plt.xlabel('W') + 539 plt.ylabel('rho') + 540 length = int(len(self.e_drho[e_name])) + 541 plt.errorbar(np.arange(length), self.e_rho[e_name][:length], yerr=self.e_drho[e_name][:], linewidth=1, capsize=2) + 542 plt.axvline(x=self.e_windowsize[e_name], color='r', alpha=0.25, ls='--', marker=',') + 543 if self.tau_exp[e_name] > 0: + 544 plt.plot([self.e_windowsize[e_name] + 1, self.e_windowsize[e_name] + 1 + 2 * self.tau_exp[e_name]], + 545 [self.e_rho[e_name][self.e_windowsize[e_name] + 1], 0], 'k-', lw=1) + 546 xmax = self.e_windowsize[e_name] + 2 * self.tau_exp[e_name] + 1.5 + 547 plt.title('Rho ' + e_name + r', tau\_exp=' + str(np.around(self.tau_exp[e_name], decimals=2))) + 548 else: + 549 xmax = max(10.5, 2 * self.e_windowsize[e_name] - 0.5) + 550 plt.title('Rho ' + e_name + ', S=' + str(np.around(self.S[e_name], decimals=2))) + 551 plt.plot([-0.5, xmax], [0, 0], 'k--', lw=1) + 552 plt.xlim(-0.5, xmax) + 553 plt.draw() + 554 if save: + 555 fig.savefig(save + "_" + str(e)) + 556 + 557 def 
plot_rep_dist(self): + 558 """Plot replica distribution for each ensemble with more than one replicum.""" + 559 if not hasattr(self, 'e_dvalue'): + 560 raise Exception('Run the gamma method first.') + 561 for e, e_name in enumerate(self.mc_names): + 562 if len(self.e_content[e_name]) == 1: + 563 print('No replica distribution for a single replicum (', e_name, ')') + 564 continue + 565 r_length = [] + 566 sub_r_mean = 0 + 567 for r, r_name in enumerate(self.e_content[e_name]): + 568 r_length.append(len(self.deltas[r_name])) + 569 sub_r_mean += self.shape[r_name] * self.r_values[r_name] + 570 e_N = np.sum(r_length) + 571 sub_r_mean /= e_N + 572 arr = np.zeros(len(self.e_content[e_name])) + 573 for r, r_name in enumerate(self.e_content[e_name]): + 574 arr[r] = (self.r_values[r_name] - sub_r_mean) / (self.e_dvalue[e_name] * np.sqrt(e_N / self.shape[r_name] - 1)) + 575 plt.hist(arr, rwidth=0.8, bins=len(self.e_content[e_name])) + 576 plt.title('Replica distribution' + e_name + ' (mean=0, var=1)') + 577 plt.draw() + 578 + 579 def plot_history(self, expand=True): + 580 """Plot derived Monte Carlo history for each ensemble + 581 + 582 Parameters + 583 ---------- + 584 expand : bool + 585 show expanded history for irregular Monte Carlo chains (default: True). 
+ 586 """ + 587 for e, e_name in enumerate(self.mc_names): + 588 plt.figure() + 589 r_length = [] + 590 tmp = [] + 591 tmp_expanded = [] + 592 for r, r_name in enumerate(self.e_content[e_name]): + 593 tmp.append(self.deltas[r_name] + self.r_values[r_name]) + 594 if expand: + 595 tmp_expanded.append(_expand_deltas(self.deltas[r_name], list(self.idl[r_name]), self.shape[r_name], 1) + self.r_values[r_name]) + 596 r_length.append(len(tmp_expanded[-1])) + 597 else: + 598 r_length.append(len(tmp[-1])) + 599 e_N = np.sum(r_length) + 600 x = np.arange(e_N) + 601 y_test = np.concatenate(tmp, axis=0) + 602 if expand: + 603 y = np.concatenate(tmp_expanded, axis=0) + 604 else: + 605 y = y_test + 606 plt.errorbar(x, y, fmt='.', markersize=3) + 607 plt.xlim(-0.5, e_N - 0.5) + 608 plt.title(e_name + f'\nskew: {skew(y_test):.3f} (p={skewtest(y_test).pvalue:.3f}), kurtosis: {kurtosis(y_test):.3f} (p={kurtosistest(y_test).pvalue:.3f})') + 609 plt.draw() + 610 + 611 def plot_piechart(self, save=None): + 612 """Plot piechart which shows the fractional contribution of each + 613 ensemble to the error and returns a dictionary containing the fractions. + 614 + 615 Parameters + 616 ---------- + 617 save : str + 618 saves the figure to a file named 'save' if. + 619 """ + 620 if not hasattr(self, 'e_dvalue'): + 621 raise Exception('Run the gamma method first.') + 622 if np.isclose(0.0, self._dvalue, atol=1e-15): + 623 raise Exception('Error is 0.0') + 624 labels = self.e_names + 625 sizes = [self.e_dvalue[name] ** 2 for name in labels] / self._dvalue ** 2 + 626 fig1, ax1 = plt.subplots() + 627 ax1.pie(sizes, labels=labels, startangle=90, normalize=True) + 628 ax1.axis('equal') + 629 plt.draw() + 630 if save: + 631 fig1.savefig(save) 632 - 633 def dump(self, filename, datatype="json.gz", description="", **kwargs): - 634 """Dump the Obs to a file 'name' of chosen format. - 635 - 636 Parameters - 637 ---------- - 638 filename : str - 639 name of the file to be saved. 
- 640 datatype : str - 641 Format of the exported file. Supported formats include - 642 "json.gz" and "pickle" - 643 description : str - 644 Description for output file, only relevant for json.gz format. - 645 path : str - 646 specifies a custom path for the file (default '.') - 647 """ - 648 if 'path' in kwargs: - 649 file_name = kwargs.get('path') + '/' + filename - 650 else: - 651 file_name = filename - 652 - 653 if datatype == "json.gz": - 654 from .input.json import dump_to_json - 655 dump_to_json([self], file_name, description=description) - 656 elif datatype == "pickle": - 657 with open(file_name + '.p', 'wb') as fb: - 658 pickle.dump(self, fb) - 659 else: - 660 raise Exception("Unknown datatype " + str(datatype)) - 661 - 662 def export_jackknife(self): - 663 """Export jackknife samples from the Obs - 664 - 665 Returns - 666 ------- - 667 numpy.ndarray - 668 Returns a numpy array of length N + 1 where N is the number of samples - 669 for the given ensemble and replicum. The zeroth entry of the array contains - 670 the mean value of the Obs, entries 1 to N contain the N jackknife samples - 671 derived from the Obs. The current implementation only works for observables - 672 defined on exactly one ensemble and replicum. The derived jackknife samples - 673 should agree with samples from a full jackknife analysis up to O(1/N). 
- 674 """ - 675 - 676 if len(self.names) != 1: - 677 raise Exception("'export_jackknife' is only implemented for Obs defined on one ensemble and replicum.") - 678 - 679 name = self.names[0] - 680 full_data = self.deltas[name] + self.r_values[name] - 681 n = full_data.size - 682 mean = self.value - 683 tmp_jacks = np.zeros(n + 1) - 684 tmp_jacks[0] = mean - 685 tmp_jacks[1:] = (n * mean - full_data) / (n - 1) - 686 return tmp_jacks - 687 - 688 def export_bootstrap(self, samples=500, random_numbers=None, save_rng=None): - 689 """Export bootstrap samples from the Obs - 690 - 691 Parameters - 692 ---------- - 693 samples : int - 694 Number of bootstrap samples to generate. - 695 random_numbers : np.ndarray - 696 Array of shape (samples, length) containing the random numbers to generate the bootstrap samples. - 697 If not provided the bootstrap samples are generated bashed on the md5 hash of the enesmble name. - 698 save_rng : str - 699 Save the random numbers to a file if a path is specified. - 700 - 701 Returns - 702 ------- - 703 numpy.ndarray - 704 Returns a numpy array of length N + 1 where N is the number of samples - 705 for the given ensemble and replicum. The zeroth entry of the array contains - 706 the mean value of the Obs, entries 1 to N contain the N import_bootstrap samples - 707 derived from the Obs. The current implementation only works for observables - 708 defined on exactly one ensemble and replicum. The derived bootstrap samples - 709 should agree with samples from a full bootstrap analysis up to O(1/N). 
- 710 """ - 711 if len(self.names) != 1: - 712 raise Exception("'export_boostrap' is only implemented for Obs defined on one ensemble and replicum.") - 713 - 714 name = self.names[0] - 715 length = self.N - 716 - 717 if random_numbers is None: - 718 seed = int(hashlib.md5(name.encode()).hexdigest(), 16) & 0xFFFFFFFF - 719 rng = np.random.default_rng(seed) - 720 random_numbers = rng.integers(0, length, size=(samples, length)) - 721 - 722 if save_rng is not None: - 723 np.savetxt(save_rng, random_numbers, fmt='%i') - 724 - 725 proj = np.vstack([np.bincount(o, minlength=length) for o in random_numbers]) / length - 726 ret = np.zeros(samples + 1) - 727 ret[0] = self.value - 728 ret[1:] = proj @ (self.deltas[name] + self.r_values[name]) - 729 return ret - 730 - 731 def __float__(self): - 732 return float(self.value) - 733 - 734 def __repr__(self): - 735 return 'Obs[' + str(self) + ']' - 736 - 737 def __str__(self): - 738 return _format_uncertainty(self.value, self._dvalue) - 739 - 740 def __format__(self, format_type): - 741 if format_type == "": - 742 significance = 2 - 743 else: - 744 significance = int(float(format_type.replace("+", "").replace("-", ""))) - 745 my_str = _format_uncertainty(self.value, self._dvalue, - 746 significance=significance) - 747 for char in ["+", " "]: - 748 if format_type.startswith(char): - 749 if my_str[0] != "-": - 750 my_str = char + my_str - 751 return my_str - 752 - 753 def __hash__(self): - 754 hash_tuple = (np.array([self.value]).astype(np.float32).data.tobytes(),) - 755 hash_tuple += tuple([o.astype(np.float32).data.tobytes() for o in self.deltas.values()]) - 756 hash_tuple += tuple([np.array([o.errsq()]).astype(np.float32).data.tobytes() for o in self.covobs.values()]) - 757 hash_tuple += tuple([o.encode() for o in self.names]) - 758 m = hashlib.md5() - 759 [m.update(o) for o in hash_tuple] - 760 return int(m.hexdigest(), 16) & 0xFFFFFFFF - 761 - 762 # Overload comparisons - 763 def __lt__(self, other): - 764 return self.value < 
other - 765 - 766 def __le__(self, other): - 767 return self.value <= other - 768 - 769 def __gt__(self, other): - 770 return self.value > other - 771 - 772 def __ge__(self, other): - 773 return self.value >= other - 774 - 775 def __eq__(self, other): - 776 if other is None: - 777 return False - 778 return (self - other).is_zero() - 779 - 780 # Overload math operations - 781 def __add__(self, y): - 782 if isinstance(y, Obs): - 783 return derived_observable(lambda x, **kwargs: x[0] + x[1], [self, y], man_grad=[1, 1]) - 784 else: - 785 if isinstance(y, np.ndarray): - 786 return np.array([self + o for o in y]) - 787 elif isinstance(y, complex): - 788 return CObs(self, 0) + y - 789 elif y.__class__.__name__ in ['Corr', 'CObs']: - 790 return NotImplemented - 791 else: - 792 return derived_observable(lambda x, **kwargs: x[0] + y, [self], man_grad=[1]) - 793 - 794 def __radd__(self, y): - 795 return self + y - 796 - 797 def __mul__(self, y): - 798 if isinstance(y, Obs): - 799 return derived_observable(lambda x, **kwargs: x[0] * x[1], [self, y], man_grad=[y.value, self.value]) - 800 else: - 801 if isinstance(y, np.ndarray): - 802 return np.array([self * o for o in y]) - 803 elif isinstance(y, complex): - 804 return CObs(self * y.real, self * y.imag) - 805 elif y.__class__.__name__ in ['Corr', 'CObs']: - 806 return NotImplemented - 807 else: - 808 return derived_observable(lambda x, **kwargs: x[0] * y, [self], man_grad=[y]) - 809 - 810 def __rmul__(self, y): - 811 return self * y - 812 - 813 def __sub__(self, y): - 814 if isinstance(y, Obs): - 815 return derived_observable(lambda x, **kwargs: x[0] - x[1], [self, y], man_grad=[1, -1]) - 816 else: - 817 if isinstance(y, np.ndarray): - 818 return np.array([self - o for o in y]) - 819 elif y.__class__.__name__ in ['Corr', 'CObs']: - 820 return NotImplemented - 821 else: - 822 return derived_observable(lambda x, **kwargs: x[0] - y, [self], man_grad=[1]) - 823 - 824 def __rsub__(self, y): - 825 return -1 * (self - y) - 826 - 827 
def __pos__(self): - 828 return self - 829 - 830 def __neg__(self): - 831 return -1 * self - 832 - 833 def __truediv__(self, y): - 834 if isinstance(y, Obs): - 835 return derived_observable(lambda x, **kwargs: x[0] / x[1], [self, y], man_grad=[1 / y.value, - self.value / y.value ** 2]) - 836 else: - 837 if isinstance(y, np.ndarray): - 838 return np.array([self / o for o in y]) - 839 elif y.__class__.__name__ in ['Corr', 'CObs']: - 840 return NotImplemented - 841 else: - 842 return derived_observable(lambda x, **kwargs: x[0] / y, [self], man_grad=[1 / y]) - 843 - 844 def __rtruediv__(self, y): - 845 if isinstance(y, Obs): - 846 return derived_observable(lambda x, **kwargs: x[0] / x[1], [y, self], man_grad=[1 / self.value, - y.value / self.value ** 2]) - 847 else: - 848 if isinstance(y, np.ndarray): - 849 return np.array([o / self for o in y]) - 850 elif y.__class__.__name__ in ['Corr', 'CObs']: - 851 return NotImplemented - 852 else: - 853 return derived_observable(lambda x, **kwargs: y / x[0], [self], man_grad=[-y / self.value ** 2]) - 854 - 855 def __pow__(self, y): - 856 if isinstance(y, Obs): - 857 return derived_observable(lambda x: x[0] ** x[1], [self, y]) - 858 else: - 859 return derived_observable(lambda x: x[0] ** y, [self]) - 860 - 861 def __rpow__(self, y): - 862 if isinstance(y, Obs): - 863 return derived_observable(lambda x: x[0] ** x[1], [y, self]) - 864 else: - 865 return derived_observable(lambda x: y ** x[0], [self]) - 866 - 867 def __abs__(self): - 868 return derived_observable(lambda x: anp.abs(x[0]), [self]) - 869 - 870 # Overload numpy functions - 871 def sqrt(self): - 872 return derived_observable(lambda x, **kwargs: np.sqrt(x[0]), [self], man_grad=[1 / 2 / np.sqrt(self.value)]) - 873 - 874 def log(self): - 875 return derived_observable(lambda x, **kwargs: np.log(x[0]), [self], man_grad=[1 / self.value]) - 876 - 877 def exp(self): - 878 return derived_observable(lambda x, **kwargs: np.exp(x[0]), [self], man_grad=[np.exp(self.value)]) - 879 - 
880 def sin(self): - 881 return derived_observable(lambda x, **kwargs: np.sin(x[0]), [self], man_grad=[np.cos(self.value)]) - 882 - 883 def cos(self): - 884 return derived_observable(lambda x, **kwargs: np.cos(x[0]), [self], man_grad=[-np.sin(self.value)]) - 885 - 886 def tan(self): - 887 return derived_observable(lambda x, **kwargs: np.tan(x[0]), [self], man_grad=[1 / np.cos(self.value) ** 2]) - 888 - 889 def arcsin(self): - 890 return derived_observable(lambda x: anp.arcsin(x[0]), [self]) - 891 - 892 def arccos(self): - 893 return derived_observable(lambda x: anp.arccos(x[0]), [self]) - 894 - 895 def arctan(self): - 896 return derived_observable(lambda x: anp.arctan(x[0]), [self]) - 897 - 898 def sinh(self): - 899 return derived_observable(lambda x, **kwargs: np.sinh(x[0]), [self], man_grad=[np.cosh(self.value)]) - 900 - 901 def cosh(self): - 902 return derived_observable(lambda x, **kwargs: np.cosh(x[0]), [self], man_grad=[np.sinh(self.value)]) - 903 - 904 def tanh(self): - 905 return derived_observable(lambda x, **kwargs: np.tanh(x[0]), [self], man_grad=[1 / np.cosh(self.value) ** 2]) - 906 - 907 def arcsinh(self): - 908 return derived_observable(lambda x: anp.arcsinh(x[0]), [self]) - 909 - 910 def arccosh(self): - 911 return derived_observable(lambda x: anp.arccosh(x[0]), [self]) - 912 - 913 def arctanh(self): - 914 return derived_observable(lambda x: anp.arctanh(x[0]), [self]) - 915 - 916 - 917class CObs: - 918 """Class for a complex valued observable.""" - 919 __slots__ = ['_real', '_imag', 'tag'] - 920 - 921 def __init__(self, real, imag=0.0): - 922 self._real = real - 923 self._imag = imag - 924 self.tag = None - 925 - 926 @property - 927 def real(self): - 928 return self._real - 929 - 930 @property - 931 def imag(self): - 932 return self._imag - 933 - 934 def gamma_method(self, **kwargs): - 935 """Executes the gamma_method for the real and the imaginary part.""" - 936 if isinstance(self.real, Obs): - 937 self.real.gamma_method(**kwargs) - 938 if 
isinstance(self.imag, Obs): - 939 self.imag.gamma_method(**kwargs) - 940 - 941 def is_zero(self): - 942 """Checks whether both real and imaginary part are zero within machine precision.""" - 943 return self.real == 0.0 and self.imag == 0.0 - 944 - 945 def conjugate(self): - 946 return CObs(self.real, -self.imag) - 947 - 948 def __add__(self, other): - 949 if isinstance(other, np.ndarray): - 950 return other + self - 951 elif hasattr(other, 'real') and hasattr(other, 'imag'): - 952 return CObs(self.real + other.real, - 953 self.imag + other.imag) - 954 else: - 955 return CObs(self.real + other, self.imag) - 956 - 957 def __radd__(self, y): - 958 return self + y - 959 - 960 def __sub__(self, other): - 961 if isinstance(other, np.ndarray): - 962 return -1 * (other - self) - 963 elif hasattr(other, 'real') and hasattr(other, 'imag'): - 964 return CObs(self.real - other.real, self.imag - other.imag) - 965 else: - 966 return CObs(self.real - other, self.imag) - 967 - 968 def __rsub__(self, other): - 969 return -1 * (self - other) - 970 - 971 def __mul__(self, other): - 972 if isinstance(other, np.ndarray): - 973 return other * self - 974 elif hasattr(other, 'real') and hasattr(other, 'imag'): - 975 if all(isinstance(i, Obs) for i in [self.real, self.imag, other.real, other.imag]): - 976 return CObs(derived_observable(lambda x, **kwargs: x[0] * x[1] - x[2] * x[3], - 977 [self.real, other.real, self.imag, other.imag], - 978 man_grad=[other.real.value, self.real.value, -other.imag.value, -self.imag.value]), - 979 derived_observable(lambda x, **kwargs: x[2] * x[1] + x[0] * x[3], - 980 [self.real, other.real, self.imag, other.imag], - 981 man_grad=[other.imag.value, self.imag.value, other.real.value, self.real.value])) - 982 elif getattr(other, 'imag', 0) != 0: - 983 return CObs(self.real * other.real - self.imag * other.imag, - 984 self.imag * other.real + self.real * other.imag) - 985 else: - 986 return CObs(self.real * other.real, self.imag * other.real) - 987 else: - 988 
return CObs(self.real * other, self.imag * other) - 989 - 990 def __rmul__(self, other): - 991 return self * other - 992 - 993 def __truediv__(self, other): - 994 if isinstance(other, np.ndarray): - 995 return 1 / (other / self) - 996 elif hasattr(other, 'real') and hasattr(other, 'imag'): - 997 r = other.real ** 2 + other.imag ** 2 - 998 return CObs((self.real * other.real + self.imag * other.imag) / r, (self.imag * other.real - self.real * other.imag) / r) - 999 else: -1000 return CObs(self.real / other, self.imag / other) -1001 -1002 def __rtruediv__(self, other): -1003 r = self.real ** 2 + self.imag ** 2 -1004 if hasattr(other, 'real') and hasattr(other, 'imag'): -1005 return CObs((self.real * other.real + self.imag * other.imag) / r, (self.real * other.imag - self.imag * other.real) / r) -1006 else: -1007 return CObs(self.real * other / r, -self.imag * other / r) -1008 -1009 def __abs__(self): -1010 return np.sqrt(self.real**2 + self.imag**2) -1011 -1012 def __pos__(self): -1013 return self -1014 -1015 def __neg__(self): -1016 return -1 * self -1017 -1018 def __eq__(self, other): -1019 return self.real == other.real and self.imag == other.imag -1020 -1021 def __str__(self): -1022 return '(' + str(self.real) + int(self.imag >= 0.0) * '+' + str(self.imag) + 'j)' -1023 -1024 def __repr__(self): -1025 return 'CObs[' + str(self) + ']' -1026 -1027 def __format__(self, format_type): -1028 if format_type == "": -1029 significance = 2 -1030 format_type = "2" -1031 else: -1032 significance = int(float(format_type.replace("+", "").replace("-", ""))) -1033 return f"({self.real:{format_type}}{self.imag:+{significance}}j)" -1034 -1035 -1036def gamma_method(x, **kwargs): -1037 """Vectorized version of the gamma_method applicable to lists or arrays of Obs. -1038 -1039 See docstring of pe.Obs.gamma_method for details. 
-1040 """ -1041 return np.vectorize(lambda o: o.gm(**kwargs))(x) -1042 -1043 -1044gm = gamma_method + 633 return dict(zip(labels, sizes)) + 634 + 635 def dump(self, filename, datatype="json.gz", description="", **kwargs): + 636 """Dump the Obs to a file 'name' of chosen format. + 637 + 638 Parameters + 639 ---------- + 640 filename : str + 641 name of the file to be saved. + 642 datatype : str + 643 Format of the exported file. Supported formats include + 644 "json.gz" and "pickle" + 645 description : str + 646 Description for output file, only relevant for json.gz format. + 647 path : str + 648 specifies a custom path for the file (default '.') + 649 """ + 650 if 'path' in kwargs: + 651 file_name = kwargs.get('path') + '/' + filename + 652 else: + 653 file_name = filename + 654 + 655 if datatype == "json.gz": + 656 from .input.json import dump_to_json + 657 dump_to_json([self], file_name, description=description) + 658 elif datatype == "pickle": + 659 with open(file_name + '.p', 'wb') as fb: + 660 pickle.dump(self, fb) + 661 else: + 662 raise Exception("Unknown datatype " + str(datatype)) + 663 + 664 def export_jackknife(self): + 665 """Export jackknife samples from the Obs + 666 + 667 Returns + 668 ------- + 669 numpy.ndarray + 670 Returns a numpy array of length N + 1 where N is the number of samples + 671 for the given ensemble and replicum. The zeroth entry of the array contains + 672 the mean value of the Obs, entries 1 to N contain the N jackknife samples + 673 derived from the Obs. The current implementation only works for observables + 674 defined on exactly one ensemble and replicum. The derived jackknife samples + 675 should agree with samples from a full jackknife analysis up to O(1/N). 
+ 676 """ + 677 + 678 if len(self.names) != 1: + 679 raise Exception("'export_jackknife' is only implemented for Obs defined on one ensemble and replicum.") + 680 + 681 name = self.names[0] + 682 full_data = self.deltas[name] + self.r_values[name] + 683 n = full_data.size + 684 mean = self.value + 685 tmp_jacks = np.zeros(n + 1) + 686 tmp_jacks[0] = mean + 687 tmp_jacks[1:] = (n * mean - full_data) / (n - 1) + 688 return tmp_jacks + 689 + 690 def export_bootstrap(self, samples=500, random_numbers=None, save_rng=None): + 691 """Export bootstrap samples from the Obs + 692 + 693 Parameters + 694 ---------- + 695 samples : int + 696 Number of bootstrap samples to generate. + 697 random_numbers : np.ndarray + 698 Array of shape (samples, length) containing the random numbers to generate the bootstrap samples. + 699 If not provided the bootstrap samples are generated bashed on the md5 hash of the enesmble name. + 700 save_rng : str + 701 Save the random numbers to a file if a path is specified. + 702 + 703 Returns + 704 ------- + 705 numpy.ndarray + 706 Returns a numpy array of length N + 1 where N is the number of samples + 707 for the given ensemble and replicum. The zeroth entry of the array contains + 708 the mean value of the Obs, entries 1 to N contain the N import_bootstrap samples + 709 derived from the Obs. The current implementation only works for observables + 710 defined on exactly one ensemble and replicum. The derived bootstrap samples + 711 should agree with samples from a full bootstrap analysis up to O(1/N). 
+ 712 """ + 713 if len(self.names) != 1: + 714 raise Exception("'export_boostrap' is only implemented for Obs defined on one ensemble and replicum.") + 715 + 716 name = self.names[0] + 717 length = self.N + 718 + 719 if random_numbers is None: + 720 seed = int(hashlib.md5(name.encode()).hexdigest(), 16) & 0xFFFFFFFF + 721 rng = np.random.default_rng(seed) + 722 random_numbers = rng.integers(0, length, size=(samples, length)) + 723 + 724 if save_rng is not None: + 725 np.savetxt(save_rng, random_numbers, fmt='%i') + 726 + 727 proj = np.vstack([np.bincount(o, minlength=length) for o in random_numbers]) / length + 728 ret = np.zeros(samples + 1) + 729 ret[0] = self.value + 730 ret[1:] = proj @ (self.deltas[name] + self.r_values[name]) + 731 return ret + 732 + 733 def __float__(self): + 734 return float(self.value) + 735 + 736 def __repr__(self): + 737 return 'Obs[' + str(self) + ']' + 738 + 739 def __str__(self): + 740 return _format_uncertainty(self.value, self._dvalue) + 741 + 742 def __format__(self, format_type): + 743 if format_type == "": + 744 significance = 2 + 745 else: + 746 significance = int(float(format_type.replace("+", "").replace("-", ""))) + 747 my_str = _format_uncertainty(self.value, self._dvalue, + 748 significance=significance) + 749 for char in ["+", " "]: + 750 if format_type.startswith(char): + 751 if my_str[0] != "-": + 752 my_str = char + my_str + 753 return my_str + 754 + 755 def __hash__(self): + 756 hash_tuple = (np.array([self.value]).astype(np.float32).data.tobytes(),) + 757 hash_tuple += tuple([o.astype(np.float32).data.tobytes() for o in self.deltas.values()]) + 758 hash_tuple += tuple([np.array([o.errsq()]).astype(np.float32).data.tobytes() for o in self.covobs.values()]) + 759 hash_tuple += tuple([o.encode() for o in self.names]) + 760 m = hashlib.md5() + 761 [m.update(o) for o in hash_tuple] + 762 return int(m.hexdigest(), 16) & 0xFFFFFFFF + 763 + 764 # Overload comparisons + 765 def __lt__(self, other): + 766 return self.value < 
other + 767 + 768 def __le__(self, other): + 769 return self.value <= other + 770 + 771 def __gt__(self, other): + 772 return self.value > other + 773 + 774 def __ge__(self, other): + 775 return self.value >= other + 776 + 777 def __eq__(self, other): + 778 if other is None: + 779 return False + 780 return (self - other).is_zero() + 781 + 782 # Overload math operations + 783 def __add__(self, y): + 784 if isinstance(y, Obs): + 785 return derived_observable(lambda x, **kwargs: x[0] + x[1], [self, y], man_grad=[1, 1]) + 786 else: + 787 if isinstance(y, np.ndarray): + 788 return np.array([self + o for o in y]) + 789 elif isinstance(y, complex): + 790 return CObs(self, 0) + y + 791 elif y.__class__.__name__ in ['Corr', 'CObs']: + 792 return NotImplemented + 793 else: + 794 return derived_observable(lambda x, **kwargs: x[0] + y, [self], man_grad=[1]) + 795 + 796 def __radd__(self, y): + 797 return self + y + 798 + 799 def __mul__(self, y): + 800 if isinstance(y, Obs): + 801 return derived_observable(lambda x, **kwargs: x[0] * x[1], [self, y], man_grad=[y.value, self.value]) + 802 else: + 803 if isinstance(y, np.ndarray): + 804 return np.array([self * o for o in y]) + 805 elif isinstance(y, complex): + 806 return CObs(self * y.real, self * y.imag) + 807 elif y.__class__.__name__ in ['Corr', 'CObs']: + 808 return NotImplemented + 809 else: + 810 return derived_observable(lambda x, **kwargs: x[0] * y, [self], man_grad=[y]) + 811 + 812 def __rmul__(self, y): + 813 return self * y + 814 + 815 def __sub__(self, y): + 816 if isinstance(y, Obs): + 817 return derived_observable(lambda x, **kwargs: x[0] - x[1], [self, y], man_grad=[1, -1]) + 818 else: + 819 if isinstance(y, np.ndarray): + 820 return np.array([self - o for o in y]) + 821 elif y.__class__.__name__ in ['Corr', 'CObs']: + 822 return NotImplemented + 823 else: + 824 return derived_observable(lambda x, **kwargs: x[0] - y, [self], man_grad=[1]) + 825 + 826 def __rsub__(self, y): + 827 return -1 * (self - y) + 828 + 829 
def __pos__(self): + 830 return self + 831 + 832 def __neg__(self): + 833 return -1 * self + 834 + 835 def __truediv__(self, y): + 836 if isinstance(y, Obs): + 837 return derived_observable(lambda x, **kwargs: x[0] / x[1], [self, y], man_grad=[1 / y.value, - self.value / y.value ** 2]) + 838 else: + 839 if isinstance(y, np.ndarray): + 840 return np.array([self / o for o in y]) + 841 elif y.__class__.__name__ in ['Corr', 'CObs']: + 842 return NotImplemented + 843 else: + 844 return derived_observable(lambda x, **kwargs: x[0] / y, [self], man_grad=[1 / y]) + 845 + 846 def __rtruediv__(self, y): + 847 if isinstance(y, Obs): + 848 return derived_observable(lambda x, **kwargs: x[0] / x[1], [y, self], man_grad=[1 / self.value, - y.value / self.value ** 2]) + 849 else: + 850 if isinstance(y, np.ndarray): + 851 return np.array([o / self for o in y]) + 852 elif y.__class__.__name__ in ['Corr', 'CObs']: + 853 return NotImplemented + 854 else: + 855 return derived_observable(lambda x, **kwargs: y / x[0], [self], man_grad=[-y / self.value ** 2]) + 856 + 857 def __pow__(self, y): + 858 if isinstance(y, Obs): + 859 return derived_observable(lambda x: x[0] ** x[1], [self, y]) + 860 else: + 861 return derived_observable(lambda x: x[0] ** y, [self]) + 862 + 863 def __rpow__(self, y): + 864 if isinstance(y, Obs): + 865 return derived_observable(lambda x: x[0] ** x[1], [y, self]) + 866 else: + 867 return derived_observable(lambda x: y ** x[0], [self]) + 868 + 869 def __abs__(self): + 870 return derived_observable(lambda x: anp.abs(x[0]), [self]) + 871 + 872 # Overload numpy functions + 873 def sqrt(self): + 874 return derived_observable(lambda x, **kwargs: np.sqrt(x[0]), [self], man_grad=[1 / 2 / np.sqrt(self.value)]) + 875 + 876 def log(self): + 877 return derived_observable(lambda x, **kwargs: np.log(x[0]), [self], man_grad=[1 / self.value]) + 878 + 879 def exp(self): + 880 return derived_observable(lambda x, **kwargs: np.exp(x[0]), [self], man_grad=[np.exp(self.value)]) + 881 + 
882 def sin(self): + 883 return derived_observable(lambda x, **kwargs: np.sin(x[0]), [self], man_grad=[np.cos(self.value)]) + 884 + 885 def cos(self): + 886 return derived_observable(lambda x, **kwargs: np.cos(x[0]), [self], man_grad=[-np.sin(self.value)]) + 887 + 888 def tan(self): + 889 return derived_observable(lambda x, **kwargs: np.tan(x[0]), [self], man_grad=[1 / np.cos(self.value) ** 2]) + 890 + 891 def arcsin(self): + 892 return derived_observable(lambda x: anp.arcsin(x[0]), [self]) + 893 + 894 def arccos(self): + 895 return derived_observable(lambda x: anp.arccos(x[0]), [self]) + 896 + 897 def arctan(self): + 898 return derived_observable(lambda x: anp.arctan(x[0]), [self]) + 899 + 900 def sinh(self): + 901 return derived_observable(lambda x, **kwargs: np.sinh(x[0]), [self], man_grad=[np.cosh(self.value)]) + 902 + 903 def cosh(self): + 904 return derived_observable(lambda x, **kwargs: np.cosh(x[0]), [self], man_grad=[np.sinh(self.value)]) + 905 + 906 def tanh(self): + 907 return derived_observable(lambda x, **kwargs: np.tanh(x[0]), [self], man_grad=[1 / np.cosh(self.value) ** 2]) + 908 + 909 def arcsinh(self): + 910 return derived_observable(lambda x: anp.arcsinh(x[0]), [self]) + 911 + 912 def arccosh(self): + 913 return derived_observable(lambda x: anp.arccosh(x[0]), [self]) + 914 + 915 def arctanh(self): + 916 return derived_observable(lambda x: anp.arctanh(x[0]), [self]) + 917 + 918 + 919class CObs: + 920 """Class for a complex valued observable.""" + 921 __slots__ = ['_real', '_imag', 'tag'] + 922 + 923 def __init__(self, real, imag=0.0): + 924 self._real = real + 925 self._imag = imag + 926 self.tag = None + 927 + 928 @property + 929 def real(self): + 930 return self._real + 931 + 932 @property + 933 def imag(self): + 934 return self._imag + 935 + 936 def gamma_method(self, **kwargs): + 937 """Executes the gamma_method for the real and the imaginary part.""" + 938 if isinstance(self.real, Obs): + 939 self.real.gamma_method(**kwargs) + 940 if 
isinstance(self.imag, Obs): + 941 self.imag.gamma_method(**kwargs) + 942 + 943 def is_zero(self): + 944 """Checks whether both real and imaginary part are zero within machine precision.""" + 945 return self.real == 0.0 and self.imag == 0.0 + 946 + 947 def conjugate(self): + 948 return CObs(self.real, -self.imag) + 949 + 950 def __add__(self, other): + 951 if isinstance(other, np.ndarray): + 952 return other + self + 953 elif hasattr(other, 'real') and hasattr(other, 'imag'): + 954 return CObs(self.real + other.real, + 955 self.imag + other.imag) + 956 else: + 957 return CObs(self.real + other, self.imag) + 958 + 959 def __radd__(self, y): + 960 return self + y + 961 + 962 def __sub__(self, other): + 963 if isinstance(other, np.ndarray): + 964 return -1 * (other - self) + 965 elif hasattr(other, 'real') and hasattr(other, 'imag'): + 966 return CObs(self.real - other.real, self.imag - other.imag) + 967 else: + 968 return CObs(self.real - other, self.imag) + 969 + 970 def __rsub__(self, other): + 971 return -1 * (self - other) + 972 + 973 def __mul__(self, other): + 974 if isinstance(other, np.ndarray): + 975 return other * self + 976 elif hasattr(other, 'real') and hasattr(other, 'imag'): + 977 if all(isinstance(i, Obs) for i in [self.real, self.imag, other.real, other.imag]): + 978 return CObs(derived_observable(lambda x, **kwargs: x[0] * x[1] - x[2] * x[3], + 979 [self.real, other.real, self.imag, other.imag], + 980 man_grad=[other.real.value, self.real.value, -other.imag.value, -self.imag.value]), + 981 derived_observable(lambda x, **kwargs: x[2] * x[1] + x[0] * x[3], + 982 [self.real, other.real, self.imag, other.imag], + 983 man_grad=[other.imag.value, self.imag.value, other.real.value, self.real.value])) + 984 elif getattr(other, 'imag', 0) != 0: + 985 return CObs(self.real * other.real - self.imag * other.imag, + 986 self.imag * other.real + self.real * other.imag) + 987 else: + 988 return CObs(self.real * other.real, self.imag * other.real) + 989 else: + 990 
return CObs(self.real * other, self.imag * other) + 991 + 992 def __rmul__(self, other): + 993 return self * other + 994 + 995 def __truediv__(self, other): + 996 if isinstance(other, np.ndarray): + 997 return 1 / (other / self) + 998 elif hasattr(other, 'real') and hasattr(other, 'imag'): + 999 r = other.real ** 2 + other.imag ** 2 +1000 return CObs((self.real * other.real + self.imag * other.imag) / r, (self.imag * other.real - self.real * other.imag) / r) +1001 else: +1002 return CObs(self.real / other, self.imag / other) +1003 +1004 def __rtruediv__(self, other): +1005 r = self.real ** 2 + self.imag ** 2 +1006 if hasattr(other, 'real') and hasattr(other, 'imag'): +1007 return CObs((self.real * other.real + self.imag * other.imag) / r, (self.real * other.imag - self.imag * other.real) / r) +1008 else: +1009 return CObs(self.real * other / r, -self.imag * other / r) +1010 +1011 def __abs__(self): +1012 return np.sqrt(self.real**2 + self.imag**2) +1013 +1014 def __pos__(self): +1015 return self +1016 +1017 def __neg__(self): +1018 return -1 * self +1019 +1020 def __eq__(self, other): +1021 return self.real == other.real and self.imag == other.imag +1022 +1023 def __str__(self): +1024 return '(' + str(self.real) + int(self.imag >= 0.0) * '+' + str(self.imag) + 'j)' +1025 +1026 def __repr__(self): +1027 return 'CObs[' + str(self) + ']' +1028 +1029 def __format__(self, format_type): +1030 if format_type == "": +1031 significance = 2 +1032 format_type = "2" +1033 else: +1034 significance = int(float(format_type.replace("+", "").replace("-", ""))) +1035 return f"({self.real:{format_type}}{self.imag:+{significance}}j)" +1036 +1037 +1038def gamma_method(x, **kwargs): +1039 """Vectorized version of the gamma_method applicable to lists or arrays of Obs. +1040 +1041 See docstring of pe.Obs.gamma_method for details. 
+1042 """ +1043 return np.vectorize(lambda o: o.gm(**kwargs))(x) +1044 1045 -1046 -1047def _format_uncertainty(value, dvalue, significance=2): -1048 """Creates a string of a value and its error in paranthesis notation, e.g., 13.02(45)""" -1049 if dvalue == 0.0 or (not np.isfinite(dvalue)): -1050 return str(value) -1051 if not isinstance(significance, int): -1052 raise TypeError("significance needs to be an integer.") -1053 if significance < 1: -1054 raise ValueError("significance needs to be larger than zero.") -1055 fexp = np.floor(np.log10(dvalue)) -1056 if fexp < 0.0: -1057 return '{:{form}}({:1.0f})'.format(value, dvalue * 10 ** (-fexp + significance - 1), form='.' + str(-int(fexp) + significance - 1) + 'f') -1058 elif fexp == 0.0: -1059 return f"{value:.{significance - 1}f}({dvalue:1.{significance - 1}f})" -1060 else: -1061 return f"{value:.{max(0, int(significance - fexp - 1))}f}({dvalue:2.{max(0, int(significance - fexp - 1))}f})" -1062 -1063 -1064def _expand_deltas(deltas, idx, shape, gapsize): -1065 """Expand deltas defined on idx to a regular range with spacing gapsize between two -1066 configurations and where holes are filled by 0. -1067 If idx is of type range, the deltas are not changed if the idx.step == gapsize. -1068 -1069 Parameters -1070 ---------- -1071 deltas : list -1072 List of fluctuations -1073 idx : list -1074 List or range of configs on which the deltas are defined, has to be sorted in ascending order. -1075 shape : int -1076 Number of configs in idx. -1077 gapsize : int -1078 The target distance between two configurations. If longer distances -1079 are found in idx, the data is expanded. 
-1080 """ -1081 if isinstance(idx, range): -1082 if (idx.step == gapsize): -1083 return deltas -1084 ret = np.zeros((idx[-1] - idx[0] + gapsize) // gapsize) -1085 for i in range(shape): -1086 ret[(idx[i] - idx[0]) // gapsize] = deltas[i] -1087 return ret -1088 -1089 -1090def _merge_idx(idl): -1091 """Returns the union of all lists in idl as range or sorted list -1092 -1093 Parameters -1094 ---------- -1095 idl : list -1096 List of lists or ranges. -1097 """ -1098 -1099 if _check_lists_equal(idl): -1100 return idl[0] -1101 -1102 idunion = sorted(set().union(*idl)) +1046gm = gamma_method +1047 +1048 +1049def _format_uncertainty(value, dvalue, significance=2): +1050 """Creates a string of a value and its error in paranthesis notation, e.g., 13.02(45)""" +1051 if dvalue == 0.0 or (not np.isfinite(dvalue)): +1052 return str(value) +1053 if not isinstance(significance, int): +1054 raise TypeError("significance needs to be an integer.") +1055 if significance < 1: +1056 raise ValueError("significance needs to be larger than zero.") +1057 fexp = np.floor(np.log10(dvalue)) +1058 if fexp < 0.0: +1059 return '{:{form}}({:1.0f})'.format(value, dvalue * 10 ** (-fexp + significance - 1), form='.' + str(-int(fexp) + significance - 1) + 'f') +1060 elif fexp == 0.0: +1061 return f"{value:.{significance - 1}f}({dvalue:1.{significance - 1}f})" +1062 else: +1063 return f"{value:.{max(0, int(significance - fexp - 1))}f}({dvalue:2.{max(0, int(significance - fexp - 1))}f})" +1064 +1065 +1066def _expand_deltas(deltas, idx, shape, gapsize): +1067 """Expand deltas defined on idx to a regular range with spacing gapsize between two +1068 configurations and where holes are filled by 0. +1069 If idx is of type range, the deltas are not changed if the idx.step == gapsize. +1070 +1071 Parameters +1072 ---------- +1073 deltas : list +1074 List of fluctuations +1075 idx : list +1076 List or range of configs on which the deltas are defined, has to be sorted in ascending order. 
+1077 shape : int +1078 Number of configs in idx. +1079 gapsize : int +1080 The target distance between two configurations. If longer distances +1081 are found in idx, the data is expanded. +1082 """ +1083 if isinstance(idx, range): +1084 if (idx.step == gapsize): +1085 return deltas +1086 ret = np.zeros((idx[-1] - idx[0] + gapsize) // gapsize) +1087 for i in range(shape): +1088 ret[(idx[i] - idx[0]) // gapsize] = deltas[i] +1089 return ret +1090 +1091 +1092def _merge_idx(idl): +1093 """Returns the union of all lists in idl as range or sorted list +1094 +1095 Parameters +1096 ---------- +1097 idl : list +1098 List of lists or ranges. +1099 """ +1100 +1101 if _check_lists_equal(idl): +1102 return idl[0] 1103 -1104 # Check whether idunion can be expressed as range -1105 idrange = range(idunion[0], idunion[-1] + 1, idunion[1] - idunion[0]) -1106 idtest = [list(idrange), idunion] -1107 if _check_lists_equal(idtest): -1108 return idrange -1109 -1110 return idunion +1104 idunion = sorted(set().union(*idl)) +1105 +1106 # Check whether idunion can be expressed as range +1107 idrange = range(idunion[0], idunion[-1] + 1, idunion[1] - idunion[0]) +1108 idtest = [list(idrange), idunion] +1109 if _check_lists_equal(idtest): +1110 return idrange 1111 -1112 -1113def _intersection_idx(idl): -1114 """Returns the intersection of all lists in idl as range or sorted list -1115 -1116 Parameters -1117 ---------- -1118 idl : list -1119 List of lists or ranges. -1120 """ -1121 -1122 if _check_lists_equal(idl): -1123 return idl[0] -1124 -1125 idinter = sorted(set.intersection(*[set(o) for o in idl])) +1112 return idunion +1113 +1114 +1115def _intersection_idx(idl): +1116 """Returns the intersection of all lists in idl as range or sorted list +1117 +1118 Parameters +1119 ---------- +1120 idl : list +1121 List of lists or ranges. 
+1122 """ +1123 +1124 if _check_lists_equal(idl): +1125 return idl[0] 1126 -1127 # Check whether idinter can be expressed as range -1128 try: -1129 idrange = range(idinter[0], idinter[-1] + 1, idinter[1] - idinter[0]) -1130 idtest = [list(idrange), idinter] -1131 if _check_lists_equal(idtest): -1132 return idrange -1133 except IndexError: -1134 pass -1135 -1136 return idinter +1127 idinter = sorted(set.intersection(*[set(o) for o in idl])) +1128 +1129 # Check whether idinter can be expressed as range +1130 try: +1131 idrange = range(idinter[0], idinter[-1] + 1, idinter[1] - idinter[0]) +1132 idtest = [list(idrange), idinter] +1133 if _check_lists_equal(idtest): +1134 return idrange +1135 except IndexError: +1136 pass 1137 -1138 -1139def _expand_deltas_for_merge(deltas, idx, shape, new_idx): -1140 """Expand deltas defined on idx to the list of configs that is defined by new_idx. -1141 New, empty entries are filled by 0. If idx and new_idx are of type range, the smallest -1142 common divisor of the step sizes is used as new step size. -1143 -1144 Parameters -1145 ---------- -1146 deltas : list -1147 List of fluctuations -1148 idx : list -1149 List or range of configs on which the deltas are defined. -1150 Has to be a subset of new_idx and has to be sorted in ascending order. -1151 shape : list -1152 Number of configs in idx. -1153 new_idx : list -1154 List of configs that defines the new range, has to be sorted in ascending order. -1155 """ -1156 -1157 if type(idx) is range and type(new_idx) is range: -1158 if idx == new_idx: -1159 return deltas -1160 ret = np.zeros(new_idx[-1] - new_idx[0] + 1) -1161 for i in range(shape): -1162 ret[idx[i] - new_idx[0]] = deltas[i] -1163 return np.array([ret[new_idx[i] - new_idx[0]] for i in range(len(new_idx))]) * len(new_idx) / len(idx) -1164 -1165 -1166def derived_observable(func, data, array_mode=False, **kwargs): -1167 """Construct a derived Obs according to func(data, **kwargs) using automatic differentiation. 
-1168 -1169 Parameters -1170 ---------- -1171 func : object -1172 arbitrary function of the form func(data, **kwargs). For the -1173 automatic differentiation to work, all numpy functions have to have -1174 the autograd wrapper (use 'import autograd.numpy as anp'). -1175 data : list -1176 list of Obs, e.g. [obs1, obs2, obs3]. -1177 num_grad : bool -1178 if True, numerical derivatives are used instead of autograd -1179 (default False). To control the numerical differentiation the -1180 kwargs of numdifftools.step_generators.MaxStepGenerator -1181 can be used. -1182 man_grad : list -1183 manually supply a list or an array which contains the jacobian -1184 of func. Use cautiously, supplying the wrong derivative will -1185 not be intercepted. -1186 -1187 Notes -1188 ----- -1189 For simple mathematical operations it can be practical to use anonymous -1190 functions. For the ratio of two observables one can e.g. use -1191 -1192 new_obs = derived_observable(lambda x: x[0] / x[1], [obs1, obs2]) -1193 """ -1194 -1195 data = np.asarray(data) -1196 raveled_data = data.ravel() -1197 -1198 # Workaround for matrix operations containing non Obs data -1199 if not all(isinstance(x, Obs) for x in raveled_data): -1200 for i in range(len(raveled_data)): -1201 if isinstance(raveled_data[i], (int, float)): -1202 raveled_data[i] = cov_Obs(raveled_data[i], 0.0, "###dummy_covobs###") -1203 -1204 allcov = {} -1205 for o in raveled_data: -1206 for name in o.cov_names: -1207 if name in allcov: -1208 if not np.allclose(allcov[name], o.covobs[name].cov): -1209 raise Exception('Inconsistent covariance matrices for %s!' 
% (name)) -1210 else: -1211 allcov[name] = o.covobs[name].cov -1212 -1213 n_obs = len(raveled_data) -1214 new_names = sorted(set([y for x in [o.names for o in raveled_data] for y in x])) -1215 new_cov_names = sorted(set([y for x in [o.cov_names for o in raveled_data] for y in x])) -1216 new_sample_names = sorted(set(new_names) - set(new_cov_names)) -1217 -1218 reweighted = len(list(filter(lambda o: o.reweighted is True, raveled_data))) > 0 +1138 return idinter +1139 +1140 +1141def _expand_deltas_for_merge(deltas, idx, shape, new_idx): +1142 """Expand deltas defined on idx to the list of configs that is defined by new_idx. +1143 New, empty entries are filled by 0. If idx and new_idx are of type range, the smallest +1144 common divisor of the step sizes is used as new step size. +1145 +1146 Parameters +1147 ---------- +1148 deltas : list +1149 List of fluctuations +1150 idx : list +1151 List or range of configs on which the deltas are defined. +1152 Has to be a subset of new_idx and has to be sorted in ascending order. +1153 shape : list +1154 Number of configs in idx. +1155 new_idx : list +1156 List of configs that defines the new range, has to be sorted in ascending order. +1157 """ +1158 +1159 if type(idx) is range and type(new_idx) is range: +1160 if idx == new_idx: +1161 return deltas +1162 ret = np.zeros(new_idx[-1] - new_idx[0] + 1) +1163 for i in range(shape): +1164 ret[idx[i] - new_idx[0]] = deltas[i] +1165 return np.array([ret[new_idx[i] - new_idx[0]] for i in range(len(new_idx))]) * len(new_idx) / len(idx) +1166 +1167 +1168def derived_observable(func, data, array_mode=False, **kwargs): +1169 """Construct a derived Obs according to func(data, **kwargs) using automatic differentiation. +1170 +1171 Parameters +1172 ---------- +1173 func : object +1174 arbitrary function of the form func(data, **kwargs). For the +1175 automatic differentiation to work, all numpy functions have to have +1176 the autograd wrapper (use 'import autograd.numpy as anp'). 
+1177 data : list +1178 list of Obs, e.g. [obs1, obs2, obs3]. +1179 num_grad : bool +1180 if True, numerical derivatives are used instead of autograd +1181 (default False). To control the numerical differentiation the +1182 kwargs of numdifftools.step_generators.MaxStepGenerator +1183 can be used. +1184 man_grad : list +1185 manually supply a list or an array which contains the jacobian +1186 of func. Use cautiously, supplying the wrong derivative will +1187 not be intercepted. +1188 +1189 Notes +1190 ----- +1191 For simple mathematical operations it can be practical to use anonymous +1192 functions. For the ratio of two observables one can e.g. use +1193 +1194 new_obs = derived_observable(lambda x: x[0] / x[1], [obs1, obs2]) +1195 """ +1196 +1197 data = np.asarray(data) +1198 raveled_data = data.ravel() +1199 +1200 # Workaround for matrix operations containing non Obs data +1201 if not all(isinstance(x, Obs) for x in raveled_data): +1202 for i in range(len(raveled_data)): +1203 if isinstance(raveled_data[i], (int, float)): +1204 raveled_data[i] = cov_Obs(raveled_data[i], 0.0, "###dummy_covobs###") +1205 +1206 allcov = {} +1207 for o in raveled_data: +1208 for name in o.cov_names: +1209 if name in allcov: +1210 if not np.allclose(allcov[name], o.covobs[name].cov): +1211 raise Exception('Inconsistent covariance matrices for %s!' 
% (name)) +1212 else: +1213 allcov[name] = o.covobs[name].cov +1214 +1215 n_obs = len(raveled_data) +1216 new_names = sorted(set([y for x in [o.names for o in raveled_data] for y in x])) +1217 new_cov_names = sorted(set([y for x in [o.cov_names for o in raveled_data] for y in x])) +1218 new_sample_names = sorted(set(new_names) - set(new_cov_names)) 1219 -1220 if data.ndim == 1: -1221 values = np.array([o.value for o in data]) -1222 else: -1223 values = np.vectorize(lambda x: x.value)(data) -1224 -1225 new_values = func(values, **kwargs) +1220 reweighted = len(list(filter(lambda o: o.reweighted is True, raveled_data))) > 0 +1221 +1222 if data.ndim == 1: +1223 values = np.array([o.value for o in data]) +1224 else: +1225 values = np.vectorize(lambda x: x.value)(data) 1226 -1227 multi = int(isinstance(new_values, np.ndarray)) +1227 new_values = func(values, **kwargs) 1228 -1229 new_r_values = {} -1230 new_idl_d = {} -1231 for name in new_sample_names: -1232 idl = [] -1233 tmp_values = np.zeros(n_obs) -1234 for i, item in enumerate(raveled_data): -1235 tmp_values[i] = item.r_values.get(name, item.value) -1236 tmp_idl = item.idl.get(name) -1237 if tmp_idl is not None: -1238 idl.append(tmp_idl) -1239 if multi > 0: -1240 tmp_values = np.array(tmp_values).reshape(data.shape) -1241 new_r_values[name] = func(tmp_values, **kwargs) -1242 new_idl_d[name] = _merge_idx(idl) -1243 -1244 if 'man_grad' in kwargs: -1245 deriv = np.asarray(kwargs.get('man_grad')) -1246 if new_values.shape + data.shape != deriv.shape: -1247 raise Exception('Manual derivative does not have correct shape.') -1248 elif kwargs.get('num_grad') is True: -1249 if multi > 0: -1250 raise Exception('Multi mode currently not supported for numerical derivative') -1251 options = { -1252 'base_step': 0.1, -1253 'step_ratio': 2.5} -1254 for key in options.keys(): -1255 kwarg = kwargs.get(key) -1256 if kwarg is not None: -1257 options[key] = kwarg -1258 tmp_df = nd.Gradient(func, order=4, **{k: v for k, v in 
options.items() if v is not None})(values, **kwargs) -1259 if tmp_df.size == 1: -1260 deriv = np.array([tmp_df.real]) -1261 else: -1262 deriv = tmp_df.real -1263 else: -1264 deriv = jacobian(func)(values, **kwargs) -1265 -1266 final_result = np.zeros(new_values.shape, dtype=object) +1229 multi = int(isinstance(new_values, np.ndarray)) +1230 +1231 new_r_values = {} +1232 new_idl_d = {} +1233 for name in new_sample_names: +1234 idl = [] +1235 tmp_values = np.zeros(n_obs) +1236 for i, item in enumerate(raveled_data): +1237 tmp_values[i] = item.r_values.get(name, item.value) +1238 tmp_idl = item.idl.get(name) +1239 if tmp_idl is not None: +1240 idl.append(tmp_idl) +1241 if multi > 0: +1242 tmp_values = np.array(tmp_values).reshape(data.shape) +1243 new_r_values[name] = func(tmp_values, **kwargs) +1244 new_idl_d[name] = _merge_idx(idl) +1245 +1246 if 'man_grad' in kwargs: +1247 deriv = np.asarray(kwargs.get('man_grad')) +1248 if new_values.shape + data.shape != deriv.shape: +1249 raise Exception('Manual derivative does not have correct shape.') +1250 elif kwargs.get('num_grad') is True: +1251 if multi > 0: +1252 raise Exception('Multi mode currently not supported for numerical derivative') +1253 options = { +1254 'base_step': 0.1, +1255 'step_ratio': 2.5} +1256 for key in options.keys(): +1257 kwarg = kwargs.get(key) +1258 if kwarg is not None: +1259 options[key] = kwarg +1260 tmp_df = nd.Gradient(func, order=4, **{k: v for k, v in options.items() if v is not None})(values, **kwargs) +1261 if tmp_df.size == 1: +1262 deriv = np.array([tmp_df.real]) +1263 else: +1264 deriv = tmp_df.real +1265 else: +1266 deriv = jacobian(func)(values, **kwargs) 1267 -1268 if array_mode is True: +1268 final_result = np.zeros(new_values.shape, dtype=object) 1269 -1270 class _Zero_grad(): -1271 def __init__(self, N): -1272 self.grad = np.zeros((N, 1)) -1273 -1274 new_covobs_lengths = dict(set([y for x in [[(n, o.covobs[n].N) for n in o.cov_names] for o in raveled_data] for y in x])) -1275 
d_extracted = {} -1276 g_extracted = {} -1277 for name in new_sample_names: -1278 d_extracted[name] = [] -1279 ens_length = len(new_idl_d[name]) -1280 for i_dat, dat in enumerate(data): -1281 d_extracted[name].append(np.array([_expand_deltas_for_merge(o.deltas.get(name, np.zeros(ens_length)), o.idl.get(name, new_idl_d[name]), o.shape.get(name, ens_length), new_idl_d[name]) for o in dat.reshape(np.prod(dat.shape))]).reshape(dat.shape + (ens_length, ))) -1282 for name in new_cov_names: -1283 g_extracted[name] = [] -1284 zero_grad = _Zero_grad(new_covobs_lengths[name]) -1285 for i_dat, dat in enumerate(data): -1286 g_extracted[name].append(np.array([o.covobs.get(name, zero_grad).grad for o in dat.reshape(np.prod(dat.shape))]).reshape(dat.shape + (new_covobs_lengths[name], 1))) -1287 -1288 for i_val, new_val in np.ndenumerate(new_values): -1289 new_deltas = {} -1290 new_grad = {} -1291 if array_mode is True: -1292 for name in new_sample_names: -1293 ens_length = d_extracted[name][0].shape[-1] -1294 new_deltas[name] = np.zeros(ens_length) -1295 for i_dat, dat in enumerate(d_extracted[name]): -1296 new_deltas[name] += np.tensordot(deriv[i_val + (i_dat, )], dat) -1297 for name in new_cov_names: -1298 new_grad[name] = 0 -1299 for i_dat, dat in enumerate(g_extracted[name]): -1300 new_grad[name] += np.tensordot(deriv[i_val + (i_dat, )], dat) -1301 else: -1302 for j_obs, obs in np.ndenumerate(data): -1303 for name in obs.names: -1304 if name in obs.cov_names: -1305 new_grad[name] = new_grad.get(name, 0) + deriv[i_val + j_obs] * obs.covobs[name].grad -1306 else: -1307 new_deltas[name] = new_deltas.get(name, 0) + deriv[i_val + j_obs] * _expand_deltas_for_merge(obs.deltas[name], obs.idl[name], obs.shape[name], new_idl_d[name]) -1308 -1309 new_covobs = {name: Covobs(0, allcov[name], name, grad=new_grad[name]) for name in new_grad} +1270 if array_mode is True: +1271 +1272 class _Zero_grad(): +1273 def __init__(self, N): +1274 self.grad = np.zeros((N, 1)) +1275 +1276 
new_covobs_lengths = dict(set([y for x in [[(n, o.covobs[n].N) for n in o.cov_names] for o in raveled_data] for y in x])) +1277 d_extracted = {} +1278 g_extracted = {} +1279 for name in new_sample_names: +1280 d_extracted[name] = [] +1281 ens_length = len(new_idl_d[name]) +1282 for i_dat, dat in enumerate(data): +1283 d_extracted[name].append(np.array([_expand_deltas_for_merge(o.deltas.get(name, np.zeros(ens_length)), o.idl.get(name, new_idl_d[name]), o.shape.get(name, ens_length), new_idl_d[name]) for o in dat.reshape(np.prod(dat.shape))]).reshape(dat.shape + (ens_length, ))) +1284 for name in new_cov_names: +1285 g_extracted[name] = [] +1286 zero_grad = _Zero_grad(new_covobs_lengths[name]) +1287 for i_dat, dat in enumerate(data): +1288 g_extracted[name].append(np.array([o.covobs.get(name, zero_grad).grad for o in dat.reshape(np.prod(dat.shape))]).reshape(dat.shape + (new_covobs_lengths[name], 1))) +1289 +1290 for i_val, new_val in np.ndenumerate(new_values): +1291 new_deltas = {} +1292 new_grad = {} +1293 if array_mode is True: +1294 for name in new_sample_names: +1295 ens_length = d_extracted[name][0].shape[-1] +1296 new_deltas[name] = np.zeros(ens_length) +1297 for i_dat, dat in enumerate(d_extracted[name]): +1298 new_deltas[name] += np.tensordot(deriv[i_val + (i_dat, )], dat) +1299 for name in new_cov_names: +1300 new_grad[name] = 0 +1301 for i_dat, dat in enumerate(g_extracted[name]): +1302 new_grad[name] += np.tensordot(deriv[i_val + (i_dat, )], dat) +1303 else: +1304 for j_obs, obs in np.ndenumerate(data): +1305 for name in obs.names: +1306 if name in obs.cov_names: +1307 new_grad[name] = new_grad.get(name, 0) + deriv[i_val + j_obs] * obs.covobs[name].grad +1308 else: +1309 new_deltas[name] = new_deltas.get(name, 0) + deriv[i_val + j_obs] * _expand_deltas_for_merge(obs.deltas[name], obs.idl[name], obs.shape[name], new_idl_d[name]) 1310 -1311 if not set(new_covobs.keys()).isdisjoint(new_deltas.keys()): -1312 raise Exception('The same name has been used for 
deltas and covobs!') -1313 new_samples = [] -1314 new_means = [] -1315 new_idl = [] -1316 new_names_obs = [] -1317 for name in new_names: -1318 if name not in new_covobs: -1319 new_samples.append(new_deltas[name]) -1320 new_idl.append(new_idl_d[name]) -1321 new_means.append(new_r_values[name][i_val]) -1322 new_names_obs.append(name) -1323 final_result[i_val] = Obs(new_samples, new_names_obs, means=new_means, idl=new_idl) -1324 for name in new_covobs: -1325 final_result[i_val].names.append(name) -1326 final_result[i_val]._covobs = new_covobs -1327 final_result[i_val]._value = new_val -1328 final_result[i_val].reweighted = reweighted -1329 -1330 if multi == 0: -1331 final_result = final_result.item() -1332 -1333 return final_result +1311 new_covobs = {name: Covobs(0, allcov[name], name, grad=new_grad[name]) for name in new_grad} +1312 +1313 if not set(new_covobs.keys()).isdisjoint(new_deltas.keys()): +1314 raise Exception('The same name has been used for deltas and covobs!') +1315 new_samples = [] +1316 new_means = [] +1317 new_idl = [] +1318 new_names_obs = [] +1319 for name in new_names: +1320 if name not in new_covobs: +1321 new_samples.append(new_deltas[name]) +1322 new_idl.append(new_idl_d[name]) +1323 new_means.append(new_r_values[name][i_val]) +1324 new_names_obs.append(name) +1325 final_result[i_val] = Obs(new_samples, new_names_obs, means=new_means, idl=new_idl) +1326 for name in new_covobs: +1327 final_result[i_val].names.append(name) +1328 final_result[i_val]._covobs = new_covobs +1329 final_result[i_val]._value = new_val +1330 final_result[i_val].reweighted = reweighted +1331 +1332 if multi == 0: +1333 final_result = final_result.item() 1334 -1335 -1336def _reduce_deltas(deltas, idx_old, idx_new): -1337 """Extract deltas defined on idx_old on all configs of idx_new. -1338 -1339 Assumes, that idx_old and idx_new are correctly defined idl, i.e., they -1340 are ordered in an ascending order. 
-1341 -1342 Parameters -1343 ---------- -1344 deltas : list -1345 List of fluctuations -1346 idx_old : list -1347 List or range of configs on which the deltas are defined -1348 idx_new : list -1349 List of configs for which we want to extract the deltas. -1350 Has to be a subset of idx_old. -1351 """ -1352 if not len(deltas) == len(idx_old): -1353 raise Exception('Length of deltas and idx_old have to be the same: %d != %d' % (len(deltas), len(idx_old))) -1354 if type(idx_old) is range and type(idx_new) is range: -1355 if idx_old == idx_new: -1356 return deltas -1357 if _check_lists_equal([idx_old, idx_new]): -1358 return deltas -1359 indices = np.intersect1d(idx_old, idx_new, assume_unique=True, return_indices=True)[1] -1360 if len(indices) < len(idx_new): -1361 raise Exception('Error in _reduce_deltas: Config of idx_new not in idx_old') -1362 return np.array(deltas)[indices] -1363 -1364 -1365def reweight(weight, obs, **kwargs): -1366 """Reweight a list of observables. -1367 -1368 Parameters -1369 ---------- -1370 weight : Obs -1371 Reweighting factor. An Observable that has to be defined on a superset of the -1372 configurations in obs[i].idl for all i. -1373 obs : list -1374 list of Obs, e.g. [obs1, obs2, obs3]. -1375 all_configs : bool -1376 if True, the reweighted observables are normalized by the average of -1377 the reweighting factor on all configurations in weight.idl and not -1378 on the configurations in obs[i].idl. Default False. -1379 """ -1380 result = [] -1381 for i in range(len(obs)): -1382 if len(obs[i].cov_names): -1383 raise Exception('Error: Not possible to reweight an Obs that contains covobs!') -1384 if not set(obs[i].names).issubset(weight.names): -1385 raise Exception('Error: Ensembles do not fit') -1386 for name in obs[i].names: -1387 if not set(obs[i].idl[name]).issubset(weight.idl[name]): -1388 raise Exception('obs[%d] has to be defined on a subset of the configs in weight.idl[%s]!' 
% (i, name)) -1389 new_samples = [] -1390 w_deltas = {} -1391 for name in sorted(obs[i].names): -1392 w_deltas[name] = _reduce_deltas(weight.deltas[name], weight.idl[name], obs[i].idl[name]) -1393 new_samples.append((w_deltas[name] + weight.r_values[name]) * (obs[i].deltas[name] + obs[i].r_values[name])) -1394 tmp_obs = Obs(new_samples, sorted(obs[i].names), idl=[obs[i].idl[name] for name in sorted(obs[i].names)]) -1395 -1396 if kwargs.get('all_configs'): -1397 new_weight = weight -1398 else: -1399 new_weight = Obs([w_deltas[name] + weight.r_values[name] for name in sorted(obs[i].names)], sorted(obs[i].names), idl=[obs[i].idl[name] for name in sorted(obs[i].names)]) -1400 -1401 result.append(tmp_obs / new_weight) -1402 result[-1].reweighted = True -1403 -1404 return result +1335 return final_result +1336 +1337 +1338def _reduce_deltas(deltas, idx_old, idx_new): +1339 """Extract deltas defined on idx_old on all configs of idx_new. +1340 +1341 Assumes, that idx_old and idx_new are correctly defined idl, i.e., they +1342 are ordered in an ascending order. +1343 +1344 Parameters +1345 ---------- +1346 deltas : list +1347 List of fluctuations +1348 idx_old : list +1349 List or range of configs on which the deltas are defined +1350 idx_new : list +1351 List of configs for which we want to extract the deltas. +1352 Has to be a subset of idx_old. 
+1353 """ +1354 if not len(deltas) == len(idx_old): +1355 raise Exception('Length of deltas and idx_old have to be the same: %d != %d' % (len(deltas), len(idx_old))) +1356 if type(idx_old) is range and type(idx_new) is range: +1357 if idx_old == idx_new: +1358 return deltas +1359 if _check_lists_equal([idx_old, idx_new]): +1360 return deltas +1361 indices = np.intersect1d(idx_old, idx_new, assume_unique=True, return_indices=True)[1] +1362 if len(indices) < len(idx_new): +1363 raise Exception('Error in _reduce_deltas: Config of idx_new not in idx_old') +1364 return np.array(deltas)[indices] +1365 +1366 +1367def reweight(weight, obs, **kwargs): +1368 """Reweight a list of observables. +1369 +1370 Parameters +1371 ---------- +1372 weight : Obs +1373 Reweighting factor. An Observable that has to be defined on a superset of the +1374 configurations in obs[i].idl for all i. +1375 obs : list +1376 list of Obs, e.g. [obs1, obs2, obs3]. +1377 all_configs : bool +1378 if True, the reweighted observables are normalized by the average of +1379 the reweighting factor on all configurations in weight.idl and not +1380 on the configurations in obs[i].idl. Default False. +1381 """ +1382 result = [] +1383 for i in range(len(obs)): +1384 if len(obs[i].cov_names): +1385 raise Exception('Error: Not possible to reweight an Obs that contains covobs!') +1386 if not set(obs[i].names).issubset(weight.names): +1387 raise Exception('Error: Ensembles do not fit') +1388 for name in obs[i].names: +1389 if not set(obs[i].idl[name]).issubset(weight.idl[name]): +1390 raise Exception('obs[%d] has to be defined on a subset of the configs in weight.idl[%s]!' 
% (i, name)) +1391 new_samples = [] +1392 w_deltas = {} +1393 for name in sorted(obs[i].names): +1394 w_deltas[name] = _reduce_deltas(weight.deltas[name], weight.idl[name], obs[i].idl[name]) +1395 new_samples.append((w_deltas[name] + weight.r_values[name]) * (obs[i].deltas[name] + obs[i].r_values[name])) +1396 tmp_obs = Obs(new_samples, sorted(obs[i].names), idl=[obs[i].idl[name] for name in sorted(obs[i].names)]) +1397 +1398 if kwargs.get('all_configs'): +1399 new_weight = weight +1400 else: +1401 new_weight = Obs([w_deltas[name] + weight.r_values[name] for name in sorted(obs[i].names)], sorted(obs[i].names), idl=[obs[i].idl[name] for name in sorted(obs[i].names)]) +1402 +1403 result.append(tmp_obs / new_weight) +1404 result[-1].reweighted = True 1405 -1406 -1407def correlate(obs_a, obs_b): -1408 """Correlate two observables. -1409 -1410 Parameters -1411 ---------- -1412 obs_a : Obs -1413 First observable -1414 obs_b : Obs -1415 Second observable -1416 -1417 Notes -1418 ----- -1419 Keep in mind to only correlate primary observables which have not been reweighted -1420 yet. The reweighting has to be applied after correlating the observables. -1421 Currently only works if ensembles are identical (this is not strictly necessary). 
-1422 """ -1423 -1424 if sorted(obs_a.names) != sorted(obs_b.names): -1425 raise Exception(f"Ensembles do not fit {set(sorted(obs_a.names)) ^ set(sorted(obs_b.names))}") -1426 if len(obs_a.cov_names) or len(obs_b.cov_names): -1427 raise Exception('Error: Not possible to correlate Obs that contain covobs!') -1428 for name in obs_a.names: -1429 if obs_a.shape[name] != obs_b.shape[name]: -1430 raise Exception('Shapes of ensemble', name, 'do not fit') -1431 if obs_a.idl[name] != obs_b.idl[name]: -1432 raise Exception('idl of ensemble', name, 'do not fit') -1433 -1434 if obs_a.reweighted is True: -1435 warnings.warn("The first observable is already reweighted.", RuntimeWarning) -1436 if obs_b.reweighted is True: -1437 warnings.warn("The second observable is already reweighted.", RuntimeWarning) -1438 -1439 new_samples = [] -1440 new_idl = [] -1441 for name in sorted(obs_a.names): -1442 new_samples.append((obs_a.deltas[name] + obs_a.r_values[name]) * (obs_b.deltas[name] + obs_b.r_values[name])) -1443 new_idl.append(obs_a.idl[name]) -1444 -1445 o = Obs(new_samples, sorted(obs_a.names), idl=new_idl) -1446 o.reweighted = obs_a.reweighted or obs_b.reweighted -1447 return o -1448 -1449 -1450def covariance(obs, visualize=False, correlation=False, smooth=None, **kwargs): -1451 r'''Calculates the error covariance matrix of a set of observables. -1452 -1453 WARNING: This function should be used with care, especially for observables with support on multiple -1454 ensembles with differing autocorrelations. See the notes below for details. -1455 -1456 The gamma method has to be applied first to all observables. +1406 return result +1407 +1408 +1409def correlate(obs_a, obs_b): +1410 """Correlate two observables. +1411 +1412 Parameters +1413 ---------- +1414 obs_a : Obs +1415 First observable +1416 obs_b : Obs +1417 Second observable +1418 +1419 Notes +1420 ----- +1421 Keep in mind to only correlate primary observables which have not been reweighted +1422 yet. 
The reweighting has to be applied after correlating the observables. +1423 Currently only works if ensembles are identical (this is not strictly necessary). +1424 """ +1425 +1426 if sorted(obs_a.names) != sorted(obs_b.names): +1427 raise Exception(f"Ensembles do not fit {set(sorted(obs_a.names)) ^ set(sorted(obs_b.names))}") +1428 if len(obs_a.cov_names) or len(obs_b.cov_names): +1429 raise Exception('Error: Not possible to correlate Obs that contain covobs!') +1430 for name in obs_a.names: +1431 if obs_a.shape[name] != obs_b.shape[name]: +1432 raise Exception('Shapes of ensemble', name, 'do not fit') +1433 if obs_a.idl[name] != obs_b.idl[name]: +1434 raise Exception('idl of ensemble', name, 'do not fit') +1435 +1436 if obs_a.reweighted is True: +1437 warnings.warn("The first observable is already reweighted.", RuntimeWarning) +1438 if obs_b.reweighted is True: +1439 warnings.warn("The second observable is already reweighted.", RuntimeWarning) +1440 +1441 new_samples = [] +1442 new_idl = [] +1443 for name in sorted(obs_a.names): +1444 new_samples.append((obs_a.deltas[name] + obs_a.r_values[name]) * (obs_b.deltas[name] + obs_b.r_values[name])) +1445 new_idl.append(obs_a.idl[name]) +1446 +1447 o = Obs(new_samples, sorted(obs_a.names), idl=new_idl) +1448 o.reweighted = obs_a.reweighted or obs_b.reweighted +1449 return o +1450 +1451 +1452def covariance(obs, visualize=False, correlation=False, smooth=None, **kwargs): +1453 r'''Calculates the error covariance matrix of a set of observables. +1454 +1455 WARNING: This function should be used with care, especially for observables with support on multiple +1456 ensembles with differing autocorrelations. See the notes below for details. 1457 -1458 Parameters -1459 ---------- -1460 obs : list or numpy.ndarray -1461 List or one dimensional array of Obs -1462 visualize : bool -1463 If True plots the corresponding normalized correlation matrix (default False). 
-1464 correlation : bool -1465 If True the correlation matrix instead of the error covariance matrix is returned (default False). -1466 smooth : None or int -1467 If smooth is an integer 'E' between 2 and the dimension of the matrix minus 1 the eigenvalue -1468 smoothing procedure of hep-lat/9412087 is applied to the correlation matrix which leaves the -1469 largest E eigenvalues essentially unchanged and smoothes the smaller eigenvalues to avoid extremely -1470 small ones. -1471 -1472 Notes -1473 ----- -1474 The error covariance is defined such that it agrees with the squared standard error for two identical observables -1475 $$\operatorname{cov}(a,a)=\sum_{s=1}^N\delta_a^s\delta_a^s/N^2=\Gamma_{aa}(0)/N=\operatorname{var}(a)/N=\sigma_a^2$$ -1476 in the absence of autocorrelation. -1477 The error covariance is estimated by calculating the correlation matrix assuming no autocorrelation and then rescaling the correlation matrix by the full errors including the previous gamma method estimate for the autocorrelation of the observables. The covariance at windowsize 0 is guaranteed to be positive semi-definite -1478 $$\sum_{i,j}v_i\Gamma_{ij}(0)v_j=\frac{1}{N}\sum_{s=1}^N\sum_{i,j}v_i\delta_i^s\delta_j^s v_j=\frac{1}{N}\sum_{s=1}^N\sum_{i}|v_i\delta_i^s|^2\geq 0\,,$$ for every $v\in\mathbb{R}^M$, while such an identity does not hold for larger windows/lags. -1479 For observables defined on a single ensemble our approximation is equivalent to assuming that the integrated autocorrelation time of an off-diagonal element is equal to the geometric mean of the integrated autocorrelation times of the corresponding diagonal elements. -1480 $$\tau_{\mathrm{int}, ij}=\sqrt{\tau_{\mathrm{int}, i}\times \tau_{\mathrm{int}, j}}$$ -1481 This construction ensures that the estimated covariance matrix is positive semi-definite (up to numerical rounding errors). -1482 ''' -1483 -1484 length = len(obs) +1458 The gamma method has to be applied first to all observables. 
+1459 +1460 Parameters +1461 ---------- +1462 obs : list or numpy.ndarray +1463 List or one dimensional array of Obs +1464 visualize : bool +1465 If True plots the corresponding normalized correlation matrix (default False). +1466 correlation : bool +1467 If True the correlation matrix instead of the error covariance matrix is returned (default False). +1468 smooth : None or int +1469 If smooth is an integer 'E' between 2 and the dimension of the matrix minus 1 the eigenvalue +1470 smoothing procedure of hep-lat/9412087 is applied to the correlation matrix which leaves the +1471 largest E eigenvalues essentially unchanged and smoothes the smaller eigenvalues to avoid extremely +1472 small ones. +1473 +1474 Notes +1475 ----- +1476 The error covariance is defined such that it agrees with the squared standard error for two identical observables +1477 $$\operatorname{cov}(a,a)=\sum_{s=1}^N\delta_a^s\delta_a^s/N^2=\Gamma_{aa}(0)/N=\operatorname{var}(a)/N=\sigma_a^2$$ +1478 in the absence of autocorrelation. +1479 The error covariance is estimated by calculating the correlation matrix assuming no autocorrelation and then rescaling the correlation matrix by the full errors including the previous gamma method estimate for the autocorrelation of the observables. The covariance at windowsize 0 is guaranteed to be positive semi-definite +1480 $$\sum_{i,j}v_i\Gamma_{ij}(0)v_j=\frac{1}{N}\sum_{s=1}^N\sum_{i,j}v_i\delta_i^s\delta_j^s v_j=\frac{1}{N}\sum_{s=1}^N\sum_{i}|v_i\delta_i^s|^2\geq 0\,,$$ for every $v\in\mathbb{R}^M$, while such an identity does not hold for larger windows/lags. +1481 For observables defined on a single ensemble our approximation is equivalent to assuming that the integrated autocorrelation time of an off-diagonal element is equal to the geometric mean of the integrated autocorrelation times of the corresponding diagonal elements. 
+1482 $$\tau_{\mathrm{int}, ij}=\sqrt{\tau_{\mathrm{int}, i}\times \tau_{\mathrm{int}, j}}$$ +1483 This construction ensures that the estimated covariance matrix is positive semi-definite (up to numerical rounding errors). +1484 ''' 1485 -1486 max_samples = np.max([o.N for o in obs]) -1487 if max_samples <= length and not [item for sublist in [o.cov_names for o in obs] for item in sublist]: -1488 warnings.warn(f"The dimension of the covariance matrix ({length}) is larger or equal to the number of samples ({max_samples}). This will result in a rank deficient matrix.", RuntimeWarning) -1489 -1490 cov = np.zeros((length, length)) -1491 for i in range(length): -1492 for j in range(i, length): -1493 cov[i, j] = _covariance_element(obs[i], obs[j]) -1494 cov = cov + cov.T - np.diag(np.diag(cov)) -1495 -1496 corr = np.diag(1 / np.sqrt(np.diag(cov))) @ cov @ np.diag(1 / np.sqrt(np.diag(cov))) +1486 length = len(obs) +1487 +1488 max_samples = np.max([o.N for o in obs]) +1489 if max_samples <= length and not [item for sublist in [o.cov_names for o in obs] for item in sublist]: +1490 warnings.warn(f"The dimension of the covariance matrix ({length}) is larger or equal to the number of samples ({max_samples}). 
This will result in a rank deficient matrix.", RuntimeWarning) +1491 +1492 cov = np.zeros((length, length)) +1493 for i in range(length): +1494 for j in range(i, length): +1495 cov[i, j] = _covariance_element(obs[i], obs[j]) +1496 cov = cov + cov.T - np.diag(np.diag(cov)) 1497 -1498 if isinstance(smooth, int): -1499 corr = _smooth_eigenvalues(corr, smooth) -1500 -1501 if visualize: -1502 plt.matshow(corr, vmin=-1, vmax=1) -1503 plt.set_cmap('RdBu') -1504 plt.colorbar() -1505 plt.draw() -1506 -1507 if correlation is True: -1508 return corr -1509 -1510 errors = [o.dvalue for o in obs] -1511 cov = np.diag(errors) @ corr @ np.diag(errors) -1512 -1513 eigenvalues = np.linalg.eigh(cov)[0] -1514 if not np.all(eigenvalues >= 0): -1515 warnings.warn("Covariance matrix is not positive semi-definite (Eigenvalues: " + str(eigenvalues) + ")", RuntimeWarning) -1516 -1517 return cov +1498 corr = np.diag(1 / np.sqrt(np.diag(cov))) @ cov @ np.diag(1 / np.sqrt(np.diag(cov))) +1499 +1500 if isinstance(smooth, int): +1501 corr = _smooth_eigenvalues(corr, smooth) +1502 +1503 if visualize: +1504 plt.matshow(corr, vmin=-1, vmax=1) +1505 plt.set_cmap('RdBu') +1506 plt.colorbar() +1507 plt.draw() +1508 +1509 if correlation is True: +1510 return corr +1511 +1512 errors = [o.dvalue for o in obs] +1513 cov = np.diag(errors) @ corr @ np.diag(errors) +1514 +1515 eigenvalues = np.linalg.eigh(cov)[0] +1516 if not np.all(eigenvalues >= 0): +1517 warnings.warn("Covariance matrix is not positive semi-definite (Eigenvalues: " + str(eigenvalues) + ")", RuntimeWarning) 1518 -1519 -1520def _smooth_eigenvalues(corr, E): -1521 """Eigenvalue smoothing as described in hep-lat/9412087 -1522 -1523 corr : np.ndarray -1524 correlation matrix -1525 E : integer -1526 Number of eigenvalues to be left substantially unchanged -1527 """ -1528 if not (2 < E < corr.shape[0] - 1): -1529 raise Exception(f"'E' has to be between 2 and the dimension of the correlation matrix minus 1 ({corr.shape[0] - 1}).") -1530 vals, vec 
= np.linalg.eigh(corr) -1531 lambda_min = np.mean(vals[:-E]) -1532 vals[vals < lambda_min] = lambda_min -1533 vals /= np.mean(vals) -1534 return vec @ np.diag(vals) @ vec.T -1535 -1536 -1537def _covariance_element(obs1, obs2): -1538 """Estimates the covariance of two Obs objects, neglecting autocorrelations.""" -1539 -1540 def calc_gamma(deltas1, deltas2, idx1, idx2, new_idx): -1541 deltas1 = _reduce_deltas(deltas1, idx1, new_idx) -1542 deltas2 = _reduce_deltas(deltas2, idx2, new_idx) -1543 return np.sum(deltas1 * deltas2) -1544 -1545 if set(obs1.names).isdisjoint(set(obs2.names)): -1546 return 0.0 -1547 -1548 if not hasattr(obs1, 'e_dvalue') or not hasattr(obs2, 'e_dvalue'): -1549 raise Exception('The gamma method has to be applied to both Obs first.') -1550 -1551 dvalue = 0.0 +1519 return cov +1520 +1521 +1522def _smooth_eigenvalues(corr, E): +1523 """Eigenvalue smoothing as described in hep-lat/9412087 +1524 +1525 corr : np.ndarray +1526 correlation matrix +1527 E : integer +1528 Number of eigenvalues to be left substantially unchanged +1529 """ +1530 if not (2 < E < corr.shape[0] - 1): +1531 raise Exception(f"'E' has to be between 2 and the dimension of the correlation matrix minus 1 ({corr.shape[0] - 1}).") +1532 vals, vec = np.linalg.eigh(corr) +1533 lambda_min = np.mean(vals[:-E]) +1534 vals[vals < lambda_min] = lambda_min +1535 vals /= np.mean(vals) +1536 return vec @ np.diag(vals) @ vec.T +1537 +1538 +1539def _covariance_element(obs1, obs2): +1540 """Estimates the covariance of two Obs objects, neglecting autocorrelations.""" +1541 +1542 def calc_gamma(deltas1, deltas2, idx1, idx2, new_idx): +1543 deltas1 = _reduce_deltas(deltas1, idx1, new_idx) +1544 deltas2 = _reduce_deltas(deltas2, idx2, new_idx) +1545 return np.sum(deltas1 * deltas2) +1546 +1547 if set(obs1.names).isdisjoint(set(obs2.names)): +1548 return 0.0 +1549 +1550 if not hasattr(obs1, 'e_dvalue') or not hasattr(obs2, 'e_dvalue'): +1551 raise Exception('The gamma method has to be applied to both 
Obs first.') 1552 -1553 for e_name in obs1.mc_names: +1553 dvalue = 0.0 1554 -1555 if e_name not in obs2.mc_names: -1556 continue -1557 -1558 idl_d = {} -1559 for r_name in obs1.e_content[e_name]: -1560 if r_name not in obs2.e_content[e_name]: -1561 continue -1562 idl_d[r_name] = _intersection_idx([obs1.idl[r_name], obs2.idl[r_name]]) -1563 -1564 gamma = 0.0 +1555 for e_name in obs1.mc_names: +1556 +1557 if e_name not in obs2.mc_names: +1558 continue +1559 +1560 idl_d = {} +1561 for r_name in obs1.e_content[e_name]: +1562 if r_name not in obs2.e_content[e_name]: +1563 continue +1564 idl_d[r_name] = _intersection_idx([obs1.idl[r_name], obs2.idl[r_name]]) 1565 -1566 for r_name in obs1.e_content[e_name]: -1567 if r_name not in obs2.e_content[e_name]: -1568 continue -1569 if len(idl_d[r_name]) == 0: +1566 gamma = 0.0 +1567 +1568 for r_name in obs1.e_content[e_name]: +1569 if r_name not in obs2.e_content[e_name]: 1570 continue -1571 gamma += calc_gamma(obs1.deltas[r_name], obs2.deltas[r_name], obs1.idl[r_name], obs2.idl[r_name], idl_d[r_name]) -1572 -1573 if gamma == 0.0: -1574 continue -1575 -1576 gamma_div = 0.0 -1577 for r_name in obs1.e_content[e_name]: -1578 if r_name not in obs2.e_content[e_name]: -1579 continue -1580 if len(idl_d[r_name]) == 0: +1571 if len(idl_d[r_name]) == 0: +1572 continue +1573 gamma += calc_gamma(obs1.deltas[r_name], obs2.deltas[r_name], obs1.idl[r_name], obs2.idl[r_name], idl_d[r_name]) +1574 +1575 if gamma == 0.0: +1576 continue +1577 +1578 gamma_div = 0.0 +1579 for r_name in obs1.e_content[e_name]: +1580 if r_name not in obs2.e_content[e_name]: 1581 continue -1582 gamma_div += np.sqrt(calc_gamma(obs1.deltas[r_name], obs1.deltas[r_name], obs1.idl[r_name], obs1.idl[r_name], idl_d[r_name]) * calc_gamma(obs2.deltas[r_name], obs2.deltas[r_name], obs2.idl[r_name], obs2.idl[r_name], idl_d[r_name])) -1583 gamma /= gamma_div -1584 -1585 dvalue += gamma +1582 if len(idl_d[r_name]) == 0: +1583 continue +1584 gamma_div += 
np.sqrt(calc_gamma(obs1.deltas[r_name], obs1.deltas[r_name], obs1.idl[r_name], obs1.idl[r_name], idl_d[r_name]) * calc_gamma(obs2.deltas[r_name], obs2.deltas[r_name], obs2.idl[r_name], obs2.idl[r_name], idl_d[r_name])) +1585 gamma /= gamma_div 1586 -1587 for e_name in obs1.cov_names: +1587 dvalue += gamma 1588 -1589 if e_name not in obs2.cov_names: -1590 continue -1591 -1592 dvalue += np.dot(np.transpose(obs1.covobs[e_name].grad), np.dot(obs1.covobs[e_name].cov, obs2.covobs[e_name].grad)).item() +1589 for e_name in obs1.cov_names: +1590 +1591 if e_name not in obs2.cov_names: +1592 continue 1593 -1594 return dvalue +1594 dvalue += np.dot(np.transpose(obs1.covobs[e_name].grad), np.dot(obs1.covobs[e_name].cov, obs2.covobs[e_name].grad)).item() 1595 -1596 -1597def import_jackknife(jacks, name, idl=None): -1598 """Imports jackknife samples and returns an Obs -1599 -1600 Parameters -1601 ---------- -1602 jacks : numpy.ndarray -1603 numpy array containing the mean value as zeroth entry and -1604 the N jackknife samples as first to Nth entry. -1605 name : str -1606 name of the ensemble the samples are defined on. -1607 """ -1608 length = len(jacks) - 1 -1609 prj = (np.ones((length, length)) - (length - 1) * np.identity(length)) -1610 samples = jacks[1:] @ prj -1611 mean = np.mean(samples) -1612 new_obs = Obs([samples - mean], [name], idl=idl, means=[mean]) -1613 new_obs._value = jacks[0] -1614 return new_obs -1615 -1616 -1617def import_bootstrap(boots, name, random_numbers): -1618 """Imports bootstrap samples and returns an Obs -1619 -1620 Parameters -1621 ---------- -1622 boots : numpy.ndarray -1623 numpy array containing the mean value as zeroth entry and -1624 the N bootstrap samples as first to Nth entry. -1625 name : str -1626 name of the ensemble the samples are defined on. 
-1627 random_numbers : np.ndarray -1628 Array of shape (samples, length) containing the random numbers to generate the bootstrap samples, -1629 where samples is the number of bootstrap samples and length is the length of the original Monte Carlo -1630 chain to be reconstructed. -1631 """ -1632 samples, length = random_numbers.shape -1633 if samples != len(boots) - 1: -1634 raise ValueError("Random numbers do not have the correct shape.") -1635 -1636 if samples < length: -1637 raise ValueError("Obs can't be reconstructed if there are fewer bootstrap samples than Monte Carlo data points.") -1638 -1639 proj = np.vstack([np.bincount(o, minlength=length) for o in random_numbers]) / length +1596 return dvalue +1597 +1598 +1599def import_jackknife(jacks, name, idl=None): +1600 """Imports jackknife samples and returns an Obs +1601 +1602 Parameters +1603 ---------- +1604 jacks : numpy.ndarray +1605 numpy array containing the mean value as zeroth entry and +1606 the N jackknife samples as first to Nth entry. +1607 name : str +1608 name of the ensemble the samples are defined on. +1609 """ +1610 length = len(jacks) - 1 +1611 prj = (np.ones((length, length)) - (length - 1) * np.identity(length)) +1612 samples = jacks[1:] @ prj +1613 mean = np.mean(samples) +1614 new_obs = Obs([samples - mean], [name], idl=idl, means=[mean]) +1615 new_obs._value = jacks[0] +1616 return new_obs +1617 +1618 +1619def import_bootstrap(boots, name, random_numbers): +1620 """Imports bootstrap samples and returns an Obs +1621 +1622 Parameters +1623 ---------- +1624 boots : numpy.ndarray +1625 numpy array containing the mean value as zeroth entry and +1626 the N bootstrap samples as first to Nth entry. +1627 name : str +1628 name of the ensemble the samples are defined on. 
+1629 random_numbers : np.ndarray +1630 Array of shape (samples, length) containing the random numbers to generate the bootstrap samples, +1631 where samples is the number of bootstrap samples and length is the length of the original Monte Carlo +1632 chain to be reconstructed. +1633 """ +1634 samples, length = random_numbers.shape +1635 if samples != len(boots) - 1: +1636 raise ValueError("Random numbers do not have the correct shape.") +1637 +1638 if samples < length: +1639 raise ValueError("Obs can't be reconstructed if there are fewer bootstrap samples than Monte Carlo data points.") 1640 -1641 samples = scipy.linalg.lstsq(proj, boots[1:])[0] -1642 ret = Obs([samples], [name]) -1643 ret._value = boots[0] -1644 return ret -1645 -1646 -1647def merge_obs(list_of_obs): -1648 """Combine all observables in list_of_obs into one new observable -1649 -1650 Parameters -1651 ---------- -1652 list_of_obs : list -1653 list of the Obs object to be combined -1654 -1655 Notes -1656 ----- -1657 It is not possible to combine obs which are based on the same replicum -1658 """ -1659 replist = [item for obs in list_of_obs for item in obs.names] -1660 if (len(replist) == len(set(replist))) is False: -1661 raise Exception('list_of_obs contains duplicate replica: %s' % (str(replist))) -1662 if any([len(o.cov_names) for o in list_of_obs]): -1663 raise Exception('Not possible to merge data that contains covobs!') -1664 new_dict = {} -1665 idl_dict = {} -1666 for o in list_of_obs: -1667 new_dict.update({key: o.deltas.get(key, 0) + o.r_values.get(key, 0) -1668 for key in set(o.deltas) | set(o.r_values)}) -1669 idl_dict.update({key: o.idl.get(key, 0) for key in set(o.deltas)}) -1670 -1671 names = sorted(new_dict.keys()) -1672 o = Obs([new_dict[name] for name in names], names, idl=[idl_dict[name] for name in names]) -1673 o.reweighted = np.max([oi.reweighted for oi in list_of_obs]) -1674 return o -1675 -1676 -1677def cov_Obs(means, cov, name, grad=None): -1678 """Create an Obs based on 
mean(s) and a covariance matrix -1679 -1680 Parameters -1681 ---------- -1682 mean : list of floats or float -1683 N mean value(s) of the new Obs -1684 cov : list or array -1685 2d (NxN) Covariance matrix, 1d diagonal entries or 0d covariance -1686 name : str -1687 identifier for the covariance matrix -1688 grad : list or array -1689 Gradient of the Covobs wrt. the means belonging to cov. -1690 """ -1691 -1692 def covobs_to_obs(co): -1693 """Make an Obs out of a Covobs -1694 -1695 Parameters -1696 ---------- -1697 co : Covobs -1698 Covobs to be embedded into the Obs -1699 """ -1700 o = Obs([], [], means=[]) -1701 o._value = co.value -1702 o.names.append(co.name) -1703 o._covobs[co.name] = co -1704 o._dvalue = np.sqrt(co.errsq()) -1705 return o -1706 -1707 ol = [] -1708 if isinstance(means, (float, int)): -1709 means = [means] -1710 -1711 for i in range(len(means)): -1712 ol.append(covobs_to_obs(Covobs(means[i], cov, name, pos=i, grad=grad))) -1713 if ol[0].covobs[name].N != len(means): -1714 raise Exception('You have to provide %d mean values!' 
% (ol[0].N)) -1715 if len(ol) == 1: -1716 return ol[0] -1717 return ol -1718 -1719 -1720def _determine_gap(o, e_content, e_name): -1721 gaps = [] -1722 for r_name in e_content[e_name]: -1723 if isinstance(o.idl[r_name], range): -1724 gaps.append(o.idl[r_name].step) -1725 else: -1726 gaps.append(np.min(np.diff(o.idl[r_name]))) -1727 -1728 gap = min(gaps) -1729 if not np.all([gi % gap == 0 for gi in gaps]): -1730 raise Exception(f"Replica for ensemble {e_name} do not have a common spacing.", gaps) -1731 -1732 return gap +1641 proj = np.vstack([np.bincount(o, minlength=length) for o in random_numbers]) / length +1642 +1643 samples = scipy.linalg.lstsq(proj, boots[1:])[0] +1644 ret = Obs([samples], [name]) +1645 ret._value = boots[0] +1646 return ret +1647 +1648 +1649def merge_obs(list_of_obs): +1650 """Combine all observables in list_of_obs into one new observable +1651 +1652 Parameters +1653 ---------- +1654 list_of_obs : list +1655 list of the Obs object to be combined +1656 +1657 Notes +1658 ----- +1659 It is not possible to combine obs which are based on the same replicum +1660 """ +1661 replist = [item for obs in list_of_obs for item in obs.names] +1662 if (len(replist) == len(set(replist))) is False: +1663 raise Exception('list_of_obs contains duplicate replica: %s' % (str(replist))) +1664 if any([len(o.cov_names) for o in list_of_obs]): +1665 raise Exception('Not possible to merge data that contains covobs!') +1666 new_dict = {} +1667 idl_dict = {} +1668 for o in list_of_obs: +1669 new_dict.update({key: o.deltas.get(key, 0) + o.r_values.get(key, 0) +1670 for key in set(o.deltas) | set(o.r_values)}) +1671 idl_dict.update({key: o.idl.get(key, 0) for key in set(o.deltas)}) +1672 +1673 names = sorted(new_dict.keys()) +1674 o = Obs([new_dict[name] for name in names], names, idl=[idl_dict[name] for name in names]) +1675 o.reweighted = np.max([oi.reweighted for oi in list_of_obs]) +1676 return o +1677 +1678 +1679def cov_Obs(means, cov, name, grad=None): +1680 
"""Create an Obs based on mean(s) and a covariance matrix +1681 +1682 Parameters +1683 ---------- +1684 mean : list of floats or float +1685 N mean value(s) of the new Obs +1686 cov : list or array +1687 2d (NxN) Covariance matrix, 1d diagonal entries or 0d covariance +1688 name : str +1689 identifier for the covariance matrix +1690 grad : list or array +1691 Gradient of the Covobs wrt. the means belonging to cov. +1692 """ +1693 +1694 def covobs_to_obs(co): +1695 """Make an Obs out of a Covobs +1696 +1697 Parameters +1698 ---------- +1699 co : Covobs +1700 Covobs to be embedded into the Obs +1701 """ +1702 o = Obs([], [], means=[]) +1703 o._value = co.value +1704 o.names.append(co.name) +1705 o._covobs[co.name] = co +1706 o._dvalue = np.sqrt(co.errsq()) +1707 return o +1708 +1709 ol = [] +1710 if isinstance(means, (float, int)): +1711 means = [means] +1712 +1713 for i in range(len(means)): +1714 ol.append(covobs_to_obs(Covobs(means[i], cov, name, pos=i, grad=grad))) +1715 if ol[0].covobs[name].N != len(means): +1716 raise Exception('You have to provide %d mean values!' % (ol[0].N)) +1717 if len(ol) == 1: +1718 return ol[0] +1719 return ol +1720 +1721 +1722def _determine_gap(o, e_content, e_name): +1723 gaps = [] +1724 for r_name in e_content[e_name]: +1725 if isinstance(o.idl[r_name], range): +1726 gaps.append(o.idl[r_name].step) +1727 else: +1728 gaps.append(np.min(np.diff(o.idl[r_name]))) +1729 +1730 gap = min(gaps) +1731 if not np.all([gi % gap == 0 for gi in gaps]): +1732 raise Exception(f"Replica for ensemble {e_name} do not have a common spacing.", gaps) 1733 -1734 -1735def _check_lists_equal(idl): -1736 ''' -1737 Use groupby to efficiently check whether all elements of idl are identical. -1738 Returns True if all elements are equal, otherwise False. 
-1739 -1740 Parameters -1741 ---------- -1742 idl : list of lists, ranges or np.ndarrays -1743 ''' -1744 g = groupby([np.nditer(el) if isinstance(el, np.ndarray) else el for el in idl]) -1745 if next(g, True) and not next(g, False): -1746 return True -1747 return False +1734 return gap +1735 +1736 +1737def _check_lists_equal(idl): +1738 ''' +1739 Use groupby to efficiently check whether all elements of idl are identical. +1740 Returns True if all elements are equal, otherwise False. +1741 +1742 Parameters +1743 ---------- +1744 idl : list of lists, ranges or np.ndarrays +1745 ''' +1746 g = groupby([np.nditer(el) if isinstance(el, np.ndarray) else el for el in idl]) +1747 if next(g, True) and not next(g, False): +1748 return True +1749 return False @@ -2179,814 +2181,816 @@ 105 elif isinstance(idx, (list, np.ndarray)): 106 dc = np.unique(np.diff(idx)) 107 if np.any(dc < 0): -108 raise ValueError("Unsorted idx for idl[%s]" % (name)) -109 if len(dc) == 1: -110 self.idl[name] = range(idx[0], idx[-1] + dc[0], dc[0]) -111 else: -112 self.idl[name] = list(idx) -113 else: -114 raise TypeError('incompatible type for idl[%s].' % (name)) -115 else: -116 for name, sample in sorted(zip(names, samples)): -117 self.idl[name] = range(1, len(sample) + 1) -118 -119 if kwargs.get("means") is not None: -120 for name, sample, mean in sorted(zip(names, samples, kwargs.get("means"))): -121 self.shape[name] = len(self.idl[name]) -122 self.N += self.shape[name] -123 self.r_values[name] = mean -124 self.deltas[name] = sample -125 else: -126 for name, sample in sorted(zip(names, samples)): -127 self.shape[name] = len(self.idl[name]) -128 self.N += self.shape[name] -129 if len(sample) != self.shape[name]: -130 raise ValueError('Incompatible samples and idx for %s: %d vs. 
%d' % (name, len(sample), self.shape[name])) -131 self.r_values[name] = np.mean(sample) -132 self.deltas[name] = sample - self.r_values[name] -133 self._value += self.shape[name] * self.r_values[name] -134 self._value /= self.N -135 -136 self._dvalue = 0.0 -137 self.ddvalue = 0.0 -138 self.reweighted = False -139 -140 self.tag = None +108 raise ValueError("Unsorted idx for idl[%s] at position %s" % (name, ' '.join(['%s' % (pos + 1) for pos in np.where(np.diff(idx) < 0)[0]]))) +109 elif np.any(dc == 0): +110 raise ValueError("Duplicate entries in idx for idl[%s] at position %s" % (name, ' '.join(['%s' % (pos + 1) for pos in np.where(np.diff(idx) == 0)[0]]))) +111 if len(dc) == 1: +112 self.idl[name] = range(idx[0], idx[-1] + dc[0], dc[0]) +113 else: +114 self.idl[name] = list(idx) +115 else: +116 raise TypeError('incompatible type for idl[%s].' % (name)) +117 else: +118 for name, sample in sorted(zip(names, samples)): +119 self.idl[name] = range(1, len(sample) + 1) +120 +121 if kwargs.get("means") is not None: +122 for name, sample, mean in sorted(zip(names, samples, kwargs.get("means"))): +123 self.shape[name] = len(self.idl[name]) +124 self.N += self.shape[name] +125 self.r_values[name] = mean +126 self.deltas[name] = sample +127 else: +128 for name, sample in sorted(zip(names, samples)): +129 self.shape[name] = len(self.idl[name]) +130 self.N += self.shape[name] +131 if len(sample) != self.shape[name]: +132 raise ValueError('Incompatible samples and idx for %s: %d vs. 
%d' % (name, len(sample), self.shape[name])) +133 self.r_values[name] = np.mean(sample) +134 self.deltas[name] = sample - self.r_values[name] +135 self._value += self.shape[name] * self.r_values[name] +136 self._value /= self.N +137 +138 self._dvalue = 0.0 +139 self.ddvalue = 0.0 +140 self.reweighted = False 141 -142 @property -143 def value(self): -144 return self._value -145 -146 @property -147 def dvalue(self): -148 return self._dvalue -149 -150 @property -151 def e_names(self): -152 return sorted(set([o.split('|')[0] for o in self.names])) -153 -154 @property -155 def cov_names(self): -156 return sorted(set([o for o in self.covobs.keys()])) -157 -158 @property -159 def mc_names(self): -160 return sorted(set([o.split('|')[0] for o in self.names if o not in self.cov_names])) -161 -162 @property -163 def e_content(self): -164 res = {} -165 for e, e_name in enumerate(self.e_names): -166 res[e_name] = sorted(filter(lambda x: x.startswith(e_name + '|'), self.names)) -167 if e_name in self.names: -168 res[e_name].append(e_name) -169 return res -170 -171 @property -172 def covobs(self): -173 return self._covobs -174 -175 def gamma_method(self, **kwargs): -176 """Estimate the error and related properties of the Obs. -177 -178 Parameters -179 ---------- -180 S : float -181 specifies a custom value for the parameter S (default 2.0). -182 If set to 0 it is assumed that the data exhibits no -183 autocorrelation. In this case the error estimates coincides -184 with the sample standard error. -185 tau_exp : float -186 positive value triggers the critical slowing down analysis -187 (default 0.0). -188 N_sigma : float -189 number of standard deviations from zero until the tail is -190 attached to the autocorrelation function (default 1). 
-191 fft : bool -192 determines whether the fft algorithm is used for the computation -193 of the autocorrelation function (default True) -194 """ -195 -196 e_content = self.e_content -197 self.e_dvalue = {} -198 self.e_ddvalue = {} -199 self.e_tauint = {} -200 self.e_dtauint = {} -201 self.e_windowsize = {} -202 self.e_n_tauint = {} -203 self.e_n_dtauint = {} -204 e_gamma = {} -205 self.e_rho = {} -206 self.e_drho = {} -207 self._dvalue = 0 -208 self.ddvalue = 0 -209 -210 self.S = {} -211 self.tau_exp = {} -212 self.N_sigma = {} -213 -214 if kwargs.get('fft') is False: -215 fft = False -216 else: -217 fft = True -218 -219 def _parse_kwarg(kwarg_name): -220 if kwarg_name in kwargs: -221 tmp = kwargs.get(kwarg_name) -222 if isinstance(tmp, (int, float)): -223 if tmp < 0: -224 raise Exception(kwarg_name + ' has to be larger or equal to 0.') -225 for e, e_name in enumerate(self.e_names): -226 getattr(self, kwarg_name)[e_name] = tmp -227 else: -228 raise TypeError(kwarg_name + ' is not in proper format.') -229 else: -230 for e, e_name in enumerate(self.e_names): -231 if e_name in getattr(Obs, kwarg_name + '_dict'): -232 getattr(self, kwarg_name)[e_name] = getattr(Obs, kwarg_name + '_dict')[e_name] -233 else: -234 getattr(self, kwarg_name)[e_name] = getattr(Obs, kwarg_name + '_global') -235 -236 _parse_kwarg('S') -237 _parse_kwarg('tau_exp') -238 _parse_kwarg('N_sigma') -239 -240 for e, e_name in enumerate(self.mc_names): -241 gapsize = _determine_gap(self, e_content, e_name) -242 -243 r_length = [] -244 for r_name in e_content[e_name]: -245 if isinstance(self.idl[r_name], range): -246 r_length.append(len(self.idl[r_name]) * self.idl[r_name].step // gapsize) -247 else: -248 r_length.append((self.idl[r_name][-1] - self.idl[r_name][0] + 1) // gapsize) -249 -250 e_N = np.sum([self.shape[r_name] for r_name in e_content[e_name]]) -251 w_max = max(r_length) // 2 -252 e_gamma[e_name] = np.zeros(w_max) -253 self.e_rho[e_name] = np.zeros(w_max) -254 self.e_drho[e_name] = 
np.zeros(w_max) -255 -256 for r_name in e_content[e_name]: -257 e_gamma[e_name] += self._calc_gamma(self.deltas[r_name], self.idl[r_name], self.shape[r_name], w_max, fft, gapsize) -258 -259 gamma_div = np.zeros(w_max) -260 for r_name in e_content[e_name]: -261 gamma_div += self._calc_gamma(np.ones((self.shape[r_name])), self.idl[r_name], self.shape[r_name], w_max, fft, gapsize) -262 gamma_div[gamma_div < 1] = 1.0 -263 e_gamma[e_name] /= gamma_div[:w_max] -264 -265 if np.abs(e_gamma[e_name][0]) < 10 * np.finfo(float).tiny: # Prevent division by zero -266 self.e_tauint[e_name] = 0.5 -267 self.e_dtauint[e_name] = 0.0 -268 self.e_dvalue[e_name] = 0.0 -269 self.e_ddvalue[e_name] = 0.0 -270 self.e_windowsize[e_name] = 0 -271 continue -272 -273 self.e_rho[e_name] = e_gamma[e_name][:w_max] / e_gamma[e_name][0] -274 self.e_n_tauint[e_name] = np.cumsum(np.concatenate(([0.5], self.e_rho[e_name][1:]))) -275 # Make sure no entry of tauint is smaller than 0.5 -276 self.e_n_tauint[e_name][self.e_n_tauint[e_name] <= 0.5] = 0.5 + np.finfo(np.float64).eps -277 # hep-lat/0306017 eq. 
(42) -278 self.e_n_dtauint[e_name] = self.e_n_tauint[e_name] * 2 * np.sqrt(np.abs(np.arange(w_max) + 0.5 - self.e_n_tauint[e_name]) / e_N) -279 self.e_n_dtauint[e_name][0] = 0.0 -280 -281 def _compute_drho(i): -282 tmp = (self.e_rho[e_name][i + 1:w_max] -283 + np.concatenate([self.e_rho[e_name][i - 1:None if i - (w_max - 1) // 2 <= 0 else (2 * i - (2 * w_max) // 2):-1], -284 self.e_rho[e_name][1:max(1, w_max - 2 * i)]]) -285 - 2 * self.e_rho[e_name][i] * self.e_rho[e_name][1:w_max - i]) -286 self.e_drho[e_name][i] = np.sqrt(np.sum(tmp ** 2) / e_N) -287 -288 if self.tau_exp[e_name] > 0: -289 _compute_drho(1) -290 texp = self.tau_exp[e_name] -291 # Critical slowing down analysis -292 if w_max // 2 <= 1: -293 raise Exception("Need at least 8 samples for tau_exp error analysis") -294 for n in range(1, w_max // 2): -295 _compute_drho(n + 1) -296 if (self.e_rho[e_name][n] - self.N_sigma[e_name] * self.e_drho[e_name][n]) < 0 or n >= w_max // 2 - 2: -297 # Bias correction hep-lat/0306017 eq. (49) included -298 self.e_tauint[e_name] = self.e_n_tauint[e_name][n] * (1 + (2 * n + 1) / e_N) / (1 + 1 / e_N) + texp * np.abs(self.e_rho[e_name][n + 1]) # The absolute makes sure, that the tail contribution is always positive -299 self.e_dtauint[e_name] = np.sqrt(self.e_n_dtauint[e_name][n] ** 2 + texp ** 2 * self.e_drho[e_name][n + 1] ** 2) -300 # Error of tau_exp neglected so far, missing term: self.e_rho[e_name][n + 1] ** 2 * d_tau_exp ** 2 -301 self.e_dvalue[e_name] = np.sqrt(2 * self.e_tauint[e_name] * e_gamma[e_name][0] * (1 + 1 / e_N) / e_N) -302 self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt((n + 0.5) / e_N) -303 self.e_windowsize[e_name] = n -304 break -305 else: -306 if self.S[e_name] == 0.0: -307 self.e_tauint[e_name] = 0.5 -308 self.e_dtauint[e_name] = 0.0 -309 self.e_dvalue[e_name] = np.sqrt(e_gamma[e_name][0] / (e_N - 1)) -310 self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt(0.5 / e_N) -311 self.e_windowsize[e_name] = 0 -312 else: -313 # Standard 
automatic windowing procedure -314 tau = self.S[e_name] / np.log((2 * self.e_n_tauint[e_name][1:] + 1) / (2 * self.e_n_tauint[e_name][1:] - 1)) -315 g_w = np.exp(- np.arange(1, len(tau) + 1) / tau) - tau / np.sqrt(np.arange(1, len(tau) + 1) * e_N) -316 for n in range(1, w_max): -317 if g_w[n - 1] < 0 or n >= w_max - 1: -318 _compute_drho(n) -319 self.e_tauint[e_name] = self.e_n_tauint[e_name][n] * (1 + (2 * n + 1) / e_N) / (1 + 1 / e_N) # Bias correction hep-lat/0306017 eq. (49) -320 self.e_dtauint[e_name] = self.e_n_dtauint[e_name][n] -321 self.e_dvalue[e_name] = np.sqrt(2 * self.e_tauint[e_name] * e_gamma[e_name][0] * (1 + 1 / e_N) / e_N) -322 self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt((n + 0.5) / e_N) -323 self.e_windowsize[e_name] = n -324 break -325 -326 self._dvalue += self.e_dvalue[e_name] ** 2 -327 self.ddvalue += (self.e_dvalue[e_name] * self.e_ddvalue[e_name]) ** 2 -328 -329 for e_name in self.cov_names: -330 self.e_dvalue[e_name] = np.sqrt(self.covobs[e_name].errsq()) -331 self.e_ddvalue[e_name] = 0 -332 self._dvalue += self.e_dvalue[e_name]**2 -333 -334 self._dvalue = np.sqrt(self._dvalue) -335 if self._dvalue == 0.0: -336 self.ddvalue = 0.0 -337 else: -338 self.ddvalue = np.sqrt(self.ddvalue) / self._dvalue -339 return -340 -341 gm = gamma_method +142 self.tag = None +143 +144 @property +145 def value(self): +146 return self._value +147 +148 @property +149 def dvalue(self): +150 return self._dvalue +151 +152 @property +153 def e_names(self): +154 return sorted(set([o.split('|')[0] for o in self.names])) +155 +156 @property +157 def cov_names(self): +158 return sorted(set([o for o in self.covobs.keys()])) +159 +160 @property +161 def mc_names(self): +162 return sorted(set([o.split('|')[0] for o in self.names if o not in self.cov_names])) +163 +164 @property +165 def e_content(self): +166 res = {} +167 for e, e_name in enumerate(self.e_names): +168 res[e_name] = sorted(filter(lambda x: x.startswith(e_name + '|'), self.names)) +169 if e_name 
in self.names: +170 res[e_name].append(e_name) +171 return res +172 +173 @property +174 def covobs(self): +175 return self._covobs +176 +177 def gamma_method(self, **kwargs): +178 """Estimate the error and related properties of the Obs. +179 +180 Parameters +181 ---------- +182 S : float +183 specifies a custom value for the parameter S (default 2.0). +184 If set to 0 it is assumed that the data exhibits no +185 autocorrelation. In this case the error estimates coincides +186 with the sample standard error. +187 tau_exp : float +188 positive value triggers the critical slowing down analysis +189 (default 0.0). +190 N_sigma : float +191 number of standard deviations from zero until the tail is +192 attached to the autocorrelation function (default 1). +193 fft : bool +194 determines whether the fft algorithm is used for the computation +195 of the autocorrelation function (default True) +196 """ +197 +198 e_content = self.e_content +199 self.e_dvalue = {} +200 self.e_ddvalue = {} +201 self.e_tauint = {} +202 self.e_dtauint = {} +203 self.e_windowsize = {} +204 self.e_n_tauint = {} +205 self.e_n_dtauint = {} +206 e_gamma = {} +207 self.e_rho = {} +208 self.e_drho = {} +209 self._dvalue = 0 +210 self.ddvalue = 0 +211 +212 self.S = {} +213 self.tau_exp = {} +214 self.N_sigma = {} +215 +216 if kwargs.get('fft') is False: +217 fft = False +218 else: +219 fft = True +220 +221 def _parse_kwarg(kwarg_name): +222 if kwarg_name in kwargs: +223 tmp = kwargs.get(kwarg_name) +224 if isinstance(tmp, (int, float)): +225 if tmp < 0: +226 raise Exception(kwarg_name + ' has to be larger or equal to 0.') +227 for e, e_name in enumerate(self.e_names): +228 getattr(self, kwarg_name)[e_name] = tmp +229 else: +230 raise TypeError(kwarg_name + ' is not in proper format.') +231 else: +232 for e, e_name in enumerate(self.e_names): +233 if e_name in getattr(Obs, kwarg_name + '_dict'): +234 getattr(self, kwarg_name)[e_name] = getattr(Obs, kwarg_name + '_dict')[e_name] +235 else: +236 
getattr(self, kwarg_name)[e_name] = getattr(Obs, kwarg_name + '_global') +237 +238 _parse_kwarg('S') +239 _parse_kwarg('tau_exp') +240 _parse_kwarg('N_sigma') +241 +242 for e, e_name in enumerate(self.mc_names): +243 gapsize = _determine_gap(self, e_content, e_name) +244 +245 r_length = [] +246 for r_name in e_content[e_name]: +247 if isinstance(self.idl[r_name], range): +248 r_length.append(len(self.idl[r_name]) * self.idl[r_name].step // gapsize) +249 else: +250 r_length.append((self.idl[r_name][-1] - self.idl[r_name][0] + 1) // gapsize) +251 +252 e_N = np.sum([self.shape[r_name] for r_name in e_content[e_name]]) +253 w_max = max(r_length) // 2 +254 e_gamma[e_name] = np.zeros(w_max) +255 self.e_rho[e_name] = np.zeros(w_max) +256 self.e_drho[e_name] = np.zeros(w_max) +257 +258 for r_name in e_content[e_name]: +259 e_gamma[e_name] += self._calc_gamma(self.deltas[r_name], self.idl[r_name], self.shape[r_name], w_max, fft, gapsize) +260 +261 gamma_div = np.zeros(w_max) +262 for r_name in e_content[e_name]: +263 gamma_div += self._calc_gamma(np.ones((self.shape[r_name])), self.idl[r_name], self.shape[r_name], w_max, fft, gapsize) +264 gamma_div[gamma_div < 1] = 1.0 +265 e_gamma[e_name] /= gamma_div[:w_max] +266 +267 if np.abs(e_gamma[e_name][0]) < 10 * np.finfo(float).tiny: # Prevent division by zero +268 self.e_tauint[e_name] = 0.5 +269 self.e_dtauint[e_name] = 0.0 +270 self.e_dvalue[e_name] = 0.0 +271 self.e_ddvalue[e_name] = 0.0 +272 self.e_windowsize[e_name] = 0 +273 continue +274 +275 self.e_rho[e_name] = e_gamma[e_name][:w_max] / e_gamma[e_name][0] +276 self.e_n_tauint[e_name] = np.cumsum(np.concatenate(([0.5], self.e_rho[e_name][1:]))) +277 # Make sure no entry of tauint is smaller than 0.5 +278 self.e_n_tauint[e_name][self.e_n_tauint[e_name] <= 0.5] = 0.5 + np.finfo(np.float64).eps +279 # hep-lat/0306017 eq. 
(42) +280 self.e_n_dtauint[e_name] = self.e_n_tauint[e_name] * 2 * np.sqrt(np.abs(np.arange(w_max) + 0.5 - self.e_n_tauint[e_name]) / e_N) +281 self.e_n_dtauint[e_name][0] = 0.0 +282 +283 def _compute_drho(i): +284 tmp = (self.e_rho[e_name][i + 1:w_max] +285 + np.concatenate([self.e_rho[e_name][i - 1:None if i - (w_max - 1) // 2 <= 0 else (2 * i - (2 * w_max) // 2):-1], +286 self.e_rho[e_name][1:max(1, w_max - 2 * i)]]) +287 - 2 * self.e_rho[e_name][i] * self.e_rho[e_name][1:w_max - i]) +288 self.e_drho[e_name][i] = np.sqrt(np.sum(tmp ** 2) / e_N) +289 +290 if self.tau_exp[e_name] > 0: +291 _compute_drho(1) +292 texp = self.tau_exp[e_name] +293 # Critical slowing down analysis +294 if w_max // 2 <= 1: +295 raise Exception("Need at least 8 samples for tau_exp error analysis") +296 for n in range(1, w_max // 2): +297 _compute_drho(n + 1) +298 if (self.e_rho[e_name][n] - self.N_sigma[e_name] * self.e_drho[e_name][n]) < 0 or n >= w_max // 2 - 2: +299 # Bias correction hep-lat/0306017 eq. (49) included +300 self.e_tauint[e_name] = self.e_n_tauint[e_name][n] * (1 + (2 * n + 1) / e_N) / (1 + 1 / e_N) + texp * np.abs(self.e_rho[e_name][n + 1]) # The absolute makes sure, that the tail contribution is always positive +301 self.e_dtauint[e_name] = np.sqrt(self.e_n_dtauint[e_name][n] ** 2 + texp ** 2 * self.e_drho[e_name][n + 1] ** 2) +302 # Error of tau_exp neglected so far, missing term: self.e_rho[e_name][n + 1] ** 2 * d_tau_exp ** 2 +303 self.e_dvalue[e_name] = np.sqrt(2 * self.e_tauint[e_name] * e_gamma[e_name][0] * (1 + 1 / e_N) / e_N) +304 self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt((n + 0.5) / e_N) +305 self.e_windowsize[e_name] = n +306 break +307 else: +308 if self.S[e_name] == 0.0: +309 self.e_tauint[e_name] = 0.5 +310 self.e_dtauint[e_name] = 0.0 +311 self.e_dvalue[e_name] = np.sqrt(e_gamma[e_name][0] / (e_N - 1)) +312 self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt(0.5 / e_N) +313 self.e_windowsize[e_name] = 0 +314 else: +315 # Standard 
automatic windowing procedure +316 tau = self.S[e_name] / np.log((2 * self.e_n_tauint[e_name][1:] + 1) / (2 * self.e_n_tauint[e_name][1:] - 1)) +317 g_w = np.exp(- np.arange(1, len(tau) + 1) / tau) - tau / np.sqrt(np.arange(1, len(tau) + 1) * e_N) +318 for n in range(1, w_max): +319 if g_w[n - 1] < 0 or n >= w_max - 1: +320 _compute_drho(n) +321 self.e_tauint[e_name] = self.e_n_tauint[e_name][n] * (1 + (2 * n + 1) / e_N) / (1 + 1 / e_N) # Bias correction hep-lat/0306017 eq. (49) +322 self.e_dtauint[e_name] = self.e_n_dtauint[e_name][n] +323 self.e_dvalue[e_name] = np.sqrt(2 * self.e_tauint[e_name] * e_gamma[e_name][0] * (1 + 1 / e_N) / e_N) +324 self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt((n + 0.5) / e_N) +325 self.e_windowsize[e_name] = n +326 break +327 +328 self._dvalue += self.e_dvalue[e_name] ** 2 +329 self.ddvalue += (self.e_dvalue[e_name] * self.e_ddvalue[e_name]) ** 2 +330 +331 for e_name in self.cov_names: +332 self.e_dvalue[e_name] = np.sqrt(self.covobs[e_name].errsq()) +333 self.e_ddvalue[e_name] = 0 +334 self._dvalue += self.e_dvalue[e_name]**2 +335 +336 self._dvalue = np.sqrt(self._dvalue) +337 if self._dvalue == 0.0: +338 self.ddvalue = 0.0 +339 else: +340 self.ddvalue = np.sqrt(self.ddvalue) / self._dvalue +341 return 342 -343 def _calc_gamma(self, deltas, idx, shape, w_max, fft, gapsize): -344 """Calculate Gamma_{AA} from the deltas, which are defined on idx. -345 idx is assumed to be a contiguous range (possibly with a stepsize != 1) -346 -347 Parameters -348 ---------- -349 deltas : list -350 List of fluctuations -351 idx : list -352 List or range of configurations on which the deltas are defined. -353 shape : int -354 Number of configurations in idx. -355 w_max : int -356 Upper bound for the summation window. -357 fft : bool -358 determines whether the fft algorithm is used for the computation -359 of the autocorrelation function. -360 gapsize : int -361 The target distance between two configurations. 
If longer distances -362 are found in idx, the data is expanded. -363 """ -364 gamma = np.zeros(w_max) -365 deltas = _expand_deltas(deltas, idx, shape, gapsize) -366 new_shape = len(deltas) -367 if fft: -368 max_gamma = min(new_shape, w_max) -369 # The padding for the fft has to be even -370 padding = new_shape + max_gamma + (new_shape + max_gamma) % 2 -371 gamma[:max_gamma] += np.fft.irfft(np.abs(np.fft.rfft(deltas, padding)) ** 2)[:max_gamma] -372 else: -373 for n in range(w_max): -374 if new_shape - n >= 0: -375 gamma[n] += deltas[0:new_shape - n].dot(deltas[n:new_shape]) -376 -377 return gamma +343 gm = gamma_method +344 +345 def _calc_gamma(self, deltas, idx, shape, w_max, fft, gapsize): +346 """Calculate Gamma_{AA} from the deltas, which are defined on idx. +347 idx is assumed to be a contiguous range (possibly with a stepsize != 1) +348 +349 Parameters +350 ---------- +351 deltas : list +352 List of fluctuations +353 idx : list +354 List or range of configurations on which the deltas are defined. +355 shape : int +356 Number of configurations in idx. +357 w_max : int +358 Upper bound for the summation window. +359 fft : bool +360 determines whether the fft algorithm is used for the computation +361 of the autocorrelation function. +362 gapsize : int +363 The target distance between two configurations. If longer distances +364 are found in idx, the data is expanded. 
+365 """ +366 gamma = np.zeros(w_max) +367 deltas = _expand_deltas(deltas, idx, shape, gapsize) +368 new_shape = len(deltas) +369 if fft: +370 max_gamma = min(new_shape, w_max) +371 # The padding for the fft has to be even +372 padding = new_shape + max_gamma + (new_shape + max_gamma) % 2 +373 gamma[:max_gamma] += np.fft.irfft(np.abs(np.fft.rfft(deltas, padding)) ** 2)[:max_gamma] +374 else: +375 for n in range(w_max): +376 if new_shape - n >= 0: +377 gamma[n] += deltas[0:new_shape - n].dot(deltas[n:new_shape]) 378 -379 def details(self, ens_content=True): -380 """Output detailed properties of the Obs. -381 -382 Parameters -383 ---------- -384 ens_content : bool -385 print details about the ensembles and replica if true. -386 """ -387 if self.tag is not None: -388 print("Description:", self.tag) -389 if not hasattr(self, 'e_dvalue'): -390 print('Result\t %3.8e' % (self.value)) -391 else: -392 if self.value == 0.0: -393 percentage = np.nan -394 else: -395 percentage = np.abs(self._dvalue / self.value) * 100 -396 print('Result\t %3.8e +/- %3.8e +/- %3.8e (%3.3f%%)' % (self.value, self._dvalue, self.ddvalue, percentage)) -397 if len(self.e_names) > 1: -398 print(' Ensemble errors:') -399 e_content = self.e_content -400 for e_name in self.mc_names: -401 gap = _determine_gap(self, e_content, e_name) -402 -403 if len(self.e_names) > 1: -404 print('', e_name, '\t %3.6e +/- %3.6e' % (self.e_dvalue[e_name], self.e_ddvalue[e_name])) -405 tau_string = " \N{GREEK SMALL LETTER TAU}_int\t " + _format_uncertainty(self.e_tauint[e_name], self.e_dtauint[e_name]) -406 tau_string += f" in units of {gap} config" -407 if gap > 1: -408 tau_string += "s" -409 if self.tau_exp[e_name] > 0: -410 tau_string = f"{tau_string: <45}" + '\t(\N{GREEK SMALL LETTER TAU}_exp=%3.2f, N_\N{GREEK SMALL LETTER SIGMA}=%1.0i)' % (self.tau_exp[e_name], self.N_sigma[e_name]) -411 else: -412 tau_string = f"{tau_string: <45}" + '\t(S=%3.2f)' % (self.S[e_name]) -413 print(tau_string) -414 for e_name in 
self.cov_names: -415 print('', e_name, '\t %3.8e' % (self.e_dvalue[e_name])) -416 if ens_content is True: -417 if len(self.e_names) == 1: -418 print(self.N, 'samples in', len(self.e_names), 'ensemble:') -419 else: -420 print(self.N, 'samples in', len(self.e_names), 'ensembles:') -421 my_string_list = [] -422 for key, value in sorted(self.e_content.items()): -423 if key not in self.covobs: -424 my_string = ' ' + "\u00B7 Ensemble '" + key + "' " -425 if len(value) == 1: -426 my_string += f': {self.shape[value[0]]} configurations' -427 if isinstance(self.idl[value[0]], range): -428 my_string += f' (from {self.idl[value[0]].start} to {self.idl[value[0]][-1]}' + int(self.idl[value[0]].step != 1) * f' in steps of {self.idl[value[0]].step}' + ')' -429 else: -430 my_string += f' (irregular range from {self.idl[value[0]][0]} to {self.idl[value[0]][-1]})' -431 else: -432 sublist = [] -433 for v in value: -434 my_substring = ' ' + "\u00B7 Replicum '" + v[len(key) + 1:] + "' " -435 my_substring += f': {self.shape[v]} configurations' -436 if isinstance(self.idl[v], range): -437 my_substring += f' (from {self.idl[v].start} to {self.idl[v][-1]}' + int(self.idl[v].step != 1) * f' in steps of {self.idl[v].step}' + ')' -438 else: -439 my_substring += f' (irregular range from {self.idl[v][0]} to {self.idl[v][-1]})' -440 sublist.append(my_substring) -441 -442 my_string += '\n' + '\n'.join(sublist) -443 else: -444 my_string = ' ' + "\u00B7 Covobs '" + key + "' " -445 my_string_list.append(my_string) -446 print('\n'.join(my_string_list)) -447 -448 def reweight(self, weight): -449 """Reweight the obs with given rewighting factors. -450 -451 Parameters -452 ---------- -453 weight : Obs -454 Reweighting factor. An Observable that has to be defined on a superset of the -455 configurations in obs[i].idl for all i. 
-456 all_configs : bool -457 if True, the reweighted observables are normalized by the average of -458 the reweighting factor on all configurations in weight.idl and not -459 on the configurations in obs[i].idl. Default False. -460 """ -461 return reweight(weight, [self])[0] -462 -463 def is_zero_within_error(self, sigma=1): -464 """Checks whether the observable is zero within 'sigma' standard errors. -465 -466 Parameters -467 ---------- -468 sigma : int -469 Number of standard errors used for the check. -470 -471 Works only properly when the gamma method was run. -472 """ -473 return self.is_zero() or np.abs(self.value) <= sigma * self._dvalue -474 -475 def is_zero(self, atol=1e-10): -476 """Checks whether the observable is zero within a given tolerance. -477 -478 Parameters -479 ---------- -480 atol : float -481 Absolute tolerance (for details see numpy documentation). -482 """ -483 return np.isclose(0.0, self.value, 1e-14, atol) and all(np.allclose(0.0, delta, 1e-14, atol) for delta in self.deltas.values()) and all(np.allclose(0.0, delta.errsq(), 1e-14, atol) for delta in self.covobs.values()) -484 -485 def plot_tauint(self, save=None): -486 """Plot integrated autocorrelation time for each ensemble. -487 -488 Parameters -489 ---------- -490 save : str -491 saves the figure to a file named 'save' if. 
-492 """ -493 if not hasattr(self, 'e_dvalue'): -494 raise Exception('Run the gamma method first.') -495 -496 for e, e_name in enumerate(self.mc_names): -497 fig = plt.figure() -498 plt.xlabel(r'$W$') -499 plt.ylabel(r'$\tau_\mathrm{int}$') -500 length = int(len(self.e_n_tauint[e_name])) -501 if self.tau_exp[e_name] > 0: -502 base = self.e_n_tauint[e_name][self.e_windowsize[e_name]] -503 x_help = np.arange(2 * self.tau_exp[e_name]) -504 y_help = (x_help + 1) * np.abs(self.e_rho[e_name][self.e_windowsize[e_name] + 1]) * (1 - x_help / (2 * (2 * self.tau_exp[e_name] - 1))) + base -505 x_arr = np.arange(self.e_windowsize[e_name] + 1, self.e_windowsize[e_name] + 1 + 2 * self.tau_exp[e_name]) -506 plt.plot(x_arr, y_help, 'C' + str(e), linewidth=1, ls='--', marker=',') -507 plt.errorbar([self.e_windowsize[e_name] + 2 * self.tau_exp[e_name]], [self.e_tauint[e_name]], -508 yerr=[self.e_dtauint[e_name]], fmt='C' + str(e), linewidth=1, capsize=2, marker='o', mfc=plt.rcParams['axes.facecolor']) -509 xmax = self.e_windowsize[e_name] + 2 * self.tau_exp[e_name] + 1.5 -510 label = e_name + r', $\tau_\mathrm{exp}$=' + str(np.around(self.tau_exp[e_name], decimals=2)) -511 else: -512 label = e_name + ', S=' + str(np.around(self.S[e_name], decimals=2)) -513 xmax = max(10.5, 2 * self.e_windowsize[e_name] - 0.5) -514 -515 plt.errorbar(np.arange(length)[:int(xmax) + 1], self.e_n_tauint[e_name][:int(xmax) + 1], yerr=self.e_n_dtauint[e_name][:int(xmax) + 1], linewidth=1, capsize=2, label=label) -516 plt.axvline(x=self.e_windowsize[e_name], color='C' + str(e), alpha=0.5, marker=',', ls='--') -517 plt.legend() -518 plt.xlim(-0.5, xmax) -519 ylim = plt.ylim() -520 plt.ylim(bottom=0.0, top=max(1.0, ylim[1])) -521 plt.draw() -522 if save: -523 fig.savefig(save + "_" + str(e)) -524 -525 def plot_rho(self, save=None): -526 """Plot normalized autocorrelation function time for each ensemble. -527 -528 Parameters -529 ---------- -530 save : str -531 saves the figure to a file named 'save' if. 
-532 """ -533 if not hasattr(self, 'e_dvalue'): -534 raise Exception('Run the gamma method first.') -535 for e, e_name in enumerate(self.mc_names): -536 fig = plt.figure() -537 plt.xlabel('W') -538 plt.ylabel('rho') -539 length = int(len(self.e_drho[e_name])) -540 plt.errorbar(np.arange(length), self.e_rho[e_name][:length], yerr=self.e_drho[e_name][:], linewidth=1, capsize=2) -541 plt.axvline(x=self.e_windowsize[e_name], color='r', alpha=0.25, ls='--', marker=',') -542 if self.tau_exp[e_name] > 0: -543 plt.plot([self.e_windowsize[e_name] + 1, self.e_windowsize[e_name] + 1 + 2 * self.tau_exp[e_name]], -544 [self.e_rho[e_name][self.e_windowsize[e_name] + 1], 0], 'k-', lw=1) -545 xmax = self.e_windowsize[e_name] + 2 * self.tau_exp[e_name] + 1.5 -546 plt.title('Rho ' + e_name + r', tau\_exp=' + str(np.around(self.tau_exp[e_name], decimals=2))) -547 else: -548 xmax = max(10.5, 2 * self.e_windowsize[e_name] - 0.5) -549 plt.title('Rho ' + e_name + ', S=' + str(np.around(self.S[e_name], decimals=2))) -550 plt.plot([-0.5, xmax], [0, 0], 'k--', lw=1) -551 plt.xlim(-0.5, xmax) -552 plt.draw() -553 if save: -554 fig.savefig(save + "_" + str(e)) -555 -556 def plot_rep_dist(self): -557 """Plot replica distribution for each ensemble with more than one replicum.""" -558 if not hasattr(self, 'e_dvalue'): -559 raise Exception('Run the gamma method first.') -560 for e, e_name in enumerate(self.mc_names): -561 if len(self.e_content[e_name]) == 1: -562 print('No replica distribution for a single replicum (', e_name, ')') -563 continue -564 r_length = [] -565 sub_r_mean = 0 -566 for r, r_name in enumerate(self.e_content[e_name]): -567 r_length.append(len(self.deltas[r_name])) -568 sub_r_mean += self.shape[r_name] * self.r_values[r_name] -569 e_N = np.sum(r_length) -570 sub_r_mean /= e_N -571 arr = np.zeros(len(self.e_content[e_name])) -572 for r, r_name in enumerate(self.e_content[e_name]): -573 arr[r] = (self.r_values[r_name] - sub_r_mean) / (self.e_dvalue[e_name] * np.sqrt(e_N / 
self.shape[r_name] - 1)) -574 plt.hist(arr, rwidth=0.8, bins=len(self.e_content[e_name])) -575 plt.title('Replica distribution' + e_name + ' (mean=0, var=1)') -576 plt.draw() -577 -578 def plot_history(self, expand=True): -579 """Plot derived Monte Carlo history for each ensemble -580 -581 Parameters -582 ---------- -583 expand : bool -584 show expanded history for irregular Monte Carlo chains (default: True). -585 """ -586 for e, e_name in enumerate(self.mc_names): -587 plt.figure() -588 r_length = [] -589 tmp = [] -590 tmp_expanded = [] -591 for r, r_name in enumerate(self.e_content[e_name]): -592 tmp.append(self.deltas[r_name] + self.r_values[r_name]) -593 if expand: -594 tmp_expanded.append(_expand_deltas(self.deltas[r_name], list(self.idl[r_name]), self.shape[r_name], 1) + self.r_values[r_name]) -595 r_length.append(len(tmp_expanded[-1])) -596 else: -597 r_length.append(len(tmp[-1])) -598 e_N = np.sum(r_length) -599 x = np.arange(e_N) -600 y_test = np.concatenate(tmp, axis=0) -601 if expand: -602 y = np.concatenate(tmp_expanded, axis=0) -603 else: -604 y = y_test -605 plt.errorbar(x, y, fmt='.', markersize=3) -606 plt.xlim(-0.5, e_N - 0.5) -607 plt.title(e_name + f'\nskew: {skew(y_test):.3f} (p={skewtest(y_test).pvalue:.3f}), kurtosis: {kurtosis(y_test):.3f} (p={kurtosistest(y_test).pvalue:.3f})') -608 plt.draw() -609 -610 def plot_piechart(self, save=None): -611 """Plot piechart which shows the fractional contribution of each -612 ensemble to the error and returns a dictionary containing the fractions. -613 -614 Parameters -615 ---------- -616 save : str -617 saves the figure to a file named 'save' if. 
-618 """ -619 if not hasattr(self, 'e_dvalue'): -620 raise Exception('Run the gamma method first.') -621 if np.isclose(0.0, self._dvalue, atol=1e-15): -622 raise Exception('Error is 0.0') -623 labels = self.e_names -624 sizes = [self.e_dvalue[name] ** 2 for name in labels] / self._dvalue ** 2 -625 fig1, ax1 = plt.subplots() -626 ax1.pie(sizes, labels=labels, startangle=90, normalize=True) -627 ax1.axis('equal') -628 plt.draw() -629 if save: -630 fig1.savefig(save) -631 -632 return dict(zip(labels, sizes)) +379 return gamma +380 +381 def details(self, ens_content=True): +382 """Output detailed properties of the Obs. +383 +384 Parameters +385 ---------- +386 ens_content : bool +387 print details about the ensembles and replica if true. +388 """ +389 if self.tag is not None: +390 print("Description:", self.tag) +391 if not hasattr(self, 'e_dvalue'): +392 print('Result\t %3.8e' % (self.value)) +393 else: +394 if self.value == 0.0: +395 percentage = np.nan +396 else: +397 percentage = np.abs(self._dvalue / self.value) * 100 +398 print('Result\t %3.8e +/- %3.8e +/- %3.8e (%3.3f%%)' % (self.value, self._dvalue, self.ddvalue, percentage)) +399 if len(self.e_names) > 1: +400 print(' Ensemble errors:') +401 e_content = self.e_content +402 for e_name in self.mc_names: +403 gap = _determine_gap(self, e_content, e_name) +404 +405 if len(self.e_names) > 1: +406 print('', e_name, '\t %3.6e +/- %3.6e' % (self.e_dvalue[e_name], self.e_ddvalue[e_name])) +407 tau_string = " \N{GREEK SMALL LETTER TAU}_int\t " + _format_uncertainty(self.e_tauint[e_name], self.e_dtauint[e_name]) +408 tau_string += f" in units of {gap} config" +409 if gap > 1: +410 tau_string += "s" +411 if self.tau_exp[e_name] > 0: +412 tau_string = f"{tau_string: <45}" + '\t(\N{GREEK SMALL LETTER TAU}_exp=%3.2f, N_\N{GREEK SMALL LETTER SIGMA}=%1.0i)' % (self.tau_exp[e_name], self.N_sigma[e_name]) +413 else: +414 tau_string = f"{tau_string: <45}" + '\t(S=%3.2f)' % (self.S[e_name]) +415 print(tau_string) +416 for e_name 
in self.cov_names: +417 print('', e_name, '\t %3.8e' % (self.e_dvalue[e_name])) +418 if ens_content is True: +419 if len(self.e_names) == 1: +420 print(self.N, 'samples in', len(self.e_names), 'ensemble:') +421 else: +422 print(self.N, 'samples in', len(self.e_names), 'ensembles:') +423 my_string_list = [] +424 for key, value in sorted(self.e_content.items()): +425 if key not in self.covobs: +426 my_string = ' ' + "\u00B7 Ensemble '" + key + "' " +427 if len(value) == 1: +428 my_string += f': {self.shape[value[0]]} configurations' +429 if isinstance(self.idl[value[0]], range): +430 my_string += f' (from {self.idl[value[0]].start} to {self.idl[value[0]][-1]}' + int(self.idl[value[0]].step != 1) * f' in steps of {self.idl[value[0]].step}' + ')' +431 else: +432 my_string += f' (irregular range from {self.idl[value[0]][0]} to {self.idl[value[0]][-1]})' +433 else: +434 sublist = [] +435 for v in value: +436 my_substring = ' ' + "\u00B7 Replicum '" + v[len(key) + 1:] + "' " +437 my_substring += f': {self.shape[v]} configurations' +438 if isinstance(self.idl[v], range): +439 my_substring += f' (from {self.idl[v].start} to {self.idl[v][-1]}' + int(self.idl[v].step != 1) * f' in steps of {self.idl[v].step}' + ')' +440 else: +441 my_substring += f' (irregular range from {self.idl[v][0]} to {self.idl[v][-1]})' +442 sublist.append(my_substring) +443 +444 my_string += '\n' + '\n'.join(sublist) +445 else: +446 my_string = ' ' + "\u00B7 Covobs '" + key + "' " +447 my_string_list.append(my_string) +448 print('\n'.join(my_string_list)) +449 +450 def reweight(self, weight): +451 """Reweight the obs with given rewighting factors. +452 +453 Parameters +454 ---------- +455 weight : Obs +456 Reweighting factor. An Observable that has to be defined on a superset of the +457 configurations in obs[i].idl for all i. 
+458 all_configs : bool +459 if True, the reweighted observables are normalized by the average of +460 the reweighting factor on all configurations in weight.idl and not +461 on the configurations in obs[i].idl. Default False. +462 """ +463 return reweight(weight, [self])[0] +464 +465 def is_zero_within_error(self, sigma=1): +466 """Checks whether the observable is zero within 'sigma' standard errors. +467 +468 Parameters +469 ---------- +470 sigma : int +471 Number of standard errors used for the check. +472 +473 Works only properly when the gamma method was run. +474 """ +475 return self.is_zero() or np.abs(self.value) <= sigma * self._dvalue +476 +477 def is_zero(self, atol=1e-10): +478 """Checks whether the observable is zero within a given tolerance. +479 +480 Parameters +481 ---------- +482 atol : float +483 Absolute tolerance (for details see numpy documentation). +484 """ +485 return np.isclose(0.0, self.value, 1e-14, atol) and all(np.allclose(0.0, delta, 1e-14, atol) for delta in self.deltas.values()) and all(np.allclose(0.0, delta.errsq(), 1e-14, atol) for delta in self.covobs.values()) +486 +487 def plot_tauint(self, save=None): +488 """Plot integrated autocorrelation time for each ensemble. +489 +490 Parameters +491 ---------- +492 save : str +493 saves the figure to a file named 'save' if. 
+494 """ +495 if not hasattr(self, 'e_dvalue'): +496 raise Exception('Run the gamma method first.') +497 +498 for e, e_name in enumerate(self.mc_names): +499 fig = plt.figure() +500 plt.xlabel(r'$W$') +501 plt.ylabel(r'$\tau_\mathrm{int}$') +502 length = int(len(self.e_n_tauint[e_name])) +503 if self.tau_exp[e_name] > 0: +504 base = self.e_n_tauint[e_name][self.e_windowsize[e_name]] +505 x_help = np.arange(2 * self.tau_exp[e_name]) +506 y_help = (x_help + 1) * np.abs(self.e_rho[e_name][self.e_windowsize[e_name] + 1]) * (1 - x_help / (2 * (2 * self.tau_exp[e_name] - 1))) + base +507 x_arr = np.arange(self.e_windowsize[e_name] + 1, self.e_windowsize[e_name] + 1 + 2 * self.tau_exp[e_name]) +508 plt.plot(x_arr, y_help, 'C' + str(e), linewidth=1, ls='--', marker=',') +509 plt.errorbar([self.e_windowsize[e_name] + 2 * self.tau_exp[e_name]], [self.e_tauint[e_name]], +510 yerr=[self.e_dtauint[e_name]], fmt='C' + str(e), linewidth=1, capsize=2, marker='o', mfc=plt.rcParams['axes.facecolor']) +511 xmax = self.e_windowsize[e_name] + 2 * self.tau_exp[e_name] + 1.5 +512 label = e_name + r', $\tau_\mathrm{exp}$=' + str(np.around(self.tau_exp[e_name], decimals=2)) +513 else: +514 label = e_name + ', S=' + str(np.around(self.S[e_name], decimals=2)) +515 xmax = max(10.5, 2 * self.e_windowsize[e_name] - 0.5) +516 +517 plt.errorbar(np.arange(length)[:int(xmax) + 1], self.e_n_tauint[e_name][:int(xmax) + 1], yerr=self.e_n_dtauint[e_name][:int(xmax) + 1], linewidth=1, capsize=2, label=label) +518 plt.axvline(x=self.e_windowsize[e_name], color='C' + str(e), alpha=0.5, marker=',', ls='--') +519 plt.legend() +520 plt.xlim(-0.5, xmax) +521 ylim = plt.ylim() +522 plt.ylim(bottom=0.0, top=max(1.0, ylim[1])) +523 plt.draw() +524 if save: +525 fig.savefig(save + "_" + str(e)) +526 +527 def plot_rho(self, save=None): +528 """Plot normalized autocorrelation function time for each ensemble. +529 +530 Parameters +531 ---------- +532 save : str +533 saves the figure to a file named 'save' if. 
+534 """ +535 if not hasattr(self, 'e_dvalue'): +536 raise Exception('Run the gamma method first.') +537 for e, e_name in enumerate(self.mc_names): +538 fig = plt.figure() +539 plt.xlabel('W') +540 plt.ylabel('rho') +541 length = int(len(self.e_drho[e_name])) +542 plt.errorbar(np.arange(length), self.e_rho[e_name][:length], yerr=self.e_drho[e_name][:], linewidth=1, capsize=2) +543 plt.axvline(x=self.e_windowsize[e_name], color='r', alpha=0.25, ls='--', marker=',') +544 if self.tau_exp[e_name] > 0: +545 plt.plot([self.e_windowsize[e_name] + 1, self.e_windowsize[e_name] + 1 + 2 * self.tau_exp[e_name]], +546 [self.e_rho[e_name][self.e_windowsize[e_name] + 1], 0], 'k-', lw=1) +547 xmax = self.e_windowsize[e_name] + 2 * self.tau_exp[e_name] + 1.5 +548 plt.title('Rho ' + e_name + r', tau\_exp=' + str(np.around(self.tau_exp[e_name], decimals=2))) +549 else: +550 xmax = max(10.5, 2 * self.e_windowsize[e_name] - 0.5) +551 plt.title('Rho ' + e_name + ', S=' + str(np.around(self.S[e_name], decimals=2))) +552 plt.plot([-0.5, xmax], [0, 0], 'k--', lw=1) +553 plt.xlim(-0.5, xmax) +554 plt.draw() +555 if save: +556 fig.savefig(save + "_" + str(e)) +557 +558 def plot_rep_dist(self): +559 """Plot replica distribution for each ensemble with more than one replicum.""" +560 if not hasattr(self, 'e_dvalue'): +561 raise Exception('Run the gamma method first.') +562 for e, e_name in enumerate(self.mc_names): +563 if len(self.e_content[e_name]) == 1: +564 print('No replica distribution for a single replicum (', e_name, ')') +565 continue +566 r_length = [] +567 sub_r_mean = 0 +568 for r, r_name in enumerate(self.e_content[e_name]): +569 r_length.append(len(self.deltas[r_name])) +570 sub_r_mean += self.shape[r_name] * self.r_values[r_name] +571 e_N = np.sum(r_length) +572 sub_r_mean /= e_N +573 arr = np.zeros(len(self.e_content[e_name])) +574 for r, r_name in enumerate(self.e_content[e_name]): +575 arr[r] = (self.r_values[r_name] - sub_r_mean) / (self.e_dvalue[e_name] * np.sqrt(e_N / 
self.shape[r_name] - 1)) +576 plt.hist(arr, rwidth=0.8, bins=len(self.e_content[e_name])) +577 plt.title('Replica distribution' + e_name + ' (mean=0, var=1)') +578 plt.draw() +579 +580 def plot_history(self, expand=True): +581 """Plot derived Monte Carlo history for each ensemble +582 +583 Parameters +584 ---------- +585 expand : bool +586 show expanded history for irregular Monte Carlo chains (default: True). +587 """ +588 for e, e_name in enumerate(self.mc_names): +589 plt.figure() +590 r_length = [] +591 tmp = [] +592 tmp_expanded = [] +593 for r, r_name in enumerate(self.e_content[e_name]): +594 tmp.append(self.deltas[r_name] + self.r_values[r_name]) +595 if expand: +596 tmp_expanded.append(_expand_deltas(self.deltas[r_name], list(self.idl[r_name]), self.shape[r_name], 1) + self.r_values[r_name]) +597 r_length.append(len(tmp_expanded[-1])) +598 else: +599 r_length.append(len(tmp[-1])) +600 e_N = np.sum(r_length) +601 x = np.arange(e_N) +602 y_test = np.concatenate(tmp, axis=0) +603 if expand: +604 y = np.concatenate(tmp_expanded, axis=0) +605 else: +606 y = y_test +607 plt.errorbar(x, y, fmt='.', markersize=3) +608 plt.xlim(-0.5, e_N - 0.5) +609 plt.title(e_name + f'\nskew: {skew(y_test):.3f} (p={skewtest(y_test).pvalue:.3f}), kurtosis: {kurtosis(y_test):.3f} (p={kurtosistest(y_test).pvalue:.3f})') +610 plt.draw() +611 +612 def plot_piechart(self, save=None): +613 """Plot piechart which shows the fractional contribution of each +614 ensemble to the error and returns a dictionary containing the fractions. +615 +616 Parameters +617 ---------- +618 save : str +619 saves the figure to a file named 'save' if. 
+620 """ +621 if not hasattr(self, 'e_dvalue'): +622 raise Exception('Run the gamma method first.') +623 if np.isclose(0.0, self._dvalue, atol=1e-15): +624 raise Exception('Error is 0.0') +625 labels = self.e_names +626 sizes = [self.e_dvalue[name] ** 2 for name in labels] / self._dvalue ** 2 +627 fig1, ax1 = plt.subplots() +628 ax1.pie(sizes, labels=labels, startangle=90, normalize=True) +629 ax1.axis('equal') +630 plt.draw() +631 if save: +632 fig1.savefig(save) 633 -634 def dump(self, filename, datatype="json.gz", description="", **kwargs): -635 """Dump the Obs to a file 'name' of chosen format. -636 -637 Parameters -638 ---------- -639 filename : str -640 name of the file to be saved. -641 datatype : str -642 Format of the exported file. Supported formats include -643 "json.gz" and "pickle" -644 description : str -645 Description for output file, only relevant for json.gz format. -646 path : str -647 specifies a custom path for the file (default '.') -648 """ -649 if 'path' in kwargs: -650 file_name = kwargs.get('path') + '/' + filename -651 else: -652 file_name = filename -653 -654 if datatype == "json.gz": -655 from .input.json import dump_to_json -656 dump_to_json([self], file_name, description=description) -657 elif datatype == "pickle": -658 with open(file_name + '.p', 'wb') as fb: -659 pickle.dump(self, fb) -660 else: -661 raise Exception("Unknown datatype " + str(datatype)) -662 -663 def export_jackknife(self): -664 """Export jackknife samples from the Obs -665 -666 Returns -667 ------- -668 numpy.ndarray -669 Returns a numpy array of length N + 1 where N is the number of samples -670 for the given ensemble and replicum. The zeroth entry of the array contains -671 the mean value of the Obs, entries 1 to N contain the N jackknife samples -672 derived from the Obs. The current implementation only works for observables -673 defined on exactly one ensemble and replicum. 
The derived jackknife samples -674 should agree with samples from a full jackknife analysis up to O(1/N). -675 """ -676 -677 if len(self.names) != 1: -678 raise Exception("'export_jackknife' is only implemented for Obs defined on one ensemble and replicum.") -679 -680 name = self.names[0] -681 full_data = self.deltas[name] + self.r_values[name] -682 n = full_data.size -683 mean = self.value -684 tmp_jacks = np.zeros(n + 1) -685 tmp_jacks[0] = mean -686 tmp_jacks[1:] = (n * mean - full_data) / (n - 1) -687 return tmp_jacks -688 -689 def export_bootstrap(self, samples=500, random_numbers=None, save_rng=None): -690 """Export bootstrap samples from the Obs -691 -692 Parameters -693 ---------- -694 samples : int -695 Number of bootstrap samples to generate. -696 random_numbers : np.ndarray -697 Array of shape (samples, length) containing the random numbers to generate the bootstrap samples. -698 If not provided the bootstrap samples are generated bashed on the md5 hash of the enesmble name. -699 save_rng : str -700 Save the random numbers to a file if a path is specified. -701 -702 Returns -703 ------- -704 numpy.ndarray -705 Returns a numpy array of length N + 1 where N is the number of samples -706 for the given ensemble and replicum. The zeroth entry of the array contains -707 the mean value of the Obs, entries 1 to N contain the N import_bootstrap samples -708 derived from the Obs. The current implementation only works for observables -709 defined on exactly one ensemble and replicum. The derived bootstrap samples -710 should agree with samples from a full bootstrap analysis up to O(1/N). 
-711 """ -712 if len(self.names) != 1: -713 raise Exception("'export_boostrap' is only implemented for Obs defined on one ensemble and replicum.") -714 -715 name = self.names[0] -716 length = self.N -717 -718 if random_numbers is None: -719 seed = int(hashlib.md5(name.encode()).hexdigest(), 16) & 0xFFFFFFFF -720 rng = np.random.default_rng(seed) -721 random_numbers = rng.integers(0, length, size=(samples, length)) -722 -723 if save_rng is not None: -724 np.savetxt(save_rng, random_numbers, fmt='%i') -725 -726 proj = np.vstack([np.bincount(o, minlength=length) for o in random_numbers]) / length -727 ret = np.zeros(samples + 1) -728 ret[0] = self.value -729 ret[1:] = proj @ (self.deltas[name] + self.r_values[name]) -730 return ret -731 -732 def __float__(self): -733 return float(self.value) -734 -735 def __repr__(self): -736 return 'Obs[' + str(self) + ']' -737 -738 def __str__(self): -739 return _format_uncertainty(self.value, self._dvalue) -740 -741 def __format__(self, format_type): -742 if format_type == "": -743 significance = 2 -744 else: -745 significance = int(float(format_type.replace("+", "").replace("-", ""))) -746 my_str = _format_uncertainty(self.value, self._dvalue, -747 significance=significance) -748 for char in ["+", " "]: -749 if format_type.startswith(char): -750 if my_str[0] != "-": -751 my_str = char + my_str -752 return my_str -753 -754 def __hash__(self): -755 hash_tuple = (np.array([self.value]).astype(np.float32).data.tobytes(),) -756 hash_tuple += tuple([o.astype(np.float32).data.tobytes() for o in self.deltas.values()]) -757 hash_tuple += tuple([np.array([o.errsq()]).astype(np.float32).data.tobytes() for o in self.covobs.values()]) -758 hash_tuple += tuple([o.encode() for o in self.names]) -759 m = hashlib.md5() -760 [m.update(o) for o in hash_tuple] -761 return int(m.hexdigest(), 16) & 0xFFFFFFFF -762 -763 # Overload comparisons -764 def __lt__(self, other): -765 return self.value < other -766 -767 def __le__(self, other): -768 return 
self.value <= other -769 -770 def __gt__(self, other): -771 return self.value > other -772 -773 def __ge__(self, other): -774 return self.value >= other -775 -776 def __eq__(self, other): -777 if other is None: -778 return False -779 return (self - other).is_zero() -780 -781 # Overload math operations -782 def __add__(self, y): -783 if isinstance(y, Obs): -784 return derived_observable(lambda x, **kwargs: x[0] + x[1], [self, y], man_grad=[1, 1]) -785 else: -786 if isinstance(y, np.ndarray): -787 return np.array([self + o for o in y]) -788 elif isinstance(y, complex): -789 return CObs(self, 0) + y -790 elif y.__class__.__name__ in ['Corr', 'CObs']: -791 return NotImplemented -792 else: -793 return derived_observable(lambda x, **kwargs: x[0] + y, [self], man_grad=[1]) -794 -795 def __radd__(self, y): -796 return self + y -797 -798 def __mul__(self, y): -799 if isinstance(y, Obs): -800 return derived_observable(lambda x, **kwargs: x[0] * x[1], [self, y], man_grad=[y.value, self.value]) -801 else: -802 if isinstance(y, np.ndarray): -803 return np.array([self * o for o in y]) -804 elif isinstance(y, complex): -805 return CObs(self * y.real, self * y.imag) -806 elif y.__class__.__name__ in ['Corr', 'CObs']: -807 return NotImplemented -808 else: -809 return derived_observable(lambda x, **kwargs: x[0] * y, [self], man_grad=[y]) -810 -811 def __rmul__(self, y): -812 return self * y -813 -814 def __sub__(self, y): -815 if isinstance(y, Obs): -816 return derived_observable(lambda x, **kwargs: x[0] - x[1], [self, y], man_grad=[1, -1]) -817 else: -818 if isinstance(y, np.ndarray): -819 return np.array([self - o for o in y]) -820 elif y.__class__.__name__ in ['Corr', 'CObs']: -821 return NotImplemented -822 else: -823 return derived_observable(lambda x, **kwargs: x[0] - y, [self], man_grad=[1]) -824 -825 def __rsub__(self, y): -826 return -1 * (self - y) -827 -828 def __pos__(self): -829 return self -830 -831 def __neg__(self): -832 return -1 * self -833 -834 def 
__truediv__(self, y): -835 if isinstance(y, Obs): -836 return derived_observable(lambda x, **kwargs: x[0] / x[1], [self, y], man_grad=[1 / y.value, - self.value / y.value ** 2]) -837 else: -838 if isinstance(y, np.ndarray): -839 return np.array([self / o for o in y]) -840 elif y.__class__.__name__ in ['Corr', 'CObs']: -841 return NotImplemented -842 else: -843 return derived_observable(lambda x, **kwargs: x[0] / y, [self], man_grad=[1 / y]) -844 -845 def __rtruediv__(self, y): -846 if isinstance(y, Obs): -847 return derived_observable(lambda x, **kwargs: x[0] / x[1], [y, self], man_grad=[1 / self.value, - y.value / self.value ** 2]) -848 else: -849 if isinstance(y, np.ndarray): -850 return np.array([o / self for o in y]) -851 elif y.__class__.__name__ in ['Corr', 'CObs']: -852 return NotImplemented -853 else: -854 return derived_observable(lambda x, **kwargs: y / x[0], [self], man_grad=[-y / self.value ** 2]) -855 -856 def __pow__(self, y): -857 if isinstance(y, Obs): -858 return derived_observable(lambda x: x[0] ** x[1], [self, y]) -859 else: -860 return derived_observable(lambda x: x[0] ** y, [self]) -861 -862 def __rpow__(self, y): -863 if isinstance(y, Obs): -864 return derived_observable(lambda x: x[0] ** x[1], [y, self]) -865 else: -866 return derived_observable(lambda x: y ** x[0], [self]) -867 -868 def __abs__(self): -869 return derived_observable(lambda x: anp.abs(x[0]), [self]) -870 -871 # Overload numpy functions -872 def sqrt(self): -873 return derived_observable(lambda x, **kwargs: np.sqrt(x[0]), [self], man_grad=[1 / 2 / np.sqrt(self.value)]) -874 -875 def log(self): -876 return derived_observable(lambda x, **kwargs: np.log(x[0]), [self], man_grad=[1 / self.value]) -877 -878 def exp(self): -879 return derived_observable(lambda x, **kwargs: np.exp(x[0]), [self], man_grad=[np.exp(self.value)]) -880 -881 def sin(self): -882 return derived_observable(lambda x, **kwargs: np.sin(x[0]), [self], man_grad=[np.cos(self.value)]) -883 -884 def cos(self): -885 
return derived_observable(lambda x, **kwargs: np.cos(x[0]), [self], man_grad=[-np.sin(self.value)]) -886 -887 def tan(self): -888 return derived_observable(lambda x, **kwargs: np.tan(x[0]), [self], man_grad=[1 / np.cos(self.value) ** 2]) -889 -890 def arcsin(self): -891 return derived_observable(lambda x: anp.arcsin(x[0]), [self]) -892 -893 def arccos(self): -894 return derived_observable(lambda x: anp.arccos(x[0]), [self]) -895 -896 def arctan(self): -897 return derived_observable(lambda x: anp.arctan(x[0]), [self]) -898 -899 def sinh(self): -900 return derived_observable(lambda x, **kwargs: np.sinh(x[0]), [self], man_grad=[np.cosh(self.value)]) -901 -902 def cosh(self): -903 return derived_observable(lambda x, **kwargs: np.cosh(x[0]), [self], man_grad=[np.sinh(self.value)]) -904 -905 def tanh(self): -906 return derived_observable(lambda x, **kwargs: np.tanh(x[0]), [self], man_grad=[1 / np.cosh(self.value) ** 2]) -907 -908 def arcsinh(self): -909 return derived_observable(lambda x: anp.arcsinh(x[0]), [self]) -910 -911 def arccosh(self): -912 return derived_observable(lambda x: anp.arccosh(x[0]), [self]) -913 -914 def arctanh(self): -915 return derived_observable(lambda x: anp.arctanh(x[0]), [self]) +634 return dict(zip(labels, sizes)) +635 +636 def dump(self, filename, datatype="json.gz", description="", **kwargs): +637 """Dump the Obs to a file 'name' of chosen format. +638 +639 Parameters +640 ---------- +641 filename : str +642 name of the file to be saved. +643 datatype : str +644 Format of the exported file. Supported formats include +645 "json.gz" and "pickle" +646 description : str +647 Description for output file, only relevant for json.gz format. 
+648 path : str +649 specifies a custom path for the file (default '.') +650 """ +651 if 'path' in kwargs: +652 file_name = kwargs.get('path') + '/' + filename +653 else: +654 file_name = filename +655 +656 if datatype == "json.gz": +657 from .input.json import dump_to_json +658 dump_to_json([self], file_name, description=description) +659 elif datatype == "pickle": +660 with open(file_name + '.p', 'wb') as fb: +661 pickle.dump(self, fb) +662 else: +663 raise Exception("Unknown datatype " + str(datatype)) +664 +665 def export_jackknife(self): +666 """Export jackknife samples from the Obs +667 +668 Returns +669 ------- +670 numpy.ndarray +671 Returns a numpy array of length N + 1 where N is the number of samples +672 for the given ensemble and replicum. The zeroth entry of the array contains +673 the mean value of the Obs, entries 1 to N contain the N jackknife samples +674 derived from the Obs. The current implementation only works for observables +675 defined on exactly one ensemble and replicum. The derived jackknife samples +676 should agree with samples from a full jackknife analysis up to O(1/N). +677 """ +678 +679 if len(self.names) != 1: +680 raise Exception("'export_jackknife' is only implemented for Obs defined on one ensemble and replicum.") +681 +682 name = self.names[0] +683 full_data = self.deltas[name] + self.r_values[name] +684 n = full_data.size +685 mean = self.value +686 tmp_jacks = np.zeros(n + 1) +687 tmp_jacks[0] = mean +688 tmp_jacks[1:] = (n * mean - full_data) / (n - 1) +689 return tmp_jacks +690 +691 def export_bootstrap(self, samples=500, random_numbers=None, save_rng=None): +692 """Export bootstrap samples from the Obs +693 +694 Parameters +695 ---------- +696 samples : int +697 Number of bootstrap samples to generate. +698 random_numbers : np.ndarray +699 Array of shape (samples, length) containing the random numbers to generate the bootstrap samples. 
+700 If not provided the bootstrap samples are generated bashed on the md5 hash of the enesmble name. +701 save_rng : str +702 Save the random numbers to a file if a path is specified. +703 +704 Returns +705 ------- +706 numpy.ndarray +707 Returns a numpy array of length N + 1 where N is the number of samples +708 for the given ensemble and replicum. The zeroth entry of the array contains +709 the mean value of the Obs, entries 1 to N contain the N import_bootstrap samples +710 derived from the Obs. The current implementation only works for observables +711 defined on exactly one ensemble and replicum. The derived bootstrap samples +712 should agree with samples from a full bootstrap analysis up to O(1/N). +713 """ +714 if len(self.names) != 1: +715 raise Exception("'export_boostrap' is only implemented for Obs defined on one ensemble and replicum.") +716 +717 name = self.names[0] +718 length = self.N +719 +720 if random_numbers is None: +721 seed = int(hashlib.md5(name.encode()).hexdigest(), 16) & 0xFFFFFFFF +722 rng = np.random.default_rng(seed) +723 random_numbers = rng.integers(0, length, size=(samples, length)) +724 +725 if save_rng is not None: +726 np.savetxt(save_rng, random_numbers, fmt='%i') +727 +728 proj = np.vstack([np.bincount(o, minlength=length) for o in random_numbers]) / length +729 ret = np.zeros(samples + 1) +730 ret[0] = self.value +731 ret[1:] = proj @ (self.deltas[name] + self.r_values[name]) +732 return ret +733 +734 def __float__(self): +735 return float(self.value) +736 +737 def __repr__(self): +738 return 'Obs[' + str(self) + ']' +739 +740 def __str__(self): +741 return _format_uncertainty(self.value, self._dvalue) +742 +743 def __format__(self, format_type): +744 if format_type == "": +745 significance = 2 +746 else: +747 significance = int(float(format_type.replace("+", "").replace("-", ""))) +748 my_str = _format_uncertainty(self.value, self._dvalue, +749 significance=significance) +750 for char in ["+", " "]: +751 if 
format_type.startswith(char): +752 if my_str[0] != "-": +753 my_str = char + my_str +754 return my_str +755 +756 def __hash__(self): +757 hash_tuple = (np.array([self.value]).astype(np.float32).data.tobytes(),) +758 hash_tuple += tuple([o.astype(np.float32).data.tobytes() for o in self.deltas.values()]) +759 hash_tuple += tuple([np.array([o.errsq()]).astype(np.float32).data.tobytes() for o in self.covobs.values()]) +760 hash_tuple += tuple([o.encode() for o in self.names]) +761 m = hashlib.md5() +762 [m.update(o) for o in hash_tuple] +763 return int(m.hexdigest(), 16) & 0xFFFFFFFF +764 +765 # Overload comparisons +766 def __lt__(self, other): +767 return self.value < other +768 +769 def __le__(self, other): +770 return self.value <= other +771 +772 def __gt__(self, other): +773 return self.value > other +774 +775 def __ge__(self, other): +776 return self.value >= other +777 +778 def __eq__(self, other): +779 if other is None: +780 return False +781 return (self - other).is_zero() +782 +783 # Overload math operations +784 def __add__(self, y): +785 if isinstance(y, Obs): +786 return derived_observable(lambda x, **kwargs: x[0] + x[1], [self, y], man_grad=[1, 1]) +787 else: +788 if isinstance(y, np.ndarray): +789 return np.array([self + o for o in y]) +790 elif isinstance(y, complex): +791 return CObs(self, 0) + y +792 elif y.__class__.__name__ in ['Corr', 'CObs']: +793 return NotImplemented +794 else: +795 return derived_observable(lambda x, **kwargs: x[0] + y, [self], man_grad=[1]) +796 +797 def __radd__(self, y): +798 return self + y +799 +800 def __mul__(self, y): +801 if isinstance(y, Obs): +802 return derived_observable(lambda x, **kwargs: x[0] * x[1], [self, y], man_grad=[y.value, self.value]) +803 else: +804 if isinstance(y, np.ndarray): +805 return np.array([self * o for o in y]) +806 elif isinstance(y, complex): +807 return CObs(self * y.real, self * y.imag) +808 elif y.__class__.__name__ in ['Corr', 'CObs']: +809 return NotImplemented +810 else: +811 return 
derived_observable(lambda x, **kwargs: x[0] * y, [self], man_grad=[y]) +812 +813 def __rmul__(self, y): +814 return self * y +815 +816 def __sub__(self, y): +817 if isinstance(y, Obs): +818 return derived_observable(lambda x, **kwargs: x[0] - x[1], [self, y], man_grad=[1, -1]) +819 else: +820 if isinstance(y, np.ndarray): +821 return np.array([self - o for o in y]) +822 elif y.__class__.__name__ in ['Corr', 'CObs']: +823 return NotImplemented +824 else: +825 return derived_observable(lambda x, **kwargs: x[0] - y, [self], man_grad=[1]) +826 +827 def __rsub__(self, y): +828 return -1 * (self - y) +829 +830 def __pos__(self): +831 return self +832 +833 def __neg__(self): +834 return -1 * self +835 +836 def __truediv__(self, y): +837 if isinstance(y, Obs): +838 return derived_observable(lambda x, **kwargs: x[0] / x[1], [self, y], man_grad=[1 / y.value, - self.value / y.value ** 2]) +839 else: +840 if isinstance(y, np.ndarray): +841 return np.array([self / o for o in y]) +842 elif y.__class__.__name__ in ['Corr', 'CObs']: +843 return NotImplemented +844 else: +845 return derived_observable(lambda x, **kwargs: x[0] / y, [self], man_grad=[1 / y]) +846 +847 def __rtruediv__(self, y): +848 if isinstance(y, Obs): +849 return derived_observable(lambda x, **kwargs: x[0] / x[1], [y, self], man_grad=[1 / self.value, - y.value / self.value ** 2]) +850 else: +851 if isinstance(y, np.ndarray): +852 return np.array([o / self for o in y]) +853 elif y.__class__.__name__ in ['Corr', 'CObs']: +854 return NotImplemented +855 else: +856 return derived_observable(lambda x, **kwargs: y / x[0], [self], man_grad=[-y / self.value ** 2]) +857 +858 def __pow__(self, y): +859 if isinstance(y, Obs): +860 return derived_observable(lambda x: x[0] ** x[1], [self, y]) +861 else: +862 return derived_observable(lambda x: x[0] ** y, [self]) +863 +864 def __rpow__(self, y): +865 if isinstance(y, Obs): +866 return derived_observable(lambda x: x[0] ** x[1], [y, self]) +867 else: +868 return 
derived_observable(lambda x: y ** x[0], [self]) +869 +870 def __abs__(self): +871 return derived_observable(lambda x: anp.abs(x[0]), [self]) +872 +873 # Overload numpy functions +874 def sqrt(self): +875 return derived_observable(lambda x, **kwargs: np.sqrt(x[0]), [self], man_grad=[1 / 2 / np.sqrt(self.value)]) +876 +877 def log(self): +878 return derived_observable(lambda x, **kwargs: np.log(x[0]), [self], man_grad=[1 / self.value]) +879 +880 def exp(self): +881 return derived_observable(lambda x, **kwargs: np.exp(x[0]), [self], man_grad=[np.exp(self.value)]) +882 +883 def sin(self): +884 return derived_observable(lambda x, **kwargs: np.sin(x[0]), [self], man_grad=[np.cos(self.value)]) +885 +886 def cos(self): +887 return derived_observable(lambda x, **kwargs: np.cos(x[0]), [self], man_grad=[-np.sin(self.value)]) +888 +889 def tan(self): +890 return derived_observable(lambda x, **kwargs: np.tan(x[0]), [self], man_grad=[1 / np.cos(self.value) ** 2]) +891 +892 def arcsin(self): +893 return derived_observable(lambda x: anp.arcsin(x[0]), [self]) +894 +895 def arccos(self): +896 return derived_observable(lambda x: anp.arccos(x[0]), [self]) +897 +898 def arctan(self): +899 return derived_observable(lambda x: anp.arctan(x[0]), [self]) +900 +901 def sinh(self): +902 return derived_observable(lambda x, **kwargs: np.sinh(x[0]), [self], man_grad=[np.cosh(self.value)]) +903 +904 def cosh(self): +905 return derived_observable(lambda x, **kwargs: np.cosh(x[0]), [self], man_grad=[np.sinh(self.value)]) +906 +907 def tanh(self): +908 return derived_observable(lambda x, **kwargs: np.tanh(x[0]), [self], man_grad=[1 / np.cosh(self.value) ** 2]) +909 +910 def arcsinh(self): +911 return derived_observable(lambda x: anp.arcsinh(x[0]), [self]) +912 +913 def arccosh(self): +914 return derived_observable(lambda x: anp.arccosh(x[0]), [self]) +915 +916 def arctanh(self): +917 return derived_observable(lambda x: anp.arctanh(x[0]), [self]) @@ -3079,39 +3083,41 @@ this overwrites the standard 
value for that ensemble. 105 elif isinstance(idx, (list, np.ndarray)): 106 dc = np.unique(np.diff(idx)) 107 if np.any(dc < 0): -108 raise ValueError("Unsorted idx for idl[%s]" % (name)) -109 if len(dc) == 1: -110 self.idl[name] = range(idx[0], idx[-1] + dc[0], dc[0]) -111 else: -112 self.idl[name] = list(idx) -113 else: -114 raise TypeError('incompatible type for idl[%s].' % (name)) -115 else: -116 for name, sample in sorted(zip(names, samples)): -117 self.idl[name] = range(1, len(sample) + 1) -118 -119 if kwargs.get("means") is not None: -120 for name, sample, mean in sorted(zip(names, samples, kwargs.get("means"))): -121 self.shape[name] = len(self.idl[name]) -122 self.N += self.shape[name] -123 self.r_values[name] = mean -124 self.deltas[name] = sample -125 else: -126 for name, sample in sorted(zip(names, samples)): -127 self.shape[name] = len(self.idl[name]) -128 self.N += self.shape[name] -129 if len(sample) != self.shape[name]: -130 raise ValueError('Incompatible samples and idx for %s: %d vs. %d' % (name, len(sample), self.shape[name])) -131 self.r_values[name] = np.mean(sample) -132 self.deltas[name] = sample - self.r_values[name] -133 self._value += self.shape[name] * self.r_values[name] -134 self._value /= self.N -135 -136 self._dvalue = 0.0 -137 self.ddvalue = 0.0 -138 self.reweighted = False -139 -140 self.tag = None +108 raise ValueError("Unsorted idx for idl[%s] at position %s" % (name, ' '.join(['%s' % (pos + 1) for pos in np.where(np.diff(idx) < 0)[0]]))) +109 elif np.any(dc == 0): +110 raise ValueError("Duplicate entries in idx for idl[%s] at position %s" % (name, ' '.join(['%s' % (pos + 1) for pos in np.where(np.diff(idx) == 0)[0]]))) +111 if len(dc) == 1: +112 self.idl[name] = range(idx[0], idx[-1] + dc[0], dc[0]) +113 else: +114 self.idl[name] = list(idx) +115 else: +116 raise TypeError('incompatible type for idl[%s].' 
% (name)) +117 else: +118 for name, sample in sorted(zip(names, samples)): +119 self.idl[name] = range(1, len(sample) + 1) +120 +121 if kwargs.get("means") is not None: +122 for name, sample, mean in sorted(zip(names, samples, kwargs.get("means"))): +123 self.shape[name] = len(self.idl[name]) +124 self.N += self.shape[name] +125 self.r_values[name] = mean +126 self.deltas[name] = sample +127 else: +128 for name, sample in sorted(zip(names, samples)): +129 self.shape[name] = len(self.idl[name]) +130 self.N += self.shape[name] +131 if len(sample) != self.shape[name]: +132 raise ValueError('Incompatible samples and idx for %s: %d vs. %d' % (name, len(sample), self.shape[name])) +133 self.r_values[name] = np.mean(sample) +134 self.deltas[name] = sample - self.r_values[name] +135 self._value += self.shape[name] * self.r_values[name] +136 self._value /= self.N +137 +138 self._dvalue = 0.0 +139 self.ddvalue = 0.0 +140 self.reweighted = False +141 +142 self.tag = None @@ -3390,171 +3396,171 @@ list of ranges or lists on which the samples are defined -
175    def gamma_method(self, **kwargs):
-176        """Estimate the error and related properties of the Obs.
-177
-178        Parameters
-179        ----------
-180        S : float
-181            specifies a custom value for the parameter S (default 2.0).
-182            If set to 0 it is assumed that the data exhibits no
-183            autocorrelation. In this case the error estimates coincides
-184            with the sample standard error.
-185        tau_exp : float
-186            positive value triggers the critical slowing down analysis
-187            (default 0.0).
-188        N_sigma : float
-189            number of standard deviations from zero until the tail is
-190            attached to the autocorrelation function (default 1).
-191        fft : bool
-192            determines whether the fft algorithm is used for the computation
-193            of the autocorrelation function (default True)
-194        """
-195
-196        e_content = self.e_content
-197        self.e_dvalue = {}
-198        self.e_ddvalue = {}
-199        self.e_tauint = {}
-200        self.e_dtauint = {}
-201        self.e_windowsize = {}
-202        self.e_n_tauint = {}
-203        self.e_n_dtauint = {}
-204        e_gamma = {}
-205        self.e_rho = {}
-206        self.e_drho = {}
-207        self._dvalue = 0
-208        self.ddvalue = 0
-209
-210        self.S = {}
-211        self.tau_exp = {}
-212        self.N_sigma = {}
-213
-214        if kwargs.get('fft') is False:
-215            fft = False
-216        else:
-217            fft = True
-218
-219        def _parse_kwarg(kwarg_name):
-220            if kwarg_name in kwargs:
-221                tmp = kwargs.get(kwarg_name)
-222                if isinstance(tmp, (int, float)):
-223                    if tmp < 0:
-224                        raise Exception(kwarg_name + ' has to be larger or equal to 0.')
-225                    for e, e_name in enumerate(self.e_names):
-226                        getattr(self, kwarg_name)[e_name] = tmp
-227                else:
-228                    raise TypeError(kwarg_name + ' is not in proper format.')
-229            else:
-230                for e, e_name in enumerate(self.e_names):
-231                    if e_name in getattr(Obs, kwarg_name + '_dict'):
-232                        getattr(self, kwarg_name)[e_name] = getattr(Obs, kwarg_name + '_dict')[e_name]
-233                    else:
-234                        getattr(self, kwarg_name)[e_name] = getattr(Obs, kwarg_name + '_global')
-235
-236        _parse_kwarg('S')
-237        _parse_kwarg('tau_exp')
-238        _parse_kwarg('N_sigma')
-239
-240        for e, e_name in enumerate(self.mc_names):
-241            gapsize = _determine_gap(self, e_content, e_name)
-242
-243            r_length = []
-244            for r_name in e_content[e_name]:
-245                if isinstance(self.idl[r_name], range):
-246                    r_length.append(len(self.idl[r_name]) * self.idl[r_name].step // gapsize)
-247                else:
-248                    r_length.append((self.idl[r_name][-1] - self.idl[r_name][0] + 1) // gapsize)
-249
-250            e_N = np.sum([self.shape[r_name] for r_name in e_content[e_name]])
-251            w_max = max(r_length) // 2
-252            e_gamma[e_name] = np.zeros(w_max)
-253            self.e_rho[e_name] = np.zeros(w_max)
-254            self.e_drho[e_name] = np.zeros(w_max)
-255
-256            for r_name in e_content[e_name]:
-257                e_gamma[e_name] += self._calc_gamma(self.deltas[r_name], self.idl[r_name], self.shape[r_name], w_max, fft, gapsize)
-258
-259            gamma_div = np.zeros(w_max)
-260            for r_name in e_content[e_name]:
-261                gamma_div += self._calc_gamma(np.ones((self.shape[r_name])), self.idl[r_name], self.shape[r_name], w_max, fft, gapsize)
-262            gamma_div[gamma_div < 1] = 1.0
-263            e_gamma[e_name] /= gamma_div[:w_max]
-264
-265            if np.abs(e_gamma[e_name][0]) < 10 * np.finfo(float).tiny:  # Prevent division by zero
-266                self.e_tauint[e_name] = 0.5
-267                self.e_dtauint[e_name] = 0.0
-268                self.e_dvalue[e_name] = 0.0
-269                self.e_ddvalue[e_name] = 0.0
-270                self.e_windowsize[e_name] = 0
-271                continue
-272
-273            self.e_rho[e_name] = e_gamma[e_name][:w_max] / e_gamma[e_name][0]
-274            self.e_n_tauint[e_name] = np.cumsum(np.concatenate(([0.5], self.e_rho[e_name][1:])))
-275            # Make sure no entry of tauint is smaller than 0.5
-276            self.e_n_tauint[e_name][self.e_n_tauint[e_name] <= 0.5] = 0.5 + np.finfo(np.float64).eps
-277            # hep-lat/0306017 eq. (42)
-278            self.e_n_dtauint[e_name] = self.e_n_tauint[e_name] * 2 * np.sqrt(np.abs(np.arange(w_max) + 0.5 - self.e_n_tauint[e_name]) / e_N)
-279            self.e_n_dtauint[e_name][0] = 0.0
-280
-281            def _compute_drho(i):
-282                tmp = (self.e_rho[e_name][i + 1:w_max]
-283                       + np.concatenate([self.e_rho[e_name][i - 1:None if i - (w_max - 1) // 2 <= 0 else (2 * i - (2 * w_max) // 2):-1],
-284                                         self.e_rho[e_name][1:max(1, w_max - 2 * i)]])
-285                       - 2 * self.e_rho[e_name][i] * self.e_rho[e_name][1:w_max - i])
-286                self.e_drho[e_name][i] = np.sqrt(np.sum(tmp ** 2) / e_N)
-287
-288            if self.tau_exp[e_name] > 0:
-289                _compute_drho(1)
-290                texp = self.tau_exp[e_name]
-291                # Critical slowing down analysis
-292                if w_max // 2 <= 1:
-293                    raise Exception("Need at least 8 samples for tau_exp error analysis")
-294                for n in range(1, w_max // 2):
-295                    _compute_drho(n + 1)
-296                    if (self.e_rho[e_name][n] - self.N_sigma[e_name] * self.e_drho[e_name][n]) < 0 or n >= w_max // 2 - 2:
-297                        # Bias correction hep-lat/0306017 eq. (49) included
-298                        self.e_tauint[e_name] = self.e_n_tauint[e_name][n] * (1 + (2 * n + 1) / e_N) / (1 + 1 / e_N) + texp * np.abs(self.e_rho[e_name][n + 1])  # The absolute makes sure, that the tail contribution is always positive
-299                        self.e_dtauint[e_name] = np.sqrt(self.e_n_dtauint[e_name][n] ** 2 + texp ** 2 * self.e_drho[e_name][n + 1] ** 2)
-300                        # Error of tau_exp neglected so far, missing term: self.e_rho[e_name][n + 1] ** 2 * d_tau_exp ** 2
-301                        self.e_dvalue[e_name] = np.sqrt(2 * self.e_tauint[e_name] * e_gamma[e_name][0] * (1 + 1 / e_N) / e_N)
-302                        self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt((n + 0.5) / e_N)
-303                        self.e_windowsize[e_name] = n
-304                        break
-305            else:
-306                if self.S[e_name] == 0.0:
-307                    self.e_tauint[e_name] = 0.5
-308                    self.e_dtauint[e_name] = 0.0
-309                    self.e_dvalue[e_name] = np.sqrt(e_gamma[e_name][0] / (e_N - 1))
-310                    self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt(0.5 / e_N)
-311                    self.e_windowsize[e_name] = 0
-312                else:
-313                    # Standard automatic windowing procedure
-314                    tau = self.S[e_name] / np.log((2 * self.e_n_tauint[e_name][1:] + 1) / (2 * self.e_n_tauint[e_name][1:] - 1))
-315                    g_w = np.exp(- np.arange(1, len(tau) + 1) / tau) - tau / np.sqrt(np.arange(1, len(tau) + 1) * e_N)
-316                    for n in range(1, w_max):
-317                        if g_w[n - 1] < 0 or n >= w_max - 1:
-318                            _compute_drho(n)
-319                            self.e_tauint[e_name] = self.e_n_tauint[e_name][n] * (1 + (2 * n + 1) / e_N) / (1 + 1 / e_N)  # Bias correction hep-lat/0306017 eq. (49)
-320                            self.e_dtauint[e_name] = self.e_n_dtauint[e_name][n]
-321                            self.e_dvalue[e_name] = np.sqrt(2 * self.e_tauint[e_name] * e_gamma[e_name][0] * (1 + 1 / e_N) / e_N)
-322                            self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt((n + 0.5) / e_N)
-323                            self.e_windowsize[e_name] = n
-324                            break
-325
-326            self._dvalue += self.e_dvalue[e_name] ** 2
-327            self.ddvalue += (self.e_dvalue[e_name] * self.e_ddvalue[e_name]) ** 2
-328
-329        for e_name in self.cov_names:
-330            self.e_dvalue[e_name] = np.sqrt(self.covobs[e_name].errsq())
-331            self.e_ddvalue[e_name] = 0
-332            self._dvalue += self.e_dvalue[e_name]**2
-333
-334        self._dvalue = np.sqrt(self._dvalue)
-335        if self._dvalue == 0.0:
-336            self.ddvalue = 0.0
-337        else:
-338            self.ddvalue = np.sqrt(self.ddvalue) / self._dvalue
-339        return
+            
177    def gamma_method(self, **kwargs):
+178        """Estimate the error and related properties of the Obs.
+179
+180        Parameters
+181        ----------
+182        S : float
+183            specifies a custom value for the parameter S (default 2.0).
+184            If set to 0 it is assumed that the data exhibits no
+185            autocorrelation. In this case the error estimates coincides
+186            with the sample standard error.
+187        tau_exp : float
+188            positive value triggers the critical slowing down analysis
+189            (default 0.0).
+190        N_sigma : float
+191            number of standard deviations from zero until the tail is
+192            attached to the autocorrelation function (default 1).
+193        fft : bool
+194            determines whether the fft algorithm is used for the computation
+195            of the autocorrelation function (default True)
+196        """
+197
+198        e_content = self.e_content
+199        self.e_dvalue = {}
+200        self.e_ddvalue = {}
+201        self.e_tauint = {}
+202        self.e_dtauint = {}
+203        self.e_windowsize = {}
+204        self.e_n_tauint = {}
+205        self.e_n_dtauint = {}
+206        e_gamma = {}
+207        self.e_rho = {}
+208        self.e_drho = {}
+209        self._dvalue = 0
+210        self.ddvalue = 0
+211
+212        self.S = {}
+213        self.tau_exp = {}
+214        self.N_sigma = {}
+215
+216        if kwargs.get('fft') is False:
+217            fft = False
+218        else:
+219            fft = True
+220
+221        def _parse_kwarg(kwarg_name):
+222            if kwarg_name in kwargs:
+223                tmp = kwargs.get(kwarg_name)
+224                if isinstance(tmp, (int, float)):
+225                    if tmp < 0:
+226                        raise Exception(kwarg_name + ' has to be larger or equal to 0.')
+227                    for e, e_name in enumerate(self.e_names):
+228                        getattr(self, kwarg_name)[e_name] = tmp
+229                else:
+230                    raise TypeError(kwarg_name + ' is not in proper format.')
+231            else:
+232                for e, e_name in enumerate(self.e_names):
+233                    if e_name in getattr(Obs, kwarg_name + '_dict'):
+234                        getattr(self, kwarg_name)[e_name] = getattr(Obs, kwarg_name + '_dict')[e_name]
+235                    else:
+236                        getattr(self, kwarg_name)[e_name] = getattr(Obs, kwarg_name + '_global')
+237
+238        _parse_kwarg('S')
+239        _parse_kwarg('tau_exp')
+240        _parse_kwarg('N_sigma')
+241
+242        for e, e_name in enumerate(self.mc_names):
+243            gapsize = _determine_gap(self, e_content, e_name)
+244
+245            r_length = []
+246            for r_name in e_content[e_name]:
+247                if isinstance(self.idl[r_name], range):
+248                    r_length.append(len(self.idl[r_name]) * self.idl[r_name].step // gapsize)
+249                else:
+250                    r_length.append((self.idl[r_name][-1] - self.idl[r_name][0] + 1) // gapsize)
+251
+252            e_N = np.sum([self.shape[r_name] for r_name in e_content[e_name]])
+253            w_max = max(r_length) // 2
+254            e_gamma[e_name] = np.zeros(w_max)
+255            self.e_rho[e_name] = np.zeros(w_max)
+256            self.e_drho[e_name] = np.zeros(w_max)
+257
+258            for r_name in e_content[e_name]:
+259                e_gamma[e_name] += self._calc_gamma(self.deltas[r_name], self.idl[r_name], self.shape[r_name], w_max, fft, gapsize)
+260
+261            gamma_div = np.zeros(w_max)
+262            for r_name in e_content[e_name]:
+263                gamma_div += self._calc_gamma(np.ones((self.shape[r_name])), self.idl[r_name], self.shape[r_name], w_max, fft, gapsize)
+264            gamma_div[gamma_div < 1] = 1.0
+265            e_gamma[e_name] /= gamma_div[:w_max]
+266
+267            if np.abs(e_gamma[e_name][0]) < 10 * np.finfo(float).tiny:  # Prevent division by zero
+268                self.e_tauint[e_name] = 0.5
+269                self.e_dtauint[e_name] = 0.0
+270                self.e_dvalue[e_name] = 0.0
+271                self.e_ddvalue[e_name] = 0.0
+272                self.e_windowsize[e_name] = 0
+273                continue
+274
+275            self.e_rho[e_name] = e_gamma[e_name][:w_max] / e_gamma[e_name][0]
+276            self.e_n_tauint[e_name] = np.cumsum(np.concatenate(([0.5], self.e_rho[e_name][1:])))
+277            # Make sure no entry of tauint is smaller than 0.5
+278            self.e_n_tauint[e_name][self.e_n_tauint[e_name] <= 0.5] = 0.5 + np.finfo(np.float64).eps
+279            # hep-lat/0306017 eq. (42)
+280            self.e_n_dtauint[e_name] = self.e_n_tauint[e_name] * 2 * np.sqrt(np.abs(np.arange(w_max) + 0.5 - self.e_n_tauint[e_name]) / e_N)
+281            self.e_n_dtauint[e_name][0] = 0.0
+282
+283            def _compute_drho(i):
+284                tmp = (self.e_rho[e_name][i + 1:w_max]
+285                       + np.concatenate([self.e_rho[e_name][i - 1:None if i - (w_max - 1) // 2 <= 0 else (2 * i - (2 * w_max) // 2):-1],
+286                                         self.e_rho[e_name][1:max(1, w_max - 2 * i)]])
+287                       - 2 * self.e_rho[e_name][i] * self.e_rho[e_name][1:w_max - i])
+288                self.e_drho[e_name][i] = np.sqrt(np.sum(tmp ** 2) / e_N)
+289
+290            if self.tau_exp[e_name] > 0:
+291                _compute_drho(1)
+292                texp = self.tau_exp[e_name]
+293                # Critical slowing down analysis
+294                if w_max // 2 <= 1:
+295                    raise Exception("Need at least 8 samples for tau_exp error analysis")
+296                for n in range(1, w_max // 2):
+297                    _compute_drho(n + 1)
+298                    if (self.e_rho[e_name][n] - self.N_sigma[e_name] * self.e_drho[e_name][n]) < 0 or n >= w_max // 2 - 2:
+299                        # Bias correction hep-lat/0306017 eq. (49) included
+300                        self.e_tauint[e_name] = self.e_n_tauint[e_name][n] * (1 + (2 * n + 1) / e_N) / (1 + 1 / e_N) + texp * np.abs(self.e_rho[e_name][n + 1])  # The absolute makes sure, that the tail contribution is always positive
+301                        self.e_dtauint[e_name] = np.sqrt(self.e_n_dtauint[e_name][n] ** 2 + texp ** 2 * self.e_drho[e_name][n + 1] ** 2)
+302                        # Error of tau_exp neglected so far, missing term: self.e_rho[e_name][n + 1] ** 2 * d_tau_exp ** 2
+303                        self.e_dvalue[e_name] = np.sqrt(2 * self.e_tauint[e_name] * e_gamma[e_name][0] * (1 + 1 / e_N) / e_N)
+304                        self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt((n + 0.5) / e_N)
+305                        self.e_windowsize[e_name] = n
+306                        break
+307            else:
+308                if self.S[e_name] == 0.0:
+309                    self.e_tauint[e_name] = 0.5
+310                    self.e_dtauint[e_name] = 0.0
+311                    self.e_dvalue[e_name] = np.sqrt(e_gamma[e_name][0] / (e_N - 1))
+312                    self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt(0.5 / e_N)
+313                    self.e_windowsize[e_name] = 0
+314                else:
+315                    # Standard automatic windowing procedure
+316                    tau = self.S[e_name] / np.log((2 * self.e_n_tauint[e_name][1:] + 1) / (2 * self.e_n_tauint[e_name][1:] - 1))
+317                    g_w = np.exp(- np.arange(1, len(tau) + 1) / tau) - tau / np.sqrt(np.arange(1, len(tau) + 1) * e_N)
+318                    for n in range(1, w_max):
+319                        if g_w[n - 1] < 0 or n >= w_max - 1:
+320                            _compute_drho(n)
+321                            self.e_tauint[e_name] = self.e_n_tauint[e_name][n] * (1 + (2 * n + 1) / e_N) / (1 + 1 / e_N)  # Bias correction hep-lat/0306017 eq. (49)
+322                            self.e_dtauint[e_name] = self.e_n_dtauint[e_name][n]
+323                            self.e_dvalue[e_name] = np.sqrt(2 * self.e_tauint[e_name] * e_gamma[e_name][0] * (1 + 1 / e_N) / e_N)
+324                            self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt((n + 0.5) / e_N)
+325                            self.e_windowsize[e_name] = n
+326                            break
+327
+328            self._dvalue += self.e_dvalue[e_name] ** 2
+329            self.ddvalue += (self.e_dvalue[e_name] * self.e_ddvalue[e_name]) ** 2
+330
+331        for e_name in self.cov_names:
+332            self.e_dvalue[e_name] = np.sqrt(self.covobs[e_name].errsq())
+333            self.e_ddvalue[e_name] = 0
+334            self._dvalue += self.e_dvalue[e_name]**2
+335
+336        self._dvalue = np.sqrt(self._dvalue)
+337        if self._dvalue == 0.0:
+338            self.ddvalue = 0.0
+339        else:
+340            self.ddvalue = np.sqrt(self.ddvalue) / self._dvalue
+341        return
 
@@ -3593,171 +3599,171 @@ of the autocorrelation function (default True)
-
175    def gamma_method(self, **kwargs):
-176        """Estimate the error and related properties of the Obs.
-177
-178        Parameters
-179        ----------
-180        S : float
-181            specifies a custom value for the parameter S (default 2.0).
-182            If set to 0 it is assumed that the data exhibits no
-183            autocorrelation. In this case the error estimates coincides
-184            with the sample standard error.
-185        tau_exp : float
-186            positive value triggers the critical slowing down analysis
-187            (default 0.0).
-188        N_sigma : float
-189            number of standard deviations from zero until the tail is
-190            attached to the autocorrelation function (default 1).
-191        fft : bool
-192            determines whether the fft algorithm is used for the computation
-193            of the autocorrelation function (default True)
-194        """
-195
-196        e_content = self.e_content
-197        self.e_dvalue = {}
-198        self.e_ddvalue = {}
-199        self.e_tauint = {}
-200        self.e_dtauint = {}
-201        self.e_windowsize = {}
-202        self.e_n_tauint = {}
-203        self.e_n_dtauint = {}
-204        e_gamma = {}
-205        self.e_rho = {}
-206        self.e_drho = {}
-207        self._dvalue = 0
-208        self.ddvalue = 0
-209
-210        self.S = {}
-211        self.tau_exp = {}
-212        self.N_sigma = {}
-213
-214        if kwargs.get('fft') is False:
-215            fft = False
-216        else:
-217            fft = True
-218
-219        def _parse_kwarg(kwarg_name):
-220            if kwarg_name in kwargs:
-221                tmp = kwargs.get(kwarg_name)
-222                if isinstance(tmp, (int, float)):
-223                    if tmp < 0:
-224                        raise Exception(kwarg_name + ' has to be larger or equal to 0.')
-225                    for e, e_name in enumerate(self.e_names):
-226                        getattr(self, kwarg_name)[e_name] = tmp
-227                else:
-228                    raise TypeError(kwarg_name + ' is not in proper format.')
-229            else:
-230                for e, e_name in enumerate(self.e_names):
-231                    if e_name in getattr(Obs, kwarg_name + '_dict'):
-232                        getattr(self, kwarg_name)[e_name] = getattr(Obs, kwarg_name + '_dict')[e_name]
-233                    else:
-234                        getattr(self, kwarg_name)[e_name] = getattr(Obs, kwarg_name + '_global')
-235
-236        _parse_kwarg('S')
-237        _parse_kwarg('tau_exp')
-238        _parse_kwarg('N_sigma')
-239
-240        for e, e_name in enumerate(self.mc_names):
-241            gapsize = _determine_gap(self, e_content, e_name)
-242
-243            r_length = []
-244            for r_name in e_content[e_name]:
-245                if isinstance(self.idl[r_name], range):
-246                    r_length.append(len(self.idl[r_name]) * self.idl[r_name].step // gapsize)
-247                else:
-248                    r_length.append((self.idl[r_name][-1] - self.idl[r_name][0] + 1) // gapsize)
-249
-250            e_N = np.sum([self.shape[r_name] for r_name in e_content[e_name]])
-251            w_max = max(r_length) // 2
-252            e_gamma[e_name] = np.zeros(w_max)
-253            self.e_rho[e_name] = np.zeros(w_max)
-254            self.e_drho[e_name] = np.zeros(w_max)
-255
-256            for r_name in e_content[e_name]:
-257                e_gamma[e_name] += self._calc_gamma(self.deltas[r_name], self.idl[r_name], self.shape[r_name], w_max, fft, gapsize)
-258
-259            gamma_div = np.zeros(w_max)
-260            for r_name in e_content[e_name]:
-261                gamma_div += self._calc_gamma(np.ones((self.shape[r_name])), self.idl[r_name], self.shape[r_name], w_max, fft, gapsize)
-262            gamma_div[gamma_div < 1] = 1.0
-263            e_gamma[e_name] /= gamma_div[:w_max]
-264
-265            if np.abs(e_gamma[e_name][0]) < 10 * np.finfo(float).tiny:  # Prevent division by zero
-266                self.e_tauint[e_name] = 0.5
-267                self.e_dtauint[e_name] = 0.0
-268                self.e_dvalue[e_name] = 0.0
-269                self.e_ddvalue[e_name] = 0.0
-270                self.e_windowsize[e_name] = 0
-271                continue
-272
-273            self.e_rho[e_name] = e_gamma[e_name][:w_max] / e_gamma[e_name][0]
-274            self.e_n_tauint[e_name] = np.cumsum(np.concatenate(([0.5], self.e_rho[e_name][1:])))
-275            # Make sure no entry of tauint is smaller than 0.5
-276            self.e_n_tauint[e_name][self.e_n_tauint[e_name] <= 0.5] = 0.5 + np.finfo(np.float64).eps
-277            # hep-lat/0306017 eq. (42)
-278            self.e_n_dtauint[e_name] = self.e_n_tauint[e_name] * 2 * np.sqrt(np.abs(np.arange(w_max) + 0.5 - self.e_n_tauint[e_name]) / e_N)
-279            self.e_n_dtauint[e_name][0] = 0.0
-280
-281            def _compute_drho(i):
-282                tmp = (self.e_rho[e_name][i + 1:w_max]
-283                       + np.concatenate([self.e_rho[e_name][i - 1:None if i - (w_max - 1) // 2 <= 0 else (2 * i - (2 * w_max) // 2):-1],
-284                                         self.e_rho[e_name][1:max(1, w_max - 2 * i)]])
-285                       - 2 * self.e_rho[e_name][i] * self.e_rho[e_name][1:w_max - i])
-286                self.e_drho[e_name][i] = np.sqrt(np.sum(tmp ** 2) / e_N)
-287
-288            if self.tau_exp[e_name] > 0:
-289                _compute_drho(1)
-290                texp = self.tau_exp[e_name]
-291                # Critical slowing down analysis
-292                if w_max // 2 <= 1:
-293                    raise Exception("Need at least 8 samples for tau_exp error analysis")
-294                for n in range(1, w_max // 2):
-295                    _compute_drho(n + 1)
-296                    if (self.e_rho[e_name][n] - self.N_sigma[e_name] * self.e_drho[e_name][n]) < 0 or n >= w_max // 2 - 2:
-297                        # Bias correction hep-lat/0306017 eq. (49) included
-298                        self.e_tauint[e_name] = self.e_n_tauint[e_name][n] * (1 + (2 * n + 1) / e_N) / (1 + 1 / e_N) + texp * np.abs(self.e_rho[e_name][n + 1])  # The absolute makes sure, that the tail contribution is always positive
-299                        self.e_dtauint[e_name] = np.sqrt(self.e_n_dtauint[e_name][n] ** 2 + texp ** 2 * self.e_drho[e_name][n + 1] ** 2)
-300                        # Error of tau_exp neglected so far, missing term: self.e_rho[e_name][n + 1] ** 2 * d_tau_exp ** 2
-301                        self.e_dvalue[e_name] = np.sqrt(2 * self.e_tauint[e_name] * e_gamma[e_name][0] * (1 + 1 / e_N) / e_N)
-302                        self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt((n + 0.5) / e_N)
-303                        self.e_windowsize[e_name] = n
-304                        break
-305            else:
-306                if self.S[e_name] == 0.0:
-307                    self.e_tauint[e_name] = 0.5
-308                    self.e_dtauint[e_name] = 0.0
-309                    self.e_dvalue[e_name] = np.sqrt(e_gamma[e_name][0] / (e_N - 1))
-310                    self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt(0.5 / e_N)
-311                    self.e_windowsize[e_name] = 0
-312                else:
-313                    # Standard automatic windowing procedure
-314                    tau = self.S[e_name] / np.log((2 * self.e_n_tauint[e_name][1:] + 1) / (2 * self.e_n_tauint[e_name][1:] - 1))
-315                    g_w = np.exp(- np.arange(1, len(tau) + 1) / tau) - tau / np.sqrt(np.arange(1, len(tau) + 1) * e_N)
-316                    for n in range(1, w_max):
-317                        if g_w[n - 1] < 0 or n >= w_max - 1:
-318                            _compute_drho(n)
-319                            self.e_tauint[e_name] = self.e_n_tauint[e_name][n] * (1 + (2 * n + 1) / e_N) / (1 + 1 / e_N)  # Bias correction hep-lat/0306017 eq. (49)
-320                            self.e_dtauint[e_name] = self.e_n_dtauint[e_name][n]
-321                            self.e_dvalue[e_name] = np.sqrt(2 * self.e_tauint[e_name] * e_gamma[e_name][0] * (1 + 1 / e_N) / e_N)
-322                            self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt((n + 0.5) / e_N)
-323                            self.e_windowsize[e_name] = n
-324                            break
-325
-326            self._dvalue += self.e_dvalue[e_name] ** 2
-327            self.ddvalue += (self.e_dvalue[e_name] * self.e_ddvalue[e_name]) ** 2
-328
-329        for e_name in self.cov_names:
-330            self.e_dvalue[e_name] = np.sqrt(self.covobs[e_name].errsq())
-331            self.e_ddvalue[e_name] = 0
-332            self._dvalue += self.e_dvalue[e_name]**2
-333
-334        self._dvalue = np.sqrt(self._dvalue)
-335        if self._dvalue == 0.0:
-336            self.ddvalue = 0.0
-337        else:
-338            self.ddvalue = np.sqrt(self.ddvalue) / self._dvalue
-339        return
+            
177    def gamma_method(self, **kwargs):
+178        """Estimate the error and related properties of the Obs.
+179
+180        Parameters
+181        ----------
+182        S : float
+183            specifies a custom value for the parameter S (default 2.0).
+184            If set to 0 it is assumed that the data exhibits no
+185            autocorrelation. In this case the error estimates coincides
+186            with the sample standard error.
+187        tau_exp : float
+188            positive value triggers the critical slowing down analysis
+189            (default 0.0).
+190        N_sigma : float
+191            number of standard deviations from zero until the tail is
+192            attached to the autocorrelation function (default 1).
+193        fft : bool
+194            determines whether the fft algorithm is used for the computation
+195            of the autocorrelation function (default True)
+196        """
+197
+198        e_content = self.e_content
+199        self.e_dvalue = {}
+200        self.e_ddvalue = {}
+201        self.e_tauint = {}
+202        self.e_dtauint = {}
+203        self.e_windowsize = {}
+204        self.e_n_tauint = {}
+205        self.e_n_dtauint = {}
+206        e_gamma = {}
+207        self.e_rho = {}
+208        self.e_drho = {}
+209        self._dvalue = 0
+210        self.ddvalue = 0
+211
+212        self.S = {}
+213        self.tau_exp = {}
+214        self.N_sigma = {}
+215
+216        if kwargs.get('fft') is False:
+217            fft = False
+218        else:
+219            fft = True
+220
+221        def _parse_kwarg(kwarg_name):
+222            if kwarg_name in kwargs:
+223                tmp = kwargs.get(kwarg_name)
+224                if isinstance(tmp, (int, float)):
+225                    if tmp < 0:
+226                        raise Exception(kwarg_name + ' has to be larger or equal to 0.')
+227                    for e, e_name in enumerate(self.e_names):
+228                        getattr(self, kwarg_name)[e_name] = tmp
+229                else:
+230                    raise TypeError(kwarg_name + ' is not in proper format.')
+231            else:
+232                for e, e_name in enumerate(self.e_names):
+233                    if e_name in getattr(Obs, kwarg_name + '_dict'):
+234                        getattr(self, kwarg_name)[e_name] = getattr(Obs, kwarg_name + '_dict')[e_name]
+235                    else:
+236                        getattr(self, kwarg_name)[e_name] = getattr(Obs, kwarg_name + '_global')
+237
+238        _parse_kwarg('S')
+239        _parse_kwarg('tau_exp')
+240        _parse_kwarg('N_sigma')
+241
+242        for e, e_name in enumerate(self.mc_names):
+243            gapsize = _determine_gap(self, e_content, e_name)
+244
+245            r_length = []
+246            for r_name in e_content[e_name]:
+247                if isinstance(self.idl[r_name], range):
+248                    r_length.append(len(self.idl[r_name]) * self.idl[r_name].step // gapsize)
+249                else:
+250                    r_length.append((self.idl[r_name][-1] - self.idl[r_name][0] + 1) // gapsize)
+251
+252            e_N = np.sum([self.shape[r_name] for r_name in e_content[e_name]])
+253            w_max = max(r_length) // 2
+254            e_gamma[e_name] = np.zeros(w_max)
+255            self.e_rho[e_name] = np.zeros(w_max)
+256            self.e_drho[e_name] = np.zeros(w_max)
+257
+258            for r_name in e_content[e_name]:
+259                e_gamma[e_name] += self._calc_gamma(self.deltas[r_name], self.idl[r_name], self.shape[r_name], w_max, fft, gapsize)
+260
+261            gamma_div = np.zeros(w_max)
+262            for r_name in e_content[e_name]:
+263                gamma_div += self._calc_gamma(np.ones((self.shape[r_name])), self.idl[r_name], self.shape[r_name], w_max, fft, gapsize)
+264            gamma_div[gamma_div < 1] = 1.0
+265            e_gamma[e_name] /= gamma_div[:w_max]
+266
+267            if np.abs(e_gamma[e_name][0]) < 10 * np.finfo(float).tiny:  # Prevent division by zero
+268                self.e_tauint[e_name] = 0.5
+269                self.e_dtauint[e_name] = 0.0
+270                self.e_dvalue[e_name] = 0.0
+271                self.e_ddvalue[e_name] = 0.0
+272                self.e_windowsize[e_name] = 0
+273                continue
+274
+275            self.e_rho[e_name] = e_gamma[e_name][:w_max] / e_gamma[e_name][0]
+276            self.e_n_tauint[e_name] = np.cumsum(np.concatenate(([0.5], self.e_rho[e_name][1:])))
+277            # Make sure no entry of tauint is smaller than 0.5
+278            self.e_n_tauint[e_name][self.e_n_tauint[e_name] <= 0.5] = 0.5 + np.finfo(np.float64).eps
+279            # hep-lat/0306017 eq. (42)
+280            self.e_n_dtauint[e_name] = self.e_n_tauint[e_name] * 2 * np.sqrt(np.abs(np.arange(w_max) + 0.5 - self.e_n_tauint[e_name]) / e_N)
+281            self.e_n_dtauint[e_name][0] = 0.0
+282
+283            def _compute_drho(i):
+284                tmp = (self.e_rho[e_name][i + 1:w_max]
+285                       + np.concatenate([self.e_rho[e_name][i - 1:None if i - (w_max - 1) // 2 <= 0 else (2 * i - (2 * w_max) // 2):-1],
+286                                         self.e_rho[e_name][1:max(1, w_max - 2 * i)]])
+287                       - 2 * self.e_rho[e_name][i] * self.e_rho[e_name][1:w_max - i])
+288                self.e_drho[e_name][i] = np.sqrt(np.sum(tmp ** 2) / e_N)
+289
+290            if self.tau_exp[e_name] > 0:
+291                _compute_drho(1)
+292                texp = self.tau_exp[e_name]
+293                # Critical slowing down analysis
+294                if w_max // 2 <= 1:
+295                    raise Exception("Need at least 8 samples for tau_exp error analysis")
+296                for n in range(1, w_max // 2):
+297                    _compute_drho(n + 1)
+298                    if (self.e_rho[e_name][n] - self.N_sigma[e_name] * self.e_drho[e_name][n]) < 0 or n >= w_max // 2 - 2:
+299                        # Bias correction hep-lat/0306017 eq. (49) included
+300                        self.e_tauint[e_name] = self.e_n_tauint[e_name][n] * (1 + (2 * n + 1) / e_N) / (1 + 1 / e_N) + texp * np.abs(self.e_rho[e_name][n + 1])  # The absolute makes sure, that the tail contribution is always positive
+301                        self.e_dtauint[e_name] = np.sqrt(self.e_n_dtauint[e_name][n] ** 2 + texp ** 2 * self.e_drho[e_name][n + 1] ** 2)
+302                        # Error of tau_exp neglected so far, missing term: self.e_rho[e_name][n + 1] ** 2 * d_tau_exp ** 2
+303                        self.e_dvalue[e_name] = np.sqrt(2 * self.e_tauint[e_name] * e_gamma[e_name][0] * (1 + 1 / e_N) / e_N)
+304                        self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt((n + 0.5) / e_N)
+305                        self.e_windowsize[e_name] = n
+306                        break
+307            else:
+308                if self.S[e_name] == 0.0:
+309                    self.e_tauint[e_name] = 0.5
+310                    self.e_dtauint[e_name] = 0.0
+311                    self.e_dvalue[e_name] = np.sqrt(e_gamma[e_name][0] / (e_N - 1))
+312                    self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt(0.5 / e_N)
+313                    self.e_windowsize[e_name] = 0
+314                else:
+315                    # Standard automatic windowing procedure
+316                    tau = self.S[e_name] / np.log((2 * self.e_n_tauint[e_name][1:] + 1) / (2 * self.e_n_tauint[e_name][1:] - 1))
+317                    g_w = np.exp(- np.arange(1, len(tau) + 1) / tau) - tau / np.sqrt(np.arange(1, len(tau) + 1) * e_N)
+318                    for n in range(1, w_max):
+319                        if g_w[n - 1] < 0 or n >= w_max - 1:
+320                            _compute_drho(n)
+321                            self.e_tauint[e_name] = self.e_n_tauint[e_name][n] * (1 + (2 * n + 1) / e_N) / (1 + 1 / e_N)  # Bias correction hep-lat/0306017 eq. (49)
+322                            self.e_dtauint[e_name] = self.e_n_dtauint[e_name][n]
+323                            self.e_dvalue[e_name] = np.sqrt(2 * self.e_tauint[e_name] * e_gamma[e_name][0] * (1 + 1 / e_N) / e_N)
+324                            self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt((n + 0.5) / e_N)
+325                            self.e_windowsize[e_name] = n
+326                            break
+327
+328            self._dvalue += self.e_dvalue[e_name] ** 2
+329            self.ddvalue += (self.e_dvalue[e_name] * self.e_ddvalue[e_name]) ** 2
+330
+331        for e_name in self.cov_names:
+332            self.e_dvalue[e_name] = np.sqrt(self.covobs[e_name].errsq())
+333            self.e_ddvalue[e_name] = 0
+334            self._dvalue += self.e_dvalue[e_name]**2
+335
+336        self._dvalue = np.sqrt(self._dvalue)
+337        if self._dvalue == 0.0:
+338            self.ddvalue = 0.0
+339        else:
+340            self.ddvalue = np.sqrt(self.ddvalue) / self._dvalue
+341        return
 
@@ -3796,74 +3802,74 @@ of the autocorrelation function (default True)
-
def details(self, ens_content=True):
    """Output detailed properties of the Obs.

    Parameters
    ----------
    ens_content : bool
        print details about the ensembles and replica if true.
    """
    if self.tag is not None:
        print("Description:", self.tag)
    if not hasattr(self, 'e_dvalue'):
        # Error analysis has not been run yet; only the central value is known.
        print('Result\t %3.8e' % (self.value))
    else:
        if self.value == 0.0:
            percentage = np.nan
        else:
            percentage = np.abs(self._dvalue / self.value) * 100
        print('Result\t %3.8e +/- %3.8e +/- %3.8e (%3.3f%%)' % (self.value, self._dvalue, self.ddvalue, percentage))
        multiple_ensembles = len(self.e_names) > 1
        if multiple_ensembles:
            print(' Ensemble errors:')
        e_content = self.e_content
        for ens in self.mc_names:
            gap = _determine_gap(self, e_content, ens)

            if multiple_ensembles:
                print('', ens, '\t %3.6e +/- %3.6e' % (self.e_dvalue[ens], self.e_ddvalue[ens]))
            tau_line = " \N{GREEK SMALL LETTER TAU}_int\t " + _format_uncertainty(self.e_tauint[ens], self.e_dtauint[ens])
            tau_line += f" in units of {gap} config" + ("s" if gap > 1 else "")
            # Append either the tau_exp parameters or the automatic-windowing S parameter.
            if self.tau_exp[ens] > 0:
                suffix = '\t(\N{GREEK SMALL LETTER TAU}_exp=%3.2f, N_\N{GREEK SMALL LETTER SIGMA}=%1.0i)' % (self.tau_exp[ens], self.N_sigma[ens])
            else:
                suffix = '\t(S=%3.2f)' % (self.S[ens])
            print(f"{tau_line: <45}" + suffix)
        for ens in self.cov_names:
            print('', ens, '\t %3.8e' % (self.e_dvalue[ens]))
    if ens_content is True:
        if len(self.e_names) == 1:
            print(self.N, 'samples in', len(self.e_names), 'ensemble:')
        else:
            print(self.N, 'samples in', len(self.e_names), 'ensembles:')
        report_lines = []
        for key, value in sorted(self.e_content.items()):
            if key not in self.covobs:
                line = '  ' + "\u00B7 Ensemble '" + key + "' "
                if len(value) == 1:
                    # Single replicum: describe its configuration range inline.
                    line += f': {self.shape[value[0]]} configurations'
                    if isinstance(self.idl[value[0]], range):
                        line += f' (from {self.idl[value[0]].start} to {self.idl[value[0]][-1]}' + int(self.idl[value[0]].step != 1) * f' in steps of {self.idl[value[0]].step}' + ')'
                    else:
                        line += f' (irregular range from {self.idl[value[0]][0]} to {self.idl[value[0]][-1]})'
                else:
                    # Several replica: list each one on its own indented line.
                    sublist = []
                    for rep in value:
                        sub = '    ' + "\u00B7 Replicum '" + rep[len(key) + 1:] + "' "
                        sub += f': {self.shape[rep]} configurations'
                        if isinstance(self.idl[rep], range):
                            sub += f' (from {self.idl[rep].start} to {self.idl[rep][-1]}' + int(self.idl[rep].step != 1) * f' in steps of {self.idl[rep].step}' + ')'
                        else:
                            sub += f' (irregular range from {self.idl[rep][0]} to {self.idl[rep][-1]})'
                        sublist.append(sub)

                    line += '\n' + '\n'.join(sublist)
            else:
                line = '  ' + "\u00B7 Covobs   '" + key + "' "
            report_lines.append(line)
        print('\n'.join(report_lines))
 
@@ -3890,20 +3896,20 @@ print details about the ensembles and replica if true.
-
def reweight(self, weight):
    """Reweight the obs with given reweighting factors.

    Parameters
    ----------
    weight : Obs
        Reweighting factor. An Observable that has to be defined on a superset of the
        configurations in obs[i].idl for all i.

    Returns
    -------
    Obs
        The reweighted observable.
    """
    # Delegates to the module-level reweight() function.  The previous
    # docstring also described an 'all_configs' option, but this method
    # takes no such parameter, so that misleading entry was removed
    # (it belongs to the module-level function, not this wrapper).
    return reweight(weight, [self])[0]
 
@@ -3935,17 +3941,17 @@ on the configurations in obs[i].idl. Default False.
-
def is_zero_within_error(self, sigma=1):
    """Checks whether the observable is zero within 'sigma' standard errors.

    Parameters
    ----------
    sigma : int
        Number of standard errors used for the check.

    Works only properly when the gamma method was run.
    """
    # An exactly vanishing observable is trivially compatible with zero.
    if self.is_zero():
        return True
    return np.abs(self.value) <= sigma * self._dvalue
 
@@ -3973,15 +3979,15 @@ Number of standard errors used for the check.
-
def is_zero(self, atol=1e-10):
    """Checks whether the observable is zero within a given tolerance.

    Parameters
    ----------
    atol : float
        Absolute tolerance (for details see numpy documentation).
    """
    # The central value, every fluctuation and every covobs error
    # all have to vanish for the observable to count as zero.
    if not np.isclose(0.0, self.value, 1e-14, atol):
        return False
    deltas_vanish = all(np.allclose(0.0, delta, 1e-14, atol) for delta in self.deltas.values())
    covobs_vanish = all(np.allclose(0.0, cov.errsq(), 1e-14, atol) for cov in self.covobs.values())
    return deltas_vanish and covobs_vanish
 
@@ -4008,45 +4014,45 @@ Absolute tolerance (for details see numpy documentation).
-
def plot_tauint(self, save=None):
    """Plot integrated autocorrelation time for each ensemble.

    Parameters
    ----------
    save : str
        saves the figure to a file named 'save' if.
    """
    if not hasattr(self, 'e_dvalue'):
        raise Exception('Run the gamma method first.')

    for idx, ens in enumerate(self.mc_names):
        fig = plt.figure()
        plt.xlabel(r'$W$')
        plt.ylabel(r'$\tau_\mathrm{int}$')
        length = int(len(self.e_n_tauint[ens]))
        window = self.e_windowsize[ens]
        color = 'C' + str(idx)
        if self.tau_exp[ens] > 0:
            # Show the exponential tail attached at the chosen window.
            base = self.e_n_tauint[ens][window]
            x_help = np.arange(2 * self.tau_exp[ens])
            y_help = (x_help + 1) * np.abs(self.e_rho[ens][window + 1]) * (1 - x_help / (2 * (2 * self.tau_exp[ens] - 1))) + base
            x_arr = np.arange(window + 1, window + 1 + 2 * self.tau_exp[ens])
            plt.plot(x_arr, y_help, color, linewidth=1, ls='--', marker=',')
            plt.errorbar([window + 2 * self.tau_exp[ens]], [self.e_tauint[ens]],
                         yerr=[self.e_dtauint[ens]], fmt=color, linewidth=1, capsize=2, marker='o', mfc=plt.rcParams['axes.facecolor'])
            xmax = window + 2 * self.tau_exp[ens] + 1.5
            label = ens + r', $\tau_\mathrm{exp}$=' + str(np.around(self.tau_exp[ens], decimals=2))
        else:
            label = ens + ', S=' + str(np.around(self.S[ens], decimals=2))
            xmax = max(10.5, 2 * window - 0.5)

        cut = int(xmax) + 1
        plt.errorbar(np.arange(length)[:cut], self.e_n_tauint[ens][:cut], yerr=self.e_n_dtauint[ens][:cut], linewidth=1, capsize=2, label=label)
        plt.axvline(x=window, color=color, alpha=0.5, marker=',', ls='--')
        plt.legend()
        plt.xlim(-0.5, xmax)
        ylim = plt.ylim()
        plt.ylim(bottom=0.0, top=max(1.0, ylim[1]))
        plt.draw()
        if save:
            fig.savefig(save + "_" + str(idx))
 
@@ -4073,36 +4079,36 @@ saves the figure to a file named 'save' if.
-
def plot_rho(self, save=None):
    """Plot normalized autocorrelation function time for each ensemble.

    Parameters
    ----------
    save : str
        saves the figure to a file named 'save' if.
    """
    if not hasattr(self, 'e_dvalue'):
        raise Exception('Run the gamma method first.')
    for idx, ens in enumerate(self.mc_names):
        fig = plt.figure()
        plt.xlabel('W')
        plt.ylabel('rho')
        length = int(len(self.e_drho[ens]))
        plt.errorbar(np.arange(length), self.e_rho[ens][:length], yerr=self.e_drho[ens][:], linewidth=1, capsize=2)
        window = self.e_windowsize[ens]
        plt.axvline(x=window, color='r', alpha=0.25, ls='--', marker=',')
        if self.tau_exp[ens] > 0:
            # Draw the linear continuation used by the tau_exp tail estimate.
            plt.plot([window + 1, window + 1 + 2 * self.tau_exp[ens]],
                     [self.e_rho[ens][window + 1], 0], 'k-', lw=1)
            xmax = window + 2 * self.tau_exp[ens] + 1.5
            plt.title('Rho ' + ens + r', tau\_exp=' + str(np.around(self.tau_exp[ens], decimals=2)))
        else:
            xmax = max(10.5, 2 * window - 0.5)
            plt.title('Rho ' + ens + ', S=' + str(np.around(self.S[ens], decimals=2)))
        plt.plot([-0.5, xmax], [0, 0], 'k--', lw=1)
        plt.xlim(-0.5, xmax)
        plt.draw()
        if save:
            fig.savefig(save + "_" + str(idx))
 
@@ -4129,27 +4135,27 @@ saves the figure to a file named 'save' if.
-
def plot_rep_dist(self):
    """Plot replica distribution for each ensemble with more than one replicum.

    For every Monte Carlo ensemble the per-replicum means are standardized
    (mean 0, variance 1 under the estimated error) and shown as a histogram.

    Raises
    ------
    Exception
        If the gamma method has not been run yet.
    """
    if not hasattr(self, 'e_dvalue'):
        raise Exception('Run the gamma method first.')
    for e, e_name in enumerate(self.mc_names):
        if len(self.e_content[e_name]) == 1:
            print('No replica distribution for a single replicum (', e_name, ')')
            continue
        r_length = []
        sub_r_mean = 0
        for r, r_name in enumerate(self.e_content[e_name]):
            r_length.append(len(self.deltas[r_name]))
            sub_r_mean += self.shape[r_name] * self.r_values[r_name]
        e_N = np.sum(r_length)
        sub_r_mean /= e_N
        arr = np.zeros(len(self.e_content[e_name]))
        for r, r_name in enumerate(self.e_content[e_name]):
            # Standardize each replicum mean by its expected fluctuation.
            arr[r] = (self.r_values[r_name] - sub_r_mean) / (self.e_dvalue[e_name] * np.sqrt(e_N / self.shape[r_name] - 1))
        plt.hist(arr, rwidth=0.8, bins=len(self.e_content[e_name]))
        # Fix: the title previously read 'Replica distributionNAME ...' because
        # the space between the label and the ensemble name was missing.
        plt.title('Replica distribution ' + e_name + ' (mean=0, var=1)')
        plt.draw()
 
@@ -4169,37 +4175,37 @@ saves the figure to a file named 'save' if.
-
def plot_history(self, expand=True):
    """Plot derived Monte Carlo history for each ensemble

    Parameters
    ----------
    expand : bool
        show expanded history for irregular Monte Carlo chains (default: True).
    """
    for idx, ens in enumerate(self.mc_names):
        plt.figure()
        lengths = []
        raw_series = []
        expanded_series = []
        for rep in self.e_content[ens]:
            raw_series.append(self.deltas[rep] + self.r_values[rep])
            if expand:
                # Fill gaps of irregular chains so the x axis reflects configurations.
                expanded_series.append(_expand_deltas(self.deltas[rep], list(self.idl[rep]), self.shape[rep], 1) + self.r_values[rep])
                lengths.append(len(expanded_series[-1]))
            else:
                lengths.append(len(raw_series[-1]))
        e_N = np.sum(lengths)
        x = np.arange(e_N)
        # Skew/kurtosis statistics are always computed on the unexpanded data.
        y_test = np.concatenate(raw_series, axis=0)
        y = np.concatenate(expanded_series, axis=0) if expand else y_test
        plt.errorbar(x, y, fmt='.', markersize=3)
        plt.xlim(-0.5, e_N - 0.5)
        plt.title(ens + f'\nskew: {skew(y_test):.3f} (p={skewtest(y_test).pvalue:.3f}), kurtosis: {kurtosis(y_test):.3f} (p={kurtosistest(y_test).pvalue:.3f})')
        plt.draw()
 
@@ -4226,29 +4232,29 @@ show expanded history for irregular Monte Carlo chains (default: True).
-
def plot_piechart(self, save=None):
    """Plot piechart which shows the fractional contribution of each
    ensemble to the error and returns a dictionary containing the fractions.

    Parameters
    ----------
    save : str
        saves the figure to a file named 'save' if.

    Returns
    -------
    dict
        Maps each ensemble name to its fractional contribution to the
        total squared error.

    Raises
    ------
    Exception
        If the gamma method has not been run or the total error vanishes.
    """
    if not hasattr(self, 'e_dvalue'):
        raise Exception('Run the gamma method first.')
    if np.isclose(0.0, self._dvalue, atol=1e-15):
        raise Exception('Error is 0.0')
    labels = self.e_names
    # Fix: convert the list to an ndarray explicitly.  The previous code
    # divided a plain Python list by self._dvalue ** 2, which only worked
    # because _dvalue happened to be a numpy scalar; with a plain float it
    # would raise a TypeError.
    sizes = np.asarray([self.e_dvalue[name] ** 2 for name in labels]) / self._dvalue ** 2
    fig1, ax1 = plt.subplots()
    ax1.pie(sizes, labels=labels, startangle=90, normalize=True)
    ax1.axis('equal')
    plt.draw()
    if save:
        fig1.savefig(save)

    return dict(zip(labels, sizes))
 
@@ -4276,34 +4282,34 @@ saves the figure to a file named 'save' if.
-
def dump(self, filename, datatype="json.gz", description="", **kwargs):
    """Dump the Obs to a file 'name' of chosen format.

    Parameters
    ----------
    filename : str
        name of the file to be saved.
    datatype : str
        Format of the exported file. Supported formats include
        "json.gz" and "pickle"
    description : str
        Description for output file, only relevant for json.gz format.
    path : str
        specifies a custom path for the file (default '.')

    Raises
    ------
    Exception
        If 'datatype' is not one of the supported formats.
    """
    # Prepend the optional custom path to the file name.
    if 'path' in kwargs:
        file_name = kwargs.get('path') + '/' + filename
    else:
        file_name = filename

    if datatype == "pickle":
        with open(file_name + '.p', 'wb') as fb:
            pickle.dump(self, fb)
    elif datatype == "json.gz":
        from .input.json import dump_to_json
        dump_to_json([self], file_name, description=description)
    else:
        raise Exception("Unknown datatype " + str(datatype))
 
@@ -4337,31 +4343,31 @@ specifies a custom path for the file (default '.')
-
def export_jackknife(self):
    """Export jackknife samples from the Obs

    Returns
    -------
    numpy.ndarray
        Returns a numpy array of length N + 1 where N is the number of samples
        for the given ensemble and replicum. The zeroth entry of the array contains
        the mean value of the Obs, entries 1 to N contain the N jackknife samples
        derived from the Obs. The current implementation only works for observables
        defined on exactly one ensemble and replicum. The derived jackknife samples
        should agree with samples from a full jackknife analysis up to O(1/N).

    Raises
    ------
    Exception
        If the Obs is defined on more than one ensemble/replicum.
    """
    if len(self.names) != 1:
        raise Exception("'export_jackknife' is only implemented for Obs defined on one ensemble and replicum.")

    ens = self.names[0]
    # Reconstruct the original samples from fluctuations and replicum mean.
    full_data = self.deltas[ens] + self.r_values[ens]
    n = full_data.size
    jacks = np.empty(n + 1)
    jacks[0] = self.value
    # Leave-one-out means: (n * mean - x_i) / (n - 1).
    jacks[1:] = (n * self.value - full_data) / (n - 1)
    return jacks
 
@@ -4392,48 +4398,48 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
689    def export_bootstrap(self, samples=500, random_numbers=None, save_rng=None):
-690        """Export bootstrap samples from the Obs
-691
-692        Parameters
-693        ----------
-694        samples : int
-695            Number of bootstrap samples to generate.
-696        random_numbers : np.ndarray
-697            Array of shape (samples, length) containing the random numbers to generate the bootstrap samples.
-698            If not provided the bootstrap samples are generated bashed on the md5 hash of the enesmble name.
-699        save_rng : str
-700            Save the random numbers to a file if a path is specified.
-701
-702        Returns
-703        -------
-704        numpy.ndarray
-705            Returns a numpy array of length N + 1 where N is the number of samples
-706            for the given ensemble and replicum. The zeroth entry of the array contains
-707            the mean value of the Obs, entries 1 to N contain the N import_bootstrap samples
-708            derived from the Obs. The current implementation only works for observables
-709            defined on exactly one ensemble and replicum. The derived bootstrap samples
-710            should agree with samples from a full bootstrap analysis up to O(1/N).
-711        """
-712        if len(self.names) != 1:
-713            raise Exception("'export_boostrap' is only implemented for Obs defined on one ensemble and replicum.")
-714
-715        name = self.names[0]
-716        length = self.N
-717
-718        if random_numbers is None:
-719            seed = int(hashlib.md5(name.encode()).hexdigest(), 16) & 0xFFFFFFFF
-720            rng = np.random.default_rng(seed)
-721            random_numbers = rng.integers(0, length, size=(samples, length))
-722
-723        if save_rng is not None:
-724            np.savetxt(save_rng, random_numbers, fmt='%i')
-725
-726        proj = np.vstack([np.bincount(o, minlength=length) for o in random_numbers]) / length
-727        ret = np.zeros(samples + 1)
-728        ret[0] = self.value
-729        ret[1:] = proj @ (self.deltas[name] + self.r_values[name])
-730        return ret
+            
691    def export_bootstrap(self, samples=500, random_numbers=None, save_rng=None):
+692        """Export bootstrap samples from the Obs
+693
+694        Parameters
+695        ----------
+696        samples : int
+697            Number of bootstrap samples to generate.
+698        random_numbers : np.ndarray
+699            Array of shape (samples, length) containing the random numbers to generate the bootstrap samples.
+700            If not provided the bootstrap samples are generated bashed on the md5 hash of the enesmble name.
+701        save_rng : str
+702            Save the random numbers to a file if a path is specified.
+703
+704        Returns
+705        -------
+706        numpy.ndarray
+707            Returns a numpy array of length N + 1 where N is the number of samples
+708            for the given ensemble and replicum. The zeroth entry of the array contains
+709            the mean value of the Obs, entries 1 to N contain the N import_bootstrap samples
+710            derived from the Obs. The current implementation only works for observables
+711            defined on exactly one ensemble and replicum. The derived bootstrap samples
+712            should agree with samples from a full bootstrap analysis up to O(1/N).
+713        """
+714        if len(self.names) != 1:
+715            raise Exception("'export_boostrap' is only implemented for Obs defined on one ensemble and replicum.")
+716
+717        name = self.names[0]
+718        length = self.N
+719
+720        if random_numbers is None:
+721            seed = int(hashlib.md5(name.encode()).hexdigest(), 16) & 0xFFFFFFFF
+722            rng = np.random.default_rng(seed)
+723            random_numbers = rng.integers(0, length, size=(samples, length))
+724
+725        if save_rng is not None:
+726            np.savetxt(save_rng, random_numbers, fmt='%i')
+727
+728        proj = np.vstack([np.bincount(o, minlength=length) for o in random_numbers]) / length
+729        ret = np.zeros(samples + 1)
+730        ret[0] = self.value
+731        ret[1:] = proj @ (self.deltas[name] + self.r_values[name])
+732        return ret
 
@@ -4476,8 +4482,8 @@ should agree with samples from a full bootstrap analysis up to O(1/N).
-
872    def sqrt(self):
-873        return derived_observable(lambda x, **kwargs: np.sqrt(x[0]), [self], man_grad=[1 / 2 / np.sqrt(self.value)])
+            
874    def sqrt(self):
+875        return derived_observable(lambda x, **kwargs: np.sqrt(x[0]), [self], man_grad=[1 / 2 / np.sqrt(self.value)])
 
@@ -4495,8 +4501,8 @@ should agree with samples from a full bootstrap analysis up to O(1/N).
-
875    def log(self):
-876        return derived_observable(lambda x, **kwargs: np.log(x[0]), [self], man_grad=[1 / self.value])
+            
877    def log(self):
+878        return derived_observable(lambda x, **kwargs: np.log(x[0]), [self], man_grad=[1 / self.value])
 
@@ -4514,8 +4520,8 @@ should agree with samples from a full bootstrap analysis up to O(1/N).
-
878    def exp(self):
-879        return derived_observable(lambda x, **kwargs: np.exp(x[0]), [self], man_grad=[np.exp(self.value)])
+            
880    def exp(self):
+881        return derived_observable(lambda x, **kwargs: np.exp(x[0]), [self], man_grad=[np.exp(self.value)])
 
@@ -4533,8 +4539,8 @@ should agree with samples from a full bootstrap analysis up to O(1/N).
-
881    def sin(self):
-882        return derived_observable(lambda x, **kwargs: np.sin(x[0]), [self], man_grad=[np.cos(self.value)])
+            
883    def sin(self):
+884        return derived_observable(lambda x, **kwargs: np.sin(x[0]), [self], man_grad=[np.cos(self.value)])
 
@@ -4552,8 +4558,8 @@ should agree with samples from a full bootstrap analysis up to O(1/N).
-
884    def cos(self):
-885        return derived_observable(lambda x, **kwargs: np.cos(x[0]), [self], man_grad=[-np.sin(self.value)])
+            
886    def cos(self):
+887        return derived_observable(lambda x, **kwargs: np.cos(x[0]), [self], man_grad=[-np.sin(self.value)])
 
@@ -4571,8 +4577,8 @@ should agree with samples from a full bootstrap analysis up to O(1/N).
-
887    def tan(self):
-888        return derived_observable(lambda x, **kwargs: np.tan(x[0]), [self], man_grad=[1 / np.cos(self.value) ** 2])
+            
889    def tan(self):
+890        return derived_observable(lambda x, **kwargs: np.tan(x[0]), [self], man_grad=[1 / np.cos(self.value) ** 2])
 
@@ -4590,8 +4596,8 @@ should agree with samples from a full bootstrap analysis up to O(1/N).
-
890    def arcsin(self):
-891        return derived_observable(lambda x: anp.arcsin(x[0]), [self])
+            
892    def arcsin(self):
+893        return derived_observable(lambda x: anp.arcsin(x[0]), [self])
 
@@ -4609,8 +4615,8 @@ should agree with samples from a full bootstrap analysis up to O(1/N).
-
893    def arccos(self):
-894        return derived_observable(lambda x: anp.arccos(x[0]), [self])
+            
895    def arccos(self):
+896        return derived_observable(lambda x: anp.arccos(x[0]), [self])
 
@@ -4628,8 +4634,8 @@ should agree with samples from a full bootstrap analysis up to O(1/N).
-
896    def arctan(self):
-897        return derived_observable(lambda x: anp.arctan(x[0]), [self])
+            
898    def arctan(self):
+899        return derived_observable(lambda x: anp.arctan(x[0]), [self])
 
@@ -4647,8 +4653,8 @@ should agree with samples from a full bootstrap analysis up to O(1/N).
-
899    def sinh(self):
-900        return derived_observable(lambda x, **kwargs: np.sinh(x[0]), [self], man_grad=[np.cosh(self.value)])
+            
901    def sinh(self):
+902        return derived_observable(lambda x, **kwargs: np.sinh(x[0]), [self], man_grad=[np.cosh(self.value)])
 
@@ -4666,8 +4672,8 @@ should agree with samples from a full bootstrap analysis up to O(1/N).
-
902    def cosh(self):
-903        return derived_observable(lambda x, **kwargs: np.cosh(x[0]), [self], man_grad=[np.sinh(self.value)])
+            
904    def cosh(self):
+905        return derived_observable(lambda x, **kwargs: np.cosh(x[0]), [self], man_grad=[np.sinh(self.value)])
 
@@ -4685,8 +4691,8 @@ should agree with samples from a full bootstrap analysis up to O(1/N).
-
905    def tanh(self):
-906        return derived_observable(lambda x, **kwargs: np.tanh(x[0]), [self], man_grad=[1 / np.cosh(self.value) ** 2])
+            
907    def tanh(self):
+908        return derived_observable(lambda x, **kwargs: np.tanh(x[0]), [self], man_grad=[1 / np.cosh(self.value) ** 2])
 
@@ -4704,8 +4710,8 @@ should agree with samples from a full bootstrap analysis up to O(1/N).
-
908    def arcsinh(self):
-909        return derived_observable(lambda x: anp.arcsinh(x[0]), [self])
+            
910    def arcsinh(self):
+911        return derived_observable(lambda x: anp.arcsinh(x[0]), [self])
 
@@ -4723,8 +4729,8 @@ should agree with samples from a full bootstrap analysis up to O(1/N).
-
911    def arccosh(self):
-912        return derived_observable(lambda x: anp.arccosh(x[0]), [self])
+            
913    def arccosh(self):
+914        return derived_observable(lambda x: anp.arccosh(x[0]), [self])
 
@@ -4742,8 +4748,8 @@ should agree with samples from a full bootstrap analysis up to O(1/N).
-
914    def arctanh(self):
-915        return derived_observable(lambda x: anp.arctanh(x[0]), [self])
+            
916    def arctanh(self):
+917        return derived_observable(lambda x: anp.arctanh(x[0]), [self])
 
@@ -4894,123 +4900,123 @@ should agree with samples from a full bootstrap analysis up to O(1/N).
-
 918class CObs:
- 919    """Class for a complex valued observable."""
- 920    __slots__ = ['_real', '_imag', 'tag']
- 921
- 922    def __init__(self, real, imag=0.0):
- 923        self._real = real
- 924        self._imag = imag
- 925        self.tag = None
- 926
- 927    @property
- 928    def real(self):
- 929        return self._real
- 930
- 931    @property
- 932    def imag(self):
- 933        return self._imag
- 934
- 935    def gamma_method(self, **kwargs):
- 936        """Executes the gamma_method for the real and the imaginary part."""
- 937        if isinstance(self.real, Obs):
- 938            self.real.gamma_method(**kwargs)
- 939        if isinstance(self.imag, Obs):
- 940            self.imag.gamma_method(**kwargs)
- 941
- 942    def is_zero(self):
- 943        """Checks whether both real and imaginary part are zero within machine precision."""
- 944        return self.real == 0.0 and self.imag == 0.0
- 945
- 946    def conjugate(self):
- 947        return CObs(self.real, -self.imag)
- 948
- 949    def __add__(self, other):
- 950        if isinstance(other, np.ndarray):
- 951            return other + self
- 952        elif hasattr(other, 'real') and hasattr(other, 'imag'):
- 953            return CObs(self.real + other.real,
- 954                        self.imag + other.imag)
- 955        else:
- 956            return CObs(self.real + other, self.imag)
- 957
- 958    def __radd__(self, y):
- 959        return self + y
- 960
- 961    def __sub__(self, other):
- 962        if isinstance(other, np.ndarray):
- 963            return -1 * (other - self)
- 964        elif hasattr(other, 'real') and hasattr(other, 'imag'):
- 965            return CObs(self.real - other.real, self.imag - other.imag)
- 966        else:
- 967            return CObs(self.real - other, self.imag)
- 968
- 969    def __rsub__(self, other):
- 970        return -1 * (self - other)
- 971
- 972    def __mul__(self, other):
- 973        if isinstance(other, np.ndarray):
- 974            return other * self
- 975        elif hasattr(other, 'real') and hasattr(other, 'imag'):
- 976            if all(isinstance(i, Obs) for i in [self.real, self.imag, other.real, other.imag]):
- 977                return CObs(derived_observable(lambda x, **kwargs: x[0] * x[1] - x[2] * x[3],
- 978                                               [self.real, other.real, self.imag, other.imag],
- 979                                               man_grad=[other.real.value, self.real.value, -other.imag.value, -self.imag.value]),
- 980                            derived_observable(lambda x, **kwargs: x[2] * x[1] + x[0] * x[3],
- 981                                               [self.real, other.real, self.imag, other.imag],
- 982                                               man_grad=[other.imag.value, self.imag.value, other.real.value, self.real.value]))
- 983            elif getattr(other, 'imag', 0) != 0:
- 984                return CObs(self.real * other.real - self.imag * other.imag,
- 985                            self.imag * other.real + self.real * other.imag)
- 986            else:
- 987                return CObs(self.real * other.real, self.imag * other.real)
- 988        else:
- 989            return CObs(self.real * other, self.imag * other)
- 990
- 991    def __rmul__(self, other):
- 992        return self * other
- 993
- 994    def __truediv__(self, other):
- 995        if isinstance(other, np.ndarray):
- 996            return 1 / (other / self)
- 997        elif hasattr(other, 'real') and hasattr(other, 'imag'):
- 998            r = other.real ** 2 + other.imag ** 2
- 999            return CObs((self.real * other.real + self.imag * other.imag) / r, (self.imag * other.real - self.real * other.imag) / r)
-1000        else:
-1001            return CObs(self.real / other, self.imag / other)
-1002
-1003    def __rtruediv__(self, other):
-1004        r = self.real ** 2 + self.imag ** 2
-1005        if hasattr(other, 'real') and hasattr(other, 'imag'):
-1006            return CObs((self.real * other.real + self.imag * other.imag) / r, (self.real * other.imag - self.imag * other.real) / r)
-1007        else:
-1008            return CObs(self.real * other / r, -self.imag * other / r)
-1009
-1010    def __abs__(self):
-1011        return np.sqrt(self.real**2 + self.imag**2)
-1012
-1013    def __pos__(self):
-1014        return self
-1015
-1016    def __neg__(self):
-1017        return -1 * self
-1018
-1019    def __eq__(self, other):
-1020        return self.real == other.real and self.imag == other.imag
-1021
-1022    def __str__(self):
-1023        return '(' + str(self.real) + int(self.imag >= 0.0) * '+' + str(self.imag) + 'j)'
-1024
-1025    def __repr__(self):
-1026        return 'CObs[' + str(self) + ']'
-1027
-1028    def __format__(self, format_type):
-1029        if format_type == "":
-1030            significance = 2
-1031            format_type = "2"
-1032        else:
-1033            significance = int(float(format_type.replace("+", "").replace("-", "")))
-1034        return f"({self.real:{format_type}}{self.imag:+{significance}}j)"
+            
 920class CObs:
+ 921    """Class for a complex valued observable."""
+ 922    __slots__ = ['_real', '_imag', 'tag']
+ 923
+ 924    def __init__(self, real, imag=0.0):
+ 925        self._real = real
+ 926        self._imag = imag
+ 927        self.tag = None
+ 928
+ 929    @property
+ 930    def real(self):
+ 931        return self._real
+ 932
+ 933    @property
+ 934    def imag(self):
+ 935        return self._imag
+ 936
+ 937    def gamma_method(self, **kwargs):
+ 938        """Executes the gamma_method for the real and the imaginary part."""
+ 939        if isinstance(self.real, Obs):
+ 940            self.real.gamma_method(**kwargs)
+ 941        if isinstance(self.imag, Obs):
+ 942            self.imag.gamma_method(**kwargs)
+ 943
+ 944    def is_zero(self):
+ 945        """Checks whether both real and imaginary part are zero within machine precision."""
+ 946        return self.real == 0.0 and self.imag == 0.0
+ 947
+ 948    def conjugate(self):
+ 949        return CObs(self.real, -self.imag)
+ 950
+ 951    def __add__(self, other):
+ 952        if isinstance(other, np.ndarray):
+ 953            return other + self
+ 954        elif hasattr(other, 'real') and hasattr(other, 'imag'):
+ 955            return CObs(self.real + other.real,
+ 956                        self.imag + other.imag)
+ 957        else:
+ 958            return CObs(self.real + other, self.imag)
+ 959
+ 960    def __radd__(self, y):
+ 961        return self + y
+ 962
+ 963    def __sub__(self, other):
+ 964        if isinstance(other, np.ndarray):
+ 965            return -1 * (other - self)
+ 966        elif hasattr(other, 'real') and hasattr(other, 'imag'):
+ 967            return CObs(self.real - other.real, self.imag - other.imag)
+ 968        else:
+ 969            return CObs(self.real - other, self.imag)
+ 970
+ 971    def __rsub__(self, other):
+ 972        return -1 * (self - other)
+ 973
+ 974    def __mul__(self, other):
+ 975        if isinstance(other, np.ndarray):
+ 976            return other * self
+ 977        elif hasattr(other, 'real') and hasattr(other, 'imag'):
+ 978            if all(isinstance(i, Obs) for i in [self.real, self.imag, other.real, other.imag]):
+ 979                return CObs(derived_observable(lambda x, **kwargs: x[0] * x[1] - x[2] * x[3],
+ 980                                               [self.real, other.real, self.imag, other.imag],
+ 981                                               man_grad=[other.real.value, self.real.value, -other.imag.value, -self.imag.value]),
+ 982                            derived_observable(lambda x, **kwargs: x[2] * x[1] + x[0] * x[3],
+ 983                                               [self.real, other.real, self.imag, other.imag],
+ 984                                               man_grad=[other.imag.value, self.imag.value, other.real.value, self.real.value]))
+ 985            elif getattr(other, 'imag', 0) != 0:
+ 986                return CObs(self.real * other.real - self.imag * other.imag,
+ 987                            self.imag * other.real + self.real * other.imag)
+ 988            else:
+ 989                return CObs(self.real * other.real, self.imag * other.real)
+ 990        else:
+ 991            return CObs(self.real * other, self.imag * other)
+ 992
+ 993    def __rmul__(self, other):
+ 994        return self * other
+ 995
+ 996    def __truediv__(self, other):
+ 997        if isinstance(other, np.ndarray):
+ 998            return 1 / (other / self)
+ 999        elif hasattr(other, 'real') and hasattr(other, 'imag'):
+1000            r = other.real ** 2 + other.imag ** 2
+1001            return CObs((self.real * other.real + self.imag * other.imag) / r, (self.imag * other.real - self.real * other.imag) / r)
+1002        else:
+1003            return CObs(self.real / other, self.imag / other)
+1004
+1005    def __rtruediv__(self, other):
+1006        r = self.real ** 2 + self.imag ** 2
+1007        if hasattr(other, 'real') and hasattr(other, 'imag'):
+1008            return CObs((self.real * other.real + self.imag * other.imag) / r, (self.real * other.imag - self.imag * other.real) / r)
+1009        else:
+1010            return CObs(self.real * other / r, -self.imag * other / r)
+1011
+1012    def __abs__(self):
+1013        return np.sqrt(self.real**2 + self.imag**2)
+1014
+1015    def __pos__(self):
+1016        return self
+1017
+1018    def __neg__(self):
+1019        return -1 * self
+1020
+1021    def __eq__(self, other):
+1022        return self.real == other.real and self.imag == other.imag
+1023
+1024    def __str__(self):
+1025        return '(' + str(self.real) + int(self.imag >= 0.0) * '+' + str(self.imag) + 'j)'
+1026
+1027    def __repr__(self):
+1028        return 'CObs[' + str(self) + ']'
+1029
+1030    def __format__(self, format_type):
+1031        if format_type == "":
+1032            significance = 2
+1033            format_type = "2"
+1034        else:
+1035            significance = int(float(format_type.replace("+", "").replace("-", "")))
+1036        return f"({self.real:{format_type}}{self.imag:+{significance}}j)"
 
@@ -5028,10 +5034,10 @@ should agree with samples from a full bootstrap analysis up to O(1/N).
-
922    def __init__(self, real, imag=0.0):
-923        self._real = real
-924        self._imag = imag
-925        self.tag = None
+            
924    def __init__(self, real, imag=0.0):
+925        self._real = real
+926        self._imag = imag
+927        self.tag = None
 
@@ -5082,12 +5088,12 @@ should agree with samples from a full bootstrap analysis up to O(1/N).
-
935    def gamma_method(self, **kwargs):
-936        """Executes the gamma_method for the real and the imaginary part."""
-937        if isinstance(self.real, Obs):
-938            self.real.gamma_method(**kwargs)
-939        if isinstance(self.imag, Obs):
-940            self.imag.gamma_method(**kwargs)
+            
937    def gamma_method(self, **kwargs):
+938        """Executes the gamma_method for the real and the imaginary part."""
+939        if isinstance(self.real, Obs):
+940            self.real.gamma_method(**kwargs)
+941        if isinstance(self.imag, Obs):
+942            self.imag.gamma_method(**kwargs)
 
@@ -5107,9 +5113,9 @@ should agree with samples from a full bootstrap analysis up to O(1/N).
-
942    def is_zero(self):
-943        """Checks whether both real and imaginary part are zero within machine precision."""
-944        return self.real == 0.0 and self.imag == 0.0
+            
944    def is_zero(self):
+945        """Checks whether both real and imaginary part are zero within machine precision."""
+946        return self.real == 0.0 and self.imag == 0.0
 
@@ -5129,8 +5135,8 @@ should agree with samples from a full bootstrap analysis up to O(1/N).
-
946    def conjugate(self):
-947        return CObs(self.real, -self.imag)
+            
948    def conjugate(self):
+949        return CObs(self.real, -self.imag)
 
@@ -5149,12 +5155,12 @@ should agree with samples from a full bootstrap analysis up to O(1/N).
-
1037def gamma_method(x, **kwargs):
-1038    """Vectorized version of the gamma_method applicable to lists or arrays of Obs.
-1039
-1040    See docstring of pe.Obs.gamma_method for details.
-1041    """
-1042    return np.vectorize(lambda o: o.gm(**kwargs))(x)
+            
1039def gamma_method(x, **kwargs):
+1040    """Vectorized version of the gamma_method applicable to lists or arrays of Obs.
+1041
+1042    See docstring of pe.Obs.gamma_method for details.
+1043    """
+1044    return np.vectorize(lambda o: o.gm(**kwargs))(x)
 
@@ -5176,12 +5182,12 @@ should agree with samples from a full bootstrap analysis up to O(1/N).
-
1037def gamma_method(x, **kwargs):
-1038    """Vectorized version of the gamma_method applicable to lists or arrays of Obs.
-1039
-1040    See docstring of pe.Obs.gamma_method for details.
-1041    """
-1042    return np.vectorize(lambda o: o.gm(**kwargs))(x)
+            
1039def gamma_method(x, **kwargs):
+1040    """Vectorized version of the gamma_method applicable to lists or arrays of Obs.
+1041
+1042    See docstring of pe.Obs.gamma_method for details.
+1043    """
+1044    return np.vectorize(lambda o: o.gm(**kwargs))(x)
 
@@ -5203,174 +5209,174 @@ should agree with samples from a full bootstrap analysis up to O(1/N).
-
1167def derived_observable(func, data, array_mode=False, **kwargs):
-1168    """Construct a derived Obs according to func(data, **kwargs) using automatic differentiation.
-1169
-1170    Parameters
-1171    ----------
-1172    func : object
-1173        arbitrary function of the form func(data, **kwargs). For the
-1174        automatic differentiation to work, all numpy functions have to have
-1175        the autograd wrapper (use 'import autograd.numpy as anp').
-1176    data : list
-1177        list of Obs, e.g. [obs1, obs2, obs3].
-1178    num_grad : bool
-1179        if True, numerical derivatives are used instead of autograd
-1180        (default False). To control the numerical differentiation the
-1181        kwargs of numdifftools.step_generators.MaxStepGenerator
-1182        can be used.
-1183    man_grad : list
-1184        manually supply a list or an array which contains the jacobian
-1185        of func. Use cautiously, supplying the wrong derivative will
-1186        not be intercepted.
-1187
-1188    Notes
-1189    -----
-1190    For simple mathematical operations it can be practical to use anonymous
-1191    functions. For the ratio of two observables one can e.g. use
-1192
-1193    new_obs = derived_observable(lambda x: x[0] / x[1], [obs1, obs2])
-1194    """
-1195
-1196    data = np.asarray(data)
-1197    raveled_data = data.ravel()
-1198
-1199    # Workaround for matrix operations containing non Obs data
-1200    if not all(isinstance(x, Obs) for x in raveled_data):
-1201        for i in range(len(raveled_data)):
-1202            if isinstance(raveled_data[i], (int, float)):
-1203                raveled_data[i] = cov_Obs(raveled_data[i], 0.0, "###dummy_covobs###")
-1204
-1205    allcov = {}
-1206    for o in raveled_data:
-1207        for name in o.cov_names:
-1208            if name in allcov:
-1209                if not np.allclose(allcov[name], o.covobs[name].cov):
-1210                    raise Exception('Inconsistent covariance matrices for %s!' % (name))
-1211            else:
-1212                allcov[name] = o.covobs[name].cov
-1213
-1214    n_obs = len(raveled_data)
-1215    new_names = sorted(set([y for x in [o.names for o in raveled_data] for y in x]))
-1216    new_cov_names = sorted(set([y for x in [o.cov_names for o in raveled_data] for y in x]))
-1217    new_sample_names = sorted(set(new_names) - set(new_cov_names))
-1218
-1219    reweighted = len(list(filter(lambda o: o.reweighted is True, raveled_data))) > 0
+            
1169def derived_observable(func, data, array_mode=False, **kwargs):
+1170    """Construct a derived Obs according to func(data, **kwargs) using automatic differentiation.
+1171
+1172    Parameters
+1173    ----------
+1174    func : object
+1175        arbitrary function of the form func(data, **kwargs). For the
+1176        automatic differentiation to work, all numpy functions have to have
+1177        the autograd wrapper (use 'import autograd.numpy as anp').
+1178    data : list
+1179        list of Obs, e.g. [obs1, obs2, obs3].
+1180    num_grad : bool
+1181        if True, numerical derivatives are used instead of autograd
+1182        (default False). To control the numerical differentiation the
+1183        kwargs of numdifftools.step_generators.MaxStepGenerator
+1184        can be used.
+1185    man_grad : list
+1186        manually supply a list or an array which contains the jacobian
+1187        of func. Use cautiously, supplying the wrong derivative will
+1188        not be intercepted.
+1189
+1190    Notes
+1191    -----
+1192    For simple mathematical operations it can be practical to use anonymous
+1193    functions. For the ratio of two observables one can e.g. use
+1194
+1195    new_obs = derived_observable(lambda x: x[0] / x[1], [obs1, obs2])
+1196    """
+1197
+1198    data = np.asarray(data)
+1199    raveled_data = data.ravel()
+1200
+1201    # Workaround for matrix operations containing non Obs data
+1202    if not all(isinstance(x, Obs) for x in raveled_data):
+1203        for i in range(len(raveled_data)):
+1204            if isinstance(raveled_data[i], (int, float)):
+1205                raveled_data[i] = cov_Obs(raveled_data[i], 0.0, "###dummy_covobs###")
+1206
+1207    allcov = {}
+1208    for o in raveled_data:
+1209        for name in o.cov_names:
+1210            if name in allcov:
+1211                if not np.allclose(allcov[name], o.covobs[name].cov):
+1212                    raise Exception('Inconsistent covariance matrices for %s!' % (name))
+1213            else:
+1214                allcov[name] = o.covobs[name].cov
+1215
+1216    n_obs = len(raveled_data)
+1217    new_names = sorted(set([y for x in [o.names for o in raveled_data] for y in x]))
+1218    new_cov_names = sorted(set([y for x in [o.cov_names for o in raveled_data] for y in x]))
+1219    new_sample_names = sorted(set(new_names) - set(new_cov_names))
 1220
-1221    if data.ndim == 1:
-1222        values = np.array([o.value for o in data])
-1223    else:
-1224        values = np.vectorize(lambda x: x.value)(data)
-1225
-1226    new_values = func(values, **kwargs)
+1221    reweighted = len(list(filter(lambda o: o.reweighted is True, raveled_data))) > 0
+1222
+1223    if data.ndim == 1:
+1224        values = np.array([o.value for o in data])
+1225    else:
+1226        values = np.vectorize(lambda x: x.value)(data)
 1227
-1228    multi = int(isinstance(new_values, np.ndarray))
+1228    new_values = func(values, **kwargs)
 1229
-1230    new_r_values = {}
-1231    new_idl_d = {}
-1232    for name in new_sample_names:
-1233        idl = []
-1234        tmp_values = np.zeros(n_obs)
-1235        for i, item in enumerate(raveled_data):
-1236            tmp_values[i] = item.r_values.get(name, item.value)
-1237            tmp_idl = item.idl.get(name)
-1238            if tmp_idl is not None:
-1239                idl.append(tmp_idl)
-1240        if multi > 0:
-1241            tmp_values = np.array(tmp_values).reshape(data.shape)
-1242        new_r_values[name] = func(tmp_values, **kwargs)
-1243        new_idl_d[name] = _merge_idx(idl)
-1244
-1245    if 'man_grad' in kwargs:
-1246        deriv = np.asarray(kwargs.get('man_grad'))
-1247        if new_values.shape + data.shape != deriv.shape:
-1248            raise Exception('Manual derivative does not have correct shape.')
-1249    elif kwargs.get('num_grad') is True:
-1250        if multi > 0:
-1251            raise Exception('Multi mode currently not supported for numerical derivative')
-1252        options = {
-1253            'base_step': 0.1,
-1254            'step_ratio': 2.5}
-1255        for key in options.keys():
-1256            kwarg = kwargs.get(key)
-1257            if kwarg is not None:
-1258                options[key] = kwarg
-1259        tmp_df = nd.Gradient(func, order=4, **{k: v for k, v in options.items() if v is not None})(values, **kwargs)
-1260        if tmp_df.size == 1:
-1261            deriv = np.array([tmp_df.real])
-1262        else:
-1263            deriv = tmp_df.real
-1264    else:
-1265        deriv = jacobian(func)(values, **kwargs)
-1266
-1267    final_result = np.zeros(new_values.shape, dtype=object)
+1230    multi = int(isinstance(new_values, np.ndarray))
+1231
+1232    new_r_values = {}
+1233    new_idl_d = {}
+1234    for name in new_sample_names:
+1235        idl = []
+1236        tmp_values = np.zeros(n_obs)
+1237        for i, item in enumerate(raveled_data):
+1238            tmp_values[i] = item.r_values.get(name, item.value)
+1239            tmp_idl = item.idl.get(name)
+1240            if tmp_idl is not None:
+1241                idl.append(tmp_idl)
+1242        if multi > 0:
+1243            tmp_values = np.array(tmp_values).reshape(data.shape)
+1244        new_r_values[name] = func(tmp_values, **kwargs)
+1245        new_idl_d[name] = _merge_idx(idl)
+1246
+1247    if 'man_grad' in kwargs:
+1248        deriv = np.asarray(kwargs.get('man_grad'))
+1249        if new_values.shape + data.shape != deriv.shape:
+1250            raise Exception('Manual derivative does not have correct shape.')
+1251    elif kwargs.get('num_grad') is True:
+1252        if multi > 0:
+1253            raise Exception('Multi mode currently not supported for numerical derivative')
+1254        options = {
+1255            'base_step': 0.1,
+1256            'step_ratio': 2.5}
+1257        for key in options.keys():
+1258            kwarg = kwargs.get(key)
+1259            if kwarg is not None:
+1260                options[key] = kwarg
+1261        tmp_df = nd.Gradient(func, order=4, **{k: v for k, v in options.items() if v is not None})(values, **kwargs)
+1262        if tmp_df.size == 1:
+1263            deriv = np.array([tmp_df.real])
+1264        else:
+1265            deriv = tmp_df.real
+1266    else:
+1267        deriv = jacobian(func)(values, **kwargs)
 1268
-1269    if array_mode is True:
+1269    final_result = np.zeros(new_values.shape, dtype=object)
 1270
-1271        class _Zero_grad():
-1272            def __init__(self, N):
-1273                self.grad = np.zeros((N, 1))
-1274
-1275        new_covobs_lengths = dict(set([y for x in [[(n, o.covobs[n].N) for n in o.cov_names] for o in raveled_data] for y in x]))
-1276        d_extracted = {}
-1277        g_extracted = {}
-1278        for name in new_sample_names:
-1279            d_extracted[name] = []
-1280            ens_length = len(new_idl_d[name])
-1281            for i_dat, dat in enumerate(data):
-1282                d_extracted[name].append(np.array([_expand_deltas_for_merge(o.deltas.get(name, np.zeros(ens_length)), o.idl.get(name, new_idl_d[name]), o.shape.get(name, ens_length), new_idl_d[name]) for o in dat.reshape(np.prod(dat.shape))]).reshape(dat.shape + (ens_length, )))
-1283        for name in new_cov_names:
-1284            g_extracted[name] = []
-1285            zero_grad = _Zero_grad(new_covobs_lengths[name])
-1286            for i_dat, dat in enumerate(data):
-1287                g_extracted[name].append(np.array([o.covobs.get(name, zero_grad).grad for o in dat.reshape(np.prod(dat.shape))]).reshape(dat.shape + (new_covobs_lengths[name], 1)))
-1288
-1289    for i_val, new_val in np.ndenumerate(new_values):
-1290        new_deltas = {}
-1291        new_grad = {}
-1292        if array_mode is True:
-1293            for name in new_sample_names:
-1294                ens_length = d_extracted[name][0].shape[-1]
-1295                new_deltas[name] = np.zeros(ens_length)
-1296                for i_dat, dat in enumerate(d_extracted[name]):
-1297                    new_deltas[name] += np.tensordot(deriv[i_val + (i_dat, )], dat)
-1298            for name in new_cov_names:
-1299                new_grad[name] = 0
-1300                for i_dat, dat in enumerate(g_extracted[name]):
-1301                    new_grad[name] += np.tensordot(deriv[i_val + (i_dat, )], dat)
-1302        else:
-1303            for j_obs, obs in np.ndenumerate(data):
-1304                for name in obs.names:
-1305                    if name in obs.cov_names:
-1306                        new_grad[name] = new_grad.get(name, 0) + deriv[i_val + j_obs] * obs.covobs[name].grad
-1307                    else:
-1308                        new_deltas[name] = new_deltas.get(name, 0) + deriv[i_val + j_obs] * _expand_deltas_for_merge(obs.deltas[name], obs.idl[name], obs.shape[name], new_idl_d[name])
-1309
-1310        new_covobs = {name: Covobs(0, allcov[name], name, grad=new_grad[name]) for name in new_grad}
+1271    if array_mode is True:
+1272
+1273        class _Zero_grad():
+1274            def __init__(self, N):
+1275                self.grad = np.zeros((N, 1))
+1276
+1277        new_covobs_lengths = dict(set([y for x in [[(n, o.covobs[n].N) for n in o.cov_names] for o in raveled_data] for y in x]))
+1278        d_extracted = {}
+1279        g_extracted = {}
+1280        for name in new_sample_names:
+1281            d_extracted[name] = []
+1282            ens_length = len(new_idl_d[name])
+1283            for i_dat, dat in enumerate(data):
+1284                d_extracted[name].append(np.array([_expand_deltas_for_merge(o.deltas.get(name, np.zeros(ens_length)), o.idl.get(name, new_idl_d[name]), o.shape.get(name, ens_length), new_idl_d[name]) for o in dat.reshape(np.prod(dat.shape))]).reshape(dat.shape + (ens_length, )))
+1285        for name in new_cov_names:
+1286            g_extracted[name] = []
+1287            zero_grad = _Zero_grad(new_covobs_lengths[name])
+1288            for i_dat, dat in enumerate(data):
+1289                g_extracted[name].append(np.array([o.covobs.get(name, zero_grad).grad for o in dat.reshape(np.prod(dat.shape))]).reshape(dat.shape + (new_covobs_lengths[name], 1)))
+1290
+1291    for i_val, new_val in np.ndenumerate(new_values):
+1292        new_deltas = {}
+1293        new_grad = {}
+1294        if array_mode is True:
+1295            for name in new_sample_names:
+1296                ens_length = d_extracted[name][0].shape[-1]
+1297                new_deltas[name] = np.zeros(ens_length)
+1298                for i_dat, dat in enumerate(d_extracted[name]):
+1299                    new_deltas[name] += np.tensordot(deriv[i_val + (i_dat, )], dat)
+1300            for name in new_cov_names:
+1301                new_grad[name] = 0
+1302                for i_dat, dat in enumerate(g_extracted[name]):
+1303                    new_grad[name] += np.tensordot(deriv[i_val + (i_dat, )], dat)
+1304        else:
+1305            for j_obs, obs in np.ndenumerate(data):
+1306                for name in obs.names:
+1307                    if name in obs.cov_names:
+1308                        new_grad[name] = new_grad.get(name, 0) + deriv[i_val + j_obs] * obs.covobs[name].grad
+1309                    else:
+1310                        new_deltas[name] = new_deltas.get(name, 0) + deriv[i_val + j_obs] * _expand_deltas_for_merge(obs.deltas[name], obs.idl[name], obs.shape[name], new_idl_d[name])
 1311
-1312        if not set(new_covobs.keys()).isdisjoint(new_deltas.keys()):
-1313            raise Exception('The same name has been used for deltas and covobs!')
-1314        new_samples = []
-1315        new_means = []
-1316        new_idl = []
-1317        new_names_obs = []
-1318        for name in new_names:
-1319            if name not in new_covobs:
-1320                new_samples.append(new_deltas[name])
-1321                new_idl.append(new_idl_d[name])
-1322                new_means.append(new_r_values[name][i_val])
-1323                new_names_obs.append(name)
-1324        final_result[i_val] = Obs(new_samples, new_names_obs, means=new_means, idl=new_idl)
-1325        for name in new_covobs:
-1326            final_result[i_val].names.append(name)
-1327        final_result[i_val]._covobs = new_covobs
-1328        final_result[i_val]._value = new_val
-1329        final_result[i_val].reweighted = reweighted
-1330
-1331    if multi == 0:
-1332        final_result = final_result.item()
-1333
-1334    return final_result
+1312        new_covobs = {name: Covobs(0, allcov[name], name, grad=new_grad[name]) for name in new_grad}
+1313
+1314        if not set(new_covobs.keys()).isdisjoint(new_deltas.keys()):
+1315            raise Exception('The same name has been used for deltas and covobs!')
+1316        new_samples = []
+1317        new_means = []
+1318        new_idl = []
+1319        new_names_obs = []
+1320        for name in new_names:
+1321            if name not in new_covobs:
+1322                new_samples.append(new_deltas[name])
+1323                new_idl.append(new_idl_d[name])
+1324                new_means.append(new_r_values[name][i_val])
+1325                new_names_obs.append(name)
+1326        final_result[i_val] = Obs(new_samples, new_names_obs, means=new_means, idl=new_idl)
+1327        for name in new_covobs:
+1328            final_result[i_val].names.append(name)
+1329        final_result[i_val]._covobs = new_covobs
+1330        final_result[i_val]._value = new_val
+1331        final_result[i_val].reweighted = reweighted
+1332
+1333    if multi == 0:
+1334        final_result = final_result.item()
+1335
+1336    return final_result
 
@@ -5417,46 +5423,46 @@ functions. For the ratio of two observables one can e.g. use

-
def reweight(weight, obs, **kwargs):
    """Reweight a list of observables.

    Parameters
    ----------
    weight : Obs
        Reweighting factor. An Observable that has to be defined on a superset of the
        configurations in obs[i].idl for all i.
    obs : list
        list of Obs, e.g. [obs1, obs2, obs3].
    all_configs : bool
        if True, the reweighted observables are normalized by the average of
        the reweighting factor on all configurations in weight.idl and not
        on the configurations in obs[i].idl. Default False.

    Returns
    -------
    result : list
        List of reweighted Obs, each with its `reweighted` flag set to True.

    Raises
    ------
    Exception
        If an observable contains covobs, if its ensembles are not a subset of
        the weight's ensembles, or if its idl is not a subset of the weight's idl.
    """
    result = []
    for i, ob in enumerate(obs):
        if len(ob.cov_names):
            raise Exception('Error: Not possible to reweight an Obs that contains covobs!')
        if not set(ob.names).issubset(weight.names):
            raise Exception('Error: Ensembles do not fit')
        for name in ob.names:
            if not set(ob.idl[name]).issubset(weight.idl[name]):
                raise Exception('obs[%d] has to be defined on a subset of the configs in weight.idl[%s]!' % (i, name))
        # Hoist the sorted ensemble names; they are needed several times below.
        sorted_names = sorted(ob.names)
        new_samples = []
        w_deltas = {}
        for name in sorted_names:
            # Restrict the weight's fluctuations to the configurations of ob.
            w_deltas[name] = _reduce_deltas(weight.deltas[name], weight.idl[name], ob.idl[name])
            # Raw samples of weight * observable, reconstructed from deltas + replica means.
            new_samples.append((w_deltas[name] + weight.r_values[name]) * (ob.deltas[name] + ob.r_values[name]))
        tmp_obs = Obs(new_samples, sorted_names, idl=[ob.idl[name] for name in sorted_names])

        if kwargs.get('all_configs'):
            # Normalize by the weight average over all configurations in weight.idl.
            new_weight = weight
        else:
            # Normalize by the weight average restricted to the configurations of ob.
            new_weight = Obs([w_deltas[name] + weight.r_values[name] for name in sorted_names], sorted_names, idl=[ob.idl[name] for name in sorted_names])

        result.append(tmp_obs / new_weight)
        result[-1].reweighted = True

    return result
 
@@ -5490,47 +5496,47 @@ on the configurations in obs[i].idl. Default False.
-
def correlate(obs_a, obs_b):
    """Correlate two observables.

    Parameters
    ----------
    obs_a : Obs
        First observable
    obs_b : Obs
        Second observable

    Returns
    -------
    Obs
        Observable whose samples are the configuration-wise product of the
        samples of obs_a and obs_b.

    Notes
    -----
    Keep in mind to only correlate primary observables which have not been reweighted
    yet. The reweighting has to be applied after correlating the observables.
    Currently only works if ensembles are identical (this is not strictly necessary).
    """

    if sorted(obs_a.names) != sorted(obs_b.names):
        raise Exception(f"Ensembles do not fit {set(sorted(obs_a.names)) ^ set(sorted(obs_b.names))}")
    if len(obs_a.cov_names) or len(obs_b.cov_names):
        raise Exception('Error: Not possible to correlate Obs that contain covobs!')
    for name in obs_a.names:
        # Use formatted messages; passing several arguments to Exception would
        # produce a tuple instead of a readable message.
        if obs_a.shape[name] != obs_b.shape[name]:
            raise Exception(f"Shapes of ensemble {name} do not fit")
        if obs_a.idl[name] != obs_b.idl[name]:
            raise Exception(f"idl of ensemble {name} do not fit")

    if obs_a.reweighted is True:
        warnings.warn("The first observable is already reweighted.", RuntimeWarning)
    if obs_b.reweighted is True:
        warnings.warn("The second observable is already reweighted.", RuntimeWarning)

    new_samples = []
    new_idl = []
    for name in sorted(obs_a.names):
        # Reconstruct the raw samples (deltas + replica mean) and multiply configuration-wise.
        new_samples.append((obs_a.deltas[name] + obs_a.r_values[name]) * (obs_b.deltas[name] + obs_b.r_values[name]))
        new_idl.append(obs_a.idl[name])

    o = Obs(new_samples, sorted(obs_a.names), idl=new_idl)
    o.reweighted = obs_a.reweighted or obs_b.reweighted
    return o
 
@@ -5565,74 +5571,74 @@ Currently only works if ensembles are identical (this is not strictly necessary)
-
def covariance(obs, visualize=False, correlation=False, smooth=None, **kwargs):
    r'''Calculates the error covariance matrix of a set of observables.

    WARNING: This function should be used with care, especially for observables with support on multiple
             ensembles with differing autocorrelations. See the notes below for details.

    The gamma method has to be applied first to all observables.

    Parameters
    ----------
    obs : list or numpy.ndarray
        List or one dimensional array of Obs
    visualize : bool
        If True plots the corresponding normalized correlation matrix (default False).
    correlation : bool
        If True the correlation matrix instead of the error covariance matrix is returned (default False).
    smooth : None or int
        If smooth is an integer 'E' between 2 and the dimension of the matrix minus 1 the eigenvalue
        smoothing procedure of hep-lat/9412087 is applied to the correlation matrix which leaves the
        largest E eigenvalues essentially unchanged and smoothes the smaller eigenvalues to avoid extremely
        small ones.

    Notes
    -----
    The error covariance is defined such that it agrees with the squared standard error for two identical observables
    $$\operatorname{cov}(a,a)=\sum_{s=1}^N\delta_a^s\delta_a^s/N^2=\Gamma_{aa}(0)/N=\operatorname{var}(a)/N=\sigma_a^2$$
    in the absence of autocorrelation.
    The error covariance is estimated by calculating the correlation matrix assuming no autocorrelation and then rescaling the correlation matrix by the full errors including the previous gamma method estimate for the autocorrelation of the observables. The covariance at windowsize 0 is guaranteed to be positive semi-definite
    $$\sum_{i,j}v_i\Gamma_{ij}(0)v_j=\frac{1}{N}\sum_{s=1}^N\sum_{i,j}v_i\delta_i^s\delta_j^s v_j=\frac{1}{N}\sum_{s=1}^N\sum_{i}|v_i\delta_i^s|^2\geq 0\,,$$ for every $v\in\mathbb{R}^M$, while such an identity does not hold for larger windows/lags.
    For observables defined on a single ensemble our approximation is equivalent to assuming that the integrated autocorrelation time of an off-diagonal element is equal to the geometric mean of the integrated autocorrelation times of the corresponding diagonal elements.
    $$\tau_{\mathrm{int}, ij}=\sqrt{\tau_{\mathrm{int}, i}\times \tau_{\mathrm{int}, j}}$$
    This construction ensures that the estimated covariance matrix is positive semi-definite (up to numerical rounding errors).
    '''

    dim = len(obs)

    # Warn when the matrix cannot have full rank from the available samples.
    max_samples = np.max([o.N for o in obs])
    contains_covobs = any(o.cov_names for o in obs)
    if max_samples <= dim and not contains_covobs:
        warnings.warn(f"The dimension of the covariance matrix ({dim}) is larger or equal to the number of samples ({max_samples}). This will result in a rank deficient matrix.", RuntimeWarning)

    # Fill the upper triangle and mirror it into a symmetric matrix.
    cov = np.zeros((dim, dim))
    for row in range(dim):
        for col in range(row, dim):
            cov[row, col] = _covariance_element(obs[row], obs[col])
    cov = cov + cov.T - np.diag(np.diag(cov))

    # Normalize to a correlation matrix (no-autocorrelation estimate).
    inv_std = np.diag(1 / np.sqrt(np.diag(cov)))
    corr = inv_std @ cov @ inv_std

    if isinstance(smooth, int):
        corr = _smooth_eigenvalues(corr, smooth)

    if visualize:
        plt.matshow(corr, vmin=-1, vmax=1)
        plt.set_cmap('RdBu')
        plt.colorbar()
        plt.draw()

    if correlation is True:
        return corr

    # Rescale with the full gamma-method errors, which include autocorrelation.
    err_diag = np.diag([o.dvalue for o in obs])
    cov = err_diag @ corr @ err_diag

    eigenvalues = np.linalg.eigh(cov)[0]
    if not np.all(eigenvalues >= 0):
        warnings.warn("Covariance matrix is not positive semi-definite (Eigenvalues: " + str(eigenvalues) + ")", RuntimeWarning)

    return cov
 
@@ -5684,24 +5690,24 @@ This construction ensures that the estimated covariance matrix is positive semi-
-
def import_jackknife(jacks, name, idl=None):
    """Imports jackknife samples and returns an Obs

    Parameters
    ----------
    jacks : numpy.ndarray
        numpy array containing the mean value as zeroth entry and
        the N jackknife samples as first to Nth entry.
    name : str
        name of the ensemble the samples are defined on.
    idl : list or range, optional
        configuration list for the ensemble; forwarded to the Obs constructor.

    Returns
    -------
    Obs
        Observable whose central value is jacks[0] and whose fluctuations
        are recovered from the jackknife samples.
    """
    n = len(jacks) - 1
    # Invert the jackknife projection: x_i = sum_j jack_j - (n - 1) * jack_i.
    inverse_proj = np.ones((n, n)) - (n - 1) * np.identity(n)
    raw_samples = jacks[1:] @ inverse_proj
    center = np.mean(raw_samples)
    new_obs = Obs([raw_samples - center], [name], idl=idl, means=[center])
    # Use the exact mean from the zeroth entry rather than the sample average.
    new_obs._value = jacks[0]
    return new_obs
 
@@ -5731,34 +5737,34 @@ name of the ensemble the samples are defined on.
-
def import_bootstrap(boots, name, random_numbers):
    """Imports bootstrap samples and returns an Obs

    Parameters
    ----------
    boots : numpy.ndarray
        numpy array containing the mean value as zeroth entry and
        the N bootstrap samples as first to Nth entry.
    name : str
        name of the ensemble the samples are defined on.
    random_numbers : np.ndarray
        Array of shape (samples, length) containing the random numbers to generate the bootstrap samples,
        where samples is the number of bootstrap samples and length is the length of the original Monte Carlo
        chain to be reconstructed.

    Returns
    -------
    Obs
        Observable whose central value is boots[0].

    Raises
    ------
    ValueError
        If random_numbers does not match the number of bootstrap samples or if
        there are fewer bootstrap samples than Monte Carlo data points.
    """
    n_boot, chain_length = random_numbers.shape
    if n_boot != len(boots) - 1:
        raise ValueError("Random numbers do not have the correct shape.")

    if n_boot < chain_length:
        raise ValueError("Obs can't be reconstructed if there are fewer bootstrap samples than Monte Carlo data points.")

    # Each row counts how often every configuration enters a bootstrap sample.
    counts = np.vstack([np.bincount(row, minlength=chain_length) for row in random_numbers]) / chain_length

    # Least-squares inversion of the bootstrap projection.
    reconstructed, *_ = scipy.linalg.lstsq(counts, boots[1:])
    ret = Obs([reconstructed], [name])
    ret._value = boots[0]
    return ret
 
@@ -5792,34 +5798,34 @@ chain to be reconstructed.
-
def merge_obs(list_of_obs):
    """Combine all observables in list_of_obs into one new observable

    Parameters
    ----------
    list_of_obs : list
        list of the Obs object to be combined

    Returns
    -------
    Obs
        Combined observable defined on the union of all replica.

    Notes
    -----
    It is not possible to combine obs which are based on the same replicum
    """
    replist = [item for obs in list_of_obs for item in obs.names]
    if len(replist) != len(set(replist)):
        raise Exception('list_of_obs contains duplicate replica: %s' % (str(replist)))
    if any(len(o.cov_names) for o in list_of_obs):
        raise Exception('Not possible to merge data that contains covobs!')
    new_dict = {}
    idl_dict = {}
    for o in list_of_obs:
        # Reconstruct the raw samples (deltas + replica mean) per replicum.
        new_dict.update({key: o.deltas.get(key, 0) + o.r_values.get(key, 0)
                        for key in set(o.deltas) | set(o.r_values)})
        idl_dict.update({key: o.idl.get(key, 0) for key in set(o.deltas)})

    names = sorted(new_dict.keys())
    o = Obs([new_dict[name] for name in names], names, idl=[idl_dict[name] for name in names])
    # The merged observable counts as reweighted if any input was reweighted.
    o.reweighted = any(oi.reweighted for oi in list_of_obs)
    return o
 
@@ -5850,47 +5856,47 @@ list of the Obs object to be combined
-
1678def cov_Obs(means, cov, name, grad=None):
-1679    """Create an Obs based on mean(s) and a covariance matrix
-1680
-1681    Parameters
-1682    ----------
-1683    mean : list of floats or float
-1684        N mean value(s) of the new Obs
-1685    cov : list or array
-1686        2d (NxN) Covariance matrix, 1d diagonal entries or 0d covariance
-1687    name : str
-1688        identifier for the covariance matrix
-1689    grad : list or array
-1690        Gradient of the Covobs wrt. the means belonging to cov.
-1691    """
-1692
-1693    def covobs_to_obs(co):
-1694        """Make an Obs out of a Covobs
-1695
-1696        Parameters
-1697        ----------
-1698        co : Covobs
-1699            Covobs to be embedded into the Obs
-1700        """
-1701        o = Obs([], [], means=[])
-1702        o._value = co.value
-1703        o.names.append(co.name)
-1704        o._covobs[co.name] = co
-1705        o._dvalue = np.sqrt(co.errsq())
-1706        return o
-1707
-1708    ol = []
-1709    if isinstance(means, (float, int)):
-1710        means = [means]
-1711
-1712    for i in range(len(means)):
-1713        ol.append(covobs_to_obs(Covobs(means[i], cov, name, pos=i, grad=grad)))
-1714    if ol[0].covobs[name].N != len(means):
-1715        raise Exception('You have to provide %d mean values!' % (ol[0].N))
-1716    if len(ol) == 1:
-1717        return ol[0]
-1718    return ol
+            
1680def cov_Obs(means, cov, name, grad=None):
+1681    """Create an Obs based on mean(s) and a covariance matrix
+1682
+1683    Parameters
+1684    ----------
+1685    mean : list of floats or float
+1686        N mean value(s) of the new Obs
+1687    cov : list or array
+1688        2d (NxN) Covariance matrix, 1d diagonal entries or 0d covariance
+1689    name : str
+1690        identifier for the covariance matrix
+1691    grad : list or array
+1692        Gradient of the Covobs wrt. the means belonging to cov.
+1693    """
+1694
+1695    def covobs_to_obs(co):
+1696        """Make an Obs out of a Covobs
+1697
+1698        Parameters
+1699        ----------
+1700        co : Covobs
+1701            Covobs to be embedded into the Obs
+1702        """
+1703        o = Obs([], [], means=[])
+1704        o._value = co.value
+1705        o.names.append(co.name)
+1706        o._covobs[co.name] = co
+1707        o._dvalue = np.sqrt(co.errsq())
+1708        return o
+1709
+1710    ol = []
+1711    if isinstance(means, (float, int)):
+1712        means = [means]
+1713
+1714    for i in range(len(means)):
+1715        ol.append(covobs_to_obs(Covobs(means[i], cov, name, pos=i, grad=grad)))
+1716    if ol[0].covobs[name].N != len(means):
+1717        raise Exception('You have to provide %d mean values!' % (ol[0].N))
+1718    if len(ol) == 1:
+1719        return ol[0]
+1720    return ol