diff --git a/docs/pyerrors/obs.html b/docs/pyerrors/obs.html index 4714d5d1..08587452 100644 --- a/docs/pyerrors/obs.html +++ b/docs/pyerrors/obs.html @@ -445,1397 +445,1405 @@ 238 _parse_kwarg('N_sigma') 239 240 for e, e_name in enumerate(self.mc_names): - 241 r_length = [] - 242 for r_name in e_content[e_name]: - 243 if isinstance(self.idl[r_name], range): - 244 r_length.append(len(self.idl[r_name])) - 245 else: - 246 r_length.append((self.idl[r_name][-1] - self.idl[r_name][0] + 1)) - 247 - 248 e_N = np.sum([self.shape[r_name] for r_name in e_content[e_name]]) - 249 w_max = max(r_length) // 2 - 250 e_gamma[e_name] = np.zeros(w_max) - 251 self.e_rho[e_name] = np.zeros(w_max) - 252 self.e_drho[e_name] = np.zeros(w_max) - 253 - 254 for r_name in e_content[e_name]: - 255 e_gamma[e_name] += self._calc_gamma(self.deltas[r_name], self.idl[r_name], self.shape[r_name], w_max, fft) - 256 - 257 gamma_div = np.zeros(w_max) - 258 for r_name in e_content[e_name]: - 259 gamma_div += self._calc_gamma(np.ones((self.shape[r_name])), self.idl[r_name], self.shape[r_name], w_max, fft) - 260 gamma_div[gamma_div < 1] = 1.0 - 261 e_gamma[e_name] /= gamma_div[:w_max] - 262 - 263 if np.abs(e_gamma[e_name][0]) < 10 * np.finfo(float).tiny: # Prevent division by zero - 264 self.e_tauint[e_name] = 0.5 - 265 self.e_dtauint[e_name] = 0.0 - 266 self.e_dvalue[e_name] = 0.0 - 267 self.e_ddvalue[e_name] = 0.0 - 268 self.e_windowsize[e_name] = 0 - 269 continue - 270 - 271 gaps = [] - 272 for r_name in e_content[e_name]: - 273 if isinstance(self.idl[r_name], range): - 274 gaps.append(1) - 275 else: - 276 gaps.append(np.min(np.diff(self.idl[r_name]))) - 277 - 278 if not np.all([gi == gaps[0] for gi in gaps]): - 279 raise Exception(f"Replica for ensemble {e_name} are not equally spaced.", gaps) - 280 else: - 281 gapsize = gaps[0] - 282 - 283 self.e_rho[e_name] = e_gamma[e_name][:w_max] / e_gamma[e_name][0] - 284 self.e_n_tauint[e_name] = np.cumsum(np.concatenate(([0.5], self.e_rho[e_name][1:]))) - 285 # Make sure no entry of tauint is smaller than 0.5 - 286 self.e_n_tauint[e_name][self.e_n_tauint[e_name] <= 0.5] = 0.5 + np.finfo(np.float64).eps - 287 # hep-lat/0306017 eq. (42) - 288 self.e_n_dtauint[e_name] = self.e_n_tauint[e_name] * 2 * np.sqrt(np.abs(np.arange(w_max) / gapsize + 0.5 - self.e_n_tauint[e_name]) / e_N) - 289 self.e_n_dtauint[e_name][0] = 0.0 - 290 - 291 def _compute_drho(i): - 292 tmp = (self.e_rho[e_name][i + 1:w_max] - 293 + np.concatenate([self.e_rho[e_name][i - 1:None if i - w_max // 2 < 0 else 2 * (i - w_max // 2):-1], - 294 self.e_rho[e_name][1:max(1, w_max - 2 * i)]]) - 295 - 2 * self.e_rho[e_name][i] * self.e_rho[e_name][1:w_max - i]) - 296 self.e_drho[e_name][i] = np.sqrt(np.sum(tmp ** 2) / e_N) - 297 - 298 if self.tau_exp[e_name] > 0: - 299 _compute_drho(gapsize) - 300 texp = self.tau_exp[e_name] - 301 # Critical slowing down analysis - 302 if w_max // 2 <= 1: - 303 raise Exception("Need at least 8 samples for tau_exp error analysis") - 304 for n in range(gapsize, w_max // 2, gapsize): - 305 _compute_drho(n + gapsize) - 306 if (self.e_rho[e_name][n] - self.N_sigma[e_name] * self.e_drho[e_name][n]) < 0 or n >= w_max // 2 - 2: - 307 # Bias correction hep-lat/0306017 eq. 
(49) included - 308 self.e_tauint[e_name] = self.e_n_tauint[e_name][n] * (1 + (2 * n / gapsize + 1) / e_N) / (1 + 1 / e_N) + texp * np.abs(self.e_rho[e_name][n + 1]) # The absolute makes sure, that the tail contribution is always positive - 309 self.e_dtauint[e_name] = np.sqrt(self.e_n_dtauint[e_name][n] ** 2 + texp ** 2 * self.e_drho[e_name][n + 1] ** 2) - 310 # Error of tau_exp neglected so far, missing term: self.e_rho[e_name][n + 1] ** 2 * d_tau_exp ** 2 - 311 self.e_dvalue[e_name] = np.sqrt(2 * self.e_tauint[e_name] * e_gamma[e_name][0] * (1 + 1 / e_N) / e_N) - 312 self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt((n / gapsize + 0.5) / e_N) - 313 self.e_windowsize[e_name] = n - 314 break - 315 else: - 316 if self.S[e_name] == 0.0: - 317 self.e_tauint[e_name] = 0.5 - 318 self.e_dtauint[e_name] = 0.0 - 319 self.e_dvalue[e_name] = np.sqrt(e_gamma[e_name][0] / (e_N - 1)) - 320 self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt(0.5 / e_N) - 321 self.e_windowsize[e_name] = 0 - 322 else: - 323 # Standard automatic windowing procedure - 324 tau = self.S[e_name] / np.log((2 * self.e_n_tauint[e_name][gapsize::gapsize] + 1) / (2 * self.e_n_tauint[e_name][gapsize::gapsize] - 1)) - 325 g_w = np.exp(- np.arange(1, len(tau) + 1) / tau) - tau / np.sqrt(np.arange(1, len(tau) + 1) * e_N) - 326 for n in range(1, w_max // gapsize): - 327 if g_w[n - 1] < 0 or n >= w_max // gapsize - 1: - 328 _compute_drho(gapsize * n) - 329 n *= gapsize - 330 self.e_tauint[e_name] = self.e_n_tauint[e_name][n] * (1 + (2 * n / gapsize + 1) / e_N) / (1 + 1 / e_N) # Bias correction hep-lat/0306017 eq. (49) - 331 self.e_dtauint[e_name] = self.e_n_dtauint[e_name][n] - 332 self.e_dvalue[e_name] = np.sqrt(2 * self.e_tauint[e_name] * e_gamma[e_name][0] * (1 + 1 / e_N) / e_N) - 333 self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt((n / gapsize + 0.5) / e_N) - 334 self.e_windowsize[e_name] = n - 335 break - 336 - 337 self._dvalue += self.e_dvalue[e_name] ** 2 - 338 self.ddvalue += (self.e_dvalue[e_name] * self.e_ddvalue[e_name]) ** 2 - 339 - 340 for e_name in self.cov_names: - 341 self.e_dvalue[e_name] = np.sqrt(self.covobs[e_name].errsq()) - 342 self.e_ddvalue[e_name] = 0 - 343 self._dvalue += self.e_dvalue[e_name]**2 - 344 - 345 self._dvalue = np.sqrt(self._dvalue) - 346 if self._dvalue == 0.0: - 347 self.ddvalue = 0.0 - 348 else: - 349 self.ddvalue = np.sqrt(self.ddvalue) / self._dvalue - 350 return - 351 - 352 gm = gamma_method - 353 - 354 def _calc_gamma(self, deltas, idx, shape, w_max, fft): - 355 """Calculate Gamma_{AA} from the deltas, which are defined on idx. - 356 idx is assumed to be a contiguous range (possibly with a stepsize != 1) - 357 - 358 Parameters - 359 ---------- - 360 deltas : list - 361 List of fluctuations - 362 idx : list - 363 List or range of configurations on which the deltas are defined. - 364 shape : int - 365 Number of configurations in idx. - 366 w_max : int - 367 Upper bound for the summation window. - 368 fft : bool - 369 determines whether the fft algorithm is used for the computation - 370 of the autocorrelation function. 
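As an aside on the fft branch used here: a minimal standalone sketch (the helper names autocov_fft and autocov_direct are illustrative, not part of pyerrors) of why the zero-padded FFT reproduces the direct dot-product sums Gamma(t) = sum_i delta_i * delta_{i+t}:

    import numpy as np

    def autocov_fft(deltas, w_max):
        new_shape = len(deltas)
        max_gamma = min(new_shape, w_max)
        # Even zero-padding (as in _calc_gamma) avoids wrap-around, so the
        # circular FFT correlation equals the linear one for all t < max_gamma.
        padding = new_shape + max_gamma + (new_shape + max_gamma) % 2
        return np.fft.irfft(np.abs(np.fft.rfft(deltas, padding)) ** 2)[:max_gamma]

    def autocov_direct(deltas, w_max):
        n = len(deltas)
        return np.array([deltas[:n - t].dot(deltas[t:]) for t in range(min(n, w_max))])

    d = np.random.default_rng(0).normal(size=100)
    assert np.allclose(autocov_fft(d, 25), autocov_direct(d, 25))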
- 371 """ - 372 gamma = np.zeros(w_max) - 373 deltas = _expand_deltas(deltas, idx, shape) - 374 new_shape = len(deltas) - 375 if fft: - 376 max_gamma = min(new_shape, w_max) - 377 # The padding for the fft has to be even - 378 padding = new_shape + max_gamma + (new_shape + max_gamma) % 2 - 379 gamma[:max_gamma] += np.fft.irfft(np.abs(np.fft.rfft(deltas, padding)) ** 2)[:max_gamma] - 380 else: - 381 for n in range(w_max): - 382 if new_shape - n >= 0: - 383 gamma[n] += deltas[0:new_shape - n].dot(deltas[n:new_shape]) - 384 - 385 return gamma - 386 - 387 def details(self, ens_content=True): - 388 """Output detailed properties of the Obs. - 389 - 390 Parameters - 391 ---------- - 392 ens_content : bool - 393 print details about the ensembles and replica if true. - 394 """ - 395 if self.tag is not None: - 396 print("Description:", self.tag) - 397 if not hasattr(self, 'e_dvalue'): - 398 print('Result\t %3.8e' % (self.value)) - 399 else: - 400 if self.value == 0.0: - 401 percentage = np.nan - 402 else: - 403 percentage = np.abs(self._dvalue / self.value) * 100 - 404 print('Result\t %3.8e +/- %3.8e +/- %3.8e (%3.3f%%)' % (self.value, self._dvalue, self.ddvalue, percentage)) - 405 if len(self.e_names) > 1: - 406 print(' Ensemble errors:') - 407 e_content = self.e_content - 408 for e_name in self.mc_names: - 409 if isinstance(self.idl[e_content[e_name][0]], range): - 410 gap = self.idl[e_content[e_name][0]].step + 241 gapsize = _determine_gap(self, e_content, e_name) + 242 + 243 r_length = [] + 244 for r_name in e_content[e_name]: + 245 if isinstance(self.idl[r_name], range): + 246 r_length.append(len(self.idl[r_name]) * self.idl[r_name].step // gapsize) + 247 else: + 248 r_length.append((self.idl[r_name][-1] - self.idl[r_name][0] + 1) // gapsize) + 249 + 250 e_N = np.sum([self.shape[r_name] for r_name in e_content[e_name]]) + 251 w_max = max(r_length) // 2 + 252 e_gamma[e_name] = np.zeros(w_max) + 253 self.e_rho[e_name] = np.zeros(w_max) + 254 self.e_drho[e_name] = np.zeros(w_max) + 255 + 256 for r_name in e_content[e_name]: + 257 e_gamma[e_name] += self._calc_gamma(self.deltas[r_name], self.idl[r_name], self.shape[r_name], w_max, fft, gapsize) + 258 + 259 gamma_div = np.zeros(w_max) + 260 for r_name in e_content[e_name]: + 261 gamma_div += self._calc_gamma(np.ones((self.shape[r_name])), self.idl[r_name], self.shape[r_name], w_max, fft, gapsize) + 262 gamma_div[gamma_div < 1] = 1.0 + 263 e_gamma[e_name] /= gamma_div[:w_max] + 264 + 265 if np.abs(e_gamma[e_name][0]) < 10 * np.finfo(float).tiny: # Prevent division by zero + 266 self.e_tauint[e_name] = 0.5 + 267 self.e_dtauint[e_name] = 0.0 + 268 self.e_dvalue[e_name] = 0.0 + 269 self.e_ddvalue[e_name] = 0.0 + 270 self.e_windowsize[e_name] = 0 + 271 continue + 272 + 273 self.e_rho[e_name] = e_gamma[e_name][:w_max] / e_gamma[e_name][0] + 274 self.e_n_tauint[e_name] = np.cumsum(np.concatenate(([0.5], self.e_rho[e_name][1:]))) + 275 # Make sure no entry of tauint is smaller than 0.5 + 276 self.e_n_tauint[e_name][self.e_n_tauint[e_name] <= 0.5] = 0.5 + np.finfo(np.float64).eps + 277 # hep-lat/0306017 eq. 
(42) + 278 self.e_n_dtauint[e_name] = self.e_n_tauint[e_name] * 2 * np.sqrt(np.abs(np.arange(w_max) + 0.5 - self.e_n_tauint[e_name]) / e_N) + 279 self.e_n_dtauint[e_name][0] = 0.0 + 280 + 281 def _compute_drho(i): + 282 tmp = (self.e_rho[e_name][i + 1:w_max] + 283 + np.concatenate([self.e_rho[e_name][i - 1:None if i - w_max // 2 < 0 else 2 * (i - w_max // 2):-1], + 284 self.e_rho[e_name][1:max(1, w_max - 2 * i)]]) + 285 - 2 * self.e_rho[e_name][i] * self.e_rho[e_name][1:w_max - i]) + 286 self.e_drho[e_name][i] = np.sqrt(np.sum(tmp ** 2) / e_N) + 287 + 288 if self.tau_exp[e_name] > 0: + 289 _compute_drho(1) + 290 texp = self.tau_exp[e_name] + 291 # Critical slowing down analysis + 292 if w_max // 2 <= 1: + 293 raise Exception("Need at least 8 samples for tau_exp error analysis") + 294 for n in range(1, w_max // 2): + 295 _compute_drho(n + 1) + 296 if (self.e_rho[e_name][n] - self.N_sigma[e_name] * self.e_drho[e_name][n]) < 0 or n >= w_max // 2 - 2: + 297 # Bias correction hep-lat/0306017 eq. (49) included + 298 self.e_tauint[e_name] = self.e_n_tauint[e_name][n] * (1 + (2 * n + 1) / e_N) / (1 + 1 / e_N) + texp * np.abs(self.e_rho[e_name][n + 1]) # The absolute makes sure, that the tail contribution is always positive + 299 self.e_dtauint[e_name] = np.sqrt(self.e_n_dtauint[e_name][n] ** 2 + texp ** 2 * self.e_drho[e_name][n + 1] ** 2) + 300 # Error of tau_exp neglected so far, missing term: self.e_rho[e_name][n + 1] ** 2 * d_tau_exp ** 2 + 301 self.e_dvalue[e_name] = np.sqrt(2 * self.e_tauint[e_name] * e_gamma[e_name][0] * (1 + 1 / e_N) / e_N) + 302 self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt((n + 0.5) / e_N) + 303 self.e_windowsize[e_name] = n + 304 break + 305 else: + 306 if self.S[e_name] == 0.0: + 307 self.e_tauint[e_name] = 0.5 + 308 self.e_dtauint[e_name] = 0.0 + 309 self.e_dvalue[e_name] = np.sqrt(e_gamma[e_name][0] / (e_N - 1)) + 310 self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt(0.5 / e_N) + 311 self.e_windowsize[e_name] = 0 + 312 else: + 313 # Standard automatic windowing procedure + 314 tau = self.S[e_name] / np.log((2 * self.e_n_tauint[e_name][1:] + 1) / (2 * self.e_n_tauint[e_name][1:] - 1)) + 315 g_w = np.exp(- np.arange(1, len(tau) + 1) / tau) - tau / np.sqrt(np.arange(1, len(tau) + 1) * e_N) + 316 for n in range(1, w_max): + 317 if g_w[n - 1] < 0 or n >= w_max - 1: + 318 _compute_drho(n) + 319 self.e_tauint[e_name] = self.e_n_tauint[e_name][n] * (1 + (2 * n + 1) / e_N) / (1 + 1 / e_N) # Bias correction hep-lat/0306017 eq. (49) + 320 self.e_dtauint[e_name] = self.e_n_dtauint[e_name][n] + 321 self.e_dvalue[e_name] = np.sqrt(2 * self.e_tauint[e_name] * e_gamma[e_name][0] * (1 + 1 / e_N) / e_N) + 322 self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt((n + 0.5) / e_N) + 323 self.e_windowsize[e_name] = n + 324 break + 325 + 326 self._dvalue += self.e_dvalue[e_name] ** 2 + 327 self.ddvalue += (self.e_dvalue[e_name] * self.e_ddvalue[e_name]) ** 2 + 328 + 329 for e_name in self.cov_names: + 330 self.e_dvalue[e_name] = np.sqrt(self.covobs[e_name].errsq()) + 331 self.e_ddvalue[e_name] = 0 + 332 self._dvalue += self.e_dvalue[e_name]**2 + 333 + 334 self._dvalue = np.sqrt(self._dvalue) + 335 if self._dvalue == 0.0: + 336 self.ddvalue = 0.0 + 337 else: + 338 self.ddvalue = np.sqrt(self.ddvalue) / self._dvalue + 339 return + 340 + 341 gm = gamma_method + 342 + 343 def _calc_gamma(self, deltas, idx, shape, w_max, fft, gapsize): + 344 """Calculate Gamma_{AA} from the deltas, which are defined on idx. 
+ 345 idx is assumed to be a contiguous range (possibly with a stepsize != 1) + 346 + 347 Parameters + 348 ---------- + 349 deltas : list + 350 List of fluctuations + 351 idx : list + 352 List or range of configurations on which the deltas are defined. + 353 shape : int + 354 Number of configurations in idx. + 355 w_max : int + 356 Upper bound for the summation window. + 357 fft : bool + 358 determines whether the fft algorithm is used for the computation + 359 of the autocorrelation function. + 360 gapsize : int + 361 The target distance between two configurations. If longer distances + 362 are found in idx, the data is expanded. + 363 """ + 364 gamma = np.zeros(w_max) + 365 deltas = _expand_deltas(deltas, idx, shape, gapsize) + 366 new_shape = len(deltas) + 367 if fft: + 368 max_gamma = min(new_shape, w_max) + 369 # The padding for the fft has to be even + 370 padding = new_shape + max_gamma + (new_shape + max_gamma) % 2 + 371 gamma[:max_gamma] += np.fft.irfft(np.abs(np.fft.rfft(deltas, padding)) ** 2)[:max_gamma] + 372 else: + 373 for n in range(w_max): + 374 if new_shape - n >= 0: + 375 gamma[n] += deltas[0:new_shape - n].dot(deltas[n:new_shape]) + 376 + 377 return gamma + 378 + 379 def details(self, ens_content=True): + 380 """Output detailed properties of the Obs. + 381 + 382 Parameters + 383 ---------- + 384 ens_content : bool + 385 print details about the ensembles and replica if true. + 386 """ + 387 if self.tag is not None: + 388 print("Description:", self.tag) + 389 if not hasattr(self, 'e_dvalue'): + 390 print('Result\t %3.8e' % (self.value)) + 391 else: + 392 if self.value == 0.0: + 393 percentage = np.nan + 394 else: + 395 percentage = np.abs(self._dvalue / self.value) * 100 + 396 print('Result\t %3.8e +/- %3.8e +/- %3.8e (%3.3f%%)' % (self.value, self._dvalue, self.ddvalue, percentage)) + 397 if len(self.e_names) > 1: + 398 print(' Ensemble errors:') + 399 e_content = self.e_content + 400 for e_name in self.mc_names: + 401 gap = _determine_gap(self, e_content, e_name) + 402 + 403 if len(self.e_names) > 1: + 404 print('', e_name, '\t %3.6e +/- %3.6e' % (self.e_dvalue[e_name], self.e_ddvalue[e_name])) + 405 tau_string = " \N{GREEK SMALL LETTER TAU}_int\t " + _format_uncertainty(self.e_tauint[e_name], self.e_dtauint[e_name]) + 406 tau_string += f" in units of {gap} config" + 407 if gap > 1: + 408 tau_string += "s" + 409 if self.tau_exp[e_name] > 0: + 410 tau_string = f"{tau_string: <45}" + '\t(\N{GREEK SMALL LETTER TAU}_exp=%3.2f, N_\N{GREEK SMALL LETTER SIGMA}=%1.0i)' % (self.tau_exp[e_name], self.N_sigma[e_name]) 411 else: - 412 gap = np.min(np.diff(self.idl[e_content[e_name][0]])) - 413 - 414 if len(self.e_names) > 1: - 415 print('', e_name, '\t %3.6e +/- %3.6e' % (self.e_dvalue[e_name], self.e_ddvalue[e_name])) - 416 tau_string = " \N{GREEK SMALL LETTER TAU}_int\t " + _format_uncertainty(self.e_tauint[e_name], self.e_dtauint[e_name]) - 417 tau_string += f" in units of {gap} config" - 418 if gap > 1: - 419 tau_string += "s" - 420 if self.tau_exp[e_name] > 0: - 421 tau_string = f"{tau_string: <45}" + '\t(\N{GREEK SMALL LETTER TAU}_exp=%3.2f, N_\N{GREEK SMALL LETTER SIGMA}=%1.0i)' % (self.tau_exp[e_name], self.N_sigma[e_name]) - 422 else: - 423 tau_string = f"{tau_string: <45}" + '\t(S=%3.2f)' % (self.S[e_name]) - 424 print(tau_string) - 425 for e_name in self.cov_names: - 426 print('', e_name, '\t %3.8e' % (self.e_dvalue[e_name])) - 427 if ens_content is True: - 428 if len(self.e_names) == 1: - 429 print(self.N, 'samples in', len(self.e_names), 'ensemble:') - 430 else: - 431 
print(self.N, 'samples in', len(self.e_names), 'ensembles:') - 432 my_string_list = [] - 433 for key, value in sorted(self.e_content.items()): - 434 if key not in self.covobs: - 435 my_string = ' ' + "\u00B7 Ensemble '" + key + "' " - 436 if len(value) == 1: - 437 my_string += f': {self.shape[value[0]]} configurations' - 438 if isinstance(self.idl[value[0]], range): - 439 my_string += f' (from {self.idl[value[0]].start} to {self.idl[value[0]][-1]}' + int(self.idl[value[0]].step != 1) * f' in steps of {self.idl[value[0]].step}' + ')' - 440 else: - 441 my_string += f' (irregular range from {self.idl[value[0]][0]} to {self.idl[value[0]][-1]})' - 442 else: - 443 sublist = [] - 444 for v in value: - 445 my_substring = ' ' + "\u00B7 Replicum '" + v[len(key) + 1:] + "' " - 446 my_substring += f': {self.shape[v]} configurations' - 447 if isinstance(self.idl[v], range): - 448 my_substring += f' (from {self.idl[v].start} to {self.idl[v][-1]}' + int(self.idl[v].step != 1) * f' in steps of {self.idl[v].step}' + ')' - 449 else: - 450 my_substring += f' (irregular range from {self.idl[v][0]} to {self.idl[v][-1]})' - 451 sublist.append(my_substring) - 452 - 453 my_string += '\n' + '\n'.join(sublist) - 454 else: - 455 my_string = ' ' + "\u00B7 Covobs '" + key + "' " - 456 my_string_list.append(my_string) - 457 print('\n'.join(my_string_list)) - 458 - 459 def reweight(self, weight): - 460 """Reweight the obs with given rewighting factors. - 461 - 462 Parameters - 463 ---------- - 464 weight : Obs - 465 Reweighting factor. An Observable that has to be defined on a superset of the - 466 configurations in obs[i].idl for all i. - 467 all_configs : bool - 468 if True, the reweighted observables are normalized by the average of - 469 the reweighting factor on all configurations in weight.idl and not - 470 on the configurations in obs[i].idl. Default False. - 471 """ - 472 return reweight(weight, [self])[0] - 473 - 474 def is_zero_within_error(self, sigma=1): - 475 """Checks whether the observable is zero within 'sigma' standard errors. - 476 - 477 Parameters - 478 ---------- - 479 sigma : int - 480 Number of standard errors used for the check. - 481 - 482 Works only properly when the gamma method was run. - 483 """ - 484 return self.is_zero() or np.abs(self.value) <= sigma * self._dvalue - 485 - 486 def is_zero(self, atol=1e-10): - 487 """Checks whether the observable is zero within a given tolerance. - 488 - 489 Parameters - 490 ---------- - 491 atol : float - 492 Absolute tolerance (for details see numpy documentation). 
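For the zero checks documented above, a hedged usage sketch (assumes pyerrors is importable as pe; the ensemble name and samples are placeholders):

    import numpy as np
    import pyerrors as pe

    samples = np.random.default_rng(1).normal(0.0, 1.0, 500)
    obs = pe.Obs([samples], ['ensemble_A'])   # placeholder ensemble name
    obs.gamma_method()                        # error analysis must run first
    print(obs.is_zero_within_error(sigma=2))  # True if |value| <= 2 * dvalue
    print(obs.is_zero(atol=1e-10))            # strict zero within tolerance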
- 493 """ - 494 return np.isclose(0.0, self.value, 1e-14, atol) and all(np.allclose(0.0, delta, 1e-14, atol) for delta in self.deltas.values()) and all(np.allclose(0.0, delta.errsq(), 1e-14, atol) for delta in self.covobs.values()) + 412 tau_string = f"{tau_string: <45}" + '\t(S=%3.2f)' % (self.S[e_name]) + 413 print(tau_string) + 414 for e_name in self.cov_names: + 415 print('', e_name, '\t %3.8e' % (self.e_dvalue[e_name])) + 416 if ens_content is True: + 417 if len(self.e_names) == 1: + 418 print(self.N, 'samples in', len(self.e_names), 'ensemble:') + 419 else: + 420 print(self.N, 'samples in', len(self.e_names), 'ensembles:') + 421 my_string_list = [] + 422 for key, value in sorted(self.e_content.items()): + 423 if key not in self.covobs: + 424 my_string = ' ' + "\u00B7 Ensemble '" + key + "' " + 425 if len(value) == 1: + 426 my_string += f': {self.shape[value[0]]} configurations' + 427 if isinstance(self.idl[value[0]], range): + 428 my_string += f' (from {self.idl[value[0]].start} to {self.idl[value[0]][-1]}' + int(self.idl[value[0]].step != 1) * f' in steps of {self.idl[value[0]].step}' + ')' + 429 else: + 430 my_string += f' (irregular range from {self.idl[value[0]][0]} to {self.idl[value[0]][-1]})' + 431 else: + 432 sublist = [] + 433 for v in value: + 434 my_substring = ' ' + "\u00B7 Replicum '" + v[len(key) + 1:] + "' " + 435 my_substring += f': {self.shape[v]} configurations' + 436 if isinstance(self.idl[v], range): + 437 my_substring += f' (from {self.idl[v].start} to {self.idl[v][-1]}' + int(self.idl[v].step != 1) * f' in steps of {self.idl[v].step}' + ')' + 438 else: + 439 my_substring += f' (irregular range from {self.idl[v][0]} to {self.idl[v][-1]})' + 440 sublist.append(my_substring) + 441 + 442 my_string += '\n' + '\n'.join(sublist) + 443 else: + 444 my_string = ' ' + "\u00B7 Covobs '" + key + "' " + 445 my_string_list.append(my_string) + 446 print('\n'.join(my_string_list)) + 447 + 448 def reweight(self, weight): + 449 """Reweight the obs with given reweighting factors. + 450 + 451 Parameters + 452 ---------- + 453 weight : Obs + 454 Reweighting factor. An Observable that has to be defined on a superset of the + 455 configurations in obs[i].idl for all i. + 456 all_configs : bool + 457 if True, the reweighted observables are normalized by the average of + 458 the reweighting factor on all configurations in weight.idl and not + 459 on the configurations in obs[i].idl. Default False. + 460 """ + 461 return reweight(weight, [self])[0] + 462 + 463 def is_zero_within_error(self, sigma=1): + 464 """Checks whether the observable is zero within 'sigma' standard errors. + 465 + 466 Parameters + 467 ---------- + 468 sigma : int + 469 Number of standard errors used for the check. + 470 + 471 Only works properly when the gamma method has been run. + 472 """ + 473 return self.is_zero() or np.abs(self.value) <= sigma * self._dvalue + 474 + 475 def is_zero(self, atol=1e-10): + 476 """Checks whether the observable is zero within a given tolerance. + 477 + 478 Parameters + 479 ---------- + 480 atol : float + 481 Absolute tolerance (for details see numpy documentation). + 482 """ + 483 return np.isclose(0.0, self.value, 1e-14, atol) and all(np.allclose(0.0, delta, 1e-14, atol) for delta in self.deltas.values()) and all(np.allclose(0.0, delta.errsq(), 1e-14, atol) for delta in self.covobs.values()) + 484 + 485 def plot_tauint(self, save=None): + 486 """Plot integrated autocorrelation time for each ensemble.
+ 487 + 488 Parameters + 489 ---------- + 490 save : str + 491 saves the figure to a file named 'save' if. + 492 """ + 493 if not hasattr(self, 'e_dvalue'): + 494 raise Exception('Run the gamma method first.') 495 - 496 def plot_tauint(self, save=None): - 497 """Plot integrated autocorrelation time for each ensemble. - 498 - 499 Parameters - 500 ---------- - 501 save : str - 502 saves the figure to a file named 'save' if. - 503 """ - 504 if not hasattr(self, 'e_dvalue'): - 505 raise Exception('Run the gamma method first.') - 506 - 507 for e, e_name in enumerate(self.mc_names): - 508 fig = plt.figure() - 509 plt.xlabel(r'$W$') - 510 plt.ylabel(r'$\tau_\mathrm{int}$') - 511 length = int(len(self.e_n_tauint[e_name])) - 512 if self.tau_exp[e_name] > 0: - 513 base = self.e_n_tauint[e_name][self.e_windowsize[e_name]] - 514 x_help = np.arange(2 * self.tau_exp[e_name]) - 515 y_help = (x_help + 1) * np.abs(self.e_rho[e_name][self.e_windowsize[e_name] + 1]) * (1 - x_help / (2 * (2 * self.tau_exp[e_name] - 1))) + base - 516 x_arr = np.arange(self.e_windowsize[e_name] + 1, self.e_windowsize[e_name] + 1 + 2 * self.tau_exp[e_name]) - 517 plt.plot(x_arr, y_help, 'C' + str(e), linewidth=1, ls='--', marker=',') - 518 plt.errorbar([self.e_windowsize[e_name] + 2 * self.tau_exp[e_name]], [self.e_tauint[e_name]], - 519 yerr=[self.e_dtauint[e_name]], fmt='C' + str(e), linewidth=1, capsize=2, marker='o', mfc=plt.rcParams['axes.facecolor']) - 520 xmax = self.e_windowsize[e_name] + 2 * self.tau_exp[e_name] + 1.5 - 521 label = e_name + r', $\tau_\mathrm{exp}$=' + str(np.around(self.tau_exp[e_name], decimals=2)) - 522 else: - 523 label = e_name + ', S=' + str(np.around(self.S[e_name], decimals=2)) - 524 xmax = max(10.5, 2 * self.e_windowsize[e_name] - 0.5) - 525 - 526 plt.errorbar(np.arange(length)[:int(xmax) + 1], self.e_n_tauint[e_name][:int(xmax) + 1], yerr=self.e_n_dtauint[e_name][:int(xmax) + 1], linewidth=1, capsize=2, label=label) - 527 plt.axvline(x=self.e_windowsize[e_name], color='C' + str(e), alpha=0.5, marker=',', ls='--') - 528 plt.legend() - 529 plt.xlim(-0.5, xmax) - 530 ylim = plt.ylim() - 531 plt.ylim(bottom=0.0, top=max(1.0, ylim[1])) - 532 plt.draw() - 533 if save: - 534 fig.savefig(save + "_" + str(e)) - 535 - 536 def plot_rho(self, save=None): - 537 """Plot normalized autocorrelation function time for each ensemble. - 538 - 539 Parameters - 540 ---------- - 541 save : str - 542 saves the figure to a file named 'save' if. 
- 543 """ - 544 if not hasattr(self, 'e_dvalue'): - 545 raise Exception('Run the gamma method first.') - 546 for e, e_name in enumerate(self.mc_names): - 547 fig = plt.figure() - 548 plt.xlabel('W') - 549 plt.ylabel('rho') - 550 length = int(len(self.e_drho[e_name])) - 551 plt.errorbar(np.arange(length), self.e_rho[e_name][:length], yerr=self.e_drho[e_name][:], linewidth=1, capsize=2) - 552 plt.axvline(x=self.e_windowsize[e_name], color='r', alpha=0.25, ls='--', marker=',') - 553 if self.tau_exp[e_name] > 0: - 554 plt.plot([self.e_windowsize[e_name] + 1, self.e_windowsize[e_name] + 1 + 2 * self.tau_exp[e_name]], - 555 [self.e_rho[e_name][self.e_windowsize[e_name] + 1], 0], 'k-', lw=1) - 556 xmax = self.e_windowsize[e_name] + 2 * self.tau_exp[e_name] + 1.5 - 557 plt.title('Rho ' + e_name + r', tau\_exp=' + str(np.around(self.tau_exp[e_name], decimals=2))) - 558 else: - 559 xmax = max(10.5, 2 * self.e_windowsize[e_name] - 0.5) - 560 plt.title('Rho ' + e_name + ', S=' + str(np.around(self.S[e_name], decimals=2))) - 561 plt.plot([-0.5, xmax], [0, 0], 'k--', lw=1) - 562 plt.xlim(-0.5, xmax) - 563 plt.draw() - 564 if save: - 565 fig.savefig(save + "_" + str(e)) - 566 - 567 def plot_rep_dist(self): - 568 """Plot replica distribution for each ensemble with more than one replicum.""" - 569 if not hasattr(self, 'e_dvalue'): - 570 raise Exception('Run the gamma method first.') - 571 for e, e_name in enumerate(self.mc_names): - 572 if len(self.e_content[e_name]) == 1: - 573 print('No replica distribution for a single replicum (', e_name, ')') - 574 continue - 575 r_length = [] - 576 sub_r_mean = 0 - 577 for r, r_name in enumerate(self.e_content[e_name]): - 578 r_length.append(len(self.deltas[r_name])) - 579 sub_r_mean += self.shape[r_name] * self.r_values[r_name] - 580 e_N = np.sum(r_length) - 581 sub_r_mean /= e_N - 582 arr = np.zeros(len(self.e_content[e_name])) - 583 for r, r_name in enumerate(self.e_content[e_name]): - 584 arr[r] = (self.r_values[r_name] - sub_r_mean) / (self.e_dvalue[e_name] * np.sqrt(e_N / self.shape[r_name] - 1)) - 585 plt.hist(arr, rwidth=0.8, bins=len(self.e_content[e_name])) - 586 plt.title('Replica distribution' + e_name + ' (mean=0, var=1)') - 587 plt.draw() - 588 - 589 def plot_history(self, expand=True): - 590 """Plot derived Monte Carlo history for each ensemble - 591 - 592 Parameters - 593 ---------- - 594 expand : bool - 595 show expanded history for irregular Monte Carlo chains (default: True). 
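The expand option above relies on filling holes of an irregular chain with zeros; a standalone toy version of that expansion (mirroring _expand_deltas for a list-valued idx, helper name illustrative):

    import numpy as np

    def expand(deltas, idx):
        # Place each fluctuation at its configuration number; holes become 0.
        ret = np.zeros(idx[-1] - idx[0] + 1)
        ret[np.asarray(idx) - idx[0]] = deltas
        return ret

    print(expand(np.array([1.0, 2.0, 3.0]), [4, 6, 9]))  # [1. 0. 2. 0. 0. 3.]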
- 596 """ - 597 for e, e_name in enumerate(self.mc_names): - 598 plt.figure() - 599 r_length = [] - 600 tmp = [] - 601 tmp_expanded = [] - 602 for r, r_name in enumerate(self.e_content[e_name]): - 603 tmp.append(self.deltas[r_name] + self.r_values[r_name]) - 604 if expand: - 605 tmp_expanded.append(_expand_deltas(self.deltas[r_name], list(self.idl[r_name]), self.shape[r_name]) + self.r_values[r_name]) - 606 r_length.append(len(tmp_expanded[-1])) - 607 else: - 608 r_length.append(len(tmp[-1])) - 609 e_N = np.sum(r_length) - 610 x = np.arange(e_N) - 611 y_test = np.concatenate(tmp, axis=0) - 612 if expand: - 613 y = np.concatenate(tmp_expanded, axis=0) - 614 else: - 615 y = y_test - 616 plt.errorbar(x, y, fmt='.', markersize=3) - 617 plt.xlim(-0.5, e_N - 0.5) - 618 plt.title(e_name + f'\nskew: {skew(y_test):.3f} (p={skewtest(y_test).pvalue:.3f}), kurtosis: {kurtosis(y_test):.3f} (p={kurtosistest(y_test).pvalue:.3f})') - 619 plt.draw() - 620 - 621 def plot_piechart(self, save=None): - 622 """Plot piechart which shows the fractional contribution of each - 623 ensemble to the error and returns a dictionary containing the fractions. - 624 - 625 Parameters - 626 ---------- - 627 save : str - 628 saves the figure to a file named 'save' if. - 629 """ - 630 if not hasattr(self, 'e_dvalue'): - 631 raise Exception('Run the gamma method first.') - 632 if np.isclose(0.0, self._dvalue, atol=1e-15): - 633 raise Exception('Error is 0.0') - 634 labels = self.e_names - 635 sizes = [self.e_dvalue[name] ** 2 for name in labels] / self._dvalue ** 2 - 636 fig1, ax1 = plt.subplots() - 637 ax1.pie(sizes, labels=labels, startangle=90, normalize=True) - 638 ax1.axis('equal') - 639 plt.draw() - 640 if save: - 641 fig1.savefig(save) - 642 - 643 return dict(zip(self.e_names, sizes)) - 644 - 645 def dump(self, filename, datatype="json.gz", description="", **kwargs): - 646 """Dump the Obs to a file 'name' of chosen format. - 647 - 648 Parameters - 649 ---------- - 650 filename : str - 651 name of the file to be saved. - 652 datatype : str - 653 Format of the exported file. Supported formats include - 654 "json.gz" and "pickle" - 655 description : str - 656 Description for output file, only relevant for json.gz format. 
- 657 path : str - 658 specifies a custom path for the file (default '.') - 659 """ - 660 if 'path' in kwargs: - 661 file_name = kwargs.get('path') + '/' + filename - 662 else: - 663 file_name = filename - 664 - 665 if datatype == "json.gz": - 666 from .input.json import dump_to_json - 667 dump_to_json([self], file_name, description=description) - 668 elif datatype == "pickle": - 669 with open(file_name + '.p', 'wb') as fb: - 670 pickle.dump(self, fb) - 671 else: - 672 raise Exception("Unknown datatype " + str(datatype)) - 673 - 674 def export_jackknife(self): - 675 """Export jackknife samples from the Obs + 496 for e, e_name in enumerate(self.mc_names): + 497 fig = plt.figure() + 498 plt.xlabel(r'$W$') + 499 plt.ylabel(r'$\tau_\mathrm{int}$') + 500 length = int(len(self.e_n_tauint[e_name])) + 501 if self.tau_exp[e_name] > 0: + 502 base = self.e_n_tauint[e_name][self.e_windowsize[e_name]] + 503 x_help = np.arange(2 * self.tau_exp[e_name]) + 504 y_help = (x_help + 1) * np.abs(self.e_rho[e_name][self.e_windowsize[e_name] + 1]) * (1 - x_help / (2 * (2 * self.tau_exp[e_name] - 1))) + base + 505 x_arr = np.arange(self.e_windowsize[e_name] + 1, self.e_windowsize[e_name] + 1 + 2 * self.tau_exp[e_name]) + 506 plt.plot(x_arr, y_help, 'C' + str(e), linewidth=1, ls='--', marker=',') + 507 plt.errorbar([self.e_windowsize[e_name] + 2 * self.tau_exp[e_name]], [self.e_tauint[e_name]], + 508 yerr=[self.e_dtauint[e_name]], fmt='C' + str(e), linewidth=1, capsize=2, marker='o', mfc=plt.rcParams['axes.facecolor']) + 509 xmax = self.e_windowsize[e_name] + 2 * self.tau_exp[e_name] + 1.5 + 510 label = e_name + r', $\tau_\mathrm{exp}$=' + str(np.around(self.tau_exp[e_name], decimals=2)) + 511 else: + 512 label = e_name + ', S=' + str(np.around(self.S[e_name], decimals=2)) + 513 xmax = max(10.5, 2 * self.e_windowsize[e_name] - 0.5) + 514 + 515 plt.errorbar(np.arange(length)[:int(xmax) + 1], self.e_n_tauint[e_name][:int(xmax) + 1], yerr=self.e_n_dtauint[e_name][:int(xmax) + 1], linewidth=1, capsize=2, label=label) + 516 plt.axvline(x=self.e_windowsize[e_name], color='C' + str(e), alpha=0.5, marker=',', ls='--') + 517 plt.legend() + 518 plt.xlim(-0.5, xmax) + 519 ylim = plt.ylim() + 520 plt.ylim(bottom=0.0, top=max(1.0, ylim[1])) + 521 plt.draw() + 522 if save: + 523 fig.savefig(save + "_" + str(e)) + 524 + 525 def plot_rho(self, save=None): + 526 """Plot normalized autocorrelation function time for each ensemble. + 527 + 528 Parameters + 529 ---------- + 530 save : str + 531 saves the figure to a file named 'save' if. 
+ 532 """ + 533 if not hasattr(self, 'e_dvalue'): + 534 raise Exception('Run the gamma method first.') + 535 for e, e_name in enumerate(self.mc_names): + 536 fig = plt.figure() + 537 plt.xlabel('W') + 538 plt.ylabel('rho') + 539 length = int(len(self.e_drho[e_name])) + 540 plt.errorbar(np.arange(length), self.e_rho[e_name][:length], yerr=self.e_drho[e_name][:], linewidth=1, capsize=2) + 541 plt.axvline(x=self.e_windowsize[e_name], color='r', alpha=0.25, ls='--', marker=',') + 542 if self.tau_exp[e_name] > 0: + 543 plt.plot([self.e_windowsize[e_name] + 1, self.e_windowsize[e_name] + 1 + 2 * self.tau_exp[e_name]], + 544 [self.e_rho[e_name][self.e_windowsize[e_name] + 1], 0], 'k-', lw=1) + 545 xmax = self.e_windowsize[e_name] + 2 * self.tau_exp[e_name] + 1.5 + 546 plt.title('Rho ' + e_name + r', tau\_exp=' + str(np.around(self.tau_exp[e_name], decimals=2))) + 547 else: + 548 xmax = max(10.5, 2 * self.e_windowsize[e_name] - 0.5) + 549 plt.title('Rho ' + e_name + ', S=' + str(np.around(self.S[e_name], decimals=2))) + 550 plt.plot([-0.5, xmax], [0, 0], 'k--', lw=1) + 551 plt.xlim(-0.5, xmax) + 552 plt.draw() + 553 if save: + 554 fig.savefig(save + "_" + str(e)) + 555 + 556 def plot_rep_dist(self): + 557 """Plot replica distribution for each ensemble with more than one replicum.""" + 558 if not hasattr(self, 'e_dvalue'): + 559 raise Exception('Run the gamma method first.') + 560 for e, e_name in enumerate(self.mc_names): + 561 if len(self.e_content[e_name]) == 1: + 562 print('No replica distribution for a single replicum (', e_name, ')') + 563 continue + 564 r_length = [] + 565 sub_r_mean = 0 + 566 for r, r_name in enumerate(self.e_content[e_name]): + 567 r_length.append(len(self.deltas[r_name])) + 568 sub_r_mean += self.shape[r_name] * self.r_values[r_name] + 569 e_N = np.sum(r_length) + 570 sub_r_mean /= e_N + 571 arr = np.zeros(len(self.e_content[e_name])) + 572 for r, r_name in enumerate(self.e_content[e_name]): + 573 arr[r] = (self.r_values[r_name] - sub_r_mean) / (self.e_dvalue[e_name] * np.sqrt(e_N / self.shape[r_name] - 1)) + 574 plt.hist(arr, rwidth=0.8, bins=len(self.e_content[e_name])) + 575 plt.title('Replica distribution' + e_name + ' (mean=0, var=1)') + 576 plt.draw() + 577 + 578 def plot_history(self, expand=True): + 579 """Plot derived Monte Carlo history for each ensemble + 580 + 581 Parameters + 582 ---------- + 583 expand : bool + 584 show expanded history for irregular Monte Carlo chains (default: True). 
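A minimal usage sketch for the plotting helpers shown in this hunk (assumes an interactive matplotlib backend and an Obs instance obs):

    import matplotlib.pyplot as plt

    obs.gamma_method()    # required before any of the plots below
    obs.plot_tauint()     # windowed tau_int with the chosen window marked
    obs.plot_rho()        # normalized autocorrelation function
    obs.plot_history()    # Monte Carlo history, holes expanded by default
    plt.show()            # the methods themselves only call plt.draw()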
+ 585 """ + 586 for e, e_name in enumerate(self.mc_names): + 587 plt.figure() + 588 r_length = [] + 589 tmp = [] + 590 tmp_expanded = [] + 591 for r, r_name in enumerate(self.e_content[e_name]): + 592 tmp.append(self.deltas[r_name] + self.r_values[r_name]) + 593 if expand: + 594 tmp_expanded.append(_expand_deltas(self.deltas[r_name], list(self.idl[r_name]), self.shape[r_name], 1) + self.r_values[r_name]) + 595 r_length.append(len(tmp_expanded[-1])) + 596 else: + 597 r_length.append(len(tmp[-1])) + 598 e_N = np.sum(r_length) + 599 x = np.arange(e_N) + 600 y_test = np.concatenate(tmp, axis=0) + 601 if expand: + 602 y = np.concatenate(tmp_expanded, axis=0) + 603 else: + 604 y = y_test + 605 plt.errorbar(x, y, fmt='.', markersize=3) + 606 plt.xlim(-0.5, e_N - 0.5) + 607 plt.title(e_name + f'\nskew: {skew(y_test):.3f} (p={skewtest(y_test).pvalue:.3f}), kurtosis: {kurtosis(y_test):.3f} (p={kurtosistest(y_test).pvalue:.3f})') + 608 plt.draw() + 609 + 610 def plot_piechart(self, save=None): + 611 """Plot piechart which shows the fractional contribution of each + 612 ensemble to the error and returns a dictionary containing the fractions. + 613 + 614 Parameters + 615 ---------- + 616 save : str + 617 saves the figure to a file named 'save' if. + 618 """ + 619 if not hasattr(self, 'e_dvalue'): + 620 raise Exception('Run the gamma method first.') + 621 if np.isclose(0.0, self._dvalue, atol=1e-15): + 622 raise Exception('Error is 0.0') + 623 labels = self.e_names + 624 sizes = [self.e_dvalue[name] ** 2 for name in labels] / self._dvalue ** 2 + 625 fig1, ax1 = plt.subplots() + 626 ax1.pie(sizes, labels=labels, startangle=90, normalize=True) + 627 ax1.axis('equal') + 628 plt.draw() + 629 if save: + 630 fig1.savefig(save) + 631 + 632 return dict(zip(self.e_names, sizes)) + 633 + 634 def dump(self, filename, datatype="json.gz", description="", **kwargs): + 635 """Dump the Obs to a file 'name' of chosen format. + 636 + 637 Parameters + 638 ---------- + 639 filename : str + 640 name of the file to be saved. + 641 datatype : str + 642 Format of the exported file. Supported formats include + 643 "json.gz" and "pickle" + 644 description : str + 645 Description for output file, only relevant for json.gz format. + 646 path : str + 647 specifies a custom path for the file (default '.') + 648 """ + 649 if 'path' in kwargs: + 650 file_name = kwargs.get('path') + '/' + filename + 651 else: + 652 file_name = filename + 653 + 654 if datatype == "json.gz": + 655 from .input.json import dump_to_json + 656 dump_to_json([self], file_name, description=description) + 657 elif datatype == "pickle": + 658 with open(file_name + '.p', 'wb') as fb: + 659 pickle.dump(self, fb) + 660 else: + 661 raise Exception("Unknown datatype " + str(datatype)) + 662 + 663 def export_jackknife(self): + 664 """Export jackknife samples from the Obs + 665 + 666 Returns + 667 ------- + 668 numpy.ndarray + 669 Returns a numpy array of length N + 1 where N is the number of samples + 670 for the given ensemble and replicum. The zeroth entry of the array contains + 671 the mean value of the Obs, entries 1 to N contain the N jackknife samples + 672 derived from the Obs. The current implementation only works for observables + 673 defined on exactly one ensemble and replicum. The derived jackknife samples + 674 should agree with samples from a full jackknife analysis up to O(1/N). 
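The leave-one-out construction in the method body below can be checked against the textbook jackknife error in a few lines (toy data, standalone sketch):

    import numpy as np

    data = np.random.default_rng(2).normal(1.0, 0.1, 50)
    n, mean = data.size, data.mean()
    jacks = np.empty(n + 1)
    jacks[0] = mean
    jacks[1:] = (n * mean - data) / (n - 1)   # leave-one-out means
    # Standard jackknife error estimate, comparable to gamma_method output
    err = np.sqrt((n - 1) * np.mean((jacks[1:] - mean) ** 2))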
+ 675 """ 676 - 677 Returns - 678 ------- - 679 numpy.ndarray - 680 Returns a numpy array of length N + 1 where N is the number of samples - 681 for the given ensemble and replicum. The zeroth entry of the array contains - 682 the mean value of the Obs, entries 1 to N contain the N jackknife samples - 683 derived from the Obs. The current implementation only works for observables - 684 defined on exactly one ensemble and replicum. The derived jackknife samples - 685 should agree with samples from a full jackknife analysis up to O(1/N). - 686 """ - 687 - 688 if len(self.names) != 1: - 689 raise Exception("'export_jackknife' is only implemented for Obs defined on one ensemble and replicum.") - 690 - 691 name = self.names[0] - 692 full_data = self.deltas[name] + self.r_values[name] - 693 n = full_data.size - 694 mean = self.value - 695 tmp_jacks = np.zeros(n + 1) - 696 tmp_jacks[0] = mean - 697 tmp_jacks[1:] = (n * mean - full_data) / (n - 1) - 698 return tmp_jacks - 699 - 700 def __float__(self): - 701 return float(self.value) - 702 - 703 def __repr__(self): - 704 return 'Obs[' + str(self) + ']' - 705 - 706 def __str__(self): - 707 return _format_uncertainty(self.value, self._dvalue) - 708 - 709 def __hash__(self): - 710 hash_tuple = (np.array([self.value]).astype(np.float32).data.tobytes(),) - 711 hash_tuple += tuple([o.astype(np.float32).data.tobytes() for o in self.deltas.values()]) - 712 hash_tuple += tuple([np.array([o.errsq()]).astype(np.float32).data.tobytes() for o in self.covobs.values()]) - 713 hash_tuple += tuple([o.encode() for o in self.names]) - 714 m = hashlib.md5() - 715 [m.update(o) for o in hash_tuple] - 716 return int(m.hexdigest(), 16) & 0xFFFFFFFF - 717 - 718 # Overload comparisons - 719 def __lt__(self, other): - 720 return self.value < other - 721 - 722 def __le__(self, other): - 723 return self.value <= other - 724 - 725 def __gt__(self, other): - 726 return self.value > other - 727 - 728 def __ge__(self, other): - 729 return self.value >= other - 730 - 731 def __eq__(self, other): - 732 return (self - other).is_zero() - 733 - 734 def __ne__(self, other): - 735 return not (self - other).is_zero() - 736 - 737 # Overload math operations - 738 def __add__(self, y): - 739 if isinstance(y, Obs): - 740 return derived_observable(lambda x, **kwargs: x[0] + x[1], [self, y], man_grad=[1, 1]) - 741 else: - 742 if isinstance(y, np.ndarray): - 743 return np.array([self + o for o in y]) - 744 elif y.__class__.__name__ in ['Corr', 'CObs']: - 745 return NotImplemented - 746 else: - 747 return derived_observable(lambda x, **kwargs: x[0] + y, [self], man_grad=[1]) - 748 - 749 def __radd__(self, y): - 750 return self + y - 751 - 752 def __mul__(self, y): - 753 if isinstance(y, Obs): - 754 return derived_observable(lambda x, **kwargs: x[0] * x[1], [self, y], man_grad=[y.value, self.value]) - 755 else: - 756 if isinstance(y, np.ndarray): - 757 return np.array([self * o for o in y]) - 758 elif isinstance(y, complex): - 759 return CObs(self * y.real, self * y.imag) - 760 elif y.__class__.__name__ in ['Corr', 'CObs']: - 761 return NotImplemented - 762 else: - 763 return derived_observable(lambda x, **kwargs: x[0] * y, [self], man_grad=[y]) - 764 - 765 def __rmul__(self, y): - 766 return self * y + 677 if len(self.names) != 1: + 678 raise Exception("'export_jackknife' is only implemented for Obs defined on one ensemble and replicum.") + 679 + 680 name = self.names[0] + 681 full_data = self.deltas[name] + self.r_values[name] + 682 n = full_data.size + 683 mean = self.value + 684 tmp_jacks = 
np.zeros(n + 1) + 685 tmp_jacks[0] = mean + 686 tmp_jacks[1:] = (n * mean - full_data) / (n - 1) + 687 return tmp_jacks + 688 + 689 def __float__(self): + 690 return float(self.value) + 691 + 692 def __repr__(self): + 693 return 'Obs[' + str(self) + ']' + 694 + 695 def __str__(self): + 696 return _format_uncertainty(self.value, self._dvalue) + 697 + 698 def __hash__(self): + 699 hash_tuple = (np.array([self.value]).astype(np.float32).data.tobytes(),) + 700 hash_tuple += tuple([o.astype(np.float32).data.tobytes() for o in self.deltas.values()]) + 701 hash_tuple += tuple([np.array([o.errsq()]).astype(np.float32).data.tobytes() for o in self.covobs.values()]) + 702 hash_tuple += tuple([o.encode() for o in self.names]) + 703 m = hashlib.md5() + 704 [m.update(o) for o in hash_tuple] + 705 return int(m.hexdigest(), 16) & 0xFFFFFFFF + 706 + 707 # Overload comparisons + 708 def __lt__(self, other): + 709 return self.value < other + 710 + 711 def __le__(self, other): + 712 return self.value <= other + 713 + 714 def __gt__(self, other): + 715 return self.value > other + 716 + 717 def __ge__(self, other): + 718 return self.value >= other + 719 + 720 def __eq__(self, other): + 721 return (self - other).is_zero() + 722 + 723 def __ne__(self, other): + 724 return not (self - other).is_zero() + 725 + 726 # Overload math operations + 727 def __add__(self, y): + 728 if isinstance(y, Obs): + 729 return derived_observable(lambda x, **kwargs: x[0] + x[1], [self, y], man_grad=[1, 1]) + 730 else: + 731 if isinstance(y, np.ndarray): + 732 return np.array([self + o for o in y]) + 733 elif y.__class__.__name__ in ['Corr', 'CObs']: + 734 return NotImplemented + 735 else: + 736 return derived_observable(lambda x, **kwargs: x[0] + y, [self], man_grad=[1]) + 737 + 738 def __radd__(self, y): + 739 return self + y + 740 + 741 def __mul__(self, y): + 742 if isinstance(y, Obs): + 743 return derived_observable(lambda x, **kwargs: x[0] * x[1], [self, y], man_grad=[y.value, self.value]) + 744 else: + 745 if isinstance(y, np.ndarray): + 746 return np.array([self * o for o in y]) + 747 elif isinstance(y, complex): + 748 return CObs(self * y.real, self * y.imag) + 749 elif y.__class__.__name__ in ['Corr', 'CObs']: + 750 return NotImplemented + 751 else: + 752 return derived_observable(lambda x, **kwargs: x[0] * y, [self], man_grad=[y]) + 753 + 754 def __rmul__(self, y): + 755 return self * y + 756 + 757 def __sub__(self, y): + 758 if isinstance(y, Obs): + 759 return derived_observable(lambda x, **kwargs: x[0] - x[1], [self, y], man_grad=[1, -1]) + 760 else: + 761 if isinstance(y, np.ndarray): + 762 return np.array([self - o for o in y]) + 763 elif y.__class__.__name__ in ['Corr', 'CObs']: + 764 return NotImplemented + 765 else: + 766 return derived_observable(lambda x, **kwargs: x[0] - y, [self], man_grad=[1]) 767 - 768 def __sub__(self, y): - 769 if isinstance(y, Obs): - 770 return derived_observable(lambda x, **kwargs: x[0] - x[1], [self, y], man_grad=[1, -1]) - 771 else: - 772 if isinstance(y, np.ndarray): - 773 return np.array([self - o for o in y]) - 774 elif y.__class__.__name__ in ['Corr', 'CObs']: - 775 return NotImplemented - 776 else: - 777 return derived_observable(lambda x, **kwargs: x[0] - y, [self], man_grad=[1]) - 778 - 779 def __rsub__(self, y): - 780 return -1 * (self - y) - 781 - 782 def __pos__(self): - 783 return self - 784 - 785 def __neg__(self): - 786 return -1 * self + 768 def __rsub__(self, y): + 769 return -1 * (self - y) + 770 + 771 def __pos__(self): + 772 return self + 773 + 774 def __neg__(self): + 
775 return -1 * self + 776 + 777 def __truediv__(self, y): + 778 if isinstance(y, Obs): + 779 return derived_observable(lambda x, **kwargs: x[0] / x[1], [self, y], man_grad=[1 / y.value, - self.value / y.value ** 2]) + 780 else: + 781 if isinstance(y, np.ndarray): + 782 return np.array([self / o for o in y]) + 783 elif y.__class__.__name__ in ['Corr', 'CObs']: + 784 return NotImplemented + 785 else: + 786 return derived_observable(lambda x, **kwargs: x[0] / y, [self], man_grad=[1 / y]) 787 - 788 def __truediv__(self, y): + 788 def __rtruediv__(self, y): 789 if isinstance(y, Obs): - 790 return derived_observable(lambda x, **kwargs: x[0] / x[1], [self, y], man_grad=[1 / y.value, - self.value / y.value ** 2]) + 790 return derived_observable(lambda x, **kwargs: x[0] / x[1], [y, self], man_grad=[1 / self.value, - y.value / self.value ** 2]) 791 else: 792 if isinstance(y, np.ndarray): - 793 return np.array([self / o for o in y]) + 793 return np.array([o / self for o in y]) 794 elif y.__class__.__name__ in ['Corr', 'CObs']: 795 return NotImplemented 796 else: - 797 return derived_observable(lambda x, **kwargs: x[0] / y, [self], man_grad=[1 / y]) + 797 return derived_observable(lambda x, **kwargs: y / x[0], [self], man_grad=[-y / self.value ** 2]) 798 - 799 def __rtruediv__(self, y): + 799 def __pow__(self, y): 800 if isinstance(y, Obs): - 801 return derived_observable(lambda x, **kwargs: x[0] / x[1], [y, self], man_grad=[1 / self.value, - y.value / self.value ** 2]) + 801 return derived_observable(lambda x: x[0] ** x[1], [self, y]) 802 else: - 803 if isinstance(y, np.ndarray): - 804 return np.array([o / self for o in y]) - 805 elif y.__class__.__name__ in ['Corr', 'CObs']: - 806 return NotImplemented - 807 else: - 808 return derived_observable(lambda x, **kwargs: y / x[0], [self], man_grad=[-y / self.value ** 2]) - 809 - 810 def __pow__(self, y): - 811 if isinstance(y, Obs): - 812 return derived_observable(lambda x: x[0] ** x[1], [self, y]) - 813 else: - 814 return derived_observable(lambda x: x[0] ** y, [self]) - 815 - 816 def __rpow__(self, y): - 817 if isinstance(y, Obs): - 818 return derived_observable(lambda x: x[0] ** x[1], [y, self]) - 819 else: - 820 return derived_observable(lambda x: y ** x[0], [self]) - 821 - 822 def __abs__(self): - 823 return derived_observable(lambda x: anp.abs(x[0]), [self]) - 824 - 825 # Overload numpy functions - 826 def sqrt(self): - 827 return derived_observable(lambda x, **kwargs: np.sqrt(x[0]), [self], man_grad=[1 / 2 / np.sqrt(self.value)]) - 828 - 829 def log(self): - 830 return derived_observable(lambda x, **kwargs: np.log(x[0]), [self], man_grad=[1 / self.value]) - 831 - 832 def exp(self): - 833 return derived_observable(lambda x, **kwargs: np.exp(x[0]), [self], man_grad=[np.exp(self.value)]) - 834 - 835 def sin(self): - 836 return derived_observable(lambda x, **kwargs: np.sin(x[0]), [self], man_grad=[np.cos(self.value)]) - 837 - 838 def cos(self): - 839 return derived_observable(lambda x, **kwargs: np.cos(x[0]), [self], man_grad=[-np.sin(self.value)]) - 840 - 841 def tan(self): - 842 return derived_observable(lambda x, **kwargs: np.tan(x[0]), [self], man_grad=[1 / np.cos(self.value) ** 2]) - 843 - 844 def arcsin(self): - 845 return derived_observable(lambda x: anp.arcsin(x[0]), [self]) - 846 - 847 def arccos(self): - 848 return derived_observable(lambda x: anp.arccos(x[0]), [self]) - 849 - 850 def arctan(self): - 851 return derived_observable(lambda x: anp.arctan(x[0]), [self]) - 852 - 853 def sinh(self): - 854 return derived_observable(lambda x, 
**kwargs: np.sinh(x[0]), [self], man_grad=[np.cosh(self.value)]) - 855 - 856 def cosh(self): - 857 return derived_observable(lambda x, **kwargs: np.cosh(x[0]), [self], man_grad=[np.sinh(self.value)]) - 858 - 859 def tanh(self): - 860 return derived_observable(lambda x, **kwargs: np.tanh(x[0]), [self], man_grad=[1 / np.cosh(self.value) ** 2]) - 861 - 862 def arcsinh(self): - 863 return derived_observable(lambda x: anp.arcsinh(x[0]), [self]) + 803 return derived_observable(lambda x: x[0] ** y, [self]) + 804 + 805 def __rpow__(self, y): + 806 if isinstance(y, Obs): + 807 return derived_observable(lambda x: x[0] ** x[1], [y, self]) + 808 else: + 809 return derived_observable(lambda x: y ** x[0], [self]) + 810 + 811 def __abs__(self): + 812 return derived_observable(lambda x: anp.abs(x[0]), [self]) + 813 + 814 # Overload numpy functions + 815 def sqrt(self): + 816 return derived_observable(lambda x, **kwargs: np.sqrt(x[0]), [self], man_grad=[1 / 2 / np.sqrt(self.value)]) + 817 + 818 def log(self): + 819 return derived_observable(lambda x, **kwargs: np.log(x[0]), [self], man_grad=[1 / self.value]) + 820 + 821 def exp(self): + 822 return derived_observable(lambda x, **kwargs: np.exp(x[0]), [self], man_grad=[np.exp(self.value)]) + 823 + 824 def sin(self): + 825 return derived_observable(lambda x, **kwargs: np.sin(x[0]), [self], man_grad=[np.cos(self.value)]) + 826 + 827 def cos(self): + 828 return derived_observable(lambda x, **kwargs: np.cos(x[0]), [self], man_grad=[-np.sin(self.value)]) + 829 + 830 def tan(self): + 831 return derived_observable(lambda x, **kwargs: np.tan(x[0]), [self], man_grad=[1 / np.cos(self.value) ** 2]) + 832 + 833 def arcsin(self): + 834 return derived_observable(lambda x: anp.arcsin(x[0]), [self]) + 835 + 836 def arccos(self): + 837 return derived_observable(lambda x: anp.arccos(x[0]), [self]) + 838 + 839 def arctan(self): + 840 return derived_observable(lambda x: anp.arctan(x[0]), [self]) + 841 + 842 def sinh(self): + 843 return derived_observable(lambda x, **kwargs: np.sinh(x[0]), [self], man_grad=[np.cosh(self.value)]) + 844 + 845 def cosh(self): + 846 return derived_observable(lambda x, **kwargs: np.cosh(x[0]), [self], man_grad=[np.sinh(self.value)]) + 847 + 848 def tanh(self): + 849 return derived_observable(lambda x, **kwargs: np.tanh(x[0]), [self], man_grad=[1 / np.cosh(self.value) ** 2]) + 850 + 851 def arcsinh(self): + 852 return derived_observable(lambda x: anp.arcsinh(x[0]), [self]) + 853 + 854 def arccosh(self): + 855 return derived_observable(lambda x: anp.arccosh(x[0]), [self]) + 856 + 857 def arctanh(self): + 858 return derived_observable(lambda x: anp.arctanh(x[0]), [self]) + 859 + 860 + 861class CObs: + 862 """Class for a complex valued observable.""" + 863 __slots__ = ['_real', '_imag', 'tag'] 864 - 865 def arccosh(self): - 866 return derived_observable(lambda x: anp.arccosh(x[0]), [self]) - 867 - 868 def arctanh(self): - 869 return derived_observable(lambda x: anp.arctanh(x[0]), [self]) - 870 - 871 - 872class CObs: - 873 """Class for a complex valued observable.""" - 874 __slots__ = ['_real', '_imag', 'tag'] - 875 - 876 def __init__(self, real, imag=0.0): - 877 self._real = real - 878 self._imag = imag - 879 self.tag = None - 880 - 881 @property - 882 def real(self): - 883 return self._real + 865 def __init__(self, real, imag=0.0): + 866 self._real = real + 867 self._imag = imag + 868 self.tag = None + 869 + 870 @property + 871 def real(self): + 872 return self._real + 873 + 874 @property + 875 def imag(self): + 876 return self._imag + 877 + 878 def 
gamma_method(self, **kwargs): + 879 """Executes the gamma_method for the real and the imaginary part.""" + 880 if isinstance(self.real, Obs): + 881 self.real.gamma_method(**kwargs) + 882 if isinstance(self.imag, Obs): + 883 self.imag.gamma_method(**kwargs) 884 - 885 @property - 886 def imag(self): - 887 return self._imag + 885 def is_zero(self): + 886 """Checks whether both real and imaginary part are zero within machine precision.""" + 887 return self.real == 0.0 and self.imag == 0.0 888 - 889 def gamma_method(self, **kwargs): - 890 """Executes the gamma_method for the real and the imaginary part.""" - 891 if isinstance(self.real, Obs): - 892 self.real.gamma_method(**kwargs) - 893 if isinstance(self.imag, Obs): - 894 self.imag.gamma_method(**kwargs) - 895 - 896 def is_zero(self): - 897 """Checks whether both real and imaginary part are zero within machine precision.""" - 898 return self.real == 0.0 and self.imag == 0.0 - 899 - 900 def conjugate(self): - 901 return CObs(self.real, -self.imag) - 902 - 903 def __add__(self, other): - 904 if isinstance(other, np.ndarray): - 905 return other + self - 906 elif hasattr(other, 'real') and hasattr(other, 'imag'): - 907 return CObs(self.real + other.real, - 908 self.imag + other.imag) + 889 def conjugate(self): + 890 return CObs(self.real, -self.imag) + 891 + 892 def __add__(self, other): + 893 if isinstance(other, np.ndarray): + 894 return other + self + 895 elif hasattr(other, 'real') and hasattr(other, 'imag'): + 896 return CObs(self.real + other.real, + 897 self.imag + other.imag) + 898 else: + 899 return CObs(self.real + other, self.imag) + 900 + 901 def __radd__(self, y): + 902 return self + y + 903 + 904 def __sub__(self, other): + 905 if isinstance(other, np.ndarray): + 906 return -1 * (other - self) + 907 elif hasattr(other, 'real') and hasattr(other, 'imag'): + 908 return CObs(self.real - other.real, self.imag - other.imag) 909 else: - 910 return CObs(self.real + other, self.imag) + 910 return CObs(self.real - other, self.imag) 911 - 912 def __radd__(self, y): - 913 return self + y + 912 def __rsub__(self, other): + 913 return -1 * (self - other) 914 - 915 def __sub__(self, other): + 915 def __mul__(self, other): 916 if isinstance(other, np.ndarray): - 917 return -1 * (other - self) + 917 return other * self 918 elif hasattr(other, 'real') and hasattr(other, 'imag'): - 919 return CObs(self.real - other.real, self.imag - other.imag) - 920 else: - 921 return CObs(self.real - other, self.imag) - 922 - 923 def __rsub__(self, other): - 924 return -1 * (self - other) - 925 - 926 def __mul__(self, other): - 927 if isinstance(other, np.ndarray): - 928 return other * self - 929 elif hasattr(other, 'real') and hasattr(other, 'imag'): - 930 if all(isinstance(i, Obs) for i in [self.real, self.imag, other.real, other.imag]): - 931 return CObs(derived_observable(lambda x, **kwargs: x[0] * x[1] - x[2] * x[3], - 932 [self.real, other.real, self.imag, other.imag], - 933 man_grad=[other.real.value, self.real.value, -other.imag.value, -self.imag.value]), - 934 derived_observable(lambda x, **kwargs: x[2] * x[1] + x[0] * x[3], - 935 [self.real, other.real, self.imag, other.imag], - 936 man_grad=[other.imag.value, self.imag.value, other.real.value, self.real.value])) - 937 elif getattr(other, 'imag', 0) != 0: - 938 return CObs(self.real * other.real - self.imag * other.imag, - 939 self.imag * other.real + self.real * other.imag) - 940 else: - 941 return CObs(self.real * other.real, self.imag * other.real) - 942 else: - 943 return CObs(self.real * other, 
self.imag * other) - 944 - 945 def __rmul__(self, other): - 946 return self * other - 947 - 948 def __truediv__(self, other): - 949 if isinstance(other, np.ndarray): - 950 return 1 / (other / self) - 951 elif hasattr(other, 'real') and hasattr(other, 'imag'): - 952 r = other.real ** 2 + other.imag ** 2 - 953 return CObs((self.real * other.real + self.imag * other.imag) / r, (self.imag * other.real - self.real * other.imag) / r) - 954 else: - 955 return CObs(self.real / other, self.imag / other) - 956 - 957 def __rtruediv__(self, other): - 958 r = self.real ** 2 + self.imag ** 2 - 959 if hasattr(other, 'real') and hasattr(other, 'imag'): - 960 return CObs((self.real * other.real + self.imag * other.imag) / r, (self.real * other.imag - self.imag * other.real) / r) - 961 else: - 962 return CObs(self.real * other / r, -self.imag * other / r) - 963 - 964 def __abs__(self): - 965 return np.sqrt(self.real**2 + self.imag**2) - 966 - 967 def __pos__(self): - 968 return self - 969 - 970 def __neg__(self): - 971 return -1 * self - 972 - 973 def __eq__(self, other): - 974 return self.real == other.real and self.imag == other.imag - 975 - 976 def __str__(self): - 977 return '(' + str(self.real) + int(self.imag >= 0.0) * '+' + str(self.imag) + 'j)' - 978 - 979 def __repr__(self): - 980 return 'CObs[' + str(self) + ']' - 981 - 982 - 983def _format_uncertainty(value, dvalue): - 984 """Creates a string of a value and its error in paranthesis notation, e.g., 13.02(45)""" - 985 if dvalue == 0.0: - 986 return str(value) - 987 fexp = np.floor(np.log10(dvalue)) - 988 if fexp < 0.0: - 989 return '{:{form}}({:2.0f})'.format(value, dvalue * 10 ** (-fexp + 1), form='.' + str(-int(fexp) + 1) + 'f') - 990 elif fexp == 0.0: - 991 return '{:.1f}({:1.1f})'.format(value, dvalue) - 992 else: - 993 return '{:.0f}({:2.0f})'.format(value, dvalue) - 994 - 995 - 996def _expand_deltas(deltas, idx, shape): - 997 """Expand deltas defined on idx to a regular, contiguous range, where holes are filled by 0. - 998 If idx is of type range, the deltas are not changed - 999 -1000 Parameters -1001 ---------- -1002 deltas : list -1003 List of fluctuations -1004 idx : list -1005 List or range of configs on which the deltas are defined, has to be sorted in ascending order. -1006 shape : int -1007 Number of configs in idx. -1008 """ -1009 if isinstance(idx, range): -1010 return deltas -1011 else: -1012 ret = np.zeros(idx[-1] - idx[0] + 1) -1013 for i in range(shape): -1014 ret[idx[i] - idx[0]] = deltas[i] -1015 return ret -1016 -1017 -1018def _merge_idx(idl): -1019 """Returns the union of all lists in idl as sorted list -1020 -1021 Parameters -1022 ---------- -1023 idl : list -1024 List of lists or ranges. 
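As a toy illustration of the union and intersection semantics implemented by _merge_idx and _intersection_idx (standalone sketch using the generic set fallback and the lcm step of the range fast path):

    from functools import reduce
    from math import gcd

    idl = [range(1, 101, 2), range(1, 101, 5)]
    merged = sorted(set().union(*idl))           # generic union fallback
    common = sorted(set(idl[0]) & set(idl[1]))   # generic intersection
    step = reduce(lambda a, b: a * b // gcd(a, b), [r.step for r in idl])
    assert common == list(range(1, 101, step))   # lcm(2, 5) = 10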
-1025 """ -1026 -1027 # Use groupby to efficiently check whether all elements of idl are identical -1028 try: -1029 g = groupby(idl) -1030 if next(g, True) and not next(g, False): -1031 return idl[0] -1032 except Exception: -1033 pass + 919 if all(isinstance(i, Obs) for i in [self.real, self.imag, other.real, other.imag]): + 920 return CObs(derived_observable(lambda x, **kwargs: x[0] * x[1] - x[2] * x[3], + 921 [self.real, other.real, self.imag, other.imag], + 922 man_grad=[other.real.value, self.real.value, -other.imag.value, -self.imag.value]), + 923 derived_observable(lambda x, **kwargs: x[2] * x[1] + x[0] * x[3], + 924 [self.real, other.real, self.imag, other.imag], + 925 man_grad=[other.imag.value, self.imag.value, other.real.value, self.real.value])) + 926 elif getattr(other, 'imag', 0) != 0: + 927 return CObs(self.real * other.real - self.imag * other.imag, + 928 self.imag * other.real + self.real * other.imag) + 929 else: + 930 return CObs(self.real * other.real, self.imag * other.real) + 931 else: + 932 return CObs(self.real * other, self.imag * other) + 933 + 934 def __rmul__(self, other): + 935 return self * other + 936 + 937 def __truediv__(self, other): + 938 if isinstance(other, np.ndarray): + 939 return 1 / (other / self) + 940 elif hasattr(other, 'real') and hasattr(other, 'imag'): + 941 r = other.real ** 2 + other.imag ** 2 + 942 return CObs((self.real * other.real + self.imag * other.imag) / r, (self.imag * other.real - self.real * other.imag) / r) + 943 else: + 944 return CObs(self.real / other, self.imag / other) + 945 + 946 def __rtruediv__(self, other): + 947 r = self.real ** 2 + self.imag ** 2 + 948 if hasattr(other, 'real') and hasattr(other, 'imag'): + 949 return CObs((self.real * other.real + self.imag * other.imag) / r, (self.real * other.imag - self.imag * other.real) / r) + 950 else: + 951 return CObs(self.real * other / r, -self.imag * other / r) + 952 + 953 def __abs__(self): + 954 return np.sqrt(self.real**2 + self.imag**2) + 955 + 956 def __pos__(self): + 957 return self + 958 + 959 def __neg__(self): + 960 return -1 * self + 961 + 962 def __eq__(self, other): + 963 return self.real == other.real and self.imag == other.imag + 964 + 965 def __str__(self): + 966 return '(' + str(self.real) + int(self.imag >= 0.0) * '+' + str(self.imag) + 'j)' + 967 + 968 def __repr__(self): + 969 return 'CObs[' + str(self) + ']' + 970 + 971 + 972def _format_uncertainty(value, dvalue): + 973 """Creates a string of a value and its error in paranthesis notation, e.g., 13.02(45)""" + 974 if dvalue == 0.0: + 975 return str(value) + 976 fexp = np.floor(np.log10(dvalue)) + 977 if fexp < 0.0: + 978 return '{:{form}}({:2.0f})'.format(value, dvalue * 10 ** (-fexp + 1), form='.' + str(-int(fexp) + 1) + 'f') + 979 elif fexp == 0.0: + 980 return '{:.1f}({:1.1f})'.format(value, dvalue) + 981 else: + 982 return '{:.0f}({:2.0f})'.format(value, dvalue) + 983 + 984 + 985def _expand_deltas(deltas, idx, shape, gapsize): + 986 """Expand deltas defined on idx to a regular range with spacing gapsize between two + 987 configurations and where holes are filled by 0. + 988 If idx is of type range, the deltas are not changed if the idx.step == gapsize. + 989 + 990 Parameters + 991 ---------- + 992 deltas : list + 993 List of fluctuations + 994 idx : list + 995 List or range of configs on which the deltas are defined, has to be sorted in ascending order. + 996 shape : int + 997 Number of configs in idx. + 998 gapsize : int + 999 The target distance between two configurations. 
If longer distances +1000 are found in idx, the data is expanded. +1001 """ +1002 if isinstance(idx, range): +1003 if (idx.step == gapsize): +1004 return deltas +1005 ret = np.zeros((idx[-1] - idx[0] + gapsize) // gapsize) +1006 for i in range(shape): +1007 ret[(idx[i] - idx[0]) // gapsize] = deltas[i] +1008 return ret +1009 +1010 +1011def _merge_idx(idl): +1012 """Returns the union of all lists in idl as sorted list +1013 +1014 Parameters +1015 ---------- +1016 idl : list +1017 List of lists or ranges. +1018 """ +1019 +1020 # Use groupby to efficiently check whether all elements of idl are identical +1021 try: +1022 g = groupby(idl) +1023 if next(g, True) and not next(g, False): +1024 return idl[0] +1025 except Exception: +1026 pass +1027 +1028 if np.all([type(idx) is range for idx in idl]): +1029 if len(set([idx[0] for idx in idl])) == 1: +1030 idstart = min([idx.start for idx in idl]) +1031 idstop = max([idx.stop for idx in idl]) +1032 idstep = min([idx.step for idx in idl]) +1033 return range(idstart, idstop, idstep) 1034 -1035 if np.all([type(idx) is range for idx in idl]): -1036 if len(set([idx[0] for idx in idl])) == 1: -1037 idstart = min([idx.start for idx in idl]) -1038 idstop = max([idx.stop for idx in idl]) -1039 idstep = min([idx.step for idx in idl]) -1040 return range(idstart, idstop, idstep) -1041 -1042 return sorted(set().union(*idl)) -1043 -1044 -1045def _intersection_idx(idl): -1046 """Returns the intersection of all lists in idl as sorted list -1047 -1048 Parameters -1049 ---------- -1050 idl : list -1051 List of lists or ranges. -1052 """ -1053 -1054 def _lcm(*args): -1055 """Returns the lowest common multiple of args. -1056 -1057 From python 3.9 onwards the math library contains an lcm function.""" -1058 return reduce(lambda a, b: a * b // gcd(a, b), args) -1059 -1060 # Use groupby to efficiently check whether all elements of idl are identical -1061 try: -1062 g = groupby(idl) -1063 if next(g, True) and not next(g, False): -1064 return idl[0] -1065 except Exception: -1066 pass +1035 return sorted(set().union(*idl)) +1036 +1037 +1038def _intersection_idx(idl): +1039 """Returns the intersection of all lists in idl as sorted list +1040 +1041 Parameters +1042 ---------- +1043 idl : list +1044 List of lists or ranges. +1045 """ +1046 +1047 def _lcm(*args): +1048 """Returns the lowest common multiple of args. +1049 +1050 From python 3.9 onwards the math library contains an lcm function.""" +1051 return reduce(lambda a, b: a * b // gcd(a, b), args) +1052 +1053 # Use groupby to efficiently check whether all elements of idl are identical +1054 try: +1055 g = groupby(idl) +1056 if next(g, True) and not next(g, False): +1057 return idl[0] +1058 except Exception: +1059 pass +1060 +1061 if np.all([type(idx) is range for idx in idl]): +1062 if len(set([idx[0] for idx in idl])) == 1: +1063 idstart = max([idx.start for idx in idl]) +1064 idstop = min([idx.stop for idx in idl]) +1065 idstep = _lcm(*[idx.step for idx in idl]) +1066 return range(idstart, idstop, idstep) 1067 -1068 if np.all([type(idx) is range for idx in idl]): -1069 if len(set([idx[0] for idx in idl])) == 1: -1070 idstart = max([idx.start for idx in idl]) -1071 idstop = min([idx.stop for idx in idl]) -1072 idstep = _lcm(*[idx.step for idx in idl]) -1073 return range(idstart, idstop, idstep) -1074 -1075 return sorted(set.intersection(*[set(o) for o in idl])) -1076 -1077 -1078def _expand_deltas_for_merge(deltas, idx, shape, new_idx): -1079 """Expand deltas defined on idx to the list of configs that is defined by new_idx. 
-1080 New, empty entries are filled by 0. If idx and new_idx are of type range, the smallest -1081 common divisor of the step sizes is used as new step size. -1082 -1083 Parameters -1084 ---------- -1085 deltas : list -1086 List of fluctuations -1087 idx : list -1088 List or range of configs on which the deltas are defined. -1089 Has to be a subset of new_idx and has to be sorted in ascending order. -1090 shape : list -1091 Number of configs in idx. -1092 new_idx : list -1093 List of configs that defines the new range, has to be sorted in ascending order. -1094 """ -1095 -1096 if type(idx) is range and type(new_idx) is range: -1097 if idx == new_idx: -1098 return deltas -1099 ret = np.zeros(new_idx[-1] - new_idx[0] + 1) -1100 for i in range(shape): -1101 ret[idx[i] - new_idx[0]] = deltas[i] -1102 return np.array([ret[new_idx[i] - new_idx[0]] for i in range(len(new_idx))]) * len(new_idx) / len(idx) -1103 -1104 -1105def derived_observable(func, data, array_mode=False, **kwargs): -1106 """Construct a derived Obs according to func(data, **kwargs) using automatic differentiation. -1107 -1108 Parameters -1109 ---------- -1110 func : object -1111 arbitrary function of the form func(data, **kwargs). For the -1112 automatic differentiation to work, all numpy functions have to have -1113 the autograd wrapper (use 'import autograd.numpy as anp'). -1114 data : list -1115 list of Obs, e.g. [obs1, obs2, obs3]. -1116 num_grad : bool -1117 if True, numerical derivatives are used instead of autograd -1118 (default False). To control the numerical differentiation the -1119 kwargs of numdifftools.step_generators.MaxStepGenerator -1120 can be used. -1121 man_grad : list -1122 manually supply a list or an array which contains the jacobian -1123 of func. Use cautiously, supplying the wrong derivative will -1124 not be intercepted. -1125 -1126 Notes -1127 ----- -1128 For simple mathematical operations it can be practical to use anonymous -1129 functions. For the ratio of two observables one can e.g. use -1130 -1131 new_obs = derived_observable(lambda x: x[0] / x[1], [obs1, obs2]) -1132 """ -1133 -1134 data = np.asarray(data) -1135 raveled_data = data.ravel() -1136 -1137 # Workaround for matrix operations containing non Obs data -1138 if not all(isinstance(x, Obs) for x in raveled_data): -1139 for i in range(len(raveled_data)): -1140 if isinstance(raveled_data[i], (int, float)): -1141 raveled_data[i] = cov_Obs(raveled_data[i], 0.0, "###dummy_covobs###") -1142 -1143 allcov = {} -1144 for o in raveled_data: -1145 for name in o.cov_names: -1146 if name in allcov: -1147 if not np.allclose(allcov[name], o.covobs[name].cov): -1148 raise Exception('Inconsistent covariance matrices for %s!' % (name)) -1149 else: -1150 allcov[name] = o.covobs[name].cov +1068 return sorted(set.intersection(*[set(o) for o in idl])) +1069 +1070 +1071def _expand_deltas_for_merge(deltas, idx, shape, new_idx): +1072 """Expand deltas defined on idx to the list of configs that is defined by new_idx. +1073 New, empty entries are filled by 0. If idx and new_idx are of type range, the smallest +1074 common divisor of the step sizes is used as new step size. +1075 +1076 Parameters +1077 ---------- +1078 deltas : list +1079 List of fluctuations +1080 idx : list +1081 List or range of configs on which the deltas are defined. +1082 Has to be a subset of new_idx and has to be sorted in ascending order. +1083 shape : list +1084 Number of configs in idx. 
+1085 new_idx : list +1086 List of configs that defines the new range, has to be sorted in ascending order. +1087 """ +1088 +1089 if type(idx) is range and type(new_idx) is range: +1090 if idx == new_idx: +1091 return deltas +1092 ret = np.zeros(new_idx[-1] - new_idx[0] + 1) +1093 for i in range(shape): +1094 ret[idx[i] - new_idx[0]] = deltas[i] +1095 return np.array([ret[new_idx[i] - new_idx[0]] for i in range(len(new_idx))]) * len(new_idx) / len(idx) +1096 +1097 +1098def derived_observable(func, data, array_mode=False, **kwargs): +1099 """Construct a derived Obs according to func(data, **kwargs) using automatic differentiation. +1100 +1101 Parameters +1102 ---------- +1103 func : object +1104 arbitrary function of the form func(data, **kwargs). For the +1105 automatic differentiation to work, all numpy functions have to have +1106 the autograd wrapper (use 'import autograd.numpy as anp'). +1107 data : list +1108 list of Obs, e.g. [obs1, obs2, obs3]. +1109 num_grad : bool +1110 if True, numerical derivatives are used instead of autograd +1111 (default False). To control the numerical differentiation the +1112 kwargs of numdifftools.step_generators.MaxStepGenerator +1113 can be used. +1114 man_grad : list +1115 manually supply a list or an array which contains the jacobian +1116 of func. Use cautiously, supplying the wrong derivative will +1117 not be intercepted. +1118 +1119 Notes +1120 ----- +1121 For simple mathematical operations it can be practical to use anonymous +1122 functions. For the ratio of two observables one can e.g. use +1123 +1124 new_obs = derived_observable(lambda x: x[0] / x[1], [obs1, obs2]) +1125 """ +1126 +1127 data = np.asarray(data) +1128 raveled_data = data.ravel() +1129 +1130 # Workaround for matrix operations containing non Obs data +1131 if not all(isinstance(x, Obs) for x in raveled_data): +1132 for i in range(len(raveled_data)): +1133 if isinstance(raveled_data[i], (int, float)): +1134 raveled_data[i] = cov_Obs(raveled_data[i], 0.0, "###dummy_covobs###") +1135 +1136 allcov = {} +1137 for o in raveled_data: +1138 for name in o.cov_names: +1139 if name in allcov: +1140 if not np.allclose(allcov[name], o.covobs[name].cov): +1141 raise Exception('Inconsistent covariance matrices for %s!' 
% (name)) +1142 else: +1143 allcov[name] = o.covobs[name].cov +1144 +1145 n_obs = len(raveled_data) +1146 new_names = sorted(set([y for x in [o.names for o in raveled_data] for y in x])) +1147 new_cov_names = sorted(set([y for x in [o.cov_names for o in raveled_data] for y in x])) +1148 new_sample_names = sorted(set(new_names) - set(new_cov_names)) +1149 +1150 reweighted = len(list(filter(lambda o: o.reweighted is True, raveled_data))) > 0 1151 -1152 n_obs = len(raveled_data) -1153 new_names = sorted(set([y for x in [o.names for o in raveled_data] for y in x])) -1154 new_cov_names = sorted(set([y for x in [o.cov_names for o in raveled_data] for y in x])) -1155 new_sample_names = sorted(set(new_names) - set(new_cov_names)) +1152 if data.ndim == 1: +1153 values = np.array([o.value for o in data]) +1154 else: +1155 values = np.vectorize(lambda x: x.value)(data) 1156 -1157 reweighted = len(list(filter(lambda o: o.reweighted is True, raveled_data))) > 0 +1157 new_values = func(values, **kwargs) 1158 -1159 if data.ndim == 1: -1160 values = np.array([o.value for o in data]) -1161 else: -1162 values = np.vectorize(lambda x: x.value)(data) -1163 -1164 new_values = func(values, **kwargs) -1165 -1166 multi = int(isinstance(new_values, np.ndarray)) -1167 -1168 new_r_values = {} -1169 new_idl_d = {} -1170 for name in new_sample_names: -1171 idl = [] -1172 tmp_values = np.zeros(n_obs) -1173 for i, item in enumerate(raveled_data): -1174 tmp_values[i] = item.r_values.get(name, item.value) -1175 tmp_idl = item.idl.get(name) -1176 if tmp_idl is not None: -1177 idl.append(tmp_idl) -1178 if multi > 0: -1179 tmp_values = np.array(tmp_values).reshape(data.shape) -1180 new_r_values[name] = func(tmp_values, **kwargs) -1181 new_idl_d[name] = _merge_idx(idl) -1182 -1183 if 'man_grad' in kwargs: -1184 deriv = np.asarray(kwargs.get('man_grad')) -1185 if new_values.shape + data.shape != deriv.shape: -1186 raise Exception('Manual derivative does not have correct shape.') -1187 elif kwargs.get('num_grad') is True: -1188 if multi > 0: -1189 raise Exception('Multi mode currently not supported for numerical derivative') -1190 options = { -1191 'base_step': 0.1, -1192 'step_ratio': 2.5} -1193 for key in options.keys(): -1194 kwarg = kwargs.get(key) -1195 if kwarg is not None: -1196 options[key] = kwarg -1197 tmp_df = nd.Gradient(func, order=4, **{k: v for k, v in options.items() if v is not None})(values, **kwargs) -1198 if tmp_df.size == 1: -1199 deriv = np.array([tmp_df.real]) -1200 else: -1201 deriv = tmp_df.real -1202 else: -1203 deriv = jacobian(func)(values, **kwargs) -1204 -1205 final_result = np.zeros(new_values.shape, dtype=object) -1206 -1207 if array_mode is True: -1208 -1209 class _Zero_grad(): -1210 def __init__(self, N): -1211 self.grad = np.zeros((N, 1)) -1212 -1213 new_covobs_lengths = dict(set([y for x in [[(n, o.covobs[n].N) for n in o.cov_names] for o in raveled_data] for y in x])) -1214 d_extracted = {} -1215 g_extracted = {} -1216 for name in new_sample_names: -1217 d_extracted[name] = [] -1218 ens_length = len(new_idl_d[name]) -1219 for i_dat, dat in enumerate(data): -1220 d_extracted[name].append(np.array([_expand_deltas_for_merge(o.deltas.get(name, np.zeros(ens_length)), o.idl.get(name, new_idl_d[name]), o.shape.get(name, ens_length), new_idl_d[name]) for o in dat.reshape(np.prod(dat.shape))]).reshape(dat.shape + (ens_length, ))) -1221 for name in new_cov_names: -1222 g_extracted[name] = [] -1223 zero_grad = _Zero_grad(new_covobs_lengths[name]) -1224 for i_dat, dat in enumerate(data): -1225 
g_extracted[name].append(np.array([o.covobs.get(name, zero_grad).grad for o in dat.reshape(np.prod(dat.shape))]).reshape(dat.shape + (new_covobs_lengths[name], 1))) -1226 -1227 for i_val, new_val in np.ndenumerate(new_values): -1228 new_deltas = {} -1229 new_grad = {} -1230 if array_mode is True: -1231 for name in new_sample_names: -1232 ens_length = d_extracted[name][0].shape[-1] -1233 new_deltas[name] = np.zeros(ens_length) -1234 for i_dat, dat in enumerate(d_extracted[name]): -1235 new_deltas[name] += np.tensordot(deriv[i_val + (i_dat, )], dat) -1236 for name in new_cov_names: -1237 new_grad[name] = 0 -1238 for i_dat, dat in enumerate(g_extracted[name]): -1239 new_grad[name] += np.tensordot(deriv[i_val + (i_dat, )], dat) -1240 else: -1241 for j_obs, obs in np.ndenumerate(data): -1242 for name in obs.names: -1243 if name in obs.cov_names: -1244 new_grad[name] = new_grad.get(name, 0) + deriv[i_val + j_obs] * obs.covobs[name].grad -1245 else: -1246 new_deltas[name] = new_deltas.get(name, 0) + deriv[i_val + j_obs] * _expand_deltas_for_merge(obs.deltas[name], obs.idl[name], obs.shape[name], new_idl_d[name]) -1247 -1248 new_covobs = {name: Covobs(0, allcov[name], name, grad=new_grad[name]) for name in new_grad} -1249 -1250 if not set(new_covobs.keys()).isdisjoint(new_deltas.keys()): -1251 raise Exception('The same name has been used for deltas and covobs!') -1252 new_samples = [] -1253 new_means = [] -1254 new_idl = [] -1255 new_names_obs = [] -1256 for name in new_names: -1257 if name not in new_covobs: -1258 new_samples.append(new_deltas[name]) -1259 new_idl.append(new_idl_d[name]) -1260 new_means.append(new_r_values[name][i_val]) -1261 new_names_obs.append(name) -1262 final_result[i_val] = Obs(new_samples, new_names_obs, means=new_means, idl=new_idl) -1263 for name in new_covobs: -1264 final_result[i_val].names.append(name) -1265 final_result[i_val]._covobs = new_covobs -1266 final_result[i_val]._value = new_val -1267 final_result[i_val].reweighted = reweighted -1268 -1269 if multi == 0: -1270 final_result = final_result.item() -1271 -1272 return final_result +1159 multi = int(isinstance(new_values, np.ndarray)) +1160 +1161 new_r_values = {} +1162 new_idl_d = {} +1163 for name in new_sample_names: +1164 idl = [] +1165 tmp_values = np.zeros(n_obs) +1166 for i, item in enumerate(raveled_data): +1167 tmp_values[i] = item.r_values.get(name, item.value) +1168 tmp_idl = item.idl.get(name) +1169 if tmp_idl is not None: +1170 idl.append(tmp_idl) +1171 if multi > 0: +1172 tmp_values = np.array(tmp_values).reshape(data.shape) +1173 new_r_values[name] = func(tmp_values, **kwargs) +1174 new_idl_d[name] = _merge_idx(idl) +1175 +1176 if 'man_grad' in kwargs: +1177 deriv = np.asarray(kwargs.get('man_grad')) +1178 if new_values.shape + data.shape != deriv.shape: +1179 raise Exception('Manual derivative does not have correct shape.') +1180 elif kwargs.get('num_grad') is True: +1181 if multi > 0: +1182 raise Exception('Multi mode currently not supported for numerical derivative') +1183 options = { +1184 'base_step': 0.1, +1185 'step_ratio': 2.5} +1186 for key in options.keys(): +1187 kwarg = kwargs.get(key) +1188 if kwarg is not None: +1189 options[key] = kwarg +1190 tmp_df = nd.Gradient(func, order=4, **{k: v for k, v in options.items() if v is not None})(values, **kwargs) +1191 if tmp_df.size == 1: +1192 deriv = np.array([tmp_df.real]) +1193 else: +1194 deriv = tmp_df.real +1195 else: +1196 deriv = jacobian(func)(values, **kwargs) +1197 +1198 final_result = np.zeros(new_values.shape, dtype=object) +1199 
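    # Fast path for array-valued functions: with array_mode=True the deltas and
    # covobs gradients of all inputs are expanded once per ensemble into
    # d_extracted / g_extracted, so that the per-element loop further down can
    # contract them with the jacobian deriv via np.tensordot instead of looping
    # over every Obs in pure Python.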
+1200 if array_mode is True: +1201 +1202 class _Zero_grad(): +1203 def __init__(self, N): +1204 self.grad = np.zeros((N, 1)) +1205 +1206 new_covobs_lengths = dict(set([y for x in [[(n, o.covobs[n].N) for n in o.cov_names] for o in raveled_data] for y in x])) +1207 d_extracted = {} +1208 g_extracted = {} +1209 for name in new_sample_names: +1210 d_extracted[name] = [] +1211 ens_length = len(new_idl_d[name]) +1212 for i_dat, dat in enumerate(data): +1213 d_extracted[name].append(np.array([_expand_deltas_for_merge(o.deltas.get(name, np.zeros(ens_length)), o.idl.get(name, new_idl_d[name]), o.shape.get(name, ens_length), new_idl_d[name]) for o in dat.reshape(np.prod(dat.shape))]).reshape(dat.shape + (ens_length, ))) +1214 for name in new_cov_names: +1215 g_extracted[name] = [] +1216 zero_grad = _Zero_grad(new_covobs_lengths[name]) +1217 for i_dat, dat in enumerate(data): +1218 g_extracted[name].append(np.array([o.covobs.get(name, zero_grad).grad for o in dat.reshape(np.prod(dat.shape))]).reshape(dat.shape + (new_covobs_lengths[name], 1))) +1219 +1220 for i_val, new_val in np.ndenumerate(new_values): +1221 new_deltas = {} +1222 new_grad = {} +1223 if array_mode is True: +1224 for name in new_sample_names: +1225 ens_length = d_extracted[name][0].shape[-1] +1226 new_deltas[name] = np.zeros(ens_length) +1227 for i_dat, dat in enumerate(d_extracted[name]): +1228 new_deltas[name] += np.tensordot(deriv[i_val + (i_dat, )], dat) +1229 for name in new_cov_names: +1230 new_grad[name] = 0 +1231 for i_dat, dat in enumerate(g_extracted[name]): +1232 new_grad[name] += np.tensordot(deriv[i_val + (i_dat, )], dat) +1233 else: +1234 for j_obs, obs in np.ndenumerate(data): +1235 for name in obs.names: +1236 if name in obs.cov_names: +1237 new_grad[name] = new_grad.get(name, 0) + deriv[i_val + j_obs] * obs.covobs[name].grad +1238 else: +1239 new_deltas[name] = new_deltas.get(name, 0) + deriv[i_val + j_obs] * _expand_deltas_for_merge(obs.deltas[name], obs.idl[name], obs.shape[name], new_idl_d[name]) +1240 +1241 new_covobs = {name: Covobs(0, allcov[name], name, grad=new_grad[name]) for name in new_grad} +1242 +1243 if not set(new_covobs.keys()).isdisjoint(new_deltas.keys()): +1244 raise Exception('The same name has been used for deltas and covobs!') +1245 new_samples = [] +1246 new_means = [] +1247 new_idl = [] +1248 new_names_obs = [] +1249 for name in new_names: +1250 if name not in new_covobs: +1251 new_samples.append(new_deltas[name]) +1252 new_idl.append(new_idl_d[name]) +1253 new_means.append(new_r_values[name][i_val]) +1254 new_names_obs.append(name) +1255 final_result[i_val] = Obs(new_samples, new_names_obs, means=new_means, idl=new_idl) +1256 for name in new_covobs: +1257 final_result[i_val].names.append(name) +1258 final_result[i_val]._covobs = new_covobs +1259 final_result[i_val]._value = new_val +1260 final_result[i_val].reweighted = reweighted +1261 +1262 if multi == 0: +1263 final_result = final_result.item() +1264 +1265 return final_result +1266 +1267 +1268def _reduce_deltas(deltas, idx_old, idx_new): +1269 """Extract deltas defined on idx_old on all configs of idx_new. +1270 +1271 Assumes that idx_old and idx_new are correctly defined idl, i.e., they +1272 are ordered in ascending order. 1273 -1274 -1275def _reduce_deltas(deltas, idx_old, idx_new): -1276 """Extract deltas defined on idx_old on all configs of idx_new. -1277 -1278 Assumes that idx_old and idx_new are correctly defined idl, i.e., they -1279 are ordered in ascending order. 
-1280 -1281 Parameters -1282 ---------- -1283 deltas : list -1284 List of fluctuations -1285 idx_old : list -1286 List or range of configs on which the deltas are defined -1287 idx_new : list -1288 List of configs for which we want to extract the deltas. -1289 Has to be a subset of idx_old. -1290 """ -1291 if not len(deltas) == len(idx_old): -1292 raise Exception('Length of deltas and idx_old have to be the same: %d != %d' % (len(deltas), len(idx_old))) -1293 if type(idx_old) is range and type(idx_new) is range: -1294 if idx_old == idx_new: -1295 return deltas -1296 # Use groupby to efficiently check whether all elements of idx_old and idx_new are identical -1297 try: -1298 g = groupby([idx_old, idx_new]) -1299 if next(g, True) and not next(g, False): -1300 return deltas -1301 except Exception: -1302 pass -1303 indices = np.intersect1d(idx_old, idx_new, assume_unique=True, return_indices=True)[1] -1304 if len(indices) < len(idx_new): -1305 raise Exception('Error in _reduce_deltas: Config of idx_new not in idx_old') -1306 return np.array(deltas)[indices] -1307 -1308 -1309def reweight(weight, obs, **kwargs): -1310 """Reweight a list of observables. -1311 -1312 Parameters -1313 ---------- -1314 weight : Obs -1315 Reweighting factor. An Observable that has to be defined on a superset of the -1316 configurations in obs[i].idl for all i. -1317 obs : list -1318 list of Obs, e.g. [obs1, obs2, obs3]. -1319 all_configs : bool -1320 if True, the reweighted observables are normalized by the average of -1321 the reweighting factor on all configurations in weight.idl and not -1322 on the configurations in obs[i].idl. Default False. -1323 """ -1324 result = [] -1325 for i in range(len(obs)): -1326 if len(obs[i].cov_names): -1327 raise Exception('Error: Not possible to reweight an Obs that contains covobs!') -1328 if not set(obs[i].names).issubset(weight.names): -1329 raise Exception('Error: Ensembles do not fit') -1330 for name in obs[i].names: -1331 if not set(obs[i].idl[name]).issubset(weight.idl[name]): -1332 raise Exception('obs[%d] has to be defined on a subset of the configs in weight.idl[%s]!' % (i, name)) -1333 new_samples = [] -1334 w_deltas = {} -1335 for name in sorted(obs[i].names): -1336 w_deltas[name] = _reduce_deltas(weight.deltas[name], weight.idl[name], obs[i].idl[name]) -1337 new_samples.append((w_deltas[name] + weight.r_values[name]) * (obs[i].deltas[name] + obs[i].r_values[name])) -1338 tmp_obs = Obs(new_samples, sorted(obs[i].names), idl=[obs[i].idl[name] for name in sorted(obs[i].names)]) -1339 -1340 if kwargs.get('all_configs'): -1341 new_weight = weight -1342 else: -1343 new_weight = Obs([w_deltas[name] + weight.r_values[name] for name in sorted(obs[i].names)], sorted(obs[i].names), idl=[obs[i].idl[name] for name in sorted(obs[i].names)]) -1344 -1345 result.append(tmp_obs / new_weight) -1346 result[-1].reweighted = True -1347 -1348 return result -1349 -1350 -1351def correlate(obs_a, obs_b): -1352 """Correlate two observables. +1274 Parameters +1275 ---------- +1276 deltas : list +1277 List of fluctuations +1278 idx_old : list +1279 List or range of configs on which the deltas are defined +1280 idx_new : list +1281 List of configs for which we want to extract the deltas. +1282 Has to be a subset of idx_old. 
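    Notes
    -----
    A minimal sketch of the intended behavior (the numbers serve only as an illustration):

    _reduce_deltas([1.0, 2.0, 3.0], [1, 2, 3], [2])  # returns array([2.])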
+1283 """ +1284 if not len(deltas) == len(idx_old): +1285 raise Exception('Length of deltas and idx_old have to be the same: %d != %d' % (len(deltas), len(idx_old))) +1286 if type(idx_old) is range and type(idx_new) is range: +1287 if idx_old == idx_new: +1288 return deltas +1289 # Use groupby to efficiently check whether all elements of idx_old and idx_new are identical +1290 try: +1291 g = groupby([idx_old, idx_new]) +1292 if next(g, True) and not next(g, False): +1293 return deltas +1294 except Exception: +1295 pass +1296 indices = np.intersect1d(idx_old, idx_new, assume_unique=True, return_indices=True)[1] +1297 if len(indices) < len(idx_new): +1298 raise Exception('Error in _reduce_deltas: Config of idx_new not in idx_old') +1299 return np.array(deltas)[indices] +1300 +1301 +1302def reweight(weight, obs, **kwargs): +1303 """Reweight a list of observables. +1304 +1305 Parameters +1306 ---------- +1307 weight : Obs +1308 Reweighting factor. An Observable that has to be defined on a superset of the +1309 configurations in obs[i].idl for all i. +1310 obs : list +1311 list of Obs, e.g. [obs1, obs2, obs3]. +1312 all_configs : bool +1313 if True, the reweighted observables are normalized by the average of +1314 the reweighting factor on all configurations in weight.idl and not +1315 on the configurations in obs[i].idl. Default False. +1316 """ +1317 result = [] +1318 for i in range(len(obs)): +1319 if len(obs[i].cov_names): +1320 raise Exception('Error: Not possible to reweight an Obs that contains covobs!') +1321 if not set(obs[i].names).issubset(weight.names): +1322 raise Exception('Error: Ensembles do not fit') +1323 for name in obs[i].names: +1324 if not set(obs[i].idl[name]).issubset(weight.idl[name]): +1325 raise Exception('obs[%d] has to be defined on a subset of the configs in weight.idl[%s]!' % (i, name)) +1326 new_samples = [] +1327 w_deltas = {} +1328 for name in sorted(obs[i].names): +1329 w_deltas[name] = _reduce_deltas(weight.deltas[name], weight.idl[name], obs[i].idl[name]) +1330 new_samples.append((w_deltas[name] + weight.r_values[name]) * (obs[i].deltas[name] + obs[i].r_values[name])) +1331 tmp_obs = Obs(new_samples, sorted(obs[i].names), idl=[obs[i].idl[name] for name in sorted(obs[i].names)]) +1332 +1333 if kwargs.get('all_configs'): +1334 new_weight = weight +1335 else: +1336 new_weight = Obs([w_deltas[name] + weight.r_values[name] for name in sorted(obs[i].names)], sorted(obs[i].names), idl=[obs[i].idl[name] for name in sorted(obs[i].names)]) +1337 +1338 result.append(tmp_obs / new_weight) +1339 result[-1].reweighted = True +1340 +1341 return result +1342 +1343 +1344def correlate(obs_a, obs_b): +1345 """Correlate two observables. +1346 +1347 Parameters +1348 ---------- +1349 obs_a : Obs +1350 First observable +1351 obs_b : Obs +1352 Second observable 1353 -1354 Parameters -1355 ---------- -1356 obs_a : Obs -1357 First observable -1358 obs_b : Obs -1359 Second observable +1354 Notes +1355 ----- +1356 Keep in mind to only correlate primary observables which have not been reweighted +1357 yet. The reweighting has to be applied after correlating the observables. +1358 Currently only works if ensembles are identical (this is not strictly necessary). +1359 """ 1360 -1361 Notes -1362 ----- -1363 Keep in mind to only correlate primary observables which have not been reweighted -1364 yet. The reweighting has to be applied after correlating the observables. -1365 Currently only works if ensembles are identical (this is not strictly necessary). 
-1366 """ -1367 -1368 if sorted(obs_a.names) != sorted(obs_b.names): -1369 raise Exception(f"Ensembles do not fit {set(sorted(obs_a.names)) ^ set(sorted(obs_b.names))}") -1370 if len(obs_a.cov_names) or len(obs_b.cov_names): -1371 raise Exception('Error: Not possible to correlate Obs that contain covobs!') -1372 for name in obs_a.names: -1373 if obs_a.shape[name] != obs_b.shape[name]: -1374 raise Exception('Shapes of ensemble', name, 'do not fit') -1375 if obs_a.idl[name] != obs_b.idl[name]: -1376 raise Exception('idl of ensemble', name, 'do not fit') -1377 -1378 if obs_a.reweighted is True: -1379 warnings.warn("The first observable is already reweighted.", RuntimeWarning) -1380 if obs_b.reweighted is True: -1381 warnings.warn("The second observable is already reweighted.", RuntimeWarning) -1382 -1383 new_samples = [] -1384 new_idl = [] -1385 for name in sorted(obs_a.names): -1386 new_samples.append((obs_a.deltas[name] + obs_a.r_values[name]) * (obs_b.deltas[name] + obs_b.r_values[name])) -1387 new_idl.append(obs_a.idl[name]) -1388 -1389 o = Obs(new_samples, sorted(obs_a.names), idl=new_idl) -1390 o.reweighted = obs_a.reweighted or obs_b.reweighted -1391 return o +1361 if sorted(obs_a.names) != sorted(obs_b.names): +1362 raise Exception(f"Ensembles do not fit {set(sorted(obs_a.names)) ^ set(sorted(obs_b.names))}") +1363 if len(obs_a.cov_names) or len(obs_b.cov_names): +1364 raise Exception('Error: Not possible to correlate Obs that contain covobs!') +1365 for name in obs_a.names: +1366 if obs_a.shape[name] != obs_b.shape[name]: +1367 raise Exception('Shapes of ensemble', name, 'do not fit') +1368 if obs_a.idl[name] != obs_b.idl[name]: +1369 raise Exception('idl of ensemble', name, 'do not fit') +1370 +1371 if obs_a.reweighted is True: +1372 warnings.warn("The first observable is already reweighted.", RuntimeWarning) +1373 if obs_b.reweighted is True: +1374 warnings.warn("The second observable is already reweighted.", RuntimeWarning) +1375 +1376 new_samples = [] +1377 new_idl = [] +1378 for name in sorted(obs_a.names): +1379 new_samples.append((obs_a.deltas[name] + obs_a.r_values[name]) * (obs_b.deltas[name] + obs_b.r_values[name])) +1380 new_idl.append(obs_a.idl[name]) +1381 +1382 o = Obs(new_samples, sorted(obs_a.names), idl=new_idl) +1383 o.reweighted = obs_a.reweighted or obs_b.reweighted +1384 return o +1385 +1386 +1387def covariance(obs, visualize=False, correlation=False, smooth=None, **kwargs): +1388 r'''Calculates the error covariance matrix of a set of observables. +1389 +1390 WARNING: This function should be used with care, especially for observables with support on multiple +1391 ensembles with differing autocorrelations. See the notes below for details. 1392 -1393 -1394def covariance(obs, visualize=False, correlation=False, smooth=None, **kwargs): -1395 r'''Calculates the error covariance matrix of a set of observables. -1396 -1397 WARNING: This function should be used with care, especially for observables with support on multiple -1398 ensembles with differing autocorrelations. See the notes below for details. -1399 -1400 The gamma method has to be applied first to all observables. -1401 -1402 Parameters -1403 ---------- -1404 obs : list or numpy.ndarray -1405 List or one dimensional array of Obs -1406 visualize : bool -1407 If True plots the corresponding normalized correlation matrix (default False). -1408 correlation : bool -1409 If True the correlation matrix instead of the error covariance matrix is returned (default False). 
-1410 smooth : None or int -1411 If smooth is an integer 'E' between 2 and the dimension of the matrix minus 1 the eigenvalue -1412 smoothing procedure of hep-lat/9412087 is applied to the correlation matrix which leaves the -1413 largest E eigenvalues essentially unchanged and smoothes the smaller eigenvalues to avoid extremely -1414 small ones. -1415 -1416 Notes -1417 ----- -1418 The error covariance is defined such that it agrees with the squared standard error for two identical observables -1419 $$\operatorname{cov}(a,a)=\sum_{s=1}^N\delta_a^s\delta_a^s/N^2=\Gamma_{aa}(0)/N=\operatorname{var}(a)/N=\sigma_a^2$$ -1420 in the absence of autocorrelation. -1421 The error covariance is estimated by calculating the correlation matrix assuming no autocorrelation and then rescaling the correlation matrix by the full errors including the previous gamma method estimate for the autocorrelation of the observables. The covariance at windowsize 0 is guaranteed to be positive semi-definite -1422 $$\sum_{i,j}v_i\Gamma_{ij}(0)v_j=\frac{1}{N}\sum_{s=1}^N\sum_{i,j}v_i\delta_i^s\delta_j^s v_j=\frac{1}{N}\sum_{s=1}^N\sum_{i}|v_i\delta_i^s|^2\geq 0\,,$$ for every $v\in\mathbb{R}^M$, while such an identity does not hold for larger windows/lags. -1423 For observables defined on a single ensemble our approximation is equivalent to assuming that the integrated autocorrelation time of an off-diagonal element is equal to the geometric mean of the integrated autocorrelation times of the corresponding diagonal elements. -1424 $$\tau_{\mathrm{int}, ij}=\sqrt{\tau_{\mathrm{int}, i}\times \tau_{\mathrm{int}, j}}$$ -1425 This construction ensures that the estimated covariance matrix is positive semi-definite (up to numerical rounding errors). -1426 ''' -1427 -1428 length = len(obs) -1429 -1430 max_samples = np.max([o.N for o in obs]) -1431 if max_samples <= length and not [item for sublist in [o.cov_names for o in obs] for item in sublist]: -1432 warnings.warn(f"The dimension of the covariance matrix ({length}) is larger or equal to the number of samples ({max_samples}). This will result in a rank deficient matrix.", RuntimeWarning) -1433 -1434 cov = np.zeros((length, length)) -1435 for i in range(length): -1436 for j in range(i, length): -1437 cov[i, j] = _covariance_element(obs[i], obs[j]) -1438 cov = cov + cov.T - np.diag(np.diag(cov)) -1439 -1440 corr = np.diag(1 / np.sqrt(np.diag(cov))) @ cov @ np.diag(1 / np.sqrt(np.diag(cov))) -1441 -1442 if isinstance(smooth, int): -1443 corr = _smooth_eigenvalues(corr, smooth) -1444 -1445 if visualize: -1446 plt.matshow(corr, vmin=-1, vmax=1) -1447 plt.set_cmap('RdBu') -1448 plt.colorbar() -1449 plt.draw() -1450 -1451 if correlation is True: -1452 return corr +1393 The gamma method has to be applied first to all observables. +1394 +1395 Parameters +1396 ---------- +1397 obs : list or numpy.ndarray +1398 List or one dimensional array of Obs +1399 visualize : bool +1400 If True plots the corresponding normalized correlation matrix (default False). +1401 correlation : bool +1402 If True the correlation matrix instead of the error covariance matrix is returned (default False). +1403 smooth : None or int +1404 If smooth is an integer 'E' between 2 and the dimension of the matrix minus 1 the eigenvalue +1405 smoothing procedure of hep-lat/9412087 is applied to the correlation matrix which leaves the +1406 largest E eigenvalues essentially unchanged and smoothes the smaller eigenvalues to avoid extremely +1407 small ones. 
+1408 +1409 Notes +1410 ----- +1411 The error covariance is defined such that it agrees with the squared standard error for two identical observables +1412 $$\operatorname{cov}(a,a)=\sum_{s=1}^N\delta_a^s\delta_a^s/N^2=\Gamma_{aa}(0)/N=\operatorname{var}(a)/N=\sigma_a^2$$ +1413 in the absence of autocorrelation. +1414 The error covariance is estimated by calculating the correlation matrix assuming no autocorrelation and then rescaling the correlation matrix by the full errors including the previous gamma method estimate for the autocorrelation of the observables. The covariance at windowsize 0 is guaranteed to be positive semi-definite +1415 $$\sum_{i,j}v_i\Gamma_{ij}(0)v_j=\frac{1}{N}\sum_{s=1}^N\sum_{i,j}v_i\delta_i^s\delta_j^s v_j=\frac{1}{N}\sum_{s=1}^N\sum_{i}|v_i\delta_i^s|^2\geq 0\,,$$ for every $v\in\mathbb{R}^M$, while such an identity does not hold for larger windows/lags. +1416 For observables defined on a single ensemble our approximation is equivalent to assuming that the integrated autocorrelation time of an off-diagonal element is equal to the geometric mean of the integrated autocorrelation times of the corresponding diagonal elements. +1417 $$\tau_{\mathrm{int}, ij}=\sqrt{\tau_{\mathrm{int}, i}\times \tau_{\mathrm{int}, j}}$$ +1418 This construction ensures that the estimated covariance matrix is positive semi-definite (up to numerical rounding errors). +1419 ''' +1420 +1421 length = len(obs) +1422 +1423 max_samples = np.max([o.N for o in obs]) +1424 if max_samples <= length and not [item for sublist in [o.cov_names for o in obs] for item in sublist]: +1425 warnings.warn(f"The dimension of the covariance matrix ({length}) is larger or equal to the number of samples ({max_samples}). This will result in a rank deficient matrix.", RuntimeWarning) +1426 +1427 cov = np.zeros((length, length)) +1428 for i in range(length): +1429 for j in range(i, length): +1430 cov[i, j] = _covariance_element(obs[i], obs[j]) +1431 cov = cov + cov.T - np.diag(np.diag(cov)) +1432 +1433 corr = np.diag(1 / np.sqrt(np.diag(cov))) @ cov @ np.diag(1 / np.sqrt(np.diag(cov))) +1434 +1435 if isinstance(smooth, int): +1436 corr = _smooth_eigenvalues(corr, smooth) +1437 +1438 if visualize: +1439 plt.matshow(corr, vmin=-1, vmax=1) +1440 plt.set_cmap('RdBu') +1441 plt.colorbar() +1442 plt.draw() +1443 +1444 if correlation is True: +1445 return corr +1446 +1447 errors = [o.dvalue for o in obs] +1448 cov = np.diag(errors) @ corr @ np.diag(errors) +1449 +1450 eigenvalues = np.linalg.eigh(cov)[0] +1451 if not np.all(eigenvalues >= 0): +1452 warnings.warn("Covariance matrix is not positive semi-definite (Eigenvalues: " + str(eigenvalues) + ")", RuntimeWarning) 1453 -1454 errors = [o.dvalue for o in obs] -1455 cov = np.diag(errors) @ corr @ np.diag(errors) +1454 return cov +1455 1456 -1457 eigenvalues = np.linalg.eigh(cov)[0] -1458 if not np.all(eigenvalues >= 0): -1459 warnings.warn("Covariance matrix is not positive semi-definite (Eigenvalues: " + str(eigenvalues) + ")", RuntimeWarning) -1460 -1461 return cov -1462 -1463 -1464def _smooth_eigenvalues(corr, E): -1465 """Eigenvalue smoothing as described in hep-lat/9412087 -1466 -1467 corr : np.ndarray -1468 correlation matrix -1469 E : integer -1470 Number of eigenvalues to be left substantially unchanged -1471 """ -1472 if not (2 < E < corr.shape[0] - 1): -1473 raise Exception(f"'E' has to be between 2 and the dimension of the correlation matrix minus 1 ({corr.shape[0] - 1}).") -1474 vals, vec = np.linalg.eigh(corr) -1475 lambda_min = np.mean(vals[:-E]) -1476 
vals[vals < lambda_min] = lambda_min -1477 vals /= np.mean(vals) -1478 return vec @ np.diag(vals) @ vec.T -1479 -1480 -1481def _covariance_element(obs1, obs2): -1482 """Estimates the covariance of two Obs objects, neglecting autocorrelations.""" -1483 -1484 def calc_gamma(deltas1, deltas2, idx1, idx2, new_idx): -1485 deltas1 = _reduce_deltas(deltas1, idx1, new_idx) -1486 deltas2 = _reduce_deltas(deltas2, idx2, new_idx) -1487 return np.sum(deltas1 * deltas2) -1488 -1489 if set(obs1.names).isdisjoint(set(obs2.names)): -1490 return 0.0 +1457def _smooth_eigenvalues(corr, E): +1458 """Eigenvalue smoothing as described in hep-lat/9412087 +1459 +1460 corr : np.ndarray +1461 correlation matrix +1462 E : integer +1463 Number of eigenvalues to be left substantially unchanged +1464 """ +1465 if not (2 < E < corr.shape[0] - 1): +1466 raise Exception(f"'E' has to be between 2 and the dimension of the correlation matrix minus 1 ({corr.shape[0] - 1}).") +1467 vals, vec = np.linalg.eigh(corr) +1468 lambda_min = np.mean(vals[:-E]) +1469 vals[vals < lambda_min] = lambda_min +1470 vals /= np.mean(vals) +1471 return vec @ np.diag(vals) @ vec.T +1472 +1473 +1474def _covariance_element(obs1, obs2): +1475 """Estimates the covariance of two Obs objects, neglecting autocorrelations.""" +1476 +1477 def calc_gamma(deltas1, deltas2, idx1, idx2, new_idx): +1478 deltas1 = _reduce_deltas(deltas1, idx1, new_idx) +1479 deltas2 = _reduce_deltas(deltas2, idx2, new_idx) +1480 return np.sum(deltas1 * deltas2) +1481 +1482 if set(obs1.names).isdisjoint(set(obs2.names)): +1483 return 0.0 +1484 +1485 if not hasattr(obs1, 'e_dvalue') or not hasattr(obs2, 'e_dvalue'): +1486 raise Exception('The gamma method has to be applied to both Obs first.') +1487 +1488 dvalue = 0.0 +1489 +1490 for e_name in obs1.mc_names: 1491 -1492 if not hasattr(obs1, 'e_dvalue') or not hasattr(obs2, 'e_dvalue'): -1493 raise Exception('The gamma method has to be applied to both Obs first.') +1492 if e_name not in obs2.mc_names: +1493 continue 1494 -1495 dvalue = 0.0 -1496 -1497 for e_name in obs1.mc_names: -1498 -1499 if e_name not in obs2.mc_names: -1500 continue -1501 -1502 idl_d = {} +1495 idl_d = {} +1496 for r_name in obs1.e_content[e_name]: +1497 if r_name not in obs2.e_content[e_name]: +1498 continue +1499 idl_d[r_name] = _intersection_idx([obs1.idl[r_name], obs2.idl[r_name]]) +1500 +1501 gamma = 0.0 +1502 1503 for r_name in obs1.e_content[e_name]: 1504 if r_name not in obs2.e_content[e_name]: 1505 continue -1506 idl_d[r_name] = _intersection_idx([obs1.idl[r_name], obs2.idl[r_name]]) -1507 -1508 gamma = 0.0 +1506 if len(idl_d[r_name]) == 0: +1507 continue +1508 gamma += calc_gamma(obs1.deltas[r_name], obs2.deltas[r_name], obs1.idl[r_name], obs2.idl[r_name], idl_d[r_name]) 1509 -1510 for r_name in obs1.e_content[e_name]: -1511 if r_name not in obs2.e_content[e_name]: -1512 continue -1513 if len(idl_d[r_name]) == 0: -1514 continue -1515 gamma += calc_gamma(obs1.deltas[r_name], obs2.deltas[r_name], obs1.idl[r_name], obs2.idl[r_name], idl_d[r_name]) -1516 -1517 if gamma == 0.0: -1518 continue -1519 -1520 gamma_div = 0.0 -1521 for r_name in obs1.e_content[e_name]: -1522 if r_name not in obs2.e_content[e_name]: -1523 continue -1524 if len(idl_d[r_name]) == 0: -1525 continue -1526 gamma_div += np.sqrt(calc_gamma(obs1.deltas[r_name], obs1.deltas[r_name], obs1.idl[r_name], obs1.idl[r_name], idl_d[r_name]) * calc_gamma(obs2.deltas[r_name], obs2.deltas[r_name], obs2.idl[r_name], obs2.idl[r_name], idl_d[r_name])) -1527 gamma /= gamma_div +1510 if gamma == 0.0: 
+1511 continue +1512 +1513 gamma_div = 0.0 +1514 for r_name in obs1.e_content[e_name]: +1515 if r_name not in obs2.e_content[e_name]: +1516 continue +1517 if len(idl_d[r_name]) == 0: +1518 continue +1519 gamma_div += np.sqrt(calc_gamma(obs1.deltas[r_name], obs1.deltas[r_name], obs1.idl[r_name], obs1.idl[r_name], idl_d[r_name]) * calc_gamma(obs2.deltas[r_name], obs2.deltas[r_name], obs2.idl[r_name], obs2.idl[r_name], idl_d[r_name])) +1520 gamma /= gamma_div +1521 +1522 dvalue += gamma +1523 +1524 for e_name in obs1.cov_names: +1525 +1526 if e_name not in obs2.cov_names: +1527 continue 1528 -1529 dvalue += gamma +1529 dvalue += float(np.dot(np.transpose(obs1.covobs[e_name].grad), np.dot(obs1.covobs[e_name].cov, obs2.covobs[e_name].grad))) 1530 -1531 for e_name in obs1.cov_names: +1531 return dvalue 1532 -1533 if e_name not in obs2.cov_names: -1534 continue -1535 -1536 dvalue += float(np.dot(np.transpose(obs1.covobs[e_name].grad), np.dot(obs1.covobs[e_name].cov, obs2.covobs[e_name].grad))) -1537 -1538 return dvalue -1539 -1540 -1541def import_jackknife(jacks, name, idl=None): -1542 """Imports jackknife samples and returns an Obs -1543 -1544 Parameters -1545 ---------- -1546 jacks : numpy.ndarray -1547 numpy array containing the mean value as zeroth entry and -1548 the N jackknife samples as first to Nth entry. -1549 name : str -1550 name of the ensemble the samples are defined on. -1551 """ -1552 length = len(jacks) - 1 -1553 prj = (np.ones((length, length)) - (length - 1) * np.identity(length)) -1554 samples = jacks[1:] @ prj -1555 mean = np.mean(samples) -1556 new_obs = Obs([samples - mean], [name], idl=idl, means=[mean]) -1557 new_obs._value = jacks[0] -1558 return new_obs -1559 -1560 -1561def merge_obs(list_of_obs): -1562 """Combine all observables in list_of_obs into one new observable -1563 -1564 Parameters -1565 ---------- -1566 list_of_obs : list -1567 list of the Obs object to be combined -1568 -1569 Notes -1570 ----- -1571 It is not possible to combine obs which are based on the same replicum -1572 """ -1573 replist = [item for obs in list_of_obs for item in obs.names] -1574 if (len(replist) == len(set(replist))) is False: -1575 raise Exception('list_of_obs contains duplicate replica: %s' % (str(replist))) -1576 if any([len(o.cov_names) for o in list_of_obs]): -1577 raise Exception('Not possible to merge data that contains covobs!') -1578 new_dict = {} -1579 idl_dict = {} -1580 for o in list_of_obs: -1581 new_dict.update({key: o.deltas.get(key, 0) + o.r_values.get(key, 0) -1582 for key in set(o.deltas) | set(o.r_values)}) -1583 idl_dict.update({key: o.idl.get(key, 0) for key in set(o.deltas)}) -1584 -1585 names = sorted(new_dict.keys()) -1586 o = Obs([new_dict[name] for name in names], names, idl=[idl_dict[name] for name in names]) -1587 o.reweighted = np.max([oi.reweighted for oi in list_of_obs]) -1588 return o -1589 -1590 -1591def cov_Obs(means, cov, name, grad=None): -1592 """Create an Obs based on mean(s) and a covariance matrix -1593 -1594 Parameters -1595 ---------- -1596 mean : list of floats or float -1597 N mean value(s) of the new Obs -1598 cov : list or array -1599 2d (NxN) Covariance matrix, 1d diagonal entries or 0d covariance -1600 name : str -1601 identifier for the covariance matrix -1602 grad : list or array -1603 Gradient of the Covobs wrt. the means belonging to cov. 
-1604 """ -1605 -1606 def covobs_to_obs(co): -1607 """Make an Obs out of a Covobs -1608 -1609 Parameters -1610 ---------- -1611 co : Covobs -1612 Covobs to be embedded into the Obs -1613 """ -1614 o = Obs([], [], means=[]) -1615 o._value = co.value -1616 o.names.append(co.name) -1617 o._covobs[co.name] = co -1618 o._dvalue = np.sqrt(co.errsq()) -1619 return o -1620 -1621 ol = [] -1622 if isinstance(means, (float, int)): -1623 means = [means] -1624 -1625 for i in range(len(means)): -1626 ol.append(covobs_to_obs(Covobs(means[i], cov, name, pos=i, grad=grad))) -1627 if ol[0].covobs[name].N != len(means): -1628 raise Exception('You have to provide %d mean values!' % (ol[0].N)) -1629 if len(ol) == 1: -1630 return ol[0] -1631 return ol +1533 +1534def import_jackknife(jacks, name, idl=None): +1535 """Imports jackknife samples and returns an Obs +1536 +1537 Parameters +1538 ---------- +1539 jacks : numpy.ndarray +1540 numpy array containing the mean value as zeroth entry and +1541 the N jackknife samples as first to Nth entry. +1542 name : str +1543 name of the ensemble the samples are defined on. +1544 """ +1545 length = len(jacks) - 1 +1546 prj = (np.ones((length, length)) - (length - 1) * np.identity(length)) +1547 samples = jacks[1:] @ prj +1548 mean = np.mean(samples) +1549 new_obs = Obs([samples - mean], [name], idl=idl, means=[mean]) +1550 new_obs._value = jacks[0] +1551 return new_obs +1552 +1553 +1554def merge_obs(list_of_obs): +1555 """Combine all observables in list_of_obs into one new observable +1556 +1557 Parameters +1558 ---------- +1559 list_of_obs : list +1560 list of the Obs object to be combined +1561 +1562 Notes +1563 ----- +1564 It is not possible to combine obs which are based on the same replicum +1565 """ +1566 replist = [item for obs in list_of_obs for item in obs.names] +1567 if (len(replist) == len(set(replist))) is False: +1568 raise Exception('list_of_obs contains duplicate replica: %s' % (str(replist))) +1569 if any([len(o.cov_names) for o in list_of_obs]): +1570 raise Exception('Not possible to merge data that contains covobs!') +1571 new_dict = {} +1572 idl_dict = {} +1573 for o in list_of_obs: +1574 new_dict.update({key: o.deltas.get(key, 0) + o.r_values.get(key, 0) +1575 for key in set(o.deltas) | set(o.r_values)}) +1576 idl_dict.update({key: o.idl.get(key, 0) for key in set(o.deltas)}) +1577 +1578 names = sorted(new_dict.keys()) +1579 o = Obs([new_dict[name] for name in names], names, idl=[idl_dict[name] for name in names]) +1580 o.reweighted = np.max([oi.reweighted for oi in list_of_obs]) +1581 return o +1582 +1583 +1584def cov_Obs(means, cov, name, grad=None): +1585 """Create an Obs based on mean(s) and a covariance matrix +1586 +1587 Parameters +1588 ---------- +1589 mean : list of floats or float +1590 N mean value(s) of the new Obs +1591 cov : list or array +1592 2d (NxN) Covariance matrix, 1d diagonal entries or 0d covariance +1593 name : str +1594 identifier for the covariance matrix +1595 grad : list or array +1596 Gradient of the Covobs wrt. the means belonging to cov. 
+1597 """ +1598 +1599 def covobs_to_obs(co): +1600 """Make an Obs out of a Covobs +1601 +1602 Parameters +1603 ---------- +1604 co : Covobs +1605 Covobs to be embedded into the Obs +1606 """ +1607 o = Obs([], [], means=[]) +1608 o._value = co.value +1609 o.names.append(co.name) +1610 o._covobs[co.name] = co +1611 o._dvalue = np.sqrt(co.errsq()) +1612 return o +1613 +1614 ol = [] +1615 if isinstance(means, (float, int)): +1616 means = [means] +1617 +1618 for i in range(len(means)): +1619 ol.append(covobs_to_obs(Covobs(means[i], cov, name, pos=i, grad=grad))) +1620 if ol[0].covobs[name].N != len(means): +1621 raise Exception('You have to provide %d mean values!' % (ol[0].N)) +1622 if len(ol) == 1: +1623 return ol[0] +1624 return ol +1625 +1626 +1627def _determine_gap(o, e_content, e_name): +1628 gaps = [] +1629 for r_name in e_content[e_name]: +1630 if isinstance(o.idl[r_name], range): +1631 gaps.append(o.idl[r_name].step) +1632 else: +1633 gaps.append(np.min(np.diff(o.idl[r_name]))) +1634 +1635 gap = min(gaps) +1636 if not np.all([gi % gap == 0 for gi in gaps]): +1637 raise Exception(f"Replica for ensemble {e_name} do not have a common spacing.", gaps) +1638 +1639 return gap @@ -2073,635 +2081,624 @@ 239 _parse_kwarg('N_sigma') 240 241 for e, e_name in enumerate(self.mc_names): -242 r_length = [] -243 for r_name in e_content[e_name]: -244 if isinstance(self.idl[r_name], range): -245 r_length.append(len(self.idl[r_name])) -246 else: -247 r_length.append((self.idl[r_name][-1] - self.idl[r_name][0] + 1)) -248 -249 e_N = np.sum([self.shape[r_name] for r_name in e_content[e_name]]) -250 w_max = max(r_length) // 2 -251 e_gamma[e_name] = np.zeros(w_max) -252 self.e_rho[e_name] = np.zeros(w_max) -253 self.e_drho[e_name] = np.zeros(w_max) -254 -255 for r_name in e_content[e_name]: -256 e_gamma[e_name] += self._calc_gamma(self.deltas[r_name], self.idl[r_name], self.shape[r_name], w_max, fft) -257 -258 gamma_div = np.zeros(w_max) -259 for r_name in e_content[e_name]: -260 gamma_div += self._calc_gamma(np.ones((self.shape[r_name])), self.idl[r_name], self.shape[r_name], w_max, fft) -261 gamma_div[gamma_div < 1] = 1.0 -262 e_gamma[e_name] /= gamma_div[:w_max] -263 -264 if np.abs(e_gamma[e_name][0]) < 10 * np.finfo(float).tiny: # Prevent division by zero -265 self.e_tauint[e_name] = 0.5 -266 self.e_dtauint[e_name] = 0.0 -267 self.e_dvalue[e_name] = 0.0 -268 self.e_ddvalue[e_name] = 0.0 -269 self.e_windowsize[e_name] = 0 -270 continue -271 -272 gaps = [] -273 for r_name in e_content[e_name]: -274 if isinstance(self.idl[r_name], range): -275 gaps.append(1) -276 else: -277 gaps.append(np.min(np.diff(self.idl[r_name]))) -278 -279 if not np.all([gi == gaps[0] for gi in gaps]): -280 raise Exception(f"Replica for ensemble {e_name} are not equally spaced.", gaps) -281 else: -282 gapsize = gaps[0] -283 -284 self.e_rho[e_name] = e_gamma[e_name][:w_max] / e_gamma[e_name][0] -285 self.e_n_tauint[e_name] = np.cumsum(np.concatenate(([0.5], self.e_rho[e_name][1:]))) -286 # Make sure no entry of tauint is smaller than 0.5 -287 self.e_n_tauint[e_name][self.e_n_tauint[e_name] <= 0.5] = 0.5 + np.finfo(np.float64).eps -288 # hep-lat/0306017 eq. 
(42) -289 self.e_n_dtauint[e_name] = self.e_n_tauint[e_name] * 2 * np.sqrt(np.abs(np.arange(w_max) / gapsize + 0.5 - self.e_n_tauint[e_name]) / e_N) -290 self.e_n_dtauint[e_name][0] = 0.0 -291 -292 def _compute_drho(i): -293 tmp = (self.e_rho[e_name][i + 1:w_max] -294 + np.concatenate([self.e_rho[e_name][i - 1:None if i - w_max // 2 < 0 else 2 * (i - w_max // 2):-1], -295 self.e_rho[e_name][1:max(1, w_max - 2 * i)]]) -296 - 2 * self.e_rho[e_name][i] * self.e_rho[e_name][1:w_max - i]) -297 self.e_drho[e_name][i] = np.sqrt(np.sum(tmp ** 2) / e_N) -298 -299 if self.tau_exp[e_name] > 0: -300 _compute_drho(gapsize) -301 texp = self.tau_exp[e_name] -302 # Critical slowing down analysis -303 if w_max // 2 <= 1: -304 raise Exception("Need at least 8 samples for tau_exp error analysis") -305 for n in range(gapsize, w_max // 2, gapsize): -306 _compute_drho(n + gapsize) -307 if (self.e_rho[e_name][n] - self.N_sigma[e_name] * self.e_drho[e_name][n]) < 0 or n >= w_max // 2 - 2: -308 # Bias correction hep-lat/0306017 eq. (49) included -309 self.e_tauint[e_name] = self.e_n_tauint[e_name][n] * (1 + (2 * n / gapsize + 1) / e_N) / (1 + 1 / e_N) + texp * np.abs(self.e_rho[e_name][n + 1]) # The absolute makes sure, that the tail contribution is always positive -310 self.e_dtauint[e_name] = np.sqrt(self.e_n_dtauint[e_name][n] ** 2 + texp ** 2 * self.e_drho[e_name][n + 1] ** 2) -311 # Error of tau_exp neglected so far, missing term: self.e_rho[e_name][n + 1] ** 2 * d_tau_exp ** 2 -312 self.e_dvalue[e_name] = np.sqrt(2 * self.e_tauint[e_name] * e_gamma[e_name][0] * (1 + 1 / e_N) / e_N) -313 self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt((n / gapsize + 0.5) / e_N) -314 self.e_windowsize[e_name] = n -315 break -316 else: -317 if self.S[e_name] == 0.0: -318 self.e_tauint[e_name] = 0.5 -319 self.e_dtauint[e_name] = 0.0 -320 self.e_dvalue[e_name] = np.sqrt(e_gamma[e_name][0] / (e_N - 1)) -321 self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt(0.5 / e_N) -322 self.e_windowsize[e_name] = 0 -323 else: -324 # Standard automatic windowing procedure -325 tau = self.S[e_name] / np.log((2 * self.e_n_tauint[e_name][gapsize::gapsize] + 1) / (2 * self.e_n_tauint[e_name][gapsize::gapsize] - 1)) -326 g_w = np.exp(- np.arange(1, len(tau) + 1) / tau) - tau / np.sqrt(np.arange(1, len(tau) + 1) * e_N) -327 for n in range(1, w_max // gapsize): -328 if g_w[n - 1] < 0 or n >= w_max // gapsize - 1: -329 _compute_drho(gapsize * n) -330 n *= gapsize -331 self.e_tauint[e_name] = self.e_n_tauint[e_name][n] * (1 + (2 * n / gapsize + 1) / e_N) / (1 + 1 / e_N) # Bias correction hep-lat/0306017 eq. 
(49) -332 self.e_dtauint[e_name] = self.e_n_dtauint[e_name][n] -333 self.e_dvalue[e_name] = np.sqrt(2 * self.e_tauint[e_name] * e_gamma[e_name][0] * (1 + 1 / e_N) / e_N) -334 self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt((n / gapsize + 0.5) / e_N) -335 self.e_windowsize[e_name] = n -336 break -337 -338 self._dvalue += self.e_dvalue[e_name] ** 2 -339 self.ddvalue += (self.e_dvalue[e_name] * self.e_ddvalue[e_name]) ** 2 -340 -341 for e_name in self.cov_names: -342 self.e_dvalue[e_name] = np.sqrt(self.covobs[e_name].errsq()) -343 self.e_ddvalue[e_name] = 0 -344 self._dvalue += self.e_dvalue[e_name]**2 -345 -346 self._dvalue = np.sqrt(self._dvalue) -347 if self._dvalue == 0.0: -348 self.ddvalue = 0.0 -349 else: -350 self.ddvalue = np.sqrt(self.ddvalue) / self._dvalue -351 return -352 -353 gm = gamma_method -354 -355 def _calc_gamma(self, deltas, idx, shape, w_max, fft): -356 """Calculate Gamma_{AA} from the deltas, which are defined on idx. -357 idx is assumed to be a contiguous range (possibly with a stepsize != 1) -358 -359 Parameters -360 ---------- -361 deltas : list -362 List of fluctuations -363 idx : list -364 List or range of configurations on which the deltas are defined. -365 shape : int -366 Number of configurations in idx. -367 w_max : int -368 Upper bound for the summation window. -369 fft : bool -370 determines whether the fft algorithm is used for the computation -371 of the autocorrelation function. -372 """ -373 gamma = np.zeros(w_max) -374 deltas = _expand_deltas(deltas, idx, shape) -375 new_shape = len(deltas) -376 if fft: -377 max_gamma = min(new_shape, w_max) -378 # The padding for the fft has to be even -379 padding = new_shape + max_gamma + (new_shape + max_gamma) % 2 -380 gamma[:max_gamma] += np.fft.irfft(np.abs(np.fft.rfft(deltas, padding)) ** 2)[:max_gamma] -381 else: -382 for n in range(w_max): -383 if new_shape - n >= 0: -384 gamma[n] += deltas[0:new_shape - n].dot(deltas[n:new_shape]) -385 -386 return gamma -387 -388 def details(self, ens_content=True): -389 """Output detailed properties of the Obs. -390 -391 Parameters -392 ---------- -393 ens_content : bool -394 print details about the ensembles and replica if true. 
-395 """ -396 if self.tag is not None: -397 print("Description:", self.tag) -398 if not hasattr(self, 'e_dvalue'): -399 print('Result\t %3.8e' % (self.value)) -400 else: -401 if self.value == 0.0: -402 percentage = np.nan -403 else: -404 percentage = np.abs(self._dvalue / self.value) * 100 -405 print('Result\t %3.8e +/- %3.8e +/- %3.8e (%3.3f%%)' % (self.value, self._dvalue, self.ddvalue, percentage)) -406 if len(self.e_names) > 1: -407 print(' Ensemble errors:') -408 e_content = self.e_content -409 for e_name in self.mc_names: -410 if isinstance(self.idl[e_content[e_name][0]], range): -411 gap = self.idl[e_content[e_name][0]].step +242 gapsize = _determine_gap(self, e_content, e_name) +243 +244 r_length = [] +245 for r_name in e_content[e_name]: +246 if isinstance(self.idl[r_name], range): +247 r_length.append(len(self.idl[r_name]) * self.idl[r_name].step // gapsize) +248 else: +249 r_length.append((self.idl[r_name][-1] - self.idl[r_name][0] + 1) // gapsize) +250 +251 e_N = np.sum([self.shape[r_name] for r_name in e_content[e_name]]) +252 w_max = max(r_length) // 2 +253 e_gamma[e_name] = np.zeros(w_max) +254 self.e_rho[e_name] = np.zeros(w_max) +255 self.e_drho[e_name] = np.zeros(w_max) +256 +257 for r_name in e_content[e_name]: +258 e_gamma[e_name] += self._calc_gamma(self.deltas[r_name], self.idl[r_name], self.shape[r_name], w_max, fft, gapsize) +259 +260 gamma_div = np.zeros(w_max) +261 for r_name in e_content[e_name]: +262 gamma_div += self._calc_gamma(np.ones((self.shape[r_name])), self.idl[r_name], self.shape[r_name], w_max, fft, gapsize) +263 gamma_div[gamma_div < 1] = 1.0 +264 e_gamma[e_name] /= gamma_div[:w_max] +265 +266 if np.abs(e_gamma[e_name][0]) < 10 * np.finfo(float).tiny: # Prevent division by zero +267 self.e_tauint[e_name] = 0.5 +268 self.e_dtauint[e_name] = 0.0 +269 self.e_dvalue[e_name] = 0.0 +270 self.e_ddvalue[e_name] = 0.0 +271 self.e_windowsize[e_name] = 0 +272 continue +273 +274 self.e_rho[e_name] = e_gamma[e_name][:w_max] / e_gamma[e_name][0] +275 self.e_n_tauint[e_name] = np.cumsum(np.concatenate(([0.5], self.e_rho[e_name][1:]))) +276 # Make sure no entry of tauint is smaller than 0.5 +277 self.e_n_tauint[e_name][self.e_n_tauint[e_name] <= 0.5] = 0.5 + np.finfo(np.float64).eps +278 # hep-lat/0306017 eq. (42) +279 self.e_n_dtauint[e_name] = self.e_n_tauint[e_name] * 2 * np.sqrt(np.abs(np.arange(w_max) + 0.5 - self.e_n_tauint[e_name]) / e_N) +280 self.e_n_dtauint[e_name][0] = 0.0 +281 +282 def _compute_drho(i): +283 tmp = (self.e_rho[e_name][i + 1:w_max] +284 + np.concatenate([self.e_rho[e_name][i - 1:None if i - w_max // 2 < 0 else 2 * (i - w_max // 2):-1], +285 self.e_rho[e_name][1:max(1, w_max - 2 * i)]]) +286 - 2 * self.e_rho[e_name][i] * self.e_rho[e_name][1:w_max - i]) +287 self.e_drho[e_name][i] = np.sqrt(np.sum(tmp ** 2) / e_N) +288 +289 if self.tau_exp[e_name] > 0: +290 _compute_drho(1) +291 texp = self.tau_exp[e_name] +292 # Critical slowing down analysis +293 if w_max // 2 <= 1: +294 raise Exception("Need at least 8 samples for tau_exp error analysis") +295 for n in range(1, w_max // 2): +296 _compute_drho(n + 1) +297 if (self.e_rho[e_name][n] - self.N_sigma[e_name] * self.e_drho[e_name][n]) < 0 or n >= w_max // 2 - 2: +298 # Bias correction hep-lat/0306017 eq. 
(49) included +299 self.e_tauint[e_name] = self.e_n_tauint[e_name][n] * (1 + (2 * n + 1) / e_N) / (1 + 1 / e_N) + texp * np.abs(self.e_rho[e_name][n + 1]) # The absolute value makes sure that the tail contribution is always positive +300 self.e_dtauint[e_name] = np.sqrt(self.e_n_dtauint[e_name][n] ** 2 + texp ** 2 * self.e_drho[e_name][n + 1] ** 2) +301 # Error of tau_exp neglected so far, missing term: self.e_rho[e_name][n + 1] ** 2 * d_tau_exp ** 2 +302 self.e_dvalue[e_name] = np.sqrt(2 * self.e_tauint[e_name] * e_gamma[e_name][0] * (1 + 1 / e_N) / e_N) +303 self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt((n + 0.5) / e_N) +304 self.e_windowsize[e_name] = n +305 break +306 else: +307 if self.S[e_name] == 0.0: +308 self.e_tauint[e_name] = 0.5 +309 self.e_dtauint[e_name] = 0.0 +310 self.e_dvalue[e_name] = np.sqrt(e_gamma[e_name][0] / (e_N - 1)) +311 self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt(0.5 / e_N) +312 self.e_windowsize[e_name] = 0 +313 else: +314 # Standard automatic windowing procedure +315 tau = self.S[e_name] / np.log((2 * self.e_n_tauint[e_name][1:] + 1) / (2 * self.e_n_tauint[e_name][1:] - 1)) +316 g_w = np.exp(- np.arange(1, len(tau) + 1) / tau) - tau / np.sqrt(np.arange(1, len(tau) + 1) * e_N) +317 for n in range(1, w_max): +318 if g_w[n - 1] < 0 or n >= w_max - 1: +319 _compute_drho(n) +320 self.e_tauint[e_name] = self.e_n_tauint[e_name][n] * (1 + (2 * n + 1) / e_N) / (1 + 1 / e_N) # Bias correction hep-lat/0306017 eq. (49) +321 self.e_dtauint[e_name] = self.e_n_dtauint[e_name][n] +322 self.e_dvalue[e_name] = np.sqrt(2 * self.e_tauint[e_name] * e_gamma[e_name][0] * (1 + 1 / e_N) / e_N) +323 self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt((n + 0.5) / e_N) +324 self.e_windowsize[e_name] = n +325 break +326 +327 self._dvalue += self.e_dvalue[e_name] ** 2 +328 self.ddvalue += (self.e_dvalue[e_name] * self.e_ddvalue[e_name]) ** 2 +329 +330 for e_name in self.cov_names: +331 self.e_dvalue[e_name] = np.sqrt(self.covobs[e_name].errsq()) +332 self.e_ddvalue[e_name] = 0 +333 self._dvalue += self.e_dvalue[e_name]**2 +334 +335 self._dvalue = np.sqrt(self._dvalue) +336 if self._dvalue == 0.0: +337 self.ddvalue = 0.0 +338 else: +339 self.ddvalue = np.sqrt(self.ddvalue) / self._dvalue +340 return +341 +342 gm = gamma_method +343 +344 def _calc_gamma(self, deltas, idx, shape, w_max, fft, gapsize): +345 """Calculate Gamma_{AA} from the deltas, which are defined on idx. +346 idx is assumed to be a contiguous range (possibly with a stepsize != 1) +347 +348 Parameters +349 ---------- +350 deltas : list +351 List of fluctuations +352 idx : list +353 List or range of configurations on which the deltas are defined. +354 shape : int +355 Number of configurations in idx. +356 w_max : int +357 Upper bound for the summation window. +358 fft : bool +359 determines whether the fft algorithm is used for the computation +360 of the autocorrelation function. +361 gapsize : int +362 The target distance between two configurations. If longer distances +363 are found in idx, the data is expanded. +364 """ +365 gamma = np.zeros(w_max) +366 deltas = _expand_deltas(deltas, idx, shape, gapsize) +367 new_shape = len(deltas) +368 if fft: +369 max_gamma = min(new_shape, w_max) +370 # The padding for the fft has to be even +371 padding = new_shape + max_gamma + (new_shape + max_gamma) % 2 +372 gamma[:max_gamma] += np.fft.irfft(np.abs(np.fft.rfft(deltas, padding)) ** 2)[:max_gamma] +373 else: +374 for n in range(w_max): +375 if new_shape - n >= 0: +376 gamma[n] += deltas[0:new_shape - n].dot(deltas[n:new_shape]) +377 +378 return gamma +379
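As an aside to the _calc_gamma listing just above: the fft branch and the sliding-window dot product compute the same autocorrelation function, via the Wiener-Khinchin relation plus sufficient zero padding. A minimal, self-contained numpy sketch of that equivalence (the array deltas here is synthetic test data, not anything produced by the library):

    import numpy as np

    rng = np.random.default_rng(7)
    deltas = rng.normal(0.0, 1.0, 500)  # synthetic mean-zero fluctuations
    w_max = 250

    # Direct evaluation: Gamma(n) = sum_i delta_i * delta_(i+n)
    gamma_direct = np.array([deltas[:500 - n].dot(deltas[n:]) for n in range(w_max)])

    # FFT evaluation with even zero padding, mirroring the branch above
    padding = 500 + w_max + (500 + w_max) % 2
    gamma_fft = np.fft.irfft(np.abs(np.fft.rfft(deltas, padding)) ** 2)[:w_max]

    print(np.allclose(gamma_direct, gamma_fft))  # True up to rounding noise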
+380 def details(self, ens_content=True): +381 """Output detailed properties of the Obs. +382 +383 Parameters +384 ---------- +385 ens_content : bool +386 print details about the ensembles and replica if true. +387 """ +388 if self.tag is not None: +389 print("Description:", self.tag) +390 if not hasattr(self, 'e_dvalue'): +391 print('Result\t %3.8e' % (self.value)) +392 else: +393 if self.value == 0.0: +394 percentage = np.nan +395 else: +396 percentage = np.abs(self._dvalue / self.value) * 100 +397 print('Result\t %3.8e +/- %3.8e +/- %3.8e (%3.3f%%)' % (self.value, self._dvalue, self.ddvalue, percentage)) +398 if len(self.e_names) > 1: +399 print(' Ensemble errors:') +400 e_content = self.e_content +401 for e_name in self.mc_names: +402 gap = _determine_gap(self, e_content, e_name) +403 +404 if len(self.e_names) > 1: +405 print('', e_name, '\t %3.6e +/- %3.6e' % (self.e_dvalue[e_name], self.e_ddvalue[e_name])) +406 tau_string = " \N{GREEK SMALL LETTER TAU}_int\t " + _format_uncertainty(self.e_tauint[e_name], self.e_dtauint[e_name]) +407 tau_string += f" in units of {gap} config" +408 if gap > 1: +409 tau_string += "s" +410 if self.tau_exp[e_name] > 0: +411 tau_string = f"{tau_string: <45}" + '\t(\N{GREEK SMALL LETTER TAU}_exp=%3.2f, N_\N{GREEK SMALL LETTER SIGMA}=%1.0i)' % (self.tau_exp[e_name], self.N_sigma[e_name]) 412 else: -413 gap = np.min(np.diff(self.idl[e_content[e_name][0]])) -414 -415 if len(self.e_names) > 1: -416 print('', e_name, '\t %3.6e +/- %3.6e' % (self.e_dvalue[e_name], self.e_ddvalue[e_name])) -417 tau_string = " \N{GREEK SMALL LETTER TAU}_int\t " + _format_uncertainty(self.e_tauint[e_name], self.e_dtauint[e_name]) -418 tau_string += f" in units of {gap} config" -419 if gap > 1: -420 tau_string += "s" -421 if self.tau_exp[e_name] > 0: -422 tau_string = f"{tau_string: <45}" + '\t(\N{GREEK SMALL LETTER TAU}_exp=%3.2f, N_\N{GREEK SMALL LETTER SIGMA}=%1.0i)' % (self.tau_exp[e_name], self.N_sigma[e_name]) -423 else: -424 tau_string = f"{tau_string: <45}" + '\t(S=%3.2f)' % (self.S[e_name]) -425 print(tau_string) -426 for e_name in self.cov_names: -427 print('', e_name, '\t %3.8e' % (self.e_dvalue[e_name])) -428 if ens_content is True: -429 if len(self.e_names) == 1: -430 print(self.N, 'samples in', len(self.e_names), 'ensemble:') -431 else: -432 print(self.N, 'samples in', len(self.e_names), 'ensembles:') -433 my_string_list = [] -434 for key, value in sorted(self.e_content.items()): -435 if key not in self.covobs: -436 my_string = ' ' + "\u00B7 Ensemble '" + key + "' " -437 if len(value) == 1: -438 my_string += f': {self.shape[value[0]]} configurations' -439 if isinstance(self.idl[value[0]], range): -440 my_string += f' (from {self.idl[value[0]].start} to {self.idl[value[0]][-1]}' + int(self.idl[value[0]].step != 1) * f' in steps of {self.idl[value[0]].step}' + ')' -441 else: -442 my_string += f' (irregular range from {self.idl[value[0]][0]} to {self.idl[value[0]][-1]})' -443 else: -444 sublist = [] -445 for v in value: -446 my_substring = ' ' + "\u00B7 Replicum '" + v[len(key) + 1:] + "' "
-447 my_substring += f': {self.shape[v]} configurations' -448 if isinstance(self.idl[v], range): -449 my_substring += f' (from {self.idl[v].start} to {self.idl[v][-1]}' + int(self.idl[v].step != 1) * f' in steps of {self.idl[v].step}' + ')' -450 else: -451 my_substring += f' (irregular range from {self.idl[v][0]} to {self.idl[v][-1]})' -452 sublist.append(my_substring) -453 -454 my_string += '\n' + '\n'.join(sublist) -455 else: -456 my_string = ' ' + "\u00B7 Covobs '" + key + "' " -457 my_string_list.append(my_string) -458 print('\n'.join(my_string_list)) -459 -460 def reweight(self, weight): -461 """Reweight the obs with given reweighting factors. -462 -463 Parameters -464 ---------- -465 weight : Obs -466 Reweighting factor. An Observable that has to be defined on a superset of the -467 configurations in obs[i].idl for all i. -468 all_configs : bool -469 if True, the reweighted observables are normalized by the average of -470 the reweighting factor on all configurations in weight.idl and not -471 on the configurations in obs[i].idl. Default False. -472 """ -473 return reweight(weight, [self])[0] -474 -475 def is_zero_within_error(self, sigma=1): -476 """Checks whether the observable is zero within 'sigma' standard errors. -477 -478 Parameters -479 ---------- -480 sigma : int -481 Number of standard errors used for the check. -482 -483 Only works properly when the gamma method has been run. -484 """ -485 return self.is_zero() or np.abs(self.value) <= sigma * self._dvalue -486 -487 def is_zero(self, atol=1e-10): -488 """Checks whether the observable is zero within a given tolerance. -489 -490 Parameters -491 ---------- -492 atol : float -493 Absolute tolerance (for details see numpy documentation). -494 """ -495 return np.isclose(0.0, self.value, 1e-14, atol) and all(np.allclose(0.0, delta, 1e-14, atol) for delta in self.deltas.values()) and all(np.allclose(0.0, delta.errsq(), 1e-14, atol) for delta in self.covobs.values())
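The removed listings above end with the old reweight, is_zero_within_error and is_zero implementations; their call signatures are unchanged in the new version that follows. A small usage sketch with synthetic data (the pe.Obs constructor and the ensemble name are standard pyerrors conventions, assumed here and not part of this listing):

    import numpy as np
    import pyerrors as pe

    rng = np.random.default_rng(1)
    obs = pe.Obs([rng.normal(1.0, 0.2, 1000)], ['ensemble1'])
    w = pe.Obs([np.abs(rng.normal(1.0, 0.05, 1000))], ['ensemble1'])  # synthetic weights

    reweighted = obs.reweight(w)  # delegates to the module-level reweight(weight, [self])
    reweighted.gamma_method()
    print(reweighted)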
+413 tau_string = f"{tau_string: <45}" + '\t(S=%3.2f)' % (self.S[e_name]) +414 print(tau_string) +415 for e_name in self.cov_names: +416 print('', e_name, '\t %3.8e' % (self.e_dvalue[e_name])) +417 if ens_content is True: +418 if len(self.e_names) == 1: +419 print(self.N, 'samples in', len(self.e_names), 'ensemble:') +420 else: +421 print(self.N, 'samples in', len(self.e_names), 'ensembles:') +422 my_string_list = [] +423 for key, value in sorted(self.e_content.items()): +424 if key not in self.covobs: +425 my_string = ' ' + "\u00B7 Ensemble '" + key + "' " +426 if len(value) == 1: +427 my_string += f': {self.shape[value[0]]} configurations' +428 if isinstance(self.idl[value[0]], range): +429 my_string += f' (from {self.idl[value[0]].start} to {self.idl[value[0]][-1]}' + int(self.idl[value[0]].step != 1) * f' in steps of {self.idl[value[0]].step}' + ')' +430 else: +431 my_string += f' (irregular range from {self.idl[value[0]][0]} to {self.idl[value[0]][-1]})' +432 else: +433 sublist = [] +434 for v in value: +435 my_substring = ' ' + "\u00B7 Replicum '" + v[len(key) + 1:] + "' " +436 my_substring += f': {self.shape[v]} configurations' +437 if isinstance(self.idl[v], range): +438 my_substring += f' (from {self.idl[v].start} to {self.idl[v][-1]}' + int(self.idl[v].step != 1) * f' in steps of {self.idl[v].step}' + ')' +439 else: +440 my_substring += f' (irregular range from {self.idl[v][0]} to {self.idl[v][-1]})' +441 sublist.append(my_substring) +442 +443 my_string += '\n' + '\n'.join(sublist) +444 else: +445 my_string = ' ' + "\u00B7 Covobs '" + key + "' " +446 my_string_list.append(my_string) +447 print('\n'.join(my_string_list)) +448 +449 def reweight(self, weight): +450 """Reweight the obs with given reweighting factors. +451 +452 Parameters +453 ---------- +454 weight : Obs +455 Reweighting factor. An Observable that has to be defined on a superset of the +456 configurations in obs[i].idl for all i. +457 all_configs : bool +458 if True, the reweighted observables are normalized by the average of +459 the reweighting factor on all configurations in weight.idl and not +460 on the configurations in obs[i].idl. Default False. +461 """ +462 return reweight(weight, [self])[0] +463 +464 def is_zero_within_error(self, sigma=1): +465 """Checks whether the observable is zero within 'sigma' standard errors. +466 +467 Parameters +468 ---------- +469 sigma : int +470 Number of standard errors used for the check. +471 +472 Only works properly when the gamma method has been run. +473 """ +474 return self.is_zero() or np.abs(self.value) <= sigma * self._dvalue +475 +476 def is_zero(self, atol=1e-10): +477 """Checks whether the observable is zero within a given tolerance. +478 +479 Parameters +480 ---------- +481 atol : float +482 Absolute tolerance (for details see numpy documentation). +483 """ +484 return np.isclose(0.0, self.value, 1e-14, atol) and all(np.allclose(0.0, delta, 1e-14, atol) for delta in self.deltas.values()) and all(np.allclose(0.0, delta.errsq(), 1e-14, atol) for delta in self.covobs.values()) +485
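The two predicates just listed are complementary: is_zero makes an exact, tolerance-based statement about the value and all fluctuations, while is_zero_within_error makes a statistical one and therefore needs _dvalue from a prior gamma_method call. A short sketch with synthetic data (again assuming the standard pe.Obs constructor):

    import numpy as np
    import pyerrors as pe

    rng = np.random.default_rng(2)
    obs = pe.Obs([rng.normal(0.01, 0.5, 2000)], ['ensemble1'])

    diff = obs - obs           # fluctuations cancel configuration by configuration
    print(diff.is_zero())      # True, no error analysis required

    obs.gamma_method()         # sets _dvalue, required for the sigma-based check
    print(obs.is_zero_within_error(sigma=2))  # True iff |value| <= 2 * dvalue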
+486 def plot_tauint(self, save=None): +487 """Plot integrated autocorrelation time for each ensemble. +488 +489 Parameters +490 ---------- +491 save : str +492 saves the figure to a file named 'save' if specified. +493 """ +494 if not hasattr(self, 'e_dvalue'): +495 raise Exception('Run the gamma method first.') 496 -497 def plot_tauint(self, save=None): -498 """Plot integrated autocorrelation time for each ensemble. -499 -500 Parameters -501 ---------- -502 save : str -503 saves the figure to a file named 'save' if specified. -504 """ -505 if not hasattr(self, 'e_dvalue'): -506 raise Exception('Run the gamma method first.') -507 -508 for e, e_name in enumerate(self.mc_names): -509 fig = plt.figure() -510 plt.xlabel(r'$W$') -511 plt.ylabel(r'$\tau_\mathrm{int}$') -512 length = int(len(self.e_n_tauint[e_name])) -513 if self.tau_exp[e_name] > 0: -514 base = self.e_n_tauint[e_name][self.e_windowsize[e_name]] -515 x_help = np.arange(2 * self.tau_exp[e_name]) -516 y_help = (x_help + 1) * np.abs(self.e_rho[e_name][self.e_windowsize[e_name] + 1]) * (1 - x_help / (2 * (2 * self.tau_exp[e_name] - 1))) + base -517 x_arr = np.arange(self.e_windowsize[e_name] + 1, self.e_windowsize[e_name] + 1 + 2 * self.tau_exp[e_name]) -518 plt.plot(x_arr, y_help, 'C' + str(e), linewidth=1, ls='--', marker=',') -519 plt.errorbar([self.e_windowsize[e_name] + 2 * self.tau_exp[e_name]], [self.e_tauint[e_name]], -520 yerr=[self.e_dtauint[e_name]], fmt='C' + str(e), linewidth=1, capsize=2, marker='o', mfc=plt.rcParams['axes.facecolor']) -521 xmax = self.e_windowsize[e_name] + 2 * self.tau_exp[e_name] + 1.5 -522 label = e_name + r', $\tau_\mathrm{exp}$=' + str(np.around(self.tau_exp[e_name], decimals=2)) -523 else: -524 label = e_name + ', S=' + str(np.around(self.S[e_name], decimals=2)) -525 xmax = max(10.5, 2 * self.e_windowsize[e_name] - 0.5) -526 -527 plt.errorbar(np.arange(length)[:int(xmax) + 1], self.e_n_tauint[e_name][:int(xmax) + 1], yerr=self.e_n_dtauint[e_name][:int(xmax) + 1], linewidth=1, capsize=2, label=label) -528 plt.axvline(x=self.e_windowsize[e_name], color='C' + str(e), alpha=0.5, marker=',', ls='--') -529 plt.legend() -530 plt.xlim(-0.5, xmax) -531 ylim = plt.ylim() -532 plt.ylim(bottom=0.0, top=max(1.0, ylim[1])) -533 plt.draw() -534 if save: -535 fig.savefig(save + "_" + str(e)) -536
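Both the old listing above and the new one require the error analysis to have run before plotting; the e_dvalue guard otherwise raises. A usage sketch with an artificially autocorrelated chain (the AR(1) construction is purely illustrative and not part of the library):

    import numpy as np
    import pyerrors as pe

    rng = np.random.default_rng(3)
    chain = np.zeros(5000)
    for i in range(1, 5000):             # AR(1) chain, so tau_int is well above 0.5
        chain[i] = 0.8 * chain[i - 1] + rng.normal()

    obs = pe.Obs([chain], ['ensemble1'])
    obs.gamma_method(S=2.0)              # automatic windowing must run first
    obs.plot_tauint()                    # dashed vertical line marks e_windowsize
    obs.plot_tauint(save='tauint')       # saves one figure per ensemble: 'tauint_0', ...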
-537 def plot_rho(self, save=None): -538 """Plot normalized autocorrelation function for each ensemble. -539 -540 Parameters -541 ---------- -542 save : str -543 saves the figure to a file named 'save' if specified. -544 """ -545 if not hasattr(self, 'e_dvalue'): -546 raise Exception('Run the gamma method first.') -547 for e, e_name in enumerate(self.mc_names): -548 fig = plt.figure() -549 plt.xlabel('W') -550 plt.ylabel('rho') -551 length = int(len(self.e_drho[e_name])) -552 plt.errorbar(np.arange(length), self.e_rho[e_name][:length], yerr=self.e_drho[e_name][:], linewidth=1, capsize=2) -553 plt.axvline(x=self.e_windowsize[e_name], color='r', alpha=0.25, ls='--', marker=',') -554 if self.tau_exp[e_name] > 0: -555 plt.plot([self.e_windowsize[e_name] + 1, self.e_windowsize[e_name] + 1 + 2 * self.tau_exp[e_name]], -556 [self.e_rho[e_name][self.e_windowsize[e_name] + 1], 0], 'k-', lw=1) -557 xmax = self.e_windowsize[e_name] + 2 * self.tau_exp[e_name] + 1.5 -558 plt.title('Rho ' + e_name + r', tau\_exp=' + str(np.around(self.tau_exp[e_name], decimals=2))) -559 else: -560 xmax = max(10.5, 2 * self.e_windowsize[e_name] - 0.5) -561 plt.title('Rho ' + e_name + ', S=' + str(np.around(self.S[e_name], decimals=2))) -562 plt.plot([-0.5, xmax], [0, 0], 'k--', lw=1) -563 plt.xlim(-0.5, xmax) -564 plt.draw() -565 if save: -566 fig.savefig(save + "_" + str(e)) -567 -568 def plot_rep_dist(self): -569 """Plot replica distribution for each ensemble with more than one replicum.""" -570 if not hasattr(self, 'e_dvalue'): -571 raise Exception('Run the gamma method first.') -572 for e, e_name in enumerate(self.mc_names): -573 if len(self.e_content[e_name]) == 1: -574 print('No replica distribution for a single replicum (', e_name, ')') -575 continue -576 r_length = [] -577 sub_r_mean = 0 -578 for r, r_name in enumerate(self.e_content[e_name]): -579 r_length.append(len(self.deltas[r_name])) -580 sub_r_mean += self.shape[r_name] * self.r_values[r_name] -581 e_N = np.sum(r_length) -582 sub_r_mean /= e_N -583 arr = np.zeros(len(self.e_content[e_name])) -584 for r, r_name in enumerate(self.e_content[e_name]): -585 arr[r] = (self.r_values[r_name] - sub_r_mean) / (self.e_dvalue[e_name] * np.sqrt(e_N / self.shape[r_name] - 1)) -586 plt.hist(arr, rwidth=0.8, bins=len(self.e_content[e_name])) -587 plt.title('Replica distribution ' + e_name + ' (mean=0, var=1)') -588 plt.draw() -589 -590 def plot_history(self, expand=True): -591 """Plot derived Monte Carlo history for each ensemble. -592 -593 Parameters -594 ---------- -595 expand : bool -596 show expanded history for irregular Monte Carlo chains (default: True). -597 """ -598 for e, e_name in enumerate(self.mc_names): -599 plt.figure() -600 r_length = [] -601 tmp = [] -602 tmp_expanded = [] -603 for r, r_name in enumerate(self.e_content[e_name]): -604 tmp.append(self.deltas[r_name] + self.r_values[r_name]) -605 if expand: -606 tmp_expanded.append(_expand_deltas(self.deltas[r_name], list(self.idl[r_name]), self.shape[r_name]) + self.r_values[r_name]) -607 r_length.append(len(tmp_expanded[-1])) -608 else: -609 r_length.append(len(tmp[-1])) -610 e_N = np.sum(r_length) -611 x = np.arange(e_N) -612 y_test = np.concatenate(tmp, axis=0) -613 if expand: -614 y = np.concatenate(tmp_expanded, axis=0) -615 else: -616 y = y_test -617 plt.errorbar(x, y, fmt='.', markersize=3) -618 plt.xlim(-0.5, e_N - 0.5) -619 plt.title(e_name + f'\nskew: {skew(y_test):.3f} (p={skewtest(y_test).pvalue:.3f}), kurtosis: {kurtosis(y_test):.3f} (p={kurtosistest(y_test).pvalue:.3f})') -620 plt.draw() -621
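plot_rho above displays the normalized autocorrelation function rho(W) = Gamma(W) / Gamma(0) together with the errors from _compute_drho and the chosen window. A brief sketch, using a toy moving-average series to generate visible autocorrelation (illustrative data only):

    import numpy as np
    import pyerrors as pe

    rng = np.random.default_rng(4)
    noise = rng.normal(0.0, 1.0, 4000)
    series = np.convolve(noise, np.ones(5) / 5, mode='same')  # correlated toy data

    obs = pe.Obs([series], ['ensemble1'])
    obs.gamma_method()
    obs.plot_rho()   # rho(W) with error bars; red dashed line at e_windowsize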
-622 def plot_piechart(self, save=None): -623 """Plot piechart which shows the fractional contribution of each -624 ensemble to the error and returns a dictionary containing the fractions. -625 -626 Parameters -627 ---------- -628 save : str -629 saves the figure to a file named 'save' if specified. -630 """ -631 if not hasattr(self, 'e_dvalue'): -632 raise Exception('Run the gamma method first.') -633 if np.isclose(0.0, self._dvalue, atol=1e-15): -634 raise Exception('Error is 0.0') -635 labels = self.e_names -636 sizes = [self.e_dvalue[name] ** 2 for name in labels] / self._dvalue ** 2 -637 fig1, ax1 = plt.subplots() -638 ax1.pie(sizes, labels=labels, startangle=90, normalize=True) -639 ax1.axis('equal') -640 plt.draw() -641 if save: -642 fig1.savefig(save) -643 -644 return dict(zip(self.e_names, sizes)) -645 -646 def dump(self, filename, datatype="json.gz", description="", **kwargs): -647 """Dump the Obs to a file 'filename' of chosen format. -648 -649 Parameters -650 ---------- -651 filename : str -652 name of the file to be saved. -653 datatype : str -654 Format of the exported file. Supported formats include -655 "json.gz" and "pickle" -656 description : str -657 Description for output file, only relevant for json.gz format. -658 path : str -659 specifies a custom path for the file (default '.') -660 """ -661 if 'path' in kwargs: -662 file_name = kwargs.get('path') + '/' + filename -663 else: -664 file_name = filename -665 -666 if datatype == "json.gz": -667 from .input.json import dump_to_json -668 dump_to_json([self], file_name, description=description) -669 elif datatype == "pickle": -670 with open(file_name + '.p', 'wb') as fb: -671 pickle.dump(self, fb) -672 else: -673 raise Exception("Unknown datatype " + str(datatype)) -674 -675 def export_jackknife(self): -676 """Export jackknife samples from the Obs +497 for e, e_name in enumerate(self.mc_names): +498 fig = plt.figure() +499 plt.xlabel(r'$W$') +500 plt.ylabel(r'$\tau_\mathrm{int}$') +501 length = int(len(self.e_n_tauint[e_name])) +502 if self.tau_exp[e_name] > 0: +503 base = self.e_n_tauint[e_name][self.e_windowsize[e_name]] +504 x_help = np.arange(2 * self.tau_exp[e_name]) +505 y_help = (x_help + 1) * np.abs(self.e_rho[e_name][self.e_windowsize[e_name] + 1]) * (1 - x_help / (2 * (2 * self.tau_exp[e_name] - 1))) + base +506 x_arr = np.arange(self.e_windowsize[e_name] + 1, self.e_windowsize[e_name] + 1 + 2 * self.tau_exp[e_name]) +507 plt.plot(x_arr, y_help, 'C' + str(e), linewidth=1, ls='--', marker=',') +508 plt.errorbar([self.e_windowsize[e_name] + 2 * self.tau_exp[e_name]], [self.e_tauint[e_name]], +509 yerr=[self.e_dtauint[e_name]], fmt='C' + str(e), linewidth=1, capsize=2, marker='o', mfc=plt.rcParams['axes.facecolor']) +510 xmax = self.e_windowsize[e_name] + 2 * self.tau_exp[e_name] + 1.5 +511 label = e_name + r', $\tau_\mathrm{exp}$=' + str(np.around(self.tau_exp[e_name], decimals=2)) +512 else: +513 label = e_name + ', S=' + str(np.around(self.S[e_name], decimals=2)) +514 xmax = max(10.5, 2 * self.e_windowsize[e_name] - 0.5) +515 +516 plt.errorbar(np.arange(length)[:int(xmax) + 1], self.e_n_tauint[e_name][:int(xmax) + 1], yerr=self.e_n_dtauint[e_name][:int(xmax) + 1], linewidth=1, capsize=2, label=label) +517 plt.axvline(x=self.e_windowsize[e_name], color='C' + str(e), alpha=0.5, marker=',', ls='--') +518 plt.legend() +519 plt.xlim(-0.5, xmax) +520 ylim = plt.ylim() +521 plt.ylim(bottom=0.0, top=max(1.0, ylim[1])) +522 plt.draw() +523 if save: +524 fig.savefig(save + "_" + str(e)) +525
+526 def plot_rho(self, save=None): +527 """Plot normalized autocorrelation function for each ensemble. +528 +529 Parameters +530 ---------- +531 save : str +532 saves the figure to a file named 'save' if specified. +533 """ +534 if not hasattr(self, 'e_dvalue'): +535 raise Exception('Run the gamma method first.') +536 for e, e_name in enumerate(self.mc_names): +537 fig = plt.figure() +538 plt.xlabel('W') +539 plt.ylabel('rho') +540 length = int(len(self.e_drho[e_name])) +541 plt.errorbar(np.arange(length), self.e_rho[e_name][:length], yerr=self.e_drho[e_name][:], linewidth=1, capsize=2) +542 plt.axvline(x=self.e_windowsize[e_name], color='r', alpha=0.25, ls='--', marker=',') +543 if self.tau_exp[e_name] > 0: +544 plt.plot([self.e_windowsize[e_name] + 1, self.e_windowsize[e_name] + 1 + 2 * self.tau_exp[e_name]], +545 [self.e_rho[e_name][self.e_windowsize[e_name] + 1], 0], 'k-', lw=1) +546 xmax = self.e_windowsize[e_name] + 2 * self.tau_exp[e_name] + 1.5 +547 plt.title('Rho ' + e_name + r', tau\_exp=' + str(np.around(self.tau_exp[e_name], decimals=2))) +548 else: +549 xmax = max(10.5, 2 * self.e_windowsize[e_name] - 0.5) +550 plt.title('Rho ' + e_name + ', S=' + str(np.around(self.S[e_name], decimals=2))) +551 plt.plot([-0.5, xmax], [0, 0], 'k--', lw=1) +552 plt.xlim(-0.5, xmax) +553 plt.draw() +554 if save: +555 fig.savefig(save + "_" + str(e)) +556 +557 def plot_rep_dist(self): +558 """Plot replica distribution for each ensemble with more than one replicum.""" +559 if not hasattr(self, 'e_dvalue'): +560 raise Exception('Run the gamma method first.') +561 for e, e_name in enumerate(self.mc_names): +562 if len(self.e_content[e_name]) == 1: +563 print('No replica distribution for a single replicum (', e_name, ')') +564 continue +565 r_length = [] +566 sub_r_mean = 0 +567 for r, r_name in enumerate(self.e_content[e_name]): +568 r_length.append(len(self.deltas[r_name])) +569 sub_r_mean += self.shape[r_name] * self.r_values[r_name] +570 e_N = np.sum(r_length) +571 sub_r_mean /= e_N +572 arr = np.zeros(len(self.e_content[e_name])) +573 for r, r_name in enumerate(self.e_content[e_name]): +574 arr[r] = (self.r_values[r_name] - sub_r_mean) / (self.e_dvalue[e_name] * np.sqrt(e_N / self.shape[r_name] - 1)) +575 plt.hist(arr, rwidth=0.8, bins=len(self.e_content[e_name])) +576 plt.title('Replica distribution ' + e_name + ' (mean=0, var=1)') +577 plt.draw() +578 +579 def plot_history(self, expand=True): +580 """Plot derived Monte Carlo history for each ensemble. +581 +582 Parameters +583 ---------- +584 expand : bool +585 show expanded history for irregular Monte Carlo chains (default: True). +586 """ +587 for e, e_name in enumerate(self.mc_names): +588 plt.figure() +589 r_length = [] +590 tmp = [] +591 tmp_expanded = [] +592 for r, r_name in enumerate(self.e_content[e_name]): +593 tmp.append(self.deltas[r_name] + self.r_values[r_name]) +594 if expand: +595 tmp_expanded.append(_expand_deltas(self.deltas[r_name], list(self.idl[r_name]), self.shape[r_name], 1) + self.r_values[r_name]) +596 r_length.append(len(tmp_expanded[-1])) +597 else: +598 r_length.append(len(tmp[-1])) +599 e_N = np.sum(r_length) +600 x = np.arange(e_N) +601 y_test = np.concatenate(tmp, axis=0) +602 if expand: +603 y = np.concatenate(tmp_expanded, axis=0) +604 else: +605 y = y_test +606 plt.errorbar(x, y, fmt='.', markersize=3) +607 plt.xlim(-0.5, e_N - 0.5) +608 plt.title(e_name + f'\nskew: {skew(y_test):.3f} (p={skewtest(y_test).pvalue:.3f}), kurtosis: {kurtosis(y_test):.3f} (p={kurtosistest(y_test).pvalue:.3f})') +609 plt.draw() +610
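The replica-resolved diagnostics just listed need more than one replicum to be informative. A sketch with two synthetic replica of one ensemble (the 'name|rX' naming convention is the usual pyerrors one, assumed here rather than defined in this listing):

    import numpy as np
    import pyerrors as pe

    rng = np.random.default_rng(5)
    r1 = rng.normal(1.0, 0.3, 1500)
    r2 = rng.normal(1.0, 0.3, 2500)

    obs = pe.Obs([r1, r2], ['ensemble1|r1', 'ensemble1|r2'])
    obs.gamma_method()
    obs.plot_rep_dist()            # histogram of normalized per-replicum means
    obs.plot_history(expand=True)  # concatenated Monte Carlo history of both replica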
+611 def plot_piechart(self, save=None): +612 """Plot piechart which shows the fractional contribution of each +613 ensemble to the error and returns a dictionary containing the fractions. +614 +615 Parameters +616 ---------- +617 save : str +618 saves the figure to a file named 'save' if specified. +619 """ +620 if not hasattr(self, 'e_dvalue'): +621 raise Exception('Run the gamma method first.') +622 if np.isclose(0.0, self._dvalue, atol=1e-15): +623 raise Exception('Error is 0.0') +624 labels = self.e_names +625 sizes = [self.e_dvalue[name] ** 2 for name in labels] / self._dvalue ** 2 +626 fig1, ax1 = plt.subplots() +627 ax1.pie(sizes, labels=labels, startangle=90, normalize=True) +628 ax1.axis('equal') +629 plt.draw() +630 if save: +631 fig1.savefig(save) +632 +633 return dict(zip(self.e_names, sizes)) +634 +635 def dump(self, filename, datatype="json.gz", description="", **kwargs): +636 """Dump the Obs to a file 'filename' of chosen format. +637 +638 Parameters +639 ---------- +640 filename : str +641 name of the file to be saved. +642 datatype : str +643 Format of the exported file. Supported formats include +644 "json.gz" and "pickle" +645 description : str +646 Description for output file, only relevant for json.gz format. +647 path : str +648 specifies a custom path for the file (default '.') +649 """ +650 if 'path' in kwargs: +651 file_name = kwargs.get('path') + '/' + filename +652 else: +653 file_name = filename +654 +655 if datatype == "json.gz": +656 from .input.json import dump_to_json +657 dump_to_json([self], file_name, description=description) +658 elif datatype == "pickle": +659 with open(file_name + '.p', 'wb') as fb: +660 pickle.dump(self, fb) +661 else: +662 raise Exception("Unknown datatype " + str(datatype)) +663 +664 def export_jackknife(self): +665 """Export jackknife samples from the Obs +666 +667 Returns +668 ------- +669 numpy.ndarray +670 Returns a numpy array of length N + 1 where N is the number of samples +671 for the given ensemble and replicum. The zeroth entry of the array contains +672 the mean value of the Obs, entries 1 to N contain the N jackknife samples +673 derived from the Obs. The current implementation only works for observables +674 defined on exactly one ensemble and replicum. The derived jackknife samples +675 should agree with samples from a full jackknife analysis up to O(1/N). +676 """ 677 -678 Returns -679 ------- -680 numpy.ndarray -681 Returns a numpy array of length N + 1 where N is the number of samples -682 for the given ensemble and replicum. The zeroth entry of the array contains -683 the mean value of the Obs, entries 1 to N contain the N jackknife samples -684 derived from the Obs. The current implementation only works for observables -685 defined on exactly one ensemble and replicum. The derived jackknife samples -686 should agree with samples from a full jackknife analysis up to O(1/N).
-687 """ -688 -689 if len(self.names) != 1: -690 raise Exception("'export_jackknife' is only implemented for Obs defined on one ensemble and replicum.") -691 -692 name = self.names[0] -693 full_data = self.deltas[name] + self.r_values[name] -694 n = full_data.size -695 mean = self.value -696 tmp_jacks = np.zeros(n + 1) -697 tmp_jacks[0] = mean -698 tmp_jacks[1:] = (n * mean - full_data) / (n - 1) -699 return tmp_jacks -700 -701 def __float__(self): -702 return float(self.value) -703 -704 def __repr__(self): -705 return 'Obs[' + str(self) + ']' -706 -707 def __str__(self): -708 return _format_uncertainty(self.value, self._dvalue) -709 -710 def __hash__(self): -711 hash_tuple = (np.array([self.value]).astype(np.float32).data.tobytes(),) -712 hash_tuple += tuple([o.astype(np.float32).data.tobytes() for o in self.deltas.values()]) -713 hash_tuple += tuple([np.array([o.errsq()]).astype(np.float32).data.tobytes() for o in self.covobs.values()]) -714 hash_tuple += tuple([o.encode() for o in self.names]) -715 m = hashlib.md5() -716 [m.update(o) for o in hash_tuple] -717 return int(m.hexdigest(), 16) & 0xFFFFFFFF -718 -719 # Overload comparisons -720 def __lt__(self, other): -721 return self.value < other -722 -723 def __le__(self, other): -724 return self.value <= other -725 -726 def __gt__(self, other): -727 return self.value > other -728 -729 def __ge__(self, other): -730 return self.value >= other -731 -732 def __eq__(self, other): -733 return (self - other).is_zero() -734 -735 def __ne__(self, other): -736 return not (self - other).is_zero() -737 -738 # Overload math operations -739 def __add__(self, y): -740 if isinstance(y, Obs): -741 return derived_observable(lambda x, **kwargs: x[0] + x[1], [self, y], man_grad=[1, 1]) -742 else: -743 if isinstance(y, np.ndarray): -744 return np.array([self + o for o in y]) -745 elif y.__class__.__name__ in ['Corr', 'CObs']: -746 return NotImplemented -747 else: -748 return derived_observable(lambda x, **kwargs: x[0] + y, [self], man_grad=[1]) -749 -750 def __radd__(self, y): -751 return self + y -752 -753 def __mul__(self, y): -754 if isinstance(y, Obs): -755 return derived_observable(lambda x, **kwargs: x[0] * x[1], [self, y], man_grad=[y.value, self.value]) -756 else: -757 if isinstance(y, np.ndarray): -758 return np.array([self * o for o in y]) -759 elif isinstance(y, complex): -760 return CObs(self * y.real, self * y.imag) -761 elif y.__class__.__name__ in ['Corr', 'CObs']: -762 return NotImplemented -763 else: -764 return derived_observable(lambda x, **kwargs: x[0] * y, [self], man_grad=[y]) -765 -766 def __rmul__(self, y): -767 return self * y +678 if len(self.names) != 1: +679 raise Exception("'export_jackknife' is only implemented for Obs defined on one ensemble and replicum.") +680 +681 name = self.names[0] +682 full_data = self.deltas[name] + self.r_values[name] +683 n = full_data.size +684 mean = self.value +685 tmp_jacks = np.zeros(n + 1) +686 tmp_jacks[0] = mean +687 tmp_jacks[1:] = (n * mean - full_data) / (n - 1) +688 return tmp_jacks +689 +690 def __float__(self): +691 return float(self.value) +692 +693 def __repr__(self): +694 return 'Obs[' + str(self) + ']' +695 +696 def __str__(self): +697 return _format_uncertainty(self.value, self._dvalue) +698 +699 def __hash__(self): +700 hash_tuple = (np.array([self.value]).astype(np.float32).data.tobytes(),) +701 hash_tuple += tuple([o.astype(np.float32).data.tobytes() for o in self.deltas.values()]) +702 hash_tuple += tuple([np.array([o.errsq()]).astype(np.float32).data.tobytes() for o in 
self.covobs.values()]) +703 hash_tuple += tuple([o.encode() for o in self.names]) +704 m = hashlib.md5() +705 [m.update(o) for o in hash_tuple] +706 return int(m.hexdigest(), 16) & 0xFFFFFFFF +707 +708 # Overload comparisons +709 def __lt__(self, other): +710 return self.value < other +711 +712 def __le__(self, other): +713 return self.value <= other +714 +715 def __gt__(self, other): +716 return self.value > other +717 +718 def __ge__(self, other): +719 return self.value >= other +720 +721 def __eq__(self, other): +722 return (self - other).is_zero() +723 +724 def __ne__(self, other): +725 return not (self - other).is_zero() +726 +727 # Overload math operations +728 def __add__(self, y): +729 if isinstance(y, Obs): +730 return derived_observable(lambda x, **kwargs: x[0] + x[1], [self, y], man_grad=[1, 1]) +731 else: +732 if isinstance(y, np.ndarray): +733 return np.array([self + o for o in y]) +734 elif y.__class__.__name__ in ['Corr', 'CObs']: +735 return NotImplemented +736 else: +737 return derived_observable(lambda x, **kwargs: x[0] + y, [self], man_grad=[1]) +738 +739 def __radd__(self, y): +740 return self + y +741 +742 def __mul__(self, y): +743 if isinstance(y, Obs): +744 return derived_observable(lambda x, **kwargs: x[0] * x[1], [self, y], man_grad=[y.value, self.value]) +745 else: +746 if isinstance(y, np.ndarray): +747 return np.array([self * o for o in y]) +748 elif isinstance(y, complex): +749 return CObs(self * y.real, self * y.imag) +750 elif y.__class__.__name__ in ['Corr', 'CObs']: +751 return NotImplemented +752 else: +753 return derived_observable(lambda x, **kwargs: x[0] * y, [self], man_grad=[y]) +754 +755 def __rmul__(self, y): +756 return self * y +757 +758 def __sub__(self, y): +759 if isinstance(y, Obs): +760 return derived_observable(lambda x, **kwargs: x[0] - x[1], [self, y], man_grad=[1, -1]) +761 else: +762 if isinstance(y, np.ndarray): +763 return np.array([self - o for o in y]) +764 elif y.__class__.__name__ in ['Corr', 'CObs']: +765 return NotImplemented +766 else: +767 return derived_observable(lambda x, **kwargs: x[0] - y, [self], man_grad=[1]) 768 -769 def __sub__(self, y): -770 if isinstance(y, Obs): -771 return derived_observable(lambda x, **kwargs: x[0] - x[1], [self, y], man_grad=[1, -1]) -772 else: -773 if isinstance(y, np.ndarray): -774 return np.array([self - o for o in y]) -775 elif y.__class__.__name__ in ['Corr', 'CObs']: -776 return NotImplemented -777 else: -778 return derived_observable(lambda x, **kwargs: x[0] - y, [self], man_grad=[1]) -779 -780 def __rsub__(self, y): -781 return -1 * (self - y) -782 -783 def __pos__(self): -784 return self -785 -786 def __neg__(self): -787 return -1 * self +769 def __rsub__(self, y): +770 return -1 * (self - y) +771 +772 def __pos__(self): +773 return self +774 +775 def __neg__(self): +776 return -1 * self +777 +778 def __truediv__(self, y): +779 if isinstance(y, Obs): +780 return derived_observable(lambda x, **kwargs: x[0] / x[1], [self, y], man_grad=[1 / y.value, - self.value / y.value ** 2]) +781 else: +782 if isinstance(y, np.ndarray): +783 return np.array([self / o for o in y]) +784 elif y.__class__.__name__ in ['Corr', 'CObs']: +785 return NotImplemented +786 else: +787 return derived_observable(lambda x, **kwargs: x[0] / y, [self], man_grad=[1 / y]) 788 -789 def __truediv__(self, y): +789 def __rtruediv__(self, y): 790 if isinstance(y, Obs): -791 return derived_observable(lambda x, **kwargs: x[0] / x[1], [self, y], man_grad=[1 / y.value, - self.value / y.value ** 2]) +791 return 
derived_observable(lambda x, **kwargs: x[0] / x[1], [y, self], man_grad=[1 / self.value, - y.value / self.value ** 2]) 792 else: 793 if isinstance(y, np.ndarray): -794 return np.array([self / o for o in y]) +794 return np.array([o / self for o in y]) 795 elif y.__class__.__name__ in ['Corr', 'CObs']: 796 return NotImplemented 797 else: -798 return derived_observable(lambda x, **kwargs: x[0] / y, [self], man_grad=[1 / y]) +798 return derived_observable(lambda x, **kwargs: y / x[0], [self], man_grad=[-y / self.value ** 2]) 799 -800 def __rtruediv__(self, y): +800 def __pow__(self, y): 801 if isinstance(y, Obs): -802 return derived_observable(lambda x, **kwargs: x[0] / x[1], [y, self], man_grad=[1 / self.value, - y.value / self.value ** 2]) +802 return derived_observable(lambda x: x[0] ** x[1], [self, y]) 803 else: -804 if isinstance(y, np.ndarray): -805 return np.array([o / self for o in y]) -806 elif y.__class__.__name__ in ['Corr', 'CObs']: -807 return NotImplemented -808 else: -809 return derived_observable(lambda x, **kwargs: y / x[0], [self], man_grad=[-y / self.value ** 2]) -810 -811 def __pow__(self, y): -812 if isinstance(y, Obs): -813 return derived_observable(lambda x: x[0] ** x[1], [self, y]) -814 else: -815 return derived_observable(lambda x: x[0] ** y, [self]) -816 -817 def __rpow__(self, y): -818 if isinstance(y, Obs): -819 return derived_observable(lambda x: x[0] ** x[1], [y, self]) -820 else: -821 return derived_observable(lambda x: y ** x[0], [self]) -822 -823 def __abs__(self): -824 return derived_observable(lambda x: anp.abs(x[0]), [self]) -825 -826 # Overload numpy functions -827 def sqrt(self): -828 return derived_observable(lambda x, **kwargs: np.sqrt(x[0]), [self], man_grad=[1 / 2 / np.sqrt(self.value)]) -829 -830 def log(self): -831 return derived_observable(lambda x, **kwargs: np.log(x[0]), [self], man_grad=[1 / self.value]) -832 -833 def exp(self): -834 return derived_observable(lambda x, **kwargs: np.exp(x[0]), [self], man_grad=[np.exp(self.value)]) -835 -836 def sin(self): -837 return derived_observable(lambda x, **kwargs: np.sin(x[0]), [self], man_grad=[np.cos(self.value)]) -838 -839 def cos(self): -840 return derived_observable(lambda x, **kwargs: np.cos(x[0]), [self], man_grad=[-np.sin(self.value)]) -841 -842 def tan(self): -843 return derived_observable(lambda x, **kwargs: np.tan(x[0]), [self], man_grad=[1 / np.cos(self.value) ** 2]) -844 -845 def arcsin(self): -846 return derived_observable(lambda x: anp.arcsin(x[0]), [self]) -847 -848 def arccos(self): -849 return derived_observable(lambda x: anp.arccos(x[0]), [self]) -850 -851 def arctan(self): -852 return derived_observable(lambda x: anp.arctan(x[0]), [self]) -853 -854 def sinh(self): -855 return derived_observable(lambda x, **kwargs: np.sinh(x[0]), [self], man_grad=[np.cosh(self.value)]) -856 -857 def cosh(self): -858 return derived_observable(lambda x, **kwargs: np.cosh(x[0]), [self], man_grad=[np.sinh(self.value)]) -859 -860 def tanh(self): -861 return derived_observable(lambda x, **kwargs: np.tanh(x[0]), [self], man_grad=[1 / np.cosh(self.value) ** 2]) -862 -863 def arcsinh(self): -864 return derived_observable(lambda x: anp.arcsinh(x[0]), [self]) -865 -866 def arccosh(self): -867 return derived_observable(lambda x: anp.arccosh(x[0]), [self]) -868 -869 def arctanh(self): -870 return derived_observable(lambda x: anp.arctanh(x[0]), [self]) +804 return derived_observable(lambda x: x[0] ** y, [self]) +805 +806 def __rpow__(self, y): +807 if isinstance(y, Obs): +808 return derived_observable(lambda x: x[0] 
** x[1], [y, self]) +809 else: +810 return derived_observable(lambda x: y ** x[0], [self]) +811 +812 def __abs__(self): +813 return derived_observable(lambda x: anp.abs(x[0]), [self]) +814 +815 # Overload numpy functions +816 def sqrt(self): +817 return derived_observable(lambda x, **kwargs: np.sqrt(x[0]), [self], man_grad=[1 / 2 / np.sqrt(self.value)]) +818 +819 def log(self): +820 return derived_observable(lambda x, **kwargs: np.log(x[0]), [self], man_grad=[1 / self.value]) +821 +822 def exp(self): +823 return derived_observable(lambda x, **kwargs: np.exp(x[0]), [self], man_grad=[np.exp(self.value)]) +824 +825 def sin(self): +826 return derived_observable(lambda x, **kwargs: np.sin(x[0]), [self], man_grad=[np.cos(self.value)]) +827 +828 def cos(self): +829 return derived_observable(lambda x, **kwargs: np.cos(x[0]), [self], man_grad=[-np.sin(self.value)]) +830 +831 def tan(self): +832 return derived_observable(lambda x, **kwargs: np.tan(x[0]), [self], man_grad=[1 / np.cos(self.value) ** 2]) +833 +834 def arcsin(self): +835 return derived_observable(lambda x: anp.arcsin(x[0]), [self]) +836 +837 def arccos(self): +838 return derived_observable(lambda x: anp.arccos(x[0]), [self]) +839 +840 def arctan(self): +841 return derived_observable(lambda x: anp.arctan(x[0]), [self]) +842 +843 def sinh(self): +844 return derived_observable(lambda x, **kwargs: np.sinh(x[0]), [self], man_grad=[np.cosh(self.value)]) +845 +846 def cosh(self): +847 return derived_observable(lambda x, **kwargs: np.cosh(x[0]), [self], man_grad=[np.sinh(self.value)]) +848 +849 def tanh(self): +850 return derived_observable(lambda x, **kwargs: np.tanh(x[0]), [self], man_grad=[1 / np.cosh(self.value) ** 2]) +851 +852 def arcsinh(self): +853 return derived_observable(lambda x: anp.arcsinh(x[0]), [self]) +854 +855 def arccosh(self): +856 return derived_observable(lambda x: anp.arccosh(x[0]), [self]) +857 +858 def arctanh(self): +859 return derived_observable(lambda x: anp.arctanh(x[0]), [self])
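With the arithmetic and numpy-style overloads above, an Obs can be treated like a number and errors propagate automatically through derived_observable. A compact end-to-end sketch of the workflow covered by this class; it uses only methods from the listing plus the standard pe.Obs constructor, and all data is synthetic:

    import numpy as np
    import pyerrors as pe

    rng = np.random.default_rng(6)
    a = pe.Obs([rng.normal(2.0, 0.1, 1000)], ['ensemble1'])
    b = pe.Obs([rng.normal(1.0, 0.1, 1000)], ['ensemble1'])

    ratio = np.log(a / b)     # __truediv__ plus the log() overload defined above
    ratio.gm(S=1.5)           # gm is the alias bound right after gamma_method
    ratio.details()

    # Jackknife export: entry 0 is the mean, entries 1..N the jackknife samples
    jacks = a.export_jackknife()
    n = len(jacks) - 1
    jack_err = np.sqrt((n - 1) * np.mean((jacks[1:] - np.mean(jacks[1:])) ** 2))

    a.gamma_method()
    print(jack_err, a.dvalue)  # agree here because the toy data is uncorrelated

    ratio.dump('toy_ratio', description='synthetic test data')  # writes toy_ratio.json.gz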
@@ -3285,77 +3260,74 @@ of the autocorrelation function (default True) -
388    def details(self, ens_content=True):
-389        """Output detailed properties of the Obs.
-390
-391        Parameters
-392        ----------
-393        ens_content : bool
-394            print details about the ensembles and replica if true.
-395        """
-396        if self.tag is not None:
-397            print("Description:", self.tag)
-398        if not hasattr(self, 'e_dvalue'):
-399            print('Result\t %3.8e' % (self.value))
-400        else:
-401            if self.value == 0.0:
-402                percentage = np.nan
-403            else:
-404                percentage = np.abs(self._dvalue / self.value) * 100
-405            print('Result\t %3.8e +/- %3.8e +/- %3.8e (%3.3f%%)' % (self.value, self._dvalue, self.ddvalue, percentage))
-406            if len(self.e_names) > 1:
-407                print(' Ensemble errors:')
-408            e_content = self.e_content
-409            for e_name in self.mc_names:
-410                if isinstance(self.idl[e_content[e_name][0]], range):
-411                    gap = self.idl[e_content[e_name][0]].step
+            
380    def details(self, ens_content=True):
+381        """Output detailed properties of the Obs.
+382
+383        Parameters
+384        ----------
+385        ens_content : bool
+386            print details about the ensembles and replica if true.
+387        """
+388        if self.tag is not None:
+389            print("Description:", self.tag)
+390        if not hasattr(self, 'e_dvalue'):
+391            print('Result\t %3.8e' % (self.value))
+392        else:
+393            if self.value == 0.0:
+394                percentage = np.nan
+395            else:
+396                percentage = np.abs(self._dvalue / self.value) * 100
+397            print('Result\t %3.8e +/- %3.8e +/- %3.8e (%3.3f%%)' % (self.value, self._dvalue, self.ddvalue, percentage))
+398            if len(self.e_names) > 1:
+399                print(' Ensemble errors:')
+400            e_content = self.e_content
+401            for e_name in self.mc_names:
+402                gap = _determine_gap(self, e_content, e_name)
+403
+404                if len(self.e_names) > 1:
+405                    print('', e_name, '\t %3.6e +/- %3.6e' % (self.e_dvalue[e_name], self.e_ddvalue[e_name]))
+406                tau_string = " \N{GREEK SMALL LETTER TAU}_int\t " + _format_uncertainty(self.e_tauint[e_name], self.e_dtauint[e_name])
+407                tau_string += f" in units of {gap} config"
+408                if gap > 1:
+409                    tau_string += "s"
+410                if self.tau_exp[e_name] > 0:
+411                    tau_string = f"{tau_string: <45}" + '\t(\N{GREEK SMALL LETTER TAU}_exp=%3.2f, N_\N{GREEK SMALL LETTER SIGMA}=%1.0i)' % (self.tau_exp[e_name], self.N_sigma[e_name])
 412                else:
-413                    gap = np.min(np.diff(self.idl[e_content[e_name][0]]))
-414
-415                if len(self.e_names) > 1:
-416                    print('', e_name, '\t %3.6e +/- %3.6e' % (self.e_dvalue[e_name], self.e_ddvalue[e_name]))
-417                tau_string = " \N{GREEK SMALL LETTER TAU}_int\t " + _format_uncertainty(self.e_tauint[e_name], self.e_dtauint[e_name])
-418                tau_string += f" in units of {gap} config"
-419                if gap > 1:
-420                    tau_string += "s"
-421                if self.tau_exp[e_name] > 0:
-422                    tau_string = f"{tau_string: <45}" + '\t(\N{GREEK SMALL LETTER TAU}_exp=%3.2f, N_\N{GREEK SMALL LETTER SIGMA}=%1.0i)' % (self.tau_exp[e_name], self.N_sigma[e_name])
-423                else:
-424                    tau_string = f"{tau_string: <45}" + '\t(S=%3.2f)' % (self.S[e_name])
-425                print(tau_string)
-426            for e_name in self.cov_names:
-427                print('', e_name, '\t %3.8e' % (self.e_dvalue[e_name]))
-428        if ens_content is True:
-429            if len(self.e_names) == 1:
-430                print(self.N, 'samples in', len(self.e_names), 'ensemble:')
-431            else:
-432                print(self.N, 'samples in', len(self.e_names), 'ensembles:')
-433            my_string_list = []
-434            for key, value in sorted(self.e_content.items()):
-435                if key not in self.covobs:
-436                    my_string = '  ' + "\u00B7 Ensemble '" + key + "' "
-437                    if len(value) == 1:
-438                        my_string += f': {self.shape[value[0]]} configurations'
-439                        if isinstance(self.idl[value[0]], range):
-440                            my_string += f' (from {self.idl[value[0]].start} to {self.idl[value[0]][-1]}' + int(self.idl[value[0]].step != 1) * f' in steps of {self.idl[value[0]].step}' + ')'
-441                        else:
-442                            my_string += f' (irregular range from {self.idl[value[0]][0]} to {self.idl[value[0]][-1]})'
-443                    else:
-444                        sublist = []
-445                        for v in value:
-446                            my_substring = '    ' + "\u00B7 Replicum '" + v[len(key) + 1:] + "' "
-447                            my_substring += f': {self.shape[v]} configurations'
-448                            if isinstance(self.idl[v], range):
-449                                my_substring += f' (from {self.idl[v].start} to {self.idl[v][-1]}' + int(self.idl[v].step != 1) * f' in steps of {self.idl[v].step}' + ')'
-450                            else:
-451                                my_substring += f' (irregular range from {self.idl[v][0]} to {self.idl[v][-1]})'
-452                            sublist.append(my_substring)
-453
-454                        my_string += '\n' + '\n'.join(sublist)
-455                else:
-456                    my_string = '  ' + "\u00B7 Covobs   '" + key + "' "
-457                my_string_list.append(my_string)
-458            print('\n'.join(my_string_list))
+413                    tau_string = f"{tau_string: <45}" + '\t(S=%3.2f)' % (self.S[e_name])
+414                print(tau_string)
+415            for e_name in self.cov_names:
+416                print('', e_name, '\t %3.8e' % (self.e_dvalue[e_name]))
+417        if ens_content is True:
+418            if len(self.e_names) == 1:
+419                print(self.N, 'samples in', len(self.e_names), 'ensemble:')
+420            else:
+421                print(self.N, 'samples in', len(self.e_names), 'ensembles:')
+422            my_string_list = []
+423            for key, value in sorted(self.e_content.items()):
+424                if key not in self.covobs:
+425                    my_string = '  ' + "\u00B7 Ensemble '" + key + "' "
+426                    if len(value) == 1:
+427                        my_string += f': {self.shape[value[0]]} configurations'
+428                        if isinstance(self.idl[value[0]], range):
+429                            my_string += f' (from {self.idl[value[0]].start} to {self.idl[value[0]][-1]}' + int(self.idl[value[0]].step != 1) * f' in steps of {self.idl[value[0]].step}' + ')'
+430                        else:
+431                            my_string += f' (irregular range from {self.idl[value[0]][0]} to {self.idl[value[0]][-1]})'
+432                    else:
+433                        sublist = []
+434                        for v in value:
+435                            my_substring = '    ' + "\u00B7 Replicum '" + v[len(key) + 1:] + "' "
+436                            my_substring += f': {self.shape[v]} configurations'
+437                            if isinstance(self.idl[v], range):
+438                                my_substring += f' (from {self.idl[v].start} to {self.idl[v][-1]}' + int(self.idl[v].step != 1) * f' in steps of {self.idl[v].step}' + ')'
+439                            else:
+440                                my_substring += f' (irregular range from {self.idl[v][0]} to {self.idl[v][-1]})'
+441                            sublist.append(my_substring)
+442
+443                        my_string += '\n' + '\n'.join(sublist)
+444                else:
+445                    my_string = '  ' + "\u00B7 Covobs   '" + key + "' "
+446                my_string_list.append(my_string)
+447            print('\n'.join(my_string_list))
 
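A minimal usage sketch of details (the ensemble name and the dummy data are invented for illustration):

    import numpy as np
    import pyerrors as pe

    obs = pe.Obs([np.random.normal(1.0, 0.1, 1000)], ['ensemble1'])
    obs.gamma_method()  # the detailed output requires a prior error analysis
    obs.details()       # prints value, error, tau_int and the configuration layout
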
@@ -3382,20 +3354,20 @@ print details about the ensembles and replica if true.
-
460    def reweight(self, weight):
-461        """Reweight the obs with given reweighting factors.
-462
-463        Parameters
-464        ----------
-465        weight : Obs
-466            Reweighting factor. An Observable that has to be defined on a superset of the
-467            configurations in obs[i].idl for all i.
-468        all_configs : bool
-469            if True, the reweighted observables are normalized by the average of
-470            the reweighting factor on all configurations in weight.idl and not
-471            on the configurations in obs[i].idl. Default False.
-472        """
-473        return reweight(weight, [self])[0]
+            
449    def reweight(self, weight):
+450        """Reweight the obs with given reweighting factors.
+451
+452        Parameters
+453        ----------
+454        weight : Obs
+455            Reweighting factor. An Observable that has to be defined on a superset of the
+456            configurations in obs[i].idl for all i.
+457        all_configs : bool
+458            if True, the reweighted observables are normalized by the average of
+459            the reweighting factor on all configurations in weight.idl and not
+460            on the configurations in obs[i].idl. Default False.
+461        """
+462        return reweight(weight, [self])[0]
 
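The method delegates to the module-level reweight function. A hedged sketch, with invented ensemble name and weight data:

    import numpy as np
    import pyerrors as pe

    corr = pe.Obs([np.random.normal(1.0, 0.1, 500)], ['ensemble1'])
    # reweighting factor, here defined on the same configurations as corr
    w = pe.Obs([np.abs(np.random.normal(1.0, 0.05, 500))], ['ensemble1'])
    corr_rw = corr.reweight(w)  # equivalent to reweight(w, [corr])[0]
    corr_rw.gamma_method()
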
@@ -3427,17 +3399,17 @@ on the configurations in obs[i].idl. Default False.
-
475    def is_zero_within_error(self, sigma=1):
-476        """Checks whether the observable is zero within 'sigma' standard errors.
-477
-478        Parameters
-479        ----------
-480        sigma : int
-481            Number of standard errors used for the check.
-482
-483        Only works properly when the gamma method was run.
-484        """
-485        return self.is_zero() or np.abs(self.value) <= sigma * self._dvalue
+            
464    def is_zero_within_error(self, sigma=1):
+465        """Checks whether the observable is zero within 'sigma' standard errors.
+466
+467        Parameters
+468        ----------
+469        sigma : int
+470            Number of standard errors used for the check.
+471
+472        Only works properly when the gamma method was run.
+473        """
+474        return self.is_zero() or np.abs(self.value) <= sigma * self._dvalue
 
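In practice the check is mostly applied to differences; a short sketch, assuming two existing, compatible Obs obs_a and obs_b:

    diff = obs_a - obs_b
    diff.gamma_method()  # the comparison uses the error determined by the gamma method
    if diff.is_zero_within_error(sigma=2):
        print('compatible with zero at the two sigma level')
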
@@ -3465,15 +3437,15 @@ Number of standard errors used for the check.
-
487    def is_zero(self, atol=1e-10):
-488        """Checks whether the observable is zero within a given tolerance.
-489
-490        Parameters
-491        ----------
-492        atol : float
-493            Absolute tolerance (for details see numpy documentation).
-494        """
-495        return np.isclose(0.0, self.value, 1e-14, atol) and all(np.allclose(0.0, delta, 1e-14, atol) for delta in self.deltas.values()) and all(np.allclose(0.0, delta.errsq(), 1e-14, atol) for delta in self.covobs.values())
+            
476    def is_zero(self, atol=1e-10):
+477        """Checks whether the observable is zero within a given tolerance.
+478
+479        Parameters
+480        ----------
+481        atol : float
+482            Absolute tolerance (for details see numpy documentation).
+483        """
+484        return np.isclose(0.0, self.value, 1e-14, atol) and all(np.allclose(0.0, delta, 1e-14, atol) for delta in self.deltas.values()) and all(np.allclose(0.0, delta.errsq(), 1e-14, atol) for delta in self.covobs.values())
 
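In contrast to is_zero_within_error this is a purely numerical check on the mean value and all stored fluctuations, so no prior error analysis is needed. Sketch (obs as above):

    exact_zero = obs - obs
    exact_zero.is_zero()  # True: value and all deltas vanish identically
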
@@ -3500,45 +3472,45 @@ Absolute tolerance (for details see numpy documentation).
-
497    def plot_tauint(self, save=None):
-498        """Plot integrated autocorrelation time for each ensemble.
-499
-500        Parameters
-501        ----------
-502        save : str
-503            saves the figure to a file named 'save' if specified.
-504        """
-505        if not hasattr(self, 'e_dvalue'):
-506            raise Exception('Run the gamma method first.')
-507
-508        for e, e_name in enumerate(self.mc_names):
-509            fig = plt.figure()
-510            plt.xlabel(r'$W$')
-511            plt.ylabel(r'$\tau_\mathrm{int}$')
-512            length = int(len(self.e_n_tauint[e_name]))
-513            if self.tau_exp[e_name] > 0:
-514                base = self.e_n_tauint[e_name][self.e_windowsize[e_name]]
-515                x_help = np.arange(2 * self.tau_exp[e_name])
-516                y_help = (x_help + 1) * np.abs(self.e_rho[e_name][self.e_windowsize[e_name] + 1]) * (1 - x_help / (2 * (2 * self.tau_exp[e_name] - 1))) + base
-517                x_arr = np.arange(self.e_windowsize[e_name] + 1, self.e_windowsize[e_name] + 1 + 2 * self.tau_exp[e_name])
-518                plt.plot(x_arr, y_help, 'C' + str(e), linewidth=1, ls='--', marker=',')
-519                plt.errorbar([self.e_windowsize[e_name] + 2 * self.tau_exp[e_name]], [self.e_tauint[e_name]],
-520                             yerr=[self.e_dtauint[e_name]], fmt='C' + str(e), linewidth=1, capsize=2, marker='o', mfc=plt.rcParams['axes.facecolor'])
-521                xmax = self.e_windowsize[e_name] + 2 * self.tau_exp[e_name] + 1.5
-522                label = e_name + r', $\tau_\mathrm{exp}$=' + str(np.around(self.tau_exp[e_name], decimals=2))
-523            else:
-524                label = e_name + ', S=' + str(np.around(self.S[e_name], decimals=2))
-525                xmax = max(10.5, 2 * self.e_windowsize[e_name] - 0.5)
-526
-527            plt.errorbar(np.arange(length)[:int(xmax) + 1], self.e_n_tauint[e_name][:int(xmax) + 1], yerr=self.e_n_dtauint[e_name][:int(xmax) + 1], linewidth=1, capsize=2, label=label)
-528            plt.axvline(x=self.e_windowsize[e_name], color='C' + str(e), alpha=0.5, marker=',', ls='--')
-529            plt.legend()
-530            plt.xlim(-0.5, xmax)
-531            ylim = plt.ylim()
-532            plt.ylim(bottom=0.0, top=max(1.0, ylim[1]))
-533            plt.draw()
-534            if save:
-535                fig.savefig(save + "_" + str(e))
+            
486    def plot_tauint(self, save=None):
+487        """Plot integrated autocorrelation time for each ensemble.
+488
+489        Parameters
+490        ----------
+491        save : str
+492            saves the figure to a file named 'save' if specified.
+493        """
+494        if not hasattr(self, 'e_dvalue'):
+495            raise Exception('Run the gamma method first.')
+496
+497        for e, e_name in enumerate(self.mc_names):
+498            fig = plt.figure()
+499            plt.xlabel(r'$W$')
+500            plt.ylabel(r'$\tau_\mathrm{int}$')
+501            length = int(len(self.e_n_tauint[e_name]))
+502            if self.tau_exp[e_name] > 0:
+503                base = self.e_n_tauint[e_name][self.e_windowsize[e_name]]
+504                x_help = np.arange(2 * self.tau_exp[e_name])
+505                y_help = (x_help + 1) * np.abs(self.e_rho[e_name][self.e_windowsize[e_name] + 1]) * (1 - x_help / (2 * (2 * self.tau_exp[e_name] - 1))) + base
+506                x_arr = np.arange(self.e_windowsize[e_name] + 1, self.e_windowsize[e_name] + 1 + 2 * self.tau_exp[e_name])
+507                plt.plot(x_arr, y_help, 'C' + str(e), linewidth=1, ls='--', marker=',')
+508                plt.errorbar([self.e_windowsize[e_name] + 2 * self.tau_exp[e_name]], [self.e_tauint[e_name]],
+509                             yerr=[self.e_dtauint[e_name]], fmt='C' + str(e), linewidth=1, capsize=2, marker='o', mfc=plt.rcParams['axes.facecolor'])
+510                xmax = self.e_windowsize[e_name] + 2 * self.tau_exp[e_name] + 1.5
+511                label = e_name + r', $\tau_\mathrm{exp}$=' + str(np.around(self.tau_exp[e_name], decimals=2))
+512            else:
+513                label = e_name + ', S=' + str(np.around(self.S[e_name], decimals=2))
+514                xmax = max(10.5, 2 * self.e_windowsize[e_name] - 0.5)
+515
+516            plt.errorbar(np.arange(length)[:int(xmax) + 1], self.e_n_tauint[e_name][:int(xmax) + 1], yerr=self.e_n_dtauint[e_name][:int(xmax) + 1], linewidth=1, capsize=2, label=label)
+517            plt.axvline(x=self.e_windowsize[e_name], color='C' + str(e), alpha=0.5, marker=',', ls='--')
+518            plt.legend()
+519            plt.xlim(-0.5, xmax)
+520            ylim = plt.ylim()
+521            plt.ylim(bottom=0.0, top=max(1.0, ylim[1]))
+522            plt.draw()
+523            if save:
+524                fig.savefig(save + "_" + str(e))
 
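Typical use, assuming an Obs named obs on which the gamma method has been run:

    obs.gamma_method(S=2.0)
    obs.plot_tauint()               # cumulative tau_int as a function of the window W
    obs.plot_tauint(save='tauint')  # additionally writes one file per ensemble, suffixed with its index
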
@@ -3565,36 +3537,36 @@ saves the figure to a file named 'save' if.
-
537    def plot_rho(self, save=None):
-538        """Plot normalized autocorrelation function for each ensemble.
-539
-540        Parameters
-541        ----------
-542        save : str
-543            saves the figure to a file named 'save' if specified.
-544        """
-545        if not hasattr(self, 'e_dvalue'):
-546            raise Exception('Run the gamma method first.')
-547        for e, e_name in enumerate(self.mc_names):
-548            fig = plt.figure()
-549            plt.xlabel('W')
-550            plt.ylabel('rho')
-551            length = int(len(self.e_drho[e_name]))
-552            plt.errorbar(np.arange(length), self.e_rho[e_name][:length], yerr=self.e_drho[e_name][:], linewidth=1, capsize=2)
-553            plt.axvline(x=self.e_windowsize[e_name], color='r', alpha=0.25, ls='--', marker=',')
-554            if self.tau_exp[e_name] > 0:
-555                plt.plot([self.e_windowsize[e_name] + 1, self.e_windowsize[e_name] + 1 + 2 * self.tau_exp[e_name]],
-556                         [self.e_rho[e_name][self.e_windowsize[e_name] + 1], 0], 'k-', lw=1)
-557                xmax = self.e_windowsize[e_name] + 2 * self.tau_exp[e_name] + 1.5
-558                plt.title('Rho ' + e_name + r', tau\_exp=' + str(np.around(self.tau_exp[e_name], decimals=2)))
-559            else:
-560                xmax = max(10.5, 2 * self.e_windowsize[e_name] - 0.5)
-561                plt.title('Rho ' + e_name + ', S=' + str(np.around(self.S[e_name], decimals=2)))
-562            plt.plot([-0.5, xmax], [0, 0], 'k--', lw=1)
-563            plt.xlim(-0.5, xmax)
-564            plt.draw()
-565            if save:
-566                fig.savefig(save + "_" + str(e))
+            
526    def plot_rho(self, save=None):
+527        """Plot normalized autocorrelation function for each ensemble.
+528
+529        Parameters
+530        ----------
+531        save : str
+532            saves the figure to a file named 'save' if.
+533        """
+534        if not hasattr(self, 'e_dvalue'):
+535            raise Exception('Run the gamma method first.')
+536        for e, e_name in enumerate(self.mc_names):
+537            fig = plt.figure()
+538            plt.xlabel('W')
+539            plt.ylabel('rho')
+540            length = int(len(self.e_drho[e_name]))
+541            plt.errorbar(np.arange(length), self.e_rho[e_name][:length], yerr=self.e_drho[e_name][:], linewidth=1, capsize=2)
+542            plt.axvline(x=self.e_windowsize[e_name], color='r', alpha=0.25, ls='--', marker=',')
+543            if self.tau_exp[e_name] > 0:
+544                plt.plot([self.e_windowsize[e_name] + 1, self.e_windowsize[e_name] + 1 + 2 * self.tau_exp[e_name]],
+545                         [self.e_rho[e_name][self.e_windowsize[e_name] + 1], 0], 'k-', lw=1)
+546                xmax = self.e_windowsize[e_name] + 2 * self.tau_exp[e_name] + 1.5
+547                plt.title('Rho ' + e_name + r', tau\_exp=' + str(np.around(self.tau_exp[e_name], decimals=2)))
+548            else:
+549                xmax = max(10.5, 2 * self.e_windowsize[e_name] - 0.5)
+550                plt.title('Rho ' + e_name + ', S=' + str(np.around(self.S[e_name], decimals=2)))
+551            plt.plot([-0.5, xmax], [0, 0], 'k--', lw=1)
+552            plt.xlim(-0.5, xmax)
+553            plt.draw()
+554            if save:
+555                fig.savefig(save + "_" + str(e))
 
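For a tau_exp analysis the attached exponential tail becomes visible in this plot; sketch:

    obs.gamma_method(tau_exp=4)
    obs.plot_rho()  # rho(W) with the attached tail starting at the chosen window
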
@@ -3621,27 +3593,27 @@ saves the figure to a file named 'save' if.
-
568    def plot_rep_dist(self):
-569        """Plot replica distribution for each ensemble with more than one replicum."""
-570        if not hasattr(self, 'e_dvalue'):
-571            raise Exception('Run the gamma method first.')
-572        for e, e_name in enumerate(self.mc_names):
-573            if len(self.e_content[e_name]) == 1:
-574                print('No replica distribution for a single replicum (', e_name, ')')
-575                continue
-576            r_length = []
-577            sub_r_mean = 0
-578            for r, r_name in enumerate(self.e_content[e_name]):
-579                r_length.append(len(self.deltas[r_name]))
-580                sub_r_mean += self.shape[r_name] * self.r_values[r_name]
-581            e_N = np.sum(r_length)
-582            sub_r_mean /= e_N
-583            arr = np.zeros(len(self.e_content[e_name]))
-584            for r, r_name in enumerate(self.e_content[e_name]):
-585                arr[r] = (self.r_values[r_name] - sub_r_mean) / (self.e_dvalue[e_name] * np.sqrt(e_N / self.shape[r_name] - 1))
-586            plt.hist(arr, rwidth=0.8, bins=len(self.e_content[e_name]))
-587            plt.title('Replica distribution ' + e_name + ' (mean=0, var=1)')
-588            plt.draw()
+            
557    def plot_rep_dist(self):
+558        """Plot replica distribution for each ensemble with more than one replicum."""
+559        if not hasattr(self, 'e_dvalue'):
+560            raise Exception('Run the gamma method first.')
+561        for e, e_name in enumerate(self.mc_names):
+562            if len(self.e_content[e_name]) == 1:
+563                print('No replica distribution for a single replicum (', e_name, ')')
+564                continue
+565            r_length = []
+566            sub_r_mean = 0
+567            for r, r_name in enumerate(self.e_content[e_name]):
+568                r_length.append(len(self.deltas[r_name]))
+569                sub_r_mean += self.shape[r_name] * self.r_values[r_name]
+570            e_N = np.sum(r_length)
+571            sub_r_mean /= e_N
+572            arr = np.zeros(len(self.e_content[e_name]))
+573            for r, r_name in enumerate(self.e_content[e_name]):
+574                arr[r] = (self.r_values[r_name] - sub_r_mean) / (self.e_dvalue[e_name] * np.sqrt(e_N / self.shape[r_name] - 1))
+575            plt.hist(arr, rwidth=0.8, bins=len(self.e_content[e_name]))
+576            plt.title('Replica distribution ' + e_name + ' (mean=0, var=1)')
+577            plt.draw()
 
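Sketch, assuming obs combines at least two replica of the same ensemble:

    obs.gamma_method()
    obs.plot_rep_dist()  # histogram of the normalized replica means (mean=0, var=1)
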
@@ -3661,37 +3633,37 @@ saves the figure to a file named 'save' if.
-
590    def plot_history(self, expand=True):
-591        """Plot derived Monte Carlo history for each ensemble.
-592
-593        Parameters
-594        ----------
-595        expand : bool
-596            show expanded history for irregular Monte Carlo chains (default: True).
-597        """
-598        for e, e_name in enumerate(self.mc_names):
-599            plt.figure()
-600            r_length = []
-601            tmp = []
-602            tmp_expanded = []
-603            for r, r_name in enumerate(self.e_content[e_name]):
-604                tmp.append(self.deltas[r_name] + self.r_values[r_name])
-605                if expand:
-606                    tmp_expanded.append(_expand_deltas(self.deltas[r_name], list(self.idl[r_name]), self.shape[r_name]) + self.r_values[r_name])
-607                    r_length.append(len(tmp_expanded[-1]))
-608                else:
-609                    r_length.append(len(tmp[-1]))
-610            e_N = np.sum(r_length)
-611            x = np.arange(e_N)
-612            y_test = np.concatenate(tmp, axis=0)
-613            if expand:
-614                y = np.concatenate(tmp_expanded, axis=0)
-615            else:
-616                y = y_test
-617            plt.errorbar(x, y, fmt='.', markersize=3)
-618            plt.xlim(-0.5, e_N - 0.5)
-619            plt.title(e_name + f'\nskew: {skew(y_test):.3f} (p={skewtest(y_test).pvalue:.3f}), kurtosis: {kurtosis(y_test):.3f} (p={kurtosistest(y_test).pvalue:.3f})')
-620            plt.draw()
+            
579    def plot_history(self, expand=True):
+580        """Plot derived Monte Carlo history for each ensemble.
+581
+582        Parameters
+583        ----------
+584        expand : bool
+585            show expanded history for irregular Monte Carlo chains (default: True).
+586        """
+587        for e, e_name in enumerate(self.mc_names):
+588            plt.figure()
+589            r_length = []
+590            tmp = []
+591            tmp_expanded = []
+592            for r, r_name in enumerate(self.e_content[e_name]):
+593                tmp.append(self.deltas[r_name] + self.r_values[r_name])
+594                if expand:
+595                    tmp_expanded.append(_expand_deltas(self.deltas[r_name], list(self.idl[r_name]), self.shape[r_name], 1) + self.r_values[r_name])
+596                    r_length.append(len(tmp_expanded[-1]))
+597                else:
+598                    r_length.append(len(tmp[-1]))
+599            e_N = np.sum(r_length)
+600            x = np.arange(e_N)
+601            y_test = np.concatenate(tmp, axis=0)
+602            if expand:
+603                y = np.concatenate(tmp_expanded, axis=0)
+604            else:
+605                y = y_test
+606            plt.errorbar(x, y, fmt='.', markersize=3)
+607            plt.xlim(-0.5, e_N - 0.5)
+608            plt.title(e_name + f'\nskew: {skew(y_test):.3f} (p={skewtest(y_test).pvalue:.3f}), kurtosis: {kurtosis(y_test):.3f} (p={kurtosistest(y_test).pvalue:.3f})')
+609            plt.draw()
 
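Sketch of the two display modes:

    obs.plot_history()              # history with gaps in the Monte Carlo chain filled in
    obs.plot_history(expand=False)  # only the configurations actually contained in idl
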
@@ -3718,29 +3690,29 @@ show expanded history for irregular Monte Carlo chains (default: True).
-
622    def plot_piechart(self, save=None):
-623        """Plot piechart which shows the fractional contribution of each
-624        ensemble to the error and returns a dictionary containing the fractions.
-625
-626        Parameters
-627        ----------
-628        save : str
-629            saves the figure to a file named 'save' if specified.
-630        """
-631        if not hasattr(self, 'e_dvalue'):
-632            raise Exception('Run the gamma method first.')
-633        if np.isclose(0.0, self._dvalue, atol=1e-15):
-634            raise Exception('Error is 0.0')
-635        labels = self.e_names
-636        sizes = [self.e_dvalue[name] ** 2 for name in labels] / self._dvalue ** 2
-637        fig1, ax1 = plt.subplots()
-638        ax1.pie(sizes, labels=labels, startangle=90, normalize=True)
-639        ax1.axis('equal')
-640        plt.draw()
-641        if save:
-642            fig1.savefig(save)
-643
-644        return dict(zip(self.e_names, sizes))
+            
611    def plot_piechart(self, save=None):
+612        """Plot piechart which shows the fractional contribution of each
+613        ensemble to the error and returns a dictionary containing the fractions.
+614
+615        Parameters
+616        ----------
+617        save : str
+618            saves the figure to a file named 'save' if specified.
+619        """
+620        if not hasattr(self, 'e_dvalue'):
+621            raise Exception('Run the gamma method first.')
+622        if np.isclose(0.0, self._dvalue, atol=1e-15):
+623            raise Exception('Error is 0.0')
+624        labels = self.e_names
+625        sizes = [self.e_dvalue[name] ** 2 for name in labels] / self._dvalue ** 2
+626        fig1, ax1 = plt.subplots()
+627        ax1.pie(sizes, labels=labels, startangle=90, normalize=True)
+628        ax1.axis('equal')
+629        plt.draw()
+630        if save:
+631            fig1.savefig(save)
+632
+633        return dict(zip(self.e_names, sizes))
 
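Since the fractions are returned, the error budget can also be processed programmatically; sketch for an obs defined on two ensembles:

    obs.gamma_method()
    fractions = obs.plot_piechart(save='error_budget')
    # e.g. {'ensemble1': 0.7, 'ensemble2': 0.3}: squared-error fractions summing to one
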
@@ -3768,34 +3740,34 @@ saves the figure to a file named 'save' if.
-
646    def dump(self, filename, datatype="json.gz", description="", **kwargs):
-647        """Dump the Obs to a file 'filename' of chosen format.
-648
-649        Parameters
-650        ----------
-651        filename : str
-652            name of the file to be saved.
-653        datatype : str
-654            Format of the exported file. Supported formats include
-655            "json.gz" and "pickle"
-656        description : str
-657            Description for output file, only relevant for json.gz format.
-658        path : str
-659            specifies a custom path for the file (default '.')
-660        """
-661        if 'path' in kwargs:
-662            file_name = kwargs.get('path') + '/' + filename
-663        else:
-664            file_name = filename
-665
-666        if datatype == "json.gz":
-667            from .input.json import dump_to_json
-668            dump_to_json([self], file_name, description=description)
-669        elif datatype == "pickle":
-670            with open(file_name + '.p', 'wb') as fb:
-671                pickle.dump(self, fb)
-672        else:
-673            raise Exception("Unknown datatype " + str(datatype))
+            
635    def dump(self, filename, datatype="json.gz", description="", **kwargs):
+636        """Dump the Obs to a file 'filename' of chosen format.
+637
+638        Parameters
+639        ----------
+640        filename : str
+641            name of the file to be saved.
+642        datatype : str
+643            Format of the exported file. Supported formats include
+644            "json.gz" and "pickle"
+645        description : str
+646            Description for output file, only relevant for json.gz format.
+647        path : str
+648            specifies a custom path for the file (default '.')
+649        """
+650        if 'path' in kwargs:
+651            file_name = kwargs.get('path') + '/' + filename
+652        else:
+653            file_name = filename
+654
+655        if datatype == "json.gz":
+656            from .input.json import dump_to_json
+657            dump_to_json([self], file_name, description=description)
+658        elif datatype == "pickle":
+659            with open(file_name + '.p', 'wb') as fb:
+660                pickle.dump(self, fb)
+661        else:
+662            raise Exception("Unknown datatype " + str(datatype))
 
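Sketch of the two supported formats (file names invented):

    obs.dump('my_obs')                     # writes my_obs.json.gz
    obs.dump('my_obs', datatype='pickle')  # writes my_obs.p
    obs.dump('my_obs', path='results')     # same, but inside the directory 'results'
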
@@ -3829,31 +3801,31 @@ specifies a custom path for the file (default '.')
-
675    def export_jackknife(self):
-676        """Export jackknife samples from the Obs
+            
664    def export_jackknife(self):
+665        """Export jackknife samples from the Obs
+666
+667        Returns
+668        -------
+669        numpy.ndarray
+670            Returns a numpy array of length N + 1 where N is the number of samples
+671            for the given ensemble and replicum. The zeroth entry of the array contains
+672            the mean value of the Obs, entries 1 to N contain the N jackknife samples
+673            derived from the Obs. The current implementation only works for observables
+674            defined on exactly one ensemble and replicum. The derived jackknife samples
+675            should agree with samples from a full jackknife analysis up to O(1/N).
+676        """
 677
-678        Returns
-679        -------
-680        numpy.ndarray
-681            Returns a numpy array of length N + 1 where N is the number of samples
-682            for the given ensemble and replicum. The zeroth entry of the array contains
-683            the mean value of the Obs, entries 1 to N contain the N jackknife samples
-684            derived from the Obs. The current implementation only works for observables
-685            defined on exactly one ensemble and replicum. The derived jackknife samples
-686            should agree with samples from a full jackknife analysis up to O(1/N).
-687        """
-688
-689        if len(self.names) != 1:
-690            raise Exception("'export_jackknife' is only implemented for Obs defined on one ensemble and replicum.")
-691
-692        name = self.names[0]
-693        full_data = self.deltas[name] + self.r_values[name]
-694        n = full_data.size
-695        mean = self.value
-696        tmp_jacks = np.zeros(n + 1)
-697        tmp_jacks[0] = mean
-698        tmp_jacks[1:] = (n * mean - full_data) / (n - 1)
-699        return tmp_jacks
+678        if len(self.names) != 1:
+679            raise Exception("'export_jackknife' is only implemented for Obs defined on one ensemble and replicum.")
+680
+681        name = self.names[0]
+682        full_data = self.deltas[name] + self.r_values[name]
+683        n = full_data.size
+684        mean = self.value
+685        tmp_jacks = np.zeros(n + 1)
+686        tmp_jacks[0] = mean
+687        tmp_jacks[1:] = (n * mean - full_data) / (n - 1)
+688        return tmp_jacks
 
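The assignment in the last line uses the identity jack_k = (N * mean - x_k) / (N - 1), i.e. each entry is a leave-one-out average. A quick numerical check under the single-ensemble assumption stated in the docstring:

    import numpy as np
    import pyerrors as pe

    samples = np.random.normal(0.5, 0.1, 100)
    obs = pe.Obs([samples], ['ensemble1'])
    jacks = obs.export_jackknife()
    # compare against explicitly computed leave-one-out means
    loo = np.array([np.delete(samples, k).mean() for k in range(len(samples))])
    assert np.isclose(jacks[0], samples.mean())
    assert np.allclose(jacks[1:], loo)
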
@@ -3884,8 +3856,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
827    def sqrt(self):
-828        return derived_observable(lambda x, **kwargs: np.sqrt(x[0]), [self], man_grad=[1 / 2 / np.sqrt(self.value)])
+            
816    def sqrt(self):
+817        return derived_observable(lambda x, **kwargs: np.sqrt(x[0]), [self], man_grad=[1 / 2 / np.sqrt(self.value)])
 
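All elementary functions below follow the same pattern: they call derived_observable with the analytically known gradient (here 1 / (2 sqrt(x))) so that the fluctuations are propagated at first order. Sketch, for an existing Obs obs with positive value:

    root = obs.sqrt()
    root.gamma_method()  # error of sqrt(obs), propagated via the manual gradient
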
@@ -3903,8 +3875,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
830    def log(self):
-831        return derived_observable(lambda x, **kwargs: np.log(x[0]), [self], man_grad=[1 / self.value])
+            
819    def log(self):
+820        return derived_observable(lambda x, **kwargs: np.log(x[0]), [self], man_grad=[1 / self.value])
 
@@ -3922,8 +3894,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
833    def exp(self):
-834        return derived_observable(lambda x, **kwargs: np.exp(x[0]), [self], man_grad=[np.exp(self.value)])
+            
822    def exp(self):
+823        return derived_observable(lambda x, **kwargs: np.exp(x[0]), [self], man_grad=[np.exp(self.value)])
 
@@ -3941,8 +3913,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
836    def sin(self):
-837        return derived_observable(lambda x, **kwargs: np.sin(x[0]), [self], man_grad=[np.cos(self.value)])
+            
825    def sin(self):
+826        return derived_observable(lambda x, **kwargs: np.sin(x[0]), [self], man_grad=[np.cos(self.value)])
 
@@ -3960,8 +3932,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
839    def cos(self):
-840        return derived_observable(lambda x, **kwargs: np.cos(x[0]), [self], man_grad=[-np.sin(self.value)])
+            
828    def cos(self):
+829        return derived_observable(lambda x, **kwargs: np.cos(x[0]), [self], man_grad=[-np.sin(self.value)])
 
@@ -3979,8 +3951,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
842    def tan(self):
-843        return derived_observable(lambda x, **kwargs: np.tan(x[0]), [self], man_grad=[1 / np.cos(self.value) ** 2])
+            
831    def tan(self):
+832        return derived_observable(lambda x, **kwargs: np.tan(x[0]), [self], man_grad=[1 / np.cos(self.value) ** 2])
 
@@ -3998,8 +3970,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
845    def arcsin(self):
-846        return derived_observable(lambda x: anp.arcsin(x[0]), [self])
+            
834    def arcsin(self):
+835        return derived_observable(lambda x: anp.arcsin(x[0]), [self])
 
@@ -4017,8 +3989,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
848    def arccos(self):
-849        return derived_observable(lambda x: anp.arccos(x[0]), [self])
+            
837    def arccos(self):
+838        return derived_observable(lambda x: anp.arccos(x[0]), [self])
 
@@ -4036,8 +4008,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
851    def arctan(self):
-852        return derived_observable(lambda x: anp.arctan(x[0]), [self])
+            
840    def arctan(self):
+841        return derived_observable(lambda x: anp.arctan(x[0]), [self])
 
@@ -4055,8 +4027,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
854    def sinh(self):
-855        return derived_observable(lambda x, **kwargs: np.sinh(x[0]), [self], man_grad=[np.cosh(self.value)])
+            
843    def sinh(self):
+844        return derived_observable(lambda x, **kwargs: np.sinh(x[0]), [self], man_grad=[np.cosh(self.value)])
 
@@ -4074,8 +4046,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
857    def cosh(self):
-858        return derived_observable(lambda x, **kwargs: np.cosh(x[0]), [self], man_grad=[np.sinh(self.value)])
+            
846    def cosh(self):
+847        return derived_observable(lambda x, **kwargs: np.cosh(x[0]), [self], man_grad=[np.sinh(self.value)])
 
@@ -4093,8 +4065,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
860    def tanh(self):
-861        return derived_observable(lambda x, **kwargs: np.tanh(x[0]), [self], man_grad=[1 / np.cosh(self.value) ** 2])
+            
849    def tanh(self):
+850        return derived_observable(lambda x, **kwargs: np.tanh(x[0]), [self], man_grad=[1 / np.cosh(self.value) ** 2])
 
@@ -4112,8 +4084,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
863    def arcsinh(self):
-864        return derived_observable(lambda x: anp.arcsinh(x[0]), [self])
+            
852    def arcsinh(self):
+853        return derived_observable(lambda x: anp.arcsinh(x[0]), [self])
 
@@ -4131,8 +4103,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
866    def arccosh(self):
-867        return derived_observable(lambda x: anp.arccosh(x[0]), [self])
+            
855    def arccosh(self):
+856        return derived_observable(lambda x: anp.arccosh(x[0]), [self])
 
@@ -4150,8 +4122,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
869    def arctanh(self):
-870        return derived_observable(lambda x: anp.arctanh(x[0]), [self])
+            
858    def arctanh(self):
+859        return derived_observable(lambda x: anp.arctanh(x[0]), [self])
 
@@ -4170,115 +4142,115 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
873class CObs:
-874    """Class for a complex valued observable."""
-875    __slots__ = ['_real', '_imag', 'tag']
-876
-877    def __init__(self, real, imag=0.0):
-878        self._real = real
-879        self._imag = imag
-880        self.tag = None
-881
-882    @property
-883    def real(self):
-884        return self._real
+            
862class CObs:
+863    """Class for a complex valued observable."""
+864    __slots__ = ['_real', '_imag', 'tag']
+865
+866    def __init__(self, real, imag=0.0):
+867        self._real = real
+868        self._imag = imag
+869        self.tag = None
+870
+871    @property
+872    def real(self):
+873        return self._real
+874
+875    @property
+876    def imag(self):
+877        return self._imag
+878
+879    def gamma_method(self, **kwargs):
+880        """Executes the gamma_method for the real and the imaginary part."""
+881        if isinstance(self.real, Obs):
+882            self.real.gamma_method(**kwargs)
+883        if isinstance(self.imag, Obs):
+884            self.imag.gamma_method(**kwargs)
 885
-886    @property
-887    def imag(self):
-888        return self._imag
+886    def is_zero(self):
+887        """Checks whether both real and imaginary part are zero within machine precision."""
+888        return self.real == 0.0 and self.imag == 0.0
 889
-890    def gamma_method(self, **kwargs):
-891        """Executes the gamma_method for the real and the imaginary part."""
-892        if isinstance(self.real, Obs):
-893            self.real.gamma_method(**kwargs)
-894        if isinstance(self.imag, Obs):
-895            self.imag.gamma_method(**kwargs)
-896
-897    def is_zero(self):
-898        """Checks whether both real and imaginary part are zero within machine precision."""
-899        return self.real == 0.0 and self.imag == 0.0
-900
-901    def conjugate(self):
-902        return CObs(self.real, -self.imag)
-903
-904    def __add__(self, other):
-905        if isinstance(other, np.ndarray):
-906            return other + self
-907        elif hasattr(other, 'real') and hasattr(other, 'imag'):
-908            return CObs(self.real + other.real,
-909                        self.imag + other.imag)
+890    def conjugate(self):
+891        return CObs(self.real, -self.imag)
+892
+893    def __add__(self, other):
+894        if isinstance(other, np.ndarray):
+895            return other + self
+896        elif hasattr(other, 'real') and hasattr(other, 'imag'):
+897            return CObs(self.real + other.real,
+898                        self.imag + other.imag)
+899        else:
+900            return CObs(self.real + other, self.imag)
+901
+902    def __radd__(self, y):
+903        return self + y
+904
+905    def __sub__(self, other):
+906        if isinstance(other, np.ndarray):
+907            return -1 * (other - self)
+908        elif hasattr(other, 'real') and hasattr(other, 'imag'):
+909            return CObs(self.real - other.real, self.imag - other.imag)
 910        else:
-911            return CObs(self.real + other, self.imag)
+911            return CObs(self.real - other, self.imag)
 912
-913    def __radd__(self, y):
-914        return self + y
+913    def __rsub__(self, other):
+914        return -1 * (self - other)
 915
-916    def __sub__(self, other):
+916    def __mul__(self, other):
 917        if isinstance(other, np.ndarray):
-918            return -1 * (other - self)
+918            return other * self
 919        elif hasattr(other, 'real') and hasattr(other, 'imag'):
-920            return CObs(self.real - other.real, self.imag - other.imag)
-921        else:
-922            return CObs(self.real - other, self.imag)
-923
-924    def __rsub__(self, other):
-925        return -1 * (self - other)
-926
-927    def __mul__(self, other):
-928        if isinstance(other, np.ndarray):
-929            return other * self
-930        elif hasattr(other, 'real') and hasattr(other, 'imag'):
-931            if all(isinstance(i, Obs) for i in [self.real, self.imag, other.real, other.imag]):
-932                return CObs(derived_observable(lambda x, **kwargs: x[0] * x[1] - x[2] * x[3],
-933                                               [self.real, other.real, self.imag, other.imag],
-934                                               man_grad=[other.real.value, self.real.value, -other.imag.value, -self.imag.value]),
-935                            derived_observable(lambda x, **kwargs: x[2] * x[1] + x[0] * x[3],
-936                                               [self.real, other.real, self.imag, other.imag],
-937                                               man_grad=[other.imag.value, self.imag.value, other.real.value, self.real.value]))
-938            elif getattr(other, 'imag', 0) != 0:
-939                return CObs(self.real * other.real - self.imag * other.imag,
-940                            self.imag * other.real + self.real * other.imag)
-941            else:
-942                return CObs(self.real * other.real, self.imag * other.real)
-943        else:
-944            return CObs(self.real * other, self.imag * other)
-945
-946    def __rmul__(self, other):
-947        return self * other
-948
-949    def __truediv__(self, other):
-950        if isinstance(other, np.ndarray):
-951            return 1 / (other / self)
-952        elif hasattr(other, 'real') and hasattr(other, 'imag'):
-953            r = other.real ** 2 + other.imag ** 2
-954            return CObs((self.real * other.real + self.imag * other.imag) / r, (self.imag * other.real - self.real * other.imag) / r)
-955        else:
-956            return CObs(self.real / other, self.imag / other)
-957
-958    def __rtruediv__(self, other):
-959        r = self.real ** 2 + self.imag ** 2
-960        if hasattr(other, 'real') and hasattr(other, 'imag'):
-961            return CObs((self.real * other.real + self.imag * other.imag) / r, (self.real * other.imag - self.imag * other.real) / r)
-962        else:
-963            return CObs(self.real * other / r, -self.imag * other / r)
-964
-965    def __abs__(self):
-966        return np.sqrt(self.real**2 + self.imag**2)
-967
-968    def __pos__(self):
-969        return self
-970
-971    def __neg__(self):
-972        return -1 * self
-973
-974    def __eq__(self, other):
-975        return self.real == other.real and self.imag == other.imag
-976
-977    def __str__(self):
-978        return '(' + str(self.real) + int(self.imag >= 0.0) * '+' + str(self.imag) + 'j)'
-979
-980    def __repr__(self):
-981        return 'CObs[' + str(self) + ']'
+920            if all(isinstance(i, Obs) for i in [self.real, self.imag, other.real, other.imag]):
+921                return CObs(derived_observable(lambda x, **kwargs: x[0] * x[1] - x[2] * x[3],
+922                                               [self.real, other.real, self.imag, other.imag],
+923                                               man_grad=[other.real.value, self.real.value, -other.imag.value, -self.imag.value]),
+924                            derived_observable(lambda x, **kwargs: x[2] * x[1] + x[0] * x[3],
+925                                               [self.real, other.real, self.imag, other.imag],
+926                                               man_grad=[other.imag.value, self.imag.value, other.real.value, self.real.value]))
+927            elif getattr(other, 'imag', 0) != 0:
+928                return CObs(self.real * other.real - self.imag * other.imag,
+929                            self.imag * other.real + self.real * other.imag)
+930            else:
+931                return CObs(self.real * other.real, self.imag * other.real)
+932        else:
+933            return CObs(self.real * other, self.imag * other)
+934
+935    def __rmul__(self, other):
+936        return self * other
+937
+938    def __truediv__(self, other):
+939        if isinstance(other, np.ndarray):
+940            return 1 / (other / self)
+941        elif hasattr(other, 'real') and hasattr(other, 'imag'):
+942            r = other.real ** 2 + other.imag ** 2
+943            return CObs((self.real * other.real + self.imag * other.imag) / r, (self.imag * other.real - self.real * other.imag) / r)
+944        else:
+945            return CObs(self.real / other, self.imag / other)
+946
+947    def __rtruediv__(self, other):
+948        r = self.real ** 2 + self.imag ** 2
+949        if hasattr(other, 'real') and hasattr(other, 'imag'):
+950            return CObs((self.real * other.real + self.imag * other.imag) / r, (self.real * other.imag - self.imag * other.real) / r)
+951        else:
+952            return CObs(self.real * other / r, -self.imag * other / r)
+953
+954    def __abs__(self):
+955        return np.sqrt(self.real**2 + self.imag**2)
+956
+957    def __pos__(self):
+958        return self
+959
+960    def __neg__(self):
+961        return -1 * self
+962
+963    def __eq__(self, other):
+964        return self.real == other.real and self.imag == other.imag
+965
+966    def __str__(self):
+967        return '(' + str(self.real) + int(self.imag >= 0.0) * '+' + str(self.imag) + 'j)'
+968
+969    def __repr__(self):
+970        return 'CObs[' + str(self) + ']'
 
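A brief sketch of complex arithmetic with CObs (data invented):

    import numpy as np
    import pyerrors as pe

    re_part = pe.Obs([np.random.normal(1.0, 0.1, 500)], ['ensemble1'])
    im_part = pe.Obs([np.random.normal(0.5, 0.1, 500)], ['ensemble1'])
    c = pe.CObs(re_part, im_part)
    mod_sq = c * c.conjugate()  # |c|^2 with vanishing imaginary part
    mod_sq.gamma_method()       # runs the analysis on real and imaginary part
    print(mod_sq.real)
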
@@ -4296,10 +4268,10 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
877    def __init__(self, real, imag=0.0):
-878        self._real = real
-879        self._imag = imag
-880        self.tag = None
+            
866    def __init__(self, real, imag=0.0):
+867        self._real = real
+868        self._imag = imag
+869        self.tag = None
 
@@ -4317,12 +4289,12 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
890    def gamma_method(self, **kwargs):
-891        """Executes the gamma_method for the real and the imaginary part."""
-892        if isinstance(self.real, Obs):
-893            self.real.gamma_method(**kwargs)
-894        if isinstance(self.imag, Obs):
-895            self.imag.gamma_method(**kwargs)
+            
879    def gamma_method(self, **kwargs):
+880        """Executes the gamma_method for the real and the imaginary part."""
+881        if isinstance(self.real, Obs):
+882            self.real.gamma_method(**kwargs)
+883        if isinstance(self.imag, Obs):
+884            self.imag.gamma_method(**kwargs)
 
@@ -4342,9 +4314,9 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
897    def is_zero(self):
-898        """Checks whether both real and imaginary part are zero within machine precision."""
-899        return self.real == 0.0 and self.imag == 0.0
+            
886    def is_zero(self):
+887        """Checks whether both real and imaginary part are zero within machine precision."""
+888        return self.real == 0.0 and self.imag == 0.0
 
@@ -4364,8 +4336,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
901    def conjugate(self):
-902        return CObs(self.real, -self.imag)
+            
890    def conjugate(self):
+891        return CObs(self.real, -self.imag)
 
@@ -4384,174 +4356,174 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
1106def derived_observable(func, data, array_mode=False, **kwargs):
-1107    """Construct a derived Obs according to func(data, **kwargs) using automatic differentiation.
-1108
-1109    Parameters
-1110    ----------
-1111    func : object
-1112        arbitrary function of the form func(data, **kwargs). For the
-1113        automatic differentiation to work, all numpy functions have to have
-1114        the autograd wrapper (use 'import autograd.numpy as anp').
-1115    data : list
-1116        list of Obs, e.g. [obs1, obs2, obs3].
-1117    num_grad : bool
-1118        if True, numerical derivatives are used instead of autograd
-1119        (default False). To control the numerical differentiation the
-1120        kwargs of numdifftools.step_generators.MaxStepGenerator
-1121        can be used.
-1122    man_grad : list
-1123        manually supply a list or an array which contains the jacobian
-1124        of func. Use cautiously, supplying the wrong derivative will
-1125        not be intercepted.
-1126
-1127    Notes
-1128    -----
-1129    For simple mathematical operations it can be practical to use anonymous
-1130    functions. For the ratio of two observables one can e.g. use
-1131
-1132    new_obs = derived_observable(lambda x: x[0] / x[1], [obs1, obs2])
-1133    """
-1134
-1135    data = np.asarray(data)
-1136    raveled_data = data.ravel()
-1137
-1138    # Workaround for matrix operations containing non Obs data
-1139    if not all(isinstance(x, Obs) for x in raveled_data):
-1140        for i in range(len(raveled_data)):
-1141            if isinstance(raveled_data[i], (int, float)):
-1142                raveled_data[i] = cov_Obs(raveled_data[i], 0.0, "###dummy_covobs###")
-1143
-1144    allcov = {}
-1145    for o in raveled_data:
-1146        for name in o.cov_names:
-1147            if name in allcov:
-1148                if not np.allclose(allcov[name], o.covobs[name].cov):
-1149                    raise Exception('Inconsistent covariance matrices for %s!' % (name))
-1150            else:
-1151                allcov[name] = o.covobs[name].cov
+1099def derived_observable(func, data, array_mode=False, **kwargs):
+1100    """Construct a derived Obs according to func(data, **kwargs) using automatic differentiation.
+1101
+1102    Parameters
+1103    ----------
+1104    func : object
+1105        arbitrary function of the form func(data, **kwargs). For the
+1106        automatic differentiation to work, all numpy functions have to have
+1107        the autograd wrapper (use 'import autograd.numpy as anp').
+1108    data : list
+1109        list of Obs, e.g. [obs1, obs2, obs3].
+1110    num_grad : bool
+1111        if True, numerical derivatives are used instead of autograd
+1112        (default False). To control the numerical differentiation the
+1113        kwargs of numdifftools.step_generators.MaxStepGenerator
+1114        can be used.
+1115    man_grad : list
+1116        manually supply a list or an array which contains the jacobian
+1117        of func. Use cautiously; supplying the wrong derivative will
+1118        not be intercepted.
+1119
+1120    Notes
+1121    -----
+1122    For simple mathematical operations it can be practical to use anonymous
+1123    functions. For the ratio of two observables one can e.g. use
+1124
+1125    new_obs = derived_observable(lambda x: x[0] / x[1], [obs1, obs2])
+1126    """
+1127
+1128    data = np.asarray(data)
+1129    raveled_data = data.ravel()
+1130
+1131    # Workaround for matrix operations containing non Obs data
+1132    if not all(isinstance(x, Obs) for x in raveled_data):
+1133        for i in range(len(raveled_data)):
+1134            if isinstance(raveled_data[i], (int, float)):
+1135                raveled_data[i] = cov_Obs(raveled_data[i], 0.0, "###dummy_covobs###")
+1136
+1137    allcov = {}
+1138    for o in raveled_data:
+1139        for name in o.cov_names:
+1140            if name in allcov:
+1141                if not np.allclose(allcov[name], o.covobs[name].cov):
+1142                    raise Exception('Inconsistent covariance matrices for %s!' % (name))
+1143            else:
+1144                allcov[name] = o.covobs[name].cov
+1145
+1146    n_obs = len(raveled_data)
+1147    new_names = sorted(set([y for x in [o.names for o in raveled_data] for y in x]))
+1148    new_cov_names = sorted(set([y for x in [o.cov_names for o in raveled_data] for y in x]))
+1149    new_sample_names = sorted(set(new_names) - set(new_cov_names))
+1150
+1151    reweighted = len(list(filter(lambda o: o.reweighted is True, raveled_data))) > 0
 1152
-1153    n_obs = len(raveled_data)
-1154    new_names = sorted(set([y for x in [o.names for o in raveled_data] for y in x]))
-1155    new_cov_names = sorted(set([y for x in [o.cov_names for o in raveled_data] for y in x]))
-1156    new_sample_names = sorted(set(new_names) - set(new_cov_names))
+1153    if data.ndim == 1:
+1154        values = np.array([o.value for o in data])
+1155    else:
+1156        values = np.vectorize(lambda x: x.value)(data)
 1157
-1158    reweighted = len(list(filter(lambda o: o.reweighted is True, raveled_data))) > 0
+1158    new_values = func(values, **kwargs)
 1159
-1160    if data.ndim == 1:
-1161        values = np.array([o.value for o in data])
-1162    else:
-1163        values = np.vectorize(lambda x: x.value)(data)
-1164
-1165    new_values = func(values, **kwargs)
-1166
-1167    multi = int(isinstance(new_values, np.ndarray))
-1168
-1169    new_r_values = {}
-1170    new_idl_d = {}
-1171    for name in new_sample_names:
-1172        idl = []
-1173        tmp_values = np.zeros(n_obs)
-1174        for i, item in enumerate(raveled_data):
-1175            tmp_values[i] = item.r_values.get(name, item.value)
-1176            tmp_idl = item.idl.get(name)
-1177            if tmp_idl is not None:
-1178                idl.append(tmp_idl)
-1179        if multi > 0:
-1180            tmp_values = np.array(tmp_values).reshape(data.shape)
-1181        new_r_values[name] = func(tmp_values, **kwargs)
-1182        new_idl_d[name] = _merge_idx(idl)
-1183
-1184    if 'man_grad' in kwargs:
-1185        deriv = np.asarray(kwargs.get('man_grad'))
-1186        if new_values.shape + data.shape != deriv.shape:
-1187            raise Exception('Manual derivative does not have correct shape.')
-1188    elif kwargs.get('num_grad') is True:
-1189        if multi > 0:
-1190            raise Exception('Multi mode currently not supported for numerical derivative')
-1191        options = {
-1192            'base_step': 0.1,
-1193            'step_ratio': 2.5}
-1194        for key in options.keys():
-1195            kwarg = kwargs.get(key)
-1196            if kwarg is not None:
-1197                options[key] = kwarg
-1198        tmp_df = nd.Gradient(func, order=4, **{k: v for k, v in options.items() if v is not None})(values, **kwargs)
-1199        if tmp_df.size == 1:
-1200            deriv = np.array([tmp_df.real])
-1201        else:
-1202            deriv = tmp_df.real
-1203    else:
-1204        deriv = jacobian(func)(values, **kwargs)
-1205
-1206    final_result = np.zeros(new_values.shape, dtype=object)
-1207
-1208    if array_mode is True:
-1209
-1210        class _Zero_grad():
-1211            def __init__(self, N):
-1212                self.grad = np.zeros((N, 1))
-1213
-1214        new_covobs_lengths = dict(set([y for x in [[(n, o.covobs[n].N) for n in o.cov_names] for o in raveled_data] for y in x]))
-1215        d_extracted = {}
-1216        g_extracted = {}
-1217        for name in new_sample_names:
-1218            d_extracted[name] = []
-1219            ens_length = len(new_idl_d[name])
-1220            for i_dat, dat in enumerate(data):
-1221                d_extracted[name].append(np.array([_expand_deltas_for_merge(o.deltas.get(name, np.zeros(ens_length)), o.idl.get(name, new_idl_d[name]), o.shape.get(name, ens_length), new_idl_d[name]) for o in dat.reshape(np.prod(dat.shape))]).reshape(dat.shape + (ens_length, )))
-1222        for name in new_cov_names:
-1223            g_extracted[name] = []
-1224            zero_grad = _Zero_grad(new_covobs_lengths[name])
-1225            for i_dat, dat in enumerate(data):
-1226                g_extracted[name].append(np.array([o.covobs.get(name, zero_grad).grad for o in dat.reshape(np.prod(dat.shape))]).reshape(dat.shape + (new_covobs_lengths[name], 1)))
-1227
-1228    for i_val, new_val in np.ndenumerate(new_values):
-1229        new_deltas = {}
-1230        new_grad = {}
-1231        if array_mode is True:
-1232            for name in new_sample_names:
-1233                ens_length = d_extracted[name][0].shape[-1]
-1234                new_deltas[name] = np.zeros(ens_length)
-1235                for i_dat, dat in enumerate(d_extracted[name]):
-1236                    new_deltas[name] += np.tensordot(deriv[i_val + (i_dat, )], dat)
-1237            for name in new_cov_names:
-1238                new_grad[name] = 0
-1239                for i_dat, dat in enumerate(g_extracted[name]):
-1240                    new_grad[name] += np.tensordot(deriv[i_val + (i_dat, )], dat)
-1241        else:
-1242            for j_obs, obs in np.ndenumerate(data):
-1243                for name in obs.names:
-1244                    if name in obs.cov_names:
-1245                        new_grad[name] = new_grad.get(name, 0) + deriv[i_val + j_obs] * obs.covobs[name].grad
-1246                    else:
-1247                        new_deltas[name] = new_deltas.get(name, 0) + deriv[i_val + j_obs] * _expand_deltas_for_merge(obs.deltas[name], obs.idl[name], obs.shape[name], new_idl_d[name])
-1248
-1249        new_covobs = {name: Covobs(0, allcov[name], name, grad=new_grad[name]) for name in new_grad}
-1250
-1251        if not set(new_covobs.keys()).isdisjoint(new_deltas.keys()):
-1252            raise Exception('The same name has been used for deltas and covobs!')
-1253        new_samples = []
-1254        new_means = []
-1255        new_idl = []
-1256        new_names_obs = []
-1257        for name in new_names:
-1258            if name not in new_covobs:
-1259                new_samples.append(new_deltas[name])
-1260                new_idl.append(new_idl_d[name])
-1261                new_means.append(new_r_values[name][i_val])
-1262                new_names_obs.append(name)
-1263        final_result[i_val] = Obs(new_samples, new_names_obs, means=new_means, idl=new_idl)
-1264        for name in new_covobs:
-1265            final_result[i_val].names.append(name)
-1266        final_result[i_val]._covobs = new_covobs
-1267        final_result[i_val]._value = new_val
-1268        final_result[i_val].reweighted = reweighted
-1269
-1270    if multi == 0:
-1271        final_result = final_result.item()
-1272
-1273    return final_result
+1160    multi = int(isinstance(new_values, np.ndarray))
+1161
+1162    new_r_values = {}
+1163    new_idl_d = {}
+1164    for name in new_sample_names:
+1165        idl = []
+1166        tmp_values = np.zeros(n_obs)
+1167        for i, item in enumerate(raveled_data):
+1168            tmp_values[i] = item.r_values.get(name, item.value)
+1169            tmp_idl = item.idl.get(name)
+1170            if tmp_idl is not None:
+1171                idl.append(tmp_idl)
+1172        if multi > 0:
+1173            tmp_values = np.array(tmp_values).reshape(data.shape)
+1174        new_r_values[name] = func(tmp_values, **kwargs)
+1175        new_idl_d[name] = _merge_idx(idl)
+1176
+1177    if 'man_grad' in kwargs:
+1178        deriv = np.asarray(kwargs.get('man_grad'))
+1179        if new_values.shape + data.shape != deriv.shape:
+1180            raise Exception('Manual derivative does not have correct shape.')
+1181    elif kwargs.get('num_grad') is True:
+1182        if multi > 0:
+1183            raise Exception('Multi mode currently not supported for numerical derivative')
+1184        options = {
+1185            'base_step': 0.1,
+1186            'step_ratio': 2.5}
+1187        for key in options.keys():
+1188            kwarg = kwargs.get(key)
+1189            if kwarg is not None:
+1190                options[key] = kwarg
+1191        tmp_df = nd.Gradient(func, order=4, **{k: v for k, v in options.items() if v is not None})(values, **kwargs)
+1192        if tmp_df.size == 1:
+1193            deriv = np.array([tmp_df.real])
+1194        else:
+1195            deriv = tmp_df.real
+1196    else:
+1197        deriv = jacobian(func)(values, **kwargs)
+1198
+1199    final_result = np.zeros(new_values.shape, dtype=object)
+1200
+1201    if array_mode is True:
+1202
+1203        class _Zero_grad():
+1204            def __init__(self, N):
+1205                self.grad = np.zeros((N, 1))
+1206
+1207        new_covobs_lengths = dict(set([y for x in [[(n, o.covobs[n].N) for n in o.cov_names] for o in raveled_data] for y in x]))
+1208        d_extracted = {}
+1209        g_extracted = {}
+1210        for name in new_sample_names:
+1211            d_extracted[name] = []
+1212            ens_length = len(new_idl_d[name])
+1213            for i_dat, dat in enumerate(data):
+1214                d_extracted[name].append(np.array([_expand_deltas_for_merge(o.deltas.get(name, np.zeros(ens_length)), o.idl.get(name, new_idl_d[name]), o.shape.get(name, ens_length), new_idl_d[name]) for o in dat.reshape(np.prod(dat.shape))]).reshape(dat.shape + (ens_length, )))
+1215        for name in new_cov_names:
+1216            g_extracted[name] = []
+1217            zero_grad = _Zero_grad(new_covobs_lengths[name])
+1218            for i_dat, dat in enumerate(data):
+1219                g_extracted[name].append(np.array([o.covobs.get(name, zero_grad).grad for o in dat.reshape(np.prod(dat.shape))]).reshape(dat.shape + (new_covobs_lengths[name], 1)))
+1220
+1221    for i_val, new_val in np.ndenumerate(new_values):
+1222        new_deltas = {}
+1223        new_grad = {}
+1224        if array_mode is True:
+1225            for name in new_sample_names:
+1226                ens_length = d_extracted[name][0].shape[-1]
+1227                new_deltas[name] = np.zeros(ens_length)
+1228                for i_dat, dat in enumerate(d_extracted[name]):
+1229                    new_deltas[name] += np.tensordot(deriv[i_val + (i_dat, )], dat)
+1230            for name in new_cov_names:
+1231                new_grad[name] = 0
+1232                for i_dat, dat in enumerate(g_extracted[name]):
+1233                    new_grad[name] += np.tensordot(deriv[i_val + (i_dat, )], dat)
+1234        else:
+1235            for j_obs, obs in np.ndenumerate(data):
+1236                for name in obs.names:
+1237                    if name in obs.cov_names:
+1238                        new_grad[name] = new_grad.get(name, 0) + deriv[i_val + j_obs] * obs.covobs[name].grad
+1239                    else:
+1240                        new_deltas[name] = new_deltas.get(name, 0) + deriv[i_val + j_obs] * _expand_deltas_for_merge(obs.deltas[name], obs.idl[name], obs.shape[name], new_idl_d[name])
+1241
+1242        new_covobs = {name: Covobs(0, allcov[name], name, grad=new_grad[name]) for name in new_grad}
+1243
+1244        if not set(new_covobs.keys()).isdisjoint(new_deltas.keys()):
+1245            raise Exception('The same name has been used for deltas and covobs!')
+1246        new_samples = []
+1247        new_means = []
+1248        new_idl = []
+1249        new_names_obs = []
+1250        for name in new_names:
+1251            if name not in new_covobs:
+1252                new_samples.append(new_deltas[name])
+1253                new_idl.append(new_idl_d[name])
+1254                new_means.append(new_r_values[name][i_val])
+1255                new_names_obs.append(name)
+1256        final_result[i_val] = Obs(new_samples, new_names_obs, means=new_means, idl=new_idl)
+1257        for name in new_covobs:
+1258            final_result[i_val].names.append(name)
+1259        final_result[i_val]._covobs = new_covobs
+1260        final_result[i_val]._value = new_val
+1261        final_result[i_val].reweighted = reweighted
+1262
+1263    if multi == 0:
+1264        final_result = final_result.item()
+1265
+1266    return final_result
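
The ratio example from the docstring, together with a case that actually needs the autograd wrapper, as a minimal runnable sketch (ensemble name and samples invented):

    import autograd.numpy as anp   # wrapped numpy, required for the derivatives
    import numpy as np
    import pyerrors as pe

    rng = np.random.default_rng(7)
    obs1 = pe.Obs([rng.normal(2.0, 0.2, 1000)], ['ensA'])
    obs2 = pe.Obs([rng.normal(1.0, 0.1, 1000)], ['ensA'])

    ratio = pe.derived_observable(lambda x: x[0] / x[1], [obs1, obs2])
    log_obs = pe.derived_observable(lambda x: anp.log(x[0]), [obs1])

    for o in (ratio, log_obs):
        o.gamma_method()
        print(o)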
 
@@ -4598,46 +4570,46 @@ functions. For the ratio of two observables one can e.g. use

-1310def reweight(weight, obs, **kwargs):
-1311    """Reweight a list of observables.
-1312
-1313    Parameters
-1314    ----------
-1315    weight : Obs
-1316        Reweighting factor. An Observable that has to be defined on a superset of the
-1317        configurations in obs[i].idl for all i.
-1318    obs : list
-1319        list of Obs, e.g. [obs1, obs2, obs3].
-1320    all_configs : bool
-1321        if True, the reweighted observables are normalized by the average of
-1322        the reweighting factor on all configurations in weight.idl and not
-1323        on the configurations in obs[i].idl. Default False.
-1324    """
-1325    result = []
-1326    for i in range(len(obs)):
-1327        if len(obs[i].cov_names):
-1328            raise Exception('Error: Not possible to reweight an Obs that contains covobs!')
-1329        if not set(obs[i].names).issubset(weight.names):
-1330            raise Exception('Error: Ensembles do not fit')
-1331        for name in obs[i].names:
-1332            if not set(obs[i].idl[name]).issubset(weight.idl[name]):
-1333                raise Exception('obs[%d] has to be defined on a subset of the configs in weight.idl[%s]!' % (i, name))
-1334        new_samples = []
-1335        w_deltas = {}
-1336        for name in sorted(obs[i].names):
-1337            w_deltas[name] = _reduce_deltas(weight.deltas[name], weight.idl[name], obs[i].idl[name])
-1338            new_samples.append((w_deltas[name] + weight.r_values[name]) * (obs[i].deltas[name] + obs[i].r_values[name]))
-1339        tmp_obs = Obs(new_samples, sorted(obs[i].names), idl=[obs[i].idl[name] for name in sorted(obs[i].names)])
-1340
-1341        if kwargs.get('all_configs'):
-1342            new_weight = weight
-1343        else:
-1344            new_weight = Obs([w_deltas[name] + weight.r_values[name] for name in sorted(obs[i].names)], sorted(obs[i].names), idl=[obs[i].idl[name] for name in sorted(obs[i].names)])
-1345
-1346        result.append(tmp_obs / new_weight)
-1347        result[-1].reweighted = True
-1348
-1349    return result
+1303def reweight(weight, obs, **kwargs):
+1304    """Reweight a list of observables.
+1305
+1306    Parameters
+1307    ----------
+1308    weight : Obs
+1309        Reweighting factor. An Observable that has to be defined on a superset of the
+1310        configurations in obs[i].idl for all i.
+1311    obs : list
+1312        list of Obs, e.g. [obs1, obs2, obs3].
+1313    all_configs : bool
+1314        if True, the reweighted observables are normalized by the average of
+1315        the reweighting factor on all configurations in weight.idl and not
+1316        on the configurations in obs[i].idl. Default False.
+1317    """
+1318    result = []
+1319    for i in range(len(obs)):
+1320        if len(obs[i].cov_names):
+1321            raise Exception('Error: Not possible to reweight an Obs that contains covobs!')
+1322        if not set(obs[i].names).issubset(weight.names):
+1323            raise Exception('Error: Ensembles do not fit')
+1324        for name in obs[i].names:
+1325            if not set(obs[i].idl[name]).issubset(weight.idl[name]):
+1326                raise Exception('obs[%d] has to be defined on a subset of the configs in weight.idl[%s]!' % (i, name))
+1327        new_samples = []
+1328        w_deltas = {}
+1329        for name in sorted(obs[i].names):
+1330            w_deltas[name] = _reduce_deltas(weight.deltas[name], weight.idl[name], obs[i].idl[name])
+1331            new_samples.append((w_deltas[name] + weight.r_values[name]) * (obs[i].deltas[name] + obs[i].r_values[name]))
+1332        tmp_obs = Obs(new_samples, sorted(obs[i].names), idl=[obs[i].idl[name] for name in sorted(obs[i].names)])
+1333
+1334        if kwargs.get('all_configs'):
+1335            new_weight = weight
+1336        else:
+1337            new_weight = Obs([w_deltas[name] + weight.r_values[name] for name in sorted(obs[i].names)], sorted(obs[i].names), idl=[obs[i].idl[name] for name in sorted(obs[i].names)])
+1338
+1339        result.append(tmp_obs / new_weight)
+1340        result[-1].reweighted = True
+1341
+1342    return result
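
A minimal reweighting sketch; here the weight is defined on exactly the configurations of the observable, which satisfies the superset condition (names and numbers invented):

    import numpy as np
    import pyerrors as pe

    rng = np.random.default_rng(1)
    obs = pe.Obs([rng.normal(1.0, 0.1, 1000)], ['ensA'])
    w = pe.Obs([rng.uniform(0.8, 1.2, 1000)], ['ensA'])   # reweighting factor

    robs = pe.reweight(w, [obs])[0]   # one result per input Obs
    robs.gamma_method()
    print(robs.reweighted)            # True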
 
@@ -4671,47 +4643,47 @@ on the configurations in obs[i].idl. Default False.
-1352def correlate(obs_a, obs_b):
-1353    """Correlate two observables.
+1345def correlate(obs_a, obs_b):
+1346    """Correlate two observables.
+1347
+1348    Parameters
+1349    ----------
+1350    obs_a : Obs
+1351        First observable
+1352    obs_b : Obs
+1353        Second observable
 1354
-1355    Parameters
-1356    ----------
-1357    obs_a : Obs
-1358        First observable
-1359    obs_b : Obs
-1360        Second observable
+1355    Notes
+1356    -----
+1357    Keep in mind to only correlate primary observables which have not been reweighted
+1358    yet. The reweighting has to be applied after correlating the observables.
+1359    Currently only works if ensembles are identical (this is not strictly necessary).
+1360    """
 1361
-1362    Notes
-1363    -----
-1364    Keep in mind to only correlate primary observables which have not been reweighted
-1365    yet. The reweighting has to be applied after correlating the observables.
-1366    Currently only works if ensembles are identical (this is not strictly necessary).
-1367    """
-1368
-1369    if sorted(obs_a.names) != sorted(obs_b.names):
-1370        raise Exception(f"Ensembles do not fit {set(sorted(obs_a.names)) ^ set(sorted(obs_b.names))}")
-1371    if len(obs_a.cov_names) or len(obs_b.cov_names):
-1372        raise Exception('Error: Not possible to correlate Obs that contain covobs!')
-1373    for name in obs_a.names:
-1374        if obs_a.shape[name] != obs_b.shape[name]:
-1375            raise Exception('Shapes of ensemble', name, 'do not fit')
-1376        if obs_a.idl[name] != obs_b.idl[name]:
-1377            raise Exception('idl of ensemble', name, 'do not fit')
-1378
-1379    if obs_a.reweighted is True:
-1380        warnings.warn("The first observable is already reweighted.", RuntimeWarning)
-1381    if obs_b.reweighted is True:
-1382        warnings.warn("The second observable is already reweighted.", RuntimeWarning)
-1383
-1384    new_samples = []
-1385    new_idl = []
-1386    for name in sorted(obs_a.names):
-1387        new_samples.append((obs_a.deltas[name] + obs_a.r_values[name]) * (obs_b.deltas[name] + obs_b.r_values[name]))
-1388        new_idl.append(obs_a.idl[name])
-1389
-1390    o = Obs(new_samples, sorted(obs_a.names), idl=new_idl)
-1391    o.reweighted = obs_a.reweighted or obs_b.reweighted
-1392    return o
+1362    if sorted(obs_a.names) != sorted(obs_b.names):
+1363        raise Exception(f"Ensembles do not fit {set(sorted(obs_a.names)) ^ set(sorted(obs_b.names))}")
+1364    if len(obs_a.cov_names) or len(obs_b.cov_names):
+1365        raise Exception('Error: Not possible to correlate Obs that contain covobs!')
+1366    for name in obs_a.names:
+1367        if obs_a.shape[name] != obs_b.shape[name]:
+1368            raise Exception('Shapes of ensemble', name, 'do not fit')
+1369        if obs_a.idl[name] != obs_b.idl[name]:
+1370            raise Exception('idl of ensemble', name, 'do not fit')
+1371
+1372    if obs_a.reweighted is True:
+1373        warnings.warn("The first observable is already reweighted.", RuntimeWarning)
+1374    if obs_b.reweighted is True:
+1375        warnings.warn("The second observable is already reweighted.", RuntimeWarning)
+1376
+1377    new_samples = []
+1378    new_idl = []
+1379    for name in sorted(obs_a.names):
+1380        new_samples.append((obs_a.deltas[name] + obs_a.r_values[name]) * (obs_b.deltas[name] + obs_b.r_values[name]))
+1381        new_idl.append(obs_a.idl[name])
+1382
+1383    o = Obs(new_samples, sorted(obs_a.names), idl=new_idl)
+1384    o.reweighted = obs_a.reweighted or obs_b.reweighted
+1385    return o
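
A sketch of correlating two primary observables defined on identical configurations (data invented for illustration):

    import numpy as np
    import pyerrors as pe

    rng = np.random.default_rng(2)
    a = pe.Obs([rng.normal(1.0, 0.1, 1000)], ['ensA'])
    b = pe.Obs([rng.normal(2.0, 0.1, 1000)], ['ensA'])

    ab = pe.correlate(a, b)   # per-configuration product, i.e. samples of a*b
    ab.gamma_method()
    print(ab)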
 
@@ -4746,74 +4718,74 @@ Currently only works if ensembles are identical (this is not strictly necessary)
-1395def covariance(obs, visualize=False, correlation=False, smooth=None, **kwargs):
-1396    r'''Calculates the error covariance matrix of a set of observables.
-1397
-1398    WARNING: This function should be used with care, especially for observables with support on multiple
-1399             ensembles with differing autocorrelations. See the notes below for details.
-1400
-1401    The gamma method has to be applied first to all observables.
-1402
-1403    Parameters
-1404    ----------
-1405    obs : list or numpy.ndarray
-1406        List or one dimensional array of Obs
-1407    visualize : bool
-1408        If True plots the corresponding normalized correlation matrix (default False).
-1409    correlation : bool
-1410        If True the correlation matrix instead of the error covariance matrix is returned (default False).
-1411    smooth : None or int
-1412        If smooth is an integer 'E' between 2 and the dimension of the matrix minus 1, the eigenvalue
-1413        smoothing procedure of hep-lat/9412087 is applied to the correlation matrix which leaves the
-1414        largest E eigenvalues essentially unchanged and smoothes the smaller eigenvalues to avoid extremely
-1415        small ones.
-1416
-1417    Notes
-1418    -----
-1419    The error covariance is defined such that it agrees with the squared standard error for two identical observables
-1420    $$\operatorname{cov}(a,a)=\sum_{s=1}^N\delta_a^s\delta_a^s/N^2=\Gamma_{aa}(0)/N=\operatorname{var}(a)/N=\sigma_a^2$$
-1421    in the absence of autocorrelation.
-1422    The error covariance is estimated by calculating the correlation matrix assuming no autocorrelation and then rescaling the correlation matrix by the full errors including the previous gamma method estimate for the autocorrelation of the observables. The covariance at windowsize 0 is guaranteed to be positive semi-definite
-1423    $$\sum_{i,j}v_i\Gamma_{ij}(0)v_j=\frac{1}{N}\sum_{s=1}^N\sum_{i,j}v_i\delta_i^s\delta_j^s v_j=\frac{1}{N}\sum_{s=1}^N\sum_{i}|v_i\delta_i^s|^2\geq 0\,,$$ for every $v\in\mathbb{R}^M$, while such an identity does not hold for larger windows/lags.
-1424    For observables defined on a single ensemble our approximation is equivalent to assuming that the integrated autocorrelation time of an off-diagonal element is equal to the geometric mean of the integrated autocorrelation times of the corresponding diagonal elements.
-1425    $$\tau_{\mathrm{int}, ij}=\sqrt{\tau_{\mathrm{int}, i}\times \tau_{\mathrm{int}, j}}$$
-1426    This construction ensures that the estimated covariance matrix is positive semi-definite (up to numerical rounding errors).
-1427    '''
-1428
-1429    length = len(obs)
-1430
-1431    max_samples = np.max([o.N for o in obs])
-1432    if max_samples <= length and not [item for sublist in [o.cov_names for o in obs] for item in sublist]:
-1433        warnings.warn(f"The dimension of the covariance matrix ({length}) is larger than or equal to the number of samples ({max_samples}). This will result in a rank-deficient matrix.", RuntimeWarning)
-1434
-1435    cov = np.zeros((length, length))
-1436    for i in range(length):
-1437        for j in range(i, length):
-1438            cov[i, j] = _covariance_element(obs[i], obs[j])
-1439    cov = cov + cov.T - np.diag(np.diag(cov))
-1440
-1441    corr = np.diag(1 / np.sqrt(np.diag(cov))) @ cov @ np.diag(1 / np.sqrt(np.diag(cov)))
-1442
-1443    if isinstance(smooth, int):
-1444        corr = _smooth_eigenvalues(corr, smooth)
-1445
-1446    if visualize:
-1447        plt.matshow(corr, vmin=-1, vmax=1)
-1448        plt.set_cmap('RdBu')
-1449        plt.colorbar()
-1450        plt.draw()
-1451
-1452    if correlation is True:
-1453        return corr
+1388def covariance(obs, visualize=False, correlation=False, smooth=None, **kwargs):
+1389    r'''Calculates the error covariance matrix of a set of observables.
+1390
+1391    WARNING: This function should be used with care, especially for observables with support on multiple
+1392             ensembles with differing autocorrelations. See the notes below for details.
+1393
+1394    The gamma method has to be applied first to all observables.
+1395
+1396    Parameters
+1397    ----------
+1398    obs : list or numpy.ndarray
+1399        List or one dimensional array of Obs
+1400    visualize : bool
+1401        If True plots the corresponding normalized correlation matrix (default False).
+1402    correlation : bool
+1403        If True the correlation matrix instead of the error covariance matrix is returned (default False).
+1404    smooth : None or int
+1405        If smooth is an integer 'E' between 2 and the dimension of the matrix minus 1, the eigenvalue
+1406        smoothing procedure of hep-lat/9412087 is applied to the correlation matrix which leaves the
+1407        largest E eigenvalues essentially unchanged and smoothes the smaller eigenvalues to avoid extremely
+1408        small ones.
+1409
+1410    Notes
+1411    -----
+1412    The error covariance is defined such that it agrees with the squared standard error for two identical observables
+1413    $$\operatorname{cov}(a,a)=\sum_{s=1}^N\delta_a^s\delta_a^s/N^2=\Gamma_{aa}(0)/N=\operatorname{var}(a)/N=\sigma_a^2$$
+1414    in the absence of autocorrelation.
+1415    The error covariance is estimated by calculating the correlation matrix assuming no autocorrelation and then rescaling the correlation matrix by the full errors including the previous gamma method estimate for the autocorrelation of the observables. The covariance at windowsize 0 is guaranteed to be positive semi-definite
+1416    $$\sum_{i,j}v_i\Gamma_{ij}(0)v_j=\frac{1}{N}\sum_{s=1}^N\sum_{i,j}v_i\delta_i^s\delta_j^s v_j=\frac{1}{N}\sum_{s=1}^N\sum_{i}|v_i\delta_i^s|^2\geq 0\,,$$ for every $v\in\mathbb{R}^M$, while such an identity does not hold for larger windows/lags.
+1417    For observables defined on a single ensemble our approximation is equivalent to assuming that the integrated autocorrelation time of an off-diagonal element is equal to the geometric mean of the integrated autocorrelation times of the corresponding diagonal elements.
+1418    $$\tau_{\mathrm{int}, ij}=\sqrt{\tau_{\mathrm{int}, i}\times \tau_{\mathrm{int}, j}}$$
+1419    This construction ensures that the estimated covariance matrix is positive semi-definite (up to numerical rounding errors).
+1420    '''
+1421
+1422    length = len(obs)
+1423
+1424    max_samples = np.max([o.N for o in obs])
+1425    if max_samples <= length and not [item for sublist in [o.cov_names for o in obs] for item in sublist]:
+1426        warnings.warn(f"The dimension of the covariance matrix ({length}) is larger than or equal to the number of samples ({max_samples}). This will result in a rank-deficient matrix.", RuntimeWarning)
+1427
+1428    cov = np.zeros((length, length))
+1429    for i in range(length):
+1430        for j in range(i, length):
+1431            cov[i, j] = _covariance_element(obs[i], obs[j])
+1432    cov = cov + cov.T - np.diag(np.diag(cov))
+1433
+1434    corr = np.diag(1 / np.sqrt(np.diag(cov))) @ cov @ np.diag(1 / np.sqrt(np.diag(cov)))
+1435
+1436    if isinstance(smooth, int):
+1437        corr = _smooth_eigenvalues(corr, smooth)
+1438
+1439    if visualize:
+1440        plt.matshow(corr, vmin=-1, vmax=1)
+1441        plt.set_cmap('RdBu')
+1442        plt.colorbar()
+1443        plt.draw()
+1444
+1445    if correlation is True:
+1446        return corr
+1447
+1448    errors = [o.dvalue for o in obs]
+1449    cov = np.diag(errors) @ corr @ np.diag(errors)
+1450
+1451    eigenvalues = np.linalg.eigh(cov)[0]
+1452    if not np.all(eigenvalues >= 0):
+1453        warnings.warn("Covariance matrix is not positive semi-definite (Eigenvalues: " + str(eigenvalues) + ")", RuntimeWarning)
 1454
-1455    errors = [o.dvalue for o in obs]
-1456    cov = np.diag(errors) @ corr @ np.diag(errors)
-1457
-1458    eigenvalues = np.linalg.eigh(cov)[0]
-1459    if not np.all(eigenvalues >= 0):
-1460        warnings.warn("Covariance matrix is not positive semi-definite (Eigenvalues: " + str(eigenvalues) + ")", RuntimeWarning)
-1461
-1462    return cov
+1455    return cov
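
A sketch with two observables that share a common fluctuation, so the off-diagonal element is non-zero; note the mandatory gamma_method calls before covariance (all data invented):

    import numpy as np
    import pyerrors as pe

    rng = np.random.default_rng(3)
    common = rng.normal(0.0, 0.1, 1000)
    o1 = pe.Obs([1.0 + common + rng.normal(0.0, 0.05, 1000)], ['ensA'])
    o2 = pe.Obs([0.5 + common + rng.normal(0.0, 0.05, 1000)], ['ensA'])

    for o in (o1, o2):
        o.gamma_method()          # required before calling covariance

    cov = pe.covariance([o1, o2])                      # 2x2 error covariance
    corr = pe.covariance([o1, o2], correlation=True)   # normalized variant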
 
@@ -4865,24 +4837,24 @@ This construction ensures that the estimated covariance matrix is positive semi-
-1542def import_jackknife(jacks, name, idl=None):
-1543    """Imports jackknife samples and returns an Obs
-1544
-1545    Parameters
-1546    ----------
-1547    jacks : numpy.ndarray
-1548        numpy array containing the mean value as zeroth entry and
-1549        the N jackknife samples as first to Nth entry.
-1550    name : str
-1551        name of the ensemble the samples are defined on.
-1552    """
-1553    length = len(jacks) - 1
-1554    prj = (np.ones((length, length)) - (length - 1) * np.identity(length))
-1555    samples = jacks[1:] @ prj
-1556    mean = np.mean(samples)
-1557    new_obs = Obs([samples - mean], [name], idl=idl, means=[mean])
-1558    new_obs._value = jacks[0]
-1559    return new_obs
+1535def import_jackknife(jacks, name, idl=None):
+1536    """Imports jackknife samples and returns an Obs
+1537
+1538    Parameters
+1539    ----------
+1540    jacks : numpy.ndarray
+1541        numpy array containing the mean value as zeroth entry and
+1542        the N jackknife samples as first to Nth entry.
+1543    name : str
+1544        name of the ensemble the samples are defined on.
+1545    """
+1546    length = len(jacks) - 1
+1547    prj = (np.ones((length, length)) - (length - 1) * np.identity(length))
+1548    samples = jacks[1:] @ prj
+1549    mean = np.mean(samples)
+1550    new_obs = Obs([samples - mean], [name], idl=idl, means=[mean])
+1551    new_obs._value = jacks[0]
+1552    return new_obs
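
A round-trip sketch, assuming the companion Obs.export_jackknife method (whose O(1/N) agreement is quoted in the hunk context above) to produce the input array; data invented:

    import numpy as np
    import pyerrors as pe

    obs = pe.Obs([np.random.normal(1.0, 0.1, 500)], ['ensA'])

    jacks = obs.export_jackknife()   # mean first, then the N jackknife samples
    rec = pe.import_jackknife(jacks, 'ensA')
    print(np.isclose(rec.value, obs.value))   # the mean survives the round trip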
 
@@ -4912,34 +4884,34 @@ name of the ensemble the samples are defined on.
-1562def merge_obs(list_of_obs):
-1563    """Combine all observables in list_of_obs into one new observable
-1564
-1565    Parameters
-1566    ----------
-1567    list_of_obs : list
-1568        list of the Obs objects to be combined
-1569
-1570    Notes
-1571    -----
-1572    It is not possible to combine obs which are based on the same replicum
-1573    """
-1574    replist = [item for obs in list_of_obs for item in obs.names]
-1575    if (len(replist) == len(set(replist))) is False:
-1576        raise Exception('list_of_obs contains duplicate replica: %s' % (str(replist)))
-1577    if any([len(o.cov_names) for o in list_of_obs]):
-1578        raise Exception('Not possible to merge data that contains covobs!')
-1579    new_dict = {}
-1580    idl_dict = {}
-1581    for o in list_of_obs:
-1582        new_dict.update({key: o.deltas.get(key, 0) + o.r_values.get(key, 0)
-1583                        for key in set(o.deltas) | set(o.r_values)})
-1584        idl_dict.update({key: o.idl.get(key, 0) for key in set(o.deltas)})
-1585
-1586    names = sorted(new_dict.keys())
-1587    o = Obs([new_dict[name] for name in names], names, idl=[idl_dict[name] for name in names])
-1588    o.reweighted = np.max([oi.reweighted for oi in list_of_obs])
-1589    return o
+1555def merge_obs(list_of_obs):
+1556    """Combine all observables in list_of_obs into one new observable
+1557
+1558    Parameters
+1559    ----------
+1560    list_of_obs : list
+1561        list of the Obs objects to be combined
+1562
+1563    Notes
+1564    -----
+1565    It is not possible to combine obs which are based on the same replicum
+1566    """
+1567    replist = [item for obs in list_of_obs for item in obs.names]
+1568    if (len(replist) == len(set(replist))) is False:
+1569        raise Exception('list_of_obs contains duplicate replica: %s' % (str(replist)))
+1570    if any([len(o.cov_names) for o in list_of_obs]):
+1571        raise Exception('Not possible to merge data that contains covobs!')
+1572    new_dict = {}
+1573    idl_dict = {}
+1574    for o in list_of_obs:
+1575        new_dict.update({key: o.deltas.get(key, 0) + o.r_values.get(key, 0)
+1576                        for key in set(o.deltas) | set(o.r_values)})
+1577        idl_dict.update({key: o.idl.get(key, 0) for key in set(o.deltas)})
+1578
+1579    names = sorted(new_dict.keys())
+1580    o = Obs([new_dict[name] for name in names], names, idl=[idl_dict[name] for name in names])
+1581    o.reweighted = np.max([oi.reweighted for oi in list_of_obs])
+1582    return o
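
A sketch merging two replica of one ensemble, using the 'ensemble|replicum' naming convention (names and data invented):

    import numpy as np
    import pyerrors as pe

    r1 = pe.Obs([np.random.normal(1.0, 0.1, 500)], ['ensA|r1'])
    r2 = pe.Obs([np.random.normal(1.0, 0.1, 500)], ['ensA|r2'])

    merged = pe.merge_obs([r1, r2])   # one Obs carrying both replica
    merged.gamma_method()
    print(merged.e_names)             # fluctuations attributed to ensemble 'ensA'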
 
@@ -4970,47 +4942,47 @@ list of the Obs object to be combined
-1592def cov_Obs(means, cov, name, grad=None):
-1593    """Create an Obs based on mean(s) and a covariance matrix
-1594
-1595    Parameters
-1596    ----------
-1597    means : list of floats or float
-1598        N mean value(s) of the new Obs
-1599    cov : list or array
-1600        2d (NxN) Covariance matrix, 1d diagonal entries or 0d covariance
-1601    name : str
-1602        identifier for the covariance matrix
-1603    grad : list or array
-1604        Gradient of the Covobs wrt. the means belonging to cov.
-1605    """
-1606
-1607    def covobs_to_obs(co):
-1608        """Make an Obs out of a Covobs
-1609
-1610        Parameters
-1611        ----------
-1612        co : Covobs
-1613            Covobs to be embedded into the Obs
-1614        """
-1615        o = Obs([], [], means=[])
-1616        o._value = co.value
-1617        o.names.append(co.name)
-1618        o._covobs[co.name] = co
-1619        o._dvalue = np.sqrt(co.errsq())
-1620        return o
-1621
-1622    ol = []
-1623    if isinstance(means, (float, int)):
-1624        means = [means]
-1625
-1626    for i in range(len(means)):
-1627        ol.append(covobs_to_obs(Covobs(means[i], cov, name, pos=i, grad=grad)))
-1628    if ol[0].covobs[name].N != len(means):
-1629        raise Exception('You have to provide %d mean values!' % (ol[0].N))
-1630    if len(ol) == 1:
-1631        return ol[0]
-1632    return ol
+1585def cov_Obs(means, cov, name, grad=None):
+1586    """Create an Obs based on mean(s) and a covariance matrix
+1587
+1588    Parameters
+1589    ----------
+1590    means : list of floats or float
+1591        N mean value(s) of the new Obs
+1592    cov : list or array
+1593        2d (NxN) Covariance matrix, 1d diagonal entries or 0d covariance
+1594    name : str
+1595        identifier for the covariance matrix
+1596    grad : list or array
+1597        Gradient of the Covobs wrt. the means belonging to cov.
+1598    """
+1599
+1600    def covobs_to_obs(co):
+1601        """Make an Obs out of a Covobs
+1602
+1603        Parameters
+1604        ----------
+1605        co : Covobs
+1606            Covobs to be embedded into the Obs
+1607        """
+1608        o = Obs([], [], means=[])
+1609        o._value = co.value
+1610        o.names.append(co.name)
+1611        o._covobs[co.name] = co
+1612        o._dvalue = np.sqrt(co.errsq())
+1613        return o
+1614
+1615    ol = []
+1616    if isinstance(means, (float, int)):
+1617        means = [means]
+1618
+1619    for i in range(len(means)):
+1620        ol.append(covobs_to_obs(Covobs(means[i], cov, name, pos=i, grad=grad)))
+1621    if ol[0].covobs[name].N != len(means):
+1622        raise Exception('You have to provide %d mean values!' % (ol[0].N))
+1623    if len(ol) == 1:
+1624        return ol[0]
+1625    return ol
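
A sketch covering the scalar and the matrix case (names and numbers invented):

    import numpy as np
    import pyerrors as pe

    # 0d covariance: one mean with variance 0.04
    sys_err = pe.cov_Obs(1.0, 0.04, 'renormalization')
    sys_err.gamma_method()
    print(sys_err.dvalue)   # sqrt(0.04) = 0.2

    # 2d covariance: two correlated means, unpacked from the returned list
    cov = np.array([[0.04, 0.02],
                    [0.02, 0.09]])
    zA, zB = pe.cov_Obs([1.0, 2.0], cov, 'matching')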