diff --git a/docs/pyerrors/obs.html b/docs/pyerrors/obs.html index e7439aac..c785ea95 100644 --- a/docs/pyerrors/obs.html +++ b/docs/pyerrors/obs.html @@ -478,1392 +478,1405 @@ 279 tmp = self.e_rho[e_name][i + 1:w_max] + np.concatenate([self.e_rho[e_name][i - 1::-1], self.e_rho[e_name][1:w_max - 2 * i]]) - 2 * self.e_rho[e_name][i] * self.e_rho[e_name][1:w_max - i] 280 self.e_drho[e_name][i] = np.sqrt(np.sum(tmp ** 2) / e_N) 281 - 282 _compute_drho(1) - 283 if self.tau_exp[e_name] > 0: - 284 texp = self.tau_exp[e_name] - 285 # Critical slowing down analysis - 286 if w_max // 2 <= 1: - 287 raise Exception("Need at least 8 samples for tau_exp error analysis") - 288 for n in range(1, w_max // 2): - 289 _compute_drho(n + 1) - 290 if (self.e_rho[e_name][n] - self.N_sigma[e_name] * self.e_drho[e_name][n]) < 0 or n >= w_max // 2 - 2: - 291 # Bias correction hep-lat/0306017 eq. (49) included - 292 self.e_tauint[e_name] = self.e_n_tauint[e_name][n] * (1 + (2 * n + 1) / e_N) / (1 + 1 / e_N) + texp * np.abs(self.e_rho[e_name][n + 1]) # The absolute makes sure, that the tail contribution is always positive - 293 self.e_dtauint[e_name] = np.sqrt(self.e_n_dtauint[e_name][n] ** 2 + texp ** 2 * self.e_drho[e_name][n + 1] ** 2) - 294 # Error of tau_exp neglected so far, missing term: self.e_rho[e_name][n + 1] ** 2 * d_tau_exp ** 2 - 295 self.e_dvalue[e_name] = np.sqrt(2 * self.e_tauint[e_name] * e_gamma[e_name][0] * (1 + 1 / e_N) / e_N) - 296 self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt((n + 0.5) / e_N) - 297 self.e_windowsize[e_name] = n - 298 break - 299 else: - 300 if self.S[e_name] == 0.0: - 301 self.e_tauint[e_name] = 0.5 - 302 self.e_dtauint[e_name] = 0.0 - 303 self.e_dvalue[e_name] = np.sqrt(e_gamma[e_name][0] / (e_N - 1)) - 304 self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt(0.5 / e_N) - 305 self.e_windowsize[e_name] = 0 - 306 else: - 307 # Standard automatic windowing procedure - 308 tau = self.S[e_name] / np.log((2 * self.e_n_tauint[e_name][1:] + 1) / (2 * self.e_n_tauint[e_name][1:] - 1)) - 309 g_w = np.exp(- np.arange(1, w_max) / tau) - tau / np.sqrt(np.arange(1, w_max) * e_N) - 310 for n in range(1, w_max): - 311 if n < w_max // 2 - 2: - 312 _compute_drho(n + 1) - 313 if g_w[n - 1] < 0 or n >= w_max - 1: - 314 self.e_tauint[e_name] = self.e_n_tauint[e_name][n] * (1 + (2 * n + 1) / e_N) / (1 + 1 / e_N) # Bias correction hep-lat/0306017 eq. (49) - 315 self.e_dtauint[e_name] = self.e_n_dtauint[e_name][n] - 316 self.e_dvalue[e_name] = np.sqrt(2 * self.e_tauint[e_name] * e_gamma[e_name][0] * (1 + 1 / e_N) / e_N) - 317 self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt((n + 0.5) / e_N) - 318 self.e_windowsize[e_name] = n - 319 break - 320 - 321 self._dvalue += self.e_dvalue[e_name] ** 2 - 322 self.ddvalue += (self.e_dvalue[e_name] * self.e_ddvalue[e_name]) ** 2 - 323 - 324 for e_name in self.cov_names: - 325 self.e_dvalue[e_name] = np.sqrt(self.covobs[e_name].errsq()) - 326 self.e_ddvalue[e_name] = 0 - 327 self._dvalue += self.e_dvalue[e_name]**2 - 328 - 329 self._dvalue = np.sqrt(self._dvalue) - 330 if self._dvalue == 0.0: - 331 self.ddvalue = 0.0 - 332 else: - 333 self.ddvalue = np.sqrt(self.ddvalue) / self._dvalue - 334 return - 335 - 336 def _calc_gamma(self, deltas, idx, shape, w_max, fft): - 337 """Calculate Gamma_{AA} from the deltas, which are defined on idx. 
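The removed hunk above implements the automatic windowing procedure of hep-lat/0306017 (Wolff): the summation window W is taken where g(W) = exp(-W/tau(W)) - tau(W)/sqrt(W*N) first turns negative, with tau(W) = S / log((2*tau_int(W)+1)/(2*tau_int(W)-1)). A minimal self-contained sketch of that criterion on synthetic data (function name, autocorrelation model and numbers are illustrative only):

    import numpy as np

    def automatic_window(n_tauint, e_N, S=2.0):
        # n_tauint[W] is the cumulative tau_int up to window W, with n_tauint[0] = 0.5
        tau = S / np.log((2 * n_tauint[1:] + 1) / (2 * n_tauint[1:] - 1))
        w = np.arange(1, len(n_tauint))
        g_w = np.exp(-w / tau) - tau / np.sqrt(w * e_N)
        return int(np.argmax(g_w < 0) + 1)  # first W with g(W) < 0

    rho = 0.8 ** np.arange(100)  # synthetic normalized autocorrelation function
    n_tauint = 0.5 + np.concatenate([[0.0], np.cumsum(rho[1:])])
    print(automatic_window(n_tauint, e_N=1000))  # a window of a few tau_int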
- 338 idx is assumed to be a contiguous range (possibly with a stepsize != 1) - 339 - 340 Parameters - 341 ---------- - 342 deltas : list - 343 List of fluctuations - 344 idx : list - 345 List or range of configurations on which the deltas are defined. - 346 shape : int - 347 Number of configurations in idx. - 348 w_max : int - 349 Upper bound for the summation window. - 350 fft : bool - 351 determines whether the fft algorithm is used for the computation - 352 of the autocorrelation function. - 353 """ - 354 gamma = np.zeros(w_max) - 355 deltas = _expand_deltas(deltas, idx, shape) - 356 new_shape = len(deltas) - 357 if fft: - 358 max_gamma = min(new_shape, w_max) - 359 # The padding for the fft has to be even - 360 padding = new_shape + max_gamma + (new_shape + max_gamma) % 2 - 361 gamma[:max_gamma] += np.fft.irfft(np.abs(np.fft.rfft(deltas, padding)) ** 2)[:max_gamma] - 362 else: - 363 for n in range(w_max): - 364 if new_shape - n >= 0: - 365 gamma[n] += deltas[0:new_shape - n].dot(deltas[n:new_shape]) - 366 - 367 return gamma - 368 - 369 def details(self, ens_content=True): - 370 """Output detailed properties of the Obs. - 371 - 372 Parameters - 373 ---------- - 374 ens_content : bool - 375 print details about the ensembles and replica if true. - 376 """ - 377 if self.tag is not None: - 378 print("Description:", self.tag) - 379 if not hasattr(self, 'e_dvalue'): - 380 print('Result\t %3.8e' % (self.value)) - 381 else: - 382 if self.value == 0.0: - 383 percentage = np.nan - 384 else: - 385 percentage = np.abs(self._dvalue / self.value) * 100 - 386 print('Result\t %3.8e +/- %3.8e +/- %3.8e (%3.3f%%)' % (self.value, self._dvalue, self.ddvalue, percentage)) - 387 if len(self.e_names) > 1: - 388 print(' Ensemble errors:') - 389 for e_name in self.mc_names: - 390 if len(self.e_names) > 1: - 391 print('', e_name, '\t %3.8e +/- %3.8e' % (self.e_dvalue[e_name], self.e_ddvalue[e_name])) - 392 if self.tau_exp[e_name] > 0: - 393 print(' t_int\t %3.8e +/- %3.8e tau_exp = %3.2f, N_sigma = %1.0i' % (self.e_tauint[e_name], self.e_dtauint[e_name], self.tau_exp[e_name], self.N_sigma[e_name])) - 394 else: - 395 print(' t_int\t %3.8e +/- %3.8e S = %3.2f' % (self.e_tauint[e_name], self.e_dtauint[e_name], self.S[e_name])) - 396 for e_name in self.cov_names: - 397 print('', e_name, '\t %3.8e' % (self.e_dvalue[e_name])) - 398 if ens_content is True: - 399 if len(self.e_names) == 1: - 400 print(self.N, 'samples in', len(self.e_names), 'ensemble:') - 401 else: - 402 print(self.N, 'samples in', len(self.e_names), 'ensembles:') - 403 my_string_list = [] - 404 for key, value in sorted(self.e_content.items()): - 405 if key not in self.covobs: - 406 my_string = ' ' + "\u00B7 Ensemble '" + key + "' " - 407 if len(value) == 1: - 408 my_string += f': {self.shape[value[0]]} configurations' - 409 if isinstance(self.idl[value[0]], range): - 410 my_string += f' (from {self.idl[value[0]].start} to {self.idl[value[0]][-1]}' + int(self.idl[value[0]].step != 1) * f' in steps of {self.idl[value[0]].step}' + ')' - 411 else: - 412 my_string += ' (irregular range)' - 413 else: - 414 sublist = [] - 415 for v in value: - 416 my_substring = ' ' + "\u00B7 Replicum '" + v[len(key) + 1:] + "' " - 417 my_substring += f': {self.shape[v]} configurations' - 418 if isinstance(self.idl[v], range): - 419 my_substring += f' (from {self.idl[v].start} to {self.idl[v][-1]}' + int(self.idl[v].step != 1) * f' in steps of {self.idl[v].step}' + ')' - 420 else: - 421 my_substring += ' (irregular range)' - 422 sublist.append(my_substring) - 423 - 424 
my_string += '\n' + '\n'.join(sublist) - 425 else: - 426 my_string = ' ' + "\u00B7 Covobs '" + key + "' " - 427 my_string_list.append(my_string) - 428 print('\n'.join(my_string_list)) - 429 - 430 def reweight(self, weight): - 431 """Reweight the obs with given rewighting factors. - 432 - 433 Parameters - 434 ---------- - 435 weight : Obs - 436 Reweighting factor. An Observable that has to be defined on a superset of the - 437 configurations in obs[i].idl for all i. - 438 all_configs : bool - 439 if True, the reweighted observables are normalized by the average of - 440 the reweighting factor on all configurations in weight.idl and not - 441 on the configurations in obs[i].idl. Default False. - 442 """ - 443 return reweight(weight, [self])[0] - 444 - 445 def is_zero_within_error(self, sigma=1): - 446 """Checks whether the observable is zero within 'sigma' standard errors. - 447 - 448 Parameters - 449 ---------- - 450 sigma : int - 451 Number of standard errors used for the check. - 452 - 453 Works only properly when the gamma method was run. - 454 """ - 455 return self.is_zero() or np.abs(self.value) <= sigma * self._dvalue - 456 - 457 def is_zero(self, atol=1e-10): - 458 """Checks whether the observable is zero within a given tolerance. - 459 - 460 Parameters - 461 ---------- - 462 atol : float - 463 Absolute tolerance (for details see numpy documentation). - 464 """ - 465 return np.isclose(0.0, self.value, 1e-14, atol) and all(np.allclose(0.0, delta, 1e-14, atol) for delta in self.deltas.values()) and all(np.allclose(0.0, delta.errsq(), 1e-14, atol) for delta in self.covobs.values()) - 466 - 467 def plot_tauint(self, save=None): - 468 """Plot integrated autocorrelation time for each ensemble. + 282 gaps = [] + 283 for r_name in e_content[e_name]: + 284 if isinstance(self.idl[r_name], range): + 285 gaps.append(1) + 286 else: + 287 gaps.append(np.min(np.diff(self.idl[r_name]))) + 288 + 289 if not np.all([gi == gaps[0] for gi in gaps]): + 290 raise Exception(f"Replica for ensemble {e_name} are not equally spaced.", gaps) + 291 else: + 292 gapsize = gaps[0] + 293 + 294 _compute_drho(gapsize) + 295 if self.tau_exp[e_name] > 0: + 296 texp = self.tau_exp[e_name] + 297 # Critical slowing down analysis + 298 if w_max // 2 <= 1: + 299 raise Exception("Need at least 8 samples for tau_exp error analysis") + 300 for n in range(gapsize, w_max // 2, gapsize): + 301 _compute_drho(n + gapsize) + 302 if (self.e_rho[e_name][n] - self.N_sigma[e_name] * self.e_drho[e_name][n]) < 0 or n >= w_max // 2 - 2: + 303 # Bias correction hep-lat/0306017 eq. 
(49) included + 304 self.e_tauint[e_name] = self.e_n_tauint[e_name][n] * (1 + (2 * n / gapsize + 1) / e_N) / (1 + 1 / e_N) + texp * np.abs(self.e_rho[e_name][n + 1]) # The absolute makes sure, that the tail contribution is always positive + 305 self.e_dtauint[e_name] = np.sqrt(self.e_n_dtauint[e_name][n] ** 2 + texp ** 2 * self.e_drho[e_name][n + 1] ** 2) + 306 # Error of tau_exp neglected so far, missing term: self.e_rho[e_name][n + 1] ** 2 * d_tau_exp ** 2 + 307 self.e_dvalue[e_name] = np.sqrt(2 * self.e_tauint[e_name] * e_gamma[e_name][0] * (1 + 1 / e_N) / e_N) + 308 self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt((n / gapsize + 0.5) / e_N) + 309 self.e_windowsize[e_name] = n + 310 break + 311 else: + 312 if self.S[e_name] == 0.0: + 313 self.e_tauint[e_name] = 0.5 + 314 self.e_dtauint[e_name] = 0.0 + 315 self.e_dvalue[e_name] = np.sqrt(e_gamma[e_name][0] / (e_N - 1)) + 316 self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt(0.5 / e_N) + 317 self.e_windowsize[e_name] = 0 + 318 else: + 319 # Standard automatic windowing procedure + 320 tau = self.S[e_name] / np.log((2 * self.e_n_tauint[e_name][gapsize::gapsize] + 1) / (2 * self.e_n_tauint[e_name][gapsize::gapsize] - 1)) + 321 g_w = np.exp(- np.arange(1, len(tau) + 1) / tau) - tau / np.sqrt(np.arange(1, len(tau) + 1) * e_N) + 322 for n in range(1, w_max): + 323 if n < w_max // 2 - 2: + 324 _compute_drho(gapsize * n + gapsize) + 325 if g_w[n - 1] < 0 or n >= w_max - 1: + 326 n *= gapsize + 327 self.e_tauint[e_name] = self.e_n_tauint[e_name][n] * (1 + (2 * n / gapsize + 1) / e_N) / (1 + 1 / e_N) # Bias correction hep-lat/0306017 eq. (49) + 328 self.e_dtauint[e_name] = self.e_n_dtauint[e_name][n] + 329 self.e_dvalue[e_name] = np.sqrt(2 * self.e_tauint[e_name] * e_gamma[e_name][0] * (1 + 1 / e_N) / e_N) + 330 self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt((n / gapsize + 0.5) / e_N) + 331 self.e_windowsize[e_name] = n + 332 break + 333 + 334 self._dvalue += self.e_dvalue[e_name] ** 2 + 335 self.ddvalue += (self.e_dvalue[e_name] * self.e_ddvalue[e_name]) ** 2 + 336 + 337 for e_name in self.cov_names: + 338 self.e_dvalue[e_name] = np.sqrt(self.covobs[e_name].errsq()) + 339 self.e_ddvalue[e_name] = 0 + 340 self._dvalue += self.e_dvalue[e_name]**2 + 341 + 342 self._dvalue = np.sqrt(self._dvalue) + 343 if self._dvalue == 0.0: + 344 self.ddvalue = 0.0 + 345 else: + 346 self.ddvalue = np.sqrt(self.ddvalue) / self._dvalue + 347 return + 348 + 349 def _calc_gamma(self, deltas, idx, shape, w_max, fft): + 350 """Calculate Gamma_{AA} from the deltas, which are defined on idx. + 351 idx is assumed to be a contiguous range (possibly with a stepsize != 1) + 352 + 353 Parameters + 354 ---------- + 355 deltas : list + 356 List of fluctuations + 357 idx : list + 358 List or range of configurations on which the deltas are defined. + 359 shape : int + 360 Number of configurations in idx. + 361 w_max : int + 362 Upper bound for the summation window. + 363 fft : bool + 364 determines whether the fft algorithm is used for the computation + 365 of the autocorrelation function. 
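The replacement hunk makes the same analysis aware of gaps in the Monte Carlo chain: lines 282-292 determine one common spacing (gapsize) per ensemble, and the windowing then proceeds in multiples of that spacing. A small sketch of the spacing check, mirroring those lines (the function wrapper is illustrative):

    import numpy as np

    def common_gapsize(idls):
        # ranges count as unit-spaced, lists use their minimal difference;
        # all replica of an ensemble must agree on the spacing
        gaps = []
        for idl in idls:
            if isinstance(idl, range):
                gaps.append(1)
            else:
                gaps.append(np.min(np.diff(idl)))
        if not np.all([g == gaps[0] for g in gaps]):
            raise Exception("Replica are not equally spaced.", gaps)
        return gaps[0]

    print(common_gapsize([[0, 4, 8, 12], [100, 104, 108]]))  # -> 4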
+ 366 """ + 367 gamma = np.zeros(w_max) + 368 deltas = _expand_deltas(deltas, idx, shape) + 369 new_shape = len(deltas) + 370 if fft: + 371 max_gamma = min(new_shape, w_max) + 372 # The padding for the fft has to be even + 373 padding = new_shape + max_gamma + (new_shape + max_gamma) % 2 + 374 gamma[:max_gamma] += np.fft.irfft(np.abs(np.fft.rfft(deltas, padding)) ** 2)[:max_gamma] + 375 else: + 376 for n in range(w_max): + 377 if new_shape - n >= 0: + 378 gamma[n] += deltas[0:new_shape - n].dot(deltas[n:new_shape]) + 379 + 380 return gamma + 381 + 382 def details(self, ens_content=True): + 383 """Output detailed properties of the Obs. + 384 + 385 Parameters + 386 ---------- + 387 ens_content : bool + 388 print details about the ensembles and replica if true. + 389 """ + 390 if self.tag is not None: + 391 print("Description:", self.tag) + 392 if not hasattr(self, 'e_dvalue'): + 393 print('Result\t %3.8e' % (self.value)) + 394 else: + 395 if self.value == 0.0: + 396 percentage = np.nan + 397 else: + 398 percentage = np.abs(self._dvalue / self.value) * 100 + 399 print('Result\t %3.8e +/- %3.8e +/- %3.8e (%3.3f%%)' % (self.value, self._dvalue, self.ddvalue, percentage)) + 400 if len(self.e_names) > 1: + 401 print(' Ensemble errors:') + 402 for e_name in self.mc_names: + 403 if len(self.e_names) > 1: + 404 print('', e_name, '\t %3.8e +/- %3.8e' % (self.e_dvalue[e_name], self.e_ddvalue[e_name])) + 405 if self.tau_exp[e_name] > 0: + 406 print(' t_int\t %3.8e +/- %3.8e tau_exp = %3.2f, N_sigma = %1.0i' % (self.e_tauint[e_name], self.e_dtauint[e_name], self.tau_exp[e_name], self.N_sigma[e_name])) + 407 else: + 408 print(' t_int\t %3.8e +/- %3.8e S = %3.2f' % (self.e_tauint[e_name], self.e_dtauint[e_name], self.S[e_name])) + 409 for e_name in self.cov_names: + 410 print('', e_name, '\t %3.8e' % (self.e_dvalue[e_name])) + 411 if ens_content is True: + 412 if len(self.e_names) == 1: + 413 print(self.N, 'samples in', len(self.e_names), 'ensemble:') + 414 else: + 415 print(self.N, 'samples in', len(self.e_names), 'ensembles:') + 416 my_string_list = [] + 417 for key, value in sorted(self.e_content.items()): + 418 if key not in self.covobs: + 419 my_string = ' ' + "\u00B7 Ensemble '" + key + "' " + 420 if len(value) == 1: + 421 my_string += f': {self.shape[value[0]]} configurations' + 422 if isinstance(self.idl[value[0]], range): + 423 my_string += f' (from {self.idl[value[0]].start} to {self.idl[value[0]][-1]}' + int(self.idl[value[0]].step != 1) * f' in steps of {self.idl[value[0]].step}' + ')' + 424 else: + 425 my_string += ' (irregular range)' + 426 else: + 427 sublist = [] + 428 for v in value: + 429 my_substring = ' ' + "\u00B7 Replicum '" + v[len(key) + 1:] + "' " + 430 my_substring += f': {self.shape[v]} configurations' + 431 if isinstance(self.idl[v], range): + 432 my_substring += f' (from {self.idl[v].start} to {self.idl[v][-1]}' + int(self.idl[v].step != 1) * f' in steps of {self.idl[v].step}' + ')' + 433 else: + 434 my_substring += ' (irregular range)' + 435 sublist.append(my_substring) + 436 + 437 my_string += '\n' + '\n'.join(sublist) + 438 else: + 439 my_string = ' ' + "\u00B7 Covobs '" + key + "' " + 440 my_string_list.append(my_string) + 441 print('\n'.join(my_string_list)) + 442 + 443 def reweight(self, weight): + 444 """Reweight the obs with given rewighting factors. + 445 + 446 Parameters + 447 ---------- + 448 weight : Obs + 449 Reweighting factor. An Observable that has to be defined on a superset of the + 450 configurations in obs[i].idl for all i. 
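The fft branch of _calc_gamma is an application of the Wiener-Khinchin theorem: with sufficient zero padding, irfft(|rfft(deltas)|**2) reproduces exactly the sums deltas[0:N-n].dot(deltas[n:N]) of the direct loop. A quick equivalence check (illustrative only, not part of the library's test suite):

    import numpy as np

    def gamma_direct(deltas, w_max):
        return np.array([deltas[:len(deltas) - n].dot(deltas[n:]) for n in range(w_max)])

    def gamma_fft(deltas, w_max):
        n = len(deltas)
        padding = n + w_max + (n + w_max) % 2  # even padding, as in the code above
        return np.fft.irfft(np.abs(np.fft.rfft(deltas, padding)) ** 2)[:w_max]

    deltas = np.random.default_rng(0).normal(size=500)
    print(np.allclose(gamma_direct(deltas, 20), gamma_fft(deltas, 20)))  # True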
+ 451 all_configs : bool + 452 if True, the reweighted observables are normalized by the average of + 453 the reweighting factor on all configurations in weight.idl and not + 454 on the configurations in obs[i].idl. Default False. + 455 """ + 456 return reweight(weight, [self])[0] + 457 + 458 def is_zero_within_error(self, sigma=1): + 459 """Checks whether the observable is zero within 'sigma' standard errors. + 460 + 461 Parameters + 462 ---------- + 463 sigma : int + 464 Number of standard errors used for the check. + 465 + 466 Works only properly when the gamma method was run. + 467 """ + 468 return self.is_zero() or np.abs(self.value) <= sigma * self._dvalue 469 - 470 Parameters - 471 ---------- - 472 save : str - 473 saves the figure to a file named 'save' if. - 474 """ - 475 if not hasattr(self, 'e_dvalue'): - 476 raise Exception('Run the gamma method first.') - 477 - 478 for e, e_name in enumerate(self.mc_names): - 479 fig = plt.figure() - 480 plt.xlabel(r'$W$') - 481 plt.ylabel(r'$\tau_\mathrm{int}$') - 482 length = int(len(self.e_n_tauint[e_name])) - 483 if self.tau_exp[e_name] > 0: - 484 base = self.e_n_tauint[e_name][self.e_windowsize[e_name]] - 485 x_help = np.arange(2 * self.tau_exp[e_name]) - 486 y_help = (x_help + 1) * np.abs(self.e_rho[e_name][self.e_windowsize[e_name] + 1]) * (1 - x_help / (2 * (2 * self.tau_exp[e_name] - 1))) + base - 487 x_arr = np.arange(self.e_windowsize[e_name] + 1, self.e_windowsize[e_name] + 1 + 2 * self.tau_exp[e_name]) - 488 plt.plot(x_arr, y_help, 'C' + str(e), linewidth=1, ls='--', marker=',') - 489 plt.errorbar([self.e_windowsize[e_name] + 2 * self.tau_exp[e_name]], [self.e_tauint[e_name]], - 490 yerr=[self.e_dtauint[e_name]], fmt='C' + str(e), linewidth=1, capsize=2, marker='o', mfc=plt.rcParams['axes.facecolor']) - 491 xmax = self.e_windowsize[e_name] + 2 * self.tau_exp[e_name] + 1.5 - 492 label = e_name + r', $\tau_\mathrm{exp}$=' + str(np.around(self.tau_exp[e_name], decimals=2)) - 493 else: - 494 label = e_name + ', S=' + str(np.around(self.S[e_name], decimals=2)) - 495 xmax = max(10.5, 2 * self.e_windowsize[e_name] - 0.5) - 496 - 497 plt.errorbar(np.arange(length)[:int(xmax) + 1], self.e_n_tauint[e_name][:int(xmax) + 1], yerr=self.e_n_dtauint[e_name][:int(xmax) + 1], linewidth=1, capsize=2, label=label) - 498 plt.axvline(x=self.e_windowsize[e_name], color='C' + str(e), alpha=0.5, marker=',', ls='--') - 499 plt.legend() - 500 plt.xlim(-0.5, xmax) - 501 ylim = plt.ylim() - 502 plt.ylim(bottom=0.0, top=max(1.0, ylim[1])) - 503 plt.draw() - 504 if save: - 505 fig.savefig(save + "_" + str(e)) - 506 - 507 def plot_rho(self, save=None): - 508 """Plot normalized autocorrelation function time for each ensemble. + 470 def is_zero(self, atol=1e-10): + 471 """Checks whether the observable is zero within a given tolerance. + 472 + 473 Parameters + 474 ---------- + 475 atol : float + 476 Absolute tolerance (for details see numpy documentation). + 477 """ + 478 return np.isclose(0.0, self.value, 1e-14, atol) and all(np.allclose(0.0, delta, 1e-14, atol) for delta in self.deltas.values()) and all(np.allclose(0.0, delta.errsq(), 1e-14, atol) for delta in self.covobs.values()) + 479 + 480 def plot_tauint(self, save=None): + 481 """Plot integrated autocorrelation time for each ensemble. + 482 + 483 Parameters + 484 ---------- + 485 save : str + 486 saves the figure to a file named 'save' if. 
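Obs.reweight simply delegates to the module-level reweight function, so a typical call looks like the following sketch (ensemble name and data are invented; the pe.Obs construction is assumed from the pyerrors API):

    import numpy as np
    import pyerrors as pe

    rng = np.random.default_rng(1)
    my_obs = pe.Obs([rng.normal(1.0, 0.1, 500)], ['ensemble1'])
    weight = pe.Obs([rng.uniform(0.8, 1.2, 500)], ['ensemble1'])
    reweighted = my_obs.reweight(weight)  # same as pe.reweight(weight, [my_obs])[0]
    reweighted.gamma_method()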
+ 487 """ + 488 if not hasattr(self, 'e_dvalue'): + 489 raise Exception('Run the gamma method first.') + 490 + 491 for e, e_name in enumerate(self.mc_names): + 492 fig = plt.figure() + 493 plt.xlabel(r'$W$') + 494 plt.ylabel(r'$\tau_\mathrm{int}$') + 495 length = int(len(self.e_n_tauint[e_name])) + 496 if self.tau_exp[e_name] > 0: + 497 base = self.e_n_tauint[e_name][self.e_windowsize[e_name]] + 498 x_help = np.arange(2 * self.tau_exp[e_name]) + 499 y_help = (x_help + 1) * np.abs(self.e_rho[e_name][self.e_windowsize[e_name] + 1]) * (1 - x_help / (2 * (2 * self.tau_exp[e_name] - 1))) + base + 500 x_arr = np.arange(self.e_windowsize[e_name] + 1, self.e_windowsize[e_name] + 1 + 2 * self.tau_exp[e_name]) + 501 plt.plot(x_arr, y_help, 'C' + str(e), linewidth=1, ls='--', marker=',') + 502 plt.errorbar([self.e_windowsize[e_name] + 2 * self.tau_exp[e_name]], [self.e_tauint[e_name]], + 503 yerr=[self.e_dtauint[e_name]], fmt='C' + str(e), linewidth=1, capsize=2, marker='o', mfc=plt.rcParams['axes.facecolor']) + 504 xmax = self.e_windowsize[e_name] + 2 * self.tau_exp[e_name] + 1.5 + 505 label = e_name + r', $\tau_\mathrm{exp}$=' + str(np.around(self.tau_exp[e_name], decimals=2)) + 506 else: + 507 label = e_name + ', S=' + str(np.around(self.S[e_name], decimals=2)) + 508 xmax = max(10.5, 2 * self.e_windowsize[e_name] - 0.5) 509 - 510 Parameters - 511 ---------- - 512 save : str - 513 saves the figure to a file named 'save' if. - 514 """ - 515 if not hasattr(self, 'e_dvalue'): - 516 raise Exception('Run the gamma method first.') - 517 for e, e_name in enumerate(self.mc_names): - 518 fig = plt.figure() - 519 plt.xlabel('W') - 520 plt.ylabel('rho') - 521 length = int(len(self.e_drho[e_name])) - 522 plt.errorbar(np.arange(length), self.e_rho[e_name][:length], yerr=self.e_drho[e_name][:], linewidth=1, capsize=2) - 523 plt.axvline(x=self.e_windowsize[e_name], color='r', alpha=0.25, ls='--', marker=',') - 524 if self.tau_exp[e_name] > 0: - 525 plt.plot([self.e_windowsize[e_name] + 1, self.e_windowsize[e_name] + 1 + 2 * self.tau_exp[e_name]], - 526 [self.e_rho[e_name][self.e_windowsize[e_name] + 1], 0], 'k-', lw=1) - 527 xmax = self.e_windowsize[e_name] + 2 * self.tau_exp[e_name] + 1.5 - 528 plt.title('Rho ' + e_name + r', tau\_exp=' + str(np.around(self.tau_exp[e_name], decimals=2))) - 529 else: - 530 xmax = max(10.5, 2 * self.e_windowsize[e_name] - 0.5) - 531 plt.title('Rho ' + e_name + ', S=' + str(np.around(self.S[e_name], decimals=2))) - 532 plt.plot([-0.5, xmax], [0, 0], 'k--', lw=1) - 533 plt.xlim(-0.5, xmax) - 534 plt.draw() - 535 if save: - 536 fig.savefig(save + "_" + str(e)) - 537 - 538 def plot_rep_dist(self): - 539 """Plot replica distribution for each ensemble with more than one replicum.""" - 540 if not hasattr(self, 'e_dvalue'): - 541 raise Exception('Run the gamma method first.') - 542 for e, e_name in enumerate(self.mc_names): - 543 if len(self.e_content[e_name]) == 1: - 544 print('No replica distribution for a single replicum (', e_name, ')') - 545 continue - 546 r_length = [] - 547 sub_r_mean = 0 - 548 for r, r_name in enumerate(self.e_content[e_name]): - 549 r_length.append(len(self.deltas[r_name])) - 550 sub_r_mean += self.shape[r_name] * self.r_values[r_name] - 551 e_N = np.sum(r_length) - 552 sub_r_mean /= e_N - 553 arr = np.zeros(len(self.e_content[e_name])) - 554 for r, r_name in enumerate(self.e_content[e_name]): - 555 arr[r] = (self.r_values[r_name] - sub_r_mean) / (self.e_dvalue[e_name] * np.sqrt(e_N / self.shape[r_name] - 1)) - 556 plt.hist(arr, rwidth=0.8, 
bins=len(self.e_content[e_name])) - 557 plt.title('Replica distribution' + e_name + ' (mean=0, var=1)') - 558 plt.draw() - 559 - 560 def plot_history(self, expand=True): - 561 """Plot derived Monte Carlo history for each ensemble - 562 - 563 Parameters - 564 ---------- - 565 expand : bool - 566 show expanded history for irregular Monte Carlo chains (default: True). - 567 """ - 568 for e, e_name in enumerate(self.mc_names): - 569 plt.figure() - 570 r_length = [] - 571 tmp = [] - 572 tmp_expanded = [] - 573 for r, r_name in enumerate(self.e_content[e_name]): - 574 tmp.append(self.deltas[r_name] + self.r_values[r_name]) - 575 if expand: - 576 tmp_expanded.append(_expand_deltas(self.deltas[r_name], list(self.idl[r_name]), self.shape[r_name]) + self.r_values[r_name]) - 577 r_length.append(len(tmp_expanded[-1])) - 578 else: - 579 r_length.append(len(tmp[-1])) - 580 e_N = np.sum(r_length) - 581 x = np.arange(e_N) - 582 y_test = np.concatenate(tmp, axis=0) - 583 if expand: - 584 y = np.concatenate(tmp_expanded, axis=0) - 585 else: - 586 y = y_test - 587 plt.errorbar(x, y, fmt='.', markersize=3) - 588 plt.xlim(-0.5, e_N - 0.5) - 589 plt.title(e_name + f'\nskew: {skew(y_test):.3f} (p={skewtest(y_test).pvalue:.3f}), kurtosis: {kurtosis(y_test):.3f} (p={kurtosistest(y_test).pvalue:.3f})') - 590 plt.draw() - 591 - 592 def plot_piechart(self, save=None): - 593 """Plot piechart which shows the fractional contribution of each - 594 ensemble to the error and returns a dictionary containing the fractions. - 595 - 596 Parameters - 597 ---------- - 598 save : str - 599 saves the figure to a file named 'save' if. - 600 """ - 601 if not hasattr(self, 'e_dvalue'): - 602 raise Exception('Run the gamma method first.') - 603 if np.isclose(0.0, self._dvalue, atol=1e-15): - 604 raise Exception('Error is 0.0') - 605 labels = self.e_names - 606 sizes = [self.e_dvalue[name] ** 2 for name in labels] / self._dvalue ** 2 - 607 fig1, ax1 = plt.subplots() - 608 ax1.pie(sizes, labels=labels, startangle=90, normalize=True) - 609 ax1.axis('equal') - 610 plt.draw() - 611 if save: - 612 fig1.savefig(save) - 613 - 614 return dict(zip(self.e_names, sizes)) - 615 - 616 def dump(self, filename, datatype="json.gz", description="", **kwargs): - 617 """Dump the Obs to a file 'name' of chosen format. - 618 - 619 Parameters - 620 ---------- - 621 filename : str - 622 name of the file to be saved. - 623 datatype : str - 624 Format of the exported file. Supported formats include - 625 "json.gz" and "pickle" - 626 description : str - 627 Description for output file, only relevant for json.gz format. - 628 path : str - 629 specifies a custom path for the file (default '.') - 630 """ - 631 if 'path' in kwargs: - 632 file_name = kwargs.get('path') + '/' + filename - 633 else: - 634 file_name = filename - 635 - 636 if datatype == "json.gz": - 637 from .input.json import dump_to_json - 638 dump_to_json([self], file_name, description=description) - 639 elif datatype == "pickle": - 640 with open(file_name + '.p', 'wb') as fb: - 641 pickle.dump(self, fb) - 642 else: - 643 raise Exception("Unknown datatype " + str(datatype)) - 644 - 645 def export_jackknife(self): - 646 """Export jackknife samples from the Obs - 647 - 648 Returns - 649 ------- - 650 numpy.ndarray - 651 Returns a numpy array of length N + 1 where N is the number of samples - 652 for the given ensemble and replicum. The zeroth entry of the array contains - 653 the mean value of the Obs, entries 1 to N contain the N jackknife samples - 654 derived from the Obs. 
The current implementation only works for observables - 655 defined on exactly one ensemble and replicum. The derived jackknife samples - 656 should agree with samples from a full jackknife analysis up to O(1/N). - 657 """ - 658 - 659 if len(self.names) != 1: - 660 raise Exception("'export_jackknife' is only implemented for Obs defined on one ensemble and replicum.") - 661 - 662 name = self.names[0] - 663 full_data = self.deltas[name] + self.r_values[name] - 664 n = full_data.size - 665 mean = self.value - 666 tmp_jacks = np.zeros(n + 1) - 667 tmp_jacks[0] = mean - 668 tmp_jacks[1:] = (n * mean - full_data) / (n - 1) - 669 return tmp_jacks - 670 - 671 def __float__(self): - 672 return float(self.value) - 673 - 674 def __repr__(self): - 675 return 'Obs[' + str(self) + ']' - 676 - 677 def __str__(self): - 678 if self._dvalue == 0.0: - 679 return str(self.value) - 680 fexp = np.floor(np.log10(self._dvalue)) - 681 if fexp < 0.0: - 682 return '{:{form}}({:2.0f})'.format(self.value, self._dvalue * 10 ** (-fexp + 1), form='.' + str(-int(fexp) + 1) + 'f') - 683 elif fexp == 0.0: - 684 return '{:.1f}({:1.1f})'.format(self.value, self._dvalue) - 685 else: - 686 return '{:.0f}({:2.0f})'.format(self.value, self._dvalue) - 687 - 688 def __hash__(self): - 689 hash_tuple = (np.array([self.value]).astype(np.float32).data.tobytes(),) - 690 hash_tuple += tuple([o.astype(np.float32).data.tobytes() for o in self.deltas.values()]) - 691 hash_tuple += tuple([np.array([o.errsq()]).astype(np.float32).data.tobytes() for o in self.covobs.values()]) - 692 hash_tuple += tuple([o.encode() for o in self.names]) - 693 m = hashlib.md5() - 694 [m.update(o) for o in hash_tuple] - 695 return int(m.hexdigest(), 16) & 0xFFFFFFFF - 696 - 697 # Overload comparisons - 698 def __lt__(self, other): - 699 return self.value < other + 510 plt.errorbar(np.arange(length)[:int(xmax) + 1], self.e_n_tauint[e_name][:int(xmax) + 1], yerr=self.e_n_dtauint[e_name][:int(xmax) + 1], linewidth=1, capsize=2, label=label) + 511 plt.axvline(x=self.e_windowsize[e_name], color='C' + str(e), alpha=0.5, marker=',', ls='--') + 512 plt.legend() + 513 plt.xlim(-0.5, xmax) + 514 ylim = plt.ylim() + 515 plt.ylim(bottom=0.0, top=max(1.0, ylim[1])) + 516 plt.draw() + 517 if save: + 518 fig.savefig(save + "_" + str(e)) + 519 + 520 def plot_rho(self, save=None): + 521 """Plot normalized autocorrelation function time for each ensemble. + 522 + 523 Parameters + 524 ---------- + 525 save : str + 526 saves the figure to a file named 'save' if. 
+ 527 """ + 528 if not hasattr(self, 'e_dvalue'): + 529 raise Exception('Run the gamma method first.') + 530 for e, e_name in enumerate(self.mc_names): + 531 fig = plt.figure() + 532 plt.xlabel('W') + 533 plt.ylabel('rho') + 534 length = int(len(self.e_drho[e_name])) + 535 plt.errorbar(np.arange(length), self.e_rho[e_name][:length], yerr=self.e_drho[e_name][:], linewidth=1, capsize=2) + 536 plt.axvline(x=self.e_windowsize[e_name], color='r', alpha=0.25, ls='--', marker=',') + 537 if self.tau_exp[e_name] > 0: + 538 plt.plot([self.e_windowsize[e_name] + 1, self.e_windowsize[e_name] + 1 + 2 * self.tau_exp[e_name]], + 539 [self.e_rho[e_name][self.e_windowsize[e_name] + 1], 0], 'k-', lw=1) + 540 xmax = self.e_windowsize[e_name] + 2 * self.tau_exp[e_name] + 1.5 + 541 plt.title('Rho ' + e_name + r', tau\_exp=' + str(np.around(self.tau_exp[e_name], decimals=2))) + 542 else: + 543 xmax = max(10.5, 2 * self.e_windowsize[e_name] - 0.5) + 544 plt.title('Rho ' + e_name + ', S=' + str(np.around(self.S[e_name], decimals=2))) + 545 plt.plot([-0.5, xmax], [0, 0], 'k--', lw=1) + 546 plt.xlim(-0.5, xmax) + 547 plt.draw() + 548 if save: + 549 fig.savefig(save + "_" + str(e)) + 550 + 551 def plot_rep_dist(self): + 552 """Plot replica distribution for each ensemble with more than one replicum.""" + 553 if not hasattr(self, 'e_dvalue'): + 554 raise Exception('Run the gamma method first.') + 555 for e, e_name in enumerate(self.mc_names): + 556 if len(self.e_content[e_name]) == 1: + 557 print('No replica distribution for a single replicum (', e_name, ')') + 558 continue + 559 r_length = [] + 560 sub_r_mean = 0 + 561 for r, r_name in enumerate(self.e_content[e_name]): + 562 r_length.append(len(self.deltas[r_name])) + 563 sub_r_mean += self.shape[r_name] * self.r_values[r_name] + 564 e_N = np.sum(r_length) + 565 sub_r_mean /= e_N + 566 arr = np.zeros(len(self.e_content[e_name])) + 567 for r, r_name in enumerate(self.e_content[e_name]): + 568 arr[r] = (self.r_values[r_name] - sub_r_mean) / (self.e_dvalue[e_name] * np.sqrt(e_N / self.shape[r_name] - 1)) + 569 plt.hist(arr, rwidth=0.8, bins=len(self.e_content[e_name])) + 570 plt.title('Replica distribution' + e_name + ' (mean=0, var=1)') + 571 plt.draw() + 572 + 573 def plot_history(self, expand=True): + 574 """Plot derived Monte Carlo history for each ensemble + 575 + 576 Parameters + 577 ---------- + 578 expand : bool + 579 show expanded history for irregular Monte Carlo chains (default: True). 
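plot_history's expand option pads irregular chains via _expand_deltas (defined near the end of this file): fluctuations defined on configurations [0, 2, 3] are spread onto the contiguous range 0..3, with zeros filling the holes. In effect:

    import numpy as np

    deltas, idx = np.array([0.1, -0.2, 0.3]), [0, 2, 3]
    ret = np.zeros(idx[-1] - idx[0] + 1)
    for i, d in zip(idx, deltas):
        ret[i - idx[0]] = d
    print(ret)  # [ 0.1  0.  -0.2  0.3]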
+ 580 """ + 581 for e, e_name in enumerate(self.mc_names): + 582 plt.figure() + 583 r_length = [] + 584 tmp = [] + 585 tmp_expanded = [] + 586 for r, r_name in enumerate(self.e_content[e_name]): + 587 tmp.append(self.deltas[r_name] + self.r_values[r_name]) + 588 if expand: + 589 tmp_expanded.append(_expand_deltas(self.deltas[r_name], list(self.idl[r_name]), self.shape[r_name]) + self.r_values[r_name]) + 590 r_length.append(len(tmp_expanded[-1])) + 591 else: + 592 r_length.append(len(tmp[-1])) + 593 e_N = np.sum(r_length) + 594 x = np.arange(e_N) + 595 y_test = np.concatenate(tmp, axis=0) + 596 if expand: + 597 y = np.concatenate(tmp_expanded, axis=0) + 598 else: + 599 y = y_test + 600 plt.errorbar(x, y, fmt='.', markersize=3) + 601 plt.xlim(-0.5, e_N - 0.5) + 602 plt.title(e_name + f'\nskew: {skew(y_test):.3f} (p={skewtest(y_test).pvalue:.3f}), kurtosis: {kurtosis(y_test):.3f} (p={kurtosistest(y_test).pvalue:.3f})') + 603 plt.draw() + 604 + 605 def plot_piechart(self, save=None): + 606 """Plot piechart which shows the fractional contribution of each + 607 ensemble to the error and returns a dictionary containing the fractions. + 608 + 609 Parameters + 610 ---------- + 611 save : str + 612 saves the figure to a file named 'save' if. + 613 """ + 614 if not hasattr(self, 'e_dvalue'): + 615 raise Exception('Run the gamma method first.') + 616 if np.isclose(0.0, self._dvalue, atol=1e-15): + 617 raise Exception('Error is 0.0') + 618 labels = self.e_names + 619 sizes = [self.e_dvalue[name] ** 2 for name in labels] / self._dvalue ** 2 + 620 fig1, ax1 = plt.subplots() + 621 ax1.pie(sizes, labels=labels, startangle=90, normalize=True) + 622 ax1.axis('equal') + 623 plt.draw() + 624 if save: + 625 fig1.savefig(save) + 626 + 627 return dict(zip(self.e_names, sizes)) + 628 + 629 def dump(self, filename, datatype="json.gz", description="", **kwargs): + 630 """Dump the Obs to a file 'name' of chosen format. + 631 + 632 Parameters + 633 ---------- + 634 filename : str + 635 name of the file to be saved. + 636 datatype : str + 637 Format of the exported file. Supported formats include + 638 "json.gz" and "pickle" + 639 description : str + 640 Description for output file, only relevant for json.gz format. + 641 path : str + 642 specifies a custom path for the file (default '.') + 643 """ + 644 if 'path' in kwargs: + 645 file_name = kwargs.get('path') + '/' + filename + 646 else: + 647 file_name = filename + 648 + 649 if datatype == "json.gz": + 650 from .input.json import dump_to_json + 651 dump_to_json([self], file_name, description=description) + 652 elif datatype == "pickle": + 653 with open(file_name + '.p', 'wb') as fb: + 654 pickle.dump(self, fb) + 655 else: + 656 raise Exception("Unknown datatype " + str(datatype)) + 657 + 658 def export_jackknife(self): + 659 """Export jackknife samples from the Obs + 660 + 661 Returns + 662 ------- + 663 numpy.ndarray + 664 Returns a numpy array of length N + 1 where N is the number of samples + 665 for the given ensemble and replicum. The zeroth entry of the array contains + 666 the mean value of the Obs, entries 1 to N contain the N jackknife samples + 667 derived from the Obs. The current implementation only works for observables + 668 defined on exactly one ensemble and replicum. The derived jackknife samples + 669 should agree with samples from a full jackknife analysis up to O(1/N). 
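The samples produced by export_jackknife are the leave-one-out means (N * mean - x_j) / (N - 1). For uncorrelated data the standard jackknife variance (N - 1)/N * sum_j (jack_j - mean)**2 reduces to the naive error of the mean, which gives a quick sanity check:

    import numpy as np

    x = np.array([1.0, 2.0, 4.0, 3.0])
    n, mean = x.size, x.mean()
    jacks = (n * mean - x) / (n - 1)  # leave-one-out means, as in export_jackknife
    err = np.sqrt((n - 1) / n * np.sum((jacks - mean) ** 2))
    print(np.isclose(err, x.std(ddof=1) / np.sqrt(n)))  # True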
+ 670 """ + 671 + 672 if len(self.names) != 1: + 673 raise Exception("'export_jackknife' is only implemented for Obs defined on one ensemble and replicum.") + 674 + 675 name = self.names[0] + 676 full_data = self.deltas[name] + self.r_values[name] + 677 n = full_data.size + 678 mean = self.value + 679 tmp_jacks = np.zeros(n + 1) + 680 tmp_jacks[0] = mean + 681 tmp_jacks[1:] = (n * mean - full_data) / (n - 1) + 682 return tmp_jacks + 683 + 684 def __float__(self): + 685 return float(self.value) + 686 + 687 def __repr__(self): + 688 return 'Obs[' + str(self) + ']' + 689 + 690 def __str__(self): + 691 if self._dvalue == 0.0: + 692 return str(self.value) + 693 fexp = np.floor(np.log10(self._dvalue)) + 694 if fexp < 0.0: + 695 return '{:{form}}({:2.0f})'.format(self.value, self._dvalue * 10 ** (-fexp + 1), form='.' + str(-int(fexp) + 1) + 'f') + 696 elif fexp == 0.0: + 697 return '{:.1f}({:1.1f})'.format(self.value, self._dvalue) + 698 else: + 699 return '{:.0f}({:2.0f})'.format(self.value, self._dvalue) 700 - 701 def __le__(self, other): - 702 return self.value <= other - 703 - 704 def __gt__(self, other): - 705 return self.value > other - 706 - 707 def __ge__(self, other): - 708 return self.value >= other + 701 def __hash__(self): + 702 hash_tuple = (np.array([self.value]).astype(np.float32).data.tobytes(),) + 703 hash_tuple += tuple([o.astype(np.float32).data.tobytes() for o in self.deltas.values()]) + 704 hash_tuple += tuple([np.array([o.errsq()]).astype(np.float32).data.tobytes() for o in self.covobs.values()]) + 705 hash_tuple += tuple([o.encode() for o in self.names]) + 706 m = hashlib.md5() + 707 [m.update(o) for o in hash_tuple] + 708 return int(m.hexdigest(), 16) & 0xFFFFFFFF 709 - 710 def __eq__(self, other): - 711 return (self - other).is_zero() - 712 - 713 def __ne__(self, other): - 714 return not (self - other).is_zero() - 715 - 716 # Overload math operations - 717 def __add__(self, y): - 718 if isinstance(y, Obs): - 719 return derived_observable(lambda x, **kwargs: x[0] + x[1], [self, y], man_grad=[1, 1]) - 720 else: - 721 if isinstance(y, np.ndarray): - 722 return np.array([self + o for o in y]) - 723 elif y.__class__.__name__ in ['Corr', 'CObs']: - 724 return NotImplemented - 725 else: - 726 return derived_observable(lambda x, **kwargs: x[0] + y, [self], man_grad=[1]) - 727 - 728 def __radd__(self, y): - 729 return self + y - 730 - 731 def __mul__(self, y): - 732 if isinstance(y, Obs): - 733 return derived_observable(lambda x, **kwargs: x[0] * x[1], [self, y], man_grad=[y.value, self.value]) - 734 else: - 735 if isinstance(y, np.ndarray): - 736 return np.array([self * o for o in y]) - 737 elif isinstance(y, complex): - 738 return CObs(self * y.real, self * y.imag) - 739 elif y.__class__.__name__ in ['Corr', 'CObs']: - 740 return NotImplemented - 741 else: - 742 return derived_observable(lambda x, **kwargs: x[0] * y, [self], man_grad=[y]) + 710 # Overload comparisons + 711 def __lt__(self, other): + 712 return self.value < other + 713 + 714 def __le__(self, other): + 715 return self.value <= other + 716 + 717 def __gt__(self, other): + 718 return self.value > other + 719 + 720 def __ge__(self, other): + 721 return self.value >= other + 722 + 723 def __eq__(self, other): + 724 return (self - other).is_zero() + 725 + 726 def __ne__(self, other): + 727 return not (self - other).is_zero() + 728 + 729 # Overload math operations + 730 def __add__(self, y): + 731 if isinstance(y, Obs): + 732 return derived_observable(lambda x, **kwargs: x[0] + x[1], [self, y], man_grad=[1, 1]) + 733 
else: + 734 if isinstance(y, np.ndarray): + 735 return np.array([self + o for o in y]) + 736 elif y.__class__.__name__ in ['Corr', 'CObs']: + 737 return NotImplemented + 738 else: + 739 return derived_observable(lambda x, **kwargs: x[0] + y, [self], man_grad=[1]) + 740 + 741 def __radd__(self, y): + 742 return self + y 743 - 744 def __rmul__(self, y): - 745 return self * y - 746 - 747 def __sub__(self, y): - 748 if isinstance(y, Obs): - 749 return derived_observable(lambda x, **kwargs: x[0] - x[1], [self, y], man_grad=[1, -1]) - 750 else: - 751 if isinstance(y, np.ndarray): - 752 return np.array([self - o for o in y]) - 753 elif y.__class__.__name__ in ['Corr', 'CObs']: - 754 return NotImplemented - 755 else: - 756 return derived_observable(lambda x, **kwargs: x[0] - y, [self], man_grad=[1]) - 757 - 758 def __rsub__(self, y): - 759 return -1 * (self - y) - 760 - 761 def __pos__(self): - 762 return self - 763 - 764 def __neg__(self): - 765 return -1 * self - 766 - 767 def __truediv__(self, y): - 768 if isinstance(y, Obs): - 769 return derived_observable(lambda x, **kwargs: x[0] / x[1], [self, y], man_grad=[1 / y.value, - self.value / y.value ** 2]) - 770 else: - 771 if isinstance(y, np.ndarray): - 772 return np.array([self / o for o in y]) - 773 elif y.__class__.__name__ in ['Corr', 'CObs']: - 774 return NotImplemented - 775 else: - 776 return derived_observable(lambda x, **kwargs: x[0] / y, [self], man_grad=[1 / y]) - 777 - 778 def __rtruediv__(self, y): - 779 if isinstance(y, Obs): - 780 return derived_observable(lambda x, **kwargs: x[0] / x[1], [y, self], man_grad=[1 / self.value, - y.value / self.value ** 2]) - 781 else: - 782 if isinstance(y, np.ndarray): - 783 return np.array([o / self for o in y]) - 784 elif y.__class__.__name__ in ['Corr', 'CObs']: - 785 return NotImplemented - 786 else: - 787 return derived_observable(lambda x, **kwargs: y / x[0], [self], man_grad=[-y / self.value ** 2]) - 788 - 789 def __pow__(self, y): - 790 if isinstance(y, Obs): - 791 return derived_observable(lambda x: x[0] ** x[1], [self, y]) - 792 else: - 793 return derived_observable(lambda x: x[0] ** y, [self]) - 794 - 795 def __rpow__(self, y): - 796 if isinstance(y, Obs): - 797 return derived_observable(lambda x: x[0] ** x[1], [y, self]) - 798 else: - 799 return derived_observable(lambda x: y ** x[0], [self]) - 800 - 801 def __abs__(self): - 802 return derived_observable(lambda x: anp.abs(x[0]), [self]) - 803 - 804 # Overload numpy functions - 805 def sqrt(self): - 806 return derived_observable(lambda x, **kwargs: np.sqrt(x[0]), [self], man_grad=[1 / 2 / np.sqrt(self.value)]) + 744 def __mul__(self, y): + 745 if isinstance(y, Obs): + 746 return derived_observable(lambda x, **kwargs: x[0] * x[1], [self, y], man_grad=[y.value, self.value]) + 747 else: + 748 if isinstance(y, np.ndarray): + 749 return np.array([self * o for o in y]) + 750 elif isinstance(y, complex): + 751 return CObs(self * y.real, self * y.imag) + 752 elif y.__class__.__name__ in ['Corr', 'CObs']: + 753 return NotImplemented + 754 else: + 755 return derived_observable(lambda x, **kwargs: x[0] * y, [self], man_grad=[y]) + 756 + 757 def __rmul__(self, y): + 758 return self * y + 759 + 760 def __sub__(self, y): + 761 if isinstance(y, Obs): + 762 return derived_observable(lambda x, **kwargs: x[0] - x[1], [self, y], man_grad=[1, -1]) + 763 else: + 764 if isinstance(y, np.ndarray): + 765 return np.array([self - o for o in y]) + 766 elif y.__class__.__name__ in ['Corr', 'CObs']: + 767 return NotImplemented + 768 else: + 769 return 
derived_observable(lambda x, **kwargs: x[0] - y, [self], man_grad=[1]) + 770 + 771 def __rsub__(self, y): + 772 return -1 * (self - y) + 773 + 774 def __pos__(self): + 775 return self + 776 + 777 def __neg__(self): + 778 return -1 * self + 779 + 780 def __truediv__(self, y): + 781 if isinstance(y, Obs): + 782 return derived_observable(lambda x, **kwargs: x[0] / x[1], [self, y], man_grad=[1 / y.value, - self.value / y.value ** 2]) + 783 else: + 784 if isinstance(y, np.ndarray): + 785 return np.array([self / o for o in y]) + 786 elif y.__class__.__name__ in ['Corr', 'CObs']: + 787 return NotImplemented + 788 else: + 789 return derived_observable(lambda x, **kwargs: x[0] / y, [self], man_grad=[1 / y]) + 790 + 791 def __rtruediv__(self, y): + 792 if isinstance(y, Obs): + 793 return derived_observable(lambda x, **kwargs: x[0] / x[1], [y, self], man_grad=[1 / self.value, - y.value / self.value ** 2]) + 794 else: + 795 if isinstance(y, np.ndarray): + 796 return np.array([o / self for o in y]) + 797 elif y.__class__.__name__ in ['Corr', 'CObs']: + 798 return NotImplemented + 799 else: + 800 return derived_observable(lambda x, **kwargs: y / x[0], [self], man_grad=[-y / self.value ** 2]) + 801 + 802 def __pow__(self, y): + 803 if isinstance(y, Obs): + 804 return derived_observable(lambda x: x[0] ** x[1], [self, y]) + 805 else: + 806 return derived_observable(lambda x: x[0] ** y, [self]) 807 - 808 def log(self): - 809 return derived_observable(lambda x, **kwargs: np.log(x[0]), [self], man_grad=[1 / self.value]) - 810 - 811 def exp(self): - 812 return derived_observable(lambda x, **kwargs: np.exp(x[0]), [self], man_grad=[np.exp(self.value)]) + 808 def __rpow__(self, y): + 809 if isinstance(y, Obs): + 810 return derived_observable(lambda x: x[0] ** x[1], [y, self]) + 811 else: + 812 return derived_observable(lambda x: y ** x[0], [self]) 813 - 814 def sin(self): - 815 return derived_observable(lambda x, **kwargs: np.sin(x[0]), [self], man_grad=[np.cos(self.value)]) + 814 def __abs__(self): + 815 return derived_observable(lambda x: anp.abs(x[0]), [self]) 816 - 817 def cos(self): - 818 return derived_observable(lambda x, **kwargs: np.cos(x[0]), [self], man_grad=[-np.sin(self.value)]) - 819 - 820 def tan(self): - 821 return derived_observable(lambda x, **kwargs: np.tan(x[0]), [self], man_grad=[1 / np.cos(self.value) ** 2]) - 822 - 823 def arcsin(self): - 824 return derived_observable(lambda x: anp.arcsin(x[0]), [self]) - 825 - 826 def arccos(self): - 827 return derived_observable(lambda x: anp.arccos(x[0]), [self]) - 828 - 829 def arctan(self): - 830 return derived_observable(lambda x: anp.arctan(x[0]), [self]) - 831 - 832 def sinh(self): - 833 return derived_observable(lambda x, **kwargs: np.sinh(x[0]), [self], man_grad=[np.cosh(self.value)]) - 834 - 835 def cosh(self): - 836 return derived_observable(lambda x, **kwargs: np.cosh(x[0]), [self], man_grad=[np.sinh(self.value)]) - 837 - 838 def tanh(self): - 839 return derived_observable(lambda x, **kwargs: np.tanh(x[0]), [self], man_grad=[1 / np.cosh(self.value) ** 2]) - 840 - 841 def arcsinh(self): - 842 return derived_observable(lambda x: anp.arcsinh(x[0]), [self]) - 843 - 844 def arccosh(self): - 845 return derived_observable(lambda x: anp.arccosh(x[0]), [self]) - 846 - 847 def arctanh(self): - 848 return derived_observable(lambda x: anp.arctanh(x[0]), [self]) - 849 + 817 # Overload numpy functions + 818 def sqrt(self): + 819 return derived_observable(lambda x, **kwargs: np.sqrt(x[0]), [self], man_grad=[1 / 2 / np.sqrt(self.value)]) + 820 + 821 def 
log(self): + 822 return derived_observable(lambda x, **kwargs: np.log(x[0]), [self], man_grad=[1 / self.value]) + 823 + 824 def exp(self): + 825 return derived_observable(lambda x, **kwargs: np.exp(x[0]), [self], man_grad=[np.exp(self.value)]) + 826 + 827 def sin(self): + 828 return derived_observable(lambda x, **kwargs: np.sin(x[0]), [self], man_grad=[np.cos(self.value)]) + 829 + 830 def cos(self): + 831 return derived_observable(lambda x, **kwargs: np.cos(x[0]), [self], man_grad=[-np.sin(self.value)]) + 832 + 833 def tan(self): + 834 return derived_observable(lambda x, **kwargs: np.tan(x[0]), [self], man_grad=[1 / np.cos(self.value) ** 2]) + 835 + 836 def arcsin(self): + 837 return derived_observable(lambda x: anp.arcsin(x[0]), [self]) + 838 + 839 def arccos(self): + 840 return derived_observable(lambda x: anp.arccos(x[0]), [self]) + 841 + 842 def arctan(self): + 843 return derived_observable(lambda x: anp.arctan(x[0]), [self]) + 844 + 845 def sinh(self): + 846 return derived_observable(lambda x, **kwargs: np.sinh(x[0]), [self], man_grad=[np.cosh(self.value)]) + 847 + 848 def cosh(self): + 849 return derived_observable(lambda x, **kwargs: np.cosh(x[0]), [self], man_grad=[np.sinh(self.value)]) 850 - 851class CObs: - 852 """Class for a complex valued observable.""" - 853 __slots__ = ['_real', '_imag', 'tag'] - 854 - 855 def __init__(self, real, imag=0.0): - 856 self._real = real - 857 self._imag = imag - 858 self.tag = None + 851 def tanh(self): + 852 return derived_observable(lambda x, **kwargs: np.tanh(x[0]), [self], man_grad=[1 / np.cosh(self.value) ** 2]) + 853 + 854 def arcsinh(self): + 855 return derived_observable(lambda x: anp.arcsinh(x[0]), [self]) + 856 + 857 def arccosh(self): + 858 return derived_observable(lambda x: anp.arccosh(x[0]), [self]) 859 - 860 @property - 861 def real(self): - 862 return self._real + 860 def arctanh(self): + 861 return derived_observable(lambda x: anp.arctanh(x[0]), [self]) + 862 863 - 864 @property - 865 def imag(self): - 866 return self._imag + 864class CObs: + 865 """Class for a complex valued observable.""" + 866 __slots__ = ['_real', '_imag', 'tag'] 867 - 868 def gamma_method(self, **kwargs): - 869 """Executes the gamma_method for the real and the imaginary part.""" - 870 if isinstance(self.real, Obs): - 871 self.real.gamma_method(**kwargs) - 872 if isinstance(self.imag, Obs): - 873 self.imag.gamma_method(**kwargs) - 874 - 875 def is_zero(self): - 876 """Checks whether both real and imaginary part are zero within machine precision.""" - 877 return self.real == 0.0 and self.imag == 0.0 - 878 - 879 def conjugate(self): - 880 return CObs(self.real, -self.imag) - 881 - 882 def __add__(self, other): - 883 if isinstance(other, np.ndarray): - 884 return other + self - 885 elif hasattr(other, 'real') and hasattr(other, 'imag'): - 886 return CObs(self.real + other.real, - 887 self.imag + other.imag) - 888 else: - 889 return CObs(self.real + other, self.imag) - 890 - 891 def __radd__(self, y): - 892 return self + y - 893 - 894 def __sub__(self, other): - 895 if isinstance(other, np.ndarray): - 896 return -1 * (other - self) - 897 elif hasattr(other, 'real') and hasattr(other, 'imag'): - 898 return CObs(self.real - other.real, self.imag - other.imag) - 899 else: - 900 return CObs(self.real - other, self.imag) - 901 - 902 def __rsub__(self, other): - 903 return -1 * (self - other) - 904 - 905 def __mul__(self, other): - 906 if isinstance(other, np.ndarray): - 907 return other * self - 908 elif hasattr(other, 'real') and hasattr(other, 'imag'): - 909 if 
all(isinstance(i, Obs) for i in [self.real, self.imag, other.real, other.imag]): - 910 return CObs(derived_observable(lambda x, **kwargs: x[0] * x[1] - x[2] * x[3], - 911 [self.real, other.real, self.imag, other.imag], - 912 man_grad=[other.real.value, self.real.value, -other.imag.value, -self.imag.value]), - 913 derived_observable(lambda x, **kwargs: x[2] * x[1] + x[0] * x[3], - 914 [self.real, other.real, self.imag, other.imag], - 915 man_grad=[other.imag.value, self.imag.value, other.real.value, self.real.value])) - 916 elif getattr(other, 'imag', 0) != 0: - 917 return CObs(self.real * other.real - self.imag * other.imag, - 918 self.imag * other.real + self.real * other.imag) - 919 else: - 920 return CObs(self.real * other.real, self.imag * other.real) - 921 else: - 922 return CObs(self.real * other, self.imag * other) - 923 - 924 def __rmul__(self, other): - 925 return self * other - 926 - 927 def __truediv__(self, other): - 928 if isinstance(other, np.ndarray): - 929 return 1 / (other / self) - 930 elif hasattr(other, 'real') and hasattr(other, 'imag'): - 931 r = other.real ** 2 + other.imag ** 2 - 932 return CObs((self.real * other.real + self.imag * other.imag) / r, (self.imag * other.real - self.real * other.imag) / r) - 933 else: - 934 return CObs(self.real / other, self.imag / other) - 935 - 936 def __rtruediv__(self, other): - 937 r = self.real ** 2 + self.imag ** 2 - 938 if hasattr(other, 'real') and hasattr(other, 'imag'): - 939 return CObs((self.real * other.real + self.imag * other.imag) / r, (self.real * other.imag - self.imag * other.real) / r) - 940 else: - 941 return CObs(self.real * other / r, -self.imag * other / r) - 942 - 943 def __abs__(self): - 944 return np.sqrt(self.real**2 + self.imag**2) - 945 - 946 def __pos__(self): - 947 return self + 868 def __init__(self, real, imag=0.0): + 869 self._real = real + 870 self._imag = imag + 871 self.tag = None + 872 + 873 @property + 874 def real(self): + 875 return self._real + 876 + 877 @property + 878 def imag(self): + 879 return self._imag + 880 + 881 def gamma_method(self, **kwargs): + 882 """Executes the gamma_method for the real and the imaginary part.""" + 883 if isinstance(self.real, Obs): + 884 self.real.gamma_method(**kwargs) + 885 if isinstance(self.imag, Obs): + 886 self.imag.gamma_method(**kwargs) + 887 + 888 def is_zero(self): + 889 """Checks whether both real and imaginary part are zero within machine precision.""" + 890 return self.real == 0.0 and self.imag == 0.0 + 891 + 892 def conjugate(self): + 893 return CObs(self.real, -self.imag) + 894 + 895 def __add__(self, other): + 896 if isinstance(other, np.ndarray): + 897 return other + self + 898 elif hasattr(other, 'real') and hasattr(other, 'imag'): + 899 return CObs(self.real + other.real, + 900 self.imag + other.imag) + 901 else: + 902 return CObs(self.real + other, self.imag) + 903 + 904 def __radd__(self, y): + 905 return self + y + 906 + 907 def __sub__(self, other): + 908 if isinstance(other, np.ndarray): + 909 return -1 * (other - self) + 910 elif hasattr(other, 'real') and hasattr(other, 'imag'): + 911 return CObs(self.real - other.real, self.imag - other.imag) + 912 else: + 913 return CObs(self.real - other, self.imag) + 914 + 915 def __rsub__(self, other): + 916 return -1 * (self - other) + 917 + 918 def __mul__(self, other): + 919 if isinstance(other, np.ndarray): + 920 return other * self + 921 elif hasattr(other, 'real') and hasattr(other, 'imag'): + 922 if all(isinstance(i, Obs) for i in [self.real, self.imag, other.real, other.imag]): + 923 
return CObs(derived_observable(lambda x, **kwargs: x[0] * x[1] - x[2] * x[3], + 924 [self.real, other.real, self.imag, other.imag], + 925 man_grad=[other.real.value, self.real.value, -other.imag.value, -self.imag.value]), + 926 derived_observable(lambda x, **kwargs: x[2] * x[1] + x[0] * x[3], + 927 [self.real, other.real, self.imag, other.imag], + 928 man_grad=[other.imag.value, self.imag.value, other.real.value, self.real.value])) + 929 elif getattr(other, 'imag', 0) != 0: + 930 return CObs(self.real * other.real - self.imag * other.imag, + 931 self.imag * other.real + self.real * other.imag) + 932 else: + 933 return CObs(self.real * other.real, self.imag * other.real) + 934 else: + 935 return CObs(self.real * other, self.imag * other) + 936 + 937 def __rmul__(self, other): + 938 return self * other + 939 + 940 def __truediv__(self, other): + 941 if isinstance(other, np.ndarray): + 942 return 1 / (other / self) + 943 elif hasattr(other, 'real') and hasattr(other, 'imag'): + 944 r = other.real ** 2 + other.imag ** 2 + 945 return CObs((self.real * other.real + self.imag * other.imag) / r, (self.imag * other.real - self.real * other.imag) / r) + 946 else: + 947 return CObs(self.real / other, self.imag / other) 948 - 949 def __neg__(self): - 950 return -1 * self - 951 - 952 def __eq__(self, other): - 953 return self.real == other.real and self.imag == other.imag - 954 - 955 def __str__(self): - 956 return '(' + str(self.real) + int(self.imag >= 0.0) * '+' + str(self.imag) + 'j)' - 957 - 958 def __repr__(self): - 959 return 'CObs[' + str(self) + ']' - 960 + 949 def __rtruediv__(self, other): + 950 r = self.real ** 2 + self.imag ** 2 + 951 if hasattr(other, 'real') and hasattr(other, 'imag'): + 952 return CObs((self.real * other.real + self.imag * other.imag) / r, (self.real * other.imag - self.imag * other.real) / r) + 953 else: + 954 return CObs(self.real * other / r, -self.imag * other / r) + 955 + 956 def __abs__(self): + 957 return np.sqrt(self.real**2 + self.imag**2) + 958 + 959 def __pos__(self): + 960 return self 961 - 962def _expand_deltas(deltas, idx, shape): - 963 """Expand deltas defined on idx to a regular, contiguous range, where holes are filled by 0. - 964 If idx is of type range, the deltas are not changed - 965 - 966 Parameters - 967 ---------- - 968 deltas : list - 969 List of fluctuations - 970 idx : list - 971 List or range of configs on which the deltas are defined, has to be sorted in ascending order. - 972 shape : int - 973 Number of configs in idx. - 974 """ - 975 if isinstance(idx, range): - 976 return deltas - 977 else: - 978 ret = np.zeros(idx[-1] - idx[0] + 1) - 979 for i in range(shape): - 980 ret[idx[i] - idx[0]] = deltas[i] - 981 return ret - 982 - 983 - 984def _merge_idx(idl): - 985 """Returns the union of all lists in idl as sorted list - 986 - 987 Parameters - 988 ---------- - 989 idl : list - 990 List of lists or ranges. 
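_merge_idx first uses the groupby idiom to detect that all index lists are identical, in which case the first list is returned unchanged; otherwise it falls back to a plain sorted set union:

    from itertools import groupby

    g = groupby([[0, 2, 4], [0, 2, 4]])
    print(next(g, True) and not next(g, False))  # True: all lists identical
    print(sorted(set().union([0, 2, 4], [1, 2, 5])))  # [0, 1, 2, 4, 5]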
- 991 """ - 992 - 993 # Use groupby to efficiently check whether all elements of idl are identical - 994 try: - 995 g = groupby(idl) - 996 if next(g, True) and not next(g, False): - 997 return idl[0] - 998 except Exception: - 999 pass -1000 -1001 if np.all([type(idx) is range for idx in idl]): -1002 if len(set([idx[0] for idx in idl])) == 1: -1003 idstart = min([idx.start for idx in idl]) -1004 idstop = max([idx.stop for idx in idl]) -1005 idstep = min([idx.step for idx in idl]) -1006 return range(idstart, idstop, idstep) -1007 -1008 return sorted(set().union(*idl)) -1009 -1010 -1011def _intersection_idx(idl): -1012 """Returns the intersection of all lists in idl as sorted list + 962 def __neg__(self): + 963 return -1 * self + 964 + 965 def __eq__(self, other): + 966 return self.real == other.real and self.imag == other.imag + 967 + 968 def __str__(self): + 969 return '(' + str(self.real) + int(self.imag >= 0.0) * '+' + str(self.imag) + 'j)' + 970 + 971 def __repr__(self): + 972 return 'CObs[' + str(self) + ']' + 973 + 974 + 975def _expand_deltas(deltas, idx, shape): + 976 """Expand deltas defined on idx to a regular, contiguous range, where holes are filled by 0. + 977 If idx is of type range, the deltas are not changed + 978 + 979 Parameters + 980 ---------- + 981 deltas : list + 982 List of fluctuations + 983 idx : list + 984 List or range of configs on which the deltas are defined, has to be sorted in ascending order. + 985 shape : int + 986 Number of configs in idx. + 987 """ + 988 if isinstance(idx, range): + 989 return deltas + 990 else: + 991 ret = np.zeros(idx[-1] - idx[0] + 1) + 992 for i in range(shape): + 993 ret[idx[i] - idx[0]] = deltas[i] + 994 return ret + 995 + 996 + 997def _merge_idx(idl): + 998 """Returns the union of all lists in idl as sorted list + 999 +1000 Parameters +1001 ---------- +1002 idl : list +1003 List of lists or ranges. +1004 """ +1005 +1006 # Use groupby to efficiently check whether all elements of idl are identical +1007 try: +1008 g = groupby(idl) +1009 if next(g, True) and not next(g, False): +1010 return idl[0] +1011 except Exception: +1012 pass 1013 -1014 Parameters -1015 ---------- -1016 idl : list -1017 List of lists or ranges. -1018 """ -1019 -1020 def _lcm(*args): -1021 """Returns the lowest common multiple of args. +1014 if np.all([type(idx) is range for idx in idl]): +1015 if len(set([idx[0] for idx in idl])) == 1: +1016 idstart = min([idx.start for idx in idl]) +1017 idstop = max([idx.stop for idx in idl]) +1018 idstep = min([idx.step for idx in idl]) +1019 return range(idstart, idstop, idstep) +1020 +1021 return sorted(set().union(*idl)) 1022 -1023 From python 3.9 onwards the math library contains an lcm function.""" -1024 return reduce(lambda a, b: a * b // gcd(a, b), args) -1025 -1026 # Use groupby to efficiently check whether all elements of idl are identical -1027 try: -1028 g = groupby(idl) -1029 if next(g, True) and not next(g, False): -1030 return idl[0] -1031 except Exception: -1032 pass -1033 -1034 if np.all([type(idx) is range for idx in idl]): -1035 if len(set([idx[0] for idx in idl])) == 1: -1036 idstart = max([idx.start for idx in idl]) -1037 idstop = min([idx.stop for idx in idl]) -1038 idstep = _lcm(*[idx.step for idx in idl]) -1039 return range(idstart, idstop, idstep) -1040 -1041 return sorted(set.intersection(*[set(o) for o in idl])) -1042 -1043 -1044def _expand_deltas_for_merge(deltas, idx, shape, new_idx): -1045 """Expand deltas defined on idx to the list of configs that is defined by new_idx. 
-1046 New, empty entries are filled by 0. If idx and new_idx are of type range, the smallest -1047 common divisor of the step sizes is used as new step size. -1048 -1049 Parameters -1050 ---------- -1051 deltas : list -1052 List of fluctuations -1053 idx : list -1054 List or range of configs on which the deltas are defined. -1055 Has to be a subset of new_idx and has to be sorted in ascending order. -1056 shape : list -1057 Number of configs in idx. -1058 new_idx : list -1059 List of configs that defines the new range, has to be sorted in ascending order. -1060 """ +1023 +1024def _intersection_idx(idl): +1025 """Returns the intersection of all lists in idl as sorted list +1026 +1027 Parameters +1028 ---------- +1029 idl : list +1030 List of lists or ranges. +1031 """ +1032 +1033 def _lcm(*args): +1034 """Returns the lowest common multiple of args. +1035 +1036 From python 3.9 onwards the math library contains an lcm function.""" +1037 return reduce(lambda a, b: a * b // gcd(a, b), args) +1038 +1039 # Use groupby to efficiently check whether all elements of idl are identical +1040 try: +1041 g = groupby(idl) +1042 if next(g, True) and not next(g, False): +1043 return idl[0] +1044 except Exception: +1045 pass +1046 +1047 if np.all([type(idx) is range for idx in idl]): +1048 if len(set([idx[0] for idx in idl])) == 1: +1049 idstart = max([idx.start for idx in idl]) +1050 idstop = min([idx.stop for idx in idl]) +1051 idstep = _lcm(*[idx.step for idx in idl]) +1052 return range(idstart, idstop, idstep) +1053 +1054 return sorted(set.intersection(*[set(o) for o in idl])) +1055 +1056 +1057def _expand_deltas_for_merge(deltas, idx, shape, new_idx): +1058 """Expand deltas defined on idx to the list of configs that is defined by new_idx. +1059 New, empty entries are filled by 0. If idx and new_idx are of type range, the smallest +1060 common divisor of the step sizes is used as new step size. 1061 -1062 if type(idx) is range and type(new_idx) is range: -1063 if idx == new_idx: -1064 return deltas -1065 ret = np.zeros(new_idx[-1] - new_idx[0] + 1) -1066 for i in range(shape): -1067 ret[idx[i] - new_idx[0]] = deltas[i] -1068 return np.array([ret[new_idx[i] - new_idx[0]] for i in range(len(new_idx))]) -1069 -1070 -1071def _collapse_deltas_for_merge(deltas, idx, shape, new_idx): -1072 """Collapse deltas defined on idx to the list of configs that is defined by new_idx. -1073 If idx and new_idx are of type range, the smallest -1074 common divisor of the step sizes is used as new step size. -1075 -1076 Parameters -1077 ---------- -1078 deltas : list -1079 List of fluctuations -1080 idx : list -1081 List or range of configs on which the deltas are defined. -1082 Has to be a subset of new_idx and has to be sorted in ascending order. -1083 shape : list -1084 Number of configs in idx. -1085 new_idx : list -1086 List of configs that defines the new range, has to be sorted in ascending order. -1087 """ +1062 Parameters +1063 ---------- +1064 deltas : list +1065 List of fluctuations +1066 idx : list +1067 List or range of configs on which the deltas are defined. +1068 Has to be a subset of new_idx and has to be sorted in ascending order. +1069 shape : list +1070 Number of configs in idx. +1071 new_idx : list +1072 List of configs that defines the new range, has to be sorted in ascending order. 
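For ranges that share their first element both helpers can stay in the fast range branch; a small illustration (ranges chosen only for demonstration):

    _merge_idx([range(1, 100, 2), range(1, 100, 4)])         # -> range(1, 100, 2), minimum of the steps
    _intersection_idx([range(1, 100, 2), range(1, 100, 4)])  # -> range(1, 100, 4), lcm of the steps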
+1073 """ +1074 +1075 if type(idx) is range and type(new_idx) is range: +1076 if idx == new_idx: +1077 return deltas +1078 ret = np.zeros(new_idx[-1] - new_idx[0] + 1) +1079 for i in range(shape): +1080 ret[idx[i] - new_idx[0]] = deltas[i] +1081 return np.array([ret[new_idx[i] - new_idx[0]] for i in range(len(new_idx))]) +1082 +1083 +1084def _collapse_deltas_for_merge(deltas, idx, shape, new_idx): +1085 """Collapse deltas defined on idx to the list of configs that is defined by new_idx. +1086 If idx and new_idx are of type range, the smallest +1087 common divisor of the step sizes is used as new step size. 1088 -1089 if type(idx) is range and type(new_idx) is range: -1090 if idx == new_idx: -1091 return deltas -1092 ret = np.zeros(new_idx[-1] - new_idx[0] + 1) -1093 for i in range(shape): -1094 if idx[i] in new_idx: -1095 ret[idx[i] - new_idx[0]] = deltas[i] -1096 return np.array([ret[new_idx[i] - new_idx[0]] for i in range(len(new_idx))]) -1097 -1098 -1099def _filter_zeroes(deltas, idx, eps=Obs.filter_eps): -1100 """Filter out all configurations with vanishing fluctuation such that they do not -1101 contribute to the error estimate anymore. Returns the new deltas and -1102 idx according to the filtering. -1103 A fluctuation is considered to be vanishing, if it is smaller than eps times -1104 the mean of the absolute values of all deltas in one list. -1105 -1106 Parameters -1107 ---------- -1108 deltas : list -1109 List of fluctuations -1110 idx : list -1111 List or ranges of configs on which the deltas are defined. -1112 eps : float -1113 Prefactor that enters the filter criterion. -1114 """ -1115 new_deltas = [] -1116 new_idx = [] -1117 maxd = np.mean(np.fabs(deltas)) -1118 for i in range(len(deltas)): -1119 if abs(deltas[i]) > eps * maxd: -1120 new_deltas.append(deltas[i]) -1121 new_idx.append(idx[i]) -1122 if new_idx: -1123 return np.array(new_deltas), new_idx -1124 else: -1125 return deltas, idx -1126 -1127 -1128def derived_observable(func, data, array_mode=False, **kwargs): -1129 """Construct a derived Obs according to func(data, **kwargs) using automatic differentiation. -1130 -1131 Parameters -1132 ---------- -1133 func : object -1134 arbitrary function of the form func(data, **kwargs). For the -1135 automatic differentiation to work, all numpy functions have to have -1136 the autograd wrapper (use 'import autograd.numpy as anp'). -1137 data : list -1138 list of Obs, e.g. [obs1, obs2, obs3]. -1139 num_grad : bool -1140 if True, numerical derivatives are used instead of autograd -1141 (default False). To control the numerical differentiation the -1142 kwargs of numdifftools.step_generators.MaxStepGenerator -1143 can be used. -1144 man_grad : list -1145 manually supply a list or an array which contains the jacobian -1146 of func. Use cautiously, supplying the wrong derivative will -1147 not be intercepted. -1148 -1149 Notes -1150 ----- -1151 For simple mathematical operations it can be practical to use anonymous -1152 functions. For the ratio of two observables one can e.g. 
use -1153 -1154 new_obs = derived_observable(lambda x: x[0] / x[1], [obs1, obs2]) -1155 """ -1156 -1157 data = np.asarray(data) -1158 raveled_data = data.ravel() -1159 -1160 # Workaround for matrix operations containing non Obs data -1161 if not all(isinstance(x, Obs) for x in raveled_data): -1162 for i in range(len(raveled_data)): -1163 if isinstance(raveled_data[i], (int, float)): -1164 raveled_data[i] = cov_Obs(raveled_data[i], 0.0, "###dummy_covobs###") -1165 -1166 allcov = {} -1167 for o in raveled_data: -1168 for name in o.cov_names: -1169 if name in allcov: -1170 if not np.allclose(allcov[name], o.covobs[name].cov): -1171 raise Exception('Inconsistent covariance matrices for %s!' % (name)) -1172 else: -1173 allcov[name] = o.covobs[name].cov -1174 -1175 n_obs = len(raveled_data) -1176 new_names = sorted(set([y for x in [o.names for o in raveled_data] for y in x])) -1177 new_cov_names = sorted(set([y for x in [o.cov_names for o in raveled_data] for y in x])) -1178 new_sample_names = sorted(set(new_names) - set(new_cov_names)) -1179 -1180 is_merged = {name: (len(list(filter(lambda o: o.is_merged.get(name, False) is True, raveled_data))) > 0) for name in new_sample_names} -1181 reweighted = len(list(filter(lambda o: o.reweighted is True, raveled_data))) > 0 -1182 -1183 if data.ndim == 1: -1184 values = np.array([o.value for o in data]) -1185 else: -1186 values = np.vectorize(lambda x: x.value)(data) +1089 Parameters +1090 ---------- +1091 deltas : list +1092 List of fluctuations +1093 idx : list +1094 List or range of configs on which the deltas are defined. +1095 Has to be a subset of new_idx and has to be sorted in ascending order. +1096 shape : list +1097 Number of configs in idx. +1098 new_idx : list +1099 List of configs that defines the new range, has to be sorted in ascending order. +1100 """ +1101 +1102 if type(idx) is range and type(new_idx) is range: +1103 if idx == new_idx: +1104 return deltas +1105 ret = np.zeros(new_idx[-1] - new_idx[0] + 1) +1106 for i in range(shape): +1107 if idx[i] in new_idx: +1108 ret[idx[i] - new_idx[0]] = deltas[i] +1109 return np.array([ret[new_idx[i] - new_idx[0]] for i in range(len(new_idx))]) +1110 +1111 +1112def _filter_zeroes(deltas, idx, eps=Obs.filter_eps): +1113 """Filter out all configurations with vanishing fluctuation such that they do not +1114 contribute to the error estimate anymore. Returns the new deltas and +1115 idx according to the filtering. +1116 A fluctuation is considered to be vanishing, if it is smaller than eps times +1117 the mean of the absolute values of all deltas in one list. +1118 +1119 Parameters +1120 ---------- +1121 deltas : list +1122 List of fluctuations +1123 idx : list +1124 List or ranges of configs on which the deltas are defined. +1125 eps : float +1126 Prefactor that enters the filter criterion. +1127 """ +1128 new_deltas = [] +1129 new_idx = [] +1130 maxd = np.mean(np.fabs(deltas)) +1131 for i in range(len(deltas)): +1132 if abs(deltas[i]) > eps * maxd: +1133 new_deltas.append(deltas[i]) +1134 new_idx.append(idx[i]) +1135 if new_idx: +1136 return np.array(new_deltas), new_idx +1137 else: +1138 return deltas, idx +1139 +1140 +1141def derived_observable(func, data, array_mode=False, **kwargs): +1142 """Construct a derived Obs according to func(data, **kwargs) using automatic differentiation. +1143 +1144 Parameters +1145 ---------- +1146 func : object +1147 arbitrary function of the form func(data, **kwargs). 
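Besides the ratio example from the docstring, man_grad can bypass autograd when the Jacobian is known analytically. A hedged sketch (obs1 and obs2 assumed to be Obs), mirroring the pattern used by CObs.__mul__ above:

    prod = derived_observable(lambda x, **kwargs: x[0] * x[1],
                              [obs1, obs2],
                              man_grad=[obs2.value, obs1.value])  # d(xy)/dx = y, d(xy)/dy = x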
For the +1148 automatic differentiation to work, all numpy functions have to have +1149 the autograd wrapper (use 'import autograd.numpy as anp'). +1150 data : list +1151 list of Obs, e.g. [obs1, obs2, obs3]. +1152 num_grad : bool +1153 if True, numerical derivatives are used instead of autograd +1154 (default False). To control the numerical differentiation the +1155 kwargs of numdifftools.step_generators.MaxStepGenerator +1156 can be used. +1157 man_grad : list +1158 manually supply a list or an array which contains the jacobian +1159 of func. Use cautiously, supplying the wrong derivative will +1160 not be intercepted. +1161 +1162 Notes +1163 ----- +1164 For simple mathematical operations it can be practical to use anonymous +1165 functions. For the ratio of two observables one can e.g. use +1166 +1167 new_obs = derived_observable(lambda x: x[0] / x[1], [obs1, obs2]) +1168 """ +1169 +1170 data = np.asarray(data) +1171 raveled_data = data.ravel() +1172 +1173 # Workaround for matrix operations containing non Obs data +1174 if not all(isinstance(x, Obs) for x in raveled_data): +1175 for i in range(len(raveled_data)): +1176 if isinstance(raveled_data[i], (int, float)): +1177 raveled_data[i] = cov_Obs(raveled_data[i], 0.0, "###dummy_covobs###") +1178 +1179 allcov = {} +1180 for o in raveled_data: +1181 for name in o.cov_names: +1182 if name in allcov: +1183 if not np.allclose(allcov[name], o.covobs[name].cov): +1184 raise Exception('Inconsistent covariance matrices for %s!' % (name)) +1185 else: +1186 allcov[name] = o.covobs[name].cov 1187 -1188 new_values = func(values, **kwargs) -1189 -1190 multi = int(isinstance(new_values, np.ndarray)) -1191 -1192 new_r_values = {} -1193 new_idl_d = {} -1194 for name in new_sample_names: -1195 idl = [] -1196 tmp_values = np.zeros(n_obs) -1197 for i, item in enumerate(raveled_data): -1198 tmp_values[i] = item.r_values.get(name, item.value) -1199 tmp_idl = item.idl.get(name) -1200 if tmp_idl is not None: -1201 idl.append(tmp_idl) -1202 if multi > 0: -1203 tmp_values = np.array(tmp_values).reshape(data.shape) -1204 new_r_values[name] = func(tmp_values, **kwargs) -1205 new_idl_d[name] = _merge_idx(idl) -1206 if not is_merged[name]: -1207 is_merged[name] = (1 != len(set([len(idx) for idx in [*idl, new_idl_d[name]]]))) -1208 -1209 if 'man_grad' in kwargs: -1210 deriv = np.asarray(kwargs.get('man_grad')) -1211 if new_values.shape + data.shape != deriv.shape: -1212 raise Exception('Manual derivative does not have correct shape.') -1213 elif kwargs.get('num_grad') is True: -1214 if multi > 0: -1215 raise Exception('Multi mode currently not supported for numerical derivative') -1216 options = { -1217 'base_step': 0.1, -1218 'step_ratio': 2.5} -1219 for key in options.keys(): -1220 kwarg = kwargs.get(key) -1221 if kwarg is not None: -1222 options[key] = kwarg -1223 tmp_df = nd.Gradient(func, order=4, **{k: v for k, v in options.items() if v is not None})(values, **kwargs) -1224 if tmp_df.size == 1: -1225 deriv = np.array([tmp_df.real]) -1226 else: -1227 deriv = tmp_df.real -1228 else: -1229 deriv = jacobian(func)(values, **kwargs) -1230 -1231 final_result = np.zeros(new_values.shape, dtype=object) -1232 -1233 if array_mode is True: -1234 -1235 class _Zero_grad(): -1236 def __init__(self, N): -1237 self.grad = np.zeros((N, 1)) -1238 -1239 new_covobs_lengths = dict(set([y for x in [[(n, o.covobs[n].N) for n in o.cov_names] for o in raveled_data] for y in x])) -1240 d_extracted = {} -1241 g_extracted = {} -1242 for name in new_sample_names: -1243 d_extracted[name] = 
[] -1244 ens_length = len(new_idl_d[name]) -1245 for i_dat, dat in enumerate(data): -1246 d_extracted[name].append(np.array([_expand_deltas_for_merge(o.deltas.get(name, np.zeros(ens_length)), o.idl.get(name, new_idl_d[name]), o.shape.get(name, ens_length), new_idl_d[name]) for o in dat.reshape(np.prod(dat.shape))]).reshape(dat.shape + (ens_length, ))) -1247 for name in new_cov_names: -1248 g_extracted[name] = [] -1249 zero_grad = _Zero_grad(new_covobs_lengths[name]) -1250 for i_dat, dat in enumerate(data): -1251 g_extracted[name].append(np.array([o.covobs.get(name, zero_grad).grad for o in dat.reshape(np.prod(dat.shape))]).reshape(dat.shape + (new_covobs_lengths[name], 1))) -1252 -1253 for i_val, new_val in np.ndenumerate(new_values): -1254 new_deltas = {} -1255 new_grad = {} -1256 if array_mode is True: -1257 for name in new_sample_names: -1258 ens_length = d_extracted[name][0].shape[-1] -1259 new_deltas[name] = np.zeros(ens_length) -1260 for i_dat, dat in enumerate(d_extracted[name]): -1261 new_deltas[name] += np.tensordot(deriv[i_val + (i_dat, )], dat) -1262 for name in new_cov_names: -1263 new_grad[name] = 0 -1264 for i_dat, dat in enumerate(g_extracted[name]): -1265 new_grad[name] += np.tensordot(deriv[i_val + (i_dat, )], dat) -1266 else: -1267 for j_obs, obs in np.ndenumerate(data): -1268 for name in obs.names: -1269 if name in obs.cov_names: -1270 new_grad[name] = new_grad.get(name, 0) + deriv[i_val + j_obs] * obs.covobs[name].grad -1271 else: -1272 new_deltas[name] = new_deltas.get(name, 0) + deriv[i_val + j_obs] * _expand_deltas_for_merge(obs.deltas[name], obs.idl[name], obs.shape[name], new_idl_d[name]) -1273 -1274 new_covobs = {name: Covobs(0, allcov[name], name, grad=new_grad[name]) for name in new_grad} -1275 -1276 if not set(new_covobs.keys()).isdisjoint(new_deltas.keys()): -1277 raise Exception('The same name has been used for deltas and covobs!') -1278 new_samples = [] -1279 new_means = [] -1280 new_idl = [] -1281 new_names_obs = [] -1282 for name in new_names: -1283 if name not in new_covobs: -1284 if is_merged[name]: -1285 filtered_deltas, filtered_idl_d = _filter_zeroes(new_deltas[name], new_idl_d[name]) -1286 else: -1287 filtered_deltas = new_deltas[name] -1288 filtered_idl_d = new_idl_d[name] -1289 -1290 new_samples.append(filtered_deltas) -1291 new_idl.append(filtered_idl_d) -1292 new_means.append(new_r_values[name][i_val]) -1293 new_names_obs.append(name) -1294 final_result[i_val] = Obs(new_samples, new_names_obs, means=new_means, idl=new_idl) -1295 for name in new_covobs: -1296 final_result[i_val].names.append(name) -1297 final_result[i_val]._covobs = new_covobs -1298 final_result[i_val]._value = new_val -1299 final_result[i_val].is_merged = is_merged -1300 final_result[i_val].reweighted = reweighted -1301 -1302 if multi == 0: -1303 final_result = final_result.item() -1304 -1305 return final_result -1306 -1307 -1308def _reduce_deltas(deltas, idx_old, idx_new): -1309 """Extract deltas defined on idx_old on all configs of idx_new. -1310 -1311 Assumes, that idx_old and idx_new are correctly defined idl, i.e., they -1312 are ordered in an ascending order. -1313 -1314 Parameters -1315 ---------- -1316 deltas : list -1317 List of fluctuations -1318 idx_old : list -1319 List or range of configs on which the deltas are defined -1320 idx_new : list -1321 List of configs for which we want to extract the deltas. -1322 Has to be a subset of idx_old. 
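An illustration of the subset extraction performed by _reduce_deltas (made-up values): only the deltas on configurations 1 and 3 survive.

    _reduce_deltas([1.0, 2.0, 3.0], [1, 2, 3], [1, 3])
    # -> array([1., 3.])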
-1323 """ -1324 if not len(deltas) == len(idx_old): -1325 raise Exception('Length of deltas and idx_old have to be the same: %d != %d' % (len(deltas), len(idx_old))) -1326 if type(idx_old) is range and type(idx_new) is range: -1327 if idx_old == idx_new: -1328 return deltas -1329 shape = len(idx_new) -1330 ret = np.zeros(shape) -1331 oldpos = 0 -1332 for i in range(shape): -1333 pos = -1 -1334 for j in range(oldpos, len(idx_old)): -1335 if idx_old[j] == idx_new[i]: -1336 pos = j -1337 break -1338 if pos < 0: -1339 raise Exception('Error in _reduce_deltas: Config %d not in idx_old' % (idx_new[i])) -1340 ret[i] = deltas[pos] -1341 oldpos = pos -1342 return np.array(ret) -1343 -1344 -1345def reweight(weight, obs, **kwargs): -1346 """Reweight a list of observables. -1347 -1348 Parameters -1349 ---------- -1350 weight : Obs -1351 Reweighting factor. An Observable that has to be defined on a superset of the -1352 configurations in obs[i].idl for all i. -1353 obs : list -1354 list of Obs, e.g. [obs1, obs2, obs3]. -1355 all_configs : bool -1356 if True, the reweighted observables are normalized by the average of -1357 the reweighting factor on all configurations in weight.idl and not -1358 on the configurations in obs[i].idl. Default False. -1359 """ -1360 result = [] -1361 for i in range(len(obs)): -1362 if len(obs[i].cov_names): -1363 raise Exception('Error: Not possible to reweight an Obs that contains covobs!') -1364 if not set(obs[i].names).issubset(weight.names): -1365 raise Exception('Error: Ensembles do not fit') -1366 for name in obs[i].names: -1367 if not set(obs[i].idl[name]).issubset(weight.idl[name]): -1368 raise Exception('obs[%d] has to be defined on a subset of the configs in weight.idl[%s]!' % (i, name)) -1369 new_samples = [] -1370 w_deltas = {} -1371 for name in sorted(obs[i].names): -1372 w_deltas[name] = _reduce_deltas(weight.deltas[name], weight.idl[name], obs[i].idl[name]) -1373 new_samples.append((w_deltas[name] + weight.r_values[name]) * (obs[i].deltas[name] + obs[i].r_values[name])) -1374 tmp_obs = Obs(new_samples, sorted(obs[i].names), idl=[obs[i].idl[name] for name in sorted(obs[i].names)]) -1375 -1376 if kwargs.get('all_configs'): -1377 new_weight = weight -1378 else: -1379 new_weight = Obs([w_deltas[name] + weight.r_values[name] for name in sorted(obs[i].names)], sorted(obs[i].names), idl=[obs[i].idl[name] for name in sorted(obs[i].names)]) -1380 -1381 result.append(tmp_obs / new_weight) -1382 result[-1].reweighted = True -1383 result[-1].is_merged = obs[i].is_merged -1384 -1385 return result -1386 -1387 -1388def correlate(obs_a, obs_b): -1389 """Correlate two observables. 
-1390 -1391 Parameters -1392 ---------- -1393 obs_a : Obs -1394 First observable -1395 obs_b : Obs -1396 Second observable +1188 n_obs = len(raveled_data) +1189 new_names = sorted(set([y for x in [o.names for o in raveled_data] for y in x])) +1190 new_cov_names = sorted(set([y for x in [o.cov_names for o in raveled_data] for y in x])) +1191 new_sample_names = sorted(set(new_names) - set(new_cov_names)) +1192 +1193 is_merged = {name: (len(list(filter(lambda o: o.is_merged.get(name, False) is True, raveled_data))) > 0) for name in new_sample_names} +1194 reweighted = len(list(filter(lambda o: o.reweighted is True, raveled_data))) > 0 +1195 +1196 if data.ndim == 1: +1197 values = np.array([o.value for o in data]) +1198 else: +1199 values = np.vectorize(lambda x: x.value)(data) +1200 +1201 new_values = func(values, **kwargs) +1202 +1203 multi = int(isinstance(new_values, np.ndarray)) +1204 +1205 new_r_values = {} +1206 new_idl_d = {} +1207 for name in new_sample_names: +1208 idl = [] +1209 tmp_values = np.zeros(n_obs) +1210 for i, item in enumerate(raveled_data): +1211 tmp_values[i] = item.r_values.get(name, item.value) +1212 tmp_idl = item.idl.get(name) +1213 if tmp_idl is not None: +1214 idl.append(tmp_idl) +1215 if multi > 0: +1216 tmp_values = np.array(tmp_values).reshape(data.shape) +1217 new_r_values[name] = func(tmp_values, **kwargs) +1218 new_idl_d[name] = _merge_idx(idl) +1219 if not is_merged[name]: +1220 is_merged[name] = (1 != len(set([len(idx) for idx in [*idl, new_idl_d[name]]]))) +1221 +1222 if 'man_grad' in kwargs: +1223 deriv = np.asarray(kwargs.get('man_grad')) +1224 if new_values.shape + data.shape != deriv.shape: +1225 raise Exception('Manual derivative does not have correct shape.') +1226 elif kwargs.get('num_grad') is True: +1227 if multi > 0: +1228 raise Exception('Multi mode currently not supported for numerical derivative') +1229 options = { +1230 'base_step': 0.1, +1231 'step_ratio': 2.5} +1232 for key in options.keys(): +1233 kwarg = kwargs.get(key) +1234 if kwarg is not None: +1235 options[key] = kwarg +1236 tmp_df = nd.Gradient(func, order=4, **{k: v for k, v in options.items() if v is not None})(values, **kwargs) +1237 if tmp_df.size == 1: +1238 deriv = np.array([tmp_df.real]) +1239 else: +1240 deriv = tmp_df.real +1241 else: +1242 deriv = jacobian(func)(values, **kwargs) +1243 +1244 final_result = np.zeros(new_values.shape, dtype=object) +1245 +1246 if array_mode is True: +1247 +1248 class _Zero_grad(): +1249 def __init__(self, N): +1250 self.grad = np.zeros((N, 1)) +1251 +1252 new_covobs_lengths = dict(set([y for x in [[(n, o.covobs[n].N) for n in o.cov_names] for o in raveled_data] for y in x])) +1253 d_extracted = {} +1254 g_extracted = {} +1255 for name in new_sample_names: +1256 d_extracted[name] = [] +1257 ens_length = len(new_idl_d[name]) +1258 for i_dat, dat in enumerate(data): +1259 d_extracted[name].append(np.array([_expand_deltas_for_merge(o.deltas.get(name, np.zeros(ens_length)), o.idl.get(name, new_idl_d[name]), o.shape.get(name, ens_length), new_idl_d[name]) for o in dat.reshape(np.prod(dat.shape))]).reshape(dat.shape + (ens_length, ))) +1260 for name in new_cov_names: +1261 g_extracted[name] = [] +1262 zero_grad = _Zero_grad(new_covobs_lengths[name]) +1263 for i_dat, dat in enumerate(data): +1264 g_extracted[name].append(np.array([o.covobs.get(name, zero_grad).grad for o in dat.reshape(np.prod(dat.shape))]).reshape(dat.shape + (new_covobs_lengths[name], 1))) +1265 +1266 for i_val, new_val in np.ndenumerate(new_values): +1267 new_deltas = {} +1268 
new_grad = {} +1269 if array_mode is True: +1270 for name in new_sample_names: +1271 ens_length = d_extracted[name][0].shape[-1] +1272 new_deltas[name] = np.zeros(ens_length) +1273 for i_dat, dat in enumerate(d_extracted[name]): +1274 new_deltas[name] += np.tensordot(deriv[i_val + (i_dat, )], dat) +1275 for name in new_cov_names: +1276 new_grad[name] = 0 +1277 for i_dat, dat in enumerate(g_extracted[name]): +1278 new_grad[name] += np.tensordot(deriv[i_val + (i_dat, )], dat) +1279 else: +1280 for j_obs, obs in np.ndenumerate(data): +1281 for name in obs.names: +1282 if name in obs.cov_names: +1283 new_grad[name] = new_grad.get(name, 0) + deriv[i_val + j_obs] * obs.covobs[name].grad +1284 else: +1285 new_deltas[name] = new_deltas.get(name, 0) + deriv[i_val + j_obs] * _expand_deltas_for_merge(obs.deltas[name], obs.idl[name], obs.shape[name], new_idl_d[name]) +1286 +1287 new_covobs = {name: Covobs(0, allcov[name], name, grad=new_grad[name]) for name in new_grad} +1288 +1289 if not set(new_covobs.keys()).isdisjoint(new_deltas.keys()): +1290 raise Exception('The same name has been used for deltas and covobs!') +1291 new_samples = [] +1292 new_means = [] +1293 new_idl = [] +1294 new_names_obs = [] +1295 for name in new_names: +1296 if name not in new_covobs: +1297 if is_merged[name]: +1298 filtered_deltas, filtered_idl_d = _filter_zeroes(new_deltas[name], new_idl_d[name]) +1299 else: +1300 filtered_deltas = new_deltas[name] +1301 filtered_idl_d = new_idl_d[name] +1302 +1303 new_samples.append(filtered_deltas) +1304 new_idl.append(filtered_idl_d) +1305 new_means.append(new_r_values[name][i_val]) +1306 new_names_obs.append(name) +1307 final_result[i_val] = Obs(new_samples, new_names_obs, means=new_means, idl=new_idl) +1308 for name in new_covobs: +1309 final_result[i_val].names.append(name) +1310 final_result[i_val]._covobs = new_covobs +1311 final_result[i_val]._value = new_val +1312 final_result[i_val].is_merged = is_merged +1313 final_result[i_val].reweighted = reweighted +1314 +1315 if multi == 0: +1316 final_result = final_result.item() +1317 +1318 return final_result +1319 +1320 +1321def _reduce_deltas(deltas, idx_old, idx_new): +1322 """Extract deltas defined on idx_old on all configs of idx_new. +1323 +1324 Assumes that idx_old and idx_new are correctly defined idl, i.e., they +1325 are ordered in ascending order. +1326 +1327 Parameters +1328 ---------- +1329 deltas : list +1330 List of fluctuations +1331 idx_old : list +1332 List or range of configs on which the deltas are defined +1333 idx_new : list +1334 List of configs for which we want to extract the deltas. +1335 Has to be a subset of idx_old. +1336 """ +1337 if not len(deltas) == len(idx_old): +1338 raise Exception('Length of deltas and idx_old have to be the same: %d != %d' % (len(deltas), len(idx_old))) +1339 if type(idx_old) is range and type(idx_new) is range: +1340 if idx_old == idx_new: +1341 return deltas +1342 shape = len(idx_new) +1343 ret = np.zeros(shape) +1344 oldpos = 0 +1345 for i in range(shape): +1346 pos = -1 +1347 for j in range(oldpos, len(idx_old)): +1348 if idx_old[j] == idx_new[i]: +1349 pos = j +1350 break +1351 if pos < 0: +1352 raise Exception('Error in _reduce_deltas: Config %d not in idx_old' % (idx_new[i])) +1353 ret[i] = deltas[pos] +1354 oldpos = pos +1355 return np.array(ret) +1356 +1357 +1358def reweight(weight, obs, **kwargs): +1359 """Reweight a list of observables. +1360 +1361 Parameters +1362 ---------- +1363 weight : Obs +1364 Reweighting factor.
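For matrix-valued functions, array_mode=True switches to the block-wise delta extraction shown above instead of per-entry expansion. A hedged sketch (mat assumed to be a square numpy array of Obs; anp is autograd's numpy wrapper):

    import autograd.numpy as anp
    mat_inv = derived_observable(lambda x, **kwargs: anp.linalg.inv(x), mat, array_mode=True)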
An Observable that has to be defined on a superset of the +1365 configurations in obs[i].idl for all i. +1366 obs : list +1367 list of Obs, e.g. [obs1, obs2, obs3]. +1368 all_configs : bool +1369 if True, the reweighted observables are normalized by the average of +1370 the reweighting factor on all configurations in weight.idl and not +1371 on the configurations in obs[i].idl. Default False. +1372 """ +1373 result = [] +1374 for i in range(len(obs)): +1375 if len(obs[i].cov_names): +1376 raise Exception('Error: Not possible to reweight an Obs that contains covobs!') +1377 if not set(obs[i].names).issubset(weight.names): +1378 raise Exception('Error: Ensembles do not fit') +1379 for name in obs[i].names: +1380 if not set(obs[i].idl[name]).issubset(weight.idl[name]): +1381 raise Exception('obs[%d] has to be defined on a subset of the configs in weight.idl[%s]!' % (i, name)) +1382 new_samples = [] +1383 w_deltas = {} +1384 for name in sorted(obs[i].names): +1385 w_deltas[name] = _reduce_deltas(weight.deltas[name], weight.idl[name], obs[i].idl[name]) +1386 new_samples.append((w_deltas[name] + weight.r_values[name]) * (obs[i].deltas[name] + obs[i].r_values[name])) +1387 tmp_obs = Obs(new_samples, sorted(obs[i].names), idl=[obs[i].idl[name] for name in sorted(obs[i].names)]) +1388 +1389 if kwargs.get('all_configs'): +1390 new_weight = weight +1391 else: +1392 new_weight = Obs([w_deltas[name] + weight.r_values[name] for name in sorted(obs[i].names)], sorted(obs[i].names), idl=[obs[i].idl[name] for name in sorted(obs[i].names)]) +1393 +1394 result.append(tmp_obs / new_weight) +1395 result[-1].reweighted = True +1396 result[-1].is_merged = obs[i].is_merged 1397 -1398 Notes -1399 ----- -1400 Keep in mind to only correlate primary observables which have not been reweighted -1401 yet. The reweighting has to be applied after correlating the observables. -1402 Currently only works if ensembles are identical (this is not strictly necessary). -1403 """ -1404 -1405 if sorted(obs_a.names) != sorted(obs_b.names): -1406 raise Exception('Ensembles do not fit') -1407 if len(obs_a.cov_names) or len(obs_b.cov_names): -1408 raise Exception('Error: Not possible to correlate Obs that contain covobs!') -1409 for name in obs_a.names: -1410 if obs_a.shape[name] != obs_b.shape[name]: -1411 raise Exception('Shapes of ensemble', name, 'do not fit') -1412 if obs_a.idl[name] != obs_b.idl[name]: -1413 raise Exception('idl of ensemble', name, 'do not fit') -1414 -1415 if obs_a.reweighted is True: -1416 warnings.warn("The first observable is already reweighted.", RuntimeWarning) -1417 if obs_b.reweighted is True: -1418 warnings.warn("The second observable is already reweighted.", RuntimeWarning) -1419 -1420 new_samples = [] -1421 new_idl = [] -1422 for name in sorted(obs_a.names): -1423 new_samples.append((obs_a.deltas[name] + obs_a.r_values[name]) * (obs_b.deltas[name] + obs_b.r_values[name])) -1424 new_idl.append(obs_a.idl[name]) -1425 -1426 o = Obs(new_samples, sorted(obs_a.names), idl=new_idl) -1427 o.is_merged = {name: (obs_a.is_merged.get(name, False) or obs_b.is_merged.get(name, False)) for name in o.names} -1428 o.reweighted = obs_a.reweighted or obs_b.reweighted -1429 return o -1430 -1431 -1432def covariance(obs, visualize=False, correlation=False, smooth=None, **kwargs): -1433 r'''Calculates the error covariance matrix of a set of observables. -1434 -1435 The gamma method has to be applied first to all observables. 
-1436 -1437 Parameters -1438 ---------- -1439 obs : list or numpy.ndarray -1440 List or one dimensional array of Obs -1441 visualize : bool -1442 If True plots the corresponding normalized correlation matrix (default False). -1443 correlation : bool -1444 If True the correlation matrix instead of the error covariance matrix is returned (default False). -1445 smooth : None or int -1446 If smooth is an integer 'E' between 2 and the dimension of the matrix minus 1 the eigenvalue -1447 smoothing procedure of hep-lat/9412087 is applied to the correlation matrix which leaves the -1448 largest E eigenvalues essentially unchanged and smoothes the smaller eigenvalues to avoid extremely -1449 small ones. -1450 -1451 Notes -1452 ----- -1453 The error covariance is defined such that it agrees with the squared standard error for two identical observables -1454 $$\operatorname{cov}(a,a)=\sum_{s=1}^N\delta_a^s\delta_a^s/N^2=\Gamma_{aa}(0)/N=\operatorname{var}(a)/N=\sigma_a^2$$ -1455 in the absence of autocorrelation. -1456 The error covariance is estimated by calculating the correlation matrix assuming no autocorrelation and then rescaling the correlation matrix by the full errors including the previous gamma method estimate for the autocorrelation of the observables. The covariance at windowsize 0 is guaranteed to be positive semi-definite -1457 $$\sum_{i,j}v_i\Gamma_{ij}(0)v_j=\frac{1}{N}\sum_{s=1}^N\sum_{i,j}v_i\delta_i^s\delta_j^s v_j=\frac{1}{N}\sum_{s=1}^N\sum_{i}|v_i\delta_i^s|^2\geq 0\,,$$ for every $v\in\mathbb{R}^M$, while such an identity does not hold for larger windows/lags. -1458 For observables defined on a single ensemble our approximation is equivalent to assuming that the integrated autocorrelation time of an off-diagonal element is equal to the geometric mean of the integrated autocorrelation times of the corresponding diagonal elements. -1459 $$\tau_{\mathrm{int}, ij}=\sqrt{\tau_{\mathrm{int}, i}\times \tau_{\mathrm{int}, j}}$$ -1460 This construction ensures that the estimated covariance matrix is positive semi-definite (up to numerical rounding errors). -1461 ''' -1462 -1463 length = len(obs) -1464 -1465 max_samples = np.max([o.N for o in obs]) -1466 if max_samples <= length and not [item for sublist in [o.cov_names for o in obs] for item in sublist]: -1467 warnings.warn(f"The dimension of the covariance matrix ({length}) is larger or equal to the number of samples ({max_samples}). This will result in a rank deficient matrix.", RuntimeWarning) -1468 -1469 cov = np.zeros((length, length)) -1470 for i in range(length): -1471 for j in range(i, length): -1472 cov[i, j] = _covariance_element(obs[i], obs[j]) -1473 cov = cov + cov.T - np.diag(np.diag(cov)) -1474 -1475 corr = np.diag(1 / np.sqrt(np.diag(cov))) @ cov @ np.diag(1 / np.sqrt(np.diag(cov))) -1476 -1477 if isinstance(smooth, int): -1478 corr = _smooth_eigenvalues(corr, smooth) -1479 -1480 if visualize: -1481 plt.matshow(corr, vmin=-1, vmax=1) -1482 plt.set_cmap('RdBu') -1483 plt.colorbar() -1484 plt.draw() -1485 -1486 if correlation is True: -1487 return corr -1488 -1489 errors = [o.dvalue for o in obs] -1490 cov = np.diag(errors) @ corr @ np.diag(errors) -1491 -1492 eigenvalues = np.linalg.eigh(cov)[0] -1493 if not np.all(eigenvalues >= 0): -1494 warnings.warn("Covariance matrix is not positive semi-definite (Eigenvalues: " + str(eigenvalues) + ")", RuntimeWarning) -1495 -1496 return cov -1497 +1398 return result +1399 +1400 +1401def correlate(obs_a, obs_b): +1402 """Correlate two observables. 
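Typical combined use of correlate and covariance (obs_a and obs_b assumed to be primary, not yet reweighted Obs on identical ensembles); covariance requires the gamma method to have been run first:

    ab = correlate(obs_a, obs_b)  # element-wise product observable
    for o in (obs_a, obs_b):
        o.gamma_method()
    cov = covariance([obs_a, obs_b])                     # 2x2 error covariance matrix
    corr = covariance([obs_a, obs_b], correlation=True)  # normalized to unit diagonal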
+1403 +1404 Parameters +1405 ---------- +1406 obs_a : Obs +1407 First observable +1408 obs_b : Obs +1409 Second observable +1410 +1411 Notes +1412 ----- +1413 Keep in mind to only correlate primary observables which have not been reweighted +1414 yet. The reweighting has to be applied after correlating the observables. +1415 Currently only works if ensembles are identical (this is not strictly necessary). +1416 """ +1417 +1418 if sorted(obs_a.names) != sorted(obs_b.names): +1419 raise Exception('Ensembles do not fit') +1420 if len(obs_a.cov_names) or len(obs_b.cov_names): +1421 raise Exception('Error: Not possible to correlate Obs that contain covobs!') +1422 for name in obs_a.names: +1423 if obs_a.shape[name] != obs_b.shape[name]: +1424 raise Exception('Shapes of ensemble', name, 'do not fit') +1425 if obs_a.idl[name] != obs_b.idl[name]: +1426 raise Exception('idl of ensemble', name, 'do not fit') +1427 +1428 if obs_a.reweighted is True: +1429 warnings.warn("The first observable is already reweighted.", RuntimeWarning) +1430 if obs_b.reweighted is True: +1431 warnings.warn("The second observable is already reweighted.", RuntimeWarning) +1432 +1433 new_samples = [] +1434 new_idl = [] +1435 for name in sorted(obs_a.names): +1436 new_samples.append((obs_a.deltas[name] + obs_a.r_values[name]) * (obs_b.deltas[name] + obs_b.r_values[name])) +1437 new_idl.append(obs_a.idl[name]) +1438 +1439 o = Obs(new_samples, sorted(obs_a.names), idl=new_idl) +1440 o.is_merged = {name: (obs_a.is_merged.get(name, False) or obs_b.is_merged.get(name, False)) for name in o.names} +1441 o.reweighted = obs_a.reweighted or obs_b.reweighted +1442 return o +1443 +1444 +1445def covariance(obs, visualize=False, correlation=False, smooth=None, **kwargs): +1446 r'''Calculates the error covariance matrix of a set of observables. +1447 +1448 The gamma method has to be applied first to all observables. +1449 +1450 Parameters +1451 ---------- +1452 obs : list or numpy.ndarray +1453 List or one dimensional array of Obs +1454 visualize : bool +1455 If True plots the corresponding normalized correlation matrix (default False). +1456 correlation : bool +1457 If True the correlation matrix instead of the error covariance matrix is returned (default False). +1458 smooth : None or int +1459 If smooth is an integer 'E' between 2 and the dimension of the matrix minus 1 the eigenvalue +1460 smoothing procedure of hep-lat/9412087 is applied to the correlation matrix which leaves the +1461 largest E eigenvalues essentially unchanged and smoothes the smaller eigenvalues to avoid extremely +1462 small ones. +1463 +1464 Notes +1465 ----- +1466 The error covariance is defined such that it agrees with the squared standard error for two identical observables +1467 $$\operatorname{cov}(a,a)=\sum_{s=1}^N\delta_a^s\delta_a^s/N^2=\Gamma_{aa}(0)/N=\operatorname{var}(a)/N=\sigma_a^2$$ +1468 in the absence of autocorrelation. +1469 The error covariance is estimated by calculating the correlation matrix assuming no autocorrelation and then rescaling the correlation matrix by the full errors including the previous gamma method estimate for the autocorrelation of the observables. The covariance at windowsize 0 is guaranteed to be positive semi-definite +1470 $$\sum_{i,j}v_i\Gamma_{ij}(0)v_j=\frac{1}{N}\sum_{s=1}^N\sum_{i,j}v_i\delta_i^s\delta_j^s v_j=\frac{1}{N}\sum_{s=1}^N\sum_{i}|v_i\delta_i^s|^2\geq 0\,,$$ for every $v\in\mathbb{R}^M$, while such an identity does not hold for larger windows/lags. 
+1471 For observables defined on a single ensemble our approximation is equivalent to assuming that the integrated autocorrelation time of an off-diagonal element is equal to the geometric mean of the integrated autocorrelation times of the corresponding diagonal elements. +1472 $$\tau_{\mathrm{int}, ij}=\sqrt{\tau_{\mathrm{int}, i}\times \tau_{\mathrm{int}, j}}$$ +1473 This construction ensures that the estimated covariance matrix is positive semi-definite (up to numerical rounding errors). +1474 ''' +1475 +1476 length = len(obs) +1477 +1478 max_samples = np.max([o.N for o in obs]) +1479 if max_samples <= length and not [item for sublist in [o.cov_names for o in obs] for item in sublist]: +1480 warnings.warn(f"The dimension of the covariance matrix ({length}) is larger or equal to the number of samples ({max_samples}). This will result in a rank deficient matrix.", RuntimeWarning) +1481 +1482 cov = np.zeros((length, length)) +1483 for i in range(length): +1484 for j in range(i, length): +1485 cov[i, j] = _covariance_element(obs[i], obs[j]) +1486 cov = cov + cov.T - np.diag(np.diag(cov)) +1487 +1488 corr = np.diag(1 / np.sqrt(np.diag(cov))) @ cov @ np.diag(1 / np.sqrt(np.diag(cov))) +1489 +1490 if isinstance(smooth, int): +1491 corr = _smooth_eigenvalues(corr, smooth) +1492 +1493 if visualize: +1494 plt.matshow(corr, vmin=-1, vmax=1) +1495 plt.set_cmap('RdBu') +1496 plt.colorbar() +1497 plt.draw() 1498 -1499def _smooth_eigenvalues(corr, E): -1500 """Eigenvalue smoothing as described in hep-lat/9412087 +1499 if correlation is True: +1500 return corr 1501 -1502 corr : np.ndarray -1503 correlation matrix -1504 E : integer -1505 Number of eigenvalues to be left substantially unchanged -1506 """ -1507 if not (2 < E < corr.shape[0] - 1): -1508 raise Exception(f"'E' has to be between 2 and the dimension of the correlation matrix minus 1 ({corr.shape[0] - 1}).") -1509 vals, vec = np.linalg.eigh(corr) -1510 lambda_min = np.mean(vals[:-E]) -1511 vals[vals < lambda_min] = lambda_min -1512 vals /= np.mean(vals) -1513 return vec @ np.diag(vals) @ vec.T +1502 errors = [o.dvalue for o in obs] +1503 cov = np.diag(errors) @ corr @ np.diag(errors) +1504 +1505 eigenvalues = np.linalg.eigh(cov)[0] +1506 if not np.all(eigenvalues >= 0): +1507 warnings.warn("Covariance matrix is not positive semi-definite (Eigenvalues: " + str(eigenvalues) + ")", RuntimeWarning) +1508 +1509 return cov +1510 +1511 +1512def _smooth_eigenvalues(corr, E): +1513 """Eigenvalue smoothing as described in hep-lat/9412087 1514 -1515 -1516def _covariance_element(obs1, obs2): -1517 """Estimates the covariance of two Obs objects, neglecting autocorrelations.""" -1518 -1519 def calc_gamma(deltas1, deltas2, idx1, idx2, new_idx): -1520 deltas1 = _collapse_deltas_for_merge(deltas1, idx1, len(idx1), new_idx) -1521 deltas2 = _collapse_deltas_for_merge(deltas2, idx2, len(idx2), new_idx) -1522 return np.sum(deltas1 * deltas2) -1523 -1524 if set(obs1.names).isdisjoint(set(obs2.names)): -1525 return 0.0 -1526 -1527 if not hasattr(obs1, 'e_dvalue') or not hasattr(obs2, 'e_dvalue'): -1528 raise Exception('The gamma method has to be applied to both Obs first.') -1529 -1530 dvalue = 0.0 +1515 corr : np.ndarray +1516 correlation matrix +1517 E : integer +1518 Number of eigenvalues to be left substantially unchanged +1519 """ +1520 if not (2 < E < corr.shape[0] - 1): +1521 raise Exception(f"'E' has to be between 2 and the dimension of the correlation matrix minus 1 ({corr.shape[0] - 1}).") +1522 vals, vec = np.linalg.eigh(corr) +1523 lambda_min = 
np.mean(vals[:-E]) +1524 vals[vals < lambda_min] = lambda_min +1525 vals /= np.mean(vals) +1526 return vec @ np.diag(vals) @ vec.T +1527 +1528 +1529def _covariance_element(obs1, obs2): +1530 """Estimates the covariance of two Obs objects, neglecting autocorrelations.""" 1531 -1532 for e_name in obs1.mc_names: -1533 -1534 if e_name not in obs2.mc_names: -1535 continue +1532 def calc_gamma(deltas1, deltas2, idx1, idx2, new_idx): +1533 deltas1 = _collapse_deltas_for_merge(deltas1, idx1, len(idx1), new_idx) +1534 deltas2 = _collapse_deltas_for_merge(deltas2, idx2, len(idx2), new_idx) +1535 return np.sum(deltas1 * deltas2) 1536 -1537 idl_d = {} -1538 for r_name in obs1.e_content[e_name]: -1539 if r_name not in obs2.e_content[e_name]: -1540 continue -1541 idl_d[r_name] = _intersection_idx([obs1.idl[r_name], obs2.idl[r_name]]) +1537 if set(obs1.names).isdisjoint(set(obs2.names)): +1538 return 0.0 +1539 +1540 if not hasattr(obs1, 'e_dvalue') or not hasattr(obs2, 'e_dvalue'): +1541 raise Exception('The gamma method has to be applied to both Obs first.') 1542 -1543 gamma = 0.0 +1543 dvalue = 0.0 1544 -1545 for r_name in obs1.e_content[e_name]: -1546 if r_name not in obs2.e_content[e_name]: -1547 continue -1548 if len(idl_d[r_name]) == 0: -1549 continue -1550 gamma += calc_gamma(obs1.deltas[r_name], obs2.deltas[r_name], obs1.idl[r_name], obs2.idl[r_name], idl_d[r_name]) -1551 -1552 if gamma == 0.0: -1553 continue -1554 -1555 gamma_div = 0.0 -1556 for r_name in obs1.e_content[e_name]: -1557 if r_name not in obs2.e_content[e_name]: -1558 continue -1559 if len(idl_d[r_name]) == 0: +1545 for e_name in obs1.mc_names: +1546 +1547 if e_name not in obs2.mc_names: +1548 continue +1549 +1550 idl_d = {} +1551 for r_name in obs1.e_content[e_name]: +1552 if r_name not in obs2.e_content[e_name]: +1553 continue +1554 idl_d[r_name] = _intersection_idx([obs1.idl[r_name], obs2.idl[r_name]]) +1555 +1556 gamma = 0.0 +1557 +1558 for r_name in obs1.e_content[e_name]: +1559 if r_name not in obs2.e_content[e_name]: 1560 continue -1561 gamma_div += np.sqrt(calc_gamma(obs1.deltas[r_name], obs1.deltas[r_name], obs1.idl[r_name], obs1.idl[r_name], idl_d[r_name]) * calc_gamma(obs2.deltas[r_name], obs2.deltas[r_name], obs2.idl[r_name], obs2.idl[r_name], idl_d[r_name])) -1562 gamma /= gamma_div -1563 -1564 dvalue += gamma -1565 -1566 for e_name in obs1.cov_names: +1561 if len(idl_d[r_name]) == 0: +1562 continue +1563 gamma += calc_gamma(obs1.deltas[r_name], obs2.deltas[r_name], obs1.idl[r_name], obs2.idl[r_name], idl_d[r_name]) +1564 +1565 if gamma == 0.0: +1566 continue 1567 -1568 if e_name not in obs2.cov_names: -1569 continue -1570 -1571 dvalue += float(np.dot(np.transpose(obs1.covobs[e_name].grad), np.dot(obs1.covobs[e_name].cov, obs2.covobs[e_name].grad))) -1572 -1573 return dvalue -1574 -1575 -1576def import_jackknife(jacks, name, idl=None): -1577 """Imports jackknife samples and returns an Obs +1568 gamma_div = 0.0 +1569 for r_name in obs1.e_content[e_name]: +1570 if r_name not in obs2.e_content[e_name]: +1571 continue +1572 if len(idl_d[r_name]) == 0: +1573 continue +1574 gamma_div += np.sqrt(calc_gamma(obs1.deltas[r_name], obs1.deltas[r_name], obs1.idl[r_name], obs1.idl[r_name], idl_d[r_name]) * calc_gamma(obs2.deltas[r_name], obs2.deltas[r_name], obs2.idl[r_name], obs2.idl[r_name], idl_d[r_name])) +1575 gamma /= gamma_div +1576 +1577 dvalue += gamma 1578 -1579 Parameters -1580 ---------- -1581 jacks : numpy.ndarray -1582 numpy array containing the mean value as zeroth entry and -1583 the N jackknife samples as first to 
Nth entry. -1584 name : str -1585 name of the ensemble the samples are defined on. -1586 """ -1587 length = len(jacks) - 1 -1588 prj = (np.ones((length, length)) - (length - 1) * np.identity(length)) -1589 samples = jacks[1:] @ prj -1590 mean = np.mean(samples) -1591 new_obs = Obs([samples - mean], [name], idl=idl, means=[mean]) -1592 new_obs._value = jacks[0] -1593 return new_obs -1594 -1595 -1596def merge_obs(list_of_obs): -1597 """Combine all observables in list_of_obs into one new observable -1598 -1599 Parameters -1600 ---------- -1601 list_of_obs : list -1602 list of the Obs object to be combined -1603 -1604 Notes -1605 ----- -1606 It is not possible to combine obs which are based on the same replicum -1607 """ -1608 replist = [item for obs in list_of_obs for item in obs.names] -1609 if (len(replist) == len(set(replist))) is False: -1610 raise Exception('list_of_obs contains duplicate replica: %s' % (str(replist))) -1611 if any([len(o.cov_names) for o in list_of_obs]): -1612 raise Exception('Not possible to merge data that contains covobs!') -1613 new_dict = {} -1614 idl_dict = {} -1615 for o in list_of_obs: -1616 new_dict.update({key: o.deltas.get(key, 0) + o.r_values.get(key, 0) -1617 for key in set(o.deltas) | set(o.r_values)}) -1618 idl_dict.update({key: o.idl.get(key, 0) for key in set(o.deltas)}) -1619 -1620 names = sorted(new_dict.keys()) -1621 o = Obs([new_dict[name] for name in names], names, idl=[idl_dict[name] for name in names]) -1622 o.is_merged = {name: np.any([oi.is_merged.get(name, False) for oi in list_of_obs]) for name in o.names} -1623 o.reweighted = np.max([oi.reweighted for oi in list_of_obs]) -1624 return o -1625 -1626 -1627def cov_Obs(means, cov, name, grad=None): -1628 """Create an Obs based on mean(s) and a covariance matrix -1629 -1630 Parameters -1631 ---------- -1632 mean : list of floats or float -1633 N mean value(s) of the new Obs -1634 cov : list or array -1635 2d (NxN) Covariance matrix, 1d diagonal entries or 0d covariance -1636 name : str -1637 identifier for the covariance matrix -1638 grad : list or array -1639 Gradient of the Covobs wrt. the means belonging to cov. -1640 """ -1641 -1642 def covobs_to_obs(co): -1643 """Make an Obs out of a Covobs -1644 -1645 Parameters -1646 ---------- -1647 co : Covobs -1648 Covobs to be embedded into the Obs -1649 """ -1650 o = Obs([], [], means=[]) -1651 o._value = co.value -1652 o.names.append(co.name) -1653 o._covobs[co.name] = co -1654 o._dvalue = np.sqrt(co.errsq()) -1655 return o -1656 -1657 ol = [] -1658 if isinstance(means, (float, int)): -1659 means = [means] -1660 -1661 for i in range(len(means)): -1662 ol.append(covobs_to_obs(Covobs(means[i], cov, name, pos=i, grad=grad))) -1663 if ol[0].covobs[name].N != len(means): -1664 raise Exception('You have to provide %d mean values!' % (ol[0].N)) -1665 if len(ol) == 1: -1666 return ol[0] -1667 return ol +1579 for e_name in obs1.cov_names: +1580 +1581 if e_name not in obs2.cov_names: +1582 continue +1583 +1584 dvalue += float(np.dot(np.transpose(obs1.covobs[e_name].grad), np.dot(obs1.covobs[e_name].cov, obs2.covobs[e_name].grad))) +1585 +1586 return dvalue +1587 +1588 +1589def import_jackknife(jacks, name, idl=None): +1590 """Imports jackknife samples and returns an Obs +1591 +1592 Parameters +1593 ---------- +1594 jacks : numpy.ndarray +1595 numpy array containing the mean value as zeroth entry and +1596 the N jackknife samples as first to Nth entry. +1597 name : str +1598 name of the ensemble the samples are defined on. 
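A self-contained sketch of the expected layout of jacks (synthetic data, names illustrative): the projection applied by import_jackknife exactly inverts the leave-one-out averaging.

    import numpy as np
    N = 100
    samples = np.random.normal(size=N)
    jacks = np.empty(N + 1)
    jacks[0] = samples.mean()
    jacks[1:] = (samples.sum() - samples) / (N - 1)  # leave-one-out (jackknife) means
    obs = import_jackknife(jacks, 'ensemble1')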
+1599 """ +1600 length = len(jacks) - 1 +1601 prj = (np.ones((length, length)) - (length - 1) * np.identity(length)) +1602 samples = jacks[1:] @ prj +1603 mean = np.mean(samples) +1604 new_obs = Obs([samples - mean], [name], idl=idl, means=[mean]) +1605 new_obs._value = jacks[0] +1606 return new_obs +1607 +1608 +1609def merge_obs(list_of_obs): +1610 """Combine all observables in list_of_obs into one new observable +1611 +1612 Parameters +1613 ---------- +1614 list_of_obs : list +1615 list of the Obs object to be combined +1616 +1617 Notes +1618 ----- +1619 It is not possible to combine obs which are based on the same replicum +1620 """ +1621 replist = [item for obs in list_of_obs for item in obs.names] +1622 if (len(replist) == len(set(replist))) is False: +1623 raise Exception('list_of_obs contains duplicate replica: %s' % (str(replist))) +1624 if any([len(o.cov_names) for o in list_of_obs]): +1625 raise Exception('Not possible to merge data that contains covobs!') +1626 new_dict = {} +1627 idl_dict = {} +1628 for o in list_of_obs: +1629 new_dict.update({key: o.deltas.get(key, 0) + o.r_values.get(key, 0) +1630 for key in set(o.deltas) | set(o.r_values)}) +1631 idl_dict.update({key: o.idl.get(key, 0) for key in set(o.deltas)}) +1632 +1633 names = sorted(new_dict.keys()) +1634 o = Obs([new_dict[name] for name in names], names, idl=[idl_dict[name] for name in names]) +1635 o.is_merged = {name: np.any([oi.is_merged.get(name, False) for oi in list_of_obs]) for name in o.names} +1636 o.reweighted = np.max([oi.reweighted for oi in list_of_obs]) +1637 return o +1638 +1639 +1640def cov_Obs(means, cov, name, grad=None): +1641 """Create an Obs based on mean(s) and a covariance matrix +1642 +1643 Parameters +1644 ---------- +1645 mean : list of floats or float +1646 N mean value(s) of the new Obs +1647 cov : list or array +1648 2d (NxN) Covariance matrix, 1d diagonal entries or 0d covariance +1649 name : str +1650 identifier for the covariance matrix +1651 grad : list or array +1652 Gradient of the Covobs wrt. the means belonging to cov. +1653 """ +1654 +1655 def covobs_to_obs(co): +1656 """Make an Obs out of a Covobs +1657 +1658 Parameters +1659 ---------- +1660 co : Covobs +1661 Covobs to be embedded into the Obs +1662 """ +1663 o = Obs([], [], means=[]) +1664 o._value = co.value +1665 o.names.append(co.name) +1666 o._covobs[co.name] = co +1667 o._dvalue = np.sqrt(co.errsq()) +1668 return o +1669 +1670 ol = [] +1671 if isinstance(means, (float, int)): +1672 means = [means] +1673 +1674 for i in range(len(means)): +1675 ol.append(covobs_to_obs(Covobs(means[i], cov, name, pos=i, grad=grad))) +1676 if ol[0].covobs[name].N != len(means): +1677 raise Exception('You have to provide %d mean values!' % (ol[0].N)) +1678 if len(ol) == 1: +1679 return ol[0] +1680 return ol @@ -2145,573 +2158,586 @@ 280 tmp = self.e_rho[e_name][i + 1:w_max] + np.concatenate([self.e_rho[e_name][i - 1::-1], self.e_rho[e_name][1:w_max - 2 * i]]) - 2 * self.e_rho[e_name][i] * self.e_rho[e_name][1:w_max - i] 281 self.e_drho[e_name][i] = np.sqrt(np.sum(tmp ** 2) / e_N) 282 -283 _compute_drho(1) -284 if self.tau_exp[e_name] > 0: -285 texp = self.tau_exp[e_name] -286 # Critical slowing down analysis -287 if w_max // 2 <= 1: -288 raise Exception("Need at least 8 samples for tau_exp error analysis") -289 for n in range(1, w_max // 2): -290 _compute_drho(n + 1) -291 if (self.e_rho[e_name][n] - self.N_sigma[e_name] * self.e_drho[e_name][n]) < 0 or n >= w_max // 2 - 2: -292 # Bias correction hep-lat/0306017 eq. 
(49) included -293 self.e_tauint[e_name] = self.e_n_tauint[e_name][n] * (1 + (2 * n + 1) / e_N) / (1 + 1 / e_N) + texp * np.abs(self.e_rho[e_name][n + 1]) # The absolute makes sure, that the tail contribution is always positive -294 self.e_dtauint[e_name] = np.sqrt(self.e_n_dtauint[e_name][n] ** 2 + texp ** 2 * self.e_drho[e_name][n + 1] ** 2) -295 # Error of tau_exp neglected so far, missing term: self.e_rho[e_name][n + 1] ** 2 * d_tau_exp ** 2 -296 self.e_dvalue[e_name] = np.sqrt(2 * self.e_tauint[e_name] * e_gamma[e_name][0] * (1 + 1 / e_N) / e_N) -297 self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt((n + 0.5) / e_N) -298 self.e_windowsize[e_name] = n -299 break -300 else: -301 if self.S[e_name] == 0.0: -302 self.e_tauint[e_name] = 0.5 -303 self.e_dtauint[e_name] = 0.0 -304 self.e_dvalue[e_name] = np.sqrt(e_gamma[e_name][0] / (e_N - 1)) -305 self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt(0.5 / e_N) -306 self.e_windowsize[e_name] = 0 -307 else: -308 # Standard automatic windowing procedure -309 tau = self.S[e_name] / np.log((2 * self.e_n_tauint[e_name][1:] + 1) / (2 * self.e_n_tauint[e_name][1:] - 1)) -310 g_w = np.exp(- np.arange(1, w_max) / tau) - tau / np.sqrt(np.arange(1, w_max) * e_N) -311 for n in range(1, w_max): -312 if n < w_max // 2 - 2: -313 _compute_drho(n + 1) -314 if g_w[n - 1] < 0 or n >= w_max - 1: -315 self.e_tauint[e_name] = self.e_n_tauint[e_name][n] * (1 + (2 * n + 1) / e_N) / (1 + 1 / e_N) # Bias correction hep-lat/0306017 eq. (49) -316 self.e_dtauint[e_name] = self.e_n_dtauint[e_name][n] -317 self.e_dvalue[e_name] = np.sqrt(2 * self.e_tauint[e_name] * e_gamma[e_name][0] * (1 + 1 / e_N) / e_N) -318 self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt((n + 0.5) / e_N) -319 self.e_windowsize[e_name] = n -320 break -321 -322 self._dvalue += self.e_dvalue[e_name] ** 2 -323 self.ddvalue += (self.e_dvalue[e_name] * self.e_ddvalue[e_name]) ** 2 -324 -325 for e_name in self.cov_names: -326 self.e_dvalue[e_name] = np.sqrt(self.covobs[e_name].errsq()) -327 self.e_ddvalue[e_name] = 0 -328 self._dvalue += self.e_dvalue[e_name]**2 -329 -330 self._dvalue = np.sqrt(self._dvalue) -331 if self._dvalue == 0.0: -332 self.ddvalue = 0.0 -333 else: -334 self.ddvalue = np.sqrt(self.ddvalue) / self._dvalue -335 return -336 -337 def _calc_gamma(self, deltas, idx, shape, w_max, fft): -338 """Calculate Gamma_{AA} from the deltas, which are defined on idx. -339 idx is assumed to be a contiguous range (possibly with a stepsize != 1) -340 -341 Parameters -342 ---------- -343 deltas : list -344 List of fluctuations -345 idx : list -346 List or range of configurations on which the deltas are defined. -347 shape : int -348 Number of configurations in idx. -349 w_max : int -350 Upper bound for the summation window. -351 fft : bool -352 determines whether the fft algorithm is used for the computation -353 of the autocorrelation function. -354 """ -355 gamma = np.zeros(w_max) -356 deltas = _expand_deltas(deltas, idx, shape) -357 new_shape = len(deltas) -358 if fft: -359 max_gamma = min(new_shape, w_max) -360 # The padding for the fft has to be even -361 padding = new_shape + max_gamma + (new_shape + max_gamma) % 2 -362 gamma[:max_gamma] += np.fft.irfft(np.abs(np.fft.rfft(deltas, padding)) ** 2)[:max_gamma] -363 else: -364 for n in range(w_max): -365 if new_shape - n >= 0: -366 gamma[n] += deltas[0:new_shape - n].dot(deltas[n:new_shape]) -367 -368 return gamma -369 -370 def details(self, ens_content=True): -371 """Output detailed properties of the Obs. 
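The fft branch of _calc_gamma computes the same sliding dot product as the explicit loop; the zero padding makes the circular FFT correlation agree with the linear one for all lags below w_max. A minimal numpy check with synthetic data:

    import numpy as np
    deltas = np.random.normal(size=100)
    w_max = 50
    padding = len(deltas) + w_max + (len(deltas) + w_max) % 2
    gamma_fft = np.fft.irfft(np.abs(np.fft.rfft(deltas, padding)) ** 2)[:w_max]
    gamma_direct = np.array([deltas[:len(deltas) - n].dot(deltas[n:]) for n in range(w_max)])
    assert np.allclose(gamma_fft, gamma_direct)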
-372 -373 Parameters -374 ---------- -375 ens_content : bool -376 print details about the ensembles and replica if true. -377 """ -378 if self.tag is not None: -379 print("Description:", self.tag) -380 if not hasattr(self, 'e_dvalue'): -381 print('Result\t %3.8e' % (self.value)) -382 else: -383 if self.value == 0.0: -384 percentage = np.nan -385 else: -386 percentage = np.abs(self._dvalue / self.value) * 100 -387 print('Result\t %3.8e +/- %3.8e +/- %3.8e (%3.3f%%)' % (self.value, self._dvalue, self.ddvalue, percentage)) -388 if len(self.e_names) > 1: -389 print(' Ensemble errors:') -390 for e_name in self.mc_names: -391 if len(self.e_names) > 1: -392 print('', e_name, '\t %3.8e +/- %3.8e' % (self.e_dvalue[e_name], self.e_ddvalue[e_name])) -393 if self.tau_exp[e_name] > 0: -394 print(' t_int\t %3.8e +/- %3.8e tau_exp = %3.2f, N_sigma = %1.0i' % (self.e_tauint[e_name], self.e_dtauint[e_name], self.tau_exp[e_name], self.N_sigma[e_name])) -395 else: -396 print(' t_int\t %3.8e +/- %3.8e S = %3.2f' % (self.e_tauint[e_name], self.e_dtauint[e_name], self.S[e_name])) -397 for e_name in self.cov_names: -398 print('', e_name, '\t %3.8e' % (self.e_dvalue[e_name])) -399 if ens_content is True: -400 if len(self.e_names) == 1: -401 print(self.N, 'samples in', len(self.e_names), 'ensemble:') -402 else: -403 print(self.N, 'samples in', len(self.e_names), 'ensembles:') -404 my_string_list = [] -405 for key, value in sorted(self.e_content.items()): -406 if key not in self.covobs: -407 my_string = ' ' + "\u00B7 Ensemble '" + key + "' " -408 if len(value) == 1: -409 my_string += f': {self.shape[value[0]]} configurations' -410 if isinstance(self.idl[value[0]], range): -411 my_string += f' (from {self.idl[value[0]].start} to {self.idl[value[0]][-1]}' + int(self.idl[value[0]].step != 1) * f' in steps of {self.idl[value[0]].step}' + ')' -412 else: -413 my_string += ' (irregular range)' -414 else: -415 sublist = [] -416 for v in value: -417 my_substring = ' ' + "\u00B7 Replicum '" + v[len(key) + 1:] + "' " -418 my_substring += f': {self.shape[v]} configurations' -419 if isinstance(self.idl[v], range): -420 my_substring += f' (from {self.idl[v].start} to {self.idl[v][-1]}' + int(self.idl[v].step != 1) * f' in steps of {self.idl[v].step}' + ')' -421 else: -422 my_substring += ' (irregular range)' -423 sublist.append(my_substring) -424 -425 my_string += '\n' + '\n'.join(sublist) -426 else: -427 my_string = ' ' + "\u00B7 Covobs '" + key + "' " -428 my_string_list.append(my_string) -429 print('\n'.join(my_string_list)) -430 -431 def reweight(self, weight): -432 """Reweight the obs with given rewighting factors. -433 -434 Parameters -435 ---------- -436 weight : Obs -437 Reweighting factor. An Observable that has to be defined on a superset of the -438 configurations in obs[i].idl for all i. -439 all_configs : bool -440 if True, the reweighted observables are normalized by the average of -441 the reweighting factor on all configurations in weight.idl and not -442 on the configurations in obs[i].idl. Default False. -443 """ -444 return reweight(weight, [self])[0] -445 -446 def is_zero_within_error(self, sigma=1): -447 """Checks whether the observable is zero within 'sigma' standard errors. -448 -449 Parameters -450 ---------- -451 sigma : int -452 Number of standard errors used for the check. -453 -454 Works only properly when the gamma method was run. 
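Usage sketch (diff assumed to be an Obs): because the check compares against the estimated error, the gamma method has to be run first.

    diff.gamma_method()
    if diff.is_zero_within_error(sigma=2):
        print('compatible with zero at the 2 sigma level')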
-455 """ -456 return self.is_zero() or np.abs(self.value) <= sigma * self._dvalue -457 -458 def is_zero(self, atol=1e-10): -459 """Checks whether the observable is zero within a given tolerance. -460 -461 Parameters -462 ---------- -463 atol : float -464 Absolute tolerance (for details see numpy documentation). -465 """ -466 return np.isclose(0.0, self.value, 1e-14, atol) and all(np.allclose(0.0, delta, 1e-14, atol) for delta in self.deltas.values()) and all(np.allclose(0.0, delta.errsq(), 1e-14, atol) for delta in self.covobs.values()) -467 -468 def plot_tauint(self, save=None): -469 """Plot integrated autocorrelation time for each ensemble. +283 gaps = [] +284 for r_name in e_content[e_name]: +285 if isinstance(self.idl[r_name], range): +286 gaps.append(1) +287 else: +288 gaps.append(np.min(np.diff(self.idl[r_name]))) +289 +290 if not np.all([gi == gaps[0] for gi in gaps]): +291 raise Exception(f"Replica for ensemble {e_name} are not equally spaced.", gaps) +292 else: +293 gapsize = gaps[0] +294 +295 _compute_drho(gapsize) +296 if self.tau_exp[e_name] > 0: +297 texp = self.tau_exp[e_name] +298 # Critical slowing down analysis +299 if w_max // 2 <= 1: +300 raise Exception("Need at least 8 samples for tau_exp error analysis") +301 for n in range(gapsize, w_max // 2, gapsize): +302 _compute_drho(n + gapsize) +303 if (self.e_rho[e_name][n] - self.N_sigma[e_name] * self.e_drho[e_name][n]) < 0 or n >= w_max // 2 - 2: +304 # Bias correction hep-lat/0306017 eq. (49) included +305 self.e_tauint[e_name] = self.e_n_tauint[e_name][n] * (1 + (2 * n / gapsize + 1) / e_N) / (1 + 1 / e_N) + texp * np.abs(self.e_rho[e_name][n + 1]) # The absolute makes sure, that the tail contribution is always positive +306 self.e_dtauint[e_name] = np.sqrt(self.e_n_dtauint[e_name][n] ** 2 + texp ** 2 * self.e_drho[e_name][n + 1] ** 2) +307 # Error of tau_exp neglected so far, missing term: self.e_rho[e_name][n + 1] ** 2 * d_tau_exp ** 2 +308 self.e_dvalue[e_name] = np.sqrt(2 * self.e_tauint[e_name] * e_gamma[e_name][0] * (1 + 1 / e_N) / e_N) +309 self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt((n / gapsize + 0.5) / e_N) +310 self.e_windowsize[e_name] = n +311 break +312 else: +313 if self.S[e_name] == 0.0: +314 self.e_tauint[e_name] = 0.5 +315 self.e_dtauint[e_name] = 0.0 +316 self.e_dvalue[e_name] = np.sqrt(e_gamma[e_name][0] / (e_N - 1)) +317 self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt(0.5 / e_N) +318 self.e_windowsize[e_name] = 0 +319 else: +320 # Standard automatic windowing procedure +321 tau = self.S[e_name] / np.log((2 * self.e_n_tauint[e_name][gapsize::gapsize] + 1) / (2 * self.e_n_tauint[e_name][gapsize::gapsize] - 1)) +322 g_w = np.exp(- np.arange(1, len(tau) + 1) / tau) - tau / np.sqrt(np.arange(1, len(tau) + 1) * e_N) +323 for n in range(1, w_max): +324 if n < w_max // 2 - 2: +325 _compute_drho(gapsize * n + gapsize) +326 if g_w[n - 1] < 0 or n >= w_max - 1: +327 n *= gapsize +328 self.e_tauint[e_name] = self.e_n_tauint[e_name][n] * (1 + (2 * n / gapsize + 1) / e_N) / (1 + 1 / e_N) # Bias correction hep-lat/0306017 eq. 
(49) +329 self.e_dtauint[e_name] = self.e_n_dtauint[e_name][n] +330 self.e_dvalue[e_name] = np.sqrt(2 * self.e_tauint[e_name] * e_gamma[e_name][0] * (1 + 1 / e_N) / e_N) +331 self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt((n / gapsize + 0.5) / e_N) +332 self.e_windowsize[e_name] = n +333 break +334 +335 self._dvalue += self.e_dvalue[e_name] ** 2 +336 self.ddvalue += (self.e_dvalue[e_name] * self.e_ddvalue[e_name]) ** 2 +337 +338 for e_name in self.cov_names: +339 self.e_dvalue[e_name] = np.sqrt(self.covobs[e_name].errsq()) +340 self.e_ddvalue[e_name] = 0 +341 self._dvalue += self.e_dvalue[e_name]**2 +342 +343 self._dvalue = np.sqrt(self._dvalue) +344 if self._dvalue == 0.0: +345 self.ddvalue = 0.0 +346 else: +347 self.ddvalue = np.sqrt(self.ddvalue) / self._dvalue +348 return +349 +350 def _calc_gamma(self, deltas, idx, shape, w_max, fft): +351 """Calculate Gamma_{AA} from the deltas, which are defined on idx. +352 idx is assumed to be a contiguous range (possibly with a stepsize != 1) +353 +354 Parameters +355 ---------- +356 deltas : list +357 List of fluctuations +358 idx : list +359 List or range of configurations on which the deltas are defined. +360 shape : int +361 Number of configurations in idx. +362 w_max : int +363 Upper bound for the summation window. +364 fft : bool +365 determines whether the fft algorithm is used for the computation +366 of the autocorrelation function. +367 """ +368 gamma = np.zeros(w_max) +369 deltas = _expand_deltas(deltas, idx, shape) +370 new_shape = len(deltas) +371 if fft: +372 max_gamma = min(new_shape, w_max) +373 # The padding for the fft has to be even +374 padding = new_shape + max_gamma + (new_shape + max_gamma) % 2 +375 gamma[:max_gamma] += np.fft.irfft(np.abs(np.fft.rfft(deltas, padding)) ** 2)[:max_gamma] +376 else: +377 for n in range(w_max): +378 if new_shape - n >= 0: +379 gamma[n] += deltas[0:new_shape - n].dot(deltas[n:new_shape]) +380 +381 return gamma +382 +383 def details(self, ens_content=True): +384 """Output detailed properties of the Obs. +385 +386 Parameters +387 ---------- +388 ens_content : bool +389 print details about the ensembles and replica if true. 
+390 """ +391 if self.tag is not None: +392 print("Description:", self.tag) +393 if not hasattr(self, 'e_dvalue'): +394 print('Result\t %3.8e' % (self.value)) +395 else: +396 if self.value == 0.0: +397 percentage = np.nan +398 else: +399 percentage = np.abs(self._dvalue / self.value) * 100 +400 print('Result\t %3.8e +/- %3.8e +/- %3.8e (%3.3f%%)' % (self.value, self._dvalue, self.ddvalue, percentage)) +401 if len(self.e_names) > 1: +402 print(' Ensemble errors:') +403 for e_name in self.mc_names: +404 if len(self.e_names) > 1: +405 print('', e_name, '\t %3.8e +/- %3.8e' % (self.e_dvalue[e_name], self.e_ddvalue[e_name])) +406 if self.tau_exp[e_name] > 0: +407 print(' t_int\t %3.8e +/- %3.8e tau_exp = %3.2f, N_sigma = %1.0i' % (self.e_tauint[e_name], self.e_dtauint[e_name], self.tau_exp[e_name], self.N_sigma[e_name])) +408 else: +409 print(' t_int\t %3.8e +/- %3.8e S = %3.2f' % (self.e_tauint[e_name], self.e_dtauint[e_name], self.S[e_name])) +410 for e_name in self.cov_names: +411 print('', e_name, '\t %3.8e' % (self.e_dvalue[e_name])) +412 if ens_content is True: +413 if len(self.e_names) == 1: +414 print(self.N, 'samples in', len(self.e_names), 'ensemble:') +415 else: +416 print(self.N, 'samples in', len(self.e_names), 'ensembles:') +417 my_string_list = [] +418 for key, value in sorted(self.e_content.items()): +419 if key not in self.covobs: +420 my_string = ' ' + "\u00B7 Ensemble '" + key + "' " +421 if len(value) == 1: +422 my_string += f': {self.shape[value[0]]} configurations' +423 if isinstance(self.idl[value[0]], range): +424 my_string += f' (from {self.idl[value[0]].start} to {self.idl[value[0]][-1]}' + int(self.idl[value[0]].step != 1) * f' in steps of {self.idl[value[0]].step}' + ')' +425 else: +426 my_string += ' (irregular range)' +427 else: +428 sublist = [] +429 for v in value: +430 my_substring = ' ' + "\u00B7 Replicum '" + v[len(key) + 1:] + "' " +431 my_substring += f': {self.shape[v]} configurations' +432 if isinstance(self.idl[v], range): +433 my_substring += f' (from {self.idl[v].start} to {self.idl[v][-1]}' + int(self.idl[v].step != 1) * f' in steps of {self.idl[v].step}' + ')' +434 else: +435 my_substring += ' (irregular range)' +436 sublist.append(my_substring) +437 +438 my_string += '\n' + '\n'.join(sublist) +439 else: +440 my_string = ' ' + "\u00B7 Covobs '" + key + "' " +441 my_string_list.append(my_string) +442 print('\n'.join(my_string_list)) +443 +444 def reweight(self, weight): +445 """Reweight the obs with given rewighting factors. +446 +447 Parameters +448 ---------- +449 weight : Obs +450 Reweighting factor. An Observable that has to be defined on a superset of the +451 configurations in obs[i].idl for all i. +452 all_configs : bool +453 if True, the reweighted observables are normalized by the average of +454 the reweighting factor on all configurations in weight.idl and not +455 on the configurations in obs[i].idl. Default False. +456 """ +457 return reweight(weight, [self])[0] +458 +459 def is_zero_within_error(self, sigma=1): +460 """Checks whether the observable is zero within 'sigma' standard errors. +461 +462 Parameters +463 ---------- +464 sigma : int +465 Number of standard errors used for the check. +466 +467 Works only properly when the gamma method was run. +468 """ +469 return self.is_zero() or np.abs(self.value) <= sigma * self._dvalue 470 -471 Parameters -472 ---------- -473 save : str -474 saves the figure to a file named 'save' if. 
-475 """ -476 if not hasattr(self, 'e_dvalue'): -477 raise Exception('Run the gamma method first.') -478 -479 for e, e_name in enumerate(self.mc_names): -480 fig = plt.figure() -481 plt.xlabel(r'$W$') -482 plt.ylabel(r'$\tau_\mathrm{int}$') -483 length = int(len(self.e_n_tauint[e_name])) -484 if self.tau_exp[e_name] > 0: -485 base = self.e_n_tauint[e_name][self.e_windowsize[e_name]] -486 x_help = np.arange(2 * self.tau_exp[e_name]) -487 y_help = (x_help + 1) * np.abs(self.e_rho[e_name][self.e_windowsize[e_name] + 1]) * (1 - x_help / (2 * (2 * self.tau_exp[e_name] - 1))) + base -488 x_arr = np.arange(self.e_windowsize[e_name] + 1, self.e_windowsize[e_name] + 1 + 2 * self.tau_exp[e_name]) -489 plt.plot(x_arr, y_help, 'C' + str(e), linewidth=1, ls='--', marker=',') -490 plt.errorbar([self.e_windowsize[e_name] + 2 * self.tau_exp[e_name]], [self.e_tauint[e_name]], -491 yerr=[self.e_dtauint[e_name]], fmt='C' + str(e), linewidth=1, capsize=2, marker='o', mfc=plt.rcParams['axes.facecolor']) -492 xmax = self.e_windowsize[e_name] + 2 * self.tau_exp[e_name] + 1.5 -493 label = e_name + r', $\tau_\mathrm{exp}$=' + str(np.around(self.tau_exp[e_name], decimals=2)) -494 else: -495 label = e_name + ', S=' + str(np.around(self.S[e_name], decimals=2)) -496 xmax = max(10.5, 2 * self.e_windowsize[e_name] - 0.5) -497 -498 plt.errorbar(np.arange(length)[:int(xmax) + 1], self.e_n_tauint[e_name][:int(xmax) + 1], yerr=self.e_n_dtauint[e_name][:int(xmax) + 1], linewidth=1, capsize=2, label=label) -499 plt.axvline(x=self.e_windowsize[e_name], color='C' + str(e), alpha=0.5, marker=',', ls='--') -500 plt.legend() -501 plt.xlim(-0.5, xmax) -502 ylim = plt.ylim() -503 plt.ylim(bottom=0.0, top=max(1.0, ylim[1])) -504 plt.draw() -505 if save: -506 fig.savefig(save + "_" + str(e)) -507 -508 def plot_rho(self, save=None): -509 """Plot normalized autocorrelation function time for each ensemble. +471 def is_zero(self, atol=1e-10): +472 """Checks whether the observable is zero within a given tolerance. +473 +474 Parameters +475 ---------- +476 atol : float +477 Absolute tolerance (for details see numpy documentation). +478 """ +479 return np.isclose(0.0, self.value, 1e-14, atol) and all(np.allclose(0.0, delta, 1e-14, atol) for delta in self.deltas.values()) and all(np.allclose(0.0, delta.errsq(), 1e-14, atol) for delta in self.covobs.values()) +480 +481 def plot_tauint(self, save=None): +482 """Plot integrated autocorrelation time for each ensemble. +483 +484 Parameters +485 ---------- +486 save : str +487 saves the figure to a file named 'save' if. 
+488 """ +489 if not hasattr(self, 'e_dvalue'): +490 raise Exception('Run the gamma method first.') +491 +492 for e, e_name in enumerate(self.mc_names): +493 fig = plt.figure() +494 plt.xlabel(r'$W$') +495 plt.ylabel(r'$\tau_\mathrm{int}$') +496 length = int(len(self.e_n_tauint[e_name])) +497 if self.tau_exp[e_name] > 0: +498 base = self.e_n_tauint[e_name][self.e_windowsize[e_name]] +499 x_help = np.arange(2 * self.tau_exp[e_name]) +500 y_help = (x_help + 1) * np.abs(self.e_rho[e_name][self.e_windowsize[e_name] + 1]) * (1 - x_help / (2 * (2 * self.tau_exp[e_name] - 1))) + base +501 x_arr = np.arange(self.e_windowsize[e_name] + 1, self.e_windowsize[e_name] + 1 + 2 * self.tau_exp[e_name]) +502 plt.plot(x_arr, y_help, 'C' + str(e), linewidth=1, ls='--', marker=',') +503 plt.errorbar([self.e_windowsize[e_name] + 2 * self.tau_exp[e_name]], [self.e_tauint[e_name]], +504 yerr=[self.e_dtauint[e_name]], fmt='C' + str(e), linewidth=1, capsize=2, marker='o', mfc=plt.rcParams['axes.facecolor']) +505 xmax = self.e_windowsize[e_name] + 2 * self.tau_exp[e_name] + 1.5 +506 label = e_name + r', $\tau_\mathrm{exp}$=' + str(np.around(self.tau_exp[e_name], decimals=2)) +507 else: +508 label = e_name + ', S=' + str(np.around(self.S[e_name], decimals=2)) +509 xmax = max(10.5, 2 * self.e_windowsize[e_name] - 0.5) 510 -511 Parameters -512 ---------- -513 save : str -514 saves the figure to a file named 'save' if. -515 """ -516 if not hasattr(self, 'e_dvalue'): -517 raise Exception('Run the gamma method first.') -518 for e, e_name in enumerate(self.mc_names): -519 fig = plt.figure() -520 plt.xlabel('W') -521 plt.ylabel('rho') -522 length = int(len(self.e_drho[e_name])) -523 plt.errorbar(np.arange(length), self.e_rho[e_name][:length], yerr=self.e_drho[e_name][:], linewidth=1, capsize=2) -524 plt.axvline(x=self.e_windowsize[e_name], color='r', alpha=0.25, ls='--', marker=',') -525 if self.tau_exp[e_name] > 0: -526 plt.plot([self.e_windowsize[e_name] + 1, self.e_windowsize[e_name] + 1 + 2 * self.tau_exp[e_name]], -527 [self.e_rho[e_name][self.e_windowsize[e_name] + 1], 0], 'k-', lw=1) -528 xmax = self.e_windowsize[e_name] + 2 * self.tau_exp[e_name] + 1.5 -529 plt.title('Rho ' + e_name + r', tau\_exp=' + str(np.around(self.tau_exp[e_name], decimals=2))) -530 else: -531 xmax = max(10.5, 2 * self.e_windowsize[e_name] - 0.5) -532 plt.title('Rho ' + e_name + ', S=' + str(np.around(self.S[e_name], decimals=2))) -533 plt.plot([-0.5, xmax], [0, 0], 'k--', lw=1) -534 plt.xlim(-0.5, xmax) -535 plt.draw() -536 if save: -537 fig.savefig(save + "_" + str(e)) -538 -539 def plot_rep_dist(self): -540 """Plot replica distribution for each ensemble with more than one replicum.""" -541 if not hasattr(self, 'e_dvalue'): -542 raise Exception('Run the gamma method first.') -543 for e, e_name in enumerate(self.mc_names): -544 if len(self.e_content[e_name]) == 1: -545 print('No replica distribution for a single replicum (', e_name, ')') -546 continue -547 r_length = [] -548 sub_r_mean = 0 -549 for r, r_name in enumerate(self.e_content[e_name]): -550 r_length.append(len(self.deltas[r_name])) -551 sub_r_mean += self.shape[r_name] * self.r_values[r_name] -552 e_N = np.sum(r_length) -553 sub_r_mean /= e_N -554 arr = np.zeros(len(self.e_content[e_name])) -555 for r, r_name in enumerate(self.e_content[e_name]): -556 arr[r] = (self.r_values[r_name] - sub_r_mean) / (self.e_dvalue[e_name] * np.sqrt(e_N / self.shape[r_name] - 1)) -557 plt.hist(arr, rwidth=0.8, bins=len(self.e_content[e_name])) -558 plt.title('Replica distribution' + e_name + ' 
(mean=0, var=1)') -559 plt.draw() -560 -561 def plot_history(self, expand=True): -562 """Plot derived Monte Carlo history for each ensemble -563 -564 Parameters -565 ---------- -566 expand : bool -567 show expanded history for irregular Monte Carlo chains (default: True). -568 """ -569 for e, e_name in enumerate(self.mc_names): -570 plt.figure() -571 r_length = [] -572 tmp = [] -573 tmp_expanded = [] -574 for r, r_name in enumerate(self.e_content[e_name]): -575 tmp.append(self.deltas[r_name] + self.r_values[r_name]) -576 if expand: -577 tmp_expanded.append(_expand_deltas(self.deltas[r_name], list(self.idl[r_name]), self.shape[r_name]) + self.r_values[r_name]) -578 r_length.append(len(tmp_expanded[-1])) -579 else: -580 r_length.append(len(tmp[-1])) -581 e_N = np.sum(r_length) -582 x = np.arange(e_N) -583 y_test = np.concatenate(tmp, axis=0) -584 if expand: -585 y = np.concatenate(tmp_expanded, axis=0) -586 else: -587 y = y_test -588 plt.errorbar(x, y, fmt='.', markersize=3) -589 plt.xlim(-0.5, e_N - 0.5) -590 plt.title(e_name + f'\nskew: {skew(y_test):.3f} (p={skewtest(y_test).pvalue:.3f}), kurtosis: {kurtosis(y_test):.3f} (p={kurtosistest(y_test).pvalue:.3f})') -591 plt.draw() -592 -593 def plot_piechart(self, save=None): -594 """Plot piechart which shows the fractional contribution of each -595 ensemble to the error and returns a dictionary containing the fractions. -596 -597 Parameters -598 ---------- -599 save : str -600 saves the figure to a file named 'save' if. -601 """ -602 if not hasattr(self, 'e_dvalue'): -603 raise Exception('Run the gamma method first.') -604 if np.isclose(0.0, self._dvalue, atol=1e-15): -605 raise Exception('Error is 0.0') -606 labels = self.e_names -607 sizes = [self.e_dvalue[name] ** 2 for name in labels] / self._dvalue ** 2 -608 fig1, ax1 = plt.subplots() -609 ax1.pie(sizes, labels=labels, startangle=90, normalize=True) -610 ax1.axis('equal') -611 plt.draw() -612 if save: -613 fig1.savefig(save) -614 -615 return dict(zip(self.e_names, sizes)) -616 -617 def dump(self, filename, datatype="json.gz", description="", **kwargs): -618 """Dump the Obs to a file 'name' of chosen format. -619 -620 Parameters -621 ---------- -622 filename : str -623 name of the file to be saved. -624 datatype : str -625 Format of the exported file. Supported formats include -626 "json.gz" and "pickle" -627 description : str -628 Description for output file, only relevant for json.gz format. -629 path : str -630 specifies a custom path for the file (default '.') -631 """ -632 if 'path' in kwargs: -633 file_name = kwargs.get('path') + '/' + filename -634 else: -635 file_name = filename -636 -637 if datatype == "json.gz": -638 from .input.json import dump_to_json -639 dump_to_json([self], file_name, description=description) -640 elif datatype == "pickle": -641 with open(file_name + '.p', 'wb') as fb: -642 pickle.dump(self, fb) -643 else: -644 raise Exception("Unknown datatype " + str(datatype)) -645 -646 def export_jackknife(self): -647 """Export jackknife samples from the Obs -648 -649 Returns -650 ------- -651 numpy.ndarray -652 Returns a numpy array of length N + 1 where N is the number of samples -653 for the given ensemble and replicum. The zeroth entry of the array contains -654 the mean value of the Obs, entries 1 to N contain the N jackknife samples -655 derived from the Obs. The current implementation only works for observables -656 defined on exactly one ensemble and replicum. 
The derived jackknife samples -657 should agree with samples from a full jackknife analysis up to O(1/N). -658 """ -659 -660 if len(self.names) != 1: -661 raise Exception("'export_jackknife' is only implemented for Obs defined on one ensemble and replicum.") -662 -663 name = self.names[0] -664 full_data = self.deltas[name] + self.r_values[name] -665 n = full_data.size -666 mean = self.value -667 tmp_jacks = np.zeros(n + 1) -668 tmp_jacks[0] = mean -669 tmp_jacks[1:] = (n * mean - full_data) / (n - 1) -670 return tmp_jacks -671 -672 def __float__(self): -673 return float(self.value) -674 -675 def __repr__(self): -676 return 'Obs[' + str(self) + ']' -677 -678 def __str__(self): -679 if self._dvalue == 0.0: -680 return str(self.value) -681 fexp = np.floor(np.log10(self._dvalue)) -682 if fexp < 0.0: -683 return '{:{form}}({:2.0f})'.format(self.value, self._dvalue * 10 ** (-fexp + 1), form='.' + str(-int(fexp) + 1) + 'f') -684 elif fexp == 0.0: -685 return '{:.1f}({:1.1f})'.format(self.value, self._dvalue) -686 else: -687 return '{:.0f}({:2.0f})'.format(self.value, self._dvalue) -688 -689 def __hash__(self): -690 hash_tuple = (np.array([self.value]).astype(np.float32).data.tobytes(),) -691 hash_tuple += tuple([o.astype(np.float32).data.tobytes() for o in self.deltas.values()]) -692 hash_tuple += tuple([np.array([o.errsq()]).astype(np.float32).data.tobytes() for o in self.covobs.values()]) -693 hash_tuple += tuple([o.encode() for o in self.names]) -694 m = hashlib.md5() -695 [m.update(o) for o in hash_tuple] -696 return int(m.hexdigest(), 16) & 0xFFFFFFFF -697 -698 # Overload comparisons -699 def __lt__(self, other): -700 return self.value < other +511 plt.errorbar(np.arange(length)[:int(xmax) + 1], self.e_n_tauint[e_name][:int(xmax) + 1], yerr=self.e_n_dtauint[e_name][:int(xmax) + 1], linewidth=1, capsize=2, label=label) +512 plt.axvline(x=self.e_windowsize[e_name], color='C' + str(e), alpha=0.5, marker=',', ls='--') +513 plt.legend() +514 plt.xlim(-0.5, xmax) +515 ylim = plt.ylim() +516 plt.ylim(bottom=0.0, top=max(1.0, ylim[1])) +517 plt.draw() +518 if save: +519 fig.savefig(save + "_" + str(e)) +520 +521 def plot_rho(self, save=None): +522 """Plot normalized autocorrelation function time for each ensemble. +523 +524 Parameters +525 ---------- +526 save : str +527 saves the figure to a file named 'save' if. 
+528 """ +529 if not hasattr(self, 'e_dvalue'): +530 raise Exception('Run the gamma method first.') +531 for e, e_name in enumerate(self.mc_names): +532 fig = plt.figure() +533 plt.xlabel('W') +534 plt.ylabel('rho') +535 length = int(len(self.e_drho[e_name])) +536 plt.errorbar(np.arange(length), self.e_rho[e_name][:length], yerr=self.e_drho[e_name][:], linewidth=1, capsize=2) +537 plt.axvline(x=self.e_windowsize[e_name], color='r', alpha=0.25, ls='--', marker=',') +538 if self.tau_exp[e_name] > 0: +539 plt.plot([self.e_windowsize[e_name] + 1, self.e_windowsize[e_name] + 1 + 2 * self.tau_exp[e_name]], +540 [self.e_rho[e_name][self.e_windowsize[e_name] + 1], 0], 'k-', lw=1) +541 xmax = self.e_windowsize[e_name] + 2 * self.tau_exp[e_name] + 1.5 +542 plt.title('Rho ' + e_name + r', tau\_exp=' + str(np.around(self.tau_exp[e_name], decimals=2))) +543 else: +544 xmax = max(10.5, 2 * self.e_windowsize[e_name] - 0.5) +545 plt.title('Rho ' + e_name + ', S=' + str(np.around(self.S[e_name], decimals=2))) +546 plt.plot([-0.5, xmax], [0, 0], 'k--', lw=1) +547 plt.xlim(-0.5, xmax) +548 plt.draw() +549 if save: +550 fig.savefig(save + "_" + str(e)) +551 +552 def plot_rep_dist(self): +553 """Plot replica distribution for each ensemble with more than one replicum.""" +554 if not hasattr(self, 'e_dvalue'): +555 raise Exception('Run the gamma method first.') +556 for e, e_name in enumerate(self.mc_names): +557 if len(self.e_content[e_name]) == 1: +558 print('No replica distribution for a single replicum (', e_name, ')') +559 continue +560 r_length = [] +561 sub_r_mean = 0 +562 for r, r_name in enumerate(self.e_content[e_name]): +563 r_length.append(len(self.deltas[r_name])) +564 sub_r_mean += self.shape[r_name] * self.r_values[r_name] +565 e_N = np.sum(r_length) +566 sub_r_mean /= e_N +567 arr = np.zeros(len(self.e_content[e_name])) +568 for r, r_name in enumerate(self.e_content[e_name]): +569 arr[r] = (self.r_values[r_name] - sub_r_mean) / (self.e_dvalue[e_name] * np.sqrt(e_N / self.shape[r_name] - 1)) +570 plt.hist(arr, rwidth=0.8, bins=len(self.e_content[e_name])) +571 plt.title('Replica distribution' + e_name + ' (mean=0, var=1)') +572 plt.draw() +573 +574 def plot_history(self, expand=True): +575 """Plot derived Monte Carlo history for each ensemble +576 +577 Parameters +578 ---------- +579 expand : bool +580 show expanded history for irregular Monte Carlo chains (default: True). +581 """ +582 for e, e_name in enumerate(self.mc_names): +583 plt.figure() +584 r_length = [] +585 tmp = [] +586 tmp_expanded = [] +587 for r, r_name in enumerate(self.e_content[e_name]): +588 tmp.append(self.deltas[r_name] + self.r_values[r_name]) +589 if expand: +590 tmp_expanded.append(_expand_deltas(self.deltas[r_name], list(self.idl[r_name]), self.shape[r_name]) + self.r_values[r_name]) +591 r_length.append(len(tmp_expanded[-1])) +592 else: +593 r_length.append(len(tmp[-1])) +594 e_N = np.sum(r_length) +595 x = np.arange(e_N) +596 y_test = np.concatenate(tmp, axis=0) +597 if expand: +598 y = np.concatenate(tmp_expanded, axis=0) +599 else: +600 y = y_test +601 plt.errorbar(x, y, fmt='.', markersize=3) +602 plt.xlim(-0.5, e_N - 0.5) +603 plt.title(e_name + f'\nskew: {skew(y_test):.3f} (p={skewtest(y_test).pvalue:.3f}), kurtosis: {kurtosis(y_test):.3f} (p={kurtosistest(y_test).pvalue:.3f})') +604 plt.draw() +605 +606 def plot_piechart(self, save=None): +607 """Plot piechart which shows the fractional contribution of each +608 ensemble to the error and returns a dictionary containing the fractions. 
+609 +610 Parameters +611 ---------- +612 save : str +613 saves the figure to a file named 'save' if. +614 """ +615 if not hasattr(self, 'e_dvalue'): +616 raise Exception('Run the gamma method first.') +617 if np.isclose(0.0, self._dvalue, atol=1e-15): +618 raise Exception('Error is 0.0') +619 labels = self.e_names +620 sizes = [self.e_dvalue[name] ** 2 for name in labels] / self._dvalue ** 2 +621 fig1, ax1 = plt.subplots() +622 ax1.pie(sizes, labels=labels, startangle=90, normalize=True) +623 ax1.axis('equal') +624 plt.draw() +625 if save: +626 fig1.savefig(save) +627 +628 return dict(zip(self.e_names, sizes)) +629 +630 def dump(self, filename, datatype="json.gz", description="", **kwargs): +631 """Dump the Obs to a file 'name' of chosen format. +632 +633 Parameters +634 ---------- +635 filename : str +636 name of the file to be saved. +637 datatype : str +638 Format of the exported file. Supported formats include +639 "json.gz" and "pickle" +640 description : str +641 Description for output file, only relevant for json.gz format. +642 path : str +643 specifies a custom path for the file (default '.') +644 """ +645 if 'path' in kwargs: +646 file_name = kwargs.get('path') + '/' + filename +647 else: +648 file_name = filename +649 +650 if datatype == "json.gz": +651 from .input.json import dump_to_json +652 dump_to_json([self], file_name, description=description) +653 elif datatype == "pickle": +654 with open(file_name + '.p', 'wb') as fb: +655 pickle.dump(self, fb) +656 else: +657 raise Exception("Unknown datatype " + str(datatype)) +658 +659 def export_jackknife(self): +660 """Export jackknife samples from the Obs +661 +662 Returns +663 ------- +664 numpy.ndarray +665 Returns a numpy array of length N + 1 where N is the number of samples +666 for the given ensemble and replicum. The zeroth entry of the array contains +667 the mean value of the Obs, entries 1 to N contain the N jackknife samples +668 derived from the Obs. The current implementation only works for observables +669 defined on exactly one ensemble and replicum. The derived jackknife samples +670 should agree with samples from a full jackknife analysis up to O(1/N). +671 """ +672 +673 if len(self.names) != 1: +674 raise Exception("'export_jackknife' is only implemented for Obs defined on one ensemble and replicum.") +675 +676 name = self.names[0] +677 full_data = self.deltas[name] + self.r_values[name] +678 n = full_data.size +679 mean = self.value +680 tmp_jacks = np.zeros(n + 1) +681 tmp_jacks[0] = mean +682 tmp_jacks[1:] = (n * mean - full_data) / (n - 1) +683 return tmp_jacks +684 +685 def __float__(self): +686 return float(self.value) +687 +688 def __repr__(self): +689 return 'Obs[' + str(self) + ']' +690 +691 def __str__(self): +692 if self._dvalue == 0.0: +693 return str(self.value) +694 fexp = np.floor(np.log10(self._dvalue)) +695 if fexp < 0.0: +696 return '{:{form}}({:2.0f})'.format(self.value, self._dvalue * 10 ** (-fexp + 1), form='.' 
+ str(-int(fexp) + 1) + 'f') +697 elif fexp == 0.0: +698 return '{:.1f}({:1.1f})'.format(self.value, self._dvalue) +699 else: +700 return '{:.0f}({:2.0f})'.format(self.value, self._dvalue) 701 -702 def __le__(self, other): -703 return self.value <= other -704 -705 def __gt__(self, other): -706 return self.value > other -707 -708 def __ge__(self, other): -709 return self.value >= other +702 def __hash__(self): +703 hash_tuple = (np.array([self.value]).astype(np.float32).data.tobytes(),) +704 hash_tuple += tuple([o.astype(np.float32).data.tobytes() for o in self.deltas.values()]) +705 hash_tuple += tuple([np.array([o.errsq()]).astype(np.float32).data.tobytes() for o in self.covobs.values()]) +706 hash_tuple += tuple([o.encode() for o in self.names]) +707 m = hashlib.md5() +708 [m.update(o) for o in hash_tuple] +709 return int(m.hexdigest(), 16) & 0xFFFFFFFF 710 -711 def __eq__(self, other): -712 return (self - other).is_zero() -713 -714 def __ne__(self, other): -715 return not (self - other).is_zero() -716 -717 # Overload math operations -718 def __add__(self, y): -719 if isinstance(y, Obs): -720 return derived_observable(lambda x, **kwargs: x[0] + x[1], [self, y], man_grad=[1, 1]) -721 else: -722 if isinstance(y, np.ndarray): -723 return np.array([self + o for o in y]) -724 elif y.__class__.__name__ in ['Corr', 'CObs']: -725 return NotImplemented -726 else: -727 return derived_observable(lambda x, **kwargs: x[0] + y, [self], man_grad=[1]) -728 -729 def __radd__(self, y): -730 return self + y -731 -732 def __mul__(self, y): -733 if isinstance(y, Obs): -734 return derived_observable(lambda x, **kwargs: x[0] * x[1], [self, y], man_grad=[y.value, self.value]) -735 else: -736 if isinstance(y, np.ndarray): -737 return np.array([self * o for o in y]) -738 elif isinstance(y, complex): -739 return CObs(self * y.real, self * y.imag) -740 elif y.__class__.__name__ in ['Corr', 'CObs']: -741 return NotImplemented -742 else: -743 return derived_observable(lambda x, **kwargs: x[0] * y, [self], man_grad=[y]) +711 # Overload comparisons +712 def __lt__(self, other): +713 return self.value < other +714 +715 def __le__(self, other): +716 return self.value <= other +717 +718 def __gt__(self, other): +719 return self.value > other +720 +721 def __ge__(self, other): +722 return self.value >= other +723 +724 def __eq__(self, other): +725 return (self - other).is_zero() +726 +727 def __ne__(self, other): +728 return not (self - other).is_zero() +729 +730 # Overload math operations +731 def __add__(self, y): +732 if isinstance(y, Obs): +733 return derived_observable(lambda x, **kwargs: x[0] + x[1], [self, y], man_grad=[1, 1]) +734 else: +735 if isinstance(y, np.ndarray): +736 return np.array([self + o for o in y]) +737 elif y.__class__.__name__ in ['Corr', 'CObs']: +738 return NotImplemented +739 else: +740 return derived_observable(lambda x, **kwargs: x[0] + y, [self], man_grad=[1]) +741 +742 def __radd__(self, y): +743 return self + y 744 -745 def __rmul__(self, y): -746 return self * y -747 -748 def __sub__(self, y): -749 if isinstance(y, Obs): -750 return derived_observable(lambda x, **kwargs: x[0] - x[1], [self, y], man_grad=[1, -1]) -751 else: -752 if isinstance(y, np.ndarray): -753 return np.array([self - o for o in y]) -754 elif y.__class__.__name__ in ['Corr', 'CObs']: -755 return NotImplemented -756 else: -757 return derived_observable(lambda x, **kwargs: x[0] - y, [self], man_grad=[1]) -758 -759 def __rsub__(self, y): -760 return -1 * (self - y) -761 -762 def __pos__(self): -763 return self -764 -765 def 
__neg__(self): -766 return -1 * self -767 -768 def __truediv__(self, y): -769 if isinstance(y, Obs): -770 return derived_observable(lambda x, **kwargs: x[0] / x[1], [self, y], man_grad=[1 / y.value, - self.value / y.value ** 2]) -771 else: -772 if isinstance(y, np.ndarray): -773 return np.array([self / o for o in y]) -774 elif y.__class__.__name__ in ['Corr', 'CObs']: -775 return NotImplemented -776 else: -777 return derived_observable(lambda x, **kwargs: x[0] / y, [self], man_grad=[1 / y]) -778 -779 def __rtruediv__(self, y): -780 if isinstance(y, Obs): -781 return derived_observable(lambda x, **kwargs: x[0] / x[1], [y, self], man_grad=[1 / self.value, - y.value / self.value ** 2]) -782 else: -783 if isinstance(y, np.ndarray): -784 return np.array([o / self for o in y]) -785 elif y.__class__.__name__ in ['Corr', 'CObs']: -786 return NotImplemented -787 else: -788 return derived_observable(lambda x, **kwargs: y / x[0], [self], man_grad=[-y / self.value ** 2]) -789 -790 def __pow__(self, y): -791 if isinstance(y, Obs): -792 return derived_observable(lambda x: x[0] ** x[1], [self, y]) -793 else: -794 return derived_observable(lambda x: x[0] ** y, [self]) -795 -796 def __rpow__(self, y): -797 if isinstance(y, Obs): -798 return derived_observable(lambda x: x[0] ** x[1], [y, self]) -799 else: -800 return derived_observable(lambda x: y ** x[0], [self]) -801 -802 def __abs__(self): -803 return derived_observable(lambda x: anp.abs(x[0]), [self]) -804 -805 # Overload numpy functions -806 def sqrt(self): -807 return derived_observable(lambda x, **kwargs: np.sqrt(x[0]), [self], man_grad=[1 / 2 / np.sqrt(self.value)]) +745 def __mul__(self, y): +746 if isinstance(y, Obs): +747 return derived_observable(lambda x, **kwargs: x[0] * x[1], [self, y], man_grad=[y.value, self.value]) +748 else: +749 if isinstance(y, np.ndarray): +750 return np.array([self * o for o in y]) +751 elif isinstance(y, complex): +752 return CObs(self * y.real, self * y.imag) +753 elif y.__class__.__name__ in ['Corr', 'CObs']: +754 return NotImplemented +755 else: +756 return derived_observable(lambda x, **kwargs: x[0] * y, [self], man_grad=[y]) +757 +758 def __rmul__(self, y): +759 return self * y +760 +761 def __sub__(self, y): +762 if isinstance(y, Obs): +763 return derived_observable(lambda x, **kwargs: x[0] - x[1], [self, y], man_grad=[1, -1]) +764 else: +765 if isinstance(y, np.ndarray): +766 return np.array([self - o for o in y]) +767 elif y.__class__.__name__ in ['Corr', 'CObs']: +768 return NotImplemented +769 else: +770 return derived_observable(lambda x, **kwargs: x[0] - y, [self], man_grad=[1]) +771 +772 def __rsub__(self, y): +773 return -1 * (self - y) +774 +775 def __pos__(self): +776 return self +777 +778 def __neg__(self): +779 return -1 * self +780 +781 def __truediv__(self, y): +782 if isinstance(y, Obs): +783 return derived_observable(lambda x, **kwargs: x[0] / x[1], [self, y], man_grad=[1 / y.value, - self.value / y.value ** 2]) +784 else: +785 if isinstance(y, np.ndarray): +786 return np.array([self / o for o in y]) +787 elif y.__class__.__name__ in ['Corr', 'CObs']: +788 return NotImplemented +789 else: +790 return derived_observable(lambda x, **kwargs: x[0] / y, [self], man_grad=[1 / y]) +791 +792 def __rtruediv__(self, y): +793 if isinstance(y, Obs): +794 return derived_observable(lambda x, **kwargs: x[0] / x[1], [y, self], man_grad=[1 / self.value, - y.value / self.value ** 2]) +795 else: +796 if isinstance(y, np.ndarray): +797 return np.array([o / self for o in y]) +798 elif y.__class__.__name__ in 
['Corr', 'CObs']: +799 return NotImplemented +800 else: +801 return derived_observable(lambda x, **kwargs: y / x[0], [self], man_grad=[-y / self.value ** 2]) +802 +803 def __pow__(self, y): +804 if isinstance(y, Obs): +805 return derived_observable(lambda x: x[0] ** x[1], [self, y]) +806 else: +807 return derived_observable(lambda x: x[0] ** y, [self]) 808 -809 def log(self): -810 return derived_observable(lambda x, **kwargs: np.log(x[0]), [self], man_grad=[1 / self.value]) -811 -812 def exp(self): -813 return derived_observable(lambda x, **kwargs: np.exp(x[0]), [self], man_grad=[np.exp(self.value)]) +809 def __rpow__(self, y): +810 if isinstance(y, Obs): +811 return derived_observable(lambda x: x[0] ** x[1], [y, self]) +812 else: +813 return derived_observable(lambda x: y ** x[0], [self]) 814 -815 def sin(self): -816 return derived_observable(lambda x, **kwargs: np.sin(x[0]), [self], man_grad=[np.cos(self.value)]) +815 def __abs__(self): +816 return derived_observable(lambda x: anp.abs(x[0]), [self]) 817 -818 def cos(self): -819 return derived_observable(lambda x, **kwargs: np.cos(x[0]), [self], man_grad=[-np.sin(self.value)]) -820 -821 def tan(self): -822 return derived_observable(lambda x, **kwargs: np.tan(x[0]), [self], man_grad=[1 / np.cos(self.value) ** 2]) -823 -824 def arcsin(self): -825 return derived_observable(lambda x: anp.arcsin(x[0]), [self]) -826 -827 def arccos(self): -828 return derived_observable(lambda x: anp.arccos(x[0]), [self]) -829 -830 def arctan(self): -831 return derived_observable(lambda x: anp.arctan(x[0]), [self]) -832 -833 def sinh(self): -834 return derived_observable(lambda x, **kwargs: np.sinh(x[0]), [self], man_grad=[np.cosh(self.value)]) -835 -836 def cosh(self): -837 return derived_observable(lambda x, **kwargs: np.cosh(x[0]), [self], man_grad=[np.sinh(self.value)]) -838 -839 def tanh(self): -840 return derived_observable(lambda x, **kwargs: np.tanh(x[0]), [self], man_grad=[1 / np.cosh(self.value) ** 2]) -841 -842 def arcsinh(self): -843 return derived_observable(lambda x: anp.arcsinh(x[0]), [self]) -844 -845 def arccosh(self): -846 return derived_observable(lambda x: anp.arccosh(x[0]), [self]) -847 -848 def arctanh(self): -849 return derived_observable(lambda x: anp.arctanh(x[0]), [self]) +818 # Overload numpy functions +819 def sqrt(self): +820 return derived_observable(lambda x, **kwargs: np.sqrt(x[0]), [self], man_grad=[1 / 2 / np.sqrt(self.value)]) +821 +822 def log(self): +823 return derived_observable(lambda x, **kwargs: np.log(x[0]), [self], man_grad=[1 / self.value]) +824 +825 def exp(self): +826 return derived_observable(lambda x, **kwargs: np.exp(x[0]), [self], man_grad=[np.exp(self.value)]) +827 +828 def sin(self): +829 return derived_observable(lambda x, **kwargs: np.sin(x[0]), [self], man_grad=[np.cos(self.value)]) +830 +831 def cos(self): +832 return derived_observable(lambda x, **kwargs: np.cos(x[0]), [self], man_grad=[-np.sin(self.value)]) +833 +834 def tan(self): +835 return derived_observable(lambda x, **kwargs: np.tan(x[0]), [self], man_grad=[1 / np.cos(self.value) ** 2]) +836 +837 def arcsin(self): +838 return derived_observable(lambda x: anp.arcsin(x[0]), [self]) +839 +840 def arccos(self): +841 return derived_observable(lambda x: anp.arccos(x[0]), [self]) +842 +843 def arctan(self): +844 return derived_observable(lambda x: anp.arctan(x[0]), [self]) +845 +846 def sinh(self): +847 return derived_observable(lambda x, **kwargs: np.sinh(x[0]), [self], man_grad=[np.cosh(self.value)]) +848 +849 def cosh(self): +850 return 
derived_observable(lambda x, **kwargs: np.cosh(x[0]), [self], man_grad=[np.sinh(self.value)]) +851 +852 def tanh(self): +853 return derived_observable(lambda x, **kwargs: np.tanh(x[0]), [self], man_grad=[1 / np.cosh(self.value) ** 2]) +854 +855 def arcsinh(self): +856 return derived_observable(lambda x: anp.arcsinh(x[0]), [self]) +857 +858 def arccosh(self): +859 return derived_observable(lambda x: anp.arccosh(x[0]), [self]) +860 +861 def arctanh(self): +862 return derived_observable(lambda x: anp.arctanh(x[0]), [self]) @@ -2976,59 +3002,72 @@ list of ranges or lists on which the samples are defined 280 tmp = self.e_rho[e_name][i + 1:w_max] + np.concatenate([self.e_rho[e_name][i - 1::-1], self.e_rho[e_name][1:w_max - 2 * i]]) - 2 * self.e_rho[e_name][i] * self.e_rho[e_name][1:w_max - i] 281 self.e_drho[e_name][i] = np.sqrt(np.sum(tmp ** 2) / e_N) 282 -283 _compute_drho(1) -284 if self.tau_exp[e_name] > 0: -285 texp = self.tau_exp[e_name] -286 # Critical slowing down analysis -287 if w_max // 2 <= 1: -288 raise Exception("Need at least 8 samples for tau_exp error analysis") -289 for n in range(1, w_max // 2): -290 _compute_drho(n + 1) -291 if (self.e_rho[e_name][n] - self.N_sigma[e_name] * self.e_drho[e_name][n]) < 0 or n >= w_max // 2 - 2: -292 # Bias correction hep-lat/0306017 eq. (49) included -293 self.e_tauint[e_name] = self.e_n_tauint[e_name][n] * (1 + (2 * n + 1) / e_N) / (1 + 1 / e_N) + texp * np.abs(self.e_rho[e_name][n + 1]) # The absolute makes sure, that the tail contribution is always positive -294 self.e_dtauint[e_name] = np.sqrt(self.e_n_dtauint[e_name][n] ** 2 + texp ** 2 * self.e_drho[e_name][n + 1] ** 2) -295 # Error of tau_exp neglected so far, missing term: self.e_rho[e_name][n + 1] ** 2 * d_tau_exp ** 2 -296 self.e_dvalue[e_name] = np.sqrt(2 * self.e_tauint[e_name] * e_gamma[e_name][0] * (1 + 1 / e_N) / e_N) -297 self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt((n + 0.5) / e_N) -298 self.e_windowsize[e_name] = n -299 break -300 else: -301 if self.S[e_name] == 0.0: -302 self.e_tauint[e_name] = 0.5 -303 self.e_dtauint[e_name] = 0.0 -304 self.e_dvalue[e_name] = np.sqrt(e_gamma[e_name][0] / (e_N - 1)) -305 self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt(0.5 / e_N) -306 self.e_windowsize[e_name] = 0 -307 else: -308 # Standard automatic windowing procedure -309 tau = self.S[e_name] / np.log((2 * self.e_n_tauint[e_name][1:] + 1) / (2 * self.e_n_tauint[e_name][1:] - 1)) -310 g_w = np.exp(- np.arange(1, w_max) / tau) - tau / np.sqrt(np.arange(1, w_max) * e_N) -311 for n in range(1, w_max): -312 if n < w_max // 2 - 2: -313 _compute_drho(n + 1) -314 if g_w[n - 1] < 0 or n >= w_max - 1: -315 self.e_tauint[e_name] = self.e_n_tauint[e_name][n] * (1 + (2 * n + 1) / e_N) / (1 + 1 / e_N) # Bias correction hep-lat/0306017 eq. 
(49) -316 self.e_dtauint[e_name] = self.e_n_dtauint[e_name][n] -317 self.e_dvalue[e_name] = np.sqrt(2 * self.e_tauint[e_name] * e_gamma[e_name][0] * (1 + 1 / e_N) / e_N) -318 self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt((n + 0.5) / e_N) -319 self.e_windowsize[e_name] = n -320 break -321 -322 self._dvalue += self.e_dvalue[e_name] ** 2 -323 self.ddvalue += (self.e_dvalue[e_name] * self.e_ddvalue[e_name]) ** 2 -324 -325 for e_name in self.cov_names: -326 self.e_dvalue[e_name] = np.sqrt(self.covobs[e_name].errsq()) -327 self.e_ddvalue[e_name] = 0 -328 self._dvalue += self.e_dvalue[e_name]**2 -329 -330 self._dvalue = np.sqrt(self._dvalue) -331 if self._dvalue == 0.0: -332 self.ddvalue = 0.0 -333 else: -334 self.ddvalue = np.sqrt(self.ddvalue) / self._dvalue -335 return +283 gaps = [] +284 for r_name in e_content[e_name]: +285 if isinstance(self.idl[r_name], range): +286 gaps.append(1) +287 else: +288 gaps.append(np.min(np.diff(self.idl[r_name]))) +289 +290 if not np.all([gi == gaps[0] for gi in gaps]): +291 raise Exception(f"Replica for ensemble {e_name} are not equally spaced.", gaps) +292 else: +293 gapsize = gaps[0] +294 +295 _compute_drho(gapsize) +296 if self.tau_exp[e_name] > 0: +297 texp = self.tau_exp[e_name] +298 # Critical slowing down analysis +299 if w_max // 2 <= 1: +300 raise Exception("Need at least 8 samples for tau_exp error analysis") +301 for n in range(gapsize, w_max // 2, gapsize): +302 _compute_drho(n + gapsize) +303 if (self.e_rho[e_name][n] - self.N_sigma[e_name] * self.e_drho[e_name][n]) < 0 or n >= w_max // 2 - 2: +304 # Bias correction hep-lat/0306017 eq. (49) included +305 self.e_tauint[e_name] = self.e_n_tauint[e_name][n] * (1 + (2 * n / gapsize + 1) / e_N) / (1 + 1 / e_N) + texp * np.abs(self.e_rho[e_name][n + 1]) # The absolute makes sure, that the tail contribution is always positive +306 self.e_dtauint[e_name] = np.sqrt(self.e_n_dtauint[e_name][n] ** 2 + texp ** 2 * self.e_drho[e_name][n + 1] ** 2) +307 # Error of tau_exp neglected so far, missing term: self.e_rho[e_name][n + 1] ** 2 * d_tau_exp ** 2 +308 self.e_dvalue[e_name] = np.sqrt(2 * self.e_tauint[e_name] * e_gamma[e_name][0] * (1 + 1 / e_N) / e_N) +309 self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt((n / gapsize + 0.5) / e_N) +310 self.e_windowsize[e_name] = n +311 break +312 else: +313 if self.S[e_name] == 0.0: +314 self.e_tauint[e_name] = 0.5 +315 self.e_dtauint[e_name] = 0.0 +316 self.e_dvalue[e_name] = np.sqrt(e_gamma[e_name][0] / (e_N - 1)) +317 self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt(0.5 / e_N) +318 self.e_windowsize[e_name] = 0 +319 else: +320 # Standard automatic windowing procedure +321 tau = self.S[e_name] / np.log((2 * self.e_n_tauint[e_name][gapsize::gapsize] + 1) / (2 * self.e_n_tauint[e_name][gapsize::gapsize] - 1)) +322 g_w = np.exp(- np.arange(1, len(tau) + 1) / tau) - tau / np.sqrt(np.arange(1, len(tau) + 1) * e_N) +323 for n in range(1, w_max): +324 if n < w_max // 2 - 2: +325 _compute_drho(gapsize * n + gapsize) +326 if g_w[n - 1] < 0 or n >= w_max - 1: +327 n *= gapsize +328 self.e_tauint[e_name] = self.e_n_tauint[e_name][n] * (1 + (2 * n / gapsize + 1) / e_N) / (1 + 1 / e_N) # Bias correction hep-lat/0306017 eq. 
(49) +329 self.e_dtauint[e_name] = self.e_n_dtauint[e_name][n] +330 self.e_dvalue[e_name] = np.sqrt(2 * self.e_tauint[e_name] * e_gamma[e_name][0] * (1 + 1 / e_N) / e_N) +331 self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt((n / gapsize + 0.5) / e_N) +332 self.e_windowsize[e_name] = n +333 break +334 +335 self._dvalue += self.e_dvalue[e_name] ** 2 +336 self.ddvalue += (self.e_dvalue[e_name] * self.e_ddvalue[e_name]) ** 2 +337 +338 for e_name in self.cov_names: +339 self.e_dvalue[e_name] = np.sqrt(self.covobs[e_name].errsq()) +340 self.e_ddvalue[e_name] = 0 +341 self._dvalue += self.e_dvalue[e_name]**2 +342 +343 self._dvalue = np.sqrt(self._dvalue) +344 if self._dvalue == 0.0: +345 self.ddvalue = 0.0 +346 else: +347 self.ddvalue = np.sqrt(self.ddvalue) / self._dvalue +348 return @@ -3067,66 +3106,66 @@ of the autocorrelation function (default True) -
370    def details(self, ens_content=True):
-371        """Output detailed properties of the Obs.
-372
-373        Parameters
-374        ----------
-375        ens_content : bool
-376            Print details about the ensembles and replica if True.
-377        """
-378        if self.tag is not None:
-379            print("Description:", self.tag)
-380        if not hasattr(self, 'e_dvalue'):
-381            print('Result\t %3.8e' % (self.value))
-382        else:
-383            if self.value == 0.0:
-384                percentage = np.nan
-385            else:
-386                percentage = np.abs(self._dvalue / self.value) * 100
-387            print('Result\t %3.8e +/- %3.8e +/- %3.8e (%3.3f%%)' % (self.value, self._dvalue, self.ddvalue, percentage))
-388            if len(self.e_names) > 1:
-389                print(' Ensemble errors:')
-390            for e_name in self.mc_names:
-391                if len(self.e_names) > 1:
-392                    print('', e_name, '\t %3.8e +/- %3.8e' % (self.e_dvalue[e_name], self.e_ddvalue[e_name]))
-393                if self.tau_exp[e_name] > 0:
-394                    print(' t_int\t %3.8e +/- %3.8e tau_exp = %3.2f,  N_sigma = %1.0i' % (self.e_tauint[e_name], self.e_dtauint[e_name], self.tau_exp[e_name], self.N_sigma[e_name]))
-395                else:
-396                    print(' t_int\t %3.8e +/- %3.8e S = %3.2f' % (self.e_tauint[e_name], self.e_dtauint[e_name], self.S[e_name]))
-397            for e_name in self.cov_names:
-398                print('', e_name, '\t %3.8e' % (self.e_dvalue[e_name]))
-399        if ens_content is True:
-400            if len(self.e_names) == 1:
-401                print(self.N, 'samples in', len(self.e_names), 'ensemble:')
-402            else:
-403                print(self.N, 'samples in', len(self.e_names), 'ensembles:')
-404            my_string_list = []
-405            for key, value in sorted(self.e_content.items()):
-406                if key not in self.covobs:
-407                    my_string = '  ' + "\u00B7 Ensemble '" + key + "' "
-408                    if len(value) == 1:
-409                        my_string += f': {self.shape[value[0]]} configurations'
-410                        if isinstance(self.idl[value[0]], range):
-411                            my_string += f' (from {self.idl[value[0]].start} to {self.idl[value[0]][-1]}' + int(self.idl[value[0]].step != 1) * f' in steps of {self.idl[value[0]].step}' + ')'
-412                        else:
-413                            my_string += ' (irregular range)'
-414                    else:
-415                        sublist = []
-416                        for v in value:
-417                            my_substring = '    ' + "\u00B7 Replicum '" + v[len(key) + 1:] + "' "
-418                            my_substring += f': {self.shape[v]} configurations'
-419                            if isinstance(self.idl[v], range):
-420                                my_substring += f' (from {self.idl[v].start} to {self.idl[v][-1]}' + int(self.idl[v].step != 1) * f' in steps of {self.idl[v].step}' + ')'
-421                            else:
-422                                my_substring += ' (irregular range)'
-423                            sublist.append(my_substring)
-424
-425                        my_string += '\n' + '\n'.join(sublist)
-426                else:
-427                    my_string = '  ' + "\u00B7 Covobs   '" + key + "' "
-428                my_string_list.append(my_string)
-429            print('\n'.join(my_string_list))
+383    def details(self, ens_content=True):
+384        """Output detailed properties of the Obs.
+385
+386        Parameters
+387        ----------
+388        ens_content : bool
+389            Print details about the ensembles and replica if True.
+390        """
+391        if self.tag is not None:
+392            print("Description:", self.tag)
+393        if not hasattr(self, 'e_dvalue'):
+394            print('Result\t %3.8e' % (self.value))
+395        else:
+396            if self.value == 0.0:
+397                percentage = np.nan
+398            else:
+399                percentage = np.abs(self._dvalue / self.value) * 100
+400            print('Result\t %3.8e +/- %3.8e +/- %3.8e (%3.3f%%)' % (self.value, self._dvalue, self.ddvalue, percentage))
+401            if len(self.e_names) > 1:
+402                print(' Ensemble errors:')
+403            for e_name in self.mc_names:
+404                if len(self.e_names) > 1:
+405                    print('', e_name, '\t %3.8e +/- %3.8e' % (self.e_dvalue[e_name], self.e_ddvalue[e_name]))
+406                if self.tau_exp[e_name] > 0:
+407                    print(' t_int\t %3.8e +/- %3.8e tau_exp = %3.2f,  N_sigma = %1.0i' % (self.e_tauint[e_name], self.e_dtauint[e_name], self.tau_exp[e_name], self.N_sigma[e_name]))
+408                else:
+409                    print(' t_int\t %3.8e +/- %3.8e S = %3.2f' % (self.e_tauint[e_name], self.e_dtauint[e_name], self.S[e_name]))
+410            for e_name in self.cov_names:
+411                print('', e_name, '\t %3.8e' % (self.e_dvalue[e_name]))
+412        if ens_content is True:
+413            if len(self.e_names) == 1:
+414                print(self.N, 'samples in', len(self.e_names), 'ensemble:')
+415            else:
+416                print(self.N, 'samples in', len(self.e_names), 'ensembles:')
+417            my_string_list = []
+418            for key, value in sorted(self.e_content.items()):
+419                if key not in self.covobs:
+420                    my_string = '  ' + "\u00B7 Ensemble '" + key + "' "
+421                    if len(value) == 1:
+422                        my_string += f': {self.shape[value[0]]} configurations'
+423                        if isinstance(self.idl[value[0]], range):
+424                            my_string += f' (from {self.idl[value[0]].start} to {self.idl[value[0]][-1]}' + int(self.idl[value[0]].step != 1) * f' in steps of {self.idl[value[0]].step}' + ')'
+425                        else:
+426                            my_string += ' (irregular range)'
+427                    else:
+428                        sublist = []
+429                        for v in value:
+430                            my_substring = '    ' + "\u00B7 Replicum '" + v[len(key) + 1:] + "' "
+431                            my_substring += f': {self.shape[v]} configurations'
+432                            if isinstance(self.idl[v], range):
+433                                my_substring += f' (from {self.idl[v].start} to {self.idl[v][-1]}' + int(self.idl[v].step != 1) * f' in steps of {self.idl[v].step}' + ')'
+434                            else:
+435                                my_substring += ' (irregular range)'
+436                            sublist.append(my_substring)
+437
+438                        my_string += '\n' + '\n'.join(sublist)
+439                else:
+440                    my_string = '  ' + "\u00B7 Covobs   '" + key + "' "
+441                my_string_list.append(my_string)
+442            print('\n'.join(my_string_list))
 
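For orientation, a minimal usage sketch of `details` (the ensemble name 'A' and the toy data are illustrative, not part of this diff):

    import numpy as np
    import pyerrors as pe

    # Toy observable on a single hypothetical ensemble 'A'
    obs = pe.Obs([np.random.normal(1.0, 0.1, 500)], ['A'])
    obs.gamma_method()  # without this, details can only print the mean value
    obs.details()       # value +/- error, tau_int and the ensemble content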
@@ -3153,20 +3192,20 @@ Print details about the ensembles and replica if True.
-431    def reweight(self, weight):
-432        """Reweight the obs with given reweighting factors.
-433
-434        Parameters
-435        ----------
-436        weight : Obs
-437            Reweighting factor. An Observable that has to be defined on a superset of the
-438            configurations in obs[i].idl for all i.
-439        all_configs : bool
-440            if True, the reweighted observables are normalized by the average of
-441            the reweighting factor on all configurations in weight.idl and not
-442            on the configurations in obs[i].idl. Default False.
-443        """
-444        return reweight(weight, [self])[0]
+444    def reweight(self, weight):
+445        """Reweight the obs with given reweighting factors.
+446
+447        Parameters
+448        ----------
+449        weight : Obs
+450            Reweighting factor. An Observable that has to be defined on a superset of the
+451            configurations in obs[i].idl for all i.
+452        all_configs : bool
+453            if True, the reweighted observables are normalized by the average of
+454            the reweighting factor on all configurations in weight.idl and not
+455            on the configurations in obs[i].idl. Default False.
+456        """
+457        return reweight(weight, [self])[0]
 
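A hedged sketch of how the method version of `reweight` might be called; the weight observable here is hypothetical and must be defined on (a superset of) the same configurations:

    import numpy as np
    import pyerrors as pe

    obs = pe.Obs([np.random.normal(1.0, 0.1, 500)], ['A'])
    weight = pe.Obs([np.random.normal(1.0, 0.05, 500)], ['A'])  # hypothetical reweighting factor
    obs_rw = obs.reweight(weight)  # shorthand for reweight(weight, [obs])[0]
    obs_rw.gamma_method()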
@@ -3198,17 +3237,17 @@ on the configurations in obs[i].idl. Default False.
-446    def is_zero_within_error(self, sigma=1):
-447        """Checks whether the observable is zero within 'sigma' standard errors.
-448
-449        Parameters
-450        ----------
-451        sigma : int
-452            Number of standard errors used for the check.
-453
-454        Works only properly when the gamma method was run.
-455        """
-456        return self.is_zero() or np.abs(self.value) <= sigma * self._dvalue
+459    def is_zero_within_error(self, sigma=1):
+460        """Checks whether the observable is zero within 'sigma' standard errors.
+461
+462        Parameters
+463        ----------
+464        sigma : int
+465            Number of standard errors used for the check.
+466
+467        Works only properly when the gamma method was run.
+468        """
+469        return self.is_zero() or np.abs(self.value) <= sigma * self._dvalue
 
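After the gamma method has populated the error estimate, the check reduces to |value| <= sigma * dvalue. A small sketch with toy data:

    import numpy as np
    import pyerrors as pe

    obs = pe.Obs([np.random.normal(0.005, 1.0, 1000)], ['A'])  # mean compatible with zero
    obs.gamma_method()                        # required for a meaningful error estimate
    print(obs.is_zero_within_error())         # default 1-sigma check
    print(obs.is_zero_within_error(sigma=3))  # looser 3-sigma check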
@@ -3236,15 +3275,15 @@ Number of standard errors used for the check.
-458    def is_zero(self, atol=1e-10):
-459        """Checks whether the observable is zero within a given tolerance.
-460
-461        Parameters
-462        ----------
-463        atol : float
-464            Absolute tolerance (for details see numpy documentation).
-465        """
-466        return np.isclose(0.0, self.value, 1e-14, atol) and all(np.allclose(0.0, delta, 1e-14, atol) for delta in self.deltas.values()) and all(np.allclose(0.0, delta.errsq(), 1e-14, atol) for delta in self.covobs.values())
+471    def is_zero(self, atol=1e-10):
+472        """Checks whether the observable is zero within a given tolerance.
+473
+474        Parameters
+475        ----------
+476        atol : float
+477            Absolute tolerance (for details see numpy documentation).
+478        """
+479        return np.isclose(0.0, self.value, 1e-14, atol) and all(np.allclose(0.0, delta, 1e-14, atol) for delta in self.deltas.values()) and all(np.allclose(0.0, delta.errsq(), 1e-14, atol) for delta in self.covobs.values())
 
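In contrast to the statistical check above, `is_zero` tests for an exactly vanishing observable (mean value and all fluctuations) up to a numerical tolerance. For example:

    import numpy as np
    import pyerrors as pe

    obs = pe.Obs([np.random.normal(1.0, 0.1, 500)], ['A'])
    diff = obs - obs       # fluctuations cancel configuration by configuration
    print(diff.is_zero())  # True within the default atol of 1e-10; == is built on this check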
@@ -3271,45 +3310,45 @@ Absolute tolerance (for details see numpy documentation).
-468    def plot_tauint(self, save=None):
-469        """Plot integrated autocorrelation time for each ensemble.
-470
-471        Parameters
-472        ----------
-473        save : str
-474            saves the figure to a file named 'save' if save is not None.
-475        """
-476        if not hasattr(self, 'e_dvalue'):
-477            raise Exception('Run the gamma method first.')
-478
-479        for e, e_name in enumerate(self.mc_names):
-480            fig = plt.figure()
-481            plt.xlabel(r'$W$')
-482            plt.ylabel(r'$\tau_\mathrm{int}$')
-483            length = int(len(self.e_n_tauint[e_name]))
-484            if self.tau_exp[e_name] > 0:
-485                base = self.e_n_tauint[e_name][self.e_windowsize[e_name]]
-486                x_help = np.arange(2 * self.tau_exp[e_name])
-487                y_help = (x_help + 1) * np.abs(self.e_rho[e_name][self.e_windowsize[e_name] + 1]) * (1 - x_help / (2 * (2 * self.tau_exp[e_name] - 1))) + base
-488                x_arr = np.arange(self.e_windowsize[e_name] + 1, self.e_windowsize[e_name] + 1 + 2 * self.tau_exp[e_name])
-489                plt.plot(x_arr, y_help, 'C' + str(e), linewidth=1, ls='--', marker=',')
-490                plt.errorbar([self.e_windowsize[e_name] + 2 * self.tau_exp[e_name]], [self.e_tauint[e_name]],
-491                             yerr=[self.e_dtauint[e_name]], fmt='C' + str(e), linewidth=1, capsize=2, marker='o', mfc=plt.rcParams['axes.facecolor'])
-492                xmax = self.e_windowsize[e_name] + 2 * self.tau_exp[e_name] + 1.5
-493                label = e_name + r', $\tau_\mathrm{exp}$=' + str(np.around(self.tau_exp[e_name], decimals=2))
-494            else:
-495                label = e_name + ', S=' + str(np.around(self.S[e_name], decimals=2))
-496                xmax = max(10.5, 2 * self.e_windowsize[e_name] - 0.5)
-497
-498            plt.errorbar(np.arange(length)[:int(xmax) + 1], self.e_n_tauint[e_name][:int(xmax) + 1], yerr=self.e_n_dtauint[e_name][:int(xmax) + 1], linewidth=1, capsize=2, label=label)
-499            plt.axvline(x=self.e_windowsize[e_name], color='C' + str(e), alpha=0.5, marker=',', ls='--')
-500            plt.legend()
-501            plt.xlim(-0.5, xmax)
-502            ylim = plt.ylim()
-503            plt.ylim(bottom=0.0, top=max(1.0, ylim[1]))
-504            plt.draw()
-505            if save:
-506                fig.savefig(save + "_" + str(e))
+            
481    def plot_tauint(self, save=None):
+482        """Plot integrated autocorrelation time for each ensemble.
+483
+484        Parameters
+485        ----------
+486        save : str
+487            saves the figure to a file named 'save' if specified.
+488        """
+489        if not hasattr(self, 'e_dvalue'):
+490            raise Exception('Run the gamma method first.')
+491
+492        for e, e_name in enumerate(self.mc_names):
+493            fig = plt.figure()
+494            plt.xlabel(r'$W$')
+495            plt.ylabel(r'$\tau_\mathrm{int}$')
+496            length = int(len(self.e_n_tauint[e_name]))
+497            if self.tau_exp[e_name] > 0:
+498                base = self.e_n_tauint[e_name][self.e_windowsize[e_name]]
+499                x_help = np.arange(2 * self.tau_exp[e_name])
+500                y_help = (x_help + 1) * np.abs(self.e_rho[e_name][self.e_windowsize[e_name] + 1]) * (1 - x_help / (2 * (2 * self.tau_exp[e_name] - 1))) + base
+501                x_arr = np.arange(self.e_windowsize[e_name] + 1, self.e_windowsize[e_name] + 1 + 2 * self.tau_exp[e_name])
+502                plt.plot(x_arr, y_help, 'C' + str(e), linewidth=1, ls='--', marker=',')
+503                plt.errorbar([self.e_windowsize[e_name] + 2 * self.tau_exp[e_name]], [self.e_tauint[e_name]],
+504                             yerr=[self.e_dtauint[e_name]], fmt='C' + str(e), linewidth=1, capsize=2, marker='o', mfc=plt.rcParams['axes.facecolor'])
+505                xmax = self.e_windowsize[e_name] + 2 * self.tau_exp[e_name] + 1.5
+506                label = e_name + r', $\tau_\mathrm{exp}$=' + str(np.around(self.tau_exp[e_name], decimals=2))
+507            else:
+508                label = e_name + ', S=' + str(np.around(self.S[e_name], decimals=2))
+509                xmax = max(10.5, 2 * self.e_windowsize[e_name] - 0.5)
+510
+511            plt.errorbar(np.arange(length)[:int(xmax) + 1], self.e_n_tauint[e_name][:int(xmax) + 1], yerr=self.e_n_dtauint[e_name][:int(xmax) + 1], linewidth=1, capsize=2, label=label)
+512            plt.axvline(x=self.e_windowsize[e_name], color='C' + str(e), alpha=0.5, marker=',', ls='--')
+513            plt.legend()
+514            plt.xlim(-0.5, xmax)
+515            ylim = plt.ylim()
+516            plt.ylim(bottom=0.0, top=max(1.0, ylim[1]))
+517            plt.draw()
+518            if save:
+519                fig.savefig(save + "_" + str(e))
 
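A usage sketch with a toy autocorrelated chain (an AR(1) process; all names are placeholders). When save is given, the figure for the e-th ensemble is written to save + '_' + str(e):

    import numpy as np
    import pyerrors as pe

    rng = np.random.default_rng(42)
    samples = np.zeros(2000)
    for i in range(1, len(samples)):
        samples[i] = 0.9 * samples[i - 1] + rng.normal()  # autocorrelated toy data

    obs = pe.Obs([samples], ['ensemble1'])
    obs.gamma_method(S=2.0)          # standard automatic windowing
    obs.plot_tauint(save='tauint')   # writes e.g. tauint_0.png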
@@ -3336,36 +3375,36 @@ saves the figure to a file named 'save' if.
-
508    def plot_rho(self, save=None):
-509        """Plot the normalized autocorrelation function for each ensemble.
-510
-511        Parameters
-512        ----------
-513        save : str
-514            saves the figure to a file named 'save' if specified.
-515        """
-516        if not hasattr(self, 'e_dvalue'):
-517            raise Exception('Run the gamma method first.')
-518        for e, e_name in enumerate(self.mc_names):
-519            fig = plt.figure()
-520            plt.xlabel('W')
-521            plt.ylabel('rho')
-522            length = int(len(self.e_drho[e_name]))
-523            plt.errorbar(np.arange(length), self.e_rho[e_name][:length], yerr=self.e_drho[e_name][:], linewidth=1, capsize=2)
-524            plt.axvline(x=self.e_windowsize[e_name], color='r', alpha=0.25, ls='--', marker=',')
-525            if self.tau_exp[e_name] > 0:
-526                plt.plot([self.e_windowsize[e_name] + 1, self.e_windowsize[e_name] + 1 + 2 * self.tau_exp[e_name]],
-527                         [self.e_rho[e_name][self.e_windowsize[e_name] + 1], 0], 'k-', lw=1)
-528                xmax = self.e_windowsize[e_name] + 2 * self.tau_exp[e_name] + 1.5
-529                plt.title('Rho ' + e_name + r', tau\_exp=' + str(np.around(self.tau_exp[e_name], decimals=2)))
-530            else:
-531                xmax = max(10.5, 2 * self.e_windowsize[e_name] - 0.5)
-532                plt.title('Rho ' + e_name + ', S=' + str(np.around(self.S[e_name], decimals=2)))
-533            plt.plot([-0.5, xmax], [0, 0], 'k--', lw=1)
-534            plt.xlim(-0.5, xmax)
-535            plt.draw()
-536            if save:
-537                fig.savefig(save + "_" + str(e))
+            
521    def plot_rho(self, save=None):
+522        """Plot the normalized autocorrelation function for each ensemble.
+523
+524        Parameters
+525        ----------
+526        save : str
+527            saves the figure to a file named 'save' if specified.
+528        """
+529        if not hasattr(self, 'e_dvalue'):
+530            raise Exception('Run the gamma method first.')
+531        for e, e_name in enumerate(self.mc_names):
+532            fig = plt.figure()
+533            plt.xlabel('W')
+534            plt.ylabel('rho')
+535            length = int(len(self.e_drho[e_name]))
+536            plt.errorbar(np.arange(length), self.e_rho[e_name][:length], yerr=self.e_drho[e_name][:], linewidth=1, capsize=2)
+537            plt.axvline(x=self.e_windowsize[e_name], color='r', alpha=0.25, ls='--', marker=',')
+538            if self.tau_exp[e_name] > 0:
+539                plt.plot([self.e_windowsize[e_name] + 1, self.e_windowsize[e_name] + 1 + 2 * self.tau_exp[e_name]],
+540                         [self.e_rho[e_name][self.e_windowsize[e_name] + 1], 0], 'k-', lw=1)
+541                xmax = self.e_windowsize[e_name] + 2 * self.tau_exp[e_name] + 1.5
+542                plt.title('Rho ' + e_name + r', tau\_exp=' + str(np.around(self.tau_exp[e_name], decimals=2)))
+543            else:
+544                xmax = max(10.5, 2 * self.e_windowsize[e_name] - 0.5)
+545                plt.title('Rho ' + e_name + ', S=' + str(np.around(self.S[e_name], decimals=2)))
+546            plt.plot([-0.5, xmax], [0, 0], 'k--', lw=1)
+547            plt.xlim(-0.5, xmax)
+548            plt.draw()
+549            if save:
+550                fig.savefig(save + "_" + str(e))
 
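The normalized autocorrelation function can be inspected for both analysis modes; a sketch along the lines of the previous one, this time with an attached exponential tail via the tau_exp parameter:

    import numpy as np
    import pyerrors as pe

    rng = np.random.default_rng(1)
    samples = np.zeros(4000)
    for i in range(1, len(samples)):
        samples[i] = 0.8 * samples[i - 1] + rng.normal()  # toy autocorrelated chain

    obs = pe.Obs([samples], ['ensemble1'])
    obs.gamma_method(tau_exp=10)  # critical slowing down analysis
    obs.plot_rho()                # rho(W) with the attached tail from W_opt onwards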
@@ -3392,27 +3431,27 @@ saves the figure to a file named 'save' if.
-
539    def plot_rep_dist(self):
-540        """Plot replica distribution for each ensemble with more than one replicum."""
-541        if not hasattr(self, 'e_dvalue'):
-542            raise Exception('Run the gamma method first.')
-543        for e, e_name in enumerate(self.mc_names):
-544            if len(self.e_content[e_name]) == 1:
-545                print('No replica distribution for a single replicum (', e_name, ')')
-546                continue
-547            r_length = []
-548            sub_r_mean = 0
-549            for r, r_name in enumerate(self.e_content[e_name]):
-550                r_length.append(len(self.deltas[r_name]))
-551                sub_r_mean += self.shape[r_name] * self.r_values[r_name]
-552            e_N = np.sum(r_length)
-553            sub_r_mean /= e_N
-554            arr = np.zeros(len(self.e_content[e_name]))
-555            for r, r_name in enumerate(self.e_content[e_name]):
-556                arr[r] = (self.r_values[r_name] - sub_r_mean) / (self.e_dvalue[e_name] * np.sqrt(e_N / self.shape[r_name] - 1))
-557            plt.hist(arr, rwidth=0.8, bins=len(self.e_content[e_name]))
-558            plt.title('Replica distribution ' + e_name + ' (mean=0, var=1)')
-559            plt.draw()
+            
552    def plot_rep_dist(self):
+553        """Plot replica distribution for each ensemble with more than one replicum."""
+554        if not hasattr(self, 'e_dvalue'):
+555            raise Exception('Run the gamma method first.')
+556        for e, e_name in enumerate(self.mc_names):
+557            if len(self.e_content[e_name]) == 1:
+558                print('No replica distribution for a single replicum (', e_name, ')')
+559                continue
+560            r_length = []
+561            sub_r_mean = 0
+562            for r, r_name in enumerate(self.e_content[e_name]):
+563                r_length.append(len(self.deltas[r_name]))
+564                sub_r_mean += self.shape[r_name] * self.r_values[r_name]
+565            e_N = np.sum(r_length)
+566            sub_r_mean /= e_N
+567            arr = np.zeros(len(self.e_content[e_name]))
+568            for r, r_name in enumerate(self.e_content[e_name]):
+569                arr[r] = (self.r_values[r_name] - sub_r_mean) / (self.e_dvalue[e_name] * np.sqrt(e_N / self.shape[r_name] - 1))
+570            plt.hist(arr, rwidth=0.8, bins=len(self.e_content[e_name]))
+571            plt.title('Replica distribution ' + e_name + ' (mean=0, var=1)')
+572            plt.draw()
 
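A sketch with two replica of one ensemble (the '|' separator in the replica names follows the pyerrors naming convention; the data are invented):

    import numpy as np
    import pyerrors as pe

    rng = np.random.default_rng(7)
    obs = pe.Obs([rng.normal(0.5, 1.0, 500), rng.normal(0.5, 1.0, 800)],
                 ['ensemble1|r01', 'ensemble1|r02'])
    obs.gamma_method()
    obs.plot_rep_dist()  # histogram of the normalized replica means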
@@ -3432,37 +3471,37 @@ saves the figure to a file named 'save' if.
-
561    def plot_history(self, expand=True):
-562        """Plot derived Monte Carlo history for each ensemble.
-563
-564        Parameters
-565        ----------
-566        expand : bool
-567            show expanded history for irregular Monte Carlo chains (default: True).
-568        """
-569        for e, e_name in enumerate(self.mc_names):
-570            plt.figure()
-571            r_length = []
-572            tmp = []
-573            tmp_expanded = []
-574            for r, r_name in enumerate(self.e_content[e_name]):
-575                tmp.append(self.deltas[r_name] + self.r_values[r_name])
-576                if expand:
-577                    tmp_expanded.append(_expand_deltas(self.deltas[r_name], list(self.idl[r_name]), self.shape[r_name]) + self.r_values[r_name])
-578                    r_length.append(len(tmp_expanded[-1]))
-579                else:
-580                    r_length.append(len(tmp[-1]))
-581            e_N = np.sum(r_length)
-582            x = np.arange(e_N)
-583            y_test = np.concatenate(tmp, axis=0)
-584            if expand:
-585                y = np.concatenate(tmp_expanded, axis=0)
-586            else:
-587                y = y_test
-588            plt.errorbar(x, y, fmt='.', markersize=3)
-589            plt.xlim(-0.5, e_N - 0.5)
-590            plt.title(e_name + f'\nskew: {skew(y_test):.3f} (p={skewtest(y_test).pvalue:.3f}), kurtosis: {kurtosis(y_test):.3f} (p={kurtosistest(y_test).pvalue:.3f})')
-591            plt.draw()
+            
574    def plot_history(self, expand=True):
+575        """Plot derived Monte Carlo history for each ensemble.
+576
+577        Parameters
+578        ----------
+579        expand : bool
+580            show expanded history for irregular Monte Carlo chains (default: True).
+581        """
+582        for e, e_name in enumerate(self.mc_names):
+583            plt.figure()
+584            r_length = []
+585            tmp = []
+586            tmp_expanded = []
+587            for r, r_name in enumerate(self.e_content[e_name]):
+588                tmp.append(self.deltas[r_name] + self.r_values[r_name])
+589                if expand:
+590                    tmp_expanded.append(_expand_deltas(self.deltas[r_name], list(self.idl[r_name]), self.shape[r_name]) + self.r_values[r_name])
+591                    r_length.append(len(tmp_expanded[-1]))
+592                else:
+593                    r_length.append(len(tmp[-1]))
+594            e_N = np.sum(r_length)
+595            x = np.arange(e_N)
+596            y_test = np.concatenate(tmp, axis=0)
+597            if expand:
+598                y = np.concatenate(tmp_expanded, axis=0)
+599            else:
+600                y = y_test
+601            plt.errorbar(x, y, fmt='.', markersize=3)
+602            plt.xlim(-0.5, e_N - 0.5)
+603            plt.title(e_name + f'\nskew: {skew(y_test):.3f} (p={skewtest(y_test).pvalue:.3f}), kurtosis: {kurtosis(y_test):.3f} (p={kurtosistest(y_test).pvalue:.3f})')
+604            plt.draw()
 
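A sketch illustrating the expand flag for a chain that was measured only on every fourth configuration (idl and data invented):

    import numpy as np
    import pyerrors as pe

    rng = np.random.default_rng(3)
    obs = pe.Obs([rng.normal(0.0, 1.0, 250)], ['ensemble1'],
                 idl=[range(0, 1000, 4)])
    obs.plot_history(expand=True)   # gaps filled along the full Monte Carlo time axis
    obs.plot_history(expand=False)  # one point per actual measurement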
@@ -3489,29 +3528,29 @@ show expanded history for irregular Monte Carlo chains (default: True).
-
593    def plot_piechart(self, save=None):
-594        """Plot a pie chart which shows the fractional contribution of each
-595        ensemble to the total error and return a dictionary containing the fractions.
-596
-597        Parameters
-598        ----------
-599        save : str
-600            saves the figure to a file named 'save' if specified.
-601        """
-602        if not hasattr(self, 'e_dvalue'):
-603            raise Exception('Run the gamma method first.')
-604        if np.isclose(0.0, self._dvalue, atol=1e-15):
-605            raise Exception('Error is 0.0')
-606        labels = self.e_names
-607        sizes = [self.e_dvalue[name] ** 2 for name in labels] / self._dvalue ** 2
-608        fig1, ax1 = plt.subplots()
-609        ax1.pie(sizes, labels=labels, startangle=90, normalize=True)
-610        ax1.axis('equal')
-611        plt.draw()
-612        if save:
-613            fig1.savefig(save)
-614
-615        return dict(zip(self.e_names, sizes))
+            
606    def plot_piechart(self, save=None):
+607        """Plot a pie chart which shows the fractional contribution of each
+608        ensemble to the total error and return a dictionary containing the fractions.
+609
+610        Parameters
+611        ----------
+612        save : str
+613            saves the figure to a file named 'save' if specified.
+614        """
+615        if not hasattr(self, 'e_dvalue'):
+616            raise Exception('Run the gamma method first.')
+617        if np.isclose(0.0, self._dvalue, atol=1e-15):
+618            raise Exception('Error is 0.0')
+619        labels = self.e_names
+620        sizes = [self.e_dvalue[name] ** 2 for name in labels] / self._dvalue ** 2
+621        fig1, ax1 = plt.subplots()
+622        ax1.pie(sizes, labels=labels, startangle=90, normalize=True)
+623        ax1.axis('equal')
+624        plt.draw()
+625        if save:
+626            fig1.savefig(save)
+627
+628        return dict(zip(self.e_names, sizes))
 
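A sketch with two toy ensembles; the returned dictionary maps each ensemble name to its fractional contribution to the squared error:

    import numpy as np
    import pyerrors as pe

    rng = np.random.default_rng(5)
    obs_a = pe.Obs([rng.normal(1.0, 0.1, 1000)], ['ensemble1'])
    obs_b = pe.Obs([rng.normal(2.0, 0.5, 1000)], ['ensemble2'])
    total = obs_a + obs_b
    total.gamma_method()
    fractions = total.plot_piechart()  # {'ensemble1': ..., 'ensemble2': ...}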
@@ -3539,34 +3578,34 @@ saves the figure to a file named 'save' if.
-
617    def dump(self, filename, datatype="json.gz", description="", **kwargs):
-618        """Dump the Obs to a file 'filename' in the chosen format.
-619
-620        Parameters
-621        ----------
-622        filename : str
-623            name of the file to be saved.
-624        datatype : str
-625            Format of the exported file. Supported formats include
-626            "json.gz" and "pickle".
-627        description : str
-628            Description for output file, only relevant for json.gz format.
-629        path : str
-630            specifies a custom path for the file (default '.')
-631        """
-632        if 'path' in kwargs:
-633            file_name = kwargs.get('path') + '/' + filename
-634        else:
-635            file_name = filename
-636
-637        if datatype == "json.gz":
-638            from .input.json import dump_to_json
-639            dump_to_json([self], file_name, description=description)
-640        elif datatype == "pickle":
-641            with open(file_name + '.p', 'wb') as fb:
-642                pickle.dump(self, fb)
-643        else:
-644            raise Exception("Unknown datatype " + str(datatype))
+            
630    def dump(self, filename, datatype="json.gz", description="", **kwargs):
+631        """Dump the Obs to a file 'filename' in the chosen format.
+632
+633        Parameters
+634        ----------
+635        filename : str
+636            name of the file to be saved.
+637        datatype : str
+638            Format of the exported file. Supported formats include
+639            "json.gz" and "pickle".
+640        description : str
+641            Description for output file, only relevant for json.gz format.
+642        path : str
+643            specifies a custom path for the file (default '.')
+644        """
+645        if 'path' in kwargs:
+646            file_name = kwargs.get('path') + '/' + filename
+647        else:
+648            file_name = filename
+649
+650        if datatype == "json.gz":
+651            from .input.json import dump_to_json
+652            dump_to_json([self], file_name, description=description)
+653        elif datatype == "pickle":
+654            with open(file_name + '.p', 'wb') as fb:
+655                pickle.dump(self, fb)
+656        else:
+657            raise Exception("Unknown datatype " + str(datatype))
 
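A round-trip sketch for the json.gz format; file and ensemble names are placeholders, and reading the file back assumes the API of the pyerrors input module:

    import numpy as np
    import pyerrors as pe

    obs = pe.Obs([np.random.normal(1.0, 0.1, 1000)], ['ensemble1'])
    obs.dump('my_obs', datatype='json.gz', description='toy observable')
    # writes my_obs.json.gz; load_json recovers the Obs (or a list of Obs)
    reloaded = pe.input.json.load_json('my_obs')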
@@ -3600,31 +3639,31 @@ specifies a custom path for the file (default '.')
-
646    def export_jackknife(self):
-647        """Export jackknife samples from the Obs
-648
-649        Returns
-650        -------
-651        numpy.ndarray
-652            Returns a numpy array of length N + 1 where N is the number of samples
-653            for the given ensemble and replicum. The zeroth entry of the array contains
-654            the mean value of the Obs, entries 1 to N contain the N jackknife samples
-655            derived from the Obs. The current implementation only works for observables
-656            defined on exactly one ensemble and replicum. The derived jackknife samples
-657            should agree with samples from a full jackknife analysis up to O(1/N).
-658        """
-659
-660        if len(self.names) != 1:
-661            raise Exception("'export_jackknife' is only implemented for Obs defined on one ensemble and replicum.")
-662
-663        name = self.names[0]
-664        full_data = self.deltas[name] + self.r_values[name]
-665        n = full_data.size
-666        mean = self.value
-667        tmp_jacks = np.zeros(n + 1)
-668        tmp_jacks[0] = mean
-669        tmp_jacks[1:] = (n * mean - full_data) / (n - 1)
-670        return tmp_jacks
+            
659    def export_jackknife(self):
+660        """Export jackknife samples from the Obs
+661
+662        Returns
+663        -------
+664        numpy.ndarray
+665            Returns a numpy array of length N + 1 where N is the number of samples
+666            for the given ensemble and replicum. The zeroth entry of the array contains
+667            the mean value of the Obs, entries 1 to N contain the N jackknife samples
+668            derived from the Obs. The current implementation only works for observables
+669            defined on exactly one ensemble and replicum. The derived jackknife samples
+670            should agree with samples from a full jackknife analysis up to O(1/N).
+671        """
+672
+673        if len(self.names) != 1:
+674            raise Exception("'export_jackknife' is only implemented for Obs defined on one ensemble and replicum.")
+675
+676        name = self.names[0]
+677        full_data = self.deltas[name] + self.r_values[name]
+678        n = full_data.size
+679        mean = self.value
+680        tmp_jacks = np.zeros(n + 1)
+681        tmp_jacks[0] = mean
+682        tmp_jacks[1:] = (n * mean - full_data) / (n - 1)
+683        return tmp_jacks
 
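For uncorrelated data the jackknife error computed from the exported samples should agree with the gamma-method estimate; a consistency sketch with invented data:

    import numpy as np
    import pyerrors as pe

    obs = pe.Obs([np.random.normal(1.0, 0.3, 500)], ['ensemble1'])
    obs.gamma_method()

    jacks = obs.export_jackknife()
    n = len(jacks) - 1
    jack_err = np.sqrt(n - 1) * np.std(jacks[1:])  # standard jackknife error formula
    print(obs.dvalue, jack_err)  # similar numbers for uncorrelated data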
@@ -3655,8 +3694,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
806    def sqrt(self):
-807        return derived_observable(lambda x, **kwargs: np.sqrt(x[0]), [self], man_grad=[1 / 2 / np.sqrt(self.value)])
+            
819    def sqrt(self):
+820        return derived_observable(lambda x, **kwargs: np.sqrt(x[0]), [self], man_grad=[1 / 2 / np.sqrt(self.value)])
 
@@ -3674,8 +3713,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
809    def log(self):
-810        return derived_observable(lambda x, **kwargs: np.log(x[0]), [self], man_grad=[1 / self.value])
+            
822    def log(self):
+823        return derived_observable(lambda x, **kwargs: np.log(x[0]), [self], man_grad=[1 / self.value])
 
@@ -3693,8 +3732,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
812    def exp(self):
-813        return derived_observable(lambda x, **kwargs: np.exp(x[0]), [self], man_grad=[np.exp(self.value)])
+            
825    def exp(self):
+826        return derived_observable(lambda x, **kwargs: np.exp(x[0]), [self], man_grad=[np.exp(self.value)])
 
@@ -3712,8 +3751,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
815    def sin(self):
-816        return derived_observable(lambda x, **kwargs: np.sin(x[0]), [self], man_grad=[np.cos(self.value)])
+            
828    def sin(self):
+829        return derived_observable(lambda x, **kwargs: np.sin(x[0]), [self], man_grad=[np.cos(self.value)])
 
@@ -3731,8 +3770,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
818    def cos(self):
-819        return derived_observable(lambda x, **kwargs: np.cos(x[0]), [self], man_grad=[-np.sin(self.value)])
+            
831    def cos(self):
+832        return derived_observable(lambda x, **kwargs: np.cos(x[0]), [self], man_grad=[-np.sin(self.value)])
 
@@ -3750,8 +3789,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
821    def tan(self):
-822        return derived_observable(lambda x, **kwargs: np.tan(x[0]), [self], man_grad=[1 / np.cos(self.value) ** 2])
+            
834    def tan(self):
+835        return derived_observable(lambda x, **kwargs: np.tan(x[0]), [self], man_grad=[1 / np.cos(self.value) ** 2])
 
@@ -3769,8 +3808,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
824    def arcsin(self):
-825        return derived_observable(lambda x: anp.arcsin(x[0]), [self])
+            
837    def arcsin(self):
+838        return derived_observable(lambda x: anp.arcsin(x[0]), [self])
 
@@ -3788,8 +3827,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
827    def arccos(self):
-828        return derived_observable(lambda x: anp.arccos(x[0]), [self])
+            
840    def arccos(self):
+841        return derived_observable(lambda x: anp.arccos(x[0]), [self])
 
@@ -3807,8 +3846,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
830    def arctan(self):
-831        return derived_observable(lambda x: anp.arctan(x[0]), [self])
+            
843    def arctan(self):
+844        return derived_observable(lambda x: anp.arctan(x[0]), [self])
 
@@ -3826,8 +3865,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
833    def sinh(self):
-834        return derived_observable(lambda x, **kwargs: np.sinh(x[0]), [self], man_grad=[np.cosh(self.value)])
+            
846    def sinh(self):
+847        return derived_observable(lambda x, **kwargs: np.sinh(x[0]), [self], man_grad=[np.cosh(self.value)])
 
@@ -3845,8 +3884,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
836    def cosh(self):
-837        return derived_observable(lambda x, **kwargs: np.cosh(x[0]), [self], man_grad=[np.sinh(self.value)])
+            
849    def cosh(self):
+850        return derived_observable(lambda x, **kwargs: np.cosh(x[0]), [self], man_grad=[np.sinh(self.value)])
 
@@ -3864,8 +3903,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
839    def tanh(self):
-840        return derived_observable(lambda x, **kwargs: np.tanh(x[0]), [self], man_grad=[1 / np.cosh(self.value) ** 2])
+            
852    def tanh(self):
+853        return derived_observable(lambda x, **kwargs: np.tanh(x[0]), [self], man_grad=[1 / np.cosh(self.value) ** 2])
 
@@ -3883,8 +3922,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
842    def arcsinh(self):
-843        return derived_observable(lambda x: anp.arcsinh(x[0]), [self])
+            
855    def arcsinh(self):
+856        return derived_observable(lambda x: anp.arcsinh(x[0]), [self])
 
@@ -3902,8 +3941,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
845    def arccosh(self):
-846        return derived_observable(lambda x: anp.arccosh(x[0]), [self])
+            
858    def arccosh(self):
+859        return derived_observable(lambda x: anp.arccosh(x[0]), [self])
 
@@ -3921,8 +3960,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
848    def arctanh(self):
-849        return derived_observable(lambda x: anp.arctanh(x[0]), [self])
+            
861    def arctanh(self):
+862        return derived_observable(lambda x: anp.arctanh(x[0]), [self])
 
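All of these one-liners propagate the fluctuations through derived_observable, either with a manually supplied gradient (man_grad) or via autograd (the anp variants). A quick consistency sketch with invented data:

    import numpy as np
    import pyerrors as pe

    obs = pe.Obs([np.random.normal(2.0, 0.1, 1000)], ['ensemble1'])
    back = obs.log().exp()  # exp(log(x)) reproduces the original observable
    obs.gamma_method()
    back.gamma_method()
    print(obs.value - back.value)  # zero up to floating point rounding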
@@ -3941,115 +3980,115 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
852class CObs:
-853    """Class for a complex valued observable."""
-854    __slots__ = ['_real', '_imag', 'tag']
-855
-856    def __init__(self, real, imag=0.0):
-857        self._real = real
-858        self._imag = imag
-859        self.tag = None
-860
-861    @property
-862    def real(self):
-863        return self._real
-864
-865    @property
-866    def imag(self):
-867        return self._imag
+            
865class CObs:
+866    """Class for a complex valued observable."""
+867    __slots__ = ['_real', '_imag', 'tag']
 868
-869    def gamma_method(self, **kwargs):
-870        """Executes the gamma_method for the real and the imaginary part."""
-871        if isinstance(self.real, Obs):
-872            self.real.gamma_method(**kwargs)
-873        if isinstance(self.imag, Obs):
-874            self.imag.gamma_method(**kwargs)
-875
-876    def is_zero(self):
-877        """Checks whether both the real and the imaginary part are zero within machine precision."""
-878        return self.real == 0.0 and self.imag == 0.0
-879
-880    def conjugate(self):
-881        return CObs(self.real, -self.imag)
-882
-883    def __add__(self, other):
-884        if isinstance(other, np.ndarray):
-885            return other + self
-886        elif hasattr(other, 'real') and hasattr(other, 'imag'):
-887            return CObs(self.real + other.real,
-888                        self.imag + other.imag)
-889        else:
-890            return CObs(self.real + other, self.imag)
-891
-892    def __radd__(self, y):
-893        return self + y
-894
-895    def __sub__(self, other):
-896        if isinstance(other, np.ndarray):
-897            return -1 * (other - self)
-898        elif hasattr(other, 'real') and hasattr(other, 'imag'):
-899            return CObs(self.real - other.real, self.imag - other.imag)
-900        else:
-901            return CObs(self.real - other, self.imag)
-902
-903    def __rsub__(self, other):
-904        return -1 * (self - other)
-905
-906    def __mul__(self, other):
-907        if isinstance(other, np.ndarray):
-908            return other * self
-909        elif hasattr(other, 'real') and hasattr(other, 'imag'):
-910            if all(isinstance(i, Obs) for i in [self.real, self.imag, other.real, other.imag]):
-911                return CObs(derived_observable(lambda x, **kwargs: x[0] * x[1] - x[2] * x[3],
-912                                               [self.real, other.real, self.imag, other.imag],
-913                                               man_grad=[other.real.value, self.real.value, -other.imag.value, -self.imag.value]),
-914                            derived_observable(lambda x, **kwargs: x[2] * x[1] + x[0] * x[3],
-915                                               [self.real, other.real, self.imag, other.imag],
-916                                               man_grad=[other.imag.value, self.imag.value, other.real.value, self.real.value]))
-917            elif getattr(other, 'imag', 0) != 0:
-918                return CObs(self.real * other.real - self.imag * other.imag,
-919                            self.imag * other.real + self.real * other.imag)
-920            else:
-921                return CObs(self.real * other.real, self.imag * other.real)
-922        else:
-923            return CObs(self.real * other, self.imag * other)
-924
-925    def __rmul__(self, other):
-926        return self * other
-927
-928    def __truediv__(self, other):
-929        if isinstance(other, np.ndarray):
-930            return 1 / (other / self)
-931        elif hasattr(other, 'real') and hasattr(other, 'imag'):
-932            r = other.real ** 2 + other.imag ** 2
-933            return CObs((self.real * other.real + self.imag * other.imag) / r, (self.imag * other.real - self.real * other.imag) / r)
-934        else:
-935            return CObs(self.real / other, self.imag / other)
-936
-937    def __rtruediv__(self, other):
-938        r = self.real ** 2 + self.imag ** 2
-939        if hasattr(other, 'real') and hasattr(other, 'imag'):
-940            return CObs((self.real * other.real + self.imag * other.imag) / r, (self.real * other.imag - self.imag * other.real) / r)
-941        else:
-942            return CObs(self.real * other / r, -self.imag * other / r)
-943
-944    def __abs__(self):
-945        return np.sqrt(self.real**2 + self.imag**2)
-946
-947    def __pos__(self):
-948        return self
+869    def __init__(self, real, imag=0.0):
+870        self._real = real
+871        self._imag = imag
+872        self.tag = None
+873
+874    @property
+875    def real(self):
+876        return self._real
+877
+878    @property
+879    def imag(self):
+880        return self._imag
+881
+882    def gamma_method(self, **kwargs):
+883        """Executes the gamma_method for the real and the imaginary part."""
+884        if isinstance(self.real, Obs):
+885            self.real.gamma_method(**kwargs)
+886        if isinstance(self.imag, Obs):
+887            self.imag.gamma_method(**kwargs)
+888
+889    def is_zero(self):
+890        """Checks whether both the real and the imaginary part are zero within machine precision."""
+891        return self.real == 0.0 and self.imag == 0.0
+892
+893    def conjugate(self):
+894        return CObs(self.real, -self.imag)
+895
+896    def __add__(self, other):
+897        if isinstance(other, np.ndarray):
+898            return other + self
+899        elif hasattr(other, 'real') and hasattr(other, 'imag'):
+900            return CObs(self.real + other.real,
+901                        self.imag + other.imag)
+902        else:
+903            return CObs(self.real + other, self.imag)
+904
+905    def __radd__(self, y):
+906        return self + y
+907
+908    def __sub__(self, other):
+909        if isinstance(other, np.ndarray):
+910            return -1 * (other - self)
+911        elif hasattr(other, 'real') and hasattr(other, 'imag'):
+912            return CObs(self.real - other.real, self.imag - other.imag)
+913        else:
+914            return CObs(self.real - other, self.imag)
+915
+916    def __rsub__(self, other):
+917        return -1 * (self - other)
+918
+919    def __mul__(self, other):
+920        if isinstance(other, np.ndarray):
+921            return other * self
+922        elif hasattr(other, 'real') and hasattr(other, 'imag'):
+923            if all(isinstance(i, Obs) for i in [self.real, self.imag, other.real, other.imag]):
+924                return CObs(derived_observable(lambda x, **kwargs: x[0] * x[1] - x[2] * x[3],
+925                                               [self.real, other.real, self.imag, other.imag],
+926                                               man_grad=[other.real.value, self.real.value, -other.imag.value, -self.imag.value]),
+927                            derived_observable(lambda x, **kwargs: x[2] * x[1] + x[0] * x[3],
+928                                               [self.real, other.real, self.imag, other.imag],
+929                                               man_grad=[other.imag.value, self.imag.value, other.real.value, self.real.value]))
+930            elif getattr(other, 'imag', 0) != 0:
+931                return CObs(self.real * other.real - self.imag * other.imag,
+932                            self.imag * other.real + self.real * other.imag)
+933            else:
+934                return CObs(self.real * other.real, self.imag * other.real)
+935        else:
+936            return CObs(self.real * other, self.imag * other)
+937
+938    def __rmul__(self, other):
+939        return self * other
+940
+941    def __truediv__(self, other):
+942        if isinstance(other, np.ndarray):
+943            return 1 / (other / self)
+944        elif hasattr(other, 'real') and hasattr(other, 'imag'):
+945            r = other.real ** 2 + other.imag ** 2
+946            return CObs((self.real * other.real + self.imag * other.imag) / r, (self.imag * other.real - self.real * other.imag) / r)
+947        else:
+948            return CObs(self.real / other, self.imag / other)
 949
-950    def __neg__(self):
-951        return -1 * self
-952
-953    def __eq__(self, other):
-954        return self.real == other.real and self.imag == other.imag
-955
-956    def __str__(self):
-957        return '(' + str(self.real) + int(self.imag >= 0.0) * '+' + str(self.imag) + 'j)'
-958
-959    def __repr__(self):
-960        return 'CObs[' + str(self) + ']'
+950    def __rtruediv__(self, other):
+951        r = self.real ** 2 + self.imag ** 2
+952        if hasattr(other, 'real') and hasattr(other, 'imag'):
+953            return CObs((self.real * other.real + self.imag * other.imag) / r, (self.real * other.imag - self.imag * other.real) / r)
+954        else:
+955            return CObs(self.real * other / r, -self.imag * other / r)
+956
+957    def __abs__(self):
+958        return np.sqrt(self.real**2 + self.imag**2)
+959
+960    def __pos__(self):
+961        return self
+962
+963    def __neg__(self):
+964        return -1 * self
+965
+966    def __eq__(self, other):
+967        return self.real == other.real and self.imag == other.imag
+968
+969    def __str__(self):
+970        return '(' + str(self.real) + int(self.imag >= 0.0) * '+' + str(self.imag) + 'j)'
+971
+972    def __repr__(self):
+973        return 'CObs[' + str(self) + ']'
 
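A sketch constructing a complex observable from two real Obs on the same ensemble (toy data); gamma_method then analyses the real and the imaginary part:

    import numpy as np
    import pyerrors as pe

    rng = np.random.default_rng(11)
    re = pe.Obs([rng.normal(1.0, 0.1, 1000)], ['ensemble1'])
    im = pe.Obs([rng.normal(0.5, 0.1, 1000)], ['ensemble1'])

    c = pe.CObs(re, im)
    prod = c * c.conjugate()  # |c|^2, the imaginary part cancels exactly
    prod.gamma_method()
    print(prod.real)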
@@ -4067,10 +4106,10 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
856    def __init__(self, real, imag=0.0):
-857        self._real = real
-858        self._imag = imag
-859        self.tag = None
+            
869    def __init__(self, real, imag=0.0):
+870        self._real = real
+871        self._imag = imag
+872        self.tag = None
 
@@ -4088,12 +4127,12 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
869    def gamma_method(self, **kwargs):
-870        """Executes the gamma_method for the real and the imaginary part."""
-871        if isinstance(self.real, Obs):
-872            self.real.gamma_method(**kwargs)
-873        if isinstance(self.imag, Obs):
-874            self.imag.gamma_method(**kwargs)
+            
882    def gamma_method(self, **kwargs):
+883        """Executes the gamma_method for the real and the imaginary part."""
+884        if isinstance(self.real, Obs):
+885            self.real.gamma_method(**kwargs)
+886        if isinstance(self.imag, Obs):
+887            self.imag.gamma_method(**kwargs)
 
@@ -4113,9 +4152,9 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
876    def is_zero(self):
-877        """Checks whether both the real and the imaginary part are zero within machine precision."""
-878        return self.real == 0.0 and self.imag == 0.0
+            
889    def is_zero(self):
+890        """Checks whether both the real and the imaginary part are zero within machine precision."""
+891        return self.real == 0.0 and self.imag == 0.0
 
@@ -4135,8 +4174,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
880    def conjugate(self):
-881        return CObs(self.real, -self.imag)
+            
893    def conjugate(self):
+894        return CObs(self.real, -self.imag)
 
@@ -4155,184 +4194,184 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
1129def derived_observable(func, data, array_mode=False, **kwargs):
-1130    """Construct a derived Obs according to func(data, **kwargs) using automatic differentiation.
-1131
-1132    Parameters
-1133    ----------
-1134    func : object
-1135        arbitrary function of the form func(data, **kwargs). For the
-1136        automatic differentiation to work, all numpy functions have to have
-1137        the autograd wrapper (use 'import autograd.numpy as anp').
-1138    data : list
-1139        list of Obs, e.g. [obs1, obs2, obs3].
-1140    num_grad : bool
-1141        if True, numerical derivatives are used instead of autograd
-1142        (default False). To control the numerical differentiation the
-1143        kwargs of numdifftools.step_generators.MaxStepGenerator
-1144        can be used.
-1145    man_grad : list
-1146        manually supply a list or an array which contains the jacobian
-1147        of func. Use cautiously; supplying the wrong derivative will
-1148        not be intercepted.
-1149
-1150    Notes
-1151    -----
-1152    For simple mathematical operations it can be practical to use anonymous
-1153    functions. For the ratio of two observables one can e.g. use
-1154
-1155    new_obs = derived_observable(lambda x: x[0] / x[1], [obs1, obs2])
-1156    """
-1157
-1158    data = np.asarray(data)
-1159    raveled_data = data.ravel()
-1160
-1161    # Workaround for matrix operations containing non Obs data
-1162    if not all(isinstance(x, Obs) for x in raveled_data):
-1163        for i in range(len(raveled_data)):
-1164            if isinstance(raveled_data[i], (int, float)):
-1165                raveled_data[i] = cov_Obs(raveled_data[i], 0.0, "###dummy_covobs###")
-1166
-1167    allcov = {}
-1168    for o in raveled_data:
-1169        for name in o.cov_names:
-1170            if name in allcov:
-1171                if not np.allclose(allcov[name], o.covobs[name].cov):
-1172                    raise Exception('Inconsistent covariance matrices for %s!' % (name))
-1173            else:
-1174                allcov[name] = o.covobs[name].cov
-1175
-1176    n_obs = len(raveled_data)
-1177    new_names = sorted(set([y for x in [o.names for o in raveled_data] for y in x]))
-1178    new_cov_names = sorted(set([y for x in [o.cov_names for o in raveled_data] for y in x]))
-1179    new_sample_names = sorted(set(new_names) - set(new_cov_names))
-1180
-1181    is_merged = {name: (len(list(filter(lambda o: o.is_merged.get(name, False) is True, raveled_data))) > 0) for name in new_sample_names}
-1182    reweighted = len(list(filter(lambda o: o.reweighted is True, raveled_data))) > 0
-1183
-1184    if data.ndim == 1:
-1185        values = np.array([o.value for o in data])
-1186    else:
-1187        values = np.vectorize(lambda x: x.value)(data)
+            
1142def derived_observable(func, data, array_mode=False, **kwargs):
+1143    """Construct a derived Obs according to func(data, **kwargs) using automatic differentiation.
+1144
+1145    Parameters
+1146    ----------
+1147    func : object
+1148        arbitrary function of the form func(data, **kwargs). For the
+1149        automatic differentiation to work, all numpy functions have to have
+1150        the autograd wrapper (use 'import autograd.numpy as anp').
+1151    data : list
+1152        list of Obs, e.g. [obs1, obs2, obs3].
+1153    num_grad : bool
+1154        if True, numerical derivatives are used instead of autograd
+1155        (default False). To control the numerical differentiation the
+1156        kwargs of numdifftools.step_generators.MaxStepGenerator
+1157        can be used.
+1158    man_grad : list
+1159        manually supply a list or an array which contains the jacobian
+1160        of func. Use cautiously; supplying the wrong derivative will
+1161        not be intercepted.
+1162
+1163    Notes
+1164    -----
+1165    For simple mathematical operations it can be practical to use anonymous
+1166    functions. For the ratio of two observables one can e.g. use
+1167
+1168    new_obs = derived_observable(lambda x: x[0] / x[1], [obs1, obs2])
+1169    """
+1170
+1171    data = np.asarray(data)
+1172    raveled_data = data.ravel()
+1173
+1174    # Workaround for matrix operations containing non Obs data
+1175    if not all(isinstance(x, Obs) for x in raveled_data):
+1176        for i in range(len(raveled_data)):
+1177            if isinstance(raveled_data[i], (int, float)):
+1178                raveled_data[i] = cov_Obs(raveled_data[i], 0.0, "###dummy_covobs###")
+1179
+1180    allcov = {}
+1181    for o in raveled_data:
+1182        for name in o.cov_names:
+1183            if name in allcov:
+1184                if not np.allclose(allcov[name], o.covobs[name].cov):
+1185                    raise Exception('Inconsistent covariance matrices for %s!' % (name))
+1186            else:
+1187                allcov[name] = o.covobs[name].cov
 1188
-1189    new_values = func(values, **kwargs)
-1190
-1191    multi = int(isinstance(new_values, np.ndarray))
-1192
-1193    new_r_values = {}
-1194    new_idl_d = {}
-1195    for name in new_sample_names:
-1196        idl = []
-1197        tmp_values = np.zeros(n_obs)
-1198        for i, item in enumerate(raveled_data):
-1199            tmp_values[i] = item.r_values.get(name, item.value)
-1200            tmp_idl = item.idl.get(name)
-1201            if tmp_idl is not None:
-1202                idl.append(tmp_idl)
-1203        if multi > 0:
-1204            tmp_values = np.array(tmp_values).reshape(data.shape)
-1205        new_r_values[name] = func(tmp_values, **kwargs)
-1206        new_idl_d[name] = _merge_idx(idl)
-1207        if not is_merged[name]:
-1208            is_merged[name] = (1 != len(set([len(idx) for idx in [*idl, new_idl_d[name]]])))
-1209
-1210    if 'man_grad' in kwargs:
-1211        deriv = np.asarray(kwargs.get('man_grad'))
-1212        if new_values.shape + data.shape != deriv.shape:
-1213            raise Exception('Manual derivative does not have correct shape.')
-1214    elif kwargs.get('num_grad') is True:
-1215        if multi > 0:
-1216            raise Exception('Multi mode currently not supported for numerical derivative')
-1217        options = {
-1218            'base_step': 0.1,
-1219            'step_ratio': 2.5}
-1220        for key in options.keys():
-1221            kwarg = kwargs.get(key)
-1222            if kwarg is not None:
-1223                options[key] = kwarg
-1224        tmp_df = nd.Gradient(func, order=4, **{k: v for k, v in options.items() if v is not None})(values, **kwargs)
-1225        if tmp_df.size == 1:
-1226            deriv = np.array([tmp_df.real])
-1227        else:
-1228            deriv = tmp_df.real
-1229    else:
-1230        deriv = jacobian(func)(values, **kwargs)
-1231
-1232    final_result = np.zeros(new_values.shape, dtype=object)
-1233
-1234    if array_mode is True:
-1235
-1236        class _Zero_grad():
-1237            def __init__(self, N):
-1238                self.grad = np.zeros((N, 1))
-1239
-1240        new_covobs_lengths = dict(set([y for x in [[(n, o.covobs[n].N) for n in o.cov_names] for o in raveled_data] for y in x]))
-1241        d_extracted = {}
-1242        g_extracted = {}
-1243        for name in new_sample_names:
-1244            d_extracted[name] = []
-1245            ens_length = len(new_idl_d[name])
-1246            for i_dat, dat in enumerate(data):
-1247                d_extracted[name].append(np.array([_expand_deltas_for_merge(o.deltas.get(name, np.zeros(ens_length)), o.idl.get(name, new_idl_d[name]), o.shape.get(name, ens_length), new_idl_d[name]) for o in dat.reshape(np.prod(dat.shape))]).reshape(dat.shape + (ens_length, )))
-1248        for name in new_cov_names:
-1249            g_extracted[name] = []
-1250            zero_grad = _Zero_grad(new_covobs_lengths[name])
-1251            for i_dat, dat in enumerate(data):
-1252                g_extracted[name].append(np.array([o.covobs.get(name, zero_grad).grad for o in dat.reshape(np.prod(dat.shape))]).reshape(dat.shape + (new_covobs_lengths[name], 1)))
-1253
-1254    for i_val, new_val in np.ndenumerate(new_values):
-1255        new_deltas = {}
-1256        new_grad = {}
-1257        if array_mode is True:
-1258            for name in new_sample_names:
-1259                ens_length = d_extracted[name][0].shape[-1]
-1260                new_deltas[name] = np.zeros(ens_length)
-1261                for i_dat, dat in enumerate(d_extracted[name]):
-1262                    new_deltas[name] += np.tensordot(deriv[i_val + (i_dat, )], dat)
-1263            for name in new_cov_names:
-1264                new_grad[name] = 0
-1265                for i_dat, dat in enumerate(g_extracted[name]):
-1266                    new_grad[name] += np.tensordot(deriv[i_val + (i_dat, )], dat)
-1267        else:
-1268            for j_obs, obs in np.ndenumerate(data):
-1269                for name in obs.names:
-1270                    if name in obs.cov_names:
-1271                        new_grad[name] = new_grad.get(name, 0) + deriv[i_val + j_obs] * obs.covobs[name].grad
-1272                    else:
-1273                        new_deltas[name] = new_deltas.get(name, 0) + deriv[i_val + j_obs] * _expand_deltas_for_merge(obs.deltas[name], obs.idl[name], obs.shape[name], new_idl_d[name])
-1274
-1275        new_covobs = {name: Covobs(0, allcov[name], name, grad=new_grad[name]) for name in new_grad}
-1276
-1277        if not set(new_covobs.keys()).isdisjoint(new_deltas.keys()):
-1278            raise Exception('The same name has been used for deltas and covobs!')
-1279        new_samples = []
-1280        new_means = []
-1281        new_idl = []
-1282        new_names_obs = []
-1283        for name in new_names:
-1284            if name not in new_covobs:
-1285                if is_merged[name]:
-1286                    filtered_deltas, filtered_idl_d = _filter_zeroes(new_deltas[name], new_idl_d[name])
-1287                else:
-1288                    filtered_deltas = new_deltas[name]
-1289                    filtered_idl_d = new_idl_d[name]
-1290
-1291                new_samples.append(filtered_deltas)
-1292                new_idl.append(filtered_idl_d)
-1293                new_means.append(new_r_values[name][i_val])
-1294                new_names_obs.append(name)
-1295        final_result[i_val] = Obs(new_samples, new_names_obs, means=new_means, idl=new_idl)
-1296        for name in new_covobs:
-1297            final_result[i_val].names.append(name)
-1298        final_result[i_val]._covobs = new_covobs
-1299        final_result[i_val]._value = new_val
-1300        final_result[i_val].is_merged = is_merged
-1301        final_result[i_val].reweighted = reweighted
-1302
-1303    if multi == 0:
-1304        final_result = final_result.item()
-1305
-1306    return final_result
+1189    n_obs = len(raveled_data)
+1190    new_names = sorted(set([y for x in [o.names for o in raveled_data] for y in x]))
+1191    new_cov_names = sorted(set([y for x in [o.cov_names for o in raveled_data] for y in x]))
+1192    new_sample_names = sorted(set(new_names) - set(new_cov_names))
+1193
+1194    is_merged = {name: (len(list(filter(lambda o: o.is_merged.get(name, False) is True, raveled_data))) > 0) for name in new_sample_names}
+1195    reweighted = len(list(filter(lambda o: o.reweighted is True, raveled_data))) > 0
+1196
+1197    if data.ndim == 1:
+1198        values = np.array([o.value for o in data])
+1199    else:
+1200        values = np.vectorize(lambda x: x.value)(data)
+1201
+1202    new_values = func(values, **kwargs)
+1203
+1204    multi = int(isinstance(new_values, np.ndarray))
+1205
+1206    new_r_values = {}
+1207    new_idl_d = {}
+1208    for name in new_sample_names:
+1209        idl = []
+1210        tmp_values = np.zeros(n_obs)
+1211        for i, item in enumerate(raveled_data):
+1212            tmp_values[i] = item.r_values.get(name, item.value)
+1213            tmp_idl = item.idl.get(name)
+1214            if tmp_idl is not None:
+1215                idl.append(tmp_idl)
+1216        if multi > 0:
+1217            tmp_values = np.array(tmp_values).reshape(data.shape)
+1218        new_r_values[name] = func(tmp_values, **kwargs)
+1219        new_idl_d[name] = _merge_idx(idl)
+1220        if not is_merged[name]:
+1221            is_merged[name] = (1 != len(set([len(idx) for idx in [*idl, new_idl_d[name]]])))
+1222
+1223    if 'man_grad' in kwargs:
+1224        deriv = np.asarray(kwargs.get('man_grad'))
+1225        if new_values.shape + data.shape != deriv.shape:
+1226            raise Exception('Manual derivative does not have correct shape.')
+1227    elif kwargs.get('num_grad') is True:
+1228        if multi > 0:
+1229            raise Exception('Multi mode currently not supported for numerical derivative')
+1230        options = {
+1231            'base_step': 0.1,
+1232            'step_ratio': 2.5}
+1233        for key in options.keys():
+1234            kwarg = kwargs.get(key)
+1235            if kwarg is not None:
+1236                options[key] = kwarg
+1237        tmp_df = nd.Gradient(func, order=4, **{k: v for k, v in options.items() if v is not None})(values, **kwargs)
+1238        if tmp_df.size == 1:
+1239            deriv = np.array([tmp_df.real])
+1240        else:
+1241            deriv = tmp_df.real
+1242    else:
+1243        deriv = jacobian(func)(values, **kwargs)
+1244
+1245    final_result = np.zeros(new_values.shape, dtype=object)
+1246
+1247    if array_mode is True:
+1248
+1249        class _Zero_grad():
+1250            def __init__(self, N):
+1251                self.grad = np.zeros((N, 1))
+1252
+1253        new_covobs_lengths = dict(set([y for x in [[(n, o.covobs[n].N) for n in o.cov_names] for o in raveled_data] for y in x]))
+1254        d_extracted = {}
+1255        g_extracted = {}
+1256        for name in new_sample_names:
+1257            d_extracted[name] = []
+1258            ens_length = len(new_idl_d[name])
+1259            for i_dat, dat in enumerate(data):
+1260                d_extracted[name].append(np.array([_expand_deltas_for_merge(o.deltas.get(name, np.zeros(ens_length)), o.idl.get(name, new_idl_d[name]), o.shape.get(name, ens_length), new_idl_d[name]) for o in dat.reshape(np.prod(dat.shape))]).reshape(dat.shape + (ens_length, )))
+1261        for name in new_cov_names:
+1262            g_extracted[name] = []
+1263            zero_grad = _Zero_grad(new_covobs_lengths[name])
+1264            for i_dat, dat in enumerate(data):
+1265                g_extracted[name].append(np.array([o.covobs.get(name, zero_grad).grad for o in dat.reshape(np.prod(dat.shape))]).reshape(dat.shape + (new_covobs_lengths[name], 1)))
+1266
+1267    for i_val, new_val in np.ndenumerate(new_values):
+1268        new_deltas = {}
+1269        new_grad = {}
+1270        if array_mode is True:
+1271            for name in new_sample_names:
+1272                ens_length = d_extracted[name][0].shape[-1]
+1273                new_deltas[name] = np.zeros(ens_length)
+1274                for i_dat, dat in enumerate(d_extracted[name]):
+1275                    new_deltas[name] += np.tensordot(deriv[i_val + (i_dat, )], dat)
+1276            for name in new_cov_names:
+1277                new_grad[name] = 0
+1278                for i_dat, dat in enumerate(g_extracted[name]):
+1279                    new_grad[name] += np.tensordot(deriv[i_val + (i_dat, )], dat)
+1280        else:
+1281            for j_obs, obs in np.ndenumerate(data):
+1282                for name in obs.names:
+1283                    if name in obs.cov_names:
+1284                        new_grad[name] = new_grad.get(name, 0) + deriv[i_val + j_obs] * obs.covobs[name].grad
+1285                    else:
+1286                        new_deltas[name] = new_deltas.get(name, 0) + deriv[i_val + j_obs] * _expand_deltas_for_merge(obs.deltas[name], obs.idl[name], obs.shape[name], new_idl_d[name])
+1287
+1288        new_covobs = {name: Covobs(0, allcov[name], name, grad=new_grad[name]) for name in new_grad}
+1289
+1290        if not set(new_covobs.keys()).isdisjoint(new_deltas.keys()):
+1291            raise Exception('The same name has been used for deltas and covobs!')
+1292        new_samples = []
+1293        new_means = []
+1294        new_idl = []
+1295        new_names_obs = []
+1296        for name in new_names:
+1297            if name not in new_covobs:
+1298                if is_merged[name]:
+1299                    filtered_deltas, filtered_idl_d = _filter_zeroes(new_deltas[name], new_idl_d[name])
+1300                else:
+1301                    filtered_deltas = new_deltas[name]
+1302                    filtered_idl_d = new_idl_d[name]
+1303
+1304                new_samples.append(filtered_deltas)
+1305                new_idl.append(filtered_idl_d)
+1306                new_means.append(new_r_values[name][i_val])
+1307                new_names_obs.append(name)
+1308        final_result[i_val] = Obs(new_samples, new_names_obs, means=new_means, idl=new_idl)
+1309        for name in new_covobs:
+1310            final_result[i_val].names.append(name)
+1311        final_result[i_val]._covobs = new_covobs
+1312        final_result[i_val]._value = new_val
+1313        final_result[i_val].is_merged = is_merged
+1314        final_result[i_val].reweighted = reweighted
+1315
+1316    if multi == 0:
+1317        final_result = final_result.item()
+1318
+1319    return final_result
 
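Beyond the ratio example from the docstring, any autograd-compatible function works; a sketch with invented data (assuming derived_observable is exposed at package level, as in pyerrors):

    import numpy as np
    import autograd.numpy as anp  # autograd-wrapped numpy, needed for the jacobian
    import pyerrors as pe

    rng = np.random.default_rng(13)
    obs1 = pe.Obs([rng.normal(2.0, 0.2, 1000)], ['ensemble1'])
    obs2 = pe.Obs([rng.normal(4.0, 0.2, 1000)], ['ensemble1'])

    ratio = pe.derived_observable(lambda x: x[0] / x[1], [obs1, obs2])
    log_sum = pe.derived_observable(lambda x: anp.log(x[0] + x[1]), [obs1, obs2])
    ratio.gamma_method()
    log_sum.gamma_method()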
@@ -4379,47 +4418,47 @@ functions. For the ratio of two observables one can e.g. use

-
1346def reweight(weight, obs, **kwargs):
-1347    """Reweight a list of observables.
-1348
-1349    Parameters
-1350    ----------
-1351    weight : Obs
-1352        Reweighting factor. An Observable that has to be defined on a superset of the
-1353        configurations in obs[i].idl for all i.
-1354    obs : list
-1355        list of Obs, e.g. [obs1, obs2, obs3].
-1356    all_configs : bool
-1357        if True, the reweighted observables are normalized by the average of
-1358        the reweighting factor on all configurations in weight.idl and not
-1359        on the configurations in obs[i].idl. Default False.
-1360    """
-1361    result = []
-1362    for i in range(len(obs)):
-1363        if len(obs[i].cov_names):
-1364            raise Exception('Error: Not possible to reweight an Obs that contains covobs!')
-1365        if not set(obs[i].names).issubset(weight.names):
-1366            raise Exception('Error: Ensembles do not fit')
-1367        for name in obs[i].names:
-1368            if not set(obs[i].idl[name]).issubset(weight.idl[name]):
-1369                raise Exception('obs[%d] has to be defined on a subset of the configs in weight.idl[%s]!' % (i, name))
-1370        new_samples = []
-1371        w_deltas = {}
-1372        for name in sorted(obs[i].names):
-1373            w_deltas[name] = _reduce_deltas(weight.deltas[name], weight.idl[name], obs[i].idl[name])
-1374            new_samples.append((w_deltas[name] + weight.r_values[name]) * (obs[i].deltas[name] + obs[i].r_values[name]))
-1375        tmp_obs = Obs(new_samples, sorted(obs[i].names), idl=[obs[i].idl[name] for name in sorted(obs[i].names)])
-1376
-1377        if kwargs.get('all_configs'):
-1378            new_weight = weight
-1379        else:
-1380            new_weight = Obs([w_deltas[name] + weight.r_values[name] for name in sorted(obs[i].names)], sorted(obs[i].names), idl=[obs[i].idl[name] for name in sorted(obs[i].names)])
-1381
-1382        result.append(tmp_obs / new_weight)
-1383        result[-1].reweighted = True
-1384        result[-1].is_merged = obs[i].is_merged
-1385
-1386    return result
+            
1359def reweight(weight, obs, **kwargs):
+1360    """Reweight a list of observables.
+1361
+1362    Parameters
+1363    ----------
+1364    weight : Obs
+1365        Reweighting factor. An Observable that has to be defined on a superset of the
+1366        configurations in obs[i].idl for all i.
+1367    obs : list
+1368        list of Obs, e.g. [obs1, obs2, obs3].
+1369    all_configs : bool
+1370        if True, the reweighted observables are normalized by the average of
+1371        the reweighting factor on all configurations in weight.idl and not
+1372        on the configurations in obs[i].idl. Default False.
+1373    """
+1374    result = []
+1375    for i in range(len(obs)):
+1376        if len(obs[i].cov_names):
+1377            raise Exception('Error: Not possible to reweight an Obs that contains covobs!')
+1378        if not set(obs[i].names).issubset(weight.names):
+1379            raise Exception('Error: Ensembles do not fit')
+1380        for name in obs[i].names:
+1381            if not set(obs[i].idl[name]).issubset(weight.idl[name]):
+1382                raise Exception('obs[%d] has to be defined on a subset of the configs in weight.idl[%s]!' % (i, name))
+1383        new_samples = []
+1384        w_deltas = {}
+1385        for name in sorted(obs[i].names):
+1386            w_deltas[name] = _reduce_deltas(weight.deltas[name], weight.idl[name], obs[i].idl[name])
+1387            new_samples.append((w_deltas[name] + weight.r_values[name]) * (obs[i].deltas[name] + obs[i].r_values[name]))
+1388        tmp_obs = Obs(new_samples, sorted(obs[i].names), idl=[obs[i].idl[name] for name in sorted(obs[i].names)])
+1389
+1390        if kwargs.get('all_configs'):
+1391            new_weight = weight
+1392        else:
+1393            new_weight = Obs([w_deltas[name] + weight.r_values[name] for name in sorted(obs[i].names)], sorted(obs[i].names), idl=[obs[i].idl[name] for name in sorted(obs[i].names)])
+1394
+1395        result.append(tmp_obs / new_weight)
+1396        result[-1].reweighted = True
+1397        result[-1].is_merged = obs[i].is_merged
+1398
+1399    return result
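
For orientation, a minimal usage sketch of reweight, assuming the usual top-level re-exports (import pyerrors as pe); the ensemble name 'ens1' and the synthetic data are invented for illustration:

    import numpy as np
    import pyerrors as pe

    rng = np.random.default_rng(7)
    N = 1000

    # Primary observable and reweighting factor measured on the same configurations
    obs1 = pe.Obs([rng.normal(1.0, 0.1, N)], ['ens1'])
    w = pe.Obs([np.exp(rng.normal(0.0, 0.05, N))], ['ens1'])

    [obs1_rw] = pe.reweight(w, [obs1])  # <w * obs1> / <w>, configuration by configuration
    obs1_rw.gamma_method()
    print(obs1_rw.reweighted)           # True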
 
@@ -4453,48 +4492,48 @@ on the configurations in obs[i].idl. Default False.
-
1389def correlate(obs_a, obs_b):
-1390    """Correlate two observables.
-1391
-1392    Parameters
-1393    ----------
-1394    obs_a : Obs
-1395        First observable
-1396    obs_b : Obs
-1397        Second observable
-1398
-1399    Notes
-1400    -----
-1401    Keep in mind to only correlate primary observables which have not been reweighted
-1402    yet. The reweighting has to be applied after correlating the observables.
-1403    Currently only works if ensembles are identical (this is not strictly necessary).
-1404    """
-1405
-1406    if sorted(obs_a.names) != sorted(obs_b.names):
-1407        raise Exception('Ensembles do not fit')
-1408    if len(obs_a.cov_names) or len(obs_b.cov_names):
-1409        raise Exception('Error: Not possible to correlate Obs that contain covobs!')
-1410    for name in obs_a.names:
-1411        if obs_a.shape[name] != obs_b.shape[name]:
-1412            raise Exception('Shapes of ensemble', name, 'do not fit')
-1413        if obs_a.idl[name] != obs_b.idl[name]:
-1414            raise Exception('idl of ensemble', name, 'do not fit')
-1415
-1416    if obs_a.reweighted is True:
-1417        warnings.warn("The first observable is already reweighted.", RuntimeWarning)
-1418    if obs_b.reweighted is True:
-1419        warnings.warn("The second observable is already reweighted.", RuntimeWarning)
-1420
-1421    new_samples = []
-1422    new_idl = []
-1423    for name in sorted(obs_a.names):
-1424        new_samples.append((obs_a.deltas[name] + obs_a.r_values[name]) * (obs_b.deltas[name] + obs_b.r_values[name]))
-1425        new_idl.append(obs_a.idl[name])
-1426
-1427    o = Obs(new_samples, sorted(obs_a.names), idl=new_idl)
-1428    o.is_merged = {name: (obs_a.is_merged.get(name, False) or obs_b.is_merged.get(name, False)) for name in o.names}
-1429    o.reweighted = obs_a.reweighted or obs_b.reweighted
-1430    return o
+            
1402def correlate(obs_a, obs_b):
+1403    """Correlate two observables.
+1404
+1405    Parameters
+1406    ----------
+1407    obs_a : Obs
+1408        First observable
+1409    obs_b : Obs
+1410        Second observable
+1411
+1412    Notes
+1413    -----
+1414    Keep in mind to correlate only primary observables which have not been reweighted
+1415    yet; the reweighting has to be applied after correlating the observables.
+1416    Currently this only works if the ensembles are identical (this is not strictly necessary).
+1417    """
+1418
+1419    if sorted(obs_a.names) != sorted(obs_b.names):
+1420        raise Exception('Ensembles do not fit')
+1421    if len(obs_a.cov_names) or len(obs_b.cov_names):
+1422        raise Exception('Error: Not possible to correlate Obs that contain covobs!')
+1423    for name in obs_a.names:
+1424        if obs_a.shape[name] != obs_b.shape[name]:
+1425            raise Exception('Shapes of ensemble', name, 'do not fit')
+1426        if obs_a.idl[name] != obs_b.idl[name]:
+1427            raise Exception('idl of ensemble', name, 'do not fit')
+1428
+1429    if obs_a.reweighted is True:
+1430        warnings.warn("The first observable is already reweighted.", RuntimeWarning)
+1431    if obs_b.reweighted is True:
+1432        warnings.warn("The second observable is already reweighted.", RuntimeWarning)
+1433
+1434    new_samples = []
+1435    new_idl = []
+1436    for name in sorted(obs_a.names):
+1437        new_samples.append((obs_a.deltas[name] + obs_a.r_values[name]) * (obs_b.deltas[name] + obs_b.r_values[name]))
+1438        new_idl.append(obs_a.idl[name])
+1439
+1440    o = Obs(new_samples, sorted(obs_a.names), idl=new_idl)
+1441    o.is_merged = {name: (obs_a.is_merged.get(name, False) or obs_b.is_merged.get(name, False)) for name in o.names}
+1442    o.reweighted = obs_a.reweighted or obs_b.reweighted
+1443    return o
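
A minimal sketch of correlate under the same assumptions (pe re-exports, invented ensemble name and data); the returned Obs carries the per-configuration products:

    import numpy as np
    import pyerrors as pe

    rng = np.random.default_rng(7)
    N = 1000
    common = rng.normal(0.0, 1.0, N)

    # Two primary observables with correlated fluctuations on one ensemble
    obs_a = pe.Obs([1.0 + 0.1 * common], ['ens1'])
    obs_b = pe.Obs([2.0 + 0.1 * common + rng.normal(0.0, 0.05, N)], ['ens1'])

    ab = pe.correlate(obs_a, obs_b)  # samples are the products a_i * b_i
    ab.gamma_method()
    print(ab)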
 
@@ -4529,71 +4568,71 @@ Currently only works if ensembles are identical (this is not strictly necessary)
-
1433def covariance(obs, visualize=False, correlation=False, smooth=None, **kwargs):
-1434    r'''Calculates the error covariance matrix of a set of observables.
-1435
-1436    The gamma method has to be applied first to all observables.
-1437
-1438    Parameters
-1439    ----------
-1440    obs : list or numpy.ndarray
-1441        List or one dimensional array of Obs
-1442    visualize : bool
-1443        If True plots the corresponding normalized correlation matrix (default False).
-1444    correlation : bool
-1445        If True the correlation matrix instead of the error covariance matrix is returned (default False).
-1446    smooth : None or int
-1447        If smooth is an integer 'E' between 2 and the dimension of the matrix minus 1 the eigenvalue
-1448        smoothing procedure of hep-lat/9412087 is applied to the correlation matrix which leaves the
-1449        largest E eigenvalues essentially unchanged and smoothes the smaller eigenvalues to avoid extremely
-1450        small ones.
-1451
-1452    Notes
-1453    -----
-1454    The error covariance is defined such that it agrees with the squared standard error for two identical observables
-1455    $$\operatorname{cov}(a,a)=\sum_{s=1}^N\delta_a^s\delta_a^s/N^2=\Gamma_{aa}(0)/N=\operatorname{var}(a)/N=\sigma_a^2$$
-1456    in the absence of autocorrelation.
-1457    The error covariance is estimated by calculating the correlation matrix assuming no autocorrelation and then rescaling the correlation matrix by the full errors including the previous gamma method estimate for the autocorrelation of the observables. The covariance at windowsize 0 is guaranteed to be positive semi-definite
-1458    $$\sum_{i,j}v_i\Gamma_{ij}(0)v_j=\frac{1}{N}\sum_{s=1}^N\sum_{i,j}v_i\delta_i^s\delta_j^s v_j=\frac{1}{N}\sum_{s=1}^N\sum_{i}|v_i\delta_i^s|^2\geq 0\,,$$ for every $v\in\mathbb{R}^M$, while such an identity does not hold for larger windows/lags.
-1459    For observables defined on a single ensemble our approximation is equivalent to assuming that the integrated autocorrelation time of an off-diagonal element is equal to the geometric mean of the integrated autocorrelation times of the corresponding diagonal elements.
-1460    $$\tau_{\mathrm{int}, ij}=\sqrt{\tau_{\mathrm{int}, i}\times \tau_{\mathrm{int}, j}}$$
-1461    This construction ensures that the estimated covariance matrix is positive semi-definite (up to numerical rounding errors).
-1462    '''
-1463
-1464    length = len(obs)
-1465
-1466    max_samples = np.max([o.N for o in obs])
-1467    if max_samples <= length and not [item for sublist in [o.cov_names for o in obs] for item in sublist]:
-1468        warnings.warn(f"The dimension of the covariance matrix ({length}) is larger or equal to the number of samples ({max_samples}). This will result in a rank deficient matrix.", RuntimeWarning)
-1469
-1470    cov = np.zeros((length, length))
-1471    for i in range(length):
-1472        for j in range(i, length):
-1473            cov[i, j] = _covariance_element(obs[i], obs[j])
-1474    cov = cov + cov.T - np.diag(np.diag(cov))
-1475
-1476    corr = np.diag(1 / np.sqrt(np.diag(cov))) @ cov @ np.diag(1 / np.sqrt(np.diag(cov)))
-1477
-1478    if isinstance(smooth, int):
-1479        corr = _smooth_eigenvalues(corr, smooth)
-1480
-1481    if visualize:
-1482        plt.matshow(corr, vmin=-1, vmax=1)
-1483        plt.set_cmap('RdBu')
-1484        plt.colorbar()
-1485        plt.draw()
-1486
-1487    if correlation is True:
-1488        return corr
-1489
-1490    errors = [o.dvalue for o in obs]
-1491    cov = np.diag(errors) @ corr @ np.diag(errors)
-1492
-1493    eigenvalues = np.linalg.eigh(cov)[0]
-1494    if not np.all(eigenvalues >= 0):
-1495        warnings.warn("Covariance matrix is not positive semi-definite (Eigenvalues: " + str(eigenvalues) + ")", RuntimeWarning)
-1496
-1497    return cov
+            
1446def covariance(obs, visualize=False, correlation=False, smooth=None, **kwargs):
+1447    r'''Calculates the error covariance matrix of a set of observables.
+1448
+1449    The gamma method has to be applied first to all observables.
+1450
+1451    Parameters
+1452    ----------
+1453    obs : list or numpy.ndarray
+1454        List or one dimensional array of Obs
+1455    visualize : bool
+1456        If True plots the corresponding normalized correlation matrix (default False).
+1457    correlation : bool
+1458        If True the correlation matrix instead of the error covariance matrix is returned (default False).
+1459    smooth : None or int
+1460        If smooth is an integer 'E' between 2 and the dimension of the matrix minus 1, the eigenvalue
+1461        smoothing procedure of hep-lat/9412087 is applied to the correlation matrix. It leaves the
+1462        largest E eigenvalues essentially unchanged and smooths the smaller eigenvalues to avoid extremely
+1463        small ones.
+1464
+1465    Notes
+1466    -----
+1467    The error covariance is defined such that it agrees with the squared standard error for two identical observables
+1468    $$\operatorname{cov}(a,a)=\sum_{s=1}^N\delta_a^s\delta_a^s/N^2=\Gamma_{aa}(0)/N=\operatorname{var}(a)/N=\sigma_a^2$$
+1469    in the absence of autocorrelation.
+1470    The error covariance is estimated by calculating the correlation matrix assuming no autocorrelation and then rescaling this matrix by the full errors, which include the previous gamma method estimate for the autocorrelation of the observables. The covariance at window size 0 is guaranteed to be positive semi-definite
+1471    $$\sum_{i,j}v_i\Gamma_{ij}(0)v_j=\frac{1}{N}\sum_{s=1}^N\sum_{i,j}v_i\delta_i^s\delta_j^s v_j=\frac{1}{N}\sum_{s=1}^N\sum_{i}|v_i\delta_i^s|^2\geq 0\,,$$ for every $v\in\mathbb{R}^M$, while such an identity does not hold for larger windows/lags.
+1472    For observables defined on a single ensemble our approximation is equivalent to assuming that the integrated autocorrelation time of an off-diagonal element is equal to the geometric mean of the integrated autocorrelation times of the corresponding diagonal elements.
+1473    $$\tau_{\mathrm{int}, ij}=\sqrt{\tau_{\mathrm{int}, i}\times \tau_{\mathrm{int}, j}}$$
+1474    This construction ensures that the estimated covariance matrix is positive semi-definite (up to numerical rounding errors).
+1475    '''
+1476
+1477    length = len(obs)
+1478
+1479    max_samples = np.max([o.N for o in obs])
+1480    if max_samples <= length and not [item for sublist in [o.cov_names for o in obs] for item in sublist]:
+1481        warnings.warn(f"The dimension of the covariance matrix ({length}) is larger than or equal to the number of samples ({max_samples}). This will result in a rank deficient matrix.", RuntimeWarning)
+1482
+1483    cov = np.zeros((length, length))
+1484    for i in range(length):
+1485        for j in range(i, length):
+1486            cov[i, j] = _covariance_element(obs[i], obs[j])
+1487    cov = cov + cov.T - np.diag(np.diag(cov))
+1488
+1489    corr = np.diag(1 / np.sqrt(np.diag(cov))) @ cov @ np.diag(1 / np.sqrt(np.diag(cov)))
+1490
+1491    if isinstance(smooth, int):
+1492        corr = _smooth_eigenvalues(corr, smooth)
+1493
+1494    if visualize:
+1495        plt.matshow(corr, vmin=-1, vmax=1)
+1496        plt.set_cmap('RdBu')
+1497        plt.colorbar()
+1498        plt.draw()
+1499
+1500    if correlation is True:
+1501        return corr
+1502
+1503    errors = [o.dvalue for o in obs]
+1504    cov = np.diag(errors) @ corr @ np.diag(errors)
+1505
+1506    eigenvalues = np.linalg.eigh(cov)[0]
+1507    if not np.all(eigenvalues >= 0):
+1508        warnings.warn("Covariance matrix is not positive semi-definite (Eigenvalues: " + str(eigenvalues) + ")", RuntimeWarning)
+1509
+1510    return cov
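
A usage sketch under the same assumptions as above; note that gamma_method has to be called on every entry before covariance is evaluated:

    import numpy as np
    import pyerrors as pe

    rng = np.random.default_rng(7)
    N = 500
    base = rng.normal(0.0, 1.0, N)

    # Two observables sharing a common fluctuation -> non-trivial covariance
    o1 = pe.Obs([1.0 + 0.3 * base + rng.normal(0.0, 0.2, N)], ['ens1'])
    o2 = pe.Obs([0.5 - 0.3 * base + rng.normal(0.0, 0.2, N)], ['ens1'])
    for o in (o1, o2):
        o.gamma_method()  # required before calling covariance

    cov = pe.covariance([o1, o2])                     # 2x2 error covariance
    corr = pe.covariance([o1, o2], correlation=True)  # unit diagonal, off-diagonal in [-1, 1]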
 
@@ -4642,24 +4681,24 @@ This construction ensures that the estimated covariance matrix is positive semi-
-
1577def import_jackknife(jacks, name, idl=None):
-1578    """Imports jackknife samples and returns an Obs
-1579
-1580    Parameters
-1581    ----------
-1582    jacks : numpy.ndarray
-1583        numpy array containing the mean value as zeroth entry and
-1584        the N jackknife samples as first to Nth entry.
-1585    name : str
-1586        name of the ensemble the samples are defined on.
-1587    """
-1588    length = len(jacks) - 1
-1589    prj = (np.ones((length, length)) - (length - 1) * np.identity(length))
-1590    samples = jacks[1:] @ prj
-1591    mean = np.mean(samples)
-1592    new_obs = Obs([samples - mean], [name], idl=idl, means=[mean])
-1593    new_obs._value = jacks[0]
-1594    return new_obs
+            
1590def import_jackknife(jacks, name, idl=None):
+1591    """Imports jackknife samples and returns an Obs
+1592
+1593    Parameters
+1594    ----------
+1595    jacks : numpy.ndarray
+1596        numpy array containing the mean value as zeroth entry and
+1597        the N jackknife samples as first to Nth entry.
+1598    name : str
+1599        name of the ensemble the samples are defined on.
+1600    """
+1601    length = len(jacks) - 1
+1602    prj = (np.ones((length, length)) - (length - 1) * np.identity(length))
+1603    samples = jacks[1:] @ prj
+1604    mean = np.mean(samples)
+1605    new_obs = Obs([samples - mean], [name], idl=idl, means=[mean])
+1606    new_obs._value = jacks[0]
+1607    return new_obs
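
One way to exercise import_jackknife is a round trip through the companion method Obs.export_jackknife, which in recent pyerrors versions produces the [mean, N jackknife samples] layout expected here (ensemble name and data invented; idl is optional and defaults to a contiguous range):

    import numpy as np
    import pyerrors as pe

    rng = np.random.default_rng(7)
    obs = pe.Obs([rng.normal(1.0, 0.1, 100)], ['ens1'])

    jacks = obs.export_jackknife()           # [mean, N jackknife estimates]
    roundtrip = pe.import_jackknife(jacks, 'ens1')
    roundtrip.gamma_method()
    print(roundtrip)                         # reproduces the original central value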
 
@@ -4689,35 +4728,35 @@ name of the ensemble the samples are defined on.
-
1597def merge_obs(list_of_obs):
-1598    """Combine all observables in list_of_obs into one new observable
-1599
-1600    Parameters
-1601    ----------
-1602    list_of_obs : list
-1603        list of the Obs object to be combined
-1604
-1605    Notes
-1606    -----
-1607    It is not possible to combine obs which are based on the same replicum
-1608    """
-1609    replist = [item for obs in list_of_obs for item in obs.names]
-1610    if (len(replist) == len(set(replist))) is False:
-1611        raise Exception('list_of_obs contains duplicate replica: %s' % (str(replist)))
-1612    if any([len(o.cov_names) for o in list_of_obs]):
-1613        raise Exception('Not possible to merge data that contains covobs!')
-1614    new_dict = {}
-1615    idl_dict = {}
-1616    for o in list_of_obs:
-1617        new_dict.update({key: o.deltas.get(key, 0) + o.r_values.get(key, 0)
-1618                        for key in set(o.deltas) | set(o.r_values)})
-1619        idl_dict.update({key: o.idl.get(key, 0) for key in set(o.deltas)})
-1620
-1621    names = sorted(new_dict.keys())
-1622    o = Obs([new_dict[name] for name in names], names, idl=[idl_dict[name] for name in names])
-1623    o.is_merged = {name: np.any([oi.is_merged.get(name, False) for oi in list_of_obs]) for name in o.names}
-1624    o.reweighted = np.max([oi.reweighted for oi in list_of_obs])
-1625    return o
+            
1610def merge_obs(list_of_obs):
+1611    """Combine all observables in list_of_obs into one new observable
+1612
+1613    Parameters
+1614    ----------
+1615    list_of_obs : list
+1616        list of the Obs objects to be combined
+1617
+1618    Notes
+1619    -----
+1620    It is not possible to combine Obs which are based on the same replicum.
+1621    """
+1622    replist = [item for obs in list_of_obs for item in obs.names]
+1623    if (len(replist) == len(set(replist))) is False:
+1624        raise Exception('list_of_obs contains duplicate replica: %s' % (str(replist)))
+1625    if any([len(o.cov_names) for o in list_of_obs]):
+1626        raise Exception('Not possible to merge data that contains covobs!')
+1627    new_dict = {}
+1628    idl_dict = {}
+1629    for o in list_of_obs:
+1630        new_dict.update({key: o.deltas.get(key, 0) + o.r_values.get(key, 0)
+1631                        for key in set(o.deltas) | set(o.r_values)})
+1632        idl_dict.update({key: o.idl.get(key, 0) for key in set(o.deltas)})
+1633
+1634    names = sorted(new_dict.keys())
+1635    o = Obs([new_dict[name] for name in names], names, idl=[idl_dict[name] for name in names])
+1636    o.is_merged = {name: np.any([oi.is_merged.get(name, False) for oi in list_of_obs]) for name in o.names}
+1637    o.reweighted = np.max([oi.reweighted for oi in list_of_obs])
+1638    return o
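
A minimal sketch, assuming the 'ensemble|replicum' naming convention and invented data: two Obs defined on distinct replica of the same ensemble are combined into a single Obs.

    import numpy as np
    import pyerrors as pe

    rng = np.random.default_rng(7)

    # One observable per replicum of the ensemble 'ens1'
    r01 = pe.Obs([rng.normal(1.0, 0.1, 500)], ['ens1|r01'])
    r02 = pe.Obs([rng.normal(1.0, 0.1, 500)], ['ens1|r02'])

    merged = pe.merge_obs([r01, r02])  # one Obs carrying both replica
    merged.gamma_method()
    print(merged)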
 
@@ -4748,47 +4787,47 @@ list of the Obs object to be combined
-
1628def cov_Obs(means, cov, name, grad=None):
-1629    """Create an Obs based on mean(s) and a covariance matrix
-1630
-1631    Parameters
-1632    ----------
-1633    mean : list of floats or float
-1634        N mean value(s) of the new Obs
-1635    cov : list or array
-1636        2d (NxN) Covariance matrix, 1d diagonal entries or 0d covariance
-1637    name : str
-1638        identifier for the covariance matrix
-1639    grad : list or array
-1640        Gradient of the Covobs wrt. the means belonging to cov.
-1641    """
-1642
-1643    def covobs_to_obs(co):
-1644        """Make an Obs out of a Covobs
-1645
-1646        Parameters
-1647        ----------
-1648        co : Covobs
-1649            Covobs to be embedded into the Obs
-1650        """
-1651        o = Obs([], [], means=[])
-1652        o._value = co.value
-1653        o.names.append(co.name)
-1654        o._covobs[co.name] = co
-1655        o._dvalue = np.sqrt(co.errsq())
-1656        return o
-1657
-1658    ol = []
-1659    if isinstance(means, (float, int)):
-1660        means = [means]
-1661
-1662    for i in range(len(means)):
-1663        ol.append(covobs_to_obs(Covobs(means[i], cov, name, pos=i, grad=grad)))
-1664    if ol[0].covobs[name].N != len(means):
-1665        raise Exception('You have to provide %d mean values!' % (ol[0].N))
-1666    if len(ol) == 1:
-1667        return ol[0]
-1668    return ol
+            
1641def cov_Obs(means, cov, name, grad=None):
+1642    """Create an Obs based on mean(s) and a covariance matrix
+1643
+1644    Parameters
+1645    ----------
+1646    means : list of floats or float
+1647        N mean value(s) of the new Obs
+1648    cov : list or array
+1649        2d (NxN) Covariance matrix, 1d diagonal entries or 0d covariance
+1650    name : str
+1651        identifier for the covariance matrix
+1652    grad : list or array
+1653        Gradient of the Covobs with respect to the means belonging to cov.
+1654    """
+1655
+1656    def covobs_to_obs(co):
+1657        """Make an Obs out of a Covobs
+1658
+1659        Parameters
+1660        ----------
+1661        co : Covobs
+1662            Covobs to be embedded into the Obs
+1663        """
+1664        o = Obs([], [], means=[])
+1665        o._value = co.value
+1666        o.names.append(co.name)
+1667        o._covobs[co.name] = co
+1668        o._dvalue = np.sqrt(co.errsq())
+1669        return o
+1670
+1671    ol = []
+1672    if isinstance(means, (float, int)):
+1673        means = [means]
+1674
+1675    for i in range(len(means)):
+1676        ol.append(covobs_to_obs(Covobs(means[i], cov, name, pos=i, grad=grad)))
+1677    if ol[0].covobs[name].N != len(means):
+1678        raise Exception('You have to provide %d mean values!' % (ol[0].covobs[name].N))
+1679    if len(ol) == 1:
+1680        return ol[0]
+1681    return ol
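
A sketch of both calling conventions (the identifiers 'Z_input' and 'pair_input' are hypothetical): a scalar mean with a 0d covariance (the variance), and a list of means with a full covariance matrix.

    import pyerrors as pe

    # Scalar input with a plain Gaussian error, e.g. Z = 0.9148(88)
    z = pe.cov_Obs(0.9148, 0.0088 ** 2, 'Z_input')

    # Two correlated external inputs from a 2x2 covariance matrix
    cov = [[1.0e-4, 0.5e-4], [0.5e-4, 4.0e-4]]
    a, b = pe.cov_Obs([1.02, 2.35], cov, 'pair_input')

    # Covobs propagate through arithmetic like any other Obs
    combo = a * b + z
    combo.gamma_method()
    print(combo)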