diff --git a/docs/pyerrors/obs.html b/docs/pyerrors/obs.html index 9fb087b0..74afdeb6 100644 --- a/docs/pyerrors/obs.html +++ b/docs/pyerrors/obs.html @@ -598,1285 +598,1301 @@ 399 print('Result\t %3.8e +/- %3.8e +/- %3.8e (%3.3f%%)' % (self.value, self._dvalue, self.ddvalue, percentage)) 400 if len(self.e_names) > 1: 401 print(' Ensemble errors:') - 402 for e_name in self.mc_names: - 403 if len(self.e_names) > 1: - 404 print('', e_name, '\t %3.8e +/- %3.8e' % (self.e_dvalue[e_name], self.e_ddvalue[e_name])) - 405 if self.tau_exp[e_name] > 0: - 406 print(' t_int\t %3.8e +/- %3.8e tau_exp = %3.2f, N_sigma = %1.0i' % (self.e_tauint[e_name], self.e_dtauint[e_name], self.tau_exp[e_name], self.N_sigma[e_name])) - 407 else: - 408 print(' t_int\t %3.8e +/- %3.8e S = %3.2f' % (self.e_tauint[e_name], self.e_dtauint[e_name], self.S[e_name])) - 409 for e_name in self.cov_names: - 410 print('', e_name, '\t %3.8e' % (self.e_dvalue[e_name])) - 411 if ens_content is True: - 412 if len(self.e_names) == 1: - 413 print(self.N, 'samples in', len(self.e_names), 'ensemble:') - 414 else: - 415 print(self.N, 'samples in', len(self.e_names), 'ensembles:') - 416 my_string_list = [] - 417 for key, value in sorted(self.e_content.items()): - 418 if key not in self.covobs: - 419 my_string = ' ' + "\u00B7 Ensemble '" + key + "' " - 420 if len(value) == 1: - 421 my_string += f': {self.shape[value[0]]} configurations' - 422 if isinstance(self.idl[value[0]], range): - 423 my_string += f' (from {self.idl[value[0]].start} to {self.idl[value[0]][-1]}' + int(self.idl[value[0]].step != 1) * f' in steps of {self.idl[value[0]].step}' + ')' - 424 else: - 425 my_string += ' (irregular range)' - 426 else: - 427 sublist = [] - 428 for v in value: - 429 my_substring = ' ' + "\u00B7 Replicum '" + v[len(key) + 1:] + "' " - 430 my_substring += f': {self.shape[v]} configurations' - 431 if isinstance(self.idl[v], range): - 432 my_substring += f' (from {self.idl[v].start} to {self.idl[v][-1]}' + int(self.idl[v].step != 1) * f' in steps of {self.idl[v].step}' + ')' - 433 else: - 434 my_substring += ' (irregular range)' - 435 sublist.append(my_substring) - 436 - 437 my_string += '\n' + '\n'.join(sublist) - 438 else: - 439 my_string = ' ' + "\u00B7 Covobs '" + key + "' " - 440 my_string_list.append(my_string) - 441 print('\n'.join(my_string_list)) - 442 - 443 def reweight(self, weight): - 444 """Reweight the obs with given rewighting factors. - 445 - 446 Parameters - 447 ---------- - 448 weight : Obs - 449 Reweighting factor. An Observable that has to be defined on a superset of the - 450 configurations in obs[i].idl for all i. - 451 all_configs : bool - 452 if True, the reweighted observables are normalized by the average of - 453 the reweighting factor on all configurations in weight.idl and not - 454 on the configurations in obs[i].idl. Default False. - 455 """ - 456 return reweight(weight, [self])[0] - 457 - 458 def is_zero_within_error(self, sigma=1): - 459 """Checks whether the observable is zero within 'sigma' standard errors. - 460 - 461 Parameters - 462 ---------- - 463 sigma : int - 464 Number of standard errors used for the check. - 465 - 466 Works only properly when the gamma method was run. - 467 """ - 468 return self.is_zero() or np.abs(self.value) <= sigma * self._dvalue - 469 - 470 def is_zero(self, atol=1e-10): - 471 """Checks whether the observable is zero within a given tolerance. - 472 - 473 Parameters - 474 ---------- - 475 atol : float - 476 Absolute tolerance (for details see numpy documentation). 
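For orientation, a minimal usage sketch of the entry points touched in this hunk (gamma_method, details, is_zero_within_error); the ensemble name 'ens1' and the synthetic data are illustrative only, not part of the patch:

    import numpy as np
    import pyerrors as pe

    samples = np.random.normal(1.0, 0.1, 1000)   # toy Monte Carlo chain
    obs = pe.Obs([samples], ['ens1'])
    obs.gamma_method()                           # error analysis must run before details()
    obs.details()                                # prints value, error and the tau_int summary
    print(obs.is_zero_within_error(sigma=2))     # True if |value| <= 2 * dvalue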
- 477 """
- 478 return np.isclose(0.0, self.value, 1e-14, atol) and all(np.allclose(0.0, delta, 1e-14, atol) for delta in self.deltas.values()) and all(np.allclose(0.0, delta.errsq(), 1e-14, atol) for delta in self.covobs.values())
- 479 
- 480 def plot_tauint(self, save=None):
- 481 """Plot integrated autocorrelation time for each ensemble.
- 482 
- 483 Parameters
- 484 ----------
- 485 save : str
- 486 saves the figure to a file named 'save' if.
- 487 """
- 488 if not hasattr(self, 'e_dvalue'):
- 489 raise Exception('Run the gamma method first.')
+ 402 e_content = self.e_content
+ 403 for e_name in self.mc_names:
+ 404 if isinstance(self.idl[e_content[e_name][0]], range):
+ 405 gap = self.idl[e_content[e_name][0]].step
+ 406 else:
+ 407 gap = np.min(np.diff(self.idl[e_content[e_name][0]]))
+ 408 
+ 409 if len(self.e_names) > 1:
+ 410 print('', e_name, '\t %3.6e +/- %3.6e' % (self.e_dvalue[e_name], self.e_ddvalue[e_name]))
+ 411 tau_string = " \N{GREEK SMALL LETTER TAU}_int\t " + _format_uncertainty(self.e_tauint[e_name], self.e_dtauint[e_name])
+ 412 tau_string += f" in units of {gap} config"
+ 413 if gap > 1:
+ 414 tau_string += "s"
+ 415 if self.tau_exp[e_name] > 0:
+ 416 tau_string = f"{tau_string: <45}" + '\t(\N{GREEK SMALL LETTER TAU}_exp=%3.2f, N_\N{GREEK SMALL LETTER SIGMA}=%1.0i)' % (self.tau_exp[e_name], self.N_sigma[e_name])
+ 417 else:
+ 418 tau_string = f"{tau_string: <45}" + '\t(S=%3.2f)' % (self.S[e_name])
+ 419 print(tau_string)
+ 420 for e_name in self.cov_names:
+ 421 print('', e_name, '\t %3.8e' % (self.e_dvalue[e_name]))
+ 422 if ens_content is True:
+ 423 if len(self.e_names) == 1:
+ 424 print(self.N, 'samples in', len(self.e_names), 'ensemble:')
+ 425 else:
+ 426 print(self.N, 'samples in', len(self.e_names), 'ensembles:')
+ 427 my_string_list = []
+ 428 for key, value in sorted(self.e_content.items()):
+ 429 if key not in self.covobs:
+ 430 my_string = ' ' + "\u00B7 Ensemble '" + key + "' "
+ 431 if len(value) == 1:
+ 432 my_string += f': {self.shape[value[0]]} configurations'
+ 433 if isinstance(self.idl[value[0]], range):
+ 434 my_string += f' (from {self.idl[value[0]].start} to {self.idl[value[0]][-1]}' + int(self.idl[value[0]].step != 1) * f' in steps of {self.idl[value[0]].step}' + ')'
+ 435 else:
+ 436 my_string += f' (irregular range from {self.idl[value[0]][0]} to {self.idl[value[0]][-1]})'
+ 437 else:
+ 438 sublist = []
+ 439 for v in value:
+ 440 my_substring = ' ' + "\u00B7 Replicum '" + v[len(key) + 1:] + "' "
+ 441 my_substring += f': {self.shape[v]} configurations'
+ 442 if isinstance(self.idl[v], range):
+ 443 my_substring += f' (from {self.idl[v].start} to {self.idl[v][-1]}' + int(self.idl[v].step != 1) * f' in steps of {self.idl[v].step}' + ')'
+ 444 else:
+ 445 my_substring += f' (irregular range from {self.idl[v][0]} to {self.idl[v][-1]})'
+ 446 sublist.append(my_substring)
+ 447 
+ 448 my_string += '\n' + '\n'.join(sublist)
+ 449 else:
+ 450 my_string = ' ' + "\u00B7 Covobs '" + key + "' "
+ 451 my_string_list.append(my_string)
+ 452 print('\n'.join(my_string_list))
+ 453 
+ 454 def reweight(self, weight):
+ 455 """Reweight the obs with given reweighting factors.
+ 456 
+ 457 Parameters
+ 458 ----------
+ 459 weight : Obs
+ 460 Reweighting factor. An Observable that has to be defined on a superset of the
+ 461 configurations in obs[i].idl for all i.
+ 462 all_configs : bool + 463 if True, the reweighted observables are normalized by the average of + 464 the reweighting factor on all configurations in weight.idl and not + 465 on the configurations in obs[i].idl. Default False. + 466 """ + 467 return reweight(weight, [self])[0] + 468 + 469 def is_zero_within_error(self, sigma=1): + 470 """Checks whether the observable is zero within 'sigma' standard errors. + 471 + 472 Parameters + 473 ---------- + 474 sigma : int + 475 Number of standard errors used for the check. + 476 + 477 Works only properly when the gamma method was run. + 478 """ + 479 return self.is_zero() or np.abs(self.value) <= sigma * self._dvalue + 480 + 481 def is_zero(self, atol=1e-10): + 482 """Checks whether the observable is zero within a given tolerance. + 483 + 484 Parameters + 485 ---------- + 486 atol : float + 487 Absolute tolerance (for details see numpy documentation). + 488 """ + 489 return np.isclose(0.0, self.value, 1e-14, atol) and all(np.allclose(0.0, delta, 1e-14, atol) for delta in self.deltas.values()) and all(np.allclose(0.0, delta.errsq(), 1e-14, atol) for delta in self.covobs.values()) 490 - 491 for e, e_name in enumerate(self.mc_names): - 492 fig = plt.figure() - 493 plt.xlabel(r'$W$') - 494 plt.ylabel(r'$\tau_\mathrm{int}$') - 495 length = int(len(self.e_n_tauint[e_name])) - 496 if self.tau_exp[e_name] > 0: - 497 base = self.e_n_tauint[e_name][self.e_windowsize[e_name]] - 498 x_help = np.arange(2 * self.tau_exp[e_name]) - 499 y_help = (x_help + 1) * np.abs(self.e_rho[e_name][self.e_windowsize[e_name] + 1]) * (1 - x_help / (2 * (2 * self.tau_exp[e_name] - 1))) + base - 500 x_arr = np.arange(self.e_windowsize[e_name] + 1, self.e_windowsize[e_name] + 1 + 2 * self.tau_exp[e_name]) - 501 plt.plot(x_arr, y_help, 'C' + str(e), linewidth=1, ls='--', marker=',') - 502 plt.errorbar([self.e_windowsize[e_name] + 2 * self.tau_exp[e_name]], [self.e_tauint[e_name]], - 503 yerr=[self.e_dtauint[e_name]], fmt='C' + str(e), linewidth=1, capsize=2, marker='o', mfc=plt.rcParams['axes.facecolor']) - 504 xmax = self.e_windowsize[e_name] + 2 * self.tau_exp[e_name] + 1.5 - 505 label = e_name + r', $\tau_\mathrm{exp}$=' + str(np.around(self.tau_exp[e_name], decimals=2)) - 506 else: - 507 label = e_name + ', S=' + str(np.around(self.S[e_name], decimals=2)) - 508 xmax = max(10.5, 2 * self.e_windowsize[e_name] - 0.5) - 509 - 510 plt.errorbar(np.arange(length)[:int(xmax) + 1], self.e_n_tauint[e_name][:int(xmax) + 1], yerr=self.e_n_dtauint[e_name][:int(xmax) + 1], linewidth=1, capsize=2, label=label) - 511 plt.axvline(x=self.e_windowsize[e_name], color='C' + str(e), alpha=0.5, marker=',', ls='--') - 512 plt.legend() - 513 plt.xlim(-0.5, xmax) - 514 ylim = plt.ylim() - 515 plt.ylim(bottom=0.0, top=max(1.0, ylim[1])) - 516 plt.draw() - 517 if save: - 518 fig.savefig(save + "_" + str(e)) - 519 - 520 def plot_rho(self, save=None): - 521 """Plot normalized autocorrelation function time for each ensemble. - 522 - 523 Parameters - 524 ---------- - 525 save : str - 526 saves the figure to a file named 'save' if. 
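The reweight method shown above simply forwards to the module-level reweight function; a short sketch, assuming a hypothetical reweighting factor w defined on the same configurations as the observable o:

    import numpy as np
    import pyerrors as pe

    w = pe.Obs([np.random.normal(1.0, 0.05, 1000)], ['ens1'])  # toy reweighting factor
    o = pe.Obs([np.random.normal(0.5, 0.20, 1000)], ['ens1'])
    o_rw = o.reweight(w)          # equivalent to pe.reweight(w, [o])[0]
    o_rw.gamma_method()
    print(o_rw.reweighted)        # True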
- 527 """ - 528 if not hasattr(self, 'e_dvalue'): - 529 raise Exception('Run the gamma method first.') - 530 for e, e_name in enumerate(self.mc_names): - 531 fig = plt.figure() - 532 plt.xlabel('W') - 533 plt.ylabel('rho') - 534 length = int(len(self.e_drho[e_name])) - 535 plt.errorbar(np.arange(length), self.e_rho[e_name][:length], yerr=self.e_drho[e_name][:], linewidth=1, capsize=2) - 536 plt.axvline(x=self.e_windowsize[e_name], color='r', alpha=0.25, ls='--', marker=',') - 537 if self.tau_exp[e_name] > 0: - 538 plt.plot([self.e_windowsize[e_name] + 1, self.e_windowsize[e_name] + 1 + 2 * self.tau_exp[e_name]], - 539 [self.e_rho[e_name][self.e_windowsize[e_name] + 1], 0], 'k-', lw=1) - 540 xmax = self.e_windowsize[e_name] + 2 * self.tau_exp[e_name] + 1.5 - 541 plt.title('Rho ' + e_name + r', tau\_exp=' + str(np.around(self.tau_exp[e_name], decimals=2))) - 542 else: - 543 xmax = max(10.5, 2 * self.e_windowsize[e_name] - 0.5) - 544 plt.title('Rho ' + e_name + ', S=' + str(np.around(self.S[e_name], decimals=2))) - 545 plt.plot([-0.5, xmax], [0, 0], 'k--', lw=1) - 546 plt.xlim(-0.5, xmax) - 547 plt.draw() - 548 if save: - 549 fig.savefig(save + "_" + str(e)) - 550 - 551 def plot_rep_dist(self): - 552 """Plot replica distribution for each ensemble with more than one replicum.""" - 553 if not hasattr(self, 'e_dvalue'): - 554 raise Exception('Run the gamma method first.') - 555 for e, e_name in enumerate(self.mc_names): - 556 if len(self.e_content[e_name]) == 1: - 557 print('No replica distribution for a single replicum (', e_name, ')') - 558 continue - 559 r_length = [] - 560 sub_r_mean = 0 - 561 for r, r_name in enumerate(self.e_content[e_name]): - 562 r_length.append(len(self.deltas[r_name])) - 563 sub_r_mean += self.shape[r_name] * self.r_values[r_name] - 564 e_N = np.sum(r_length) - 565 sub_r_mean /= e_N - 566 arr = np.zeros(len(self.e_content[e_name])) - 567 for r, r_name in enumerate(self.e_content[e_name]): - 568 arr[r] = (self.r_values[r_name] - sub_r_mean) / (self.e_dvalue[e_name] * np.sqrt(e_N / self.shape[r_name] - 1)) - 569 plt.hist(arr, rwidth=0.8, bins=len(self.e_content[e_name])) - 570 plt.title('Replica distribution' + e_name + ' (mean=0, var=1)') - 571 plt.draw() - 572 - 573 def plot_history(self, expand=True): - 574 """Plot derived Monte Carlo history for each ensemble - 575 - 576 Parameters - 577 ---------- - 578 expand : bool - 579 show expanded history for irregular Monte Carlo chains (default: True). 
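The S, tau_exp and N_sigma values that appear in the details printout and in the plots are chosen when the error analysis is run; a sketch of the two windowing modes of gamma_method, continuing the toy observable obs from the first sketch, with illustrative numbers:

    obs.gamma_method(S=2.0)                    # automatic windowing, controlled by S
    obs.gamma_method(tau_exp=10, N_sigma=1)    # attach an exponential tail to rho(W)
    obs.details()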
- 580 """ - 581 for e, e_name in enumerate(self.mc_names): - 582 plt.figure() - 583 r_length = [] - 584 tmp = [] - 585 tmp_expanded = [] - 586 for r, r_name in enumerate(self.e_content[e_name]): - 587 tmp.append(self.deltas[r_name] + self.r_values[r_name]) - 588 if expand: - 589 tmp_expanded.append(_expand_deltas(self.deltas[r_name], list(self.idl[r_name]), self.shape[r_name]) + self.r_values[r_name]) - 590 r_length.append(len(tmp_expanded[-1])) - 591 else: - 592 r_length.append(len(tmp[-1])) - 593 e_N = np.sum(r_length) - 594 x = np.arange(e_N) - 595 y_test = np.concatenate(tmp, axis=0) - 596 if expand: - 597 y = np.concatenate(tmp_expanded, axis=0) - 598 else: - 599 y = y_test - 600 plt.errorbar(x, y, fmt='.', markersize=3) - 601 plt.xlim(-0.5, e_N - 0.5) - 602 plt.title(e_name + f'\nskew: {skew(y_test):.3f} (p={skewtest(y_test).pvalue:.3f}), kurtosis: {kurtosis(y_test):.3f} (p={kurtosistest(y_test).pvalue:.3f})') - 603 plt.draw() - 604 - 605 def plot_piechart(self, save=None): - 606 """Plot piechart which shows the fractional contribution of each - 607 ensemble to the error and returns a dictionary containing the fractions. - 608 - 609 Parameters - 610 ---------- - 611 save : str - 612 saves the figure to a file named 'save' if. - 613 """ - 614 if not hasattr(self, 'e_dvalue'): - 615 raise Exception('Run the gamma method first.') - 616 if np.isclose(0.0, self._dvalue, atol=1e-15): - 617 raise Exception('Error is 0.0') - 618 labels = self.e_names - 619 sizes = [self.e_dvalue[name] ** 2 for name in labels] / self._dvalue ** 2 - 620 fig1, ax1 = plt.subplots() - 621 ax1.pie(sizes, labels=labels, startangle=90, normalize=True) - 622 ax1.axis('equal') - 623 plt.draw() - 624 if save: - 625 fig1.savefig(save) - 626 - 627 return dict(zip(self.e_names, sizes)) - 628 - 629 def dump(self, filename, datatype="json.gz", description="", **kwargs): - 630 """Dump the Obs to a file 'name' of chosen format. - 631 - 632 Parameters - 633 ---------- - 634 filename : str - 635 name of the file to be saved. - 636 datatype : str - 637 Format of the exported file. Supported formats include - 638 "json.gz" and "pickle" - 639 description : str - 640 Description for output file, only relevant for json.gz format. - 641 path : str - 642 specifies a custom path for the file (default '.') - 643 """ - 644 if 'path' in kwargs: - 645 file_name = kwargs.get('path') + '/' + filename - 646 else: - 647 file_name = filename - 648 - 649 if datatype == "json.gz": - 650 from .input.json import dump_to_json - 651 dump_to_json([self], file_name, description=description) - 652 elif datatype == "pickle": - 653 with open(file_name + '.p', 'wb') as fb: - 654 pickle.dump(self, fb) - 655 else: - 656 raise Exception("Unknown datatype " + str(datatype)) - 657 - 658 def export_jackknife(self): - 659 """Export jackknife samples from the Obs - 660 - 661 Returns - 662 ------- - 663 numpy.ndarray - 664 Returns a numpy array of length N + 1 where N is the number of samples - 665 for the given ensemble and replicum. The zeroth entry of the array contains - 666 the mean value of the Obs, entries 1 to N contain the N jackknife samples - 667 derived from the Obs. The current implementation only works for observables - 668 defined on exactly one ensemble and replicum. The derived jackknife samples - 669 should agree with samples from a full jackknife analysis up to O(1/N). - 670 """ + 491 def plot_tauint(self, save=None): + 492 """Plot integrated autocorrelation time for each ensemble. 
+ 493 
+ 494 Parameters
+ 495 ----------
+ 496 save : str
+ 497 saves the figure to a file named 'save' if specified.
+ 498 """
+ 499 if not hasattr(self, 'e_dvalue'):
+ 500 raise Exception('Run the gamma method first.')
+ 501 
+ 502 for e, e_name in enumerate(self.mc_names):
+ 503 fig = plt.figure()
+ 504 plt.xlabel(r'$W$')
+ 505 plt.ylabel(r'$\tau_\mathrm{int}$')
+ 506 length = int(len(self.e_n_tauint[e_name]))
+ 507 if self.tau_exp[e_name] > 0:
+ 508 base = self.e_n_tauint[e_name][self.e_windowsize[e_name]]
+ 509 x_help = np.arange(2 * self.tau_exp[e_name])
+ 510 y_help = (x_help + 1) * np.abs(self.e_rho[e_name][self.e_windowsize[e_name] + 1]) * (1 - x_help / (2 * (2 * self.tau_exp[e_name] - 1))) + base
+ 511 x_arr = np.arange(self.e_windowsize[e_name] + 1, self.e_windowsize[e_name] + 1 + 2 * self.tau_exp[e_name])
+ 512 plt.plot(x_arr, y_help, 'C' + str(e), linewidth=1, ls='--', marker=',')
+ 513 plt.errorbar([self.e_windowsize[e_name] + 2 * self.tau_exp[e_name]], [self.e_tauint[e_name]],
+ 514 yerr=[self.e_dtauint[e_name]], fmt='C' + str(e), linewidth=1, capsize=2, marker='o', mfc=plt.rcParams['axes.facecolor'])
+ 515 xmax = self.e_windowsize[e_name] + 2 * self.tau_exp[e_name] + 1.5
+ 516 label = e_name + r', $\tau_\mathrm{exp}$=' + str(np.around(self.tau_exp[e_name], decimals=2))
+ 517 else:
+ 518 label = e_name + ', S=' + str(np.around(self.S[e_name], decimals=2))
+ 519 xmax = max(10.5, 2 * self.e_windowsize[e_name] - 0.5)
+ 520 
+ 521 plt.errorbar(np.arange(length)[:int(xmax) + 1], self.e_n_tauint[e_name][:int(xmax) + 1], yerr=self.e_n_dtauint[e_name][:int(xmax) + 1], linewidth=1, capsize=2, label=label)
+ 522 plt.axvline(x=self.e_windowsize[e_name], color='C' + str(e), alpha=0.5, marker=',', ls='--')
+ 523 plt.legend()
+ 524 plt.xlim(-0.5, xmax)
+ 525 ylim = plt.ylim()
+ 526 plt.ylim(bottom=0.0, top=max(1.0, ylim[1]))
+ 527 plt.draw()
+ 528 if save:
+ 529 fig.savefig(save + "_" + str(e))
+ 530 
+ 531 def plot_rho(self, save=None):
+ 532 """Plot normalized autocorrelation function for each ensemble.
+ 533 
+ 534 Parameters
+ 535 ----------
+ 536 save : str
+ 537 saves the figure to a file named 'save' if specified.
+ 538 """ + 539 if not hasattr(self, 'e_dvalue'): + 540 raise Exception('Run the gamma method first.') + 541 for e, e_name in enumerate(self.mc_names): + 542 fig = plt.figure() + 543 plt.xlabel('W') + 544 plt.ylabel('rho') + 545 length = int(len(self.e_drho[e_name])) + 546 plt.errorbar(np.arange(length), self.e_rho[e_name][:length], yerr=self.e_drho[e_name][:], linewidth=1, capsize=2) + 547 plt.axvline(x=self.e_windowsize[e_name], color='r', alpha=0.25, ls='--', marker=',') + 548 if self.tau_exp[e_name] > 0: + 549 plt.plot([self.e_windowsize[e_name] + 1, self.e_windowsize[e_name] + 1 + 2 * self.tau_exp[e_name]], + 550 [self.e_rho[e_name][self.e_windowsize[e_name] + 1], 0], 'k-', lw=1) + 551 xmax = self.e_windowsize[e_name] + 2 * self.tau_exp[e_name] + 1.5 + 552 plt.title('Rho ' + e_name + r', tau\_exp=' + str(np.around(self.tau_exp[e_name], decimals=2))) + 553 else: + 554 xmax = max(10.5, 2 * self.e_windowsize[e_name] - 0.5) + 555 plt.title('Rho ' + e_name + ', S=' + str(np.around(self.S[e_name], decimals=2))) + 556 plt.plot([-0.5, xmax], [0, 0], 'k--', lw=1) + 557 plt.xlim(-0.5, xmax) + 558 plt.draw() + 559 if save: + 560 fig.savefig(save + "_" + str(e)) + 561 + 562 def plot_rep_dist(self): + 563 """Plot replica distribution for each ensemble with more than one replicum.""" + 564 if not hasattr(self, 'e_dvalue'): + 565 raise Exception('Run the gamma method first.') + 566 for e, e_name in enumerate(self.mc_names): + 567 if len(self.e_content[e_name]) == 1: + 568 print('No replica distribution for a single replicum (', e_name, ')') + 569 continue + 570 r_length = [] + 571 sub_r_mean = 0 + 572 for r, r_name in enumerate(self.e_content[e_name]): + 573 r_length.append(len(self.deltas[r_name])) + 574 sub_r_mean += self.shape[r_name] * self.r_values[r_name] + 575 e_N = np.sum(r_length) + 576 sub_r_mean /= e_N + 577 arr = np.zeros(len(self.e_content[e_name])) + 578 for r, r_name in enumerate(self.e_content[e_name]): + 579 arr[r] = (self.r_values[r_name] - sub_r_mean) / (self.e_dvalue[e_name] * np.sqrt(e_N / self.shape[r_name] - 1)) + 580 plt.hist(arr, rwidth=0.8, bins=len(self.e_content[e_name])) + 581 plt.title('Replica distribution' + e_name + ' (mean=0, var=1)') + 582 plt.draw() + 583 + 584 def plot_history(self, expand=True): + 585 """Plot derived Monte Carlo history for each ensemble + 586 + 587 Parameters + 588 ---------- + 589 expand : bool + 590 show expanded history for irregular Monte Carlo chains (default: True). 
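Once the gamma method has run, the plotting helpers from this hunk can be called directly; the file stem 'tauint' is illustrative (plot_tauint appends '_0', '_1', ... per ensemble):

    obs.gamma_method()
    obs.plot_tauint(save='tauint')   # writes one file per ensemble: tauint_0, tauint_1, ...
    obs.plot_rho()
    obs.plot_history(expand=True)    # gaps of irregular chains are drawn at the replica mean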
+ 591 """
+ 592 for e, e_name in enumerate(self.mc_names):
+ 593 plt.figure()
+ 594 r_length = []
+ 595 tmp = []
+ 596 tmp_expanded = []
+ 597 for r, r_name in enumerate(self.e_content[e_name]):
+ 598 tmp.append(self.deltas[r_name] + self.r_values[r_name])
+ 599 if expand:
+ 600 tmp_expanded.append(_expand_deltas(self.deltas[r_name], list(self.idl[r_name]), self.shape[r_name]) + self.r_values[r_name])
+ 601 r_length.append(len(tmp_expanded[-1]))
+ 602 else:
+ 603 r_length.append(len(tmp[-1]))
+ 604 e_N = np.sum(r_length)
+ 605 x = np.arange(e_N)
+ 606 y_test = np.concatenate(tmp, axis=0)
+ 607 if expand:
+ 608 y = np.concatenate(tmp_expanded, axis=0)
+ 609 else:
+ 610 y = y_test
+ 611 plt.errorbar(x, y, fmt='.', markersize=3)
+ 612 plt.xlim(-0.5, e_N - 0.5)
+ 613 plt.title(e_name + f'\nskew: {skew(y_test):.3f} (p={skewtest(y_test).pvalue:.3f}), kurtosis: {kurtosis(y_test):.3f} (p={kurtosistest(y_test).pvalue:.3f})')
+ 614 plt.draw()
+ 615 
+ 616 def plot_piechart(self, save=None):
+ 617 """Plot piechart which shows the fractional contribution of each
+ 618 ensemble to the error and returns a dictionary containing the fractions.
+ 619 
+ 620 Parameters
+ 621 ----------
+ 622 save : str
+ 623 saves the figure to a file named 'save' if specified.
+ 624 """
+ 625 if not hasattr(self, 'e_dvalue'):
+ 626 raise Exception('Run the gamma method first.')
+ 627 if np.isclose(0.0, self._dvalue, atol=1e-15):
+ 628 raise Exception('Error is 0.0')
+ 629 labels = self.e_names
+ 630 sizes = [self.e_dvalue[name] ** 2 for name in labels] / self._dvalue ** 2
+ 631 fig1, ax1 = plt.subplots()
+ 632 ax1.pie(sizes, labels=labels, startangle=90, normalize=True)
+ 633 ax1.axis('equal')
+ 634 plt.draw()
+ 635 if save:
+ 636 fig1.savefig(save)
+ 637 
+ 638 return dict(zip(self.e_names, sizes))
+ 639 
+ 640 def dump(self, filename, datatype="json.gz", description="", **kwargs):
+ 641 """Dump the Obs to a file 'filename' of chosen format.
+ 642 
+ 643 Parameters
+ 644 ----------
+ 645 filename : str
+ 646 name of the file to be saved.
+ 647 datatype : str
+ 648 Format of the exported file. Supported formats include
+ 649 "json.gz" and "pickle"
+ 650 description : str
+ 651 Description for output file, only relevant for json.gz format.
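A round-trip sketch for dump; the file name 'my_obs' is illustrative and the counterpart reader lives in pyerrors.input.json:

    obs.dump('my_obs', datatype='json.gz', description='toy data')  # writes my_obs.json.gz
    reloaded = pe.input.json.load_json('my_obs')                    # returns the Obs (or a list)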
+ 652 path : str + 653 specifies a custom path for the file (default '.') + 654 """ + 655 if 'path' in kwargs: + 656 file_name = kwargs.get('path') + '/' + filename + 657 else: + 658 file_name = filename + 659 + 660 if datatype == "json.gz": + 661 from .input.json import dump_to_json + 662 dump_to_json([self], file_name, description=description) + 663 elif datatype == "pickle": + 664 with open(file_name + '.p', 'wb') as fb: + 665 pickle.dump(self, fb) + 666 else: + 667 raise Exception("Unknown datatype " + str(datatype)) + 668 + 669 def export_jackknife(self): + 670 """Export jackknife samples from the Obs 671 - 672 if len(self.names) != 1: - 673 raise Exception("'export_jackknife' is only implemented for Obs defined on one ensemble and replicum.") - 674 - 675 name = self.names[0] - 676 full_data = self.deltas[name] + self.r_values[name] - 677 n = full_data.size - 678 mean = self.value - 679 tmp_jacks = np.zeros(n + 1) - 680 tmp_jacks[0] = mean - 681 tmp_jacks[1:] = (n * mean - full_data) / (n - 1) - 682 return tmp_jacks - 683 - 684 def __float__(self): - 685 return float(self.value) - 686 - 687 def __repr__(self): - 688 return 'Obs[' + str(self) + ']' - 689 - 690 def __str__(self): - 691 if self._dvalue == 0.0: - 692 return str(self.value) - 693 fexp = np.floor(np.log10(self._dvalue)) - 694 if fexp < 0.0: - 695 return '{:{form}}({:2.0f})'.format(self.value, self._dvalue * 10 ** (-fexp + 1), form='.' + str(-int(fexp) + 1) + 'f') - 696 elif fexp == 0.0: - 697 return '{:.1f}({:1.1f})'.format(self.value, self._dvalue) - 698 else: - 699 return '{:.0f}({:2.0f})'.format(self.value, self._dvalue) + 672 Returns + 673 ------- + 674 numpy.ndarray + 675 Returns a numpy array of length N + 1 where N is the number of samples + 676 for the given ensemble and replicum. The zeroth entry of the array contains + 677 the mean value of the Obs, entries 1 to N contain the N jackknife samples + 678 derived from the Obs. The current implementation only works for observables + 679 defined on exactly one ensemble and replicum. The derived jackknife samples + 680 should agree with samples from a full jackknife analysis up to O(1/N). 
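The returned jackknife samples can be fed into textbook jackknife error propagation, which, unlike the gamma method, ignores autocorrelations; a sketch continuing the toy observable obs from above:

    jacks = obs.export_jackknife()   # jacks[0] = mean, jacks[1:] = N jackknife samples
    n = len(jacks) - 1
    jack_mean = np.mean(jacks[1:])
    jack_err = np.sqrt((n - 1) * np.mean((jacks[1:] - jack_mean) ** 2))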
+ 681 """ + 682 + 683 if len(self.names) != 1: + 684 raise Exception("'export_jackknife' is only implemented for Obs defined on one ensemble and replicum.") + 685 + 686 name = self.names[0] + 687 full_data = self.deltas[name] + self.r_values[name] + 688 n = full_data.size + 689 mean = self.value + 690 tmp_jacks = np.zeros(n + 1) + 691 tmp_jacks[0] = mean + 692 tmp_jacks[1:] = (n * mean - full_data) / (n - 1) + 693 return tmp_jacks + 694 + 695 def __float__(self): + 696 return float(self.value) + 697 + 698 def __repr__(self): + 699 return 'Obs[' + str(self) + ']' 700 - 701 def __hash__(self): - 702 hash_tuple = (np.array([self.value]).astype(np.float32).data.tobytes(),) - 703 hash_tuple += tuple([o.astype(np.float32).data.tobytes() for o in self.deltas.values()]) - 704 hash_tuple += tuple([np.array([o.errsq()]).astype(np.float32).data.tobytes() for o in self.covobs.values()]) - 705 hash_tuple += tuple([o.encode() for o in self.names]) - 706 m = hashlib.md5() - 707 [m.update(o) for o in hash_tuple] - 708 return int(m.hexdigest(), 16) & 0xFFFFFFFF - 709 - 710 # Overload comparisons - 711 def __lt__(self, other): - 712 return self.value < other - 713 - 714 def __le__(self, other): - 715 return self.value <= other - 716 - 717 def __gt__(self, other): - 718 return self.value > other - 719 - 720 def __ge__(self, other): - 721 return self.value >= other - 722 - 723 def __eq__(self, other): - 724 return (self - other).is_zero() - 725 - 726 def __ne__(self, other): - 727 return not (self - other).is_zero() - 728 - 729 # Overload math operations - 730 def __add__(self, y): - 731 if isinstance(y, Obs): - 732 return derived_observable(lambda x, **kwargs: x[0] + x[1], [self, y], man_grad=[1, 1]) - 733 else: - 734 if isinstance(y, np.ndarray): - 735 return np.array([self + o for o in y]) - 736 elif y.__class__.__name__ in ['Corr', 'CObs']: - 737 return NotImplemented - 738 else: - 739 return derived_observable(lambda x, **kwargs: x[0] + y, [self], man_grad=[1]) - 740 - 741 def __radd__(self, y): - 742 return self + y - 743 - 744 def __mul__(self, y): - 745 if isinstance(y, Obs): - 746 return derived_observable(lambda x, **kwargs: x[0] * x[1], [self, y], man_grad=[y.value, self.value]) - 747 else: - 748 if isinstance(y, np.ndarray): - 749 return np.array([self * o for o in y]) - 750 elif isinstance(y, complex): - 751 return CObs(self * y.real, self * y.imag) - 752 elif y.__class__.__name__ in ['Corr', 'CObs']: - 753 return NotImplemented - 754 else: - 755 return derived_observable(lambda x, **kwargs: x[0] * y, [self], man_grad=[y]) - 756 - 757 def __rmul__(self, y): - 758 return self * y - 759 - 760 def __sub__(self, y): - 761 if isinstance(y, Obs): - 762 return derived_observable(lambda x, **kwargs: x[0] - x[1], [self, y], man_grad=[1, -1]) - 763 else: - 764 if isinstance(y, np.ndarray): - 765 return np.array([self - o for o in y]) - 766 elif y.__class__.__name__ in ['Corr', 'CObs']: - 767 return NotImplemented - 768 else: - 769 return derived_observable(lambda x, **kwargs: x[0] - y, [self], man_grad=[1]) - 770 - 771 def __rsub__(self, y): - 772 return -1 * (self - y) - 773 - 774 def __pos__(self): - 775 return self - 776 - 777 def __neg__(self): - 778 return -1 * self - 779 - 780 def __truediv__(self, y): - 781 if isinstance(y, Obs): - 782 return derived_observable(lambda x, **kwargs: x[0] / x[1], [self, y], man_grad=[1 / y.value, - self.value / y.value ** 2]) - 783 else: - 784 if isinstance(y, np.ndarray): - 785 return np.array([self / o for o in y]) - 786 elif y.__class__.__name__ in ['Corr', 'CObs']: 
- 787 return NotImplemented - 788 else: - 789 return derived_observable(lambda x, **kwargs: x[0] / y, [self], man_grad=[1 / y]) - 790 - 791 def __rtruediv__(self, y): - 792 if isinstance(y, Obs): - 793 return derived_observable(lambda x, **kwargs: x[0] / x[1], [y, self], man_grad=[1 / self.value, - y.value / self.value ** 2]) - 794 else: - 795 if isinstance(y, np.ndarray): - 796 return np.array([o / self for o in y]) - 797 elif y.__class__.__name__ in ['Corr', 'CObs']: - 798 return NotImplemented - 799 else: - 800 return derived_observable(lambda x, **kwargs: y / x[0], [self], man_grad=[-y / self.value ** 2]) - 801 - 802 def __pow__(self, y): - 803 if isinstance(y, Obs): - 804 return derived_observable(lambda x: x[0] ** x[1], [self, y]) - 805 else: - 806 return derived_observable(lambda x: x[0] ** y, [self]) - 807 - 808 def __rpow__(self, y): - 809 if isinstance(y, Obs): - 810 return derived_observable(lambda x: x[0] ** x[1], [y, self]) - 811 else: - 812 return derived_observable(lambda x: y ** x[0], [self]) - 813 - 814 def __abs__(self): - 815 return derived_observable(lambda x: anp.abs(x[0]), [self]) - 816 - 817 # Overload numpy functions - 818 def sqrt(self): - 819 return derived_observable(lambda x, **kwargs: np.sqrt(x[0]), [self], man_grad=[1 / 2 / np.sqrt(self.value)]) - 820 - 821 def log(self): - 822 return derived_observable(lambda x, **kwargs: np.log(x[0]), [self], man_grad=[1 / self.value]) - 823 - 824 def exp(self): - 825 return derived_observable(lambda x, **kwargs: np.exp(x[0]), [self], man_grad=[np.exp(self.value)]) - 826 - 827 def sin(self): - 828 return derived_observable(lambda x, **kwargs: np.sin(x[0]), [self], man_grad=[np.cos(self.value)]) - 829 - 830 def cos(self): - 831 return derived_observable(lambda x, **kwargs: np.cos(x[0]), [self], man_grad=[-np.sin(self.value)]) - 832 - 833 def tan(self): - 834 return derived_observable(lambda x, **kwargs: np.tan(x[0]), [self], man_grad=[1 / np.cos(self.value) ** 2]) - 835 - 836 def arcsin(self): - 837 return derived_observable(lambda x: anp.arcsin(x[0]), [self]) - 838 - 839 def arccos(self): - 840 return derived_observable(lambda x: anp.arccos(x[0]), [self]) - 841 - 842 def arctan(self): - 843 return derived_observable(lambda x: anp.arctan(x[0]), [self]) - 844 - 845 def sinh(self): - 846 return derived_observable(lambda x, **kwargs: np.sinh(x[0]), [self], man_grad=[np.cosh(self.value)]) - 847 - 848 def cosh(self): - 849 return derived_observable(lambda x, **kwargs: np.cosh(x[0]), [self], man_grad=[np.sinh(self.value)]) - 850 - 851 def tanh(self): - 852 return derived_observable(lambda x, **kwargs: np.tanh(x[0]), [self], man_grad=[1 / np.cosh(self.value) ** 2]) - 853 - 854 def arcsinh(self): - 855 return derived_observable(lambda x: anp.arcsinh(x[0]), [self]) - 856 - 857 def arccosh(self): - 858 return derived_observable(lambda x: anp.arccosh(x[0]), [self]) - 859 - 860 def arctanh(self): - 861 return derived_observable(lambda x: anp.arctanh(x[0]), [self]) - 862 - 863 - 864class CObs: - 865 """Class for a complex valued observable.""" - 866 __slots__ = ['_real', '_imag', 'tag'] + 701 def __str__(self): + 702 if self._dvalue == 0.0: + 703 return str(self.value) + 704 return _format_uncertainty(self.value, self._dvalue) + 705 + 706 def __hash__(self): + 707 hash_tuple = (np.array([self.value]).astype(np.float32).data.tobytes(),) + 708 hash_tuple += tuple([o.astype(np.float32).data.tobytes() for o in self.deltas.values()]) + 709 hash_tuple += tuple([np.array([o.errsq()]).astype(np.float32).data.tobytes() for o in 
self.covobs.values()]) + 710 hash_tuple += tuple([o.encode() for o in self.names]) + 711 m = hashlib.md5() + 712 [m.update(o) for o in hash_tuple] + 713 return int(m.hexdigest(), 16) & 0xFFFFFFFF + 714 + 715 # Overload comparisons + 716 def __lt__(self, other): + 717 return self.value < other + 718 + 719 def __le__(self, other): + 720 return self.value <= other + 721 + 722 def __gt__(self, other): + 723 return self.value > other + 724 + 725 def __ge__(self, other): + 726 return self.value >= other + 727 + 728 def __eq__(self, other): + 729 return (self - other).is_zero() + 730 + 731 def __ne__(self, other): + 732 return not (self - other).is_zero() + 733 + 734 # Overload math operations + 735 def __add__(self, y): + 736 if isinstance(y, Obs): + 737 return derived_observable(lambda x, **kwargs: x[0] + x[1], [self, y], man_grad=[1, 1]) + 738 else: + 739 if isinstance(y, np.ndarray): + 740 return np.array([self + o for o in y]) + 741 elif y.__class__.__name__ in ['Corr', 'CObs']: + 742 return NotImplemented + 743 else: + 744 return derived_observable(lambda x, **kwargs: x[0] + y, [self], man_grad=[1]) + 745 + 746 def __radd__(self, y): + 747 return self + y + 748 + 749 def __mul__(self, y): + 750 if isinstance(y, Obs): + 751 return derived_observable(lambda x, **kwargs: x[0] * x[1], [self, y], man_grad=[y.value, self.value]) + 752 else: + 753 if isinstance(y, np.ndarray): + 754 return np.array([self * o for o in y]) + 755 elif isinstance(y, complex): + 756 return CObs(self * y.real, self * y.imag) + 757 elif y.__class__.__name__ in ['Corr', 'CObs']: + 758 return NotImplemented + 759 else: + 760 return derived_observable(lambda x, **kwargs: x[0] * y, [self], man_grad=[y]) + 761 + 762 def __rmul__(self, y): + 763 return self * y + 764 + 765 def __sub__(self, y): + 766 if isinstance(y, Obs): + 767 return derived_observable(lambda x, **kwargs: x[0] - x[1], [self, y], man_grad=[1, -1]) + 768 else: + 769 if isinstance(y, np.ndarray): + 770 return np.array([self - o for o in y]) + 771 elif y.__class__.__name__ in ['Corr', 'CObs']: + 772 return NotImplemented + 773 else: + 774 return derived_observable(lambda x, **kwargs: x[0] - y, [self], man_grad=[1]) + 775 + 776 def __rsub__(self, y): + 777 return -1 * (self - y) + 778 + 779 def __pos__(self): + 780 return self + 781 + 782 def __neg__(self): + 783 return -1 * self + 784 + 785 def __truediv__(self, y): + 786 if isinstance(y, Obs): + 787 return derived_observable(lambda x, **kwargs: x[0] / x[1], [self, y], man_grad=[1 / y.value, - self.value / y.value ** 2]) + 788 else: + 789 if isinstance(y, np.ndarray): + 790 return np.array([self / o for o in y]) + 791 elif y.__class__.__name__ in ['Corr', 'CObs']: + 792 return NotImplemented + 793 else: + 794 return derived_observable(lambda x, **kwargs: x[0] / y, [self], man_grad=[1 / y]) + 795 + 796 def __rtruediv__(self, y): + 797 if isinstance(y, Obs): + 798 return derived_observable(lambda x, **kwargs: x[0] / x[1], [y, self], man_grad=[1 / self.value, - y.value / self.value ** 2]) + 799 else: + 800 if isinstance(y, np.ndarray): + 801 return np.array([o / self for o in y]) + 802 elif y.__class__.__name__ in ['Corr', 'CObs']: + 803 return NotImplemented + 804 else: + 805 return derived_observable(lambda x, **kwargs: y / x[0], [self], man_grad=[-y / self.value ** 2]) + 806 + 807 def __pow__(self, y): + 808 if isinstance(y, Obs): + 809 return derived_observable(lambda x: x[0] ** x[1], [self, y]) + 810 else: + 811 return derived_observable(lambda x: x[0] ** y, [self]) + 812 + 813 def __rpow__(self, y): + 814 if 
isinstance(y, Obs): + 815 return derived_observable(lambda x: x[0] ** x[1], [y, self]) + 816 else: + 817 return derived_observable(lambda x: y ** x[0], [self]) + 818 + 819 def __abs__(self): + 820 return derived_observable(lambda x: anp.abs(x[0]), [self]) + 821 + 822 # Overload numpy functions + 823 def sqrt(self): + 824 return derived_observable(lambda x, **kwargs: np.sqrt(x[0]), [self], man_grad=[1 / 2 / np.sqrt(self.value)]) + 825 + 826 def log(self): + 827 return derived_observable(lambda x, **kwargs: np.log(x[0]), [self], man_grad=[1 / self.value]) + 828 + 829 def exp(self): + 830 return derived_observable(lambda x, **kwargs: np.exp(x[0]), [self], man_grad=[np.exp(self.value)]) + 831 + 832 def sin(self): + 833 return derived_observable(lambda x, **kwargs: np.sin(x[0]), [self], man_grad=[np.cos(self.value)]) + 834 + 835 def cos(self): + 836 return derived_observable(lambda x, **kwargs: np.cos(x[0]), [self], man_grad=[-np.sin(self.value)]) + 837 + 838 def tan(self): + 839 return derived_observable(lambda x, **kwargs: np.tan(x[0]), [self], man_grad=[1 / np.cos(self.value) ** 2]) + 840 + 841 def arcsin(self): + 842 return derived_observable(lambda x: anp.arcsin(x[0]), [self]) + 843 + 844 def arccos(self): + 845 return derived_observable(lambda x: anp.arccos(x[0]), [self]) + 846 + 847 def arctan(self): + 848 return derived_observable(lambda x: anp.arctan(x[0]), [self]) + 849 + 850 def sinh(self): + 851 return derived_observable(lambda x, **kwargs: np.sinh(x[0]), [self], man_grad=[np.cosh(self.value)]) + 852 + 853 def cosh(self): + 854 return derived_observable(lambda x, **kwargs: np.cosh(x[0]), [self], man_grad=[np.sinh(self.value)]) + 855 + 856 def tanh(self): + 857 return derived_observable(lambda x, **kwargs: np.tanh(x[0]), [self], man_grad=[1 / np.cosh(self.value) ** 2]) + 858 + 859 def arcsinh(self): + 860 return derived_observable(lambda x: anp.arcsinh(x[0]), [self]) + 861 + 862 def arccosh(self): + 863 return derived_observable(lambda x: anp.arccosh(x[0]), [self]) + 864 + 865 def arctanh(self): + 866 return derived_observable(lambda x: anp.arctanh(x[0]), [self]) 867 - 868 def __init__(self, real, imag=0.0): - 869 self._real = real - 870 self._imag = imag - 871 self.tag = None + 868 + 869class CObs: + 870 """Class for a complex valued observable.""" + 871 __slots__ = ['_real', '_imag', 'tag'] 872 - 873 @property - 874 def real(self): - 875 return self._real - 876 - 877 @property - 878 def imag(self): - 879 return self._imag - 880 - 881 def gamma_method(self, **kwargs): - 882 """Executes the gamma_method for the real and the imaginary part.""" - 883 if isinstance(self.real, Obs): - 884 self.real.gamma_method(**kwargs) - 885 if isinstance(self.imag, Obs): - 886 self.imag.gamma_method(**kwargs) - 887 - 888 def is_zero(self): - 889 """Checks whether both real and imaginary part are zero within machine precision.""" - 890 return self.real == 0.0 and self.imag == 0.0 - 891 - 892 def conjugate(self): - 893 return CObs(self.real, -self.imag) - 894 - 895 def __add__(self, other): - 896 if isinstance(other, np.ndarray): - 897 return other + self - 898 elif hasattr(other, 'real') and hasattr(other, 'imag'): - 899 return CObs(self.real + other.real, - 900 self.imag + other.imag) - 901 else: - 902 return CObs(self.real + other, self.imag) - 903 - 904 def __radd__(self, y): - 905 return self + y - 906 - 907 def __sub__(self, other): - 908 if isinstance(other, np.ndarray): - 909 return -1 * (other - self) - 910 elif hasattr(other, 'real') and hasattr(other, 'imag'): - 911 return CObs(self.real - 
other.real, self.imag - other.imag) - 912 else: - 913 return CObs(self.real - other, self.imag) - 914 - 915 def __rsub__(self, other): - 916 return -1 * (self - other) - 917 - 918 def __mul__(self, other): - 919 if isinstance(other, np.ndarray): - 920 return other * self - 921 elif hasattr(other, 'real') and hasattr(other, 'imag'): - 922 if all(isinstance(i, Obs) for i in [self.real, self.imag, other.real, other.imag]): - 923 return CObs(derived_observable(lambda x, **kwargs: x[0] * x[1] - x[2] * x[3], - 924 [self.real, other.real, self.imag, other.imag], - 925 man_grad=[other.real.value, self.real.value, -other.imag.value, -self.imag.value]), - 926 derived_observable(lambda x, **kwargs: x[2] * x[1] + x[0] * x[3], - 927 [self.real, other.real, self.imag, other.imag], - 928 man_grad=[other.imag.value, self.imag.value, other.real.value, self.real.value])) - 929 elif getattr(other, 'imag', 0) != 0: - 930 return CObs(self.real * other.real - self.imag * other.imag, - 931 self.imag * other.real + self.real * other.imag) - 932 else: - 933 return CObs(self.real * other.real, self.imag * other.real) - 934 else: - 935 return CObs(self.real * other, self.imag * other) - 936 - 937 def __rmul__(self, other): - 938 return self * other - 939 - 940 def __truediv__(self, other): - 941 if isinstance(other, np.ndarray): - 942 return 1 / (other / self) - 943 elif hasattr(other, 'real') and hasattr(other, 'imag'): - 944 r = other.real ** 2 + other.imag ** 2 - 945 return CObs((self.real * other.real + self.imag * other.imag) / r, (self.imag * other.real - self.real * other.imag) / r) - 946 else: - 947 return CObs(self.real / other, self.imag / other) - 948 - 949 def __rtruediv__(self, other): - 950 r = self.real ** 2 + self.imag ** 2 - 951 if hasattr(other, 'real') and hasattr(other, 'imag'): - 952 return CObs((self.real * other.real + self.imag * other.imag) / r, (self.real * other.imag - self.imag * other.real) / r) - 953 else: - 954 return CObs(self.real * other / r, -self.imag * other / r) - 955 - 956 def __abs__(self): - 957 return np.sqrt(self.real**2 + self.imag**2) - 958 - 959 def __pos__(self): - 960 return self - 961 - 962 def __neg__(self): - 963 return -1 * self - 964 - 965 def __eq__(self, other): - 966 return self.real == other.real and self.imag == other.imag - 967 - 968 def __str__(self): - 969 return '(' + str(self.real) + int(self.imag >= 0.0) * '+' + str(self.imag) + 'j)' - 970 - 971 def __repr__(self): - 972 return 'CObs[' + str(self) + ']' - 973 - 974 - 975def _expand_deltas(deltas, idx, shape): - 976 """Expand deltas defined on idx to a regular, contiguous range, where holes are filled by 0. 
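A small sketch of the CObs class being rewritten in this hunk, building a complex observable from two hypothetical real Obs:

    import numpy as np
    import pyerrors as pe

    re_part = pe.Obs([np.random.normal(1.0, 0.1, 500)], ['ens1'])
    im_part = pe.Obs([np.random.normal(0.2, 0.1, 500)], ['ens1'])
    c = pe.CObs(re_part, im_part)
    prod = c * c.conjugate()      # real part carries |c|^2, imaginary part is identically zero
    prod.gamma_method()           # runs the gamma method on real and imaginary part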
- 977 If idx is of type range, the deltas are not changed + 873 def __init__(self, real, imag=0.0): + 874 self._real = real + 875 self._imag = imag + 876 self.tag = None + 877 + 878 @property + 879 def real(self): + 880 return self._real + 881 + 882 @property + 883 def imag(self): + 884 return self._imag + 885 + 886 def gamma_method(self, **kwargs): + 887 """Executes the gamma_method for the real and the imaginary part.""" + 888 if isinstance(self.real, Obs): + 889 self.real.gamma_method(**kwargs) + 890 if isinstance(self.imag, Obs): + 891 self.imag.gamma_method(**kwargs) + 892 + 893 def is_zero(self): + 894 """Checks whether both real and imaginary part are zero within machine precision.""" + 895 return self.real == 0.0 and self.imag == 0.0 + 896 + 897 def conjugate(self): + 898 return CObs(self.real, -self.imag) + 899 + 900 def __add__(self, other): + 901 if isinstance(other, np.ndarray): + 902 return other + self + 903 elif hasattr(other, 'real') and hasattr(other, 'imag'): + 904 return CObs(self.real + other.real, + 905 self.imag + other.imag) + 906 else: + 907 return CObs(self.real + other, self.imag) + 908 + 909 def __radd__(self, y): + 910 return self + y + 911 + 912 def __sub__(self, other): + 913 if isinstance(other, np.ndarray): + 914 return -1 * (other - self) + 915 elif hasattr(other, 'real') and hasattr(other, 'imag'): + 916 return CObs(self.real - other.real, self.imag - other.imag) + 917 else: + 918 return CObs(self.real - other, self.imag) + 919 + 920 def __rsub__(self, other): + 921 return -1 * (self - other) + 922 + 923 def __mul__(self, other): + 924 if isinstance(other, np.ndarray): + 925 return other * self + 926 elif hasattr(other, 'real') and hasattr(other, 'imag'): + 927 if all(isinstance(i, Obs) for i in [self.real, self.imag, other.real, other.imag]): + 928 return CObs(derived_observable(lambda x, **kwargs: x[0] * x[1] - x[2] * x[3], + 929 [self.real, other.real, self.imag, other.imag], + 930 man_grad=[other.real.value, self.real.value, -other.imag.value, -self.imag.value]), + 931 derived_observable(lambda x, **kwargs: x[2] * x[1] + x[0] * x[3], + 932 [self.real, other.real, self.imag, other.imag], + 933 man_grad=[other.imag.value, self.imag.value, other.real.value, self.real.value])) + 934 elif getattr(other, 'imag', 0) != 0: + 935 return CObs(self.real * other.real - self.imag * other.imag, + 936 self.imag * other.real + self.real * other.imag) + 937 else: + 938 return CObs(self.real * other.real, self.imag * other.real) + 939 else: + 940 return CObs(self.real * other, self.imag * other) + 941 + 942 def __rmul__(self, other): + 943 return self * other + 944 + 945 def __truediv__(self, other): + 946 if isinstance(other, np.ndarray): + 947 return 1 / (other / self) + 948 elif hasattr(other, 'real') and hasattr(other, 'imag'): + 949 r = other.real ** 2 + other.imag ** 2 + 950 return CObs((self.real * other.real + self.imag * other.imag) / r, (self.imag * other.real - self.real * other.imag) / r) + 951 else: + 952 return CObs(self.real / other, self.imag / other) + 953 + 954 def __rtruediv__(self, other): + 955 r = self.real ** 2 + self.imag ** 2 + 956 if hasattr(other, 'real') and hasattr(other, 'imag'): + 957 return CObs((self.real * other.real + self.imag * other.imag) / r, (self.real * other.imag - self.imag * other.real) / r) + 958 else: + 959 return CObs(self.real * other / r, -self.imag * other / r) + 960 + 961 def __abs__(self): + 962 return np.sqrt(self.real**2 + self.imag**2) + 963 + 964 def __pos__(self): + 965 return self + 966 + 967 def __neg__(self): + 
968 return -1 * self
+ 969 
+ 970 def __eq__(self, other):
+ 971 return self.real == other.real and self.imag == other.imag
+ 972 
+ 973 def __str__(self):
+ 974 return '(' + str(self.real) + int(self.imag >= 0.0) * '+' + str(self.imag) + 'j)'
+ 975 
+ 976 def __repr__(self):
+ 977 return 'CObs[' + str(self) + ']'
 978 
- 979 Parameters
- 980 ----------
- 981 deltas : list
- 982 List of fluctuations
- 983 idx : list
- 984 List or range of configs on which the deltas are defined, has to be sorted in ascending order.
- 985 shape : int
- 986 Number of configs in idx.
- 987 """
- 988 if isinstance(idx, range):
- 989 return deltas
- 990 else:
- 991 ret = np.zeros(idx[-1] - idx[0] + 1)
- 992 for i in range(shape):
- 993 ret[idx[i] - idx[0]] = deltas[i]
- 994 return ret
- 995 
- 996 
- 997def _merge_idx(idl):
- 998 """Returns the union of all lists in idl as sorted list
- 999 
-1000 Parameters
-1001 ----------
-1002 idl : list
-1003 List of lists or ranges.
-1004 """
-1005 
-1006 # Use groupby to efficiently check whether all elements of idl are identical
-1007 try:
-1008 g = groupby(idl)
-1009 if next(g, True) and not next(g, False):
-1010 return idl[0]
-1011 except Exception:
-1012 pass
-1013 
-1014 if np.all([type(idx) is range for idx in idl]):
-1015 if len(set([idx[0] for idx in idl])) == 1:
-1016 idstart = min([idx.start for idx in idl])
-1017 idstop = max([idx.stop for idx in idl])
-1018 idstep = min([idx.step for idx in idl])
-1019 return range(idstart, idstop, idstep)
-1020 
-1021 return sorted(set().union(*idl))
-1022 
-1023 
-1024def _intersection_idx(idl):
-1025 """Returns the intersection of all lists in idl as sorted list
-1026 
-1027 Parameters
-1028 ----------
-1029 idl : list
-1030 List of lists or ranges.
-1031 """
-1032 
-1033 def _lcm(*args):
-1034 """Returns the lowest common multiple of args.
-1035 
-1036 From python 3.9 onwards the math library contains an lcm function."""
-1037 return reduce(lambda a, b: a * b // gcd(a, b), args)
+ 979 
+ 980def _format_uncertainty(value, dvalue):
+ 981 """Creates a string of a value and its error in parenthesis notation, e.g., 13.02(45)"""
+ 982 fexp = np.floor(np.log10(dvalue))
+ 983 if fexp < 0.0:
+ 984 return '{:{form}}({:2.0f})'.format(value, dvalue * 10 ** (-fexp + 1), form='.' + str(-int(fexp) + 1) + 'f')
+ 985 elif fexp == 0.0:
+ 986 return '{:.1f}({:1.1f})'.format(value, dvalue)
+ 987 else:
+ 988 return '{:.0f}({:2.0f})'.format(value, dvalue)
+ 989 
+ 990 
+ 991def _expand_deltas(deltas, idx, shape):
+ 992 """Expand deltas defined on idx to a regular, contiguous range, where holes are filled by 0.
+ 993 If idx is of type range, the deltas are not changed
+ 994 
+ 995 Parameters
+ 996 ----------
+ 997 deltas : list
+ 998 List of fluctuations
+ 999 idx : list
+1000 List or range of configs on which the deltas are defined, has to be sorted in ascending order.
+1001 shape : int
+1002 Number of configs in idx.
+1003 """
+1004 if isinstance(idx, range):
+1005 return deltas
+1006 else:
+1007 ret = np.zeros(idx[-1] - idx[0] + 1)
+1008 for i in range(shape):
+1009 ret[idx[i] - idx[0]] = deltas[i]
+1010 return ret
+1011 
+1012 
+1013def _merge_idx(idl):
+1014 """Returns the union of all lists in idl as sorted list
+1015 
+1016 Parameters
+1017 ----------
+1018 idl : list
+1019 List of lists or ranges.
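A worked example of the parenthesis notation produced by the new _format_uncertainty helper (a private function, imported here only for illustration); the branch taken follows from fexp = floor(log10(dvalue)):

    from pyerrors.obs import _format_uncertainty

    _format_uncertainty(1.23456, 0.00789)  # fexp = -3  ->  '1.2346(79)'
    _format_uncertainty(5.43, 2.1)         # fexp =  0  ->  '5.4(2.1)'
    _format_uncertainty(123.0, 23.0)       # fexp =  1  ->  '123(23)'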
+1020 """ +1021 +1022 # Use groupby to efficiently check whether all elements of idl are identical +1023 try: +1024 g = groupby(idl) +1025 if next(g, True) and not next(g, False): +1026 return idl[0] +1027 except Exception: +1028 pass +1029 +1030 if np.all([type(idx) is range for idx in idl]): +1031 if len(set([idx[0] for idx in idl])) == 1: +1032 idstart = min([idx.start for idx in idl]) +1033 idstop = max([idx.stop for idx in idl]) +1034 idstep = min([idx.step for idx in idl]) +1035 return range(idstart, idstop, idstep) +1036 +1037 return sorted(set().union(*idl)) 1038 -1039 # Use groupby to efficiently check whether all elements of idl are identical -1040 try: -1041 g = groupby(idl) -1042 if next(g, True) and not next(g, False): -1043 return idl[0] -1044 except Exception: -1045 pass -1046 -1047 if np.all([type(idx) is range for idx in idl]): -1048 if len(set([idx[0] for idx in idl])) == 1: -1049 idstart = max([idx.start for idx in idl]) -1050 idstop = min([idx.stop for idx in idl]) -1051 idstep = _lcm(*[idx.step for idx in idl]) -1052 return range(idstart, idstop, idstep) -1053 -1054 return sorted(set.intersection(*[set(o) for o in idl])) -1055 -1056 -1057def _expand_deltas_for_merge(deltas, idx, shape, new_idx): -1058 """Expand deltas defined on idx to the list of configs that is defined by new_idx. -1059 New, empty entries are filled by 0. If idx and new_idx are of type range, the smallest -1060 common divisor of the step sizes is used as new step size. -1061 -1062 Parameters -1063 ---------- -1064 deltas : list -1065 List of fluctuations -1066 idx : list -1067 List or range of configs on which the deltas are defined. -1068 Has to be a subset of new_idx and has to be sorted in ascending order. -1069 shape : list -1070 Number of configs in idx. -1071 new_idx : list -1072 List of configs that defines the new range, has to be sorted in ascending order. -1073 """ -1074 -1075 if type(idx) is range and type(new_idx) is range: -1076 if idx == new_idx: -1077 return deltas -1078 ret = np.zeros(new_idx[-1] - new_idx[0] + 1) -1079 for i in range(shape): -1080 ret[idx[i] - new_idx[0]] = deltas[i] -1081 return np.array([ret[new_idx[i] - new_idx[0]] for i in range(len(new_idx))]) -1082 -1083 -1084def _collapse_deltas_for_merge(deltas, idx, shape, new_idx): -1085 """Collapse deltas defined on idx to the list of configs that is defined by new_idx. -1086 If idx and new_idx are of type range, the smallest -1087 common divisor of the step sizes is used as new step size. -1088 -1089 Parameters -1090 ---------- -1091 deltas : list -1092 List of fluctuations -1093 idx : list -1094 List or range of configs on which the deltas are defined. -1095 Has to be a subset of new_idx and has to be sorted in ascending order. -1096 shape : list -1097 Number of configs in idx. -1098 new_idx : list -1099 List of configs that defines the new range, has to be sorted in ascending order. -1100 """ -1101 -1102 if type(idx) is range and type(new_idx) is range: -1103 if idx == new_idx: -1104 return deltas -1105 ret = np.zeros(new_idx[-1] - new_idx[0] + 1) -1106 for i in range(shape): -1107 if idx[i] in new_idx: -1108 ret[idx[i] - new_idx[0]] = deltas[i] -1109 return np.array([ret[new_idx[i] - new_idx[0]] for i in range(len(new_idx))]) -1110 -1111 -1112def _filter_zeroes(deltas, idx, eps=Obs.filter_eps): -1113 """Filter out all configurations with vanishing fluctuation such that they do not -1114 contribute to the error estimate anymore. Returns the new deltas and -1115 idx according to the filtering. 
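Behavior sketch of the two index helpers on range and list input; these are private functions, imported here purely for illustration:

    from pyerrors.obs import _merge_idx, _intersection_idx

    _merge_idx([range(1, 100, 2), range(1, 200, 2)])         # -> range(1, 200, 2)
    _intersection_idx([range(0, 100, 2), range(0, 100, 3)])  # -> range(0, 100, 6)
    _merge_idx([[1, 4, 5], [2, 4, 6]])                       # -> [1, 2, 4, 5, 6]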
-1116 A fluctuation is considered to be vanishing, if it is smaller than eps times -1117 the mean of the absolute values of all deltas in one list. -1118 -1119 Parameters -1120 ---------- -1121 deltas : list -1122 List of fluctuations -1123 idx : list -1124 List or ranges of configs on which the deltas are defined. -1125 eps : float -1126 Prefactor that enters the filter criterion. -1127 """ -1128 new_deltas = [] -1129 new_idx = [] -1130 maxd = np.mean(np.fabs(deltas)) -1131 for i in range(len(deltas)): -1132 if abs(deltas[i]) > eps * maxd: -1133 new_deltas.append(deltas[i]) -1134 new_idx.append(idx[i]) -1135 if new_idx: -1136 return np.array(new_deltas), new_idx -1137 else: -1138 return deltas, idx -1139 -1140 -1141def derived_observable(func, data, array_mode=False, **kwargs): -1142 """Construct a derived Obs according to func(data, **kwargs) using automatic differentiation. -1143 -1144 Parameters -1145 ---------- -1146 func : object -1147 arbitrary function of the form func(data, **kwargs). For the -1148 automatic differentiation to work, all numpy functions have to have -1149 the autograd wrapper (use 'import autograd.numpy as anp'). -1150 data : list -1151 list of Obs, e.g. [obs1, obs2, obs3]. -1152 num_grad : bool -1153 if True, numerical derivatives are used instead of autograd -1154 (default False). To control the numerical differentiation the -1155 kwargs of numdifftools.step_generators.MaxStepGenerator -1156 can be used. -1157 man_grad : list -1158 manually supply a list or an array which contains the jacobian -1159 of func. Use cautiously, supplying the wrong derivative will -1160 not be intercepted. -1161 -1162 Notes -1163 ----- -1164 For simple mathematical operations it can be practical to use anonymous -1165 functions. For the ratio of two observables one can e.g. use -1166 -1167 new_obs = derived_observable(lambda x: x[0] / x[1], [obs1, obs2]) -1168 """ -1169 -1170 data = np.asarray(data) -1171 raveled_data = data.ravel() -1172 -1173 # Workaround for matrix operations containing non Obs data -1174 if not all(isinstance(x, Obs) for x in raveled_data): -1175 for i in range(len(raveled_data)): -1176 if isinstance(raveled_data[i], (int, float)): -1177 raveled_data[i] = cov_Obs(raveled_data[i], 0.0, "###dummy_covobs###") -1178 -1179 allcov = {} -1180 for o in raveled_data: -1181 for name in o.cov_names: -1182 if name in allcov: -1183 if not np.allclose(allcov[name], o.covobs[name].cov): -1184 raise Exception('Inconsistent covariance matrices for %s!' 
% (name)) -1185 else: -1186 allcov[name] = o.covobs[name].cov -1187 -1188 n_obs = len(raveled_data) -1189 new_names = sorted(set([y for x in [o.names for o in raveled_data] for y in x])) -1190 new_cov_names = sorted(set([y for x in [o.cov_names for o in raveled_data] for y in x])) -1191 new_sample_names = sorted(set(new_names) - set(new_cov_names)) -1192 -1193 is_merged = {name: (len(list(filter(lambda o: o.is_merged.get(name, False) is True, raveled_data))) > 0) for name in new_sample_names} -1194 reweighted = len(list(filter(lambda o: o.reweighted is True, raveled_data))) > 0 -1195 -1196 if data.ndim == 1: -1197 values = np.array([o.value for o in data]) -1198 else: -1199 values = np.vectorize(lambda x: x.value)(data) -1200 -1201 new_values = func(values, **kwargs) -1202 -1203 multi = int(isinstance(new_values, np.ndarray)) -1204 -1205 new_r_values = {} -1206 new_idl_d = {} -1207 for name in new_sample_names: -1208 idl = [] -1209 tmp_values = np.zeros(n_obs) -1210 for i, item in enumerate(raveled_data): -1211 tmp_values[i] = item.r_values.get(name, item.value) -1212 tmp_idl = item.idl.get(name) -1213 if tmp_idl is not None: -1214 idl.append(tmp_idl) -1215 if multi > 0: -1216 tmp_values = np.array(tmp_values).reshape(data.shape) -1217 new_r_values[name] = func(tmp_values, **kwargs) -1218 new_idl_d[name] = _merge_idx(idl) -1219 if not is_merged[name]: -1220 is_merged[name] = (1 != len(set([len(idx) for idx in [*idl, new_idl_d[name]]]))) -1221 -1222 if 'man_grad' in kwargs: -1223 deriv = np.asarray(kwargs.get('man_grad')) -1224 if new_values.shape + data.shape != deriv.shape: -1225 raise Exception('Manual derivative does not have correct shape.') -1226 elif kwargs.get('num_grad') is True: -1227 if multi > 0: -1228 raise Exception('Multi mode currently not supported for numerical derivative') -1229 options = { -1230 'base_step': 0.1, -1231 'step_ratio': 2.5} -1232 for key in options.keys(): -1233 kwarg = kwargs.get(key) -1234 if kwarg is not None: -1235 options[key] = kwarg -1236 tmp_df = nd.Gradient(func, order=4, **{k: v for k, v in options.items() if v is not None})(values, **kwargs) -1237 if tmp_df.size == 1: -1238 deriv = np.array([tmp_df.real]) -1239 else: -1240 deriv = tmp_df.real -1241 else: -1242 deriv = jacobian(func)(values, **kwargs) -1243 -1244 final_result = np.zeros(new_values.shape, dtype=object) -1245 -1246 if array_mode is True: -1247 -1248 class _Zero_grad(): -1249 def __init__(self, N): -1250 self.grad = np.zeros((N, 1)) -1251 -1252 new_covobs_lengths = dict(set([y for x in [[(n, o.covobs[n].N) for n in o.cov_names] for o in raveled_data] for y in x])) -1253 d_extracted = {} -1254 g_extracted = {} -1255 for name in new_sample_names: -1256 d_extracted[name] = [] -1257 ens_length = len(new_idl_d[name]) -1258 for i_dat, dat in enumerate(data): -1259 d_extracted[name].append(np.array([_expand_deltas_for_merge(o.deltas.get(name, np.zeros(ens_length)), o.idl.get(name, new_idl_d[name]), o.shape.get(name, ens_length), new_idl_d[name]) for o in dat.reshape(np.prod(dat.shape))]).reshape(dat.shape + (ens_length, ))) -1260 for name in new_cov_names: -1261 g_extracted[name] = [] -1262 zero_grad = _Zero_grad(new_covobs_lengths[name]) -1263 for i_dat, dat in enumerate(data): -1264 g_extracted[name].append(np.array([o.covobs.get(name, zero_grad).grad for o in dat.reshape(np.prod(dat.shape))]).reshape(dat.shape + (new_covobs_lengths[name], 1))) -1265 -1266 for i_val, new_val in np.ndenumerate(new_values): -1267 new_deltas = {} -1268 new_grad = {} -1269 if array_mode is True: -1270 for 
name in new_sample_names: -1271 ens_length = d_extracted[name][0].shape[-1] -1272 new_deltas[name] = np.zeros(ens_length) -1273 for i_dat, dat in enumerate(d_extracted[name]): -1274 new_deltas[name] += np.tensordot(deriv[i_val + (i_dat, )], dat) -1275 for name in new_cov_names: -1276 new_grad[name] = 0 -1277 for i_dat, dat in enumerate(g_extracted[name]): -1278 new_grad[name] += np.tensordot(deriv[i_val + (i_dat, )], dat) -1279 else: -1280 for j_obs, obs in np.ndenumerate(data): -1281 for name in obs.names: -1282 if name in obs.cov_names: -1283 new_grad[name] = new_grad.get(name, 0) + deriv[i_val + j_obs] * obs.covobs[name].grad -1284 else: -1285 new_deltas[name] = new_deltas.get(name, 0) + deriv[i_val + j_obs] * _expand_deltas_for_merge(obs.deltas[name], obs.idl[name], obs.shape[name], new_idl_d[name]) -1286 -1287 new_covobs = {name: Covobs(0, allcov[name], name, grad=new_grad[name]) for name in new_grad} -1288 -1289 if not set(new_covobs.keys()).isdisjoint(new_deltas.keys()): -1290 raise Exception('The same name has been used for deltas and covobs!') -1291 new_samples = [] -1292 new_means = [] -1293 new_idl = [] -1294 new_names_obs = [] -1295 for name in new_names: -1296 if name not in new_covobs: -1297 if is_merged[name]: -1298 filtered_deltas, filtered_idl_d = _filter_zeroes(new_deltas[name], new_idl_d[name]) -1299 else: -1300 filtered_deltas = new_deltas[name] -1301 filtered_idl_d = new_idl_d[name] +1039 +1040def _intersection_idx(idl): +1041 """Returns the intersection of all lists in idl as sorted list +1042 +1043 Parameters +1044 ---------- +1045 idl : list +1046 List of lists or ranges. +1047 """ +1048 +1049 def _lcm(*args): +1050 """Returns the lowest common multiple of args. +1051 +1052 From python 3.9 onwards the math library contains an lcm function.""" +1053 return reduce(lambda a, b: a * b // gcd(a, b), args) +1054 +1055 # Use groupby to efficiently check whether all elements of idl are identical +1056 try: +1057 g = groupby(idl) +1058 if next(g, True) and not next(g, False): +1059 return idl[0] +1060 except Exception: +1061 pass +1062 +1063 if np.all([type(idx) is range for idx in idl]): +1064 if len(set([idx[0] for idx in idl])) == 1: +1065 idstart = max([idx.start for idx in idl]) +1066 idstop = min([idx.stop for idx in idl]) +1067 idstep = _lcm(*[idx.step for idx in idl]) +1068 return range(idstart, idstop, idstep) +1069 +1070 return sorted(set.intersection(*[set(o) for o in idl])) +1071 +1072 +1073def _expand_deltas_for_merge(deltas, idx, shape, new_idx): +1074 """Expand deltas defined on idx to the list of configs that is defined by new_idx. +1075 New, empty entries are filled by 0. If idx and new_idx are of type range, the smallest +1076 common divisor of the step sizes is used as new step size. +1077 +1078 Parameters +1079 ---------- +1080 deltas : list +1081 List of fluctuations +1082 idx : list +1083 List or range of configs on which the deltas are defined. +1084 Has to be a subset of new_idx and has to be sorted in ascending order. +1085 shape : list +1086 Number of configs in idx. +1087 new_idx : list +1088 List of configs that defines the new range, has to be sorted in ascending order. 
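
    As an illustration (a minimal sketch with made-up numbers, not output from
    the test suite): deltas defined on every second config are padded with
    zeros when expanded to a contiguous range,

    _expand_deltas_for_merge(np.array([1., 2., 3.]), range(1, 7, 2), 3, range(1, 7, 1))
    # -> array([1., 0., 2., 0., 3., 0.])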
+1089 """ +1090 +1091 if type(idx) is range and type(new_idx) is range: +1092 if idx == new_idx: +1093 return deltas +1094 ret = np.zeros(new_idx[-1] - new_idx[0] + 1) +1095 for i in range(shape): +1096 ret[idx[i] - new_idx[0]] = deltas[i] +1097 return np.array([ret[new_idx[i] - new_idx[0]] for i in range(len(new_idx))]) +1098 +1099 +1100def _collapse_deltas_for_merge(deltas, idx, shape, new_idx): +1101 """Collapse deltas defined on idx to the list of configs that is defined by new_idx. +1102 If idx and new_idx are of type range, the smallest +1103 common divisor of the step sizes is used as new step size. +1104 +1105 Parameters +1106 ---------- +1107 deltas : list +1108 List of fluctuations +1109 idx : list +1110 List or range of configs on which the deltas are defined. +1111 Has to be a subset of new_idx and has to be sorted in ascending order. +1112 shape : list +1113 Number of configs in idx. +1114 new_idx : list +1115 List of configs that defines the new range, has to be sorted in ascending order. +1116 """ +1117 +1118 if type(idx) is range and type(new_idx) is range: +1119 if idx == new_idx: +1120 return deltas +1121 ret = np.zeros(new_idx[-1] - new_idx[0] + 1) +1122 for i in range(shape): +1123 if idx[i] in new_idx: +1124 ret[idx[i] - new_idx[0]] = deltas[i] +1125 return np.array([ret[new_idx[i] - new_idx[0]] for i in range(len(new_idx))]) +1126 +1127 +1128def _filter_zeroes(deltas, idx, eps=Obs.filter_eps): +1129 """Filter out all configurations with vanishing fluctuation such that they do not +1130 contribute to the error estimate anymore. Returns the new deltas and +1131 idx according to the filtering. +1132 A fluctuation is considered to be vanishing, if it is smaller than eps times +1133 the mean of the absolute values of all deltas in one list. +1134 +1135 Parameters +1136 ---------- +1137 deltas : list +1138 List of fluctuations +1139 idx : list +1140 List or ranges of configs on which the deltas are defined. +1141 eps : float +1142 Prefactor that enters the filter criterion. +1143 """ +1144 new_deltas = [] +1145 new_idx = [] +1146 maxd = np.mean(np.fabs(deltas)) +1147 for i in range(len(deltas)): +1148 if abs(deltas[i]) > eps * maxd: +1149 new_deltas.append(deltas[i]) +1150 new_idx.append(idx[i]) +1151 if new_idx: +1152 return np.array(new_deltas), new_idx +1153 else: +1154 return deltas, idx +1155 +1156 +1157def derived_observable(func, data, array_mode=False, **kwargs): +1158 """Construct a derived Obs according to func(data, **kwargs) using automatic differentiation. +1159 +1160 Parameters +1161 ---------- +1162 func : object +1163 arbitrary function of the form func(data, **kwargs). For the +1164 automatic differentiation to work, all numpy functions have to have +1165 the autograd wrapper (use 'import autograd.numpy as anp'). +1166 data : list +1167 list of Obs, e.g. [obs1, obs2, obs3]. +1168 num_grad : bool +1169 if True, numerical derivatives are used instead of autograd +1170 (default False). To control the numerical differentiation the +1171 kwargs of numdifftools.step_generators.MaxStepGenerator +1172 can be used. +1173 man_grad : list +1174 manually supply a list or an array which contains the jacobian +1175 of func. Use cautiously, supplying the wrong derivative will +1176 not be intercepted. +1177 +1178 Notes +1179 ----- +1180 For simple mathematical operations it can be practical to use anonymous +1181 functions. For the ratio of two observables one can e.g. 
use +1182 +1183 new_obs = derived_observable(lambda x: x[0] / x[1], [obs1, obs2]) +1184 """ +1185 +1186 data = np.asarray(data) +1187 raveled_data = data.ravel() +1188 +1189 # Workaround for matrix operations containing non Obs data +1190 if not all(isinstance(x, Obs) for x in raveled_data): +1191 for i in range(len(raveled_data)): +1192 if isinstance(raveled_data[i], (int, float)): +1193 raveled_data[i] = cov_Obs(raveled_data[i], 0.0, "###dummy_covobs###") +1194 +1195 allcov = {} +1196 for o in raveled_data: +1197 for name in o.cov_names: +1198 if name in allcov: +1199 if not np.allclose(allcov[name], o.covobs[name].cov): +1200 raise Exception('Inconsistent covariance matrices for %s!' % (name)) +1201 else: +1202 allcov[name] = o.covobs[name].cov +1203 +1204 n_obs = len(raveled_data) +1205 new_names = sorted(set([y for x in [o.names for o in raveled_data] for y in x])) +1206 new_cov_names = sorted(set([y for x in [o.cov_names for o in raveled_data] for y in x])) +1207 new_sample_names = sorted(set(new_names) - set(new_cov_names)) +1208 +1209 is_merged = {name: (len(list(filter(lambda o: o.is_merged.get(name, False) is True, raveled_data))) > 0) for name in new_sample_names} +1210 reweighted = len(list(filter(lambda o: o.reweighted is True, raveled_data))) > 0 +1211 +1212 if data.ndim == 1: +1213 values = np.array([o.value for o in data]) +1214 else: +1215 values = np.vectorize(lambda x: x.value)(data) +1216 +1217 new_values = func(values, **kwargs) +1218 +1219 multi = int(isinstance(new_values, np.ndarray)) +1220 +1221 new_r_values = {} +1222 new_idl_d = {} +1223 for name in new_sample_names: +1224 idl = [] +1225 tmp_values = np.zeros(n_obs) +1226 for i, item in enumerate(raveled_data): +1227 tmp_values[i] = item.r_values.get(name, item.value) +1228 tmp_idl = item.idl.get(name) +1229 if tmp_idl is not None: +1230 idl.append(tmp_idl) +1231 if multi > 0: +1232 tmp_values = np.array(tmp_values).reshape(data.shape) +1233 new_r_values[name] = func(tmp_values, **kwargs) +1234 new_idl_d[name] = _merge_idx(idl) +1235 if not is_merged[name]: +1236 is_merged[name] = (1 != len(set([len(idx) for idx in [*idl, new_idl_d[name]]]))) +1237 +1238 if 'man_grad' in kwargs: +1239 deriv = np.asarray(kwargs.get('man_grad')) +1240 if new_values.shape + data.shape != deriv.shape: +1241 raise Exception('Manual derivative does not have correct shape.') +1242 elif kwargs.get('num_grad') is True: +1243 if multi > 0: +1244 raise Exception('Multi mode currently not supported for numerical derivative') +1245 options = { +1246 'base_step': 0.1, +1247 'step_ratio': 2.5} +1248 for key in options.keys(): +1249 kwarg = kwargs.get(key) +1250 if kwarg is not None: +1251 options[key] = kwarg +1252 tmp_df = nd.Gradient(func, order=4, **{k: v for k, v in options.items() if v is not None})(values, **kwargs) +1253 if tmp_df.size == 1: +1254 deriv = np.array([tmp_df.real]) +1255 else: +1256 deriv = tmp_df.real +1257 else: +1258 deriv = jacobian(func)(values, **kwargs) +1259 +1260 final_result = np.zeros(new_values.shape, dtype=object) +1261 +1262 if array_mode is True: +1263 +1264 class _Zero_grad(): +1265 def __init__(self, N): +1266 self.grad = np.zeros((N, 1)) +1267 +1268 new_covobs_lengths = dict(set([y for x in [[(n, o.covobs[n].N) for n in o.cov_names] for o in raveled_data] for y in x])) +1269 d_extracted = {} +1270 g_extracted = {} +1271 for name in new_sample_names: +1272 d_extracted[name] = [] +1273 ens_length = len(new_idl_d[name]) +1274 for i_dat, dat in enumerate(data): +1275 
d_extracted[name].append(np.array([_expand_deltas_for_merge(o.deltas.get(name, np.zeros(ens_length)), o.idl.get(name, new_idl_d[name]), o.shape.get(name, ens_length), new_idl_d[name]) for o in dat.reshape(np.prod(dat.shape))]).reshape(dat.shape + (ens_length, ))) +1276 for name in new_cov_names: +1277 g_extracted[name] = [] +1278 zero_grad = _Zero_grad(new_covobs_lengths[name]) +1279 for i_dat, dat in enumerate(data): +1280 g_extracted[name].append(np.array([o.covobs.get(name, zero_grad).grad for o in dat.reshape(np.prod(dat.shape))]).reshape(dat.shape + (new_covobs_lengths[name], 1))) +1281 +1282 for i_val, new_val in np.ndenumerate(new_values): +1283 new_deltas = {} +1284 new_grad = {} +1285 if array_mode is True: +1286 for name in new_sample_names: +1287 ens_length = d_extracted[name][0].shape[-1] +1288 new_deltas[name] = np.zeros(ens_length) +1289 for i_dat, dat in enumerate(d_extracted[name]): +1290 new_deltas[name] += np.tensordot(deriv[i_val + (i_dat, )], dat) +1291 for name in new_cov_names: +1292 new_grad[name] = 0 +1293 for i_dat, dat in enumerate(g_extracted[name]): +1294 new_grad[name] += np.tensordot(deriv[i_val + (i_dat, )], dat) +1295 else: +1296 for j_obs, obs in np.ndenumerate(data): +1297 for name in obs.names: +1298 if name in obs.cov_names: +1299 new_grad[name] = new_grad.get(name, 0) + deriv[i_val + j_obs] * obs.covobs[name].grad +1300 else: +1301 new_deltas[name] = new_deltas.get(name, 0) + deriv[i_val + j_obs] * _expand_deltas_for_merge(obs.deltas[name], obs.idl[name], obs.shape[name], new_idl_d[name]) 1302 -1303 new_samples.append(filtered_deltas) -1304 new_idl.append(filtered_idl_d) -1305 new_means.append(new_r_values[name][i_val]) -1306 new_names_obs.append(name) -1307 final_result[i_val] = Obs(new_samples, new_names_obs, means=new_means, idl=new_idl) -1308 for name in new_covobs: -1309 final_result[i_val].names.append(name) -1310 final_result[i_val]._covobs = new_covobs -1311 final_result[i_val]._value = new_val -1312 final_result[i_val].is_merged = is_merged -1313 final_result[i_val].reweighted = reweighted -1314 -1315 if multi == 0: -1316 final_result = final_result.item() -1317 -1318 return final_result -1319 -1320 -1321def _reduce_deltas(deltas, idx_old, idx_new): -1322 """Extract deltas defined on idx_old on all configs of idx_new. -1323 -1324 Assumes, that idx_old and idx_new are correctly defined idl, i.e., they -1325 are ordered in an ascending order. -1326 -1327 Parameters -1328 ---------- -1329 deltas : list -1330 List of fluctuations -1331 idx_old : list -1332 List or range of configs on which the deltas are defined -1333 idx_new : list -1334 List of configs for which we want to extract the deltas. -1335 Has to be a subset of idx_old. -1336 """ -1337 if not len(deltas) == len(idx_old): -1338 raise Exception('Length of deltas and idx_old have to be the same: %d != %d' % (len(deltas), len(idx_old))) -1339 if type(idx_old) is range and type(idx_new) is range: -1340 if idx_old == idx_new: -1341 return deltas -1342 shape = len(idx_new) -1343 ret = np.zeros(shape) -1344 oldpos = 0 -1345 for i in range(shape): -1346 pos = -1 -1347 for j in range(oldpos, len(idx_old)): -1348 if idx_old[j] == idx_new[i]: -1349 pos = j -1350 break -1351 if pos < 0: -1352 raise Exception('Error in _reduce_deltas: Config %d not in idx_old' % (idx_new[i])) -1353 ret[i] = deltas[pos] -1354 oldpos = pos -1355 return np.array(ret) -1356 -1357 -1358def reweight(weight, obs, **kwargs): -1359 """Reweight a list of observables. 
-1360 -1361 Parameters -1362 ---------- -1363 weight : Obs -1364 Reweighting factor. An Observable that has to be defined on a superset of the -1365 configurations in obs[i].idl for all i. -1366 obs : list -1367 list of Obs, e.g. [obs1, obs2, obs3]. -1368 all_configs : bool -1369 if True, the reweighted observables are normalized by the average of -1370 the reweighting factor on all configurations in weight.idl and not -1371 on the configurations in obs[i].idl. Default False. -1372 """ -1373 result = [] -1374 for i in range(len(obs)): -1375 if len(obs[i].cov_names): -1376 raise Exception('Error: Not possible to reweight an Obs that contains covobs!') -1377 if not set(obs[i].names).issubset(weight.names): -1378 raise Exception('Error: Ensembles do not fit') -1379 for name in obs[i].names: -1380 if not set(obs[i].idl[name]).issubset(weight.idl[name]): -1381 raise Exception('obs[%d] has to be defined on a subset of the configs in weight.idl[%s]!' % (i, name)) -1382 new_samples = [] -1383 w_deltas = {} -1384 for name in sorted(obs[i].names): -1385 w_deltas[name] = _reduce_deltas(weight.deltas[name], weight.idl[name], obs[i].idl[name]) -1386 new_samples.append((w_deltas[name] + weight.r_values[name]) * (obs[i].deltas[name] + obs[i].r_values[name])) -1387 tmp_obs = Obs(new_samples, sorted(obs[i].names), idl=[obs[i].idl[name] for name in sorted(obs[i].names)]) -1388 -1389 if kwargs.get('all_configs'): -1390 new_weight = weight -1391 else: -1392 new_weight = Obs([w_deltas[name] + weight.r_values[name] for name in sorted(obs[i].names)], sorted(obs[i].names), idl=[obs[i].idl[name] for name in sorted(obs[i].names)]) -1393 -1394 result.append(tmp_obs / new_weight) -1395 result[-1].reweighted = True -1396 result[-1].is_merged = obs[i].is_merged -1397 -1398 return result -1399 -1400 -1401def correlate(obs_a, obs_b): -1402 """Correlate two observables. -1403 -1404 Parameters -1405 ---------- -1406 obs_a : Obs -1407 First observable -1408 obs_b : Obs -1409 Second observable -1410 -1411 Notes -1412 ----- -1413 Keep in mind to only correlate primary observables which have not been reweighted -1414 yet. The reweighting has to be applied after correlating the observables. -1415 Currently only works if ensembles are identical (this is not strictly necessary). 
-1416 """ -1417 -1418 if sorted(obs_a.names) != sorted(obs_b.names): -1419 raise Exception('Ensembles do not fit') -1420 if len(obs_a.cov_names) or len(obs_b.cov_names): -1421 raise Exception('Error: Not possible to correlate Obs that contain covobs!') -1422 for name in obs_a.names: -1423 if obs_a.shape[name] != obs_b.shape[name]: -1424 raise Exception('Shapes of ensemble', name, 'do not fit') -1425 if obs_a.idl[name] != obs_b.idl[name]: -1426 raise Exception('idl of ensemble', name, 'do not fit') -1427 -1428 if obs_a.reweighted is True: -1429 warnings.warn("The first observable is already reweighted.", RuntimeWarning) -1430 if obs_b.reweighted is True: -1431 warnings.warn("The second observable is already reweighted.", RuntimeWarning) -1432 -1433 new_samples = [] -1434 new_idl = [] -1435 for name in sorted(obs_a.names): -1436 new_samples.append((obs_a.deltas[name] + obs_a.r_values[name]) * (obs_b.deltas[name] + obs_b.r_values[name])) -1437 new_idl.append(obs_a.idl[name]) -1438 -1439 o = Obs(new_samples, sorted(obs_a.names), idl=new_idl) -1440 o.is_merged = {name: (obs_a.is_merged.get(name, False) or obs_b.is_merged.get(name, False)) for name in o.names} -1441 o.reweighted = obs_a.reweighted or obs_b.reweighted -1442 return o +1303 new_covobs = {name: Covobs(0, allcov[name], name, grad=new_grad[name]) for name in new_grad} +1304 +1305 if not set(new_covobs.keys()).isdisjoint(new_deltas.keys()): +1306 raise Exception('The same name has been used for deltas and covobs!') +1307 new_samples = [] +1308 new_means = [] +1309 new_idl = [] +1310 new_names_obs = [] +1311 for name in new_names: +1312 if name not in new_covobs: +1313 if is_merged[name]: +1314 filtered_deltas, filtered_idl_d = _filter_zeroes(new_deltas[name], new_idl_d[name]) +1315 else: +1316 filtered_deltas = new_deltas[name] +1317 filtered_idl_d = new_idl_d[name] +1318 +1319 new_samples.append(filtered_deltas) +1320 new_idl.append(filtered_idl_d) +1321 new_means.append(new_r_values[name][i_val]) +1322 new_names_obs.append(name) +1323 final_result[i_val] = Obs(new_samples, new_names_obs, means=new_means, idl=new_idl) +1324 for name in new_covobs: +1325 final_result[i_val].names.append(name) +1326 final_result[i_val]._covobs = new_covobs +1327 final_result[i_val]._value = new_val +1328 final_result[i_val].is_merged = is_merged +1329 final_result[i_val].reweighted = reweighted +1330 +1331 if multi == 0: +1332 final_result = final_result.item() +1333 +1334 return final_result +1335 +1336 +1337def _reduce_deltas(deltas, idx_old, idx_new): +1338 """Extract deltas defined on idx_old on all configs of idx_new. +1339 +1340 Assumes, that idx_old and idx_new are correctly defined idl, i.e., they +1341 are ordered in an ascending order. +1342 +1343 Parameters +1344 ---------- +1345 deltas : list +1346 List of fluctuations +1347 idx_old : list +1348 List or range of configs on which the deltas are defined +1349 idx_new : list +1350 List of configs for which we want to extract the deltas. +1351 Has to be a subset of idx_old. 
+1352 """ +1353 if not len(deltas) == len(idx_old): +1354 raise Exception('Length of deltas and idx_old have to be the same: %d != %d' % (len(deltas), len(idx_old))) +1355 if type(idx_old) is range and type(idx_new) is range: +1356 if idx_old == idx_new: +1357 return deltas +1358 shape = len(idx_new) +1359 ret = np.zeros(shape) +1360 oldpos = 0 +1361 for i in range(shape): +1362 pos = -1 +1363 for j in range(oldpos, len(idx_old)): +1364 if idx_old[j] == idx_new[i]: +1365 pos = j +1366 break +1367 if pos < 0: +1368 raise Exception('Error in _reduce_deltas: Config %d not in idx_old' % (idx_new[i])) +1369 ret[i] = deltas[pos] +1370 oldpos = pos +1371 return np.array(ret) +1372 +1373 +1374def reweight(weight, obs, **kwargs): +1375 """Reweight a list of observables. +1376 +1377 Parameters +1378 ---------- +1379 weight : Obs +1380 Reweighting factor. An Observable that has to be defined on a superset of the +1381 configurations in obs[i].idl for all i. +1382 obs : list +1383 list of Obs, e.g. [obs1, obs2, obs3]. +1384 all_configs : bool +1385 if True, the reweighted observables are normalized by the average of +1386 the reweighting factor on all configurations in weight.idl and not +1387 on the configurations in obs[i].idl. Default False. +1388 """ +1389 result = [] +1390 for i in range(len(obs)): +1391 if len(obs[i].cov_names): +1392 raise Exception('Error: Not possible to reweight an Obs that contains covobs!') +1393 if not set(obs[i].names).issubset(weight.names): +1394 raise Exception('Error: Ensembles do not fit') +1395 for name in obs[i].names: +1396 if not set(obs[i].idl[name]).issubset(weight.idl[name]): +1397 raise Exception('obs[%d] has to be defined on a subset of the configs in weight.idl[%s]!' % (i, name)) +1398 new_samples = [] +1399 w_deltas = {} +1400 for name in sorted(obs[i].names): +1401 w_deltas[name] = _reduce_deltas(weight.deltas[name], weight.idl[name], obs[i].idl[name]) +1402 new_samples.append((w_deltas[name] + weight.r_values[name]) * (obs[i].deltas[name] + obs[i].r_values[name])) +1403 tmp_obs = Obs(new_samples, sorted(obs[i].names), idl=[obs[i].idl[name] for name in sorted(obs[i].names)]) +1404 +1405 if kwargs.get('all_configs'): +1406 new_weight = weight +1407 else: +1408 new_weight = Obs([w_deltas[name] + weight.r_values[name] for name in sorted(obs[i].names)], sorted(obs[i].names), idl=[obs[i].idl[name] for name in sorted(obs[i].names)]) +1409 +1410 result.append(tmp_obs / new_weight) +1411 result[-1].reweighted = True +1412 result[-1].is_merged = obs[i].is_merged +1413 +1414 return result +1415 +1416 +1417def correlate(obs_a, obs_b): +1418 """Correlate two observables. +1419 +1420 Parameters +1421 ---------- +1422 obs_a : Obs +1423 First observable +1424 obs_b : Obs +1425 Second observable +1426 +1427 Notes +1428 ----- +1429 Keep in mind to only correlate primary observables which have not been reweighted +1430 yet. The reweighting has to be applied after correlating the observables. +1431 Currently only works if ensembles are identical (this is not strictly necessary). 
+1432 """ +1433 +1434 if sorted(obs_a.names) != sorted(obs_b.names): +1435 raise Exception('Ensembles do not fit') +1436 if len(obs_a.cov_names) or len(obs_b.cov_names): +1437 raise Exception('Error: Not possible to correlate Obs that contain covobs!') +1438 for name in obs_a.names: +1439 if obs_a.shape[name] != obs_b.shape[name]: +1440 raise Exception('Shapes of ensemble', name, 'do not fit') +1441 if obs_a.idl[name] != obs_b.idl[name]: +1442 raise Exception('idl of ensemble', name, 'do not fit') 1443 -1444 -1445def covariance(obs, visualize=False, correlation=False, smooth=None, **kwargs): -1446 r'''Calculates the error covariance matrix of a set of observables. -1447 -1448 The gamma method has to be applied first to all observables. -1449 -1450 Parameters -1451 ---------- -1452 obs : list or numpy.ndarray -1453 List or one dimensional array of Obs -1454 visualize : bool -1455 If True plots the corresponding normalized correlation matrix (default False). -1456 correlation : bool -1457 If True the correlation matrix instead of the error covariance matrix is returned (default False). -1458 smooth : None or int -1459 If smooth is an integer 'E' between 2 and the dimension of the matrix minus 1 the eigenvalue -1460 smoothing procedure of hep-lat/9412087 is applied to the correlation matrix which leaves the -1461 largest E eigenvalues essentially unchanged and smoothes the smaller eigenvalues to avoid extremely -1462 small ones. +1444 if obs_a.reweighted is True: +1445 warnings.warn("The first observable is already reweighted.", RuntimeWarning) +1446 if obs_b.reweighted is True: +1447 warnings.warn("The second observable is already reweighted.", RuntimeWarning) +1448 +1449 new_samples = [] +1450 new_idl = [] +1451 for name in sorted(obs_a.names): +1452 new_samples.append((obs_a.deltas[name] + obs_a.r_values[name]) * (obs_b.deltas[name] + obs_b.r_values[name])) +1453 new_idl.append(obs_a.idl[name]) +1454 +1455 o = Obs(new_samples, sorted(obs_a.names), idl=new_idl) +1456 o.is_merged = {name: (obs_a.is_merged.get(name, False) or obs_b.is_merged.get(name, False)) for name in o.names} +1457 o.reweighted = obs_a.reweighted or obs_b.reweighted +1458 return o +1459 +1460 +1461def covariance(obs, visualize=False, correlation=False, smooth=None, **kwargs): +1462 r'''Calculates the error covariance matrix of a set of observables. 1463 -1464 Notes -1465 ----- -1466 The error covariance is defined such that it agrees with the squared standard error for two identical observables -1467 $$\operatorname{cov}(a,a)=\sum_{s=1}^N\delta_a^s\delta_a^s/N^2=\Gamma_{aa}(0)/N=\operatorname{var}(a)/N=\sigma_a^2$$ -1468 in the absence of autocorrelation. -1469 The error covariance is estimated by calculating the correlation matrix assuming no autocorrelation and then rescaling the correlation matrix by the full errors including the previous gamma method estimate for the autocorrelation of the observables. The covariance at windowsize 0 is guaranteed to be positive semi-definite -1470 $$\sum_{i,j}v_i\Gamma_{ij}(0)v_j=\frac{1}{N}\sum_{s=1}^N\sum_{i,j}v_i\delta_i^s\delta_j^s v_j=\frac{1}{N}\sum_{s=1}^N\sum_{i}|v_i\delta_i^s|^2\geq 0\,,$$ for every $v\in\mathbb{R}^M$, while such an identity does not hold for larger windows/lags. -1471 For observables defined on a single ensemble our approximation is equivalent to assuming that the integrated autocorrelation time of an off-diagonal element is equal to the geometric mean of the integrated autocorrelation times of the corresponding diagonal elements. 
-1472 $$\tau_{\mathrm{int}, ij}=\sqrt{\tau_{\mathrm{int}, i}\times \tau_{\mathrm{int}, j}}$$ -1473 This construction ensures that the estimated covariance matrix is positive semi-definite (up to numerical rounding errors). -1474 ''' -1475 -1476 length = len(obs) -1477 -1478 max_samples = np.max([o.N for o in obs]) -1479 if max_samples <= length and not [item for sublist in [o.cov_names for o in obs] for item in sublist]: -1480 warnings.warn(f"The dimension of the covariance matrix ({length}) is larger or equal to the number of samples ({max_samples}). This will result in a rank deficient matrix.", RuntimeWarning) -1481 -1482 cov = np.zeros((length, length)) -1483 for i in range(length): -1484 for j in range(i, length): -1485 cov[i, j] = _covariance_element(obs[i], obs[j]) -1486 cov = cov + cov.T - np.diag(np.diag(cov)) -1487 -1488 corr = np.diag(1 / np.sqrt(np.diag(cov))) @ cov @ np.diag(1 / np.sqrt(np.diag(cov))) -1489 -1490 if isinstance(smooth, int): -1491 corr = _smooth_eigenvalues(corr, smooth) -1492 -1493 if visualize: -1494 plt.matshow(corr, vmin=-1, vmax=1) -1495 plt.set_cmap('RdBu') -1496 plt.colorbar() -1497 plt.draw() -1498 -1499 if correlation is True: -1500 return corr -1501 -1502 errors = [o.dvalue for o in obs] -1503 cov = np.diag(errors) @ corr @ np.diag(errors) -1504 -1505 eigenvalues = np.linalg.eigh(cov)[0] -1506 if not np.all(eigenvalues >= 0): -1507 warnings.warn("Covariance matrix is not positive semi-definite (Eigenvalues: " + str(eigenvalues) + ")", RuntimeWarning) +1464 The gamma method has to be applied first to all observables. +1465 +1466 Parameters +1467 ---------- +1468 obs : list or numpy.ndarray +1469 List or one dimensional array of Obs +1470 visualize : bool +1471 If True plots the corresponding normalized correlation matrix (default False). +1472 correlation : bool +1473 If True the correlation matrix instead of the error covariance matrix is returned (default False). +1474 smooth : None or int +1475 If smooth is an integer 'E' between 2 and the dimension of the matrix minus 1 the eigenvalue +1476 smoothing procedure of hep-lat/9412087 is applied to the correlation matrix which leaves the +1477 largest E eigenvalues essentially unchanged and smoothes the smaller eigenvalues to avoid extremely +1478 small ones. +1479 +1480 Notes +1481 ----- +1482 The error covariance is defined such that it agrees with the squared standard error for two identical observables +1483 $$\operatorname{cov}(a,a)=\sum_{s=1}^N\delta_a^s\delta_a^s/N^2=\Gamma_{aa}(0)/N=\operatorname{var}(a)/N=\sigma_a^2$$ +1484 in the absence of autocorrelation. +1485 The error covariance is estimated by calculating the correlation matrix assuming no autocorrelation and then rescaling the correlation matrix by the full errors including the previous gamma method estimate for the autocorrelation of the observables. The covariance at windowsize 0 is guaranteed to be positive semi-definite +1486 $$\sum_{i,j}v_i\Gamma_{ij}(0)v_j=\frac{1}{N}\sum_{s=1}^N\sum_{i,j}v_i\delta_i^s\delta_j^s v_j=\frac{1}{N}\sum_{s=1}^N\sum_{i}|v_i\delta_i^s|^2\geq 0\,,$$ for every $v\in\mathbb{R}^M$, while such an identity does not hold for larger windows/lags. +1487 For observables defined on a single ensemble our approximation is equivalent to assuming that the integrated autocorrelation time of an off-diagonal element is equal to the geometric mean of the integrated autocorrelation times of the corresponding diagonal elements. 
+1488 $$\tau_{\mathrm{int}, ij}=\sqrt{\tau_{\mathrm{int}, i}\times \tau_{\mathrm{int}, j}}$$ +1489 This construction ensures that the estimated covariance matrix is positive semi-definite (up to numerical rounding errors). +1490 ''' +1491 +1492 length = len(obs) +1493 +1494 max_samples = np.max([o.N for o in obs]) +1495 if max_samples <= length and not [item for sublist in [o.cov_names for o in obs] for item in sublist]: +1496 warnings.warn(f"The dimension of the covariance matrix ({length}) is larger or equal to the number of samples ({max_samples}). This will result in a rank deficient matrix.", RuntimeWarning) +1497 +1498 cov = np.zeros((length, length)) +1499 for i in range(length): +1500 for j in range(i, length): +1501 cov[i, j] = _covariance_element(obs[i], obs[j]) +1502 cov = cov + cov.T - np.diag(np.diag(cov)) +1503 +1504 corr = np.diag(1 / np.sqrt(np.diag(cov))) @ cov @ np.diag(1 / np.sqrt(np.diag(cov))) +1505 +1506 if isinstance(smooth, int): +1507 corr = _smooth_eigenvalues(corr, smooth) 1508 -1509 return cov -1510 -1511 -1512def _smooth_eigenvalues(corr, E): -1513 """Eigenvalue smoothing as described in hep-lat/9412087 +1509 if visualize: +1510 plt.matshow(corr, vmin=-1, vmax=1) +1511 plt.set_cmap('RdBu') +1512 plt.colorbar() +1513 plt.draw() 1514 -1515 corr : np.ndarray -1516 correlation matrix -1517 E : integer -1518 Number of eigenvalues to be left substantially unchanged -1519 """ -1520 if not (2 < E < corr.shape[0] - 1): -1521 raise Exception(f"'E' has to be between 2 and the dimension of the correlation matrix minus 1 ({corr.shape[0] - 1}).") -1522 vals, vec = np.linalg.eigh(corr) -1523 lambda_min = np.mean(vals[:-E]) -1524 vals[vals < lambda_min] = lambda_min -1525 vals /= np.mean(vals) -1526 return vec @ np.diag(vals) @ vec.T +1515 if correlation is True: +1516 return corr +1517 +1518 errors = [o.dvalue for o in obs] +1519 cov = np.diag(errors) @ corr @ np.diag(errors) +1520 +1521 eigenvalues = np.linalg.eigh(cov)[0] +1522 if not np.all(eigenvalues >= 0): +1523 warnings.warn("Covariance matrix is not positive semi-definite (Eigenvalues: " + str(eigenvalues) + ")", RuntimeWarning) +1524 +1525 return cov +1526 1527 -1528 -1529def _covariance_element(obs1, obs2): -1530 """Estimates the covariance of two Obs objects, neglecting autocorrelations.""" -1531 -1532 def calc_gamma(deltas1, deltas2, idx1, idx2, new_idx): -1533 deltas1 = _collapse_deltas_for_merge(deltas1, idx1, len(idx1), new_idx) -1534 deltas2 = _collapse_deltas_for_merge(deltas2, idx2, len(idx2), new_idx) -1535 return np.sum(deltas1 * deltas2) -1536 -1537 if set(obs1.names).isdisjoint(set(obs2.names)): -1538 return 0.0 -1539 -1540 if not hasattr(obs1, 'e_dvalue') or not hasattr(obs2, 'e_dvalue'): -1541 raise Exception('The gamma method has to be applied to both Obs first.') -1542 -1543 dvalue = 0.0 +1528def _smooth_eigenvalues(corr, E): +1529 """Eigenvalue smoothing as described in hep-lat/9412087 +1530 +1531 corr : np.ndarray +1532 correlation matrix +1533 E : integer +1534 Number of eigenvalues to be left substantially unchanged +1535 """ +1536 if not (2 < E < corr.shape[0] - 1): +1537 raise Exception(f"'E' has to be between 2 and the dimension of the correlation matrix minus 1 ({corr.shape[0] - 1}).") +1538 vals, vec = np.linalg.eigh(corr) +1539 lambda_min = np.mean(vals[:-E]) +1540 vals[vals < lambda_min] = lambda_min +1541 vals /= np.mean(vals) +1542 return vec @ np.diag(vals) @ vec.T +1543 1544 -1545 for e_name in obs1.mc_names: -1546 -1547 if e_name not in obs2.mc_names: -1548 continue -1549 -1550 idl_d 
= {} -1551 for r_name in obs1.e_content[e_name]: -1552 if r_name not in obs2.e_content[e_name]: -1553 continue -1554 idl_d[r_name] = _intersection_idx([obs1.idl[r_name], obs2.idl[r_name]]) +1545def _covariance_element(obs1, obs2): +1546 """Estimates the covariance of two Obs objects, neglecting autocorrelations.""" +1547 +1548 def calc_gamma(deltas1, deltas2, idx1, idx2, new_idx): +1549 deltas1 = _collapse_deltas_for_merge(deltas1, idx1, len(idx1), new_idx) +1550 deltas2 = _collapse_deltas_for_merge(deltas2, idx2, len(idx2), new_idx) +1551 return np.sum(deltas1 * deltas2) +1552 +1553 if set(obs1.names).isdisjoint(set(obs2.names)): +1554 return 0.0 1555 -1556 gamma = 0.0 -1557 -1558 for r_name in obs1.e_content[e_name]: -1559 if r_name not in obs2.e_content[e_name]: -1560 continue -1561 if len(idl_d[r_name]) == 0: -1562 continue -1563 gamma += calc_gamma(obs1.deltas[r_name], obs2.deltas[r_name], obs1.idl[r_name], obs2.idl[r_name], idl_d[r_name]) -1564 -1565 if gamma == 0.0: -1566 continue -1567 -1568 gamma_div = 0.0 -1569 for r_name in obs1.e_content[e_name]: -1570 if r_name not in obs2.e_content[e_name]: -1571 continue -1572 if len(idl_d[r_name]) == 0: -1573 continue -1574 gamma_div += np.sqrt(calc_gamma(obs1.deltas[r_name], obs1.deltas[r_name], obs1.idl[r_name], obs1.idl[r_name], idl_d[r_name]) * calc_gamma(obs2.deltas[r_name], obs2.deltas[r_name], obs2.idl[r_name], obs2.idl[r_name], idl_d[r_name])) -1575 gamma /= gamma_div -1576 -1577 dvalue += gamma -1578 -1579 for e_name in obs1.cov_names: +1556 if not hasattr(obs1, 'e_dvalue') or not hasattr(obs2, 'e_dvalue'): +1557 raise Exception('The gamma method has to be applied to both Obs first.') +1558 +1559 dvalue = 0.0 +1560 +1561 for e_name in obs1.mc_names: +1562 +1563 if e_name not in obs2.mc_names: +1564 continue +1565 +1566 idl_d = {} +1567 for r_name in obs1.e_content[e_name]: +1568 if r_name not in obs2.e_content[e_name]: +1569 continue +1570 idl_d[r_name] = _intersection_idx([obs1.idl[r_name], obs2.idl[r_name]]) +1571 +1572 gamma = 0.0 +1573 +1574 for r_name in obs1.e_content[e_name]: +1575 if r_name not in obs2.e_content[e_name]: +1576 continue +1577 if len(idl_d[r_name]) == 0: +1578 continue +1579 gamma += calc_gamma(obs1.deltas[r_name], obs2.deltas[r_name], obs1.idl[r_name], obs2.idl[r_name], idl_d[r_name]) 1580 -1581 if e_name not in obs2.cov_names: +1581 if gamma == 0.0: 1582 continue 1583 -1584 dvalue += float(np.dot(np.transpose(obs1.covobs[e_name].grad), np.dot(obs1.covobs[e_name].cov, obs2.covobs[e_name].grad))) -1585 -1586 return dvalue -1587 -1588 -1589def import_jackknife(jacks, name, idl=None): -1590 """Imports jackknife samples and returns an Obs -1591 -1592 Parameters -1593 ---------- -1594 jacks : numpy.ndarray -1595 numpy array containing the mean value as zeroth entry and -1596 the N jackknife samples as first to Nth entry. -1597 name : str -1598 name of the ensemble the samples are defined on. 
-1599 """ -1600 length = len(jacks) - 1 -1601 prj = (np.ones((length, length)) - (length - 1) * np.identity(length)) -1602 samples = jacks[1:] @ prj -1603 mean = np.mean(samples) -1604 new_obs = Obs([samples - mean], [name], idl=idl, means=[mean]) -1605 new_obs._value = jacks[0] -1606 return new_obs +1584 gamma_div = 0.0 +1585 for r_name in obs1.e_content[e_name]: +1586 if r_name not in obs2.e_content[e_name]: +1587 continue +1588 if len(idl_d[r_name]) == 0: +1589 continue +1590 gamma_div += np.sqrt(calc_gamma(obs1.deltas[r_name], obs1.deltas[r_name], obs1.idl[r_name], obs1.idl[r_name], idl_d[r_name]) * calc_gamma(obs2.deltas[r_name], obs2.deltas[r_name], obs2.idl[r_name], obs2.idl[r_name], idl_d[r_name])) +1591 gamma /= gamma_div +1592 +1593 dvalue += gamma +1594 +1595 for e_name in obs1.cov_names: +1596 +1597 if e_name not in obs2.cov_names: +1598 continue +1599 +1600 dvalue += float(np.dot(np.transpose(obs1.covobs[e_name].grad), np.dot(obs1.covobs[e_name].cov, obs2.covobs[e_name].grad))) +1601 +1602 return dvalue +1603 +1604 +1605def import_jackknife(jacks, name, idl=None): +1606 """Imports jackknife samples and returns an Obs 1607 -1608 -1609def merge_obs(list_of_obs): -1610 """Combine all observables in list_of_obs into one new observable -1611 -1612 Parameters -1613 ---------- -1614 list_of_obs : list -1615 list of the Obs object to be combined -1616 -1617 Notes -1618 ----- -1619 It is not possible to combine obs which are based on the same replicum -1620 """ -1621 replist = [item for obs in list_of_obs for item in obs.names] -1622 if (len(replist) == len(set(replist))) is False: -1623 raise Exception('list_of_obs contains duplicate replica: %s' % (str(replist))) -1624 if any([len(o.cov_names) for o in list_of_obs]): -1625 raise Exception('Not possible to merge data that contains covobs!') -1626 new_dict = {} -1627 idl_dict = {} -1628 for o in list_of_obs: -1629 new_dict.update({key: o.deltas.get(key, 0) + o.r_values.get(key, 0) -1630 for key in set(o.deltas) | set(o.r_values)}) -1631 idl_dict.update({key: o.idl.get(key, 0) for key in set(o.deltas)}) +1608 Parameters +1609 ---------- +1610 jacks : numpy.ndarray +1611 numpy array containing the mean value as zeroth entry and +1612 the N jackknife samples as first to Nth entry. +1613 name : str +1614 name of the ensemble the samples are defined on. 
+1615 """ +1616 length = len(jacks) - 1 +1617 prj = (np.ones((length, length)) - (length - 1) * np.identity(length)) +1618 samples = jacks[1:] @ prj +1619 mean = np.mean(samples) +1620 new_obs = Obs([samples - mean], [name], idl=idl, means=[mean]) +1621 new_obs._value = jacks[0] +1622 return new_obs +1623 +1624 +1625def merge_obs(list_of_obs): +1626 """Combine all observables in list_of_obs into one new observable +1627 +1628 Parameters +1629 ---------- +1630 list_of_obs : list +1631 list of the Obs object to be combined 1632 -1633 names = sorted(new_dict.keys()) -1634 o = Obs([new_dict[name] for name in names], names, idl=[idl_dict[name] for name in names]) -1635 o.is_merged = {name: np.any([oi.is_merged.get(name, False) for oi in list_of_obs]) for name in o.names} -1636 o.reweighted = np.max([oi.reweighted for oi in list_of_obs]) -1637 return o -1638 -1639 -1640def cov_Obs(means, cov, name, grad=None): -1641 """Create an Obs based on mean(s) and a covariance matrix -1642 -1643 Parameters -1644 ---------- -1645 mean : list of floats or float -1646 N mean value(s) of the new Obs -1647 cov : list or array -1648 2d (NxN) Covariance matrix, 1d diagonal entries or 0d covariance -1649 name : str -1650 identifier for the covariance matrix -1651 grad : list or array -1652 Gradient of the Covobs wrt. the means belonging to cov. -1653 """ +1633 Notes +1634 ----- +1635 It is not possible to combine obs which are based on the same replicum +1636 """ +1637 replist = [item for obs in list_of_obs for item in obs.names] +1638 if (len(replist) == len(set(replist))) is False: +1639 raise Exception('list_of_obs contains duplicate replica: %s' % (str(replist))) +1640 if any([len(o.cov_names) for o in list_of_obs]): +1641 raise Exception('Not possible to merge data that contains covobs!') +1642 new_dict = {} +1643 idl_dict = {} +1644 for o in list_of_obs: +1645 new_dict.update({key: o.deltas.get(key, 0) + o.r_values.get(key, 0) +1646 for key in set(o.deltas) | set(o.r_values)}) +1647 idl_dict.update({key: o.idl.get(key, 0) for key in set(o.deltas)}) +1648 +1649 names = sorted(new_dict.keys()) +1650 o = Obs([new_dict[name] for name in names], names, idl=[idl_dict[name] for name in names]) +1651 o.is_merged = {name: np.any([oi.is_merged.get(name, False) for oi in list_of_obs]) for name in o.names} +1652 o.reweighted = np.max([oi.reweighted for oi in list_of_obs]) +1653 return o 1654 -1655 def covobs_to_obs(co): -1656 """Make an Obs out of a Covobs -1657 -1658 Parameters -1659 ---------- -1660 co : Covobs -1661 Covobs to be embedded into the Obs -1662 """ -1663 o = Obs([], [], means=[]) -1664 o._value = co.value -1665 o.names.append(co.name) -1666 o._covobs[co.name] = co -1667 o._dvalue = np.sqrt(co.errsq()) -1668 return o -1669 -1670 ol = [] -1671 if isinstance(means, (float, int)): -1672 means = [means] +1655 +1656def cov_Obs(means, cov, name, grad=None): +1657 """Create an Obs based on mean(s) and a covariance matrix +1658 +1659 Parameters +1660 ---------- +1661 mean : list of floats or float +1662 N mean value(s) of the new Obs +1663 cov : list or array +1664 2d (NxN) Covariance matrix, 1d diagonal entries or 0d covariance +1665 name : str +1666 identifier for the covariance matrix +1667 grad : list or array +1668 Gradient of the Covobs wrt. the means belonging to cov. 
+1669 """ +1670 +1671 def covobs_to_obs(co): +1672 """Make an Obs out of a Covobs 1673 -1674 for i in range(len(means)): -1675 ol.append(covobs_to_obs(Covobs(means[i], cov, name, pos=i, grad=grad))) -1676 if ol[0].covobs[name].N != len(means): -1677 raise Exception('You have to provide %d mean values!' % (ol[0].N)) -1678 if len(ol) == 1: -1679 return ol[0] -1680 return ol +1674 Parameters +1675 ---------- +1676 co : Covobs +1677 Covobs to be embedded into the Obs +1678 """ +1679 o = Obs([], [], means=[]) +1680 o._value = co.value +1681 o.names.append(co.name) +1682 o._covobs[co.name] = co +1683 o._dvalue = np.sqrt(co.errsq()) +1684 return o +1685 +1686 ol = [] +1687 if isinstance(means, (float, int)): +1688 means = [means] +1689 +1690 for i in range(len(means)): +1691 ol.append(covobs_to_obs(Covobs(means[i], cov, name, pos=i, grad=grad))) +1692 if ol[0].covobs[name].N != len(means): +1693 raise Exception('You have to provide %d mean values!' % (ol[0].N)) +1694 if len(ol) == 1: +1695 return ol[0] +1696 return ol @@ -2278,466 +2294,471 @@ 400 print('Result\t %3.8e +/- %3.8e +/- %3.8e (%3.3f%%)' % (self.value, self._dvalue, self.ddvalue, percentage)) 401 if len(self.e_names) > 1: 402 print(' Ensemble errors:') -403 for e_name in self.mc_names: -404 if len(self.e_names) > 1: -405 print('', e_name, '\t %3.8e +/- %3.8e' % (self.e_dvalue[e_name], self.e_ddvalue[e_name])) -406 if self.tau_exp[e_name] > 0: -407 print(' t_int\t %3.8e +/- %3.8e tau_exp = %3.2f, N_sigma = %1.0i' % (self.e_tauint[e_name], self.e_dtauint[e_name], self.tau_exp[e_name], self.N_sigma[e_name])) -408 else: -409 print(' t_int\t %3.8e +/- %3.8e S = %3.2f' % (self.e_tauint[e_name], self.e_dtauint[e_name], self.S[e_name])) -410 for e_name in self.cov_names: -411 print('', e_name, '\t %3.8e' % (self.e_dvalue[e_name])) -412 if ens_content is True: -413 if len(self.e_names) == 1: -414 print(self.N, 'samples in', len(self.e_names), 'ensemble:') -415 else: -416 print(self.N, 'samples in', len(self.e_names), 'ensembles:') -417 my_string_list = [] -418 for key, value in sorted(self.e_content.items()): -419 if key not in self.covobs: -420 my_string = ' ' + "\u00B7 Ensemble '" + key + "' " -421 if len(value) == 1: -422 my_string += f': {self.shape[value[0]]} configurations' -423 if isinstance(self.idl[value[0]], range): -424 my_string += f' (from {self.idl[value[0]].start} to {self.idl[value[0]][-1]}' + int(self.idl[value[0]].step != 1) * f' in steps of {self.idl[value[0]].step}' + ')' -425 else: -426 my_string += ' (irregular range)' -427 else: -428 sublist = [] -429 for v in value: -430 my_substring = ' ' + "\u00B7 Replicum '" + v[len(key) + 1:] + "' " -431 my_substring += f': {self.shape[v]} configurations' -432 if isinstance(self.idl[v], range): -433 my_substring += f' (from {self.idl[v].start} to {self.idl[v][-1]}' + int(self.idl[v].step != 1) * f' in steps of {self.idl[v].step}' + ')' -434 else: -435 my_substring += ' (irregular range)' -436 sublist.append(my_substring) -437 -438 my_string += '\n' + '\n'.join(sublist) -439 else: -440 my_string = ' ' + "\u00B7 Covobs '" + key + "' " -441 my_string_list.append(my_string) -442 print('\n'.join(my_string_list)) -443 -444 def reweight(self, weight): -445 """Reweight the obs with given rewighting factors. -446 -447 Parameters -448 ---------- -449 weight : Obs -450 Reweighting factor. An Observable that has to be defined on a superset of the -451 configurations in obs[i].idl for all i. 
-452 all_configs : bool -453 if True, the reweighted observables are normalized by the average of -454 the reweighting factor on all configurations in weight.idl and not -455 on the configurations in obs[i].idl. Default False. -456 """ -457 return reweight(weight, [self])[0] -458 -459 def is_zero_within_error(self, sigma=1): -460 """Checks whether the observable is zero within 'sigma' standard errors. -461 -462 Parameters -463 ---------- -464 sigma : int -465 Number of standard errors used for the check. -466 -467 Works only properly when the gamma method was run. -468 """ -469 return self.is_zero() or np.abs(self.value) <= sigma * self._dvalue -470 -471 def is_zero(self, atol=1e-10): -472 """Checks whether the observable is zero within a given tolerance. -473 -474 Parameters -475 ---------- -476 atol : float -477 Absolute tolerance (for details see numpy documentation). -478 """ -479 return np.isclose(0.0, self.value, 1e-14, atol) and all(np.allclose(0.0, delta, 1e-14, atol) for delta in self.deltas.values()) and all(np.allclose(0.0, delta.errsq(), 1e-14, atol) for delta in self.covobs.values()) -480 -481 def plot_tauint(self, save=None): -482 """Plot integrated autocorrelation time for each ensemble. -483 -484 Parameters -485 ---------- -486 save : str -487 saves the figure to a file named 'save' if. -488 """ -489 if not hasattr(self, 'e_dvalue'): -490 raise Exception('Run the gamma method first.') +403 e_content = self.e_content +404 for e_name in self.mc_names: +405 if isinstance(self.idl[e_content[e_name][0]], range): +406 gap = self.idl[e_content[e_name][0]].step +407 else: +408 gap = np.min(np.diff(self.idl[e_content[e_name][0]])) +409 +410 if len(self.e_names) > 1: +411 print('', e_name, '\t %3.6e +/- %3.6e' % (self.e_dvalue[e_name], self.e_ddvalue[e_name])) +412 tau_string = " \N{GREEK SMALL LETTER TAU}_int\t " + _format_uncertainty(self.e_tauint[e_name], self.e_dtauint[e_name]) +413 tau_string += f" in units of {gap} config" +414 if gap > 1: +415 tau_string += "s" +416 if self.tau_exp[e_name] > 0: +417 tau_string = f"{tau_string: <45}" + '\t(\N{GREEK SMALL LETTER TAU}_exp=%3.2f, N_\N{GREEK SMALL LETTER SIGMA}=%1.0i)' % (self.tau_exp[e_name], self.N_sigma[e_name]) +418 else: +419 tau_string = f"{tau_string: <45}" + '\t(S=%3.2f)' % (self.S[e_name]) +420 print(tau_string) +421 for e_name in self.cov_names: +422 print('', e_name, '\t %3.8e' % (self.e_dvalue[e_name])) +423 if ens_content is True: +424 if len(self.e_names) == 1: +425 print(self.N, 'samples in', len(self.e_names), 'ensemble:') +426 else: +427 print(self.N, 'samples in', len(self.e_names), 'ensembles:') +428 my_string_list = [] +429 for key, value in sorted(self.e_content.items()): +430 if key not in self.covobs: +431 my_string = ' ' + "\u00B7 Ensemble '" + key + "' " +432 if len(value) == 1: +433 my_string += f': {self.shape[value[0]]} configurations' +434 if isinstance(self.idl[value[0]], range): +435 my_string += f' (from {self.idl[value[0]].start} to {self.idl[value[0]][-1]}' + int(self.idl[value[0]].step != 1) * f' in steps of {self.idl[value[0]].step}' + ')' +436 else: +437 my_string += f' (irregular range from {self.idl[value[0]][0]} to {self.idl[value[0]][-1]})' +438 else: +439 sublist = [] +440 for v in value: +441 my_substring = ' ' + "\u00B7 Replicum '" + v[len(key) + 1:] + "' " +442 my_substring += f': {self.shape[v]} configurations' +443 if isinstance(self.idl[v], range): +444 my_substring += f' (from {self.idl[v].start} to {self.idl[v][-1]}' + int(self.idl[v].step != 1) * f' in steps of {self.idl[v].step}' 
+ ')'
+445 else:
+446 my_substring += f' (irregular range from {self.idl[v][0]} to {self.idl[v][-1]})'
+447 sublist.append(my_substring)
+448
+449 my_string += '\n' + '\n'.join(sublist)
+450 else:
+451 my_string = ' ' + "\u00B7 Covobs '" + key + "' "
+452 my_string_list.append(my_string)
+453 print('\n'.join(my_string_list))
+454
+455 def reweight(self, weight):
+456 """Reweight the obs with given reweighting factors.
+457
+458 Parameters
+459 ----------
+460 weight : Obs
+461 Reweighting factor. An Observable that has to be defined on a superset of the
+462 configurations in obs[i].idl for all i.
+463 all_configs : bool
+464 if True, the reweighted observables are normalized by the average of
+465 the reweighting factor on all configurations in weight.idl and not
+466 on the configurations in obs[i].idl. Default False.
+467 """
+468 return reweight(weight, [self])[0]
+469
+470 def is_zero_within_error(self, sigma=1):
+471 """Checks whether the observable is zero within 'sigma' standard errors.
+472
+473 Parameters
+474 ----------
+475 sigma : int
+476 Number of standard errors used for the check.
+477
+478 Only works properly if the gamma method was run first.
+479 """
+480 return self.is_zero() or np.abs(self.value) <= sigma * self._dvalue
+481
+482 def is_zero(self, atol=1e-10):
+483 """Checks whether the observable is zero within a given tolerance.
+484
+485 Parameters
+486 ----------
+487 atol : float
+488 Absolute tolerance (for details see numpy documentation).
+489 """
+490 return np.isclose(0.0, self.value, 1e-14, atol) and all(np.allclose(0.0, delta, 1e-14, atol) for delta in self.deltas.values()) and all(np.allclose(0.0, delta.errsq(), 1e-14, atol) for delta in self.covobs.values())
491
-492 for e, e_name in enumerate(self.mc_names):
-493 fig = plt.figure()
-494 plt.xlabel(r'$W$')
-495 plt.ylabel(r'$\tau_\mathrm{int}$')
-496 length = int(len(self.e_n_tauint[e_name]))
-497 if self.tau_exp[e_name] > 0:
-498 base = self.e_n_tauint[e_name][self.e_windowsize[e_name]]
-499 x_help = np.arange(2 * self.tau_exp[e_name])
-500 y_help = (x_help + 1) * np.abs(self.e_rho[e_name][self.e_windowsize[e_name] + 1]) * (1 - x_help / (2 * (2 * self.tau_exp[e_name] - 1))) + base
-501 x_arr = np.arange(self.e_windowsize[e_name] + 1, self.e_windowsize[e_name] + 1 + 2 * self.tau_exp[e_name])
-502 plt.plot(x_arr, y_help, 'C' + str(e), linewidth=1, ls='--', marker=',')
-503 plt.errorbar([self.e_windowsize[e_name] + 2 * self.tau_exp[e_name]], [self.e_tauint[e_name]],
-504 yerr=[self.e_dtauint[e_name]], fmt='C' + str(e), linewidth=1, capsize=2, marker='o', mfc=plt.rcParams['axes.facecolor'])
-505 xmax = self.e_windowsize[e_name] + 2 * self.tau_exp[e_name] + 1.5
-506 label = e_name + r', $\tau_\mathrm{exp}$=' + str(np.around(self.tau_exp[e_name], decimals=2))
-507 else:
-508 label = e_name + ', S=' + str(np.around(self.S[e_name], decimals=2))
-509 xmax = max(10.5, 2 * self.e_windowsize[e_name] - 0.5)
-510
-511 plt.errorbar(np.arange(length)[:int(xmax) + 1], self.e_n_tauint[e_name][:int(xmax) + 1], yerr=self.e_n_dtauint[e_name][:int(xmax) + 1], linewidth=1, capsize=2, label=label)
-512 plt.axvline(x=self.e_windowsize[e_name], color='C' + str(e), alpha=0.5, marker=',', ls='--')
-513 plt.legend()
-514 plt.xlim(-0.5, xmax)
-515 ylim = plt.ylim()
-516 plt.ylim(bottom=0.0, top=max(1.0, ylim[1]))
-517 plt.draw()
-518 if save:
-519 fig.savefig(save + "_" + str(e))
-520
-521 def plot_rho(self, save=None):
-522 """Plot normalized autocorrelation function time for each ensemble.
-523 -524 Parameters -525 ---------- -526 save : str -527 saves the figure to a file named 'save' if. -528 """ -529 if not hasattr(self, 'e_dvalue'): -530 raise Exception('Run the gamma method first.') -531 for e, e_name in enumerate(self.mc_names): -532 fig = plt.figure() -533 plt.xlabel('W') -534 plt.ylabel('rho') -535 length = int(len(self.e_drho[e_name])) -536 plt.errorbar(np.arange(length), self.e_rho[e_name][:length], yerr=self.e_drho[e_name][:], linewidth=1, capsize=2) -537 plt.axvline(x=self.e_windowsize[e_name], color='r', alpha=0.25, ls='--', marker=',') -538 if self.tau_exp[e_name] > 0: -539 plt.plot([self.e_windowsize[e_name] + 1, self.e_windowsize[e_name] + 1 + 2 * self.tau_exp[e_name]], -540 [self.e_rho[e_name][self.e_windowsize[e_name] + 1], 0], 'k-', lw=1) -541 xmax = self.e_windowsize[e_name] + 2 * self.tau_exp[e_name] + 1.5 -542 plt.title('Rho ' + e_name + r', tau\_exp=' + str(np.around(self.tau_exp[e_name], decimals=2))) -543 else: -544 xmax = max(10.5, 2 * self.e_windowsize[e_name] - 0.5) -545 plt.title('Rho ' + e_name + ', S=' + str(np.around(self.S[e_name], decimals=2))) -546 plt.plot([-0.5, xmax], [0, 0], 'k--', lw=1) -547 plt.xlim(-0.5, xmax) -548 plt.draw() -549 if save: -550 fig.savefig(save + "_" + str(e)) -551 -552 def plot_rep_dist(self): -553 """Plot replica distribution for each ensemble with more than one replicum.""" -554 if not hasattr(self, 'e_dvalue'): -555 raise Exception('Run the gamma method first.') -556 for e, e_name in enumerate(self.mc_names): -557 if len(self.e_content[e_name]) == 1: -558 print('No replica distribution for a single replicum (', e_name, ')') -559 continue -560 r_length = [] -561 sub_r_mean = 0 -562 for r, r_name in enumerate(self.e_content[e_name]): -563 r_length.append(len(self.deltas[r_name])) -564 sub_r_mean += self.shape[r_name] * self.r_values[r_name] -565 e_N = np.sum(r_length) -566 sub_r_mean /= e_N -567 arr = np.zeros(len(self.e_content[e_name])) -568 for r, r_name in enumerate(self.e_content[e_name]): -569 arr[r] = (self.r_values[r_name] - sub_r_mean) / (self.e_dvalue[e_name] * np.sqrt(e_N / self.shape[r_name] - 1)) -570 plt.hist(arr, rwidth=0.8, bins=len(self.e_content[e_name])) -571 plt.title('Replica distribution' + e_name + ' (mean=0, var=1)') -572 plt.draw() -573 -574 def plot_history(self, expand=True): -575 """Plot derived Monte Carlo history for each ensemble -576 -577 Parameters -578 ---------- -579 expand : bool -580 show expanded history for irregular Monte Carlo chains (default: True). 
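
    Typical usage (my_obs is a hypothetical Obs):

    my_obs.plot_history()              # expanded Monte Carlo history
    my_obs.plot_history(expand=False)  # one point per stored sample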
-581 """ -582 for e, e_name in enumerate(self.mc_names): -583 plt.figure() -584 r_length = [] -585 tmp = [] -586 tmp_expanded = [] -587 for r, r_name in enumerate(self.e_content[e_name]): -588 tmp.append(self.deltas[r_name] + self.r_values[r_name]) -589 if expand: -590 tmp_expanded.append(_expand_deltas(self.deltas[r_name], list(self.idl[r_name]), self.shape[r_name]) + self.r_values[r_name]) -591 r_length.append(len(tmp_expanded[-1])) -592 else: -593 r_length.append(len(tmp[-1])) -594 e_N = np.sum(r_length) -595 x = np.arange(e_N) -596 y_test = np.concatenate(tmp, axis=0) -597 if expand: -598 y = np.concatenate(tmp_expanded, axis=0) -599 else: -600 y = y_test -601 plt.errorbar(x, y, fmt='.', markersize=3) -602 plt.xlim(-0.5, e_N - 0.5) -603 plt.title(e_name + f'\nskew: {skew(y_test):.3f} (p={skewtest(y_test).pvalue:.3f}), kurtosis: {kurtosis(y_test):.3f} (p={kurtosistest(y_test).pvalue:.3f})') -604 plt.draw() -605 -606 def plot_piechart(self, save=None): -607 """Plot piechart which shows the fractional contribution of each -608 ensemble to the error and returns a dictionary containing the fractions. -609 -610 Parameters -611 ---------- -612 save : str -613 saves the figure to a file named 'save' if. -614 """ -615 if not hasattr(self, 'e_dvalue'): -616 raise Exception('Run the gamma method first.') -617 if np.isclose(0.0, self._dvalue, atol=1e-15): -618 raise Exception('Error is 0.0') -619 labels = self.e_names -620 sizes = [self.e_dvalue[name] ** 2 for name in labels] / self._dvalue ** 2 -621 fig1, ax1 = plt.subplots() -622 ax1.pie(sizes, labels=labels, startangle=90, normalize=True) -623 ax1.axis('equal') -624 plt.draw() -625 if save: -626 fig1.savefig(save) -627 -628 return dict(zip(self.e_names, sizes)) -629 -630 def dump(self, filename, datatype="json.gz", description="", **kwargs): -631 """Dump the Obs to a file 'name' of chosen format. -632 -633 Parameters -634 ---------- -635 filename : str -636 name of the file to be saved. -637 datatype : str -638 Format of the exported file. Supported formats include -639 "json.gz" and "pickle" -640 description : str -641 Description for output file, only relevant for json.gz format. -642 path : str -643 specifies a custom path for the file (default '.') -644 """ -645 if 'path' in kwargs: -646 file_name = kwargs.get('path') + '/' + filename -647 else: -648 file_name = filename -649 -650 if datatype == "json.gz": -651 from .input.json import dump_to_json -652 dump_to_json([self], file_name, description=description) -653 elif datatype == "pickle": -654 with open(file_name + '.p', 'wb') as fb: -655 pickle.dump(self, fb) -656 else: -657 raise Exception("Unknown datatype " + str(datatype)) -658 -659 def export_jackknife(self): -660 """Export jackknife samples from the Obs -661 -662 Returns -663 ------- -664 numpy.ndarray -665 Returns a numpy array of length N + 1 where N is the number of samples -666 for the given ensemble and replicum. The zeroth entry of the array contains -667 the mean value of the Obs, entries 1 to N contain the N jackknife samples -668 derived from the Obs. The current implementation only works for observables -669 defined on exactly one ensemble and replicum. The derived jackknife samples -670 should agree with samples from a full jackknife analysis up to O(1/N). -671 """ +492 def plot_tauint(self, save=None): +493 """Plot integrated autocorrelation time for each ensemble. +494 +495 Parameters +496 ---------- +497 save : str +498 saves the figure to a file named 'save' if. 
+499 """ +500 if not hasattr(self, 'e_dvalue'): +501 raise Exception('Run the gamma method first.') +502 +503 for e, e_name in enumerate(self.mc_names): +504 fig = plt.figure() +505 plt.xlabel(r'$W$') +506 plt.ylabel(r'$\tau_\mathrm{int}$') +507 length = int(len(self.e_n_tauint[e_name])) +508 if self.tau_exp[e_name] > 0: +509 base = self.e_n_tauint[e_name][self.e_windowsize[e_name]] +510 x_help = np.arange(2 * self.tau_exp[e_name]) +511 y_help = (x_help + 1) * np.abs(self.e_rho[e_name][self.e_windowsize[e_name] + 1]) * (1 - x_help / (2 * (2 * self.tau_exp[e_name] - 1))) + base +512 x_arr = np.arange(self.e_windowsize[e_name] + 1, self.e_windowsize[e_name] + 1 + 2 * self.tau_exp[e_name]) +513 plt.plot(x_arr, y_help, 'C' + str(e), linewidth=1, ls='--', marker=',') +514 plt.errorbar([self.e_windowsize[e_name] + 2 * self.tau_exp[e_name]], [self.e_tauint[e_name]], +515 yerr=[self.e_dtauint[e_name]], fmt='C' + str(e), linewidth=1, capsize=2, marker='o', mfc=plt.rcParams['axes.facecolor']) +516 xmax = self.e_windowsize[e_name] + 2 * self.tau_exp[e_name] + 1.5 +517 label = e_name + r', $\tau_\mathrm{exp}$=' + str(np.around(self.tau_exp[e_name], decimals=2)) +518 else: +519 label = e_name + ', S=' + str(np.around(self.S[e_name], decimals=2)) +520 xmax = max(10.5, 2 * self.e_windowsize[e_name] - 0.5) +521 +522 plt.errorbar(np.arange(length)[:int(xmax) + 1], self.e_n_tauint[e_name][:int(xmax) + 1], yerr=self.e_n_dtauint[e_name][:int(xmax) + 1], linewidth=1, capsize=2, label=label) +523 plt.axvline(x=self.e_windowsize[e_name], color='C' + str(e), alpha=0.5, marker=',', ls='--') +524 plt.legend() +525 plt.xlim(-0.5, xmax) +526 ylim = plt.ylim() +527 plt.ylim(bottom=0.0, top=max(1.0, ylim[1])) +528 plt.draw() +529 if save: +530 fig.savefig(save + "_" + str(e)) +531 +532 def plot_rho(self, save=None): +533 """Plot normalized autocorrelation function time for each ensemble. +534 +535 Parameters +536 ---------- +537 save : str +538 saves the figure to a file named 'save' if. 
+539 """ +540 if not hasattr(self, 'e_dvalue'): +541 raise Exception('Run the gamma method first.') +542 for e, e_name in enumerate(self.mc_names): +543 fig = plt.figure() +544 plt.xlabel('W') +545 plt.ylabel('rho') +546 length = int(len(self.e_drho[e_name])) +547 plt.errorbar(np.arange(length), self.e_rho[e_name][:length], yerr=self.e_drho[e_name][:], linewidth=1, capsize=2) +548 plt.axvline(x=self.e_windowsize[e_name], color='r', alpha=0.25, ls='--', marker=',') +549 if self.tau_exp[e_name] > 0: +550 plt.plot([self.e_windowsize[e_name] + 1, self.e_windowsize[e_name] + 1 + 2 * self.tau_exp[e_name]], +551 [self.e_rho[e_name][self.e_windowsize[e_name] + 1], 0], 'k-', lw=1) +552 xmax = self.e_windowsize[e_name] + 2 * self.tau_exp[e_name] + 1.5 +553 plt.title('Rho ' + e_name + r', tau\_exp=' + str(np.around(self.tau_exp[e_name], decimals=2))) +554 else: +555 xmax = max(10.5, 2 * self.e_windowsize[e_name] - 0.5) +556 plt.title('Rho ' + e_name + ', S=' + str(np.around(self.S[e_name], decimals=2))) +557 plt.plot([-0.5, xmax], [0, 0], 'k--', lw=1) +558 plt.xlim(-0.5, xmax) +559 plt.draw() +560 if save: +561 fig.savefig(save + "_" + str(e)) +562 +563 def plot_rep_dist(self): +564 """Plot replica distribution for each ensemble with more than one replicum.""" +565 if not hasattr(self, 'e_dvalue'): +566 raise Exception('Run the gamma method first.') +567 for e, e_name in enumerate(self.mc_names): +568 if len(self.e_content[e_name]) == 1: +569 print('No replica distribution for a single replicum (', e_name, ')') +570 continue +571 r_length = [] +572 sub_r_mean = 0 +573 for r, r_name in enumerate(self.e_content[e_name]): +574 r_length.append(len(self.deltas[r_name])) +575 sub_r_mean += self.shape[r_name] * self.r_values[r_name] +576 e_N = np.sum(r_length) +577 sub_r_mean /= e_N +578 arr = np.zeros(len(self.e_content[e_name])) +579 for r, r_name in enumerate(self.e_content[e_name]): +580 arr[r] = (self.r_values[r_name] - sub_r_mean) / (self.e_dvalue[e_name] * np.sqrt(e_N / self.shape[r_name] - 1)) +581 plt.hist(arr, rwidth=0.8, bins=len(self.e_content[e_name])) +582 plt.title('Replica distribution' + e_name + ' (mean=0, var=1)') +583 plt.draw() +584 +585 def plot_history(self, expand=True): +586 """Plot derived Monte Carlo history for each ensemble +587 +588 Parameters +589 ---------- +590 expand : bool +591 show expanded history for irregular Monte Carlo chains (default: True). +592 """ +593 for e, e_name in enumerate(self.mc_names): +594 plt.figure() +595 r_length = [] +596 tmp = [] +597 tmp_expanded = [] +598 for r, r_name in enumerate(self.e_content[e_name]): +599 tmp.append(self.deltas[r_name] + self.r_values[r_name]) +600 if expand: +601 tmp_expanded.append(_expand_deltas(self.deltas[r_name], list(self.idl[r_name]), self.shape[r_name]) + self.r_values[r_name]) +602 r_length.append(len(tmp_expanded[-1])) +603 else: +604 r_length.append(len(tmp[-1])) +605 e_N = np.sum(r_length) +606 x = np.arange(e_N) +607 y_test = np.concatenate(tmp, axis=0) +608 if expand: +609 y = np.concatenate(tmp_expanded, axis=0) +610 else: +611 y = y_test +612 plt.errorbar(x, y, fmt='.', markersize=3) +613 plt.xlim(-0.5, e_N - 0.5) +614 plt.title(e_name + f'\nskew: {skew(y_test):.3f} (p={skewtest(y_test).pvalue:.3f}), kurtosis: {kurtosis(y_test):.3f} (p={kurtosistest(y_test).pvalue:.3f})') +615 plt.draw() +616 +617 def plot_piechart(self, save=None): +618 """Plot piechart which shows the fractional contribution of each +619 ensemble to the error and returns a dictionary containing the fractions. 
+620 +621 Parameters +622 ---------- +623 save : str +624 saves the figure to a file named 'save' if. +625 """ +626 if not hasattr(self, 'e_dvalue'): +627 raise Exception('Run the gamma method first.') +628 if np.isclose(0.0, self._dvalue, atol=1e-15): +629 raise Exception('Error is 0.0') +630 labels = self.e_names +631 sizes = [self.e_dvalue[name] ** 2 for name in labels] / self._dvalue ** 2 +632 fig1, ax1 = plt.subplots() +633 ax1.pie(sizes, labels=labels, startangle=90, normalize=True) +634 ax1.axis('equal') +635 plt.draw() +636 if save: +637 fig1.savefig(save) +638 +639 return dict(zip(self.e_names, sizes)) +640 +641 def dump(self, filename, datatype="json.gz", description="", **kwargs): +642 """Dump the Obs to a file 'name' of chosen format. +643 +644 Parameters +645 ---------- +646 filename : str +647 name of the file to be saved. +648 datatype : str +649 Format of the exported file. Supported formats include +650 "json.gz" and "pickle" +651 description : str +652 Description for output file, only relevant for json.gz format. +653 path : str +654 specifies a custom path for the file (default '.') +655 """ +656 if 'path' in kwargs: +657 file_name = kwargs.get('path') + '/' + filename +658 else: +659 file_name = filename +660 +661 if datatype == "json.gz": +662 from .input.json import dump_to_json +663 dump_to_json([self], file_name, description=description) +664 elif datatype == "pickle": +665 with open(file_name + '.p', 'wb') as fb: +666 pickle.dump(self, fb) +667 else: +668 raise Exception("Unknown datatype " + str(datatype)) +669 +670 def export_jackknife(self): +671 """Export jackknife samples from the Obs 672 -673 if len(self.names) != 1: -674 raise Exception("'export_jackknife' is only implemented for Obs defined on one ensemble and replicum.") -675 -676 name = self.names[0] -677 full_data = self.deltas[name] + self.r_values[name] -678 n = full_data.size -679 mean = self.value -680 tmp_jacks = np.zeros(n + 1) -681 tmp_jacks[0] = mean -682 tmp_jacks[1:] = (n * mean - full_data) / (n - 1) -683 return tmp_jacks -684 -685 def __float__(self): -686 return float(self.value) -687 -688 def __repr__(self): -689 return 'Obs[' + str(self) + ']' -690 -691 def __str__(self): -692 if self._dvalue == 0.0: -693 return str(self.value) -694 fexp = np.floor(np.log10(self._dvalue)) -695 if fexp < 0.0: -696 return '{:{form}}({:2.0f})'.format(self.value, self._dvalue * 10 ** (-fexp + 1), form='.' + str(-int(fexp) + 1) + 'f') -697 elif fexp == 0.0: -698 return '{:.1f}({:1.1f})'.format(self.value, self._dvalue) -699 else: -700 return '{:.0f}({:2.0f})'.format(self.value, self._dvalue) +673 Returns +674 ------- +675 numpy.ndarray +676 Returns a numpy array of length N + 1 where N is the number of samples +677 for the given ensemble and replicum. The zeroth entry of the array contains +678 the mean value of the Obs, entries 1 to N contain the N jackknife samples +679 derived from the Obs. The current implementation only works for observables +680 defined on exactly one ensemble and replicum. The derived jackknife samples +681 should agree with samples from a full jackknife analysis up to O(1/N). 
+682 """ +683 +684 if len(self.names) != 1: +685 raise Exception("'export_jackknife' is only implemented for Obs defined on one ensemble and replicum.") +686 +687 name = self.names[0] +688 full_data = self.deltas[name] + self.r_values[name] +689 n = full_data.size +690 mean = self.value +691 tmp_jacks = np.zeros(n + 1) +692 tmp_jacks[0] = mean +693 tmp_jacks[1:] = (n * mean - full_data) / (n - 1) +694 return tmp_jacks +695 +696 def __float__(self): +697 return float(self.value) +698 +699 def __repr__(self): +700 return 'Obs[' + str(self) + ']' 701 -702 def __hash__(self): -703 hash_tuple = (np.array([self.value]).astype(np.float32).data.tobytes(),) -704 hash_tuple += tuple([o.astype(np.float32).data.tobytes() for o in self.deltas.values()]) -705 hash_tuple += tuple([np.array([o.errsq()]).astype(np.float32).data.tobytes() for o in self.covobs.values()]) -706 hash_tuple += tuple([o.encode() for o in self.names]) -707 m = hashlib.md5() -708 [m.update(o) for o in hash_tuple] -709 return int(m.hexdigest(), 16) & 0xFFFFFFFF -710 -711 # Overload comparisons -712 def __lt__(self, other): -713 return self.value < other -714 -715 def __le__(self, other): -716 return self.value <= other -717 -718 def __gt__(self, other): -719 return self.value > other -720 -721 def __ge__(self, other): -722 return self.value >= other -723 -724 def __eq__(self, other): -725 return (self - other).is_zero() -726 -727 def __ne__(self, other): -728 return not (self - other).is_zero() -729 -730 # Overload math operations -731 def __add__(self, y): -732 if isinstance(y, Obs): -733 return derived_observable(lambda x, **kwargs: x[0] + x[1], [self, y], man_grad=[1, 1]) -734 else: -735 if isinstance(y, np.ndarray): -736 return np.array([self + o for o in y]) -737 elif y.__class__.__name__ in ['Corr', 'CObs']: -738 return NotImplemented -739 else: -740 return derived_observable(lambda x, **kwargs: x[0] + y, [self], man_grad=[1]) -741 -742 def __radd__(self, y): -743 return self + y -744 -745 def __mul__(self, y): -746 if isinstance(y, Obs): -747 return derived_observable(lambda x, **kwargs: x[0] * x[1], [self, y], man_grad=[y.value, self.value]) -748 else: -749 if isinstance(y, np.ndarray): -750 return np.array([self * o for o in y]) -751 elif isinstance(y, complex): -752 return CObs(self * y.real, self * y.imag) -753 elif y.__class__.__name__ in ['Corr', 'CObs']: -754 return NotImplemented -755 else: -756 return derived_observable(lambda x, **kwargs: x[0] * y, [self], man_grad=[y]) -757 -758 def __rmul__(self, y): -759 return self * y -760 -761 def __sub__(self, y): -762 if isinstance(y, Obs): -763 return derived_observable(lambda x, **kwargs: x[0] - x[1], [self, y], man_grad=[1, -1]) -764 else: -765 if isinstance(y, np.ndarray): -766 return np.array([self - o for o in y]) -767 elif y.__class__.__name__ in ['Corr', 'CObs']: -768 return NotImplemented -769 else: -770 return derived_observable(lambda x, **kwargs: x[0] - y, [self], man_grad=[1]) -771 -772 def __rsub__(self, y): -773 return -1 * (self - y) -774 -775 def __pos__(self): -776 return self -777 -778 def __neg__(self): -779 return -1 * self -780 -781 def __truediv__(self, y): -782 if isinstance(y, Obs): -783 return derived_observable(lambda x, **kwargs: x[0] / x[1], [self, y], man_grad=[1 / y.value, - self.value / y.value ** 2]) -784 else: -785 if isinstance(y, np.ndarray): -786 return np.array([self / o for o in y]) -787 elif y.__class__.__name__ in ['Corr', 'CObs']: -788 return NotImplemented -789 else: -790 return derived_observable(lambda x, **kwargs: x[0] / y, 
[self], man_grad=[1 / y]) -791 -792 def __rtruediv__(self, y): -793 if isinstance(y, Obs): -794 return derived_observable(lambda x, **kwargs: x[0] / x[1], [y, self], man_grad=[1 / self.value, - y.value / self.value ** 2]) -795 else: -796 if isinstance(y, np.ndarray): -797 return np.array([o / self for o in y]) -798 elif y.__class__.__name__ in ['Corr', 'CObs']: -799 return NotImplemented -800 else: -801 return derived_observable(lambda x, **kwargs: y / x[0], [self], man_grad=[-y / self.value ** 2]) -802 -803 def __pow__(self, y): -804 if isinstance(y, Obs): -805 return derived_observable(lambda x: x[0] ** x[1], [self, y]) -806 else: -807 return derived_observable(lambda x: x[0] ** y, [self]) -808 -809 def __rpow__(self, y): -810 if isinstance(y, Obs): -811 return derived_observable(lambda x: x[0] ** x[1], [y, self]) -812 else: -813 return derived_observable(lambda x: y ** x[0], [self]) -814 -815 def __abs__(self): -816 return derived_observable(lambda x: anp.abs(x[0]), [self]) -817 -818 # Overload numpy functions -819 def sqrt(self): -820 return derived_observable(lambda x, **kwargs: np.sqrt(x[0]), [self], man_grad=[1 / 2 / np.sqrt(self.value)]) -821 -822 def log(self): -823 return derived_observable(lambda x, **kwargs: np.log(x[0]), [self], man_grad=[1 / self.value]) -824 -825 def exp(self): -826 return derived_observable(lambda x, **kwargs: np.exp(x[0]), [self], man_grad=[np.exp(self.value)]) -827 -828 def sin(self): -829 return derived_observable(lambda x, **kwargs: np.sin(x[0]), [self], man_grad=[np.cos(self.value)]) -830 -831 def cos(self): -832 return derived_observable(lambda x, **kwargs: np.cos(x[0]), [self], man_grad=[-np.sin(self.value)]) -833 -834 def tan(self): -835 return derived_observable(lambda x, **kwargs: np.tan(x[0]), [self], man_grad=[1 / np.cos(self.value) ** 2]) -836 -837 def arcsin(self): -838 return derived_observable(lambda x: anp.arcsin(x[0]), [self]) -839 -840 def arccos(self): -841 return derived_observable(lambda x: anp.arccos(x[0]), [self]) -842 -843 def arctan(self): -844 return derived_observable(lambda x: anp.arctan(x[0]), [self]) -845 -846 def sinh(self): -847 return derived_observable(lambda x, **kwargs: np.sinh(x[0]), [self], man_grad=[np.cosh(self.value)]) -848 -849 def cosh(self): -850 return derived_observable(lambda x, **kwargs: np.cosh(x[0]), [self], man_grad=[np.sinh(self.value)]) -851 -852 def tanh(self): -853 return derived_observable(lambda x, **kwargs: np.tanh(x[0]), [self], man_grad=[1 / np.cosh(self.value) ** 2]) -854 -855 def arcsinh(self): -856 return derived_observable(lambda x: anp.arcsinh(x[0]), [self]) -857 -858 def arccosh(self): -859 return derived_observable(lambda x: anp.arccosh(x[0]), [self]) -860 -861 def arctanh(self): -862 return derived_observable(lambda x: anp.arctanh(x[0]), [self]) +702 def __str__(self): +703 if self._dvalue == 0.0: +704 return str(self.value) +705 return _format_uncertainty(self.value, self._dvalue) +706 +707 def __hash__(self): +708 hash_tuple = (np.array([self.value]).astype(np.float32).data.tobytes(),) +709 hash_tuple += tuple([o.astype(np.float32).data.tobytes() for o in self.deltas.values()]) +710 hash_tuple += tuple([np.array([o.errsq()]).astype(np.float32).data.tobytes() for o in self.covobs.values()]) +711 hash_tuple += tuple([o.encode() for o in self.names]) +712 m = hashlib.md5() +713 [m.update(o) for o in hash_tuple] +714 return int(m.hexdigest(), 16) & 0xFFFFFFFF +715 +716 # Overload comparisons +717 def __lt__(self, other): +718 return self.value < other +719 +720 def __le__(self, other): +721 
return self.value <= other +722 +723 def __gt__(self, other): +724 return self.value > other +725 +726 def __ge__(self, other): +727 return self.value >= other +728 +729 def __eq__(self, other): +730 return (self - other).is_zero() +731 +732 def __ne__(self, other): +733 return not (self - other).is_zero() +734 +735 # Overload math operations +736 def __add__(self, y): +737 if isinstance(y, Obs): +738 return derived_observable(lambda x, **kwargs: x[0] + x[1], [self, y], man_grad=[1, 1]) +739 else: +740 if isinstance(y, np.ndarray): +741 return np.array([self + o for o in y]) +742 elif y.__class__.__name__ in ['Corr', 'CObs']: +743 return NotImplemented +744 else: +745 return derived_observable(lambda x, **kwargs: x[0] + y, [self], man_grad=[1]) +746 +747 def __radd__(self, y): +748 return self + y +749 +750 def __mul__(self, y): +751 if isinstance(y, Obs): +752 return derived_observable(lambda x, **kwargs: x[0] * x[1], [self, y], man_grad=[y.value, self.value]) +753 else: +754 if isinstance(y, np.ndarray): +755 return np.array([self * o for o in y]) +756 elif isinstance(y, complex): +757 return CObs(self * y.real, self * y.imag) +758 elif y.__class__.__name__ in ['Corr', 'CObs']: +759 return NotImplemented +760 else: +761 return derived_observable(lambda x, **kwargs: x[0] * y, [self], man_grad=[y]) +762 +763 def __rmul__(self, y): +764 return self * y +765 +766 def __sub__(self, y): +767 if isinstance(y, Obs): +768 return derived_observable(lambda x, **kwargs: x[0] - x[1], [self, y], man_grad=[1, -1]) +769 else: +770 if isinstance(y, np.ndarray): +771 return np.array([self - o for o in y]) +772 elif y.__class__.__name__ in ['Corr', 'CObs']: +773 return NotImplemented +774 else: +775 return derived_observable(lambda x, **kwargs: x[0] - y, [self], man_grad=[1]) +776 +777 def __rsub__(self, y): +778 return -1 * (self - y) +779 +780 def __pos__(self): +781 return self +782 +783 def __neg__(self): +784 return -1 * self +785 +786 def __truediv__(self, y): +787 if isinstance(y, Obs): +788 return derived_observable(lambda x, **kwargs: x[0] / x[1], [self, y], man_grad=[1 / y.value, - self.value / y.value ** 2]) +789 else: +790 if isinstance(y, np.ndarray): +791 return np.array([self / o for o in y]) +792 elif y.__class__.__name__ in ['Corr', 'CObs']: +793 return NotImplemented +794 else: +795 return derived_observable(lambda x, **kwargs: x[0] / y, [self], man_grad=[1 / y]) +796 +797 def __rtruediv__(self, y): +798 if isinstance(y, Obs): +799 return derived_observable(lambda x, **kwargs: x[0] / x[1], [y, self], man_grad=[1 / self.value, - y.value / self.value ** 2]) +800 else: +801 if isinstance(y, np.ndarray): +802 return np.array([o / self for o in y]) +803 elif y.__class__.__name__ in ['Corr', 'CObs']: +804 return NotImplemented +805 else: +806 return derived_observable(lambda x, **kwargs: y / x[0], [self], man_grad=[-y / self.value ** 2]) +807 +808 def __pow__(self, y): +809 if isinstance(y, Obs): +810 return derived_observable(lambda x: x[0] ** x[1], [self, y]) +811 else: +812 return derived_observable(lambda x: x[0] ** y, [self]) +813 +814 def __rpow__(self, y): +815 if isinstance(y, Obs): +816 return derived_observable(lambda x: x[0] ** x[1], [y, self]) +817 else: +818 return derived_observable(lambda x: y ** x[0], [self]) +819 +820 def __abs__(self): +821 return derived_observable(lambda x: anp.abs(x[0]), [self]) +822 +823 # Overload numpy functions +824 def sqrt(self): +825 return derived_observable(lambda x, **kwargs: np.sqrt(x[0]), [self], man_grad=[1 / 2 / np.sqrt(self.value)]) +826 +827 
def log(self): +828 return derived_observable(lambda x, **kwargs: np.log(x[0]), [self], man_grad=[1 / self.value]) +829 +830 def exp(self): +831 return derived_observable(lambda x, **kwargs: np.exp(x[0]), [self], man_grad=[np.exp(self.value)]) +832 +833 def sin(self): +834 return derived_observable(lambda x, **kwargs: np.sin(x[0]), [self], man_grad=[np.cos(self.value)]) +835 +836 def cos(self): +837 return derived_observable(lambda x, **kwargs: np.cos(x[0]), [self], man_grad=[-np.sin(self.value)]) +838 +839 def tan(self): +840 return derived_observable(lambda x, **kwargs: np.tan(x[0]), [self], man_grad=[1 / np.cos(self.value) ** 2]) +841 +842 def arcsin(self): +843 return derived_observable(lambda x: anp.arcsin(x[0]), [self]) +844 +845 def arccos(self): +846 return derived_observable(lambda x: anp.arccos(x[0]), [self]) +847 +848 def arctan(self): +849 return derived_observable(lambda x: anp.arctan(x[0]), [self]) +850 +851 def sinh(self): +852 return derived_observable(lambda x, **kwargs: np.sinh(x[0]), [self], man_grad=[np.cosh(self.value)]) +853 +854 def cosh(self): +855 return derived_observable(lambda x, **kwargs: np.cosh(x[0]), [self], man_grad=[np.sinh(self.value)]) +856 +857 def tanh(self): +858 return derived_observable(lambda x, **kwargs: np.tanh(x[0]), [self], man_grad=[1 / np.cosh(self.value) ** 2]) +859 +860 def arcsinh(self): +861 return derived_observable(lambda x: anp.arcsinh(x[0]), [self]) +862 +863 def arccosh(self): +864 return derived_observable(lambda x: anp.arccosh(x[0]), [self]) +865 +866 def arctanh(self): +867 return derived_observable(lambda x: anp.arctanh(x[0]), [self]) @@ -3126,46 +3147,57 @@ of the autocorrelation function (default True) 400 print('Result\t %3.8e +/- %3.8e +/- %3.8e (%3.3f%%)' % (self.value, self._dvalue, self.ddvalue, percentage)) 401 if len(self.e_names) > 1: 402 print(' Ensemble errors:') -403 for e_name in self.mc_names: -404 if len(self.e_names) > 1: -405 print('', e_name, '\t %3.8e +/- %3.8e' % (self.e_dvalue[e_name], self.e_ddvalue[e_name])) -406 if self.tau_exp[e_name] > 0: -407 print(' t_int\t %3.8e +/- %3.8e tau_exp = %3.2f, N_sigma = %1.0i' % (self.e_tauint[e_name], self.e_dtauint[e_name], self.tau_exp[e_name], self.N_sigma[e_name])) -408 else: -409 print(' t_int\t %3.8e +/- %3.8e S = %3.2f' % (self.e_tauint[e_name], self.e_dtauint[e_name], self.S[e_name])) -410 for e_name in self.cov_names: -411 print('', e_name, '\t %3.8e' % (self.e_dvalue[e_name])) -412 if ens_content is True: -413 if len(self.e_names) == 1: -414 print(self.N, 'samples in', len(self.e_names), 'ensemble:') -415 else: -416 print(self.N, 'samples in', len(self.e_names), 'ensembles:') -417 my_string_list = [] -418 for key, value in sorted(self.e_content.items()): -419 if key not in self.covobs: -420 my_string = ' ' + "\u00B7 Ensemble '" + key + "' " -421 if len(value) == 1: -422 my_string += f': {self.shape[value[0]]} configurations' -423 if isinstance(self.idl[value[0]], range): -424 my_string += f' (from {self.idl[value[0]].start} to {self.idl[value[0]][-1]}' + int(self.idl[value[0]].step != 1) * f' in steps of {self.idl[value[0]].step}' + ')' -425 else: -426 my_string += ' (irregular range)' -427 else: -428 sublist = [] -429 for v in value: -430 my_substring = ' ' + "\u00B7 Replicum '" + v[len(key) + 1:] + "' " -431 my_substring += f': {self.shape[v]} configurations' -432 if isinstance(self.idl[v], range): -433 my_substring += f' (from {self.idl[v].start} to {self.idl[v][-1]}' + int(self.idl[v].step != 1) * f' in steps of {self.idl[v].step}' + ')' -434 else: -435 
my_substring += ' (irregular range)' -436 sublist.append(my_substring) -437 -438 my_string += '\n' + '\n'.join(sublist) -439 else: -440 my_string = ' ' + "\u00B7 Covobs '" + key + "' " -441 my_string_list.append(my_string) -442 print('\n'.join(my_string_list)) +403 e_content = self.e_content +404 for e_name in self.mc_names: +405 if isinstance(self.idl[e_content[e_name][0]], range): +406 gap = self.idl[e_content[e_name][0]].step +407 else: +408 gap = np.min(np.diff(self.idl[e_content[e_name][0]])) +409 +410 if len(self.e_names) > 1: +411 print('', e_name, '\t %3.6e +/- %3.6e' % (self.e_dvalue[e_name], self.e_ddvalue[e_name])) +412 tau_string = " \N{GREEK SMALL LETTER TAU}_int\t " + _format_uncertainty(self.e_tauint[e_name], self.e_dtauint[e_name]) +413 tau_string += f" in units of {gap} config" +414 if gap > 1: +415 tau_string += "s" +416 if self.tau_exp[e_name] > 0: +417 tau_string = f"{tau_string: <45}" + '\t(\N{GREEK SMALL LETTER TAU}_exp=%3.2f, N_\N{GREEK SMALL LETTER SIGMA}=%1.0i)' % (self.tau_exp[e_name], self.N_sigma[e_name]) +418 else: +419 tau_string = f"{tau_string: <45}" + '\t(S=%3.2f)' % (self.S[e_name]) +420 print(tau_string) +421 for e_name in self.cov_names: +422 print('', e_name, '\t %3.8e' % (self.e_dvalue[e_name])) +423 if ens_content is True: +424 if len(self.e_names) == 1: +425 print(self.N, 'samples in', len(self.e_names), 'ensemble:') +426 else: +427 print(self.N, 'samples in', len(self.e_names), 'ensembles:') +428 my_string_list = [] +429 for key, value in sorted(self.e_content.items()): +430 if key not in self.covobs: +431 my_string = ' ' + "\u00B7 Ensemble '" + key + "' " +432 if len(value) == 1: +433 my_string += f': {self.shape[value[0]]} configurations' +434 if isinstance(self.idl[value[0]], range): +435 my_string += f' (from {self.idl[value[0]].start} to {self.idl[value[0]][-1]}' + int(self.idl[value[0]].step != 1) * f' in steps of {self.idl[value[0]].step}' + ')' +436 else: +437 my_string += f' (irregular range from {self.idl[value[0]][0]} to {self.idl[value[0]][-1]})' +438 else: +439 sublist = [] +440 for v in value: +441 my_substring = ' ' + "\u00B7 Replicum '" + v[len(key) + 1:] + "' " +442 my_substring += f': {self.shape[v]} configurations' +443 if isinstance(self.idl[v], range): +444 my_substring += f' (from {self.idl[v].start} to {self.idl[v][-1]}' + int(self.idl[v].step != 1) * f' in steps of {self.idl[v].step}' + ')' +445 else: +446 my_substring += f' (irregular range from {self.idl[v][0]} to {self.idl[v][-1]})' +447 sublist.append(my_substring) +448 +449 my_string += '\n' + '\n'.join(sublist) +450 else: +451 my_string = ' ' + "\u00B7 Covobs '" + key + "' " +452 my_string_list.append(my_string) +453 print('\n'.join(my_string_list)) @@ -3192,20 +3224,20 @@ print details about the ensembles and replica if true. -
444    def reweight(self, weight):
-445        """Reweight the obs with given reweighting factors.
-446
-447        Parameters
-448        ----------
-449        weight : Obs
-450            Reweighting factor. An Observable that has to be defined on a superset of the
-451            configurations in obs[i].idl for all i.
-452        all_configs : bool
-453            if True, the reweighted observables are normalized by the average of
-454            the reweighting factor on all configurations in weight.idl and not
-455            on the configurations in obs[i].idl. Default False.
-456        """
-457        return reweight(weight, [self])[0]
+            
455    def reweight(self, weight):
+456        """Reweight the obs with given reweighting factors.
+457
+458        Parameters
+459        ----------
+460        weight : Obs
+461            Reweighting factor. An Observable that has to be defined on a superset of the
+462            configurations in obs[i].idl for all i.
+463        all_configs : bool
+464            if True, the reweighted observables are normalized by the average of
+465            the reweighting factor on all configurations in weight.idl and not
+466            on the configurations in obs[i].idl. Default False.
+467        """
+468        return reweight(weight, [self])[0]
 
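The method delegates to the module-level reweight function. A minimal usage sketch (the ensemble name and the synthetic samples below are illustrative, not from the source):

    import numpy as np
    import pyerrors as pe

    # Hypothetical observable and reweighting factor on the same ensemble
    my_obs = pe.Obs([np.random.normal(1.0, 0.1, 500)], ['ensemble1'])
    w = pe.Obs([np.abs(np.random.normal(1.0, 0.01, 500))], ['ensemble1'])

    r_obs = my_obs.reweight(w)   # same as reweight(w, [my_obs])[0]
    r_obs.gamma_method()
    print(r_obs)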
@@ -3237,17 +3269,17 @@ on the configurations in obs[i].idl. Default False.
-
459    def is_zero_within_error(self, sigma=1):
-460        """Checks whether the observable is zero within 'sigma' standard errors.
-461
-462        Parameters
-463        ----------
-464        sigma : int
-465            Number of standard errors used for the check.
-466
-467        Works only properly when the gamma method was run.
-468        """
-469        return self.is_zero() or np.abs(self.value) <= sigma * self._dvalue
+            
470    def is_zero_within_error(self, sigma=1):
+471        """Checks whether the observable is zero within 'sigma' standard errors.
+472
+473        Parameters
+474        ----------
+475        sigma : int
+476            Number of standard errors used for the check.
+477
+478        Only works properly when the gamma method was run.
+479        """
+480        return self.is_zero() or np.abs(self.value) <= sigma * self._dvalue
 
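Since the check compares |value| against sigma * dvalue, the gamma method has to be run first so that the error estimate exists. A short sketch with synthetic data (names illustrative):

    import numpy as np
    import pyerrors as pe

    obs = pe.Obs([np.random.normal(0.01, 1.0, 1000)], ['ensemble1'])
    obs.gamma_method()                        # populates the error used below
    print(obs.is_zero_within_error())         # |value| <= 1 * dvalue?
    print(obs.is_zero_within_error(sigma=2))  # looser two-sigma criterion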
@@ -3275,15 +3307,15 @@ Number of standard errors used for the check.
-
471    def is_zero(self, atol=1e-10):
-472        """Checks whether the observable is zero within a given tolerance.
-473
-474        Parameters
-475        ----------
-476        atol : float
-477            Absolute tolerance (for details see numpy documentation).
-478        """
-479        return np.isclose(0.0, self.value, 1e-14, atol) and all(np.allclose(0.0, delta, 1e-14, atol) for delta in self.deltas.values()) and all(np.allclose(0.0, delta.errsq(), 1e-14, atol) for delta in self.covobs.values())
+            
482    def is_zero(self, atol=1e-10):
+483        """Checks whether the observable is zero within a given tolerance.
+484
+485        Parameters
+486        ----------
+487        atol : float
+488            Absolute tolerance (for details see numpy documentation).
+489        """
+490        return np.isclose(0.0, self.value, 1e-14, atol) and all(np.allclose(0.0, delta, 1e-14, atol) for delta in self.deltas.values()) and all(np.allclose(0.0, delta.errsq(), 1e-14, atol) for delta in self.covobs.values())
 
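In contrast to is_zero_within_error this is a purely numerical check on the mean value and all fluctuations, so no prior error analysis is needed. For instance, the difference of an observable with itself passes it exactly:

    import numpy as np
    import pyerrors as pe

    obs = pe.Obs([np.random.normal(1.0, 0.1, 100)], ['ensemble1'])
    print((obs - obs).is_zero())  # True: value and all deltas cancel exactly
    print(obs.is_zero())          # False for generic noisy data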
@@ -3310,45 +3342,45 @@ Absolute tolerance (for details see numpy documentation).
-
481    def plot_tauint(self, save=None):
-482        """Plot integrated autocorrelation time for each ensemble.
-483
-484        Parameters
-485        ----------
-486        save : str
-487            saves the figure to a file named 'save' if save is not None.
-488        """
-489        if not hasattr(self, 'e_dvalue'):
-490            raise Exception('Run the gamma method first.')
-491
-492        for e, e_name in enumerate(self.mc_names):
-493            fig = plt.figure()
-494            plt.xlabel(r'$W$')
-495            plt.ylabel(r'$\tau_\mathrm{int}$')
-496            length = int(len(self.e_n_tauint[e_name]))
-497            if self.tau_exp[e_name] > 0:
-498                base = self.e_n_tauint[e_name][self.e_windowsize[e_name]]
-499                x_help = np.arange(2 * self.tau_exp[e_name])
-500                y_help = (x_help + 1) * np.abs(self.e_rho[e_name][self.e_windowsize[e_name] + 1]) * (1 - x_help / (2 * (2 * self.tau_exp[e_name] - 1))) + base
-501                x_arr = np.arange(self.e_windowsize[e_name] + 1, self.e_windowsize[e_name] + 1 + 2 * self.tau_exp[e_name])
-502                plt.plot(x_arr, y_help, 'C' + str(e), linewidth=1, ls='--', marker=',')
-503                plt.errorbar([self.e_windowsize[e_name] + 2 * self.tau_exp[e_name]], [self.e_tauint[e_name]],
-504                             yerr=[self.e_dtauint[e_name]], fmt='C' + str(e), linewidth=1, capsize=2, marker='o', mfc=plt.rcParams['axes.facecolor'])
-505                xmax = self.e_windowsize[e_name] + 2 * self.tau_exp[e_name] + 1.5
-506                label = e_name + r', $\tau_\mathrm{exp}$=' + str(np.around(self.tau_exp[e_name], decimals=2))
-507            else:
-508                label = e_name + ', S=' + str(np.around(self.S[e_name], decimals=2))
-509                xmax = max(10.5, 2 * self.e_windowsize[e_name] - 0.5)
-510
-511            plt.errorbar(np.arange(length)[:int(xmax) + 1], self.e_n_tauint[e_name][:int(xmax) + 1], yerr=self.e_n_dtauint[e_name][:int(xmax) + 1], linewidth=1, capsize=2, label=label)
-512            plt.axvline(x=self.e_windowsize[e_name], color='C' + str(e), alpha=0.5, marker=',', ls='--')
-513            plt.legend()
-514            plt.xlim(-0.5, xmax)
-515            ylim = plt.ylim()
-516            plt.ylim(bottom=0.0, top=max(1.0, ylim[1]))
-517            plt.draw()
-518            if save:
-519                fig.savefig(save + "_" + str(e))
+            
492    def plot_tauint(self, save=None):
+493        """Plot integrated autocorrelation time for each ensemble.
+494
+495        Parameters
+496        ----------
+497        save : str
+498            saves the figure to a file named 'save' if save is not None.
+499        """
+500        if not hasattr(self, 'e_dvalue'):
+501            raise Exception('Run the gamma method first.')
+502
+503        for e, e_name in enumerate(self.mc_names):
+504            fig = plt.figure()
+505            plt.xlabel(r'$W$')
+506            plt.ylabel(r'$\tau_\mathrm{int}$')
+507            length = int(len(self.e_n_tauint[e_name]))
+508            if self.tau_exp[e_name] > 0:
+509                base = self.e_n_tauint[e_name][self.e_windowsize[e_name]]
+510                x_help = np.arange(2 * self.tau_exp[e_name])
+511                y_help = (x_help + 1) * np.abs(self.e_rho[e_name][self.e_windowsize[e_name] + 1]) * (1 - x_help / (2 * (2 * self.tau_exp[e_name] - 1))) + base
+512                x_arr = np.arange(self.e_windowsize[e_name] + 1, self.e_windowsize[e_name] + 1 + 2 * self.tau_exp[e_name])
+513                plt.plot(x_arr, y_help, 'C' + str(e), linewidth=1, ls='--', marker=',')
+514                plt.errorbar([self.e_windowsize[e_name] + 2 * self.tau_exp[e_name]], [self.e_tauint[e_name]],
+515                             yerr=[self.e_dtauint[e_name]], fmt='C' + str(e), linewidth=1, capsize=2, marker='o', mfc=plt.rcParams['axes.facecolor'])
+516                xmax = self.e_windowsize[e_name] + 2 * self.tau_exp[e_name] + 1.5
+517                label = e_name + r', $\tau_\mathrm{exp}$=' + str(np.around(self.tau_exp[e_name], decimals=2))
+518            else:
+519                label = e_name + ', S=' + str(np.around(self.S[e_name], decimals=2))
+520                xmax = max(10.5, 2 * self.e_windowsize[e_name] - 0.5)
+521
+522            plt.errorbar(np.arange(length)[:int(xmax) + 1], self.e_n_tauint[e_name][:int(xmax) + 1], yerr=self.e_n_dtauint[e_name][:int(xmax) + 1], linewidth=1, capsize=2, label=label)
+523            plt.axvline(x=self.e_windowsize[e_name], color='C' + str(e), alpha=0.5, marker=',', ls='--')
+524            plt.legend()
+525            plt.xlim(-0.5, xmax)
+526            ylim = plt.ylim()
+527            plt.ylim(bottom=0.0, top=max(1.0, ylim[1]))
+528            plt.draw()
+529            if save:
+530                fig.savefig(save + "_" + str(e))
 
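Typical usage: gamma_method has to be called first, and with tau_exp > 0 the plot additionally shows the attached exponential tail between the summation window and W + 2*tau_exp. A sketch (data and parameters illustrative):

    import numpy as np
    import pyerrors as pe

    obs = pe.Obs([np.random.normal(1.0, 0.1, 2000)], ['ensemble1'])
    obs.gamma_method(tau_exp=10)   # tail estimate enters the dashed branch
    obs.plot_tauint(save='tauint') # one file per ensemble: 'tauint_0', ...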
@@ -3375,36 +3407,36 @@ saves the figure to a file named 'save' if.
-
521    def plot_rho(self, save=None):
-522        """Plot normalized autocorrelation function for each ensemble.
-523
-524        Parameters
-525        ----------
-526        save : str
-527            saves the figure to a file named 'save' if save is not None.
-528        """
-529        if not hasattr(self, 'e_dvalue'):
-530            raise Exception('Run the gamma method first.')
-531        for e, e_name in enumerate(self.mc_names):
-532            fig = plt.figure()
-533            plt.xlabel('W')
-534            plt.ylabel('rho')
-535            length = int(len(self.e_drho[e_name]))
-536            plt.errorbar(np.arange(length), self.e_rho[e_name][:length], yerr=self.e_drho[e_name][:], linewidth=1, capsize=2)
-537            plt.axvline(x=self.e_windowsize[e_name], color='r', alpha=0.25, ls='--', marker=',')
-538            if self.tau_exp[e_name] > 0:
-539                plt.plot([self.e_windowsize[e_name] + 1, self.e_windowsize[e_name] + 1 + 2 * self.tau_exp[e_name]],
-540                         [self.e_rho[e_name][self.e_windowsize[e_name] + 1], 0], 'k-', lw=1)
-541                xmax = self.e_windowsize[e_name] + 2 * self.tau_exp[e_name] + 1.5
-542                plt.title('Rho ' + e_name + r', tau\_exp=' + str(np.around(self.tau_exp[e_name], decimals=2)))
-543            else:
-544                xmax = max(10.5, 2 * self.e_windowsize[e_name] - 0.5)
-545                plt.title('Rho ' + e_name + ', S=' + str(np.around(self.S[e_name], decimals=2)))
-546            plt.plot([-0.5, xmax], [0, 0], 'k--', lw=1)
-547            plt.xlim(-0.5, xmax)
-548            plt.draw()
-549            if save:
-550                fig.savefig(save + "_" + str(e))
+            
532    def plot_rho(self, save=None):
+533        """Plot normalized autocorrelation function for each ensemble.
+534
+535        Parameters
+536        ----------
+537        save : str
+538            saves the figure to a file named 'save' if save is not None.
+539        """
+540        if not hasattr(self, 'e_dvalue'):
+541            raise Exception('Run the gamma method first.')
+542        for e, e_name in enumerate(self.mc_names):
+543            fig = plt.figure()
+544            plt.xlabel('W')
+545            plt.ylabel('rho')
+546            length = int(len(self.e_drho[e_name]))
+547            plt.errorbar(np.arange(length), self.e_rho[e_name][:length], yerr=self.e_drho[e_name][:], linewidth=1, capsize=2)
+548            plt.axvline(x=self.e_windowsize[e_name], color='r', alpha=0.25, ls='--', marker=',')
+549            if self.tau_exp[e_name] > 0:
+550                plt.plot([self.e_windowsize[e_name] + 1, self.e_windowsize[e_name] + 1 + 2 * self.tau_exp[e_name]],
+551                         [self.e_rho[e_name][self.e_windowsize[e_name] + 1], 0], 'k-', lw=1)
+552                xmax = self.e_windowsize[e_name] + 2 * self.tau_exp[e_name] + 1.5
+553                plt.title('Rho ' + e_name + r', tau\_exp=' + str(np.around(self.tau_exp[e_name], decimals=2)))
+554            else:
+555                xmax = max(10.5, 2 * self.e_windowsize[e_name] - 0.5)
+556                plt.title('Rho ' + e_name + ', S=' + str(np.around(self.S[e_name], decimals=2)))
+557            plt.plot([-0.5, xmax], [0, 0], 'k--', lw=1)
+558            plt.xlim(-0.5, xmax)
+559            plt.draw()
+560            if save:
+561                fig.savefig(save + "_" + str(e))
 
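The quantity on the y axis is the normalized autocorrelation function rho(W) = Gamma(W) / Gamma(0), with Gamma the autocovariance of the Monte Carlo chain. A standalone numpy sketch of that definition (illustrative data):

    import numpy as np

    chain = np.random.normal(0.0, 1.0, 1000)
    d = chain - chain.mean()
    gamma = np.array([np.mean(d[:len(d) - w] * d[w:]) for w in range(50)])
    rho = gamma / gamma[0]   # rho[0] == 1 by construction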
@@ -3431,27 +3463,27 @@ saves the figure to a file named 'save' if.
-
552    def plot_rep_dist(self):
-553        """Plot replica distribution for each ensemble with more than one replicum."""
-554        if not hasattr(self, 'e_dvalue'):
-555            raise Exception('Run the gamma method first.')
-556        for e, e_name in enumerate(self.mc_names):
-557            if len(self.e_content[e_name]) == 1:
-558                print('No replica distribution for a single replicum (', e_name, ')')
-559                continue
-560            r_length = []
-561            sub_r_mean = 0
-562            for r, r_name in enumerate(self.e_content[e_name]):
-563                r_length.append(len(self.deltas[r_name]))
-564                sub_r_mean += self.shape[r_name] * self.r_values[r_name]
-565            e_N = np.sum(r_length)
-566            sub_r_mean /= e_N
-567            arr = np.zeros(len(self.e_content[e_name]))
-568            for r, r_name in enumerate(self.e_content[e_name]):
-569                arr[r] = (self.r_values[r_name] - sub_r_mean) / (self.e_dvalue[e_name] * np.sqrt(e_N / self.shape[r_name] - 1))
-570            plt.hist(arr, rwidth=0.8, bins=len(self.e_content[e_name]))
-571            plt.title('Replica distribution ' + e_name + ' (mean=0, var=1)')
-572            plt.draw()
+            
563    def plot_rep_dist(self):
+564        """Plot replica distribution for each ensemble with more than one replicum."""
+565        if not hasattr(self, 'e_dvalue'):
+566            raise Exception('Run the gamma method first.')
+567        for e, e_name in enumerate(self.mc_names):
+568            if len(self.e_content[e_name]) == 1:
+569                print('No replica distribution for a single replicum (', e_name, ')')
+570                continue
+571            r_length = []
+572            sub_r_mean = 0
+573            for r, r_name in enumerate(self.e_content[e_name]):
+574                r_length.append(len(self.deltas[r_name]))
+575                sub_r_mean += self.shape[r_name] * self.r_values[r_name]
+576            e_N = np.sum(r_length)
+577            sub_r_mean /= e_N
+578            arr = np.zeros(len(self.e_content[e_name]))
+579            for r, r_name in enumerate(self.e_content[e_name]):
+580                arr[r] = (self.r_values[r_name] - sub_r_mean) / (self.e_dvalue[e_name] * np.sqrt(e_N / self.shape[r_name] - 1))
+581            plt.hist(arr, rwidth=0.8, bins=len(self.e_content[e_name]))
+582            plt.title('Replica distribution ' + e_name + ' (mean=0, var=1)')
+583            plt.draw()
 
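The histogrammed quantity standardizes each replicum mean: for mutually consistent replica, (r_mean - e_mean) divided by sigma * sqrt(e_N / N_r - 1) should scatter with mean 0 and variance 1. A toy numpy check of this normalization, assuming independent samples (all numbers illustrative):

    import numpy as np

    rng = np.random.default_rng(1)
    reps = [rng.normal(0.0, 1.0, n) for n in (400, 600)]  # two fake replica
    e_N = sum(len(r) for r in reps)
    e_mean = np.concatenate(reps).mean()
    sigma = 1.0 / np.sqrt(e_N)  # naive error of the combined mean
    arr = [(r.mean() - e_mean) / (sigma * np.sqrt(e_N / len(r) - 1)) for r in reps]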
@@ -3471,37 +3503,37 @@ saves the figure to a file named 'save' if.
-
574    def plot_history(self, expand=True):
-575        """Plot derived Monte Carlo history for each ensemble.
-576
-577        Parameters
-578        ----------
-579        expand : bool
-580            show expanded history for irregular Monte Carlo chains (default: True).
-581        """
-582        for e, e_name in enumerate(self.mc_names):
-583            plt.figure()
-584            r_length = []
-585            tmp = []
-586            tmp_expanded = []
-587            for r, r_name in enumerate(self.e_content[e_name]):
-588                tmp.append(self.deltas[r_name] + self.r_values[r_name])
-589                if expand:
-590                    tmp_expanded.append(_expand_deltas(self.deltas[r_name], list(self.idl[r_name]), self.shape[r_name]) + self.r_values[r_name])
-591                    r_length.append(len(tmp_expanded[-1]))
-592                else:
-593                    r_length.append(len(tmp[-1]))
-594            e_N = np.sum(r_length)
-595            x = np.arange(e_N)
-596            y_test = np.concatenate(tmp, axis=0)
-597            if expand:
-598                y = np.concatenate(tmp_expanded, axis=0)
-599            else:
-600                y = y_test
-601            plt.errorbar(x, y, fmt='.', markersize=3)
-602            plt.xlim(-0.5, e_N - 0.5)
-603            plt.title(e_name + f'\nskew: {skew(y_test):.3f} (p={skewtest(y_test).pvalue:.3f}), kurtosis: {kurtosis(y_test):.3f} (p={kurtosistest(y_test).pvalue:.3f})')
-604            plt.draw()
+            
585    def plot_history(self, expand=True):
+586        """Plot derived Monte Carlo history for each ensemble.
+587
+588        Parameters
+589        ----------
+590        expand : bool
+591            show expanded history for irregular Monte Carlo chains (default: True).
+592        """
+593        for e, e_name in enumerate(self.mc_names):
+594            plt.figure()
+595            r_length = []
+596            tmp = []
+597            tmp_expanded = []
+598            for r, r_name in enumerate(self.e_content[e_name]):
+599                tmp.append(self.deltas[r_name] + self.r_values[r_name])
+600                if expand:
+601                    tmp_expanded.append(_expand_deltas(self.deltas[r_name], list(self.idl[r_name]), self.shape[r_name]) + self.r_values[r_name])
+602                    r_length.append(len(tmp_expanded[-1]))
+603                else:
+604                    r_length.append(len(tmp[-1]))
+605            e_N = np.sum(r_length)
+606            x = np.arange(e_N)
+607            y_test = np.concatenate(tmp, axis=0)
+608            if expand:
+609                y = np.concatenate(tmp_expanded, axis=0)
+610            else:
+611                y = y_test
+612            plt.errorbar(x, y, fmt='.', markersize=3)
+613            plt.xlim(-0.5, e_N - 0.5)
+614            plt.title(e_name + f'\nskew: {skew(y_test):.3f} (p={skewtest(y_test).pvalue:.3f}), kurtosis: {kurtosis(y_test):.3f} (p={kurtosistest(y_test).pvalue:.3f})')
+615            plt.draw()
 
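With expand=True the deltas of an irregular chain are stretched out via _expand_deltas, so the x axis counts configurations rather than measurements; with expand=False the raw samples are plotted back to back. A sketch (idl below is an illustrative irregular range):

    import numpy as np
    import pyerrors as pe

    # Measurements on every 4th configuration only
    obs = pe.Obs([np.random.normal(1.0, 0.1, 250)], ['ensemble1'],
                 idl=[range(0, 1000, 4)])
    obs.plot_history()              # expanded: x axis counts configurations
    obs.plot_history(expand=False)  # compact: x axis counts measurements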
@@ -3528,29 +3560,29 @@ show expanded history for irregular Monte Carlo chains (default: True).
-
606    def plot_piechart(self, save=None):
-607        """Plot a pie chart which shows the fractional contribution of each
-608        ensemble to the error and return a dictionary containing the fractions.
-609
-610        Parameters
-611        ----------
-612        save : str
-613            saves the figure to a file named 'save' if save is not None.
-614        """
-615        if not hasattr(self, 'e_dvalue'):
-616            raise Exception('Run the gamma method first.')
-617        if np.isclose(0.0, self._dvalue, atol=1e-15):
-618            raise Exception('Error is 0.0')
-619        labels = self.e_names
-620        sizes = [self.e_dvalue[name] ** 2 for name in labels] / self._dvalue ** 2
-621        fig1, ax1 = plt.subplots()
-622        ax1.pie(sizes, labels=labels, startangle=90, normalize=True)
-623        ax1.axis('equal')
-624        plt.draw()
-625        if save:
-626            fig1.savefig(save)
-627
-628        return dict(zip(self.e_names, sizes))
+            
617    def plot_piechart(self, save=None):
+618        """Plot a pie chart which shows the fractional contribution of each
+619        ensemble to the error and return a dictionary containing the fractions.
+620
+621        Parameters
+622        ----------
+623        save : str
+624            saves the figure to a file named 'save' if save is not None.
+625        """
+626        if not hasattr(self, 'e_dvalue'):
+627            raise Exception('Run the gamma method first.')
+628        if np.isclose(0.0, self._dvalue, atol=1e-15):
+629            raise Exception('Error is 0.0')
+630        labels = self.e_names
+631        sizes = [self.e_dvalue[name] ** 2 for name in labels] / self._dvalue ** 2
+632        fig1, ax1 = plt.subplots()
+633        ax1.pie(sizes, labels=labels, startangle=90, normalize=True)
+634        ax1.axis('equal')
+635        plt.draw()
+636        if save:
+637            fig1.savefig(save)
+638
+639        return dict(zip(self.e_names, sizes))
 
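The returned fractions are the squared ensemble errors divided by the squared total error, so for purely statistical contributions they sum to one. Sketch with two illustrative ensembles:

    import numpy as np
    import pyerrors as pe

    obs = (pe.Obs([np.random.normal(1.0, 0.1, 500)], ['ensA'])
           + pe.Obs([np.random.normal(0.5, 0.2, 800)], ['ensB']))
    obs.gamma_method()
    fractions = obs.plot_piechart()
    print(fractions, sum(fractions.values()))  # fractions add up to ~1.0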
@@ -3578,34 +3610,34 @@ saves the figure to a file named 'save' if.
-
630    def dump(self, filename, datatype="json.gz", description="", **kwargs):
-631        """Dump the Obs to a file 'filename' of chosen format.
-632
-633        Parameters
-634        ----------
-635        filename : str
-636            name of the file to be saved.
-637        datatype : str
-638            Format of the exported file. Supported formats include
-639            "json.gz" and "pickle"
-640        description : str
-641            Description for output file, only relevant for json.gz format.
-642        path : str
-643            specifies a custom path for the file (default '.')
-644        """
-645        if 'path' in kwargs:
-646            file_name = kwargs.get('path') + '/' + filename
-647        else:
-648            file_name = filename
-649
-650        if datatype == "json.gz":
-651            from .input.json import dump_to_json
-652            dump_to_json([self], file_name, description=description)
-653        elif datatype == "pickle":
-654            with open(file_name + '.p', 'wb') as fb:
-655                pickle.dump(self, fb)
-656        else:
-657            raise Exception("Unknown datatype " + str(datatype))
+            
641    def dump(self, filename, datatype="json.gz", description="", **kwargs):
+642        """Dump the Obs to a file 'filename' of chosen format.
+643
+644        Parameters
+645        ----------
+646        filename : str
+647            name of the file to be saved.
+648        datatype : str
+649            Format of the exported file. Supported formats include
+650            "json.gz" and "pickle"
+651        description : str
+652            Description for output file, only relevant for json.gz format.
+653        path : str
+654            specifies a custom path for the file (default '.')
+655        """
+656        if 'path' in kwargs:
+657            file_name = kwargs.get('path') + '/' + filename
+658        else:
+659            file_name = filename
+660
+661        if datatype == "json.gz":
+662            from .input.json import dump_to_json
+663            dump_to_json([self], file_name, description=description)
+664        elif datatype == "pickle":
+665            with open(file_name + '.p', 'wb') as fb:
+666                pickle.dump(self, fb)
+667        else:
+668            raise Exception("Unknown datatype " + str(datatype))
 
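A round trip under the json.gz format; load_json from pyerrors.input.json is assumed to be the matching reader here (for the pickle format one would use the standard pickle.load instead):

    import numpy as np
    import pyerrors as pe

    obs = pe.Obs([np.random.normal(1.0, 0.1, 500)], ['ensemble1'])
    obs.dump('my_obs', datatype='json.gz', description='toy data', path='.')
    # assumption: a file with a single Obs is read back as that Obs
    loaded = pe.input.json.load_json('my_obs')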
@@ -3639,31 +3671,31 @@ specifies a custom path for the file (default '.')
-
659    def export_jackknife(self):
-660        """Export jackknife samples from the Obs.
-661
-662        Returns
-663        -------
-664        numpy.ndarray
-665            Returns a numpy array of length N + 1 where N is the number of samples
-666            for the given ensemble and replicum. The zeroth entry of the array contains
-667            the mean value of the Obs, entries 1 to N contain the N jackknife samples
-668            derived from the Obs. The current implementation only works for observables
-669            defined on exactly one ensemble and replicum. The derived jackknife samples
-670            should agree with samples from a full jackknife analysis up to O(1/N).
-671        """
+            
670    def export_jackknife(self):
+671        """Export jackknife samples from the Obs.
 672
-673        if len(self.names) != 1:
-674            raise Exception("'export_jackknife' is only implemented for Obs defined on one ensemble and replicum.")
-675
-676        name = self.names[0]
-677        full_data = self.deltas[name] + self.r_values[name]
-678        n = full_data.size
-679        mean = self.value
-680        tmp_jacks = np.zeros(n + 1)
-681        tmp_jacks[0] = mean
-682        tmp_jacks[1:] = (n * mean - full_data) / (n - 1)
-683        return tmp_jacks
+673        Returns
+674        -------
+675        numpy.ndarray
+676            Returns a numpy array of length N + 1 where N is the number of samples
+677            for the given ensemble and replicum. The zeroth entry of the array contains
+678            the mean value of the Obs, entries 1 to N contain the N jackknife samples
+679            derived from the Obs. The current implementation only works for observables
+680            defined on exactly one ensemble and replicum. The derived jackknife samples
+681            should agree with samples from a full jackknife analysis up to O(1/N).
+682        """
+683
+684        if len(self.names) != 1:
+685            raise Exception("'export_jackknife' is only implemented for Obs defined on one ensemble and replicum.")
+686
+687        name = self.names[0]
+688        full_data = self.deltas[name] + self.r_values[name]
+689        n = full_data.size
+690        mean = self.value
+691        tmp_jacks = np.zeros(n + 1)
+692        tmp_jacks[0] = mean
+693        tmp_jacks[1:] = (n * mean - full_data) / (n - 1)
+694        return tmp_jacks
 
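The identity behind tmp_jacks: entry i (for i >= 1) is the leave-one-out mean (n * mean - x_i) / (n - 1). This, and the resulting naive error estimate, can be checked directly on synthetic data:

    import numpy as np
    import pyerrors as pe

    data = np.random.normal(1.0, 0.1, 100)
    obs = pe.Obs([data], ['ensemble1'])
    jacks = obs.export_jackknife()
    n = data.size
    assert np.isclose(jacks[0], data.mean())
    assert np.allclose(jacks[1:], (n * data.mean() - data) / (n - 1))
    # jackknife error; for uncorrelated data this approaches std / sqrt(n)
    err = np.sqrt((n - 1) * np.mean((jacks[1:] - jacks[1:].mean()) ** 2))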
@@ -3694,8 +3726,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
819    def sqrt(self):
-820        return derived_observable(lambda x, **kwargs: np.sqrt(x[0]), [self], man_grad=[1 / 2 / np.sqrt(self.value)])
+            
824    def sqrt(self):
+825        return derived_observable(lambda x, **kwargs: np.sqrt(x[0]), [self], man_grad=[1 / 2 / np.sqrt(self.value)])
 
@@ -3713,8 +3745,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
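These wrappers propagate the error at first order; man_grad supplies the analytic derivative, here d sqrt(x)/dx = 1 / (2 sqrt(x)). numpy dispatches np.sqrt(obs) to this method, so the relation can be verified directly (data illustrative):

    import numpy as np
    import pyerrors as pe

    obs = pe.Obs([np.random.normal(4.0, 0.2, 1000)], ['ensemble1'])
    root = np.sqrt(obs)  # dispatches to Obs.sqrt
    obs.gamma_method()
    root.gamma_method()
    # first-order propagation: d(sqrt x) ~= dx / (2 sqrt x)
    print(root.dvalue, obs.dvalue / (2 * np.sqrt(obs.value)))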
-
822    def log(self):
-823        return derived_observable(lambda x, **kwargs: np.log(x[0]), [self], man_grad=[1 / self.value])
+            
827    def log(self):
+828        return derived_observable(lambda x, **kwargs: np.log(x[0]), [self], man_grad=[1 / self.value])
 
@@ -3732,8 +3764,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
825    def exp(self):
-826        return derived_observable(lambda x, **kwargs: np.exp(x[0]), [self], man_grad=[np.exp(self.value)])
+            
830    def exp(self):
+831        return derived_observable(lambda x, **kwargs: np.exp(x[0]), [self], man_grad=[np.exp(self.value)])
 
@@ -3751,8 +3783,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
828    def sin(self):
-829        return derived_observable(lambda x, **kwargs: np.sin(x[0]), [self], man_grad=[np.cos(self.value)])
+            
833    def sin(self):
+834        return derived_observable(lambda x, **kwargs: np.sin(x[0]), [self], man_grad=[np.cos(self.value)])
 
@@ -3770,8 +3802,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
831    def cos(self):
-832        return derived_observable(lambda x, **kwargs: np.cos(x[0]), [self], man_grad=[-np.sin(self.value)])
+            
836    def cos(self):
+837        return derived_observable(lambda x, **kwargs: np.cos(x[0]), [self], man_grad=[-np.sin(self.value)])
 
@@ -3789,8 +3821,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
834    def tan(self):
-835        return derived_observable(lambda x, **kwargs: np.tan(x[0]), [self], man_grad=[1 / np.cos(self.value) ** 2])
+            
839    def tan(self):
+840        return derived_observable(lambda x, **kwargs: np.tan(x[0]), [self], man_grad=[1 / np.cos(self.value) ** 2])
 
@@ -3808,8 +3840,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
837    def arcsin(self):
-838        return derived_observable(lambda x: anp.arcsin(x[0]), [self])
+            
842    def arcsin(self):
+843        return derived_observable(lambda x: anp.arcsin(x[0]), [self])
 
@@ -3827,8 +3859,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
840    def arccos(self):
-841        return derived_observable(lambda x: anp.arccos(x[0]), [self])
+            
845    def arccos(self):
+846        return derived_observable(lambda x: anp.arccos(x[0]), [self])
 
@@ -3846,8 +3878,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
843    def arctan(self):
-844        return derived_observable(lambda x: anp.arctan(x[0]), [self])
+            
848    def arctan(self):
+849        return derived_observable(lambda x: anp.arctan(x[0]), [self])
 
@@ -3865,8 +3897,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
846    def sinh(self):
-847        return derived_observable(lambda x, **kwargs: np.sinh(x[0]), [self], man_grad=[np.cosh(self.value)])
+            
851    def sinh(self):
+852        return derived_observable(lambda x, **kwargs: np.sinh(x[0]), [self], man_grad=[np.cosh(self.value)])
 
@@ -3884,8 +3916,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
849    def cosh(self):
-850        return derived_observable(lambda x, **kwargs: np.cosh(x[0]), [self], man_grad=[np.sinh(self.value)])
+            
854    def cosh(self):
+855        return derived_observable(lambda x, **kwargs: np.cosh(x[0]), [self], man_grad=[np.sinh(self.value)])
 
@@ -3903,8 +3935,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
852    def tanh(self):
-853        return derived_observable(lambda x, **kwargs: np.tanh(x[0]), [self], man_grad=[1 / np.cosh(self.value) ** 2])
+            
857    def tanh(self):
+858        return derived_observable(lambda x, **kwargs: np.tanh(x[0]), [self], man_grad=[1 / np.cosh(self.value) ** 2])
 
@@ -3922,8 +3954,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
855    def arcsinh(self):
-856        return derived_observable(lambda x: anp.arcsinh(x[0]), [self])
+            
860    def arcsinh(self):
+861        return derived_observable(lambda x: anp.arcsinh(x[0]), [self])
 
@@ -3941,8 +3973,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
858    def arccosh(self):
-859        return derived_observable(lambda x: anp.arccosh(x[0]), [self])
+            
863    def arccosh(self):
+864        return derived_observable(lambda x: anp.arccosh(x[0]), [self])
 
@@ -3960,8 +3992,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
861    def arctanh(self):
-862        return derived_observable(lambda x: anp.arctanh(x[0]), [self])
+            
866    def arctanh(self):
+867        return derived_observable(lambda x: anp.arctanh(x[0]), [self])
 
@@ -3980,115 +4012,115 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
865class CObs:
-866    """Class for a complex-valued observable."""
-867    __slots__ = ['_real', '_imag', 'tag']
-868
-869    def __init__(self, real, imag=0.0):
-870        self._real = real
-871        self._imag = imag
-872        self.tag = None
+            
870class CObs:
+871    """Class for a complex-valued observable."""
+872    __slots__ = ['_real', '_imag', 'tag']
 873
-874    @property
-875    def real(self):
-876        return self._real
-877
-878    @property
-879    def imag(self):
-880        return self._imag
-881
-882    def gamma_method(self, **kwargs):
-883        """Executes the gamma_method for the real and the imaginary part."""
-884        if isinstance(self.real, Obs):
-885            self.real.gamma_method(**kwargs)
-886        if isinstance(self.imag, Obs):
-887            self.imag.gamma_method(**kwargs)
-888
-889    def is_zero(self):
-890        """Checks whether both real and imaginary part are zero within machine precision."""
-891        return self.real == 0.0 and self.imag == 0.0
-892
-893    def conjugate(self):
-894        return CObs(self.real, -self.imag)
-895
-896    def __add__(self, other):
-897        if isinstance(other, np.ndarray):
-898            return other + self
-899        elif hasattr(other, 'real') and hasattr(other, 'imag'):
-900            return CObs(self.real + other.real,
-901                        self.imag + other.imag)
-902        else:
-903            return CObs(self.real + other, self.imag)
-904
-905    def __radd__(self, y):
-906        return self + y
-907
-908    def __sub__(self, other):
-909        if isinstance(other, np.ndarray):
-910            return -1 * (other - self)
-911        elif hasattr(other, 'real') and hasattr(other, 'imag'):
-912            return CObs(self.real - other.real, self.imag - other.imag)
-913        else:
-914            return CObs(self.real - other, self.imag)
-915
-916    def __rsub__(self, other):
-917        return -1 * (self - other)
-918
-919    def __mul__(self, other):
-920        if isinstance(other, np.ndarray):
-921            return other * self
-922        elif hasattr(other, 'real') and hasattr(other, 'imag'):
-923            if all(isinstance(i, Obs) for i in [self.real, self.imag, other.real, other.imag]):
-924                return CObs(derived_observable(lambda x, **kwargs: x[0] * x[1] - x[2] * x[3],
-925                                               [self.real, other.real, self.imag, other.imag],
-926                                               man_grad=[other.real.value, self.real.value, -other.imag.value, -self.imag.value]),
-927                            derived_observable(lambda x, **kwargs: x[2] * x[1] + x[0] * x[3],
-928                                               [self.real, other.real, self.imag, other.imag],
-929                                               man_grad=[other.imag.value, self.imag.value, other.real.value, self.real.value]))
-930            elif getattr(other, 'imag', 0) != 0:
-931                return CObs(self.real * other.real - self.imag * other.imag,
-932                            self.imag * other.real + self.real * other.imag)
-933            else:
-934                return CObs(self.real * other.real, self.imag * other.real)
-935        else:
-936            return CObs(self.real * other, self.imag * other)
-937
-938    def __rmul__(self, other):
-939        return self * other
-940
-941    def __truediv__(self, other):
-942        if isinstance(other, np.ndarray):
-943            return 1 / (other / self)
-944        elif hasattr(other, 'real') and hasattr(other, 'imag'):
-945            r = other.real ** 2 + other.imag ** 2
-946            return CObs((self.real * other.real + self.imag * other.imag) / r, (self.imag * other.real - self.real * other.imag) / r)
-947        else:
-948            return CObs(self.real / other, self.imag / other)
-949
-950    def __rtruediv__(self, other):
-951        r = self.real ** 2 + self.imag ** 2
-952        if hasattr(other, 'real') and hasattr(other, 'imag'):
-953            return CObs((self.real * other.real + self.imag * other.imag) / r, (self.real * other.imag - self.imag * other.real) / r)
-954        else:
-955            return CObs(self.real * other / r, -self.imag * other / r)
-956
-957    def __abs__(self):
-958        return np.sqrt(self.real**2 + self.imag**2)
-959
-960    def __pos__(self):
-961        return self
-962
-963    def __neg__(self):
-964        return -1 * self
-965
-966    def __eq__(self, other):
-967        return self.real == other.real and self.imag == other.imag
-968
-969    def __str__(self):
-970        return '(' + str(self.real) + int(self.imag >= 0.0) * '+' + str(self.imag) + 'j)'
-971
-972    def __repr__(self):
-973        return 'CObs[' + str(self) + ']'
+874    def __init__(self, real, imag=0.0):
+875        self._real = real
+876        self._imag = imag
+877        self.tag = None
+878
+879    @property
+880    def real(self):
+881        return self._real
+882
+883    @property
+884    def imag(self):
+885        return self._imag
+886
+887    def gamma_method(self, **kwargs):
+888        """Executes the gamma_method for the real and the imaginary part."""
+889        if isinstance(self.real, Obs):
+890            self.real.gamma_method(**kwargs)
+891        if isinstance(self.imag, Obs):
+892            self.imag.gamma_method(**kwargs)
+893
+894    def is_zero(self):
+895        """Checks whether both real and imaginary part are zero within machine precision."""
+896        return self.real == 0.0 and self.imag == 0.0
+897
+898    def conjugate(self):
+899        return CObs(self.real, -self.imag)
+900
+901    def __add__(self, other):
+902        if isinstance(other, np.ndarray):
+903            return other + self
+904        elif hasattr(other, 'real') and hasattr(other, 'imag'):
+905            return CObs(self.real + other.real,
+906                        self.imag + other.imag)
+907        else:
+908            return CObs(self.real + other, self.imag)
+909
+910    def __radd__(self, y):
+911        return self + y
+912
+913    def __sub__(self, other):
+914        if isinstance(other, np.ndarray):
+915            return -1 * (other - self)
+916        elif hasattr(other, 'real') and hasattr(other, 'imag'):
+917            return CObs(self.real - other.real, self.imag - other.imag)
+918        else:
+919            return CObs(self.real - other, self.imag)
+920
+921    def __rsub__(self, other):
+922        return -1 * (self - other)
+923
+924    def __mul__(self, other):
+925        if isinstance(other, np.ndarray):
+926            return other * self
+927        elif hasattr(other, 'real') and hasattr(other, 'imag'):
+928            if all(isinstance(i, Obs) for i in [self.real, self.imag, other.real, other.imag]):
+929                return CObs(derived_observable(lambda x, **kwargs: x[0] * x[1] - x[2] * x[3],
+930                                               [self.real, other.real, self.imag, other.imag],
+931                                               man_grad=[other.real.value, self.real.value, -other.imag.value, -self.imag.value]),
+932                            derived_observable(lambda x, **kwargs: x[2] * x[1] + x[0] * x[3],
+933                                               [self.real, other.real, self.imag, other.imag],
+934                                               man_grad=[other.imag.value, self.imag.value, other.real.value, self.real.value]))
+935            elif getattr(other, 'imag', 0) != 0:
+936                return CObs(self.real * other.real - self.imag * other.imag,
+937                            self.imag * other.real + self.real * other.imag)
+938            else:
+939                return CObs(self.real * other.real, self.imag * other.real)
+940        else:
+941            return CObs(self.real * other, self.imag * other)
+942
+943    def __rmul__(self, other):
+944        return self * other
+945
+946    def __truediv__(self, other):
+947        if isinstance(other, np.ndarray):
+948            return 1 / (other / self)
+949        elif hasattr(other, 'real') and hasattr(other, 'imag'):
+950            r = other.real ** 2 + other.imag ** 2
+951            return CObs((self.real * other.real + self.imag * other.imag) / r, (self.imag * other.real - self.real * other.imag) / r)
+952        else:
+953            return CObs(self.real / other, self.imag / other)
+954
+955    def __rtruediv__(self, other):
+956        r = self.real ** 2 + self.imag ** 2
+957        if hasattr(other, 'real') and hasattr(other, 'imag'):
+958            return CObs((self.real * other.real + self.imag * other.imag) / r, (self.real * other.imag - self.imag * other.real) / r)
+959        else:
+960            return CObs(self.real * other / r, -self.imag * other / r)
+961
+962    def __abs__(self):
+963        return np.sqrt(self.real**2 + self.imag**2)
+964
+965    def __pos__(self):
+966        return self
+967
+968    def __neg__(self):
+969        return -1 * self
+970
+971    def __eq__(self, other):
+972        return self.real == other.real and self.imag == other.imag
+973
+974    def __str__(self):
+975        return '(' + str(self.real) + int(self.imag >= 0.0) * '+' + str(self.imag) + 'j)'
+976
+977    def __repr__(self):
+978        return 'CObs[' + str(self) + ']'
 
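CObs stores the real and the imaginary part as separate real-valued Obs and implements the usual complex arithmetic on top of them, including mixing with plain complex numbers. A small usage sketch, again assuming a hypothetical ensemble 'ens1':

    import numpy as np
    import pyerrors as pe

    re_part = pe.Obs([np.random.normal(1.0, 0.1, 500)], ['ens1'])
    im_part = pe.Obs([np.random.normal(0.3, 0.1, 500)], ['ens1'])

    z = pe.CObs(re_part, im_part)
    w = (1 + 2j) * z.conjugate()  # complex scalars and CObs can be mixed
    w.gamma_method()              # runs the gamma method on both components
    print(w.real, w.imag)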
@@ -4106,10 +4138,10 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-869    def __init__(self, real, imag=0.0):
-870        self._real = real
-871        self._imag = imag
-872        self.tag = None
+874    def __init__(self, real, imag=0.0):
+875        self._real = real
+876        self._imag = imag
+877        self.tag = None
 
@@ -4127,12 +4159,12 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-882    def gamma_method(self, **kwargs):
-883        """Executes the gamma_method for the real and the imaginary part."""
-884        if isinstance(self.real, Obs):
-885            self.real.gamma_method(**kwargs)
-886        if isinstance(self.imag, Obs):
-887            self.imag.gamma_method(**kwargs)
+887    def gamma_method(self, **kwargs):
+888        """Executes the gamma_method for the real and the imaginary part."""
+889        if isinstance(self.real, Obs):
+890            self.real.gamma_method(**kwargs)
+891        if isinstance(self.imag, Obs):
+892            self.imag.gamma_method(**kwargs)
 
@@ -4152,9 +4184,9 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-889    def is_zero(self):
-890        """Checks whether both real and imaginary part are zero within machine precision."""
-891        return self.real == 0.0 and self.imag == 0.0
+894    def is_zero(self):
+895        """Checks whether both real and imaginary part are zero within machine precision."""
+896        return self.real == 0.0 and self.imag == 0.0
 
@@ -4174,8 +4206,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-893    def conjugate(self):
-894        return CObs(self.real, -self.imag)
+898    def conjugate(self):
+899        return CObs(self.real, -self.imag)
 
@@ -4194,184 +4226,184 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-1142def derived_observable(func, data, array_mode=False, **kwargs):
-1143    """Construct a derived Obs according to func(data, **kwargs) using automatic differentiation.
-1144
-1145    Parameters
-1146    ----------
-1147    func : object
-1148        arbitrary function of the form func(data, **kwargs). For the
-1149        automatic differentiation to work, all numpy functions have to have
-1150        the autograd wrapper (use 'import autograd.numpy as anp').
-1151    data : list
-1152        list of Obs, e.g. [obs1, obs2, obs3].
-1153    num_grad : bool
-1154        if True, numerical derivatives are used instead of autograd
-1155        (default False). To control the numerical differentiation the
-1156        kwargs of numdifftools.step_generators.MaxStepGenerator
-1157        can be used.
-1158    man_grad : list
-1159        manually supply a list or an array which contains the jacobian
-1160        of func. Use cautiously, supplying the wrong derivative will
-1161        not be intercepted.
-1162
-1163    Notes
-1164    -----
-1165    For simple mathematical operations it can be practical to use anonymous
-1166    functions. For the ratio of two observables one can e.g. use
-1167
-1168    new_obs = derived_observable(lambda x: x[0] / x[1], [obs1, obs2])
-1169    """
-1170
-1171    data = np.asarray(data)
-1172    raveled_data = data.ravel()
-1173
-1174    # Workaround for matrix operations containing non Obs data
-1175    if not all(isinstance(x, Obs) for x in raveled_data):
-1176        for i in range(len(raveled_data)):
-1177            if isinstance(raveled_data[i], (int, float)):
-1178                raveled_data[i] = cov_Obs(raveled_data[i], 0.0, "###dummy_covobs###")
-1179
-1180    allcov = {}
-1181    for o in raveled_data:
-1182        for name in o.cov_names:
-1183            if name in allcov:
-1184                if not np.allclose(allcov[name], o.covobs[name].cov):
-1185                    raise Exception('Inconsistent covariance matrices for %s!' % (name))
-1186            else:
-1187                allcov[name] = o.covobs[name].cov
-1188
-1189    n_obs = len(raveled_data)
-1190    new_names = sorted(set([y for x in [o.names for o in raveled_data] for y in x]))
-1191    new_cov_names = sorted(set([y for x in [o.cov_names for o in raveled_data] for y in x]))
-1192    new_sample_names = sorted(set(new_names) - set(new_cov_names))
-1193
-1194    is_merged = {name: (len(list(filter(lambda o: o.is_merged.get(name, False) is True, raveled_data))) > 0) for name in new_sample_names}
-1195    reweighted = len(list(filter(lambda o: o.reweighted is True, raveled_data))) > 0
-1196
-1197    if data.ndim == 1:
-1198        values = np.array([o.value for o in data])
-1199    else:
-1200        values = np.vectorize(lambda x: x.value)(data)
-1201
-1202    new_values = func(values, **kwargs)
-1203
-1204    multi = int(isinstance(new_values, np.ndarray))
-1205
-1206    new_r_values = {}
-1207    new_idl_d = {}
-1208    for name in new_sample_names:
-1209        idl = []
-1210        tmp_values = np.zeros(n_obs)
-1211        for i, item in enumerate(raveled_data):
-1212            tmp_values[i] = item.r_values.get(name, item.value)
-1213            tmp_idl = item.idl.get(name)
-1214            if tmp_idl is not None:
-1215                idl.append(tmp_idl)
-1216        if multi > 0:
-1217            tmp_values = np.array(tmp_values).reshape(data.shape)
-1218        new_r_values[name] = func(tmp_values, **kwargs)
-1219        new_idl_d[name] = _merge_idx(idl)
-1220        if not is_merged[name]:
-1221            is_merged[name] = (1 != len(set([len(idx) for idx in [*idl, new_idl_d[name]]])))
-1222
-1223    if 'man_grad' in kwargs:
-1224        deriv = np.asarray(kwargs.get('man_grad'))
-1225        if new_values.shape + data.shape != deriv.shape:
-1226            raise Exception('Manual derivative does not have correct shape.')
-1227    elif kwargs.get('num_grad') is True:
-1228        if multi > 0:
-1229            raise Exception('Multi mode currently not supported for numerical derivative')
-1230        options = {
-1231            'base_step': 0.1,
-1232            'step_ratio': 2.5}
-1233        for key in options.keys():
-1234            kwarg = kwargs.get(key)
-1235            if kwarg is not None:
-1236                options[key] = kwarg
-1237        tmp_df = nd.Gradient(func, order=4, **{k: v for k, v in options.items() if v is not None})(values, **kwargs)
-1238        if tmp_df.size == 1:
-1239            deriv = np.array([tmp_df.real])
-1240        else:
-1241            deriv = tmp_df.real
-1242    else:
-1243        deriv = jacobian(func)(values, **kwargs)
-1244
-1245    final_result = np.zeros(new_values.shape, dtype=object)
-1246
-1247    if array_mode is True:
-1248
-1249        class _Zero_grad():
-1250            def __init__(self, N):
-1251                self.grad = np.zeros((N, 1))
-1252
-1253        new_covobs_lengths = dict(set([y for x in [[(n, o.covobs[n].N) for n in o.cov_names] for o in raveled_data] for y in x]))
-1254        d_extracted = {}
-1255        g_extracted = {}
-1256        for name in new_sample_names:
-1257            d_extracted[name] = []
-1258            ens_length = len(new_idl_d[name])
-1259            for i_dat, dat in enumerate(data):
-1260                d_extracted[name].append(np.array([_expand_deltas_for_merge(o.deltas.get(name, np.zeros(ens_length)), o.idl.get(name, new_idl_d[name]), o.shape.get(name, ens_length), new_idl_d[name]) for o in dat.reshape(np.prod(dat.shape))]).reshape(dat.shape + (ens_length, )))
-1261        for name in new_cov_names:
-1262            g_extracted[name] = []
-1263            zero_grad = _Zero_grad(new_covobs_lengths[name])
-1264            for i_dat, dat in enumerate(data):
-1265                g_extracted[name].append(np.array([o.covobs.get(name, zero_grad).grad for o in dat.reshape(np.prod(dat.shape))]).reshape(dat.shape + (new_covobs_lengths[name], 1)))
-1266
-1267    for i_val, new_val in np.ndenumerate(new_values):
-1268        new_deltas = {}
-1269        new_grad = {}
-1270        if array_mode is True:
-1271            for name in new_sample_names:
-1272                ens_length = d_extracted[name][0].shape[-1]
-1273                new_deltas[name] = np.zeros(ens_length)
-1274                for i_dat, dat in enumerate(d_extracted[name]):
-1275                    new_deltas[name] += np.tensordot(deriv[i_val + (i_dat, )], dat)
-1276            for name in new_cov_names:
-1277                new_grad[name] = 0
-1278                for i_dat, dat in enumerate(g_extracted[name]):
-1279                    new_grad[name] += np.tensordot(deriv[i_val + (i_dat, )], dat)
-1280        else:
-1281            for j_obs, obs in np.ndenumerate(data):
-1282                for name in obs.names:
-1283                    if name in obs.cov_names:
-1284                        new_grad[name] = new_grad.get(name, 0) + deriv[i_val + j_obs] * obs.covobs[name].grad
-1285                    else:
-1286                        new_deltas[name] = new_deltas.get(name, 0) + deriv[i_val + j_obs] * _expand_deltas_for_merge(obs.deltas[name], obs.idl[name], obs.shape[name], new_idl_d[name])
-1287
-1288        new_covobs = {name: Covobs(0, allcov[name], name, grad=new_grad[name]) for name in new_grad}
-1289
-1290        if not set(new_covobs.keys()).isdisjoint(new_deltas.keys()):
-1291            raise Exception('The same name has been used for deltas and covobs!')
-1292        new_samples = []
-1293        new_means = []
-1294        new_idl = []
-1295        new_names_obs = []
-1296        for name in new_names:
-1297            if name not in new_covobs:
-1298                if is_merged[name]:
-1299                    filtered_deltas, filtered_idl_d = _filter_zeroes(new_deltas[name], new_idl_d[name])
-1300                else:
-1301                    filtered_deltas = new_deltas[name]
-1302                    filtered_idl_d = new_idl_d[name]
+1158def derived_observable(func, data, array_mode=False, **kwargs):
+1159    """Construct a derived Obs according to func(data, **kwargs) using automatic differentiation.
+1160
+1161    Parameters
+1162    ----------
+1163    func : object
+1164        arbitrary function of the form func(data, **kwargs). For the
+1165        automatic differentiation to work, all numpy functions have to have
+1166        the autograd wrapper (use 'import autograd.numpy as anp').
+1167    data : list
+1168        list of Obs, e.g. [obs1, obs2, obs3].
+1169    num_grad : bool
+1170        if True, numerical derivatives are used instead of autograd
+1171        (default False). To control the numerical differentiation the
+1172        kwargs of numdifftools.step_generators.MaxStepGenerator
+1173        can be used.
+1174    man_grad : list
+1175        manually supply a list or an array which contains the jacobian
+1176        of func. Use cautiously, supplying the wrong derivative will
+1177        not be intercepted.
+1178
+1179    Notes
+1180    -----
+1181    For simple mathematical operations it can be practical to use anonymous
+1182    functions. For the ratio of two observables one can e.g. use
+1183
+1184    new_obs = derived_observable(lambda x: x[0] / x[1], [obs1, obs2])
+1185    """
+1186
+1187    data = np.asarray(data)
+1188    raveled_data = data.ravel()
+1189
+1190    # Workaround for matrix operations containing non Obs data
+1191    if not all(isinstance(x, Obs) for x in raveled_data):
+1192        for i in range(len(raveled_data)):
+1193            if isinstance(raveled_data[i], (int, float)):
+1194                raveled_data[i] = cov_Obs(raveled_data[i], 0.0, "###dummy_covobs###")
+1195
+1196    allcov = {}
+1197    for o in raveled_data:
+1198        for name in o.cov_names:
+1199            if name in allcov:
+1200                if not np.allclose(allcov[name], o.covobs[name].cov):
+1201                    raise Exception('Inconsistent covariance matrices for %s!' % (name))
+1202            else:
+1203                allcov[name] = o.covobs[name].cov
+1204
+1205    n_obs = len(raveled_data)
+1206    new_names = sorted(set([y for x in [o.names for o in raveled_data] for y in x]))
+1207    new_cov_names = sorted(set([y for x in [o.cov_names for o in raveled_data] for y in x]))
+1208    new_sample_names = sorted(set(new_names) - set(new_cov_names))
+1209
+1210    is_merged = {name: (len(list(filter(lambda o: o.is_merged.get(name, False) is True, raveled_data))) > 0) for name in new_sample_names}
+1211    reweighted = len(list(filter(lambda o: o.reweighted is True, raveled_data))) > 0
+1212
+1213    if data.ndim == 1:
+1214        values = np.array([o.value for o in data])
+1215    else:
+1216        values = np.vectorize(lambda x: x.value)(data)
+1217
+1218    new_values = func(values, **kwargs)
+1219
+1220    multi = int(isinstance(new_values, np.ndarray))
+1221
+1222    new_r_values = {}
+1223    new_idl_d = {}
+1224    for name in new_sample_names:
+1225        idl = []
+1226        tmp_values = np.zeros(n_obs)
+1227        for i, item in enumerate(raveled_data):
+1228            tmp_values[i] = item.r_values.get(name, item.value)
+1229            tmp_idl = item.idl.get(name)
+1230            if tmp_idl is not None:
+1231                idl.append(tmp_idl)
+1232        if multi > 0:
+1233            tmp_values = np.array(tmp_values).reshape(data.shape)
+1234        new_r_values[name] = func(tmp_values, **kwargs)
+1235        new_idl_d[name] = _merge_idx(idl)
+1236        if not is_merged[name]:
+1237            is_merged[name] = (1 != len(set([len(idx) for idx in [*idl, new_idl_d[name]]])))
+1238
+1239    if 'man_grad' in kwargs:
+1240        deriv = np.asarray(kwargs.get('man_grad'))
+1241        if new_values.shape + data.shape != deriv.shape:
+1242            raise Exception('Manual derivative does not have correct shape.')
+1243    elif kwargs.get('num_grad') is True:
+1244        if multi > 0:
+1245            raise Exception('Multi mode currently not supported for numerical derivative')
+1246        options = {
+1247            'base_step': 0.1,
+1248            'step_ratio': 2.5}
+1249        for key in options.keys():
+1250            kwarg = kwargs.get(key)
+1251            if kwarg is not None:
+1252                options[key] = kwarg
+1253        tmp_df = nd.Gradient(func, order=4, **{k: v for k, v in options.items() if v is not None})(values, **kwargs)
+1254        if tmp_df.size == 1:
+1255            deriv = np.array([tmp_df.real])
+1256        else:
+1257            deriv = tmp_df.real
+1258    else:
+1259        deriv = jacobian(func)(values, **kwargs)
+1260
+1261    final_result = np.zeros(new_values.shape, dtype=object)
+1262
+1263    if array_mode is True:
+1264
+1265        class _Zero_grad():
+1266            def __init__(self, N):
+1267                self.grad = np.zeros((N, 1))
+1268
+1269        new_covobs_lengths = dict(set([y for x in [[(n, o.covobs[n].N) for n in o.cov_names] for o in raveled_data] for y in x]))
+1270        d_extracted = {}
+1271        g_extracted = {}
+1272        for name in new_sample_names:
+1273            d_extracted[name] = []
+1274            ens_length = len(new_idl_d[name])
+1275            for i_dat, dat in enumerate(data):
+1276                d_extracted[name].append(np.array([_expand_deltas_for_merge(o.deltas.get(name, np.zeros(ens_length)), o.idl.get(name, new_idl_d[name]), o.shape.get(name, ens_length), new_idl_d[name]) for o in dat.reshape(np.prod(dat.shape))]).reshape(dat.shape + (ens_length, )))
+1277        for name in new_cov_names:
+1278            g_extracted[name] = []
+1279            zero_grad = _Zero_grad(new_covobs_lengths[name])
+1280            for i_dat, dat in enumerate(data):
+1281                g_extracted[name].append(np.array([o.covobs.get(name, zero_grad).grad for o in dat.reshape(np.prod(dat.shape))]).reshape(dat.shape + (new_covobs_lengths[name], 1)))
+1282
+1283    for i_val, new_val in np.ndenumerate(new_values):
+1284        new_deltas = {}
+1285        new_grad = {}
+1286        if array_mode is True:
+1287            for name in new_sample_names:
+1288                ens_length = d_extracted[name][0].shape[-1]
+1289                new_deltas[name] = np.zeros(ens_length)
+1290                for i_dat, dat in enumerate(d_extracted[name]):
+1291                    new_deltas[name] += np.tensordot(deriv[i_val + (i_dat, )], dat)
+1292            for name in new_cov_names:
+1293                new_grad[name] = 0
+1294                for i_dat, dat in enumerate(g_extracted[name]):
+1295                    new_grad[name] += np.tensordot(deriv[i_val + (i_dat, )], dat)
+1296        else:
+1297            for j_obs, obs in np.ndenumerate(data):
+1298                for name in obs.names:
+1299                    if name in obs.cov_names:
+1300                        new_grad[name] = new_grad.get(name, 0) + deriv[i_val + j_obs] * obs.covobs[name].grad
+1301                    else:
+1302                        new_deltas[name] = new_deltas.get(name, 0) + deriv[i_val + j_obs] * _expand_deltas_for_merge(obs.deltas[name], obs.idl[name], obs.shape[name], new_idl_d[name])
 1303
-1304                new_samples.append(filtered_deltas)
-1305                new_idl.append(filtered_idl_d)
-1306                new_means.append(new_r_values[name][i_val])
-1307                new_names_obs.append(name)
-1308        final_result[i_val] = Obs(new_samples, new_names_obs, means=new_means, idl=new_idl)
-1309        for name in new_covobs:
-1310            final_result[i_val].names.append(name)
-1311        final_result[i_val]._covobs = new_covobs
-1312        final_result[i_val]._value = new_val
-1313        final_result[i_val].is_merged = is_merged
-1314        final_result[i_val].reweighted = reweighted
-1315
-1316    if multi == 0:
-1317        final_result = final_result.item()
-1318
-1319    return final_result
+1304        new_covobs = {name: Covobs(0, allcov[name], name, grad=new_grad[name]) for name in new_grad}
+1305
+1306        if not set(new_covobs.keys()).isdisjoint(new_deltas.keys()):
+1307            raise Exception('The same name has been used for deltas and covobs!')
+1308        new_samples = []
+1309        new_means = []
+1310        new_idl = []
+1311        new_names_obs = []
+1312        for name in new_names:
+1313            if name not in new_covobs:
+1314                if is_merged[name]:
+1315                    filtered_deltas, filtered_idl_d = _filter_zeroes(new_deltas[name], new_idl_d[name])
+1316                else:
+1317                    filtered_deltas = new_deltas[name]
+1318                    filtered_idl_d = new_idl_d[name]
+1319
+1320                new_samples.append(filtered_deltas)
+1321                new_idl.append(filtered_idl_d)
+1322                new_means.append(new_r_values[name][i_val])
+1323                new_names_obs.append(name)
+1324        final_result[i_val] = Obs(new_samples, new_names_obs, means=new_means, idl=new_idl)
+1325        for name in new_covobs:
+1326            final_result[i_val].names.append(name)
+1327        final_result[i_val]._covobs = new_covobs
+1328        final_result[i_val]._value = new_val
+1329        final_result[i_val].is_merged = is_merged
+1330        final_result[i_val].reweighted = reweighted
+1331
+1332    if multi == 0:
+1333        final_result = final_result.item()
+1334
+1335    return final_result
 
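The anonymous-function example from the docstring generalizes to any differentiable function. A sketch showing both the autograd path and a manually supplied jacobian, on a hypothetical ensemble 'ens1':

    import autograd.numpy as anp  # autograd wrapper, as required by the docstring
    import numpy as np
    import pyerrors as pe

    obs1 = pe.Obs([np.random.normal(2.0, 0.2, 1000)], ['ens1'])
    obs2 = pe.Obs([np.random.normal(1.0, 0.1, 1000)], ['ens1'])

    # Automatic differentiation: use anp inside the function.
    log_ratio = pe.obs.derived_observable(lambda x: anp.log(x[0] / x[1]), [obs1, obs2])

    # Same ratio with a manual jacobian; its shape has to match the data.
    ratio = pe.obs.derived_observable(
        lambda x, **kwargs: x[0] / x[1], [obs1, obs2],
        man_grad=[1 / obs2.value, -obs1.value / obs2.value ** 2])

    log_ratio.gamma_method()
    ratio.gamma_method()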
@@ -4418,47 +4450,47 @@ functions. For the ratio of two observables one can e.g. use

-1359def reweight(weight, obs, **kwargs):
-1360    """Reweight a list of observables.
-1361
-1362    Parameters
-1363    ----------
-1364    weight : Obs
-1365        Reweighting factor. An Observable that has to be defined on a superset of the
-1366        configurations in obs[i].idl for all i.
-1367    obs : list
-1368        list of Obs, e.g. [obs1, obs2, obs3].
-1369    all_configs : bool
-1370        if True, the reweighted observables are normalized by the average of
-1371        the reweighting factor on all configurations in weight.idl and not
-1372        on the configurations in obs[i].idl. Default False.
-1373    """
-1374    result = []
-1375    for i in range(len(obs)):
-1376        if len(obs[i].cov_names):
-1377            raise Exception('Error: Not possible to reweight an Obs that contains covobs!')
-1378        if not set(obs[i].names).issubset(weight.names):
-1379            raise Exception('Error: Ensembles do not fit')
-1380        for name in obs[i].names:
-1381            if not set(obs[i].idl[name]).issubset(weight.idl[name]):
-1382                raise Exception('obs[%d] has to be defined on a subset of the configs in weight.idl[%s]!' % (i, name))
-1383        new_samples = []
-1384        w_deltas = {}
-1385        for name in sorted(obs[i].names):
-1386            w_deltas[name] = _reduce_deltas(weight.deltas[name], weight.idl[name], obs[i].idl[name])
-1387            new_samples.append((w_deltas[name] + weight.r_values[name]) * (obs[i].deltas[name] + obs[i].r_values[name]))
-1388        tmp_obs = Obs(new_samples, sorted(obs[i].names), idl=[obs[i].idl[name] for name in sorted(obs[i].names)])
-1389
-1390        if kwargs.get('all_configs'):
-1391            new_weight = weight
-1392        else:
-1393            new_weight = Obs([w_deltas[name] + weight.r_values[name] for name in sorted(obs[i].names)], sorted(obs[i].names), idl=[obs[i].idl[name] for name in sorted(obs[i].names)])
-1394
-1395        result.append(tmp_obs / new_weight)
-1396        result[-1].reweighted = True
-1397        result[-1].is_merged = obs[i].is_merged
-1398
-1399    return result
+1375def reweight(weight, obs, **kwargs):
+1376    """Reweight a list of observables.
+1377
+1378    Parameters
+1379    ----------
+1380    weight : Obs
+1381        Reweighting factor. An Observable that has to be defined on a superset of the
+1382        configurations in obs[i].idl for all i.
+1383    obs : list
+1384        list of Obs, e.g. [obs1, obs2, obs3].
+1385    all_configs : bool
+1386        if True, the reweighted observables are normalized by the average of
+1387        the reweighting factor on all configurations in weight.idl and not
+1388        on the configurations in obs[i].idl. Default False.
+1389    """
+1390    result = []
+1391    for i in range(len(obs)):
+1392        if len(obs[i].cov_names):
+1393            raise Exception('Error: Not possible to reweight an Obs that contains covobs!')
+1394        if not set(obs[i].names).issubset(weight.names):
+1395            raise Exception('Error: Ensembles do not fit')
+1396        for name in obs[i].names:
+1397            if not set(obs[i].idl[name]).issubset(weight.idl[name]):
+1398                raise Exception('obs[%d] has to be defined on a subset of the configs in weight.idl[%s]!' % (i, name))
+1399        new_samples = []
+1400        w_deltas = {}
+1401        for name in sorted(obs[i].names):
+1402            w_deltas[name] = _reduce_deltas(weight.deltas[name], weight.idl[name], obs[i].idl[name])
+1403            new_samples.append((w_deltas[name] + weight.r_values[name]) * (obs[i].deltas[name] + obs[i].r_values[name]))
+1404        tmp_obs = Obs(new_samples, sorted(obs[i].names), idl=[obs[i].idl[name] for name in sorted(obs[i].names)])
+1405
+1406        if kwargs.get('all_configs'):
+1407            new_weight = weight
+1408        else:
+1409            new_weight = Obs([w_deltas[name] + weight.r_values[name] for name in sorted(obs[i].names)], sorted(obs[i].names), idl=[obs[i].idl[name] for name in sorted(obs[i].names)])
+1410
+1411        result.append(tmp_obs / new_weight)
+1412        result[-1].reweighted = True
+1413        result[-1].is_merged = obs[i].is_merged
+1414
+1415    return result
 
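A usage sketch: the module-level function and the Obs.reweight method are equivalent. The reweighting factor and the observable below are toy data on a hypothetical ensemble 'ens1':

    import numpy as np
    import pyerrors as pe

    w = pe.Obs([np.random.uniform(0.8, 1.2, 1000)], ['ens1'])  # reweighting factor
    o = pe.Obs([np.random.normal(0.7, 0.05, 1000)], ['ens1'])

    o_rw = pe.obs.reweight(w, [o])[0]  # module-level function
    o_rw_method = o.reweight(w)        # equivalent Obs method
    o_rw.gamma_method()
    print(o_rw.reweighted)             # True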
@@ -4492,48 +4524,48 @@ on the configurations in obs[i].idl. Default False.
-1402def correlate(obs_a, obs_b):
-1403    """Correlate two observables.
-1404
-1405    Parameters
-1406    ----------
-1407    obs_a : Obs
-1408        First observable
-1409    obs_b : Obs
-1410        Second observable
-1411
-1412    Notes
-1413    -----
-1414    Keep in mind to only correlate primary observables which have not been reweighted
-1415    yet. The reweighting has to be applied after correlating the observables.
-1416    Currently only works if ensembles are identical (this is not strictly necessary).
-1417    """
-1418
-1419    if sorted(obs_a.names) != sorted(obs_b.names):
-1420        raise Exception('Ensembles do not fit')
-1421    if len(obs_a.cov_names) or len(obs_b.cov_names):
-1422        raise Exception('Error: Not possible to correlate Obs that contain covobs!')
-1423    for name in obs_a.names:
-1424        if obs_a.shape[name] != obs_b.shape[name]:
-1425            raise Exception('Shapes of ensemble', name, 'do not fit')
-1426        if obs_a.idl[name] != obs_b.idl[name]:
-1427            raise Exception('idl of ensemble', name, 'do not fit')
-1428
-1429    if obs_a.reweighted is True:
-1430        warnings.warn("The first observable is already reweighted.", RuntimeWarning)
-1431    if obs_b.reweighted is True:
-1432        warnings.warn("The second observable is already reweighted.", RuntimeWarning)
-1433
-1434    new_samples = []
-1435    new_idl = []
-1436    for name in sorted(obs_a.names):
-1437        new_samples.append((obs_a.deltas[name] + obs_a.r_values[name]) * (obs_b.deltas[name] + obs_b.r_values[name]))
-1438        new_idl.append(obs_a.idl[name])
-1439
-1440    o = Obs(new_samples, sorted(obs_a.names), idl=new_idl)
-1441    o.is_merged = {name: (obs_a.is_merged.get(name, False) or obs_b.is_merged.get(name, False)) for name in o.names}
-1442    o.reweighted = obs_a.reweighted or obs_b.reweighted
-1443    return o
+1418def correlate(obs_a, obs_b):
+1419    """Correlate two observables.
+1420
+1421    Parameters
+1422    ----------
+1423    obs_a : Obs
+1424        First observable
+1425    obs_b : Obs
+1426        Second observable
+1427
+1428    Notes
+1429    -----
+1430    Keep in mind to only correlate primary observables which have not been reweighted
+1431    yet. The reweighting has to be applied after correlating the observables.
+1432    Currently only works if ensembles are identical (this is not strictly necessary).
+1433    """
+1434
+1435    if sorted(obs_a.names) != sorted(obs_b.names):
+1436        raise Exception('Ensembles do not fit')
+1437    if len(obs_a.cov_names) or len(obs_b.cov_names):
+1438        raise Exception('Error: Not possible to correlate Obs that contain covobs!')
+1439    for name in obs_a.names:
+1440        if obs_a.shape[name] != obs_b.shape[name]:
+1441            raise Exception('Shapes of ensemble', name, 'do not fit')
+1442        if obs_a.idl[name] != obs_b.idl[name]:
+1443            raise Exception('idl of ensemble', name, 'do not fit')
+1444
+1445    if obs_a.reweighted is True:
+1446        warnings.warn("The first observable is already reweighted.", RuntimeWarning)
+1447    if obs_b.reweighted is True:
+1448        warnings.warn("The second observable is already reweighted.", RuntimeWarning)
+1449
+1450    new_samples = []
+1451    new_idl = []
+1452    for name in sorted(obs_a.names):
+1453        new_samples.append((obs_a.deltas[name] + obs_a.r_values[name]) * (obs_b.deltas[name] + obs_b.r_values[name]))
+1454        new_idl.append(obs_a.idl[name])
+1455
+1456    o = Obs(new_samples, sorted(obs_a.names), idl=new_idl)
+1457    o.is_merged = {name: (obs_a.is_merged.get(name, False) or obs_b.is_merged.get(name, False)) for name in o.names}
+1458    o.reweighted = obs_a.reweighted or obs_b.reweighted
+1459    return o
 
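A sketch of correlating two toy observables defined on identical configurations (hypothetical ensemble name 'ens1'); the configuration-by-configuration product keeps the correlations that a naive product of mean values would discard:

    import numpy as np
    import pyerrors as pe

    x = np.random.normal(1.0, 0.1, 500)
    y = 0.5 * x + np.random.normal(0.0, 0.05, 500)  # correlated by construction

    obs_x = pe.Obs([x], ['ens1'])
    obs_y = pe.Obs([y], ['ens1'])

    xy = pe.obs.correlate(obs_x, obs_y)  # <x*y> with per-config correlations intact
    xy.gamma_method()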
@@ -4568,71 +4600,71 @@ Currently only works if ensembles are identical (this is not strictly necessary)
-1446def covariance(obs, visualize=False, correlation=False, smooth=None, **kwargs):
-1447    r'''Calculates the error covariance matrix of a set of observables.
-1448
-1449    The gamma method has to be applied first to all observables.
-1450
-1451    Parameters
-1452    ----------
-1453    obs : list or numpy.ndarray
-1454        List or one dimensional array of Obs
-1455    visualize : bool
-1456        If True plots the corresponding normalized correlation matrix (default False).
-1457    correlation : bool
-1458        If True the correlation matrix instead of the error covariance matrix is returned (default False).
-1459    smooth : None or int
-1460        If smooth is an integer 'E' between 2 and the dimension of the matrix minus 1 the eigenvalue
-1461        smoothing procedure of hep-lat/9412087 is applied to the correlation matrix which leaves the
-1462        largest E eigenvalues essentially unchanged and smoothes the smaller eigenvalues to avoid extremely
-1463        small ones.
+1462def covariance(obs, visualize=False, correlation=False, smooth=None, **kwargs):
+1463    r'''Calculates the error covariance matrix of a set of observables.
 1464
-1465    Notes
-1466    -----
-1467    The error covariance is defined such that it agrees with the squared standard error for two identical observables
-1468    $$\operatorname{cov}(a,a)=\sum_{s=1}^N\delta_a^s\delta_a^s/N^2=\Gamma_{aa}(0)/N=\operatorname{var}(a)/N=\sigma_a^2$$
-1469    in the absence of autocorrelation.
-1470    The error covariance is estimated by calculating the correlation matrix assuming no autocorrelation and then rescaling the correlation matrix by the full errors including the previous gamma method estimate for the autocorrelation of the observables. The covariance at windowsize 0 is guaranteed to be positive semi-definite
-1471    $$\sum_{i,j}v_i\Gamma_{ij}(0)v_j=\frac{1}{N}\sum_{s=1}^N\sum_{i,j}v_i\delta_i^s\delta_j^s v_j=\frac{1}{N}\sum_{s=1}^N\sum_{i}|v_i\delta_i^s|^2\geq 0\,,$$ for every $v\in\mathbb{R}^M$, while such an identity does not hold for larger windows/lags.
-1472    For observables defined on a single ensemble our approximation is equivalent to assuming that the integrated autocorrelation time of an off-diagonal element is equal to the geometric mean of the integrated autocorrelation times of the corresponding diagonal elements.
-1473    $$\tau_{\mathrm{int}, ij}=\sqrt{\tau_{\mathrm{int}, i}\times \tau_{\mathrm{int}, j}}$$
-1474    This construction ensures that the estimated covariance matrix is positive semi-definite (up to numerical rounding errors).
-1475    '''
-1476
-1477    length = len(obs)
-1478
-1479    max_samples = np.max([o.N for o in obs])
-1480    if max_samples <= length and not [item for sublist in [o.cov_names for o in obs] for item in sublist]:
-1481        warnings.warn(f"The dimension of the covariance matrix ({length}) is larger or equal to the number of samples ({max_samples}). This will result in a rank deficient matrix.", RuntimeWarning)
-1482
-1483    cov = np.zeros((length, length))
-1484    for i in range(length):
-1485        for j in range(i, length):
-1486            cov[i, j] = _covariance_element(obs[i], obs[j])
-1487    cov = cov + cov.T - np.diag(np.diag(cov))
-1488
-1489    corr = np.diag(1 / np.sqrt(np.diag(cov))) @ cov @ np.diag(1 / np.sqrt(np.diag(cov)))
-1490
-1491    if isinstance(smooth, int):
-1492        corr = _smooth_eigenvalues(corr, smooth)
-1493
-1494    if visualize:
-1495        plt.matshow(corr, vmin=-1, vmax=1)
-1496        plt.set_cmap('RdBu')
-1497        plt.colorbar()
-1498        plt.draw()
-1499
-1500    if correlation is True:
-1501        return corr
-1502
-1503    errors = [o.dvalue for o in obs]
-1504    cov = np.diag(errors) @ corr @ np.diag(errors)
-1505
-1506    eigenvalues = np.linalg.eigh(cov)[0]
-1507    if not np.all(eigenvalues >= 0):
-1508        warnings.warn("Covariance matrix is not positive semi-definite (Eigenvalues: " + str(eigenvalues) + ")", RuntimeWarning)
+1465    The gamma method has to be applied first to all observables.
+1466
+1467    Parameters
+1468    ----------
+1469    obs : list or numpy.ndarray
+1470        List or one dimensional array of Obs
+1471    visualize : bool
+1472        If True plots the corresponding normalized correlation matrix (default False).
+1473    correlation : bool
+1474        If True the correlation matrix instead of the error covariance matrix is returned (default False).
+1475    smooth : None or int
+1476        If smooth is an integer 'E' between 2 and the dimension of the matrix minus 1 the eigenvalue
+1477        smoothing procedure of hep-lat/9412087 is applied to the correlation matrix which leaves the
+1478        largest E eigenvalues essentially unchanged and smoothes the smaller eigenvalues to avoid extremely
+1479        small ones.
+1480
+1481    Notes
+1482    -----
+1483    The error covariance is defined such that it agrees with the squared standard error for two identical observables
+1484    $$\operatorname{cov}(a,a)=\sum_{s=1}^N\delta_a^s\delta_a^s/N^2=\Gamma_{aa}(0)/N=\operatorname{var}(a)/N=\sigma_a^2$$
+1485    in the absence of autocorrelation.
+1486    The error covariance is estimated by calculating the correlation matrix assuming no autocorrelation and then rescaling the correlation matrix by the full errors including the previous gamma method estimate for the autocorrelation of the observables. The covariance at windowsize 0 is guaranteed to be positive semi-definite
+1487    $$\sum_{i,j}v_i\Gamma_{ij}(0)v_j=\frac{1}{N}\sum_{s=1}^N\sum_{i,j}v_i\delta_i^s\delta_j^s v_j=\frac{1}{N}\sum_{s=1}^N\sum_{i}|v_i\delta_i^s|^2\geq 0\,,$$ for every $v\in\mathbb{R}^M$, while such an identity does not hold for larger windows/lags.
+1488    For observables defined on a single ensemble our approximation is equivalent to assuming that the integrated autocorrelation time of an off-diagonal element is equal to the geometric mean of the integrated autocorrelation times of the corresponding diagonal elements.
+1489    $$\tau_{\mathrm{int}, ij}=\sqrt{\tau_{\mathrm{int}, i}\times \tau_{\mathrm{int}, j}}$$
+1490    This construction ensures that the estimated covariance matrix is positive semi-definite (up to numerical rounding errors).
+1491    '''
+1492
+1493    length = len(obs)
+1494
+1495    max_samples = np.max([o.N for o in obs])
+1496    if max_samples <= length and not [item for sublist in [o.cov_names for o in obs] for item in sublist]:
+1497        warnings.warn(f"The dimension of the covariance matrix ({length}) is larger or equal to the number of samples ({max_samples}). This will result in a rank deficient matrix.", RuntimeWarning)
+1498
+1499    cov = np.zeros((length, length))
+1500    for i in range(length):
+1501        for j in range(i, length):
+1502            cov[i, j] = _covariance_element(obs[i], obs[j])
+1503    cov = cov + cov.T - np.diag(np.diag(cov))
+1504
+1505    corr = np.diag(1 / np.sqrt(np.diag(cov))) @ cov @ np.diag(1 / np.sqrt(np.diag(cov)))
+1506
+1507    if isinstance(smooth, int):
+1508        corr = _smooth_eigenvalues(corr, smooth)
 1509
-1510    return cov
+1510    if visualize:
+1511        plt.matshow(corr, vmin=-1, vmax=1)
+1512        plt.set_cmap('RdBu')
+1513        plt.colorbar()
+1514        plt.draw()
+1515
+1516    if correlation is True:
+1517        return corr
+1518
+1519    errors = [o.dvalue for o in obs]
+1520    cov = np.diag(errors) @ corr @ np.diag(errors)
+1521
+1522    eigenvalues = np.linalg.eigh(cov)[0]
+1523    if not np.all(eigenvalues >= 0):
+1524        warnings.warn("Covariance matrix is not positive semi-definite (Eigenvalues: " + str(eigenvalues) + ")", RuntimeWarning)
+1525
+1526    return cov
 
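A sketch of estimating the error covariance of two observables; as the docstring states, the gamma method has to be run on every Obs first. Ensemble name and numbers are made up:

    import numpy as np
    import pyerrors as pe

    obs1 = pe.Obs([np.random.normal(1.0, 0.1, 1000)], ['ens1'])
    obs2 = pe.Obs([np.random.normal(2.0, 0.2, 1000)], ['ens1'])
    obs1.gamma_method()
    obs2.gamma_method()

    cov = pe.obs.covariance([obs1, obs2])                     # 2x2 error covariance
    corr = pe.obs.covariance([obs1, obs2], correlation=True)  # normalized correlation matrix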
@@ -4681,24 +4713,24 @@ This construction ensures that the estimated covariance matrix is positive semi-
-1590def import_jackknife(jacks, name, idl=None):
-1591    """Imports jackknife samples and returns an Obs
-1592
-1593    Parameters
-1594    ----------
-1595    jacks : numpy.ndarray
-1596        numpy array containing the mean value as zeroth entry and
-1597        the N jackknife samples as first to Nth entry.
-1598    name : str
-1599        name of the ensemble the samples are defined on.
-1600    """
-1601    length = len(jacks) - 1
-1602    prj = (np.ones((length, length)) - (length - 1) * np.identity(length))
-1603    samples = jacks[1:] @ prj
-1604    mean = np.mean(samples)
-1605    new_obs = Obs([samples - mean], [name], idl=idl, means=[mean])
-1606    new_obs._value = jacks[0]
-1607    return new_obs
+1606def import_jackknife(jacks, name, idl=None):
+1607    """Imports jackknife samples and returns an Obs
+1608
+1609    Parameters
+1610    ----------
+1611    jacks : numpy.ndarray
+1612        numpy array containing the mean value as zeroth entry and
+1613        the N jackknife samples as first to Nth entry.
+1614    name : str
+1615        name of the ensemble the samples are defined on.
+1616    """
+1617    length = len(jacks) - 1
+1618    prj = (np.ones((length, length)) - (length - 1) * np.identity(length))
+1619    samples = jacks[1:] @ prj
+1620    mean = np.mean(samples)
+1621    new_obs = Obs([samples - mean], [name], idl=idl, means=[mean])
+1622    new_obs._value = jacks[0]
+1623    return new_obs
 
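import_jackknife inverts the jackknife resampling: the projector prj undoes the leave-one-out averaging and recovers the per-configuration samples. A round-trip sketch using the export_jackknife method of Obs (ensemble name hypothetical):

    import numpy as np
    import pyerrors as pe

    obs = pe.Obs([np.random.normal(1.0, 0.1, 200)], ['ens1'])
    jacks = obs.export_jackknife()            # mean + 200 jackknife samples
    rec = pe.obs.import_jackknife(jacks, 'ens1')
    rec.gamma_method()
    print(obs.value, rec.value)               # identical central values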
@@ -4728,35 +4760,35 @@ name of the ensemble the samples are defined on.
-1610def merge_obs(list_of_obs):
-1611    """Combine all observables in list_of_obs into one new observable
-1612
-1613    Parameters
-1614    ----------
-1615    list_of_obs : list
-1616        list of the Obs objects to be combined
-1617
-1618    Notes
-1619    -----
-1620    It is not possible to combine obs which are based on the same replicum
-1621    """
-1622    replist = [item for obs in list_of_obs for item in obs.names]
-1623    if (len(replist) == len(set(replist))) is False:
-1624        raise Exception('list_of_obs contains duplicate replica: %s' % (str(replist)))
-1625    if any([len(o.cov_names) for o in list_of_obs]):
-1626        raise Exception('Not possible to merge data that contains covobs!')
-1627    new_dict = {}
-1628    idl_dict = {}
-1629    for o in list_of_obs:
-1630        new_dict.update({key: o.deltas.get(key, 0) + o.r_values.get(key, 0)
-1631                        for key in set(o.deltas) | set(o.r_values)})
-1632        idl_dict.update({key: o.idl.get(key, 0) for key in set(o.deltas)})
+1626def merge_obs(list_of_obs):
+1627    """Combine all observables in list_of_obs into one new observable
+1628
+1629    Parameters
+1630    ----------
+1631    list_of_obs : list
+1632        list of the Obs objects to be combined
 1633
-1634    names = sorted(new_dict.keys())
-1635    o = Obs([new_dict[name] for name in names], names, idl=[idl_dict[name] for name in names])
-1636    o.is_merged = {name: np.any([oi.is_merged.get(name, False) for oi in list_of_obs]) for name in o.names}
-1637    o.reweighted = np.max([oi.reweighted for oi in list_of_obs])
-1638    return o
+1634    Notes
+1635    -----
+1636    It is not possible to combine obs which are based on the same replicum
+1637    """
+1638    replist = [item for obs in list_of_obs for item in obs.names]
+1639    if (len(replist) == len(set(replist))) is False:
+1640        raise Exception('list_of_obs contains duplicate replica: %s' % (str(replist)))
+1641    if any([len(o.cov_names) for o in list_of_obs]):
+1642        raise Exception('Not possible to merge data that contains covobs!')
+1643    new_dict = {}
+1644    idl_dict = {}
+1645    for o in list_of_obs:
+1646        new_dict.update({key: o.deltas.get(key, 0) + o.r_values.get(key, 0)
+1647                        for key in set(o.deltas) | set(o.r_values)})
+1648        idl_dict.update({key: o.idl.get(key, 0) for key in set(o.deltas)})
+1649
+1650    names = sorted(new_dict.keys())
+1651    o = Obs([new_dict[name] for name in names], names, idl=[idl_dict[name] for name in names])
+1652    o.is_merged = {name: np.any([oi.is_merged.get(name, False) for oi in list_of_obs]) for name in o.names}
+1653    o.reweighted = np.max([oi.reweighted for oi in list_of_obs])
+1654    return o
 
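merge_obs combines measurements of the same quantity on disjoint replica into a single Obs. A sketch with two hypothetical replica of an ensemble 'ens1', assuming the usual pyerrors convention of separating replicum names with '|':

    import numpy as np
    import pyerrors as pe

    part1 = pe.Obs([np.random.normal(1.0, 0.1, 400)], ['ens1|r01'])
    part2 = pe.Obs([np.random.normal(1.0, 0.1, 600)], ['ens1|r02'])

    merged = pe.obs.merge_obs([part1, part2])
    merged.gamma_method()
    print(merged)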
@@ -4787,47 +4819,47 @@ list of the Obs object to be combined
-1641def cov_Obs(means, cov, name, grad=None):
-1642    """Create an Obs based on mean(s) and a covariance matrix
-1643
-1644    Parameters
-1645    ----------
-1646    means : list of floats or float
-1647        N mean value(s) of the new Obs
-1648    cov : list or array
-1649        2d (NxN) Covariance matrix, 1d diagonal entries or 0d covariance
-1650    name : str
-1651        identifier for the covariance matrix
-1652    grad : list or array
-1653        Gradient of the Covobs wrt. the means belonging to cov.
-1654    """
-1655
-1656    def covobs_to_obs(co):
-1657        """Make an Obs out of a Covobs
-1658
-1659        Parameters
-1660        ----------
-1661        co : Covobs
-1662            Covobs to be embedded into the Obs
-1663        """
-1664        o = Obs([], [], means=[])
-1665        o._value = co.value
-1666        o.names.append(co.name)
-1667        o._covobs[co.name] = co
-1668        o._dvalue = np.sqrt(co.errsq())
-1669        return o
-1670
-1671    ol = []
-1672    if isinstance(means, (float, int)):
-1673        means = [means]
+1657def cov_Obs(means, cov, name, grad=None):
+1658    """Create an Obs based on mean(s) and a covariance matrix
+1659
+1660    Parameters
+1661    ----------
+1662    means : list of floats or float
+1663        N mean value(s) of the new Obs
+1664    cov : list or array
+1665        2d (NxN) Covariance matrix, 1d diagonal entries or 0d covariance
+1666    name : str
+1667        identifier for the covariance matrix
+1668    grad : list or array
+1669        Gradient of the Covobs wrt. the means belonging to cov.
+1670    """
+1671
+1672    def covobs_to_obs(co):
+1673        """Make an Obs out of a Covobs
 1674
-1675    for i in range(len(means)):
-1676        ol.append(covobs_to_obs(Covobs(means[i], cov, name, pos=i, grad=grad)))
-1677    if ol[0].covobs[name].N != len(means):
-1678        raise Exception('You have to provide %d mean values!' % (ol[0].N))
-1679    if len(ol) == 1:
-1680        return ol[0]
-1681    return ol
+1675        Parameters
+1676        ----------
+1677        co : Covobs
+1678            Covobs to be embedded into the Obs
+1679        """
+1680        o = Obs([], [], means=[])
+1681        o._value = co.value
+1682        o.names.append(co.name)
+1683        o._covobs[co.name] = co
+1684        o._dvalue = np.sqrt(co.errsq())
+1685        return o
+1686
+1687    ol = []
+1688    if isinstance(means, (float, int)):
+1689        means = [means]
+1690
+1691    for i in range(len(means)):
+1692        ol.append(covobs_to_obs(Covobs(means[i], cov, name, pos=i, grad=grad)))
+1693    if ol[0].covobs[name].N != len(means):
+1694        raise Exception('You have to provide %d mean values!' % (ol[0].N))
+1695    if len(ol) == 1:
+1696        return ol[0]
+1697    return ol
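A sketch of creating covobs from external inputs, once for a single value with a 0d covariance (the squared error) and once for two correlated values; all numbers and identifiers are made up:

    import pyerrors as pe

    # Scalar input: central value plus squared error.
    z = pe.cov_Obs(1.11, 0.02 ** 2, 'renormalization_input')

    # Two correlated inputs from a 2x2 covariance matrix (returns a list of Obs).
    cov = [[0.0004, 0.0001],
           [0.0001, 0.0009]]
    a, b = pe.cov_Obs([1.0, 2.0], cov, 'correlated_inputs')

    combined = a * b + z
    combined.gamma_method()
    print(combined)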