From 2a0b58c388b97e07638458d6538342762b5472cd Mon Sep 17 00:00:00 2001 From: fjosw Date: Thu, 20 Apr 2023 16:35:11 +0000 Subject: [PATCH] Documentation updated --- docs/pyerrors/obs.html | 3212 ++++++++++++++++++++-------------------- 1 file changed, 1617 insertions(+), 1595 deletions(-) diff --git a/docs/pyerrors/obs.html b/docs/pyerrors/obs.html index 08587452..1d790a77 100644 --- a/docs/pyerrors/obs.html +++ b/docs/pyerrors/obs.html @@ -902,948 +902,961 @@ 695 def __str__(self): 696 return _format_uncertainty(self.value, self._dvalue) 697 - 698 def __hash__(self): - 699 hash_tuple = (np.array([self.value]).astype(np.float32).data.tobytes(),) - 700 hash_tuple += tuple([o.astype(np.float32).data.tobytes() for o in self.deltas.values()]) - 701 hash_tuple += tuple([np.array([o.errsq()]).astype(np.float32).data.tobytes() for o in self.covobs.values()]) - 702 hash_tuple += tuple([o.encode() for o in self.names]) - 703 m = hashlib.md5() - 704 [m.update(o) for o in hash_tuple] - 705 return int(m.hexdigest(), 16) & 0xFFFFFFFF + 698 def __format__(self, format_type): + 699 my_str = _format_uncertainty(self.value, self._dvalue, + 700 significance=int(float(format_type.replace("+", "").replace("-", "")))) + 701 for char in ["+", " "]: + 702 if format_type.startswith(char): + 703 if my_str[0] != "-": + 704 my_str = char + my_str + 705 return my_str 706 - 707 # Overload comparisons - 708 def __lt__(self, other): - 709 return self.value < other - 710 - 711 def __le__(self, other): - 712 return self.value <= other - 713 - 714 def __gt__(self, other): - 715 return self.value > other - 716 - 717 def __ge__(self, other): - 718 return self.value >= other + 707 def __hash__(self): + 708 hash_tuple = (np.array([self.value]).astype(np.float32).data.tobytes(),) + 709 hash_tuple += tuple([o.astype(np.float32).data.tobytes() for o in self.deltas.values()]) + 710 hash_tuple += tuple([np.array([o.errsq()]).astype(np.float32).data.tobytes() for o in self.covobs.values()]) + 711 hash_tuple += tuple([o.encode() for o in self.names]) + 712 m = hashlib.md5() + 713 [m.update(o) for o in hash_tuple] + 714 return int(m.hexdigest(), 16) & 0xFFFFFFFF + 715 + 716 # Overload comparisons + 717 def __lt__(self, other): + 718 return self.value < other 719 - 720 def __eq__(self, other): - 721 return (self - other).is_zero() + 720 def __le__(self, other): + 721 return self.value <= other 722 - 723 def __ne__(self, other): - 724 return not (self - other).is_zero() + 723 def __gt__(self, other): + 724 return self.value > other 725 - 726 # Overload math operations - 727 def __add__(self, y): - 728 if isinstance(y, Obs): - 729 return derived_observable(lambda x, **kwargs: x[0] + x[1], [self, y], man_grad=[1, 1]) - 730 else: - 731 if isinstance(y, np.ndarray): - 732 return np.array([self + o for o in y]) - 733 elif y.__class__.__name__ in ['Corr', 'CObs']: - 734 return NotImplemented - 735 else: - 736 return derived_observable(lambda x, **kwargs: x[0] + y, [self], man_grad=[1]) - 737 - 738 def __radd__(self, y): - 739 return self + y - 740 - 741 def __mul__(self, y): - 742 if isinstance(y, Obs): - 743 return derived_observable(lambda x, **kwargs: x[0] * x[1], [self, y], man_grad=[y.value, self.value]) - 744 else: - 745 if isinstance(y, np.ndarray): - 746 return np.array([self * o for o in y]) - 747 elif isinstance(y, complex): - 748 return CObs(self * y.real, self * y.imag) - 749 elif y.__class__.__name__ in ['Corr', 'CObs']: - 750 return NotImplemented - 751 else: - 752 return derived_observable(lambda x, **kwargs: x[0] * y, [self], 
man_grad=[y]) - 753 - 754 def __rmul__(self, y): - 755 return self * y - 756 - 757 def __sub__(self, y): - 758 if isinstance(y, Obs): - 759 return derived_observable(lambda x, **kwargs: x[0] - x[1], [self, y], man_grad=[1, -1]) - 760 else: - 761 if isinstance(y, np.ndarray): - 762 return np.array([self - o for o in y]) - 763 elif y.__class__.__name__ in ['Corr', 'CObs']: - 764 return NotImplemented - 765 else: - 766 return derived_observable(lambda x, **kwargs: x[0] - y, [self], man_grad=[1]) - 767 - 768 def __rsub__(self, y): - 769 return -1 * (self - y) - 770 - 771 def __pos__(self): - 772 return self - 773 - 774 def __neg__(self): - 775 return -1 * self + 726 def __ge__(self, other): + 727 return self.value >= other + 728 + 729 def __eq__(self, other): + 730 return (self - other).is_zero() + 731 + 732 def __ne__(self, other): + 733 return not (self - other).is_zero() + 734 + 735 # Overload math operations + 736 def __add__(self, y): + 737 if isinstance(y, Obs): + 738 return derived_observable(lambda x, **kwargs: x[0] + x[1], [self, y], man_grad=[1, 1]) + 739 else: + 740 if isinstance(y, np.ndarray): + 741 return np.array([self + o for o in y]) + 742 elif y.__class__.__name__ in ['Corr', 'CObs']: + 743 return NotImplemented + 744 else: + 745 return derived_observable(lambda x, **kwargs: x[0] + y, [self], man_grad=[1]) + 746 + 747 def __radd__(self, y): + 748 return self + y + 749 + 750 def __mul__(self, y): + 751 if isinstance(y, Obs): + 752 return derived_observable(lambda x, **kwargs: x[0] * x[1], [self, y], man_grad=[y.value, self.value]) + 753 else: + 754 if isinstance(y, np.ndarray): + 755 return np.array([self * o for o in y]) + 756 elif isinstance(y, complex): + 757 return CObs(self * y.real, self * y.imag) + 758 elif y.__class__.__name__ in ['Corr', 'CObs']: + 759 return NotImplemented + 760 else: + 761 return derived_observable(lambda x, **kwargs: x[0] * y, [self], man_grad=[y]) + 762 + 763 def __rmul__(self, y): + 764 return self * y + 765 + 766 def __sub__(self, y): + 767 if isinstance(y, Obs): + 768 return derived_observable(lambda x, **kwargs: x[0] - x[1], [self, y], man_grad=[1, -1]) + 769 else: + 770 if isinstance(y, np.ndarray): + 771 return np.array([self - o for o in y]) + 772 elif y.__class__.__name__ in ['Corr', 'CObs']: + 773 return NotImplemented + 774 else: + 775 return derived_observable(lambda x, **kwargs: x[0] - y, [self], man_grad=[1]) 776 - 777 def __truediv__(self, y): - 778 if isinstance(y, Obs): - 779 return derived_observable(lambda x, **kwargs: x[0] / x[1], [self, y], man_grad=[1 / y.value, - self.value / y.value ** 2]) - 780 else: - 781 if isinstance(y, np.ndarray): - 782 return np.array([self / o for o in y]) - 783 elif y.__class__.__name__ in ['Corr', 'CObs']: - 784 return NotImplemented - 785 else: - 786 return derived_observable(lambda x, **kwargs: x[0] / y, [self], man_grad=[1 / y]) - 787 - 788 def __rtruediv__(self, y): - 789 if isinstance(y, Obs): - 790 return derived_observable(lambda x, **kwargs: x[0] / x[1], [y, self], man_grad=[1 / self.value, - y.value / self.value ** 2]) - 791 else: - 792 if isinstance(y, np.ndarray): - 793 return np.array([o / self for o in y]) - 794 elif y.__class__.__name__ in ['Corr', 'CObs']: - 795 return NotImplemented - 796 else: - 797 return derived_observable(lambda x, **kwargs: y / x[0], [self], man_grad=[-y / self.value ** 2]) - 798 - 799 def __pow__(self, y): - 800 if isinstance(y, Obs): - 801 return derived_observable(lambda x: x[0] ** x[1], [self, y]) - 802 else: - 803 return derived_observable(lambda x: x[0] 
** y, [self]) - 804 - 805 def __rpow__(self, y): - 806 if isinstance(y, Obs): - 807 return derived_observable(lambda x: x[0] ** x[1], [y, self]) - 808 else: - 809 return derived_observable(lambda x: y ** x[0], [self]) - 810 - 811 def __abs__(self): - 812 return derived_observable(lambda x: anp.abs(x[0]), [self]) + 777 def __rsub__(self, y): + 778 return -1 * (self - y) + 779 + 780 def __pos__(self): + 781 return self + 782 + 783 def __neg__(self): + 784 return -1 * self + 785 + 786 def __truediv__(self, y): + 787 if isinstance(y, Obs): + 788 return derived_observable(lambda x, **kwargs: x[0] / x[1], [self, y], man_grad=[1 / y.value, - self.value / y.value ** 2]) + 789 else: + 790 if isinstance(y, np.ndarray): + 791 return np.array([self / o for o in y]) + 792 elif y.__class__.__name__ in ['Corr', 'CObs']: + 793 return NotImplemented + 794 else: + 795 return derived_observable(lambda x, **kwargs: x[0] / y, [self], man_grad=[1 / y]) + 796 + 797 def __rtruediv__(self, y): + 798 if isinstance(y, Obs): + 799 return derived_observable(lambda x, **kwargs: x[0] / x[1], [y, self], man_grad=[1 / self.value, - y.value / self.value ** 2]) + 800 else: + 801 if isinstance(y, np.ndarray): + 802 return np.array([o / self for o in y]) + 803 elif y.__class__.__name__ in ['Corr', 'CObs']: + 804 return NotImplemented + 805 else: + 806 return derived_observable(lambda x, **kwargs: y / x[0], [self], man_grad=[-y / self.value ** 2]) + 807 + 808 def __pow__(self, y): + 809 if isinstance(y, Obs): + 810 return derived_observable(lambda x: x[0] ** x[1], [self, y]) + 811 else: + 812 return derived_observable(lambda x: x[0] ** y, [self]) 813 - 814 # Overload numpy functions - 815 def sqrt(self): - 816 return derived_observable(lambda x, **kwargs: np.sqrt(x[0]), [self], man_grad=[1 / 2 / np.sqrt(self.value)]) - 817 - 818 def log(self): - 819 return derived_observable(lambda x, **kwargs: np.log(x[0]), [self], man_grad=[1 / self.value]) - 820 - 821 def exp(self): - 822 return derived_observable(lambda x, **kwargs: np.exp(x[0]), [self], man_grad=[np.exp(self.value)]) - 823 - 824 def sin(self): - 825 return derived_observable(lambda x, **kwargs: np.sin(x[0]), [self], man_grad=[np.cos(self.value)]) + 814 def __rpow__(self, y): + 815 if isinstance(y, Obs): + 816 return derived_observable(lambda x: x[0] ** x[1], [y, self]) + 817 else: + 818 return derived_observable(lambda x: y ** x[0], [self]) + 819 + 820 def __abs__(self): + 821 return derived_observable(lambda x: anp.abs(x[0]), [self]) + 822 + 823 # Overload numpy functions + 824 def sqrt(self): + 825 return derived_observable(lambda x, **kwargs: np.sqrt(x[0]), [self], man_grad=[1 / 2 / np.sqrt(self.value)]) 826 - 827 def cos(self): - 828 return derived_observable(lambda x, **kwargs: np.cos(x[0]), [self], man_grad=[-np.sin(self.value)]) + 827 def log(self): + 828 return derived_observable(lambda x, **kwargs: np.log(x[0]), [self], man_grad=[1 / self.value]) 829 - 830 def tan(self): - 831 return derived_observable(lambda x, **kwargs: np.tan(x[0]), [self], man_grad=[1 / np.cos(self.value) ** 2]) + 830 def exp(self): + 831 return derived_observable(lambda x, **kwargs: np.exp(x[0]), [self], man_grad=[np.exp(self.value)]) 832 - 833 def arcsin(self): - 834 return derived_observable(lambda x: anp.arcsin(x[0]), [self]) + 833 def sin(self): + 834 return derived_observable(lambda x, **kwargs: np.sin(x[0]), [self], man_grad=[np.cos(self.value)]) 835 - 836 def arccos(self): - 837 return derived_observable(lambda x: anp.arccos(x[0]), [self]) + 836 def cos(self): + 837 return 
derived_observable(lambda x, **kwargs: np.cos(x[0]), [self], man_grad=[-np.sin(self.value)]) 838 - 839 def arctan(self): - 840 return derived_observable(lambda x: anp.arctan(x[0]), [self]) + 839 def tan(self): + 840 return derived_observable(lambda x, **kwargs: np.tan(x[0]), [self], man_grad=[1 / np.cos(self.value) ** 2]) 841 - 842 def sinh(self): - 843 return derived_observable(lambda x, **kwargs: np.sinh(x[0]), [self], man_grad=[np.cosh(self.value)]) + 842 def arcsin(self): + 843 return derived_observable(lambda x: anp.arcsin(x[0]), [self]) 844 - 845 def cosh(self): - 846 return derived_observable(lambda x, **kwargs: np.cosh(x[0]), [self], man_grad=[np.sinh(self.value)]) + 845 def arccos(self): + 846 return derived_observable(lambda x: anp.arccos(x[0]), [self]) 847 - 848 def tanh(self): - 849 return derived_observable(lambda x, **kwargs: np.tanh(x[0]), [self], man_grad=[1 / np.cosh(self.value) ** 2]) + 848 def arctan(self): + 849 return derived_observable(lambda x: anp.arctan(x[0]), [self]) 850 - 851 def arcsinh(self): - 852 return derived_observable(lambda x: anp.arcsinh(x[0]), [self]) + 851 def sinh(self): + 852 return derived_observable(lambda x, **kwargs: np.sinh(x[0]), [self], man_grad=[np.cosh(self.value)]) 853 - 854 def arccosh(self): - 855 return derived_observable(lambda x: anp.arccosh(x[0]), [self]) + 854 def cosh(self): + 855 return derived_observable(lambda x, **kwargs: np.cosh(x[0]), [self], man_grad=[np.sinh(self.value)]) 856 - 857 def arctanh(self): - 858 return derived_observable(lambda x: anp.arctanh(x[0]), [self]) + 857 def tanh(self): + 858 return derived_observable(lambda x, **kwargs: np.tanh(x[0]), [self], man_grad=[1 / np.cosh(self.value) ** 2]) 859 - 860 - 861class CObs: - 862 """Class for a complex valued observable.""" - 863 __slots__ = ['_real', '_imag', 'tag'] - 864 - 865 def __init__(self, real, imag=0.0): - 866 self._real = real - 867 self._imag = imag - 868 self.tag = None + 860 def arcsinh(self): + 861 return derived_observable(lambda x: anp.arcsinh(x[0]), [self]) + 862 + 863 def arccosh(self): + 864 return derived_observable(lambda x: anp.arccosh(x[0]), [self]) + 865 + 866 def arctanh(self): + 867 return derived_observable(lambda x: anp.arctanh(x[0]), [self]) + 868 869 - 870 @property - 871 def real(self): - 872 return self._real + 870class CObs: + 871 """Class for a complex valued observable.""" + 872 __slots__ = ['_real', '_imag', 'tag'] 873 - 874 @property - 875 def imag(self): - 876 return self._imag - 877 - 878 def gamma_method(self, **kwargs): - 879 """Executes the gamma_method for the real and the imaginary part.""" - 880 if isinstance(self.real, Obs): - 881 self.real.gamma_method(**kwargs) - 882 if isinstance(self.imag, Obs): - 883 self.imag.gamma_method(**kwargs) - 884 - 885 def is_zero(self): - 886 """Checks whether both real and imaginary part are zero within machine precision.""" - 887 return self.real == 0.0 and self.imag == 0.0 - 888 - 889 def conjugate(self): - 890 return CObs(self.real, -self.imag) - 891 - 892 def __add__(self, other): - 893 if isinstance(other, np.ndarray): - 894 return other + self - 895 elif hasattr(other, 'real') and hasattr(other, 'imag'): - 896 return CObs(self.real + other.real, - 897 self.imag + other.imag) - 898 else: - 899 return CObs(self.real + other, self.imag) + 874 def __init__(self, real, imag=0.0): + 875 self._real = real + 876 self._imag = imag + 877 self.tag = None + 878 + 879 @property + 880 def real(self): + 881 return self._real + 882 + 883 @property + 884 def imag(self): + 885 return self._imag + 886 
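The `__format__` overload added at lines 698-705 of the updated file wires Python's format-spec machinery to `_format_uncertainty`, parsing the requested significance from the spec and honouring a leading `+` or space flag. A minimal sketch of how this could be exercised, assuming `pe.pseudo_Obs` is available to fabricate a test observable, as in current pyerrors releases (printed values are illustrative, since the pseudo samples are random):

    import pyerrors as pe

    obs = pe.pseudo_Obs(0.345, 0.0045, 'ens1')  # hypothetical test observable
    obs.gamma_method()        # error estimation first; the dvalue is zero before this
    print(f"{obs:2}")         # two significant digits on the error, e.g. 0.3450(45)
    print(f"{obs:+3}")        # leading sign and three significant digits
    print(f"{obs: 2}")        # space flag pads non-negative values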
+ 887 def gamma_method(self, **kwargs): + 888 """Executes the gamma_method for the real and the imaginary part.""" + 889 if isinstance(self.real, Obs): + 890 self.real.gamma_method(**kwargs) + 891 if isinstance(self.imag, Obs): + 892 self.imag.gamma_method(**kwargs) + 893 + 894 def is_zero(self): + 895 """Checks whether both real and imaginary part are zero within machine precision.""" + 896 return self.real == 0.0 and self.imag == 0.0 + 897 + 898 def conjugate(self): + 899 return CObs(self.real, -self.imag) 900 - 901 def __radd__(self, y): - 902 return self + y - 903 - 904 def __sub__(self, other): - 905 if isinstance(other, np.ndarray): - 906 return -1 * (other - self) - 907 elif hasattr(other, 'real') and hasattr(other, 'imag'): - 908 return CObs(self.real - other.real, self.imag - other.imag) - 909 else: - 910 return CObs(self.real - other, self.imag) - 911 - 912 def __rsub__(self, other): - 913 return -1 * (self - other) - 914 - 915 def __mul__(self, other): - 916 if isinstance(other, np.ndarray): - 917 return other * self - 918 elif hasattr(other, 'real') and hasattr(other, 'imag'): - 919 if all(isinstance(i, Obs) for i in [self.real, self.imag, other.real, other.imag]): - 920 return CObs(derived_observable(lambda x, **kwargs: x[0] * x[1] - x[2] * x[3], - 921 [self.real, other.real, self.imag, other.imag], - 922 man_grad=[other.real.value, self.real.value, -other.imag.value, -self.imag.value]), - 923 derived_observable(lambda x, **kwargs: x[2] * x[1] + x[0] * x[3], - 924 [self.real, other.real, self.imag, other.imag], - 925 man_grad=[other.imag.value, self.imag.value, other.real.value, self.real.value])) - 926 elif getattr(other, 'imag', 0) != 0: - 927 return CObs(self.real * other.real - self.imag * other.imag, - 928 self.imag * other.real + self.real * other.imag) - 929 else: - 930 return CObs(self.real * other.real, self.imag * other.real) - 931 else: - 932 return CObs(self.real * other, self.imag * other) - 933 - 934 def __rmul__(self, other): - 935 return self * other - 936 - 937 def __truediv__(self, other): - 938 if isinstance(other, np.ndarray): - 939 return 1 / (other / self) - 940 elif hasattr(other, 'real') and hasattr(other, 'imag'): - 941 r = other.real ** 2 + other.imag ** 2 - 942 return CObs((self.real * other.real + self.imag * other.imag) / r, (self.imag * other.real - self.real * other.imag) / r) - 943 else: - 944 return CObs(self.real / other, self.imag / other) + 901 def __add__(self, other): + 902 if isinstance(other, np.ndarray): + 903 return other + self + 904 elif hasattr(other, 'real') and hasattr(other, 'imag'): + 905 return CObs(self.real + other.real, + 906 self.imag + other.imag) + 907 else: + 908 return CObs(self.real + other, self.imag) + 909 + 910 def __radd__(self, y): + 911 return self + y + 912 + 913 def __sub__(self, other): + 914 if isinstance(other, np.ndarray): + 915 return -1 * (other - self) + 916 elif hasattr(other, 'real') and hasattr(other, 'imag'): + 917 return CObs(self.real - other.real, self.imag - other.imag) + 918 else: + 919 return CObs(self.real - other, self.imag) + 920 + 921 def __rsub__(self, other): + 922 return -1 * (self - other) + 923 + 924 def __mul__(self, other): + 925 if isinstance(other, np.ndarray): + 926 return other * self + 927 elif hasattr(other, 'real') and hasattr(other, 'imag'): + 928 if all(isinstance(i, Obs) for i in [self.real, self.imag, other.real, other.imag]): + 929 return CObs(derived_observable(lambda x, **kwargs: x[0] * x[1] - x[2] * x[3], + 930 [self.real, other.real, self.imag, other.imag], + 931 
man_grad=[other.real.value, self.real.value, -other.imag.value, -self.imag.value]), + 932 derived_observable(lambda x, **kwargs: x[2] * x[1] + x[0] * x[3], + 933 [self.real, other.real, self.imag, other.imag], + 934 man_grad=[other.imag.value, self.imag.value, other.real.value, self.real.value])) + 935 elif getattr(other, 'imag', 0) != 0: + 936 return CObs(self.real * other.real - self.imag * other.imag, + 937 self.imag * other.real + self.real * other.imag) + 938 else: + 939 return CObs(self.real * other.real, self.imag * other.real) + 940 else: + 941 return CObs(self.real * other, self.imag * other) + 942 + 943 def __rmul__(self, other): + 944 return self * other 945 - 946 def __rtruediv__(self, other): - 947 r = self.real ** 2 + self.imag ** 2 - 948 if hasattr(other, 'real') and hasattr(other, 'imag'): - 949 return CObs((self.real * other.real + self.imag * other.imag) / r, (self.real * other.imag - self.imag * other.real) / r) - 950 else: - 951 return CObs(self.real * other / r, -self.imag * other / r) - 952 - 953 def __abs__(self): - 954 return np.sqrt(self.real**2 + self.imag**2) - 955 - 956 def __pos__(self): - 957 return self - 958 - 959 def __neg__(self): - 960 return -1 * self + 946 def __truediv__(self, other): + 947 if isinstance(other, np.ndarray): + 948 return 1 / (other / self) + 949 elif hasattr(other, 'real') and hasattr(other, 'imag'): + 950 r = other.real ** 2 + other.imag ** 2 + 951 return CObs((self.real * other.real + self.imag * other.imag) / r, (self.imag * other.real - self.real * other.imag) / r) + 952 else: + 953 return CObs(self.real / other, self.imag / other) + 954 + 955 def __rtruediv__(self, other): + 956 r = self.real ** 2 + self.imag ** 2 + 957 if hasattr(other, 'real') and hasattr(other, 'imag'): + 958 return CObs((self.real * other.real + self.imag * other.imag) / r, (self.real * other.imag - self.imag * other.real) / r) + 959 else: + 960 return CObs(self.real * other / r, -self.imag * other / r) 961 - 962 def __eq__(self, other): - 963 return self.real == other.real and self.imag == other.imag + 962 def __abs__(self): + 963 return np.sqrt(self.real**2 + self.imag**2) 964 - 965 def __str__(self): - 966 return '(' + str(self.real) + int(self.imag >= 0.0) * '+' + str(self.imag) + 'j)' + 965 def __pos__(self): + 966 return self 967 - 968 def __repr__(self): - 969 return 'CObs[' + str(self) + ']' + 968 def __neg__(self): + 969 return -1 * self 970 - 971 - 972def _format_uncertainty(value, dvalue): - 973 """Creates a string of a value and its error in paranthesis notation, e.g., 13.02(45)""" - 974 if dvalue == 0.0: - 975 return str(value) - 976 fexp = np.floor(np.log10(dvalue)) - 977 if fexp < 0.0: - 978 return '{:{form}}({:2.0f})'.format(value, dvalue * 10 ** (-fexp + 1), form='.' + str(-int(fexp) + 1) + 'f') - 979 elif fexp == 0.0: - 980 return '{:.1f}({:1.1f})'.format(value, dvalue) - 981 else: - 982 return '{:.0f}({:2.0f})'.format(value, dvalue) - 983 - 984 - 985def _expand_deltas(deltas, idx, shape, gapsize): - 986 """Expand deltas defined on idx to a regular range with spacing gapsize between two - 987 configurations and where holes are filled by 0. - 988 If idx is of type range, the deltas are not changed if the idx.step == gapsize. - 989 - 990 Parameters - 991 ---------- - 992 deltas : list - 993 List of fluctuations - 994 idx : list - 995 List or range of configs on which the deltas are defined, has to be sorted in ascending order. - 996 shape : int - 997 Number of configs in idx. 
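The old two-digit `_format_uncertainty` shown above is replaced further down (the `+ 981` hunk) by a significance-aware version, which is what backs the new `__format__`. A quick sketch of the parenthesis notation the new helper produces; `_format_uncertainty` is a private helper of `pyerrors.obs`, imported here purely for illustration:

    from pyerrors.obs import _format_uncertainty

    print(_format_uncertainty(13.0234, 0.4534))                  # '13.02(45)', default significance=2
    print(_format_uncertainty(13.0234, 0.4534, significance=3))  # '13.023(453)'
    print(_format_uncertainty(13.0234, 0.0))                     # '13.0234', zero error gives the plain value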
- 998 gapsize : int - 999 The target distance between two configurations. If longer distances -1000 are found in idx, the data is expanded. -1001 """ -1002 if isinstance(idx, range): -1003 if (idx.step == gapsize): -1004 return deltas -1005 ret = np.zeros((idx[-1] - idx[0] + gapsize) // gapsize) -1006 for i in range(shape): -1007 ret[(idx[i] - idx[0]) // gapsize] = deltas[i] -1008 return ret -1009 -1010 -1011def _merge_idx(idl): -1012 """Returns the union of all lists in idl as sorted list -1013 -1014 Parameters -1015 ---------- -1016 idl : list -1017 List of lists or ranges. -1018 """ -1019 -1020 # Use groupby to efficiently check whether all elements of idl are identical -1021 try: -1022 g = groupby(idl) -1023 if next(g, True) and not next(g, False): -1024 return idl[0] -1025 except Exception: -1026 pass -1027 -1028 if np.all([type(idx) is range for idx in idl]): -1029 if len(set([idx[0] for idx in idl])) == 1: -1030 idstart = min([idx.start for idx in idl]) -1031 idstop = max([idx.stop for idx in idl]) -1032 idstep = min([idx.step for idx in idl]) -1033 return range(idstart, idstop, idstep) -1034 -1035 return sorted(set().union(*idl)) -1036 -1037 -1038def _intersection_idx(idl): -1039 """Returns the intersection of all lists in idl as sorted list + 971 def __eq__(self, other): + 972 return self.real == other.real and self.imag == other.imag + 973 + 974 def __str__(self): + 975 return '(' + str(self.real) + int(self.imag >= 0.0) * '+' + str(self.imag) + 'j)' + 976 + 977 def __repr__(self): + 978 return 'CObs[' + str(self) + ']' + 979 + 980 + 981def _format_uncertainty(value, dvalue, significance=2): + 982 """Creates a string of a value and its error in paranthesis notation, e.g., 13.02(45)""" + 983 if dvalue == 0.0: + 984 return str(value) + 985 if not isinstance(significance, int): + 986 raise TypeError("significance needs to be an integer.") + 987 if significance < 1: + 988 raise ValueError("significance needs to be larger than zero.") + 989 fexp = np.floor(np.log10(dvalue)) + 990 if fexp < 0.0: + 991 return '{:{form}}({:1.0f})'.format(value, dvalue * 10 ** (-fexp + significance - 1), form='.' + str(-int(fexp) + significance - 1) + 'f') + 992 elif fexp == 0.0: + 993 return f"{value:.{significance - 1}f}({dvalue:1.{significance - 1}f})" + 994 else: + 995 return f"{value:.{max(0, int(significance - fexp - 1))}f}({dvalue:2.{max(0, int(significance - fexp - 1))}f})" + 996 + 997 + 998def _expand_deltas(deltas, idx, shape, gapsize): + 999 """Expand deltas defined on idx to a regular range with spacing gapsize between two +1000 configurations and where holes are filled by 0. +1001 If idx is of type range, the deltas are not changed if the idx.step == gapsize. +1002 +1003 Parameters +1004 ---------- +1005 deltas : list +1006 List of fluctuations +1007 idx : list +1008 List or range of configs on which the deltas are defined, has to be sorted in ascending order. +1009 shape : int +1010 Number of configs in idx. +1011 gapsize : int +1012 The target distance between two configurations. If longer distances +1013 are found in idx, the data is expanded. +1014 """ +1015 if isinstance(idx, range): +1016 if (idx.step == gapsize): +1017 return deltas +1018 ret = np.zeros((idx[-1] - idx[0] + gapsize) // gapsize) +1019 for i in range(shape): +1020 ret[(idx[i] - idx[0]) // gapsize] = deltas[i] +1021 return ret +1022 +1023 +1024def _merge_idx(idl): +1025 """Returns the union of all lists in idl as sorted list +1026 +1027 Parameters +1028 ---------- +1029 idl : list +1030 List of lists or ranges. 
+1031 """ +1032 +1033 # Use groupby to efficiently check whether all elements of idl are identical +1034 try: +1035 g = groupby(idl) +1036 if next(g, True) and not next(g, False): +1037 return idl[0] +1038 except Exception: +1039 pass 1040 -1041 Parameters -1042 ---------- -1043 idl : list -1044 List of lists or ranges. -1045 """ -1046 -1047 def _lcm(*args): -1048 """Returns the lowest common multiple of args. +1041 if np.all([type(idx) is range for idx in idl]): +1042 if len(set([idx[0] for idx in idl])) == 1: +1043 idstart = min([idx.start for idx in idl]) +1044 idstop = max([idx.stop for idx in idl]) +1045 idstep = min([idx.step for idx in idl]) +1046 return range(idstart, idstop, idstep) +1047 +1048 return sorted(set().union(*idl)) 1049 -1050 From python 3.9 onwards the math library contains an lcm function.""" -1051 return reduce(lambda a, b: a * b // gcd(a, b), args) -1052 -1053 # Use groupby to efficiently check whether all elements of idl are identical -1054 try: -1055 g = groupby(idl) -1056 if next(g, True) and not next(g, False): -1057 return idl[0] -1058 except Exception: -1059 pass -1060 -1061 if np.all([type(idx) is range for idx in idl]): -1062 if len(set([idx[0] for idx in idl])) == 1: -1063 idstart = max([idx.start for idx in idl]) -1064 idstop = min([idx.stop for idx in idl]) -1065 idstep = _lcm(*[idx.step for idx in idl]) -1066 return range(idstart, idstop, idstep) -1067 -1068 return sorted(set.intersection(*[set(o) for o in idl])) -1069 -1070 -1071def _expand_deltas_for_merge(deltas, idx, shape, new_idx): -1072 """Expand deltas defined on idx to the list of configs that is defined by new_idx. -1073 New, empty entries are filled by 0. If idx and new_idx are of type range, the smallest -1074 common divisor of the step sizes is used as new step size. -1075 -1076 Parameters -1077 ---------- -1078 deltas : list -1079 List of fluctuations -1080 idx : list -1081 List or range of configs on which the deltas are defined. -1082 Has to be a subset of new_idx and has to be sorted in ascending order. -1083 shape : list -1084 Number of configs in idx. -1085 new_idx : list -1086 List of configs that defines the new range, has to be sorted in ascending order. -1087 """ +1050 +1051def _intersection_idx(idl): +1052 """Returns the intersection of all lists in idl as sorted list +1053 +1054 Parameters +1055 ---------- +1056 idl : list +1057 List of lists or ranges. +1058 """ +1059 +1060 def _lcm(*args): +1061 """Returns the lowest common multiple of args. +1062 +1063 From python 3.9 onwards the math library contains an lcm function.""" +1064 return reduce(lambda a, b: a * b // gcd(a, b), args) +1065 +1066 # Use groupby to efficiently check whether all elements of idl are identical +1067 try: +1068 g = groupby(idl) +1069 if next(g, True) and not next(g, False): +1070 return idl[0] +1071 except Exception: +1072 pass +1073 +1074 if np.all([type(idx) is range for idx in idl]): +1075 if len(set([idx[0] for idx in idl])) == 1: +1076 idstart = max([idx.start for idx in idl]) +1077 idstop = min([idx.stop for idx in idl]) +1078 idstep = _lcm(*[idx.step for idx in idl]) +1079 return range(idstart, idstop, idstep) +1080 +1081 return sorted(set.intersection(*[set(o) for o in idl])) +1082 +1083 +1084def _expand_deltas_for_merge(deltas, idx, shape, new_idx): +1085 """Expand deltas defined on idx to the list of configs that is defined by new_idx. +1086 New, empty entries are filled by 0. If idx and new_idx are of type range, the smallest +1087 common divisor of the step sizes is used as new step size. 
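To make the range arithmetic of `_merge_idx` and `_intersection_idx` concrete: both preserve `range` objects where possible (when the ranges share their first configuration, as here), the union keeping the finest step and the intersection the least common multiple of the steps. A small sketch, importing the private helpers purely for illustration:

    from pyerrors.obs import _merge_idx, _intersection_idx

    print(_merge_idx([range(1, 100, 2), range(1, 100, 4)]))         # range(1, 100, 2): union keeps the finest step
    print(_intersection_idx([range(1, 100, 2), range(1, 100, 4)]))  # range(1, 100, 4): step is the lcm of the inputs
    print(_merge_idx([[1, 2, 3], [3, 4, 5]]))                       # [1, 2, 3, 4, 5]: sorted union for plain lists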
1088 -1089 if type(idx) is range and type(new_idx) is range: -1090 if idx == new_idx: -1091 return deltas -1092 ret = np.zeros(new_idx[-1] - new_idx[0] + 1) -1093 for i in range(shape): -1094 ret[idx[i] - new_idx[0]] = deltas[i] -1095 return np.array([ret[new_idx[i] - new_idx[0]] for i in range(len(new_idx))]) * len(new_idx) / len(idx) -1096 -1097 -1098def derived_observable(func, data, array_mode=False, **kwargs): -1099 """Construct a derived Obs according to func(data, **kwargs) using automatic differentiation. -1100 -1101 Parameters -1102 ---------- -1103 func : object -1104 arbitrary function of the form func(data, **kwargs). For the -1105 automatic differentiation to work, all numpy functions have to have -1106 the autograd wrapper (use 'import autograd.numpy as anp'). -1107 data : list -1108 list of Obs, e.g. [obs1, obs2, obs3]. -1109 num_grad : bool -1110 if True, numerical derivatives are used instead of autograd -1111 (default False). To control the numerical differentiation the -1112 kwargs of numdifftools.step_generators.MaxStepGenerator -1113 can be used. -1114 man_grad : list -1115 manually supply a list or an array which contains the jacobian -1116 of func. Use cautiously, supplying the wrong derivative will -1117 not be intercepted. -1118 -1119 Notes -1120 ----- -1121 For simple mathematical operations it can be practical to use anonymous -1122 functions. For the ratio of two observables one can e.g. use -1123 -1124 new_obs = derived_observable(lambda x: x[0] / x[1], [obs1, obs2]) -1125 """ -1126 -1127 data = np.asarray(data) -1128 raveled_data = data.ravel() -1129 -1130 # Workaround for matrix operations containing non Obs data -1131 if not all(isinstance(x, Obs) for x in raveled_data): -1132 for i in range(len(raveled_data)): -1133 if isinstance(raveled_data[i], (int, float)): -1134 raveled_data[i] = cov_Obs(raveled_data[i], 0.0, "###dummy_covobs###") -1135 -1136 allcov = {} -1137 for o in raveled_data: -1138 for name in o.cov_names: -1139 if name in allcov: -1140 if not np.allclose(allcov[name], o.covobs[name].cov): -1141 raise Exception('Inconsistent covariance matrices for %s!' 
% (name)) -1142 else: -1143 allcov[name] = o.covobs[name].cov -1144 -1145 n_obs = len(raveled_data) -1146 new_names = sorted(set([y for x in [o.names for o in raveled_data] for y in x])) -1147 new_cov_names = sorted(set([y for x in [o.cov_names for o in raveled_data] for y in x])) -1148 new_sample_names = sorted(set(new_names) - set(new_cov_names)) -1149 -1150 reweighted = len(list(filter(lambda o: o.reweighted is True, raveled_data))) > 0 -1151 -1152 if data.ndim == 1: -1153 values = np.array([o.value for o in data]) -1154 else: -1155 values = np.vectorize(lambda x: x.value)(data) -1156 -1157 new_values = func(values, **kwargs) -1158 -1159 multi = int(isinstance(new_values, np.ndarray)) -1160 -1161 new_r_values = {} -1162 new_idl_d = {} -1163 for name in new_sample_names: -1164 idl = [] -1165 tmp_values = np.zeros(n_obs) -1166 for i, item in enumerate(raveled_data): -1167 tmp_values[i] = item.r_values.get(name, item.value) -1168 tmp_idl = item.idl.get(name) -1169 if tmp_idl is not None: -1170 idl.append(tmp_idl) -1171 if multi > 0: -1172 tmp_values = np.array(tmp_values).reshape(data.shape) -1173 new_r_values[name] = func(tmp_values, **kwargs) -1174 new_idl_d[name] = _merge_idx(idl) -1175 -1176 if 'man_grad' in kwargs: -1177 deriv = np.asarray(kwargs.get('man_grad')) -1178 if new_values.shape + data.shape != deriv.shape: -1179 raise Exception('Manual derivative does not have correct shape.') -1180 elif kwargs.get('num_grad') is True: -1181 if multi > 0: -1182 raise Exception('Multi mode currently not supported for numerical derivative') -1183 options = { -1184 'base_step': 0.1, -1185 'step_ratio': 2.5} -1186 for key in options.keys(): -1187 kwarg = kwargs.get(key) -1188 if kwarg is not None: -1189 options[key] = kwarg -1190 tmp_df = nd.Gradient(func, order=4, **{k: v for k, v in options.items() if v is not None})(values, **kwargs) -1191 if tmp_df.size == 1: -1192 deriv = np.array([tmp_df.real]) -1193 else: -1194 deriv = tmp_df.real -1195 else: -1196 deriv = jacobian(func)(values, **kwargs) -1197 -1198 final_result = np.zeros(new_values.shape, dtype=object) -1199 -1200 if array_mode is True: -1201 -1202 class _Zero_grad(): -1203 def __init__(self, N): -1204 self.grad = np.zeros((N, 1)) -1205 -1206 new_covobs_lengths = dict(set([y for x in [[(n, o.covobs[n].N) for n in o.cov_names] for o in raveled_data] for y in x])) -1207 d_extracted = {} -1208 g_extracted = {} -1209 for name in new_sample_names: -1210 d_extracted[name] = [] -1211 ens_length = len(new_idl_d[name]) -1212 for i_dat, dat in enumerate(data): -1213 d_extracted[name].append(np.array([_expand_deltas_for_merge(o.deltas.get(name, np.zeros(ens_length)), o.idl.get(name, new_idl_d[name]), o.shape.get(name, ens_length), new_idl_d[name]) for o in dat.reshape(np.prod(dat.shape))]).reshape(dat.shape + (ens_length, ))) -1214 for name in new_cov_names: -1215 g_extracted[name] = [] -1216 zero_grad = _Zero_grad(new_covobs_lengths[name]) -1217 for i_dat, dat in enumerate(data): -1218 g_extracted[name].append(np.array([o.covobs.get(name, zero_grad).grad for o in dat.reshape(np.prod(dat.shape))]).reshape(dat.shape + (new_covobs_lengths[name], 1))) -1219 -1220 for i_val, new_val in np.ndenumerate(new_values): -1221 new_deltas = {} -1222 new_grad = {} -1223 if array_mode is True: -1224 for name in new_sample_names: -1225 ens_length = d_extracted[name][0].shape[-1] -1226 new_deltas[name] = np.zeros(ens_length) -1227 for i_dat, dat in enumerate(d_extracted[name]): -1228 new_deltas[name] += np.tensordot(deriv[i_val + (i_dat, )], dat) -1229 for name 
in new_cov_names: -1230 new_grad[name] = 0 -1231 for i_dat, dat in enumerate(g_extracted[name]): -1232 new_grad[name] += np.tensordot(deriv[i_val + (i_dat, )], dat) -1233 else: -1234 for j_obs, obs in np.ndenumerate(data): -1235 for name in obs.names: -1236 if name in obs.cov_names: -1237 new_grad[name] = new_grad.get(name, 0) + deriv[i_val + j_obs] * obs.covobs[name].grad -1238 else: -1239 new_deltas[name] = new_deltas.get(name, 0) + deriv[i_val + j_obs] * _expand_deltas_for_merge(obs.deltas[name], obs.idl[name], obs.shape[name], new_idl_d[name]) -1240 -1241 new_covobs = {name: Covobs(0, allcov[name], name, grad=new_grad[name]) for name in new_grad} -1242 -1243 if not set(new_covobs.keys()).isdisjoint(new_deltas.keys()): -1244 raise Exception('The same name has been used for deltas and covobs!') -1245 new_samples = [] -1246 new_means = [] -1247 new_idl = [] -1248 new_names_obs = [] -1249 for name in new_names: -1250 if name not in new_covobs: -1251 new_samples.append(new_deltas[name]) -1252 new_idl.append(new_idl_d[name]) -1253 new_means.append(new_r_values[name][i_val]) -1254 new_names_obs.append(name) -1255 final_result[i_val] = Obs(new_samples, new_names_obs, means=new_means, idl=new_idl) -1256 for name in new_covobs: -1257 final_result[i_val].names.append(name) -1258 final_result[i_val]._covobs = new_covobs -1259 final_result[i_val]._value = new_val -1260 final_result[i_val].reweighted = reweighted -1261 -1262 if multi == 0: -1263 final_result = final_result.item() -1264 -1265 return final_result -1266 -1267 -1268def _reduce_deltas(deltas, idx_old, idx_new): -1269 """Extract deltas defined on idx_old on all configs of idx_new. -1270 -1271 Assumes, that idx_old and idx_new are correctly defined idl, i.e., they -1272 are ordered in an ascending order. -1273 -1274 Parameters -1275 ---------- -1276 deltas : list -1277 List of fluctuations -1278 idx_old : list -1279 List or range of configs on which the deltas are defined -1280 idx_new : list -1281 List of configs for which we want to extract the deltas. -1282 Has to be a subset of idx_old. -1283 """ -1284 if not len(deltas) == len(idx_old): -1285 raise Exception('Length of deltas and idx_old have to be the same: %d != %d' % (len(deltas), len(idx_old))) -1286 if type(idx_old) is range and type(idx_new) is range: -1287 if idx_old == idx_new: -1288 return deltas -1289 # Use groupby to efficiently check whether all elements of idx_old and idx_new are identical -1290 try: -1291 g = groupby([idx_old, idx_new]) -1292 if next(g, True) and not next(g, False): -1293 return deltas -1294 except Exception: -1295 pass -1296 indices = np.intersect1d(idx_old, idx_new, assume_unique=True, return_indices=True)[1] -1297 if len(indices) < len(idx_new): -1298 raise Exception('Error in _reduce_deltas: Config of idx_new not in idx_old') -1299 return np.array(deltas)[indices] -1300 -1301 -1302def reweight(weight, obs, **kwargs): -1303 """Reweight a list of observables. -1304 -1305 Parameters -1306 ---------- -1307 weight : Obs -1308 Reweighting factor. An Observable that has to be defined on a superset of the -1309 configurations in obs[i].idl for all i. -1310 obs : list -1311 list of Obs, e.g. [obs1, obs2, obs3]. -1312 all_configs : bool -1313 if True, the reweighted observables are normalized by the average of -1314 the reweighting factor on all configurations in weight.idl and not -1315 on the configurations in obs[i].idl. Default False. 
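The Notes section of `derived_observable` above recommends anonymous functions for simple operations. A minimal sketch, assuming `pe.pseudo_Obs` and `pe.derived_observable` are exposed at package level as in current pyerrors releases; `obs1` and `obs2` are fabricated test observables:

    import autograd.numpy as anp  # autograd wrapper, required for non-trivial functions
    import pyerrors as pe

    obs1 = pe.pseudo_Obs(2.0, 0.1, 'ens1')
    obs2 = pe.pseudo_Obs(4.0, 0.2, 'ens1')
    ratio = pe.derived_observable(lambda x: x[0] / x[1], [obs1, obs2])   # jacobian via autograd
    log_obs = pe.derived_observable(lambda x: anp.log(x[0]), [obs1])     # anp instead of np
    prod = pe.derived_observable(lambda x, **kwargs: x[0] * x[1], [obs1, obs2],
                                 man_grad=[obs2.value, obs1.value])      # manual jacobian, as in Obs.__mul__
    ratio.gamma_method()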
-1316 """ -1317 result = [] -1318 for i in range(len(obs)): -1319 if len(obs[i].cov_names): -1320 raise Exception('Error: Not possible to reweight an Obs that contains covobs!') -1321 if not set(obs[i].names).issubset(weight.names): -1322 raise Exception('Error: Ensembles do not fit') -1323 for name in obs[i].names: -1324 if not set(obs[i].idl[name]).issubset(weight.idl[name]): -1325 raise Exception('obs[%d] has to be defined on a subset of the configs in weight.idl[%s]!' % (i, name)) -1326 new_samples = [] -1327 w_deltas = {} -1328 for name in sorted(obs[i].names): -1329 w_deltas[name] = _reduce_deltas(weight.deltas[name], weight.idl[name], obs[i].idl[name]) -1330 new_samples.append((w_deltas[name] + weight.r_values[name]) * (obs[i].deltas[name] + obs[i].r_values[name])) -1331 tmp_obs = Obs(new_samples, sorted(obs[i].names), idl=[obs[i].idl[name] for name in sorted(obs[i].names)]) -1332 -1333 if kwargs.get('all_configs'): -1334 new_weight = weight -1335 else: -1336 new_weight = Obs([w_deltas[name] + weight.r_values[name] for name in sorted(obs[i].names)], sorted(obs[i].names), idl=[obs[i].idl[name] for name in sorted(obs[i].names)]) -1337 -1338 result.append(tmp_obs / new_weight) -1339 result[-1].reweighted = True -1340 -1341 return result -1342 -1343 -1344def correlate(obs_a, obs_b): -1345 """Correlate two observables. -1346 -1347 Parameters -1348 ---------- -1349 obs_a : Obs -1350 First observable -1351 obs_b : Obs -1352 Second observable +1089 Parameters +1090 ---------- +1091 deltas : list +1092 List of fluctuations +1093 idx : list +1094 List or range of configs on which the deltas are defined. +1095 Has to be a subset of new_idx and has to be sorted in ascending order. +1096 shape : list +1097 Number of configs in idx. +1098 new_idx : list +1099 List of configs that defines the new range, has to be sorted in ascending order. +1100 """ +1101 +1102 if type(idx) is range and type(new_idx) is range: +1103 if idx == new_idx: +1104 return deltas +1105 ret = np.zeros(new_idx[-1] - new_idx[0] + 1) +1106 for i in range(shape): +1107 ret[idx[i] - new_idx[0]] = deltas[i] +1108 return np.array([ret[new_idx[i] - new_idx[0]] for i in range(len(new_idx))]) * len(new_idx) / len(idx) +1109 +1110 +1111def derived_observable(func, data, array_mode=False, **kwargs): +1112 """Construct a derived Obs according to func(data, **kwargs) using automatic differentiation. +1113 +1114 Parameters +1115 ---------- +1116 func : object +1117 arbitrary function of the form func(data, **kwargs). For the +1118 automatic differentiation to work, all numpy functions have to have +1119 the autograd wrapper (use 'import autograd.numpy as anp'). +1120 data : list +1121 list of Obs, e.g. [obs1, obs2, obs3]. +1122 num_grad : bool +1123 if True, numerical derivatives are used instead of autograd +1124 (default False). To control the numerical differentiation the +1125 kwargs of numdifftools.step_generators.MaxStepGenerator +1126 can be used. +1127 man_grad : list +1128 manually supply a list or an array which contains the jacobian +1129 of func. Use cautiously, supplying the wrong derivative will +1130 not be intercepted. +1131 +1132 Notes +1133 ----- +1134 For simple mathematical operations it can be practical to use anonymous +1135 functions. For the ratio of two observables one can e.g. 
use +1136 +1137 new_obs = derived_observable(lambda x: x[0] / x[1], [obs1, obs2]) +1138 """ +1139 +1140 data = np.asarray(data) +1141 raveled_data = data.ravel() +1142 +1143 # Workaround for matrix operations containing non Obs data +1144 if not all(isinstance(x, Obs) for x in raveled_data): +1145 for i in range(len(raveled_data)): +1146 if isinstance(raveled_data[i], (int, float)): +1147 raveled_data[i] = cov_Obs(raveled_data[i], 0.0, "###dummy_covobs###") +1148 +1149 allcov = {} +1150 for o in raveled_data: +1151 for name in o.cov_names: +1152 if name in allcov: +1153 if not np.allclose(allcov[name], o.covobs[name].cov): +1154 raise Exception('Inconsistent covariance matrices for %s!' % (name)) +1155 else: +1156 allcov[name] = o.covobs[name].cov +1157 +1158 n_obs = len(raveled_data) +1159 new_names = sorted(set([y for x in [o.names for o in raveled_data] for y in x])) +1160 new_cov_names = sorted(set([y for x in [o.cov_names for o in raveled_data] for y in x])) +1161 new_sample_names = sorted(set(new_names) - set(new_cov_names)) +1162 +1163 reweighted = len(list(filter(lambda o: o.reweighted is True, raveled_data))) > 0 +1164 +1165 if data.ndim == 1: +1166 values = np.array([o.value for o in data]) +1167 else: +1168 values = np.vectorize(lambda x: x.value)(data) +1169 +1170 new_values = func(values, **kwargs) +1171 +1172 multi = int(isinstance(new_values, np.ndarray)) +1173 +1174 new_r_values = {} +1175 new_idl_d = {} +1176 for name in new_sample_names: +1177 idl = [] +1178 tmp_values = np.zeros(n_obs) +1179 for i, item in enumerate(raveled_data): +1180 tmp_values[i] = item.r_values.get(name, item.value) +1181 tmp_idl = item.idl.get(name) +1182 if tmp_idl is not None: +1183 idl.append(tmp_idl) +1184 if multi > 0: +1185 tmp_values = np.array(tmp_values).reshape(data.shape) +1186 new_r_values[name] = func(tmp_values, **kwargs) +1187 new_idl_d[name] = _merge_idx(idl) +1188 +1189 if 'man_grad' in kwargs: +1190 deriv = np.asarray(kwargs.get('man_grad')) +1191 if new_values.shape + data.shape != deriv.shape: +1192 raise Exception('Manual derivative does not have correct shape.') +1193 elif kwargs.get('num_grad') is True: +1194 if multi > 0: +1195 raise Exception('Multi mode currently not supported for numerical derivative') +1196 options = { +1197 'base_step': 0.1, +1198 'step_ratio': 2.5} +1199 for key in options.keys(): +1200 kwarg = kwargs.get(key) +1201 if kwarg is not None: +1202 options[key] = kwarg +1203 tmp_df = nd.Gradient(func, order=4, **{k: v for k, v in options.items() if v is not None})(values, **kwargs) +1204 if tmp_df.size == 1: +1205 deriv = np.array([tmp_df.real]) +1206 else: +1207 deriv = tmp_df.real +1208 else: +1209 deriv = jacobian(func)(values, **kwargs) +1210 +1211 final_result = np.zeros(new_values.shape, dtype=object) +1212 +1213 if array_mode is True: +1214 +1215 class _Zero_grad(): +1216 def __init__(self, N): +1217 self.grad = np.zeros((N, 1)) +1218 +1219 new_covobs_lengths = dict(set([y for x in [[(n, o.covobs[n].N) for n in o.cov_names] for o in raveled_data] for y in x])) +1220 d_extracted = {} +1221 g_extracted = {} +1222 for name in new_sample_names: +1223 d_extracted[name] = [] +1224 ens_length = len(new_idl_d[name]) +1225 for i_dat, dat in enumerate(data): +1226 d_extracted[name].append(np.array([_expand_deltas_for_merge(o.deltas.get(name, np.zeros(ens_length)), o.idl.get(name, new_idl_d[name]), o.shape.get(name, ens_length), new_idl_d[name]) for o in dat.reshape(np.prod(dat.shape))]).reshape(dat.shape + (ens_length, ))) +1227 for name in new_cov_names: 
+1228 g_extracted[name] = [] +1229 zero_grad = _Zero_grad(new_covobs_lengths[name]) +1230 for i_dat, dat in enumerate(data): +1231 g_extracted[name].append(np.array([o.covobs.get(name, zero_grad).grad for o in dat.reshape(np.prod(dat.shape))]).reshape(dat.shape + (new_covobs_lengths[name], 1))) +1232 +1233 for i_val, new_val in np.ndenumerate(new_values): +1234 new_deltas = {} +1235 new_grad = {} +1236 if array_mode is True: +1237 for name in new_sample_names: +1238 ens_length = d_extracted[name][0].shape[-1] +1239 new_deltas[name] = np.zeros(ens_length) +1240 for i_dat, dat in enumerate(d_extracted[name]): +1241 new_deltas[name] += np.tensordot(deriv[i_val + (i_dat, )], dat) +1242 for name in new_cov_names: +1243 new_grad[name] = 0 +1244 for i_dat, dat in enumerate(g_extracted[name]): +1245 new_grad[name] += np.tensordot(deriv[i_val + (i_dat, )], dat) +1246 else: +1247 for j_obs, obs in np.ndenumerate(data): +1248 for name in obs.names: +1249 if name in obs.cov_names: +1250 new_grad[name] = new_grad.get(name, 0) + deriv[i_val + j_obs] * obs.covobs[name].grad +1251 else: +1252 new_deltas[name] = new_deltas.get(name, 0) + deriv[i_val + j_obs] * _expand_deltas_for_merge(obs.deltas[name], obs.idl[name], obs.shape[name], new_idl_d[name]) +1253 +1254 new_covobs = {name: Covobs(0, allcov[name], name, grad=new_grad[name]) for name in new_grad} +1255 +1256 if not set(new_covobs.keys()).isdisjoint(new_deltas.keys()): +1257 raise Exception('The same name has been used for deltas and covobs!') +1258 new_samples = [] +1259 new_means = [] +1260 new_idl = [] +1261 new_names_obs = [] +1262 for name in new_names: +1263 if name not in new_covobs: +1264 new_samples.append(new_deltas[name]) +1265 new_idl.append(new_idl_d[name]) +1266 new_means.append(new_r_values[name][i_val]) +1267 new_names_obs.append(name) +1268 final_result[i_val] = Obs(new_samples, new_names_obs, means=new_means, idl=new_idl) +1269 for name in new_covobs: +1270 final_result[i_val].names.append(name) +1271 final_result[i_val]._covobs = new_covobs +1272 final_result[i_val]._value = new_val +1273 final_result[i_val].reweighted = reweighted +1274 +1275 if multi == 0: +1276 final_result = final_result.item() +1277 +1278 return final_result +1279 +1280 +1281def _reduce_deltas(deltas, idx_old, idx_new): +1282 """Extract deltas defined on idx_old on all configs of idx_new. +1283 +1284 Assumes, that idx_old and idx_new are correctly defined idl, i.e., they +1285 are ordered in an ascending order. +1286 +1287 Parameters +1288 ---------- +1289 deltas : list +1290 List of fluctuations +1291 idx_old : list +1292 List or range of configs on which the deltas are defined +1293 idx_new : list +1294 List of configs for which we want to extract the deltas. +1295 Has to be a subset of idx_old. 
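A small sketch of what `_reduce_deltas` does with the arguments just described: it picks out the fluctuations belonging to the subset of configurations in idx_new (private helper, imported only for illustration):

    import numpy as np
    from pyerrors.obs import _reduce_deltas

    deltas = np.array([0.1, -0.2, 0.3, -0.1, 0.05])
    print(_reduce_deltas(deltas, [1, 2, 3, 4, 5], [2, 4]))  # array([-0.2, -0.1])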
+1296 """ +1297 if not len(deltas) == len(idx_old): +1298 raise Exception('Length of deltas and idx_old have to be the same: %d != %d' % (len(deltas), len(idx_old))) +1299 if type(idx_old) is range and type(idx_new) is range: +1300 if idx_old == idx_new: +1301 return deltas +1302 # Use groupby to efficiently check whether all elements of idx_old and idx_new are identical +1303 try: +1304 g = groupby([idx_old, idx_new]) +1305 if next(g, True) and not next(g, False): +1306 return deltas +1307 except Exception: +1308 pass +1309 indices = np.intersect1d(idx_old, idx_new, assume_unique=True, return_indices=True)[1] +1310 if len(indices) < len(idx_new): +1311 raise Exception('Error in _reduce_deltas: Config of idx_new not in idx_old') +1312 return np.array(deltas)[indices] +1313 +1314 +1315def reweight(weight, obs, **kwargs): +1316 """Reweight a list of observables. +1317 +1318 Parameters +1319 ---------- +1320 weight : Obs +1321 Reweighting factor. An Observable that has to be defined on a superset of the +1322 configurations in obs[i].idl for all i. +1323 obs : list +1324 list of Obs, e.g. [obs1, obs2, obs3]. +1325 all_configs : bool +1326 if True, the reweighted observables are normalized by the average of +1327 the reweighting factor on all configurations in weight.idl and not +1328 on the configurations in obs[i].idl. Default False. +1329 """ +1330 result = [] +1331 for i in range(len(obs)): +1332 if len(obs[i].cov_names): +1333 raise Exception('Error: Not possible to reweight an Obs that contains covobs!') +1334 if not set(obs[i].names).issubset(weight.names): +1335 raise Exception('Error: Ensembles do not fit') +1336 for name in obs[i].names: +1337 if not set(obs[i].idl[name]).issubset(weight.idl[name]): +1338 raise Exception('obs[%d] has to be defined on a subset of the configs in weight.idl[%s]!' % (i, name)) +1339 new_samples = [] +1340 w_deltas = {} +1341 for name in sorted(obs[i].names): +1342 w_deltas[name] = _reduce_deltas(weight.deltas[name], weight.idl[name], obs[i].idl[name]) +1343 new_samples.append((w_deltas[name] + weight.r_values[name]) * (obs[i].deltas[name] + obs[i].r_values[name])) +1344 tmp_obs = Obs(new_samples, sorted(obs[i].names), idl=[obs[i].idl[name] for name in sorted(obs[i].names)]) +1345 +1346 if kwargs.get('all_configs'): +1347 new_weight = weight +1348 else: +1349 new_weight = Obs([w_deltas[name] + weight.r_values[name] for name in sorted(obs[i].names)], sorted(obs[i].names), idl=[obs[i].idl[name] for name in sorted(obs[i].names)]) +1350 +1351 result.append(tmp_obs / new_weight) +1352 result[-1].reweighted = True 1353 -1354 Notes -1355 ----- -1356 Keep in mind to only correlate primary observables which have not been reweighted -1357 yet. The reweighting has to be applied after correlating the observables. -1358 Currently only works if ensembles are identical (this is not strictly necessary). 
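Putting `reweight` to use: a hedged sketch assuming `pe.pseudo_Obs` fabricates observables on a common ensemble, so that the subset conditions on names and idl hold; the hypothetical `w` plays the role of the reweighting factor:

    import pyerrors as pe

    w = pe.pseudo_Obs(1.0, 0.01, 'ens1')  # hypothetical reweighting factor
    o = pe.pseudo_Obs(0.5, 0.02, 'ens1')  # observable on the same configurations
    r = pe.reweight(w, [o])               # returns a list of reweighted Obs
    r[0].gamma_method()
    assert r[0].reweighted                # flag set by reweight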
-1359 """ -1360 -1361 if sorted(obs_a.names) != sorted(obs_b.names): -1362 raise Exception(f"Ensembles do not fit {set(sorted(obs_a.names)) ^ set(sorted(obs_b.names))}") -1363 if len(obs_a.cov_names) or len(obs_b.cov_names): -1364 raise Exception('Error: Not possible to correlate Obs that contain covobs!') -1365 for name in obs_a.names: -1366 if obs_a.shape[name] != obs_b.shape[name]: -1367 raise Exception('Shapes of ensemble', name, 'do not fit') -1368 if obs_a.idl[name] != obs_b.idl[name]: -1369 raise Exception('idl of ensemble', name, 'do not fit') -1370 -1371 if obs_a.reweighted is True: -1372 warnings.warn("The first observable is already reweighted.", RuntimeWarning) -1373 if obs_b.reweighted is True: -1374 warnings.warn("The second observable is already reweighted.", RuntimeWarning) -1375 -1376 new_samples = [] -1377 new_idl = [] -1378 for name in sorted(obs_a.names): -1379 new_samples.append((obs_a.deltas[name] + obs_a.r_values[name]) * (obs_b.deltas[name] + obs_b.r_values[name])) -1380 new_idl.append(obs_a.idl[name]) -1381 -1382 o = Obs(new_samples, sorted(obs_a.names), idl=new_idl) -1383 o.reweighted = obs_a.reweighted or obs_b.reweighted -1384 return o -1385 -1386 -1387def covariance(obs, visualize=False, correlation=False, smooth=None, **kwargs): -1388 r'''Calculates the error covariance matrix of a set of observables. -1389 -1390 WARNING: This function should be used with care, especially for observables with support on multiple -1391 ensembles with differing autocorrelations. See the notes below for details. -1392 -1393 The gamma method has to be applied first to all observables. +1354 return result +1355 +1356 +1357def correlate(obs_a, obs_b): +1358 """Correlate two observables. +1359 +1360 Parameters +1361 ---------- +1362 obs_a : Obs +1363 First observable +1364 obs_b : Obs +1365 Second observable +1366 +1367 Notes +1368 ----- +1369 Keep in mind to only correlate primary observables which have not been reweighted +1370 yet. The reweighting has to be applied after correlating the observables. +1371 Currently only works if ensembles are identical (this is not strictly necessary). +1372 """ +1373 +1374 if sorted(obs_a.names) != sorted(obs_b.names): +1375 raise Exception(f"Ensembles do not fit {set(sorted(obs_a.names)) ^ set(sorted(obs_b.names))}") +1376 if len(obs_a.cov_names) or len(obs_b.cov_names): +1377 raise Exception('Error: Not possible to correlate Obs that contain covobs!') +1378 for name in obs_a.names: +1379 if obs_a.shape[name] != obs_b.shape[name]: +1380 raise Exception('Shapes of ensemble', name, 'do not fit') +1381 if obs_a.idl[name] != obs_b.idl[name]: +1382 raise Exception('idl of ensemble', name, 'do not fit') +1383 +1384 if obs_a.reweighted is True: +1385 warnings.warn("The first observable is already reweighted.", RuntimeWarning) +1386 if obs_b.reweighted is True: +1387 warnings.warn("The second observable is already reweighted.", RuntimeWarning) +1388 +1389 new_samples = [] +1390 new_idl = [] +1391 for name in sorted(obs_a.names): +1392 new_samples.append((obs_a.deltas[name] + obs_a.r_values[name]) * (obs_b.deltas[name] + obs_b.r_values[name])) +1393 new_idl.append(obs_a.idl[name]) 1394 -1395 Parameters -1396 ---------- -1397 obs : list or numpy.ndarray -1398 List or one dimensional array of Obs -1399 visualize : bool -1400 If True plots the corresponding normalized correlation matrix (default False). -1401 correlation : bool -1402 If True the correlation matrix instead of the error covariance matrix is returned (default False). 
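Following the constraints in `correlate`'s docstring (identical ensembles, shapes, and idl, and no prior reweighting), a minimal sketch with two fabricated observables, under the same `pe.pseudo_Obs` assumption as above:

    import pyerrors as pe

    a = pe.pseudo_Obs(1.2, 0.03, 'ens1')
    b = pe.pseudo_Obs(0.8, 0.02, 'ens1')  # same ensemble and idl as a
    ab = pe.correlate(a, b)               # config-by-config product, correlations kept
    ab.gamma_method()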
-1403 smooth : None or int -1404 If smooth is an integer 'E' between 2 and the dimension of the matrix minus 1 the eigenvalue -1405 smoothing procedure of hep-lat/9412087 is applied to the correlation matrix which leaves the -1406 largest E eigenvalues essentially unchanged and smoothes the smaller eigenvalues to avoid extremely -1407 small ones. -1408 -1409 Notes -1410 ----- -1411 The error covariance is defined such that it agrees with the squared standard error for two identical observables -1412 $$\operatorname{cov}(a,a)=\sum_{s=1}^N\delta_a^s\delta_a^s/N^2=\Gamma_{aa}(0)/N=\operatorname{var}(a)/N=\sigma_a^2$$ -1413 in the absence of autocorrelation. -1414 The error covariance is estimated by calculating the correlation matrix assuming no autocorrelation and then rescaling the correlation matrix by the full errors including the previous gamma method estimate for the autocorrelation of the observables. The covariance at windowsize 0 is guaranteed to be positive semi-definite -1415 $$\sum_{i,j}v_i\Gamma_{ij}(0)v_j=\frac{1}{N}\sum_{s=1}^N\sum_{i,j}v_i\delta_i^s\delta_j^s v_j=\frac{1}{N}\sum_{s=1}^N\sum_{i}|v_i\delta_i^s|^2\geq 0\,,$$ for every $v\in\mathbb{R}^M$, while such an identity does not hold for larger windows/lags. -1416 For observables defined on a single ensemble our approximation is equivalent to assuming that the integrated autocorrelation time of an off-diagonal element is equal to the geometric mean of the integrated autocorrelation times of the corresponding diagonal elements. -1417 $$\tau_{\mathrm{int}, ij}=\sqrt{\tau_{\mathrm{int}, i}\times \tau_{\mathrm{int}, j}}$$ -1418 This construction ensures that the estimated covariance matrix is positive semi-definite (up to numerical rounding errors). -1419 ''' -1420 -1421 length = len(obs) -1422 -1423 max_samples = np.max([o.N for o in obs]) -1424 if max_samples <= length and not [item for sublist in [o.cov_names for o in obs] for item in sublist]: -1425 warnings.warn(f"The dimension of the covariance matrix ({length}) is larger or equal to the number of samples ({max_samples}). This will result in a rank deficient matrix.", RuntimeWarning) -1426 -1427 cov = np.zeros((length, length)) -1428 for i in range(length): -1429 for j in range(i, length): -1430 cov[i, j] = _covariance_element(obs[i], obs[j]) -1431 cov = cov + cov.T - np.diag(np.diag(cov)) -1432 -1433 corr = np.diag(1 / np.sqrt(np.diag(cov))) @ cov @ np.diag(1 / np.sqrt(np.diag(cov))) -1434 -1435 if isinstance(smooth, int): -1436 corr = _smooth_eigenvalues(corr, smooth) -1437 -1438 if visualize: -1439 plt.matshow(corr, vmin=-1, vmax=1) -1440 plt.set_cmap('RdBu') -1441 plt.colorbar() -1442 plt.draw() -1443 -1444 if correlation is True: -1445 return corr -1446 -1447 errors = [o.dvalue for o in obs] -1448 cov = np.diag(errors) @ corr @ np.diag(errors) -1449 -1450 eigenvalues = np.linalg.eigh(cov)[0] -1451 if not np.all(eigenvalues >= 0): -1452 warnings.warn("Covariance matrix is not positive semi-definite (Eigenvalues: " + str(eigenvalues) + ")", RuntimeWarning) -1453 -1454 return cov -1455 +1395 o = Obs(new_samples, sorted(obs_a.names), idl=new_idl) +1396 o.reweighted = obs_a.reweighted or obs_b.reweighted +1397 return o +1398 +1399 +1400def covariance(obs, visualize=False, correlation=False, smooth=None, **kwargs): +1401 r'''Calculates the error covariance matrix of a set of observables. +1402 +1403 WARNING: This function should be used with care, especially for observables with support on multiple +1404 ensembles with differing autocorrelations. 
See the notes below for details. +1405 +1406 The gamma method has to be applied first to all observables. +1407 +1408 Parameters +1409 ---------- +1410 obs : list or numpy.ndarray +1411 List or one dimensional array of Obs +1412 visualize : bool +1413 If True plots the corresponding normalized correlation matrix (default False). +1414 correlation : bool +1415 If True the correlation matrix instead of the error covariance matrix is returned (default False). +1416 smooth : None or int +1417 If smooth is an integer 'E' between 2 and the dimension of the matrix minus 1 the eigenvalue +1418 smoothing procedure of hep-lat/9412087 is applied to the correlation matrix which leaves the +1419 largest E eigenvalues essentially unchanged and smoothes the smaller eigenvalues to avoid extremely +1420 small ones. +1421 +1422 Notes +1423 ----- +1424 The error covariance is defined such that it agrees with the squared standard error for two identical observables +1425 $$\operatorname{cov}(a,a)=\sum_{s=1}^N\delta_a^s\delta_a^s/N^2=\Gamma_{aa}(0)/N=\operatorname{var}(a)/N=\sigma_a^2$$ +1426 in the absence of autocorrelation. +1427 The error covariance is estimated by calculating the correlation matrix assuming no autocorrelation and then rescaling the correlation matrix by the full errors including the previous gamma method estimate for the autocorrelation of the observables. The covariance at windowsize 0 is guaranteed to be positive semi-definite +1428 $$\sum_{i,j}v_i\Gamma_{ij}(0)v_j=\frac{1}{N}\sum_{s=1}^N\sum_{i,j}v_i\delta_i^s\delta_j^s v_j=\frac{1}{N}\sum_{s=1}^N\sum_{i}|v_i\delta_i^s|^2\geq 0\,,$$ for every $v\in\mathbb{R}^M$, while such an identity does not hold for larger windows/lags. +1429 For observables defined on a single ensemble our approximation is equivalent to assuming that the integrated autocorrelation time of an off-diagonal element is equal to the geometric mean of the integrated autocorrelation times of the corresponding diagonal elements. +1430 $$\tau_{\mathrm{int}, ij}=\sqrt{\tau_{\mathrm{int}, i}\times \tau_{\mathrm{int}, j}}$$ +1431 This construction ensures that the estimated covariance matrix is positive semi-definite (up to numerical rounding errors). +1432 ''' +1433 +1434 length = len(obs) +1435 +1436 max_samples = np.max([o.N for o in obs]) +1437 if max_samples <= length and not [item for sublist in [o.cov_names for o in obs] for item in sublist]: +1438 warnings.warn(f"The dimension of the covariance matrix ({length}) is larger or equal to the number of samples ({max_samples}). 
This will result in a rank deficient matrix.", RuntimeWarning) +1439 +1440 cov = np.zeros((length, length)) +1441 for i in range(length): +1442 for j in range(i, length): +1443 cov[i, j] = _covariance_element(obs[i], obs[j]) +1444 cov = cov + cov.T - np.diag(np.diag(cov)) +1445 +1446 corr = np.diag(1 / np.sqrt(np.diag(cov))) @ cov @ np.diag(1 / np.sqrt(np.diag(cov))) +1447 +1448 if isinstance(smooth, int): +1449 corr = _smooth_eigenvalues(corr, smooth) +1450 +1451 if visualize: +1452 plt.matshow(corr, vmin=-1, vmax=1) +1453 plt.set_cmap('RdBu') +1454 plt.colorbar() +1455 plt.draw() 1456 -1457def _smooth_eigenvalues(corr, E): -1458 """Eigenvalue smoothing as described in hep-lat/9412087 +1457 if correlation is True: +1458 return corr 1459 -1460 corr : np.ndarray -1461 correlation matrix -1462 E : integer -1463 Number of eigenvalues to be left substantially unchanged -1464 """ -1465 if not (2 < E < corr.shape[0] - 1): -1466 raise Exception(f"'E' has to be between 2 and the dimension of the correlation matrix minus 1 ({corr.shape[0] - 1}).") -1467 vals, vec = np.linalg.eigh(corr) -1468 lambda_min = np.mean(vals[:-E]) -1469 vals[vals < lambda_min] = lambda_min -1470 vals /= np.mean(vals) -1471 return vec @ np.diag(vals) @ vec.T +1460 errors = [o.dvalue for o in obs] +1461 cov = np.diag(errors) @ corr @ np.diag(errors) +1462 +1463 eigenvalues = np.linalg.eigh(cov)[0] +1464 if not np.all(eigenvalues >= 0): +1465 warnings.warn("Covariance matrix is not positive semi-definite (Eigenvalues: " + str(eigenvalues) + ")", RuntimeWarning) +1466 +1467 return cov +1468 +1469 +1470def _smooth_eigenvalues(corr, E): +1471 """Eigenvalue smoothing as described in hep-lat/9412087 1472 -1473 -1474def _covariance_element(obs1, obs2): -1475 """Estimates the covariance of two Obs objects, neglecting autocorrelations.""" -1476 -1477 def calc_gamma(deltas1, deltas2, idx1, idx2, new_idx): -1478 deltas1 = _reduce_deltas(deltas1, idx1, new_idx) -1479 deltas2 = _reduce_deltas(deltas2, idx2, new_idx) -1480 return np.sum(deltas1 * deltas2) -1481 -1482 if set(obs1.names).isdisjoint(set(obs2.names)): -1483 return 0.0 -1484 -1485 if not hasattr(obs1, 'e_dvalue') or not hasattr(obs2, 'e_dvalue'): -1486 raise Exception('The gamma method has to be applied to both Obs first.') -1487 -1488 dvalue = 0.0 +1473 corr : np.ndarray +1474 correlation matrix +1475 E : integer +1476 Number of eigenvalues to be left substantially unchanged +1477 """ +1478 if not (2 < E < corr.shape[0] - 1): +1479 raise Exception(f"'E' has to be between 2 and the dimension of the correlation matrix minus 1 ({corr.shape[0] - 1}).") +1480 vals, vec = np.linalg.eigh(corr) +1481 lambda_min = np.mean(vals[:-E]) +1482 vals[vals < lambda_min] = lambda_min +1483 vals /= np.mean(vals) +1484 return vec @ np.diag(vals) @ vec.T +1485 +1486 +1487def _covariance_element(obs1, obs2): +1488 """Estimates the covariance of two Obs objects, neglecting autocorrelations.""" 1489 -1490 for e_name in obs1.mc_names: -1491 -1492 if e_name not in obs2.mc_names: -1493 continue +1490 def calc_gamma(deltas1, deltas2, idx1, idx2, new_idx): +1491 deltas1 = _reduce_deltas(deltas1, idx1, new_idx) +1492 deltas2 = _reduce_deltas(deltas2, idx2, new_idx) +1493 return np.sum(deltas1 * deltas2) 1494 -1495 idl_d = {} -1496 for r_name in obs1.e_content[e_name]: -1497 if r_name not in obs2.e_content[e_name]: -1498 continue -1499 idl_d[r_name] = _intersection_idx([obs1.idl[r_name], obs2.idl[r_name]]) +1495 if set(obs1.names).isdisjoint(set(obs2.names)): +1496 return 0.0 +1497 +1498 if not 
hasattr(obs1, 'e_dvalue') or not hasattr(obs2, 'e_dvalue'): +1499 raise Exception('The gamma method has to be applied to both Obs first.') 1500 -1501 gamma = 0.0 +1501 dvalue = 0.0 1502 -1503 for r_name in obs1.e_content[e_name]: -1504 if r_name not in obs2.e_content[e_name]: -1505 continue -1506 if len(idl_d[r_name]) == 0: -1507 continue -1508 gamma += calc_gamma(obs1.deltas[r_name], obs2.deltas[r_name], obs1.idl[r_name], obs2.idl[r_name], idl_d[r_name]) -1509 -1510 if gamma == 0.0: -1511 continue -1512 -1513 gamma_div = 0.0 -1514 for r_name in obs1.e_content[e_name]: -1515 if r_name not in obs2.e_content[e_name]: -1516 continue -1517 if len(idl_d[r_name]) == 0: +1503 for e_name in obs1.mc_names: +1504 +1505 if e_name not in obs2.mc_names: +1506 continue +1507 +1508 idl_d = {} +1509 for r_name in obs1.e_content[e_name]: +1510 if r_name not in obs2.e_content[e_name]: +1511 continue +1512 idl_d[r_name] = _intersection_idx([obs1.idl[r_name], obs2.idl[r_name]]) +1513 +1514 gamma = 0.0 +1515 +1516 for r_name in obs1.e_content[e_name]: +1517 if r_name not in obs2.e_content[e_name]: 1518 continue -1519 gamma_div += np.sqrt(calc_gamma(obs1.deltas[r_name], obs1.deltas[r_name], obs1.idl[r_name], obs1.idl[r_name], idl_d[r_name]) * calc_gamma(obs2.deltas[r_name], obs2.deltas[r_name], obs2.idl[r_name], obs2.idl[r_name], idl_d[r_name])) -1520 gamma /= gamma_div -1521 -1522 dvalue += gamma -1523 -1524 for e_name in obs1.cov_names: +1519 if len(idl_d[r_name]) == 0: +1520 continue +1521 gamma += calc_gamma(obs1.deltas[r_name], obs2.deltas[r_name], obs1.idl[r_name], obs2.idl[r_name], idl_d[r_name]) +1522 +1523 if gamma == 0.0: +1524 continue 1525 -1526 if e_name not in obs2.cov_names: -1527 continue -1528 -1529 dvalue += float(np.dot(np.transpose(obs1.covobs[e_name].grad), np.dot(obs1.covobs[e_name].cov, obs2.covobs[e_name].grad))) -1530 -1531 return dvalue -1532 -1533 -1534def import_jackknife(jacks, name, idl=None): -1535 """Imports jackknife samples and returns an Obs +1526 gamma_div = 0.0 +1527 for r_name in obs1.e_content[e_name]: +1528 if r_name not in obs2.e_content[e_name]: +1529 continue +1530 if len(idl_d[r_name]) == 0: +1531 continue +1532 gamma_div += np.sqrt(calc_gamma(obs1.deltas[r_name], obs1.deltas[r_name], obs1.idl[r_name], obs1.idl[r_name], idl_d[r_name]) * calc_gamma(obs2.deltas[r_name], obs2.deltas[r_name], obs2.idl[r_name], obs2.idl[r_name], idl_d[r_name])) +1533 gamma /= gamma_div +1534 +1535 dvalue += gamma 1536 -1537 Parameters -1538 ---------- -1539 jacks : numpy.ndarray -1540 numpy array containing the mean value as zeroth entry and -1541 the N jackknife samples as first to Nth entry. -1542 name : str -1543 name of the ensemble the samples are defined on. 
-1544 """ -1545 length = len(jacks) - 1 -1546 prj = (np.ones((length, length)) - (length - 1) * np.identity(length)) -1547 samples = jacks[1:] @ prj -1548 mean = np.mean(samples) -1549 new_obs = Obs([samples - mean], [name], idl=idl, means=[mean]) -1550 new_obs._value = jacks[0] -1551 return new_obs -1552 -1553 -1554def merge_obs(list_of_obs): -1555 """Combine all observables in list_of_obs into one new observable -1556 -1557 Parameters -1558 ---------- -1559 list_of_obs : list -1560 list of the Obs object to be combined -1561 -1562 Notes -1563 ----- -1564 It is not possible to combine obs which are based on the same replicum -1565 """ -1566 replist = [item for obs in list_of_obs for item in obs.names] -1567 if (len(replist) == len(set(replist))) is False: -1568 raise Exception('list_of_obs contains duplicate replica: %s' % (str(replist))) -1569 if any([len(o.cov_names) for o in list_of_obs]): -1570 raise Exception('Not possible to merge data that contains covobs!') -1571 new_dict = {} -1572 idl_dict = {} -1573 for o in list_of_obs: -1574 new_dict.update({key: o.deltas.get(key, 0) + o.r_values.get(key, 0) -1575 for key in set(o.deltas) | set(o.r_values)}) -1576 idl_dict.update({key: o.idl.get(key, 0) for key in set(o.deltas)}) -1577 -1578 names = sorted(new_dict.keys()) -1579 o = Obs([new_dict[name] for name in names], names, idl=[idl_dict[name] for name in names]) -1580 o.reweighted = np.max([oi.reweighted for oi in list_of_obs]) -1581 return o -1582 -1583 -1584def cov_Obs(means, cov, name, grad=None): -1585 """Create an Obs based on mean(s) and a covariance matrix -1586 -1587 Parameters -1588 ---------- -1589 mean : list of floats or float -1590 N mean value(s) of the new Obs -1591 cov : list or array -1592 2d (NxN) Covariance matrix, 1d diagonal entries or 0d covariance -1593 name : str -1594 identifier for the covariance matrix -1595 grad : list or array -1596 Gradient of the Covobs wrt. the means belonging to cov. -1597 """ -1598 -1599 def covobs_to_obs(co): -1600 """Make an Obs out of a Covobs -1601 -1602 Parameters -1603 ---------- -1604 co : Covobs -1605 Covobs to be embedded into the Obs -1606 """ -1607 o = Obs([], [], means=[]) -1608 o._value = co.value -1609 o.names.append(co.name) -1610 o._covobs[co.name] = co -1611 o._dvalue = np.sqrt(co.errsq()) -1612 return o -1613 -1614 ol = [] -1615 if isinstance(means, (float, int)): -1616 means = [means] -1617 -1618 for i in range(len(means)): -1619 ol.append(covobs_to_obs(Covobs(means[i], cov, name, pos=i, grad=grad))) -1620 if ol[0].covobs[name].N != len(means): -1621 raise Exception('You have to provide %d mean values!' % (ol[0].N)) -1622 if len(ol) == 1: -1623 return ol[0] -1624 return ol -1625 +1537 for e_name in obs1.cov_names: +1538 +1539 if e_name not in obs2.cov_names: +1540 continue +1541 +1542 dvalue += float(np.dot(np.transpose(obs1.covobs[e_name].grad), np.dot(obs1.covobs[e_name].cov, obs2.covobs[e_name].grad))) +1543 +1544 return dvalue +1545 +1546 +1547def import_jackknife(jacks, name, idl=None): +1548 """Imports jackknife samples and returns an Obs +1549 +1550 Parameters +1551 ---------- +1552 jacks : numpy.ndarray +1553 numpy array containing the mean value as zeroth entry and +1554 the N jackknife samples as first to Nth entry. +1555 name : str +1556 name of the ensemble the samples are defined on. 
+1557 """ +1558 length = len(jacks) - 1 +1559 prj = (np.ones((length, length)) - (length - 1) * np.identity(length)) +1560 samples = jacks[1:] @ prj +1561 mean = np.mean(samples) +1562 new_obs = Obs([samples - mean], [name], idl=idl, means=[mean]) +1563 new_obs._value = jacks[0] +1564 return new_obs +1565 +1566 +1567def merge_obs(list_of_obs): +1568 """Combine all observables in list_of_obs into one new observable +1569 +1570 Parameters +1571 ---------- +1572 list_of_obs : list +1573 list of the Obs object to be combined +1574 +1575 Notes +1576 ----- +1577 It is not possible to combine obs which are based on the same replicum +1578 """ +1579 replist = [item for obs in list_of_obs for item in obs.names] +1580 if (len(replist) == len(set(replist))) is False: +1581 raise Exception('list_of_obs contains duplicate replica: %s' % (str(replist))) +1582 if any([len(o.cov_names) for o in list_of_obs]): +1583 raise Exception('Not possible to merge data that contains covobs!') +1584 new_dict = {} +1585 idl_dict = {} +1586 for o in list_of_obs: +1587 new_dict.update({key: o.deltas.get(key, 0) + o.r_values.get(key, 0) +1588 for key in set(o.deltas) | set(o.r_values)}) +1589 idl_dict.update({key: o.idl.get(key, 0) for key in set(o.deltas)}) +1590 +1591 names = sorted(new_dict.keys()) +1592 o = Obs([new_dict[name] for name in names], names, idl=[idl_dict[name] for name in names]) +1593 o.reweighted = np.max([oi.reweighted for oi in list_of_obs]) +1594 return o +1595 +1596 +1597def cov_Obs(means, cov, name, grad=None): +1598 """Create an Obs based on mean(s) and a covariance matrix +1599 +1600 Parameters +1601 ---------- +1602 mean : list of floats or float +1603 N mean value(s) of the new Obs +1604 cov : list or array +1605 2d (NxN) Covariance matrix, 1d diagonal entries or 0d covariance +1606 name : str +1607 identifier for the covariance matrix +1608 grad : list or array +1609 Gradient of the Covobs wrt. the means belonging to cov. +1610 """ +1611 +1612 def covobs_to_obs(co): +1613 """Make an Obs out of a Covobs +1614 +1615 Parameters +1616 ---------- +1617 co : Covobs +1618 Covobs to be embedded into the Obs +1619 """ +1620 o = Obs([], [], means=[]) +1621 o._value = co.value +1622 o.names.append(co.name) +1623 o._covobs[co.name] = co +1624 o._dvalue = np.sqrt(co.errsq()) +1625 return o 1626 -1627def _determine_gap(o, e_content, e_name): -1628 gaps = [] -1629 for r_name in e_content[e_name]: -1630 if isinstance(o.idl[r_name], range): -1631 gaps.append(o.idl[r_name].step) -1632 else: -1633 gaps.append(np.min(np.diff(o.idl[r_name]))) -1634 -1635 gap = min(gaps) -1636 if not np.all([gi % gap == 0 for gi in gaps]): -1637 raise Exception(f"Replica for ensemble {e_name} do not have a common spacing.", gaps) +1627 ol = [] +1628 if isinstance(means, (float, int)): +1629 means = [means] +1630 +1631 for i in range(len(means)): +1632 ol.append(covobs_to_obs(Covobs(means[i], cov, name, pos=i, grad=grad))) +1633 if ol[0].covobs[name].N != len(means): +1634 raise Exception('You have to provide %d mean values!' 
% (ol[0].N)) +1635 if len(ol) == 1: +1636 return ol[0] +1637 return ol 1638 -1639 return gap +1639 +1640def _determine_gap(o, e_content, e_name): +1641 gaps = [] +1642 for r_name in e_content[e_name]: +1643 if isinstance(o.idl[r_name], range): +1644 gaps.append(o.idl[r_name].step) +1645 else: +1646 gaps.append(np.min(np.diff(o.idl[r_name]))) +1647 +1648 gap = min(gaps) +1649 if not np.all([gi % gap == 0 for gi in gaps]): +1650 raise Exception(f"Replica for ensemble {e_name} do not have a common spacing.", gaps) +1651 +1652 return gap @@ -2538,167 +2551,176 @@ 696 def __str__(self): 697 return _format_uncertainty(self.value, self._dvalue) 698 -699 def __hash__(self): -700 hash_tuple = (np.array([self.value]).astype(np.float32).data.tobytes(),) -701 hash_tuple += tuple([o.astype(np.float32).data.tobytes() for o in self.deltas.values()]) -702 hash_tuple += tuple([np.array([o.errsq()]).astype(np.float32).data.tobytes() for o in self.covobs.values()]) -703 hash_tuple += tuple([o.encode() for o in self.names]) -704 m = hashlib.md5() -705 [m.update(o) for o in hash_tuple] -706 return int(m.hexdigest(), 16) & 0xFFFFFFFF +699 def __format__(self, format_type): +700 my_str = _format_uncertainty(self.value, self._dvalue, +701 significance=int(float(format_type.replace("+", "").replace("-", "")))) +702 for char in ["+", " "]: +703 if format_type.startswith(char): +704 if my_str[0] != "-": +705 my_str = char + my_str +706 return my_str 707 -708 # Overload comparisons -709 def __lt__(self, other): -710 return self.value < other -711 -712 def __le__(self, other): -713 return self.value <= other -714 -715 def __gt__(self, other): -716 return self.value > other -717 -718 def __ge__(self, other): -719 return self.value >= other +708 def __hash__(self): +709 hash_tuple = (np.array([self.value]).astype(np.float32).data.tobytes(),) +710 hash_tuple += tuple([o.astype(np.float32).data.tobytes() for o in self.deltas.values()]) +711 hash_tuple += tuple([np.array([o.errsq()]).astype(np.float32).data.tobytes() for o in self.covobs.values()]) +712 hash_tuple += tuple([o.encode() for o in self.names]) +713 m = hashlib.md5() +714 [m.update(o) for o in hash_tuple] +715 return int(m.hexdigest(), 16) & 0xFFFFFFFF +716 +717 # Overload comparisons +718 def __lt__(self, other): +719 return self.value < other 720 -721 def __eq__(self, other): -722 return (self - other).is_zero() +721 def __le__(self, other): +722 return self.value <= other 723 -724 def __ne__(self, other): -725 return not (self - other).is_zero() +724 def __gt__(self, other): +725 return self.value > other 726 -727 # Overload math operations -728 def __add__(self, y): -729 if isinstance(y, Obs): -730 return derived_observable(lambda x, **kwargs: x[0] + x[1], [self, y], man_grad=[1, 1]) -731 else: -732 if isinstance(y, np.ndarray): -733 return np.array([self + o for o in y]) -734 elif y.__class__.__name__ in ['Corr', 'CObs']: -735 return NotImplemented -736 else: -737 return derived_observable(lambda x, **kwargs: x[0] + y, [self], man_grad=[1]) -738 -739 def __radd__(self, y): -740 return self + y -741 -742 def __mul__(self, y): -743 if isinstance(y, Obs): -744 return derived_observable(lambda x, **kwargs: x[0] * x[1], [self, y], man_grad=[y.value, self.value]) -745 else: -746 if isinstance(y, np.ndarray): -747 return np.array([self * o for o in y]) -748 elif isinstance(y, complex): -749 return CObs(self * y.real, self * y.imag) -750 elif y.__class__.__name__ in ['Corr', 'CObs']: -751 return NotImplemented -752 else: -753 return derived_observable(lambda x, 
**kwargs: x[0] * y, [self], man_grad=[y]) -754 -755 def __rmul__(self, y): -756 return self * y -757 -758 def __sub__(self, y): -759 if isinstance(y, Obs): -760 return derived_observable(lambda x, **kwargs: x[0] - x[1], [self, y], man_grad=[1, -1]) -761 else: -762 if isinstance(y, np.ndarray): -763 return np.array([self - o for o in y]) -764 elif y.__class__.__name__ in ['Corr', 'CObs']: -765 return NotImplemented -766 else: -767 return derived_observable(lambda x, **kwargs: x[0] - y, [self], man_grad=[1]) -768 -769 def __rsub__(self, y): -770 return -1 * (self - y) -771 -772 def __pos__(self): -773 return self -774 -775 def __neg__(self): -776 return -1 * self +727 def __ge__(self, other): +728 return self.value >= other +729 +730 def __eq__(self, other): +731 return (self - other).is_zero() +732 +733 def __ne__(self, other): +734 return not (self - other).is_zero() +735 +736 # Overload math operations +737 def __add__(self, y): +738 if isinstance(y, Obs): +739 return derived_observable(lambda x, **kwargs: x[0] + x[1], [self, y], man_grad=[1, 1]) +740 else: +741 if isinstance(y, np.ndarray): +742 return np.array([self + o for o in y]) +743 elif y.__class__.__name__ in ['Corr', 'CObs']: +744 return NotImplemented +745 else: +746 return derived_observable(lambda x, **kwargs: x[0] + y, [self], man_grad=[1]) +747 +748 def __radd__(self, y): +749 return self + y +750 +751 def __mul__(self, y): +752 if isinstance(y, Obs): +753 return derived_observable(lambda x, **kwargs: x[0] * x[1], [self, y], man_grad=[y.value, self.value]) +754 else: +755 if isinstance(y, np.ndarray): +756 return np.array([self * o for o in y]) +757 elif isinstance(y, complex): +758 return CObs(self * y.real, self * y.imag) +759 elif y.__class__.__name__ in ['Corr', 'CObs']: +760 return NotImplemented +761 else: +762 return derived_observable(lambda x, **kwargs: x[0] * y, [self], man_grad=[y]) +763 +764 def __rmul__(self, y): +765 return self * y +766 +767 def __sub__(self, y): +768 if isinstance(y, Obs): +769 return derived_observable(lambda x, **kwargs: x[0] - x[1], [self, y], man_grad=[1, -1]) +770 else: +771 if isinstance(y, np.ndarray): +772 return np.array([self - o for o in y]) +773 elif y.__class__.__name__ in ['Corr', 'CObs']: +774 return NotImplemented +775 else: +776 return derived_observable(lambda x, **kwargs: x[0] - y, [self], man_grad=[1]) 777 -778 def __truediv__(self, y): -779 if isinstance(y, Obs): -780 return derived_observable(lambda x, **kwargs: x[0] / x[1], [self, y], man_grad=[1 / y.value, - self.value / y.value ** 2]) -781 else: -782 if isinstance(y, np.ndarray): -783 return np.array([self / o for o in y]) -784 elif y.__class__.__name__ in ['Corr', 'CObs']: -785 return NotImplemented -786 else: -787 return derived_observable(lambda x, **kwargs: x[0] / y, [self], man_grad=[1 / y]) -788 -789 def __rtruediv__(self, y): -790 if isinstance(y, Obs): -791 return derived_observable(lambda x, **kwargs: x[0] / x[1], [y, self], man_grad=[1 / self.value, - y.value / self.value ** 2]) -792 else: -793 if isinstance(y, np.ndarray): -794 return np.array([o / self for o in y]) -795 elif y.__class__.__name__ in ['Corr', 'CObs']: -796 return NotImplemented -797 else: -798 return derived_observable(lambda x, **kwargs: y / x[0], [self], man_grad=[-y / self.value ** 2]) -799 -800 def __pow__(self, y): -801 if isinstance(y, Obs): -802 return derived_observable(lambda x: x[0] ** x[1], [self, y]) -803 else: -804 return derived_observable(lambda x: x[0] ** y, [self]) -805 -806 def __rpow__(self, y): -807 if isinstance(y, 
Obs): -808 return derived_observable(lambda x: x[0] ** x[1], [y, self]) -809 else: -810 return derived_observable(lambda x: y ** x[0], [self]) -811 -812 def __abs__(self): -813 return derived_observable(lambda x: anp.abs(x[0]), [self]) +778 def __rsub__(self, y): +779 return -1 * (self - y) +780 +781 def __pos__(self): +782 return self +783 +784 def __neg__(self): +785 return -1 * self +786 +787 def __truediv__(self, y): +788 if isinstance(y, Obs): +789 return derived_observable(lambda x, **kwargs: x[0] / x[1], [self, y], man_grad=[1 / y.value, - self.value / y.value ** 2]) +790 else: +791 if isinstance(y, np.ndarray): +792 return np.array([self / o for o in y]) +793 elif y.__class__.__name__ in ['Corr', 'CObs']: +794 return NotImplemented +795 else: +796 return derived_observable(lambda x, **kwargs: x[0] / y, [self], man_grad=[1 / y]) +797 +798 def __rtruediv__(self, y): +799 if isinstance(y, Obs): +800 return derived_observable(lambda x, **kwargs: x[0] / x[1], [y, self], man_grad=[1 / self.value, - y.value / self.value ** 2]) +801 else: +802 if isinstance(y, np.ndarray): +803 return np.array([o / self for o in y]) +804 elif y.__class__.__name__ in ['Corr', 'CObs']: +805 return NotImplemented +806 else: +807 return derived_observable(lambda x, **kwargs: y / x[0], [self], man_grad=[-y / self.value ** 2]) +808 +809 def __pow__(self, y): +810 if isinstance(y, Obs): +811 return derived_observable(lambda x: x[0] ** x[1], [self, y]) +812 else: +813 return derived_observable(lambda x: x[0] ** y, [self]) 814 -815 # Overload numpy functions -816 def sqrt(self): -817 return derived_observable(lambda x, **kwargs: np.sqrt(x[0]), [self], man_grad=[1 / 2 / np.sqrt(self.value)]) -818 -819 def log(self): -820 return derived_observable(lambda x, **kwargs: np.log(x[0]), [self], man_grad=[1 / self.value]) -821 -822 def exp(self): -823 return derived_observable(lambda x, **kwargs: np.exp(x[0]), [self], man_grad=[np.exp(self.value)]) -824 -825 def sin(self): -826 return derived_observable(lambda x, **kwargs: np.sin(x[0]), [self], man_grad=[np.cos(self.value)]) +815 def __rpow__(self, y): +816 if isinstance(y, Obs): +817 return derived_observable(lambda x: x[0] ** x[1], [y, self]) +818 else: +819 return derived_observable(lambda x: y ** x[0], [self]) +820 +821 def __abs__(self): +822 return derived_observable(lambda x: anp.abs(x[0]), [self]) +823 +824 # Overload numpy functions +825 def sqrt(self): +826 return derived_observable(lambda x, **kwargs: np.sqrt(x[0]), [self], man_grad=[1 / 2 / np.sqrt(self.value)]) 827 -828 def cos(self): -829 return derived_observable(lambda x, **kwargs: np.cos(x[0]), [self], man_grad=[-np.sin(self.value)]) +828 def log(self): +829 return derived_observable(lambda x, **kwargs: np.log(x[0]), [self], man_grad=[1 / self.value]) 830 -831 def tan(self): -832 return derived_observable(lambda x, **kwargs: np.tan(x[0]), [self], man_grad=[1 / np.cos(self.value) ** 2]) +831 def exp(self): +832 return derived_observable(lambda x, **kwargs: np.exp(x[0]), [self], man_grad=[np.exp(self.value)]) 833 -834 def arcsin(self): -835 return derived_observable(lambda x: anp.arcsin(x[0]), [self]) +834 def sin(self): +835 return derived_observable(lambda x, **kwargs: np.sin(x[0]), [self], man_grad=[np.cos(self.value)]) 836 -837 def arccos(self): -838 return derived_observable(lambda x: anp.arccos(x[0]), [self]) +837 def cos(self): +838 return derived_observable(lambda x, **kwargs: np.cos(x[0]), [self], man_grad=[-np.sin(self.value)]) 839 -840 def arctan(self): -841 return derived_observable(lambda x: 
anp.arctan(x[0]), [self]) +840 def tan(self): +841 return derived_observable(lambda x, **kwargs: np.tan(x[0]), [self], man_grad=[1 / np.cos(self.value) ** 2]) 842 -843 def sinh(self): -844 return derived_observable(lambda x, **kwargs: np.sinh(x[0]), [self], man_grad=[np.cosh(self.value)]) +843 def arcsin(self): +844 return derived_observable(lambda x: anp.arcsin(x[0]), [self]) 845 -846 def cosh(self): -847 return derived_observable(lambda x, **kwargs: np.cosh(x[0]), [self], man_grad=[np.sinh(self.value)]) +846 def arccos(self): +847 return derived_observable(lambda x: anp.arccos(x[0]), [self]) 848 -849 def tanh(self): -850 return derived_observable(lambda x, **kwargs: np.tanh(x[0]), [self], man_grad=[1 / np.cosh(self.value) ** 2]) +849 def arctan(self): +850 return derived_observable(lambda x: anp.arctan(x[0]), [self]) 851 -852 def arcsinh(self): -853 return derived_observable(lambda x: anp.arcsinh(x[0]), [self]) +852 def sinh(self): +853 return derived_observable(lambda x, **kwargs: np.sinh(x[0]), [self], man_grad=[np.cosh(self.value)]) 854 -855 def arccosh(self): -856 return derived_observable(lambda x: anp.arccosh(x[0]), [self]) +855 def cosh(self): +856 return derived_observable(lambda x, **kwargs: np.cosh(x[0]), [self], man_grad=[np.sinh(self.value)]) 857 -858 def arctanh(self): -859 return derived_observable(lambda x: anp.arctanh(x[0]), [self]) +858 def tanh(self): +859 return derived_observable(lambda x, **kwargs: np.tanh(x[0]), [self], man_grad=[1 / np.cosh(self.value) ** 2]) +860 +861 def arcsinh(self): +862 return derived_observable(lambda x: anp.arcsinh(x[0]), [self]) +863 +864 def arccosh(self): +865 return derived_observable(lambda x: anp.arccosh(x[0]), [self]) +866 +867 def arctanh(self): +868 return derived_observable(lambda x: anp.arctanh(x[0]), [self]) @@ -3856,8 +3878,8 @@ should agree with samples from a full jackknife analysis up to O(1/N). -
816    def sqrt(self):
-817        return derived_observable(lambda x, **kwargs: np.sqrt(x[0]), [self], man_grad=[1 / 2 / np.sqrt(self.value)])
+            
825    def sqrt(self):
+826        return derived_observable(lambda x, **kwargs: np.sqrt(x[0]), [self], man_grad=[1 / 2 / np.sqrt(self.value)])
 
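The hunk above (and the analogous ones below) route error propagation through derived_observable with a manually supplied derivative (man_grad). A minimal sketch of the same pattern in user code, assuming pyerrors is importable as pe; the ensemble name 'ensemble_A' and the samples are invented for illustration:

    import numpy as np
    import pyerrors as pe

    # Synthetic primary observable; name and samples are placeholders.
    obs = pe.Obs([np.random.normal(1.0, 0.1, 500)], ['ensemble_A'])

    # Same pattern as Obs.sqrt: hand over d/dx sqrt(x) = 1 / (2 sqrt(x)) via man_grad.
    # The lambda takes **kwargs because derived_observable forwards its kwargs to func.
    sqrt_obs = pe.derived_observable(lambda x, **kwargs: np.sqrt(x[0]), [obs],
                                     man_grad=[1 / 2 / np.sqrt(obs.value)])
    sqrt_obs.gamma_method()  # the error is only available after the gamma method ran
    print(sqrt_obs)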
@@ -3875,8 +3897,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
819    def log(self):
-820        return derived_observable(lambda x, **kwargs: np.log(x[0]), [self], man_grad=[1 / self.value])
+            
828    def log(self):
+829        return derived_observable(lambda x, **kwargs: np.log(x[0]), [self], man_grad=[1 / self.value])
 
@@ -3894,8 +3916,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
822    def exp(self):
-823        return derived_observable(lambda x, **kwargs: np.exp(x[0]), [self], man_grad=[np.exp(self.value)])
+            
831    def exp(self):
+832        return derived_observable(lambda x, **kwargs: np.exp(x[0]), [self], man_grad=[np.exp(self.value)])
 
@@ -3913,8 +3935,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
825    def sin(self):
-826        return derived_observable(lambda x, **kwargs: np.sin(x[0]), [self], man_grad=[np.cos(self.value)])
+            
834    def sin(self):
+835        return derived_observable(lambda x, **kwargs: np.sin(x[0]), [self], man_grad=[np.cos(self.value)])
 
@@ -3932,8 +3954,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
828    def cos(self):
-829        return derived_observable(lambda x, **kwargs: np.cos(x[0]), [self], man_grad=[-np.sin(self.value)])
+            
837    def cos(self):
+838        return derived_observable(lambda x, **kwargs: np.cos(x[0]), [self], man_grad=[-np.sin(self.value)])
 
@@ -3951,8 +3973,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
831    def tan(self):
-832        return derived_observable(lambda x, **kwargs: np.tan(x[0]), [self], man_grad=[1 / np.cos(self.value) ** 2])
+            
840    def tan(self):
+841        return derived_observable(lambda x, **kwargs: np.tan(x[0]), [self], man_grad=[1 / np.cos(self.value) ** 2])
 
@@ -3970,8 +3992,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
834    def arcsin(self):
-835        return derived_observable(lambda x: anp.arcsin(x[0]), [self])
+            
843    def arcsin(self):
+844        return derived_observable(lambda x: anp.arcsin(x[0]), [self])
 
@@ -3989,8 +4011,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
837    def arccos(self):
-838        return derived_observable(lambda x: anp.arccos(x[0]), [self])
+            
846    def arccos(self):
+847        return derived_observable(lambda x: anp.arccos(x[0]), [self])
 
@@ -4008,8 +4030,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
840    def arctan(self):
-841        return derived_observable(lambda x: anp.arctan(x[0]), [self])
+            
849    def arctan(self):
+850        return derived_observable(lambda x: anp.arctan(x[0]), [self])
 
@@ -4027,8 +4049,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
843    def sinh(self):
-844        return derived_observable(lambda x, **kwargs: np.sinh(x[0]), [self], man_grad=[np.cosh(self.value)])
+            
852    def sinh(self):
+853        return derived_observable(lambda x, **kwargs: np.sinh(x[0]), [self], man_grad=[np.cosh(self.value)])
 
@@ -4046,8 +4068,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
846    def cosh(self):
-847        return derived_observable(lambda x, **kwargs: np.cosh(x[0]), [self], man_grad=[np.sinh(self.value)])
+            
855    def cosh(self):
+856        return derived_observable(lambda x, **kwargs: np.cosh(x[0]), [self], man_grad=[np.sinh(self.value)])
 
@@ -4065,8 +4087,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
849    def tanh(self):
-850        return derived_observable(lambda x, **kwargs: np.tanh(x[0]), [self], man_grad=[1 / np.cosh(self.value) ** 2])
+            
858    def tanh(self):
+859        return derived_observable(lambda x, **kwargs: np.tanh(x[0]), [self], man_grad=[1 / np.cosh(self.value) ** 2])
 
@@ -4084,8 +4106,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
852    def arcsinh(self):
-853        return derived_observable(lambda x: anp.arcsinh(x[0]), [self])
+            
861    def arcsinh(self):
+862        return derived_observable(lambda x: anp.arcsinh(x[0]), [self])
 
@@ -4103,8 +4125,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
855    def arccosh(self):
-856        return derived_observable(lambda x: anp.arccosh(x[0]), [self])
+            
864    def arccosh(self):
+865        return derived_observable(lambda x: anp.arccosh(x[0]), [self])
 
@@ -4122,8 +4144,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
858    def arctanh(self):
-859        return derived_observable(lambda x: anp.arctanh(x[0]), [self])
+            
867    def arctanh(self):
+868        return derived_observable(lambda x: anp.arctanh(x[0]), [self])
 
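The inverse functions in the hunks above (arcsin, arccos, arctan and the inverse hyperbolic ones) pass no man_grad, so derived_observable falls back to automatic differentiation, which requires the autograd-wrapped numpy inside the function. A sketch with invented data:

    import autograd.numpy as anp  # autograd wrapper needed for the automatic derivative
    import numpy as np
    import pyerrors as pe

    obs = pe.Obs([np.random.normal(0.5, 0.05, 500)], ['ensemble_A'])  # placeholder data

    # No man_grad given: the jacobian is computed by autograd, hence anp instead of np.
    atanh_obs = pe.derived_observable(lambda x: anp.arctanh(x[0]), [obs])
    atanh_obs.gamma_method()
    print(atanh_obs)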
@@ -4142,115 +4164,115 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
862class CObs:
-863    """Class for a complex valued observable."""
-864    __slots__ = ['_real', '_imag', 'tag']
-865
-866    def __init__(self, real, imag=0.0):
-867        self._real = real
-868        self._imag = imag
-869        self.tag = None
-870
-871    @property
-872    def real(self):
-873        return self._real
+            
871class CObs:
+872    """Class for a complex valued observable."""
+873    __slots__ = ['_real', '_imag', 'tag']
 874
-875    @property
-876    def imag(self):
-877        return self._imag
-878
-879    def gamma_method(self, **kwargs):
-880        """Executes the gamma_method for the real and the imaginary part."""
-881        if isinstance(self.real, Obs):
-882            self.real.gamma_method(**kwargs)
-883        if isinstance(self.imag, Obs):
-884            self.imag.gamma_method(**kwargs)
-885
-886    def is_zero(self):
-887        """Checks whether both real and imaginary part are zero within machine precision."""
-888        return self.real == 0.0 and self.imag == 0.0
-889
-890    def conjugate(self):
-891        return CObs(self.real, -self.imag)
-892
-893    def __add__(self, other):
-894        if isinstance(other, np.ndarray):
-895            return other + self
-896        elif hasattr(other, 'real') and hasattr(other, 'imag'):
-897            return CObs(self.real + other.real,
-898                        self.imag + other.imag)
-899        else:
-900            return CObs(self.real + other, self.imag)
+875    def __init__(self, real, imag=0.0):
+876        self._real = real
+877        self._imag = imag
+878        self.tag = None
+879
+880    @property
+881    def real(self):
+882        return self._real
+883
+884    @property
+885    def imag(self):
+886        return self._imag
+887
+888    def gamma_method(self, **kwargs):
+889        """Executes the gamma_method for the real and the imaginary part."""
+890        if isinstance(self.real, Obs):
+891            self.real.gamma_method(**kwargs)
+892        if isinstance(self.imag, Obs):
+893            self.imag.gamma_method(**kwargs)
+894
+895    def is_zero(self):
+896        """Checks whether both real and imaginary part are zero within machine precision."""
+897        return self.real == 0.0 and self.imag == 0.0
+898
+899    def conjugate(self):
+900        return CObs(self.real, -self.imag)
 901
-902    def __radd__(self, y):
-903        return self + y
-904
-905    def __sub__(self, other):
-906        if isinstance(other, np.ndarray):
-907            return -1 * (other - self)
-908        elif hasattr(other, 'real') and hasattr(other, 'imag'):
-909            return CObs(self.real - other.real, self.imag - other.imag)
-910        else:
-911            return CObs(self.real - other, self.imag)
-912
-913    def __rsub__(self, other):
-914        return -1 * (self - other)
-915
-916    def __mul__(self, other):
-917        if isinstance(other, np.ndarray):
-918            return other * self
-919        elif hasattr(other, 'real') and hasattr(other, 'imag'):
-920            if all(isinstance(i, Obs) for i in [self.real, self.imag, other.real, other.imag]):
-921                return CObs(derived_observable(lambda x, **kwargs: x[0] * x[1] - x[2] * x[3],
-922                                               [self.real, other.real, self.imag, other.imag],
-923                                               man_grad=[other.real.value, self.real.value, -other.imag.value, -self.imag.value]),
-924                            derived_observable(lambda x, **kwargs: x[2] * x[1] + x[0] * x[3],
-925                                               [self.real, other.real, self.imag, other.imag],
-926                                               man_grad=[other.imag.value, self.imag.value, other.real.value, self.real.value]))
-927            elif getattr(other, 'imag', 0) != 0:
-928                return CObs(self.real * other.real - self.imag * other.imag,
-929                            self.imag * other.real + self.real * other.imag)
-930            else:
-931                return CObs(self.real * other.real, self.imag * other.real)
-932        else:
-933            return CObs(self.real * other, self.imag * other)
-934
-935    def __rmul__(self, other):
-936        return self * other
-937
-938    def __truediv__(self, other):
-939        if isinstance(other, np.ndarray):
-940            return 1 / (other / self)
-941        elif hasattr(other, 'real') and hasattr(other, 'imag'):
-942            r = other.real ** 2 + other.imag ** 2
-943            return CObs((self.real * other.real + self.imag * other.imag) / r, (self.imag * other.real - self.real * other.imag) / r)
-944        else:
-945            return CObs(self.real / other, self.imag / other)
+902    def __add__(self, other):
+903        if isinstance(other, np.ndarray):
+904            return other + self
+905        elif hasattr(other, 'real') and hasattr(other, 'imag'):
+906            return CObs(self.real + other.real,
+907                        self.imag + other.imag)
+908        else:
+909            return CObs(self.real + other, self.imag)
+910
+911    def __radd__(self, y):
+912        return self + y
+913
+914    def __sub__(self, other):
+915        if isinstance(other, np.ndarray):
+916            return -1 * (other - self)
+917        elif hasattr(other, 'real') and hasattr(other, 'imag'):
+918            return CObs(self.real - other.real, self.imag - other.imag)
+919        else:
+920            return CObs(self.real - other, self.imag)
+921
+922    def __rsub__(self, other):
+923        return -1 * (self - other)
+924
+925    def __mul__(self, other):
+926        if isinstance(other, np.ndarray):
+927            return other * self
+928        elif hasattr(other, 'real') and hasattr(other, 'imag'):
+929            if all(isinstance(i, Obs) for i in [self.real, self.imag, other.real, other.imag]):
+930                return CObs(derived_observable(lambda x, **kwargs: x[0] * x[1] - x[2] * x[3],
+931                                               [self.real, other.real, self.imag, other.imag],
+932                                               man_grad=[other.real.value, self.real.value, -other.imag.value, -self.imag.value]),
+933                            derived_observable(lambda x, **kwargs: x[2] * x[1] + x[0] * x[3],
+934                                               [self.real, other.real, self.imag, other.imag],
+935                                               man_grad=[other.imag.value, self.imag.value, other.real.value, self.real.value]))
+936            elif getattr(other, 'imag', 0) != 0:
+937                return CObs(self.real * other.real - self.imag * other.imag,
+938                            self.imag * other.real + self.real * other.imag)
+939            else:
+940                return CObs(self.real * other.real, self.imag * other.real)
+941        else:
+942            return CObs(self.real * other, self.imag * other)
+943
+944    def __rmul__(self, other):
+945        return self * other
 946
-947    def __rtruediv__(self, other):
-948        r = self.real ** 2 + self.imag ** 2
-949        if hasattr(other, 'real') and hasattr(other, 'imag'):
-950            return CObs((self.real * other.real + self.imag * other.imag) / r, (self.real * other.imag - self.imag * other.real) / r)
-951        else:
-952            return CObs(self.real * other / r, -self.imag * other / r)
-953
-954    def __abs__(self):
-955        return np.sqrt(self.real**2 + self.imag**2)
-956
-957    def __pos__(self):
-958        return self
-959
-960    def __neg__(self):
-961        return -1 * self
+947    def __truediv__(self, other):
+948        if isinstance(other, np.ndarray):
+949            return 1 / (other / self)
+950        elif hasattr(other, 'real') and hasattr(other, 'imag'):
+951            r = other.real ** 2 + other.imag ** 2
+952            return CObs((self.real * other.real + self.imag * other.imag) / r, (self.imag * other.real - self.real * other.imag) / r)
+953        else:
+954            return CObs(self.real / other, self.imag / other)
+955
+956    def __rtruediv__(self, other):
+957        r = self.real ** 2 + self.imag ** 2
+958        if hasattr(other, 'real') and hasattr(other, 'imag'):
+959            return CObs((self.real * other.real + self.imag * other.imag) / r, (self.real * other.imag - self.imag * other.real) / r)
+960        else:
+961            return CObs(self.real * other / r, -self.imag * other / r)
 962
-963    def __eq__(self, other):
-964        return self.real == other.real and self.imag == other.imag
+963    def __abs__(self):
+964        return np.sqrt(self.real**2 + self.imag**2)
 965
-966    def __str__(self):
-967        return '(' + str(self.real) + int(self.imag >= 0.0) * '+' + str(self.imag) + 'j)'
+966    def __pos__(self):
+967        return self
 968
-969    def __repr__(self):
-970        return 'CObs[' + str(self) + ']'
+969    def __neg__(self):
+970        return -1 * self
+971
+972    def __eq__(self, other):
+973        return self.real == other.real and self.imag == other.imag
+974
+975    def __str__(self):
+976        return '(' + str(self.real) + int(self.imag >= 0.0) * '+' + str(self.imag) + 'j)'
+977
+978    def __repr__(self):
+979        return 'CObs[' + str(self) + ']'
 
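A short usage sketch for the CObs class listed above, combining two real observables into one complex one; the ensemble name and samples are again placeholders:

    import numpy as np
    import pyerrors as pe

    re_part = pe.Obs([np.random.normal(1.0, 0.1, 500)], ['ensemble_A'])
    im_part = pe.Obs([np.random.normal(0.2, 0.1, 500)], ['ensemble_A'])

    z = pe.CObs(re_part, im_part)
    w = z * z.conjugate()  # |z|^2; the imaginary part vanishes up to rounding
    w.gamma_method()       # runs the gamma method on real and imaginary part
    print(w.real, w.imag)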
@@ -4268,10 +4290,10 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
866    def __init__(self, real, imag=0.0):
-867        self._real = real
-868        self._imag = imag
-869        self.tag = None
+            
875    def __init__(self, real, imag=0.0):
+876        self._real = real
+877        self._imag = imag
+878        self.tag = None
 
@@ -4289,12 +4311,12 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
879    def gamma_method(self, **kwargs):
-880        """Executes the gamma_method for the real and the imaginary part."""
-881        if isinstance(self.real, Obs):
-882            self.real.gamma_method(**kwargs)
-883        if isinstance(self.imag, Obs):
-884            self.imag.gamma_method(**kwargs)
+            
888    def gamma_method(self, **kwargs):
+889        """Executes the gamma_method for the real and the imaginary part."""
+890        if isinstance(self.real, Obs):
+891            self.real.gamma_method(**kwargs)
+892        if isinstance(self.imag, Obs):
+893            self.imag.gamma_method(**kwargs)
 
@@ -4314,9 +4336,9 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
886    def is_zero(self):
-887        """Checks whether both real and imaginary part are zero within machine precision."""
-888        return self.real == 0.0 and self.imag == 0.0
+            
895    def is_zero(self):
+896        """Checks whether both real and imaginary part are zero within machine precision."""
+897        return self.real == 0.0 and self.imag == 0.0
 
@@ -4336,8 +4358,8 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
890    def conjugate(self):
-891        return CObs(self.real, -self.imag)
+            
899    def conjugate(self):
+900        return CObs(self.real, -self.imag)
 
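The conjugate shown above is effectively what __truediv__ uses: dividing by a complex observable multiplies by the conjugate and divides by the squared modulus, z/w = z * conj(w) / (Re(w)^2 + Im(w)^2). A quick numerical cross-check under the same illustrative setup as before (helper and names are not part of the library):

    import numpy as np
    import pyerrors as pe

    def cobs(mu_re, mu_im):  # helper for this sketch only
        return pe.CObs(pe.Obs([np.random.normal(mu_re, 0.1, 500)], ['ensemble_A']),
                       pe.Obs([np.random.normal(mu_im, 0.1, 500)], ['ensemble_A']))

    z, w = cobs(1.0, 0.3), cobs(0.8, -0.2)
    direct = z / w
    manual = z * w.conjugate() / (w.real ** 2 + w.imag ** 2)
    print((direct - manual).is_zero())  # expected: True up to machine precision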
@@ -4356,174 +4378,174 @@ should agree with samples from a full jackknife analysis up to O(1/N).
-
1099def derived_observable(func, data, array_mode=False, **kwargs):
-1100    """Construct a derived Obs according to func(data, **kwargs) using automatic differentiation.
-1101
-1102    Parameters
-1103    ----------
-1104    func : object
-1105        arbitrary function of the form func(data, **kwargs). For the
-1106        automatic differentiation to work, all numpy functions have to have
-1107        the autograd wrapper (use 'import autograd.numpy as anp').
-1108    data : list
-1109        list of Obs, e.g. [obs1, obs2, obs3].
-1110    num_grad : bool
-1111        if True, numerical derivatives are used instead of autograd
-1112        (default False). To control the numerical differentiation the
-1113        kwargs of numdifftools.step_generators.MaxStepGenerator
-1114        can be used.
-1115    man_grad : list
-1116        manually supply a list or an array which contains the jacobian
-1117        of func. Use cautiously; supplying the wrong derivative will
-1118        not be intercepted.
-1119
-1120    Notes
-1121    -----
-1122    For simple mathematical operations it can be practical to use anonymous
-1123    functions. For the ratio of two observables one can e.g. use
-1124
-1125    new_obs = derived_observable(lambda x: x[0] / x[1], [obs1, obs2])
-1126    """
-1127
-1128    data = np.asarray(data)
-1129    raveled_data = data.ravel()
-1130
-1131    # Workaround for matrix operations containing non Obs data
-1132    if not all(isinstance(x, Obs) for x in raveled_data):
-1133        for i in range(len(raveled_data)):
-1134            if isinstance(raveled_data[i], (int, float)):
-1135                raveled_data[i] = cov_Obs(raveled_data[i], 0.0, "###dummy_covobs###")
-1136
-1137    allcov = {}
-1138    for o in raveled_data:
-1139        for name in o.cov_names:
-1140            if name in allcov:
-1141                if not np.allclose(allcov[name], o.covobs[name].cov):
-1142                    raise Exception('Inconsistent covariance matrices for %s!' % (name))
-1143            else:
-1144                allcov[name] = o.covobs[name].cov
-1145
-1146    n_obs = len(raveled_data)
-1147    new_names = sorted(set([y for x in [o.names for o in raveled_data] for y in x]))
-1148    new_cov_names = sorted(set([y for x in [o.cov_names for o in raveled_data] for y in x]))
-1149    new_sample_names = sorted(set(new_names) - set(new_cov_names))
-1150
-1151    reweighted = len(list(filter(lambda o: o.reweighted is True, raveled_data))) > 0
-1152
-1153    if data.ndim == 1:
-1154        values = np.array([o.value for o in data])
-1155    else:
-1156        values = np.vectorize(lambda x: x.value)(data)
-1157
-1158    new_values = func(values, **kwargs)
-1159
-1160    multi = int(isinstance(new_values, np.ndarray))
-1161
-1162    new_r_values = {}
-1163    new_idl_d = {}
-1164    for name in new_sample_names:
-1165        idl = []
-1166        tmp_values = np.zeros(n_obs)
-1167        for i, item in enumerate(raveled_data):
-1168            tmp_values[i] = item.r_values.get(name, item.value)
-1169            tmp_idl = item.idl.get(name)
-1170            if tmp_idl is not None:
-1171                idl.append(tmp_idl)
-1172        if multi > 0:
-1173            tmp_values = np.array(tmp_values).reshape(data.shape)
-1174        new_r_values[name] = func(tmp_values, **kwargs)
-1175        new_idl_d[name] = _merge_idx(idl)
-1176
-1177    if 'man_grad' in kwargs:
-1178        deriv = np.asarray(kwargs.get('man_grad'))
-1179        if new_values.shape + data.shape != deriv.shape:
-1180            raise Exception('Manual derivative does not have correct shape.')
-1181    elif kwargs.get('num_grad') is True:
-1182        if multi > 0:
-1183            raise Exception('Multi mode currently not supported for numerical derivative')
-1184        options = {
-1185            'base_step': 0.1,
-1186            'step_ratio': 2.5}
-1187        for key in options.keys():
-1188            kwarg = kwargs.get(key)
-1189            if kwarg is not None:
-1190                options[key] = kwarg
-1191        tmp_df = nd.Gradient(func, order=4, **{k: v for k, v in options.items() if v is not None})(values, **kwargs)
-1192        if tmp_df.size == 1:
-1193            deriv = np.array([tmp_df.real])
-1194        else:
-1195            deriv = tmp_df.real
-1196    else:
-1197        deriv = jacobian(func)(values, **kwargs)
-1198
-1199    final_result = np.zeros(new_values.shape, dtype=object)
-1200
-1201    if array_mode is True:
-1202
-1203        class _Zero_grad():
-1204            def __init__(self, N):
-1205                self.grad = np.zeros((N, 1))
-1206
-1207        new_covobs_lengths = dict(set([y for x in [[(n, o.covobs[n].N) for n in o.cov_names] for o in raveled_data] for y in x]))
-1208        d_extracted = {}
-1209        g_extracted = {}
-1210        for name in new_sample_names:
-1211            d_extracted[name] = []
-1212            ens_length = len(new_idl_d[name])
-1213            for i_dat, dat in enumerate(data):
-1214                d_extracted[name].append(np.array([_expand_deltas_for_merge(o.deltas.get(name, np.zeros(ens_length)), o.idl.get(name, new_idl_d[name]), o.shape.get(name, ens_length), new_idl_d[name]) for o in dat.reshape(np.prod(dat.shape))]).reshape(dat.shape + (ens_length, )))
-1215        for name in new_cov_names:
-1216            g_extracted[name] = []
-1217            zero_grad = _Zero_grad(new_covobs_lengths[name])
-1218            for i_dat, dat in enumerate(data):
-1219                g_extracted[name].append(np.array([o.covobs.get(name, zero_grad).grad for o in dat.reshape(np.prod(dat.shape))]).reshape(dat.shape + (new_covobs_lengths[name], 1)))
-1220
-1221    for i_val, new_val in np.ndenumerate(new_values):
-1222        new_deltas = {}
-1223        new_grad = {}
-1224        if array_mode is True:
-1225            for name in new_sample_names:
-1226                ens_length = d_extracted[name][0].shape[-1]
-1227                new_deltas[name] = np.zeros(ens_length)
-1228                for i_dat, dat in enumerate(d_extracted[name]):
-1229                    new_deltas[name] += np.tensordot(deriv[i_val + (i_dat, )], dat)
-1230            for name in new_cov_names:
-1231                new_grad[name] = 0
-1232                for i_dat, dat in enumerate(g_extracted[name]):
-1233                    new_grad[name] += np.tensordot(deriv[i_val + (i_dat, )], dat)
-1234        else:
-1235            for j_obs, obs in np.ndenumerate(data):
-1236                for name in obs.names:
-1237                    if name in obs.cov_names:
-1238                        new_grad[name] = new_grad.get(name, 0) + deriv[i_val + j_obs] * obs.covobs[name].grad
-1239                    else:
-1240                        new_deltas[name] = new_deltas.get(name, 0) + deriv[i_val + j_obs] * _expand_deltas_for_merge(obs.deltas[name], obs.idl[name], obs.shape[name], new_idl_d[name])
-1241
-1242        new_covobs = {name: Covobs(0, allcov[name], name, grad=new_grad[name]) for name in new_grad}
-1243
-1244        if not set(new_covobs.keys()).isdisjoint(new_deltas.keys()):
-1245            raise Exception('The same name has been used for deltas and covobs!')
-1246        new_samples = []
-1247        new_means = []
-1248        new_idl = []
-1249        new_names_obs = []
-1250        for name in new_names:
-1251            if name not in new_covobs:
-1252                new_samples.append(new_deltas[name])
-1253                new_idl.append(new_idl_d[name])
-1254                new_means.append(new_r_values[name][i_val])
-1255                new_names_obs.append(name)
-1256        final_result[i_val] = Obs(new_samples, new_names_obs, means=new_means, idl=new_idl)
-1257        for name in new_covobs:
-1258            final_result[i_val].names.append(name)
-1259        final_result[i_val]._covobs = new_covobs
-1260        final_result[i_val]._value = new_val
-1261        final_result[i_val].reweighted = reweighted
-1262
-1263    if multi == 0:
-1264        final_result = final_result.item()
-1265
-1266    return final_result
+            
1112def derived_observable(func, data, array_mode=False, **kwargs):
+1113    """Construct a derived Obs according to func(data, **kwargs) using automatic differentiation.
+1114
+1115    Parameters
+1116    ----------
+1117    func : object
+1118        arbitrary function of the form func(data, **kwargs). For the
+1119        automatic differentiation to work, all numpy functions have to have
+1120        the autograd wrapper (use 'import autograd.numpy as anp').
+1121    data : list
+1122        list of Obs, e.g. [obs1, obs2, obs3].
+1123    num_grad : bool
+1124        if True, numerical derivatives are used instead of autograd
+1125        (default False). To control the numerical differentiation the
+1126        kwargs of numdifftools.step_generators.MaxStepGenerator
+1127        can be used.
+1128    man_grad : list
+1129        manually supply a list or an array which contains the jacobian
+1130        of func. Use cautiously; supplying the wrong derivative will
+1131        not be intercepted.
+1132
+1133    Notes
+1134    -----
+1135    For simple mathematical operations it can be practical to use anonymous
+1136    functions. For the ratio of two observables one can e.g. use
+1137
+1138    new_obs = derived_observable(lambda x: x[0] / x[1], [obs1, obs2])
+1139    """
+1140
+1141    data = np.asarray(data)
+1142    raveled_data = data.ravel()
+1143
+1144    # Workaround for matrix operations containing non Obs data
+1145    if not all(isinstance(x, Obs) for x in raveled_data):
+1146        for i in range(len(raveled_data)):
+1147            if isinstance(raveled_data[i], (int, float)):
+1148                raveled_data[i] = cov_Obs(raveled_data[i], 0.0, "###dummy_covobs###")
+1149
+1150    allcov = {}
+1151    for o in raveled_data:
+1152        for name in o.cov_names:
+1153            if name in allcov:
+1154                if not np.allclose(allcov[name], o.covobs[name].cov):
+1155                    raise Exception('Inconsistent covariance matrices for %s!' % (name))
+1156            else:
+1157                allcov[name] = o.covobs[name].cov
+1158
+1159    n_obs = len(raveled_data)
+1160    new_names = sorted(set([y for x in [o.names for o in raveled_data] for y in x]))
+1161    new_cov_names = sorted(set([y for x in [o.cov_names for o in raveled_data] for y in x]))
+1162    new_sample_names = sorted(set(new_names) - set(new_cov_names))
+1163
+1164    reweighted = len(list(filter(lambda o: o.reweighted is True, raveled_data))) > 0
+1165
+1166    if data.ndim == 1:
+1167        values = np.array([o.value for o in data])
+1168    else:
+1169        values = np.vectorize(lambda x: x.value)(data)
+1170
+1171    new_values = func(values, **kwargs)
+1172
+1173    multi = int(isinstance(new_values, np.ndarray))
+1174
+1175    new_r_values = {}
+1176    new_idl_d = {}
+1177    for name in new_sample_names:
+1178        idl = []
+1179        tmp_values = np.zeros(n_obs)
+1180        for i, item in enumerate(raveled_data):
+1181            tmp_values[i] = item.r_values.get(name, item.value)
+1182            tmp_idl = item.idl.get(name)
+1183            if tmp_idl is not None:
+1184                idl.append(tmp_idl)
+1185        if multi > 0:
+1186            tmp_values = np.array(tmp_values).reshape(data.shape)
+1187        new_r_values[name] = func(tmp_values, **kwargs)
+1188        new_idl_d[name] = _merge_idx(idl)
+1189
+1190    if 'man_grad' in kwargs:
+1191        deriv = np.asarray(kwargs.get('man_grad'))
+1192        if new_values.shape + data.shape != deriv.shape:
+1193            raise Exception('Manual derivative does not have correct shape.')
+1194    elif kwargs.get('num_grad') is True:
+1195        if multi > 0:
+1196            raise Exception('Multi mode currently not supported for numerical derivative')
+1197        options = {
+1198            'base_step': 0.1,
+1199            'step_ratio': 2.5}
+1200        for key in options.keys():
+1201            kwarg = kwargs.get(key)
+1202            if kwarg is not None:
+1203                options[key] = kwarg
+1204        tmp_df = nd.Gradient(func, order=4, **{k: v for k, v in options.items() if v is not None})(values, **kwargs)
+1205        if tmp_df.size == 1:
+1206            deriv = np.array([tmp_df.real])
+1207        else:
+1208            deriv = tmp_df.real
+1209    else:
+1210        deriv = jacobian(func)(values, **kwargs)
+1211
+1212    final_result = np.zeros(new_values.shape, dtype=object)
+1213
+1214    if array_mode is True:
+1215
+1216        class _Zero_grad():
+1217            def __init__(self, N):
+1218                self.grad = np.zeros((N, 1))
+1219
+1220        new_covobs_lengths = dict(set([y for x in [[(n, o.covobs[n].N) for n in o.cov_names] for o in raveled_data] for y in x]))
+1221        d_extracted = {}
+1222        g_extracted = {}
+1223        for name in new_sample_names:
+1224            d_extracted[name] = []
+1225            ens_length = len(new_idl_d[name])
+1226            for i_dat, dat in enumerate(data):
+1227                d_extracted[name].append(np.array([_expand_deltas_for_merge(o.deltas.get(name, np.zeros(ens_length)), o.idl.get(name, new_idl_d[name]), o.shape.get(name, ens_length), new_idl_d[name]) for o in dat.reshape(np.prod(dat.shape))]).reshape(dat.shape + (ens_length, )))
+1228        for name in new_cov_names:
+1229            g_extracted[name] = []
+1230            zero_grad = _Zero_grad(new_covobs_lengths[name])
+1231            for i_dat, dat in enumerate(data):
+1232                g_extracted[name].append(np.array([o.covobs.get(name, zero_grad).grad for o in dat.reshape(np.prod(dat.shape))]).reshape(dat.shape + (new_covobs_lengths[name], 1)))
+1233
+1234    for i_val, new_val in np.ndenumerate(new_values):
+1235        new_deltas = {}
+1236        new_grad = {}
+1237        if array_mode is True:
+1238            for name in new_sample_names:
+1239                ens_length = d_extracted[name][0].shape[-1]
+1240                new_deltas[name] = np.zeros(ens_length)
+1241                for i_dat, dat in enumerate(d_extracted[name]):
+1242                    new_deltas[name] += np.tensordot(deriv[i_val + (i_dat, )], dat)
+1243            for name in new_cov_names:
+1244                new_grad[name] = 0
+1245                for i_dat, dat in enumerate(g_extracted[name]):
+1246                    new_grad[name] += np.tensordot(deriv[i_val + (i_dat, )], dat)
+1247        else:
+1248            for j_obs, obs in np.ndenumerate(data):
+1249                for name in obs.names:
+1250                    if name in obs.cov_names:
+1251                        new_grad[name] = new_grad.get(name, 0) + deriv[i_val + j_obs] * obs.covobs[name].grad
+1252                    else:
+1253                        new_deltas[name] = new_deltas.get(name, 0) + deriv[i_val + j_obs] * _expand_deltas_for_merge(obs.deltas[name], obs.idl[name], obs.shape[name], new_idl_d[name])
+1254
+1255        new_covobs = {name: Covobs(0, allcov[name], name, grad=new_grad[name]) for name in new_grad}
+1256
+1257        if not set(new_covobs.keys()).isdisjoint(new_deltas.keys()):
+1258            raise Exception('The same name has been used for deltas and covobs!')
+1259        new_samples = []
+1260        new_means = []
+1261        new_idl = []
+1262        new_names_obs = []
+1263        for name in new_names:
+1264            if name not in new_covobs:
+1265                new_samples.append(new_deltas[name])
+1266                new_idl.append(new_idl_d[name])
+1267                new_means.append(new_r_values[name][i_val])
+1268                new_names_obs.append(name)
+1269        final_result[i_val] = Obs(new_samples, new_names_obs, means=new_means, idl=new_idl)
+1270        for name in new_covobs:
+1271            final_result[i_val].names.append(name)
+1272        final_result[i_val]._covobs = new_covobs
+1273        final_result[i_val]._value = new_val
+1274        final_result[i_val].reweighted = reweighted
+1275
+1276    if multi == 0:
+1277        final_result = final_result.item()
+1278
+1279    return final_result
 
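A minimal usage sketch for derived_observable, assuming pyerrors is imported as pe and using synthetic Gaussian samples (ensemble names and numbers are hypothetical):

import numpy as np
import pyerrors as pe

# Two observables defined on the same synthetic ensemble
obs1 = pe.Obs([np.random.normal(1.0, 0.1, 500)], ['ens1'])
obs2 = pe.Obs([np.random.normal(2.0, 0.2, 500)], ['ens1'])

# Ratio via an anonymous function; the gradient is obtained by
# automatic differentiation since neither man_grad nor num_grad is given.
ratio = pe.derived_observable(lambda x: x[0] / x[1], [obs1, obs2])
ratio.gamma_method()  # error propagation via the gamma method
print(ratio)
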
@@ -4570,46 +4592,46 @@ functions. For the ratio of two observables one can e.g. use

-1303def reweight(weight, obs, **kwargs):
-1304    """Reweight a list of observables.
-1305
-1306    Parameters
-1307    ----------
-1308    weight : Obs
-1309        Reweighting factor. An Observable that has to be defined on a superset of the
-1310        configurations in obs[i].idl for all i.
-1311    obs : list
-1312        list of Obs, e.g. [obs1, obs2, obs3].
-1313    all_configs : bool
-1314        if True, the reweighted observables are normalized by the average of
-1315        the reweighting factor on all configurations in weight.idl and not
-1316        on the configurations in obs[i].idl. Default False.
-1317    """
-1318    result = []
-1319    for i in range(len(obs)):
-1320        if len(obs[i].cov_names):
-1321            raise Exception('Error: Not possible to reweight an Obs that contains covobs!')
-1322        if not set(obs[i].names).issubset(weight.names):
-1323            raise Exception('Error: Ensembles do not fit')
-1324        for name in obs[i].names:
-1325            if not set(obs[i].idl[name]).issubset(weight.idl[name]):
-1326                raise Exception('obs[%d] has to be defined on a subset of the configs in weight.idl[%s]!' % (i, name))
-1327        new_samples = []
-1328        w_deltas = {}
-1329        for name in sorted(obs[i].names):
-1330            w_deltas[name] = _reduce_deltas(weight.deltas[name], weight.idl[name], obs[i].idl[name])
-1331            new_samples.append((w_deltas[name] + weight.r_values[name]) * (obs[i].deltas[name] + obs[i].r_values[name]))
-1332        tmp_obs = Obs(new_samples, sorted(obs[i].names), idl=[obs[i].idl[name] for name in sorted(obs[i].names)])
-1333
-1334        if kwargs.get('all_configs'):
-1335            new_weight = weight
-1336        else:
-1337            new_weight = Obs([w_deltas[name] + weight.r_values[name] for name in sorted(obs[i].names)], sorted(obs[i].names), idl=[obs[i].idl[name] for name in sorted(obs[i].names)])
-1338
-1339        result.append(tmp_obs / new_weight)
-1340        result[-1].reweighted = True
-1341
-1342    return result
+1316def reweight(weight, obs, **kwargs):
+1317    """Reweight a list of observables.
+1318
+1319    Parameters
+1320    ----------
+1321    weight : Obs
+1322        Reweighting factor. An Observable that has to be defined on a superset of the
+1323        configurations in obs[i].idl for all i.
+1324    obs : list
+1325        list of Obs, e.g. [obs1, obs2, obs3].
+1326    all_configs : bool
+1327        If True, the reweighted observables are normalized by the average of
+1328        the reweighting factor on all configurations in weight.idl and not
+1329        on the configurations in obs[i].idl. Default False.
+1330    """
+1331    result = []
+1332    for i in range(len(obs)):
+1333        if len(obs[i].cov_names):
+1334            raise Exception('Error: Not possible to reweight an Obs that contains covobs!')
+1335        if not set(obs[i].names).issubset(weight.names):
+1336            raise Exception('Error: Ensembles do not fit')
+1337        for name in obs[i].names:
+1338            if not set(obs[i].idl[name]).issubset(weight.idl[name]):
+1339                raise Exception('obs[%d] has to be defined on a subset of the configs in weight.idl[%s]!' % (i, name))
+1340        new_samples = []
+1341        w_deltas = {}
+1342        for name in sorted(obs[i].names):
+1343            w_deltas[name] = _reduce_deltas(weight.deltas[name], weight.idl[name], obs[i].idl[name])
+1344            new_samples.append((w_deltas[name] + weight.r_values[name]) * (obs[i].deltas[name] + obs[i].r_values[name]))
+1345        tmp_obs = Obs(new_samples, sorted(obs[i].names), idl=[obs[i].idl[name] for name in sorted(obs[i].names)])
+1346
+1347        if kwargs.get('all_configs'):
+1348            new_weight = weight
+1349        else:
+1350            new_weight = Obs([w_deltas[name] + weight.r_values[name] for name in sorted(obs[i].names)], sorted(obs[i].names), idl=[obs[i].idl[name] for name in sorted(obs[i].names)])
+1351
+1352        result.append(tmp_obs / new_weight)
+1353        result[-1].reweighted = True
+1354
+1355    return result
 
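A short sketch of a typical reweight call, again with synthetic data and hypothetical names:

import numpy as np
import pyerrors as pe

obs1 = pe.Obs([np.random.normal(1.0, 0.1, 500)], ['ens1'])
# Reweighting factor defined on the same configurations as obs1
weight = pe.Obs([np.abs(np.random.normal(1.0, 0.05, 500))], ['ens1'])

r_obs = pe.reweight(weight, [obs1])  # returns a list, one entry per input Obs
r_obs[0].gamma_method()
print(r_obs[0].reweighted)  # True
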
@@ -4643,47 +4665,47 @@ on the configurations in obs[i].idl. Default False.
-1345def correlate(obs_a, obs_b):
-1346    """Correlate two observables.
-1347
-1348    Parameters
-1349    ----------
-1350    obs_a : Obs
-1351        First observable
-1352    obs_b : Obs
-1353        Second observable
-1354
-1355    Notes
-1356    -----
-1357    Keep in mind to only correlate primary observables which have not been reweighted
-1358    yet. The reweighting has to be applied after correlating the observables.
-1359    Currently only works if ensembles are identical (this is not strictly necessary).
-1360    """
-1361
-1362    if sorted(obs_a.names) != sorted(obs_b.names):
-1363        raise Exception(f"Ensembles do not fit {set(sorted(obs_a.names)) ^ set(sorted(obs_b.names))}")
-1364    if len(obs_a.cov_names) or len(obs_b.cov_names):
-1365        raise Exception('Error: Not possible to correlate Obs that contain covobs!')
-1366    for name in obs_a.names:
-1367        if obs_a.shape[name] != obs_b.shape[name]:
-1368            raise Exception('Shapes of ensemble', name, 'do not fit')
-1369        if obs_a.idl[name] != obs_b.idl[name]:
-1370            raise Exception('idl of ensemble', name, 'do not fit')
-1371
-1372    if obs_a.reweighted is True:
-1373        warnings.warn("The first observable is already reweighted.", RuntimeWarning)
-1374    if obs_b.reweighted is True:
-1375        warnings.warn("The second observable is already reweighted.", RuntimeWarning)
-1376
-1377    new_samples = []
-1378    new_idl = []
-1379    for name in sorted(obs_a.names):
-1380        new_samples.append((obs_a.deltas[name] + obs_a.r_values[name]) * (obs_b.deltas[name] + obs_b.r_values[name]))
-1381        new_idl.append(obs_a.idl[name])
-1382
-1383    o = Obs(new_samples, sorted(obs_a.names), idl=new_idl)
-1384    o.reweighted = obs_a.reweighted or obs_b.reweighted
-1385    return o
+1358def correlate(obs_a, obs_b):
+1359    """Correlate two observables.
+1360
+1361    Parameters
+1362    ----------
+1363    obs_a : Obs
+1364        First observable
+1365    obs_b : Obs
+1366        Second observable
+1367
+1368    Notes
+1369    -----
+1370    Only correlate primary observables that have not been reweighted yet;
+1371    the reweighting has to be applied after the observables are correlated.
+1372    Currently this only works if both ensembles are identical (this restriction is not strictly necessary).
+1373    """
+1374
+1375    if sorted(obs_a.names) != sorted(obs_b.names):
+1376        raise Exception(f"Ensembles do not fit {set(sorted(obs_a.names)) ^ set(sorted(obs_b.names))}")
+1377    if len(obs_a.cov_names) or len(obs_b.cov_names):
+1378        raise Exception('Error: Not possible to correlate Obs that contain covobs!')
+1379    for name in obs_a.names:
+1380        if obs_a.shape[name] != obs_b.shape[name]:
+1381            raise Exception(f'Shapes of ensemble {name} do not fit')
+1382        if obs_a.idl[name] != obs_b.idl[name]:
+1383            raise Exception(f'idl of ensemble {name} do not fit')
+1384
+1385    if obs_a.reweighted is True:
+1386        warnings.warn("The first observable is already reweighted.", RuntimeWarning)
+1387    if obs_b.reweighted is True:
+1388        warnings.warn("The second observable is already reweighted.", RuntimeWarning)
+1389
+1390    new_samples = []
+1391    new_idl = []
+1392    for name in sorted(obs_a.names):
+1393        new_samples.append((obs_a.deltas[name] + obs_a.r_values[name]) * (obs_b.deltas[name] + obs_b.r_values[name]))
+1394        new_idl.append(obs_a.idl[name])
+1395
+1396    o = Obs(new_samples, sorted(obs_a.names), idl=new_idl)
+1397    o.reweighted = obs_a.reweighted or obs_b.reweighted
+1398    return o
 
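A sketch of correlate for two observables that share one ensemble (synthetic data, hypothetical names):

import numpy as np
import pyerrors as pe

obs_a = pe.Obs([np.random.normal(1.0, 0.1, 500)], ['ens1'])
obs_b = pe.Obs([np.random.normal(2.0, 0.2, 500)], ['ens1'])

# Elementwise product of the two Monte Carlo histories, which keeps the
# configuration-by-configuration correlation between obs_a and obs_b
ab = pe.correlate(obs_a, obs_b)
ab.gamma_method()
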
@@ -4718,74 +4740,74 @@ Currently only works if ensembles are identical (this is not strictly necessary)
-1388def covariance(obs, visualize=False, correlation=False, smooth=None, **kwargs):
-1389    r'''Calculates the error covariance matrix of a set of observables.
-1390
-1391    WARNING: This function should be used with care, especially for observables with support on multiple
-1392             ensembles with differing autocorrelations. See the notes below for details.
-1393
-1394    The gamma method has to be applied first to all observables.
-1395
-1396    Parameters
-1397    ----------
-1398    obs : list or numpy.ndarray
-1399        List or one dimensional array of Obs
-1400    visualize : bool
-1401        If True plots the corresponding normalized correlation matrix (default False).
-1402    correlation : bool
-1403        If True the correlation matrix instead of the error covariance matrix is returned (default False).
-1404    smooth : None or int
-1405        If smooth is an integer 'E' between 2 and the dimension of the matrix minus 1 the eigenvalue
-1406        smoothing procedure of hep-lat/9412087 is applied to the correlation matrix which leaves the
-1407        largest E eigenvalues essentially unchanged and smoothes the smaller eigenvalues to avoid extremely
-1408        small ones.
-1409
-1410    Notes
-1411    -----
-1412    The error covariance is defined such that it agrees with the squared standard error for two identical observables
-1413    $$\operatorname{cov}(a,a)=\sum_{s=1}^N\delta_a^s\delta_a^s/N^2=\Gamma_{aa}(0)/N=\operatorname{var}(a)/N=\sigma_a^2$$
-1414    in the absence of autocorrelation.
-1415    The error covariance is estimated by calculating the correlation matrix assuming no autocorrelation and then rescaling the correlation matrix by the full errors including the previous gamma method estimate for the autocorrelation of the observables. The covariance at windowsize 0 is guaranteed to be positive semi-definite
-1416    $$\sum_{i,j}v_i\Gamma_{ij}(0)v_j=\frac{1}{N}\sum_{s=1}^N\sum_{i,j}v_i\delta_i^s\delta_j^s v_j=\frac{1}{N}\sum_{s=1}^N\sum_{i}|v_i\delta_i^s|^2\geq 0\,,$$ for every $v\in\mathbb{R}^M$, while such an identity does not hold for larger windows/lags.
-1417    For observables defined on a single ensemble our approximation is equivalent to assuming that the integrated autocorrelation time of an off-diagonal element is equal to the geometric mean of the integrated autocorrelation times of the corresponding diagonal elements.
-1418    $$\tau_{\mathrm{int}, ij}=\sqrt{\tau_{\mathrm{int}, i}\times \tau_{\mathrm{int}, j}}$$
-1419    This construction ensures that the estimated covariance matrix is positive semi-definite (up to numerical rounding errors).
-1420    '''
-1421
-1422    length = len(obs)
-1423
-1424    max_samples = np.max([o.N for o in obs])
-1425    if max_samples <= length and not [item for sublist in [o.cov_names for o in obs] for item in sublist]:
-1426        warnings.warn(f"The dimension of the covariance matrix ({length}) is larger or equal to the number of samples ({max_samples}). This will result in a rank deficient matrix.", RuntimeWarning)
-1427
-1428    cov = np.zeros((length, length))
-1429    for i in range(length):
-1430        for j in range(i, length):
-1431            cov[i, j] = _covariance_element(obs[i], obs[j])
-1432    cov = cov + cov.T - np.diag(np.diag(cov))
-1433
-1434    corr = np.diag(1 / np.sqrt(np.diag(cov))) @ cov @ np.diag(1 / np.sqrt(np.diag(cov)))
-1435
-1436    if isinstance(smooth, int):
-1437        corr = _smooth_eigenvalues(corr, smooth)
-1438
-1439    if visualize:
-1440        plt.matshow(corr, vmin=-1, vmax=1)
-1441        plt.set_cmap('RdBu')
-1442        plt.colorbar()
-1443        plt.draw()
-1444
-1445    if correlation is True:
-1446        return corr
-1447
-1448    errors = [o.dvalue for o in obs]
-1449    cov = np.diag(errors) @ corr @ np.diag(errors)
-1450
-1451    eigenvalues = np.linalg.eigh(cov)[0]
-1452    if not np.all(eigenvalues >= 0):
-1453        warnings.warn("Covariance matrix is not positive semi-definite (Eigenvalues: " + str(eigenvalues) + ")", RuntimeWarning)
-1454
-1455    return cov
+1401def covariance(obs, visualize=False, correlation=False, smooth=None, **kwargs):
+1402    r'''Calculates the error covariance matrix of a set of observables.
+1403
+1404    WARNING: This function should be used with care, especially for observables with support on multiple
+1405             ensembles with differing autocorrelations. See the notes below for details.
+1406
+1407    The gamma method has to be applied first to all observables.
+1408
+1409    Parameters
+1410    ----------
+1411    obs : list or numpy.ndarray
+1412        List or one dimensional array of Obs
+1413    visualize : bool
+1414        If True plots the corresponding normalized correlation matrix (default False).
+1415    correlation : bool
+1416        If True the correlation matrix instead of the error covariance matrix is returned (default False).
+1417    smooth : None or int
+1418        If smooth is an integer E between 2 and the dimension of the matrix minus 1, the eigenvalue
+1419        smoothing procedure of hep-lat/9412087 is applied to the correlation matrix. This leaves the
+1420        largest E eigenvalues essentially unchanged and smooths the smaller eigenvalues to avoid extremely
+1421        small ones.
+1422
+1423    Notes
+1424    -----
+1425    The error covariance is defined such that it agrees with the squared standard error for two identical observables
+1426    $$\operatorname{cov}(a,a)=\sum_{s=1}^N\delta_a^s\delta_a^s/N^2=\Gamma_{aa}(0)/N=\operatorname{var}(a)/N=\sigma_a^2$$
+1427    in the absence of autocorrelation.
+1428    The error covariance is estimated by calculating the correlation matrix assuming no autocorrelation and then rescaling the correlation matrix by the full errors including the previous gamma method estimate for the autocorrelation of the observables. The covariance at windowsize 0 is guaranteed to be positive semi-definite
+1429    $$\sum_{i,j}v_i\Gamma_{ij}(0)v_j=\frac{1}{N}\sum_{s=1}^N\sum_{i,j}v_i\delta_i^s\delta_j^s v_j=\frac{1}{N}\sum_{s=1}^N\sum_{i}|v_i\delta_i^s|^2\geq 0\,,$$ for every $v\in\mathbb{R}^M$, while such an identity does not hold for larger windows/lags.
+1430    For observables defined on a single ensemble our approximation is equivalent to assuming that the integrated autocorrelation time of an off-diagonal element is equal to the geometric mean of the integrated autocorrelation times of the corresponding diagonal elements.
+1431    $$\tau_{\mathrm{int}, ij}=\sqrt{\tau_{\mathrm{int}, i}\times \tau_{\mathrm{int}, j}}$$
+1432    This construction ensures that the estimated covariance matrix is positive semi-definite (up to numerical rounding errors).
+1433    '''
+1434
+1435    length = len(obs)
+1436
+1437    max_samples = np.max([o.N for o in obs])
+1438    if max_samples <= length and not [item for sublist in [o.cov_names for o in obs] for item in sublist]:
+1439        warnings.warn(f"The dimension of the covariance matrix ({length}) is larger than or equal to the number of samples ({max_samples}). This will result in a rank-deficient matrix.", RuntimeWarning)
+1440
+1441    cov = np.zeros((length, length))
+1442    for i in range(length):
+1443        for j in range(i, length):
+1444            cov[i, j] = _covariance_element(obs[i], obs[j])
+1445    cov = cov + cov.T - np.diag(np.diag(cov))
+1446
+1447    corr = np.diag(1 / np.sqrt(np.diag(cov))) @ cov @ np.diag(1 / np.sqrt(np.diag(cov)))
+1448
+1449    if isinstance(smooth, int):
+1450        corr = _smooth_eigenvalues(corr, smooth)
+1451
+1452    if visualize:
+1453        plt.matshow(corr, vmin=-1, vmax=1)
+1454        plt.set_cmap('RdBu')
+1455        plt.colorbar()
+1456        plt.draw()
+1457
+1458    if correlation is True:
+1459        return corr
+1460
+1461    errors = [o.dvalue for o in obs]
+1462    cov = np.diag(errors) @ corr @ np.diag(errors)
+1463
+1464    eigenvalues = np.linalg.eigh(cov)[0]
+1465    if not np.all(eigenvalues >= 0):
+1466        warnings.warn("Covariance matrix is not positive semi-definite (Eigenvalues: " + str(eigenvalues) + ")", RuntimeWarning)
+1467
+1468    return cov
 
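A sketch of a covariance call; the gamma method has to be applied to every input observable first (synthetic data, hypothetical names):

import numpy as np
import pyerrors as pe

obs1 = pe.Obs([np.random.normal(1.0, 0.1, 500)], ['ens1'])
obs2 = pe.Obs([np.random.normal(2.0, 0.2, 500)], ['ens1'])
for o in (obs1, obs2):
    o.gamma_method()  # required before estimating the covariance

cov = pe.covariance([obs1, obs2])                     # 2x2 error covariance matrix
corr = pe.covariance([obs1, obs2], correlation=True)  # normalized correlation matrix
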
@@ -4837,24 +4859,24 @@ This construction ensures that the estimated covariance matrix is positive semi-
-1535def import_jackknife(jacks, name, idl=None):
-1536    """Imports jackknife samples and returns an Obs
-1537
-1538    Parameters
-1539    ----------
-1540    jacks : numpy.ndarray
-1541        numpy array containing the mean value as zeroth entry and
-1542        the N jackknife samples as first to Nth entry.
-1543    name : str
-1544        name of the ensemble the samples are defined on.
-1545    """
-1546    length = len(jacks) - 1
-1547    prj = (np.ones((length, length)) - (length - 1) * np.identity(length))
-1548    samples = jacks[1:] @ prj
-1549    mean = np.mean(samples)
-1550    new_obs = Obs([samples - mean], [name], idl=idl, means=[mean])
-1551    new_obs._value = jacks[0]
-1552    return new_obs
+1548def import_jackknife(jacks, name, idl=None):
+1549    """Imports jackknife samples and returns an Obs
+1550
+1551    Parameters
+1552    ----------
+1553    jacks : numpy.ndarray
+1554        numpy array containing the mean value as zeroth entry and
+1555        the N jackknife samples as first to Nth entry.
+1556    name : str
+1557        name of the ensemble the samples are defined on.
+1558    """
+1559    length = len(jacks) - 1
+1560    prj = (np.ones((length, length)) - (length - 1) * np.identity(length))
+1561    samples = jacks[1:] @ prj
+1562    mean = np.mean(samples)
+1563    new_obs = Obs([samples - mean], [name], idl=idl, means=[mean])
+1564    new_obs._value = jacks[0]
+1565    return new_obs
 
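A round-trip sketch for import_jackknife. It assumes the companion method Obs.export_jackknife, which returns the mean followed by the N jackknife samples in the layout expected here (synthetic data, hypothetical names):

import numpy as np
import pyerrors as pe

obs = pe.Obs([np.random.normal(1.0, 0.1, 100)], ['ens1'])
jacks = obs.export_jackknife()  # [mean, jack_1, ..., jack_N]
reconstructed = pe.import_jackknife(jacks, 'ens1')
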
@@ -4884,34 +4906,34 @@ name of the ensemble the samples are defined on.
-1555def merge_obs(list_of_obs):
-1556    """Combine all observables in list_of_obs into one new observable
-1557
-1558    Parameters
-1559    ----------
-1560    list_of_obs : list
-1561        list of the Obs object to be combined
-1562
-1563    Notes
-1564    -----
-1565    It is not possible to combine obs which are based on the same replicum
-1566    """
-1567    replist = [item for obs in list_of_obs for item in obs.names]
-1568    if (len(replist) == len(set(replist))) is False:
-1569        raise Exception('list_of_obs contains duplicate replica: %s' % (str(replist)))
-1570    if any([len(o.cov_names) for o in list_of_obs]):
-1571        raise Exception('Not possible to merge data that contains covobs!')
-1572    new_dict = {}
-1573    idl_dict = {}
-1574    for o in list_of_obs:
-1575        new_dict.update({key: o.deltas.get(key, 0) + o.r_values.get(key, 0)
-1576                        for key in set(o.deltas) | set(o.r_values)})
-1577        idl_dict.update({key: o.idl.get(key, 0) for key in set(o.deltas)})
-1578
-1579    names = sorted(new_dict.keys())
-1580    o = Obs([new_dict[name] for name in names], names, idl=[idl_dict[name] for name in names])
-1581    o.reweighted = np.max([oi.reweighted for oi in list_of_obs])
-1582    return o
+1568def merge_obs(list_of_obs):
+1569    """Combine all observables in list_of_obs into one new observable
+1570
+1571    Parameters
+1572    ----------
+1573    list_of_obs : list
+1574        list of the Obs objects to be combined
+1575
+1576    Notes
+1577    -----
+1578    It is not possible to combine Obs that are based on the same replicum.
+1579    """
+1580    replist = [item for obs in list_of_obs for item in obs.names]
+1581    if len(replist) != len(set(replist)):
+1582        raise Exception('list_of_obs contains duplicate replica: %s' % (str(replist)))
+1583    if any([len(o.cov_names) for o in list_of_obs]):
+1584        raise Exception('Not possible to merge data that contains covobs!')
+1585    new_dict = {}
+1586    idl_dict = {}
+1587    for o in list_of_obs:
+1588        new_dict.update({key: o.deltas.get(key, 0) + o.r_values.get(key, 0)
+1589                        for key in set(o.deltas) | set(o.r_values)})
+1590        idl_dict.update({key: o.idl.get(key, 0) for key in set(o.deltas)})
+1591
+1592    names = sorted(new_dict.keys())
+1593    o = Obs([new_dict[name] for name in names], names, idl=[idl_dict[name] for name in names])
+1594    o.reweighted = any(oi.reweighted for oi in list_of_obs)
+1595    return o
 
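A sketch of merge_obs for two replica of the same ensemble, using the usual 'ensemble|replicum' naming convention (synthetic data, hypothetical names):

import numpy as np
import pyerrors as pe

rep1 = pe.Obs([np.random.normal(1.0, 0.1, 250)], ['ens1|r01'])
rep2 = pe.Obs([np.random.normal(1.0, 0.1, 250)], ['ens1|r02'])

combined = pe.merge_obs([rep1, rep2])  # one Obs spanning both replica
combined.gamma_method()
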
@@ -4942,47 +4964,47 @@ list of the Obs object to be combined
-1585def cov_Obs(means, cov, name, grad=None):
-1586    """Create an Obs based on mean(s) and a covariance matrix
-1587
-1588    Parameters
-1589    ----------
-1590    mean : list of floats or float
-1591        N mean value(s) of the new Obs
-1592    cov : list or array
-1593        2d (NxN) Covariance matrix, 1d diagonal entries or 0d covariance
-1594    name : str
-1595        identifier for the covariance matrix
-1596    grad : list or array
-1597        Gradient of the Covobs wrt. the means belonging to cov.
-1598    """
-1599
-1600    def covobs_to_obs(co):
-1601        """Make an Obs out of a Covobs
-1602
-1603        Parameters
-1604        ----------
-1605        co : Covobs
-1606            Covobs to be embedded into the Obs
-1607        """
-1608        o = Obs([], [], means=[])
-1609        o._value = co.value
-1610        o.names.append(co.name)
-1611        o._covobs[co.name] = co
-1612        o._dvalue = np.sqrt(co.errsq())
-1613        return o
-1614
-1615    ol = []
-1616    if isinstance(means, (float, int)):
-1617        means = [means]
-1618
-1619    for i in range(len(means)):
-1620        ol.append(covobs_to_obs(Covobs(means[i], cov, name, pos=i, grad=grad)))
-1621    if ol[0].covobs[name].N != len(means):
-1622        raise Exception('You have to provide %d mean values!' % (ol[0].N))
-1623    if len(ol) == 1:
-1624        return ol[0]
-1625    return ol
+1598def cov_Obs(means, cov, name, grad=None):
+1599    """Create an Obs based on mean(s) and a covariance matrix
+1600
+1601    Parameters
+1602    ----------
+1603    means : list of floats or float
+1604        N mean value(s) of the new Obs
+1605    cov : list or array
+1606        2d (NxN) Covariance matrix, 1d diagonal entries or 0d covariance
+1607    name : str
+1608        identifier for the covariance matrix
+1609    grad : list or array
+1610        Gradient of the Covobs wrt. the means belonging to cov.
+1611    """
+1612
+1613    def covobs_to_obs(co):
+1614        """Make an Obs out of a Covobs
+1615
+1616        Parameters
+1617        ----------
+1618        co : Covobs
+1619            Covobs to be embedded into the Obs
+1620        """
+1621        o = Obs([], [], means=[])
+1622        o._value = co.value
+1623        o.names.append(co.name)
+1624        o._covobs[co.name] = co
+1625        o._dvalue = np.sqrt(co.errsq())
+1626        return o
+1627
+1628    ol = []
+1629    if isinstance(means, (float, int)):
+1630        means = [means]
+1631
+1632    for i in range(len(means)):
+1633        ol.append(covobs_to_obs(Covobs(means[i], cov, name, pos=i, grad=grad)))
+1634    if ol[0].covobs[name].N != len(means):
+1635        raise Exception('You have to provide %d mean values!' % (ol[0].N))
+1636    if len(ol) == 1:
+1637        return ol[0]
+1638    return ol
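
Two short sketches for cov_Obs, one scalar and one correlated pair (all numbers hypothetical):

import pyerrors as pe

# Scalar case: the 0d covariance is the squared error of the mean
z = pe.cov_Obs(0.78, 0.002 ** 2, 'Z_factor')

# Correlated case: two means with a 2x2 covariance matrix; a list of two Obs is returned
o1, o2 = pe.cov_Obs([1.0, 2.0], [[0.010, 0.002], [0.002, 0.020]], 'syst')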