Mirror of https://github.com/fjosw/pyerrors.git, synced 2025-05-14 19:43:41 +02:00

Resolved merge conflict in tests

This commit is contained in commit c9789a34e6.
27 changed files with 695 additions and 142 deletions
pyerrors/__init__.py

@@ -8,6 +8,8 @@ It is based on the gamma method [arXiv:hep-lat/0306017](https://arxiv.org/abs/hep-lat/0306017).
 - non-linear fits with x- and y-errors and exact linear error propagation based on automatic differentiation as introduced in [arXiv:1809.01289](https://arxiv.org/abs/1809.01289).
 - real and complex matrix operations and their error propagation based on automatic differentiation (Matrix inverse, Cholesky decomposition, calculation of eigenvalues and eigenvectors, singular value decomposition...).

 More detailed examples can be found in the [GitHub repository](https://github.com/fjosw/pyerrors/tree/develop/examples) [](https://mybinder.org/v2/gh/fjosw/pyerrors/HEAD?labpath=examples).

 There exist similar publicly available implementations of gamma method error analysis suites in [Fortran](https://gitlab.ift.uam-csic.es/alberto/aderrors), [Julia](https://gitlab.ift.uam-csic.es/alberto/aderrors.jl) and [Python](https://github.com/mbruno46/pyobs).

 ## Basic example
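The hunk ends at the `## Basic example` heading; the example itself lies outside this diff. A minimal sketch of the advertised workflow (the samples and the ensemble name are illustrative, not taken from the diff):

```python
import numpy as np
import pyerrors as pe

samples = np.random.normal(1.0, 0.1, 1000)   # illustrative Monte Carlo history
my_obs = pe.Obs([samples], ['ensemble1'])    # one list of samples per replicum
my_obs.gamma_method()                        # error analysis via the gamma method
print(my_obs)                                # prints value and statistical error
```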
@@ -443,8 +445,10 @@ Julia I/O routines for the json.gz format, compatible with [ADerrors.jl](https://gitlab.ift.uam-csic.es/alberto/aderrors.jl).
 # Citing
 If you use `pyerrors` for research that leads to a publication please consider citing:
 - Ulli Wolff, *Monte Carlo errors with less errors*. Comput.Phys.Commun. 156 (2004) 143-153, Comput.Phys.Commun. 176 (2007) 383 (erratum).
-- Stefan Schaefer, Rainer Sommer, Francesco Virotta, *Critical slowing down and error analysis in lattice QCD simulations*. Nucl.Phys.B 845 (2011) 93-119.
 - Alberto Ramos, *Automatic differentiation for error analysis of Monte Carlo data*. Comput.Phys.Commun. 238 (2019) 19-35.
+and
+- Stefan Schaefer, Rainer Sommer, Francesco Virotta, *Critical slowing down and error analysis in lattice QCD simulations*. Nucl.Phys.B 845 (2011) 93-119.
+where applicable.
 '''
 from .obs import *
 from .correlators import *
pyerrors/correlators.py

@@ -155,7 +155,7 @@ class Corr:
                 raise Exception("Vectors are of wrong shape!")
             if normalize:
                 vector_l, vector_r = vector_l / np.sqrt((vector_l @ vector_l)), vector_r / np.sqrt(vector_r @ vector_r)
-            newcontent = [None if (item is None) else np.asarray([vector_l.T @ item @ vector_r]) for item in self.content]
+            newcontent = [None if _check_for_none(self, item) else np.asarray([vector_l.T @ item @ vector_r]) for item in self.content]

         else:
             # There are no checks here yet. There are so many possible scenarios, where this can go wrong.
@@ -163,7 +163,7 @@ class Corr:
                 for t in range(self.T):
                     vector_l[t], vector_r[t] = vector_l[t] / np.sqrt((vector_l[t] @ vector_l[t])), vector_r[t] / np.sqrt(vector_r[t] @ vector_r[t])

-            newcontent = [None if (self.content[t] is None or vector_l[t] is None or vector_r[t] is None) else np.asarray([vector_l[t].T @ self.content[t] @ vector_r[t]]) for t in range(self.T)]
+            newcontent = [None if (_check_for_none(self, self.content[t]) or vector_l[t] is None or vector_r[t] is None) else np.asarray([vector_l[t].T @ self.content[t] @ vector_r[t]]) for t in range(self.T)]
         return Corr(newcontent)

     def item(self, i, j):
@@ -197,6 +197,8 @@ class Corr:

     def symmetric(self):
         """ Symmetrize the correlator around x0=0."""
+        if self.N != 1:
+            raise Exception('symmetric cannot be safely applied to multi-dimensional correlators.')
         if self.T % 2 != 0:
             raise Exception("Can not symmetrize odd T")

@@ -215,6 +217,8 @@ class Corr:

     def anti_symmetric(self):
         """Anti-symmetrize the correlator around x0=0."""
+        if self.N != 1:
+            raise Exception('anti_symmetric cannot be safely applied to multi-dimensional correlators.')
         if self.T % 2 != 0:
             raise Exception("Can not anti-symmetrize odd T")

@@ -236,7 +240,7 @@ class Corr:
     def matrix_symmetric(self):
         """Symmetrizes the correlator matrices on every timeslice."""
         if self.N > 1:
-            transposed = [None if (G is None) else G.T for G in self.content]
+            transposed = [None if _check_for_none(self, G) else G.T for G in self.content]
             return 0.5 * (Corr(transposed) + self)
         if self.N == 1:
             raise Exception("Trying to symmetrize a correlator matrix, that already has N=1.")
@@ -419,13 +423,15 @@ class Corr:
             Can either be an Obs which is correlated with all entries of the
             correlator or a Corr of same length.
         """
+        if self.N != 1:
+            raise Exception("Only one-dimensional correlators can be safely correlated.")
         new_content = []
         for x0, t_slice in enumerate(self.content):
-            if t_slice is None:
+            if _check_for_none(self, t_slice):
                 new_content.append(None)
             else:
                 if isinstance(partner, Corr):
-                    if partner.content[x0] is None:
+                    if _check_for_none(partner, partner.content[x0]):
                         new_content.append(None)
                     else:
                         new_content.append(np.array([correlate(o, partner.content[x0][0]) for o in t_slice]))
@@ -449,9 +455,11 @@ class Corr:
             the reweighting factor on all configurations in weight.idl and not
             on the configurations in obs[i].idl.
         """
+        if self.N != 1:
+            raise Exception("Reweighting only implemented for one-dimensional correlators.")
         new_content = []
         for t_slice in self.content:
-            if t_slice is None:
+            if _check_for_none(self, t_slice):
                 new_content.append(None)
             else:
                 new_content.append(np.array(reweight(weight, t_slice, **kwargs)))
@@ -467,6 +475,8 @@ class Corr:
         parity : int
             Parity quantum number of the correlator, can be +1 or -1
         """
+        if self.N != 1:
+            raise Exception("T_symmetry only implemented for one-dimensional correlators.")
         if not isinstance(partner, Corr):
             raise Exception("T partner has to be a Corr object.")
         if parity not in [+1, -1]:
@@ -494,6 +504,8 @@ class Corr:
             decides which definition of the finite differences derivative is used.
             Available choice: symmetric, forward, backward, improved, default: symmetric
         """
+        if self.N != 1:
+            raise Exception("deriv only implemented for one-dimensional correlators.")
         if variant == "symmetric":
             newcontent = []
             for t in range(1, self.T - 1):
@@ -546,6 +558,8 @@ class Corr:
             decides which definition of the finite differences derivative is used.
             Available choice: symmetric, improved, default: symmetric
         """
+        if self.N != 1:
+            raise Exception("second_deriv only implemented for one-dimensional correlators.")
         if variant == "symmetric":
             newcontent = []
             for t in range(1, self.T - 1):
@@ -588,7 +602,7 @@ class Corr:
         if variant == 'log':
             newcontent = []
             for t in range(self.T - 1):
-                if (self.content[t] is None) or (self.content[t + 1] is None):
+                if ((self.content[t] is None) or (self.content[t + 1] is None)) or (self.content[t + 1][0].value == 0):
                     newcontent.append(None)
                 else:
                     newcontent.append(self.content[t] / self.content[t + 1])
@@ -608,7 +622,7 @@ class Corr:

             newcontent = []
             for t in range(self.T - 1):
-                if (self.content[t] is None) or (self.content[t + 1] is None):
+                if (self.content[t] is None) or (self.content[t + 1] is None) or (self.content[t + 1][0].value == 0):
                     newcontent.append(None)
                 # Fill the two timeslices in the middle of the lattice with their predecessors
                 elif variant == 'sinh' and t in [self.T / 2, self.T / 2 - 1]:
@@ -623,7 +637,7 @@ class Corr:
         elif variant == 'arccosh':
             newcontent = []
             for t in range(1, self.T - 1):
-                if (self.content[t] is None) or (self.content[t + 1] is None) or (self.content[t - 1] is None):
+                if (self.content[t] is None) or (self.content[t + 1] is None) or (self.content[t - 1] is None) or (self.content[t][0].value == 0):
                     newcontent.append(None)
                 else:
                     newcontent.append((self.content[t + 1] + self.content[t - 1]) / (2 * self.content[t]))
@@ -758,7 +772,7 @@ class Corr:

         x, y, y_err = self.plottable()
         if hide_sigma:
-            hide_from = np.argmax((hide_sigma * np.array(y_err)) > np.abs(y)) - 1
+            hide_from = np.argmax((hide_sigma * np.array(y_err[1:])) > np.abs(y[1:])) - 1
         else:
             hide_from = None
         ax1.errorbar(x[:hide_from], y[:hide_from], y_err[:hide_from], label=self.tag)
@@ -781,7 +795,7 @@ class Corr:
                 corr.gamma_method()
                 x, y, y_err = corr.plottable()
                 if hide_sigma:
-                    hide_from = np.argmax((hide_sigma * np.array(y_err)) > np.abs(y)) - 1
+                    hide_from = np.argmax((hide_sigma * np.array(y_err[1:])) > np.abs(y[1:])) - 1
                 else:
                     hide_from = None
                 plt.errorbar(x[:hide_from], y[:hide_from], y_err[:hide_from], label=corr.tag, mfc=plt.rcParams['axes.facecolor'])
@@ -883,12 +897,14 @@ class Corr:
         else:
             raise Exception("Unknown datatype " + str(datatype))

-    def print(self, range=[0, None]):
-        print(self.__repr__(range))
+    def print(self, print_range=None):
+        print(self.__repr__(print_range))

-    def __repr__(self, range=[0, None]):
+    def __repr__(self, print_range=None):
+        if print_range is None:
+            print_range = [0, None]
         content_string = ""

         content_string += "Corr T=" + str(self.T) + " N=" + str(self.N) + "\n"  # +" filled with"+ str(type(self.content[0][0])) there should be a good solution here

         if self.tag is not None:
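The old signatures used a mutable list as default argument (and shadowed the builtin `range`). Python evaluates defaults once at function definition, so the `print_range[1] += 1` in the next hunk would otherwise mutate one shared list across calls; the `None` sentinel creates a fresh default per call. A minimal sketch of the pitfall, with hypothetical function names:

```python
def bad(r=[0, None]):        # default list is created once and shared
    r[1] = (r[1] or 0) + 1
    return r

def good(r=None):            # None sentinel: fresh default per call
    if r is None:
        r = [0, None]
    r[1] = (r[1] or 0) + 1
    return r

bad(); print(bad())    # [0, 2] - second call sees the mutated default
good(); print(good())  # [0, 1] - every call starts from [0, None]
```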
@@ -896,14 +912,14 @@ class Corr:
         if self.N != 1:
             return content_string

-        if range[1]:
-            range[1] += 1
+        if print_range[1]:
+            print_range[1] += 1
         content_string += 'x0/a\tCorr(x0/a)\n------------------\n'
-        for i, sub_corr in enumerate(self.content[range[0]:range[1]]):
+        for i, sub_corr in enumerate(self.content[print_range[0]:print_range[1]]):
             if sub_corr is None:
-                content_string += str(i + range[0]) + '\n'
+                content_string += str(i + print_range[0]) + '\n'
             else:
-                content_string += str(i + range[0])
+                content_string += str(i + print_range[0])
                 for element in sub_corr:
                     content_string += '\t' + ' ' * int(element >= 0) + str(element)
                 content_string += '\n'
@@ -923,7 +939,7 @@ class Corr:
                 raise Exception("Addition of Corrs with different shape")
             newcontent = []
             for t in range(self.T):
-                if (self.content[t] is None) or (y.content[t] is None):
+                if _check_for_none(self, self.content[t]) or _check_for_none(y, y.content[t]):
                     newcontent.append(None)
                 else:
                     newcontent.append(self.content[t] + y.content[t])
@@ -932,7 +948,7 @@ class Corr:
         elif isinstance(y, (Obs, int, float, CObs)):
             newcontent = []
             for t in range(self.T):
-                if (self.content[t] is None):
+                if _check_for_none(self, self.content[t]):
                     newcontent.append(None)
                 else:
                     newcontent.append(self.content[t] + y)
@@ -951,7 +967,7 @@ class Corr:
                 raise Exception("Multiplication of Corr object requires N=N or N=1 and T=T")
             newcontent = []
             for t in range(self.T):
-                if (self.content[t] is None) or (y.content[t] is None):
+                if _check_for_none(self, self.content[t]) or _check_for_none(y, y.content[t]):
                     newcontent.append(None)
                 else:
                     newcontent.append(self.content[t] * y.content[t])
@@ -960,7 +976,7 @@ class Corr:
         elif isinstance(y, (Obs, int, float, CObs)):
             newcontent = []
             for t in range(self.T):
-                if (self.content[t] is None):
+                if _check_for_none(self, self.content[t]):
                     newcontent.append(None)
                 else:
                     newcontent.append(self.content[t] * y)
@@ -979,12 +995,12 @@ class Corr:
                 raise Exception("Multiplication of Corr object requires N=N or N=1 and T=T")
             newcontent = []
             for t in range(self.T):
-                if (self.content[t] is None) or (y.content[t] is None):
+                if _check_for_none(self, self.content[t]) or _check_for_none(y, y.content[t]):
                     newcontent.append(None)
                 else:
                     newcontent.append(self.content[t] / y.content[t])
             for t in range(self.T):
-                if newcontent[t] is None:
+                if _check_for_none(self, newcontent[t]):
                     continue
                 if np.isnan(np.sum(newcontent[t]).value):
                     newcontent[t] = None
@@ -1003,7 +1019,7 @@ class Corr:

             newcontent = []
             for t in range(self.T):
-                if (self.content[t] is None):
+                if _check_for_none(self, self.content[t]):
                     newcontent.append(None)
                 else:
                     newcontent.append(self.content[t] / y)
@@ -1014,7 +1030,7 @@ class Corr:
                 raise Exception('Division by zero will return undefined correlator')
             newcontent = []
             for t in range(self.T):
-                if (self.content[t] is None):
+                if _check_for_none(self, self.content[t]):
                     newcontent.append(None)
                 else:
                     newcontent.append(self.content[t] / y)
@@ -1028,7 +1044,7 @@ class Corr:
             raise TypeError('Corr / wrong type')

     def __neg__(self):
-        newcontent = [None if (item is None) else -1. * item for item in self.content]
+        newcontent = [None if _check_for_none(self, item) else -1. * item for item in self.content]
         return Corr(newcontent, prange=self.prange)

     def __sub__(self, y):
|
|||
|
||||
def __pow__(self, y):
|
||||
if isinstance(y, (Obs, int, float, CObs)):
|
||||
newcontent = [None if (item is None) else item**y for item in self.content]
|
||||
newcontent = [None if _check_for_none(self, item) else item**y for item in self.content]
|
||||
return Corr(newcontent, prange=self.prange)
|
||||
else:
|
||||
raise TypeError('Type of exponent not supported')
|
||||
|
||||
def __abs__(self):
|
||||
newcontent = [None if (item is None) else np.abs(item) for item in self.content]
|
||||
newcontent = [None if _check_for_none(self, item) else np.abs(item) for item in self.content]
|
||||
return Corr(newcontent, prange=self.prange)
|
||||
|
||||
# The numpy functions:
|
||||
def sqrt(self):
|
||||
return self**0.5
|
||||
return self ** 0.5
|
||||
|
||||
def log(self):
|
||||
newcontent = [None if (item is None) else np.log(item) for item in self.content]
|
||||
newcontent = [None if _check_for_none(self, item) else np.log(item) for item in self.content]
|
||||
return Corr(newcontent, prange=self.prange)
|
||||
|
||||
def exp(self):
|
||||
newcontent = [None if (item is None) else np.exp(item) for item in self.content]
|
||||
newcontent = [None if _check_for_none(self, item) else np.exp(item) for item in self.content]
|
||||
return Corr(newcontent, prange=self.prange)
|
||||
|
||||
def _apply_func_to_corr(self, func):
|
||||
newcontent = [None if (item is None) else func(item) for item in self.content]
|
||||
newcontent = [None if _check_for_none(self, item) else func(item) for item in self.content]
|
||||
for t in range(self.T):
|
||||
if newcontent[t] is None:
|
||||
if _check_for_none(self, newcontent[t]):
|
||||
continue
|
||||
if np.isnan(np.sum(newcontent[t]).value):
|
||||
newcontent[t] = None
|
||||
|
@@ -1222,6 +1238,11 @@ def _sort_vectors(vec_set, ts):
     return sorted_vec_set


+def _check_for_none(corr, entry):
+    """Checks if entry for correlator corr is None"""
+    return len(list(filter(None, np.asarray(entry).flatten()))) < corr.N ** 2
+
+
 def _GEVP_solver(Gt, G0):
     """Helper function for solving the GEVP and sorting the eigenvectors.
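The new helper treats a correlator entry as missing not only when it is literally `None` but whenever fewer than N² of its flattened elements are truthy (note that `filter(None, ...)` also discards exact zeros). A standalone sketch of the semantics, reimplemented here for illustration:

```python
import numpy as np

def check_for_none(entry, N):
    # mirrors _check_for_none: fewer than N**2 truthy elements -> "None"
    return len(list(filter(None, np.asarray(entry).flatten()))) < N ** 2

print(check_for_none(None, 1))                        # True: nothing there
print(check_for_none([[1.0, 2.0], [3.0, 4.0]], 2))    # False: full 2x2 entry
print(check_for_none([[1.0, None], [3.0, 4.0]], 2))   # True: one element missing
```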
pyerrors/fits.py

@@ -8,7 +8,6 @@ import scipy.stats
 import matplotlib.pyplot as plt
 from matplotlib import gridspec
 from scipy.odr import ODR, Model, RealData
-from scipy.stats import chi2
 import iminuit
 from autograd import jacobian
 from autograd import elementwise_grad as egrad
@@ -34,9 +33,9 @@ class Fit_result(Sequence):
     def __len__(self):
         return len(self.fit_parameters)

-    def gamma_method(self):
+    def gamma_method(self, **kwargs):
         """Apply the gamma method to all fit parameters"""
-        [o.gamma_method() for o in self.fit_parameters]
+        [o.gamma_method(**kwargs) for o in self.fit_parameters]

     def __str__(self):
         my_str = 'Goodness of fit:\n'
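The `**kwargs` forwarding means gamma-method parameters can now be tuned directly on a fit result. A short sketch, assuming `res` is a `Fit_result` (the value of `S` is illustrative):

```python
# Hypothetical: res is a Fit_result returned by pyerrors.fits.least_squares
res.gamma_method(S=2.0)  # forwarded to every fit parameter's gamma_method
print(res)
```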
@@ -95,7 +94,8 @@ def least_squares(x, y, func, priors=None, silent=False, **kwargs):
         If true all output to the console is omitted (default False).
     initial_guess : list
         can provide an initial guess for the input parameters. Relevant for
-        non-linear fits with many parameters.
+        non-linear fits with many parameters. In case of correlated fits the guess is used to perform
+        an uncorrelated fit which then serves as guess for the correlated fit.
     method : str, optional
         can be used to choose an alternative method for the minimization of chisquare.
         The possible methods are the ones which can be used for scipy.optimize.minimize and
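From the caller's side the documented two-stage strategy looks as follows; the model and the guess values are illustrative, and `x`, `y` (a list of Obs) are assumed to exist:

```python
import autograd.numpy as anp  # fit functions must use autograd.numpy
import pyerrors as pe

def func(a, x):
    return a[0] * anp.exp(-a[1] * x)

# initial_guess seeds the uncorrelated pre-fit; its minimum then seeds the
# correlated fit (both keyword arguments are passed through **kwargs)
res = pe.fits.least_squares(x, y, func, initial_guess=[0.5, 0.1], correlated_fit=True)
res.gamma_method()
print(res)
```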
@@ -264,7 +264,7 @@ def total_least_squares(x, y, func, silent=False, **kwargs):

     fitp = out.beta
     try:
-        hess_inv = np.linalg.pinv(jacobian(jacobian(odr_chisquare))(np.concatenate((fitp, out.xplus.ravel()))))
+        hess = jacobian(jacobian(odr_chisquare))(np.concatenate((fitp, out.xplus.ravel())))
     except TypeError:
         raise Exception("It is required to use autograd.numpy instead of numpy within fit functions, see the documentation for details.") from None

@@ -275,7 +275,11 @@ def total_least_squares(x, y, func, silent=False, **kwargs):

     jac_jac_x = jacobian(jacobian(odr_chisquare_compact_x))(np.concatenate((fitp, out.xplus.ravel(), x_f.ravel())))

-    deriv_x = -hess_inv @ jac_jac_x[:n_parms + m, n_parms + m:]
+    # Compute hess^{-1} @ jac_jac_x[:n_parms + m, n_parms + m:] using LAPACK dgesv
+    try:
+        deriv_x = -scipy.linalg.solve(hess, jac_jac_x[:n_parms + m, n_parms + m:])
+    except np.linalg.LinAlgError:
+        raise Exception("Cannot invert hessian matrix.")

     def odr_chisquare_compact_y(d):
         model = func(d[:n_parms], d[n_parms:n_parms + m].reshape(x_shape))
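Replacing the pseudo-inverse by a direct linear solve avoids forming the inverse Hessian explicitly and fails loudly (instead of silently regularizing) when the Hessian is singular. A small sketch of the equivalence on a hypothetical well-conditioned Hessian:

```python
import numpy as np
import scipy.linalg

hess = np.array([[4.0, 1.0], [1.0, 3.0]])  # hypothetical Hessian
rhs = np.array([[1.0], [2.0]])

deriv_old = -np.linalg.pinv(hess) @ rhs     # old: explicit pseudo-inverse
deriv_new = -scipy.linalg.solve(hess, rhs)  # new: one LAPACK solve
assert np.allclose(deriv_old, deriv_new)
```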
@@ -284,7 +288,11 @@ def total_least_squares(x, y, func, silent=False, **kwargs):

     jac_jac_y = jacobian(jacobian(odr_chisquare_compact_y))(np.concatenate((fitp, out.xplus.ravel(), y_f)))

-    deriv_y = -hess_inv @ jac_jac_y[:n_parms + m, n_parms + m:]
+    # Compute hess^{-1} @ jac_jac_y[:n_parms + m, n_parms + m:] using LAPACK dgesv
+    try:
+        deriv_y = -scipy.linalg.solve(hess, jac_jac_y[:n_parms + m, n_parms + m:])
+    except np.linalg.LinAlgError:
+        raise Exception("Cannot invert hessian matrix.")

     result = []
     for i in range(n_parms):
@@ -294,7 +302,7 @@ def total_least_squares(x, y, func, silent=False, **kwargs):

     output.odr_chisquare = odr_chisquare(np.concatenate((out.beta, out.xplus.ravel())))
     output.dof = x.shape[-1] - n_parms
-    output.p_value = 1 - chi2.cdf(output.odr_chisquare, output.dof)
+    output.p_value = 1 - scipy.stats.chi2.cdf(output.odr_chisquare, output.dof)

     return output
@@ -469,18 +477,17 @@ def _standard_fit(x, y, func, silent=False, **kwargs):
         if condn > 1 / np.sqrt(np.finfo(float).eps):
             warnings.warn("Correlation matrix may be ill-conditioned, condition number: {%1.2e}" % (condn), RuntimeWarning)
         chol = np.linalg.cholesky(corr)
-        chol_inv = np.linalg.inv(chol)
-        chol_inv = np.dot(chol_inv, covdiag)
+        chol_inv = scipy.linalg.solve_triangular(chol, covdiag, lower=True)

-        def chisqfunc(p):
+        def chisqfunc_corr(p):
             model = func(p, x)
             chisq = anp.sum(anp.dot(chol_inv, (y_f - model)) ** 2)
             return chisq
-    else:
-        def chisqfunc(p):
-            model = func(p, x)
-            chisq = anp.sum(((y_f - model) / dy_f) ** 2)
-            return chisq
+
+    def chisqfunc(p):
+        model = func(p, x)
+        chisq = anp.sum(((y_f - model) / dy_f) ** 2)
+        return chisq

     output.method = kwargs.get('method', 'Levenberg-Marquardt')
     if not silent:
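`chol` is lower-triangular, so the two-step `inv` plus `dot` can be collapsed into a single triangular solve, which is cheaper and numerically better behaved. A sketch of the equivalence with hypothetical 2x2 data:

```python
import numpy as np
import scipy.linalg

corr = np.array([[1.0, 0.3], [0.3, 1.0]])  # hypothetical correlation matrix
covdiag = np.diag([10.0, 20.0])            # hypothetical diag(1 / dy)

chol = np.linalg.cholesky(corr)
old = np.dot(np.linalg.inv(chol), covdiag)                      # removed variant
new = scipy.linalg.solve_triangular(chol, covdiag, lower=True)  # added variant
assert np.allclose(old, new)
```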
@@ -489,29 +496,38 @@ def _standard_fit(x, y, func, silent=False, **kwargs):
     if output.method != 'Levenberg-Marquardt':
         if output.method == 'migrad':
             fit_result = iminuit.minimize(chisqfunc, x0, tol=1e-4)  # Stopping criterion 0.002 * tol * errordef
+            if kwargs.get('correlated_fit') is True:
+                fit_result = iminuit.minimize(chisqfunc_corr, fit_result.x, tol=1e-4)  # Stopping criterion 0.002 * tol * errordef
             output.iterations = fit_result.nfev
         else:
             fit_result = scipy.optimize.minimize(chisqfunc, x0, method=kwargs.get('method'), tol=1e-12)
+            if kwargs.get('correlated_fit') is True:
+                fit_result = scipy.optimize.minimize(chisqfunc_corr, fit_result.x, method=kwargs.get('method'), tol=1e-12)
             output.iterations = fit_result.nit

         chisquare = fit_result.fun

     else:
         if kwargs.get('correlated_fit') is True:
-            def chisqfunc_residuals(p):
+            def chisqfunc_residuals_corr(p):
                 model = func(p, x)
                 chisq = anp.dot(chol_inv, (y_f - model))
                 return chisq

-        else:
-            def chisqfunc_residuals(p):
-                model = func(p, x)
-                chisq = ((y_f - model) / dy_f)
-                return chisq
+        def chisqfunc_residuals(p):
+            model = func(p, x)
+            chisq = ((y_f - model) / dy_f)
+            return chisq

         fit_result = scipy.optimize.least_squares(chisqfunc_residuals, x0, method='lm', ftol=1e-15, gtol=1e-15, xtol=1e-15)
+        if kwargs.get('correlated_fit') is True:
+            fit_result = scipy.optimize.least_squares(chisqfunc_residuals_corr, fit_result.x, method='lm', ftol=1e-15, gtol=1e-15, xtol=1e-15)

         chisquare = np.sum(fit_result.fun ** 2)
-        assert np.isclose(chisquare, chisqfunc(fit_result.x), atol=1e-14)
+        if kwargs.get('correlated_fit') is True:
+            assert np.isclose(chisquare, chisqfunc_corr(fit_result.x), atol=1e-14)
+        else:
+            assert np.isclose(chisquare, chisqfunc(fit_result.x), atol=1e-14)

         output.iterations = fit_result.nfev
@@ -542,7 +558,10 @@ def _standard_fit(x, y, func, silent=False, **kwargs):

     fitp = fit_result.x
     try:
-        hess_inv = np.linalg.pinv(jacobian(jacobian(chisqfunc))(fitp))
+        if kwargs.get('correlated_fit') is True:
+            hess = jacobian(jacobian(chisqfunc_corr))(fitp)
+        else:
+            hess = jacobian(jacobian(chisqfunc))(fitp)
     except TypeError:
         raise Exception("It is required to use autograd.numpy instead of numpy within fit functions, see the documentation for details.") from None

@@ -560,7 +579,11 @@ def _standard_fit(x, y, func, silent=False, **kwargs):

     jac_jac = jacobian(jacobian(chisqfunc_compact))(np.concatenate((fitp, y_f)))

-    deriv = -hess_inv @ jac_jac[:n_parms, n_parms:]
+    # Compute hess^{-1} @ jac_jac[:n_parms, n_parms:] using LAPACK dgesv
+    try:
+        deriv = -scipy.linalg.solve(hess, jac_jac[:n_parms, n_parms:])
+    except np.linalg.LinAlgError:
+        raise Exception("Cannot invert hessian matrix.")

     result = []
     for i in range(n_parms):
@@ -568,9 +591,9 @@ def _standard_fit(x, y, func, silent=False, **kwargs):

     output.fit_parameters = result

-    output.chisquare = chisqfunc(fit_result.x)
+    output.chisquare = chisquare
     output.dof = x.shape[-1] - n_parms
-    output.p_value = 1 - chi2.cdf(output.chisquare, output.dof)
+    output.p_value = 1 - scipy.stats.chi2.cdf(output.chisquare, output.dof)

     if kwargs.get('resplot') is True:
         residual_plot(x, y, func, result)
pyerrors/input/bdio.py

@@ -61,7 +61,7 @@ def read_ADerrors(file_path, bdio_path='./libbdio.so', **kwargs):
     return_list = []

     print('Reading of bdio file started')
-    while 1 > 0:
+    while True:
         bdio_seek_record(fbdio)
         ruinfo = bdio_get_ruinfo(fbdio)

@@ -373,7 +373,7 @@ def read_mesons(file_path, bdio_path='./libbdio.so', **kwargs):
     fbdio = bdio_open(ctypes.c_char_p(b_path), ctypes.c_char_p(b_read), ctypes.c_char_p(b_form))

     print('Reading of bdio file started')
-    while 1 > 0:
+    while True:
         bdio_seek_record(fbdio)
         ruinfo = bdio_get_ruinfo(fbdio)
         if ruinfo < 0:
@@ -582,7 +582,7 @@ def read_dSdm(file_path, bdio_path='./libbdio.so', **kwargs):
     fbdio = bdio_open(ctypes.c_char_p(b_path), ctypes.c_char_p(b_read), ctypes.c_char_p(b_form))

     print('Reading of bdio file started')
-    while 1 > 0:
+    while True:
         bdio_seek_record(fbdio)
         ruinfo = bdio_get_ruinfo(fbdio)
         if ruinfo < 0:
pyerrors/input/dobs.py

@@ -648,7 +648,6 @@ def _dobsdict_to_xmlstring_spaces(d, space=' '):
     return o


-def create_dobs_string(obsl, name, spec='dobs v1.0', origin='', symbol=[], who=None, enstags={}):
+def create_dobs_string(obsl, name, spec='dobs v1.0', origin='', symbol=[], who=None, enstags=None):
     """Generate the string for the export of a list of Obs or structures containing Obs
     to a .xml.gz file according to the Zeuthen dobs format.

@@ -674,6 +674,8 @@ def create_dobs_string(obsl, name, spec='dobs v1.0', origin='', symbol=[], who=None, enstags=None):
         Provide alternative enstag for ensembles in the form enstags = {ename: enstag}
         Otherwise, the ensemble name is used.
     """
+    if enstags is None:
+        enstags = {}
     od = {}
     r_names = []
     for o in obsl:
@@ -831,7 +833,7 @@ def create_dobs_string(obsl, name, spec='dobs v1.0', origin='', symbol=[], who=None, enstags=None):
     return rs


-def write_dobs(obsl, fname, name, spec='dobs v1.0', origin='', symbol=[], who=None, enstags={}, gz=True):
+def write_dobs(obsl, fname, name, spec='dobs v1.0', origin='', symbol=[], who=None, enstags=None, gz=True):
     """Export a list of Obs or structures containing Obs to a .xml.gz file
     according to the Zeuthen dobs format.

@@ -861,6 +863,8 @@ def write_dobs(obsl, fname, name, spec='dobs v1.0', origin='', symbol=[], who=None, enstags=None, gz=True):
     gz : bool
         If True, the output is a gzipped XML. If False, the output is a XML file.
     """
+    if enstags is None:
+        enstags = {}

     dobsstring = create_dobs_string(obsl, name, spec, origin, symbol, who, enstags=enstags)
pyerrors/input/hadrons.py

@@ -55,8 +55,8 @@ def _get_files(path, filestem, idl):
     return filtered_files, idx


-def read_meson_hd5(path, filestem, ens_id, meson='meson_0', idl=None):
-    """Read hadrons meson hdf5 file and extract the meson labeled 'meson'
+def read_meson_hd5(path, filestem, ens_id, meson='meson_0', idl=None, gammas=None):
+    r'''Read hadrons meson hdf5 file and extract the meson labeled 'meson'

     Parameters
     -----------------
@@ -69,13 +69,31 @@ def read_meson_hd5(path, filestem, ens_id, meson='meson_0', idl=None, gammas=None):
     meson : str
         label of the meson to be extracted, standard value meson_0 which
         corresponds to the pseudoscalar pseudoscalar two-point function.
+    gammas : tuple of strings
+        Instead of a meson label one can also provide a tuple of two strings
+        indicating the gamma matrices at source and sink.
+        ("Gamma5", "Gamma5") corresponds to the pseudoscalar pseudoscalar
+        two-point function. The gammas argument takes precedence over meson.
     idl : range
         If specified only configurations in the given range are read in.
-    """
+    '''

     files, idx = _get_files(path, filestem, idl)

+    tree = meson.rsplit('_')[0]
+    if gammas is not None:
+        h5file = h5py.File(path + '/' + files[0], "r")
+        found_meson = None
+        for key in h5file[tree].keys():
+            if gammas[0] == h5file[tree][key].attrs["gamma_snk"][0].decode() and h5file[tree][key].attrs["gamma_src"][0].decode() == gammas[1]:
+                found_meson = key
+                break
+        h5file.close()
+        if found_meson:
+            meson = found_meson
+        else:
+            raise Exception("Source Sink combination " + str(gammas) + " not found.")
+
     corr_data = []
     infos = []
     for hd5_file in files:
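From the caller's side the new keyword selects the channel by its gamma structure rather than by a dataset label; the path, file stem and ensemble id below are placeholders:

```python
import pyerrors.input.hadrons as hadrons

# placeholders for an actual Hadrons output directory and file stem
corr = hadrons.read_meson_hd5("/data/runs", "meson_2pt", "A654",
                              gammas=("Gamma5", "Gamma5"))
```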
@@ -153,10 +171,6 @@ def read_DistillationContraction_hd5(path, ens_id, diagrams=["direct"], idl=None):
         identifier = tuple(identifier)
         # "DistillationContraction/Metadata/DmfSuffix" contains info about different quarks, irrelevant in the SU(3) case.

-        check_traj = h5file["DistillationContraction/Metadata"].attrs.get("Traj")[0]
-
-        assert check_traj == n_traj
-
         for diagram in diagrams:
             real_data = np.zeros(Nt)
             for x0 in range(Nt):
pyerrors/input/json.py

@@ -165,7 +165,7 @@ def create_json_string(ol, description='', indent=1):

     d = {}
     d['program'] = 'pyerrors %s' % (pyerrorsversion.__version__)
-    d['version'] = '1.0'
+    d['version'] = '1.1'
     d['who'] = getpass.getuser()
     d['date'] = datetime.datetime.now().astimezone().strftime('%Y-%m-%d %H:%M:%S %z')
     d['host'] = socket.gethostname() + ', ' + platform.platform()
@@ -294,6 +294,7 @@ def _parse_json_dict(json_dict, verbose=True, full_output=False):

         if od:
             ret = Obs([[ddi[0] + values[0] for ddi in di] for di in od['deltas']], od['names'], idl=od['idl'])
+            ret._value = values[0]
             ret.is_merged = od['is_merged']
         else:
             ret = Obs([], [], means=[])
@@ -319,6 +320,7 @@ def _parse_json_dict(json_dict, verbose=True, full_output=False):
         for i in range(layout):
             if od:
                 ret.append(Obs([list(di[:, i] + values[i]) for di in od['deltas']], od['names'], idl=od['idl']))
+                ret[-1]._value = values[i]
                 ret[-1].is_merged = od['is_merged']
             else:
                 ret.append(Obs([], [], means=[]))
@@ -346,6 +348,7 @@ def _parse_json_dict(json_dict, verbose=True, full_output=False):
         for i in range(N):
             if od:
                 ret.append(Obs([di[:, i] + values[i] for di in od['deltas']], od['names'], idl=od['idl']))
+                ret[-1]._value = values[i]
                 ret[-1].is_merged = od['is_merged']
             else:
                 ret.append(Obs([], [], means=[]))
pyerrors/input/misc.py

@@ -17,8 +17,6 @@ def read_pbp(path, prefix, **kwargs):
         list which contains the last config to be read for each replicum
     """

-    extract_nfct = 1
-
     ls = []
     for (dirpath, dirnames, filenames) in os.walk(path):
         ls.extend(filenames)
@@ -78,14 +76,10 @@ def read_pbp(path, prefix, **kwargs):

             # This block is necessary for openQCD1.6 ms1 files
             nfct = []
-            if extract_nfct == 1:
-                for i in range(nrw):
-                    t = fp.read(4)
-                    nfct.append(struct.unpack('i', t)[0])
-                print('nfct: ', nfct)  # Hasenbusch factor, 1 for rat reweighting
-            else:
-                for i in range(nrw):
-                    nfct.append(1)
+            for i in range(nrw):
+                t = fp.read(4)
+                nfct.append(struct.unpack('i', t)[0])
+            print('nfct: ', nfct)  # Hasenbusch factor, 1 for rat reweighting

             nsrc = []
             for i in range(nrw):
@@ -93,7 +87,7 @@ def read_pbp(path, prefix, **kwargs):
                 nsrc.append(struct.unpack('i', t)[0])

             # body
-            while 0 < 1:
+            while True:
                 t = fp.read(4)
                 if len(t) < 4:
                     break
pyerrors/input/openQCD.py

@@ -150,7 +150,7 @@ def read_rwms(path, prefix, version='2.0', names=None, **kwargs):
                     print('something is wrong!')

             configlist.append([])
-            while 0 < 1:
+            while True:
                 t = fp.read(4)
                 if len(t) < 4:
                     break
@@ -362,7 +362,7 @@ def extract_t0(path, prefix, dtr_read, xmin, spatial_extent, fit_range=5, **kwargs):
             Ysl = []

             configlist.append([])
-            while 0 < 1:
+            while True:
                 t = fp.read(4)
                 if(len(t) < 4):
                     break
@@ -657,7 +657,7 @@ def read_qtop(path, prefix, c, dtr_cnfg=1, version="openQCD", **kwargs):
         nfl = 1
     iobs = 8 * nfl  # number of flow observables calculated

-    while 0 < 1:
+    while True:
         t = fp.read(4)
         if(len(t) < 4):
             break
@@ -682,7 +682,7 @@ def read_qtop(path, prefix, c, dtr_cnfg=1, version="openQCD", **kwargs):
     t = fp.read(8)
     eps = struct.unpack('d', t)[0]

-    while 0 < 1:
+    while True:
         t = fp.read(4)
         if(len(t) < 4):
             break
pyerrors/obs.py
@@ -1,5 +1,7 @@
 import warnings
 import pickle
+from math import gcd
+from functools import reduce
 import numpy as np
 import autograd.numpy as anp  # Thinly-wrapped numpy
 from autograd import jacobian
@@ -981,6 +983,39 @@ def _merge_idx(idl):
     return sorted(set().union(*idl))


+def _intersection_idx(idl):
+    """Returns the intersection of all lists in idl as sorted list
+
+    Parameters
+    ----------
+    idl : list
+        List of lists or ranges.
+    """
+
+    def _lcm(*args):
+        """Returns the lowest common multiple of args.
+
+        From python 3.9 onwards the math library contains an lcm function."""
+        return reduce(lambda a, b: a * b // gcd(a, b), args)
+
+    # Use groupby to efficiently check whether all elements of idl are identical
+    try:
+        g = groupby(idl)
+        if next(g, True) and not next(g, False):
+            return idl[0]
+    except Exception:
+        pass
+
+    if np.all([type(idx) is range for idx in idl]):
+        if len(set([idx[0] for idx in idl])) == 1:
+            idstart = max([idx.start for idx in idl])
+            idstop = min([idx.stop for idx in idl])
+            idstep = _lcm(*[idx.step for idx in idl])
+            return range(idstart, idstop, idstep)
+
+    return sorted(set.intersection(*[set(o) for o in idl]))
+
+
 def _expand_deltas_for_merge(deltas, idx, shape, new_idx):
     """Expand deltas defined on idx to the list of configs that is defined by new_idx.
     New, empty entries are filled by 0. If idx and new_idx are of type range, the smallest
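Expected behaviour of the new helper, as a quick sanity check (the private function is imported here purely for the sake of the demonstration):

```python
from pyerrors.obs import _intersection_idx

# two ranges with a common start: the intersection is again a range whose
# step is the least common multiple of the individual steps
print(_intersection_idx([range(1, 100, 2), range(1, 100, 4)]))  # range(1, 100, 4)

# generic lists fall back to a sorted set intersection
print(_intersection_idx([[1, 2, 3, 5], [2, 3, 4, 5]]))          # [2, 3, 5]
```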
@@ -1008,6 +1043,34 @@ def _expand_deltas_for_merge(deltas, idx, shape, new_idx):
     return np.array([ret[new_idx[i] - new_idx[0]] for i in range(len(new_idx))])


+def _collapse_deltas_for_merge(deltas, idx, shape, new_idx):
+    """Collapse deltas defined on idx to the list of configs that is defined by new_idx.
+    If idx and new_idx are of type range, the smallest
+    common divisor of the step sizes is used as new step size.
+
+    Parameters
+    ----------
+    deltas : list
+        List of fluctuations
+    idx : list
+        List or range of configs on which the deltas are defined.
+        Has to be a subset of new_idx and has to be sorted in ascending order.
+    shape : list
+        Number of configs in idx.
+    new_idx : list
+        List of configs that defines the new range, has to be sorted in ascending order.
+    """
+
+    if type(idx) is range and type(new_idx) is range:
+        if idx == new_idx:
+            return deltas
+    ret = np.zeros(new_idx[-1] - new_idx[0] + 1)
+    for i in range(shape):
+        if idx[i] in new_idx:
+            ret[idx[i] - new_idx[0]] = deltas[i]
+    return np.array([ret[new_idx[i] - new_idx[0]] for i in range(len(new_idx))])
+
+
 def _filter_zeroes(deltas, idx, eps=Obs.filter_eps):
     """Filter out all configurations with vanishing fluctuation such that they do not
     contribute to the error estimate anymore. Returns the new deltas and
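In contrast to `_expand_deltas_for_merge`, which pads missing configurations with zeros, the new helper keeps only the fluctuations on the configurations present in `new_idx`. A small worked example (the values are illustrative):

```python
import numpy as np
from pyerrors.obs import _collapse_deltas_for_merge

deltas = np.arange(8.0)       # one fluctuation per config 1..8
idx = range(1, 9)             # configs on which the deltas live
new_idx = range(1, 9, 2)      # keep only configs 1, 3, 5, 7

print(_collapse_deltas_for_merge(deltas, idx, len(idx), new_idx))
# -> [0. 2. 4. 6.]
```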
@@ -1389,13 +1452,6 @@ def covariance(obs, visualize=False, correlation=False, smooth=None, **kwargs):
     if isinstance(smooth, int):
         corr = _smooth_eigenvalues(corr, smooth)

-    errors = [o.dvalue for o in obs]
-    cov = np.diag(errors) @ corr @ np.diag(errors)
-
-    eigenvalues = np.linalg.eigh(cov)[0]
-    if not np.all(eigenvalues >= 0):
-        warnings.warn("Covariance matrix is not positive semi-definite (Eigenvalues: " + str(eigenvalues) + ")", RuntimeWarning)
-
     if visualize:
         plt.matshow(corr, vmin=-1, vmax=1)
         plt.set_cmap('RdBu')
@@ -1404,8 +1460,15 @@ def covariance(obs, visualize=False, correlation=False, smooth=None, **kwargs):

     if correlation is True:
         return corr
-    else:
-        return cov
+
+    errors = [o.dvalue for o in obs]
+    cov = np.diag(errors) @ corr @ np.diag(errors)
+
+    eigenvalues = np.linalg.eigh(cov)[0]
+    if not np.all(eigenvalues >= 0):
+        warnings.warn("Covariance matrix is not positive semi-definite (Eigenvalues: " + str(eigenvalues) + ")", RuntimeWarning)
+
+    return cov


 def _smooth_eigenvalues(corr, E):
@@ -1429,8 +1492,8 @@ def _covariance_element(obs1, obs2):
    """Estimates the covariance of two Obs objects, neglecting autocorrelations."""

     def calc_gamma(deltas1, deltas2, idx1, idx2, new_idx):
-        deltas1 = _expand_deltas_for_merge(deltas1, idx1, len(idx1), new_idx)
-        deltas2 = _expand_deltas_for_merge(deltas2, idx2, len(idx2), new_idx)
+        deltas1 = _collapse_deltas_for_merge(deltas1, idx1, len(idx1), new_idx)
+        deltas2 = _collapse_deltas_for_merge(deltas2, idx2, len(idx2), new_idx)
         return np.sum(deltas1 * deltas2)

     if set(obs1.names).isdisjoint(set(obs2.names)):
@@ -1450,29 +1513,30 @@ def _covariance_element(obs1, obs2):
         for r_name in obs1.e_content[e_name]:
             if r_name not in obs2.e_content[e_name]:
                 continue
-            idl_d[r_name] = _merge_idx([obs1.idl[r_name], obs2.idl[r_name]])
+            idl_d[r_name] = _intersection_idx([obs1.idl[r_name], obs2.idl[r_name]])

         gamma = 0.0

         for r_name in obs1.e_content[e_name]:
             if r_name not in obs2.e_content[e_name]:
                 continue
+            if len(idl_d[r_name]) == 0:
+                continue
             gamma += calc_gamma(obs1.deltas[r_name], obs2.deltas[r_name], obs1.idl[r_name], obs2.idl[r_name], idl_d[r_name])

         if gamma == 0.0:
             continue

         gamma_div = 0.0
-        e_N = 0
         for r_name in obs1.e_content[e_name]:
             if r_name not in obs2.e_content[e_name]:
                 continue
-            gamma_div += calc_gamma(np.ones(obs1.shape[r_name]), np.ones(obs2.shape[r_name]), obs1.idl[r_name], obs2.idl[r_name], idl_d[r_name])
-            e_N += len(idl_d[r_name])
-        gamma /= max(gamma_div, 1.0)
+            if len(idl_d[r_name]) == 0:
+                continue
+            gamma_div += np.sqrt(calc_gamma(obs1.deltas[r_name], obs1.deltas[r_name], obs1.idl[r_name], obs1.idl[r_name], idl_d[r_name]) * calc_gamma(obs2.deltas[r_name], obs2.deltas[r_name], obs2.idl[r_name], obs2.idl[r_name], idl_d[r_name]))
+        gamma /= gamma_div

-        # Bias correction hep-lat/0306017 eq. (49)
-        dvalue += (1 + 1 / e_N) * gamma / e_N
+        dvalue += gamma

     for e_name in obs1.cov_names:
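The removed normalization divided the summed product of fluctuations by the number of configurations and applied the bias correction of hep-lat/0306017 eq. (49); the added lines instead normalize by the geometric mean of the two auto-correlation sums on the common configurations. Schematically, for a single replicum with intersection I of the two index lists, each contribution becomes a Cauchy-Schwarz-bounded, correlation-like ratio:

```latex
\gamma \;=\; \frac{\sum_{i \in I} \delta^{(1)}_i \, \delta^{(2)}_i}
                  {\sqrt{\sum_{i \in I} \big(\delta^{(1)}_i\big)^{2}
                         \sum_{i \in I} \big(\delta^{(2)}_i\big)^{2}}},
\qquad \lvert \gamma \rvert \le 1 .
```

The rescaling back to a covariance then happens in `covariance()` via `np.diag(errors) @ corr @ np.diag(errors)`, as shown in the hunk above.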
pyerrors/version.py

@@ -1 +1 @@
-__version__ = "2.1.0+dev"
+__version__ = "2.2.0+dev"