mirror of
https://github.com/fjosw/pyerrors.git
synced 2025-03-15 14:50:25 +01:00
[Fix] Ruff rules and more precise Exception types (#248)
* [Fix] Fix test for membership should be 'not in' (E713) * [Fix] Fix module imported but unused (F401) * [Fix] More precise Exception types in dirac, obs and correlator
This commit is contained in:
parent
d908508120
commit
3eac9214b4
11 changed files with 143 additions and 143 deletions
|
@ -481,12 +481,12 @@ from .obs import *
|
||||||
from .correlators import *
|
from .correlators import *
|
||||||
from .fits import *
|
from .fits import *
|
||||||
from .misc import *
|
from .misc import *
|
||||||
from . import dirac
|
from . import dirac as dirac
|
||||||
from . import input
|
from . import input as input
|
||||||
from . import linalg
|
from . import linalg as linalg
|
||||||
from . import mpm
|
from . import mpm as mpm
|
||||||
from . import roots
|
from . import roots as roots
|
||||||
from . import integrate
|
from . import integrate as integrate
|
||||||
from . import special
|
from . import special as special
|
||||||
|
|
||||||
from .version import __version__
|
from .version import __version__ as __version__
|
||||||
|
|
|
@ -101,7 +101,7 @@ class Corr:
|
||||||
self.N = 1
|
self.N = 1
|
||||||
elif all([isinstance(item, np.ndarray) or item is None for item in data_input]) and any([isinstance(item, np.ndarray) for item in data_input]):
|
elif all([isinstance(item, np.ndarray) or item is None for item in data_input]) and any([isinstance(item, np.ndarray) for item in data_input]):
|
||||||
self.content = data_input
|
self.content = data_input
|
||||||
noNull = [a for a in self.content if not (a is None)] # To check if the matrices are correct for all undefined elements
|
noNull = [a for a in self.content if a is not None] # To check if the matrices are correct for all undefined elements
|
||||||
self.N = noNull[0].shape[0]
|
self.N = noNull[0].shape[0]
|
||||||
if self.N > 1 and noNull[0].shape[0] != noNull[0].shape[1]:
|
if self.N > 1 and noNull[0].shape[0] != noNull[0].shape[1]:
|
||||||
raise ValueError("Smearing matrices are not NxN.")
|
raise ValueError("Smearing matrices are not NxN.")
|
||||||
|
@ -141,7 +141,7 @@ class Corr:
|
||||||
def gamma_method(self, **kwargs):
|
def gamma_method(self, **kwargs):
|
||||||
"""Apply the gamma method to the content of the Corr."""
|
"""Apply the gamma method to the content of the Corr."""
|
||||||
for item in self.content:
|
for item in self.content:
|
||||||
if not (item is None):
|
if item is not None:
|
||||||
if self.N == 1:
|
if self.N == 1:
|
||||||
item[0].gamma_method(**kwargs)
|
item[0].gamma_method(**kwargs)
|
||||||
else:
|
else:
|
||||||
|
@ -159,7 +159,7 @@ class Corr:
|
||||||
By default it will return the lowest source, which usually means unsmeared-unsmeared (0,0), but it does not have to
|
By default it will return the lowest source, which usually means unsmeared-unsmeared (0,0), but it does not have to
|
||||||
"""
|
"""
|
||||||
if self.N == 1:
|
if self.N == 1:
|
||||||
raise Exception("Trying to project a Corr, that already has N=1.")
|
raise ValueError("Trying to project a Corr, that already has N=1.")
|
||||||
|
|
||||||
if vector_l is None:
|
if vector_l is None:
|
||||||
vector_l, vector_r = np.asarray([1.] + (self.N - 1) * [0.]), np.asarray([1.] + (self.N - 1) * [0.])
|
vector_l, vector_r = np.asarray([1.] + (self.N - 1) * [0.]), np.asarray([1.] + (self.N - 1) * [0.])
|
||||||
|
@ -167,16 +167,16 @@ class Corr:
|
||||||
vector_r = vector_l
|
vector_r = vector_l
|
||||||
if isinstance(vector_l, list) and not isinstance(vector_r, list):
|
if isinstance(vector_l, list) and not isinstance(vector_r, list):
|
||||||
if len(vector_l) != self.T:
|
if len(vector_l) != self.T:
|
||||||
raise Exception("Length of vector list must be equal to T")
|
raise ValueError("Length of vector list must be equal to T")
|
||||||
vector_r = [vector_r] * self.T
|
vector_r = [vector_r] * self.T
|
||||||
if isinstance(vector_r, list) and not isinstance(vector_l, list):
|
if isinstance(vector_r, list) and not isinstance(vector_l, list):
|
||||||
if len(vector_r) != self.T:
|
if len(vector_r) != self.T:
|
||||||
raise Exception("Length of vector list must be equal to T")
|
raise ValueError("Length of vector list must be equal to T")
|
||||||
vector_l = [vector_l] * self.T
|
vector_l = [vector_l] * self.T
|
||||||
|
|
||||||
if not isinstance(vector_l, list):
|
if not isinstance(vector_l, list):
|
||||||
if not vector_l.shape == vector_r.shape == (self.N,):
|
if not vector_l.shape == vector_r.shape == (self.N,):
|
||||||
raise Exception("Vectors are of wrong shape!")
|
raise ValueError("Vectors are of wrong shape!")
|
||||||
if normalize:
|
if normalize:
|
||||||
vector_l, vector_r = vector_l / np.sqrt((vector_l @ vector_l)), vector_r / np.sqrt(vector_r @ vector_r)
|
vector_l, vector_r = vector_l / np.sqrt((vector_l @ vector_l)), vector_r / np.sqrt(vector_r @ vector_r)
|
||||||
newcontent = [None if _check_for_none(self, item) else np.asarray([vector_l.T @ item @ vector_r]) for item in self.content]
|
newcontent = [None if _check_for_none(self, item) else np.asarray([vector_l.T @ item @ vector_r]) for item in self.content]
|
||||||
|
@ -201,7 +201,7 @@ class Corr:
|
||||||
Second index to be picked.
|
Second index to be picked.
|
||||||
"""
|
"""
|
||||||
if self.N == 1:
|
if self.N == 1:
|
||||||
raise Exception("Trying to pick item from projected Corr")
|
raise ValueError("Trying to pick item from projected Corr")
|
||||||
newcontent = [None if (item is None) else item[i, j] for item in self.content]
|
newcontent = [None if (item is None) else item[i, j] for item in self.content]
|
||||||
return Corr(newcontent)
|
return Corr(newcontent)
|
||||||
|
|
||||||
|
@ -212,8 +212,8 @@ class Corr:
|
||||||
timeslice and the error on each timeslice.
|
timeslice and the error on each timeslice.
|
||||||
"""
|
"""
|
||||||
if self.N != 1:
|
if self.N != 1:
|
||||||
raise Exception("Can only make Corr[N=1] plottable")
|
raise ValueError("Can only make Corr[N=1] plottable")
|
||||||
x_list = [x for x in range(self.T) if not self.content[x] is None]
|
x_list = [x for x in range(self.T) if self.content[x] is not None]
|
||||||
y_list = [y[0].value for y in self.content if y is not None]
|
y_list = [y[0].value for y in self.content if y is not None]
|
||||||
y_err_list = [y[0].dvalue for y in self.content if y is not None]
|
y_err_list = [y[0].dvalue for y in self.content if y is not None]
|
||||||
|
|
||||||
|
@ -222,9 +222,9 @@ class Corr:
|
||||||
def symmetric(self):
|
def symmetric(self):
|
||||||
""" Symmetrize the correlator around x0=0."""
|
""" Symmetrize the correlator around x0=0."""
|
||||||
if self.N != 1:
|
if self.N != 1:
|
||||||
raise Exception('symmetric cannot be safely applied to multi-dimensional correlators.')
|
raise ValueError('symmetric cannot be safely applied to multi-dimensional correlators.')
|
||||||
if self.T % 2 != 0:
|
if self.T % 2 != 0:
|
||||||
raise Exception("Can not symmetrize odd T")
|
raise ValueError("Can not symmetrize odd T")
|
||||||
|
|
||||||
if self.content[0] is not None:
|
if self.content[0] is not None:
|
||||||
if np.argmax(np.abs([o[0].value if o is not None else 0 for o in self.content])) != 0:
|
if np.argmax(np.abs([o[0].value if o is not None else 0 for o in self.content])) != 0:
|
||||||
|
@ -237,7 +237,7 @@ class Corr:
|
||||||
else:
|
else:
|
||||||
newcontent.append(0.5 * (self.content[t] + self.content[self.T - t]))
|
newcontent.append(0.5 * (self.content[t] + self.content[self.T - t]))
|
||||||
if (all([x is None for x in newcontent])):
|
if (all([x is None for x in newcontent])):
|
||||||
raise Exception("Corr could not be symmetrized: No redundant values")
|
raise ValueError("Corr could not be symmetrized: No redundant values")
|
||||||
return Corr(newcontent, prange=self.prange)
|
return Corr(newcontent, prange=self.prange)
|
||||||
|
|
||||||
def anti_symmetric(self):
|
def anti_symmetric(self):
|
||||||
|
@ -245,7 +245,7 @@ class Corr:
|
||||||
if self.N != 1:
|
if self.N != 1:
|
||||||
raise TypeError('anti_symmetric cannot be safely applied to multi-dimensional correlators.')
|
raise TypeError('anti_symmetric cannot be safely applied to multi-dimensional correlators.')
|
||||||
if self.T % 2 != 0:
|
if self.T % 2 != 0:
|
||||||
raise Exception("Can not symmetrize odd T")
|
raise ValueError("Can not symmetrize odd T")
|
||||||
|
|
||||||
test = 1 * self
|
test = 1 * self
|
||||||
test.gamma_method()
|
test.gamma_method()
|
||||||
|
@ -259,7 +259,7 @@ class Corr:
|
||||||
else:
|
else:
|
||||||
newcontent.append(0.5 * (self.content[t] - self.content[self.T - t]))
|
newcontent.append(0.5 * (self.content[t] - self.content[self.T - t]))
|
||||||
if (all([x is None for x in newcontent])):
|
if (all([x is None for x in newcontent])):
|
||||||
raise Exception("Corr could not be symmetrized: No redundant values")
|
raise ValueError("Corr could not be symmetrized: No redundant values")
|
||||||
return Corr(newcontent, prange=self.prange)
|
return Corr(newcontent, prange=self.prange)
|
||||||
|
|
||||||
def is_matrix_symmetric(self):
|
def is_matrix_symmetric(self):
|
||||||
|
@ -292,7 +292,7 @@ class Corr:
|
||||||
def matrix_symmetric(self):
|
def matrix_symmetric(self):
|
||||||
"""Symmetrizes the correlator matrices on every timeslice."""
|
"""Symmetrizes the correlator matrices on every timeslice."""
|
||||||
if self.N == 1:
|
if self.N == 1:
|
||||||
raise Exception("Trying to symmetrize a correlator matrix, that already has N=1.")
|
raise ValueError("Trying to symmetrize a correlator matrix, that already has N=1.")
|
||||||
if self.is_matrix_symmetric():
|
if self.is_matrix_symmetric():
|
||||||
return 1.0 * self
|
return 1.0 * self
|
||||||
else:
|
else:
|
||||||
|
@ -336,10 +336,10 @@ class Corr:
|
||||||
'''
|
'''
|
||||||
|
|
||||||
if self.N == 1:
|
if self.N == 1:
|
||||||
raise Exception("GEVP methods only works on correlator matrices and not single correlators.")
|
raise ValueError("GEVP methods only works on correlator matrices and not single correlators.")
|
||||||
if ts is not None:
|
if ts is not None:
|
||||||
if (ts <= t0):
|
if (ts <= t0):
|
||||||
raise Exception("ts has to be larger than t0.")
|
raise ValueError("ts has to be larger than t0.")
|
||||||
|
|
||||||
if "sorted_list" in kwargs:
|
if "sorted_list" in kwargs:
|
||||||
warnings.warn("Argument 'sorted_list' is deprecated, use 'sort' instead.", DeprecationWarning)
|
warnings.warn("Argument 'sorted_list' is deprecated, use 'sort' instead.", DeprecationWarning)
|
||||||
|
@ -371,9 +371,9 @@ class Corr:
|
||||||
|
|
||||||
if sort is None:
|
if sort is None:
|
||||||
if (ts is None):
|
if (ts is None):
|
||||||
raise Exception("ts is required if sort=None.")
|
raise ValueError("ts is required if sort=None.")
|
||||||
if (self.content[t0] is None) or (self.content[ts] is None):
|
if (self.content[t0] is None) or (self.content[ts] is None):
|
||||||
raise Exception("Corr not defined at t0/ts.")
|
raise ValueError("Corr not defined at t0/ts.")
|
||||||
Gt = _get_mat_at_t(ts)
|
Gt = _get_mat_at_t(ts)
|
||||||
reordered_vecs = _GEVP_solver(Gt, G0, method=method, chol_inv=chol_inv)
|
reordered_vecs = _GEVP_solver(Gt, G0, method=method, chol_inv=chol_inv)
|
||||||
if kwargs.get('auto_gamma', False) and vector_obs:
|
if kwargs.get('auto_gamma', False) and vector_obs:
|
||||||
|
@ -391,14 +391,14 @@ class Corr:
|
||||||
all_vecs.append(None)
|
all_vecs.append(None)
|
||||||
if sort == "Eigenvector":
|
if sort == "Eigenvector":
|
||||||
if ts is None:
|
if ts is None:
|
||||||
raise Exception("ts is required for the Eigenvector sorting method.")
|
raise ValueError("ts is required for the Eigenvector sorting method.")
|
||||||
all_vecs = _sort_vectors(all_vecs, ts)
|
all_vecs = _sort_vectors(all_vecs, ts)
|
||||||
|
|
||||||
reordered_vecs = [[v[s] if v is not None else None for v in all_vecs] for s in range(self.N)]
|
reordered_vecs = [[v[s] if v is not None else None for v in all_vecs] for s in range(self.N)]
|
||||||
if kwargs.get('auto_gamma', False) and vector_obs:
|
if kwargs.get('auto_gamma', False) and vector_obs:
|
||||||
[[[o.gm() for o in evn] for evn in ev if evn is not None] for ev in reordered_vecs]
|
[[[o.gm() for o in evn] for evn in ev if evn is not None] for ev in reordered_vecs]
|
||||||
else:
|
else:
|
||||||
raise Exception("Unknown value for 'sort'. Choose 'Eigenvalue', 'Eigenvector' or None.")
|
raise ValueError("Unknown value for 'sort'. Choose 'Eigenvalue', 'Eigenvector' or None.")
|
||||||
|
|
||||||
if "state" in kwargs:
|
if "state" in kwargs:
|
||||||
return reordered_vecs[kwargs.get("state")]
|
return reordered_vecs[kwargs.get("state")]
|
||||||
|
@ -435,7 +435,7 @@ class Corr:
|
||||||
"""
|
"""
|
||||||
|
|
||||||
if self.N != 1:
|
if self.N != 1:
|
||||||
raise Exception("Multi-operator Prony not implemented!")
|
raise NotImplementedError("Multi-operator Prony not implemented!")
|
||||||
|
|
||||||
array = np.empty([N, N], dtype="object")
|
array = np.empty([N, N], dtype="object")
|
||||||
new_content = []
|
new_content = []
|
||||||
|
@ -502,7 +502,7 @@ class Corr:
|
||||||
correlator or a Corr of same length.
|
correlator or a Corr of same length.
|
||||||
"""
|
"""
|
||||||
if self.N != 1:
|
if self.N != 1:
|
||||||
raise Exception("Only one-dimensional correlators can be safely correlated.")
|
raise ValueError("Only one-dimensional correlators can be safely correlated.")
|
||||||
new_content = []
|
new_content = []
|
||||||
for x0, t_slice in enumerate(self.content):
|
for x0, t_slice in enumerate(self.content):
|
||||||
if _check_for_none(self, t_slice):
|
if _check_for_none(self, t_slice):
|
||||||
|
@ -516,7 +516,7 @@ class Corr:
|
||||||
elif isinstance(partner, Obs): # Should this include CObs?
|
elif isinstance(partner, Obs): # Should this include CObs?
|
||||||
new_content.append(np.array([correlate(o, partner) for o in t_slice]))
|
new_content.append(np.array([correlate(o, partner) for o in t_slice]))
|
||||||
else:
|
else:
|
||||||
raise Exception("Can only correlate with an Obs or a Corr.")
|
raise TypeError("Can only correlate with an Obs or a Corr.")
|
||||||
|
|
||||||
return Corr(new_content)
|
return Corr(new_content)
|
||||||
|
|
||||||
|
@ -583,7 +583,7 @@ class Corr:
|
||||||
Available choice: symmetric, forward, backward, improved, log, default: symmetric
|
Available choice: symmetric, forward, backward, improved, log, default: symmetric
|
||||||
"""
|
"""
|
||||||
if self.N != 1:
|
if self.N != 1:
|
||||||
raise Exception("deriv only implemented for one-dimensional correlators.")
|
raise ValueError("deriv only implemented for one-dimensional correlators.")
|
||||||
if variant == "symmetric":
|
if variant == "symmetric":
|
||||||
newcontent = []
|
newcontent = []
|
||||||
for t in range(1, self.T - 1):
|
for t in range(1, self.T - 1):
|
||||||
|
@ -592,7 +592,7 @@ class Corr:
|
||||||
else:
|
else:
|
||||||
newcontent.append(0.5 * (self.content[t + 1] - self.content[t - 1]))
|
newcontent.append(0.5 * (self.content[t + 1] - self.content[t - 1]))
|
||||||
if (all([x is None for x in newcontent])):
|
if (all([x is None for x in newcontent])):
|
||||||
raise Exception('Derivative is undefined at all timeslices')
|
raise ValueError('Derivative is undefined at all timeslices')
|
||||||
return Corr(newcontent, padding=[1, 1])
|
return Corr(newcontent, padding=[1, 1])
|
||||||
elif variant == "forward":
|
elif variant == "forward":
|
||||||
newcontent = []
|
newcontent = []
|
||||||
|
@ -602,7 +602,7 @@ class Corr:
|
||||||
else:
|
else:
|
||||||
newcontent.append(self.content[t + 1] - self.content[t])
|
newcontent.append(self.content[t + 1] - self.content[t])
|
||||||
if (all([x is None for x in newcontent])):
|
if (all([x is None for x in newcontent])):
|
||||||
raise Exception("Derivative is undefined at all timeslices")
|
raise ValueError("Derivative is undefined at all timeslices")
|
||||||
return Corr(newcontent, padding=[0, 1])
|
return Corr(newcontent, padding=[0, 1])
|
||||||
elif variant == "backward":
|
elif variant == "backward":
|
||||||
newcontent = []
|
newcontent = []
|
||||||
|
@ -612,7 +612,7 @@ class Corr:
|
||||||
else:
|
else:
|
||||||
newcontent.append(self.content[t] - self.content[t - 1])
|
newcontent.append(self.content[t] - self.content[t - 1])
|
||||||
if (all([x is None for x in newcontent])):
|
if (all([x is None for x in newcontent])):
|
||||||
raise Exception("Derivative is undefined at all timeslices")
|
raise ValueError("Derivative is undefined at all timeslices")
|
||||||
return Corr(newcontent, padding=[1, 0])
|
return Corr(newcontent, padding=[1, 0])
|
||||||
elif variant == "improved":
|
elif variant == "improved":
|
||||||
newcontent = []
|
newcontent = []
|
||||||
|
@ -622,7 +622,7 @@ class Corr:
|
||||||
else:
|
else:
|
||||||
newcontent.append((1 / 12) * (self.content[t - 2] - 8 * self.content[t - 1] + 8 * self.content[t + 1] - self.content[t + 2]))
|
newcontent.append((1 / 12) * (self.content[t - 2] - 8 * self.content[t - 1] + 8 * self.content[t + 1] - self.content[t + 2]))
|
||||||
if (all([x is None for x in newcontent])):
|
if (all([x is None for x in newcontent])):
|
||||||
raise Exception('Derivative is undefined at all timeslices')
|
raise ValueError('Derivative is undefined at all timeslices')
|
||||||
return Corr(newcontent, padding=[2, 2])
|
return Corr(newcontent, padding=[2, 2])
|
||||||
elif variant == 'log':
|
elif variant == 'log':
|
||||||
newcontent = []
|
newcontent = []
|
||||||
|
@ -632,11 +632,11 @@ class Corr:
|
||||||
else:
|
else:
|
||||||
newcontent.append(np.log(self.content[t]))
|
newcontent.append(np.log(self.content[t]))
|
||||||
if (all([x is None for x in newcontent])):
|
if (all([x is None for x in newcontent])):
|
||||||
raise Exception("Log is undefined at all timeslices")
|
raise ValueError("Log is undefined at all timeslices")
|
||||||
logcorr = Corr(newcontent)
|
logcorr = Corr(newcontent)
|
||||||
return self * logcorr.deriv('symmetric')
|
return self * logcorr.deriv('symmetric')
|
||||||
else:
|
else:
|
||||||
raise Exception("Unknown variant.")
|
raise ValueError("Unknown variant.")
|
||||||
|
|
||||||
def second_deriv(self, variant="symmetric"):
|
def second_deriv(self, variant="symmetric"):
|
||||||
r"""Return the second derivative of the correlator with respect to x0.
|
r"""Return the second derivative of the correlator with respect to x0.
|
||||||
|
@ -656,7 +656,7 @@ class Corr:
|
||||||
$$f(x) = \tilde{\partial}^2_0 log(f(x_0))+(\tilde{\partial}_0 log(f(x_0)))^2$$
|
$$f(x) = \tilde{\partial}^2_0 log(f(x_0))+(\tilde{\partial}_0 log(f(x_0)))^2$$
|
||||||
"""
|
"""
|
||||||
if self.N != 1:
|
if self.N != 1:
|
||||||
raise Exception("second_deriv only implemented for one-dimensional correlators.")
|
raise ValueError("second_deriv only implemented for one-dimensional correlators.")
|
||||||
if variant == "symmetric":
|
if variant == "symmetric":
|
||||||
newcontent = []
|
newcontent = []
|
||||||
for t in range(1, self.T - 1):
|
for t in range(1, self.T - 1):
|
||||||
|
@ -665,7 +665,7 @@ class Corr:
|
||||||
else:
|
else:
|
||||||
newcontent.append((self.content[t + 1] - 2 * self.content[t] + self.content[t - 1]))
|
newcontent.append((self.content[t + 1] - 2 * self.content[t] + self.content[t - 1]))
|
||||||
if (all([x is None for x in newcontent])):
|
if (all([x is None for x in newcontent])):
|
||||||
raise Exception("Derivative is undefined at all timeslices")
|
raise ValueError("Derivative is undefined at all timeslices")
|
||||||
return Corr(newcontent, padding=[1, 1])
|
return Corr(newcontent, padding=[1, 1])
|
||||||
elif variant == "big_symmetric":
|
elif variant == "big_symmetric":
|
||||||
newcontent = []
|
newcontent = []
|
||||||
|
@ -675,7 +675,7 @@ class Corr:
|
||||||
else:
|
else:
|
||||||
newcontent.append((self.content[t + 2] - 2 * self.content[t] + self.content[t - 2]) / 4)
|
newcontent.append((self.content[t + 2] - 2 * self.content[t] + self.content[t - 2]) / 4)
|
||||||
if (all([x is None for x in newcontent])):
|
if (all([x is None for x in newcontent])):
|
||||||
raise Exception("Derivative is undefined at all timeslices")
|
raise ValueError("Derivative is undefined at all timeslices")
|
||||||
return Corr(newcontent, padding=[2, 2])
|
return Corr(newcontent, padding=[2, 2])
|
||||||
elif variant == "improved":
|
elif variant == "improved":
|
||||||
newcontent = []
|
newcontent = []
|
||||||
|
@ -685,7 +685,7 @@ class Corr:
|
||||||
else:
|
else:
|
||||||
newcontent.append((1 / 12) * (-self.content[t + 2] + 16 * self.content[t + 1] - 30 * self.content[t] + 16 * self.content[t - 1] - self.content[t - 2]))
|
newcontent.append((1 / 12) * (-self.content[t + 2] + 16 * self.content[t + 1] - 30 * self.content[t] + 16 * self.content[t - 1] - self.content[t - 2]))
|
||||||
if (all([x is None for x in newcontent])):
|
if (all([x is None for x in newcontent])):
|
||||||
raise Exception("Derivative is undefined at all timeslices")
|
raise ValueError("Derivative is undefined at all timeslices")
|
||||||
return Corr(newcontent, padding=[2, 2])
|
return Corr(newcontent, padding=[2, 2])
|
||||||
elif variant == 'log':
|
elif variant == 'log':
|
||||||
newcontent = []
|
newcontent = []
|
||||||
|
@ -695,11 +695,11 @@ class Corr:
|
||||||
else:
|
else:
|
||||||
newcontent.append(np.log(self.content[t]))
|
newcontent.append(np.log(self.content[t]))
|
||||||
if (all([x is None for x in newcontent])):
|
if (all([x is None for x in newcontent])):
|
||||||
raise Exception("Log is undefined at all timeslices")
|
raise ValueError("Log is undefined at all timeslices")
|
||||||
logcorr = Corr(newcontent)
|
logcorr = Corr(newcontent)
|
||||||
return self * (logcorr.second_deriv('symmetric') + (logcorr.deriv('symmetric'))**2)
|
return self * (logcorr.second_deriv('symmetric') + (logcorr.deriv('symmetric'))**2)
|
||||||
else:
|
else:
|
||||||
raise Exception("Unknown variant.")
|
raise ValueError("Unknown variant.")
|
||||||
|
|
||||||
def m_eff(self, variant='log', guess=1.0):
|
def m_eff(self, variant='log', guess=1.0):
|
||||||
"""Returns the effective mass of the correlator as correlator object
|
"""Returns the effective mass of the correlator as correlator object
|
||||||
|
@ -728,7 +728,7 @@ class Corr:
|
||||||
else:
|
else:
|
||||||
newcontent.append(self.content[t] / self.content[t + 1])
|
newcontent.append(self.content[t] / self.content[t + 1])
|
||||||
if (all([x is None for x in newcontent])):
|
if (all([x is None for x in newcontent])):
|
||||||
raise Exception('m_eff is undefined at all timeslices')
|
raise ValueError('m_eff is undefined at all timeslices')
|
||||||
|
|
||||||
return np.log(Corr(newcontent, padding=[0, 1]))
|
return np.log(Corr(newcontent, padding=[0, 1]))
|
||||||
|
|
||||||
|
@ -742,7 +742,7 @@ class Corr:
|
||||||
else:
|
else:
|
||||||
newcontent.append(self.content[t - 1] / self.content[t + 1])
|
newcontent.append(self.content[t - 1] / self.content[t + 1])
|
||||||
if (all([x is None for x in newcontent])):
|
if (all([x is None for x in newcontent])):
|
||||||
raise Exception('m_eff is undefined at all timeslices')
|
raise ValueError('m_eff is undefined at all timeslices')
|
||||||
|
|
||||||
return np.log(Corr(newcontent, padding=[1, 1])) / 2
|
return np.log(Corr(newcontent, padding=[1, 1])) / 2
|
||||||
|
|
||||||
|
@ -767,7 +767,7 @@ class Corr:
|
||||||
else:
|
else:
|
||||||
newcontent.append(np.abs(find_root(self.content[t][0] / self.content[t + 1][0], root_function, guess=guess)))
|
newcontent.append(np.abs(find_root(self.content[t][0] / self.content[t + 1][0], root_function, guess=guess)))
|
||||||
if (all([x is None for x in newcontent])):
|
if (all([x is None for x in newcontent])):
|
||||||
raise Exception('m_eff is undefined at all timeslices')
|
raise ValueError('m_eff is undefined at all timeslices')
|
||||||
|
|
||||||
return Corr(newcontent, padding=[0, 1])
|
return Corr(newcontent, padding=[0, 1])
|
||||||
|
|
||||||
|
@ -779,11 +779,11 @@ class Corr:
|
||||||
else:
|
else:
|
||||||
newcontent.append((self.content[t + 1] + self.content[t - 1]) / (2 * self.content[t]))
|
newcontent.append((self.content[t + 1] + self.content[t - 1]) / (2 * self.content[t]))
|
||||||
if (all([x is None for x in newcontent])):
|
if (all([x is None for x in newcontent])):
|
||||||
raise Exception("m_eff is undefined at all timeslices")
|
raise ValueError("m_eff is undefined at all timeslices")
|
||||||
return np.arccosh(Corr(newcontent, padding=[1, 1]))
|
return np.arccosh(Corr(newcontent, padding=[1, 1]))
|
||||||
|
|
||||||
else:
|
else:
|
||||||
raise Exception('Unknown variant.')
|
raise ValueError('Unknown variant.')
|
||||||
|
|
||||||
def fit(self, function, fitrange=None, silent=False, **kwargs):
|
def fit(self, function, fitrange=None, silent=False, **kwargs):
|
||||||
r'''Fits function to the data
|
r'''Fits function to the data
|
||||||
|
@ -801,7 +801,7 @@ class Corr:
|
||||||
Decides whether output is printed to the standard output.
|
Decides whether output is printed to the standard output.
|
||||||
'''
|
'''
|
||||||
if self.N != 1:
|
if self.N != 1:
|
||||||
raise Exception("Correlator must be projected before fitting")
|
raise ValueError("Correlator must be projected before fitting")
|
||||||
|
|
||||||
if fitrange is None:
|
if fitrange is None:
|
||||||
if self.prange:
|
if self.prange:
|
||||||
|
@ -810,12 +810,12 @@ class Corr:
|
||||||
fitrange = [0, self.T - 1]
|
fitrange = [0, self.T - 1]
|
||||||
else:
|
else:
|
||||||
if not isinstance(fitrange, list):
|
if not isinstance(fitrange, list):
|
||||||
raise Exception("fitrange has to be a list with two elements")
|
raise TypeError("fitrange has to be a list with two elements")
|
||||||
if len(fitrange) != 2:
|
if len(fitrange) != 2:
|
||||||
raise Exception("fitrange has to have exactly two elements [fit_start, fit_stop]")
|
raise ValueError("fitrange has to have exactly two elements [fit_start, fit_stop]")
|
||||||
|
|
||||||
xs = np.array([x for x in range(fitrange[0], fitrange[1] + 1) if not self.content[x] is None])
|
xs = np.array([x for x in range(fitrange[0], fitrange[1] + 1) if self.content[x] is not None])
|
||||||
ys = np.array([self.content[x][0] for x in range(fitrange[0], fitrange[1] + 1) if not self.content[x] is None])
|
ys = np.array([self.content[x][0] for x in range(fitrange[0], fitrange[1] + 1) if self.content[x] is not None])
|
||||||
result = least_squares(xs, ys, function, silent=silent, **kwargs)
|
result = least_squares(xs, ys, function, silent=silent, **kwargs)
|
||||||
return result
|
return result
|
||||||
|
|
||||||
|
@ -840,9 +840,9 @@ class Corr:
|
||||||
else:
|
else:
|
||||||
raise Exception("no plateau range provided")
|
raise Exception("no plateau range provided")
|
||||||
if self.N != 1:
|
if self.N != 1:
|
||||||
raise Exception("Correlator must be projected before getting a plateau.")
|
raise ValueError("Correlator must be projected before getting a plateau.")
|
||||||
if (all([self.content[t] is None for t in range(plateau_range[0], plateau_range[1] + 1)])):
|
if (all([self.content[t] is None for t in range(plateau_range[0], plateau_range[1] + 1)])):
|
||||||
raise Exception("plateau is undefined at all timeslices in plateaurange.")
|
raise ValueError("plateau is undefined at all timeslices in plateaurange.")
|
||||||
if auto_gamma:
|
if auto_gamma:
|
||||||
self.gamma_method()
|
self.gamma_method()
|
||||||
if method == "fit":
|
if method == "fit":
|
||||||
|
@ -854,16 +854,16 @@ class Corr:
|
||||||
return returnvalue
|
return returnvalue
|
||||||
|
|
||||||
else:
|
else:
|
||||||
raise Exception("Unsupported plateau method: " + method)
|
raise ValueError("Unsupported plateau method: " + method)
|
||||||
|
|
||||||
def set_prange(self, prange):
|
def set_prange(self, prange):
|
||||||
"""Sets the attribute prange of the Corr object."""
|
"""Sets the attribute prange of the Corr object."""
|
||||||
if not len(prange) == 2:
|
if not len(prange) == 2:
|
||||||
raise Exception("prange must be a list or array with two values")
|
raise ValueError("prange must be a list or array with two values")
|
||||||
if not ((isinstance(prange[0], int)) and (isinstance(prange[1], int))):
|
if not ((isinstance(prange[0], int)) and (isinstance(prange[1], int))):
|
||||||
raise Exception("Start and end point must be integers")
|
raise TypeError("Start and end point must be integers")
|
||||||
if not (0 <= prange[0] <= self.T and 0 <= prange[1] <= self.T and prange[0] <= prange[1]):
|
if not (0 <= prange[0] <= self.T and 0 <= prange[1] <= self.T and prange[0] <= prange[1]):
|
||||||
raise Exception("Start and end point must define a range in the interval 0,T")
|
raise ValueError("Start and end point must define a range in the interval 0,T")
|
||||||
|
|
||||||
self.prange = prange
|
self.prange = prange
|
||||||
return
|
return
|
||||||
|
@ -900,7 +900,7 @@ class Corr:
|
||||||
Optional title of the figure.
|
Optional title of the figure.
|
||||||
"""
|
"""
|
||||||
if self.N != 1:
|
if self.N != 1:
|
||||||
raise Exception("Correlator must be projected before plotting")
|
raise ValueError("Correlator must be projected before plotting")
|
||||||
|
|
||||||
if auto_gamma:
|
if auto_gamma:
|
||||||
self.gamma_method()
|
self.gamma_method()
|
||||||
|
@ -941,7 +941,7 @@ class Corr:
|
||||||
hide_from = None
|
hide_from = None
|
||||||
ax1.errorbar(x[:hide_from], y[:hide_from], y_err[:hide_from], label=corr.tag, mfc=plt.rcParams['axes.facecolor'])
|
ax1.errorbar(x[:hide_from], y[:hide_from], y_err[:hide_from], label=corr.tag, mfc=plt.rcParams['axes.facecolor'])
|
||||||
else:
|
else:
|
||||||
raise Exception("'comp' must be a correlator or a list of correlators.")
|
raise TypeError("'comp' must be a correlator or a list of correlators.")
|
||||||
|
|
||||||
if plateau:
|
if plateau:
|
||||||
if isinstance(plateau, Obs):
|
if isinstance(plateau, Obs):
|
||||||
|
@ -950,14 +950,14 @@ class Corr:
|
||||||
ax1.axhline(y=plateau.value, linewidth=2, color=plt.rcParams['text.color'], alpha=0.6, marker=',', ls='--', label=str(plateau))
|
ax1.axhline(y=plateau.value, linewidth=2, color=plt.rcParams['text.color'], alpha=0.6, marker=',', ls='--', label=str(plateau))
|
||||||
ax1.axhspan(plateau.value - plateau.dvalue, plateau.value + plateau.dvalue, alpha=0.25, color=plt.rcParams['text.color'], ls='-')
|
ax1.axhspan(plateau.value - plateau.dvalue, plateau.value + plateau.dvalue, alpha=0.25, color=plt.rcParams['text.color'], ls='-')
|
||||||
else:
|
else:
|
||||||
raise Exception("'plateau' must be an Obs")
|
raise TypeError("'plateau' must be an Obs")
|
||||||
|
|
||||||
if references:
|
if references:
|
||||||
if isinstance(references, list):
|
if isinstance(references, list):
|
||||||
for ref in references:
|
for ref in references:
|
||||||
ax1.axhline(y=ref, linewidth=1, color=plt.rcParams['text.color'], alpha=0.6, marker=',', ls='--')
|
ax1.axhline(y=ref, linewidth=1, color=plt.rcParams['text.color'], alpha=0.6, marker=',', ls='--')
|
||||||
else:
|
else:
|
||||||
raise Exception("'references' must be a list of floating pint values.")
|
raise TypeError("'references' must be a list of floating pint values.")
|
||||||
|
|
||||||
if self.prange:
|
if self.prange:
|
||||||
ax1.axvline(self.prange[0], 0, 1, ls='-', marker=',', color="black", zorder=0)
|
ax1.axvline(self.prange[0], 0, 1, ls='-', marker=',', color="black", zorder=0)
|
||||||
|
@ -991,7 +991,7 @@ class Corr:
|
||||||
if isinstance(save, str):
|
if isinstance(save, str):
|
||||||
fig.savefig(save, bbox_inches='tight')
|
fig.savefig(save, bbox_inches='tight')
|
||||||
else:
|
else:
|
||||||
raise Exception("'save' has to be a string.")
|
raise TypeError("'save' has to be a string.")
|
||||||
|
|
||||||
def spaghetti_plot(self, logscale=True):
|
def spaghetti_plot(self, logscale=True):
|
||||||
"""Produces a spaghetti plot of the correlator suited to monitor exceptional configurations.
|
"""Produces a spaghetti plot of the correlator suited to monitor exceptional configurations.
|
||||||
|
@ -1002,7 +1002,7 @@ class Corr:
|
||||||
Determines whether the scale of the y-axis is logarithmic or standard.
|
Determines whether the scale of the y-axis is logarithmic or standard.
|
||||||
"""
|
"""
|
||||||
if self.N != 1:
|
if self.N != 1:
|
||||||
raise Exception("Correlator needs to be projected first.")
|
raise ValueError("Correlator needs to be projected first.")
|
||||||
|
|
||||||
mc_names = list(set([item for sublist in [sum(map(o[0].e_content.get, o[0].mc_names), []) for o in self.content if o is not None] for item in sublist]))
|
mc_names = list(set([item for sublist in [sum(map(o[0].e_content.get, o[0].mc_names), []) for o in self.content if o is not None] for item in sublist]))
|
||||||
x0_vals = [n for (n, o) in zip(np.arange(self.T), self.content) if o is not None]
|
x0_vals = [n for (n, o) in zip(np.arange(self.T), self.content) if o is not None]
|
||||||
|
@ -1044,7 +1044,7 @@ class Corr:
|
||||||
elif datatype == "pickle":
|
elif datatype == "pickle":
|
||||||
dump_object(self, filename, **kwargs)
|
dump_object(self, filename, **kwargs)
|
||||||
else:
|
else:
|
||||||
raise Exception("Unknown datatype " + str(datatype))
|
raise ValueError("Unknown datatype " + str(datatype))
|
||||||
|
|
||||||
def print(self, print_range=None):
|
def print(self, print_range=None):
|
||||||
print(self.__repr__(print_range))
|
print(self.__repr__(print_range))
|
||||||
|
@ -1094,7 +1094,7 @@ class Corr:
|
||||||
def __add__(self, y):
|
def __add__(self, y):
|
||||||
if isinstance(y, Corr):
|
if isinstance(y, Corr):
|
||||||
if ((self.N != y.N) or (self.T != y.T)):
|
if ((self.N != y.N) or (self.T != y.T)):
|
||||||
raise Exception("Addition of Corrs with different shape")
|
raise ValueError("Addition of Corrs with different shape")
|
||||||
newcontent = []
|
newcontent = []
|
||||||
for t in range(self.T):
|
for t in range(self.T):
|
||||||
if _check_for_none(self, self.content[t]) or _check_for_none(y, y.content[t]):
|
if _check_for_none(self, self.content[t]) or _check_for_none(y, y.content[t]):
|
||||||
|
@ -1122,7 +1122,7 @@ class Corr:
|
||||||
def __mul__(self, y):
|
def __mul__(self, y):
|
||||||
if isinstance(y, Corr):
|
if isinstance(y, Corr):
|
||||||
if not ((self.N == 1 or y.N == 1 or self.N == y.N) and self.T == y.T):
|
if not ((self.N == 1 or y.N == 1 or self.N == y.N) and self.T == y.T):
|
||||||
raise Exception("Multiplication of Corr object requires N=N or N=1 and T=T")
|
raise ValueError("Multiplication of Corr object requires N=N or N=1 and T=T")
|
||||||
newcontent = []
|
newcontent = []
|
||||||
for t in range(self.T):
|
for t in range(self.T):
|
||||||
if _check_for_none(self, self.content[t]) or _check_for_none(y, y.content[t]):
|
if _check_for_none(self, self.content[t]) or _check_for_none(y, y.content[t]):
|
||||||
|
@ -1193,7 +1193,7 @@ class Corr:
|
||||||
def __truediv__(self, y):
|
def __truediv__(self, y):
|
||||||
if isinstance(y, Corr):
|
if isinstance(y, Corr):
|
||||||
if not ((self.N == 1 or y.N == 1 or self.N == y.N) and self.T == y.T):
|
if not ((self.N == 1 or y.N == 1 or self.N == y.N) and self.T == y.T):
|
||||||
raise Exception("Multiplication of Corr object requires N=N or N=1 and T=T")
|
raise ValueError("Multiplication of Corr object requires N=N or N=1 and T=T")
|
||||||
newcontent = []
|
newcontent = []
|
||||||
for t in range(self.T):
|
for t in range(self.T):
|
||||||
if _check_for_none(self, self.content[t]) or _check_for_none(y, y.content[t]):
|
if _check_for_none(self, self.content[t]) or _check_for_none(y, y.content[t]):
|
||||||
|
@ -1207,16 +1207,16 @@ class Corr:
|
||||||
newcontent[t] = None
|
newcontent[t] = None
|
||||||
|
|
||||||
if all([item is None for item in newcontent]):
|
if all([item is None for item in newcontent]):
|
||||||
raise Exception("Division returns completely undefined correlator")
|
raise ValueError("Division returns completely undefined correlator")
|
||||||
return Corr(newcontent)
|
return Corr(newcontent)
|
||||||
|
|
||||||
elif isinstance(y, (Obs, CObs)):
|
elif isinstance(y, (Obs, CObs)):
|
||||||
if isinstance(y, Obs):
|
if isinstance(y, Obs):
|
||||||
if y.value == 0:
|
if y.value == 0:
|
||||||
raise Exception('Division by zero will return undefined correlator')
|
raise ValueError('Division by zero will return undefined correlator')
|
||||||
if isinstance(y, CObs):
|
if isinstance(y, CObs):
|
||||||
if y.is_zero():
|
if y.is_zero():
|
||||||
raise Exception('Division by zero will return undefined correlator')
|
raise ValueError('Division by zero will return undefined correlator')
|
||||||
|
|
||||||
newcontent = []
|
newcontent = []
|
||||||
for t in range(self.T):
|
for t in range(self.T):
|
||||||
|
@ -1228,7 +1228,7 @@ class Corr:
|
||||||
|
|
||||||
elif isinstance(y, (int, float)):
|
elif isinstance(y, (int, float)):
|
||||||
if y == 0:
|
if y == 0:
|
||||||
raise Exception('Division by zero will return undefined correlator')
|
raise ValueError('Division by zero will return undefined correlator')
|
||||||
newcontent = []
|
newcontent = []
|
||||||
for t in range(self.T):
|
for t in range(self.T):
|
||||||
if _check_for_none(self, self.content[t]):
|
if _check_for_none(self, self.content[t]):
|
||||||
|
@ -1284,7 +1284,7 @@ class Corr:
|
||||||
if np.isnan(tmp_sum.value):
|
if np.isnan(tmp_sum.value):
|
||||||
newcontent[t] = None
|
newcontent[t] = None
|
||||||
if all([item is None for item in newcontent]):
|
if all([item is None for item in newcontent]):
|
||||||
raise Exception('Operation returns undefined correlator')
|
raise ValueError('Operation returns undefined correlator')
|
||||||
return Corr(newcontent)
|
return Corr(newcontent)
|
||||||
|
|
||||||
def sin(self):
|
def sin(self):
|
||||||
|
@ -1392,13 +1392,13 @@ class Corr:
|
||||||
'''
|
'''
|
||||||
|
|
||||||
if self.N == 1:
|
if self.N == 1:
|
||||||
raise Exception('Method cannot be applied to one-dimensional correlators.')
|
raise ValueError('Method cannot be applied to one-dimensional correlators.')
|
||||||
if basematrix is None:
|
if basematrix is None:
|
||||||
basematrix = self
|
basematrix = self
|
||||||
if Ntrunc >= basematrix.N:
|
if Ntrunc >= basematrix.N:
|
||||||
raise Exception('Cannot truncate using Ntrunc <= %d' % (basematrix.N))
|
raise ValueError('Cannot truncate using Ntrunc <= %d' % (basematrix.N))
|
||||||
if basematrix.N != self.N:
|
if basematrix.N != self.N:
|
||||||
raise Exception('basematrix and targetmatrix have to be of the same size.')
|
raise ValueError('basematrix and targetmatrix have to be of the same size.')
|
||||||
|
|
||||||
evecs = basematrix.GEVP(t0proj, tproj, sort=None)[:Ntrunc]
|
evecs = basematrix.GEVP(t0proj, tproj, sort=None)[:Ntrunc]
|
||||||
|
|
||||||
|
|
|
@ -34,7 +34,7 @@ def epsilon_tensor(i, j, k):
|
||||||
"""
|
"""
|
||||||
test_set = set((i, j, k))
|
test_set = set((i, j, k))
|
||||||
if not (test_set <= set((1, 2, 3)) or test_set <= set((0, 1, 2))):
|
if not (test_set <= set((1, 2, 3)) or test_set <= set((0, 1, 2))):
|
||||||
raise Exception("Unexpected input", i, j, k)
|
raise ValueError("Unexpected input", i, j, k)
|
||||||
|
|
||||||
return (i - j) * (j - k) * (k - i) / 2
|
return (i - j) * (j - k) * (k - i) / 2
|
||||||
|
|
||||||
|
@ -52,7 +52,7 @@ def epsilon_tensor_rank4(i, j, k, o):
|
||||||
"""
|
"""
|
||||||
test_set = set((i, j, k, o))
|
test_set = set((i, j, k, o))
|
||||||
if not (test_set <= set((1, 2, 3, 4)) or test_set <= set((0, 1, 2, 3))):
|
if not (test_set <= set((1, 2, 3, 4)) or test_set <= set((0, 1, 2, 3))):
|
||||||
raise Exception("Unexpected input", i, j, k, o)
|
raise ValueError("Unexpected input", i, j, k, o)
|
||||||
|
|
||||||
return (i - j) * (j - k) * (k - i) * (i - o) * (j - o) * (o - k) / 12
|
return (i - j) * (j - k) * (k - i) * (i - o) * (j - o) * (o - k) / 12
|
||||||
|
|
||||||
|
@ -92,5 +92,5 @@ def Grid_gamma(gamma_tag):
|
||||||
elif gamma_tag == 'SigmaZT':
|
elif gamma_tag == 'SigmaZT':
|
||||||
g = 0.5 * (gamma[2] @ gamma[3] - gamma[3] @ gamma[2])
|
g = 0.5 * (gamma[2] @ gamma[3] - gamma[3] @ gamma[2])
|
||||||
else:
|
else:
|
||||||
raise Exception('Unkown gamma structure', gamma_tag)
|
raise ValueError('Unkown gamma structure', gamma_tag)
|
||||||
return g
|
return g
|
||||||
|
|
|
@ -5,11 +5,11 @@ r'''
|
||||||
For comparison with other analysis workflows `pyerrors` can also generate jackknife samples from an `Obs` object or import jackknife samples into an `Obs` object.
|
For comparison with other analysis workflows `pyerrors` can also generate jackknife samples from an `Obs` object or import jackknife samples into an `Obs` object.
|
||||||
See `pyerrors.obs.Obs.export_jackknife` and `pyerrors.obs.import_jackknife` for details.
|
See `pyerrors.obs.Obs.export_jackknife` and `pyerrors.obs.import_jackknife` for details.
|
||||||
'''
|
'''
|
||||||
from . import bdio
|
from . import bdio as bdio
|
||||||
from . import dobs
|
from . import dobs as dobs
|
||||||
from . import hadrons
|
from . import hadrons as hadrons
|
||||||
from . import json
|
from . import json as json
|
||||||
from . import misc
|
from . import misc as misc
|
||||||
from . import openQCD
|
from . import openQCD as openQCD
|
||||||
from . import pandas
|
from . import pandas as pandas
|
||||||
from . import sfcf
|
from . import sfcf as sfcf
|
||||||
|
|
|
@ -79,7 +79,7 @@ def _dict_to_xmlstring_spaces(d, space=' '):
|
||||||
o += space
|
o += space
|
||||||
o += li + '\n'
|
o += li + '\n'
|
||||||
if li.startswith('<') and not cm:
|
if li.startswith('<') and not cm:
|
||||||
if not '<%s' % ('/') in li:
|
if '<%s' % ('/') not in li:
|
||||||
c += 1
|
c += 1
|
||||||
cm = False
|
cm = False
|
||||||
return o
|
return o
|
||||||
|
@ -671,7 +671,7 @@ def _dobsdict_to_xmlstring_spaces(d, space=' '):
|
||||||
o += space
|
o += space
|
||||||
o += li + '\n'
|
o += li + '\n'
|
||||||
if li.startswith('<') and not cm:
|
if li.startswith('<') and not cm:
|
||||||
if not '<%s' % ('/') in li:
|
if '<%s' % ('/') not in li:
|
||||||
c += 1
|
c += 1
|
||||||
cm = False
|
cm = False
|
||||||
return o
|
return o
|
||||||
|
|
|
@ -113,7 +113,7 @@ def read_hd5(filestem, ens_id, group, attrs=None, idl=None, part="real"):
|
||||||
infos = []
|
infos = []
|
||||||
for hd5_file in files:
|
for hd5_file in files:
|
||||||
h5file = h5py.File(path + '/' + hd5_file, "r")
|
h5file = h5py.File(path + '/' + hd5_file, "r")
|
||||||
if not group + '/' + entry in h5file:
|
if group + '/' + entry not in h5file:
|
||||||
raise Exception("Entry '" + entry + "' not contained in the files.")
|
raise Exception("Entry '" + entry + "' not contained in the files.")
|
||||||
raw_data = h5file[group + '/' + entry + '/corr']
|
raw_data = h5file[group + '/' + entry + '/corr']
|
||||||
real_data = raw_data[:].view("complex")
|
real_data = raw_data[:].view("complex")
|
||||||
|
@ -186,7 +186,7 @@ def _extract_real_arrays(path, files, tree, keys):
|
||||||
for hd5_file in files:
|
for hd5_file in files:
|
||||||
h5file = h5py.File(path + '/' + hd5_file, "r")
|
h5file = h5py.File(path + '/' + hd5_file, "r")
|
||||||
for key in keys:
|
for key in keys:
|
||||||
if not tree + '/' + key in h5file:
|
if tree + '/' + key not in h5file:
|
||||||
raise Exception("Entry '" + key + "' not contained in the files.")
|
raise Exception("Entry '" + key + "' not contained in the files.")
|
||||||
raw_data = h5file[tree + '/' + key + '/data']
|
raw_data = h5file[tree + '/' + key + '/data']
|
||||||
real_data = raw_data[:].astype(np.double)
|
real_data = raw_data[:].astype(np.double)
|
||||||
|
|
|
@ -47,7 +47,7 @@ def read_rwms(path, prefix, version='2.0', names=None, **kwargs):
|
||||||
Reweighting factors read
|
Reweighting factors read
|
||||||
"""
|
"""
|
||||||
known_oqcd_versions = ['1.4', '1.6', '2.0']
|
known_oqcd_versions = ['1.4', '1.6', '2.0']
|
||||||
if not (version in known_oqcd_versions):
|
if version not in known_oqcd_versions:
|
||||||
raise Exception('Unknown openQCD version defined!')
|
raise Exception('Unknown openQCD version defined!')
|
||||||
print("Working with openQCD version " + version)
|
print("Working with openQCD version " + version)
|
||||||
if 'postfix' in kwargs:
|
if 'postfix' in kwargs:
|
||||||
|
|
|
@ -222,7 +222,7 @@ class Obs:
|
||||||
tmp = kwargs.get(kwarg_name)
|
tmp = kwargs.get(kwarg_name)
|
||||||
if isinstance(tmp, (int, float)):
|
if isinstance(tmp, (int, float)):
|
||||||
if tmp < 0:
|
if tmp < 0:
|
||||||
raise Exception(kwarg_name + ' has to be larger or equal to 0.')
|
raise ValueError(kwarg_name + ' has to be larger or equal to 0.')
|
||||||
for e, e_name in enumerate(self.e_names):
|
for e, e_name in enumerate(self.e_names):
|
||||||
getattr(self, kwarg_name)[e_name] = tmp
|
getattr(self, kwarg_name)[e_name] = tmp
|
||||||
else:
|
else:
|
||||||
|
@ -291,7 +291,7 @@ class Obs:
|
||||||
texp = self.tau_exp[e_name]
|
texp = self.tau_exp[e_name]
|
||||||
# Critical slowing down analysis
|
# Critical slowing down analysis
|
||||||
if w_max // 2 <= 1:
|
if w_max // 2 <= 1:
|
||||||
raise Exception("Need at least 8 samples for tau_exp error analysis")
|
raise ValueError("Need at least 8 samples for tau_exp error analysis")
|
||||||
for n in range(1, w_max // 2):
|
for n in range(1, w_max // 2):
|
||||||
_compute_drho(n + 1)
|
_compute_drho(n + 1)
|
||||||
if (self.e_rho[e_name][n] - self.N_sigma[e_name] * self.e_drho[e_name][n]) < 0 or n >= w_max // 2 - 2:
|
if (self.e_rho[e_name][n] - self.N_sigma[e_name] * self.e_drho[e_name][n]) < 0 or n >= w_max // 2 - 2:
|
||||||
|
@ -620,7 +620,7 @@ class Obs:
|
||||||
if not hasattr(self, 'e_dvalue'):
|
if not hasattr(self, 'e_dvalue'):
|
||||||
raise Exception('Run the gamma method first.')
|
raise Exception('Run the gamma method first.')
|
||||||
if np.isclose(0.0, self._dvalue, atol=1e-15):
|
if np.isclose(0.0, self._dvalue, atol=1e-15):
|
||||||
raise Exception('Error is 0.0')
|
raise ValueError('Error is 0.0')
|
||||||
labels = self.e_names
|
labels = self.e_names
|
||||||
sizes = [self.e_dvalue[name] ** 2 for name in labels] / self._dvalue ** 2
|
sizes = [self.e_dvalue[name] ** 2 for name in labels] / self._dvalue ** 2
|
||||||
fig1, ax1 = plt.subplots()
|
fig1, ax1 = plt.subplots()
|
||||||
|
@ -659,7 +659,7 @@ class Obs:
|
||||||
with open(file_name + '.p', 'wb') as fb:
|
with open(file_name + '.p', 'wb') as fb:
|
||||||
pickle.dump(self, fb)
|
pickle.dump(self, fb)
|
||||||
else:
|
else:
|
||||||
raise Exception("Unknown datatype " + str(datatype))
|
raise TypeError("Unknown datatype " + str(datatype))
|
||||||
|
|
||||||
def export_jackknife(self):
|
def export_jackknife(self):
|
||||||
"""Export jackknife samples from the Obs
|
"""Export jackknife samples from the Obs
|
||||||
|
@ -676,7 +676,7 @@ class Obs:
|
||||||
"""
|
"""
|
||||||
|
|
||||||
if len(self.names) != 1:
|
if len(self.names) != 1:
|
||||||
raise Exception("'export_jackknife' is only implemented for Obs defined on one ensemble and replicum.")
|
raise ValueError("'export_jackknife' is only implemented for Obs defined on one ensemble and replicum.")
|
||||||
|
|
||||||
name = self.names[0]
|
name = self.names[0]
|
||||||
full_data = self.deltas[name] + self.r_values[name]
|
full_data = self.deltas[name] + self.r_values[name]
|
||||||
|
@ -711,7 +711,7 @@ class Obs:
|
||||||
should agree with samples from a full bootstrap analysis up to O(1/N).
|
should agree with samples from a full bootstrap analysis up to O(1/N).
|
||||||
"""
|
"""
|
||||||
if len(self.names) != 1:
|
if len(self.names) != 1:
|
||||||
raise Exception("'export_boostrap' is only implemented for Obs defined on one ensemble and replicum.")
|
raise ValueError("'export_boostrap' is only implemented for Obs defined on one ensemble and replicum.")
|
||||||
|
|
||||||
name = self.names[0]
|
name = self.names[0]
|
||||||
length = self.N
|
length = self.N
|
||||||
|
@ -1267,7 +1267,7 @@ def derived_observable(func, data, array_mode=False, **kwargs):
|
||||||
if 'man_grad' in kwargs:
|
if 'man_grad' in kwargs:
|
||||||
deriv = np.asarray(kwargs.get('man_grad'))
|
deriv = np.asarray(kwargs.get('man_grad'))
|
||||||
if new_values.shape + data.shape != deriv.shape:
|
if new_values.shape + data.shape != deriv.shape:
|
||||||
raise Exception('Manual derivative does not have correct shape.')
|
raise ValueError('Manual derivative does not have correct shape.')
|
||||||
elif kwargs.get('num_grad') is True:
|
elif kwargs.get('num_grad') is True:
|
||||||
if multi > 0:
|
if multi > 0:
|
||||||
raise Exception('Multi mode currently not supported for numerical derivative')
|
raise Exception('Multi mode currently not supported for numerical derivative')
|
||||||
|
@ -1333,7 +1333,7 @@ def derived_observable(func, data, array_mode=False, **kwargs):
|
||||||
new_covobs = {name: Covobs(0, allcov[name], name, grad=new_grad[name]) for name in new_grad}
|
new_covobs = {name: Covobs(0, allcov[name], name, grad=new_grad[name]) for name in new_grad}
|
||||||
|
|
||||||
if not set(new_covobs.keys()).isdisjoint(new_deltas.keys()):
|
if not set(new_covobs.keys()).isdisjoint(new_deltas.keys()):
|
||||||
raise Exception('The same name has been used for deltas and covobs!')
|
raise ValueError('The same name has been used for deltas and covobs!')
|
||||||
new_samples = []
|
new_samples = []
|
||||||
new_means = []
|
new_means = []
|
||||||
new_idl = []
|
new_idl = []
|
||||||
|
@ -1374,7 +1374,7 @@ def _reduce_deltas(deltas, idx_old, idx_new):
|
||||||
Has to be a subset of idx_old.
|
Has to be a subset of idx_old.
|
||||||
"""
|
"""
|
||||||
if not len(deltas) == len(idx_old):
|
if not len(deltas) == len(idx_old):
|
||||||
raise Exception('Length of deltas and idx_old have to be the same: %d != %d' % (len(deltas), len(idx_old)))
|
raise ValueError('Length of deltas and idx_old have to be the same: %d != %d' % (len(deltas), len(idx_old)))
|
||||||
if type(idx_old) is range and type(idx_new) is range:
|
if type(idx_old) is range and type(idx_new) is range:
|
||||||
if idx_old == idx_new:
|
if idx_old == idx_new:
|
||||||
return deltas
|
return deltas
|
||||||
|
@ -1382,7 +1382,7 @@ def _reduce_deltas(deltas, idx_old, idx_new):
|
||||||
return deltas
|
return deltas
|
||||||
indices = np.intersect1d(idx_old, idx_new, assume_unique=True, return_indices=True)[1]
|
indices = np.intersect1d(idx_old, idx_new, assume_unique=True, return_indices=True)[1]
|
||||||
if len(indices) < len(idx_new):
|
if len(indices) < len(idx_new):
|
||||||
raise Exception('Error in _reduce_deltas: Config of idx_new not in idx_old')
|
raise ValueError('Error in _reduce_deltas: Config of idx_new not in idx_old')
|
||||||
return np.array(deltas)[indices]
|
return np.array(deltas)[indices]
|
||||||
|
|
||||||
|
|
||||||
|
@ -1404,12 +1404,12 @@ def reweight(weight, obs, **kwargs):
|
||||||
result = []
|
result = []
|
||||||
for i in range(len(obs)):
|
for i in range(len(obs)):
|
||||||
if len(obs[i].cov_names):
|
if len(obs[i].cov_names):
|
||||||
raise Exception('Error: Not possible to reweight an Obs that contains covobs!')
|
raise ValueError('Error: Not possible to reweight an Obs that contains covobs!')
|
||||||
if not set(obs[i].names).issubset(weight.names):
|
if not set(obs[i].names).issubset(weight.names):
|
||||||
raise Exception('Error: Ensembles do not fit')
|
raise ValueError('Error: Ensembles do not fit')
|
||||||
for name in obs[i].names:
|
for name in obs[i].names:
|
||||||
if not set(obs[i].idl[name]).issubset(weight.idl[name]):
|
if not set(obs[i].idl[name]).issubset(weight.idl[name]):
|
||||||
raise Exception('obs[%d] has to be defined on a subset of the configs in weight.idl[%s]!' % (i, name))
|
raise ValueError('obs[%d] has to be defined on a subset of the configs in weight.idl[%s]!' % (i, name))
|
||||||
new_samples = []
|
new_samples = []
|
||||||
w_deltas = {}
|
w_deltas = {}
|
||||||
for name in sorted(obs[i].names):
|
for name in sorted(obs[i].names):
|
||||||
|
@ -1446,14 +1446,14 @@ def correlate(obs_a, obs_b):
|
||||||
"""
|
"""
|
||||||
|
|
||||||
if sorted(obs_a.names) != sorted(obs_b.names):
|
if sorted(obs_a.names) != sorted(obs_b.names):
|
||||||
raise Exception(f"Ensembles do not fit {set(sorted(obs_a.names)) ^ set(sorted(obs_b.names))}")
|
raise ValueError(f"Ensembles do not fit {set(sorted(obs_a.names)) ^ set(sorted(obs_b.names))}")
|
||||||
if len(obs_a.cov_names) or len(obs_b.cov_names):
|
if len(obs_a.cov_names) or len(obs_b.cov_names):
|
||||||
raise Exception('Error: Not possible to correlate Obs that contain covobs!')
|
raise ValueError('Error: Not possible to correlate Obs that contain covobs!')
|
||||||
for name in obs_a.names:
|
for name in obs_a.names:
|
||||||
if obs_a.shape[name] != obs_b.shape[name]:
|
if obs_a.shape[name] != obs_b.shape[name]:
|
||||||
raise Exception('Shapes of ensemble', name, 'do not fit')
|
raise ValueError('Shapes of ensemble', name, 'do not fit')
|
||||||
if obs_a.idl[name] != obs_b.idl[name]:
|
if obs_a.idl[name] != obs_b.idl[name]:
|
||||||
raise Exception('idl of ensemble', name, 'do not fit')
|
raise ValueError('idl of ensemble', name, 'do not fit')
|
||||||
|
|
||||||
if obs_a.reweighted is True:
|
if obs_a.reweighted is True:
|
||||||
warnings.warn("The first observable is already reweighted.", RuntimeWarning)
|
warnings.warn("The first observable is already reweighted.", RuntimeWarning)
|
||||||
|
@ -1555,7 +1555,7 @@ def invert_corr_cov_cholesky(corr, inverrdiag):
|
||||||
|
|
||||||
condn = np.linalg.cond(corr)
|
condn = np.linalg.cond(corr)
|
||||||
if condn > 0.1 / np.finfo(float).eps:
|
if condn > 0.1 / np.finfo(float).eps:
|
||||||
raise Exception(f"Cannot invert correlation matrix as its condition number exceeds machine precision ({condn:1.2e})")
|
raise ValueError(f"Cannot invert correlation matrix as its condition number exceeds machine precision ({condn:1.2e})")
|
||||||
if condn > 1e13:
|
if condn > 1e13:
|
||||||
warnings.warn("Correlation matrix may be ill-conditioned, condition number: {%1.2e}" % (condn), RuntimeWarning)
|
warnings.warn("Correlation matrix may be ill-conditioned, condition number: {%1.2e}" % (condn), RuntimeWarning)
|
||||||
chol = np.linalg.cholesky(corr)
|
chol = np.linalg.cholesky(corr)
|
||||||
|
@ -1636,7 +1636,7 @@ def _smooth_eigenvalues(corr, E):
|
||||||
Number of eigenvalues to be left substantially unchanged
|
Number of eigenvalues to be left substantially unchanged
|
||||||
"""
|
"""
|
||||||
if not (2 < E < corr.shape[0] - 1):
|
if not (2 < E < corr.shape[0] - 1):
|
||||||
raise Exception(f"'E' has to be between 2 and the dimension of the correlation matrix minus 1 ({corr.shape[0] - 1}).")
|
raise ValueError(f"'E' has to be between 2 and the dimension of the correlation matrix minus 1 ({corr.shape[0] - 1}).")
|
||||||
vals, vec = np.linalg.eigh(corr)
|
vals, vec = np.linalg.eigh(corr)
|
||||||
lambda_min = np.mean(vals[:-E])
|
lambda_min = np.mean(vals[:-E])
|
||||||
vals[vals < lambda_min] = lambda_min
|
vals[vals < lambda_min] = lambda_min
|
||||||
|
@ -1768,9 +1768,9 @@ def merge_obs(list_of_obs):
|
||||||
"""
|
"""
|
||||||
replist = [item for obs in list_of_obs for item in obs.names]
|
replist = [item for obs in list_of_obs for item in obs.names]
|
||||||
if (len(replist) == len(set(replist))) is False:
|
if (len(replist) == len(set(replist))) is False:
|
||||||
raise Exception('list_of_obs contains duplicate replica: %s' % (str(replist)))
|
raise ValueError('list_of_obs contains duplicate replica: %s' % (str(replist)))
|
||||||
if any([len(o.cov_names) for o in list_of_obs]):
|
if any([len(o.cov_names) for o in list_of_obs]):
|
||||||
raise Exception('Not possible to merge data that contains covobs!')
|
raise ValueError('Not possible to merge data that contains covobs!')
|
||||||
new_dict = {}
|
new_dict = {}
|
||||||
idl_dict = {}
|
idl_dict = {}
|
||||||
for o in list_of_obs:
|
for o in list_of_obs:
|
||||||
|
@ -1821,7 +1821,7 @@ def cov_Obs(means, cov, name, grad=None):
|
||||||
for i in range(len(means)):
|
for i in range(len(means)):
|
||||||
ol.append(covobs_to_obs(Covobs(means[i], cov, name, pos=i, grad=grad)))
|
ol.append(covobs_to_obs(Covobs(means[i], cov, name, pos=i, grad=grad)))
|
||||||
if ol[0].covobs[name].N != len(means):
|
if ol[0].covobs[name].N != len(means):
|
||||||
raise Exception('You have to provide %d mean values!' % (ol[0].N))
|
raise ValueError('You have to provide %d mean values!' % (ol[0].N))
|
||||||
if len(ol) == 1:
|
if len(ol) == 1:
|
||||||
return ol[0]
|
return ol[0]
|
||||||
return ol
|
return ol
|
||||||
|
@ -1837,7 +1837,7 @@ def _determine_gap(o, e_content, e_name):
|
||||||
|
|
||||||
gap = min(gaps)
|
gap = min(gaps)
|
||||||
if not np.all([gi % gap == 0 for gi in gaps]):
|
if not np.all([gi % gap == 0 for gi in gaps]):
|
||||||
raise Exception(f"Replica for ensemble {e_name} do not have a common spacing.", gaps)
|
raise ValueError(f"Replica for ensemble {e_name} do not have a common spacing.", gaps)
|
||||||
|
|
||||||
return gap
|
return gap
|
||||||
|
|
||||||
|
|
|
@ -129,7 +129,7 @@ def test_m_eff():
|
||||||
with pytest.warns(RuntimeWarning):
|
with pytest.warns(RuntimeWarning):
|
||||||
my_corr.m_eff('sinh')
|
my_corr.m_eff('sinh')
|
||||||
|
|
||||||
with pytest.raises(Exception):
|
with pytest.raises(ValueError):
|
||||||
my_corr.m_eff('unkown_variant')
|
my_corr.m_eff('unkown_variant')
|
||||||
|
|
||||||
|
|
||||||
|
@ -140,7 +140,7 @@ def test_m_eff_negative_values():
|
||||||
assert m_eff_log[padding + 1] is None
|
assert m_eff_log[padding + 1] is None
|
||||||
m_eff_cosh = my_corr.m_eff('cosh')
|
m_eff_cosh = my_corr.m_eff('cosh')
|
||||||
assert m_eff_cosh[padding + 1] is None
|
assert m_eff_cosh[padding + 1] is None
|
||||||
with pytest.raises(Exception):
|
with pytest.raises(ValueError):
|
||||||
my_corr.m_eff('logsym')
|
my_corr.m_eff('logsym')
|
||||||
|
|
||||||
|
|
||||||
|
@ -155,7 +155,7 @@ def test_correlate():
|
||||||
my_corr = pe.correlators.Corr([pe.pseudo_Obs(10, 0.1, 't'), pe.pseudo_Obs(0, 0.05, 't')])
|
my_corr = pe.correlators.Corr([pe.pseudo_Obs(10, 0.1, 't'), pe.pseudo_Obs(0, 0.05, 't')])
|
||||||
corr1 = my_corr.correlate(my_corr)
|
corr1 = my_corr.correlate(my_corr)
|
||||||
corr2 = my_corr.correlate(my_corr[0])
|
corr2 = my_corr.correlate(my_corr[0])
|
||||||
with pytest.raises(Exception):
|
with pytest.raises(TypeError):
|
||||||
corr3 = my_corr.correlate(7.3)
|
corr3 = my_corr.correlate(7.3)
|
||||||
|
|
||||||
|
|
||||||
|
@ -176,9 +176,9 @@ def test_fit_correlator():
|
||||||
assert fit_res[0] == my_corr[0]
|
assert fit_res[0] == my_corr[0]
|
||||||
assert fit_res[1] == my_corr[1] - my_corr[0]
|
assert fit_res[1] == my_corr[1] - my_corr[0]
|
||||||
|
|
||||||
with pytest.raises(Exception):
|
with pytest.raises(TypeError):
|
||||||
my_corr.fit(f, "from 0 to 3")
|
my_corr.fit(f, "from 0 to 3")
|
||||||
with pytest.raises(Exception):
|
with pytest.raises(ValueError):
|
||||||
my_corr.fit(f, [0, 2, 3])
|
my_corr.fit(f, [0, 2, 3])
|
||||||
|
|
||||||
|
|
||||||
|
@ -256,11 +256,11 @@ def test_prange():
|
||||||
corr = pe.correlators.Corr(corr_content)
|
corr = pe.correlators.Corr(corr_content)
|
||||||
|
|
||||||
corr.set_prange([2, 4])
|
corr.set_prange([2, 4])
|
||||||
with pytest.raises(Exception):
|
with pytest.raises(ValueError):
|
||||||
corr.set_prange([2])
|
corr.set_prange([2])
|
||||||
with pytest.raises(Exception):
|
with pytest.raises(TypeError):
|
||||||
corr.set_prange([2, 2.3])
|
corr.set_prange([2, 2.3])
|
||||||
with pytest.raises(Exception):
|
with pytest.raises(ValueError):
|
||||||
corr.set_prange([4, 1])
|
corr.set_prange([4, 1])
|
||||||
|
|
||||||
|
|
||||||
|
|
|
@ -30,7 +30,7 @@ def test_grid_dirac():
|
||||||
'SigmaYZ',
|
'SigmaYZ',
|
||||||
'SigmaZT']:
|
'SigmaZT']:
|
||||||
pe.dirac.Grid_gamma(gamma)
|
pe.dirac.Grid_gamma(gamma)
|
||||||
with pytest.raises(Exception):
|
with pytest.raises(ValueError):
|
||||||
pe.dirac.Grid_gamma('Not a gamma matrix')
|
pe.dirac.Grid_gamma('Not a gamma matrix')
|
||||||
|
|
||||||
|
|
||||||
|
@ -44,7 +44,7 @@ def test_epsilon_tensor():
|
||||||
(1, 1, 3) : 0.0}
|
(1, 1, 3) : 0.0}
|
||||||
for key, value in check.items():
|
for key, value in check.items():
|
||||||
assert pe.dirac.epsilon_tensor(*key) == value
|
assert pe.dirac.epsilon_tensor(*key) == value
|
||||||
with pytest.raises(Exception):
|
with pytest.raises(ValueError):
|
||||||
pe.dirac.epsilon_tensor(0, 1, 3)
|
pe.dirac.epsilon_tensor(0, 1, 3)
|
||||||
|
|
||||||
|
|
||||||
|
@ -59,5 +59,5 @@ def test_epsilon_tensor_rank4():
|
||||||
(1, 2, 3, 1) : 0.0}
|
(1, 2, 3, 1) : 0.0}
|
||||||
for key, value in check.items():
|
for key, value in check.items():
|
||||||
assert pe.dirac.epsilon_tensor_rank4(*key) == value
|
assert pe.dirac.epsilon_tensor_rank4(*key) == value
|
||||||
with pytest.raises(Exception):
|
with pytest.raises(ValueError):
|
||||||
pe.dirac.epsilon_tensor_rank4(0, 1, 3, 4)
|
pe.dirac.epsilon_tensor_rank4(0, 1, 3, 4)
|
||||||
|
|
|
@ -61,9 +61,9 @@ def test_Obs_exceptions():
|
||||||
my_obs.plot_rep_dist()
|
my_obs.plot_rep_dist()
|
||||||
with pytest.raises(Exception):
|
with pytest.raises(Exception):
|
||||||
my_obs.plot_piechart()
|
my_obs.plot_piechart()
|
||||||
with pytest.raises(Exception):
|
with pytest.raises(TypeError):
|
||||||
my_obs.gamma_method(S='2.3')
|
my_obs.gamma_method(S='2.3')
|
||||||
with pytest.raises(Exception):
|
with pytest.raises(ValueError):
|
||||||
my_obs.gamma_method(tau_exp=2.3)
|
my_obs.gamma_method(tau_exp=2.3)
|
||||||
my_obs.gamma_method()
|
my_obs.gamma_method()
|
||||||
my_obs.details()
|
my_obs.details()
|
||||||
|
@ -199,7 +199,7 @@ def test_gamma_method_no_windowing():
|
||||||
assert np.isclose(np.sqrt(np.var(obs.deltas['ens'], ddof=1) / obs.shape['ens']), obs.dvalue)
|
assert np.isclose(np.sqrt(np.var(obs.deltas['ens'], ddof=1) / obs.shape['ens']), obs.dvalue)
|
||||||
obs.gamma_method(S=1.1)
|
obs.gamma_method(S=1.1)
|
||||||
assert obs.e_tauint['ens'] > 0.5
|
assert obs.e_tauint['ens'] > 0.5
|
||||||
with pytest.raises(Exception):
|
with pytest.raises(ValueError):
|
||||||
obs.gamma_method(S=-0.2)
|
obs.gamma_method(S=-0.2)
|
||||||
|
|
||||||
|
|
||||||
|
@ -490,12 +490,12 @@ def test_reweighting():
|
||||||
r_obs2 = r_obs[0] * my_obs
|
r_obs2 = r_obs[0] * my_obs
|
||||||
assert r_obs2.reweighted
|
assert r_obs2.reweighted
|
||||||
my_covobs = pe.cov_Obs(1.0, 0.003, 'cov')
|
my_covobs = pe.cov_Obs(1.0, 0.003, 'cov')
|
||||||
with pytest.raises(Exception):
|
with pytest.raises(ValueError):
|
||||||
pe.reweight(my_obs, [my_covobs])
|
pe.reweight(my_obs, [my_covobs])
|
||||||
my_obs2 = pe.Obs([np.random.rand(1000)], ['t2'])
|
my_obs2 = pe.Obs([np.random.rand(1000)], ['t2'])
|
||||||
with pytest.raises(Exception):
|
with pytest.raises(ValueError):
|
||||||
pe.reweight(my_obs, [my_obs + my_obs2])
|
pe.reweight(my_obs, [my_obs + my_obs2])
|
||||||
with pytest.raises(Exception):
|
with pytest.raises(ValueError):
|
||||||
pe.reweight(my_irregular_obs, [my_obs])
|
pe.reweight(my_irregular_obs, [my_obs])
|
||||||
|
|
||||||
|
|
||||||
|
@ -505,10 +505,10 @@ def test_merge_obs():
|
||||||
merged = pe.merge_obs([my_obs1, my_obs2])
|
merged = pe.merge_obs([my_obs1, my_obs2])
|
||||||
diff = merged - my_obs2 - my_obs1
|
diff = merged - my_obs2 - my_obs1
|
||||||
assert diff == -(my_obs1.value + my_obs2.value) / 2
|
assert diff == -(my_obs1.value + my_obs2.value) / 2
|
||||||
with pytest.raises(Exception):
|
with pytest.raises(ValueError):
|
||||||
pe.merge_obs([my_obs1, my_obs1])
|
pe.merge_obs([my_obs1, my_obs1])
|
||||||
my_covobs = pe.cov_Obs(1.0, 0.003, 'cov')
|
my_covobs = pe.cov_Obs(1.0, 0.003, 'cov')
|
||||||
with pytest.raises(Exception):
|
with pytest.raises(ValueError):
|
||||||
pe.merge_obs([my_obs1, my_covobs])
|
pe.merge_obs([my_obs1, my_covobs])
|
||||||
|
|
||||||
|
|
||||||
|
@ -531,11 +531,11 @@ def test_correlate():
|
||||||
assert corr1 == corr2
|
assert corr1 == corr2
|
||||||
|
|
||||||
my_obs3 = pe.Obs([np.random.rand(100)], ['t'], idl=[range(2, 102)])
|
my_obs3 = pe.Obs([np.random.rand(100)], ['t'], idl=[range(2, 102)])
|
||||||
with pytest.raises(Exception):
|
with pytest.raises(ValueError):
|
||||||
pe.correlate(my_obs1, my_obs3)
|
pe.correlate(my_obs1, my_obs3)
|
||||||
|
|
||||||
my_obs4 = pe.Obs([np.random.rand(99)], ['t'])
|
my_obs4 = pe.Obs([np.random.rand(99)], ['t'])
|
||||||
with pytest.raises(Exception):
|
with pytest.raises(ValueError):
|
||||||
pe.correlate(my_obs1, my_obs4)
|
pe.correlate(my_obs1, my_obs4)
|
||||||
|
|
||||||
my_obs5 = pe.Obs([np.random.rand(100)], ['t'], idl=[range(5, 505, 5)])
|
my_obs5 = pe.Obs([np.random.rand(100)], ['t'], idl=[range(5, 505, 5)])
|
||||||
|
@ -544,10 +544,10 @@ def test_correlate():
|
||||||
assert my_obs5.idl == corr3.idl
|
assert my_obs5.idl == corr3.idl
|
||||||
|
|
||||||
my_new_obs = pe.Obs([np.random.rand(100)], ['q3'])
|
my_new_obs = pe.Obs([np.random.rand(100)], ['q3'])
|
||||||
with pytest.raises(Exception):
|
with pytest.raises(ValueError):
|
||||||
pe.correlate(my_obs1, my_new_obs)
|
pe.correlate(my_obs1, my_new_obs)
|
||||||
my_covobs = pe.cov_Obs(1.0, 0.003, 'cov')
|
my_covobs = pe.cov_Obs(1.0, 0.003, 'cov')
|
||||||
with pytest.raises(Exception):
|
with pytest.raises(ValueError):
|
||||||
pe.correlate(my_covobs, my_covobs)
|
pe.correlate(my_covobs, my_covobs)
|
||||||
r_obs = pe.reweight(my_obs1, [my_obs1])[0]
|
r_obs = pe.reweight(my_obs1, [my_obs1])[0]
|
||||||
with pytest.warns(RuntimeWarning):
|
with pytest.warns(RuntimeWarning):
|
||||||
|
@ -774,7 +774,7 @@ def test_gamma_method_irregular():
|
||||||
my_obs.gm()
|
my_obs.gm()
|
||||||
idl += [range(1, 400, 4)]
|
idl += [range(1, 400, 4)]
|
||||||
my_obs = pe.Obs([dat for i in range(len(idl))], ['%s|%d' % ('A', i) for i in range(len(idl))], idl=idl)
|
my_obs = pe.Obs([dat for i in range(len(idl))], ['%s|%d' % ('A', i) for i in range(len(idl))], idl=idl)
|
||||||
with pytest.raises(Exception):
|
with pytest.raises(ValueError):
|
||||||
my_obs.gm()
|
my_obs.gm()
|
||||||
|
|
||||||
# check cases where tau is large compared to the chain length
|
# check cases where tau is large compared to the chain length
|
||||||
|
@ -1122,7 +1122,7 @@ def test_jackknife():
|
||||||
|
|
||||||
assert np.allclose(tmp_jacks, my_obs.export_jackknife())
|
assert np.allclose(tmp_jacks, my_obs.export_jackknife())
|
||||||
my_new_obs = my_obs + pe.Obs([full_data], ['test2'])
|
my_new_obs = my_obs + pe.Obs([full_data], ['test2'])
|
||||||
with pytest.raises(Exception):
|
with pytest.raises(ValueError):
|
||||||
my_new_obs.export_jackknife()
|
my_new_obs.export_jackknife()
|
||||||
|
|
||||||
|
|
||||||
|
|
Loading…
Add table
Reference in a new issue