Mirror of https://github.com/fjosw/pyerrors.git, synced 2025-05-15 20:13:41 +02:00
Merge branch 'develop' into feature/eigenvalue_smoothing
commit 587cf6f9b0

11 changed files with 157 additions and 82 deletions
@@ -794,7 +794,34 @@ class Corr:
else:
raise Exception("'save' has to be a string.")

return

def spaghetti_plot(self, logscale=True):
"""Produces a spaghetti plot of the correlator suited to monitor exceptional configurations.

Parameters
----------
logscale : bool
Determines whether the scale of the y-axis is logarithmic or standard.
"""
if self.N != 1:
raise Exception("Correlator needs to be projected first.")

mc_names = list(set([item for sublist in [o[0].mc_names for o in self.content if o is not None] for item in sublist]))
x0_vals = [n for (n, o) in zip(np.arange(self.T), self.content) if o is not None]

for name in mc_names:
data = np.array([o[0].deltas[name] + o[0].r_values[name] for o in self.content if o is not None]).T

fig = plt.figure()
ax = fig.add_subplot(111)
for dat in data:
ax.plot(x0_vals, dat, ls='-', marker='')

if logscale is True:
ax.set_yscale('log')

ax.set_xlabel(r'$x_0 / a$')
plt.title(name)
plt.draw()

def dump(self, filename, datatype="json.gz", **kwargs):
"""Dumps the Corr into a file of chosen type
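A minimal usage sketch for the new spaghetti_plot method, built on a synthetic correlator (all names and numbers are illustrative, assuming the pyerrors 2.x Corr and Obs constructors):

    import numpy as np
    import pyerrors as pe
    import matplotlib.pyplot as plt

    rng = np.random.default_rng(0)
    # Toy single-entry correlator: one Obs per time slice, 200 synthetic configurations.
    content = [pe.Obs([np.exp(-0.3 * t) * (1 + 0.05 * rng.normal(size=200))], ['toy_ensemble']) for t in range(8)]
    my_corr = pe.Corr(content)

    my_corr.spaghetti_plot(logscale=True)  # one curve per configuration, per ensemble
    plt.show()                             # spaghetti_plot itself only calls plt.draw()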
@@ -464,8 +464,10 @@ def _standard_fit(x, y, func, silent=False, **kwargs):
corr = covariance(y, correlation=True, **kwargs)
covdiag = np.diag(1 / np.asarray(dy_f))
condn = np.linalg.cond(corr)
if condn > 1e8:
warnings.warn("Correlation matrix may be ill-conditioned, condition number: %1.2e" % (condn), RuntimeWarning)
if condn > 0.1 / np.finfo(float).eps:
raise Exception(f"Cannot invert correlation matrix as its condition number exceeds machine precision ({condn:1.2e})")
if condn > 1 / np.sqrt(np.finfo(float).eps):
warnings.warn("Correlation matrix may be ill-conditioned, condition number: {%1.2e}" % (condn), RuntimeWarning)
chol = np.linalg.cholesky(corr)
chol_inv = np.linalg.inv(chol)
chol_inv = np.dot(chol_inv, covdiag)
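For orientation, the two new thresholds evaluate to roughly 6.7e7 (warning) and 4.5e14 (hard error) in IEEE double precision. A small self-contained sketch of the same threshold logic, independent of pyerrors internals:

    import warnings
    import numpy as np

    eps = np.finfo(float).eps          # ~2.22e-16 for IEEE doubles
    warn_level = 1 / np.sqrt(eps)      # ~6.7e7: Cholesky may already lose accuracy
    raise_level = 0.1 / eps            # ~4.5e14: inversion is no longer trustworthy

    def check_condition(corr):
        """Illustrative re-implementation of the thresholds added to _standard_fit."""
        condn = np.linalg.cond(corr)
        if condn > raise_level:
            raise Exception(f"Cannot invert correlation matrix, condition number {condn:1.2e}")
        if condn > warn_level:
            warnings.warn(f"Correlation matrix may be ill-conditioned, condition number {condn:1.2e}", RuntimeWarning)
        return condn

    print(check_condition(np.identity(4)))  # condition number 1.0, no warning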
@@ -623,7 +625,7 @@ def qqplot(x, o_y, func, p):
fit_stop = my_x[-1]
samples = np.arange(fit_start, fit_stop, 0.01)
plt.plot(samples, samples, 'k--', zorder=11, label='Standard normal distribution')
plt.plot(samples, probplot[1][0] * samples + probplot[1][1], zorder=10, label='Least squares fit, r=' + str(np.around(probplot[1][2], 3)))
plt.plot(samples, probplot[1][0] * samples + probplot[1][1], zorder=10, label='Least squares fit, r=' + str(np.around(probplot[1][2], 3)), marker='', ls='-')

plt.xlabel('Theoretical quantiles')
plt.ylabel('Ordered Values')
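The probplot object indexed here comes from scipy.stats.probplot with fit=True, which returns the ordered data together with a least-squares line. A brief sketch of that structure, using synthetic residuals (names and data illustrative):

    import numpy as np
    from scipy import stats

    residuals = np.random.default_rng(0).normal(size=50)  # stand-in for normalized fit residuals
    (osm, osr), (slope, intercept, r) = stats.probplot(residuals, dist='norm', fit=True)
    # osm: theoretical quantiles, osr: ordered residuals;
    # slope, intercept, r correspond to probplot[1][0], probplot[1][1], probplot[1][2] above.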
@@ -358,7 +358,7 @@ def _parse_json_dict(json_dict, verbose=True, full_output=False):
ret = Obs([[ddi[0] + values[0] for ddi in di] for di in od['deltas']], od['names'], idl=od['idl'])
ret.is_merged = od['is_merged']
else:
ret = Obs([], [])
ret = Obs([], [], means=[])
ret._value = values[0]
for name in cd:
co = cd[name][0]
@@ -383,7 +383,7 @@ def _parse_json_dict(json_dict, verbose=True, full_output=False):
ret.append(Obs([list(di[:, i] + values[i]) for di in od['deltas']], od['names'], idl=od['idl']))
ret[-1].is_merged = od['is_merged']
else:
ret.append(Obs([], []))
ret.append(Obs([], [], means=[]))
ret[-1]._value = values[i]
print('Created Obs with means= ', values[i])
for name in cd:
@@ -410,7 +410,7 @@ def _parse_json_dict(json_dict, verbose=True, full_output=False):
ret.append(Obs([di[:, i] + values[i] for di in od['deltas']], od['names'], idl=od['idl']))
ret[-1].is_merged = od['is_merged']
else:
ret.append(Obs([], []))
ret.append(Obs([], [], means=[]))
ret[-1]._value = values[i]
for name in cd:
co = cd[name][i]
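All three hunks touch the reconstruction of observables that carry no Monte Carlo samples (pure covobs), which now have to be rebuilt with an explicit empty means list. A hedged round-trip sketch, assuming the dump_to_json/load_json helpers from pyerrors.input.json and treating the file name as illustrative:

    import pyerrors as pe

    c = pe.cov_Obs(0.5, 0.0001, 'systematic_input')   # observable without MC samples
    pe.input.json.dump_to_json([c], 'covobs_test')    # writes covobs_test.json.gz by default
    reread = pe.input.json.load_json('covobs_test')   # internally rebuilds via Obs([], [], means=[])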
pyerrors/obs.py
@@ -90,51 +90,44 @@ class Obs:
self.deltas = {}
self._covobs = {}

self._value = 0
self.N = 0
self.is_merged = {}
self.idl = {}
if len(samples):
if idl is not None:
for name, idx in sorted(zip(names, idl)):
if isinstance(idx, range):
self.idl[name] = idx
elif isinstance(idx, (list, np.ndarray)):
dc = np.unique(np.diff(idx))
if np.any(dc < 0):
raise Exception("Unsorted idx for idl[%s]" % (name))
if len(dc) == 1:
self.idl[name] = range(idx[0], idx[-1] + dc[0], dc[0])
else:
self.idl[name] = list(idx)
if idl is not None:
for name, idx in sorted(zip(names, idl)):
if isinstance(idx, range):
self.idl[name] = idx
elif isinstance(idx, (list, np.ndarray)):
dc = np.unique(np.diff(idx))
if np.any(dc < 0):
raise Exception("Unsorted idx for idl[%s]" % (name))
if len(dc) == 1:
self.idl[name] = range(idx[0], idx[-1] + dc[0], dc[0])
else:
raise Exception('incompatible type for idl[%s].' % (name))
else:
for name, sample in sorted(zip(names, samples)):
self.idl[name] = range(1, len(sample) + 1)

self._value = 0
self.N = 0
if kwargs.get("means") is not None:
for name, sample, mean in sorted(zip(names, samples, kwargs.get("means"))):
self.shape[name] = len(self.idl[name])
self.N += self.shape[name]
self.r_values[name] = mean
self.deltas[name] = sample
else:
for name, sample in sorted(zip(names, samples)):
self.shape[name] = len(self.idl[name])
self.N += self.shape[name]
if len(sample) != self.shape[name]:
raise Exception('Incompatible samples and idx for %s: %d vs. %d' % (name, len(sample), self.shape[name]))
self.r_values[name] = np.mean(sample)
self.deltas[name] = sample - self.r_values[name]
self._value += self.shape[name] * self.r_values[name]
self._value /= self.N

self.is_merged = {}

self.idl[name] = list(idx)
else:
raise Exception('incompatible type for idl[%s].' % (name))
else:
self._value = 0
self.is_merged = {}
self.N = 0
for name, sample in sorted(zip(names, samples)):
self.idl[name] = range(1, len(sample) + 1)

if kwargs.get("means") is not None:
for name, sample, mean in sorted(zip(names, samples, kwargs.get("means"))):
self.shape[name] = len(self.idl[name])
self.N += self.shape[name]
self.r_values[name] = mean
self.deltas[name] = sample
else:
for name, sample in sorted(zip(names, samples)):
self.shape[name] = len(self.idl[name])
self.N += self.shape[name]
if len(sample) != self.shape[name]:
raise Exception('Incompatible samples and idx for %s: %d vs. %d' % (name, len(sample), self.shape[name]))
self.r_values[name] = np.mean(sample)
self.deltas[name] = sample - self.r_values[name]
self._value += self.shape[name] * self.r_values[name]
self._value /= self.N

self._dvalue = 0.0
self.ddvalue = 0.0
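After this constructor refactoring, regular observables are still built from samples (optionally with explicit configuration numbers), while sample-free observables must pass their means explicitly. A short sketch of both paths with synthetic data (all names and values illustrative):

    import numpy as np
    import pyerrors as pe

    rng = np.random.default_rng(1)
    samples = rng.normal(1.0, 0.1, size=1000)

    obs_a = pe.Obs([samples], ['ensemble_A'])                           # mean inferred from the samples
    obs_b = pe.Obs([samples], ['ensemble_A'], idl=[range(1, 2000, 2)])  # explicit, equally spaced configuration numbers
    # An Obs without any samples (as used internally for covobs) now needs means=[]:
    empty = pe.Obs([], [], means=[])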
@@ -443,17 +436,15 @@ class Obs:
"""
return self.is_zero() or np.abs(self.value) <= sigma * self._dvalue

def is_zero(self, rtol=1.e-5, atol=1.e-8):
def is_zero(self, atol=1e-10):
"""Checks whether the observable is zero within a given tolerance.

Parameters
----------
rtol : float
Relative tolerance (for details see numpy documentation).
atol : float
Absolute tolerance (for details see numpy documentation).
"""
return np.isclose(0.0, self.value, rtol, atol) and all(np.allclose(0.0, delta, rtol, atol) for delta in self.deltas.values()) and all(np.allclose(0.0, delta.errsq(), rtol, atol) for delta in self.covobs.values())
return np.isclose(0.0, self.value, 1e-14, atol) and all(np.allclose(0.0, delta, 1e-14, atol) for delta in self.deltas.values()) and all(np.allclose(0.0, delta.errsq(), 1e-14, atol) for delta in self.covobs.values())

def plot_tauint(self, save=None):
"""Plot integrated autocorrelation time for each ensemble.
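A quick sketch of the changed signature: is_zero keeps only an absolute tolerance, with the relative tolerance fixed internally at 1e-14 (toy data, values illustrative):

    import pyerrors as pe

    zero_obs = pe.Obs([[1e-12, -1e-12, 5e-13, -5e-13, 2e-13, -2e-13, 1e-13, -1e-13]], ['toy_ensemble'])
    print(zero_obs.is_zero())            # True with the default atol=1e-10
    print(zero_obs.is_zero(atol=1e-16))  # stricter absolute tolerance, False for these deltas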
@@ -584,7 +575,7 @@ class Obs:
ensemble to the error and returns a dictionary containing the fractions."""
if not hasattr(self, 'e_dvalue'):
raise Exception('Run the gamma method first.')
if self._dvalue == 0.0:
if np.isclose(0.0, self._dvalue, atol=1e-15):
raise Exception('Error is 0.0')
labels = self.e_names
sizes = [self.e_dvalue[name] ** 2 for name in labels] / self._dvalue ** 2
@@ -731,6 +722,9 @@ class Obs:
def __rsub__(self, y):
return -1 * (self - y)

def __pos__(self):
return self

def __neg__(self):
return -1 * self
@@ -913,8 +907,11 @@ class CObs:
def __abs__(self):
return np.sqrt(self.real**2 + self.imag**2)

def __neg__(other):
return -1 * other
def __pos__(self):
return self

def __neg__(self):
return -1 * self

def __eq__(self, other):
return self.real == other.real and self.imag == other.imag
@@ -1511,7 +1508,8 @@ def import_jackknife(jacks, name, idl=None):
length = len(jacks) - 1
prj = (np.ones((length, length)) - (length - 1) * np.identity(length))
samples = jacks[1:] @ prj
new_obs = Obs([samples], [name], idl=idl)
mean = np.mean(samples)
new_obs = Obs([samples - mean], [name], idl=idl, means=[mean])
new_obs._value = jacks[0]
return new_obs
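A hedged round-trip sketch for this function, assuming Obs.export_jackknife is available as in pyerrors 2.x; the projection jacks[1:] @ prj recovers the per-configuration samples from the leave-one-out averages (ensemble name and numbers illustrative):

    import numpy as np
    import pyerrors as pe

    rng = np.random.default_rng(2)
    original = pe.Obs([rng.normal(0.7, 0.05, size=500)], ['toy_ensemble'])

    jacks = original.export_jackknife()       # jacks[0] is the mean, jacks[1:] the jackknife samples
    reconstructed = pe.import_jackknife(jacks, 'toy_ensemble')
    print(np.isclose(original.value, reconstructed.value))  # expected: True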
@@ -1570,7 +1568,7 @@ def cov_Obs(means, cov, name, grad=None):
co : Covobs
Covobs to be embedded into the Obs
"""
o = Obs([], [])
o = Obs([], [], means=[])
o._value = co.value
o.names.append(co.name)
o._covobs[co.name] = co
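For context, a small sketch of how cov_Obs is typically called from user code; each returned Obs is now seeded internally with Obs([], [], means=[]) (names and covariance values illustrative):

    import pyerrors as pe

    # Two correlated external inputs with a 2x2 covariance matrix.
    za, zb = pe.cov_Obs([1.02, 0.98], [[4e-4, 1e-4], [1e-4, 9e-4]], 'renormalization_inputs')
    ratio = za / zb
    ratio.gamma_method()
    print(ratio)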
@@ -1 +1 @@
__version__ = "2.0.0-rc.3+dev"
__version__ = "2.1.0+dev"