mirror of https://github.com/fjosw/pyerrors.git
synced 2025-05-15 03:53:41 +02:00
flake8 style changes
This commit is contained in:
parent 43f85efff5
commit 57c6a07801
9 changed files with 87 additions and 128 deletions
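The hunks below apply the same few lint fixes throughout: the ambiguous loop variable l is renamed to line, assignments to never-used names like fig are dropped, ### block comments become #, an invalid \_ escape becomes a raw string, an unused import is removed, and surplus blank lines and awkward continuation lines are cleaned up. As a minimal sketch of the two most frequent patterns (the flake8 error codes are inferred from the changes; the commit message only says "flake8 style changes"):

    # E741: 'l' reads like '1' or 'I', so flake8 flags it as ambiguous;
    # the fix is a plain rename.
    files = []
    for line in ['meson_100.h5', 'meson_105.h5', 'README']:  # hypothetical listing
        if line.startswith('meson'):
            files.append(line)

    # F841: a local assigned but never used.  Calling plt.figure for its
    # side effect alone silences the warning.
    import matplotlib.pyplot as plt
    plt.figure(figsize=(8, 8 / 1.618))  # before: fig = plt.figure(...)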
@@ -551,7 +551,7 @@ def qqplot(x, o_y, func, p):
     my_y = [o.value for o in residuals]
     probplot = scipy.stats.probplot(my_y)
     my_x = probplot[0][0]
-    fig = plt.figure(figsize=(8, 8 / 1.618))
+    plt.figure(figsize=(8, 8 / 1.618))
     plt.errorbar(my_x, my_y, fmt='o')
     fit_start = my_x[0]
     fit_stop = my_x[-1]
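For context on the qqplot hunk above: scipy.stats.probplot returns ((osm, osr), (slope, intercept, r)), so probplot[0][0] holds the theoretical quantiles plotted on the x axis. A self-contained sketch (data fabricated for illustration):

    import numpy as np
    import scipy.stats

    my_y = np.random.randn(50)  # stand-in for the fit residuals
    (osm, osr), (slope, intercept, r) = scipy.stats.probplot(my_y)
    my_x = osm                  # theoretical quantiles, as in probplot[0][0]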
@@ -7,6 +7,7 @@ import numpy as np
 from ..pyerrors import Obs
 from ..correlators import Corr
 
+
 def read_meson_hd5(path, filestem, ens_id, meson='meson_0', tree='meson'):
     """Read hadrons meson hdf5 file and extract the meson labeled 'meson'
 
@@ -24,9 +25,9 @@ def read_meson_hd5(path, filestem, ens_id, meson='meson_0', tree='meson'):
 
     # Clean up file list
     files = []
-    for l in ls:
-        if l.startswith(filestem):
-            files.append(l)
+    for line in ls:
+        if line.startswith(filestem):
+            files.append(line)
 
     if not files:
         raise Exception('No files starting with', filestem, 'in folder', path)
@@ -37,8 +38,8 @@ def read_meson_hd5(path, filestem, ens_id, meson='meson_0', tree='meson'):
 
     # Check that configurations are evenly spaced
     cnfg_numbers = []
-    for l in files:
-        cnfg_numbers.append(get_cnfg_number(l))
+    for line in files:
+        cnfg_numbers.append(get_cnfg_number(line))
 
     if not all(np.diff(cnfg_numbers) == np.diff(cnfg_numbers)[0]):
         raise Exception('Configurations are not evenly spaced.')

@@ -25,7 +25,6 @@ class Jack:
         self.value = value  # list(map(np.mean, self.jacks))
         self.dvalue = list(map(_jack_error, self.jacks))
 
-
     def print(self, **kwargs):
         """Print basic properties of the Jack."""
 
@@ -42,19 +41,16 @@ class Jack:
 
         print('Result:\t %3.8e +/- %3.8e +/- %3.8e (%3.3f%%)' % (self.value, self.dvalue[b], self.dvalue[b] * np.sqrt(2 * b / self.N[0]), np.abs(self.dvalue[b] / self.value * 100)))
 
-
     def plot_tauint(self):
         plt.xlabel('binsize')
         plt.ylabel('tauint')
         length = self.max_binsize
         x = np.arange(length) + 1
-        plt.errorbar(x[:], (self.dvalue[:] / self.dvalue[0]) ** 2 / 2, yerr=np.sqrt(((2 * (self.dvalue[:] / self.dvalue[0]) ** 2 * np.sqrt(2 * x[:] / self.N[0])) / 2) ** 2
-                     + ((2 * (self.dvalue[:] / self.dvalue[0]) ** 2 * np.sqrt(2 / self.N[0])) / 2) ** 2), linewidth=1, capsize=2)
+        plt.errorbar(x[:], (self.dvalue[:] / self.dvalue[0]) ** 2 / 2, yerr=np.sqrt(((2 * (self.dvalue[:] / self.dvalue[0]) ** 2 * np.sqrt(2 * x[:] / self.N[0])) / 2) ** 2 + ((2 * (self.dvalue[:] / self.dvalue[0]) ** 2 * np.sqrt(2 / self.N[0])) / 2) ** 2), linewidth=1, capsize=2)
         plt.xlim(0.5, length + 0.5)
         plt.title('Tauint')
         plt.show()
 
-
     def plot_history(self):
         N = self.N
         x = np.arange(N)
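Reading the plot_tauint hunk above off the code: with Delta(b) = self.dvalue at bin size b (index 0 corresponding to b = 1) and N the number of measurements, the plotted quantity is the standard binning estimate of the integrated autocorrelation time,

    \tau_{\mathrm{int}}(b) = \frac{1}{2} \left( \frac{\Delta(b)}{\Delta(1)} \right)^2,

and the error bars combine relative uncertainties proportional to \sqrt{2b/N} for \Delta(b) and \sqrt{2/N} for \Delta(1) in quadrature, exactly as in the yerr expression.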
@@ -111,7 +107,6 @@ def generate_jack(obs, **kwargs):
     # generate jacks from data
     mean = np.mean(binned_data)
     tmp_jacks = np.zeros(n)
-    #print(binned_data)
     for i in range(n):
         tmp_jacks[i] = (n * mean - binned_data[i]) / (n - 1)
     jacks.append(tmp_jacks)

@@ -6,7 +6,7 @@ import autograd.numpy as anp  # Thinly-wrapped numpy
 from .pyerrors import derived_observable
 
 
-### This code block is directly taken from the current master branch of autograd and remains
+# This code block is directly taken from the current master branch of autograd and remains
 # only until the new version is released on PyPi
 from functools import partial
 from autograd.extend import defvjp
@@ -15,6 +15,8 @@ _dot = partial(anp.einsum, '...ij,...jk->...ik')
 # batched diag
 _diag = lambda a: anp.eye(a.shape[-1]) * a
 # batched diagonal, similar to matrix_diag in tensorflow
+
+
 def _matrix_diag(a):
     reps = anp.array(a.shape)
     reps[:-1] = 1
@@ -24,10 +26,13 @@ def _matrix_diag(a):
 
 # https://arxiv.org/pdf/1701.00392.pdf Eq(4.77)
 # Note the formula from Sec3.1 in https://people.maths.ox.ac.uk/gilesm/files/NA-08-01.pdf is incomplete
+
+
 def grad_eig(ans, x):
     """Gradient of a general square (complex valued) matrix"""
     e, u = ans  # eigenvalues as 1d array, eigenvectors in columns
     n = e.shape[-1]
+
     def vjp(g):
         ge, gu = g
         ge = _matrix_diag(ge)
@@ -43,8 +48,10 @@ def grad_eig(ans, x):
         # but the derivative should be real in real input case when imaginary delta is forbidden
         return r
     return vjp
 
+
 defvjp(anp.linalg.eig, grad_eig)
-### End of the code block from autograd.master
+# End of the code block from autograd.master
 
+
 def scalar_mat_op(op, obs, **kwargs):
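The defvjp call above registers grad_eig as the vector-Jacobian product of anp.linalg.eig. For readers unfamiliar with the mechanism, a minimal sketch of autograd's defvjp API with a toy primitive (everything here is illustrative, not part of pyerrors):

    import autograd.numpy as anp
    from autograd import grad
    from autograd.extend import primitive, defvjp

    @primitive
    def square(x):
        return x ** 2

    # The VJP factory receives the forward result and the inputs, and returns
    # a function mapping the upstream gradient g to g * d(square)/dx.
    defvjp(square, lambda ans, x: lambda g: g * 2 * x)

    assert grad(square)(3.0) == 6.0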
@@ -214,7 +221,6 @@ def _num_diff_eigh(obs, **kwargs):
     for i in range(dim):
         res_vec.append(derived_observable(_mat, raveled_obs, n=0, i=i, **kwargs))
 
-
     res_mat = []
     for i in range(dim):
         row = []

@@ -81,4 +81,3 @@ def ks_test(obs=None):
     plt.show()
 
     print(scipy.stats.kstest(Qs, 'uniform'))
-

@@ -91,7 +91,6 @@ class Obs:
         self.e_n_tauint = {}
         self.e_n_dtauint = {}
 
-
     def gamma_method(self, **kwargs):
         """Calculate the error and related properties of the Obs.
 
@@ -262,16 +261,13 @@ class Obs:
             # Make sure no entry of tauint is smaller than 0.5
             self.e_n_tauint[e_name][self.e_n_tauint[e_name] < 0.5] = 0.500000000001
             # hep-lat/0306017 eq. (42)
-            self.e_n_dtauint[e_name] = self.e_n_tauint[e_name] * 2 * np.sqrt(np.abs(np.arange(w_max)
-                                                                             + 0.5 - self.e_n_tauint[e_name]) / e_N)
+            self.e_n_dtauint[e_name] = self.e_n_tauint[e_name] * 2 * np.sqrt(np.abs(np.arange(w_max) + 0.5 - self.e_n_tauint[e_name]) / e_N)
             self.e_n_dtauint[e_name][0] = 0.0
 
-
             def _compute_drho(i):
                 tmp = self.e_rho[e_name][i + 1:w_max] + np.concatenate([self.e_rho[e_name][i - 1::-1], self.e_rho[e_name][1:w_max - 2 * i]]) - 2 * self.e_rho[e_name][i] * self.e_rho[e_name][1:w_max - i]
                 self.e_drho[e_name][i] = np.sqrt(np.sum(tmp ** 2) / e_N)
 
-
             _compute_drho(1)
             if self.tau_exp[e_name] > 0:
                 # Critical slowing down analysis
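The single-line form above implements eq. (42) of hep-lat/0306017 (U. Wolff, "Monte Carlo errors with less errors"), cited in the code comment: the uncertainty of the integrated autocorrelation time at window size W from N measurements,

    \delta\tau_{\mathrm{int}}(W) \approx 2\, \tau_{\mathrm{int}}(W) \sqrt{\frac{W + \tfrac{1}{2} - \tau_{\mathrm{int}}(W)}{N}},

which matches the np.arange(w_max) + 0.5 - tauint term over e_N under the square root.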
@@ -279,8 +275,7 @@ class Obs:
                     _compute_drho(n + 1)
                     if (self.e_rho[e_name][n] - self.N_sigma * self.e_drho[e_name][n]) < 0 or n >= w_max // 2 - 2:
                         # Bias correction hep-lat/0306017 eq. (49) included
-                        self.e_tauint[e_name] = self.e_n_tauint[e_name][n] * (1 + (2 * n + 1) / e_N) / (1 + 1 / e_N) + self.tau_exp[e_name] * np.abs(self.e_rho[e_name][n + 1])
-                        # The absolute makes sure, that the tail contribution is always positive
+                        self.e_tauint[e_name] = self.e_n_tauint[e_name][n] * (1 + (2 * n + 1) / e_N) / (1 + 1 / e_N) + self.tau_exp[e_name] * np.abs(self.e_rho[e_name][n + 1])  # The absolute makes sure, that the tail contribution is always positive
                         self.e_dtauint[e_name] = np.sqrt(self.e_n_dtauint[e_name][n] ** 2 + self.tau_exp[e_name] ** 2 * self.e_drho[e_name][n + 1] ** 2)
                         # Error of tau_exp neglected so far, missing term: self.e_rho[e_name][n + 1] ** 2 * d_tau_exp ** 2
                         self.e_dvalue[e_name] = np.sqrt(2 * self.e_tauint[e_name] * e_gamma[e_name][0] * (1 + 1 / e_N) / e_N)
@@ -325,7 +320,6 @@ class Obs:
         self.ddvalue = np.sqrt(self.ddvalue) / self.dvalue
         return 0
 
-
     def print(self, level=1):
         """Print basic properties of the Obs."""
         if level == 0:
@@ -346,7 +340,6 @@ class Obs:
         for e_name in self.e_names:
             print(e_name, ':', self.e_content[e_name])
 
-
     def plot_tauint(self, save=None):
         """Plot integrated autocorrelation time for each ensemble."""
         if not self.e_names:
@@ -380,7 +373,6 @@ class Obs:
         if save:
             fig.savefig(save)
 
-
     def plot_rho(self):
         """Plot normalized autocorrelation function time for each ensemble."""
         if not self.e_names:
@@ -395,7 +387,7 @@ class Obs:
                 plt.plot([self.e_windowsize[e_name] + 1, self.e_windowsize[e_name] + 1 + 2 * self.tau_exp[e_name]],
                          [self.e_rho[e_name][self.e_windowsize[e_name] + 1], 0], 'k-', lw=1)
                 xmax = self.e_windowsize[e_name] + 2 * self.tau_exp[e_name] + 1.5
-                plt.title('Rho ' + e_name + ', tau\_exp=' + str(np.around(self.tau_exp[e_name], decimals=2)))
+                plt.title('Rho ' + e_name + r', tau\_exp=' + str(np.around(self.tau_exp[e_name], decimals=2)))
             else:
                 xmax = max(10.5, 2 * self.e_windowsize[e_name] - 0.5)
                 plt.title('Rho ' + e_name + ', S=' + str(np.around(self.S[e_name], decimals=2)))
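The raw-string fix above addresses what is presumably flake8's W605: \_ is not a recognized escape sequence, so Python 3.6+ emits a DeprecationWarning for the plain string even though the backslash is passed through unchanged. The r prefix keeps the backslash for matplotlib's TeX-style rendering without the warning:

    # Both spellings produce the same characters, but only the first
    # is warning-free on modern Python.
    assert r', tau\_exp=' == ', tau\\_exp='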
@@ -403,7 +395,6 @@ class Obs:
             plt.xlim(-0.5, xmax)
             plt.draw()
 
-
     def plot_rep_dist(self):
         """Plot replica distribution for each ensemble with more than one replicum."""
         if not self.e_names:
@@ -426,16 +417,14 @@ class Obs:
                 plt.title('Replica distribution' + e_name + ' (mean=0, var=1), Q=' + str(np.around(self.e_Q[e_name], decimals=2)))
                 plt.show()
 
-
     def plot_history(self):
         """Plot derived Monte Carlo history for each ensemble."""
         if not self.e_names:
             raise Exception('Run the gamma method first.')
 
         for e, e_name in enumerate(self.e_names):
-            f = plt.figure()
+            plt.figure()
             r_length = []
             sub_r_mean = 0
             for r, r_name in enumerate(self.e_content[e_name]):
                 r_length.append(len(self.deltas[r_name]))
             e_N = np.sum(r_length)
@@ -449,7 +438,6 @@ class Obs:
             plt.title(e_name)
             plt.show()
 
-
     def plot_piechart(self):
         """Plot piechart which shows the fractional contribution of each
         ensemble to the error and returns a dictionary containing the fractions."""
@@ -480,7 +468,6 @@ class Obs:
         with open(file_name, 'wb') as fb:
             pickle.dump(self, fb)
 
-
     def __repr__(self):
         if self.dvalue == 0.0:
             return 'Obs[' + str(self.value) + ']'
@@ -492,7 +479,6 @@ class Obs:
         else:
             return 'Obs[{:.0f}({:2.0f})]'.format(self.value, self.dvalue)
 
-
     # Overload comparisons
     def __lt__(self, other):
         return self.value < other
@@ -500,7 +486,6 @@ class Obs:
     def __gt__(self, other):
         return self.value > other
 
-
     # Overload math operations
     def __add__(self, y):
         if isinstance(y, Obs):
@@ -512,10 +497,10 @@ class Obs:
                 return NotImplemented
         else:
             return derived_observable(lambda x, **kwargs: x[0] + y, [self], man_grad=[1])
+
     def __radd__(self, y):
         return self + y
 
-
     def __mul__(self, y):
         if isinstance(y, Obs):
             return derived_observable(lambda x, **kwargs: x[0] * x[1], [self, y], man_grad=[y.value, self.value])
@@ -531,7 +516,6 @@ class Obs:
     def __rmul__(self, y):
         return self * y
 
-
     def __sub__(self, y):
         if isinstance(y, Obs):
             return derived_observable(lambda x, **kwargs: x[0] - x[1], [self, y], man_grad=[1, -1])
|
|||
else:
|
||||
return derived_observable(lambda x, **kwargs: x[0] - y, [self], man_grad=[1])
|
||||
|
||||
|
||||
def __rsub__(self, y):
|
||||
return -1 * (self - y)
|
||||
|
||||
|
||||
def __neg__(self):
|
||||
return -1 * self
|
||||
|
||||
|
||||
def __truediv__(self, y):
|
||||
if isinstance(y, Obs):
|
||||
return derived_observable(lambda x, **kwargs: x[0] / x[1], [self, y], man_grad=[1 / y.value, - self.value / y.value ** 2])
|
||||
|
@@ -567,7 +548,6 @@ class Obs:
         else:
             return derived_observable(lambda x, **kwargs: x[0] / y, [self], man_grad=[1 / y])
 
-
     def __rtruediv__(self, y):
         if isinstance(y, Obs):
             return derived_observable(lambda x, **kwargs: x[0] / x[1], [y, self], man_grad=[1 / self.value, - y.value / self.value ** 2])
@@ -577,86 +557,67 @@ class Obs:
         else:
             return derived_observable(lambda x, **kwargs: y / x[0], [self], man_grad=[-y / self.value ** 2])
 
-
     def __pow__(self, y):
         if isinstance(y, Obs):
             return derived_observable(lambda x: x[0] ** x[1], [self, y])
         else:
             return derived_observable(lambda x: x[0] ** y, [self])
 
-
     def __rpow__(self, y):
         if isinstance(y, Obs):
             return derived_observable(lambda x: x[0] ** x[1], [y, self])
         else:
             return derived_observable(lambda x: y ** x[0], [self])
 
-
     def __abs__(self):
         return derived_observable(lambda x: anp.abs(x[0]), [self])
 
-
     # Overload numpy functions
     def sqrt(self):
         return derived_observable(lambda x, **kwargs: np.sqrt(x[0]), [self], man_grad=[1 / 2 / np.sqrt(self.value)])
 
-
     def log(self):
         return derived_observable(lambda x, **kwargs: np.log(x[0]), [self], man_grad=[1 / self.value])
 
-
     def exp(self):
         return derived_observable(lambda x, **kwargs: np.exp(x[0]), [self], man_grad=[np.exp(self.value)])
 
-
     def sin(self):
         return derived_observable(lambda x, **kwargs: np.sin(x[0]), [self], man_grad=[np.cos(self.value)])
 
-
     def cos(self):
         return derived_observable(lambda x, **kwargs: np.cos(x[0]), [self], man_grad=[-np.sin(self.value)])
 
-
     def tan(self):
         return derived_observable(lambda x, **kwargs: np.tan(x[0]), [self], man_grad=[1 / np.cos(self.value) ** 2])
 
-
     def arcsin(self):
         return derived_observable(lambda x: anp.arcsin(x[0]), [self])
 
-
     def arccos(self):
         return derived_observable(lambda x: anp.arccos(x[0]), [self])
 
-
     def arctan(self):
         return derived_observable(lambda x: anp.arctan(x[0]), [self])
 
-
     def sinh(self):
         return derived_observable(lambda x, **kwargs: np.sinh(x[0]), [self], man_grad=[np.cosh(self.value)])
 
-
     def cosh(self):
         return derived_observable(lambda x, **kwargs: np.cosh(x[0]), [self], man_grad=[np.sinh(self.value)])
 
-
     def tanh(self):
         return derived_observable(lambda x, **kwargs: np.tanh(x[0]), [self], man_grad=[1 / np.cosh(self.value) ** 2])
 
-
     def arcsinh(self):
         return derived_observable(lambda x: anp.arcsinh(x[0]), [self])
 
-
     def arccosh(self):
         return derived_observable(lambda x: anp.arccosh(x[0]), [self])
 
-
     def arctanh(self):
         return derived_observable(lambda x: anp.arctanh(x[0]), [self])
 
-
     def sinc(self):
         return derived_observable(lambda x: anp.sinc(x[0]), [self])
 
@@ -933,8 +894,7 @@ def covariance2(obs1, obs2, correlation=False, **kwargs):
             max_gamma = min(obs1.shape[r_name], w_max)
             # The padding for the fft has to be even
             padding = obs1.shape[r_name] + max_gamma + (obs1.shape[r_name] + max_gamma) % 2
-            e_gamma[e_name][:max_gamma] += (np.fft.irfft(np.fft.rfft(obs1.deltas[r_name], padding) * np.conjugate(np.fft.rfft(obs2.deltas[r_name], padding)))[:max_gamma]
-                                            + np.fft.irfft(np.fft.rfft(obs2.deltas[r_name], padding) * np.conjugate(np.fft.rfft(obs1.deltas[r_name], padding)))[:max_gamma]) / 2.0
+            e_gamma[e_name][:max_gamma] += (np.fft.irfft(np.fft.rfft(obs1.deltas[r_name], padding) * np.conjugate(np.fft.rfft(obs2.deltas[r_name], padding)))[:max_gamma] + np.fft.irfft(np.fft.rfft(obs2.deltas[r_name], padding) * np.conjugate(np.fft.rfft(obs1.deltas[r_name], padding)))[:max_gamma]) / 2.0
 
         if np.all(e_gamma[e_name]) == 0.0:
             continue
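The joined line above computes the symmetrized cross-correlation of the two sets of deltas via FFTs (the cross-correlation theorem); the even zero-padding past the data length prevents circular wrap-around. A self-contained sketch with fabricated data:

    import numpy as np

    a = np.random.randn(200)   # stand-in for obs1.deltas[r_name]
    b = np.random.randn(200)   # stand-in for obs2.deltas[r_name]
    max_gamma = 100
    padding = a.size + max_gamma + (a.size + max_gamma) % 2  # even, as in the diff

    # irfft(rfft(a) * conj(rfft(b))) is the linear cross-correlation once
    # the padding is long enough; averaging both orderings symmetrizes it.
    gamma = (np.fft.irfft(np.fft.rfft(a, padding) * np.conjugate(np.fft.rfft(b, padding)))[:max_gamma]
             + np.fft.irfft(np.fft.rfft(b, padding) * np.conjugate(np.fft.rfft(a, padding)))[:max_gamma]) / 2.0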
@@ -964,7 +924,6 @@ def covariance2(obs1, obs2, correlation=False, **kwargs):
         # Make sure no entry of tauint is smaller than 0.5
         e_n_tauint[e_name][e_n_tauint[e_name] < 0.5] = 0.500000000001
 
-
         window = max(obs1.e_windowsize[e_name], obs2.e_windowsize[e_name])
         # Bias correction hep-lat/0306017 eq. (49)
         e_dvalue[e_name] = 2 * (e_n_tauint[e_name][window] + obs1.tau_exp[e_name] * np.abs(e_rho[e_name][window + 1])) * (1 + (2 * window + 1) / e_N) * e_gamma[e_name][0] / e_N
@@ -1141,7 +1100,6 @@ def plot_corrs(observables, **kwargs):
         for j in range(len(observables)):
             label.append(str(j + 1))
 
-
     f = plt.figure()
     for j in range(len(observables)):
         T = len(observables[j])
@@ -1202,8 +1160,7 @@ def plot_corrs(observables, **kwargs):
             y_fit = fit_result[1].value * np.exp(-fit_result[0].value * x)
             plt.plot(x, y_fit, color='k')
             if not (fit_result[0].e_names == {} and fit_result[1].e_names == {}):
-                y_fit_err = np.sqrt((y_fit * fit_result[0].dvalue) ** 2 + 2 * covariance(fit_result[0], fit_result[1])* y_fit *
-                                    np.exp(-fit_result[0].value * x) + (np.exp(-fit_result[0].value * x) * fit_result[1].dvalue) ** 2)
+                y_fit_err = np.sqrt((y_fit * fit_result[0].dvalue) ** 2 + 2 * covariance(fit_result[0], fit_result[1]) * y_fit * np.exp(-fit_result[0].value * x) + (np.exp(-fit_result[0].value * x) * fit_result[1].dvalue) ** 2)
                 plt.fill_between(x, y_fit + y_fit_err, y_fit - y_fit_err, color='k', alpha=0.1)
 
     plt.xlabel('$x_0/a$')

@@ -3,7 +3,8 @@
 
 import scipy.optimize
 from autograd import jacobian
-from .pyerrors import Obs, derived_observable, pseudo_Obs
+from .pyerrors import derived_observable, pseudo_Obs
 
+
 def find_root(d, func, guess=1.0, **kwargs):
     """Finds the root of the function func(x, d) where d is an Obs.
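A hypothetical usage of find_root as documented above, with pseudo_Obs (imported in this same file) fabricating a test observable; the module paths and the pseudo_Obs argument order are assumptions based on the imports and signatures visible in this diff:

    from pyerrors.pyerrors import pseudo_Obs   # assumed location at this commit
    from pyerrors.roots import find_root

    d = pseudo_Obs(1.5, 0.1, 'test_ensemble')      # value, error, ensemble name
    root = find_root(d, lambda x, d: x ** 2 - d)   # solves x**2 - d = 0, i.e. x = sqrt(d)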