Mirror of https://github.com/fjosw/pyerrors.git, synced 2025-03-15 06:40:24 +01:00

Merge branch 'develop' into feature/v2.0

Commit 863ce6ae73

9 changed files with 85 additions and 125 deletions
@@ -293,7 +293,7 @@ def odr_fit(x, y, func, silent=False, **kwargs):
     old_jac = jacobian(func)(output.beta, output.xplus)
     fused_row1 = np.concatenate((old_jac, np.concatenate((number_of_x_parameters * [np.zeros(old_jac.shape)]), axis=0)))
-    fused_row2 = np.concatenate((jacobian(lambda x, y : func(y, x))(output.xplus, output.beta).reshape(x_f.shape[-1], x_f.shape[-1] * number_of_x_parameters), np.identity(number_of_x_parameters * old_jac.shape[0])))
+    fused_row2 = np.concatenate((jacobian(lambda x, y: func(y, x))(output.xplus, output.beta).reshape(x_f.shape[-1], x_f.shape[-1] * number_of_x_parameters), np.identity(number_of_x_parameters * old_jac.shape[0])))
     new_jac = np.concatenate((fused_row1, fused_row2), axis=1)

     A = W @ new_jac

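The change above only fixes the lambda spacing, but the surrounding block deserves a gloss: odr_fit fuses the parameter Jacobian and the x-dependence Jacobian of the fit function into one matrix, so that the x-errors propagate like extra fit parameters. A minimal sketch of the same block-assembly pattern, with made-up shapes and names (not the exact layout odr_fit uses):

```python
import numpy as np

# Hypothetical block sizes: n_out function values, n_parms fit parameters, n_x x-values.
n_out, n_parms, n_x = 4, 2, 4
jac_beta = np.ones((n_out, n_parms))  # df/dbeta, illustrative values
jac_x = np.ones((n_out, n_x))         # df/dx, illustrative values

# Fuse the blocks so the x-errors enter the least-squares problem
# on the same footing as the fit parameters.
full_jac = np.block([[jac_beta, jac_x],
                     [np.zeros((n_x, n_parms)), np.identity(n_x)]])
print(full_jac.shape)  # (n_out + n_x, n_parms + n_x)
```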
@@ -329,7 +329,7 @@ def odr_fit(x, y, func, silent=False, **kwargs):
     result = []
     for i in range(n_parms):
-        result.append(derived_observable(lambda x, **kwargs: x[0], [pseudo_Obs(output.beta[i], 0.0, y[0].names[0], y[0].shape[y[0].names[0]])] + list(x.ravel()) + list(y), man_grad=[0] + list(deriv_x[i]) + list(deriv_y[i])))
+        result.append(derived_observable(lambda x, **kwargs: x[0], [pseudo_Obs(output.beta[i], 0.0, y[0].names[0], y[0].shape[y[0].names[0]])] + list(x.ravel()) + list(y), man_grad=[0] + list(deriv_x[i]) + list(deriv_y[i])))

     result_dict['fit_parameters'] = result

@@ -534,8 +534,8 @@ def fit_exp(data, **kwargs):
         tmp_log = np.log(np.abs(data[i - shift]))
         ysum += tmp_log
         xysum += i * tmp_log
-    res0 = -(length * xysum - xsum * ysum) / (length * xsum2 - xsum * xsum) # mass
-    res1 = np.exp((xsum2 * ysum - xsum * xysum) / (length * xsum2 - xsum * xsum)) # matrix element
+    res0 = -(length * xysum - xsum * ysum) / (length * xsum2 - xsum * xsum)  # mass
+    res1 = np.exp((xsum2 * ysum - xsum * xysum) / (length * xsum2 - xsum * xsum))  # matrix element
     return [res0, res1]

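res0 and res1 above are the closed-form normal equations of a straight-line fit to log|data|: for data that behave like A * exp(-m * t), the slope of the log is -m (the mass) and the exponential of the intercept is A (the matrix element). A self-contained check against np.polyfit on synthetic data:

```python
import numpy as np

# Synthetic exponential "correlator"; the values are illustrative only.
t = np.arange(10)
A_true, m_true = 1.5, 0.3
data = A_true * np.exp(-m_true * t)

# Closed-form least squares on log(data), as in fit_exp:
length = len(t)
xsum, xsum2 = t.sum(), (t ** 2).sum()
ysum, xysum = np.log(data).sum(), (t * np.log(data)).sum()
mass = -(length * xysum - xsum * ysum) / (length * xsum2 - xsum * xsum)
amp = np.exp((xsum2 * ysum - xsum * xysum) / (length * xsum2 - xsum * xsum))
print(mass, amp)  # ~0.3, ~1.5

# Cross-check with np.polyfit on the logs (slope = -mass, intercept = log(amp)):
slope, intercept = np.polyfit(t, np.log(data), 1)
print(-slope, np.exp(intercept))
```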
@@ -551,7 +551,7 @@ def qqplot(x, o_y, func, p):
     my_y = [o.value for o in residuals]
     probplot = scipy.stats.probplot(my_y)
     my_x = probplot[0][0]
-    fig = plt.figure(figsize=(8, 8 / 1.618))
+    plt.figure(figsize=(8, 8 / 1.618))
     plt.errorbar(my_x, my_y, fmt='o')
     fit_start = my_x[0]
     fit_stop = my_x[-1]

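scipy.stats.probplot returns the theoretical quantiles together with the ordered sample values; qqplot only keeps the theoretical quantiles probplot[0][0] as x-coordinates. A standalone sketch:

```python
import numpy as np
import scipy.stats

# probplot returns ((theoretical quantiles, ordered sample), (slope, intercept, r)).
rng = np.random.default_rng(42)
sample = rng.normal(size=100)
(theo_quantiles, ordered_sample), (slope, intercept, r) = scipy.stats.probplot(sample)
print(theo_quantiles[:3], r)  # r close to 1 for normally distributed residuals
```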
@@ -69,11 +69,11 @@ def read_ADerrors(file_path, bdio_path='./libbdio.so', **kwargs):
         ruinfo = bdio_get_ruinfo(fbdio)

         if ruinfo == 7:
-            print('MD5sum found') # For now we just ignore these entries and do not perform any checks on them
+            print('MD5sum found')  # For now we just ignore these entries and do not perform any checks on them
             continue

         if ruinfo < 0:
-            # EOF reached
+            # EOF reached
             break
         rlen = bdio_get_rlen(fbdio)

@@ -225,9 +225,9 @@ def write_ADerrors(obs_list, file_path, bdio_path='./libbdio.so', **kwargs):
         keys = list(obs.e_content.keys())
         ids = []
         for key in keys:
-            try: # Try to convert key to integer
+            try:  # Try to convert key to integer
                 ids.append(int(key))
-            except: # If not possible construct a hash
+            except:  # If not possible construct a hash
                 ids.append(int(hashlib.sha256(key.encode('utf-8')).hexdigest(), 16) % 10 ** 8)
         print('ids', ids)
         nt = []
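The except branch maps a non-numeric ensemble key to a stable eight-digit integer id via SHA-256. The same logic as a small standalone function (ValueError is caught here instead of the bare except used above):

```python
import hashlib

def key_to_id(key):
    """Map an ensemble key to a deterministic eight-digit integer id."""
    try:
        return int(key)
    except ValueError:
        return int(hashlib.sha256(key.encode('utf-8')).hexdigest(), 16) % 10 ** 8

print(key_to_id('4711'))  # 4711
print(key_to_id('A654'))  # same eight-digit hash on every run
```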
@@ -237,7 +237,7 @@ def write_ADerrors(obs_list, file_path, bdio_path='./libbdio.so', **kwargs):
             for r_name in obs.e_content[e_name]:
                 r_length.append(len(obs.deltas[r_name]))

-            #e_N = np.sum(r_length)
+            # e_N = np.sum(r_length)
             nt.append(max(r_length) // 2)
         print('nt', nt)
         zero = neid * [0.0]

@@ -347,15 +347,15 @@ def read_mesons(file_path, bdio_path='./libbdio.so', **kwargs):
     b_form = form.encode('utf-8')

     ensemble_name = ''
-    volume = [] # lattice volume
+    volume = []  # lattice volume
     boundary_conditions = []
-    corr_name = [] # Contains correlator names
-    corr_type = [] # Contains correlator data type (important for reading out numerical data)
-    corr_props = [] # Contanis propagator types (Component of corr_kappa)
-    d0 = 0 # tvals
-    d1 = 0 # nnoise
-    prop_kappa = [] # Contains propagator kappas (Component of corr_kappa)
-    prop_source = [] # Contains propagator source positions
+    corr_name = []  # Contains correlator names
+    corr_type = []  # Contains correlator data type (important for reading out numerical data)
+    corr_props = []  # Contanis propagator types (Component of corr_kappa)
+    d0 = 0  # tvals
+    d1 = 0  # nnoise
+    prop_kappa = []  # Contains propagator kappas (Component of corr_kappa)
+    prop_source = []  # Contains propagator source positions
     # Check noise type for multiple replica?
     cnfg_no = -1
     corr_no = -1

@@ -379,7 +379,7 @@ def read_mesons(file_path, bdio_path='./libbdio.so', **kwargs):
                     if corr_type[corr_no] == 'complex':
                         tmp_mean = np.mean(np.asarray(np.split(np.asarray(pd_buf[2 + 2 * d1:-2 * d1:2]), d0 - 2)), axis=1)
                     else:
-                        tmp_mean = np.mean(np.asarray(np.split(np.asarray(pd_buf[2 + d1:-d0 * d1 - d1]), d0 - 2)), axis=1)
+                        tmp_mean = np.mean(np.asarray(np.split(np.asarray(pd_buf[2 + d1:-d0 * d1 - d1]), d0 - 2)), axis=1)

                     data[corr_no].append(tmp_mean)
                 corr_no += 1

@@ -447,7 +447,7 @@ def read_mesons(file_path, bdio_path='./libbdio.so', **kwargs):
         print('Number of corrs: ', len(corr_name))
         print('Number of configurations: ', cnfg_no + 1)

-    corr_kappa = [] # Contains kappa values for both propagators of given correlation function
+    corr_kappa = []  # Contains kappa values for both propagators of given correlation function
     corr_source = []
     for item in corr_props:
         corr_kappa.append([float(prop_kappa[int(item[0])]), float(prop_kappa[int(item[1])])])

@@ -524,14 +524,14 @@ def read_dSdm(file_path, bdio_path='./libbdio.so', **kwargs):
     b_form = form.encode('utf-8')

     ensemble_name = ''
-    volume = [] # lattice volume
+    volume = []  # lattice volume
     boundary_conditions = []
-    corr_name = [] # Contains correlator names
-    corr_type = [] # Contains correlator data type (important for reading out numerical data)
-    corr_props = [] # Contains propagator types (Component of corr_kappa)
-    d0 = 0 # tvals
-    d1 = 0 # nnoise
-    prop_kappa = [] # Contains propagator kappas (Component of corr_kappa)
+    corr_name = []  # Contains correlator names
+    corr_type = []  # Contains correlator data type (important for reading out numerical data)
+    corr_props = []  # Contains propagator types (Component of corr_kappa)
+    d0 = 0  # tvals
+    d1 = 0  # nnoise
+    prop_kappa = []  # Contains propagator kappas (Component of corr_kappa)
     # Check noise type for multiple replica?
     cnfg_no = -1
     corr_no = -1

@@ -612,7 +612,7 @@ def read_dSdm(file_path, bdio_path='./libbdio.so', **kwargs):
         print('Number of corrs: ', len(corr_name))
         print('Number of configurations: ', cnfg_no + 1)

-    corr_kappa = [] # Contains kappa values for both propagators of given correlation function
+    corr_kappa = []  # Contains kappa values for both propagators of given correlation function
     corr_source = []
     for item in corr_props:
         corr_kappa.append(float(prop_kappa[int(item)]))

@@ -7,6 +7,7 @@ import numpy as np
 from ..pyerrors import Obs
+from ..correlators import Corr


 def read_meson_hd5(path, filestem, ens_id, meson='meson_0', tree='meson'):
     """Read hadrons meson hdf5 file and extract the meson labeled 'meson'

@@ -24,23 +25,23 @@ def read_meson_hd5(path, filestem, ens_id, meson='meson_0', tree='meson'):

     # Clean up file list
     files = []
-    for l in ls:
-        if l.startswith(filestem):
-            files.append(l)
+    for line in ls:
+        if line.startswith(filestem):
+            files.append(line)

     if not files:
         raise Exception('No files starting with', filestem, 'in folder', path)

     # Sort according to configuration number
-    get_cnfg_number = lambda x : int(x[len(filestem) + 1:-3])
+    get_cnfg_number = lambda x: int(x[len(filestem) + 1:-3])
     files.sort(key=get_cnfg_number)

     # Check that configurations are evenly spaced
     cnfg_numbers = []
-    for l in files:
-        cnfg_numbers.append(get_cnfg_number(l))
+    for line in files:
+        cnfg_numbers.append(get_cnfg_number(line))

-    if not all(np.diff(cnfg_numbers)==np.diff(cnfg_numbers)[0]):
+    if not all(np.diff(cnfg_numbers) == np.diff(cnfg_numbers)[0]):
         raise Exception('Configurations are not evenly spaced.')

     corr_data = []

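get_cnfg_number slices the configuration number out of file names of the form `<filestem>.<number>.h5`, and the np.diff check requires every consecutive gap to match the first one. A sketch with made-up file names:

```python
import numpy as np

filestem = 'meson'
files = ['meson.100.h5', 'meson.110.h5', 'meson.120.h5']

# Strip '<filestem>.' from the front and '.h5' from the back.
get_cnfg_number = lambda x: int(x[len(filestem) + 1:-3])
cnfg_numbers = [get_cnfg_number(f) for f in files]
print(cnfg_numbers)  # [100, 110, 120]

# All consecutive differences must equal the first one.
if not all(np.diff(cnfg_numbers) == np.diff(cnfg_numbers)[0]):
    raise Exception('Configurations are not evenly spaced.')
```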
@@ -22,10 +22,9 @@ class Jack:
         self.jacks = jacks
         self.N = list(map(np.size, self.jacks))
         self.max_binsize = len(self.N)
-        self.value = value #list(map(np.mean, self.jacks))
+        self.value = value  # list(map(np.mean, self.jacks))
         self.dvalue = list(map(_jack_error, self.jacks))

-
     def print(self, **kwargs):
         """Print basic properties of the Jack."""

@@ -42,26 +41,23 @@ class Jack:

         print('Result:\t %3.8e +/- %3.8e +/- %3.8e (%3.3f%%)' % (self.value, self.dvalue[b], self.dvalue[b] * np.sqrt(2 * b / self.N[0]), np.abs(self.dvalue[b] / self.value * 100)))

-
     def plot_tauint(self):
         plt.xlabel('binsize')
         plt.ylabel('tauint')
         length = self.max_binsize
         x = np.arange(length) + 1
-        plt.errorbar(x[:], (self.dvalue[:] / self.dvalue[0]) ** 2 / 2, yerr=np.sqrt(((2 * (self.dvalue[:] / self.dvalue[0]) ** 2 * np.sqrt(2 * x[:] / self.N[0])) / 2) ** 2
-                     + ((2 * (self.dvalue[:] / self.dvalue[0]) ** 2 * np.sqrt(2 / self.N[0])) / 2) ** 2), linewidth=1, capsize=2)
+        plt.errorbar(x[:], (self.dvalue[:] / self.dvalue[0]) ** 2 / 2, yerr=np.sqrt(((2 * (self.dvalue[:] / self.dvalue[0]) ** 2 * np.sqrt(2 * x[:] / self.N[0])) / 2) ** 2 + ((2 * (self.dvalue[:] / self.dvalue[0]) ** 2 * np.sqrt(2 / self.N[0])) / 2) ** 2), linewidth=1, capsize=2)
         plt.xlim(0.5, length + 0.5)
         plt.title('Tauint')
         plt.show()

-
     def plot_history(self):
         N = self.N
         x = np.arange(N)
         tmp = []
         for i in range(self.replicas):
             tmp.append(self.deltas[i] + self.r_values[i])
-        y = np.concatenate(tmp, axis=0) # Think about including kwarg to look only at some replica
+        y = np.concatenate(tmp, axis=0)  # Think about including kwarg to look only at some replica
         plt.errorbar(x, y, fmt='.', markersize=3)
         plt.xlim(-0.5, N - 0.5)
         plt.show()

@@ -97,7 +93,7 @@ def generate_jack(obs, **kwargs):
         max_b = 1

     for b in range(max_b):
-        #binning if necessary
+        # binning if necessary
         if b > 0:
             n = full_data.size // (b + 1)
             binned_data = np.zeros(n)

@@ -108,10 +104,9 @@ def generate_jack(obs, **kwargs):
         else:
             binned_data = full_data
             n = binned_data.size
-        #generate jacks from data
+        # generate jacks from data
         mean = np.mean(binned_data)
         tmp_jacks = np.zeros(n)
-        #print(binned_data)
         for i in range(n):
             tmp_jacks[i] = (n * mean - binned_data[i]) / (n - 1)
         jacks.append(tmp_jacks)

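The loop above builds leave-one-out (jackknife) samples: (n * mean - x_i) / (n - 1) is exactly the mean of the data with entry i removed. A vectorized check:

```python
import numpy as np

data = np.array([1.0, 2.0, 3.0, 4.0])
n = data.size
mean = data.mean()

# (n * mean - data[i]) / (n - 1) equals the mean of data without entry i.
jacks = (n * mean - data) / (n - 1)
print(jacks)                      # [3.         2.66666667 2.33333333 2.        ]
print(np.delete(data, 0).mean())  # 3.0, matches jacks[0]
```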
@@ -6,15 +6,17 @@ import autograd.numpy as anp  # Thinly-wrapped numpy
 from .pyerrors import derived_observable


-### This code block is directly taken from the current master branch of autograd and remains
+# This code block is directly taken from the current master branch of autograd and remains
 # only until the new version is released on PyPi
 from functools import partial
 from autograd.extend import defvjp

 _dot = partial(anp.einsum, '...ij,...jk->...ik')
 # batched diag
-_diag = lambda a: anp.eye(a.shape[-1])*a
+_diag = lambda a: anp.eye(a.shape[-1]) * a
 # batched diagonal, similar to matrix_diag in tensorflow
+
+
 def _matrix_diag(a):
     reps = anp.array(a.shape)
     reps[:-1] = 1

@@ -24,14 +26,17 @@ def _matrix_diag(a):

 # https://arxiv.org/pdf/1701.00392.pdf Eq(4.77)
 # Note the formula from Sec3.1 in https://people.maths.ox.ac.uk/gilesm/files/NA-08-01.pdf is incomplete
+
+
 def grad_eig(ans, x):
     """Gradient of a general square (complex valued) matrix"""
-    e, u = ans # eigenvalues as 1d array, eigenvectors in columns
+    e, u = ans  # eigenvalues as 1d array, eigenvectors in columns
     n = e.shape[-1]

     def vjp(g):
         ge, gu = g
         ge = _matrix_diag(ge)
-        f = 1/(e[..., anp.newaxis, :] - e[..., :, anp.newaxis] + 1.e-20)
+        f = 1 / (e[..., anp.newaxis, :] - e[..., :, anp.newaxis] + 1.e-20)
         f -= _diag(f)
         ut = anp.swapaxes(u, -1, -2)
         r1 = f * _dot(ut, gu)

@@ -43,8 +48,10 @@ def grad_eig(ans, x):
         # but the derivative should be real in real input case when imaginary delta is forbidden
         return r
     return vjp

+
 defvjp(anp.linalg.eig, grad_eig)
-### End of the code block from autograd.master
+# End of the code block from autograd.master
+

 def scalar_mat_op(op, obs, **kwargs):

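defvjp registers grad_eig as the reverse-mode gradient of anp.linalg.eig. One way to sanity-check such a registration against finite differences is autograd's own test utility; a sketch, which assumes the registration above has run (released autograd 1.3 has no eig gradient) and uses a symmetric matrix so the eigenvalues stay real:

```python
import autograd.numpy as anp
from autograd.test_util import check_grads

# Compare the registered eig VJP against numerical differentiation on the
# real part of the eigenvalues of a symmetric (hence real-spectrum) matrix.
def real_eigvals(x):
    return anp.real(anp.linalg.eig(x)[0])

a = anp.array([[2.0, 0.3], [0.3, 1.0]])
check_grads(real_eigvals, modes=['rev'], order=1)(a)
```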
@@ -92,8 +99,8 @@ def eig(obs, **kwargs):
     """Computes the eigenvalues of a given matrix of Obs according to np.linalg.eig."""
     if kwargs.get('num_grad') is True:
         return _num_diff_eig(obs, **kwargs)
-    # Note: Automatic differentiation of eig is implemented in the git of autograd
-    # but not yet released to PyPi (1.3)
+    # Note: Automatic differentiation of eig is implemented in the git of autograd
+    # but not yet released to PyPi (1.3)
     w = derived_observable(lambda x, **kwargs: anp.real(anp.linalg.eig(x)[0]), obs)
     return w

@ -214,7 +221,6 @@ def _num_diff_eigh(obs, **kwargs):
|
|||
for i in range(dim):
|
||||
res_vec.append(derived_observable(_mat, raveled_obs, n=0, i=i, **kwargs))
|
||||
|
||||
|
||||
res_mat = []
|
||||
for i in range(dim):
|
||||
row = []
|
||||
|
@@ -244,7 +250,7 @@ def _num_diff_eig(obs, **kwargs):
         res = np.linalg.eig(np.array(mat))[n]

         if n == 0:
-            # Discard imaginary part of eigenvalue here
+            # Discard imaginary part of eigenvalue here
             return np.real(res[kwargs.get('i')])
         else:
             return res[kwargs.get('i')][kwargs.get('j')]

|
@ -260,8 +266,8 @@ def _num_diff_eig(obs, **kwargs):
|
|||
|
||||
res_vec = []
|
||||
for i in range(dim):
|
||||
# Note: Automatic differentiation of eig is implemented in the git of autograd
|
||||
# but not yet released to PyPi (1.3)
|
||||
# Note: Automatic differentiation of eig is implemented in the git of autograd
|
||||
# but not yet released to PyPi (1.3)
|
||||
res_vec.append(derived_observable(_mat, raveled_obs, n=0, i=i, **kwargs))
|
||||
|
||||
return np.array(res_vec) @ np.identity(dim)
|
||||
|
|
|
@@ -81,4 +81,3 @@ def ks_test(obs=None):
     plt.show()

     print(scipy.stats.kstest(Qs, 'uniform'))
-

@@ -44,7 +44,7 @@ def matrix_pencil_method(corrs, k=1, p=None, **kwargs):
     # Construct the hankel matrices
     matrix = []
     for n in range(data_sets):
-        matrix.append(scipy.linalg.hankel(data[n][:n_data-p], data[n][n_data-p-1:]))
+        matrix.append(scipy.linalg.hankel(data[n][:n_data - p], data[n][n_data - p - 1:]))
     matrix = np.array(matrix)
     # Construct y1 and y2
     y1 = np.concatenate(matrix[:, :, :p])

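scipy.linalg.hankel(c, r) builds the matrix whose first column is c and whose last row is r, so the slicing above turns the time series into an (n_data - p) x (p + 1) sliding-window matrix. A small numeric sketch:

```python
import numpy as np
import scipy.linalg

data = np.arange(6)  # pretend correlator values at t = 0..5
n_data, p = len(data), 2

# First column data[:4], last row data[3:] -> a 4 x 3 Hankel matrix.
h = scipy.linalg.hankel(data[:n_data - p], data[n_data - p - 1:])
print(h)
# [[0 1 2]
#  [1 2 3]
#  [2 3 4]
#  [3 4 5]]
```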
@@ -60,7 +60,7 @@ def matrix_pencil_method(corrs, k=1, p=None, **kwargs):

 def matrix_pencil_method_old(data, p, noise_level=None, verbose=1, **kwargs):
     """ Older impleentation of the matrix pencil method with pencil p on given data to
-         extract energy levels.
+    extract energy levels.

     Parameters
     ----------

@@ -79,7 +79,7 @@ def matrix_pencil_method_old(data, p, noise_level=None, verbose=1, **kwargs):
     if n_data <= p:
         raise Exception('The pencil p has to be smaller than the number of data samples.')

-    matrix = scipy.linalg.hankel(data[:n_data-p], data[n_data-p-1:]) @ np.identity(p + 1)
+    matrix = scipy.linalg.hankel(data[:n_data - p], data[n_data - p - 1:]) @ np.identity(p + 1)

     if noise_level is not None:
         u, s, vh = svd(matrix)

@@ -91,7 +91,6 @@ class Obs:
         self.e_n_tauint = {}
         self.e_n_dtauint = {}

-
     def gamma_method(self, **kwargs):
         """Calculate the error and related properties of the Obs.

@@ -249,7 +248,7 @@ class Obs:

             e_gamma[e_name] /= div[:w_max]

-            if np.abs(e_gamma[e_name][0]) < 10 * np.finfo(float).tiny: # Prevent division by zero
+            if np.abs(e_gamma[e_name][0]) < 10 * np.finfo(float).tiny:  # Prevent division by zero
                 self.e_tauint[e_name] = 0.5
                 self.e_dtauint[e_name] = 0.0
                 self.e_dvalue[e_name] = 0.0

@@ -262,16 +261,13 @@ class Obs:
             # Make sure no entry of tauint is smaller than 0.5
             self.e_n_tauint[e_name][self.e_n_tauint[e_name] < 0.5] = 0.500000000001
             # hep-lat/0306017 eq. (42)
-            self.e_n_dtauint[e_name] = self.e_n_tauint[e_name] * 2 * np.sqrt(np.abs(np.arange(w_max)
-                                                                             + 0.5 - self.e_n_tauint[e_name]) / e_N)
+            self.e_n_dtauint[e_name] = self.e_n_tauint[e_name] * 2 * np.sqrt(np.abs(np.arange(w_max) + 0.5 - self.e_n_tauint[e_name]) / e_N)
             self.e_n_dtauint[e_name][0] = 0.0

-
             def _compute_drho(i):
-                tmp = self.e_rho[e_name][i+1:w_max] + np.concatenate([self.e_rho[e_name][i-1::-1], self.e_rho[e_name][1:w_max - 2 * i]]) - 2 * self.e_rho[e_name][i] * self.e_rho[e_name][1:w_max - i]
+                tmp = self.e_rho[e_name][i + 1:w_max] + np.concatenate([self.e_rho[e_name][i - 1::-1], self.e_rho[e_name][1:w_max - 2 * i]]) - 2 * self.e_rho[e_name][i] * self.e_rho[e_name][1:w_max - i]
                 self.e_drho[e_name][i] = np.sqrt(np.sum(tmp ** 2) / e_N)

-
             _compute_drho(1)
             if self.tau_exp[e_name] > 0:
                 # Critical slowing down analysis

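The merged line implements hep-lat/0306017 eq. (42) for the error of the integrated autocorrelation time at window W with N measurements:

$$\delta\tau_{\mathrm{int}}(W) \approx 2\,\tau_{\mathrm{int}}(W)\,\sqrt{\frac{W + \tfrac{1}{2} - \tau_{\mathrm{int}}(W)}{N}}$$

The np.abs under the square root guards against slightly negative arguments at small windows.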
@@ -279,10 +275,9 @@ class Obs:
                     _compute_drho(n + 1)
                     if (self.e_rho[e_name][n] - self.N_sigma * self.e_drho[e_name][n]) < 0 or n >= w_max // 2 - 2:
                         # Bias correction hep-lat/0306017 eq. (49) included
-                        self.e_tauint[e_name] = self.e_n_tauint[e_name][n] * (1 + (2 * n + 1) / e_N) / (1 + 1 / e_N) + self.tau_exp[e_name] * np.abs(self.e_rho[e_name][n + 1])
-                        # The absolute makes sure, that the tail contribution is always positive
+                        self.e_tauint[e_name] = self.e_n_tauint[e_name][n] * (1 + (2 * n + 1) / e_N) / (1 + 1 / e_N) + self.tau_exp[e_name] * np.abs(self.e_rho[e_name][n + 1])  # The absolute makes sure, that the tail contribution is always positive
                         self.e_dtauint[e_name] = np.sqrt(self.e_n_dtauint[e_name][n] ** 2 + self.tau_exp[e_name] ** 2 * self.e_drho[e_name][n + 1] ** 2)
-                        # Error of tau_exp neglected so far, missing term: self.e_rho[e_name][n + 1] ** 2 * d_tau_exp ** 2
+                        # Error of tau_exp neglected so far, missing term: self.e_rho[e_name][n + 1] ** 2 * d_tau_exp ** 2
                         self.e_dvalue[e_name] = np.sqrt(2 * self.e_tauint[e_name] * e_gamma[e_name][0] * (1 + 1 / e_N) / e_N)
                         self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt((n + 0.5) / e_N)
                         self.e_windowsize[e_name] = n

|
@ -295,7 +290,7 @@ class Obs:
|
|||
if n < w_max // 2 - 2:
|
||||
_compute_drho(n + 1)
|
||||
if g_w[n - 1] < 0 or n >= w_max - 1:
|
||||
self.e_tauint[e_name] = self.e_n_tauint[e_name][n] * (1 + (2 * n + 1) / e_N) / (1 + 1 / e_N) # Bias correction hep-lat/0306017 eq. (49)
|
||||
self.e_tauint[e_name] = self.e_n_tauint[e_name][n] * (1 + (2 * n + 1) / e_N) / (1 + 1 / e_N) # Bias correction hep-lat/0306017 eq. (49)
|
||||
self.e_dtauint[e_name] = self.e_n_dtauint[e_name][n]
|
||||
self.e_dvalue[e_name] = np.sqrt(2 * self.e_tauint[e_name] * e_gamma[e_name][0] * (1 + 1 / e_N) / e_N)
|
||||
self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt((n + 0.5) / e_N)
|
||||
|
@ -325,7 +320,6 @@ class Obs:
|
|||
self.ddvalue = np.sqrt(self.ddvalue) / self.dvalue
|
||||
return 0
|
||||
|
||||
|
||||
def print(self, level=1):
|
||||
"""Print basic properties of the Obs."""
|
||||
if level == 0:
|
||||
|
@@ -338,7 +332,7 @@ class Obs:
                 if len(self.e_names) > 1:
                     print('', e_name, '\t %3.8e +/- %3.8e' % (self.e_dvalue[e_name], self.e_ddvalue[e_name]))
                 if self.tau_exp[e_name] > 0:
-                    print(' t_int\t %3.8e +/- %3.8e tau_exp = %3.2f, N_sigma = %1.0i' % (self.e_tauint[e_name], self.e_dtauint[e_name], self.tau_exp[e_name], self.N_sigma))
+                    print(' t_int\t %3.8e +/- %3.8e tau_exp = %3.2f, N_sigma = %1.0i' % (self.e_tauint[e_name], self.e_dtauint[e_name], self.tau_exp[e_name], self.N_sigma))
                 else:
                     print(' t_int\t %3.8e +/- %3.8e S = %3.2f' % (self.e_tauint[e_name], self.e_dtauint[e_name], self.S[e_name]))
             if level > 1:

@@ -346,7 +340,6 @@ class Obs:
             for e_name in self.e_names:
                 print(e_name, ':', self.e_content[e_name])

-
     def plot_tauint(self, save=None):
         """Plot integrated autocorrelation time for each ensemble."""
         if not self.e_names:

@@ -360,15 +353,15 @@ class Obs:
             if self.tau_exp[e_name] > 0:
                 base = self.e_n_tauint[e_name][self.e_windowsize[e_name]]
                 x_help = np.arange(2 * self.tau_exp[e_name])
-                y_help = (x_help + 1) * np.abs(self.e_rho[e_name][self.e_windowsize[e_name]+1]) * (1 - x_help / (2 * (2 * self.tau_exp[e_name] - 1))) + base
+                y_help = (x_help + 1) * np.abs(self.e_rho[e_name][self.e_windowsize[e_name] + 1]) * (1 - x_help / (2 * (2 * self.tau_exp[e_name] - 1))) + base
                 x_arr = np.arange(self.e_windowsize[e_name] + 1, self.e_windowsize[e_name] + 1 + 2 * self.tau_exp[e_name])
                 plt.plot(x_arr, y_help, 'C' + str(e), linewidth=1, ls='--', marker=',')
                 plt.errorbar([self.e_windowsize[e_name] + 2 * self.tau_exp[e_name]], [self.e_tauint[e_name]],
                              yerr=[self.e_dtauint[e_name]], fmt='C' + str(e), linewidth=1, capsize=2, marker='o', mfc=plt.rcParams['axes.facecolor'])
                 xmax = self.e_windowsize[e_name] + 2 * self.tau_exp[e_name] + 1.5
-                label = e_name + r', $\tau_\mathrm{exp}$='+str(np.around(self.tau_exp[e_name], decimals=2))
+                label = e_name + r', $\tau_\mathrm{exp}$=' + str(np.around(self.tau_exp[e_name], decimals=2))
             else:
-                label = e_name + ', S=' +str(np.around(self.S[e_name], decimals=2))
+                label = e_name + ', S=' + str(np.around(self.S[e_name], decimals=2))
                 xmax = max(10.5, 2 * self.e_windowsize[e_name] - 0.5)

             plt.errorbar(np.arange(length), self.e_n_tauint[e_name][:], yerr=self.e_n_dtauint[e_name][:], linewidth=1, capsize=2, label=label)

@@ -380,7 +373,6 @@ class Obs:
         if save:
             fig.savefig(save)

-
     def plot_rho(self):
         """Plot normalized autocorrelation function time for each ensemble."""
         if not self.e_names:

@@ -395,7 +387,7 @@ class Obs:
                 plt.plot([self.e_windowsize[e_name] + 1, self.e_windowsize[e_name] + 1 + 2 * self.tau_exp[e_name]],
                          [self.e_rho[e_name][self.e_windowsize[e_name] + 1], 0], 'k-', lw=1)
                 xmax = self.e_windowsize[e_name] + 2 * self.tau_exp[e_name] + 1.5
-                plt.title('Rho ' + e_name + ', tau\_exp=' + str(np.around(self.tau_exp[e_name], decimals=2)))
+                plt.title('Rho ' + e_name + r', tau\_exp=' + str(np.around(self.tau_exp[e_name], decimals=2)))
             else:
                 xmax = max(10.5, 2 * self.e_windowsize[e_name] - 0.5)
                 plt.title('Rho ' + e_name + ', S=' + str(np.around(self.S[e_name], decimals=2)))

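The r-prefix added here silences flake8 W605 (invalid escape sequence): in a plain string '\_' is not a recognized escape and is deprecated, while in a raw string the backslash is kept literally, which is what the LaTeX-style label needs:

```python
# In a plain string, '\_' triggers a DeprecationWarning (invalid escape
# sequence); a raw string keeps the backslash as-is.
print(r'Rho ens, tau\_exp=2.0')  # prints: Rho ens, tau\_exp=2.0
```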
@@ -403,7 +395,6 @@ class Obs:
             plt.xlim(-0.5, xmax)
             plt.draw()

-
     def plot_rep_dist(self):
         """Plot replica distribution for each ensemble with more than one replicum."""
         if not self.e_names:

@@ -423,33 +414,30 @@ class Obs:
             for r, r_name in enumerate(self.e_content[e_name]):
                 arr[r] = (self.r_values[r_name] - sub_r_mean) / (self.e_dvalue[e_name] * np.sqrt(e_N / self.shape[r_name] - 1))
             plt.hist(arr, rwidth=0.8, bins=len(self.e_content[e_name]))
-            plt.title('Replica distribution' + e_name + ' (mean=0, var=1), Q='+str(np.around(self.e_Q[e_name], decimals=2)))
+            plt.title('Replica distribution' + e_name + ' (mean=0, var=1), Q=' + str(np.around(self.e_Q[e_name], decimals=2)))
             plt.show()

-
     def plot_history(self):
         """Plot derived Monte Carlo history for each ensemble."""
         if not self.e_names:
             raise Exception('Run the gamma method first.')

         for e, e_name in enumerate(self.e_names):
-            f = plt.figure()
+            plt.figure()
             r_length = []
-            sub_r_mean = 0
             for r, r_name in enumerate(self.e_content[e_name]):
                 r_length.append(len(self.deltas[r_name]))
             e_N = np.sum(r_length)
             x = np.arange(e_N)
             tmp = []
             for r, r_name in enumerate(self.e_content[e_name]):
-                tmp.append(self.deltas[r_name]+self.r_values[r_name])
+                tmp.append(self.deltas[r_name] + self.r_values[r_name])
             y = np.concatenate(tmp, axis=0)
             plt.errorbar(x, y, fmt='.', markersize=3)
             plt.xlim(-0.5, e_N - 0.5)
             plt.title(e_name)
             plt.show()

-
     def plot_piechart(self):
         """Plot piechart which shows the fractional contribution of each
         ensemble to the error and returns a dictionary containing the fractions."""

@@ -480,19 +468,17 @@ class Obs:
         with open(file_name, 'wb') as fb:
             pickle.dump(self, fb)

-
     def __repr__(self):
         if self.dvalue == 0.0:
-            return 'Obs['+str(self.value)+']'
+            return 'Obs[' + str(self.value) + ']'
         fexp = np.floor(np.log10(self.dvalue))
         if fexp < 0.0:
-            return 'Obs[{:{form}}({:2.0f})]'.format(self.value, self.dvalue * 10 ** (-fexp + 1), form='.'+str(-int(fexp) + 1) + 'f')
+            return 'Obs[{:{form}}({:2.0f})]'.format(self.value, self.dvalue * 10 ** (-fexp + 1), form='.' + str(-int(fexp) + 1) + 'f')
         elif fexp == 0.0:
             return 'Obs[{:.1f}({:1.1f})]'.format(self.value, self.dvalue)
         else:
             return 'Obs[{:.0f}({:2.0f})]'.format(self.value, self.dvalue)

-
     # Overload comparisons
     def __lt__(self, other):
         return self.value < other

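__repr__ prints an Obs in compact value(error) notation, with the error scaled to two digits. A standalone sketch of the fexp < 0 branch with illustrative numbers:

```python
import numpy as np

value, dvalue = 0.01234, 0.0005
fexp = np.floor(np.log10(dvalue))       # -4.0
form = '.' + str(-int(fexp) + 1) + 'f'  # '.5f'
print('Obs[{:{form}}({:2.0f})]'.format(value, dvalue * 10 ** (-fexp + 1), form=form))
# Obs[0.01234(50)]
```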
@@ -500,7 +486,6 @@ class Obs:
     def __gt__(self, other):
         return self.value > other

-
     # Overload math operations
     def __add__(self, y):
         if isinstance(y, Obs):

@@ -512,10 +497,10 @@ class Obs:
                 return NotImplemented
         else:
             return derived_observable(lambda x, **kwargs: x[0] + y, [self], man_grad=[1])
+
     def __radd__(self, y):
         return self + y

-
     def __mul__(self, y):
         if isinstance(y, Obs):
             return derived_observable(lambda x, **kwargs: x[0] * x[1], [self, y], man_grad=[y.value, self.value])

@@ -531,7 +516,6 @@ class Obs:
     def __rmul__(self, y):
         return self * y

-
     def __sub__(self, y):
         if isinstance(y, Obs):
             return derived_observable(lambda x, **kwargs: x[0] - x[1], [self, y], man_grad=[1, -1])

@@ -545,15 +529,12 @@ class Obs:
         else:
             return derived_observable(lambda x, **kwargs: x[0] - y, [self], man_grad=[1])

-
     def __rsub__(self, y):
         return -1 * (self - y)

-
     def __neg__(self):
         return -1 * self

-
     def __truediv__(self, y):
         if isinstance(y, Obs):
             return derived_observable(lambda x, **kwargs: x[0] / x[1], [self, y], man_grad=[1 / y.value, - self.value / y.value ** 2])

@@ -567,7 +548,6 @@ class Obs:
         else:
             return derived_observable(lambda x, **kwargs: x[0] / y, [self], man_grad=[1 / y])

-
     def __rtruediv__(self, y):
         if isinstance(y, Obs):
             return derived_observable(lambda x, **kwargs: x[0] / x[1], [y, self], man_grad=[1 / self.value, - y.value / self.value ** 2])

@@ -577,86 +557,67 @@ class Obs:
         else:
             return derived_observable(lambda x, **kwargs: y / x[0], [self], man_grad=[-y / self.value ** 2])

-
     def __pow__(self, y):
         if isinstance(y, Obs):
             return derived_observable(lambda x: x[0] ** x[1], [self, y])
         else:
             return derived_observable(lambda x: x[0] ** y, [self])

-
     def __rpow__(self, y):
         if isinstance(y, Obs):
             return derived_observable(lambda x: x[0] ** x[1], [y, self])
         else:
             return derived_observable(lambda x: y ** x[0], [self])

-
     def __abs__(self):
         return derived_observable(lambda x: anp.abs(x[0]), [self])

-
     # Overload numpy functions
     def sqrt(self):
         return derived_observable(lambda x, **kwargs: np.sqrt(x[0]), [self], man_grad=[1 / 2 / np.sqrt(self.value)])

-
     def log(self):
         return derived_observable(lambda x, **kwargs: np.log(x[0]), [self], man_grad=[1 / self.value])

-
     def exp(self):
         return derived_observable(lambda x, **kwargs: np.exp(x[0]), [self], man_grad=[np.exp(self.value)])

-
     def sin(self):
         return derived_observable(lambda x, **kwargs: np.sin(x[0]), [self], man_grad=[np.cos(self.value)])

-
     def cos(self):
         return derived_observable(lambda x, **kwargs: np.cos(x[0]), [self], man_grad=[-np.sin(self.value)])

-
     def tan(self):
         return derived_observable(lambda x, **kwargs: np.tan(x[0]), [self], man_grad=[1 / np.cos(self.value) ** 2])

-
     def arcsin(self):
         return derived_observable(lambda x: anp.arcsin(x[0]), [self])

-
     def arccos(self):
         return derived_observable(lambda x: anp.arccos(x[0]), [self])

-
     def arctan(self):
         return derived_observable(lambda x: anp.arctan(x[0]), [self])

-
     def sinh(self):
         return derived_observable(lambda x, **kwargs: np.sinh(x[0]), [self], man_grad=[np.cosh(self.value)])

-
     def cosh(self):
         return derived_observable(lambda x, **kwargs: np.cosh(x[0]), [self], man_grad=[np.sinh(self.value)])

-
     def tanh(self):
         return derived_observable(lambda x, **kwargs: np.tanh(x[0]), [self], man_grad=[1 / np.cosh(self.value) ** 2])

-
     def arcsinh(self):
         return derived_observable(lambda x: anp.arcsinh(x[0]), [self])

-
     def arccosh(self):
         return derived_observable(lambda x: anp.arccosh(x[0]), [self])

-
     def arctanh(self):
         return derived_observable(lambda x: anp.arctanh(x[0]), [self])

-
     def sinc(self):
         return derived_observable(lambda x: anp.sinc(x[0]), [self])

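All the wrappers above share one pattern: apply the function to the mean and hand derived_observable the analytic derivative at self.value through man_grad, so errors propagate linearly as

$$\delta f \approx \lvert f'(\bar{x})\rvert\,\delta x.$$

For example, sin passes man_grad=[np.cos(self.value)], matching d/dx sin(x) = cos(x); the arc and sinc variants omit man_grad and let autograd differentiate the anp version instead.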
|
@ -751,7 +712,7 @@ def derived_observable(func, data, **kwargs):
|
|||
kwarg = kwargs.get(key)
|
||||
if kwarg is not None:
|
||||
options[key] = kwarg
|
||||
tmp_df = nd.Gradient(func, order=4, **{k:v for k, v in options.items() if v is not None})(values, **kwargs)
|
||||
tmp_df = nd.Gradient(func, order=4, **{k: v for k, v in options.items() if v is not None})(values, **kwargs)
|
||||
if tmp_df.size == 1:
|
||||
deriv = np.array([tmp_df.real])
|
||||
else:
|
||||
|
@ -933,8 +894,7 @@ def covariance2(obs1, obs2, correlation=False, **kwargs):
|
|||
max_gamma = min(obs1.shape[r_name], w_max)
|
||||
# The padding for the fft has to be even
|
||||
padding = obs1.shape[r_name] + max_gamma + (obs1.shape[r_name] + max_gamma) % 2
|
||||
e_gamma[e_name][:max_gamma] += (np.fft.irfft(np.fft.rfft(obs1.deltas[r_name], padding) * np.conjugate(np.fft.rfft(obs2.deltas[r_name], padding)))[:max_gamma]
|
||||
+ np.fft.irfft(np.fft.rfft(obs2.deltas[r_name], padding) * np.conjugate(np.fft.rfft(obs1.deltas[r_name], padding)))[:max_gamma]) / 2.0
|
||||
e_gamma[e_name][:max_gamma] += (np.fft.irfft(np.fft.rfft(obs1.deltas[r_name], padding) * np.conjugate(np.fft.rfft(obs2.deltas[r_name], padding)))[:max_gamma] + np.fft.irfft(np.fft.rfft(obs2.deltas[r_name], padding) * np.conjugate(np.fft.rfft(obs1.deltas[r_name], padding)))[:max_gamma]) / 2.0
|
||||
|
||||
if np.all(e_gamma[e_name]) == 0.0:
|
||||
continue
|
||||
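The rfft/irfft pair computes the cross-correlation of the two fluctuation series via the convolution theorem; the zero padding keeps the circular convolution from wrapping around. A check against a direct loop, with made-up deltas:

```python
import numpy as np

a = np.array([0.5, -0.2, 0.1, -0.4])
b = np.array([0.3, 0.1, -0.3, -0.1])
n, max_gamma = len(a), 3
padding = n + max_gamma + (n + max_gamma) % 2

# Cross-correlation via FFT (convolution theorem with zero padding).
gamma_fft = np.fft.irfft(np.fft.rfft(a, padding) * np.conjugate(np.fft.rfft(b, padding)))[:max_gamma]

# Direct evaluation of sum_i a[i + t] * b[i].
gamma_direct = np.array([np.sum(a[t:] * b[:n - t]) for t in range(max_gamma)])
print(np.allclose(gamma_fft, gamma_direct))  # True
```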
|
@ -964,7 +924,6 @@ def covariance2(obs1, obs2, correlation=False, **kwargs):
|
|||
# Make sure no entry of tauint is smaller than 0.5
|
||||
e_n_tauint[e_name][e_n_tauint[e_name] < 0.5] = 0.500000000001
|
||||
|
||||
|
||||
window = max(obs1.e_windowsize[e_name], obs2.e_windowsize[e_name])
|
||||
# Bias correction hep-lat/0306017 eq. (49)
|
||||
e_dvalue[e_name] = 2 * (e_n_tauint[e_name][window] + obs1.tau_exp[e_name] * np.abs(e_rho[e_name][window + 1])) * (1 + (2 * window + 1) / e_N) * e_gamma[e_name][0] / e_N
|
||||
|
@ -1105,7 +1064,6 @@ def load_object(path):
|
|||
with open(path, 'rb') as file:
|
||||
return pickle.load(file)
|
||||
|
||||
|
||||
def merge_obs(list_of_obs):
|
||||
"""Combine all observables in list_of_obs into one new observable
|
||||
|
||||
|
@@ -1113,10 +1071,10 @@ def merge_obs(list_of_obs):
     """
     replist = [item for obs in list_of_obs for item in obs.names]
     if (len(replist) == len(set(replist))) is False:
-        raise Exception('list_of_obs contains duplicate replica: %s' %(str(replist)))
+        raise Exception('list_of_obs contains duplicate replica: %s' % (str(replist)))
     new_dict = {}
     for o in list_of_obs:
         new_dict.update({key: o.deltas.get(key, 0) + o.r_values.get(key, 0)
-                        for key in set(o.deltas) | set(o.r_values)})
+                         for key in set(o.deltas) | set(o.r_values)})

     return Obs(list(new_dict.values()), list(new_dict.keys()))

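The dict comprehension in merge_obs adds each replicum's fluctuations (deltas) back onto its mean (r_values), keyed by replica name. The same mechanics with plain dicts and made-up replica names:

```python
import numpy as np

deltas = {'ens|r01': np.array([0.1, -0.1]), 'ens|r02': np.array([0.2, -0.2])}
r_values = {'ens|r01': 1.0, 'ens|r02': 1.1}

# Union of keys; missing entries default to 0 on either side.
new_dict = {key: deltas.get(key, 0) + r_values.get(key, 0)
            for key in set(deltas) | set(r_values)}
print(new_dict['ens|r01'])  # [1.1 0.9]
```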
@@ -3,7 +3,8 @@

 import scipy.optimize
 from autograd import jacobian
-from .pyerrors import Obs, derived_observable, pseudo_Obs
+from .pyerrors import derived_observable, pseudo_Obs
+

 def find_root(d, func, guess=1.0, **kwargs):
     """Finds the root of the function func(x, d) where d is an Obs.

@@ -18,7 +19,7 @@ def find_root(d, func, guess=1.0, **kwargs):

     # Error propagation as detailed in arXiv:1809.01289
     dx = jacobian(func)(root[0], d.value)
-    da = jacobian(lambda u, v : func(v, u))(d.value, root[0])
+    da = jacobian(lambda u, v: func(v, u))(d.value, root[0])
     deriv = - da / dx

     return derived_observable(lambda x, **kwargs: x[0], [pseudo_Obs(root, 0.0, d.names[0], d.shape[d.names[0]]), d], man_grad=[0, deriv])

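deriv = -da / dx is the implicit function theorem: if the root x(d) is defined by func(x(d), d) = 0, differentiating the constraint gives

$$\frac{\mathrm{d}x}{\mathrm{d}d} = -\,\frac{\partial f/\partial d}{\partial f/\partial x},$$

with both partial derivatives evaluated at the root, exactly the two jacobian calls above.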