Mirror of https://github.com/fjosw/pyerrors.git, synced 2025-05-14 19:43:41 +02:00

Commit: flake8 style changes
commit 57c6a07801, parent 43f85efff5
9 changed files with 87 additions and 128 deletions
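The hunks below are almost entirely mechanical fixes for a handful of recurring flake8 rules. As orientation, a minimal runnable sketch of the patterns involved (the variable names here are made up, not taken from the pyerrors sources):

    import numpy as np

    a, b, n, p = 2.0, 3.0, 6, 2
    data = np.arange(n)

    # E225/E226/E231: missing whitespace around operators and after ':'
    bad = lambda x : a*b + np.sum(x[:n-p])      # before
    good = lambda x: a * b + np.sum(x[:n - p])  # after
    assert bad(data) == good(data)

    # E262/E265: comments start with '# ', not '#comment' or '### comment'

    # E741: the single-letter name 'l' is ambiguous (reads as 1); renamed
    lines = [line for line in ['a1', 'b2'] if line]

    # F841: assignments to never-used locals, e.g. fig = plt.figure(),
    # are dropped in favour of the bare call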
@@ -293,7 +293,7 @@ def odr_fit(x, y, func, silent=False, **kwargs):
     old_jac = jacobian(func)(output.beta, output.xplus)
     fused_row1 = np.concatenate((old_jac, np.concatenate((number_of_x_parameters * [np.zeros(old_jac.shape)]), axis=0)))
-    fused_row2 = np.concatenate((jacobian(lambda x, y : func(y, x))(output.xplus, output.beta).reshape(x_f.shape[-1], x_f.shape[-1] * number_of_x_parameters), np.identity(number_of_x_parameters * old_jac.shape[0])))
+    fused_row2 = np.concatenate((jacobian(lambda x, y: func(y, x))(output.xplus, output.beta).reshape(x_f.shape[-1], x_f.shape[-1] * number_of_x_parameters), np.identity(number_of_x_parameters * old_jac.shape[0])))
     new_jac = np.concatenate((fused_row1, fused_row2), axis=1)

     A = W @ new_jac

@@ -551,7 +551,7 @@ def qqplot(x, o_y, func, p):
     my_y = [o.value for o in residuals]
     probplot = scipy.stats.probplot(my_y)
     my_x = probplot[0][0]
-    fig = plt.figure(figsize=(8, 8 / 1.618))
+    plt.figure(figsize=(8, 8 / 1.618))
     plt.errorbar(my_x, my_y, fmt='o')
     fit_start = my_x[0]
     fit_stop = my_x[-1]
@@ -237,7 +237,7 @@ def write_ADerrors(obs_list, file_path, bdio_path='./libbdio.so', **kwargs):
         for r_name in obs.e_content[e_name]:
             r_length.append(len(obs.deltas[r_name]))

-        #e_N = np.sum(r_length)
+        # e_N = np.sum(r_length)
         nt.append(max(r_length) // 2)
         print('nt', nt)
         zero = neid * [0.0]
@@ -7,6 +7,7 @@ import numpy as np
 from ..pyerrors import Obs
 from ..correlators import Corr

+
 def read_meson_hd5(path, filestem, ens_id, meson='meson_0', tree='meson'):
     """Read hadrons meson hdf5 file and extract the meson labeled 'meson'

@@ -24,23 +25,23 @@ def read_meson_hd5(path, filestem, ens_id, meson='meson_0', tree='meson'):

     # Clean up file list
     files = []
-    for l in ls:
-        if l.startswith(filestem):
-            files.append(l)
+    for line in ls:
+        if line.startswith(filestem):
+            files.append(line)

     if not files:
         raise Exception('No files starting with', filestem, 'in folder', path)

     # Sort according to configuration number
-    get_cnfg_number = lambda x : int(x[len(filestem) + 1:-3])
+    get_cnfg_number = lambda x: int(x[len(filestem) + 1:-3])
     files.sort(key=get_cnfg_number)

     # Check that configurations are evenly spaced
     cnfg_numbers = []
-    for l in files:
-        cnfg_numbers.append(get_cnfg_number(l))
+    for line in files:
+        cnfg_numbers.append(get_cnfg_number(line))

-    if not all(np.diff(cnfg_numbers)==np.diff(cnfg_numbers)[0]):
+    if not all(np.diff(cnfg_numbers) == np.diff(cnfg_numbers)[0]):
         raise Exception('Configurations are not evenly spaced.')

     corr_data = []
@@ -22,10 +22,9 @@ class Jack:
         self.jacks = jacks
         self.N = list(map(np.size, self.jacks))
         self.max_binsize = len(self.N)
-        self.value = value #list(map(np.mean, self.jacks))
+        self.value = value  # list(map(np.mean, self.jacks))
         self.dvalue = list(map(_jack_error, self.jacks))

-
     def print(self, **kwargs):
         """Print basic properties of the Jack."""

@@ -42,19 +41,16 @@ class Jack:

         print('Result:\t %3.8e +/- %3.8e +/- %3.8e (%3.3f%%)' % (self.value, self.dvalue[b], self.dvalue[b] * np.sqrt(2 * b / self.N[0]), np.abs(self.dvalue[b] / self.value * 100)))

-
     def plot_tauint(self):
         plt.xlabel('binsize')
         plt.ylabel('tauint')
         length = self.max_binsize
         x = np.arange(length) + 1
-        plt.errorbar(x[:], (self.dvalue[:] / self.dvalue[0]) ** 2 / 2, yerr=np.sqrt(((2 * (self.dvalue[:] / self.dvalue[0]) ** 2 * np.sqrt(2 * x[:] / self.N[0])) / 2) ** 2
-                     + ((2 * (self.dvalue[:] / self.dvalue[0]) ** 2 * np.sqrt(2 / self.N[0])) / 2) ** 2), linewidth=1, capsize=2)
+        plt.errorbar(x[:], (self.dvalue[:] / self.dvalue[0]) ** 2 / 2, yerr=np.sqrt(((2 * (self.dvalue[:] / self.dvalue[0]) ** 2 * np.sqrt(2 * x[:] / self.N[0])) / 2) ** 2 + ((2 * (self.dvalue[:] / self.dvalue[0]) ** 2 * np.sqrt(2 / self.N[0])) / 2) ** 2), linewidth=1, capsize=2)
         plt.xlim(0.5, length + 0.5)
         plt.title('Tauint')
         plt.show()

-
     def plot_history(self):
         N = self.N
         x = np.arange(N)

@@ -97,7 +93,7 @@ def generate_jack(obs, **kwargs):
         max_b = 1

     for b in range(max_b):
-        #binning if necessary
+        # binning if necessary
         if b > 0:
             n = full_data.size // (b + 1)
             binned_data = np.zeros(n)

@@ -108,10 +104,9 @@ def generate_jack(obs, **kwargs):
         else:
             binned_data = full_data
             n = binned_data.size
-        #generate jacks from data
+        # generate jacks from data
         mean = np.mean(binned_data)
         tmp_jacks = np.zeros(n)
-        #print(binned_data)
         for i in range(n):
             tmp_jacks[i] = (n * mean - binned_data[i]) / (n - 1)
         jacks.append(tmp_jacks)
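The closed form in generate_jack, tmp_jacks[i] = (n * mean - binned_data[i]) / (n - 1), is an O(n) shortcut for the usual delete-one jackknife means; a quick sketch with made-up data:

    import numpy as np

    data = np.array([1.0, 2.0, 3.0, 4.0])
    n, mean = data.size, data.mean()

    # i-th jackknife sample = mean of the data with element i removed
    jacks = (n * mean - data) / (n - 1)

    naive = np.array([np.delete(data, i).mean() for i in range(n)])
    assert np.allclose(jacks, naive)  # [3., 2.6667, 2.3333, 2.]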
@@ -6,15 +6,17 @@ import autograd.numpy as anp  # Thinly-wrapped numpy
 from .pyerrors import derived_observable


-### This code block is directly taken from the current master branch of autograd and remains
+# This code block is directly taken from the current master branch of autograd and remains
 # only until the new version is released on PyPi
 from functools import partial
 from autograd.extend import defvjp

 _dot = partial(anp.einsum, '...ij,...jk->...ik')
 # batched diag
-_diag = lambda a: anp.eye(a.shape[-1])*a
+_diag = lambda a: anp.eye(a.shape[-1]) * a
 # batched diagonal, similar to matrix_diag in tensorflow
+
+
 def _matrix_diag(a):
     reps = anp.array(a.shape)
     reps[:-1] = 1

@@ -24,14 +26,17 @@ def _matrix_diag(a):

 # https://arxiv.org/pdf/1701.00392.pdf Eq(4.77)
 # Note the formula from Sec3.1 in https://people.maths.ox.ac.uk/gilesm/files/NA-08-01.pdf is incomplete
+
+
 def grad_eig(ans, x):
     """Gradient of a general square (complex valued) matrix"""
     e, u = ans  # eigenvalues as 1d array, eigenvectors in columns
     n = e.shape[-1]

     def vjp(g):
         ge, gu = g
         ge = _matrix_diag(ge)
-        f = 1/(e[..., anp.newaxis, :] - e[..., :, anp.newaxis] + 1.e-20)
+        f = 1 / (e[..., anp.newaxis, :] - e[..., :, anp.newaxis] + 1.e-20)
         f -= _diag(f)
         ut = anp.swapaxes(u, -1, -2)
         r1 = f * _dot(ut, gu)

@@ -43,8 +48,10 @@ def grad_eig(ans, x):
         # but the derivative should be real in real input case when imaginary delta is forbidden
         return r
     return vjp

+
 defvjp(anp.linalg.eig, grad_eig)
-### End of the code block from autograd.master
+# End of the code block from autograd.master

+
 def scalar_mat_op(op, obs, **kwargs):

@@ -214,7 +221,6 @@ def _num_diff_eigh(obs, **kwargs):
     for i in range(dim):
         res_vec.append(derived_observable(_mat, raveled_obs, n=0, i=i, **kwargs))

-
     res_mat = []
     for i in range(dim):
         row = []
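The vendored block above registers a custom vector-Jacobian product for anp.linalg.eig through autograd's defvjp. A minimal sketch of that mechanism on a toy primitive (square is hypothetical, not part of pyerrors or autograd):

    from autograd import grad
    from autograd.extend import primitive, defvjp

    @primitive
    def square(x):
        # Opaque to autograd until a VJP is registered below.
        return x * x

    # VJP factory: given the result and the inputs, return the map
    # from the upstream gradient g to g * d(square)/dx.
    defvjp(square, lambda ans, x: lambda g: g * 2 * x)

    print(grad(square)(3.0))  # 6.0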
@@ -81,4 +81,3 @@ def ks_test(obs=None):
     plt.show()

     print(scipy.stats.kstest(Qs, 'uniform'))
-
@@ -44,7 +44,7 @@ def matrix_pencil_method(corrs, k=1, p=None, **kwargs):
     # Construct the hankel matrices
     matrix = []
     for n in range(data_sets):
-        matrix.append(scipy.linalg.hankel(data[n][:n_data-p], data[n][n_data-p-1:]))
+        matrix.append(scipy.linalg.hankel(data[n][:n_data - p], data[n][n_data - p - 1:]))
     matrix = np.array(matrix)
     # Construct y1 and y2
     y1 = np.concatenate(matrix[:, :, :p])

@@ -79,7 +79,7 @@ def matrix_pencil_method_old(data, p, noise_level=None, verbose=1, **kwargs):
     if n_data <= p:
         raise Exception('The pencil p has to be smaller than the number of data samples.')

-    matrix = scipy.linalg.hankel(data[:n_data-p], data[n_data-p-1:]) @ np.identity(p + 1)
+    matrix = scipy.linalg.hankel(data[:n_data - p], data[n_data - p - 1:]) @ np.identity(p + 1)

     if noise_level is not None:
         u, s, vh = svd(matrix)
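For reference, scipy.linalg.hankel(c, r) takes the first column and the last row, so the matrices built in both pencil methods have shape (n_data - p, p + 1); a small sketch with dummy data:

    import numpy as np
    import scipy.linalg

    data = np.arange(6.0)  # n_data = 6, dummy values
    p = 2
    matrix = scipy.linalg.hankel(data[:6 - p], data[6 - p - 1:])
    print(matrix.shape)    # (4, 3), i.e. (n_data - p, p + 1)
    # [[0. 1. 2.]
    #  [1. 2. 3.]
    #  [2. 3. 4.]
    #  [3. 4. 5.]]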
@@ -91,7 +91,6 @@ class Obs:
         self.e_n_tauint = {}
         self.e_n_dtauint = {}

-
     def gamma_method(self, **kwargs):
         """Calculate the error and related properties of the Obs.

@@ -262,16 +261,13 @@ class Obs:
             # Make sure no entry of tauint is smaller than 0.5
             self.e_n_tauint[e_name][self.e_n_tauint[e_name] < 0.5] = 0.500000000001
             # hep-lat/0306017 eq. (42)
-            self.e_n_dtauint[e_name] = self.e_n_tauint[e_name] * 2 * np.sqrt(np.abs(np.arange(w_max)
-                                       + 0.5 - self.e_n_tauint[e_name]) / e_N)
+            self.e_n_dtauint[e_name] = self.e_n_tauint[e_name] * 2 * np.sqrt(np.abs(np.arange(w_max) + 0.5 - self.e_n_tauint[e_name]) / e_N)
             self.e_n_dtauint[e_name][0] = 0.0

-
             def _compute_drho(i):
-                tmp = self.e_rho[e_name][i+1:w_max] + np.concatenate([self.e_rho[e_name][i-1::-1], self.e_rho[e_name][1:w_max - 2 * i]]) - 2 * self.e_rho[e_name][i] * self.e_rho[e_name][1:w_max - i]
+                tmp = self.e_rho[e_name][i + 1:w_max] + np.concatenate([self.e_rho[e_name][i - 1::-1], self.e_rho[e_name][1:w_max - 2 * i]]) - 2 * self.e_rho[e_name][i] * self.e_rho[e_name][1:w_max - i]
                 self.e_drho[e_name][i] = np.sqrt(np.sum(tmp ** 2) / e_N)

-
             _compute_drho(1)
             if self.tau_exp[e_name] > 0:
                 # Critical slowing down analysis
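The line joined above is the tauint error estimate cited as hep-lat/0306017 eq. (42); in that notation it reads approximately

    \delta\tau_{\mathrm{int}}(W) \approx 2\,\tau_{\mathrm{int}}(W)\,\sqrt{\frac{W + \tfrac{1}{2} - \tau_{\mathrm{int}}(W)}{N}}

with W the summation window and N the number of measurements.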
@@ -279,8 +275,7 @@ class Obs:
                     _compute_drho(n + 1)
                     if (self.e_rho[e_name][n] - self.N_sigma * self.e_drho[e_name][n]) < 0 or n >= w_max // 2 - 2:
                         # Bias correction hep-lat/0306017 eq. (49) included
-                        self.e_tauint[e_name] = self.e_n_tauint[e_name][n] * (1 + (2 * n + 1) / e_N) / (1 + 1 / e_N) + self.tau_exp[e_name] * np.abs(self.e_rho[e_name][n + 1])
-                        # The absolute makes sure, that the tail contribution is always positive
+                        self.e_tauint[e_name] = self.e_n_tauint[e_name][n] * (1 + (2 * n + 1) / e_N) / (1 + 1 / e_N) + self.tau_exp[e_name] * np.abs(self.e_rho[e_name][n + 1])  # The absolute makes sure, that the tail contribution is always positive
                         self.e_dtauint[e_name] = np.sqrt(self.e_n_dtauint[e_name][n] ** 2 + self.tau_exp[e_name] ** 2 * self.e_drho[e_name][n + 1] ** 2)
                         # Error of tau_exp neglected so far, missing term: self.e_rho[e_name][n + 1] ** 2 * d_tau_exp ** 2
                         self.e_dvalue[e_name] = np.sqrt(2 * self.e_tauint[e_name] * e_gamma[e_name][0] * (1 + 1 / e_N) / e_N)

@@ -325,7 +320,6 @@ class Obs:
         self.ddvalue = np.sqrt(self.ddvalue) / self.dvalue
         return 0

-
     def print(self, level=1):
         """Print basic properties of the Obs."""
         if level == 0:

@@ -346,7 +340,6 @@ class Obs:
             for e_name in self.e_names:
                 print(e_name, ':', self.e_content[e_name])

-
     def plot_tauint(self, save=None):
         """Plot integrated autocorrelation time for each ensemble."""
         if not self.e_names:

@@ -360,15 +353,15 @@ class Obs:
             if self.tau_exp[e_name] > 0:
                 base = self.e_n_tauint[e_name][self.e_windowsize[e_name]]
                 x_help = np.arange(2 * self.tau_exp[e_name])
-                y_help = (x_help + 1) * np.abs(self.e_rho[e_name][self.e_windowsize[e_name]+1]) * (1 - x_help / (2 * (2 * self.tau_exp[e_name] - 1))) + base
+                y_help = (x_help + 1) * np.abs(self.e_rho[e_name][self.e_windowsize[e_name] + 1]) * (1 - x_help / (2 * (2 * self.tau_exp[e_name] - 1))) + base
                 x_arr = np.arange(self.e_windowsize[e_name] + 1, self.e_windowsize[e_name] + 1 + 2 * self.tau_exp[e_name])
                 plt.plot(x_arr, y_help, 'C' + str(e), linewidth=1, ls='--', marker=',')
                 plt.errorbar([self.e_windowsize[e_name] + 2 * self.tau_exp[e_name]], [self.e_tauint[e_name]],
                              yerr=[self.e_dtauint[e_name]], fmt='C' + str(e), linewidth=1, capsize=2, marker='o', mfc=plt.rcParams['axes.facecolor'])
                 xmax = self.e_windowsize[e_name] + 2 * self.tau_exp[e_name] + 1.5
-                label = e_name + r', $\tau_\mathrm{exp}$='+str(np.around(self.tau_exp[e_name], decimals=2))
+                label = e_name + r', $\tau_\mathrm{exp}$=' + str(np.around(self.tau_exp[e_name], decimals=2))
             else:
-                label = e_name + ', S=' +str(np.around(self.S[e_name], decimals=2))
+                label = e_name + ', S=' + str(np.around(self.S[e_name], decimals=2))
                 xmax = max(10.5, 2 * self.e_windowsize[e_name] - 0.5)

             plt.errorbar(np.arange(length), self.e_n_tauint[e_name][:], yerr=self.e_n_dtauint[e_name][:], linewidth=1, capsize=2, label=label)

@@ -380,7 +373,6 @@ class Obs:
         if save:
             fig.savefig(save)

-
     def plot_rho(self):
         """Plot normalized autocorrelation function time for each ensemble."""
         if not self.e_names:

@@ -395,7 +387,7 @@ class Obs:
                 plt.plot([self.e_windowsize[e_name] + 1, self.e_windowsize[e_name] + 1 + 2 * self.tau_exp[e_name]],
                          [self.e_rho[e_name][self.e_windowsize[e_name] + 1], 0], 'k-', lw=1)
                 xmax = self.e_windowsize[e_name] + 2 * self.tau_exp[e_name] + 1.5
-                plt.title('Rho ' + e_name + ', tau\_exp=' + str(np.around(self.tau_exp[e_name], decimals=2)))
+                plt.title('Rho ' + e_name + r', tau\_exp=' + str(np.around(self.tau_exp[e_name], decimals=2)))
             else:
                 xmax = max(10.5, 2 * self.e_windowsize[e_name] - 0.5)
                 plt.title('Rho ' + e_name + ', S=' + str(np.around(self.S[e_name], decimals=2)))

@@ -403,7 +395,6 @@ class Obs:
             plt.xlim(-0.5, xmax)
             plt.draw()

-
     def plot_rep_dist(self):
         """Plot replica distribution for each ensemble with more than one replicum."""
         if not self.e_names:

@@ -423,33 +414,30 @@ class Obs:
             for r, r_name in enumerate(self.e_content[e_name]):
                 arr[r] = (self.r_values[r_name] - sub_r_mean) / (self.e_dvalue[e_name] * np.sqrt(e_N / self.shape[r_name] - 1))
             plt.hist(arr, rwidth=0.8, bins=len(self.e_content[e_name]))
-            plt.title('Replica distribution' + e_name + ' (mean=0, var=1), Q='+str(np.around(self.e_Q[e_name], decimals=2)))
+            plt.title('Replica distribution' + e_name + ' (mean=0, var=1), Q=' + str(np.around(self.e_Q[e_name], decimals=2)))
             plt.show()

-
     def plot_history(self):
         """Plot derived Monte Carlo history for each ensemble."""
         if not self.e_names:
             raise Exception('Run the gamma method first.')

         for e, e_name in enumerate(self.e_names):
-            f = plt.figure()
+            plt.figure()
             r_length = []
-            sub_r_mean = 0
             for r, r_name in enumerate(self.e_content[e_name]):
                 r_length.append(len(self.deltas[r_name]))
             e_N = np.sum(r_length)
             x = np.arange(e_N)
             tmp = []
             for r, r_name in enumerate(self.e_content[e_name]):
-                tmp.append(self.deltas[r_name]+self.r_values[r_name])
+                tmp.append(self.deltas[r_name] + self.r_values[r_name])
             y = np.concatenate(tmp, axis=0)
             plt.errorbar(x, y, fmt='.', markersize=3)
             plt.xlim(-0.5, e_N - 0.5)
             plt.title(e_name)
             plt.show()

-
     def plot_piechart(self):
         """Plot piechart which shows the fractional contribution of each
         ensemble to the error and returns a dictionary containing the fractions."""
@@ -480,19 +468,17 @@ class Obs:
         with open(file_name, 'wb') as fb:
             pickle.dump(self, fb)

-
     def __repr__(self):
         if self.dvalue == 0.0:
-            return 'Obs['+str(self.value)+']'
+            return 'Obs[' + str(self.value) + ']'
         fexp = np.floor(np.log10(self.dvalue))
         if fexp < 0.0:
-            return 'Obs[{:{form}}({:2.0f})]'.format(self.value, self.dvalue * 10 ** (-fexp + 1), form='.'+str(-int(fexp) + 1) + 'f')
+            return 'Obs[{:{form}}({:2.0f})]'.format(self.value, self.dvalue * 10 ** (-fexp + 1), form='.' + str(-int(fexp) + 1) + 'f')
         elif fexp == 0.0:
             return 'Obs[{:.1f}({:1.1f})]'.format(self.value, self.dvalue)
         else:
             return 'Obs[{:.0f}({:2.0f})]'.format(self.value, self.dvalue)

-
     # Overload comparisons
     def __lt__(self, other):
         return self.value < other
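The formatting branches fixed above print the error in parentheses, scaled to match the displayed digits of the value; a quick worked sketch of the fexp < 0 branch with made-up numbers:

    import numpy as np

    value, dvalue = 0.7, 0.0123
    fexp = np.floor(np.log10(dvalue))  # -2.0, leading decade of the error
    s = 'Obs[{:{form}}({:2.0f})]'.format(value, dvalue * 10 ** (-fexp + 1),
                                         form='.' + str(-int(fexp) + 1) + 'f')
    print(s)  # Obs[0.700(12)]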
@@ -500,7 +486,6 @@ class Obs:
     def __gt__(self, other):
         return self.value > other

-
     # Overload math operations
     def __add__(self, y):
         if isinstance(y, Obs):

@@ -512,10 +497,10 @@ class Obs:
                 return NotImplemented
         else:
             return derived_observable(lambda x, **kwargs: x[0] + y, [self], man_grad=[1])

     def __radd__(self, y):
         return self + y

     def __mul__(self, y):
         if isinstance(y, Obs):
             return derived_observable(lambda x, **kwargs: x[0] * x[1], [self, y], man_grad=[y.value, self.value])
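Throughout these overloads, man_grad hands derived_observable the analytic partial derivatives instead of having them computed by automatic differentiation; for x0 * x1 they are [x1, x0]. These derivatives then enter first-order error propagation, schematically

    \sigma_f^2 \approx \sum_{i,j} \frac{\partial f}{\partial x_i}\,\frac{\partial f}{\partial x_j}\,\mathrm{cov}(x_i, x_j)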
@@ -531,7 +516,6 @@ class Obs:
     def __rmul__(self, y):
         return self * y

-
     def __sub__(self, y):
         if isinstance(y, Obs):
             return derived_observable(lambda x, **kwargs: x[0] - x[1], [self, y], man_grad=[1, -1])

@@ -545,15 +529,12 @@ class Obs:
         else:
             return derived_observable(lambda x, **kwargs: x[0] - y, [self], man_grad=[1])

-
     def __rsub__(self, y):
         return -1 * (self - y)

-
     def __neg__(self):
         return -1 * self

-
     def __truediv__(self, y):
         if isinstance(y, Obs):
             return derived_observable(lambda x, **kwargs: x[0] / x[1], [self, y], man_grad=[1 / y.value, - self.value / y.value ** 2])

@@ -567,7 +548,6 @@ class Obs:
         else:
             return derived_observable(lambda x, **kwargs: x[0] / y, [self], man_grad=[1 / y])

-
     def __rtruediv__(self, y):
         if isinstance(y, Obs):
             return derived_observable(lambda x, **kwargs: x[0] / x[1], [y, self], man_grad=[1 / self.value, - y.value / self.value ** 2])

@@ -577,86 +557,67 @@ class Obs:
         else:
             return derived_observable(lambda x, **kwargs: y / x[0], [self], man_grad=[-y / self.value ** 2])

-
     def __pow__(self, y):
         if isinstance(y, Obs):
             return derived_observable(lambda x: x[0] ** x[1], [self, y])
         else:
             return derived_observable(lambda x: x[0] ** y, [self])

-
     def __rpow__(self, y):
         if isinstance(y, Obs):
             return derived_observable(lambda x: x[0] ** x[1], [y, self])
         else:
             return derived_observable(lambda x: y ** x[0], [self])

-
     def __abs__(self):
         return derived_observable(lambda x: anp.abs(x[0]), [self])

-
     # Overload numpy functions
     def sqrt(self):
         return derived_observable(lambda x, **kwargs: np.sqrt(x[0]), [self], man_grad=[1 / 2 / np.sqrt(self.value)])

-
     def log(self):
         return derived_observable(lambda x, **kwargs: np.log(x[0]), [self], man_grad=[1 / self.value])

-
     def exp(self):
         return derived_observable(lambda x, **kwargs: np.exp(x[0]), [self], man_grad=[np.exp(self.value)])

-
     def sin(self):
         return derived_observable(lambda x, **kwargs: np.sin(x[0]), [self], man_grad=[np.cos(self.value)])

-
     def cos(self):
         return derived_observable(lambda x, **kwargs: np.cos(x[0]), [self], man_grad=[-np.sin(self.value)])

-
     def tan(self):
         return derived_observable(lambda x, **kwargs: np.tan(x[0]), [self], man_grad=[1 / np.cos(self.value) ** 2])

-
     def arcsin(self):
         return derived_observable(lambda x: anp.arcsin(x[0]), [self])

-
     def arccos(self):
         return derived_observable(lambda x: anp.arccos(x[0]), [self])

-
     def arctan(self):
         return derived_observable(lambda x: anp.arctan(x[0]), [self])

-
     def sinh(self):
         return derived_observable(lambda x, **kwargs: np.sinh(x[0]), [self], man_grad=[np.cosh(self.value)])

-
     def cosh(self):
         return derived_observable(lambda x, **kwargs: np.cosh(x[0]), [self], man_grad=[np.sinh(self.value)])

-
     def tanh(self):
         return derived_observable(lambda x, **kwargs: np.tanh(x[0]), [self], man_grad=[1 / np.cosh(self.value) ** 2])

-
     def arcsinh(self):
         return derived_observable(lambda x: anp.arcsinh(x[0]), [self])

-
     def arccosh(self):
         return derived_observable(lambda x: anp.arccosh(x[0]), [self])

-
     def arctanh(self):
         return derived_observable(lambda x: anp.arctanh(x[0]), [self])

-
     def sinc(self):
         return derived_observable(lambda x: anp.sinc(x[0]), [self])
@@ -751,7 +712,7 @@ def derived_observable(func, data, **kwargs):
             kwarg = kwargs.get(key)
             if kwarg is not None:
                 options[key] = kwarg
-        tmp_df = nd.Gradient(func, order=4, **{k:v for k, v in options.items() if v is not None})(values, **kwargs)
+        tmp_df = nd.Gradient(func, order=4, **{k: v for k, v in options.items() if v is not None})(values, **kwargs)
         if tmp_df.size == 1:
             deriv = np.array([tmp_df.real])
         else:
@@ -933,8 +894,7 @@ def covariance2(obs1, obs2, correlation=False, **kwargs):
             max_gamma = min(obs1.shape[r_name], w_max)
             # The padding for the fft has to be even
             padding = obs1.shape[r_name] + max_gamma + (obs1.shape[r_name] + max_gamma) % 2
-            e_gamma[e_name][:max_gamma] += (np.fft.irfft(np.fft.rfft(obs1.deltas[r_name], padding) * np.conjugate(np.fft.rfft(obs2.deltas[r_name], padding)))[:max_gamma]
-                                            + np.fft.irfft(np.fft.rfft(obs2.deltas[r_name], padding) * np.conjugate(np.fft.rfft(obs1.deltas[r_name], padding)))[:max_gamma]) / 2.0
+            e_gamma[e_name][:max_gamma] += (np.fft.irfft(np.fft.rfft(obs1.deltas[r_name], padding) * np.conjugate(np.fft.rfft(obs2.deltas[r_name], padding)))[:max_gamma] + np.fft.irfft(np.fft.rfft(obs2.deltas[r_name], padding) * np.conjugate(np.fft.rfft(obs1.deltas[r_name], padding)))[:max_gamma]) / 2.0

         if np.all(e_gamma[e_name]) == 0.0:
             continue
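The joined expression above accumulates the symmetrized cross-correlation of the two delta series via zero-padded FFTs; as a sketch, rfft/irfft with even padding reproduces the direct lag sums (toy arrays, not pyerrors data):

    import numpy as np

    a = np.array([0.3, -0.1, 0.2, -0.4])
    b = np.array([0.1, 0.4, -0.2, 0.3])
    padding = 8  # even, as required in the hunk above

    fft_corr = np.fft.irfft(np.fft.rfft(a, padding) * np.conjugate(np.fft.rfft(b, padding)))[:4]
    direct = np.array([np.sum(a[t:] * b[:len(b) - t]) for t in range(4)])
    assert np.allclose(fft_corr, direct)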
@@ -964,7 +924,6 @@ def covariance2(obs1, obs2, correlation=False, **kwargs):
         # Make sure no entry of tauint is smaller than 0.5
         e_n_tauint[e_name][e_n_tauint[e_name] < 0.5] = 0.500000000001

-
         window = max(obs1.e_windowsize[e_name], obs2.e_windowsize[e_name])
         # Bias correction hep-lat/0306017 eq. (49)
         e_dvalue[e_name] = 2 * (e_n_tauint[e_name][window] + obs1.tau_exp[e_name] * np.abs(e_rho[e_name][window + 1])) * (1 + (2 * window + 1) / e_N) * e_gamma[e_name][0] / e_N

@@ -1141,7 +1100,6 @@ def plot_corrs(observables, **kwargs):
         for j in range(len(observables)):
             label.append(str(j + 1))

-
     f = plt.figure()
     for j in range(len(observables)):
         T = len(observables[j])

@@ -1202,8 +1160,7 @@ def plot_corrs(observables, **kwargs):
         y_fit = fit_result[1].value * np.exp(-fit_result[0].value * x)
         plt.plot(x, y_fit, color='k')
         if not (fit_result[0].e_names == {} and fit_result[1].e_names == {}):
-            y_fit_err = np.sqrt((y_fit * fit_result[0].dvalue) ** 2 + 2 * covariance(fit_result[0], fit_result[1])* y_fit *
-                                np.exp(-fit_result[0].value * x) + (np.exp(-fit_result[0].value * x) * fit_result[1].dvalue) ** 2)
+            y_fit_err = np.sqrt((y_fit * fit_result[0].dvalue) ** 2 + 2 * covariance(fit_result[0], fit_result[1]) * y_fit * np.exp(-fit_result[0].value * x) + (np.exp(-fit_result[0].value * x) * fit_result[1].dvalue) ** 2)
             plt.fill_between(x, y_fit + y_fit_err, y_fit - y_fit_err, color='k', alpha=0.1)

         plt.xlabel('$x_0/a$')

@@ -1231,7 +1188,7 @@ def merge_obs(list_of_obs):
     """
     replist = [item for obs in list_of_obs for item in obs.names]
     if (len(replist) == len(set(replist))) is False:
-        raise Exception('list_of_obs contains duplicate replica: %s' %(str(replist)))
+        raise Exception('list_of_obs contains duplicate replica: %s' % (str(replist)))
     new_dict = {}
     for o in list_of_obs:
         new_dict.update({key: o.deltas.get(key, 0) + o.r_values.get(key, 0)
@@ -3,7 +3,8 @@

 import scipy.optimize
 from autograd import jacobian
-from .pyerrors import Obs, derived_observable, pseudo_Obs
+from .pyerrors import derived_observable, pseudo_Obs

+
 def find_root(d, func, guess=1.0, **kwargs):
     """Finds the root of the function func(x, d) where d is an Obs.

@@ -18,7 +19,7 @@ def find_root(d, func, guess=1.0, **kwargs):

     # Error propagation as detailed in arXiv:1809.01289
     dx = jacobian(func)(root[0], d.value)
-    da = jacobian(lambda u, v : func(v, u))(d.value, root[0])
+    da = jacobian(lambda u, v: func(v, u))(d.value, root[0])
     deriv = - da / dx

     return derived_observable(lambda x, **kwargs: x[0], [pseudo_Obs(root, 0.0, d.names[0], d.shape[d.names[0]]), d], man_grad=[0, deriv])
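The propagation in find_root is the implicit function theorem: for f(x(d), d) = 0 one has dx/dd = -(df/dd)/(df/dx), which is exactly deriv = - da / dx above. A toy check (this func is hypothetical, not from pyerrors):

    import numpy as np
    from autograd import jacobian

    func = lambda x, d: x ** 2 - d  # root at x = sqrt(d)
    d_value, root = 2.0, np.sqrt(2.0)

    dx = jacobian(func)(root, d_value)                     # df/dx = 2x
    da = jacobian(lambda u, v: func(v, u))(d_value, root)  # df/dd = -1
    deriv = - da / dx

    assert np.isclose(deriv, 1 / (2 * np.sqrt(2.0)))  # = d(sqrt(d))/dd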