mirror of
https://github.com/fjosw/pyerrors.git
synced 2025-03-15 14:50:25 +01:00
Merge branch 'develop' into feature/irregularMC
This commit is contained in:
commit
913f682087
3 changed files with 70 additions and 19 deletions
|
@ -98,6 +98,9 @@ my_sum.details()
|
|||
|
||||
The integrated autocorrelation time $\tau_\mathrm{int}$ and the autocorrelation function $\rho(W)$ can be monitored via the methods `pyerrors.obs.Obs.plot_tauint` and `pyerrors.obs.Obs.plot_rho`.
|
||||
|
||||
If the parameter $S$ is set to zero it is assumed that the dataset does not exhibit any autocorrelation and the window size is chosen to be zero.
|
||||
In this case the error estimate is identical to the sample standard error.
|
||||
|
||||
### Exponential tails
|
||||
|
||||
Slow modes in the Monte Carlo history can be accounted for by attaching an exponential tail to the autocorrelation function $\rho$ as suggested in [arXiv:1009.5228](https://arxiv.org/abs/1009.5228). The longest autocorrelation time in the history, $\tau_\mathrm{exp}$, can be passed to the `gamma_method` as parameter. In this case the automatic windowing procedure is vacated and the parameter $S$ does not affect the error estimate.
|
||||
|
|
|
@ -155,20 +155,21 @@ class Obs:
|
|||
return res
|
||||
|
||||
def gamma_method(self, **kwargs):
|
||||
"""Calculate the error and related properties of the Obs.
|
||||
"""Estimate the error and related properties of the Obs.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
S : float
|
||||
specifies a custom value for the parameter S (default 2.0), can be
|
||||
a float or an array of floats for different ensembles
|
||||
specifies a custom value for the parameter S (default 2.0).
|
||||
If set to 0 it is assumed that the data exhibits no
|
||||
autocorrelation. In this case the error estimates coincides
|
||||
with the sample standard error.
|
||||
tau_exp : float
|
||||
positive value triggers the critical slowing down analysis
|
||||
(default 0.0), can be a float or an array of floats for different
|
||||
ensembles
|
||||
(default 0.0).
|
||||
N_sigma : float
|
||||
number of standard deviations from zero until the tail is
|
||||
attached to the autocorrelation function (default 1)
|
||||
attached to the autocorrelation function (default 1).
|
||||
fft : bool
|
||||
determines whether the fft algorithm is used for the computation
|
||||
of the autocorrelation function (default True)
|
||||
|
@ -281,19 +282,26 @@ class Obs:
|
|||
self.e_windowsize[e_name] = n
|
||||
break
|
||||
else:
|
||||
# Standard automatic windowing procedure
|
||||
g_w = self.S[e_name] / np.log((2 * self.e_n_tauint[e_name][1:] + 1) / (2 * self.e_n_tauint[e_name][1:] - 1))
|
||||
g_w = np.exp(- np.arange(1, w_max) / g_w) - g_w / np.sqrt(np.arange(1, w_max) * e_N)
|
||||
for n in range(1, w_max):
|
||||
if n < w_max // 2 - 2:
|
||||
_compute_drho(n + 1)
|
||||
if g_w[n - 1] < 0 or n >= w_max - 1:
|
||||
self.e_tauint[e_name] = self.e_n_tauint[e_name][n] * (1 + (2 * n + 1) / e_N) / (1 + 1 / e_N) # Bias correction hep-lat/0306017 eq. (49)
|
||||
self.e_dtauint[e_name] = self.e_n_dtauint[e_name][n]
|
||||
self.e_dvalue[e_name] = np.sqrt(2 * self.e_tauint[e_name] * e_gamma[e_name][0] * (1 + 1 / e_N) / e_N)
|
||||
self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt((n + 0.5) / e_N)
|
||||
self.e_windowsize[e_name] = n
|
||||
break
|
||||
if self.S[e_name] == 0.0:
|
||||
self.e_tauint[e_name] = 0.5
|
||||
self.e_dtauint[e_name] = 0.0
|
||||
self.e_dvalue[e_name] = np.sqrt(e_gamma[e_name][0] / (e_N - 1))
|
||||
self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt(0.5 / e_N)
|
||||
self.e_windowsize[e_name] = 0
|
||||
else:
|
||||
# Standard automatic windowing procedure
|
||||
tau = self.S[e_name] / np.log((2 * self.e_n_tauint[e_name][1:] + 1) / (2 * self.e_n_tauint[e_name][1:] - 1))
|
||||
g_w = np.exp(- np.arange(1, w_max) / tau) - tau / np.sqrt(np.arange(1, w_max) * e_N)
|
||||
for n in range(1, w_max):
|
||||
if n < w_max // 2 - 2:
|
||||
_compute_drho(n + 1)
|
||||
if g_w[n - 1] < 0 or n >= w_max - 1:
|
||||
self.e_tauint[e_name] = self.e_n_tauint[e_name][n] * (1 + (2 * n + 1) / e_N) / (1 + 1 / e_N) # Bias correction hep-lat/0306017 eq. (49)
|
||||
self.e_dtauint[e_name] = self.e_n_dtauint[e_name][n]
|
||||
self.e_dvalue[e_name] = np.sqrt(2 * self.e_tauint[e_name] * e_gamma[e_name][0] * (1 + 1 / e_N) / e_N)
|
||||
self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt((n + 0.5) / e_N)
|
||||
self.e_windowsize[e_name] = n
|
||||
break
|
||||
|
||||
self._dvalue += self.e_dvalue[e_name] ** 2
|
||||
self.ddvalue += (self.e_dvalue[e_name] * self.e_ddvalue[e_name]) ** 2
|
||||
|
@ -1547,6 +1555,25 @@ def load_object(path):
|
|||
return pickle.load(file)
|
||||
|
||||
|
||||
def import_jackknife(jacks, name):
    """Imports jackknife samples and returns an Obs

    Parameters
    ----------
    jacks : numpy.ndarray
        numpy array containing the mean value as zeroth entry and
        the N jackknife samples as first to Nth entry.
    name : str
        name of the ensemble the samples are defined on.
    """
    n_jacks = len(jacks) - 1
    # Invert the jackknife projection: the original sample x_j is recovered
    # from the jackknife means J_i via x_j = sum_i J_i - (N - 1) * J_j,
    # expressed here as a single matrix product.
    inversion_matrix = np.ones((n_jacks, n_jacks)) - (n_jacks - 1) * np.identity(n_jacks)
    reconstructed_samples = jacks[1:] @ inversion_matrix
    result = Obs([reconstructed_samples], [name])
    # Overwrite the mean with the supplied zeroth entry to avoid any
    # rounding drift from the reconstruction.
    result._value = jacks[0]
    return result
|
||||
|
||||
|
||||
def merge_obs(list_of_obs):
|
||||
"""Combine all observables in list_of_obs into one new observable
|
||||
|
||||
|
|
|
@ -95,6 +95,18 @@ def test_gamma_method():
|
|||
assert test_obs.e_tauint['t'] - 10.5 <= test_obs.e_dtauint['t']
|
||||
|
||||
|
||||
def test_gamma_method_no_windowing():
    # With S=0 the automatic windowing is disabled and the error estimate
    # must reduce to the plain sample standard error (tau_int == 0.5).
    for _ in range(50):
        n_samples = 733 + np.random.randint(1000)
        obs = pe.Obs([np.random.normal(1.02, 0.02, n_samples)], ['ens'])
        obs.gamma_method(S=0)
        assert obs.e_tauint['ens'] == 0.5
        naive_error = np.sqrt(np.var(obs.deltas['ens'], ddof=1) / obs.shape['ens'])
        assert np.isclose(naive_error, obs.dvalue)
        # A positive S re-enables windowing and should yield tau_int > 0.5.
        obs.gamma_method(S=1.1)
        assert obs.e_tauint['ens'] > 0.5
        # Negative S is invalid and must raise.
        with pytest.raises(Exception):
            obs.gamma_method(S=-0.2)
|
||||
|
||||
|
||||
def test_gamma_method_persistance():
|
||||
my_obs = pe.Obs([np.random.rand(730)], ['t'])
|
||||
my_obs.gamma_method()
|
||||
|
@ -522,3 +534,12 @@ def test_jackknife():
|
|||
my_new_obs = my_obs + pe.Obs([full_data], ['test2'])
|
||||
with pytest.raises(Exception):
|
||||
my_new_obs.export_jackknife()
|
||||
|
||||
|
||||
def test_import_jackknife():
    # Round trip: exporting jackknife samples and importing them back
    # must reproduce the original observable exactly.
    raw_samples = np.random.normal(1.105, 0.021, 754)
    original = pe.Obs([raw_samples], ['test'])
    jackknife_samples = original.export_jackknife()
    round_tripped = pe.import_jackknife(jackknife_samples, 'test')
    assert original == round_tripped
|
||||
|
||||
|
|
Loading…
Add table
Reference in a new issue