mirror of https://github.com/fjosw/pyerrors.git, synced 2025-03-15 14:50:25 +01:00
feat: automatic windowing procedure can now be deactivated by choosing S=0
This commit is contained in:
parent 56b1b36037
commit ccca4eabbf
3 changed files with 42 additions and 19 deletions
@@ -98,6 +98,9 @@ my_sum.details()
The integrated autocorrelation time $\tau_\mathrm{int}$ and the autocorrelation function $\rho(W)$ can be monitored via the methods `pyerrors.obs.Obs.plot_tauint` and `pyerrors.obs.Obs.plot_rho`.
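For illustration, a minimal sketch of how these monitoring methods might be called; the synthetic data and the ensemble name `'ensemble'` are placeholders chosen for this example, not part of the commit:

```python
import numpy as np
import pyerrors as pe

# illustrative, uncorrelated dummy data; a real analysis would use a Monte Carlo history
obs = pe.Obs([np.random.normal(0.5, 0.05, 500)], ['ensemble'])
obs.gamma_method()
obs.plot_tauint()  # integrated autocorrelation time as a function of the window size
obs.plot_rho()     # normalized autocorrelation function
```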
If the parameter $S$ is set to zero it is assumed that the dataset does not exhibit any autocorrelation and the window size is chosen to be zero.
In this case the error estimate is identical to the sample standard error.
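As a rough sketch of what this means in practice (the synthetic data below is an assumption made for this example):

```python
import numpy as np
import pyerrors as pe

samples = np.random.normal(1.0, 0.1, 1000)  # uncorrelated dummy data
obs = pe.Obs([samples], ['ensemble'])

obs.gamma_method(S=0)  # deactivate the automatic windowing procedure

# with S=0 the error estimate should coincide with the sample standard error
standard_error = np.std(samples, ddof=1) / np.sqrt(len(samples))
print(obs.dvalue, standard_error)
```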
### Exponential tails
Slow modes in the Monte Carlo history can be accounted for by attaching an exponential tail to the autocorrelation function $\rho$ as suggested in [arXiv:1009.5228](https://arxiv.org/abs/1009.5228). The longest autocorrelation time in the history, $\tau_\mathrm{exp}$, can be passed to the `gamma_method` as a parameter. In this case the automatic windowing procedure is bypassed and the parameter $S$ does not affect the error estimate.
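A short usage sketch; the value `tau_exp=10` and the synthetic data are illustrative assumptions, not recommendations:

```python
import numpy as np
import pyerrors as pe

obs = pe.Obs([np.random.normal(1.0, 0.1, 2000)], ['ensemble'])  # dummy Monte Carlo history

# attach an exponential tail with the longest expected autocorrelation time;
# N_sigma (default 1) controls where the tail is attached, S does not affect the estimate
obs.gamma_method(tau_exp=10, N_sigma=1)
obs.details()
```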
@@ -155,20 +155,21 @@ class Obs:
         return res
 
     def gamma_method(self, **kwargs):
-        """Calculate the error and related properties of the Obs.
+        """Estimate the error and related properties of the Obs.
 
         Parameters
         ----------
         S : float
-            specifies a custom value for the parameter S (default 2.0), can be
-            a float or an array of floats for different ensembles
+            specifies a custom value for the parameter S (default 2.0).
+            If set to 0 it is assumed that the data exhibits no
+            autocorrelation. In this case the error estimate coincides
+            with the sample standard error.
         tau_exp : float
             positive value triggers the critical slowing down analysis
-            (default 0.0), can be a float or an array of floats for different
-            ensembles
+            (default 0.0).
         N_sigma : float
             number of standard deviations from zero until the tail is
-            attached to the autocorrelation function (default 1)
+            attached to the autocorrelation function (default 1).
         fft : bool
             determines whether the fft algorithm is used for the computation
             of the autocorrelation function (default True)
@@ -281,19 +282,26 @@ class Obs:
                         self.e_windowsize[e_name] = n
                         break
             else:
-                # Standard automatic windowing procedure
-                g_w = self.S[e_name] / np.log((2 * self.e_n_tauint[e_name][1:] + 1) / (2 * self.e_n_tauint[e_name][1:] - 1))
-                g_w = np.exp(- np.arange(1, w_max) / g_w) - g_w / np.sqrt(np.arange(1, w_max) * e_N)
-                for n in range(1, w_max):
-                    if n < w_max // 2 - 2:
-                        _compute_drho(n + 1)
-                    if g_w[n - 1] < 0 or n >= w_max - 1:
-                        self.e_tauint[e_name] = self.e_n_tauint[e_name][n] * (1 + (2 * n + 1) / e_N) / (1 + 1 / e_N)  # Bias correction hep-lat/0306017 eq. (49)
-                        self.e_dtauint[e_name] = self.e_n_dtauint[e_name][n]
-                        self.e_dvalue[e_name] = np.sqrt(2 * self.e_tauint[e_name] * e_gamma[e_name][0] * (1 + 1 / e_N) / e_N)
-                        self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt((n + 0.5) / e_N)
-                        self.e_windowsize[e_name] = n
-                        break
+                if self.S[e_name] == 0.0:
+                    self.e_tauint[e_name] = 0.5
+                    self.e_dtauint[e_name] = 0.0
+                    self.e_dvalue[e_name] = np.sqrt(e_gamma[e_name][0] / (e_N - 1))
+                    self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt(0.5 / e_N)
+                    self.e_windowsize[e_name] = 0
+                else:
+                    # Standard automatic windowing procedure
+                    tau = self.S[e_name] / np.log((2 * self.e_n_tauint[e_name][1:] + 1) / (2 * self.e_n_tauint[e_name][1:] - 1))
+                    g_w = np.exp(- np.arange(1, w_max) / tau) - tau / np.sqrt(np.arange(1, w_max) * e_N)
+                    for n in range(1, w_max):
+                        if n < w_max // 2 - 2:
+                            _compute_drho(n + 1)
+                        if g_w[n - 1] < 0 or n >= w_max - 1:
+                            self.e_tauint[e_name] = self.e_n_tauint[e_name][n] * (1 + (2 * n + 1) / e_N) / (1 + 1 / e_N)  # Bias correction hep-lat/0306017 eq. (49)
+                            self.e_dtauint[e_name] = self.e_n_dtauint[e_name][n]
+                            self.e_dvalue[e_name] = np.sqrt(2 * self.e_tauint[e_name] * e_gamma[e_name][0] * (1 + 1 / e_N) / e_N)
+                            self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt((n + 0.5) / e_N)
+                            self.e_windowsize[e_name] = n
+                            break
 
             self._dvalue += self.e_dvalue[e_name] ** 2
             self.ddvalue += (self.e_dvalue[e_name] * self.e_ddvalue[e_name]) ** 2
@@ -95,6 +95,18 @@ def test_gamma_method():
     assert test_obs.e_tauint['t'] - 10.5 <= test_obs.e_dtauint['t']
 
 
+def test_gamma_method_no_windowing():
+    for iteration in range(50):
+        obs = pe.Obs([np.random.normal(1.02, 0.02, 733 + np.random.randint(1000))], ['ens'])
+        obs.gamma_method(S=0)
+        assert obs.e_tauint['ens'] == 0.5
+        assert np.isclose(np.sqrt(np.var(obs.deltas['ens'], ddof=1) / obs.shape['ens']), obs.dvalue)
+        obs.gamma_method(S=1.1)
+        assert obs.e_tauint['ens'] > 0.5
+    with pytest.raises(Exception):
+        obs.gamma_method(S=-0.2)
+
+
 def test_gamma_method_persistance():
     my_obs = pe.Obs([np.random.rand(730)], ['t'])
     my_obs.gamma_method()