Mirror of https://github.com/fjosw/pyerrors.git (synced 2025-03-15 23:00:25 +01:00)

Merge branch 'develop' into feature/irregularMC

Commit 913f682087: 3 changed files with 70 additions and 19 deletions
@@ -98,6 +98,9 @@ my_sum.details()
 
 The integrated autocorrelation time $\tau_\mathrm{int}$ and the autocorrelation function $\rho(W)$ can be monitored via the methods `pyerrors.obs.Obs.plot_tauint` and `pyerrors.obs.Obs.plot_rho`.
 
+If the parameter $S$ is set to zero it is assumed that the dataset does not exhibit any autocorrelation and the window size is chosen to be zero.
+In this case the error estimate is identical to the sample standard error.
+
 ### Exponential tails
 
 Slow modes in the Monte Carlo history can be accounted for by attaching an exponential tail to the autocorrelation function $\rho$ as suggested in [arXiv:1009.5228](https://arxiv.org/abs/1009.5228). The longest autocorrelation time in the history, $\tau_\mathrm{exp}$, can be passed to `gamma_method` as a parameter. In this case the automatic windowing procedure is bypassed and the parameter $S$ does not affect the error estimate.
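A minimal sketch of how these two options are used, assuming synthetic, uncorrelated data (the ensemble name and the numbers are purely illustrative, not part of the commit):

```python
import numpy as np
import pyerrors as pe

# Synthetic, uncorrelated Monte Carlo history (illustration only).
obs = pe.Obs([np.random.normal(1.0, 0.1, 1000)], ['ens'])

obs.gamma_method(S=0)         # no windowing; error equals the sample standard error
obs.gamma_method()            # automatic windowing with the default S=2.0
obs.gamma_method(tau_exp=10)  # attach an exponential tail instead of windowing
```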
@@ -155,20 +155,21 @@ class Obs:
         return res
 
     def gamma_method(self, **kwargs):
-        """Calculate the error and related properties of the Obs.
+        """Estimate the error and related properties of the Obs.
 
         Parameters
         ----------
         S : float
-            specifies a custom value for the parameter S (default 2.0), can be
-            a float or an array of floats for different ensembles
+            specifies a custom value for the parameter S (default 2.0).
+            If set to 0 it is assumed that the data exhibits no
+            autocorrelation. In this case the error estimate coincides
+            with the sample standard error.
         tau_exp : float
             positive value triggers the critical slowing down analysis
-            (default 0.0), can be a float or an array of floats for different
-            ensembles
+            (default 0.0).
         N_sigma : float
             number of standard deviations from zero until the tail is
-            attached to the autocorrelation function (default 1)
+            attached to the autocorrelation function (default 1).
         fft : bool
             determines whether the fft algorithm is used for the computation
             of the autocorrelation function (default True)
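The remaining keywords documented above can be combined in a single call; a hedged sketch, reusing `obs` from the example further up and with arbitrarily chosen values:

```python
# tau_exp > 0 triggers the critical slowing down analysis, N_sigma sets where
# the exponential tail is attached, and fft selects the algorithm used to
# compute the autocorrelation function (values here are arbitrary).
obs.gamma_method(tau_exp=20.0, N_sigma=2, fft=False)
```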
@@ -281,19 +282,26 @@ class Obs:
                         self.e_windowsize[e_name] = n
                         break
             else:
-                # Standard automatic windowing procedure
-                g_w = self.S[e_name] / np.log((2 * self.e_n_tauint[e_name][1:] + 1) / (2 * self.e_n_tauint[e_name][1:] - 1))
-                g_w = np.exp(- np.arange(1, w_max) / g_w) - g_w / np.sqrt(np.arange(1, w_max) * e_N)
-                for n in range(1, w_max):
-                    if n < w_max // 2 - 2:
-                        _compute_drho(n + 1)
-                    if g_w[n - 1] < 0 or n >= w_max - 1:
-                        self.e_tauint[e_name] = self.e_n_tauint[e_name][n] * (1 + (2 * n + 1) / e_N) / (1 + 1 / e_N)  # Bias correction hep-lat/0306017 eq. (49)
-                        self.e_dtauint[e_name] = self.e_n_dtauint[e_name][n]
-                        self.e_dvalue[e_name] = np.sqrt(2 * self.e_tauint[e_name] * e_gamma[e_name][0] * (1 + 1 / e_N) / e_N)
-                        self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt((n + 0.5) / e_N)
-                        self.e_windowsize[e_name] = n
-                        break
+                if self.S[e_name] == 0.0:
+                    self.e_tauint[e_name] = 0.5
+                    self.e_dtauint[e_name] = 0.0
+                    self.e_dvalue[e_name] = np.sqrt(e_gamma[e_name][0] / (e_N - 1))
+                    self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt(0.5 / e_N)
+                    self.e_windowsize[e_name] = 0
+                else:
+                    # Standard automatic windowing procedure
+                    tau = self.S[e_name] / np.log((2 * self.e_n_tauint[e_name][1:] + 1) / (2 * self.e_n_tauint[e_name][1:] - 1))
+                    g_w = np.exp(- np.arange(1, w_max) / tau) - tau / np.sqrt(np.arange(1, w_max) * e_N)
+                    for n in range(1, w_max):
+                        if n < w_max // 2 - 2:
+                            _compute_drho(n + 1)
+                        if g_w[n - 1] < 0 or n >= w_max - 1:
+                            self.e_tauint[e_name] = self.e_n_tauint[e_name][n] * (1 + (2 * n + 1) / e_N) / (1 + 1 / e_N)  # Bias correction hep-lat/0306017 eq. (49)
+                            self.e_dtauint[e_name] = self.e_n_dtauint[e_name][n]
+                            self.e_dvalue[e_name] = np.sqrt(2 * self.e_tauint[e_name] * e_gamma[e_name][0] * (1 + 1 / e_N) / e_N)
+                            self.e_ddvalue[e_name] = self.e_dvalue[e_name] * np.sqrt((n + 0.5) / e_N)
+                            self.e_windowsize[e_name] = n
+                            break
 
             self._dvalue += self.e_dvalue[e_name] ** 2
             self.ddvalue += (self.e_dvalue[e_name] * self.e_ddvalue[e_name]) ** 2
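The `else` branch retained above implements the automatic windowing procedure of hep-lat/0306017, cited in the code comment: for each window $W$ an effective autocorrelation time $\tau(W) = S / \ln\big(\frac{2\tau_\mathrm{int}(W)+1}{2\tau_\mathrm{int}(W)-1}\big)$ is computed, and the window is the first $W$ for which $g(W) = e^{-W/\tau(W)} - \tau(W)/\sqrt{W N}$ turns negative. The new $S=0$ branch short-circuits this search: without autocorrelation $\tau_\mathrm{int} = 0.5$ and the error reduces to $\sqrt{\Gamma(0)/(N-1)}$, the plain standard error of the mean.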
@@ -1547,6 +1555,25 @@ def load_object(path):
         return pickle.load(file)
 
 
+def import_jackknife(jacks, name):
+    """Imports jackknife samples and returns an Obs
+
+    Parameters
+    ----------
+    jacks : numpy.ndarray
+        numpy array containing the mean value as zeroth entry and
+        the N jackknife samples as first to Nth entry.
+    name : str
+        name of the ensemble the samples are defined on.
+    """
+    length = len(jacks) - 1
+    prj = (np.ones((length, length)) - (length - 1) * np.identity(length))
+    samples = jacks[1:] @ prj
+    new_obs = Obs([samples], [name])
+    new_obs._value = jacks[0]
+    return new_obs
+
+
 def merge_obs(list_of_obs):
     """Combine all observables in list_of_obs into one new observable
 
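The projection matrix in `import_jackknife` inverts the jackknife resampling exactly: with replicates $J_i = \frac{1}{N-1}\sum_{k \neq i} x_k$ one has $\sum_i J_i = \sum_k x_k$, so $(J\,\mathrm{prj})_m = \sum_i J_i - (N-1) J_m = x_m$. A standalone sanity check of this identity, assuming only numpy:

```python
import numpy as np

N = 5
x = np.random.normal(0.0, 1.0, N)
jacks = (x.sum() - x) / (N - 1)                   # leave-one-out means
prj = np.ones((N, N)) - (N - 1) * np.identity(N)  # same matrix as above
assert np.allclose(jacks @ prj, x)                # original samples recovered
```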
@@ -95,6 +95,18 @@ def test_gamma_method():
     assert test_obs.e_tauint['t'] - 10.5 <= test_obs.e_dtauint['t']
 
 
+def test_gamma_method_no_windowing():
+    for iteration in range(50):
+        obs = pe.Obs([np.random.normal(1.02, 0.02, 733 + np.random.randint(1000))], ['ens'])
+        obs.gamma_method(S=0)
+        assert obs.e_tauint['ens'] == 0.5
+        assert np.isclose(np.sqrt(np.var(obs.deltas['ens'], ddof=1) / obs.shape['ens']), obs.dvalue)
+        obs.gamma_method(S=1.1)
+        assert obs.e_tauint['ens'] > 0.5
+    with pytest.raises(Exception):
+        obs.gamma_method(S=-0.2)
+
+
 def test_gamma_method_persistance():
     my_obs = pe.Obs([np.random.rand(730)], ['t'])
     my_obs.gamma_method()
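The first assertion in the new test follows from the normalization $\tau_\mathrm{int} = \frac{1}{2} + \sum_{W \geq 1} \rho(W)$: with $S=0$ the window is zero, the sum is empty, and $\tau_\mathrm{int} = 0.5$ exactly. The second assertion checks that the resulting `dvalue` coincides with the sample standard error $\sqrt{\mathrm{Var}(x)/N}$, computed here with `ddof=1`.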
@@ -522,3 +534,12 @@ def test_jackknife():
     my_new_obs = my_obs + pe.Obs([full_data], ['test2'])
     with pytest.raises(Exception):
         my_new_obs.export_jackknife()
+
+
+def test_import_jackknife():
+    full_data = np.random.normal(1.105, 0.021, 754)
+    my_obs = pe.Obs([full_data], ['test'])
+    my_jacks = my_obs.export_jackknife()
+    reconstructed_obs = pe.import_jackknife(my_jacks, 'test')
+    assert my_obs == reconstructed_obs
+