Mirror of https://github.com/fjosw/pyerrors.git (synced 2025-03-15 14:50:25 +01:00)

Merge branch 'develop' into feature/rwf

Commit c939e49f73
6 changed files with 47 additions and 55 deletions
@@ -39,7 +39,7 @@ class Corr:
         region identified for this correlator.
         """

-        if isinstance(data_input, np.ndarray):  # Input is an array of Corrs
+        if isinstance(data_input, np.ndarray):

             # This only works, if the array fulfills the conditions below
             if not len(data_input.shape) == 2 and data_input.shape[0] == data_input.shape[1]:
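The ndarray branch above expects a two-dimensional, square array of Corr objects. (Note that the guard parses as (not len(data_input.shape) == 2) and ... due to operator precedence, so it leans on later checks for the non-square case.) A minimal usage sketch, assuming the pyerrors API at this commit (pe.Corr and pe.pseudo_Obs exposed at the package top level):

import numpy as np
import pyerrors as pe

# two scalar correlators on a hypothetical ensemble "ensA"
c_aa = pe.Corr([pe.pseudo_Obs(np.exp(-0.2 * t), 0.01, "ensA") for t in range(16)])
c_ab = pe.Corr([pe.pseudo_Obs(0.5 * np.exp(-0.2 * t), 0.01, "ensA") for t in range(16)])

# an N x N array of Corrs defines a matrix correlator; the shape check
# above guards exactly this case (2d and square)
mat = pe.Corr(np.array([[c_aa, c_ab], [c_ab, c_aa]]))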
@@ -95,7 +95,6 @@ class Corr:
         # An undefined timeslice is represented by the None object
         self.content = [None] * padding[0] + self.content + [None] * padding[1]
         self.T = len(self.content)

         self.prange = prange

-        self.gamma_method()
@@ -160,9 +159,6 @@ class Corr:
                 raise Exception("Vectors are of wrong shape!")
             if normalize:
                 vector_l, vector_r = vector_l / np.sqrt((vector_l @ vector_l)), vector_r / np.sqrt(vector_r @ vector_r)
-            # if (not (0.95 < vector_r @ vector_r < 1.05)) or (not (0.95 < vector_l @ vector_l < 1.05)):
-            #     print("Vectors are normalized before projection!")
-
             newcontent = [None if (item is None) else np.asarray([vector_l.T @ item @ vector_r]) for item in self.content]

         else:
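This hunk lives in the projection routine of Corr (Corr.projected in current pyerrors; treated as an assumption here), which contracts every timeslice matrix with a left and a right vector. A hedged sketch, continuing the matrix example above:

# with normalize=True the vectors are rescaled to unit norm before
# vector_l.T @ item @ vector_r is evaluated on each timeslice
vec = np.asarray([1.0, 1.0])
scalar_corr = mat.projected(vector_l=vec, vector_r=vec, normalize=True)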
@@ -35,6 +35,38 @@ def load_object(path):
         return pickle.load(file)


+def pseudo_Obs(value, dvalue, name, samples=1000):
+    """Generate an Obs object with given value, dvalue and name for test purposes
+
+    Parameters
+    ----------
+    value : float
+        central value of the Obs to be generated.
+    dvalue : float
+        error of the Obs to be generated.
+    name : str
+        name of the ensemble for which the Obs is to be generated.
+    samples : int
+        number of samples for the Obs (default 1000).
+    """
+    if dvalue <= 0.0:
+        return Obs([np.zeros(samples) + value], [name])
+    else:
+        for _ in range(100):
+            deltas = [np.random.normal(0.0, dvalue * np.sqrt(samples), samples)]
+            deltas -= np.mean(deltas)
+            deltas *= dvalue / np.sqrt((np.var(deltas) / samples)) / np.sqrt(1 + 3 / samples)
+            deltas += value
+            res = Obs(deltas, [name])
+            res.gamma_method(S=2, tau_exp=0)
+            if abs(res.dvalue - dvalue) < 1e-10 * dvalue:
+                break
+
+        res._value = float(value)
+
+    return res
+
+
 def gen_correlated_data(means, cov, name, tau=0.5, samples=1000):
     """ Generate observables with given covariance and autocorrelation times.
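A short usage sketch for the function added above (assuming pseudo_Obs is re-exported at the pyerrors top level, as in current releases). The loop draws the requested number of normal fluctuations, centers them, and rescales until the gamma method reproduces the requested error:

import pyerrors as pe

obs = pe.pseudo_Obs(0.5, 0.01, "ensA", samples=500)
print(obs.value)   # exactly 0.5 (res._value is set to the input value)
print(obs.dvalue)  # 0.01 up to the 1e-10 relative tolerance enforced in the loop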
@@ -1301,7 +1301,7 @@ def correlate(obs_a, obs_b):

    Keep in mind to only correlate primary observables which have not been reweighted
    yet. The reweighting has to be applied after correlating the observables.
-   Currently only works if ensembles are identical. This is not really necessary.
+   Currently only works if ensembles are identical (this is not strictly necessary).
    """

    if sorted(obs_a.names) != sorted(obs_b.names):
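A hedged sketch of the calling convention the docstring describes: both inputs are primary observables on identical ensembles, and reweighting would be applied only after this step. Continuing the sketches above:

o1 = pe.pseudo_Obs(1.0, 0.1, "ensA")
o2 = pe.pseudo_Obs(2.0, 0.2, "ensA")  # same ensemble name and sample count as o1
o12 = pe.correlate(o1, o2)            # fluctuations of o1 and o2 are now treated as correlated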
@@ -1461,38 +1461,6 @@ def covariance(obs1, obs2, correlation=False, **kwargs):
     return dvalue


-def pseudo_Obs(value, dvalue, name, samples=1000):
-    """Generate a pseudo Obs with given value, dvalue and name
-
-    Parameters
-    ----------
-    value : float
-        central value of the Obs to be generated.
-    dvalue : float
-        error of the Obs to be generated.
-    name : str
-        name of the ensemble for which the Obs is to be generated.
-    samples : int
-        number of samples for the Obs (default 1000).
-    """
-    if dvalue <= 0.0:
-        return Obs([np.zeros(samples) + value], [name])
-    else:
-        for _ in range(100):
-            deltas = [np.random.normal(0.0, dvalue * np.sqrt(samples), samples)]
-            deltas -= np.mean(deltas)
-            deltas *= dvalue / np.sqrt((np.var(deltas) / samples)) / np.sqrt(1 + 3 / samples)
-            deltas += value
-            res = Obs(deltas, [name])
-            res.gamma_method(S=2, tau_exp=0)
-            if abs(res.dvalue - dvalue) < 1e-10 * dvalue:
-                break
-
-        res._value = float(value)
-
-    return res
-
-
 def import_jackknife(jacks, name, idl=None):
     """Imports jackknife samples and returns an Obs
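The context lines also show import_jackknife, which inverts a jackknife resampling: per its docstring in pyerrors, jacks[0] carries the mean and jacks[1:] the jackknife samples. A hedged round-trip sketch, assuming the matching Obs.export_jackknife of current pyerrors is already available at this commit:

jacks = obs.export_jackknife()                # assumption: export counterpart exists here
rebuilt = pe.import_jackknife(jacks, "ensA")  # reconstructs an Obs from the samples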
@@ -1,7 +1,6 @@
 import numpy as np
 import pyerrors as pe
 import pytest
-import time

 np.random.seed(0)

@@ -1,7 +1,5 @@
 import autograd.numpy as np
 import os
-import random
-import string
 import copy
 import pyerrors as pe
 import pytest
@@ -142,7 +140,7 @@ def test_overloading_vectorization():
     assert [o.value for o in b / a] == [o.value for o in [b / p for p in a]]


-def test_gamma_method():
+def test_gamma_method_standard_data():
     for data in [np.tile([1, -1], 1000),
                  np.random.rand(100001),
                  np.zeros(1195),
@@ -285,7 +283,7 @@ def test_covariance_symmetry():
     assert np.abs(cov_ab) < test_obs1.dvalue * test_obs2.dvalue * (1 + 10 * np.finfo(np.float64).eps)


-def test_gamma_method():
+def test_gamma_method_uncorrelated():
     # Construct pseudo Obs with random shape
     value = np.random.normal(5, 10)
     dvalue = np.abs(np.random.normal(0, 1))
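With value and dvalue drawn at random in the context lines above, the renamed test checks the round trip that pseudo_Obs guarantees by construction; a hedged sketch of the core assertion:

test_obs = pe.pseudo_Obs(value, dvalue, 't')
test_obs.gamma_method()
assert abs(test_obs.dvalue - dvalue) < 1e-10 * dvalue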
@ -1,14 +1,14 @@
|
|||
import os,sys,inspect
|
||||
import os
|
||||
import sys
|
||||
import inspect
|
||||
import pyerrors as pe
|
||||
import pyerrors.input.sfcf as sfin
|
||||
import shutil
|
||||
|
||||
current_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
|
||||
parent_dir = os.path.dirname(current_dir)
|
||||
sys.path.insert(0, parent_dir)
|
||||
|
||||
import pyerrors as pe
|
||||
import pyerrors.input.openQCD as qcdin
|
||||
import pyerrors.input.sfcf as sfin
|
||||
import shutil
|
||||
|
||||
from time import sleep
|
||||
|
||||
def build_test_environment(env_type, cfgs, reps):
|
||||
if env_type == "o":
|
||||
|
@@ -26,7 +26,6 @@ def build_test_environment(env_type, cfgs, reps):


-

 def clean_test_environment(env_type, cfgs, reps):
     if env_type == "o":
         for i in range(1,reps):
@@ -39,7 +38,7 @@ def clean_test_environment(env_type, cfgs, reps):
         for i in range(2,cfgs+1):
             os.remove("tests/data/sfcf_test/data_c/data_c_r0/data_c_r0_n"+str(i))


 def test_o_bb():
     build_test_environment("o",5,3)
     f_1 = sfin.read_sfcf("tests/data/sfcf_test/data_o", "test", "f_1", quarks="lquark lquark", wf=0, wf2=0, version="2.0", corr_type="bb")
@@ -47,7 +46,7 @@ def test_o_bb():
     clean_test_environment("o",5,3)
     assert len(f_1) == 1
     assert f_1[0].value == 351.1941525454502


 def test_o_bi():
     build_test_environment("o",5,3)
     f_A = sfin.read_sfcf("tests/data/sfcf_test/data_o", "test", "f_A", quarks="lquark lquark", wf=0, version="2.0")
@@ -57,7 +56,7 @@ def test_o_bi():
     assert f_A[0].value == 65.4711887279723
     assert f_A[1].value == 1.0447210336915187
     assert f_A[2].value == -41.025094911185185


 def test_o_bib():
     build_test_environment("o",5,3)
     f_V0 = sfin.read_sfcf("tests/data/sfcf_test/data_o", "test", "F_V0", quarks="lquark lquark", wf=0, wf2=0, version="2.0", corr_type="bib")
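The sfcf tests above differ mainly in the corr_type argument of read_sfcf. Read alongside the data they load, "bb" selects boundary-to-boundary correlators such as f_1, the default "bi" boundary-to-bulk ones such as f_A, and "bib" boundary-bulk-boundary ones such as F_V0; this mapping is inferred from the tests, not stated in the diff. The call pattern, reformatted for readability:

f_1 = sfin.read_sfcf("tests/data/sfcf_test/data_o", "test", "f_1",
                     quarks="lquark lquark", wf=0, wf2=0,
                     version="2.0", corr_type="bb")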
@@ -122,4 +121,4 @@ def test_a_bib():
     assert len(f_V0) == 3
     assert f_V0[0] == 683.6776090085115
     assert f_V0[1] == 661.3188585582334
     assert f_V0[2] == 683.6776090081005