From 934d0912498f9951d34b803b8cab223b91f0d1cd Mon Sep 17 00:00:00 2001 From: Fabian Joswig Date: Fri, 8 Apr 2022 11:14:58 +0100 Subject: [PATCH 01/68] feat: _intersection_idx and _collapse_deltas_for_merge together with tests added. --- pyerrors/obs.py | 55 +++++++++++++++++++++++++++++++++++++++++++++++ tests/obs_test.py | 20 +++++++++++++++++ 2 files changed, 75 insertions(+) diff --git a/pyerrors/obs.py b/pyerrors/obs.py index b4eb6469..f04b96c9 100644 --- a/pyerrors/obs.py +++ b/pyerrors/obs.py @@ -972,6 +972,33 @@ def _merge_idx(idl): return sorted(set().union(*idl)) +def _intersection_idx(idl): + """Returns the intersection of all lists in idl as sorted list + + Parameters + ---------- + idl : list + List of lists or ranges. + """ + + # Use groupby to efficiently check whether all elements of idl are identical + try: + g = groupby(idl) + if next(g, True) and not next(g, False): + return idl[0] + except Exception: + pass + + if np.all([type(idx) is range for idx in idl]): + if len(set([idx[0] for idx in idl])) == 1: + idstart = max([idx.start for idx in idl]) + idstop = min([idx.stop for idx in idl]) + idstep = max([idx.step for idx in idl]) + return range(idstart, idstop, idstep) + + return sorted(set().intersection(*idl)) + + def _expand_deltas_for_merge(deltas, idx, shape, new_idx): """Expand deltas defined on idx to the list of configs that is defined by new_idx. New, empty entries are filled by 0. If idx and new_idx are of type range, the smallest @@ -999,6 +1026,34 @@ def _expand_deltas_for_merge(deltas, idx, shape, new_idx): return np.array([ret[new_idx[i] - new_idx[0]] for i in range(len(new_idx))]) +def _collapse_deltas_for_merge(deltas, idx, shape, new_idx): + """Collapse deltas defined on idx to the list of configs that is defined by new_idx. + If idx and new_idx are of type range, the smallest + common divisor of the step sizes is used as new step size. 
+ + Parameters + ---------- + deltas : list + List of fluctuations + idx : list + List or range of configs on which the deltas are defined. + Has to be a subset of new_idx and has to be sorted in ascending order. + shape : list + Number of configs in idx. + new_idx : list + List of configs that defines the new range, has to be sorted in ascending order. + """ + + if type(idx) is range and type(new_idx) is range: + if idx == new_idx: + return deltas + ret = np.zeros(new_idx[-1] - new_idx[0] + 1) + for i in range(shape): + if idx[i] in new_idx: + ret[idx[i] - new_idx[0]] = deltas[i] + return np.array([ret[new_idx[i] - new_idx[0]] for i in range(len(new_idx))]) + + def _filter_zeroes(deltas, idx, eps=Obs.filter_eps): """Filter out all configurations with vanishing fluctuation such that they do not contribute to the error estimate anymore. Returns the new deltas and diff --git a/tests/obs_test.py b/tests/obs_test.py index 381ecdcb..8ed497d6 100644 --- a/tests/obs_test.py +++ b/tests/obs_test.py @@ -515,6 +515,26 @@ def test_merge_idx(): assert pe.obs._merge_idx([range(500, 6050, 50), range(500, 6250, 250)]) == range(500, 6250, 50) +def test_intersection_idx(): + assert pe.obs._intersection_idx([range(1, 100), range(1, 100), range(1, 100)]) == range(1, 100) + assert pe.obs._intersection_idx([range(1, 100, 10), range(1, 100, 2)]) == range(1, 100, 10) + assert pe.obs._intersection_idx([range(10, 1010, 10), range(10, 1010, 50)]) == range(10, 1010, 50) + assert pe.obs._intersection_idx([range(500, 6050, 50), range(500, 6250, 250)]) == range(500, 6050, 250) + + +def test_intersection_collapse(): + range1 = range(1, 2000, 2) + range2 = range(2, 2001, 8) + + obs1 = pe.Obs([np.random.normal(1.0, 0.1, len(range1))], ["ens"], idl=[range1]) + obs_merge = obs1 + pe.Obs([np.random.normal(1.0, 0.1, len(range2))], ["ens"], idl=[range2]) + + intersection = pe.obs._intersection_idx([o.idl["ens"] for o in [obs1, obs_merge]]) + coll = 
pe.obs._collapse_deltas_for_merge(obs_merge.deltas["ens"], obs_merge.idl["ens"], len(obs_merge.idl["ens"]), range1) + + assert np.all(coll == obs1.deltas["ens"]) + + def test_irregular_error_propagation(): obs_list = [pe.Obs([np.random.rand(100)], ['t']), pe.Obs([np.random.rand(50)], ['t'], idl=[range(1, 100, 2)]), From eacc9b19a3b1f1f82e0f077698ce12031d27ba9a Mon Sep 17 00:00:00 2001 From: Fabian Joswig Date: Fri, 8 Apr 2022 11:44:01 +0100 Subject: [PATCH 02/68] feat: the correlation for two observables with different idls is now based on the intersection of two instead of the union. Tests added. --- pyerrors/obs.py | 19 ++++++++++--------- tests/obs_test.py | 26 ++++++++++++++++++++++++++ 2 files changed, 36 insertions(+), 9 deletions(-) diff --git a/pyerrors/obs.py b/pyerrors/obs.py index f04b96c9..7e1ed9e0 100644 --- a/pyerrors/obs.py +++ b/pyerrors/obs.py @@ -1472,8 +1472,8 @@ def _covariance_element(obs1, obs2): """Estimates the covariance of two Obs objects, neglecting autocorrelations.""" def calc_gamma(deltas1, deltas2, idx1, idx2, new_idx): - deltas1 = _expand_deltas_for_merge(deltas1, idx1, len(idx1), new_idx) - deltas2 = _expand_deltas_for_merge(deltas2, idx2, len(idx2), new_idx) + deltas1 = _collapse_deltas_for_merge(deltas1, idx1, len(idx1), new_idx) + deltas2 = _collapse_deltas_for_merge(deltas2, idx2, len(idx2), new_idx) return np.sum(deltas1 * deltas2) if set(obs1.names).isdisjoint(set(obs2.names)): @@ -1493,29 +1493,30 @@ def _covariance_element(obs1, obs2): for r_name in obs1.e_content[e_name]: if r_name not in obs2.e_content[e_name]: continue - idl_d[r_name] = _merge_idx([obs1.idl[r_name], obs2.idl[r_name]]) + idl_d[r_name] = _intersection_idx([obs1.idl[r_name], obs2.idl[r_name]]) gamma = 0.0 for r_name in obs1.e_content[e_name]: if r_name not in obs2.e_content[e_name]: continue + if len(idl_d[r_name]) == 0: + continue gamma += calc_gamma(obs1.deltas[r_name], obs2.deltas[r_name], obs1.idl[r_name], obs2.idl[r_name], idl_d[r_name]) if gamma == 
0.0: continue gamma_div = 0.0 - e_N = 0 for r_name in obs1.e_content[e_name]: if r_name not in obs2.e_content[e_name]: continue - gamma_div += calc_gamma(np.ones(obs1.shape[r_name]), np.ones(obs2.shape[r_name]), obs1.idl[r_name], obs2.idl[r_name], idl_d[r_name]) - e_N += len(idl_d[r_name]) - gamma /= max(gamma_div, 1.0) + if len(idl_d[r_name]) == 0: + continue + gamma_div += np.sqrt(calc_gamma(obs1.deltas[r_name], obs1.deltas[r_name], obs1.idl[r_name], obs1.idl[r_name], idl_d[r_name]) * calc_gamma(obs2.deltas[r_name], obs2.deltas[r_name], obs2.idl[r_name], obs2.idl[r_name], idl_d[r_name])) + gamma /= gamma_div - # Bias correction hep-lat/0306017 eq. (49) - dvalue += (1 + 1 / e_N) * gamma / e_N + dvalue += gamma for e_name in obs1.cov_names: diff --git a/tests/obs_test.py b/tests/obs_test.py index 8ed497d6..c2d161e4 100644 --- a/tests/obs_test.py +++ b/tests/obs_test.py @@ -749,6 +749,32 @@ def test_covariance_idl(): pe.covariance([obs1, obs2]) +def test_correlation_intersection_of_idls(): + range1 = range(1, 2000, 2) + range2 = range(2, 2001, 2) + + obs1 = pe.Obs([np.random.normal(1.0, 0.1, len(range1))], ["ens"], idl=[range1]) + obs2_a = 0.4 * pe.Obs([np.random.normal(1.0, 0.1, len(range1))], ["ens"], idl=[range1]) + 0.6 * obs1 + obs1.gamma_method() + obs2_a.gamma_method() + + cov1 = pe.covariance([obs1, obs2_a]) + corr1 = pe.covariance([obs1, obs2_a], correlation=True) + + obs2_b = obs2_a + pe.Obs([np.random.normal(1.0, 0.1, len(range2))], ["ens"], idl=[range2]) + obs2_b.gamma_method() + + cov2 = pe.covariance([obs1, obs2_b]) + corr2 = pe.covariance([obs1, obs2_b], correlation=True) + + assert np.isclose(corr1[0, 1], corr2[0, 1], atol=1e-14) + assert cov1[0, 1] > cov2[0, 1] + + obs2_c = pe.Obs([np.random.normal(1.0, 0.1, len(range2))], ["ens"], idl=[range2]) + obs2_c.gamma_method() + assert np.isclose(0, pe.covariance([obs1, obs2_c])[0, 1], atol=1e-14) + + def test_empty_obs(): o = pe.Obs([np.random.rand(100)], ['test']) q = o + pe.Obs([], [], means=[]) From 
99c98aeb9e3752529232c9db4bc873d33a684293 Mon Sep 17 00:00:00 2001 From: Fabian Joswig Date: Thu, 28 Apr 2022 11:25:17 +0100 Subject: [PATCH 03/68] fix: bug in obs._intersection_idx fixed. --- pyerrors/obs.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyerrors/obs.py b/pyerrors/obs.py index 7e1ed9e0..3437019a 100644 --- a/pyerrors/obs.py +++ b/pyerrors/obs.py @@ -996,7 +996,7 @@ def _intersection_idx(idl): idstep = max([idx.step for idx in idl]) return range(idstart, idstop, idstep) - return sorted(set().intersection(*idl)) + return sorted(set.intersection(*[set(o) for o in tt])) def _expand_deltas_for_merge(deltas, idx, shape, new_idx): From c7f17396e57418ec44626c6030bd829d43c75ea8 Mon Sep 17 00:00:00 2001 From: Fabian Joswig Date: Thu, 28 Apr 2022 14:07:42 +0100 Subject: [PATCH 04/68] fix: Another bug in range detection of obs._intersection_idx fixed. --- pyerrors/obs.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/pyerrors/obs.py b/pyerrors/obs.py index 3437019a..28c5c6be 100644 --- a/pyerrors/obs.py +++ b/pyerrors/obs.py @@ -1,5 +1,7 @@ import warnings import pickle +from math import gcd +from functools import reduce import numpy as np import autograd.numpy as anp # Thinly-wrapped numpy from autograd import jacobian @@ -981,6 +983,12 @@ def _intersection_idx(idl): List of lists or ranges. """ + def _lcm(*args): + """Returns the lowest common multiple of args. 
+ + From python 3.9 onwards the math library contains an lcm function.""" + return reduce(lambda a, b: a * b // gcd(a, b), args) + # Use groupby to efficiently check whether all elements of idl are identical try: g = groupby(idl) @@ -993,10 +1001,10 @@ def _intersection_idx(idl): if len(set([idx[0] for idx in idl])) == 1: idstart = max([idx.start for idx in idl]) idstop = min([idx.stop for idx in idl]) - idstep = max([idx.step for idx in idl]) + idstep = _lcm(*[idx.step for idx in idl]) return range(idstart, idstop, idstep) - return sorted(set.intersection(*[set(o) for o in tt])) + return sorted(set.intersection(*[set(o) for o in idl])) def _expand_deltas_for_merge(deltas, idx, shape, new_idx): From 217d310ca473bcc7ef050987b4afb78926be83cf Mon Sep 17 00:00:00 2001 From: Fabian Joswig Date: Thu, 28 Apr 2022 14:13:29 +0100 Subject: [PATCH 05/68] tests: test for _intersection_idx extended. --- tests/obs_test.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tests/obs_test.py b/tests/obs_test.py index c2d161e4..8f32bd77 100644 --- a/tests/obs_test.py +++ b/tests/obs_test.py @@ -521,6 +521,9 @@ def test_intersection_idx(): assert pe.obs._intersection_idx([range(10, 1010, 10), range(10, 1010, 50)]) == range(10, 1010, 50) assert pe.obs._intersection_idx([range(500, 6050, 50), range(500, 6250, 250)]) == range(500, 6050, 250) + for ids in [[list(range(1, 80, 3)), list(range(1, 100, 2))], [range(1, 80, 3), range(1, 100, 2), range(1, 100, 7)]]: + assert list(pe.obs._intersection_idx(ids)) == pe.obs._intersection_idx([list(o) for o in ids]) + def test_intersection_collapse(): range1 = range(1, 2000, 2) From 477573593c8bd6e25522ac230e243f62e10370bf Mon Sep 17 00:00:00 2001 From: Fabian Joswig Date: Thu, 19 May 2022 09:59:40 +0100 Subject: [PATCH 06/68] build: long description added to setup.py, autograd dependency changed to pypi version. 
--- setup.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 5a971d92..c28faf2c 100644 --- a/setup.py +++ b/setup.py @@ -1,13 +1,18 @@ #!/usr/bin/env python from setuptools import setup, find_packages +from pathlib import Path +this_directory = Path(__file__).parent +long_description = (this_directory / "README.md").read_text() setup(name='pyerrors', version='2.0.0', description='Error analysis for lattice QCD', + long_description=long_description, + long_description_content_type='text/markdown', author='Fabian Joswig', author_email='fabian.joswig@ed.ac.uk', packages=find_packages(), python_requires='>=3.6.0', - install_requires=['numpy>=1.16', 'autograd @ git+https://github.com/HIPS/autograd.git', 'numdifftools', 'matplotlib>=3.3', 'scipy', 'iminuit>=2', 'h5py'] + install_requires=['numpy>=1.16', 'autograd>=1.4', 'numdifftools', 'matplotlib>=3.3', 'scipy', 'iminuit>=2', 'h5py'] ) From cfe6d9c83559b7d7b14738e7c87d375093e65a16 Mon Sep 17 00:00:00 2001 From: Fabian Joswig Date: Thu, 19 May 2022 10:36:22 +0100 Subject: [PATCH 07/68] build: url, license and classifiers added to setup.py --- setup.py | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/setup.py b/setup.py index bc4533ef..ef3fb3f8 100644 --- a/setup.py +++ b/setup.py @@ -8,9 +8,22 @@ setup(name='pyerrors', description='Error analysis for lattice QCD', long_description=long_description, long_description_content_type='text/markdown', + url="https://github.com/fjosw/pyerrors", author='Fabian Joswig', author_email='fabian.joswig@ed.ac.uk', + license="MIT", packages=find_packages(), python_requires='>=3.6.0', - install_requires=['numpy>=1.16', 'autograd>=1.4', 'numdifftools', 'matplotlib>=3.3', 'scipy', 'iminuit>=2', 'h5py', 'lxml', 'python-rapidjson'] + install_requires=['numpy>=1.16', 'autograd>=1.4', 'numdifftools', 'matplotlib>=3.3', 'scipy', 'iminuit>=2', 'h5py', 'lxml', 'python-rapidjson'], + classifiers=[ + 'Development 
Status :: 5 - Production/Stable', + 'Intended Audience :: Science/Research', + 'License :: OSI Approved :: MIT License', + 'Programming Language :: Python :: 3', + 'Programming Language :: Python :: 3.7', + 'Programming Language :: Python :: 3.8', + 'Programming Language :: Python :: 3.9', + 'Programming Language :: Python :: 3.10', + 'Topic :: Scientific/Engineering' + ], ) From 16d288e3b9a38b5d36f5a2af8e1769d74c3065ad Mon Sep 17 00:00:00 2001 From: Fabian Joswig Date: Thu, 19 May 2022 10:43:11 +0100 Subject: [PATCH 08/68] docs: README updated. --- README.md | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index 67d59be0..e1f124ca 100644 --- a/README.md +++ b/README.md @@ -8,11 +8,12 @@ - **Bug reports:** https://github.com/fjosw/pyerrors/issues ## Installation -To install the current `develop` version run +To install the most recent release run +```bash +pip install pyerrors # Fresh install +pip install -U pyerrors # Upgrade +``` +to install the current `develop` version run ```bash pip install git+https://github.com/fjosw/pyerrors.git@develop ``` -to install the most recent release run -```bash -pip install git+https://github.com/fjosw/pyerrors.git@master -``` From 60f9bb6a89281ce7a7484db1c66819972191ef02 Mon Sep 17 00:00:00 2001 From: Fabian Joswig Date: Thu, 28 Apr 2022 15:30:18 +0100 Subject: [PATCH 09/68] tests: Additional tests for covariance with different idls added. 
--- tests/obs_test.py | 54 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 54 insertions(+) diff --git a/tests/obs_test.py b/tests/obs_test.py index 8f32bd77..6dd460c9 100644 --- a/tests/obs_test.py +++ b/tests/obs_test.py @@ -778,6 +778,60 @@ def test_correlation_intersection_of_idls(): assert np.isclose(0, pe.covariance([obs1, obs2_c])[0, 1], atol=1e-14) +def test_covariance_non_identical_objects(): + obs1 = pe.Obs([np.random.normal(1.0, 0.1, 1000), np.random.normal(1.0, 0.1, 1000), np.random.normal(1.0, 0.1, 732)], ["ens|r1", "ens|r2", "ens2"]) + obs1.gamma_method() + obs2 = obs1 + 1e-18 + obs2.gamma_method() + assert obs1 == obs2 + assert obs1 is not obs2 + assert np.allclose(np.ones((2, 2)), pe.covariance([obs1, obs2], correlation=True), atol=1e-14) + + +def test_covariance_additional_non_overlapping_data(): + range1 = range(1, 20, 2) + + data2 = np.random.normal(0.0, 0.1, len(range1)) + + obs1 = pe.Obs([np.random.normal(1.0, 0.1, len(range1))], ["ens"], idl=[range1]) + obs2_a = pe.Obs([data2], ["ens"], idl=[range1]) + obs1.gamma_method() + obs2_a.gamma_method() + + corr1 = pe.covariance([obs1, obs2_a], correlation=True) + + added_data = np.random.normal(0.0, 0.1, len(range1)) + added_data -= np.mean(added_data) - np.mean(data2) + data2_extended = np.ravel([data2, added_data], 'F') + + obs2_b = pe.Obs([data2_extended], ["ens"]) + obs2_b.gamma_method() + + corr2 = pe.covariance([obs1, obs2_b], correlation=True) + + assert np.isclose(corr1[0, 1], corr2[0, 1], atol=1e-14) + + +def test_coavariance_reorder_non_overlapping_data(): + range1 = range(1, 20, 2) + range2 = range(1, 41, 2) + + obs1 = pe.Obs([np.random.normal(1.0, 0.1, len(range1))], ["ens"], idl=[range1]) + obs2_b = pe.Obs([np.random.normal(1.0, 0.1, len(range2))], ["ens"], idl=[range2]) + obs1.gamma_method() + obs2_b.gamma_method() + + corr1 = pe.covariance([obs1, obs2_b], correlation=True) + + deltas = list(obs2_b.deltas['ens'][:len(range1)]) + sorted(obs2_b.deltas['ens'][len(range1):]) 
+ obs2_a = pe.Obs([obs2_b.value + np.array(deltas)], ["ens"], idl=[range2]) + obs2_a.gamma_method() + + corr2 = pe.covariance([obs1, obs2_a], correlation=True) + + assert np.isclose(corr1[0, 1], corr2[0, 1], atol=1e-14) + + def test_empty_obs(): o = pe.Obs([np.random.rand(100)], ['test']) q = o + pe.Obs([], [], means=[]) From 9011adb0de58f5fbc132a9c5f926e5177f70c71c Mon Sep 17 00:00:00 2001 From: Fabian Joswig Date: Thu, 19 May 2022 13:53:33 +0100 Subject: [PATCH 10/68] tests: additional test added which checks that merge and intersection of idls agree for identical idls. --- tests/obs_test.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/tests/obs_test.py b/tests/obs_test.py index 6dd460c9..82b5ffec 100644 --- a/tests/obs_test.py +++ b/tests/obs_test.py @@ -524,6 +524,12 @@ def test_intersection_idx(): for ids in [[list(range(1, 80, 3)), list(range(1, 100, 2))], [range(1, 80, 3), range(1, 100, 2), range(1, 100, 7)]]: assert list(pe.obs._intersection_idx(ids)) == pe.obs._intersection_idx([list(o) for o in ids]) +def test_merge_intersection(): + for idl_list in [[range(1, 100), range(1, 100), range(1, 100)], + [range(4, 80, 6), range(4, 80, 6)], + [[0, 2, 8, 19, 205], [0, 2, 8, 19, 205]]]: + assert pe.obs._merge_idx(idl_list) == pe.obs._intersection_idx(idl_list) + def test_intersection_collapse(): range1 = range(1, 2000, 2) From bb0236a556109b43d6873cd0c988d268c6e9050b Mon Sep 17 00:00:00 2001 From: Fabian Joswig Date: Thu, 19 May 2022 14:09:05 +0100 Subject: [PATCH 11/68] tests: test covariance vs numpy added.
--- tests/obs_test.py | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/tests/obs_test.py b/tests/obs_test.py index 82b5ffec..55a23abd 100644 --- a/tests/obs_test.py +++ b/tests/obs_test.py @@ -648,6 +648,26 @@ def test_covariance_is_variance(): assert np.isclose(test_obs.dvalue ** 2, pe.covariance([test_obs, test_obs])[0, 1]) +def test_covariance_vs_numpy(): + N = 1078 + data1 = np.random.normal(2.5, 0.2, N) + data2 = np.random.normal(0.5, 0.08, N) + data3 = np.random.normal(-178, 5, N) + uncorr = np.row_stack([data1, data2, data3]) + corr = np.random.multivariate_normal([0.0, 17, -0.0487], [[1.0, 0.6, -0.22], [0.6, 0.8, 0.01], [-0.22, 0.01, 1.9]], N).T + + for X in [uncorr, corr]: + obs1 = pe.Obs([X[0]], ["ens1"]) + obs2 = pe.Obs([X[1]], ["ens1"]) + obs3 = pe.Obs([X[2]], ["ens1"]) + obs1.gamma_method(S=0.0) + obs2.gamma_method(S=0.0) + obs3.gamma_method(S=0.0) + pe_cov = pe.covariance([obs1, obs2, obs3]) + np_cov = np.cov(X) / N + assert np.allclose(pe_cov, np_cov, atol=1e-14) + + def test_covariance_symmetry(): value1 = np.random.normal(5, 10) dvalue1 = np.abs(np.random.normal(0, 1)) From 5f7adf839f32626ac6c8a9ff99c7c6a894baaffa Mon Sep 17 00:00:00 2001 From: Fabian Joswig Date: Thu, 19 May 2022 14:39:57 +0100 Subject: [PATCH 12/68] build: json.gz format version bumped to 1.1 --- pyerrors/input/json.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyerrors/input/json.py b/pyerrors/input/json.py index ac7963cc..31639287 100644 --- a/pyerrors/input/json.py +++ b/pyerrors/input/json.py @@ -165,7 +165,7 @@ def create_json_string(ol, description='', indent=1): d = {} d['program'] = 'pyerrors %s' % (pyerrorsversion.__version__) - d['version'] = '1.0' + d['version'] = '1.1' d['who'] = getpass.getuser() d['date'] = datetime.datetime.now().astimezone().strftime('%Y-%m-%d %H:%M:%S %z') d['host'] = socket.gethostname() + ', ' + platform.platform() From d531b9823442b33a3b2c0d5cc02147d384984930 Mon Sep 17 00:00:00 2001 From: Fabian 
Joswig Date: Thu, 19 May 2022 15:50:47 +0100 Subject: [PATCH 13/68] fix: fixed value assignment for json format reconstruction when the average of the r_values is not equal to value --- pyerrors/input/json.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pyerrors/input/json.py b/pyerrors/input/json.py index 31639287..69fa2c2d 100644 --- a/pyerrors/input/json.py +++ b/pyerrors/input/json.py @@ -294,6 +294,7 @@ def _parse_json_dict(json_dict, verbose=True, full_output=False): if od: ret = Obs([[ddi[0] + values[0] for ddi in di] for di in od['deltas']], od['names'], idl=od['idl']) + ret._value = values[0] ret.is_merged = od['is_merged'] else: ret = Obs([], [], means=[]) @@ -319,6 +320,7 @@ def _parse_json_dict(json_dict, verbose=True, full_output=False): for i in range(layout): if od: ret.append(Obs([list(di[:, i] + values[i]) for di in od['deltas']], od['names'], idl=od['idl'])) + ret[-1]._value = values[i] ret[-1].is_merged = od['is_merged'] else: ret.append(Obs([], [], means=[])) @@ -346,6 +348,7 @@ def _parse_json_dict(json_dict, verbose=True, full_output=False): for i in range(N): if od: ret.append(Obs([di[:, i] + values[i] for di in od['deltas']], od['names'], idl=od['idl'])) + ret[-1]._value = values[i] ret[-1].is_merged = od['is_merged'] else: ret.append(Obs([], [], means=[])) From e9184d836891b5e295fce1973725d9517d04f1d2 Mon Sep 17 00:00:00 2001 From: Fabian Joswig Date: Thu, 19 May 2022 15:51:17 +0100 Subject: [PATCH 14/68] tests: additional tests for json format added. 
--- tests/json_io_test.py | 52 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 52 insertions(+) diff --git a/tests/json_io_test.py b/tests/json_io_test.py index d13f2675..6fac73b8 100644 --- a/tests/json_io_test.py +++ b/tests/json_io_test.py @@ -353,3 +353,55 @@ def test_dobsio(): for j in range(len(or1)): o = or1[j] - or2[j] assert(o.is_zero()) + + +def test_reconstruct_non_linear_r_obs(tmp_path): + to = pe.Obs([np.random.rand(500), np.random.rand(500), np.random.rand(111)], + ["e|r1", "e|r2", "my_new_ensemble_54^£$|8'[@124435%6^7&()~#"], + idl=[range(1, 501), range(0, 500), range(1, 999, 9)]) + to = np.log(to ** 2) / to + to.dump((tmp_path / "test_equality").as_posix()) + ro = pe.input.json.load_json((tmp_path / "test_equality").as_posix()) + assert assert_equal_Obs(to, ro) + + +def test_reconstruct_non_linear_r_obs_list(tmp_path): + to = pe.Obs([np.random.rand(500), np.random.rand(500), np.random.rand(111)], + ["e|r1", "e|r2", "my_new_ensemble_54^£$|8'[@124435%6^7&()~#"], + idl=[range(1, 501), range(0, 500), range(1, 999, 9)]) + to = np.log(to ** 2) / to + for to_list in [[to, to, to], np.array([to, to, to])]: + pe.input.json.dump_to_json(to_list, (tmp_path / "test_equality_list").as_posix()) + ro_list = pe.input.json.load_json((tmp_path / "test_equality_list").as_posix()) + for oa, ob in zip(to_list, ro_list): + assert assert_equal_Obs(oa, ob) + + +def assert_equal_Obs(to, ro): + for kw in ["N", "cov_names", "covobs", "ddvalue", "dvalue", "e_content", + "e_names", "idl", "mc_names", "names", + "reweighted", "shape", "tag"]: + if not getattr(to, kw) == getattr(ro, kw): + print(kw, "does not match.") + return False + + for kw in ["value"]: + if not np.isclose(getattr(to, kw), getattr(ro, kw), atol=1e-14): + print(kw, "does not match.") + return False + + + for kw in ["r_values", "deltas"]: + for (k, v), (k2, v2) in zip(getattr(to, kw).items(), getattr(ro, kw).items()): + assert k == k2 + if not np.allclose(v, v2, atol=1e-14): + print(kw, "does not 
match.") + return False + + m_to = getattr(to, "is_merged") + m_ro = getattr(ro, "is_merged") + if not m_to == m_ro: + if not (all(value is False for value in m_ro.values()) and all(value is False for value in m_to.values())): + print("is_merged", "does not match.") + return False + return True From 1054f749e9769f02c9f66c3b5dbf00fb9134c178 Mon Sep 17 00:00:00 2001 From: Fabian Joswig Date: Fri, 20 May 2022 11:00:52 +0100 Subject: [PATCH 15/68] docs: check for latex availability added to examples. --- examples/01_basic_example.ipynb | 4 +++- examples/02_correlators.ipynb | 8 +++++--- examples/03_pcac_example.ipynb | 4 +++- examples/04_fit_example.ipynb | 10 ++++++---- examples/05_matrix_operations.ipynb | 4 ++-- examples/06_gevp.ipynb | 7 +++++-- 6 files changed, 24 insertions(+), 13 deletions(-) diff --git a/examples/01_basic_example.ipynb b/examples/01_basic_example.ipynb index 834f9d39..df1d76f2 100644 --- a/examples/01_basic_example.ipynb +++ b/examples/01_basic_example.ipynb @@ -21,6 +21,7 @@ "outputs": [], "source": [ "import numpy as np\n", + "import matplotlib\n", "import matplotlib.pyplot as plt\n", "import pyerrors as pe" ] @@ -32,7 +33,8 @@ "outputs": [], "source": [ "plt.style.use('./base_style.mplstyle')\n", - "plt.rc('text', usetex=True)" + "usetex = matplotlib.checkdep_usetex(True)\n", + "plt.rc('text', usetex=usetex)" ] }, { diff --git a/examples/02_correlators.ipynb b/examples/02_correlators.ipynb index a647162e..e2d2a09a 100644 --- a/examples/02_correlators.ipynb +++ b/examples/02_correlators.ipynb @@ -8,6 +8,7 @@ "outputs": [], "source": [ "import numpy as np\n", + "import matplotlib\n", "import matplotlib.pyplot as plt\n", "import pyerrors as pe" ] @@ -20,7 +21,8 @@ "outputs": [], "source": [ "plt.style.use('./base_style.mplstyle')\n", - "plt.rc('text', usetex=True)" + "usetex = matplotlib.checkdep_usetex(True)\n", + "plt.rc('text', usetex=usetex)" ] }, { @@ -480,7 +482,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3", + 
"display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, @@ -494,7 +496,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.8.8" + "version": "3.8.10" } }, "nbformat": 4, diff --git a/examples/03_pcac_example.ipynb b/examples/03_pcac_example.ipynb index bf0f0b02..835062a8 100644 --- a/examples/03_pcac_example.ipynb +++ b/examples/03_pcac_example.ipynb @@ -7,6 +7,7 @@ "outputs": [], "source": [ "import numpy as np\n", + "import matplotlib\n", "import matplotlib.pyplot as plt\n", "import pyerrors as pe" ] @@ -18,7 +19,8 @@ "outputs": [], "source": [ "plt.style.use('./base_style.mplstyle')\n", - "plt.rc('text', usetex=True)" + "usetex = matplotlib.checkdep_usetex(True)\n", + "plt.rc('text', usetex=usetex)" ] }, { diff --git a/examples/04_fit_example.ipynb b/examples/04_fit_example.ipynb index 026cca2d..72d62e9e 100644 --- a/examples/04_fit_example.ipynb +++ b/examples/04_fit_example.ipynb @@ -6,9 +6,10 @@ "metadata": {}, "outputs": [], "source": [ - "import pyerrors as pe\n", "import numpy as np\n", - "import matplotlib.pyplot as plt" + "import matplotlib\n", + "import matplotlib.pyplot as plt\n", + "import pyerrors as pe" ] }, { @@ -18,7 +19,8 @@ "outputs": [], "source": [ "plt.style.use('./base_style.mplstyle')\n", - "plt.rc('text', usetex=True)" + "usetex = matplotlib.checkdep_usetex(True)\n", + "plt.rc('text', usetex=usetex)" ] }, { @@ -439,7 +441,7 @@ "hash": "916dbcbb3f70747c44a77c7bcd40155683ae19c65e1c03b4aa3499c5328201f1" }, "kernelspec": { - "display_name": "Python 3.8.10 64-bit", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, diff --git a/examples/05_matrix_operations.ipynb b/examples/05_matrix_operations.ipynb index a49a7ee0..926d1734 100644 --- a/examples/05_matrix_operations.ipynb +++ b/examples/05_matrix_operations.ipynb @@ -393,7 +393,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 3 (ipykernel)", 
"language": "python", "name": "python3" }, @@ -407,7 +407,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.8.8" + "version": "3.8.10" } }, "nbformat": 4, diff --git a/examples/06_gevp.ipynb b/examples/06_gevp.ipynb index e969008c..268346f6 100644 --- a/examples/06_gevp.ipynb +++ b/examples/06_gevp.ipynb @@ -7,6 +7,7 @@ "metadata": {}, "outputs": [], "source": [ + "import matplotlib\n", "import matplotlib.pyplot as plt\n", "import pyerrors as pe" ] @@ -18,7 +19,9 @@ "metadata": {}, "outputs": [], "source": [ - "plt.style.use('./base_style.mplstyle'); plt.rc('text', usetex=True)" + "plt.style.use('./base_style.mplstyle')\n", + "usetex = matplotlib.checkdep_usetex(True)\n", + "plt.rc('text', usetex=usetex)" ] }, { @@ -305,7 +308,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, From 5eae3d6683e7b9eee0d7ad7b78523f1468f662a3 Mon Sep 17 00:00:00 2001 From: Fabian Joswig Date: Fri, 20 May 2022 11:07:03 +0100 Subject: [PATCH 16/68] docs: start bender badge added. 
--- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index e1f124ca..d9f6a26b 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -[![flake8](https://github.com/fjosw/pyerrors/actions/workflows/flake8.yml/badge.svg)](https://github.com/fjosw/pyerrors/actions/workflows/flake8.yml) [![pytest](https://github.com/fjosw/pyerrors/actions/workflows/pytest.yml/badge.svg)](https://github.com/fjosw/pyerrors/actions/workflows/pytest.yml) [![examples](https://github.com/fjosw/pyerrors/actions/workflows/examples.yml/badge.svg)](https://github.com/fjosw/pyerrors/actions/workflows/examples.yml) [![docs](https://github.com/fjosw/pyerrors/actions/workflows/docs.yml/badge.svg)](https://github.com/fjosw/pyerrors/actions/workflows/docs.yml) [![](https://img.shields.io/badge/python-3.7+-blue.svg)](https://www.python.org/downloads/) [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT) +[![flake8](https://github.com/fjosw/pyerrors/actions/workflows/flake8.yml/badge.svg)](https://github.com/fjosw/pyerrors/actions/workflows/flake8.yml) [![pytest](https://github.com/fjosw/pyerrors/actions/workflows/pytest.yml/badge.svg)](https://github.com/fjosw/pyerrors/actions/workflows/pytest.yml) [![examples](https://github.com/fjosw/pyerrors/actions/workflows/examples.yml/badge.svg)](https://github.com/fjosw/pyerrors/actions/workflows/examples.yml) [![docs](https://github.com/fjosw/pyerrors/actions/workflows/docs.yml/badge.svg)](https://github.com/fjosw/pyerrors/actions/workflows/docs.yml) [![](https://img.shields.io/badge/python-3.7+-blue.svg)](https://www.python.org/downloads/) [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT) [![Binder](https://mybinder.org/badge_logo.svg)](https://mybinder.org/v2/gh/fjosw/pyerrors/HEAD?labpath=examples) # pyerrors `pyerrors` is a python package for error computation and propagation of Markov chain Monte 
Carlo data. From e636e0268813f649f06df9f19eb4174987020332 Mon Sep 17 00:00:00 2001 From: Fabian Joswig Date: Sun, 22 May 2022 11:19:13 +0100 Subject: [PATCH 17/68] docs: example and docs badges removed from README --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index d9f6a26b..225d8128 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -[![flake8](https://github.com/fjosw/pyerrors/actions/workflows/flake8.yml/badge.svg)](https://github.com/fjosw/pyerrors/actions/workflows/flake8.yml) [![pytest](https://github.com/fjosw/pyerrors/actions/workflows/pytest.yml/badge.svg)](https://github.com/fjosw/pyerrors/actions/workflows/pytest.yml) [![examples](https://github.com/fjosw/pyerrors/actions/workflows/examples.yml/badge.svg)](https://github.com/fjosw/pyerrors/actions/workflows/examples.yml) [![docs](https://github.com/fjosw/pyerrors/actions/workflows/docs.yml/badge.svg)](https://github.com/fjosw/pyerrors/actions/workflows/docs.yml) [![](https://img.shields.io/badge/python-3.7+-blue.svg)](https://www.python.org/downloads/) [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT) [![Binder](https://mybinder.org/badge_logo.svg)](https://mybinder.org/v2/gh/fjosw/pyerrors/HEAD?labpath=examples) +[![flake8](https://github.com/fjosw/pyerrors/actions/workflows/flake8.yml/badge.svg)](https://github.com/fjosw/pyerrors/actions/workflows/flake8.yml) [![pytest](https://github.com/fjosw/pyerrors/actions/workflows/pytest.yml/badge.svg)](https://github.com/fjosw/pyerrors/actions/workflows/pytest.yml) [![](https://img.shields.io/badge/python-3.7+-blue.svg)](https://www.python.org/downloads/) [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT) [![Binder](https://mybinder.org/badge_logo.svg)](https://mybinder.org/v2/gh/fjosw/pyerrors/HEAD?labpath=examples) # pyerrors `pyerrors` is a python package for error computation and 
propagation of Markov chain Monte Carlo data. From 9666011781f1ffd7ba9f040279ddbb3af5b4f5e6 Mon Sep 17 00:00:00 2001 From: Fabian Joswig Date: Sun, 22 May 2022 11:38:16 +0100 Subject: [PATCH 18/68] docs: reference to examples and binder link added to docs. --- pyerrors/__init__.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/pyerrors/__init__.py b/pyerrors/__init__.py index 6ec89a04..ef257c86 100644 --- a/pyerrors/__init__.py +++ b/pyerrors/__init__.py @@ -8,6 +8,8 @@ It is based on the gamma method [arXiv:hep-lat/0306017](https://arxiv.org/abs/he - non-linear fits with x- and y-errors and exact linear error propagation based on automatic differentiation as introduced in [arXiv:1809.01289](https://arxiv.org/abs/1809.01289). - real and complex matrix operations and their error propagation based on automatic differentiation (Matrix inverse, Cholesky decomposition, calculation of eigenvalues and eigenvectors, singular value decomposition...). +More detailed examples can found in the [GitHub repository](https://github.com/fjosw/pyerrors/tree/develop/examples) 
[![badge](https://img.shields.io/badge/-try%20it%20out-579ACA.svg?logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAFkAAABZCAMAAABi1XidAAAB8lBMVEX///9XmsrmZYH1olJXmsr1olJXmsrmZYH1olJXmsr1olJXmsrmZYH1olL1olJXmsr1olJXmsrmZYH1olL1olJXmsrmZYH1olJXmsr1olL1olJXmsrmZYH1olL1olJXmsrmZYH1olL1olL0nFf1olJXmsrmZYH1olJXmsq8dZb1olJXmsrmZYH1olJXmspXmspXmsr1olL1olJXmsrmZYH1olJXmsr1olL1olJXmsrmZYH1olL1olLeaIVXmsrmZYH1olL1olL1olJXmsrmZYH1olLna31Xmsr1olJXmsr1olJXmsrmZYH1olLqoVr1olJXmsr1olJXmsrmZYH1olL1olKkfaPobXvviGabgadXmsqThKuofKHmZ4Dobnr1olJXmsr1olJXmspXmsr1olJXmsrfZ4TuhWn1olL1olJXmsqBi7X1olJXmspZmslbmMhbmsdemsVfl8ZgmsNim8Jpk8F0m7R4m7F5nLB6jbh7jbiDirOEibOGnKaMhq+PnaCVg6qWg6qegKaff6WhnpKofKGtnomxeZy3noG6dZi+n3vCcpPDcpPGn3bLb4/Mb47UbIrVa4rYoGjdaIbeaIXhoWHmZYHobXvpcHjqdHXreHLroVrsfG/uhGnuh2bwj2Hxk17yl1vzmljzm1j0nlX1olL3AJXWAAAAbXRSTlMAEBAQHx8gICAuLjAwMDw9PUBAQEpQUFBXV1hgYGBkcHBwcXl8gICAgoiIkJCQlJicnJ2goKCmqK+wsLC4usDAwMjP0NDQ1NbW3Nzg4ODi5+3v8PDw8/T09PX29vb39/f5+fr7+/z8/Pz9/v7+zczCxgAABC5JREFUeAHN1ul3k0UUBvCb1CTVpmpaitAGSLSpSuKCLWpbTKNJFGlcSMAFF63iUmRccNG6gLbuxkXU66JAUef/9LSpmXnyLr3T5AO/rzl5zj137p136BISy44fKJXuGN/d19PUfYeO67Znqtf2KH33Id1psXoFdW30sPZ1sMvs2D060AHqws4FHeJojLZqnw53cmfvg+XR8mC0OEjuxrXEkX5ydeVJLVIlV0e10PXk5k7dYeHu7Cj1j+49uKg7uLU61tGLw1lq27ugQYlclHC4bgv7VQ+TAyj5Zc/UjsPvs1sd5cWryWObtvWT2EPa4rtnWW3JkpjggEpbOsPr7F7EyNewtpBIslA7p43HCsnwooXTEc3UmPmCNn5lrqTJxy6nRmcavGZVt/3Da2pD5NHvsOHJCrdc1G2r3DITpU7yic7w/7Rxnjc0kt5GC4djiv2Sz3Fb2iEZg41/ddsFDoyuYrIkmFehz0HR2thPgQqMyQYb2OtB0WxsZ3BeG3+wpRb1vzl2UYBog8FfGhttFKjtAclnZYrRo9ryG9uG/FZQU4AEg8ZE9LjGMzTmqKXPLnlWVnIlQQTvxJf8ip7VgjZjyVPrjw1te5otM7RmP7xm+sK2Gv9I8Gi++BRbEkR9EBw8zRUcKxwp73xkaLiqQb+kGduJTNHG72zcW9LoJgqQxpP3/Tj//c3yB0tqzaml05/+orHLksVO+95kX7/7qgJvnjlrfr2Ggsyx0eoy9uPzN5SPd86aXggOsEKW2Prz7du3VID3/tzs/sSRs2w7ovVHKtjrX2pd7ZMlTxAYfBAL9jiDwfLkq55Tm7ifhMlTGPyCAs7RFRhn47JnlcB9RM5T97ASuZXIcVNuUDIndpDbdsfrqsOppeXl5Y+XVKdjFCTh+zGaVuj0d9zy05PPK3QzBamxdwtTCrzyg/2Rvf2EstUjordGwa/kx9mSJLr8mLLtCW8HHGJc2R5hS219IiF6PnTusOqcMl57gm0Z8kanKMAQg0qSyuZfn7z
ItsbGyO9QlnxY0eCuD1XL2ys/MsrQhltE7Ug0uFOzufJFE2PxBo/YAx8XPPdDwWN0MrDRYIZF0mSMKCNHgaIVFoBbNoLJ7tEQDKxGF0kcLQimojCZopv0OkNOyWCCg9XMVAi7ARJzQdM2QUh0gmBozjc3Skg6dSBRqDGYSUOu66Zg+I2fNZs/M3/f/Grl/XnyF1Gw3VKCez0PN5IUfFLqvgUN4C0qNqYs5YhPL+aVZYDE4IpUk57oSFnJm4FyCqqOE0jhY2SMyLFoo56zyo6becOS5UVDdj7Vih0zp+tcMhwRpBeLyqtIjlJKAIZSbI8SGSF3k0pA3mR5tHuwPFoa7N7reoq2bqCsAk1HqCu5uvI1n6JuRXI+S1Mco54YmYTwcn6Aeic+kssXi8XpXC4V3t7/ADuTNKaQJdScAAAAAElFTkSuQmCC)](https://mybinder.org/v2/gh/fjosw/pyerrors/HEAD?labpath=examples). + There exist similar publicly available implementations of gamma method error analysis suites in [Fortran](https://gitlab.ift.uam-csic.es/alberto/aderrors), [Julia](https://gitlab.ift.uam-csic.es/alberto/aderrors.jl) and [Python](https://github.com/mbruno46/pyobs). ## Basic example @@ -443,8 +445,10 @@ Julia I/O routines for the json.gz format, compatible with [ADerrors.jl](https:/ # Citing If you use `pyerrors` for research that leads to a publication please consider citing: - Ulli Wolff, *Monte Carlo errors with less errors*. Comput.Phys.Commun. 156 (2004) 143-153, Comput.Phys.Commun. 176 (2007) 383 (erratum). -- Stefan Schaefer, Rainer Sommer, Francesco Virotta, *Critical slowing down and error analysis in lattice QCD simulations*. Nucl.Phys.B 845 (2011) 93-119. - Alberto Ramos, *Automatic differentiation for error analysis of Monte Carlo data*. Comput.Phys.Commun. 238 (2019) 19-35. +and +- Stefan Schaefer, Rainer Sommer, Francesco Virotta, *Critical slowing down and error analysis in lattice QCD simulations*. Nucl.Phys.B 845 (2011) 93-119. +where applicable. ''' from .obs import * from .correlators import * From f62581ba698b3feace96ead855ecafbdbbc65790 Mon Sep 17 00:00:00 2001 From: Fabian Joswig Date: Sun, 22 May 2022 12:02:33 +0100 Subject: [PATCH 19/68] ci: workflow for caching of binder build added. 
--- .github/workflows/binder.yml | 15 +++++++++++++++ .github/workflows/pytest.yml | 6 +++--- 2 files changed, 18 insertions(+), 3 deletions(-) create mode 100644 .github/workflows/binder.yml diff --git a/.github/workflows/binder.yml b/.github/workflows/binder.yml new file mode 100644 index 00000000..460f0fef --- /dev/null +++ b/.github/workflows/binder.yml @@ -0,0 +1,15 @@ +name: binder +on: + push: + branches: + - develop + +jobs: + Create-MyBinderOrg-Cache: + runs-on: ubuntu-latest + steps: + - name: cache binder build on mybinder.org + uses: jupyterhub/repo2docker-action@master + with: + NO_PUSH: true + MYBINDERORG_TAG: ${{ github.event.ref }} # This builds the container on mybinder.org with the branch that was pushed on. diff --git a/.github/workflows/pytest.yml b/.github/workflows/pytest.yml index 4c074906..9db158d3 100644 --- a/.github/workflows/pytest.yml +++ b/.github/workflows/pytest.yml @@ -13,15 +13,15 @@ jobs: pytest: runs-on: ${{ matrix.os }} strategy: - fail-fast: true + fail-fast: true matrix: os: [ubuntu-latest] python-version: ["3.7", "3.8", "3.9", "3.10"] include: - os: macos-latest - python-version: 3.9 + python-version: 3.9 - os: windows-latest - python-version: 3.9 + python-version: 3.9 steps: - name: Checkout source From 9ad0146d961be40b8c08f0f66266705c6e32ff97 Mon Sep 17 00:00:00 2001 From: Fabian Joswig Date: Wed, 25 May 2022 14:32:49 +0100 Subject: [PATCH 20/68] fix: pinv replaced by inv for inversion of hessian in fit module, exceptions and warnings added. 
--- pyerrors/fits.py | 20 ++++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/pyerrors/fits.py b/pyerrors/fits.py index 420d3da0..4faf237d 100644 --- a/pyerrors/fits.py +++ b/pyerrors/fits.py @@ -263,10 +263,18 @@ def total_least_squares(x, y, func, silent=False, **kwargs): output.chisquare_by_expected_chisquare) fitp = out.beta + hess = jacobian(jacobian(odr_chisquare))(np.concatenate((fitp, out.xplus.ravel()))) + condn = np.linalg.cond(hess) + if condn > 1e8: + warnings.warn("Hessian matrix might be ill-conditioned ({0:1.2e}), error propagation might be unreliable.".format(condn), RuntimeWarning) try: - hess_inv = np.linalg.pinv(jacobian(jacobian(odr_chisquare))(np.concatenate((fitp, out.xplus.ravel())))) + hess_inv = np.linalg.inv(hess) except TypeError: raise Exception("It is required to use autograd.numpy instead of numpy within fit functions, see the documentation for details.") from None + except np.linalg.LinAlgError: + raise Exception("Cannot invert hessian matrix.") + except Exception: + raise Exception("Unkown error in connection with Hessian inverse.") def odr_chisquare_compact_x(d): model = func(d[:n_parms], d[n_parms:n_parms + m].reshape(x_shape)) @@ -541,10 +549,18 @@ def _standard_fit(x, y, func, silent=False, **kwargs): output.chisquare_by_expected_chisquare) fitp = fit_result.x + hess = jacobian(jacobian(chisqfunc))(fitp) + condn = np.linalg.cond(hess) + if condn > 1e8: + warnings.warn("Hessian matrix might be ill-conditioned ({0:1.2e}), error propagation might be unreliable.".format(condn), RuntimeWarning) try: - hess_inv = np.linalg.pinv(jacobian(jacobian(chisqfunc))(fitp)) + hess_inv = np.linalg.inv(hess) except TypeError: raise Exception("It is required to use autograd.numpy instead of numpy within fit functions, see the documentation for details.") from None + except np.linalg.LinAlgError: + raise Exception("Cannot invert hessian matrix.") + except Exception: + raise Exception("Unkown error in connection with 
Hessian inverse.") if kwargs.get('correlated_fit') is True: def chisqfunc_compact(d): From 967ddb0ecd1f99e61ea5113336365e7134b2826c Mon Sep 17 00:00:00 2001 From: Fabian Joswig Date: Wed, 25 May 2022 14:51:46 +0100 Subject: [PATCH 21/68] fix: no-autograd exception in fits works again. --- pyerrors/fits.py | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/pyerrors/fits.py b/pyerrors/fits.py index 4faf237d..98437221 100644 --- a/pyerrors/fits.py +++ b/pyerrors/fits.py @@ -263,14 +263,15 @@ def total_least_squares(x, y, func, silent=False, **kwargs): output.chisquare_by_expected_chisquare) fitp = out.beta - hess = jacobian(jacobian(odr_chisquare))(np.concatenate((fitp, out.xplus.ravel()))) + try: + hess = jacobian(jacobian(odr_chisquare))(np.concatenate((fitp, out.xplus.ravel()))) + except TypeError: + raise Exception("It is required to use autograd.numpy instead of numpy within fit functions, see the documentation for details.") from None condn = np.linalg.cond(hess) if condn > 1e8: warnings.warn("Hessian matrix might be ill-conditioned ({0:1.2e}), error propagation might be unreliable.".format(condn), RuntimeWarning) try: hess_inv = np.linalg.inv(hess) - except TypeError: - raise Exception("It is required to use autograd.numpy instead of numpy within fit functions, see the documentation for details.") from None except np.linalg.LinAlgError: raise Exception("Cannot invert hessian matrix.") except Exception: @@ -549,14 +550,15 @@ def _standard_fit(x, y, func, silent=False, **kwargs): output.chisquare_by_expected_chisquare) fitp = fit_result.x - hess = jacobian(jacobian(chisqfunc))(fitp) + try: + hess = jacobian(jacobian(chisqfunc))(fitp) + except TypeError: + raise Exception("It is required to use autograd.numpy instead of numpy within fit functions, see the documentation for details.") from None condn = np.linalg.cond(hess) if condn > 1e8: warnings.warn("Hessian matrix might be ill-conditioned ({0:1.2e}), error propagation might be 
unreliable.".format(condn), RuntimeWarning) try: hess_inv = np.linalg.inv(hess) - except TypeError: - raise Exception("It is required to use autograd.numpy instead of numpy within fit functions, see the documentation for details.") from None except np.linalg.LinAlgError: raise Exception("Cannot invert hessian matrix.") except Exception: From 48636fedb24a7815d77cd2bb086ea737860c7d2e Mon Sep 17 00:00:00 2001 From: Fabian Joswig Date: Wed, 25 May 2022 15:20:31 +0100 Subject: [PATCH 22/68] docs: warning about ill conditioned hessian more detailed. --- pyerrors/fits.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/pyerrors/fits.py b/pyerrors/fits.py index 98437221..512a57cb 100644 --- a/pyerrors/fits.py +++ b/pyerrors/fits.py @@ -269,7 +269,8 @@ def total_least_squares(x, y, func, silent=False, **kwargs): raise Exception("It is required to use autograd.numpy instead of numpy within fit functions, see the documentation for details.") from None condn = np.linalg.cond(hess) if condn > 1e8: - warnings.warn("Hessian matrix might be ill-conditioned ({0:1.2e}), error propagation might be unreliable.".format(condn), RuntimeWarning) + warnings.warn("Hessian matrix might be ill-conditioned ({0:1.2e}), error propagation might be unreliable.\n \ + Maybe try rescaling the problem such that all parameters are of O(1).".format(condn), RuntimeWarning) try: hess_inv = np.linalg.inv(hess) except np.linalg.LinAlgError: @@ -556,7 +557,8 @@ def _standard_fit(x, y, func, silent=False, **kwargs): raise Exception("It is required to use autograd.numpy instead of numpy within fit functions, see the documentation for details.") from None condn = np.linalg.cond(hess) if condn > 1e8: - warnings.warn("Hessian matrix might be ill-conditioned ({0:1.2e}), error propagation might be unreliable.".format(condn), RuntimeWarning) + warnings.warn("Hessian matrix might be ill-conditioned ({0:1.2e}), error propagation might be unreliable.\n \ + Maybe try rescaling the problem such 
that all parameters are of O(1).".format(condn), RuntimeWarning) try: hess_inv = np.linalg.inv(hess) except np.linalg.LinAlgError: From 919ba68a527ec8f7bea0c8dba267f05fcee03f20 Mon Sep 17 00:00:00 2001 From: Fabian Joswig Date: Wed, 25 May 2022 17:59:29 +0100 Subject: [PATCH 23/68] feat: When least_squares is called with correlated_fit=True the routine first performs an uncorrelated fit in order to find appropriate starting parameters. --- pyerrors/fits.py | 29 +++++++++++++++++------------ 1 file changed, 17 insertions(+), 12 deletions(-) diff --git a/pyerrors/fits.py b/pyerrors/fits.py index 512a57cb..5ba44ef8 100644 --- a/pyerrors/fits.py +++ b/pyerrors/fits.py @@ -482,15 +482,15 @@ def _standard_fit(x, y, func, silent=False, **kwargs): chol_inv = np.linalg.inv(chol) chol_inv = np.dot(chol_inv, covdiag) - def chisqfunc(p): + def chisqfunc_corr(p): model = func(p, x) chisq = anp.sum(anp.dot(chol_inv, (y_f - model)) ** 2) return chisq - else: - def chisqfunc(p): - model = func(p, x) - chisq = anp.sum(((y_f - model) / dy_f) ** 2) - return chisq + + def chisqfunc(p): + model = func(p, x) + chisq = anp.sum(((y_f - model) / dy_f) ** 2) + return chisq output.method = kwargs.get('method', 'Levenberg-Marquardt') if not silent: @@ -499,27 +499,32 @@ def _standard_fit(x, y, func, silent=False, **kwargs): if output.method != 'Levenberg-Marquardt': if output.method == 'migrad': fit_result = iminuit.minimize(chisqfunc, x0, tol=1e-4) # Stopping criterion 0.002 * tol * errordef + if kwargs.get('correlated_fit') is True: + fit_result = iminuit.minimize(chisqfunc_corr, fit_result.x, tol=1e-4) # Stopping criterion 0.002 * tol * errordef output.iterations = fit_result.nfev else: fit_result = scipy.optimize.minimize(chisqfunc, x0, method=kwargs.get('method'), tol=1e-12) + if kwargs.get('correlated_fit') is True: + fit_result = scipy.optimize.minimize(chisqfunc_corr, fit_result.x, method=kwargs.get('method'), tol=1e-12) output.iterations = fit_result.nit chisquare = fit_result.fun 
else: if kwargs.get('correlated_fit') is True: - def chisqfunc_residuals(p): + def chisqfunc_residuals_corr(p): model = func(p, x) chisq = anp.dot(chol_inv, (y_f - model)) return chisq - else: - def chisqfunc_residuals(p): - model = func(p, x) - chisq = ((y_f - model) / dy_f) - return chisq + def chisqfunc_residuals(p): + model = func(p, x) + chisq = ((y_f - model) / dy_f) + return chisq fit_result = scipy.optimize.least_squares(chisqfunc_residuals, x0, method='lm', ftol=1e-15, gtol=1e-15, xtol=1e-15) + if kwargs.get('correlated_fit') is True: + fit_result = scipy.optimize.least_squares(chisqfunc_residuals_corr, fit_result.x, method='lm', ftol=1e-15, gtol=1e-15, xtol=1e-15) chisquare = np.sum(fit_result.fun ** 2) From 6bbd88a8c1296b313996ed6d657a16c305bffafc Mon Sep 17 00:00:00 2001 From: Fabian Joswig Date: Wed, 25 May 2022 18:03:46 +0100 Subject: [PATCH 24/68] tests: fit_corr_independent now runs for various methods. --- tests/fits_test.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/tests/fits_test.py b/tests/fits_test.py index 374550e4..88b8438a 100644 --- a/tests/fits_test.py +++ b/tests/fits_test.py @@ -171,12 +171,13 @@ def test_fit_corr_independent(): y = a[0] * anp.exp(-a[1] * x) return y - out = pe.least_squares(x, oy, func) - out_corr = pe.least_squares(x, oy, func, correlated_fit=True) + for method in ["Levenberg-Marquardt", "migrad", "Nelder-Mead"]: + out = pe.least_squares(x, oy, func, method=method) + out_corr = pe.least_squares(x, oy, func, correlated_fit=True, method=method) - assert np.isclose(out.chisquare, out_corr.chisquare) - assert (out[0] - out_corr[0]).is_zero(atol=1e-5) - assert (out[1] - out_corr[1]).is_zero(atol=1e-5) + assert np.isclose(out.chisquare, out_corr.chisquare) + assert (out[0] - out_corr[0]).is_zero(atol=1e-5) + assert (out[1] - out_corr[1]).is_zero(atol=1e-5) def test_total_least_squares(): From ff5540d6676913fdc2eddbe7fb8c268fd5afd726 Mon Sep 17 00:00:00 2001 From: Fabian Joswig Date: Thu, 
26 May 2022 10:19:39 +0100 Subject: [PATCH 25/68] feat: optimized calculation of the inverse hessian for error propagation in fits. --- pyerrors/fits.py | 38 +++++++++++++++----------------------- 1 file changed, 15 insertions(+), 23 deletions(-) diff --git a/pyerrors/fits.py b/pyerrors/fits.py index 5ba44ef8..1ea2f827 100644 --- a/pyerrors/fits.py +++ b/pyerrors/fits.py @@ -267,16 +267,6 @@ def total_least_squares(x, y, func, silent=False, **kwargs): hess = jacobian(jacobian(odr_chisquare))(np.concatenate((fitp, out.xplus.ravel()))) except TypeError: raise Exception("It is required to use autograd.numpy instead of numpy within fit functions, see the documentation for details.") from None - condn = np.linalg.cond(hess) - if condn > 1e8: - warnings.warn("Hessian matrix might be ill-conditioned ({0:1.2e}), error propagation might be unreliable.\n \ - Maybe try rescaling the problem such that all parameters are of O(1).".format(condn), RuntimeWarning) - try: - hess_inv = np.linalg.inv(hess) - except np.linalg.LinAlgError: - raise Exception("Cannot invert hessian matrix.") - except Exception: - raise Exception("Unkown error in connection with Hessian inverse.") def odr_chisquare_compact_x(d): model = func(d[:n_parms], d[n_parms:n_parms + m].reshape(x_shape)) @@ -285,7 +275,11 @@ def total_least_squares(x, y, func, silent=False, **kwargs): jac_jac_x = jacobian(jacobian(odr_chisquare_compact_x))(np.concatenate((fitp, out.xplus.ravel(), x_f.ravel()))) - deriv_x = -hess_inv @ jac_jac_x[:n_parms + m, n_parms + m:] + # Compute hess^{-1} @ jac_jac_x[:n_parms + m, n_parms + m:] using LAPACK dgesv + try: + deriv_x = -scipy.linalg.solve(hess, jac_jac_x[:n_parms + m, n_parms + m:]) + except np.linalg.LinAlgError: + raise Exception("Cannot invert hessian matrix.") def odr_chisquare_compact_y(d): model = func(d[:n_parms], d[n_parms:n_parms + m].reshape(x_shape)) @@ -294,7 +288,11 @@ def total_least_squares(x, y, func, silent=False, **kwargs): jac_jac_y = 
jacobian(jacobian(odr_chisquare_compact_y))(np.concatenate((fitp, out.xplus.ravel(), y_f))) - deriv_y = -hess_inv @ jac_jac_y[:n_parms + m, n_parms + m:] + # Compute hess^{-1} @ jac_jac_y[:n_parms + m, n_parms + m:] using LAPACK dgesv + try: + deriv_y = -scipy.linalg.solve(hess, jac_jac_y[:n_parms + m, n_parms + m:]) + except np.linalg.LinAlgError: + raise Exception("Cannot invert hessian matrix.") result = [] for i in range(n_parms): @@ -560,16 +558,6 @@ def _standard_fit(x, y, func, silent=False, **kwargs): hess = jacobian(jacobian(chisqfunc))(fitp) except TypeError: raise Exception("It is required to use autograd.numpy instead of numpy within fit functions, see the documentation for details.") from None - condn = np.linalg.cond(hess) - if condn > 1e8: - warnings.warn("Hessian matrix might be ill-conditioned ({0:1.2e}), error propagation might be unreliable.\n \ - Maybe try rescaling the problem such that all parameters are of O(1).".format(condn), RuntimeWarning) - try: - hess_inv = np.linalg.inv(hess) - except np.linalg.LinAlgError: - raise Exception("Cannot invert hessian matrix.") - except Exception: - raise Exception("Unkown error in connection with Hessian inverse.") if kwargs.get('correlated_fit') is True: def chisqfunc_compact(d): @@ -585,7 +573,11 @@ def _standard_fit(x, y, func, silent=False, **kwargs): jac_jac = jacobian(jacobian(chisqfunc_compact))(np.concatenate((fitp, y_f))) - deriv = -hess_inv @ jac_jac[:n_parms, n_parms:] + # Compute hess^{-1} @ jac_jac[:n_parms, n_parms:] using LAPACK dgesv + try: + deriv = -scipy.linalg.solve(hess, jac_jac[:n_parms, n_parms:]) + except np.linalg.LinAlgError: + raise Exception("Cannot invert hessian matrix.") result = [] for i in range(n_parms): From e7f5961cd6092032b7ff4f5056e761b63929017d Mon Sep 17 00:00:00 2001 From: Fabian Joswig Date: Thu, 26 May 2022 13:52:05 +0100 Subject: [PATCH 26/68] fix: bug in estimation of chisquare for correlated fit fixed, tests added. 
--- pyerrors/fits.py | 6 +++++- tests/fits_test.py | 4 ++++ 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/pyerrors/fits.py b/pyerrors/fits.py index 1ea2f827..16fb5405 100644 --- a/pyerrors/fits.py +++ b/pyerrors/fits.py @@ -525,6 +525,10 @@ def _standard_fit(x, y, func, silent=False, **kwargs): fit_result = scipy.optimize.least_squares(chisqfunc_residuals_corr, fit_result.x, method='lm', ftol=1e-15, gtol=1e-15, xtol=1e-15) chisquare = np.sum(fit_result.fun ** 2) + if kwargs.get('correlated_fit') is True: + assert np.isclose(chisquare, chisqfunc_corr(fit_result.x), atol=1e-14) + else: + assert np.isclose(chisquare, chisqfunc(fit_result.x), atol=1e-14) output.iterations = fit_result.nfev @@ -585,7 +589,7 @@ def _standard_fit(x, y, func, silent=False, **kwargs): output.fit_parameters = result - output.chisquare = chisqfunc(fit_result.x) + output.chisquare = chisquare output.dof = x.shape[-1] - n_parms output.p_value = 1 - chi2.cdf(output.chisquare, output.dof) diff --git a/tests/fits_test.py b/tests/fits_test.py index 88b8438a..a288f547 100644 --- a/tests/fits_test.py +++ b/tests/fits_test.py @@ -149,8 +149,10 @@ def test_correlated_fit(): return p[1] * anp.exp(-p[0] * x) fitp = pe.least_squares(x, data, fitf, expected_chisquare=True) + assert np.isclose(fitp.chisquare / fitp.dof, fitp.chisquare_by_dof, atol=1e-14) fitpc = pe.least_squares(x, data, fitf, correlated_fit=True) + assert np.isclose(fitpc.chisquare / fitpc.dof, fitpc.chisquare_by_dof, atol=1e-14) for i in range(2): diff = fitp[i] - fitpc[i] diff.gamma_method() @@ -176,6 +178,8 @@ def test_fit_corr_independent(): out_corr = pe.least_squares(x, oy, func, correlated_fit=True, method=method) assert np.isclose(out.chisquare, out_corr.chisquare) + assert np.isclose(out.dof, out_corr.dof) + assert np.isclose(out.chisquare_by_dof, out_corr.chisquare_by_dof) assert (out[0] - out_corr[0]).is_zero(atol=1e-5) assert (out[1] - out_corr[1]).is_zero(atol=1e-5) From 09fd1fdf608e30e1e98bf5f2d560b5a9351d575a 
Mon Sep 17 00:00:00 2001 From: Fabian Joswig Date: Fri, 27 May 2022 13:50:19 +0100 Subject: [PATCH 27/68] feat: rounding errors in inversion of cholesky decomposed correlation matrix reduced. --- pyerrors/fits.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/pyerrors/fits.py b/pyerrors/fits.py index 16fb5405..45c8081c 100644 --- a/pyerrors/fits.py +++ b/pyerrors/fits.py @@ -477,8 +477,7 @@ def _standard_fit(x, y, func, silent=False, **kwargs): if condn > 1 / np.sqrt(np.finfo(float).eps): warnings.warn("Correlation matrix may be ill-conditioned, condition number: {%1.2e}" % (condn), RuntimeWarning) chol = np.linalg.cholesky(corr) - chol_inv = np.linalg.inv(chol) - chol_inv = np.dot(chol_inv, covdiag) + chol_inv = scipy.linalg.solve(chol, covdiag) def chisqfunc_corr(p): model = func(p, x) From 328c564fdd1edf4272339d6763c16143ba6f21b1 Mon Sep 17 00:00:00 2001 From: Fabian Joswig Date: Fri, 27 May 2022 14:00:46 +0100 Subject: [PATCH 28/68] feat: further improvement of inverse of cholesky decomposed correlation matrix. --- pyerrors/fits.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyerrors/fits.py b/pyerrors/fits.py index 45c8081c..3af1f96e 100644 --- a/pyerrors/fits.py +++ b/pyerrors/fits.py @@ -477,7 +477,7 @@ def _standard_fit(x, y, func, silent=False, **kwargs): if condn > 1 / np.sqrt(np.finfo(float).eps): warnings.warn("Correlation matrix may be ill-conditioned, condition number: {%1.2e}" % (condn), RuntimeWarning) chol = np.linalg.cholesky(corr) - chol_inv = scipy.linalg.solve(chol, covdiag) + chol_inv = scipy.linalg.solve_triangular(chol, covdiag, lower=True) def chisqfunc_corr(p): model = func(p, x) From 658c2b30de0f14d8a1c153d8845c8ba6e92beb70 Mon Sep 17 00:00:00 2001 From: Fabian Joswig Date: Fri, 27 May 2022 14:17:05 +0100 Subject: [PATCH 29/68] feat: eigenvalue calculation in pe.covariance is not done if only the correlation matrix is requested. 
--- pyerrors/obs.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/pyerrors/obs.py b/pyerrors/obs.py index 220b1646..d211c541 100644 --- a/pyerrors/obs.py +++ b/pyerrors/obs.py @@ -1452,13 +1452,6 @@ def covariance(obs, visualize=False, correlation=False, smooth=None, **kwargs): if isinstance(smooth, int): corr = _smooth_eigenvalues(corr, smooth) - errors = [o.dvalue for o in obs] - cov = np.diag(errors) @ corr @ np.diag(errors) - - eigenvalues = np.linalg.eigh(cov)[0] - if not np.all(eigenvalues >= 0): - warnings.warn("Covariance matrix is not positive semi-definite (Eigenvalues: " + str(eigenvalues) + ")", RuntimeWarning) - if visualize: plt.matshow(corr, vmin=-1, vmax=1) plt.set_cmap('RdBu') @@ -1467,8 +1460,15 @@ def covariance(obs, visualize=False, correlation=False, smooth=None, **kwargs): if correlation is True: return corr - else: - return cov + + errors = [o.dvalue for o in obs] + cov = np.diag(errors) @ corr @ np.diag(errors) + + eigenvalues = np.linalg.eigh(cov)[0] + if not np.all(eigenvalues >= 0): + warnings.warn("Covariance matrix is not positive semi-definite (Eigenvalues: " + str(eigenvalues) + ")", RuntimeWarning) + + return cov def _smooth_eigenvalues(corr, E): From 46cc5dd7929c20f737b399f116a66085b26ac1db Mon Sep 17 00:00:00 2001 From: Fabian Joswig Date: Sun, 29 May 2022 13:58:43 +0100 Subject: [PATCH 30/68] feat: The desired correlation function extracted with input.hadrons.read_meson_hd5 can now also be specified via a tuple of strings indicating the desired source and sink gamma matrices. 
--- pyerrors/input/hadrons.py | 24 +++++++++++++++++++++--- 1 file changed, 21 insertions(+), 3 deletions(-) diff --git a/pyerrors/input/hadrons.py b/pyerrors/input/hadrons.py index 31106bda..c773b186 100644 --- a/pyerrors/input/hadrons.py +++ b/pyerrors/input/hadrons.py @@ -55,8 +55,8 @@ def _get_files(path, filestem, idl): return filtered_files, idx -def read_meson_hd5(path, filestem, ens_id, meson='meson_0', idl=None): - """Read hadrons meson hdf5 file and extract the meson labeled 'meson' +def read_meson_hd5(path, filestem, ens_id, meson='meson_0', idl=None, gammas=None): + r'''Read hadrons meson hdf5 file and extract the meson labeled 'meson' Parameters ----------------- @@ -69,13 +69,31 @@ def read_meson_hd5(path, filestem, ens_id, meson='meson_0', idl=None): meson : str label of the meson to be extracted, standard value meson_0 which corresponds to the pseudoscalar pseudoscalar two-point function. + gammas : tuple of strings + Instrad of a meson label one can also provide a tuple of two strings + indicating the gamma matrices at source and sink. + ("Gamma5", "Gamma5") corresponds to the pseudoscalar pseudoscalar + two-point function. The gammas argument dominateds over meson. idl : range If specified only configurations in the given range are read in. 
- """ + ''' files, idx = _get_files(path, filestem, idl) tree = meson.rsplit('_')[0] + if gammas is not None: + h5file = h5py.File(path + '/' + files[0], "r") + found_meson = None + for key in h5file[tree].keys(): + if gammas[0] == h5file[tree][key].attrs["gamma_snk"][0].decode() and h5file[tree][key].attrs["gamma_src"][0].decode() == gammas[1]: + found_meson = key + break + h5file.close() + if found_meson: + meson = found_meson + else: + raise Exception("Source Sink combination " + str(gammas) + " not found.") + corr_data = [] infos = [] for hd5_file in files: From 52330b7493fd6345c32e151fc9a29d95346bcf1f Mon Sep 17 00:00:00 2001 From: Fabian Joswig Date: Mon, 30 May 2022 10:33:30 +0100 Subject: [PATCH 31/68] docs: CHANGELOG updated. --- CHANGELOG.md | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0ac0ee11..a752258f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,26 @@ All notable changes to this project will be documented in this file. +## [2.1.0] - 2022-05-31 +### Added +- `obs.covariance` now has the option to smooth small eigenvalues of the matrix with the method described in hep-lat/9412087. +- `Corr.prune` was added which can reduce the size of a correlator matrix before solving the GEVP. +- `Corr.show` has two additional optional arguments. `hide_sigma` to hide data points with large errors and `references` to display reference values as horizontal lines. +- I/O routines for ALPHA dobs format added. +- `input.hadrons` functionality extended. + +### Changed +- The standard behavior of the `Corr.GEVP` method has changed. It now returns all eigenvectors of the system instead of only the specified ones as default. The standard way of sorting the eigenvectors was changed to `Eigenvalue`. The argument `sorted_list` was deprecated in favor of `sort`. +- Before performing a correlated fit the routine first runs an uncorrelated one to obtain a better guess for the initial parameters. 
+ +### Fixed +- `obs.covariance` now also gives correct estimators if data defined on non-identical configurations is passed to the function. +- Rounding errors in estimating derivatives of fit parameters with respect to input data from the inverse hessian reduced. This should only make a difference when the magnitude of the errors of different fit parameters vary vastly. +- Bug in json.gz format fixed which did not properly store the replica mean values. Format version bumped to 1.1. +- The GEVP matrix is now symmetrized before solving the system for all sorting options not only the one with fixed `ts`. +- Automatic range estimation improved in `fits.residual_plot`. +- Bugs in `input.bdio` fixed. + ## [2.0.0] - 2022-03-31 ### Added - The possibility to work with Monte Carlo histories which are evenly or unevenly spaced was added. From 4aae55d1d6fef0948931e88d03d5f7b733ad311a Mon Sep 17 00:00:00 2001 From: Fabian Joswig Date: Tue, 31 May 2022 11:51:59 +0100 Subject: [PATCH 32/68] docs: Docstring for fits.least_square extended. --- .gitignore | 3 +++ pyerrors/fits.py | 3 ++- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index fdaac56f..2701c491 100644 --- a/.gitignore +++ b/.gitignore @@ -6,6 +6,9 @@ __pycache__ .cache examples/B1k2_pcac_plateau.p examples/Untitled.* +examples/pcac_plateau_test_ensemble.json.gz core.* *.swp htmlcov +build +pyerrors.egg-info diff --git a/pyerrors/fits.py b/pyerrors/fits.py index 3af1f96e..ebe24df5 100644 --- a/pyerrors/fits.py +++ b/pyerrors/fits.py @@ -95,7 +95,8 @@ def least_squares(x, y, func, priors=None, silent=False, **kwargs): If true all output to the console is omitted (default False). initial_guess : list can provide an initial guess for the input parameters. Relevant for - non-linear fits with many parameters. + non-linear fits with many parameters. In case of correlated fits the guess is used to perform + an uncorrelated fit which then serves as guess for the correlated fit. 
method : str, optional can be used to choose an alternative method for the minimization of chisquare. The possible methods are the ones which can be used for scipy.optimize.minimize and From 2fb498298809ce8119aefef1b7f5b07226948bb3 Mon Sep 17 00:00:00 2001 From: Fabian Joswig Date: Tue, 31 May 2022 11:54:26 +0100 Subject: [PATCH 33/68] build: version number bumped to 2.1.0 --- pyerrors/version.py | 2 +- setup.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pyerrors/version.py b/pyerrors/version.py index 6cb8ec9d..9aa3f903 100644 --- a/pyerrors/version.py +++ b/pyerrors/version.py @@ -1 +1 @@ -__version__ = "2.1.0+dev" +__version__ = "2.1.0" diff --git a/setup.py b/setup.py index ef3fb3f8..ab2d8e6f 100644 --- a/setup.py +++ b/setup.py @@ -4,7 +4,7 @@ this_directory = Path(__file__).parent long_description = (this_directory / "README.md").read_text() setup(name='pyerrors', - version='2.1.0+dev', + version='2.1.0', description='Error analysis for lattice QCD', long_description=long_description, long_description_content_type='text/markdown', From e89a537f7c95b1a24ea427bb1cd59eceb3e178ab Mon Sep 17 00:00:00 2001 From: Fabian Joswig Date: Tue, 31 May 2022 12:11:42 +0100 Subject: [PATCH 34/68] build: version number bumped to 2.2.0+dev --- pyerrors/version.py | 2 +- setup.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pyerrors/version.py b/pyerrors/version.py index 9aa3f903..0d8c75f0 100644 --- a/pyerrors/version.py +++ b/pyerrors/version.py @@ -1 +1 @@ -__version__ = "2.1.0" +__version__ = "2.2.0+dev" diff --git a/setup.py b/setup.py index ab2d8e6f..8b5dbfaf 100644 --- a/setup.py +++ b/setup.py @@ -4,7 +4,7 @@ this_directory = Path(__file__).parent long_description = (this_directory / "README.md").read_text() setup(name='pyerrors', - version='2.1.0', + version='2.2.0+dev', description='Error analysis for lattice QCD', long_description=long_description, long_description_content_type='text/markdown', From 
8a785601a6324b76214527d880218c74775e6001 Mon Sep 17 00:00:00 2001 From: Fabian Joswig Date: Tue, 31 May 2022 12:27:31 +0100 Subject: [PATCH 35/68] build: project urls added to setup.py --- setup.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/setup.py b/setup.py index 8b5dbfaf..0a648496 100644 --- a/setup.py +++ b/setup.py @@ -9,6 +9,10 @@ setup(name='pyerrors', long_description=long_description, long_description_content_type='text/markdown', url="https://github.com/fjosw/pyerrors", + project_urls= { + 'Documentation': 'https://fjosw.github.io/pyerrors/pyerrors.html', + 'Bug Tracker': 'https://github.com/fjosw/pyerrors/issues' + }, author='Fabian Joswig', author_email='fabian.joswig@ed.ac.uk', license="MIT", From ccb424a251069fa5c5b4ea36d0937f24ae4b895c Mon Sep 17 00:00:00 2001 From: Fabian Joswig Date: Tue, 31 May 2022 13:43:34 +0100 Subject: [PATCH 36/68] fix: hide_sigma in Corr.show now ignores the entry at 0 for the estimation of the plot range. --- pyerrors/correlators.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pyerrors/correlators.py b/pyerrors/correlators.py index ab4f6f50..e105e98c 100644 --- a/pyerrors/correlators.py +++ b/pyerrors/correlators.py @@ -758,7 +758,7 @@ class Corr: x, y, y_err = self.plottable() if hide_sigma: - hide_from = np.argmax((hide_sigma * np.array(y_err)) > np.abs(y)) - 1 + hide_from = np.argmax((hide_sigma * np.array(y_err[1:])) > np.abs(y[1:])) - 1 else: hide_from = None ax1.errorbar(x[:hide_from], y[:hide_from], y_err[:hide_from], label=self.tag) @@ -781,7 +781,7 @@ class Corr: corr.gamma_method() x, y, y_err = corr.plottable() if hide_sigma: - hide_from = np.argmax((hide_sigma * np.array(y_err)) > np.abs(y)) - 1 + hide_from = np.argmax((hide_sigma * np.array(y_err[1:])) > np.abs(y[1:])) - 1 else: hide_from = None plt.errorbar(x[:hide_from], y[:hide_from], y_err[:hide_from], label=corr.tag, mfc=plt.rcParams['axes.facecolor']) From 0b6b98990defda26077b69d1bab999b6765a7c87 Mon Sep 17 00:00:00 
2001 From: Fabian Joswig Date: Wed, 1 Jun 2022 15:03:55 +0100 Subject: [PATCH 37/68] build: scipy version 1 or greater required. --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 0a648496..31d810fc 100644 --- a/setup.py +++ b/setup.py @@ -18,7 +18,7 @@ setup(name='pyerrors', license="MIT", packages=find_packages(), python_requires='>=3.6.0', - install_requires=['numpy>=1.16', 'autograd>=1.4', 'numdifftools', 'matplotlib>=3.3', 'scipy', 'iminuit>=2', 'h5py', 'lxml', 'python-rapidjson'], + install_requires=['numpy>=1.16', 'autograd>=1.4', 'numdifftools', 'matplotlib>=3.3', 'scipy>=1', 'iminuit>=2', 'h5py', 'lxml', 'python-rapidjson'], classifiers=[ 'Development Status :: 5 - Production/Stable', 'Intended Audience :: Science/Research', From 6bd724715fbf156d1a46857575e6efa414fa0c1f Mon Sep 17 00:00:00 2001 From: Fabian Joswig Date: Mon, 6 Jun 2022 15:04:16 +0100 Subject: [PATCH 38/68] fix: Fit_result.gamma_method can now be called with kwargs --- pyerrors/fits.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pyerrors/fits.py b/pyerrors/fits.py index ebe24df5..6c7ea066 100644 --- a/pyerrors/fits.py +++ b/pyerrors/fits.py @@ -34,9 +34,9 @@ class Fit_result(Sequence): def __len__(self): return len(self.fit_parameters) - def gamma_method(self): + def gamma_method(self, **kwargs): """Apply the gamma method to all fit parameters""" - [o.gamma_method() for o in self.fit_parameters] + [o.gamma_method(**kwargs) for o in self.fit_parameters] def __str__(self): my_str = 'Goodness of fit:\n' From c9e0c6e20844a6f320794aa94f864ca918f452c0 Mon Sep 17 00:00:00 2001 From: Fabian Joswig Date: Mon, 6 Jun 2022 15:11:03 +0100 Subject: [PATCH 39/68] tests: linear least_squares fit error estimation tested against jackknife resampling. 
--- tests/fits_test.py | 34 ++++++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/tests/fits_test.py b/tests/fits_test.py index a288f547..4819633e 100644 --- a/tests/fits_test.py +++ b/tests/fits_test.py @@ -381,6 +381,40 @@ def test_error_band(): pe.fits.error_band(x, f, fitp.fit_parameters) +def test_fit_vs_jackknife(): + od = 0.9999999999 + cov1 = np.array([[1, od, od], [od, 1.0, od], [od, od, 1.0]]) + cov1 *= 0.05 + nod = -0.4 + cov2 = np.array([[1, nod, nod], [nod, 1.0, nod], [nod, nod, 1.0]]) + cov2 *= 0.05 + cov3 = np.identity(3) + cov3 *= 0.05 + samples = 500 + + for i, cov in enumerate([cov1, cov2, cov3]): + dat = pe.misc.gen_correlated_data(np.arange(1, 4), cov, 'test', 0.5, samples=samples) + [o.gamma_method(S=0) for o in dat]; + func = lambda a, x: a[0] + a[1] * x + fr = pe.least_squares(np.arange(1, 4), dat, func) + fr.gamma_method(S=0) + + jd = np.array([o.export_jackknife() for o in dat]).T + jfr = [] + for jacks in jd: + + def chisqfunc_residuals(p): + model = func(p, np.arange(1, 4)) + chisq = ((jacks - model) / [o.dvalue for o in dat]) + return chisq + + tf = scipy.optimize.least_squares(chisqfunc_residuals, [0.0, 0.0], method='lm', ftol=1e-15, gtol=1e-15, xtol=1e-15) + jfr.append(tf.x) + ajfr = np.array(jfr).T + err = np.array([np.sqrt(np.var(ajfr[j][1:], ddof=0) * (samples - 1)) for j in range(2)]) + assert np.allclose(err, [o.dvalue for o in fr], atol=1e-8) + + def test_fit_no_autograd(): dim = 10 x = np.arange(dim) From 155c32992169b4275a0cbe08a1b0b1a2939cb47e Mon Sep 17 00:00:00 2001 From: Fabian Joswig Date: Mon, 6 Jun 2022 15:47:41 +0100 Subject: [PATCH 40/68] fix: bug in error propagation for correlated fits fixed. 
--- pyerrors/fits.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/pyerrors/fits.py b/pyerrors/fits.py index 6c7ea066..489c2f8b 100644 --- a/pyerrors/fits.py +++ b/pyerrors/fits.py @@ -559,7 +559,10 @@ def _standard_fit(x, y, func, silent=False, **kwargs): fitp = fit_result.x try: - hess = jacobian(jacobian(chisqfunc))(fitp) + if kwargs.get('correlated_fit') is True: + hess = jacobian(jacobian(chisqfunc_corr))(fitp) + else: + hess = jacobian(jacobian(chisqfunc))(fitp) except TypeError: raise Exception("It is required to use autograd.numpy instead of numpy within fit functions, see the documentation for details.") from None From 92285942ac3af8b8a2ebd6c005d5439569e578f3 Mon Sep 17 00:00:00 2001 From: Fabian Joswig Date: Mon, 6 Jun 2022 15:56:19 +0100 Subject: [PATCH 41/68] build: Version number bumped to 2.1.1, CHANGELOG updated. --- CHANGELOG.md | 5 +++++ pyerrors/version.py | 2 +- setup.py | 2 +- 3 files changed, 7 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index a752258f..457b4e6f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,11 @@ All notable changes to this project will be documented in this file. +## [2.1.1] - 2022-06-06 +### Fixed +- Bug in error propagation of correlated least square fits fixed. +- Fit_result.gamma_method can now be called with kwargs + ## [2.1.0] - 2022-05-31 ### Added - `obs.covariance` now has the option to smooth small eigenvalues of the matrix with the method described in hep-lat/9412087. 
diff --git a/pyerrors/version.py b/pyerrors/version.py index 0d8c75f0..58039f50 100644 --- a/pyerrors/version.py +++ b/pyerrors/version.py @@ -1 +1 @@ -__version__ = "2.2.0+dev" +__version__ = "2.1.1" diff --git a/setup.py b/setup.py index 31d810fc..1dabcc66 100644 --- a/setup.py +++ b/setup.py @@ -4,7 +4,7 @@ this_directory = Path(__file__).parent long_description = (this_directory / "README.md").read_text() setup(name='pyerrors', - version='2.2.0+dev', + version='2.1.1', description='Error analysis for lattice QCD', long_description=long_description, long_description_content_type='text/markdown', From 02ed8ecd32ec687603f2fbc35ff954d1aaf107f5 Mon Sep 17 00:00:00 2001 From: Fabian Joswig Date: Tue, 7 Jun 2022 10:59:11 +0100 Subject: [PATCH 42/68] build: version number bumped to 2.2.0+dev --- pyerrors/version.py | 2 +- setup.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pyerrors/version.py b/pyerrors/version.py index 58039f50..0d8c75f0 100644 --- a/pyerrors/version.py +++ b/pyerrors/version.py @@ -1 +1 @@ -__version__ = "2.1.1" +__version__ = "2.2.0+dev" diff --git a/setup.py b/setup.py index 1dabcc66..31d810fc 100644 --- a/setup.py +++ b/setup.py @@ -4,7 +4,7 @@ this_directory = Path(__file__).parent long_description = (this_directory / "README.md").read_text() setup(name='pyerrors', - version='2.1.1', + version='2.2.0+dev', description='Error analysis for lattice QCD', long_description=long_description, long_description_content_type='text/markdown', From 8a911cac6150edf46cb3767d1280a08334bf1de3 Mon Sep 17 00:00:00 2001 From: Fabian Joswig Date: Tue, 7 Jun 2022 10:59:26 +0100 Subject: [PATCH 43/68] tests: Correlated least square fit tested against jackknife resampling --- tests/fits_test.py | 40 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) diff --git a/tests/fits_test.py b/tests/fits_test.py index 4819633e..bb3599e2 100644 --- a/tests/fits_test.py +++ b/tests/fits_test.py @@ -414,6 +414,46 @@ def 
test_fit_vs_jackknife(): err = np.array([np.sqrt(np.var(ajfr[j][1:], ddof=0) * (samples - 1)) for j in range(2)]) assert np.allclose(err, [o.dvalue for o in fr], atol=1e-8) +def test_correlated_fit_vs_jackknife(): + od = 0.999999 + cov1 = np.array([[1, od, od], [od, 1.0, od], [od, od, 1.0]]) + cov1 *= 0.1 + nod = -0.44 + cov2 = np.array([[1, nod, nod], [nod, 1.0, nod], [nod, nod, 1.0]]) + cov2 *= 0.1 + cov3 = np.identity(3) + cov3 *= 0.01 + + samples = 250 + x_val = np.arange(1, 6, 2) + for i, cov in enumerate([cov1, cov2, cov3]): + dat = pe.misc.gen_correlated_data(x_val + x_val ** 2 + np.random.normal(0.0, 0.1, 3), cov, 'test', 0.5, samples=samples) + [o.gamma_method(S=0) for o in dat]; + dat + func = lambda a, x: a[0] * x + a[1] * x ** 2 + fr = pe.least_squares(x_val, dat, func, correlated_fit=True, silent=True) + [o.gamma_method(S=0) for o in fr] + + cov = pe.covariance(dat) + chol = np.linalg.cholesky(cov) + chol_inv = np.linalg.inv(chol) + + jd = np.array([o.export_jackknife() for o in dat]).T + jfr = [] + for jacks in jd: + + def chisqfunc_residuals(p): + model = func(p, x_val) + chisq = np.dot(chol_inv, (jacks - model)) + return chisq + + tf = scipy.optimize.least_squares(chisqfunc_residuals, [0.0, 0.0], method='lm', ftol=1e-15, gtol=1e-15, xtol=1e-15) + jfr.append(tf.x) + ajfr = np.array(jfr).T + err = np.array([np.sqrt(np.var(ajfr[j][1:], ddof=0) * (samples - 1)) for j in range(2)]) + assert np.allclose(err, [o.dvalue for o in fr], atol=1e-7) + assert np.allclose(ajfr.T[0], [o.value for o in fr], atol=1e-8) + def test_fit_no_autograd(): dim = 10 From 78f576a35eacfb15c9526da8f8725195415a52c8 Mon Sep 17 00:00:00 2001 From: Fabian Joswig Date: Thu, 9 Jun 2022 11:23:41 +0100 Subject: [PATCH 44/68] fix: Corr.matrix_symmetric now also works if entries of the correlators are arrays with at least one None entry. 
--- pyerrors/correlators.py | 2 +- tests/correlators_test.py | 20 ++++++++++++++++++++ 2 files changed, 21 insertions(+), 1 deletion(-) diff --git a/pyerrors/correlators.py b/pyerrors/correlators.py index e105e98c..7046135a 100644 --- a/pyerrors/correlators.py +++ b/pyerrors/correlators.py @@ -236,7 +236,7 @@ class Corr: def matrix_symmetric(self): """Symmetrizes the correlator matrices on every timeslice.""" if self.N > 1: - transposed = [None if (G is None) else G.T for G in self.content] + transposed = [None if len(list(filter(None, np.asarray(G).flatten()))) < self.N ** 2 else G.T for G in self.content] return 0.5 * (Corr(transposed) + self) if self.N == 1: raise Exception("Trying to symmetrize a correlator matrix, that already has N=1.") diff --git a/tests/correlators_test.py b/tests/correlators_test.py index 97bbebb1..4f642f82 100644 --- a/tests/correlators_test.py +++ b/tests/correlators_test.py @@ -332,6 +332,15 @@ def test_matrix_symmetric(): assert np.all([np.all(o == o.T) for o in sym_corr_mat]) + t_obs = pe.pseudo_Obs(1.0, 0.1, 'test') + o_mat = np.array([[t_obs, t_obs], [t_obs, t_obs]]) + corr1 = pe.Corr([o_mat, None, o_mat]) + corr2 = pe.Corr([o_mat, np.array([[None, None], [None, None]]), o_mat]) + corr3 = pe.Corr([o_mat, np.array([[t_obs, None], [None, t_obs]], dtype=object), o_mat]) + corr1.matrix_symmetric() + corr2.matrix_symmetric() + corr3.matrix_symmetric() + def test_GEVP_solver(): @@ -347,6 +356,17 @@ def test_GEVP_solver(): assert np.allclose(sp_vecs, pe.correlators._GEVP_solver(mat1, mat2), atol=1e-14) +def test_GEVP_none_entries(): + t_obs = pe.pseudo_Obs(1.0, 0.1, 'test') + t_obs2 = pe.pseudo_Obs(0.1, 0.1, 'test') + + o_mat = np.array([[t_obs, t_obs2], [t_obs2, t_obs2]]) + n_arr = np.array([[None, None], [None, None]]) + + corr = pe.Corr([o_mat, o_mat, o_mat, o_mat, o_mat, o_mat, None, o_mat, n_arr, None, o_mat]) + corr.GEVP(t0=2) + + def test_hankel(): corr_content = [] for t in range(8): From 60dbc2c6fe10ee4d537f6f8f269fe76dc376098f 
Mon Sep 17 00:00:00 2001 From: Fabian Joswig Date: Fri, 10 Jun 2022 08:56:41 +0100 Subject: [PATCH 45/68] build: minimal version numbers for dependencies updated. --- setup.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/setup.py b/setup.py index 31d810fc..9ce20d43 100644 --- a/setup.py +++ b/setup.py @@ -18,7 +18,7 @@ setup(name='pyerrors', license="MIT", packages=find_packages(), python_requires='>=3.6.0', - install_requires=['numpy>=1.16', 'autograd>=1.4', 'numdifftools', 'matplotlib>=3.3', 'scipy>=1', 'iminuit>=2', 'h5py', 'lxml', 'python-rapidjson'], + install_requires=['numpy>=1.16', 'autograd>=1.4', 'numdifftools', 'matplotlib>=3.3', 'scipy>=1', 'iminuit>=2', 'h5pyi>=3', 'lxml>=4', 'python-rapidjson>=1'], classifiers=[ 'Development Status :: 5 - Production/Stable', 'Intended Audience :: Science/Research', @@ -28,6 +28,6 @@ setup(name='pyerrors', 'Programming Language :: Python :: 3.8', 'Programming Language :: Python :: 3.9', 'Programming Language :: Python :: 3.10', - 'Topic :: Scientific/Engineering' + 'Topic :: Scientific/Engineering :: Physics' ], ) From 6aab72cf0e0befb611e9ad0e52946e4443828b94 Mon Sep 17 00:00:00 2001 From: Fabian Joswig Date: Fri, 10 Jun 2022 08:57:55 +0100 Subject: [PATCH 46/68] build: typo in setup.py corrected. 
--- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 9ce20d43..9f3f5dfe 100644 --- a/setup.py +++ b/setup.py @@ -18,7 +18,7 @@ setup(name='pyerrors', license="MIT", packages=find_packages(), python_requires='>=3.6.0', - install_requires=['numpy>=1.16', 'autograd>=1.4', 'numdifftools', 'matplotlib>=3.3', 'scipy>=1', 'iminuit>=2', 'h5pyi>=3', 'lxml>=4', 'python-rapidjson>=1'], + install_requires=['numpy>=1.16', 'autograd>=1.4', 'numdifftools', 'matplotlib>=3.3', 'scipy>=1', 'iminuit>=2', 'h5py>=3', 'lxml>=4', 'python-rapidjson>=1'], classifiers=[ 'Development Status :: 5 - Production/Stable', 'Intended Audience :: Science/Research', From 5e550f432101c7b18a5481187f039acbd05e303d Mon Sep 17 00:00:00 2001 From: Fabian Joswig Date: Fri, 10 Jun 2022 09:29:04 +0100 Subject: [PATCH 47/68] tests: test for smooth_eigenvalues functionality added. --- tests/obs_test.py | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/tests/obs_test.py b/tests/obs_test.py index 55a23abd..2a158c3e 100644 --- a/tests/obs_test.py +++ b/tests/obs_test.py @@ -736,6 +736,23 @@ def test_covariance_factorizing(): assert np.isclose(pe.covariance([mt0, tt[1]])[0, 1], -pe.covariance(tt)[0, 1]) +def test_covariance_smooth_eigenvalues(): + for c_coeff in range(0, 14, 2): + length = 14 + sm = 5 + t_fac = 1.5 + tt = pe.misc.gen_correlated_data(np.zeros(length), 1 - 0.1 ** c_coeff * np.ones((length, length)) + 0.1 ** c_coeff * np.diag(np.ones(length)), 'test', tau=0.5 + t_fac * np.random.rand(length), samples=200) + [o.gamma_method() for o in tt] + full_corr = pe.covariance(tt, correlation=True) + cov = pe.covariance(tt, smooth=sm, correlation=True) + + full_evals = np.linalg.eigh(full_corr)[0] + sm_length = np.where(full_evals < np.mean(full_evals[:-sm]))[0][-1] + + evals = np.linalg.eigh(cov)[0] + assert np.all(np.isclose(evals[:sm_length], evals[0], atol=1e-8)) + + def test_covariance_alternation(): length = 12 t_fac = 2.5 From 
f49562b895219e388fec0fb5e62db55bfd6c7077 Mon Sep 17 00:00:00 2001 From: Fabian Joswig Date: Fri, 10 Jun 2022 10:13:49 +0100 Subject: [PATCH 48/68] tests: additional test for least square fits added which probes different initial guess and exceptions. Tolerance in total least square tests relaxed. --- tests/fits_test.py | 26 +++++++++++++++++++++++--- 1 file changed, 23 insertions(+), 3 deletions(-) diff --git a/tests/fits_test.py b/tests/fits_test.py index bb3599e2..8dd6677d 100644 --- a/tests/fits_test.py +++ b/tests/fits_test.py @@ -184,6 +184,26 @@ def test_fit_corr_independent(): assert (out[1] - out_corr[1]).is_zero(atol=1e-5) +def test_linear_fit_guesses(): + for err in [10, 0.1, 0.001]: + xvals = [] + yvals = [] + for x in range(1, 8, 2): + xvals.append(x) + yvals.append(pe.pseudo_Obs(x + np.random.normal(0.0, err), err, 'test1') + pe.pseudo_Obs(0, err / 100, 'test2', samples=87)) + lin_func = lambda a, x: a[0] + a[1] * x + with pytest.raises(Exception): + pe.least_squares(xvals, yvals, lin_func) + [o.gamma_method() for o in yvals]; + with pytest.raises(Exception): + pe.least_squares(xvals, yvals, lin_func, initial_guess=[5]) + + bad_guess = pe.least_squares(xvals, yvals, lin_func, initial_guess=[999, 999]) + good_guess = pe.least_squares(xvals, yvals, lin_func, initial_guess=[0, 1]) + assert np.isclose(bad_guess.chisquare, good_guess.chisquare, atol=1e-8) + assert np.all([(go - ba).is_zero(atol=1e-6) for (go, ba) in zip(good_guess, bad_guess)]) + + def test_total_least_squares(): dim = 10 + int(30 * np.random.rand()) x = np.arange(dim) + np.random.normal(0.0, 0.15, dim) @@ -223,7 +243,7 @@ def test_total_least_squares(): beta[i].gamma_method(S=1.0) assert math.isclose(beta[i].value, output.beta[i], rel_tol=1e-5) assert math.isclose(output.cov_beta[i, i], beta[i].dvalue ** 2, rel_tol=2.5e-1), str(output.cov_beta[i, i]) + ' ' + str(beta[i].dvalue ** 2) - assert math.isclose(pe.covariance([beta[0], beta[1]])[0, 1], output.cov_beta[0, 1], rel_tol=2.5e-1) + 
assert math.isclose(pe.covariance([beta[0], beta[1]])[0, 1], output.cov_beta[0, 1], rel_tol=3.5e-1) out = pe.total_least_squares(ox, oy, func, const_par=[beta[1]]) @@ -246,7 +266,7 @@ def test_total_least_squares(): betac[i].gamma_method(S=1.0) assert math.isclose(betac[i].value, output.beta[i], rel_tol=1e-3) assert math.isclose(output.cov_beta[i, i], betac[i].dvalue ** 2, rel_tol=2.5e-1), str(output.cov_beta[i, i]) + ' ' + str(betac[i].dvalue ** 2) - assert math.isclose(pe.covariance([betac[0], betac[1]])[0, 1], output.cov_beta[0, 1], rel_tol=2.5e-1) + assert math.isclose(pe.covariance([betac[0], betac[1]])[0, 1], output.cov_beta[0, 1], rel_tol=3.5e-1) outc = pe.total_least_squares(oxc, oyc, func, const_par=[betac[1]]) @@ -261,7 +281,7 @@ def test_total_least_squares(): betac[i].gamma_method(S=1.0) assert math.isclose(betac[i].value, output.beta[i], rel_tol=1e-3) assert math.isclose(output.cov_beta[i, i], betac[i].dvalue ** 2, rel_tol=2.5e-1), str(output.cov_beta[i, i]) + ' ' + str(betac[i].dvalue ** 2) - assert math.isclose(pe.covariance([betac[0], betac[1]])[0, 1], output.cov_beta[0, 1], rel_tol=2.5e-1) + assert math.isclose(pe.covariance([betac[0], betac[1]])[0, 1], output.cov_beta[0, 1], rel_tol=3.5e-1) outc = pe.total_least_squares(oxc, oy, func, const_par=[betac[1]]) From efd6d97d0a318c3e0ba92c0b6b40564b780dfb7d Mon Sep 17 00:00:00 2001 From: Fabian Joswig Date: Fri, 10 Jun 2022 10:21:28 +0100 Subject: [PATCH 49/68] docs: CHANGELOG updated, version number bumped to 2.1.2 --- CHANGELOG.md | 6 +++++- pyerrors/version.py | 2 +- setup.py | 2 +- 3 files changed, 7 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 457b4e6f..b052e0bf 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,10 +2,14 @@ All notable changes to this project will be documented in this file. +## [2.1.2] - 2022-06-10 +### Fixed +- Bug in `Corr.matrix_symmetric` fixed which appeared when a time slice contained an array with `None` entries. 
+ ## [2.1.1] - 2022-06-06 ### Fixed - Bug in error propagation of correlated least square fits fixed. -- Fit_result.gamma_method can now be called with kwargs +- `Fit_result.gamma_method` can now be called with kwargs ## [2.1.0] - 2022-05-31 ### Added diff --git a/pyerrors/version.py b/pyerrors/version.py index 0d8c75f0..4eabd0b3 100644 --- a/pyerrors/version.py +++ b/pyerrors/version.py @@ -1 +1 @@ -__version__ = "2.2.0+dev" +__version__ = "2.1.2" diff --git a/setup.py b/setup.py index 9f3f5dfe..2e5ba4be 100644 --- a/setup.py +++ b/setup.py @@ -4,7 +4,7 @@ this_directory = Path(__file__).parent long_description = (this_directory / "README.md").read_text() setup(name='pyerrors', - version='2.2.0+dev', + version='2.1.2', description='Error analysis for lattice QCD', long_description=long_description, long_description_content_type='text/markdown', From 0d86df25066e823302dc8a28914a30947ed3ed0a Mon Sep 17 00:00:00 2001 From: Fabian Joswig Date: Fri, 10 Jun 2022 10:30:00 +0100 Subject: [PATCH 50/68] build: Version number bumped to 2.2.0+dev, CHANGELOG added to project links in setup.py --- pyerrors/version.py | 2 +- setup.py | 5 +++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/pyerrors/version.py b/pyerrors/version.py index 4eabd0b3..0d8c75f0 100644 --- a/pyerrors/version.py +++ b/pyerrors/version.py @@ -1 +1 @@ -__version__ = "2.1.2" +__version__ = "2.2.0+dev" diff --git a/setup.py b/setup.py index 2e5ba4be..ca4f785d 100644 --- a/setup.py +++ b/setup.py @@ -4,14 +4,15 @@ this_directory = Path(__file__).parent long_description = (this_directory / "README.md").read_text() setup(name='pyerrors', - version='2.1.2', + version='2.2.0+dev', description='Error analysis for lattice QCD', long_description=long_description, long_description_content_type='text/markdown', url="https://github.com/fjosw/pyerrors", project_urls= { 'Documentation': 'https://fjosw.github.io/pyerrors/pyerrors.html', - 'Bug Tracker': 'https://github.com/fjosw/pyerrors/issues' + 'Bug 
Tracker': 'https://github.com/fjosw/pyerrors/issues', + 'Changelog' : 'https://github.com/fjosw/pyerrors/blob/master/CHANGELOG.md' }, author='Fabian Joswig', author_email='fabian.joswig@ed.ac.uk', From 5359a30b972aaed7912f26237809d83beeb64fff Mon Sep 17 00:00:00 2001 From: Fabian Joswig Date: Mon, 13 Jun 2022 11:59:05 +0100 Subject: [PATCH 51/68] fix: Bug in Corr.projected fixed which appears in connection with arrays of None as Corr entry. --- pyerrors/correlators.py | 4 ++-- tests/correlators_test.py | 9 +++++++++ 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/pyerrors/correlators.py b/pyerrors/correlators.py index 7046135a..f568ad42 100644 --- a/pyerrors/correlators.py +++ b/pyerrors/correlators.py @@ -155,7 +155,7 @@ class Corr: raise Exception("Vectors are of wrong shape!") if normalize: vector_l, vector_r = vector_l / np.sqrt((vector_l @ vector_l)), vector_r / np.sqrt(vector_r @ vector_r) - newcontent = [None if (item is None) else np.asarray([vector_l.T @ item @ vector_r]) for item in self.content] + newcontent = [None if len(list(filter(None, np.asarray(item).flatten()))) < self.N ** 2 else np.asarray([vector_l.T @ item @ vector_r]) for item in self.content] else: # There are no checks here yet. There are so many possible scenarios, where this can go wrong. 
@@ -163,7 +163,7 @@ class Corr: for t in range(self.T): vector_l[t], vector_r[t] = vector_l[t] / np.sqrt((vector_l[t] @ vector_l[t])), vector_r[t] / np.sqrt(vector_r[t] @ vector_r[t]) - newcontent = [None if (self.content[t] is None or vector_l[t] is None or vector_r[t] is None) else np.asarray([vector_l[t].T @ self.content[t] @ vector_r[t]]) for t in range(self.T)] + newcontent = [None if (len(list(filter(None, np.asarray(self.content[t]).flatten()))) < self.N ** 2 or vector_l[t] is None or vector_r[t] is None) else np.asarray([vector_l[t].T @ self.content[t] @ vector_r[t]]) for t in range(self.T)] return Corr(newcontent) def item(self, i, j): diff --git a/tests/correlators_test.py b/tests/correlators_test.py index 4f642f82..bba09f32 100644 --- a/tests/correlators_test.py +++ b/tests/correlators_test.py @@ -246,6 +246,15 @@ def test_matrix_corr(): corr_mat.Eigenvalue(2, state=0) +def test_projected_none(): + a = pe.pseudo_Obs(1.0, 0.1, 'a') + l = np.asarray([[a, a], [a, a]]) + n = np.asarray([[None, None], [None, None]]) + x = [l, n] + matr = pe.Corr(x) + matr.projected(np.asarray([1.0, 0.0])) + + def test_GEVP_warnings(): corr_aa = _gen_corr(1) corr_ab = 0.5 * corr_aa From ed50240d29eb9c717bdb5a7bc7808edc9e4afb71 Mon Sep 17 00:00:00 2001 From: Fabian Joswig Date: Mon, 13 Jun 2022 12:59:54 +0100 Subject: [PATCH 52/68] fix: check for correlator None entries refactored and added to all elementary operations. Tests added. 
--- pyerrors/correlators.py | 42 +++++++++++++++++++++------------------ tests/correlators_test.py | 8 +++++++- 2 files changed, 30 insertions(+), 20 deletions(-) diff --git a/pyerrors/correlators.py b/pyerrors/correlators.py index f568ad42..34804d57 100644 --- a/pyerrors/correlators.py +++ b/pyerrors/correlators.py @@ -155,7 +155,7 @@ class Corr: raise Exception("Vectors are of wrong shape!") if normalize: vector_l, vector_r = vector_l / np.sqrt((vector_l @ vector_l)), vector_r / np.sqrt(vector_r @ vector_r) - newcontent = [None if len(list(filter(None, np.asarray(item).flatten()))) < self.N ** 2 else np.asarray([vector_l.T @ item @ vector_r]) for item in self.content] + newcontent = [None if _check_for_none(self, item) else np.asarray([vector_l.T @ item @ vector_r]) for item in self.content] else: # There are no checks here yet. There are so many possible scenarios, where this can go wrong. @@ -163,7 +163,7 @@ class Corr: for t in range(self.T): vector_l[t], vector_r[t] = vector_l[t] / np.sqrt((vector_l[t] @ vector_l[t])), vector_r[t] / np.sqrt(vector_r[t] @ vector_r[t]) - newcontent = [None if (len(list(filter(None, np.asarray(self.content[t]).flatten()))) < self.N ** 2 or vector_l[t] is None or vector_r[t] is None) else np.asarray([vector_l[t].T @ self.content[t] @ vector_r[t]]) for t in range(self.T)] + newcontent = [None if (_check_for_none(self, self.content[t]) or vector_l[t] is None or vector_r[t] is None) else np.asarray([vector_l[t].T @ self.content[t] @ vector_r[t]]) for t in range(self.T)] return Corr(newcontent) def item(self, i, j): @@ -236,7 +236,7 @@ class Corr: def matrix_symmetric(self): """Symmetrizes the correlator matrices on every timeslice.""" if self.N > 1: - transposed = [None if len(list(filter(None, np.asarray(G).flatten()))) < self.N ** 2 else G.T for G in self.content] + transposed = [None if _check_for_none(self, G) else G.T for G in self.content] return 0.5 * (Corr(transposed) + self) if self.N == 1: raise Exception("Trying to 
symmetrize a correlator matrix, that already has N=1.") @@ -923,7 +923,7 @@ class Corr: raise Exception("Addition of Corrs with different shape") newcontent = [] for t in range(self.T): - if (self.content[t] is None) or (y.content[t] is None): + if _check_for_none(self, self.content[t]) or _check_for_none(y, y.content[t]): newcontent.append(None) else: newcontent.append(self.content[t] + y.content[t]) @@ -932,7 +932,7 @@ class Corr: elif isinstance(y, (Obs, int, float, CObs)): newcontent = [] for t in range(self.T): - if (self.content[t] is None): + if _check_for_none(self, self.content[t]): newcontent.append(None) else: newcontent.append(self.content[t] + y) @@ -951,7 +951,7 @@ class Corr: raise Exception("Multiplication of Corr object requires N=N or N=1 and T=T") newcontent = [] for t in range(self.T): - if (self.content[t] is None) or (y.content[t] is None): + if _check_for_none(self, self.content[t]) or _check_for_none(y, y.content[t]): newcontent.append(None) else: newcontent.append(self.content[t] * y.content[t]) @@ -960,7 +960,7 @@ class Corr: elif isinstance(y, (Obs, int, float, CObs)): newcontent = [] for t in range(self.T): - if (self.content[t] is None): + if _check_for_none(self, self.content[t]): newcontent.append(None) else: newcontent.append(self.content[t] * y) @@ -979,12 +979,12 @@ class Corr: raise Exception("Multiplication of Corr object requires N=N or N=1 and T=T") newcontent = [] for t in range(self.T): - if (self.content[t] is None) or (y.content[t] is None): + if _check_for_none(self, self.content[t]) or _check_for_none(y, y.content[t]): newcontent.append(None) else: newcontent.append(self.content[t] / y.content[t]) for t in range(self.T): - if newcontent[t] is None: + if _check_for_none(self, newcontent[t]): continue if np.isnan(np.sum(newcontent[t]).value): newcontent[t] = None @@ -1003,7 +1003,7 @@ class Corr: newcontent = [] for t in range(self.T): - if (self.content[t] is None): + if _check_for_none(self, self.content[t]): 
newcontent.append(None) else: newcontent.append(self.content[t] / y) @@ -1014,7 +1014,7 @@ class Corr: raise Exception('Division by zero will return undefined correlator') newcontent = [] for t in range(self.T): - if (self.content[t] is None): + if _check_for_none(self, self.content[t]): newcontent.append(None) else: newcontent.append(self.content[t] / y) @@ -1028,7 +1028,7 @@ class Corr: raise TypeError('Corr / wrong type') def __neg__(self): - newcontent = [None if (item is None) else -1. * item for item in self.content] + newcontent = [None if _check_for_none(self, item) else -1. * item for item in self.content] return Corr(newcontent, prange=self.prange) def __sub__(self, y): @@ -1036,31 +1036,31 @@ class Corr: def __pow__(self, y): if isinstance(y, (Obs, int, float, CObs)): - newcontent = [None if (item is None) else item**y for item in self.content] + newcontent = [None if _check_for_none(self, item) else item**y for item in self.content] return Corr(newcontent, prange=self.prange) else: raise TypeError('Type of exponent not supported') def __abs__(self): - newcontent = [None if (item is None) else np.abs(item) for item in self.content] + newcontent = [None if _check_for_none(self, item) else np.abs(item) for item in self.content] return Corr(newcontent, prange=self.prange) # The numpy functions: def sqrt(self): - return self**0.5 + return self ** 0.5 def log(self): - newcontent = [None if (item is None) else np.log(item) for item in self.content] + newcontent = [None if _check_for_none(self, item) else np.log(item) for item in self.content] return Corr(newcontent, prange=self.prange) def exp(self): - newcontent = [None if (item is None) else np.exp(item) for item in self.content] + newcontent = [None if _check_for_none(self, item) else np.exp(item) for item in self.content] return Corr(newcontent, prange=self.prange) def _apply_func_to_corr(self, func): - newcontent = [None if (item is None) else func(item) for item in self.content] + newcontent = [None if 
_check_for_none(self, item) else func(item) for item in self.content] for t in range(self.T): - if newcontent[t] is None: + if _check_for_none(self, newcontent[t]): continue if np.isnan(np.sum(newcontent[t]).value): newcontent[t] = None @@ -1221,6 +1221,10 @@ def _sort_vectors(vec_set, ts): return sorted_vec_set +def _check_for_none(corr, entry): + """Checks if entry for correlator corr is None""" + return len(list(filter(None, np.asarray(entry).flatten()))) < corr.N ** 2 + def _GEVP_solver(Gt, G0): """Helper function for solving the GEVP and sorting the eigenvectors. diff --git a/tests/correlators_test.py b/tests/correlators_test.py index bba09f32..0aadcb85 100644 --- a/tests/correlators_test.py +++ b/tests/correlators_test.py @@ -246,7 +246,7 @@ def test_matrix_corr(): corr_mat.Eigenvalue(2, state=0) -def test_projected_none(): +def test_corr_none_entries(): a = pe.pseudo_Obs(1.0, 0.1, 'a') l = np.asarray([[a, a], [a, a]]) n = np.asarray([[None, None], [None, None]]) @@ -254,6 +254,12 @@ def test_projected_none(): matr = pe.Corr(x) matr.projected(np.asarray([1.0, 0.0])) + matr * 2 - 2 * matr + matr * matr + matr ** 2 / matr + + for func in [np.sqrt, np.log, np.exp, np.sin, np.cos, np.tan, np.sinh, np.cosh, np.tanh]: + func(matr) + def test_GEVP_warnings(): corr_aa = _gen_corr(1) From a323d60b799532cbbe565014e882823d3b14b05e Mon Sep 17 00:00:00 2001 From: Fabian Joswig Date: Mon, 13 Jun 2022 13:08:05 +0100 Subject: [PATCH 53/68] fix: Exception added when symmetric or anti_symmetric are called on multi-dimensional correlator. 
--- pyerrors/correlators.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/pyerrors/correlators.py b/pyerrors/correlators.py index 34804d57..287f915c 100644 --- a/pyerrors/correlators.py +++ b/pyerrors/correlators.py @@ -197,6 +197,8 @@ class Corr: def symmetric(self): """ Symmetrize the correlator around x0=0.""" + if self.N != 1: + raise Exception('symmetric cannot be safely applied to multi-dimensional correlators.') if self.T % 2 != 0: raise Exception("Can not symmetrize odd T") @@ -215,6 +217,8 @@ class Corr: def anti_symmetric(self): """Anti-symmetrize the correlator around x0=0.""" + if self.N != 1: + raise Exception('anti_symmetric cannot be safely applied to multi-dimensional correlators.') if self.T % 2 != 0: raise Exception("Can not symmetrize odd T") From d736c001dcd6d87920449d3d05122aac742710ee Mon Sep 17 00:00:00 2001 From: Fabian Joswig Date: Mon, 13 Jun 2022 13:17:47 +0100 Subject: [PATCH 54/68] fix: further checks for multi-dimensional correaltors and None entries added to methods of Corr class. --- pyerrors/correlators.py | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/pyerrors/correlators.py b/pyerrors/correlators.py index 287f915c..d4d6206e 100644 --- a/pyerrors/correlators.py +++ b/pyerrors/correlators.py @@ -423,13 +423,15 @@ class Corr: Can either be an Obs which is correlated with all entries of the correlator or a Corr of same length. 
""" + if self.N != 1: + raise Exception("Only one-dimensional correlators can be safely correlated.") new_content = [] for x0, t_slice in enumerate(self.content): - if t_slice is None: + if _check_for_none(self, t_slice): new_content.append(None) else: if isinstance(partner, Corr): - if partner.content[x0] is None: + if _check_for_none(partner, partner.content[x0]): new_content.append(None) else: new_content.append(np.array([correlate(o, partner.content[x0][0]) for o in t_slice])) @@ -453,9 +455,11 @@ class Corr: the reweighting factor on all configurations in weight.idl and not on the configurations in obs[i].idl. """ + if self.N != 1: + raise Exception("Reweighting only implemented for one-dimensional correlators.") new_content = [] for t_slice in self.content: - if t_slice is None: + if _check_for_none(self, t_slice): new_content.append(None) else: new_content.append(np.array(reweight(weight, t_slice, **kwargs))) @@ -471,6 +475,8 @@ class Corr: partity : int Parity quantum number of the correlator, can be +1 or -1 """ + if self.N != 1: + raise Exception("T_symmetry only implemented for one-dimensional correlators.") if not isinstance(partner, Corr): raise Exception("T partner has to be a Corr object.") if parity not in [+1, -1]: @@ -498,6 +504,8 @@ class Corr: decides which definition of the finite differences derivative is used. Available choice: symmetric, forward, backward, improved, default: symmetric """ + if self.N != 1: + raise Exception("deriv only implemented for one-dimensional correlators.") if variant == "symmetric": newcontent = [] for t in range(1, self.T - 1): @@ -550,6 +558,8 @@ class Corr: decides which definition of the finite differences derivative is used. 
Available choice: symmetric, improved, default: symmetric """ + if self.N != 1: + raise Exception("second_deriv only implemented for one-dimensional correlators.") if variant == "symmetric": newcontent = [] for t in range(1, self.T - 1): @@ -1225,6 +1235,7 @@ def _sort_vectors(vec_set, ts): return sorted_vec_set + def _check_for_none(corr, entry): """Checks if entry for correlator corr is None""" return len(list(filter(None, np.asarray(entry).flatten()))) < corr.N ** 2 From 2ed815ddcc2b8bb272d60c9e1fc9b24b72e59d0c Mon Sep 17 00:00:00 2001 From: Fabian Joswig Date: Mon, 13 Jun 2022 13:36:14 +0100 Subject: [PATCH 55/68] build: version number bumped to 2.1.3, CHANGELOG updated. --- CHANGELOG.md | 4 ++++ pyerrors/version.py | 2 +- setup.py | 2 +- 3 files changed, 6 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index b052e0bf..0f900f57 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,10 @@ All notable changes to this project will be documented in this file. +## [2.1.3] - 2022-06-13 +### Fixed +- Further bugs in connection with correlator objects which have arrays with None entries as content fixed. + ## [2.1.2] - 2022-06-10 ### Fixed - Bug in `Corr.matrix_symmetric` fixed which appeared when a time slice contained an array with `None` entries. 
diff --git a/pyerrors/version.py b/pyerrors/version.py index 0d8c75f0..e835b9d0 100644 --- a/pyerrors/version.py +++ b/pyerrors/version.py @@ -1 +1 @@ -__version__ = "2.2.0+dev" +__version__ = "2.1.3" diff --git a/setup.py b/setup.py index ca4f785d..db07c7f1 100644 --- a/setup.py +++ b/setup.py @@ -4,7 +4,7 @@ this_directory = Path(__file__).parent long_description = (this_directory / "README.md").read_text() setup(name='pyerrors', - version='2.2.0+dev', + version='2.1.3', description='Error analysis for lattice QCD', long_description=long_description, long_description_content_type='text/markdown', From 5c47188931bb66969f2d0def105192cc37b35c68 Mon Sep 17 00:00:00 2001 From: Fabian Joswig Date: Mon, 13 Jun 2022 14:04:25 +0100 Subject: [PATCH 56/68] build: version number bumped to 2.2.0+dev --- pyerrors/version.py | 2 +- setup.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pyerrors/version.py b/pyerrors/version.py index e835b9d0..0d8c75f0 100644 --- a/pyerrors/version.py +++ b/pyerrors/version.py @@ -1 +1 @@ -__version__ = "2.1.3" +__version__ = "2.2.0+dev" diff --git a/setup.py b/setup.py index db07c7f1..ca4f785d 100644 --- a/setup.py +++ b/setup.py @@ -4,7 +4,7 @@ this_directory = Path(__file__).parent long_description = (this_directory / "README.md").read_text() setup(name='pyerrors', - version='2.1.3', + version='2.2.0+dev', description='Error analysis for lattice QCD', long_description=long_description, long_description_content_type='text/markdown', From 0d02e4d4b903296c99f380afefb725a4eaa45c70 Mon Sep 17 00:00:00 2001 From: Fabian Joswig Date: Tue, 14 Jun 2022 11:59:46 +0100 Subject: [PATCH 57/68] build: extract version number in setup.py from version.py --- .github/workflows/pytest.yml | 1 + .gitignore | 1 + setup.py | 8 +++++++- 3 files changed, 9 insertions(+), 1 deletion(-) diff --git a/.github/workflows/pytest.yml b/.github/workflows/pytest.yml index 9db158d3..5e215110 100644 --- a/.github/workflows/pytest.yml +++ 
b/.github/workflows/pytest.yml @@ -40,6 +40,7 @@ jobs: pip install pytest pip install pytest-cov pip install pytest-benchmark + pip freeze - name: Run tests run: pytest --cov=pyerrors -vv diff --git a/.gitignore b/.gitignore index 2701c491..a8338e8f 100644 --- a/.gitignore +++ b/.gitignore @@ -12,3 +12,4 @@ core.* htmlcov build pyerrors.egg-info +dist diff --git a/setup.py b/setup.py index ca4f785d..0c00aad5 100644 --- a/setup.py +++ b/setup.py @@ -1,10 +1,16 @@ from setuptools import setup, find_packages from pathlib import Path +from distutils.util import convert_path + this_directory = Path(__file__).parent long_description = (this_directory / "README.md").read_text() +version = {} +with open(convert_path('pyerrors/version.py')) as ver_file: + exec(ver_file.read(), version) + setup(name='pyerrors', - version='2.2.0+dev', + version=version['__version__'], description='Error analysis for lattice QCD', long_description=long_description, long_description_content_type='text/markdown', From 612e6c742becae6c5f2b129389d7f9bd94c04f5c Mon Sep 17 00:00:00 2001 From: Fabian Joswig Date: Tue, 14 Jun 2022 14:46:45 +0100 Subject: [PATCH 58/68] tests: matplotlib figures explicitly closed in tests --- tests/correlators_test.py | 2 ++ tests/obs_test.py | 3 +++ 2 files changed, 5 insertions(+) diff --git a/tests/correlators_test.py b/tests/correlators_test.py index 0aadcb85..3cfa230b 100644 --- a/tests/correlators_test.py +++ b/tests/correlators_test.py @@ -1,6 +1,7 @@ import os import numpy as np import scipy +import matplotlib.pyplot as plt import pyerrors as pe import pytest @@ -440,6 +441,7 @@ def test_spaghetti_plot(): corr.spaghetti_plot(True) corr.spaghetti_plot(False) + plt.close('all') def _gen_corr(val, samples=2000): diff --git a/tests/obs_test.py b/tests/obs_test.py index 2a158c3e..5b6a8509 100644 --- a/tests/obs_test.py +++ b/tests/obs_test.py @@ -1,6 +1,7 @@ import autograd.numpy as np import os import copy +import matplotlib.pyplot as plt import pyerrors as pe 
import pytest @@ -56,6 +57,7 @@ def test_Obs_exceptions(): one.gamma_method() with pytest.raises(Exception): one.plot_piechart() + plt.close('all') def test_dump(): value = np.random.normal(5, 10) @@ -368,6 +370,7 @@ def test_utils(): assert my_obs < (my_obs + 1) float(my_obs) str(my_obs) + plt.close('all') def test_cobs(): From bd6c0a223bdb708e9c933f1be382e85dd1b3feb9 Mon Sep 17 00:00:00 2001 From: Janneuendorf Date: Tue, 14 Jun 2022 15:49:20 +0200 Subject: [PATCH 59/68] Quick fix to corr.m_eff(). Zero values no longer produce errors but are handled as nones in m_eff(). --- pyerrors/correlators.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pyerrors/correlators.py b/pyerrors/correlators.py index d4d6206e..54da64f8 100644 --- a/pyerrors/correlators.py +++ b/pyerrors/correlators.py @@ -602,7 +602,7 @@ class Corr: if variant == 'log': newcontent = [] for t in range(self.T - 1): - if (self.content[t] is None) or (self.content[t + 1] is None): + if ((self.content[t] is None) or (self.content[t + 1] is None)) or (self.content[t + 1][0].value == 0): newcontent.append(None) else: newcontent.append(self.content[t] / self.content[t + 1]) @@ -622,7 +622,7 @@ class Corr: newcontent = [] for t in range(self.T - 1): - if (self.content[t] is None) or (self.content[t + 1] is None): + if (self.content[t] is None) or (self.content[t + 1] is None) or (self.content[t + 1][0].value == 0): newcontent.append(None) # Fill the two timeslices in the middle of the lattice with their predecessors elif variant == 'sinh' and t in [self.T / 2, self.T / 2 - 1]: @@ -637,7 +637,7 @@ class Corr: elif variant == 'arccosh': newcontent = [] for t in range(1, self.T - 1): - if (self.content[t] is None) or (self.content[t + 1] is None) or (self.content[t - 1] is None): + if (self.content[t] is None) or (self.content[t + 1] is None) or (self.content[t - 1] is None) or (self.content[t + 1][0].value == 0) or (self.content[t - 1][0].value == 0): newcontent.append(None) else: 
newcontent.append((self.content[t + 1] + self.content[t - 1]) / (2 * self.content[t])) From e671d38a1189a5f88b2ddadc1e1228c30e6abdb5 Mon Sep 17 00:00:00 2001 From: Janneuendorf Date: Tue, 14 Jun 2022 16:06:08 +0200 Subject: [PATCH 60/68] fix --- pyerrors/correlators.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyerrors/correlators.py b/pyerrors/correlators.py index 54da64f8..921ae732 100644 --- a/pyerrors/correlators.py +++ b/pyerrors/correlators.py @@ -637,7 +637,7 @@ class Corr: elif variant == 'arccosh': newcontent = [] for t in range(1, self.T - 1): - if (self.content[t] is None) or (self.content[t + 1] is None) or (self.content[t - 1] is None) or (self.content[t + 1][0].value == 0) or (self.content[t - 1][0].value == 0): + if (self.content[t] is None) or (self.content[t + 1] is None) or (self.content[t - 1] is None) or (self.content[t][0].value == 0): newcontent.append(None) else: newcontent.append((self.content[t + 1] + self.content[t - 1]) / (2 * self.content[t])) From 1f113a3b29d0b1d43a183e377ddcaa078cd87122 Mon Sep 17 00:00:00 2001 From: Fabian Joswig Date: Wed, 15 Jun 2022 13:48:34 +0100 Subject: [PATCH 61/68] refactor: comparison of constants removed. 
--- pyerrors/input/bdio.py | 6 +++--- pyerrors/input/misc.py | 2 +- pyerrors/input/openQCD.py | 8 ++++---- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/pyerrors/input/bdio.py b/pyerrors/input/bdio.py index f54dfbcd..2d30f3b2 100644 --- a/pyerrors/input/bdio.py +++ b/pyerrors/input/bdio.py @@ -61,7 +61,7 @@ def read_ADerrors(file_path, bdio_path='./libbdio.so', **kwargs): return_list = [] print('Reading of bdio file started') - while 1 > 0: + while True: bdio_seek_record(fbdio) ruinfo = bdio_get_ruinfo(fbdio) @@ -373,7 +373,7 @@ def read_mesons(file_path, bdio_path='./libbdio.so', **kwargs): fbdio = bdio_open(ctypes.c_char_p(b_path), ctypes.c_char_p(b_read), ctypes.c_char_p(b_form)) print('Reading of bdio file started') - while 1 > 0: + while True: bdio_seek_record(fbdio) ruinfo = bdio_get_ruinfo(fbdio) if ruinfo < 0: @@ -582,7 +582,7 @@ def read_dSdm(file_path, bdio_path='./libbdio.so', **kwargs): fbdio = bdio_open(ctypes.c_char_p(b_path), ctypes.c_char_p(b_read), ctypes.c_char_p(b_form)) print('Reading of bdio file started') - while 1 > 0: + while True: bdio_seek_record(fbdio) ruinfo = bdio_get_ruinfo(fbdio) if ruinfo < 0: diff --git a/pyerrors/input/misc.py b/pyerrors/input/misc.py index 6d7b6d50..2e148d90 100644 --- a/pyerrors/input/misc.py +++ b/pyerrors/input/misc.py @@ -93,7 +93,7 @@ def read_pbp(path, prefix, **kwargs): nsrc.append(struct.unpack('i', t)[0]) # body - while 0 < 1: + while True: t = fp.read(4) if len(t) < 4: break diff --git a/pyerrors/input/openQCD.py b/pyerrors/input/openQCD.py index 05e9fdb7..c76c0596 100644 --- a/pyerrors/input/openQCD.py +++ b/pyerrors/input/openQCD.py @@ -150,7 +150,7 @@ def read_rwms(path, prefix, version='2.0', names=None, **kwargs): print('something is wrong!') configlist.append([]) - while 0 < 1: + while True: t = fp.read(4) if len(t) < 4: break @@ -362,7 +362,7 @@ def extract_t0(path, prefix, dtr_read, xmin, spatial_extent, fit_range=5, **kwar Ysl = [] configlist.append([]) - while 0 < 1: + while 
True: t = fp.read(4) if(len(t) < 4): break @@ -657,7 +657,7 @@ def read_qtop(path, prefix, c, dtr_cnfg=1, version="openQCD", **kwargs): nfl = 1 iobs = 8 * nfl # number of flow observables calculated - while 0 < 1: + while True: t = fp.read(4) if(len(t) < 4): break @@ -682,7 +682,7 @@ def read_qtop(path, prefix, c, dtr_cnfg=1, version="openQCD", **kwargs): t = fp.read(8) eps = struct.unpack('d', t)[0] - while 0 < 1: + while True: t = fp.read(4) if(len(t) < 4): break From d79aa2cf74609f7b9f4d1330e510729b762134a0 Mon Sep 17 00:00:00 2001 From: Fabian Joswig Date: Wed, 15 Jun 2022 14:01:26 +0100 Subject: [PATCH 62/68] refactor: range in Corr.print and __repr__ renamed to print_range --- pyerrors/correlators.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/pyerrors/correlators.py b/pyerrors/correlators.py index 921ae732..646ef875 100644 --- a/pyerrors/correlators.py +++ b/pyerrors/correlators.py @@ -897,10 +897,10 @@ class Corr: else: raise Exception("Unknown datatype " + str(datatype)) - def print(self, range=[0, None]): - print(self.__repr__(range)) + def print(self, print_range=[0, None]): + print(self.__repr__(print_range)) - def __repr__(self, range=[0, None]): + def __repr__(self, print_range=[0, None]): content_string = "" content_string += "Corr T=" + str(self.T) + " N=" + str(self.N) + "\n" # +" filled with"+ str(type(self.content[0][0])) there should be a good solution here @@ -910,14 +910,14 @@ class Corr: if self.N != 1: return content_string - if range[1]: - range[1] += 1 + if print_range[1]: + print_range[1] += 1 content_string += 'x0/a\tCorr(x0/a)\n------------------\n' - for i, sub_corr in enumerate(self.content[range[0]:range[1]]): + for i, sub_corr in enumerate(self.content[print_range[0]:print_range[1]]): if sub_corr is None: - content_string += str(i + range[0]) + '\n' + content_string += str(i + print_range[0]) + '\n' else: - content_string += str(i + range[0]) + content_string += str(i + print_range[0]) for 
element in sub_corr: content_string += '\t' + ' ' * int(element >= 0) + str(element) content_string += '\n' From 02403fc282410e8caa02276e2e4045219dff7421 Mon Sep 17 00:00:00 2001 From: Fabian Joswig Date: Wed, 15 Jun 2022 14:04:07 +0100 Subject: [PATCH 63/68] fix: Modification of parameter with default in input.dobs fixed. --- pyerrors/input/dobs.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/pyerrors/input/dobs.py b/pyerrors/input/dobs.py index 498e88d8..92ae3e35 100644 --- a/pyerrors/input/dobs.py +++ b/pyerrors/input/dobs.py @@ -652,7 +652,7 @@ def _dobsdict_to_xmlstring_spaces(d, space=' '): return o -def create_dobs_string(obsl, name, spec='dobs v1.0', origin='', symbol=[], who=None, enstags={}): +def create_dobs_string(obsl, name, spec='dobs v1.0', origin='', symbol=[], who=None, enstags=None): """Generate the string for the export of a list of Obs or structures containing Obs to a .xml.gz file according to the Zeuthen dobs format. @@ -678,6 +678,8 @@ def create_dobs_string(obsl, name, spec='dobs v1.0', origin='', symbol=[], who=N Provide alternative enstag for ensembles in the form enstags = {ename: enstag} Otherwise, the ensemble name is used. """ + if enstags is None: + enstags = {} od = {} r_names = [] for o in obsl: From abdeace107f8c68003b4ff71148d0d87cfd24009 Mon Sep 17 00:00:00 2001 From: Fabian Joswig Date: Wed, 15 Jun 2022 14:05:41 +0100 Subject: [PATCH 64/68] fix: Modification of parameter with default in Corr.__repr__ fixed. 
--- pyerrors/correlators.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/pyerrors/correlators.py b/pyerrors/correlators.py index 646ef875..ecb8d525 100644 --- a/pyerrors/correlators.py +++ b/pyerrors/correlators.py @@ -897,12 +897,14 @@ class Corr: else: raise Exception("Unknown datatype " + str(datatype)) - def print(self, print_range=[0, None]): + def print(self, print_range=None): print(self.__repr__(print_range)) - def __repr__(self, print_range=[0, None]): - content_string = "" + def __repr__(self, print_range=None): + if print_range is None: + print_range = [0, None] + content_string = "" content_string += "Corr T=" + str(self.T) + " N=" + str(self.N) + "\n" # +" filled with"+ str(type(self.content[0][0])) there should be a good solution here if self.tag is not None: From d600793d0e0db8574aa63dd285cffb53c41c837a Mon Sep 17 00:00:00 2001 From: Fabian Joswig Date: Wed, 15 Jun 2022 14:07:35 +0100 Subject: [PATCH 65/68] fix: unreachable statement in input.misc.read_pbp removed. 
--- pyerrors/input/misc.py | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/pyerrors/input/misc.py b/pyerrors/input/misc.py index 2e148d90..fe75fb1a 100644 --- a/pyerrors/input/misc.py +++ b/pyerrors/input/misc.py @@ -17,8 +17,6 @@ def read_pbp(path, prefix, **kwargs): list which contains the last config to be read for each replicum """ - extract_nfct = 1 - ls = [] for (dirpath, dirnames, filenames) in os.walk(path): ls.extend(filenames) @@ -78,14 +76,10 @@ def read_pbp(path, prefix, **kwargs): # This block is necessary for openQCD1.6 ms1 files nfct = [] - if extract_nfct == 1: - for i in range(nrw): - t = fp.read(4) - nfct.append(struct.unpack('i', t)[0]) - print('nfct: ', nfct) # Hasenbusch factor, 1 for rat reweighting - else: - for i in range(nrw): - nfct.append(1) + for i in range(nrw): + t = fp.read(4) + nfct.append(struct.unpack('i', t)[0]) + print('nfct: ', nfct) # Hasenbusch factor, 1 for rat reweighting nsrc = [] for i in range(nrw): From 0150ef4c0bd4ec5f2d4f001ad669a4ae65707a1f Mon Sep 17 00:00:00 2001 From: Fabian Joswig Date: Wed, 15 Jun 2022 14:14:01 +0100 Subject: [PATCH 66/68] refactor: imports of scipy.stats in fits simplified. 
--- pyerrors/fits.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/pyerrors/fits.py b/pyerrors/fits.py index 489c2f8b..997c7f6f 100644 --- a/pyerrors/fits.py +++ b/pyerrors/fits.py @@ -8,7 +8,6 @@ import scipy.stats import matplotlib.pyplot as plt from matplotlib import gridspec from scipy.odr import ODR, Model, RealData -from scipy.stats import chi2 import iminuit from autograd import jacobian from autograd import elementwise_grad as egrad @@ -303,7 +302,7 @@ def total_least_squares(x, y, func, silent=False, **kwargs): output.odr_chisquare = odr_chisquare(np.concatenate((out.beta, out.xplus.ravel()))) output.dof = x.shape[-1] - n_parms - output.p_value = 1 - chi2.cdf(output.odr_chisquare, output.dof) + output.p_value = 1 - scipy.stats.chi2.cdf(output.odr_chisquare, output.dof) return output @@ -594,7 +593,7 @@ def _standard_fit(x, y, func, silent=False, **kwargs): output.chisquare = chisquare output.dof = x.shape[-1] - n_parms - output.p_value = 1 - chi2.cdf(output.chisquare, output.dof) + output.p_value = 1 - scipy.stats.chi2.cdf(output.chisquare, output.dof) if kwargs.get('resplot') is True: residual_plot(x, y, func, result) From a545545d73491fe86028e08921540b6886810b86 Mon Sep 17 00:00:00 2001 From: Fabian Joswig Date: Fri, 17 Jun 2022 10:17:18 +0100 Subject: [PATCH 67/68] fix: Another modification of parameter with default in input.dobs fixed. 
--- pyerrors/input/dobs.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/pyerrors/input/dobs.py b/pyerrors/input/dobs.py index 92ae3e35..95aa8894 100644 --- a/pyerrors/input/dobs.py +++ b/pyerrors/input/dobs.py @@ -827,7 +827,7 @@ def create_dobs_string(obsl, name, spec='dobs v1.0', origin='', symbol=[], who=N return rs -def write_dobs(obsl, fname, name, spec='dobs v1.0', origin='', symbol=[], who=None, enstags={}, gz=True): +def write_dobs(obsl, fname, name, spec='dobs v1.0', origin='', symbol=[], who=None, enstags=None, gz=True): """Export a list of Obs or structures containing Obs to a .xml.gz file according to the Zeuthen dobs format. @@ -857,6 +857,8 @@ def write_dobs(obsl, fname, name, spec='dobs v1.0', origin='', symbol=[], who=No gz : bool If True, the output is a gzipped XML. If False, the output is a XML file. """ + if enstags is None: + enstags = {} dobsstring = create_dobs_string(obsl, name, spec, origin, symbol, who, enstags=enstags) From 6ef9d99619b8dfc9f15004a9efc20717e4877fae Mon Sep 17 00:00:00 2001 From: Fabian Joswig Date: Fri, 17 Jun 2022 16:36:01 +0100 Subject: [PATCH 68/68] refactor: redundant check in input.hadrons.read_DistillationContraction removed. --- pyerrors/input/hadrons.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/pyerrors/input/hadrons.py b/pyerrors/input/hadrons.py index c773b186..4645330d 100644 --- a/pyerrors/input/hadrons.py +++ b/pyerrors/input/hadrons.py @@ -171,10 +171,6 @@ def read_DistillationContraction_hd5(path, ens_id, diagrams=["direct"], idl=None identifier = tuple(identifier) # "DistillationContraction/Metadata/DmfSuffix" contains info about different quarks, irrelevant in the SU(3) case. - check_traj = h5file["DistillationContraction/Metadata"].attrs.get("Traj")[0] - - assert check_traj == n_traj - for diagram in diagrams: real_data = np.zeros(Nt) for x0 in range(Nt):