Merge branch 'develop' into feat/typehints

Fabian Joswig 2025-05-05 19:11:51 +02:00
commit 6d80efd388
15 changed files with 156 additions and 93 deletions

View file

@@ -27,17 +27,17 @@ jobs:
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}
- name: uv
uses: astral-sh/setup-uv@v5
- name: Install
run: |
sudo apt-get update
sudo apt-get install dvipng texlive-latex-extra texlive-fonts-recommended cm-super
python -m pip install --upgrade pip
pip install wheel
pip install .
pip install pytest
pip install nbmake
pip install -U matplotlib!=3.7.0 # Exclude version 3.7.0 of matplotlib as this breaks local imports of style files.
uv pip install wheel --system
uv pip install . --system
uv pip install pytest nbmake --system
uv pip install -U matplotlib!=3.7.0 --system # Exclude version 3.7.0 of matplotlib as this breaks local imports of style files.
- name: Run tests
run: pytest -vv --nbmake examples/*.ipynb

View file

@@ -20,7 +20,9 @@ jobs:
python-version: ["3.9", "3.10", "3.11", "3.12", "3.13"]
include:
- os: macos-latest
python-version: "3.10"
python-version: "3.12"
- os: ubuntu-24.04-arm
python-version: "3.12"
steps:
- name: Checkout source
@@ -30,18 +32,15 @@ jobs:
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}
- name: uv
uses: astral-sh/setup-uv@v5
- name: Install
run: |
python -m pip install --upgrade pip
pip install wheel
pip install .
pip install pytest
pip install pytest-cov
pip install pytest-benchmark
pip install hypothesis
pip install typing_extensions
pip freeze
uv pip install wheel --system
uv pip install . --system
uv pip install pytest pytest-cov pytest-benchmark hypothesis typing_extensions --system
uv pip freeze --system
- name: Run tests
run: pytest --cov=pyerrors -vv
run: pytest --cov=pyerrors -vv -Werror

View file

@@ -2,6 +2,21 @@
All notable changes to this project will be documented in this file.
## [2.14.0] - 2025-03-09
### Added
- Explicit checks of the provided inverse matrix for correlated fits #259
### Changed
- Compute derivative for pow explicitly instead of relying on autograd. This results in a ~4x speedup for pow operations #246
- More explicit exception types #248
### Fixed
- Removed the possibility to create an Obs from data on several replica #258
- Fix range in `set_prange` #247
- Fix ensemble name handling in sfcf input modules #253
- Correct error message for fit shape mismatch #257
## [2.13.0] - 2024-11-03
### Added

View file

@@ -151,7 +151,7 @@
"\n",
"$$C_{\\textrm{projected}}(t)=v_1^T \\underline{C}(t) v_2$$\n",
"\n",
"If we choose the vectors to be $v_1=v_2=(0,1,0,0)$, we should get the same correlator as in the cell above. \n",
"If we choose the vectors to be $v_1=v_2=(1,0,0,0)$, we should get the same correlator as in the cell above. \n",
"\n",
"Thinking about it this way is usefull in the Context of the generalized eigenvalue problem (GEVP), used to find the source-sink combination, which best describes a certain energy eigenstate.\n",
"A good introduction is found in https://arxiv.org/abs/0902.1265."

View file

@@ -316,7 +316,7 @@ def least_squares(x: Any, y: Union[dict[str, ndarray], list[Obs], ndarray, dict[
if len(key_ls) > 1:
for key in key_ls:
if np.asarray(yd[key]).shape != funcd[key](np.arange(n_parms), xd[key]).shape:
raise ValueError(f"Fit function {key} returns the wrong shape ({funcd[key](np.arange(n_parms), xd[key]).shape} instead of {xd[key].shape})\nIf the fit function is just a constant you could try adding x*0 to get the correct shape.")
raise ValueError(f"Fit function {key} returns the wrong shape ({funcd[key](np.arange(n_parms), xd[key]).shape} instead of {np.asarray(yd[key]).shape})\nIf the fit function is just a constant you could try adding x*0 to get the correct shape.")
if not silent:
print('Fit with', n_parms, 'parameter' + 's' * (n_parms > 1))
@@ -387,6 +387,8 @@ def least_squares(x: Any, y: Union[dict[str, ndarray], list[Obs], ndarray, dict[
if (chol_inv[1] != key_ls):
raise ValueError('The keys of inverse covariance matrix are not the same or do not appear in the same order as the x and y values.')
chol_inv = chol_inv[0]
if np.any(np.diag(chol_inv) <= 0) or (not np.all(chol_inv == np.tril(chol_inv))):
raise ValueError('The inverse covariance matrix inv_chol_cov_matrix[0] has to be a lower triangular matrix constructed from a Cholesky decomposition.')
else:
corr = covariance(y_all, correlation=True, **kwargs)
inverrdiag = np.diag(1 / np.asarray(dy_f))
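The added validation enforces the two defining properties of an inverse Cholesky factor. A minimal numpy-only sketch of the invariant, using a toy correlation matrix for illustration:

import numpy as np

corr = np.array([[1.0, 0.3],
                 [0.3, 1.0]])                       # toy correlation matrix
chol_inv = np.linalg.inv(np.linalg.cholesky(corr))  # inverse of the lower Cholesky factor

# Exactly the two conditions the new check tests for:
assert np.all(np.diag(chol_inv) > 0)                # strictly positive diagonal
assert np.allclose(chol_inv, np.tril(chol_inv))     # lower triangular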

View file

@@ -538,7 +538,8 @@ def import_dobs_string(content: bytes, full_output: bool=False, separator_insert
deltas.append(repdeltas)
idl.append(repidl)
res.append(Obs(deltas, obs_names, idl=idl))
obsmeans = [np.average(deltas[j]) for j in range(len(deltas))]
res.append(Obs([np.array(deltas[j]) - obsmeans[j] for j in range(len(obsmeans))], obs_names, idl=idl, means=obsmeans))
res[-1]._value = mean[i]
_check(len(e_names) == ne)

View file

@@ -135,10 +135,11 @@ def create_json_string(ol: list, description: Union[str, dict]='', indent: int=1
names = []
idl = []
for key, value in obs.idl.items():
samples.append([np.nan] * len(value))
samples.append(np.array([np.nan] * len(value)))
names.append(key)
idl.append(value)
my_obs = Obs(samples, names, idl)
my_obs = Obs(samples, names, idl, means=[np.nan for n in names])
my_obs._value = np.nan
my_obs._covobs = obs._covobs
for name in obs._covobs:
my_obs.names.append(name)
@@ -334,7 +335,8 @@ def _parse_json_dict(json_dict: dict[str, Any], verbose: bool=True, full_output:
cd = _gen_covobsd_from_cdatad(o.get('cdata', {}))
if od:
ret = Obs([[ddi[0] + values[0] for ddi in di] for di in od['deltas']], od['names'], idl=od['idl'])
r_offsets = [np.average([ddi[0] for ddi in di]) for di in od['deltas']]
ret = Obs([np.array([ddi[0] for ddi in od['deltas'][i]]) - r_offsets[i] for i in range(len(od['deltas']))], od['names'], idl=od['idl'], means=[ro + values[0] for ro in r_offsets])
ret._value = values[0]
else:
ret = Obs([], [], means=[])
@@ -359,7 +361,8 @@ def _parse_json_dict(json_dict: dict[str, Any], verbose: bool=True, full_output:
taglist = o.get('tag', layout * [None])
for i in range(layout):
if od:
ret.append(Obs([list(di[:, i] + values[i]) for di in od['deltas']], od['names'], idl=od['idl']))
r_offsets = np.array([np.average(di[:, i]) for di in od['deltas']])
ret.append(Obs([od['deltas'][j][:, i] - r_offsets[j] for j in range(len(od['deltas']))], od['names'], idl=od['idl'], means=[ro + values[i] for ro in r_offsets]))
ret[-1]._value = values[i]
else:
ret.append(Obs([], [], means=[]))
@@ -386,7 +389,8 @@ def _parse_json_dict(json_dict: dict[str, Any], verbose: bool=True, full_output:
taglist = o.get('tag', N * [None])
for i in range(N):
if od:
ret.append(Obs([di[:, i] + values[i] for di in od['deltas']], od['names'], idl=od['idl']))
r_offsets = np.array([np.average(di[:, i]) for di in od['deltas']])
ret.append(Obs([od['deltas'][j][:, i] - r_offsets[j] for j in range(len(od['deltas']))], od['names'], idl=od['idl'], means=[ro + values[i] for ro in r_offsets]))
ret[-1]._value = values[i]
else:
ret.append(Obs([], [], means=[]))
@@ -570,7 +574,6 @@ def _ol_from_dict(ind: dict, reps: str='DICTOBS') -> tuple[list, dict]:
counter = 0
def dict_replace_obs(d):
nonlocal ol
nonlocal counter
x = {}
for k, v in d.items():
@@ -591,7 +594,6 @@ def _ol_from_dict(ind: dict, reps: str='DICTOBS') -> tuple[list, dict]:
return x
def list_replace_obs(li):
nonlocal ol
nonlocal counter
x = []
for e in li:
@@ -612,7 +614,6 @@ def _ol_from_dict(ind: dict, reps: str='DICTOBS') -> tuple[list, dict]:
return x
def obslist_replace_obs(li):
nonlocal ol
nonlocal counter
il = []
for e in li:
@@ -693,7 +694,6 @@ def _od_from_list_and_dict(ol: list, ind: dict, reps: str='DICTOBS') -> dict[str
def dict_replace_string(d):
nonlocal counter
nonlocal ol
x = {}
for k, v in d.items():
if isinstance(v, dict):
@@ -709,7 +709,6 @@ def _od_from_list_and_dict(ol: list, ind: dict, reps: str='DICTOBS') -> dict[str
def list_replace_string(li):
nonlocal counter
nonlocal ol
x = []
for e in li:
if isinstance(e, list):

View file

@@ -2,6 +2,7 @@ from __future__ import annotations
import warnings
import gzip
import sqlite3
from contextlib import closing
import pandas as pd
from ..obs import Obs
from ..correlators import Corr
@@ -32,9 +33,8 @@ def to_sql(df: DataFrame, table_name: str, db: str, if_exists: str='fail', gz: b
None
"""
se_df = _serialize_df(df, gz=gz)
con = sqlite3.connect(db)
se_df.to_sql(table_name, con, if_exists=if_exists, index=False, **kwargs)
con.close()
with closing(sqlite3.connect(db)) as con:
se_df.to_sql(table_name, con=con, if_exists=if_exists, index=False, **kwargs)
def read_sql(sql: str, db: str, auto_gamma: bool=False, **kwargs) -> DataFrame:
@@ -55,9 +55,8 @@ def read_sql(sql: str, db: str, auto_gamma: bool=False, **kwargs) -> DataFrame:
data : pandas.DataFrame
Dataframe with the content of the sqlite database.
"""
con = sqlite3.connect(db)
extract_df = pd.read_sql(sql, con, **kwargs)
con.close()
with closing(sqlite3.connect(db)) as con:
extract_df = pd.read_sql(sql, con=con, **kwargs)
return _deserialize_df(extract_df, auto_gamma=auto_gamma)
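The switch to contextlib.closing is a deliberate design choice: an sqlite3 connection used directly as a context manager only manages a transaction (commit or rollback on exit) and does not close the connection, while closing() guarantees con.close() even if an exception is raised. A minimal sketch:

from contextlib import closing
import sqlite3

with closing(sqlite3.connect(":memory:")) as con:
    con.execute("CREATE TABLE t (x REAL)")
# The connection is closed here; a bare "with con:" would only have committed.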

View file

@@ -95,6 +95,8 @@ class Obs:
raise ValueError('Names are not unique.')
if not all(isinstance(x, str) for x in names):
raise TypeError('All names have to be strings.')
if len(set([o.split('|')[0] for o in names])) > 1:
raise ValueError('Cannot initialize Obs based on multiple ensembles. Please average separate Obs from each ensemble.')
else:
if not isinstance(names[0], str):
raise TypeError('All names have to be strings.')
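In practice the new check changes Obs construction as sketched below (ensemble names invented for the example): multiple replica of one ensemble are still accepted, while samples from different ensembles now have to be combined via separate Obs.

import numpy as np
import pyerrors as pe

# Still allowed: two replica of the same ensemble 'A'.
obs_replica = pe.Obs([np.random.rand(50), np.random.rand(50)], ['A|r1', 'A|r2'])

# Raises ValueError now: samples from two different ensembles.
# pe.Obs([np.random.rand(50), np.random.rand(50)], ['A', 'B'])

# Recommended pattern instead: one Obs per ensemble, combined arithmetically.
obs_multi = pe.Obs([np.random.rand(50)], ['A']) + pe.Obs([np.random.rand(50)], ['B'])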
@@ -1434,6 +1436,8 @@ def reweight(weight: Obs, obs: Union[ndarray, list[Obs]], **kwargs) -> list[Obs]
raise ValueError('Error: Not possible to reweight an Obs that contains covobs!')
if not set(obs[i].names).issubset(weight.names):
raise ValueError('Error: Ensembles do not fit')
if len(obs[i].mc_names) > 1 or len(weight.mc_names) > 1:
raise ValueError('Error: Cannot reweight an Obs that contains multiple ensembles.')
for name in obs[i].names:
if not set(obs[i].idl[name]).issubset(weight.idl[name]):
raise ValueError('obs[%d] has to be defined on a subset of the configs in weight.idl[%s]!' % (i, name))
@@ -1469,9 +1473,12 @@ def correlate(obs_a: Obs, obs_b: Obs) -> Obs:
-----
Keep in mind to only correlate primary observables which have not been reweighted
yet. The reweighting has to be applied after correlating the observables.
Currently only works if ensembles are identical (this is not strictly necessary).
Only works if a single ensemble is present in the Obs.
Currently only works if ensemble content is identical (this is not strictly necessary).
"""
if len(obs_a.mc_names) > 1 or len(obs_b.mc_names) > 1:
raise ValueError('Error: Cannot correlate Obs that contain multiple ensembles.')
if sorted(obs_a.names) != sorted(obs_b.names):
raise ValueError(f"Ensembles do not fit {set(sorted(obs_a.names)) ^ set(sorted(obs_b.names))}")
if len(obs_a.cov_names) or len(obs_b.cov_names):
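A short usage sketch of the tightened contract (synthetic data, invented names): both observables have to be based on the same single ensemble with identical replica content.

import numpy as np
import pyerrors as pe

a = pe.Obs([np.random.rand(100)], ['ens|r1'])
b = pe.Obs([np.random.rand(100)], ['ens|r1'])
ab = pe.correlate(a, b)  # fine: identical single-ensemble content

c = pe.Obs([np.random.rand(100)], ['other_ens'])
# pe.correlate(a, c) now raises ValueError, as the ensembles do not match.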
@@ -1781,8 +1788,13 @@ def import_bootstrap(boots: ndarray, name: str, random_numbers: ndarray) -> Obs:
return ret
def merge_obs(list_of_obs: list[Obs]) -> Obs:
"""Combine all observables in list_of_obs into one new observable
"""Combine all observables in list_of_obs into one new observable.
This allows one to merge Obs that have been computed on multiple replica
of the same ensemble.
If you would like to merge Obs that are based on several ensembles, please
average them yourself.
Parameters
----------

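A usage sketch mirroring this docstring (ensemble names invented for the example):

import numpy as np
import pyerrors as pe

# Replica of the same ensemble can be merged into a single Obs ...
r1 = pe.Obs([np.random.normal(1.0, 0.1, 100)], ['ens|r1'])
r2 = pe.Obs([np.random.normal(1.0, 0.1, 100)], ['ens|r2'])
merged = pe.merge_obs([r1, r2])

# ... while Obs from different ensembles have to be averaged explicitly.
a = pe.Obs([np.random.normal(1.0, 0.1, 100)], ['ensA'])
b = pe.Obs([np.random.normal(1.0, 0.1, 100)], ['ensB'])
averaged = (a + b) / 2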
View file

@@ -1 +1 @@
__version__ = "2.14.0-dev"
__version__ = "2.15.0-dev"

View file

@@ -30,7 +30,6 @@ setup(name='pyerrors',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',

View file

@@ -223,6 +223,9 @@ def test_inv_cov_matrix_input_least_squares():
diff_inv_cov_combined_fit.gamma_method()
assert(diff_inv_cov_combined_fit.is_zero(atol=1e-12))
with pytest.raises(ValueError):
pe.least_squares(x_dict, data_dict, fitf_dict, correlated_fit = True, inv_chol_cov_matrix = [corr,chol_inv_keys_combined_fit])
def test_least_squares_invalid_inv_cov_matrix_input():
xvals = []
yvals = []

View file

@@ -12,7 +12,7 @@ def test_jsonio():
o = pe.pseudo_Obs(1.0, .2, 'one')
o2 = pe.pseudo_Obs(0.5, .1, 'two|r1')
o3 = pe.pseudo_Obs(0.5, .1, 'two|r2')
o4 = pe.merge_obs([o2, o3])
o4 = pe.merge_obs([o2, o3, pe.pseudo_Obs(0.5, .1, 'two|r3', samples=3221)])
otag = 'This has been merged!'
o4.tag = otag
do = o - .2 * o4
@@ -101,8 +101,8 @@ def test_json_string_reconstruction():
def test_json_corr_io():
my_list = [pe.Obs([np.random.normal(1.0, 0.1, 100)], ['ens1']) for o in range(8)]
rw_list = pe.reweight(pe.Obs([np.random.normal(1.0, 0.1, 100)], ['ens1']), my_list)
my_list = [pe.Obs([np.random.normal(1.0, 0.1, 100), np.random.normal(1.0, 0.1, 321)], ['ens1|r1', 'ens1|r2'], idl=[range(1, 201, 2), range(321)]) for o in range(8)]
rw_list = pe.reweight(pe.Obs([np.random.normal(1.0, 0.1, 100), np.random.normal(1.0, 0.1, 321)], ['ens1|r1', 'ens1|r2'], idl=[range(1, 201, 2), range(321)]), my_list)
for obs_list in [my_list, rw_list]:
for tag in [None, "test"]:
@@ -111,7 +111,8 @@ def test_json_corr_io():
for corr_tag in [None, 'my_Corr_tag']:
for prange in [None, [3, 6]]:
for gap in [False, True]:
my_corr = pe.Corr(obs_list, padding=[pad, pad], prange=prange)
for mult in [1., pe.cov_Obs([12.22, 1.21], [.212**2, .11**2], 'renorm')[0]]:
my_corr = mult * pe.Corr(obs_list, padding=[pad, pad], prange=prange)
my_corr.tag = corr_tag
if gap:
my_corr.content[4] = None
@@ -128,13 +129,23 @@ def test_json_corr_io():
def test_json_corr_2d_io():
obs_list = [np.array([[pe.pseudo_Obs(1.0 + i, 0.1 * i, 'test'), pe.pseudo_Obs(0.0, 0.1 * i, 'test')], [pe.pseudo_Obs(0.0, 0.1 * i, 'test'), pe.pseudo_Obs(1.0 + i, 0.1 * i, 'test')]]) for i in range(4)]
obs_list = [np.array([
[
pe.merge_obs([pe.pseudo_Obs(1.0 + i, 0.1 * i, 'test|r2'), pe.pseudo_Obs(1.0 + i, 0.1 * i, 'test|r1', samples=321)]),
pe.merge_obs([pe.pseudo_Obs(0.0, 0.1 * i, 'test|r2'), pe.pseudo_Obs(0.0, 0.1 * i, 'test|r1', samples=321)]),
],
[
pe.merge_obs([pe.pseudo_Obs(0.0, 0.1 * i, 'test|r2'), pe.pseudo_Obs(0.0, 0.1 * i, 'test|r1', samples=321),]),
pe.merge_obs([pe.pseudo_Obs(1.0 + i, 0.1 * i, 'test|r2'), pe.pseudo_Obs(1.0 + i, 0.1 * i, 'test|r1', samples=321)]),
],
]) for i in range(4)]
for tag in [None, "test"]:
obs_list[3][0, 1].tag = tag
for padding in [0, 1]:
for prange in [None, [3, 6]]:
my_corr = pe.Corr(obs_list, padding=[padding, padding], prange=prange)
for mult in [1., pe.cov_Obs([12.22, 1.21], [.212**2, .11**2], 'renorm')[0]]:
my_corr = mult * pe.Corr(obs_list, padding=[padding, padding], prange=prange)
my_corr.tag = tag
pe.input.json.dump_to_json(my_corr, 'corr')
recover = pe.input.json.load_json('corr')
@@ -211,6 +222,7 @@ def test_json_dict_io():
'd': pe.pseudo_Obs(.01, .001, 'testd', samples=10) * pe.cov_Obs(1, .01, 'cov1'),
'se': None,
'sf': 1.2,
'k': pe.cov_Obs(.1, .001**2, 'cov') * pe.merge_obs([pe.pseudo_Obs(1.0, 0.1, 'test|r2'), pe.pseudo_Obs(1.0, 0.1, 'test|r1', samples=321)]),
}
}
@@ -314,7 +326,7 @@ def test_dobsio():
o2 = pe.pseudo_Obs(0.5, .1, 'two|r1')
o3 = pe.pseudo_Obs(0.5, .1, 'two|r2')
o4 = pe.merge_obs([o2, o3])
o4 = pe.merge_obs([o2, o3, pe.pseudo_Obs(0.5, .1, 'two|r3', samples=3221)])
otag = 'This has been merged!'
o4.tag = otag
do = o - .2 * o4
@@ -328,7 +340,7 @@ def test_dobsio():
o5 /= co2[0]
o5.tag = 2 * otag
tt1 = pe.Obs([np.random.rand(100), np.random.rand(100)], ['t|r1', 't|r2'], idl=[range(2, 202, 2), range(22, 222, 2)])
tt1 = pe.Obs([np.random.rand(100), np.random.rand(102)], ['t|r1', 't|r2'], idl=[range(2, 202, 2), range(22, 226, 2)])
tt3 = pe.Obs([np.random.rand(102)], ['qe|r1'])
tt = tt1 + tt3
@@ -337,7 +349,7 @@ def test_dobsio():
tt4 = pe.Obs([np.random.rand(100), np.random.rand(100)], ['t|r1', 't|r2'], idl=[range(1, 101, 1), range(2, 202, 2)])
ol = [o2, o3, o4, do, o5, tt, tt4, np.log(tt4 / o5**2), np.exp(o5 + np.log(co3 / tt3 + o4) / tt)]
ol = [o2, o3, o4, do, o5, tt, tt4, np.log(tt4 / o5**2), np.exp(o5 + np.log(co3 / tt3 + o4) / tt), o4.reweight(o4)]
print(ol)
fname = 'test_rw'
@@ -362,9 +374,12 @@ def test_dobsio():
def test_reconstruct_non_linear_r_obs(tmp_path):
to = pe.Obs([np.random.rand(500), np.random.rand(500), np.random.rand(111)],
["e|r1", "e|r2", "my_new_ensemble_54^£$|8'[@124435%6^7&()~#"],
idl=[range(1, 501), range(0, 500), range(1, 999, 9)])
to = (
pe.Obs([np.random.rand(500), np.random.rand(1200)],
["e|r1", "e|r2", ],
idl=[range(1, 501), range(0, 1200)])
+ pe.Obs([np.random.rand(111)], ["my_new_ensemble_54^£$|8'[@124435%6^7&()~#"], idl=[range(1, 999, 9)])
)
to = np.log(to ** 2) / to
to.dump((tmp_path / "test_equality").as_posix())
ro = pe.input.json.load_json((tmp_path / "test_equality").as_posix())
@@ -372,9 +387,12 @@ def test_reconstruct_non_linear_r_obs(tmp_path):
def test_reconstruct_non_linear_r_obs_list(tmp_path):
to = pe.Obs([np.random.rand(500), np.random.rand(500), np.random.rand(111)],
["e|r1", "e|r2", "my_new_ensemble_54^£$|8'[@124435%6^7&()~#"],
idl=[range(1, 501), range(0, 500), range(1, 999, 9)])
to = (
pe.Obs([np.random.rand(500), np.random.rand(1200)],
["e|r1", "e|r2", ],
idl=[range(1, 501), range(0, 1200)])
+ pe.Obs([np.random.rand(111)], ["my_new_ensemble_54^£$|8'[@124435%6^7&()~#"], idl=[range(1, 999, 9)])
)
to = np.log(to ** 2) / to
for to_list in [[to, to, to], np.array([to, to, to])]:
pe.input.json.dump_to_json(to_list, (tmp_path / "test_equality_list").as_posix())

View file

@@ -34,7 +34,7 @@ def test_matmul():
my_list = []
length = 100 + np.random.randint(200)
for i in range(dim ** 2):
my_list.append(pe.Obs([np.random.rand(length), np.random.rand(length + 1)], ['t1', 't2']))
my_list.append(pe.Obs([np.random.rand(length)], ['t1']) + pe.Obs([np.random.rand(length + 1)], ['t2']))
my_array = const * np.array(my_list).reshape((dim, dim))
tt = pe.linalg.matmul(my_array, my_array) - my_array @ my_array
for t, e in np.ndenumerate(tt):
@@ -43,8 +43,8 @@ def test_matmul():
my_list = []
length = 100 + np.random.randint(200)
for i in range(dim ** 2):
my_list.append(pe.CObs(pe.Obs([np.random.rand(length), np.random.rand(length + 1)], ['t1', 't2']),
pe.Obs([np.random.rand(length), np.random.rand(length + 1)], ['t1', 't2'])))
my_list.append(pe.CObs(pe.Obs([np.random.rand(length)], ['t1']) + pe.Obs([np.random.rand(length + 1)], ['t2']),
pe.Obs([np.random.rand(length)], ['t1']) + pe.Obs([np.random.rand(length + 1)], ['t2'])))
my_array = np.array(my_list).reshape((dim, dim)) * const
tt = pe.linalg.matmul(my_array, my_array) - my_array @ my_array
for t, e in np.ndenumerate(tt):
@@ -151,7 +151,7 @@ def test_multi_dot():
my_list = []
length = 1000 + np.random.randint(200)
for i in range(dim ** 2):
my_list.append(pe.Obs([np.random.rand(length), np.random.rand(length + 1)], ['t1', 't2']))
my_list.append(pe.Obs([np.random.rand(length)], ['t1']) + pe.Obs([np.random.rand(length + 1)], ['t2']))
my_array = pe.cov_Obs(1.0, 0.002, 'cov') * np.array(my_list).reshape((dim, dim))
tt = pe.linalg.matmul(my_array, my_array, my_array, my_array) - my_array @ my_array @ my_array @ my_array
for t, e in np.ndenumerate(tt):
@@ -160,8 +160,8 @@ def test_multi_dot():
my_list = []
length = 1000 + np.random.randint(200)
for i in range(dim ** 2):
my_list.append(pe.CObs(pe.Obs([np.random.rand(length), np.random.rand(length + 1)], ['t1', 't2']),
pe.Obs([np.random.rand(length), np.random.rand(length + 1)], ['t1', 't2'])))
my_list.append(pe.CObs(pe.Obs([np.random.rand(length)], ['t1']) + pe.Obs([np.random.rand(length + 1)], ['t2']),
pe.Obs([np.random.rand(length)], ['t1']) + pe.Obs([np.random.rand(length + 1)], ['t2'])))
my_array = np.array(my_list).reshape((dim, dim)) * pe.cov_Obs(1.0, 0.002, 'cov')
tt = pe.linalg.matmul(my_array, my_array, my_array, my_array) - my_array @ my_array @ my_array @ my_array
for t, e in np.ndenumerate(tt):
@@ -209,7 +209,7 @@ def test_irregular_matrix_inverse():
for idl in [range(8, 508, 10), range(250, 273), [2, 8, 19, 20, 78, 99, 828, 10548979]]:
irregular_array = []
for i in range(dim ** 2):
irregular_array.append(pe.Obs([np.random.normal(1.1, 0.2, len(idl)), np.random.normal(0.25, 0.1, 10)], ['ens1', 'ens2'], idl=[idl, range(1, 11)]))
irregular_array.append(pe.Obs([np.random.normal(1.1, 0.2, len(idl))], ['ens1'], idl=[idl]) + pe.Obs([np.random.normal(0.25, 0.1, 10)], ['ens2'], idl=[range(1, 11)]))
irregular_matrix = np.array(irregular_array).reshape((dim, dim)) * pe.cov_Obs(1.0, 0.002, 'cov') * pe.pseudo_Obs(1.0, 0.002, 'ens2|r23')
invertible_irregular_matrix = np.identity(dim) + irregular_matrix @ irregular_matrix.T

View file

@@ -333,7 +333,7 @@ def test_derived_observables():
def test_multi_ens():
names = ['A0', 'A1|r001', 'A1|r002']
test_obs = pe.Obs([np.random.rand(50), np.random.rand(50), np.random.rand(50)], names)
test_obs = pe.Obs([np.random.rand(50)], names[:1]) + pe.Obs([np.random.rand(50), np.random.rand(50)], names[1:])
assert test_obs.e_names == ['A0', 'A1']
assert test_obs.e_content['A0'] == ['A0']
assert test_obs.e_content['A1'] == ['A1|r001', 'A1|r002']
@@ -345,6 +345,9 @@ def test_multi_ens():
ensembles.append(str(i))
assert my_sum.e_names == sorted(ensembles)
with pytest.raises(ValueError):
test_obs = pe.Obs([np.random.rand(50), np.random.rand(50), np.random.rand(50)], names)
def test_multi_ens2():
names = ['ens', 'e', 'en', 'e|r010', 'E|er', 'ens|', 'Ens|34', 'ens|r548984654ez4e3t34terh']
@@ -499,18 +502,25 @@ def test_reweighting():
with pytest.raises(ValueError):
pe.reweight(my_irregular_obs, [my_obs])
my_merged_obs = my_obs + pe.Obs([np.random.rand(1000)], ['q'])
with pytest.raises(ValueError):
pe.reweight(my_merged_obs, [my_merged_obs])
def test_merge_obs():
my_obs1 = pe.Obs([np.random.rand(100)], ['t'])
my_obs2 = pe.Obs([np.random.rand(100)], ['q'], idl=[range(1, 200, 2)])
my_obs1 = pe.Obs([np.random.normal(1, .1, 100)], ['t|1'])
my_obs2 = pe.Obs([np.random.normal(1, .1, 100)], ['t|2'], idl=[range(1, 200, 2)])
merged = pe.merge_obs([my_obs1, my_obs2])
diff = merged - my_obs2 - my_obs1
assert diff == -(my_obs1.value + my_obs2.value) / 2
diff = merged - (my_obs2 + my_obs1) / 2
assert np.isclose(0, diff.value, atol=1e-16)
with pytest.raises(ValueError):
pe.merge_obs([my_obs1, my_obs1])
my_covobs = pe.cov_Obs(1.0, 0.003, 'cov')
with pytest.raises(ValueError):
pe.merge_obs([my_obs1, my_covobs])
my_obs3 = pe.Obs([np.random.rand(100)], ['q|2'], idl=[range(1, 200, 2)])
with pytest.raises(ValueError):
pe.merge_obs([my_obs1, my_obs3])
@@ -543,6 +553,9 @@ def test_correlate():
my_obs6 = pe.Obs([np.random.rand(100)], ['t'], idl=[range(5, 505, 5)])
corr3 = pe.correlate(my_obs5, my_obs6)
assert my_obs5.idl == corr3.idl
my_obs7 = pe.Obs([np.random.rand(99)], ['q'])
with pytest.raises(ValueError):
pe.correlate(my_obs1, my_obs7)
my_new_obs = pe.Obs([np.random.rand(100)], ['q3'])
with pytest.raises(ValueError):
@@ -682,14 +695,14 @@ def test_gamma_method_irregular():
assert (a.dvalue - 5 * a.ddvalue < expe and expe < a.dvalue + 5 * a.ddvalue)
arr2 = np.random.normal(1, .2, size=N)
afull = pe.Obs([arr, arr2], ['a1', 'a2'])
afull = pe.Obs([arr], ['a1']) + pe.Obs([arr2], ['a2'])
configs = np.ones_like(arr2)
for i in np.random.uniform(0, len(arr2), size=int(.8*N)):
configs[int(i)] = 0
zero_arr2 = [arr2[i] for i in range(len(arr2)) if not configs[i] == 0]
idx2 = [i + 1 for i in range(len(configs)) if configs[i] == 1]
a = pe.Obs([zero_arr, zero_arr2], ['a1', 'a2'], idl=[idx, idx2])
a = pe.Obs([zero_arr], ['a1'], idl=[idx]) + pe.Obs([zero_arr2], ['a2'], idl=[idx2])
afull.gamma_method()
a.gamma_method()
@@ -1023,7 +1036,7 @@ def test_correlation_intersection_of_idls():
def test_covariance_non_identical_objects():
obs1 = pe.Obs([np.random.normal(1.0, 0.1, 1000), np.random.normal(1.0, 0.1, 1000), np.random.normal(1.0, 0.1, 732)], ["ens|r1", "ens|r2", "ens2"])
obs1 = pe.Obs([np.random.normal(1.0, 0.1, 1000), np.random.normal(1.0, 0.1, 1000)], ["ens|r1", "ens|r2"]) + pe.Obs([np.random.normal(1.0, 0.1, 732)], ['ens2'])
obs1.gamma_method()
obs2 = obs1 + 1e-18
obs2.gamma_method()
@@ -1107,6 +1120,9 @@ def test_reweight_method():
obs1 = pe.pseudo_Obs(0.2, 0.01, 'test')
rw = pe.pseudo_Obs(0.999, 0.001, 'test')
assert obs1.reweight(rw) == pe.reweight(rw, [obs1])[0]
rw2 = pe.pseudo_Obs(0.999, 0.001, 'test2')
with pytest.raises(ValueError):
obs1.reweight(rw2)
def test_jackknife():