Mirror of https://github.com/fjosw/pyerrors.git
Merge branch 'develop' into documentation
Commit 05a437478e
5 changed files with 33 additions and 6 deletions

.github/workflows/pytest.yml (vendored): 3 changed lines

@@ -16,7 +16,7 @@ jobs:
       fail-fast: false
       matrix:
         os: [ubuntu-latest]
-        python-version: ["3.7", "3.8", "3.9", "3.10"]
+        python-version: ["3.7", "3.8", "3.9", "3.10", "3.11"]
         include:
           - os: macos-latest
             python-version: 3.9

@@ -40,6 +40,7 @@ jobs:
         pip install pytest
         pip install pytest-cov
         pip install pytest-benchmark
+        pip install hypothesis
         pip install py
         pip freeze

@@ -25,7 +25,9 @@ For all pull requests tests are executed for the most recent python releases via
 pytest -vv --cov=pyerrors
 pytest -vv --nbmake examples/*.ipynb
 ```
-requiring `pytest`, `pytest-cov`, `pytest-benchmark` and `nbmake`. To get a coverage report in html run
+requiring `pytest`, `pytest-cov`, `pytest-benchmark`, `hypothesis` and `nbmake`. To install the test dependencies one can run `pip install pyerrors[test]`
+
+To get a coverage report in html run
 ```
 pytest --cov=pyerrors --cov-report html
 ```

@@ -41,7 +41,7 @@ print(my_new_obs) # Print the result to stdout
 `pyerrors` introduces a new datatype, `Obs`, which simplifies error propagation and estimation for auto- and cross-correlated data.
 An `Obs` object can be initialized with two arguments, the first is a list containing the samples for an observable from a Monte Carlo chain.
 The samples can either be provided as python list or as numpy array.
-The second argument is a list containing the names of the respective Monte Carlo chains as strings. These strings uniquely identify a Monte Carlo chain/ensemble.
+The second argument is a list containing the names of the respective Monte Carlo chains as strings. These strings uniquely identify a Monte Carlo chain/ensemble. **It is crucial for the correct error propagation that observations from the same Monte Carlo history are labeled with the same name. See [Multiple ensembles/replica](#Multiple-ensembles/replica) for details.**
 
 ```python
 import pyerrors as pe
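
The constructor contract described in the paragraph above can be sketched as follows; the sample data and the ensemble name are invented for illustration, while `Obs`, `gamma_method` and `details` are the pyerrors calls quoted in this diff:

```python
import numpy as np
import pyerrors as pe

# First argument: a list of sample arrays, one entry per Monte Carlo chain.
# Second argument: the matching list of ensemble names (strings).
samples = np.random.normal(1.0, 0.1, 500)   # synthetic samples, for illustration only
my_obs = pe.Obs([samples], ['ensemble1'])

my_obs.gamma_method()   # error estimation including autocorrelations
my_obs.details()        # prints value, error and the configurations per ensemble
```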

@@ -142,6 +142,8 @@ my_sum.details()
 > · Ensemble 'ensemble1' : 1000 configurations (from 1 to 1000)
 > · Ensemble 'ensemble2' : 500 configurations (from 1 to 500)
 ```
 
+Observables from the **same Monte Carlo chain** have to be initialized with the **same name** for correct error propagation. If different names were used in this case, the data would be treated as statistically independent, resulting in a loss of relevant information and a potential over- or underestimate of the statistical error.
+
 `pyerrors` identifies multiple replica (independent Markov chains with identical simulation parameters) by the vertical bar `|` in the name of the data set.
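
A minimal sketch of the naming rule added above (ensemble and replica names are illustrative): observables measured on the same Monte Carlo history reuse the same names, and replica are tagged after the vertical bar `|`:

```python
import numpy as np
import pyerrors as pe

# Two replica of one ensemble: same ensemble name 'ens1', replica tags 'r01'/'r02'.
rep1 = np.random.normal(1.0, 0.1, 1000)   # synthetic data, for illustration only
rep2 = np.random.normal(1.0, 0.1, 800)
obs_a = pe.Obs([rep1, rep2], ['ens1|r01', 'ens1|r02'])

# A second observable from the same Monte Carlo history must reuse these names,
# otherwise the two observables would be treated as statistically independent.
obs_b = pe.Obs([rep1 ** 2, rep2 ** 2], ['ens1|r01', 'ens1|r02'])

product = obs_a * obs_b   # correlations between obs_a and obs_b are propagated
product.gamma_method()
```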

setup.py: 4 changed lines

@@ -25,8 +25,8 @@ setup(name='pyerrors',
       license="MIT",
       packages=find_packages(),
       python_requires='>=3.7.0',
-      install_requires=['numpy>=1.19', 'autograd>=1.5', 'numdifftools>=0.9.41', 'matplotlib>=3.5', 'scipy>=1.7', 'iminuit>=2.17', 'h5py>=3.7', 'lxml>=4.9', 'python-rapidjson>=1.9', 'pandas>=1.1'],
-      extras_require={'test': ['pytest', 'pytest-cov', 'pytest-benchmark']},
+      install_requires=['numpy>=1.21', 'autograd>=1.5', 'numdifftools>=0.9.41', 'matplotlib>=3.5', 'scipy>=1.7', 'iminuit>=2.17', 'h5py>=3.8', 'lxml>=4.9', 'python-rapidjson>=1.9', 'pandas>=1.1'],
+      extras_require={'test': ['pytest', 'pytest-cov', 'pytest-benchmark', 'hypothesis']},
       classifiers=[
           'Development Status :: 5 - Production/Stable',
           'Intended Audience :: Science/Research',

@@ -4,9 +4,25 @@ import copy
 import matplotlib.pyplot as plt
 import pyerrors as pe
 import pytest
+from hypothesis import given, strategies as st
 
 np.random.seed(0)
 
 
+@given(st.lists(st.floats(allow_nan=False, allow_infinity=False, width=32), min_size=5),
+       st.text(),
+       st.floats(allow_nan=False, allow_infinity=False, width=32, min_value=0))
+def test_fuzzy_obs(data, string, S):
+    my_obs = pe.Obs([data], [string])
+    my_obs * my_obs
+    my_obs.gamma_method(S=S)
+
+
+@given(st.floats(allow_nan=False, allow_infinity=False, width=16))
+def test_sin2_cos2(value):
+    Obs = pe.pseudo_Obs(value, value * 0.123, "C0")
+    iamzero = np.sin(Obs) ** 2 + np.cos(Obs) ** 2 - 1
+    assert iamzero.is_zero(atol=1e-6)
+
 def test_Obs_exceptions():
     with pytest.raises(Exception):

@@ -59,7 +75,7 @@ def test_Obs_exceptions():
     one.plot_piechart()
     plt.close('all')
 
-def test_dump():
+def test_dump_pickle():
     value = np.random.normal(5, 10)
     dvalue = np.abs(np.random.normal(0, 1))
     test_obs = pe.pseudo_Obs(value, dvalue, 't')

@@ -68,6 +84,12 @@ def test_dump():
     new_obs = pe.load_object('test_dump.p')
     os.remove('test_dump.p')
     assert test_obs == new_obs
+
+
+def test_dump_json():
+    value = np.random.normal(5, 10)
+    dvalue = np.abs(np.random.normal(0, 1))
+    test_obs = pe.pseudo_Obs(value, dvalue, 't')
     test_obs.dump('test_dump', dataype="json.gz", path=".")
     test_obs.dump('test_dump', dataype="json.gz")
     new_obs = pe.input.json.load_json("test_dump")
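
For readers new to `hypothesis`, the property-based pattern behind the tests added above can be summarized as follows. The strategies are the ones from the diff; the test name and comments are illustrative additions, not part of the commit:

```python
import pyerrors as pe
from hypothesis import given, strategies as st


# hypothesis generates many input combinations per test run:
#  - a list of at least 5 finite 32-bit floats, used as Monte Carlo samples,
#  - an arbitrary text string, used as the ensemble name,
#  - a non-negative finite float, passed as the windowing parameter S.
@given(st.lists(st.floats(allow_nan=False, allow_infinity=False, width=32), min_size=5),
       st.text(),
       st.floats(allow_nan=False, allow_infinity=False, width=32, min_value=0))
def test_fuzzy_obs_sketch(data, string, S):
    my_obs = pe.Obs([data], [string])   # construction must work for the generated input
    my_obs * my_obs                     # exercise arithmetic; the result is discarded
    my_obs.gamma_method(S=S)            # error analysis must not crash for any generated S
```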