Mirror of https://github.com/fjosw/pyerrors.git, synced 2025-03-15 23:00:25 +01:00

Compare commits
No commits in common. "develop" and "v2.8.2" have entirely different histories.
48 changed files with 710 additions and 3016 deletions
2  .github/CODEOWNERS (vendored)

@@ -2,5 +2,3 @@
 /pyerrors/covobs.py @s-kuberski
 /pyerrors/input/json.py @s-kuberski
 /pyerrors/input/dobs.py @s-kuberski
-/pyerrors/input/sfcf.py @jkuhl-uni
-/tests/sfcf_in_test.py @jkuhl-uni
39  .github/workflows/codeql.yml (vendored, new file)

@@ -0,0 +1,39 @@
+name: "CodeQL"
+
+on:
+  push:
+    branches:
+      - master
+      - develop
+  pull_request:
+
+jobs:
+  analyze:
+    name: Analyze
+    runs-on: ubuntu-latest
+    permissions:
+      actions: read
+      contents: read
+      security-events: write
+
+    strategy:
+      fail-fast: false
+      matrix:
+        language: [ 'python' ]
+
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v3
+
+      - name: Initialize CodeQL
+        uses: github/codeql-action/init@v2
+        with:
+          languages: ${{ matrix.language }}
+
+      - name: Autobuild
+        uses: github/codeql-action/autobuild@v2
+
+      - name: Perform CodeQL Analysis
+        uses: github/codeql-action/analyze@v2
+        with:
+          category: "/language:${{matrix.language}}"
4  .github/workflows/docs.yml (vendored)

@@ -11,10 +11,10 @@ jobs:
     steps:
       - name: Set up Python environment
-        uses: actions/setup-python@v5
+        uses: actions/setup-python@v4
        with:
          python-version: "3.10"
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v3
       - name: Updated documentation
        run: |
          git config --global user.email "${{ github.actor }}@users.noreply.github.com"
19  .github/workflows/examples.yml (vendored)

@@ -6,7 +6,6 @@ on:
       - master
       - develop
   pull_request:
-  workflow_dispatch:
   schedule:
     - cron: '0 4 1 * *'

@@ -17,27 +16,27 @@ jobs:
       fail-fast: false
       matrix:
         os: [ubuntu-latest]
-        python-version: ["3.10", "3.12"]
+        python-version: ["3.8", "3.10"]

     steps:
       - name: Checkout source
-        uses: actions/checkout@v4
+        uses: actions/checkout@v3

       - name: Setup python
-        uses: actions/setup-python@v5
+        uses: actions/setup-python@v4
         with:
           python-version: ${{ matrix.python-version }}
-      - name: uv
-        uses: astral-sh/setup-uv@v5

       - name: Install
         run: |
           sudo apt-get update
           sudo apt-get install dvipng texlive-latex-extra texlive-fonts-recommended cm-super
-          uv pip install wheel --system
-          uv pip install . --system
-          uv pip install pytest nbmake --system
-          uv pip install -U matplotlib!=3.7.0 --system # Exclude version 3.7.0 of matplotlib as this breaks local imports of style files.
+          python -m pip install --upgrade pip
+          pip install wheel
+          pip install .
+          pip install pytest
+          pip install nbmake
+          pip install -U matplotlib!=3.7.0 # Exclude version 3.7.0 of matplotlib as this breaks local imports of style files.

       - name: Run tests
         run: pytest -vv --nbmake examples/*.ipynb
4  .github/workflows/flake8.yml (vendored)

@@ -13,9 +13,9 @@ jobs:
     name: Lint
     steps:
       - name: Check out source repository
-        uses: actions/checkout@v4
+        uses: actions/checkout@v3
       - name: Set up Python environment
-        uses: actions/setup-python@v5
+        uses: actions/setup-python@v4
         with:
           python-version: "3.10"
       - name: flake8 Lint
28  .github/workflows/pytest.yml (vendored)

@@ -6,7 +6,6 @@ on:
       - master
       - develop
   pull_request:
-  workflow_dispatch:
   schedule:
     - cron: '0 4 1 * *'

@@ -17,30 +16,33 @@ jobs:
       fail-fast: false
       matrix:
         os: [ubuntu-latest]
-        python-version: ["3.9", "3.10", "3.11", "3.12", "3.13"]
+        python-version: ["3.7", "3.8", "3.9", "3.10", "3.11"]
         include:
           - os: macos-latest
-            python-version: "3.12"
-          - os: ubuntu-24.04-arm
-            python-version: "3.12"
+            python-version: 3.9
+          - os: windows-latest
+            python-version: 3.9

     steps:
       - name: Checkout source
-        uses: actions/checkout@v4
+        uses: actions/checkout@v3

       - name: Setup python
-        uses: actions/setup-python@v5
+        uses: actions/setup-python@v4
         with:
           python-version: ${{ matrix.python-version }}
-      - name: uv
-        uses: astral-sh/setup-uv@v5

       - name: Install
         run: |
-          uv pip install wheel --system
-          uv pip install . --system
-          uv pip install pytest pytest-cov pytest-benchmark hypothesis --system
-          uv pip freeze --system
+          python -m pip install --upgrade pip
+          pip install wheel
+          pip install .
+          pip install pytest
+          pip install pytest-cov
+          pip install pytest-benchmark
+          pip install hypothesis
+          pip install py
+          pip freeze

       - name: Run tests
         run: pytest --cov=pyerrors -vv
58  .github/workflows/release.yml (vendored, deleted)

@@ -1,58 +0,0 @@
-name: Release
-
-on:
-  workflow_dispatch:
-  release:
-    types: [published]
-
-jobs:
-  build:
-    name: Build sdist and wheel
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v4
-        name: Checkout repository
-
-      - uses: actions/setup-python@v5
-        with:
-          python-version: "3.12"
-
-      - name: Install pypa/build
-        run: >-
-          python3 -m
-          pip install
-          build
-          --user
-
-      - name: Build wheel and source tarball
-        run: python3 -m build
-
-      - name: Upload artifacts
-        uses: actions/upload-artifact@v4
-        with:
-          name: python-package-distributions
-          path: dist/
-          if-no-files-found: error
-
-  publish:
-    needs: [build]
-    name: Upload to PyPI
-    runs-on: ubuntu-latest
-    environment:
-      name: pypi
-      url: https://pypi.org/p/pyerrors
-    permissions:
-      id-token: write
-
-    steps:
-      - name: Download artifacts
-        uses: actions/download-artifact@v4
-        with:
-          name: python-package-distributions
-          path: dist/
-
-      - name: Sanity check
-        run: ls -la dist/
-
-      - name: Publish to PyPI
-        uses: pypa/gh-action-pypi-publish@release/v1
15  .github/workflows/ruff.yml (vendored, deleted)

@@ -1,15 +0,0 @@
-name: ruff
-on:
-  push:
-    branches:
-      - master
-      - develop
-  pull_request:
-jobs:
-  ruff:
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v4
-      - uses: astral-sh/ruff-action@v2
-        with:
-          src: "./pyerrors"
74  CHANGELOG.md

@@ -2,80 +2,6 @@

 All notable changes to this project will be documented in this file.

-## [2.14.0] - 2025-03-09
-
-### Added
-- Explicit checks of the provided inverse matrix for correlated fits #259
-
-### Changed
-- Compute derivative for pow explicitly instead of relying on autograd. This results in a ~4x speedup for pow operations #246
-- More explicit exception types #248
-
-### Fixed
-- Removed the possibility to create an Obs from data on several replica #258
-- Fix range in `set_prange` #247
-- Fix ensemble name handling in sfcf input modules #253
-- Correct error message for fit shape mismatch #257
-
-## [2.13.0] - 2024-11-03
-
-### Added
-- Allow providing lower triangular matrix constructed from a Cholesky decomposition in least squares function for correlated fits.
-
-### Fixed
-- Corrected bug that prevented combined fits with multiple x-obs in some cases.
-
-## [2.12.0] - 2024-08-22
-
-### Changed
-- Support for numpy 2 was added via a new autograd release
-- Support for python<3.9 was dropped and dependencies were updated.
-
-### Fixed
-- Minor bug fixes in input.sfcf
-
-## [2.11.1] - 2024-04-25
-
-### Fixed
-- Fixed a bug in error computation when combining two Obs from the same ensemble and fluctuations on one replicum are not part of one of the Obs.
-
-## [2.11.0] - 2024-04-01
-### Added
-- New special function module.
-
-### Fixed
-- Various bug fixes in input module.
-
-## [2.10.0] - 2023-11-24
-### Added
-- More efficient implementation of read_sfcf
-- added support for addition and multiplication of complex numbers to Corr objects
-- the Corr.GEVP method can now also propagate the errors for the eigenvectors
-
-### Fixed
-- Fixed bug in combined fit with multiple independent variables
-- Check for invalid set of configuration numbers added when initializing an Obs object.
-- Fixed a bug in hadrons.read_hdf5
-
-## [2.9.0] - 2023-07-20
-### Added
-- Vectorized `gamma_method` added which can be applied to lists or arrays of pyerrors objects.
-- Wrapper for numerical integration of `Obs` valued functions with `Obs` valued intervals.
-- Bootstrap import and export added.
-- `matmul` overloaded for `Corr` class.
-- More options for initializing `Corr` objects added.
-- General Hadrons hdf5 reader added.
-- New variant of second_derivative.
-- CObs formatting added.
-- Comparisons for `Corr` class added.
-
-### Changed
-- support for python<3.8 was dropped and dependencies were updated.
-
 ## [2.8.2] - 2023-06-02
 ### Fixed
 - Another bug appearing in an edge case of `_compute_drho` fixed.
CONTRIBUTING.md

@@ -19,17 +19,19 @@ Please add docstrings to any new function, class or method you implement. The do

 ### Tests
 When implementing a new feature or fixing a bug please add meaningful tests to the files in the `tests` directory which cover the new code.
-We follow the [PEP8](https://peps.python.org/pep-0008/) code style which is checked by `flake8`.
 For all pull requests tests are executed for the most recent python releases via
 ```
-pytest -vv -Werror
+pytest
 pytest --nbmake examples/*.ipynb
-flake8 --ignore=E501,W503 --exclude=__init__.py pyerrors
 ```
-The tests require `pytest`, `pytest-cov`, `pytest-benchmark`, `hypothesis`, `nbmake` and `flake8`. To install the test dependencies one can run `pip install pyerrors[test]`.
-Please make sure that all tests pass for a new pull requests.
+requiring `pytest`, `pytest-cov`, `pytest-benchmark`, `hypothesis` and `nbmake`. To install the test dependencies one can run `pip install pyerrors[test]`

 To get a coverage report in html run
 ```
 pytest --cov=pyerrors --cov-report html
 ```
+The linter `flake8` is executed with the command
+```
+flake8 --ignore=E501,W503 --exclude=__init__.py pyerrors
+```
+Please make sure that all tests pass for a new pull requests.
20  README.md

@@ -1,26 +1,22 @@
-[](https://www.python.org/downloads/) [](https://opensource.org/licenses/MIT) [](https://arxiv.org/abs/2209.14371) [](https://doi.org/10.1016/j.cpc.2023.108750)
+[](https://github.com/fjosw/pyerrors/actions/workflows/pytest.yml) [](https://www.python.org/downloads/) [](https://opensource.org/licenses/MIT) [](https://arxiv.org/abs/2209.14371) [](https://doi.org/10.1016/j.cpc.2023.108750)
 # pyerrors
 `pyerrors` is a python framework for error computation and propagation of Markov chain Monte Carlo data from lattice field theory and statistical mechanics simulations.

 - **Documentation:** https://fjosw.github.io/pyerrors/pyerrors.html
 - **Examples:** https://github.com/fjosw/pyerrors/tree/develop/examples
 - **Ask a question:** https://github.com/fjosw/pyerrors/discussions/new?category=q-a
 - **Changelog:** https://github.com/fjosw/pyerrors/blob/develop/CHANGELOG.md
 - **Bug reports:** https://github.com/fjosw/pyerrors/issues

 ## Installation
 Install the most recent release using pip and [pypi](https://pypi.org/project/pyerrors/):
 ```bash
-python -m pip install pyerrors # Fresh install
-python -m pip install -U pyerrors # Update
+pip install pyerrors # Fresh install
+pip install -U pyerrors # Update
 ```
 Install the most recent release using conda and [conda-forge](https://anaconda.org/conda-forge/pyerrors):
 ```bash
 conda install -c conda-forge pyerrors # Fresh install
 conda update -c conda-forge pyerrors # Update
 ```

 ## Contributing
 We appreciate all contributions to the code, the documentation and the examples. If you want to get involved please have a look at our [contribution guideline](https://github.com/fjosw/pyerrors/blob/develop/CONTRIBUTING.md).

 ## Citing pyerrors
 If you use `pyerrors` for research that leads to a publication we suggest citing the following papers:
 - Fabian Joswig, Simon Kuberski, Justus T. Kuhlmann, Jan Neuendorf, *pyerrors: a python framework for error analysis of Monte Carlo data*. Comput.Phys.Commun. 288 (2023) 108750.
 - Ulli Wolff, *Monte Carlo errors with less errors*. Comput.Phys.Commun. 156 (2004) 143-153, Comput.Phys.Commun. 176 (2007) 383 (erratum).
 - Alberto Ramos, *Automatic differentiation for error analysis of Monte Carlo data*. Comput.Phys.Commun. 238 (2019) 19-35.
 - Stefan Schaefer, Rainer Sommer, Francesco Virotta, *Critical slowing down and error analysis in lattice QCD simulations*. Nucl.Phys.B 845 (2011) 93-119.
@@ -34,7 +34,7 @@
    "source": [
     "plt.style.use('./base_style.mplstyle')\n",
     "import shutil\n",
-    "usetex = shutil.which('latex') not in ('', None)\n",
+    "usetex = shutil.which('latex') != ''\n",
     "plt.rc('text', usetex=usetex)"
    ]
   },

@@ -22,7 +22,7 @@
    "source": [
     "plt.style.use('./base_style.mplstyle')\n",
     "import shutil\n",
-    "usetex = shutil.which('latex') not in ('', None)\n",
+    "usetex = shutil.which('latex') != ''\n",
     "plt.rc('text', usetex=usetex)"
    ]
   },

@@ -20,7 +20,7 @@
    "source": [
     "plt.style.use('./base_style.mplstyle')\n",
     "import shutil\n",
-    "usetex = shutil.which('latex') not in ('', None)\n",
+    "usetex = shutil.which('latex') != ''\n",
     "plt.rc('text', usetex=usetex)"
    ]
   },
File diff suppressed because one or more lines are too long
@@ -7,7 +7,8 @@
    "outputs": [],
    "source": [
     "import pyerrors as pe\n",
-    "import numpy as np"
+    "import numpy as np\n",
+    "import scipy"
    ]
   },
   {

@@ -26,8 +27,8 @@
     "name": "stdout",
     "output_type": "stream",
     "text": [
-     "[[4.10(20) -1.00(10)]\n",
-     " [-1.00(10) 1.000(10)]]\n"
+     "[[Obs[4.10(20)] Obs[-1.00(10)]]\n",
+     " [Obs[-1.00(10)] Obs[1.000(10)]]]\n"
     ]
    }
   ],

@@ -62,8 +63,8 @@
     "name": "stdout",
     "output_type": "stream",
     "text": [
-     "[[17.81 -5.1]\n",
-     " [-5.1 2.0]]\n"
+     "[[Obs[17.81] Obs[-5.1]]\n",
+     " [Obs[-5.1] Obs[2.0]]]\n"
     ]
    }
   ],

@@ -87,8 +88,8 @@
     "name": "stdout",
     "output_type": "stream",
     "text": [
-     "[[4.1 -1.0]\n",
-     " [-1.0 1.0]]\n"
+     "[[Obs[4.1] Obs[-1.0]]\n",
+     " [Obs[-1.0] Obs[1.0]]]\n"
     ]
    }
   ],

@@ -112,8 +113,8 @@
     "name": "stdout",
     "output_type": "stream",
     "text": [
-     "[[78.12099999999998 -22.909999999999997]\n",
-     " [-22.909999999999997 7.1]]\n"
+     "[[Obs[78.12099999999998] Obs[-22.909999999999997]]\n",
+     " [Obs[-22.909999999999997] Obs[7.1]]]\n"
     ]
    }
   ],

@@ -137,8 +138,8 @@
     "name": "stdout",
     "output_type": "stream",
     "text": [
-     "[[30.16185746098009 -1.1752011936438014]\n",
-     " [-1.1752011936438014 1.1752011936438014]]\n"
+     "[[Obs[30.161857460980094] Obs[-1.1752011936438014]]\n",
+     " [Obs[-1.1752011936438014] Obs[1.1752011936438014]]]\n"
     ]
    }
   ],

@@ -162,7 +163,7 @@
     "name": "stdout",
     "output_type": "stream",
     "text": [
-     "[2.00(40) 1.00(10)]\n"
+     "[Obs[2.00(40)] Obs[1.00(10)]]\n"
     ]
    }
   ],

@@ -170,7 +171,8 @@
    "vec1 = pe.pseudo_Obs(2, 0.4, 'e1')\n",
    "vec2 = pe.pseudo_Obs(1, 0.1, 'e1')\n",
    "vector = np.asarray([vec1, vec2])\n",
-   "pe.gm(vector)\n",
+   "for (i), entry in np.ndenumerate(vector):\n",
+   "    entry.gamma_method()\n",
    "print(vector)"
   ]
  },

@@ -190,13 +192,14 @@
     "name": "stdout",
     "output_type": "stream",
     "text": [
-     "[7.2(1.7) -1.00(46)]\n"
+     "[Obs[7.2(1.7)] Obs[-1.00(46)]]\n"
     ]
    }
   ],
   "source": [
    "product = matrix @ vector\n",
-   "pe.gm(product)\n",
+   "for (i), entry in np.ndenumerate(product):\n",
+   "    entry.gamma_method()\n",
    "print(product)"
   ]
  },

@@ -242,14 +245,15 @@
     "name": "stdout",
     "output_type": "stream",
     "text": [
-     "[[2.025(49) 0.0]\n",
-     " [-0.494(51) 0.870(29)]]\n"
+     "[[Obs[2.025(49)] Obs[0.0]]\n",
+     " [Obs[-0.494(50)] Obs[0.870(29)]]]\n"
     ]
    }
   ],
   "source": [
    "cholesky = pe.linalg.cholesky(matrix)\n",
-   "pe.gm(cholesky)\n",
+   "for (i, j), entry in np.ndenumerate(cholesky):\n",
+   "    entry.gamma_method()\n",
    "print(cholesky)"
   ]
  },

@@ -269,8 +273,8 @@
     "name": "stdout",
     "output_type": "stream",
     "text": [
-     "[[-8.881784197001252e-16 0.0]\n",
-     " [0.0 0.0]]\n"
+     "[[Obs[-8.881784197001252e-16] Obs[0.0]]\n",
+     " [Obs[0.0] Obs[0.0]]]\n"
     ]
    }
   ],

@@ -295,17 +299,18 @@
     "name": "stdout",
     "output_type": "stream",
     "text": [
-     "[[0.494(12) 0.0]\n",
-     " [0.280(40) 1.150(39)]]\n",
+     "[[Obs[0.494(12)] Obs[0.0]]\n",
+     " [Obs[0.280(40)] Obs[1.150(39)]]]\n",
      "Check:\n",
-     "[[1.0 0.0]\n",
-     " [0.0 1.0]]\n"
+     "[[Obs[1.0] Obs[0.0]]\n",
+     " [Obs[0.0] Obs[1.0]]]\n"
     ]
    }
   ],
   "source": [
    "inv = pe.linalg.inv(cholesky)\n",
-   "pe.gm(inv)\n",
+   "for (i, j), entry in np.ndenumerate(inv):\n",
+   "    entry.gamma_method()\n",
    "print(inv)\n",
    "print('Check:')\n",
    "check_inv = cholesky @ inv\n",

@@ -330,19 +335,21 @@
     "output_type": "stream",
     "text": [
      "Eigenvalues:\n",
-     "[0.705(57) 4.39(19)]\n",
+     "[Obs[0.705(57)] Obs[4.39(19)]]\n",
      "Eigenvectors:\n",
-     "[[-0.283(26) -0.9592(76)]\n",
-     " [-0.9592(76) 0.283(26)]]\n"
+     "[[Obs[-0.283(26)] Obs[-0.9592(75)]]\n",
+     " [Obs[-0.9592(75)] Obs[0.283(26)]]]\n"
     ]
    }
   ],
   "source": [
    "e, v = pe.linalg.eigh(matrix)\n",
-   "pe.gm(e)\n",
+   "for (i), entry in np.ndenumerate(e):\n",
+   "    entry.gamma_method()\n",
    "print('Eigenvalues:')\n",
    "print(e)\n",
-   "pe.gm(v)\n",
+   "for (i, j), entry in np.ndenumerate(v):\n",
+   "    entry.gamma_method()\n",
    "print('Eigenvectors:')\n",
    "print(v)"
   ]

@@ -363,13 +370,17 @@
     "name": "stdout",
     "output_type": "stream",
     "text": [
-     "[[ True True]\n",
-     " [ True True]]\n"
+     "Check eigenvector 1\n",
+     "[Obs[-5.551115123125783e-17] Obs[0.0]]\n",
+     "Check eigenvector 2\n",
+     "[Obs[0.0] Obs[-2.220446049250313e-16]]\n"
     ]
    }
   ],
   "source": [
-   "print(matrix @ v == e * v)"
+   "for i in range(2):\n",
+   "    print('Check eigenvector', i + 1)\n",
+   "    print(matrix @ v[:, i] - v[:, i] * e[i])"
   ]
  },
  {

@@ -396,7 +407,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.11.0"
+   "version": "3.8.10"
   }
  },
 "nbformat": 4,
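The recurring source change in this notebook replaces the vectorized `pe.gm` call, which only exists on the develop side, with explicit loops over the array entries. A minimal sketch of the two equivalent idioms, reusing the observables defined in the notebook itself:

```python
import numpy as np
import pyerrors as pe

# Two pseudo-observables on the same ensemble, as in the notebook.
vec1 = pe.pseudo_Obs(2, 0.4, 'e1')
vec2 = pe.pseudo_Obs(1, 0.1, 'e1')
vector = np.asarray([vec1, vec2])

# develop: vectorized gamma_method applied to the whole array at once.
pe.gm(vector)

# v2.8.2: explicit loop over all entries instead.
for (i), entry in np.ndenumerate(vector):
    entry.gamma_method()

print(vector)
```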
@@ -21,7 +21,7 @@
    "source": [
     "plt.style.use('./base_style.mplstyle')\n",
     "import shutil\n",
-    "usetex = shutil.which('latex') not in ('', None)\n",
+    "usetex = shutil.which('latex') != ''\n",
     "plt.rc('text', usetex=usetex)"
    ]
   },
pyerrors/__init__.py

@@ -27,8 +27,8 @@ There exist similar publicly available implementations of gamma method error ana

 Install the most recent release using pip and [pypi](https://pypi.org/project/pyerrors/):
 ```bash
-python -m pip install pyerrors # Fresh install
-python -m pip install -U pyerrors # Update
+pip install pyerrors # Fresh install
+pip install -U pyerrors # Update
 ```
 Install the most recent release using conda and [conda-forge](https://anaconda.org/conda-forge/pyerrors):
 ```bash

@@ -37,9 +37,8 @@ conda update -c conda-forge pyerrors # Update
 ```
 Install the current `develop` version:
 ```bash
-python -m pip install -U --no-deps --force-reinstall git+https://github.com/fjosw/pyerrors.git@develop
+pip install git+https://github.com/fjosw/pyerrors.git@develop
 ```
-(Also works for any feature branch).

 ## Basic example

@@ -109,7 +108,6 @@ my_sum.details()
 > · Ensemble 'ensemble_name' : 1000 configurations (from 1 to 1000)
-
 ```
 The `gamma_method` is not automatically called after every intermediate step in order to prevent computational overhead.

 We use the following definition of the integrated autocorrelation time established in [Madras & Sokal 1988](https://link.springer.com/article/10.1007/BF01022990)
 $$\tau_\mathrm{int}=\frac{1}{2}+\sum_{t=1}^{W}\rho(t)\geq \frac{1}{2}\,.$$
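The docstring context above shows `my_sum.details()` output; the workflow it belongs to, in a minimal sketch (the ensemble name and synthetic data are illustrative):

```python
import numpy as np
import pyerrors as pe

# An Obs from synthetic Monte Carlo samples; the error analysis is only
# performed once gamma_method() is called explicitly.
obs = pe.Obs([np.random.normal(1.0, 0.1, 1000)], ['ensemble_name'])
my_sum = obs + obs
my_sum.gamma_method()
my_sum.details()
```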
@@ -307,7 +305,7 @@ print(my_derived_cobs)
 # The `Covobs` class
 In many projects, auxiliary data that is not based on Monte Carlo chains enters. Examples are experimentally determined mesons masses which are used to set the scale or renormalization constants. These numbers come with an error that has to be propagated through the analysis. The `Covobs` class allows to define such quantities in `pyerrors`. Furthermore, external input might consist of correlated quantities. An example are the parameters of an interpolation formula, which are defined via mean values and a covariance matrix between all parameters. The contribution of the interpolation formula to the error of a derived quantity therefore might depend on the complete covariance matrix.

-This concept is built into the definition of `Covobs`. In `pyerrors`, external input is defined by $M$ mean values, a $M\times M$ covariance matrix, where $M=1$ is permissible, and a name that uniquely identifies the covariance matrix. Below, we define the pion mass, based on its mean value and error, 134.9768(5). **Note, that the square of the error enters `cov_Obs`**, since the second argument of this function is the covariance matrix of the `Covobs`.
+This concept is built into the definition of `Covobs`. In `pyerrors`, external input is defined by $M$ mean values, a $M\times M$ covariance matrix, where $M=1$ is permissible, and a name that uniquely identifies the covariance matrix. Below, we define the pion mass, based on its mean value and error, 134.9768(5). Note, that the square of the error enters `cov_Obs`, since the second argument of this function is the covariance matrix of the `Covobs`.

 ```python
 import pyerrors.obs as pe
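Both versions of the paragraph make the same point about `cov_Obs`: its second argument is the covariance, i.e. the squared error. A minimal sketch (the name string is illustrative):

```python
import pyerrors.obs as pe

# Pion mass 134.9768(5): pass the variance 0.0005**2, not the error 0.0005.
m_pi = pe.cov_Obs(134.9768, 0.0005 ** 2, 'pi^0 mass')
m_pi.gamma_method()
m_pi.details()
```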
@@ -389,12 +387,11 @@ def func(a, x):

 `pyerrors` also supports correlated fits which can be triggered via the parameter `correlated_fit=True`.
 Details about how the required covariance matrix is estimated can be found in `pyerrors.obs.covariance`.
-Direct visualizations of the performed fits can be triggered via `resplot=True` or `qqplot=True`.
-
-For all available options including combined fits to multiple datasets see `pyerrors.fits.least_squares`.
+Direct visualizations of the performed fits can be triggered via `resplot=True` or `qqplot=True`. For all available options see `pyerrors.fits.least_squares`.

 ## Total least squares fits
-`pyerrors` can also fit data with errors on both the dependent and independent variables using the total least squares method also referred to as orthogonal distance regression as implemented in [scipy](https://docs.scipy.org/doc/scipy/reference/odr.html), see `pyerrors.fits.least_squares`. The syntax is identical to the standard least squares case, the only difference being that `x` also has to be a `list` or `numpy.array` of `Obs`.
+`pyerrors` can also fit data with errors on both the dependent and independent variables using the total least squares method also referred to orthogonal distance regression as implemented in [scipy](https://docs.scipy.org/doc/scipy/reference/odr.html), see `pyerrors.fits.least_squares`. The syntax is identical to the standard least squares case, the only difference being that `x` also has to be a `list` or `numpy.array` of `Obs`.

 For the full API see `pyerrors.fits` for fits and `pyerrors.roots` for finding roots of functions.
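A hedged sketch of the fit options this passage describes; the fit function and synthetic data are illustrative, and fit functions must be differentiable by autograd:

```python
import numpy as np
import autograd.numpy as anp  # fit functions must use autograd-wrapped numpy
import pyerrors as pe

def func(a, x):
    return a[0] * anp.exp(-a[1] * x)

x = np.arange(1, 9)
y = [pe.pseudo_Obs(np.exp(-0.3 * xi), 0.05, 'ens1') for xi in x]
[o.gamma_method() for o in y]

# resplot=True draws the fit residuals; correlated_fit=True would instead
# use the covariance matrix estimated via pyerrors.obs.covariance.
fit_result = pe.fits.least_squares(x, y, func, resplot=True)
print(fit_result)
```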
@@ -481,12 +478,10 @@ from .obs import *
 from .correlators import *
 from .fits import *
 from .misc import *
-from . import dirac as dirac
-from . import input as input
-from . import linalg as linalg
-from . import mpm as mpm
-from . import roots as roots
-from . import integrate as integrate
-from . import special as special
+from . import dirac
+from . import input
+from . import linalg
+from . import mpm
+from . import roots

-from .version import __version__ as __version__
+from .version import __version__
pyerrors/correlators.py

@@ -8,36 +8,19 @@ from .obs import Obs, reweight, correlate, CObs
 from .misc import dump_object, _assert_equal_properties
 from .fits import least_squares
 from .roots import find_root
-from . import linalg


 class Corr:
-    r"""The class for a correlator (time dependent sequence of pe.Obs).
+    """The class for a correlator (time dependent sequence of pe.Obs).

     Everything, this class does, can be achieved using lists or arrays of Obs.
     But it is simply more convenient to have a dedicated object for correlators.
     One often wants to add or multiply correlators of the same length at every timeslice and it is inconvenient
     to iterate over all timeslices for every operation. This is especially true, when dealing with matrices.

-    The correlator can have two types of content: An Obs at every timeslice OR a matrix at every timeslice.
-    Other dependency (eg. spatial) are not supported.
-
-    The Corr class can also deal with missing measurements or paddings for fixed boundary conditions.
-    The missing entries are represented via the `None` object.
-
-    Initialization
-    --------------
-    A simple correlator can be initialized with a list or a one-dimensional array of `Obs` or `Cobs`
-    ```python
-    corr11 = pe.Corr([obs1, obs2])
-    corr11 = pe.Corr(np.array([obs1, obs2]))
-    ```
-    A matrix-valued correlator can either be initialized via a two-dimensional array of `Corr` objects
-    ```python
-    matrix_corr = pe.Corr(np.array([[corr11, corr12], [corr21, corr22]]))
-    ```
-    or alternatively via a three-dimensional array of `Obs` or `CObs` of shape (T, N, N) where T is
-    the temporal extent of the correlator and N is the dimension of the matrix.
+    The correlator can have two types of content: An Obs at every timeslice OR a GEVP
+    matrix at every timeslice. Other dependency (eg. spatial) are not supported.
     """

     __slots__ = ["content", "N", "T", "tag", "prange"]
@@ -48,28 +31,27 @@ class Corr:
         Parameters
         ----------
         data_input : list or array
-            list of Obs or list of arrays of Obs or array of Corrs (see class docstring for details).
+            list of Obs or list of arrays of Obs or array of Corrs
         padding : list, optional
             List with two entries where the first labels the padding
             at the front of the correlator and the second the padding
             at the back.
         prange : list, optional
             List containing the first and last timeslice of the plateau
-            region identified for this correlator.
+            region indentified for this correlator.
         """

         if isinstance(data_input, np.ndarray):
-            if data_input.ndim == 1:
-                data_input = list(data_input)
-            elif data_input.ndim == 2:
-                if not data_input.shape[0] == data_input.shape[1]:
-                    raise ValueError("Array needs to be square.")
+
+            # This only works, if the array fulfills the conditions below
+            if not len(data_input.shape) == 2 and data_input.shape[0] == data_input.shape[1]:
+                raise Exception("Incompatible array shape")
             if not all([isinstance(item, Corr) for item in data_input.flatten()]):
-                raise ValueError("If the input is an array, its elements must be of type pe.Corr.")
+                raise Exception("If the input is an array, its elements must be of type pe.Corr")
             if not all([item.N == 1 for item in data_input.flatten()]):
-                raise ValueError("Can only construct matrix correlator from single valued correlators.")
+                raise Exception("Can only construct matrix correlator from single valued correlators")
             if not len(set([item.T for item in data_input.flatten()])) == 1:
-                raise ValueError("All input Correlators must be defined over the same timeslices.")
+                raise Exception("All input Correlators must be defined over the same timeslices.")

             T = data_input[0, 0].T
             N = data_input.shape[0]

@@ -77,7 +59,7 @@ class Corr:
             for t in range(T):
-                if any([(item.content[t] is None) for item in data_input.flatten()]):
-                    warnings.warn("Input ill-defined at different timeslices. Conversion leads to data loss.!", RuntimeWarning)
+                if not all([(item.content[t] is None) for item in data_input.flatten()]):
+                    warnings.warn("Input ill-defined at different timeslices. Conversion leads to data loss!", RuntimeWarning)
                     input_as_list.append(None)
                 else:
                     array_at_timeslace = np.empty([N, N], dtype="object")

@@ -86,12 +68,6 @@ class Corr:
                         array_at_timeslace[i, j] = data_input[i, j][t]
                     input_as_list.append(array_at_timeslace)
             data_input = input_as_list
-        elif data_input.ndim == 3:
-            if not data_input.shape[1] == data_input.shape[2]:
-                raise ValueError("Array needs to be square.")
-            data_input = list(data_input)
-        else:
-            raise ValueError("Arrays with ndim>3 not supported.")

         if isinstance(data_input, list):

@@ -99,18 +75,19 @@ class Corr:
             _assert_equal_properties([o for o in data_input if o is not None])
             self.content = [np.asarray([item]) if item is not None else None for item in data_input]
             self.N = 1

         elif all([isinstance(item, np.ndarray) or item is None for item in data_input]) and any([isinstance(item, np.ndarray) for item in data_input]):
             self.content = data_input
-            noNull = [a for a in self.content if a is not None]  # To check if the matrices are correct for all undefined elements
+            noNull = [a for a in self.content if not (a is None)]  # To check if the matrices are correct for all undefined elements
             self.N = noNull[0].shape[0]
             if self.N > 1 and noNull[0].shape[0] != noNull[0].shape[1]:
-                raise ValueError("Smearing matrices are not NxN.")
+                raise Exception("Smearing matrices are not NxN")
             if (not all([item.shape == noNull[0].shape for item in noNull])):
-                raise ValueError("Items in data_input are not of identical shape." + str(noNull))
+                raise Exception("Items in data_input are not of identical shape." + str(noNull))
             else:
-                raise TypeError("'data_input' contains item of wrong type.")
+                raise Exception("data_input contains item of wrong type")
         else:
-            raise TypeError("Data input was not given as list or correct array.")
+            raise Exception("Data input was not given as list or correct array")

         self.tag = None

@@ -141,7 +118,7 @@ class Corr:
     def gamma_method(self, **kwargs):
         """Apply the gamma method to the content of the Corr."""
         for item in self.content:
-            if item is not None:
+            if not (item is None):
                 if self.N == 1:
                     item[0].gamma_method(**kwargs)
                 else:

@@ -159,7 +136,7 @@ class Corr:
         By default it will return the lowest source, which usually means unsmeared-unsmeared (0,0), but it does not have to
         """
         if self.N == 1:
-            raise ValueError("Trying to project a Corr, that already has N=1.")
+            raise Exception("Trying to project a Corr, that already has N=1.")

         if vector_l is None:
             vector_l, vector_r = np.asarray([1.] + (self.N - 1) * [0.]), np.asarray([1.] + (self.N - 1) * [0.])

@@ -167,16 +144,16 @@ class Corr:
             vector_r = vector_l
         if isinstance(vector_l, list) and not isinstance(vector_r, list):
             if len(vector_l) != self.T:
-                raise ValueError("Length of vector list must be equal to T")
+                raise Exception("Length of vector list must be equal to T")
             vector_r = [vector_r] * self.T
         if isinstance(vector_r, list) and not isinstance(vector_l, list):
             if len(vector_r) != self.T:
-                raise ValueError("Length of vector list must be equal to T")
+                raise Exception("Length of vector list must be equal to T")
             vector_l = [vector_l] * self.T

         if not isinstance(vector_l, list):
             if not vector_l.shape == vector_r.shape == (self.N,):
-                raise ValueError("Vectors are of wrong shape!")
+                raise Exception("Vectors are of wrong shape!")
             if normalize:
                 vector_l, vector_r = vector_l / np.sqrt((vector_l @ vector_l)), vector_r / np.sqrt(vector_r @ vector_r)
             newcontent = [None if _check_for_none(self, item) else np.asarray([vector_l.T @ item @ vector_r]) for item in self.content]

@@ -201,7 +178,7 @@ class Corr:
             Second index to be picked.
         """
         if self.N == 1:
-            raise ValueError("Trying to pick item from projected Corr")
+            raise Exception("Trying to pick item from projected Corr")
         newcontent = [None if (item is None) else item[i, j] for item in self.content]
         return Corr(newcontent)

@@ -212,8 +189,8 @@ class Corr:
         timeslice and the error on each timeslice.
         """
         if self.N != 1:
-            raise ValueError("Can only make Corr[N=1] plottable")
-        x_list = [x for x in range(self.T) if self.content[x] is not None]
+            raise Exception("Can only make Corr[N=1] plottable")
+        x_list = [x for x in range(self.T) if not self.content[x] is None]
         y_list = [y[0].value for y in self.content if y is not None]
         y_err_list = [y[0].dvalue for y in self.content if y is not None]

@@ -222,9 +199,9 @@ class Corr:
     def symmetric(self):
         """ Symmetrize the correlator around x0=0."""
         if self.N != 1:
-            raise ValueError('symmetric cannot be safely applied to multi-dimensional correlators.')
+            raise Exception('symmetric cannot be safely applied to multi-dimensional correlators.')
         if self.T % 2 != 0:
-            raise ValueError("Can not symmetrize odd T")
+            raise Exception("Can not symmetrize odd T")

         if self.content[0] is not None:
             if np.argmax(np.abs([o[0].value if o is not None else 0 for o in self.content])) != 0:

@@ -237,15 +214,15 @@ class Corr:
             else:
                 newcontent.append(0.5 * (self.content[t] + self.content[self.T - t]))
         if (all([x is None for x in newcontent])):
-            raise ValueError("Corr could not be symmetrized: No redundant values")
+            raise Exception("Corr could not be symmetrized: No redundant values")
         return Corr(newcontent, prange=self.prange)

     def anti_symmetric(self):
         """Anti-symmetrize the correlator around x0=0."""
         if self.N != 1:
-            raise TypeError('anti_symmetric cannot be safely applied to multi-dimensional correlators.')
+            raise Exception('anti_symmetric cannot be safely applied to multi-dimensional correlators.')
         if self.T % 2 != 0:
-            raise ValueError("Can not symmetrize odd T")
+            raise Exception("Can not symmetrize odd T")

         test = 1 * self
         test.gamma_method()

@@ -259,13 +236,13 @@ class Corr:
             else:
                 newcontent.append(0.5 * (self.content[t] - self.content[self.T - t]))
         if (all([x is None for x in newcontent])):
-            raise ValueError("Corr could not be symmetrized: No redundant values")
+            raise Exception("Corr could not be symmetrized: No redundant values")
         return Corr(newcontent, prange=self.prange)

     def is_matrix_symmetric(self):
         """Checks whether a correlator matrices is symmetric on every timeslice."""
         if self.N == 1:
-            raise TypeError("Only works for correlator matrices.")
+            raise Exception("Only works for correlator matrices.")
         for t in range(self.T):
             if self[t] is None:
                 continue

@@ -277,29 +254,17 @@ class Corr:
                     return False
         return True

-    def trace(self):
-        """Calculates the per-timeslice trace of a correlator matrix."""
-        if self.N == 1:
-            raise ValueError("Only works for correlator matrices.")
-        newcontent = []
-        for t in range(self.T):
-            if _check_for_none(self, self.content[t]):
-                newcontent.append(None)
-            else:
-                newcontent.append(np.trace(self.content[t]))
-        return Corr(newcontent)
-
     def matrix_symmetric(self):
         """Symmetrizes the correlator matrices on every timeslice."""
         if self.N == 1:
-            raise ValueError("Trying to symmetrize a correlator matrix, that already has N=1.")
+            raise Exception("Trying to symmetrize a correlator matrix, that already has N=1.")
         if self.is_matrix_symmetric():
             return 1.0 * self
         else:
             transposed = [None if _check_for_none(self, G) else G.T for G in self.content]
             return 0.5 * (Corr(transposed) + self)

-    def GEVP(self, t0, ts=None, sort="Eigenvalue", vector_obs=False, **kwargs):
+    def GEVP(self, t0, ts=None, sort="Eigenvalue", **kwargs):
         r'''Solve the generalized eigenvalue problem on the correlator matrix and returns the corresponding eigenvectors.

         The eigenvectors are sorted according to the descending eigenvalues, the zeroth eigenvector(s) correspond to the

@@ -318,28 +283,21 @@ class Corr:
             If sort="Eigenvector" it gives a reference point for the sorting method.
         sort : string
             If this argument is set, a list of self.T vectors per state is returned. If it is set to None, only one vector is returned.
-            - "Eigenvalue": The eigenvector is chosen according to which eigenvalue it belongs individually on every timeslice. (default)
+            - "Eigenvalue": The eigenvector is chosen according to which eigenvalue it belongs individually on every timeslice.
             - "Eigenvector": Use the method described in arXiv:2004.10472 to find the set of v(t) belonging to the state.
               The reference state is identified by its eigenvalue at $t=t_s$.
-            - None: The GEVP is solved only at ts, no sorting is necessary
-        vector_obs : bool
-            If True, uncertainties are propagated in the eigenvector computation (default False).

         Other Parameters
         ----------------
         state : int
             Returns only the vector(s) for a specified state. The lowest state is zero.
-        method : str
-            Method used to solve the GEVP.
-            - "eigh": Use scipy.linalg.eigh to solve the GEVP. (default for vector_obs=False)
-            - "cholesky": Use manually implemented solution via the Cholesky decomposition. Automatically chosen if vector_obs==True.
         '''

         if self.N == 1:
-            raise ValueError("GEVP methods only works on correlator matrices and not single correlators.")
+            raise Exception("GEVP methods only works on correlator matrices and not single correlators.")
         if ts is not None:
             if (ts <= t0):
-                raise ValueError("ts has to be larger than t0.")
+                raise Exception("ts has to be larger than t0.")

         if "sorted_list" in kwargs:
             warnings.warn("Argument 'sorted_list' is deprecated, use 'sort' instead.", DeprecationWarning)

@@ -350,34 +308,16 @@ class Corr:
         else:
             symmetric_corr = self.matrix_symmetric()

-        def _get_mat_at_t(t, vector_obs=vector_obs):
-            if vector_obs:
-                return symmetric_corr[t]
-            else:
-                return np.vectorize(lambda x: x.value)(symmetric_corr[t])
-        G0 = _get_mat_at_t(t0)
-
-        method = kwargs.get('method', 'eigh')
-        if vector_obs:
-            chol = linalg.cholesky(G0)
-            chol_inv = linalg.inv(chol)
-            method = 'cholesky'
-        else:
-            chol = np.linalg.cholesky(_get_mat_at_t(t0, vector_obs=False))  # Check if matrix G0 is positive-semidefinite.
-            if method == 'cholesky':
-                chol_inv = np.linalg.inv(chol)
-            else:
-                chol_inv = None
+        G0 = np.vectorize(lambda x: x.value)(symmetric_corr[t0])
+        np.linalg.cholesky(G0)  # Check if matrix G0 is positive-semidefinite.

         if sort is None:
             if (ts is None):
-                raise ValueError("ts is required if sort=None.")
+                raise Exception("ts is required if sort=None.")
             if (self.content[t0] is None) or (self.content[ts] is None):
-                raise ValueError("Corr not defined at t0/ts.")
-            Gt = _get_mat_at_t(ts)
-            reordered_vecs = _GEVP_solver(Gt, G0, method=method, chol_inv=chol_inv)
-            if kwargs.get('auto_gamma', False) and vector_obs:
-                [[o.gm() for o in ev if isinstance(o, Obs)] for ev in reordered_vecs]
+                raise Exception("Corr not defined at t0/ts.")
+            Gt = np.vectorize(lambda x: x.value)(symmetric_corr[ts])
+            reordered_vecs = _GEVP_solver(Gt, G0)

         elif sort in ["Eigenvalue", "Eigenvector"]:
             if sort == "Eigenvalue" and ts is not None:

@@ -385,27 +325,25 @@ class Corr:
             all_vecs = [None] * (t0 + 1)
             for t in range(t0 + 1, self.T):
                 try:
-                    Gt = _get_mat_at_t(t)
-                    all_vecs.append(_GEVP_solver(Gt, G0, method=method, chol_inv=chol_inv))
+                    Gt = np.vectorize(lambda x: x.value)(symmetric_corr[t])
+                    all_vecs.append(_GEVP_solver(Gt, G0))
                 except Exception:
                     all_vecs.append(None)
             if sort == "Eigenvector":
                 if ts is None:
-                    raise ValueError("ts is required for the Eigenvector sorting method.")
+                    raise Exception("ts is required for the Eigenvector sorting method.")
                 all_vecs = _sort_vectors(all_vecs, ts)

             reordered_vecs = [[v[s] if v is not None else None for v in all_vecs] for s in range(self.N)]
-            if kwargs.get('auto_gamma', False) and vector_obs:
-                [[[o.gm() for o in evn] for evn in ev if evn is not None] for ev in reordered_vecs]
         else:
-            raise ValueError("Unknown value for 'sort'. Choose 'Eigenvalue', 'Eigenvector' or None.")
+            raise Exception("Unkown value for 'sort'.")

         if "state" in kwargs:
             return reordered_vecs[kwargs.get("state")]
         else:
             return reordered_vecs

-    def Eigenvalue(self, t0, ts=None, state=0, sort="Eigenvalue", **kwargs):
+    def Eigenvalue(self, t0, ts=None, state=0, sort="Eigenvalue"):
         """Determines the eigenvalue of the GEVP by solving and projecting the correlator

         Parameters
         ----------

@@ -415,7 +353,7 @@ class Corr:
         All other parameters are identical to the ones of Corr.GEVP.
         """
-        vec = self.GEVP(t0, ts=ts, sort=sort, **kwargs)[state]
+        vec = self.GEVP(t0, ts=ts, sort=sort)[state]
         return self.projected(vec)

     def Hankel(self, N, periodic=False):
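A sketch of how the GEVP interface documented above is typically used; `matrix_corr` is an assumed NxN matrix-valued Corr:

```python
# Solve the GEVP with reference matrix at t0=3; with sort="Eigenvector"
# the reference state is identified by its eigenvalue at ts=6.
vecs = matrix_corr.GEVP(t0=3, ts=6, sort="Eigenvector")

# Project onto the ground state (state 0) ...
ground_state = matrix_corr.projected(vecs[0])

# ... which is what Eigenvalue does internally via self.projected(vec).
ground_state = matrix_corr.Eigenvalue(3, ts=6, state=0, sort="Eigenvector")
```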
@@ -435,7 +373,7 @@ class Corr:
         """

         if self.N != 1:
-            raise NotImplementedError("Multi-operator Prony not implemented!")
+            raise Exception("Multi-operator Prony not implemented!")

         array = np.empty([N, N], dtype="object")
         new_content = []

@@ -467,7 +405,7 @@ class Corr:
         dt : int
             number of timeslices
         """
-        return Corr(list(np.roll(np.array(self.content, dtype=object), dt, axis=0)))
+        return Corr(list(np.roll(np.array(self.content, dtype=object), dt)))

     def reverse(self):
         """Reverse the time ordering of the Corr"""

@@ -502,7 +440,7 @@ class Corr:
         correlator or a Corr of same length.
         """
         if self.N != 1:
-            raise ValueError("Only one-dimensional correlators can be safely correlated.")
+            raise Exception("Only one-dimensional correlators can be safely correlated.")
         new_content = []
         for x0, t_slice in enumerate(self.content):
             if _check_for_none(self, t_slice):

@@ -516,7 +454,7 @@ class Corr:
             elif isinstance(partner, Obs):  # Should this include CObs?
                 new_content.append(np.array([correlate(o, partner) for o in t_slice]))
             else:
-                raise TypeError("Can only correlate with an Obs or a Corr.")
+                raise Exception("Can only correlate with an Obs or a Corr.")

         return Corr(new_content)

@@ -550,7 +488,7 @@ class Corr:
         ----------
         partner : Corr
             Time symmetry partner of the Corr
-        parity : int
+        partity : int
             Parity quantum number of the correlator, can be +1 or -1
         """
         if self.N != 1:

@@ -583,7 +521,7 @@ class Corr:
             Available choice: symmetric, forward, backward, improved, log, default: symmetric
         """
         if self.N != 1:
-            raise ValueError("deriv only implemented for one-dimensional correlators.")
+            raise Exception("deriv only implemented for one-dimensional correlators.")
         if variant == "symmetric":
             newcontent = []
             for t in range(1, self.T - 1):

@@ -592,7 +530,7 @@ class Corr:
             else:
                 newcontent.append(0.5 * (self.content[t + 1] - self.content[t - 1]))
             if (all([x is None for x in newcontent])):
-                raise ValueError('Derivative is undefined at all timeslices')
+                raise Exception('Derivative is undefined at all timeslices')
             return Corr(newcontent, padding=[1, 1])
         elif variant == "forward":
             newcontent = []

@@ -602,7 +540,7 @@ class Corr:
             else:
                 newcontent.append(self.content[t + 1] - self.content[t])
             if (all([x is None for x in newcontent])):
-                raise ValueError("Derivative is undefined at all timeslices")
+                raise Exception("Derivative is undefined at all timeslices")
             return Corr(newcontent, padding=[0, 1])
         elif variant == "backward":
             newcontent = []

@@ -612,7 +550,7 @@ class Corr:
             else:
                 newcontent.append(self.content[t] - self.content[t - 1])
             if (all([x is None for x in newcontent])):
-                raise ValueError("Derivative is undefined at all timeslices")
+                raise Exception("Derivative is undefined at all timeslices")
             return Corr(newcontent, padding=[1, 0])
         elif variant == "improved":
             newcontent = []

@@ -622,7 +560,7 @@ class Corr:
             else:
                 newcontent.append((1 / 12) * (self.content[t - 2] - 8 * self.content[t - 1] + 8 * self.content[t + 1] - self.content[t + 2]))
             if (all([x is None for x in newcontent])):
-                raise ValueError('Derivative is undefined at all timeslices')
+                raise Exception('Derivative is undefined at all timeslices')
             return Corr(newcontent, padding=[2, 2])
         elif variant == 'log':
             newcontent = []

@@ -632,31 +570,23 @@ class Corr:
             else:
                 newcontent.append(np.log(self.content[t]))
             if (all([x is None for x in newcontent])):
-                raise ValueError("Log is undefined at all timeslices")
+                raise Exception("Log is undefined at all timeslices")
             logcorr = Corr(newcontent)
             return self * logcorr.deriv('symmetric')
         else:
-            raise ValueError("Unknown variant.")
+            raise Exception("Unknown variant.")

     def second_deriv(self, variant="symmetric"):
-        r"""Return the second derivative of the correlator with respect to x0.
+        """Return the second derivative of the correlator with respect to x0.

         Parameters
         ----------
         variant : str
             decides which definition of the finite differences derivative is used.
-            Available choice:
-            - symmetric (default)
-              $$\tilde{\partial}^2_0 f(x_0) = f(x_0+1)-2f(x_0)+f(x_0-1)$$
-            - big_symmetric
-              $$\partial^2_0 f(x_0) = \frac{f(x_0+2)-2f(x_0)+f(x_0-2)}{4}$$
-            - improved
-              $$\partial^2_0 f(x_0) = \frac{-f(x_0+2) + 16 * f(x_0+1) - 30 * f(x_0) + 16 * f(x_0-1) - f(x_0-2)}{12}$$
-            - log
-              $$f(x) = \tilde{\partial}^2_0 log(f(x_0))+(\tilde{\partial}_0 log(f(x_0)))^2$$
+            Available choice: symmetric, improved, log, default: symmetric
         """
         if self.N != 1:
-            raise ValueError("second_deriv only implemented for one-dimensional correlators.")
+            raise Exception("second_deriv only implemented for one-dimensional correlators.")
         if variant == "symmetric":
             newcontent = []
             for t in range(1, self.T - 1):
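The develop-side docstring above spells the discretizations out as formulas; a sketch of the corresponding calls on an assumed N=1 correlator `corr`:

```python
# Default symmetric variant: f(t+1) - 2 f(t) + f(t-1) at every timeslice.
d2_sym = corr.second_deriv(variant="symmetric")
# Five-point improved variant, available in both versions.
d2_imp = corr.second_deriv(variant="improved")
```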
@@ -665,18 +595,8 @@ class Corr:
             else:
                 newcontent.append((self.content[t + 1] - 2 * self.content[t] + self.content[t - 1]))
             if (all([x is None for x in newcontent])):
-                raise ValueError("Derivative is undefined at all timeslices")
+                raise Exception("Derivative is undefined at all timeslices")
             return Corr(newcontent, padding=[1, 1])
-        elif variant == "big_symmetric":
-            newcontent = []
-            for t in range(2, self.T - 2):
-                if (self.content[t - 2] is None) or (self.content[t + 2] is None):
-                    newcontent.append(None)
-                else:
-                    newcontent.append((self.content[t + 2] - 2 * self.content[t] + self.content[t - 2]) / 4)
-            if (all([x is None for x in newcontent])):
-                raise ValueError("Derivative is undefined at all timeslices")
-            return Corr(newcontent, padding=[2, 2])
         elif variant == "improved":
             newcontent = []
             for t in range(2, self.T - 2):

@@ -685,7 +605,7 @@ class Corr:
             else:
                 newcontent.append((1 / 12) * (-self.content[t + 2] + 16 * self.content[t + 1] - 30 * self.content[t] + 16 * self.content[t - 1] - self.content[t - 2]))
             if (all([x is None for x in newcontent])):
-                raise ValueError("Derivative is undefined at all timeslices")
+                raise Exception("Derivative is undefined at all timeslices")
             return Corr(newcontent, padding=[2, 2])
         elif variant == 'log':
             newcontent = []

@@ -695,11 +615,11 @@ class Corr:
             else:
                 newcontent.append(np.log(self.content[t]))
             if (all([x is None for x in newcontent])):
-                raise ValueError("Log is undefined at all timeslices")
+                raise Exception("Log is undefined at all timeslices")
             logcorr = Corr(newcontent)
             return self * (logcorr.second_deriv('symmetric') + (logcorr.deriv('symmetric'))**2)
         else:
-            raise ValueError("Unknown variant.")
+            raise Exception("Unknown variant.")

     def m_eff(self, variant='log', guess=1.0):
         """Returns the effective mass of the correlator as correlator object

@@ -708,8 +628,8 @@ class Corr:
         ----------
         variant : str
             log : uses the standard effective mass log(C(t) / C(t+1))
-            cosh, periodic : Use periodicity of the correlator by solving C(t) / C(t+1) = cosh(m * (t - T/2)) / cosh(m * (t + 1 - T/2)) for m.
-            sinh : Use anti-periodicity of the correlator by solving C(t) / C(t+1) = sinh(m * (t - T/2)) / sinh(m * (t + 1 - T/2)) for m.
+            cosh, periodic : Use periodicitiy of the correlator by solving C(t) / C(t+1) = cosh(m * (t - T/2)) / cosh(m * (t + 1 - T/2)) for m.
+            sinh : Use anti-periodicitiy of the correlator by solving C(t) / C(t+1) = sinh(m * (t - T/2)) / sinh(m * (t + 1 - T/2)) for m.
             See, e.g., arXiv:1205.5380
             arccosh : Uses the explicit form of the symmetrized correlator (not recommended)
             logsym: uses the symmetric effective mass log(C(t-1) / C(t+1))/2
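A sketch of the effective-mass variants listed in the docstring, for an assumed N=1 correlator `corr`:

```python
m_log = corr.m_eff(variant='log')    # log(C(t) / C(t+1))
m_cosh = corr.m_eff(variant='cosh')  # solves the periodic cosh ratio for m
m_log.gamma_method()
m_cosh.gamma_method()
```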
@@ -728,7 +648,7 @@ class Corr:
                 else:
                     newcontent.append(self.content[t] / self.content[t + 1])
             if (all([x is None for x in newcontent])):
-                raise ValueError('m_eff is undefined at all timeslices')
+                raise Exception('m_eff is undefined at all timeslices')
 
             return np.log(Corr(newcontent, padding=[0, 1]))
 
@@ -742,7 +662,7 @@ class Corr:
                 else:
                     newcontent.append(self.content[t - 1] / self.content[t + 1])
             if (all([x is None for x in newcontent])):
-                raise ValueError('m_eff is undefined at all timeslices')
+                raise Exception('m_eff is undefined at all timeslices')
 
             return np.log(Corr(newcontent, padding=[1, 1])) / 2
 
@@ -767,7 +687,7 @@ class Corr:
                 else:
                     newcontent.append(np.abs(find_root(self.content[t][0] / self.content[t + 1][0], root_function, guess=guess)))
             if (all([x is None for x in newcontent])):
-                raise ValueError('m_eff is undefined at all timeslices')
+                raise Exception('m_eff is undefined at all timeslices')
 
             return Corr(newcontent, padding=[0, 1])
 
@@ -779,11 +699,11 @@ class Corr:
                 else:
                     newcontent.append((self.content[t + 1] + self.content[t - 1]) / (2 * self.content[t]))
             if (all([x is None for x in newcontent])):
-                raise ValueError("m_eff is undefined at all timeslices")
+                raise Exception("m_eff is undefined at all timeslices")
             return np.arccosh(Corr(newcontent, padding=[1, 1]))
 
         else:
-            raise ValueError('Unknown variant.')
+            raise Exception('Unknown variant.')
 
     def fit(self, function, fitrange=None, silent=False, **kwargs):
         r'''Fits function to the data
 
@@ -801,7 +721,7 @@ class Corr:
             Decides whether output is printed to the standard output.
         '''
         if self.N != 1:
-            raise ValueError("Correlator must be projected before fitting")
+            raise Exception("Correlator must be projected before fitting")
 
         if fitrange is None:
             if self.prange:
 
@@ -810,12 +730,12 @@ class Corr:
                 fitrange = [0, self.T - 1]
         else:
             if not isinstance(fitrange, list):
-                raise TypeError("fitrange has to be a list with two elements")
+                raise Exception("fitrange has to be a list with two elements")
             if len(fitrange) != 2:
-                raise ValueError("fitrange has to have exactly two elements [fit_start, fit_stop]")
+                raise Exception("fitrange has to have exactly two elements [fit_start, fit_stop]")
 
-        xs = np.array([x for x in range(fitrange[0], fitrange[1] + 1) if self.content[x] is not None])
-        ys = np.array([self.content[x][0] for x in range(fitrange[0], fitrange[1] + 1) if self.content[x] is not None])
+        xs = np.array([x for x in range(fitrange[0], fitrange[1] + 1) if not self.content[x] is None])
+        ys = np.array([self.content[x][0] for x in range(fitrange[0], fitrange[1] + 1) if not self.content[x] is None])
         result = least_squares(xs, ys, function, silent=silent, **kwargs)
         return result
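A hedged usage sketch of the `fit` method shown above (the exponential model and the fit window are illustrative assumptions, not taken from the diff):

```python
import numpy as np

def func(p, x):
    return p[0] * np.exp(-p[1] * x)  # p[1] plays the role of an effective mass

fit_res = corr.fit(func, fitrange=[4, 12])  # fitrange as [fit_start, fit_stop]
fit_res.gamma_method()
print(fit_res[1])  # the fitted mass as an Obs
```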
@@ -840,9 +760,9 @@ class Corr:
             else:
                 raise Exception("no plateau range provided")
         if self.N != 1:
-            raise ValueError("Correlator must be projected before getting a plateau.")
+            raise Exception("Correlator must be projected before getting a plateau.")
         if (all([self.content[t] is None for t in range(plateau_range[0], plateau_range[1] + 1)])):
-            raise ValueError("plateau is undefined at all timeslices in plateaurange.")
+            raise Exception("plateau is undefined at all timeslices in plateaurange.")
         if auto_gamma:
             self.gamma_method()
         if method == "fit":
 
@@ -854,16 +774,16 @@ class Corr:
             return returnvalue
 
         else:
-            raise ValueError("Unsupported plateau method: " + method)
+            raise Exception("Unsupported plateau method: " + method)
 
     def set_prange(self, prange):
         """Sets the attribute prange of the Corr object."""
         if not len(prange) == 2:
-            raise ValueError("prange must be a list or array with two values")
+            raise Exception("prange must be a list or array with two values")
         if not ((isinstance(prange[0], int)) and (isinstance(prange[1], int))):
-            raise TypeError("Start and end point must be integers")
-        if not (0 <= prange[0] <= self.T and 0 <= prange[1] <= self.T and prange[0] <= prange[1]):
-            raise ValueError("Start and end point must define a range in the interval 0,T")
+            raise Exception("Start and end point must be integers")
+        if not (0 <= prange[0] <= self.T and 0 <= prange[1] <= self.T and prange[0] < prange[1]):
+            raise Exception("Start and end point must define a range in the interval 0,T")
 
         self.prange = prange
         return
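A short sketch of how `set_prange` and `plateau` interact (the timeslice values are illustrative):

```python
corr.set_prange([5, 12])           # integer start and end point within [0, T]
plat = corr.plateau(method="avg")  # or method="fit"; uses the stored prange
plat.gamma_method()
print(plat)
```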
@@ -900,7 +820,7 @@ class Corr:
             Optional title of the figure.
         """
         if self.N != 1:
-            raise ValueError("Correlator must be projected before plotting")
+            raise Exception("Correlator must be projected before plotting")
 
         if auto_gamma:
             self.gamma_method()
 
@@ -941,7 +861,7 @@ class Corr:
                     hide_from = None
                 ax1.errorbar(x[:hide_from], y[:hide_from], y_err[:hide_from], label=corr.tag, mfc=plt.rcParams['axes.facecolor'])
             else:
-                raise TypeError("'comp' must be a correlator or a list of correlators.")
+                raise Exception("'comp' must be a correlator or a list of correlators.")
 
         if plateau:
             if isinstance(plateau, Obs):
 
@@ -950,14 +870,14 @@ class Corr:
                 ax1.axhline(y=plateau.value, linewidth=2, color=plt.rcParams['text.color'], alpha=0.6, marker=',', ls='--', label=str(plateau))
                 ax1.axhspan(plateau.value - plateau.dvalue, plateau.value + plateau.dvalue, alpha=0.25, color=plt.rcParams['text.color'], ls='-')
             else:
-                raise TypeError("'plateau' must be an Obs")
+                raise Exception("'plateau' must be an Obs")
 
         if references:
             if isinstance(references, list):
                 for ref in references:
                     ax1.axhline(y=ref, linewidth=1, color=plt.rcParams['text.color'], alpha=0.6, marker=',', ls='--')
             else:
-                raise TypeError("'references' must be a list of floating point values.")
+                raise Exception("'references' must be a list of floating point values.")
 
         if self.prange:
             ax1.axvline(self.prange[0], 0, 1, ls='-', marker=',', color="black", zorder=0)
 
@@ -991,7 +911,7 @@ class Corr:
             if isinstance(save, str):
                 fig.savefig(save, bbox_inches='tight')
             else:
-                raise TypeError("'save' has to be a string.")
+                raise Exception("'save' has to be a string.")
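These checks belong to the plotting routine; a typical call looks like the following sketch (keyword names as in the pyerrors `Corr.show` API, the file name is an assumption):

```python
corr.show(plateau=plat, references=[0.0], logscale=True, save="corr_plot.pdf")
```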
     def spaghetti_plot(self, logscale=True):
         """Produces a spaghetti plot of the correlator suited to monitor exceptional configurations.
 
@@ -1002,7 +922,7 @@ class Corr:
             Determines whether the scale of the y-axis is logarithmic or standard.
         """
         if self.N != 1:
-            raise ValueError("Correlator needs to be projected first.")
+            raise Exception("Correlator needs to be projected first.")
 
         mc_names = list(set([item for sublist in [sum(map(o[0].e_content.get, o[0].mc_names), []) for o in self.content if o is not None] for item in sublist]))
         x0_vals = [n for (n, o) in zip(np.arange(self.T), self.content) if o is not None]
 
@@ -1044,7 +964,7 @@ class Corr:
         elif datatype == "pickle":
             dump_object(self, filename, **kwargs)
         else:
-            raise ValueError("Unknown datatype " + str(datatype))
+            raise Exception("Unknown datatype " + str(datatype))
 
     def print(self, print_range=None):
         print(self.__repr__(print_range))
 
@@ -1060,6 +980,8 @@ class Corr:
         content_string += "Description: " + self.tag + "\n"
         if self.N != 1:
             return content_string
+        if isinstance(self[0], CObs):
+            return content_string
 
         if print_range[1]:
             print_range[1] += 1
 
@@ -1070,7 +992,7 @@ class Corr:
             else:
                 content_string += str(i + print_range[0])
                 for element in sub_corr:
-                    content_string += f"\t{element:+2}"
+                    content_string += '\t' + ' ' * int(element >= 0) + str(element)
                 content_string += '\n'
         return content_string
 
@@ -1082,19 +1004,10 @@ class Corr:
     # This is because Obs*Corr checks Obs.__mul__ first and does not catch an exception.
     # One could try and tell Obs to check if the y in __mul__ is a Corr and
 
     __array_priority__ = 10000
 
-    def __eq__(self, y):
-        if isinstance(y, Corr):
-            comp = np.asarray(y.content, dtype=object)
-        else:
-            comp = np.asarray(y)
-        return np.asarray(self.content, dtype=object) == comp
-
     def __add__(self, y):
         if isinstance(y, Corr):
             if ((self.N != y.N) or (self.T != y.T)):
-                raise ValueError("Addition of Corrs with different shape")
+                raise Exception("Addition of Corrs with different shape")
             newcontent = []
             for t in range(self.T):
                 if _check_for_none(self, self.content[t]) or _check_for_none(y, y.content[t]):
 
@@ -1103,7 +1016,7 @@ class Corr:
                     newcontent.append(self.content[t] + y.content[t])
             return Corr(newcontent)
 
-        elif isinstance(y, (Obs, int, float, CObs, complex)):
+        elif isinstance(y, (Obs, int, float, CObs)):
             newcontent = []
             for t in range(self.T):
                 if _check_for_none(self, self.content[t]):
 
@@ -1122,7 +1035,7 @@ class Corr:
     def __mul__(self, y):
         if isinstance(y, Corr):
             if not ((self.N == 1 or y.N == 1 or self.N == y.N) and self.T == y.T):
-                raise ValueError("Multiplication of Corr object requires N=N or N=1 and T=T")
+                raise Exception("Multiplication of Corr object requires N=N or N=1 and T=T")
             newcontent = []
             for t in range(self.T):
                 if _check_for_none(self, self.content[t]) or _check_for_none(y, y.content[t]):
 
@@ -1131,7 +1044,7 @@ class Corr:
                     newcontent.append(self.content[t] * y.content[t])
             return Corr(newcontent)
 
-        elif isinstance(y, (Obs, int, float, CObs, complex)):
+        elif isinstance(y, (Obs, int, float, CObs)):
             newcontent = []
             for t in range(self.T):
                 if _check_for_none(self, self.content[t]):
 
@@ -1147,53 +1060,10 @@ class Corr:
         else:
             raise TypeError("Corr * wrong type")
 
-    def __matmul__(self, y):
-        if isinstance(y, np.ndarray):
-            if y.ndim != 2 or y.shape[0] != y.shape[1]:
-                raise ValueError("Can only multiply correlators by square matrices.")
-            if not self.N == y.shape[0]:
-                raise ValueError("matmul: mismatch of matrix dimensions")
-            newcontent = []
-            for t in range(self.T):
-                if _check_for_none(self, self.content[t]):
-                    newcontent.append(None)
-                else:
-                    newcontent.append(self.content[t] @ y)
-            return Corr(newcontent)
-        elif isinstance(y, Corr):
-            if not self.N == y.N:
-                raise ValueError("matmul: mismatch of matrix dimensions")
-            newcontent = []
-            for t in range(self.T):
-                if _check_for_none(self, self.content[t]) or _check_for_none(y, y.content[t]):
-                    newcontent.append(None)
-                else:
-                    newcontent.append(self.content[t] @ y.content[t])
-            return Corr(newcontent)
-
-        else:
-            return NotImplemented
-
-    def __rmatmul__(self, y):
-        if isinstance(y, np.ndarray):
-            if y.ndim != 2 or y.shape[0] != y.shape[1]:
-                raise ValueError("Can only multiply correlators by square matrices.")
-            if not self.N == y.shape[0]:
-                raise ValueError("matmul: mismatch of matrix dimensions")
-            newcontent = []
-            for t in range(self.T):
-                if _check_for_none(self, self.content[t]):
-                    newcontent.append(None)
-                else:
-                    newcontent.append(y @ self.content[t])
-            return Corr(newcontent)
-        else:
-            return NotImplemented
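The removed `__matmul__`/`__rmatmul__` methods give matrix-valued correlators a matrix product; a hedged sketch of their use (the 2x2 matrix correlator `mat_corr` is an assumption):

```python
import numpy as np

phi = np.pi / 4
rot = np.array([[np.cos(phi), -np.sin(phi)],
                [np.sin(phi), np.cos(phi)]])  # square matrix matching mat_corr.N
rotated = rot @ mat_corr @ rot.T  # __rmatmul__ handles the left, __matmul__ the right product
```

The high `__array_priority__` set above is what makes numpy defer to these methods instead of broadcasting over the correlator.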
     def __truediv__(self, y):
         if isinstance(y, Corr):
             if not ((self.N == 1 or y.N == 1 or self.N == y.N) and self.T == y.T):
-                raise ValueError("Multiplication of Corr object requires N=N or N=1 and T=T")
+                raise Exception("Multiplication of Corr object requires N=N or N=1 and T=T")
             newcontent = []
             for t in range(self.T):
                 if _check_for_none(self, self.content[t]) or _check_for_none(y, y.content[t]):
 
@@ -1207,16 +1077,16 @@ class Corr:
                     newcontent[t] = None
 
             if all([item is None for item in newcontent]):
-                raise ValueError("Division returns completely undefined correlator")
+                raise Exception("Division returns completely undefined correlator")
             return Corr(newcontent)
 
         elif isinstance(y, (Obs, CObs)):
             if isinstance(y, Obs):
                 if y.value == 0:
-                    raise ValueError('Division by zero will return undefined correlator')
+                    raise Exception('Division by zero will return undefined correlator')
             if isinstance(y, CObs):
                 if y.is_zero():
-                    raise ValueError('Division by zero will return undefined correlator')
+                    raise Exception('Division by zero will return undefined correlator')
 
             newcontent = []
             for t in range(self.T):
 
@@ -1228,7 +1098,7 @@ class Corr:
 
         elif isinstance(y, (int, float)):
             if y == 0:
-                raise ValueError('Division by zero will return undefined correlator')
+                raise Exception('Division by zero will return undefined correlator')
             newcontent = []
             for t in range(self.T):
                 if _check_for_none(self, self.content[t]):
 
@@ -1284,7 +1154,7 @@ class Corr:
             if np.isnan(tmp_sum.value):
                 newcontent[t] = None
         if all([item is None for item in newcontent]):
-            raise ValueError('Operation returns undefined correlator')
+            raise Exception('Operation returns undefined correlator')
         return Corr(newcontent)
 
     def sin(self):
 
@@ -1392,13 +1262,13 @@ class Corr:
         '''
 
         if self.N == 1:
-            raise ValueError('Method cannot be applied to one-dimensional correlators.')
+            raise Exception('Method cannot be applied to one-dimensional correlators.')
         if basematrix is None:
             basematrix = self
         if Ntrunc >= basematrix.N:
-            raise ValueError('Cannot truncate using Ntrunc <= %d' % (basematrix.N))
+            raise Exception('Cannot truncate using Ntrunc <= %d' % (basematrix.N))
         if basematrix.N != self.N:
-            raise ValueError('basematrix and targetmatrix have to be of the same size.')
+            raise Exception('basematrix and targetmatrix have to be of the same size.')
 
         evecs = basematrix.GEVP(t0proj, tproj, sort=None)[:Ntrunc]
 
@@ -1414,13 +1284,8 @@ class Corr:
         return Corr(newcontent)
 
 
-def _sort_vectors(vec_set_in, ts):
+def _sort_vectors(vec_set, ts):
     """Helper function used to find a set of Eigenvectors consistent over all timeslices"""
 
-    if isinstance(vec_set_in[ts][0][0], Obs):
-        vec_set = [anp.vectorize(lambda x: float(x))(vi) if vi is not None else vi for vi in vec_set_in]
-    else:
-        vec_set = vec_set_in
     reference_sorting = np.array(vec_set[ts])
     N = reference_sorting.shape[0]
     sorted_vec_set = []
 
@@ -1439,9 +1304,9 @@ def _sort_vectors(vec_set_in, ts):
                 if current_score > best_score:
                     best_score = current_score
                     best_perm = perm
-            sorted_vec_set.append([vec_set_in[t][k] for k in best_perm])
+            sorted_vec_set.append([vec_set[t][k] for k in best_perm])
         else:
-            sorted_vec_set.append(vec_set_in[t])
+            sorted_vec_set.append(vec_set[t])
 
     return sorted_vec_set
 
 
@@ -1451,63 +1316,10 @@ def _check_for_none(corr, entry):
     return len(list(filter(None, np.asarray(entry).flatten()))) < corr.N ** 2
 
 
-def _GEVP_solver(Gt, G0, method='eigh', chol_inv=None):
-    r"""Helper function for solving the GEVP and sorting the eigenvectors.
-
-    Solves $G(t)v_i=\lambda_i G(t_0)v_i$ and returns the eigenvectors v_i
+def _GEVP_solver(Gt, G0):
+    """Helper function for solving the GEVP and sorting the eigenvectors.
 
     The helper function assumes that both provided matrices are symmetric and
     only processes the lower triangular part of both matrices. In case the matrices
-    are not symmetric the upper triangular parts are effectively discarded.
-
-    Parameters
-    ----------
-    Gt : array
-        The correlator at time t for the left hand side of the GEVP
-    G0 : array
-        The correlator at time t0 for the right hand side of the GEVP
-    method : str
-        Method used to solve the GEVP.
-       - "eigh": Use scipy.linalg.eigh to solve the GEVP.
-       - "cholesky": Use manually implemented solution via the Cholesky decomposition.
-    chol_inv : array, optional
-        Inverse of the Cholesky decomposition of G0. May be provided to
-        speed up the computation in the case of method=='cholesky'
-
-    """
-    if isinstance(G0[0][0], Obs):
-        vector_obs = True
-    else:
-        vector_obs = False
-
-    if method == 'cholesky':
-        if vector_obs:
-            cholesky = linalg.cholesky
-            inv = linalg.inv
-            eigv = linalg.eigv
-            matmul = linalg.matmul
-        else:
-            cholesky = np.linalg.cholesky
-            inv = np.linalg.inv
-
-            def eigv(x, **kwargs):
-                return np.linalg.eigh(x)[1]
-
-            def matmul(*operands):
-                return np.linalg.multi_dot(operands)
-        N = Gt.shape[0]
-        output = [[] for j in range(N)]
-        if chol_inv is None:
-            chol = cholesky(G0)  # This will automatically report if the matrix is not pos-def
-            chol_inv = inv(chol)
-
-        try:
-            new_matrix = matmul(chol_inv, Gt, chol_inv.T)
-            ev = eigv(new_matrix)
-            ev = matmul(chol_inv.T, ev)
-            output = np.flip(ev, axis=1).T
-        except (np.linalg.LinAlgError, TypeError, ValueError):  # The above code can fail because of linalg-errors or because the entry of the corr is None
-            for s in range(N):
-                output[s] = None
-        return output
-    elif method == 'eigh':
-        return scipy.linalg.eigh(Gt, G0, lower=True)[1].T[::-1]
+    are not symmetric the upper triangular parts are effectively discarded."""
+    return scipy.linalg.eigh(Gt, G0, lower=True)[1].T[::-1]
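The removed Cholesky branch turns the symmetric GEVP into an ordinary eigenproblem; a self-contained numerical sketch of that transformation (plain numpy, illustrative matrices):

```python
import numpy as np

# G(t) v = lambda G(t0) v becomes (L^-1 G(t) L^-T) w = lambda w
# with G(t0) = L L^T and v = L^-T w.
G0 = np.array([[2.0, 0.5], [0.5, 1.0]])  # illustrative positive-definite G(t0)
Gt = np.array([[1.0, 0.3], [0.3, 0.4]])  # illustrative G(t)

chol = np.linalg.cholesky(G0)
chol_inv = np.linalg.inv(chol)
w, ev = np.linalg.eigh(chol_inv @ Gt @ chol_inv.T)
vecs = chol_inv.T @ ev                        # back-transform to GEVP eigenvectors
print(np.allclose(Gt @ vecs, G0 @ vecs * w))  # True: generalized eigenpairs
```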
pyerrors/dirac.py

@@ -34,7 +34,7 @@ def epsilon_tensor(i, j, k):
     """
     test_set = set((i, j, k))
     if not (test_set <= set((1, 2, 3)) or test_set <= set((0, 1, 2))):
-        raise ValueError("Unexpected input", i, j, k)
+        raise Exception("Unexpected input", i, j, k)
 
     return (i - j) * (j - k) * (k - i) / 2
 
@@ -52,7 +52,7 @@ def epsilon_tensor_rank4(i, j, k, o):
     """
     test_set = set((i, j, k, o))
     if not (test_set <= set((1, 2, 3, 4)) or test_set <= set((0, 1, 2, 3))):
-        raise ValueError("Unexpected input", i, j, k, o)
+        raise Exception("Unexpected input", i, j, k, o)
 
     return (i - j) * (j - k) * (k - i) * (i - o) * (j - o) * (o - k) / 12
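The closed-form expressions reproduce the Levi-Civita symbol; a quick check of the rank-3 formula:

```python
def eps3(i, j, k):
    return (i - j) * (j - k) * (k - i) / 2

print(eps3(1, 2, 3))  # 1.0  (even permutation)
print(eps3(2, 1, 3))  # -1.0 (odd permutation)
print(eps3(1, 1, 3))  # 0.0  (repeated index)
```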
@@ -92,5 +92,5 @@ def Grid_gamma(gamma_tag):
     elif gamma_tag == 'SigmaZT':
         g = 0.5 * (gamma[2] @ gamma[3] - gamma[3] @ gamma[2])
     else:
-        raise ValueError('Unknown gamma structure', gamma_tag)
+        raise Exception('Unknown gamma structure', gamma_tag)
     return g

100 pyerrors/fits.py
@@ -14,7 +14,7 @@ from autograd import hessian as auto_hessian
 from autograd import elementwise_grad as egrad
 from numdifftools import Jacobian as num_jacobian
 from numdifftools import Hessian as num_hessian
-from .obs import Obs, derived_observable, covariance, cov_Obs, invert_corr_cov_cholesky
+from .obs import Obs, derived_observable, covariance, cov_Obs
 
 
 class Fit_result(Sequence):
 
@@ -151,14 +151,6 @@ def least_squares(x, y, func, priors=None, silent=False, **kwargs):
         For details about how the covariance matrix is estimated see `pyerrors.obs.covariance`.
         In practice the correlation matrix is Cholesky decomposed and inverted (instead of the covariance matrix).
         This procedure should be numerically more stable as the correlation matrix is typically better conditioned (Jacobi preconditioning).
-    inv_chol_cov_matrix [array,list], optional
-        array: shape = (no of y values) X (no of y values)
-        list: for an uncombined fit: [""]
-              for a combined fit: list of keys belonging to the corr_matrix saved in the array, must be the same as the keys of the y dict in alphabetical order
-        If correlated_fit=True is set as well, can provide an inverse covariance matrix (y errors, dy_f included!) of your own choosing for a correlated fit.
-        The matrix must be a lower triangular matrix constructed from a Cholesky decomposition: The function invert_corr_cov_cholesky(corr, inverrdiag) can be
-        used to construct it from a correlation matrix (corr) and the errors dy_f of the data points (inverrdiag = np.diag(1 / np.asarray(dy_f))). For the correct
-        ordering the correlation matrix (corr) can be sorted via the function sort_corr(corr, kl, yd) where kl is the list of keys and yd the y dict.
     expected_chisquare : bool
         If True estimates the expected chisquare which is
         corrected by effects caused by correlated input data (default False).
 
@@ -173,66 +165,15 @@ def least_squares(x, y, func, priors=None, silent=False, **kwargs):
     -------
     output : Fit_result
         Parameters and information on the fitted result.
-    Examples
-    ------
-    >>> # Example of a correlated (correlated_fit = True, inv_chol_cov_matrix handed over) combined fit, based on a randomly generated data set
-    >>> import numpy as np
-    >>> from scipy.stats import norm
-    >>> from scipy.linalg import cholesky
-    >>> import pyerrors as pe
-    >>> # generating the random data set
-    >>> num_samples = 400
-    >>> N = 3
-    >>> x = np.arange(N)
-    >>> x1 = norm.rvs(size=(N, num_samples))  # generate random numbers
-    >>> x2 = norm.rvs(size=(N, num_samples))  # generate random numbers
-    >>> r = r1 = r2 = np.zeros((N, N))
-    >>> y = {}
-    >>> for i in range(N):
-    >>>     for j in range(N):
-    >>>         r[i, j] = np.exp(-0.8 * np.fabs(i - j))  # element in correlation matrix
-    >>> errl = np.sqrt([3.4, 2.5, 3.6])  # set y errors
-    >>> for i in range(N):
-    >>>     for j in range(N):
-    >>>         r[i, j] *= errl[i] * errl[j]  # element in covariance matrix
-    >>> c = cholesky(r, lower=True)
-    >>> y = {'a': np.dot(c, x1), 'b': np.dot(c, x2)}  # generate y data with the covariance matrix defined
-    >>> # random data set has been generated, now the dictionaries and the inverse covariance matrix to be handed over are built
-    >>> x_dict = {}
-    >>> y_dict = {}
-    >>> chol_inv_dict = {}
-    >>> data = []
-    >>> for key in y.keys():
-    >>>     x_dict[key] = x
-    >>>     for i in range(N):
-    >>>         data.append(pe.Obs([[i + 1 + o for o in y[key][i]]], ['ens']))  # generate y Obs from the y data
-    >>> [o.gamma_method() for o in data]
-    >>> corr = pe.covariance(data, correlation=True)
-    >>> inverrdiag = np.diag(1 / np.asarray([o.dvalue for o in data]))
-    >>> chol_inv = pe.obs.invert_corr_cov_cholesky(corr, inverrdiag)  # gives form of the inverse covariance matrix needed for the combined correlated fit below
-    >>> y_dict = {'a': data[:3], 'b': data[3:]}
-    >>> # common fit parameter p[0] in combined fit
-    >>> def fit1(p, x):
-    >>>     return p[0] + p[1] * x
-    >>> def fit2(p, x):
-    >>>     return p[0] + p[2] * x
-    >>> fitf_dict = {'a': fit1, 'b': fit2}
-    >>> fitp_inv_cov_combined_fit = pe.least_squares(x_dict, y_dict, fitf_dict, correlated_fit=True, inv_chol_cov_matrix=[chol_inv, ['a', 'b']])
-    Fit with 3 parameters
-    Method: Levenberg-Marquardt
-    `ftol` termination condition is satisfied.
-    chisquare/d.o.f.: 0.5388013574561786 # random
-    fit parameters [1.11897846 0.96361162 0.92325319] # random
-
     '''
     output = Fit_result()
 
-    if (isinstance(x, dict) and isinstance(y, dict) and isinstance(func, dict)):
+    if (type(x) == dict and type(y) == dict and type(func) == dict):
         xd = {key: anp.asarray(x[key]) for key in x}
         yd = y
         funcd = func
         output.fit_function = func
-    elif (isinstance(x, dict) or isinstance(y, dict) or isinstance(func, dict)):
+    elif (type(x) == dict or type(y) == dict or type(func) == dict):
         raise TypeError("All arguments have to be dictionaries in order to perform a combined fit.")
     else:
         x = np.asarray(x)
 
@@ -256,7 +197,7 @@ def least_squares(x, y, func, priors=None, silent=False, **kwargs):
     if sorted(list(funcd.keys())) != key_ls:
         raise ValueError('x and func dictionaries do not contain the same keys.')
 
-    x_all = np.concatenate([np.array(xd[key]).transpose() for key in key_ls]).transpose()
+    x_all = np.concatenate([np.array(xd[key]) for key in key_ls])
     y_all = np.concatenate([np.array(yd[key]) for key in key_ls])
 
     y_f = [o.value for o in y_all]
 
@@ -289,12 +230,6 @@ def least_squares(x, y, func, priors=None, silent=False, **kwargs):
         n_parms_ls.append(n_loc)
 
     n_parms = max(n_parms_ls)
 
-    if len(key_ls) > 1:
-        for key in key_ls:
-            if np.asarray(yd[key]).shape != funcd[key](np.arange(n_parms), xd[key]).shape:
-                raise ValueError(f"Fit function {key} returns the wrong shape ({funcd[key](np.arange(n_parms), xd[key]).shape} instead of {np.asarray(yd[key]).shape})\nIf the fit function is just a constant you could try adding x*0 to get the correct shape.")
-
     if not silent:
         print('Fit with', n_parms, 'parameter' + 's' * (n_parms > 1))
 
@@ -356,21 +291,15 @@ def least_squares(x, y, func, priors=None, silent=False, **kwargs):
         return anp.sum(general_chisqfunc_uncorr(p, y_f, p_f) ** 2)
 
     if kwargs.get('correlated_fit') is True:
-        if 'inv_chol_cov_matrix' in kwargs:
-            chol_inv = kwargs.get('inv_chol_cov_matrix')
-            if (chol_inv[0].shape[0] != len(dy_f)):
-                raise TypeError('The number of columns of the inverse covariance matrix handed over needs to be equal to the number of y errors.')
-            if (chol_inv[0].shape[0] != chol_inv[0].shape[1]):
-                raise TypeError('The inverse covariance matrix handed over needs to have the same number of rows as columns.')
-            if (chol_inv[1] != key_ls):
-                raise ValueError('The keys of inverse covariance matrix are not the same or do not appear in the same order as the x and y values.')
-            chol_inv = chol_inv[0]
-            if np.any(np.diag(chol_inv) <= 0) or (not np.all(chol_inv == np.tril(chol_inv))):
-                raise ValueError('The inverse covariance matrix inv_chol_cov_matrix[0] has to be a lower triangular matrix constructed from a Cholesky decomposition.')
-        else:
-            corr = covariance(y_all, correlation=True, **kwargs)
-            inverrdiag = np.diag(1 / np.asarray(dy_f))
-            chol_inv = invert_corr_cov_cholesky(corr, inverrdiag)
+        corr = covariance(y_all, correlation=True, **kwargs)
+        covdiag = np.diag(1 / np.asarray(dy_f))
+        condn = np.linalg.cond(corr)
+        if condn > 0.1 / np.finfo(float).eps:
+            raise Exception(f"Cannot invert correlation matrix as its condition number exceeds machine precision ({condn:1.2e})")
+        if condn > 1e13:
+            warnings.warn("Correlation matrix may be ill-conditioned, condition number: {%1.2e}" % (condn), RuntimeWarning)
+        chol = np.linalg.cholesky(corr)
+        chol_inv = scipy.linalg.solve_triangular(chol, covdiag, lower=True)
 
     def general_chisqfunc(p, ivars, pr):
         model = anp.concatenate([anp.array(funcd[key](p, xd[key])).reshape(-1) for key in key_ls])
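Both branches construct the same object: the inverse Cholesky factor that whitens the correlated residuals. A small self-contained sketch of why decomposing the correlation matrix (rather than the covariance) helps conditioning (illustrative numbers):

```python
import numpy as np

dy = np.array([1e-4, 1.0])                 # very different errors
corr = np.array([[1.0, 0.3], [0.3, 1.0]])  # mild correlation
cov = np.outer(dy, dy) * corr

# Jacobi preconditioning: the correlation matrix is far better conditioned.
print(np.linalg.cond(cov))   # ~1e8 for these numbers
print(np.linalg.cond(corr))  # ~1.9

chol_inv = np.linalg.inv(np.linalg.cholesky(corr)) @ np.diag(1 / dy)
print(np.allclose(chol_inv @ cov @ chol_inv.T, np.eye(2)))  # True: data whitened
```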
@@ -415,6 +344,7 @@ def least_squares(x, y, func, priors=None, silent=False, **kwargs):
 
     fit_result = scipy.optimize.least_squares(chisqfunc_residuals_uncorr, x0, method='lm', ftol=1e-15, gtol=1e-15, xtol=1e-15)
+
     if kwargs.get('correlated_fit') is True:
 
         def chisqfunc_residuals(p):
             return general_chisqfunc(p, y_f, p_f)
 
@@ -429,7 +359,7 @@ def least_squares(x, y, func, priors=None, silent=False, **kwargs):
         raise Exception('The minimization procedure did not converge.')
 
     output.chisquare = chisquare
-    output.dof = y_all.shape[-1] - n_parms + len(loc_priors)
+    output.dof = x_all.shape[-1] - n_parms + len(loc_priors)
     output.p_value = 1 - scipy.stats.chi2.cdf(output.chisquare, output.dof)
     if output.dof > 0:
         output.chisquare_by_dof = output.chisquare / output.dof
 
@@ -457,7 +387,7 @@ def least_squares(x, y, func, priors=None, silent=False, **kwargs):
         hat_vector = prepare_hat_matrix()
         A = W @ hat_vector
         P_phi = A @ np.linalg.pinv(A.T @ A) @ A.T
-        expected_chisquare = np.trace((np.identity(y_all.shape[-1]) - P_phi) @ W @ cov @ W)
+        expected_chisquare = np.trace((np.identity(x_all.shape[-1]) - P_phi) @ W @ cov @ W)
         output.chisquare_by_expected_chisquare = output.chisquare / expected_chisquare
         if not silent:
             print('chisquare/expected_chisquare:', output.chisquare_by_expected_chisquare)
pyerrors/input/__init__.py

@@ -5,11 +5,11 @@ r'''
 For comparison with other analysis workflows `pyerrors` can also generate jackknife samples from an `Obs` object or import jackknife samples into an `Obs` object.
 See `pyerrors.obs.Obs.export_jackknife` and `pyerrors.obs.import_jackknife` for details.
 '''
-from . import bdio as bdio
-from . import dobs as dobs
-from . import hadrons as hadrons
-from . import json as json
-from . import misc as misc
-from . import openQCD as openQCD
-from . import pandas as pandas
-from . import sfcf as sfcf
+from . import bdio
+from . import dobs
+from . import hadrons
+from . import json
+from . import misc
+from . import openQCD
+from . import pandas
+from . import sfcf
pyerrors/input/dobs.py

@@ -79,7 +79,7 @@ def _dict_to_xmlstring_spaces(d, space=' '):
             o += space
         o += li + '\n'
         if li.startswith('<') and not cm:
-            if '<%s' % ('/') not in li:
+            if not '<%s' % ('/') in li:
                 c += 1
         cm = False
     return o
 
@@ -529,8 +529,7 @@ def import_dobs_string(content, full_output=False, separator_insertion=True):
             deltas.append(repdeltas)
             idl.append(repidl)
 
-        obsmeans = [np.average(deltas[j]) for j in range(len(deltas))]
-        res.append(Obs([np.array(deltas[j]) - obsmeans[j] for j in range(len(obsmeans))], obs_names, idl=idl, means=obsmeans))
+        res.append(Obs(deltas, obs_names, idl=idl))
         res[-1]._value = mean[i]
     _check(len(e_names) == ne)
 
@@ -672,7 +671,7 @@ def _dobsdict_to_xmlstring_spaces(d, space=' '):
             o += space
         o += li + '\n'
         if li.startswith('<') and not cm:
-            if '<%s' % ('/') not in li:
+            if not '<%s' % ('/') in li:
                 c += 1
         cm = False
     return o
pyerrors/input/hadrons.py

@@ -54,92 +54,6 @@ def _get_files(path, filestem, idl):
     return filtered_files, idx
 
 
-def read_hd5(filestem, ens_id, group, attrs=None, idl=None, part="real"):
-    r'''Read hadrons hdf5 file and extract entry based on attributes.
-
-    Parameters
-    -----------------
-    filestem : str
-        Full namestem of the files to read, including the full path.
-    ens_id : str
-        name of the ensemble, required for internal bookkeeping
-    group : str
-        label of the group to be extracted.
-    attrs : dict or int
-        Dictionary containing the attributes. For example
-        ```python
-        attrs = {"gamma_snk": "Gamma5",
-                 "gamma_src": "Gamma5"}
-        ```
-        Alternatively an integer can be specified to identify the sub group.
-        This is discouraged as the order in the file is not guaranteed.
-    idl : range
-        If specified only configurations in the given range are read in.
-    part: str
-        string specifying whether to extract the real part ('real'),
-        the imaginary part ('imag') or a complex correlator ('complex').
-        Default 'real'.
-
-    Returns
-    -------
-    corr : Corr
-        Correlator of the source sink combination in question.
-    '''
-
-    path_obj = Path(filestem)
-    path = path_obj.parent.as_posix()
-    filestem = path_obj.name
-
-    files, idx = _get_files(path, filestem, idl)
-
-    if isinstance(attrs, dict):
-        h5file = h5py.File(path + '/' + files[0], "r")
-        entry = None
-        for key in h5file[group].keys():
-            if attrs.items() <= {k: v[0].decode() for k, v in h5file[group][key].attrs.items()}.items():
-                if entry is None:
-                    entry = key
-                else:
-                    raise ValueError("More than one fitting entry found. More constraint on attributes needed.")
-        h5file.close()
-        if entry is None:
-            raise ValueError(f"Entry with attributes {attrs} not found.")
-    elif isinstance(attrs, int):
-        entry = group + f"_{attrs}"
-    else:
-        raise TypeError("Invalid type for 'attrs'. Needs to be dict or int.")
-
-    corr_data = []
-    infos = []
-    for hd5_file in files:
-        h5file = h5py.File(path + '/' + hd5_file, "r")
-        if group + '/' + entry not in h5file:
-            raise Exception("Entry '" + entry + "' not contained in the files.")
-        raw_data = h5file[group + '/' + entry + '/corr']
-        real_data = raw_data[:].view("complex")
-        corr_data.append(real_data)
-        if not infos:
-            for k, i in h5file[group + '/' + entry].attrs.items():
-                infos.append(k + ': ' + i[0].decode())
-        h5file.close()
-    corr_data = np.array(corr_data)
-
-    if part == "complex":
-        l_obs = []
-        for c in corr_data.T:
-            l_obs.append(CObs(Obs([c.real], [ens_id], idl=[idx]),
-                              Obs([c.imag], [ens_id], idl=[idx])))
-    else:
-        corr_data = getattr(corr_data, part)
-        l_obs = []
-        for c in corr_data.T:
-            l_obs.append(Obs([c], [ens_id], idl=[idx]))
-
-    corr = Corr(l_obs)
-    corr.tag = r", ".join(infos)
-    return corr
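A hedged usage sketch of the removed `read_hd5` (file stem and ensemble id are placeholders):

```python
import pyerrors.input.hadrons as hadrons

corr = hadrons.read_hd5(filestem="/data/meson/meson_prop",  # placeholder path
                        ens_id="A654",                      # placeholder ensemble id
                        group="meson",
                        attrs={"gamma_snk": "Gamma5", "gamma_src": "Gamma5"})
corr.gamma_method()
```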
 def read_meson_hd5(path, filestem, ens_id, meson='meson_0', idl=None, gammas=None):
     r'''Read hadrons meson hdf5 file and extract the meson labeled 'meson'
 
@@ -156,7 +70,7 @@ def read_meson_hd5(path, filestem, ens_id, meson='meson_0', idl=None, gammas=Non
         corresponds to the pseudoscalar pseudoscalar two-point function.
     gammas : tuple of strings
         Instead of a meson label one can also provide a tuple of two strings
-        indicating the gamma matrices at sink and source (gamma_snk, gamma_src).
+        indicating the gamma matrices at source and sink.
         ("Gamma5", "Gamma5") corresponds to the pseudoscalar pseudoscalar
         two-point function. The gammas argument dominates over meson.
     idl : range
 
@@ -167,16 +81,45 @@ def read_meson_hd5(path, filestem, ens_id, meson='meson_0', idl=None, gammas=Non
     corr : Corr
         Correlator of the source sink combination in question.
     '''
-    if gammas is None:
-        attrs = int(meson.rsplit('_', 1)[-1])
-    else:
-        if len(gammas) != 2:
-            raise ValueError("'gammas' needs to have exactly two entries")
-        attrs = {"gamma_snk": gammas[0],
-                 "gamma_src": gammas[1]}
-    return read_hd5(filestem=path + "/" + filestem, ens_id=ens_id,
-                    group=meson.rsplit('_', 1)[0], attrs=attrs, idl=idl,
-                    part="real")
+    files, idx = _get_files(path, filestem, idl)
+
+    tree = meson.rsplit('_')[0]
+    if gammas is not None:
+        h5file = h5py.File(path + '/' + files[0], "r")
+        found_meson = None
+        for key in h5file[tree].keys():
+            if gammas[0] == h5file[tree][key].attrs["gamma_snk"][0].decode() and h5file[tree][key].attrs["gamma_src"][0].decode() == gammas[1]:
+                found_meson = key
+                break
+        h5file.close()
+        if found_meson:
+            meson = found_meson
+        else:
+            raise Exception("Source Sink combination " + str(gammas) + " not found.")
+
+    corr_data = []
+    infos = []
+    for hd5_file in files:
+        h5file = h5py.File(path + '/' + hd5_file, "r")
+        if not tree + '/' + meson in h5file:
+            raise Exception("Entry '" + meson + "' not contained in the files.")
+        raw_data = h5file[tree + '/' + meson + '/corr']
+        real_data = raw_data[:]["re"].astype(np.double)
+        corr_data.append(real_data)
+        if not infos:
+            for k, i in h5file[tree + '/' + meson].attrs.items():
+                infos.append(k + ': ' + i[0].decode())
+        h5file.close()
+    corr_data = np.array(corr_data)
+
+    l_obs = []
+    for c in corr_data.T:
+        l_obs.append(Obs([c], [ens_id], idl=[idx]))
+
+    corr = Corr(l_obs)
+    corr.tag = r", ".join(infos)
+    return corr
 
 
 def _extract_real_arrays(path, files, tree, keys):
 
@@ -186,7 +129,7 @@ def _extract_real_arrays(path, files, tree, keys):
     for hd5_file in files:
         h5file = h5py.File(path + '/' + hd5_file, "r")
         for key in keys:
-            if tree + '/' + key not in h5file:
+            if not tree + '/' + key in h5file:
                 raise Exception("Entry '" + key + "' not contained in the files.")
             raw_data = h5file[tree + '/' + key + '/data']
             real_data = raw_data[:].astype(np.double)
pyerrors/input/json.py

@@ -133,11 +133,10 @@ def create_json_string(ol, description='', indent=1):
             names = []
             idl = []
             for key, value in obs.idl.items():
-                samples.append(np.array([np.nan] * len(value)))
+                samples.append([np.nan] * len(value))
                 names.append(key)
                 idl.append(value)
-            my_obs = Obs(samples, names, idl, means=[np.nan for n in names])
-            my_obs._value = np.nan
+            my_obs = Obs(samples, names, idl)
             my_obs._covobs = obs._covobs
             for name in obs._covobs:
                 my_obs.names.append(name)
 
@@ -332,8 +331,7 @@ def _parse_json_dict(json_dict, verbose=True, full_output=False):
         cd = _gen_covobsd_from_cdatad(o.get('cdata', {}))
 
         if od:
-            r_offsets = [np.average([ddi[0] for ddi in di]) for di in od['deltas']]
-            ret = Obs([np.array([ddi[0] for ddi in od['deltas'][i]]) - r_offsets[i] for i in range(len(od['deltas']))], od['names'], idl=od['idl'], means=[ro + values[0] for ro in r_offsets])
+            ret = Obs([[ddi[0] + values[0] for ddi in di] for di in od['deltas']], od['names'], idl=od['idl'])
             ret._value = values[0]
         else:
             ret = Obs([], [], means=[])
 
@@ -358,8 +356,7 @@ def _parse_json_dict(json_dict, verbose=True, full_output=False):
         taglist = o.get('tag', layout * [None])
         for i in range(layout):
             if od:
-                r_offsets = np.array([np.average(di[:, i]) for di in od['deltas']])
-                ret.append(Obs([od['deltas'][j][:, i] - r_offsets[j] for j in range(len(od['deltas']))], od['names'], idl=od['idl'], means=[ro + values[i] for ro in r_offsets]))
+                ret.append(Obs([list(di[:, i] + values[i]) for di in od['deltas']], od['names'], idl=od['idl']))
                 ret[-1]._value = values[i]
             else:
                 ret.append(Obs([], [], means=[]))
 
@@ -386,8 +383,7 @@ def _parse_json_dict(json_dict, verbose=True, full_output=False):
         taglist = o.get('tag', N * [None])
         for i in range(N):
             if od:
-                r_offsets = np.array([np.average(di[:, i]) for di in od['deltas']])
-                ret.append(Obs([od['deltas'][j][:, i] - r_offsets[j] for j in range(len(od['deltas']))], od['names'], idl=od['idl'], means=[ro + values[i] for ro in r_offsets]))
+                ret.append(Obs([di[:, i] + values[i] for di in od['deltas']], od['names'], idl=od['idl']))
                 ret[-1]._value = values[i]
             else:
                 ret.append(Obs([], [], means=[]))
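The develop side stores deltas relative to per-replica averages and hands the offsets to `Obs` via the `means` keyword instead of shifting every sample; a hedged sketch of that idea (toy numbers, `means` keyword as used in the develop-side calls above):

```python
import numpy as np
import pyerrors as pe

samples = np.array([0.98, 1.01, 1.03, 0.99])  # toy Monte Carlo samples
r_offset = np.average(samples)

# Deltas relative to the replica mean, with the mean supplied explicitly:
obs = pe.Obs([samples - r_offset], ['ensA'], idl=[range(1, 5)], means=[r_offset])
obs.gamma_method()
print(obs.value)  # reproduces the sample average
```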
pyerrors/input/openQCD.py

@@ -47,7 +47,7 @@ def read_rwms(path, prefix, version='2.0', names=None, **kwargs):
         Reweighting factors read
     """
     known_oqcd_versions = ['1.4', '1.6', '2.0']
-    if version not in known_oqcd_versions:
+    if not (version in known_oqcd_versions):
         raise Exception('Unknown openQCD version defined!')
     print("Working with openQCD version " + version)
     if 'postfix' in kwargs:
 
@@ -1286,9 +1286,7 @@ def read_ms5_xsf(path, prefix, qc, corr, sep="r", **kwargs):
                     imagsamples[repnum][t].append(corrres[1][t])
             if 'idl' in kwargs:
                 left_idl = list(left_idl)
-                if expected_idl[repnum] == left_idl:
-                    raise ValueError("None of the idls searched for were found in replikum of file " + file)
-                elif len(left_idl) > 0:
+                if len(left_idl) > 0:
                     warnings.warn('Could not find idls ' + str(left_idl) + ' in replikum of file ' + file, UserWarning)
             repnum += 1
         s = "Read correlator " + corr + " from " + str(repnum) + " replika with idls" + str(realsamples[0][t])
pyerrors/input/sfcf.py

@@ -4,13 +4,9 @@ import re
 import numpy as np  # Thinly-wrapped numpy
 from ..obs import Obs
 from .utils import sort_names, check_idl
-import itertools
-
-
-sep = "/"
-
 
-def read_sfcf(path, prefix, name, quarks='.*', corr_type="bi", noffset=0, wf=0, wf2=0, version="1.0c", cfg_separator="n", silent=False, **kwargs):
+def read_sfcf(path, prefix, name, quarks='.*', corr_type='bi', noffset=0, wf=0, wf2=0, version="1.0c", cfg_separator="n", silent=False, **kwargs):
     """Read sfcf files from given folder structure.
 
     Parameters
 
@@ -69,77 +65,6 @@ def read_sfcf(path, prefix, name, quarks='.*', corr_type="bi", noffset=0, wf=0,
         list of Observables with length T, observable per timeslice.
         bb-type correlators have length 1.
     """
-    ret = read_sfcf_multi(path, prefix, [name], quarks_list=[quarks], corr_type_list=[corr_type],
-                          noffset_list=[noffset], wf_list=[wf], wf2_list=[wf2], version=version,
-                          cfg_separator=cfg_separator, silent=silent, **kwargs)
-    return ret[name][quarks][str(noffset)][str(wf)][str(wf2)]
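On the develop side `read_sfcf` is a thin wrapper around `read_sfcf_multi`; a hedged sketch of the equivalence (path, prefix and correlator name are placeholders):

```python
import pyerrors.input.sfcf as sfcf

corr_single = sfcf.read_sfcf("/data/sfcf", "runA", "f_A", quarks="lquark lquark")
corr_multi = sfcf.read_sfcf_multi("/data/sfcf", "runA", ["f_A"],
                                  quarks_list=["lquark lquark"])["f_A"]["lquark lquark"]["0"]["0"]["0"]
# Both yield the same list of Obs, one per timeslice.
```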
-def read_sfcf_multi(path, prefix, name_list, quarks_list=['.*'], corr_type_list=['bi'], noffset_list=[0], wf_list=[0], wf2_list=[0], version="1.0c", cfg_separator="n", silent=False, keyed_out=False, **kwargs):
-    """Read sfcf files from given folder structure.
-
-    Parameters
-    ----------
-    path : str
-        Path to the sfcf files.
-    prefix : str
-        Prefix of the sfcf files.
-    name : str
-        Name of the correlation function to read.
-    quarks_list : list[str]
-        Label of the quarks used in the sfcf input file. e.g. "quark quark"
-        for version 0.0 this does NOT need to be given with the typical " - "
-        that is present in the output file,
-        this is done automatically for this version
-    corr_type_list : list[str]
-        Type of correlation function to read. Can be
-        - 'bi' for boundary-inner
-        - 'bb' for boundary-boundary
-        - 'bib' for boundary-inner-boundary
-    noffset_list : list[int]
-        Offset of the source (only relevant when wavefunctions are used)
-    wf_list : int
-        ID of wave function
-    wf2_list : list[int]
-        ID of the second wavefunction
-        (only relevant for boundary-to-boundary correlation functions)
-    im : bool
-        if True, read imaginary instead of real part
-        of the correlation function.
-    names : list
-        Alternative labeling for replicas/ensembles.
-        Has to have the appropriate length
-    ens_name : str
-        replaces the name of the ensemble
-    version: str
-        version of SFCF, with which the measurement was done.
-        if the compact output option (-c) was specified,
-        append a "c" to the version (e.g. "1.0c")
-        if the append output option (-a) was specified,
-        append an "a" to the version
-    cfg_separator : str
-        String that separates the ensemble identifier from the configuration number (default 'n').
-    replica: list
-        list of replica to be read, default is all
-    files: list[list[int]]
-        list of files to be read per replica, default is all.
-        for non-compact output format, hand the folders to be read here.
-    check_configs: list[list[int]]
-        list of list of supposed configs, eg. [range(1,1000)]
-        for one replicum with 1000 configs
-    rep_string: str
-        Separator of ensemble name and replicum. Example: In "ensAr0", "r" would be the separator string.
-    Returns
-    -------
-    result: dict[list[Obs]]
-        dict with one of the following properties:
-        if keyed_out:
-            dict[key] = list[Obs]
-            where key has the form name/quarks/offset/wf/wf2
-        if not keyed_out:
-            dict[name][quarks][offset][wf][wf2] = list[Obs]
-    """
 
     if kwargs.get('im'):
         im = 1
         part = 'imaginary'
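A hedged sketch of a `read_sfcf_multi` call and of the nested result layout described in the docstring above (location and names are placeholders):

```python
res = sfcf.read_sfcf_multi("/data/sfcf", "runA",  # placeholder location
                           name_list=["f_A", "f_P"],
                           quarks_list=["lquark lquark"],
                           corr_type_list=["bi", "bi"])
f_A = res["f_A"]["lquark lquark"]["0"]["0"]["0"]  # dict[name][quarks][offset][wf][wf2]
```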
@@ -147,6 +72,16 @@ def read_sfcf_multi(path, prefix, name_list, quarks_list=['.*'], corr_type_list=
         im = 0
         part = 'real'
 
+    if corr_type == 'bb':
+        b2b = True
+        single = True
+    elif corr_type == 'bib':
+        b2b = True
+        single = False
+    else:
+        b2b = False
+        single = False
+
     known_versions = ["0.0", "1.0", "2.0", "1.0c", "2.0c", "1.0a", "2.0a"]
 
     if version not in known_versions:
 
@@ -185,10 +120,8 @@ def read_sfcf_multi(path, prefix, name_list, quarks_list=['.*'], corr_type_list=
 
     else:
         replica = len([file.split(".")[-1] for file in ls]) // len(set([file.split(".")[-1] for file in ls]))
-    if replica == 0:
-        raise Exception('No replica found in directory')
     if not silent:
-        print('Read', part, 'part of', name_list, 'from', prefix[:-1], ',', replica, 'replica')
+        print('Read', part, 'part of', name, 'from', prefix[:-1], ',', replica, 'replica')
 
     if 'names' in kwargs:
         new_names = kwargs.get('names')
 
@@ -200,66 +133,17 @@ def read_sfcf_multi(path, prefix, name_list, quarks_list=['.*'], corr_type_list=
         else:
             ens_name = kwargs.get("ens_name")
             if not appended:
-                new_names = _get_rep_names(ls, ens_name, rep_sep=(kwargs.get('rep_string', 'r')))
+                new_names = _get_rep_names(ls, ens_name)
             else:
-                new_names = _get_appended_rep_names(ls, prefix, name_list[0], ens_name, rep_sep=(kwargs.get('rep_string', 'r')))
+                new_names = _get_appended_rep_names(ls, prefix, name, ens_name)
         new_names = sort_names(new_names)
 
     idl = []
 
-    noffset_list = [str(x) for x in noffset_list]
-    wf_list = [str(x) for x in wf_list]
-    wf2_list = [str(x) for x in wf2_list]
-
-    # setup dict structures
-    intern = {}
-    for name, corr_type in zip(name_list, corr_type_list):
-        intern[name] = {}
-        b2b, single = _extract_corr_type(corr_type)
-        intern[name]["b2b"] = b2b
-        intern[name]["single"] = single
-        intern[name]["spec"] = {}
-        for quarks in quarks_list:
-            intern[name]["spec"][quarks] = {}
-            for off in noffset_list:
-                intern[name]["spec"][quarks][off] = {}
-                for w in wf_list:
-                    intern[name]["spec"][quarks][off][w] = {}
-                    if b2b:
-                        for w2 in wf2_list:
-                            intern[name]["spec"][quarks][off][w][w2] = {}
-                            intern[name]["spec"][quarks][off][w][w2]["pattern"] = _make_pattern(version, name, off, w, w2, intern[name]['b2b'], quarks)
-                    else:
-                        intern[name]["spec"][quarks][off][w]["0"] = {}
-                        intern[name]["spec"][quarks][off][w]["0"]["pattern"] = _make_pattern(version, name, off, w, 0, intern[name]['b2b'], quarks)
-
-    internal_ret_dict = {}
-    needed_keys = []
-    for name, corr_type in zip(name_list, corr_type_list):
-        b2b, single = _extract_corr_type(corr_type)
-        if b2b:
-            needed_keys.extend(_lists2key([name], quarks_list, noffset_list, wf_list, wf2_list))
-        else:
-            needed_keys.extend(_lists2key([name], quarks_list, noffset_list, wf_list, ["0"]))
-
-    for key in needed_keys:
-        internal_ret_dict[key] = []
-
     if not appended:
         for i, item in enumerate(ls):
             rep_path = path + '/' + item
             if "files" in kwargs:
                 files = kwargs.get("files")
-                if isinstance(files, list):
-                    if all(isinstance(f, list) for f in files):
-                        files = files[i]
-                    elif all(isinstance(f, str) for f in files):
-                        files = files
-                    else:
-                        raise TypeError("files has to be of type list[list[str]] or list[str]!")
-                else:
-                    raise TypeError("files has to be of type list[list[str]] or list[str]!")
-
             else:
                 files = []
             sub_ls = _find_files(rep_path, prefix, compact, files)
 
@@ -272,7 +156,7 @@ def read_sfcf_multi(path, prefix, name_list, quarks_list=['.*'], corr_type_list=
                     else:
                         rep_idl.append(int(cfg[3:]))
                 except Exception:
-                    raise Exception("Couldn't parse idl from directory, problem with file " + cfg)
+                    raise Exception("Couldn't parse idl from directroy, problem with file " + cfg)
             rep_idl.sort()
             # maybe there is a better way to print the idls
             if not silent:
 
@@ -280,89 +164,67 @@ def read_sfcf_multi(path, prefix, name_list, quarks_list=['.*'], corr_type_list=
             idl.append(rep_idl)
             # here we have found all the files we need to look into.
             if i == 0:
-                if version != "0.0" and compact:
-                    file = path + '/' + item + '/' + sub_ls[0]
-                for name_index, name in enumerate(name_list):
-                    if version == "0.0" or not compact:
-                        file = path + '/' + item + '/' + sub_ls[0] + '/' + name
-                    if corr_type_list[name_index] == 'bi':
-                        name_keys = _lists2key(quarks_list, noffset_list, wf_list, ["0"])
-                    else:
-                        name_keys = _lists2key(quarks_list, noffset_list, wf_list, wf2_list)
-                    for key in name_keys:
-                        specs = _key2specs(key)
-                        quarks = specs[0]
-                        off = specs[1]
-                        w = specs[2]
-                        w2 = specs[3]
-                        # here, we want to find the place within the file,
-                        # where the correlator we need is stored.
-                        # to do so, the pattern needed is put together
-                        # from the input values
-                        start_read, T = _find_correlator(file, version, intern[name]["spec"][quarks][str(off)][str(w)][str(w2)]["pattern"], intern[name]['b2b'], silent=silent)
-                        intern[name]["spec"][quarks][str(off)][str(w)][str(w2)]["start"] = start_read
-                        intern[name]["T"] = T
+                if version == "0.0":
+                    file = path + '/' + item + '/' + sub_ls[0] + '/' + name
+                else:
+                    if compact:
+                        file = path + '/' + item + '/' + sub_ls[0]
+                    else:
+                        file = path + '/' + item + '/' + sub_ls[0] + '/' + name
+
+                pattern = _make_pattern(version, name, noffset, wf, wf2, b2b, quarks)
+                start_read, T = _find_correlator(file, version, pattern, b2b, silent=silent)
 
                 # preparing the datastructure
                 # the correlators get parsed into...
                 deltas = []
-                for j in range(intern[name]["T"]):
+                for j in range(T):
                     deltas.append([])
-                internal_ret_dict[sep.join([name, key])] = deltas
 
             if compact:
-                rep_deltas = _read_compact_rep(path, item, sub_ls, intern, needed_keys, im)
-                for key in needed_keys:
-                    name = _key2specs(key)[0]
-                    for t in range(intern[name]["T"]):
-                        internal_ret_dict[key][t].append(rep_deltas[key][t])
+                rep_deltas = _read_compact_rep(path, item, sub_ls, start_read, T, b2b, name, im)
+
+                for t in range(T):
+                    deltas[t].append(rep_deltas[t])
             else:
-                for key in needed_keys:
-                    rep_data = []
-                    name = _key2specs(key)[0]
-                    for subitem in sub_ls:
-                        cfg_path = path + '/' + item + '/' + subitem
-                        file_data = _read_o_file(cfg_path, name, needed_keys, intern, version, im)
-                        rep_data.append(file_data)
-                    for t in range(intern[name]["T"]):
-                        internal_ret_dict[key][t].append([])
-                        for cfg in range(no_cfg):
-                            internal_ret_dict[key][t][i].append(rep_data[cfg][key][t])
+                for t in range(T):
+                    deltas[t].append(np.zeros(no_cfg))
+                for cnfg, subitem in enumerate(sub_ls):
+                    with open(path + '/' + item + '/' + subitem + '/' + name) as fp:
+                        for k, line in enumerate(fp):
+                            if (k >= start_read and k < start_read + T):
+                                floats = list(map(float, line.split()))
+                                if version == "0.0":
+                                    deltas[k - start_read][i][cnfg] = floats[im - single]
+                                else:
+                                    deltas[k - start_read][i][cnfg] = floats[1 + im - single]
 
     else:
-        for key in needed_keys:
-            specs = _key2specs(key)
-            name = specs[0]
-            quarks = specs[1]
-            off = specs[2]
-            w = specs[3]
-            w2 = specs[4]
         if "files" in kwargs:
-            if isinstance(kwargs.get("files"), list) and all(isinstance(f, str) for f in kwargs.get("files")):
-                name_ls = kwargs.get("files")
-            else:
-                raise TypeError("In append mode, files has to be of type list[str]!")
-        else:
-            name_ls = ls
-        for exc in name_ls:
+            ls = kwargs.get("files")
+        for exc in ls:
             if not fnmatch.fnmatch(exc, prefix + '*.' + name):
-                name_ls = list(set(name_ls) - set([exc]))
-        name_ls = sort_names(name_ls)
-        pattern = intern[name]['spec'][quarks][off][w][w2]['pattern']
+                ls = list(set(ls) - set([exc]))
+        ls = sort_names(ls)
+        pattern = _make_pattern(version, name, noffset, wf, wf2, b2b, quarks)
         deltas = []
-        for rep, file in enumerate(name_ls):
+        for rep, file in enumerate(ls):
             rep_idl = []
             filename = path + '/' + file
-            T, rep_idl, rep_data = _read_append_rep(filename, pattern, intern[name]['b2b'], cfg_separator, im, intern[name]['single'])
+            T, rep_idl, rep_data = _read_append_rep(filename, pattern, b2b, cfg_separator, im, single)
             if rep == 0:
-                intern[name]['T'] = T
-                for t in range(intern[name]['T']):
+                for t in range(T):
                     deltas.append([])
-            for t in range(intern[name]['T']):
+            for t in range(T):
                 deltas[t].append(rep_data[t])
-            internal_ret_dict[key] = deltas
-            if name == name_list[0]:
                 idl.append(rep_idl)
 
-    if kwargs.get("check_configs") is True:
+    if "check_configs" in kwargs:
         if not silent:
             print("Checking for missing configs...")
         che = kwargs.get("check_configs")
 
@@ -374,91 +236,10 @@ def read_sfcf_multi(path, prefix, name_list, quarks_list=['.*'], corr_type_list=
                 check_idl(idl[r], che[r])
         if not silent:
             print("Done")
 
-    result_dict = {}
-    if keyed_out:
-        for key in needed_keys:
-            name = _key2specs(key)[0]
-            result = []
-            for t in range(intern[name]["T"]):
-                result.append(Obs(internal_ret_dict[key][t], new_names, idl=idl))
-            result_dict[key] = result
-    else:
-        for name, corr_type in zip(name_list, corr_type_list):
-            result_dict[name] = {}
-            for quarks in quarks_list:
-                result_dict[name][quarks] = {}
-                for off in noffset_list:
-                    result_dict[name][quarks][off] = {}
-                    for w in wf_list:
-                        result_dict[name][quarks][off][w] = {}
-                        if corr_type != 'bi':
-                            for w2 in wf2_list:
-                                key = _specs2key(name, quarks, off, w, w2)
-                                result = []
-                                for t in range(intern[name]["T"]):
-                                    result.append(Obs(internal_ret_dict[key][t], new_names, idl=idl))
-                                result_dict[name][quarks][str(off)][str(w)][str(w2)] = result
-                        else:
-                            key = _specs2key(name, quarks, off, w, "0")
-                            result = []
-                            for t in range(intern[name]["T"]):
-                                result.append(Obs(internal_ret_dict[key][t], new_names, idl=idl))
-                            result_dict[name][quarks][str(off)][str(w)][str(0)] = result
-    return result_dict
-
-
-def _lists2key(*lists):
-    keys = []
-    for tup in itertools.product(*lists):
-        keys.append(sep.join(tup))
-    return keys
-
-
-def _key2specs(key):
-    return key.split(sep)
-
-
-def _specs2key(*specs):
-    return sep.join(specs)
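The removed helpers encode the lookup keys as slash-separated strings; a quick sketch of the round trip they implement:

```python
import itertools

sep = "/"

def lists2key(*lists):  # mirrors the removed _lists2key
    return [sep.join(tup) for tup in itertools.product(*lists)]

keys = lists2key(["f_A"], ["lquark lquark"], ["0"], ["0"], ["0"])
print(keys)                # ['f_A/lquark lquark/0/0/0']
print(keys[0].split(sep))  # _key2specs: back to name, quarks, offset, wf, wf2
```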
||||
def _read_o_file(cfg_path, name, needed_keys, intern, version, im):
|
||||
return_vals = {}
|
||||
for key in needed_keys:
|
||||
file = cfg_path + '/' + name
|
||||
specs = _key2specs(key)
|
||||
if specs[0] == name:
|
||||
with open(file) as fp:
|
||||
lines = fp.readlines()
|
||||
quarks = specs[1]
|
||||
off = specs[2]
|
||||
w = specs[3]
|
||||
w2 = specs[4]
|
||||
T = intern[name]["T"]
|
||||
start_read = intern[name]["spec"][quarks][off][w][w2]["start"]
|
||||
deltas = []
|
||||
for line in lines[start_read:start_read + T]:
|
||||
floats = list(map(float, line.split()))
|
||||
if version == "0.0":
|
||||
deltas.append(floats[im - intern[name]["single"]])
|
||||
else:
|
||||
deltas.append(floats[1 + im - intern[name]["single"]])
|
||||
return_vals[key] = deltas
|
||||
return return_vals
|
||||
|
||||
|
||||
def _extract_corr_type(corr_type):
|
||||
if corr_type == 'bb':
|
||||
b2b = True
|
||||
single = True
|
||||
elif corr_type == 'bib':
|
||||
b2b = True
|
||||
single = False
|
||||
else:
|
||||
b2b = False
|
||||
single = False
|
||||
return b2b, single
|
||||
for t in range(T):
|
||||
result.append(Obs(deltas[t], new_names, idl=idl))
|
||||
return result
|
||||
|
||||
|
||||
def _find_files(rep_path, prefix, compact, files=[]):
|
||||
|
@ -505,9 +286,9 @@ def _make_pattern(version, name, noffset, wf, wf2, b2b, quarks):
|
|||
def _find_correlator(file_name, version, pattern, b2b, silent=False):
|
||||
T = 0
|
||||
|
||||
with open(file_name, "r") as my_file:
|
||||
file = open(file_name, "r")
|
||||
|
||||
content = my_file.read()
|
||||
content = file.read()
|
||||
match = re.search(pattern, content)
|
||||
if match:
|
||||
if version == "0.0":
|
||||
|
@ -523,62 +304,45 @@ def _find_correlator(file_name, version, pattern, b2b, silent=False):
|
|||
print(T, 'entries, starting to read in line', start_read)
|
||||
|
||||
else:
|
||||
file.close()
|
||||
raise ValueError('Correlator with pattern\n' + pattern + '\nnot found.')
|
||||
|
||||
file.close()
|
||||
return start_read, T
|
||||
|
||||
|
||||
def _read_compact_file(rep_path, cfg_file, intern, needed_keys, im):
|
||||
return_vals = {}
|
||||
with open(rep_path + cfg_file) as fp:
|
||||
def _read_compact_file(rep_path, config_file, start_read, T, b2b, name, im):
|
||||
with open(rep_path + config_file) as fp:
|
||||
lines = fp.readlines()
|
||||
for key in needed_keys:
|
||||
keys = _key2specs(key)
|
||||
name = keys[0]
|
||||
quarks = keys[1]
|
||||
off = keys[2]
|
||||
w = keys[3]
|
||||
w2 = keys[4]
|
||||
|
||||
T = intern[name]["T"]
|
||||
start_read = intern[name]["spec"][quarks][off][w][w2]["start"]
|
||||
# check, if the correlator is in fact
|
||||
# printed completely
|
||||
if (start_read + T + 1 > len(lines)):
|
||||
raise Exception("EOF before end of correlator data! Maybe " + rep_path + cfg_file + " is corrupted?")
|
||||
raise Exception("EOF before end of correlator data! Maybe " + rep_path + config_file + " is corrupted?")
|
||||
corr_lines = lines[start_read - 6: start_read + T]
|
||||
del lines
|
||||
t_vals = []
|
||||
|
||||
if corr_lines[1 - intern[name]["b2b"]].strip() != 'name ' + name:
|
||||
raise Exception('Wrong format in file', cfg_file)
|
||||
if corr_lines[1 - b2b].strip() != 'name ' + name:
|
||||
raise Exception('Wrong format in file', config_file)
|
||||
|
||||
for k in range(6, T + 6):
|
||||
floats = list(map(float, corr_lines[k].split()))
|
||||
t_vals.append(floats[-2:][im])
|
||||
return_vals[key] = t_vals
|
||||
return return_vals
|
||||
return t_vals
|
||||
|
||||
|
||||
def _read_compact_rep(path, rep, sub_ls, intern, needed_keys, im):
|
||||
def _read_compact_rep(path, rep, sub_ls, start_read, T, b2b, name, im):
|
||||
rep_path = path + '/' + rep + '/'
|
||||
no_cfg = len(sub_ls)
|
||||
|
||||
return_vals = {}
|
||||
for key in needed_keys:
|
||||
name = _key2specs(key)[0]
|
||||
deltas = []
|
||||
for t in range(intern[name]["T"]):
|
||||
for t in range(T):
|
||||
deltas.append(np.zeros(no_cfg))
|
||||
return_vals[key] = deltas
|
||||
|
||||
for cfg in range(no_cfg):
|
||||
cfg_file = sub_ls[cfg]
|
||||
cfg_data = _read_compact_file(rep_path, cfg_file, intern, needed_keys, im)
|
||||
for key in needed_keys:
|
||||
name = _key2specs(key)[0]
|
||||
for t in range(intern[name]["T"]):
|
||||
return_vals[key][t][cfg] = cfg_data[key][t]
|
||||
return return_vals
|
||||
cfg_data = _read_compact_file(rep_path, cfg_file, start_read, T, b2b, name, im)
|
||||
for t in range(T):
|
||||
deltas[t][cfg] = cfg_data[t]
|
||||
return deltas
|
||||
|
||||
|
||||
def _read_chunk(chunk, gauge_line, cfg_sep, start_read, T, corr_line, b2b, pattern, im, single):
|
||||
|
@@ -647,22 +411,22 @@ def _read_append_rep(filename, pattern, b2b, cfg_separator, im, single):
    return T, rep_idl, data


-def _get_rep_names(ls, ens_name=None, rep_sep='r'):
+def _get_rep_names(ls, ens_name=None):
    new_names = []
    for entry in ls:
        try:
-            idx = entry.index(rep_sep)
+            idx = entry.index('r')
        except Exception:
            raise Exception("Automatic recognition of replicum failed, please enter the key word 'names'.")

        if ens_name:
-            new_names.append(ens_name + '|' + entry[idx:])
+            new_names.append('ens_name' + '|' + entry[idx:])
        else:
            new_names.append(entry[:idx] + '|' + entry[idx:])
    return new_names


-def _get_appended_rep_names(ls, prefix, name, ens_name=None, rep_sep='r'):
+def _get_appended_rep_names(ls, prefix, name, ens_name=None):
    new_names = []
    for exc in ls:
        if not fnmatch.fnmatch(exc, prefix + '*.' + name):

@@ -671,12 +435,12 @@ def _get_appended_rep_names(ls, prefix, name, ens_name=None, rep_sep='r'):
    for entry in ls:
        myentry = entry[:-len(name) - 1]
        try:
-            idx = myentry.index(rep_sep)
+            idx = myentry.index('r')
        except Exception:
            raise Exception("Automatic recognition of replicum failed, please enter the key word 'names'.")

        if ens_name:
-            new_names.append(ens_name + '|' + entry[idx:])
+            new_names.append('ens_name' + '|' + entry[idx:])
        else:
            new_names.append(myentry[:idx] + '|' + myentry[idx:])
    return new_names
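For orientation, the replica-name logic above maps raw file or folder names such as "myens_r0" onto pyerrors' "ensemble|replicum" convention. A minimal standalone sketch of that splitting (split_rep_name is a hypothetical helper written for illustration, not part of the module):

```python
def split_rep_name(entry, ens_name=None, rep_sep='r'):
    # Find the first occurrence of the replica separator, e.g. the 'r' in "myens_r0".
    idx = entry.index(rep_sep)
    # Keep the prefix as the ensemble name unless an explicit one is given.
    base = ens_name if ens_name else entry[:idx]
    return base + '|' + entry[idx:]


print(split_rep_name("myens_r0"))          # myens_|r0
print(split_rep_name("myens_r0", "A653"))  # A653|r0
```

Note that the develop side appends the actual ens_name argument where the v2.8.2 side appended the literal string 'ens_name'.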
@@ -1,8 +1,5 @@
-"""Utilities for the input"""
-
-import re
-import fnmatch
-import os
+"""Utilities for the input"""


def sort_names(ll):

@@ -20,7 +17,6 @@ def sort_names(ll):
    ll: list
        sorted list
    """

    if len(ll) > 1:
        sorted = False
        r_pattern = r'r(\d+)'

@@ -67,7 +63,6 @@ def check_idl(idl, che):
    miss_str : str
        string with integers of which idls are missing
    """

    missing = []
    for c in che:
        if c not in idl:

@@ -80,65 +75,3 @@ def check_idl(idl, che):
            miss_str += "," + str(i)
        print(miss_str)
    return miss_str

-def check_params(path, param_hash, prefix, param_prefix="parameters_"):
-    """
-    Check if, for sfcf, the parameter hashes at the end of the parameter files are in fact the expected one.
-
-    Parameters
-    ----------
-    path: str
-        measurement path, same as for sfcf read method
-    param_hash: str
-        expected parameter hash
-    prefix: str
-        data prefix to find the appropriate replicum folders in path
-    param_prefix: str
-        prefix of the parameter file. Defaults to 'parameters_'
-
-    Returns
-    -------
-    nums: dict
-        dictionary of faulty parameter files sorted by the replica paths
-    """
-
-    ls = []
-    for (dirpath, dirnames, filenames) in os.walk(path):
-        ls.extend(dirnames)
-        break
-    if not ls:
-        raise Exception('Error, directory not found')
-    # Exclude folders with different names
-    for exc in ls:
-        if not fnmatch.fnmatch(exc, prefix + '*'):
-            ls = list(set(ls) - set([exc]))
-
-    ls = sort_names(ls)
-    nums = {}
-    for rep in ls:
-        rep_path = path + '/' + rep
-        # files of replicum
-        sub_ls = []
-        for (dirpath, dirnames, filenames) in os.walk(rep_path):
-            sub_ls.extend(filenames)
-
-        # filter
-        param_files = []
-        for file in sub_ls:
-            if fnmatch.fnmatch(file, param_prefix + '*'):
-                param_files.append(file)
-
-        rep_nums = ''
-        for file in param_files:
-            with open(rep_path + '/' + file) as fp:
-                for line in fp:
-                    pass
-                last_line = line
-                if last_line.split()[2] != param_hash:
-                    rep_nums += file.split("_")[1] + ','
-        nums[rep_path] = rep_nums
-
-        if not len(rep_nums) == 0:
-            raise Warning("found differing parameter hash in the param files in " + rep_path)
-    return nums
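A brief usage sketch for the check_params utility above; the measurement path, prefix, and hash value are made-up placeholders:

```python
import pyerrors as pe

# check_params walks the replica folders below the given path, reads the
# trailing hash of every parameters_* file and raises a Warning as soon as
# a replicum contains a file whose hash differs from the expected one.
nums = pe.input.utils.check_params("/data/sfcf_run", "1a2b3c4d", prefix="data")
print(nums)  # empty strings per replica path if all hashes matched
```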
@@ -1,87 +0,0 @@
-import numpy as np
-from .obs import derived_observable, Obs
-from autograd import jacobian
-from scipy.integrate import quad as squad
-
-
-def quad(func, p, a, b, **kwargs):
-    '''Performs a (one-dimensional) numeric integration of f(p, x) from a to b.
-
-    The integration is performed using scipy.integrate.quad().
-    All parameters that can be passed to scipy.integrate.quad may also be passed to this function.
-    The output is the same as for scipy.integrate.quad, the first element being an Obs.
-
-    Parameters
-    ----------
-    func : object
-        function to integrate, has to be of the form
-
-        ```python
-        import autograd.numpy as anp
-
-        def func(p, x):
-            return p[0] + p[1] * x + p[2] * anp.sinh(x)
-        ```
-        where x is the integration variable.
-    p : list of floats or Obs
-        parameters of the function func.
-    a: float or Obs
-        Lower limit of integration (use -numpy.inf for -infinity).
-    b: float or Obs
-        Upper limit of integration (use -numpy.inf for -infinity).
-    All parameters of scipy.integrate.quad
-
-    Returns
-    -------
-    y : Obs
-        The integral of func from `a` to `b`.
-    abserr : float
-        An estimate of the absolute error in the result.
-    infodict : dict
-        A dictionary containing additional information.
-        Run scipy.integrate.quad_explain() for more information.
-    message
-        A convergence message.
-    explain
-        Appended only with 'cos' or 'sin' weighting and infinite
-        integration limits, it contains an explanation of the codes in
-        infodict['ierlst']
-    '''
-
-    Np = len(p)
-    isobs = [True if isinstance(pi, Obs) else False for pi in p]
-    pval = np.array([p[i].value if isobs[i] else p[i] for i in range(Np)],)
-    pobs = [p[i] for i in range(Np) if isobs[i]]
-
-    bounds = [a, b]
-    isobs_b = [True if isinstance(bi, Obs) else False for bi in bounds]
-    bval = np.array([bounds[i].value if isobs_b[i] else bounds[i] for i in range(2)])
-    bobs = [bounds[i] for i in range(2) if isobs_b[i]]
-    bsign = [-1, 1]
-
-    ifunc = np.vectorize(lambda x: func(pval, x))
-
-    intpars = squad.__code__.co_varnames[3:3 + len(squad.__defaults__)]
-    ikwargs = {k: kwargs[k] for k in intpars if k in kwargs}
-
-    integration_result = squad(ifunc, bval[0], bval[1], **ikwargs)
-    val = integration_result[0]
-
-    jac = jacobian(func)
-
-    derivint = []
-    for i in range(Np):
-        if isobs[i]:
-            ifunc = np.vectorize(lambda x: jac(pval, x)[i])
-            derivint.append(squad(ifunc, bounds[0], bounds[1], **ikwargs)[0])
-
-    for i in range(2):
-        if isobs_b[i]:
-            derivint.append(bsign[i] * func(pval, bval[i]))
-
-    if len(derivint) == 0:
-        return integration_result
-
-    res = derived_observable(lambda x, **kwargs: 0 * (x[0] + np.finfo(np.float64).eps) * (pval[0] + np.finfo(np.float64).eps) + val, pobs + bobs, man_grad=derivint)
-
-    return (res, *integration_result[1:])
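Since the docstring above spells out the required integrand signature, a minimal usage sketch of this quad wrapper (available as pyerrors.integrate.quad on the develop branch; the observables and integrand below are made up for illustration) could look as follows:

```python
import autograd.numpy as anp  # autograd-wrapped numpy keeps the integrand differentiable
import pyerrors as pe


# Integrand in the form required above: f(p, x) with parameter vector p.
def f(p, x):
    return p[0] + p[1] * anp.sin(x)


# Parameters and the upper limit carry uncertainties (illustrative values).
p = [pe.pseudo_Obs(2.0, 0.1, 'ens'), pe.pseudo_Obs(0.5, 0.02, 'ens')]
b = pe.pseudo_Obs(1.0, 0.05, 'ens')

res, abserr = pe.integrate.quad(f, p, 0, b)
res.gamma_method()  # run the error analysis on the resulting Obs
print(res, abserr)
```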
@@ -271,12 +271,6 @@ def eig(obs, **kwargs):
    return w


-def eigv(obs, **kwargs):
-    """Computes the eigenvectors of a given hermitian matrix of Obs according to np.linalg.eigh."""
-    v = derived_observable(lambda x, **kwargs: anp.linalg.eigh(x)[1], obs)
-    return v
-
-
def pinv(obs, **kwargs):
    """Computes the Moore-Penrose pseudoinverse of a matrix of Obs."""
    return derived_observable(lambda x, **kwargs: anp.linalg.pinv(x), obs)

@@ -20,7 +20,7 @@ def print_config():
            "pandas": pd.__version__}

    for key, value in config.items():
-        print(f"{key: <10}\t {value}")
+        print(f"{key : <10}\t {value}")


def errorbar(x, y, axes=plt, **kwargs):

293 pyerrors/obs.py
@@ -3,7 +3,6 @@ import hashlib
import pickle
import numpy as np
import autograd.numpy as anp  # Thinly-wrapped numpy
-import scipy
from autograd import jacobian
import matplotlib.pyplot as plt
from scipy.stats import skew, skewtest, kurtosis, kurtosistest

@@ -82,8 +81,6 @@
                raise ValueError('Names are not unique.')
            if not all(isinstance(x, str) for x in names):
                raise TypeError('All names have to be strings.')
-            if len(set([o.split('|')[0] for o in names])) > 1:
-                raise ValueError('Cannot initialize Obs based on multiple ensembles. Please average separate Obs from each ensemble.')
        else:
            if not isinstance(names[0], str):
                raise TypeError('All names have to be strings.')

@@ -106,9 +103,7 @@
                elif isinstance(idx, (list, np.ndarray)):
                    dc = np.unique(np.diff(idx))
                    if np.any(dc < 0):
-                        raise ValueError("Unsorted idx for idl[%s] at position %s" % (name, ' '.join(['%s' % (pos + 1) for pos in np.where(np.diff(idx) < 0)[0]])))
-                    elif np.any(dc == 0):
-                        raise ValueError("Duplicate entries in idx for idl[%s] at position %s" % (name, ' '.join(['%s' % (pos + 1) for pos in np.where(np.diff(idx) == 0)[0]])))
+                        raise ValueError("Unsorted idx for idl[%s]" % (name))
                    if len(dc) == 1:
                        self.idl[name] = range(idx[0], idx[-1] + dc[0], dc[0])
                    else:

@@ -224,7 +219,7 @@
                tmp = kwargs.get(kwarg_name)
                if isinstance(tmp, (int, float)):
                    if tmp < 0:
-                        raise ValueError(kwarg_name + ' has to be larger or equal to 0.')
+                        raise Exception(kwarg_name + ' has to be larger or equal to 0.')
                    for e, e_name in enumerate(self.e_names):
                        getattr(self, kwarg_name)[e_name] = tmp
                else:

@@ -293,7 +288,7 @@
                texp = self.tau_exp[e_name]
                # Critical slowing down analysis
                if w_max // 2 <= 1:
-                    raise ValueError("Need at least 8 samples for tau_exp error analysis")
+                    raise Exception("Need at least 8 samples for tau_exp error analysis")
                for n in range(1, w_max // 2):
                    _compute_drho(n + 1)
                    if (self.e_rho[e_name][n] - self.N_sigma[e_name] * self.e_drho[e_name][n]) < 0 or n >= w_max // 2 - 2:

@@ -622,7 +617,7 @@
        if not hasattr(self, 'e_dvalue'):
            raise Exception('Run the gamma method first.')
        if np.isclose(0.0, self._dvalue, atol=1e-15):
-            raise ValueError('Error is 0.0')
+            raise Exception('Error is 0.0')
        labels = self.e_names
        sizes = [self.e_dvalue[name] ** 2 for name in labels] / self._dvalue ** 2
        fig1, ax1 = plt.subplots()

@@ -632,7 +627,7 @@
        if save:
            fig1.savefig(save)

-        return dict(zip(labels, sizes))
+        return dict(zip(self.e_names, sizes))

    def dump(self, filename, datatype="json.gz", description="", **kwargs):
        """Dump the Obs to a file 'name' of chosen format.

@@ -661,7 +656,7 @@
            with open(file_name + '.p', 'wb') as fb:
                pickle.dump(self, fb)
        else:
-            raise TypeError("Unknown datatype " + str(datatype))
+            raise Exception("Unknown datatype " + str(datatype))

    def export_jackknife(self):
        """Export jackknife samples from the Obs

@@ -678,7 +673,7 @@
        """

        if len(self.names) != 1:
-            raise ValueError("'export_jackknife' is only implemented for Obs defined on one ensemble and replicum.")
+            raise Exception("'export_jackknife' is only implemented for Obs defined on one ensemble and replicum.")

        name = self.names[0]
        full_data = self.deltas[name] + self.r_values[name]

@@ -689,49 +684,6 @@
        tmp_jacks[1:] = (n * mean - full_data) / (n - 1)
        return tmp_jacks

-    def export_bootstrap(self, samples=500, random_numbers=None, save_rng=None):
-        """Export bootstrap samples from the Obs
-
-        Parameters
-        ----------
-        samples : int
-            Number of bootstrap samples to generate.
-        random_numbers : np.ndarray
-            Array of shape (samples, length) containing the random numbers to generate the bootstrap samples.
-            If not provided the bootstrap samples are generated based on the md5 hash of the ensemble name.
-        save_rng : str
-            Save the random numbers to a file if a path is specified.
-
-        Returns
-        -------
-        numpy.ndarray
-            Returns a numpy array of length N + 1 where N is the number of samples
-            for the given ensemble and replicum. The zeroth entry of the array contains
-            the mean value of the Obs, entries 1 to N contain the N import_bootstrap samples
-            derived from the Obs. The current implementation only works for observables
-            defined on exactly one ensemble and replicum. The derived bootstrap samples
-            should agree with samples from a full bootstrap analysis up to O(1/N).
-        """
-        if len(self.names) != 1:
-            raise ValueError("'export_bootstrap' is only implemented for Obs defined on one ensemble and replicum.")
-
-        name = self.names[0]
-        length = self.N
-
-        if random_numbers is None:
-            seed = int(hashlib.md5(name.encode()).hexdigest(), 16) & 0xFFFFFFFF
-            rng = np.random.default_rng(seed)
-            random_numbers = rng.integers(0, length, size=(samples, length))
-
-        if save_rng is not None:
-            np.savetxt(save_rng, random_numbers, fmt='%i')
-
-        proj = np.vstack([np.bincount(o, minlength=length) for o in random_numbers]) / length
-        ret = np.zeros(samples + 1)
-        ret[0] = self.value
-        ret[1:] = proj @ (self.deltas[name] + self.r_values[name])
-        return ret
-
    def __float__(self):
        return float(self.value)

@@ -777,10 +729,11 @@
        return self.value >= other

    def __eq__(self, other):
-        if other is None:
-            return False
        return (self - other).is_zero()

+    def __ne__(self, other):
+        return not (self - other).is_zero()
+
    # Overload math operations
    def __add__(self, y):
        if isinstance(y, Obs):

@@ -788,8 +741,6 @@
        else:
            if isinstance(y, np.ndarray):
                return np.array([self + o for o in y])
-            elif isinstance(y, complex):
-                return CObs(self, 0) + y
            elif y.__class__.__name__ in ['Corr', 'CObs']:
                return NotImplemented
            else:

@@ -858,12 +809,15 @@

    def __pow__(self, y):
        if isinstance(y, Obs):
-            return derived_observable(lambda x, **kwargs: x[0] ** x[1], [self, y], man_grad=[y.value * self.value ** (y.value - 1), self.value ** y.value * np.log(self.value)])
+            return derived_observable(lambda x: x[0] ** x[1], [self, y])
        else:
-            return derived_observable(lambda x, **kwargs: x[0] ** y, [self], man_grad=[y * self.value ** (y - 1)])
+            return derived_observable(lambda x: x[0] ** y, [self])

    def __rpow__(self, y):
-        return derived_observable(lambda x, **kwargs: y ** x[0], [self], man_grad=[y ** self.value * np.log(y)])
+        if isinstance(y, Obs):
+            return derived_observable(lambda x: x[0] ** x[1], [y, self])
+        else:
+            return derived_observable(lambda x: y ** x[0], [self])

    def __abs__(self):
        return derived_observable(lambda x: anp.abs(x[0]), [self])

@@ -1025,25 +979,6 @@
    def __repr__(self):
        return 'CObs[' + str(self) + ']'

-    def __format__(self, format_type):
-        if format_type == "":
-            significance = 2
-            format_type = "2"
-        else:
-            significance = int(float(format_type.replace("+", "").replace("-", "")))
-        return f"({self.real:{format_type}}{self.imag:+{significance}}j)"
-
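A brief usage sketch of the CObs.__format__ method removed above (present on the develop branch; the pseudo-observable values are illustrative):

```python
import pyerrors as pe

re = pe.pseudo_Obs(1.2345, 0.0123, 'ens')
im = pe.pseudo_Obs(0.5432, 0.0321, 'ens')
cobs = pe.CObs(re, im)

print(f"{cobs}")    # default: two significant digits on the error
print(f"{cobs:3}")  # three significant digits
```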
-def gamma_method(x, **kwargs):
-    """Vectorized version of the gamma_method applicable to lists or arrays of Obs.
-
-    See docstring of pe.Obs.gamma_method for details.
-    """
-    return np.vectorize(lambda o: o.gm(**kwargs))(x)
-
-
-gm = gamma_method
-

def _format_uncertainty(value, dvalue, significance=2):
    """Creates a string of a value and its error in parenthesis notation, e.g., 13.02(45)"""

@@ -1137,7 +1072,7 @@ def _intersection_idx(idl):
    return idinter


-def _expand_deltas_for_merge(deltas, idx, shape, new_idx, scalefactor):
+def _expand_deltas_for_merge(deltas, idx, shape, new_idx):
    """Expand deltas defined on idx to the list of configs that is defined by new_idx.
    New, empty entries are filled by 0. If idx and new_idx are of type range, the smallest
    common divisor of the step sizes is used as new step size.

@@ -1153,20 +1088,15 @@ def _expand_deltas_for_merge(deltas, idx, shape, new_idx, scalefactor):
        Number of configs in idx.
    new_idx : list
        List of configs that defines the new range, has to be sorted in ascending order.
-    scalefactor : float
-        An additional scaling factor that can be applied to scale the fluctuations,
-        e.g., when Obs with differing numbers of replica are merged.
    """

    if type(idx) is range and type(new_idx) is range:
        if idx == new_idx:
-            if scalefactor == 1:
-                return deltas
-            else:
-                return deltas * scalefactor
+            return deltas
    ret = np.zeros(new_idx[-1] - new_idx[0] + 1)
    for i in range(shape):
        ret[idx[i] - new_idx[0]] = deltas[i]
-    return np.array([ret[new_idx[i] - new_idx[0]] for i in range(len(new_idx))]) * len(new_idx) / len(idx) * scalefactor
+    return np.array([ret[new_idx[i] - new_idx[0]] for i in range(len(new_idx))]) * len(new_idx) / len(idx)
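To make the expansion step above concrete, here is a small numpy-only sketch with toy values (not library code); it mirrors the zero-filling and the len(new_idx)/len(idx) normalisation:

```python
import numpy as np

# Fluctuations known on configs idx, to be embedded into the union list new_idx.
deltas = np.array([0.1, -0.2, 0.3])
idx = [1, 3, 5]
new_idx = [1, 2, 3, 4, 5]

ret = np.zeros(new_idx[-1] - new_idx[0] + 1)
for i, d in enumerate(deltas):
    ret[idx[i] - new_idx[0]] = d  # configs missing from idx stay 0

# Rescale so the per-config normalisation matches the new index list.
expanded = ret * len(new_idx) / len(idx)
print(expanded)  # [ 0.16666667  0.         -0.33333333  0.          0.5       ]
```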
def derived_observable(func, data, array_mode=False, **kwargs):

@@ -1247,29 +1177,10 @@ def derived_observable(func, data, array_mode=False, **kwargs):
            new_r_values[name] = func(tmp_values, **kwargs)
        new_idl_d[name] = _merge_idx(idl)

-    def _compute_scalefactor_missing_rep(obs):
-        """
-        Computes the scale factor that is to be multiplied with the deltas
-        in the case where Obs with different subsets of replica are merged.
-        Returns a dictionary with the scale factor for each Monte Carlo name.
-
-        Parameters
-        ----------
-        obs : Obs
-            The observable corresponding to the deltas that are to be scaled
-        """
-        scalef_d = {}
-        for mc_name in obs.mc_names:
-            mc_idl_d = [name for name in obs.idl if name.startswith(mc_name + '|')]
-            new_mc_idl_d = [name for name in new_idl_d if name.startswith(mc_name + '|')]
-            if len(mc_idl_d) > 0 and len(mc_idl_d) < len(new_mc_idl_d):
-                scalef_d[mc_name] = sum([len(new_idl_d[name]) for name in new_mc_idl_d]) / sum([len(new_idl_d[name]) for name in mc_idl_d])
-        return scalef_d
-
    if 'man_grad' in kwargs:
        deriv = np.asarray(kwargs.get('man_grad'))
        if new_values.shape + data.shape != deriv.shape:
-            raise ValueError('Manual derivative does not have correct shape.')
+            raise Exception('Manual derivative does not have correct shape.')
    elif kwargs.get('num_grad') is True:
        if multi > 0:
            raise Exception('Multi mode currently not supported for numerical derivative')

@@ -1303,7 +1214,7 @@ def derived_observable(func, data, array_mode=False, **kwargs):
            d_extracted[name] = []
            ens_length = len(new_idl_d[name])
            for i_dat, dat in enumerate(data):
-                d_extracted[name].append(np.array([_expand_deltas_for_merge(o.deltas.get(name, np.zeros(ens_length)), o.idl.get(name, new_idl_d[name]), o.shape.get(name, ens_length), new_idl_d[name], _compute_scalefactor_missing_rep(o).get(name.split('|')[0], 1)) for o in dat.reshape(np.prod(dat.shape))]).reshape(dat.shape + (ens_length, )))
+                d_extracted[name].append(np.array([_expand_deltas_for_merge(o.deltas.get(name, np.zeros(ens_length)), o.idl.get(name, new_idl_d[name]), o.shape.get(name, ens_length), new_idl_d[name]) for o in dat.reshape(np.prod(dat.shape))]).reshape(dat.shape + (ens_length, )))
        for name in new_cov_names:
            g_extracted[name] = []
            zero_grad = _Zero_grad(new_covobs_lengths[name])

@@ -1325,17 +1236,16 @@ def derived_observable(func, data, array_mode=False, **kwargs):
                        new_grad[name] += np.tensordot(deriv[i_val + (i_dat, )], dat)
        else:
            for j_obs, obs in np.ndenumerate(data):
-                scalef_d = _compute_scalefactor_missing_rep(obs)
                for name in obs.names:
                    if name in obs.cov_names:
                        new_grad[name] = new_grad.get(name, 0) + deriv[i_val + j_obs] * obs.covobs[name].grad
                    else:
-                        new_deltas[name] = new_deltas.get(name, 0) + deriv[i_val + j_obs] * _expand_deltas_for_merge(obs.deltas[name], obs.idl[name], obs.shape[name], new_idl_d[name], scalef_d.get(name.split('|')[0], 1))
+                        new_deltas[name] = new_deltas.get(name, 0) + deriv[i_val + j_obs] * _expand_deltas_for_merge(obs.deltas[name], obs.idl[name], obs.shape[name], new_idl_d[name])

        new_covobs = {name: Covobs(0, allcov[name], name, grad=new_grad[name]) for name in new_grad}

        if not set(new_covobs.keys()).isdisjoint(new_deltas.keys()):
-            raise ValueError('The same name has been used for deltas and covobs!')
+            raise Exception('The same name has been used for deltas and covobs!')
        new_samples = []
        new_means = []
        new_idl = []

@@ -1376,7 +1286,7 @@ def _reduce_deltas(deltas, idx_old, idx_new):
        Has to be a subset of idx_old.
    """
    if not len(deltas) == len(idx_old):
-        raise ValueError('Length of deltas and idx_old have to be the same: %d != %d' % (len(deltas), len(idx_old)))
+        raise Exception('Length of deltas and idx_old have to be the same: %d != %d' % (len(deltas), len(idx_old)))
    if type(idx_old) is range and type(idx_new) is range:
        if idx_old == idx_new:
            return deltas

@@ -1384,7 +1294,7 @@ def _reduce_deltas(deltas, idx_old, idx_new):
        return deltas
    indices = np.intersect1d(idx_old, idx_new, assume_unique=True, return_indices=True)[1]
    if len(indices) < len(idx_new):
-        raise ValueError('Error in _reduce_deltas: Config of idx_new not in idx_old')
+        raise Exception('Error in _reduce_deltas: Config of idx_new not in idx_old')
    return np.array(deltas)[indices]


@@ -1406,14 +1316,12 @@ def reweight(weight, obs, **kwargs):
    result = []
    for i in range(len(obs)):
        if len(obs[i].cov_names):
-            raise ValueError('Error: Not possible to reweight an Obs that contains covobs!')
+            raise Exception('Error: Not possible to reweight an Obs that contains covobs!')
        if not set(obs[i].names).issubset(weight.names):
-            raise ValueError('Error: Ensembles do not fit')
-        if len(obs[i].mc_names) > 1 or len(weight.mc_names) > 1:
-            raise ValueError('Error: Cannot reweight an Obs that contains multiple ensembles.')
+            raise Exception('Error: Ensembles do not fit')
        for name in obs[i].names:
            if not set(obs[i].idl[name]).issubset(weight.idl[name]):
-                raise ValueError('obs[%d] has to be defined on a subset of the configs in weight.idl[%s]!' % (i, name))
+                raise Exception('obs[%d] has to be defined on a subset of the configs in weight.idl[%s]!' % (i, name))
        new_samples = []
        w_deltas = {}
        for name in sorted(obs[i].names):

@@ -1446,21 +1354,18 @@ def correlate(obs_a, obs_b):
    -----
    Keep in mind to only correlate primary observables which have not been reweighted
    yet. The reweighting has to be applied after correlating the observables.
-    Only works if a single ensemble is present in the Obs.
-    Currently only works if ensemble content is identical (this is not strictly necessary).
+    Currently only works if ensembles are identical (this is not strictly necessary).
    """

-    if len(obs_a.mc_names) > 1 or len(obs_b.mc_names) > 1:
-        raise ValueError('Error: Cannot correlate Obs that contain multiple ensembles.')
    if sorted(obs_a.names) != sorted(obs_b.names):
-        raise ValueError(f"Ensembles do not fit {set(sorted(obs_a.names)) ^ set(sorted(obs_b.names))}")
+        raise Exception(f"Ensembles do not fit {set(sorted(obs_a.names)) ^ set(sorted(obs_b.names))}")
    if len(obs_a.cov_names) or len(obs_b.cov_names):
-        raise ValueError('Error: Not possible to correlate Obs that contain covobs!')
+        raise Exception('Error: Not possible to correlate Obs that contain covobs!')
    for name in obs_a.names:
        if obs_a.shape[name] != obs_b.shape[name]:
-            raise ValueError('Shapes of ensemble', name, 'do not fit')
+            raise Exception('Shapes of ensemble', name, 'do not fit')
        if obs_a.idl[name] != obs_b.idl[name]:
-            raise ValueError('idl of ensemble', name, 'do not fit')
+            raise Exception('idl of ensemble', name, 'do not fit')

    if obs_a.reweighted is True:
        warnings.warn("The first observable is already reweighted.", RuntimeWarning)

@@ -1548,92 +1453,6 @@ def covariance(obs, visualize=False, correlation=False, smooth=None, **kwargs):
    return cov


-def invert_corr_cov_cholesky(corr, inverrdiag):
-    """Constructs a lower triangular matrix `chol` via the Cholesky decomposition of the correlation matrix `corr`
-    and then returns the inverse covariance matrix `chol_inv` as a lower triangular matrix by solving `chol * x = inverrdiag`.
-
-    Parameters
-    ----------
-    corr : np.ndarray
-        correlation matrix
-    inverrdiag : np.ndarray
-        diagonal matrix, the entries are the inverse errors of the data points considered
-    """
-
-    condn = np.linalg.cond(corr)
-    if condn > 0.1 / np.finfo(float).eps:
-        raise ValueError(f"Cannot invert correlation matrix as its condition number exceeds machine precision ({condn:1.2e})")
-    if condn > 1e13:
-        warnings.warn("Correlation matrix may be ill-conditioned, condition number: {%1.2e}" % (condn), RuntimeWarning)
-    chol = np.linalg.cholesky(corr)
-    chol_inv = scipy.linalg.solve_triangular(chol, inverrdiag, lower=True)
-
-    return chol_inv
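A sketch of how this helper feeds a correlated fit; it mirrors the usage in test_inv_cov_matrix_input_least_squares further below, with illustrative data:

```python
import numpy as np
import pyerrors as pe

y = [pe.pseudo_Obs(i + 1.0, 0.1, 'ens') for i in range(4)]
[o.gamma_method() for o in y]

corr = pe.covariance(y, correlation=True)  # correlation matrix of the data
inverrdiag = np.diag(1 / np.asarray([o.dvalue for o in y]))
chol_inv = pe.obs.invert_corr_cov_cholesky(corr, inverrdiag)

# Reuse the precomputed inverse Cholesky factor in a correlated fit.
fit = pe.least_squares([0, 1, 2, 3], y, lambda a, x: a[0] + a[1] * x,
                       correlated_fit=True, inv_chol_cov_matrix=[chol_inv, [""]])
```

Precomputing chol_inv once pays off when many fits share the same data covariance.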
-
-
-def sort_corr(corr, kl, yd):
-    """ Reorders a correlation matrix to match the alphabetical order of its underlying y data.
-
-    The ordering of the input correlation matrix `corr` is given by the list of keys `kl`.
-    The input dictionary `yd` (with the same keys `kl`) must contain the corresponding y data
-    that the correlation matrix is based on.
-    This function sorts the list of keys `kl` alphabetically and sorts the matrix `corr`
-    according to this alphabetical order such that the sorted matrix `corr_sorted` corresponds
-    to the y data `yd` when arranged in an alphabetical order by its keys.
-
-    Parameters
-    ----------
-    corr : np.ndarray
-        A square correlation matrix constructed using the order of the y data specified by `kl`.
-        The dimensions of `corr` should match the total number of y data points in `yd` combined.
-    kl : list of str
-        A list of keys that denotes the order in which the y data from `yd` was used to build the
-        input correlation matrix `corr`.
-    yd : dict of list
-        A dictionary where each key corresponds to a unique identifier, and its value is a list of
-        y data points. The total number of y data points across all keys must match the dimensions
-        of `corr`. The lists in the dictionary can be lists of Obs.
-
-    Returns
-    -------
-    np.ndarray
-        A new, sorted correlation matrix that corresponds to the y data from `yd` when arranged alphabetically by its keys.
-
-    Example
-    -------
-    >>> import numpy as np
-    >>> import pyerrors as pe
-    >>> corr = np.array([[1, 0.2, 0.3], [0.2, 1, 0.4], [0.3, 0.4, 1]])
-    >>> kl = ['b', 'a']
-    >>> yd = {'a': [1, 2], 'b': [3]}
-    >>> sorted_corr = pe.obs.sort_corr(corr, kl, yd)
-    >>> print(sorted_corr)
-    array([[1. , 0.3, 0.4],
-           [0.3, 1. , 0.2],
-           [0.4, 0.2, 1. ]])
-
-    """
-    kl_sorted = sorted(kl)
-
-    posd = {}
-    ofs = 0
-    for ki, k in enumerate(kl):
-        posd[k] = [i + ofs for i in range(len(yd[k]))]
-        ofs += len(posd[k])
-
-    mapping = []
-    for k in kl_sorted:
-        for i in range(len(yd[k])):
-            mapping.append(posd[k][i])
-
-    corr_sorted = np.zeros_like(corr)
-    for i in range(corr.shape[0]):
-        for j in range(corr.shape[0]):
-            corr_sorted[i][j] = corr[mapping[i]][mapping[j]]
-
-    return corr_sorted
-
-
def _smooth_eigenvalues(corr, E):
    """Eigenvalue smoothing as described in hep-lat/9412087


@@ -1643,7 +1462,7 @@ def _smooth_eigenvalues(corr, E):
        Number of eigenvalues to be left substantially unchanged
    """
    if not (2 < E < corr.shape[0] - 1):
-        raise ValueError(f"'E' has to be between 2 and the dimension of the correlation matrix minus 1 ({corr.shape[0] - 1}).")
+        raise Exception(f"'E' has to be between 2 and the dimension of the correlation matrix minus 1 ({corr.shape[0] - 1}).")
    vals, vec = np.linalg.eigh(corr)
    lambda_min = np.mean(vals[:-E])
    vals[vals < lambda_min] = lambda_min

@@ -1731,42 +1550,8 @@ def import_jackknife(jacks, name, idl=None):
    return new_obs


-def import_bootstrap(boots, name, random_numbers):
-    """Imports bootstrap samples and returns an Obs
-
-    Parameters
-    ----------
-    boots : numpy.ndarray
-        numpy array containing the mean value as zeroth entry and
-        the N bootstrap samples as first to Nth entry.
-    name : str
-        name of the ensemble the samples are defined on.
-    random_numbers : np.ndarray
-        Array of shape (samples, length) containing the random numbers to generate the bootstrap samples,
-        where samples is the number of bootstrap samples and length is the length of the original Monte Carlo
-        chain to be reconstructed.
-    """
-    samples, length = random_numbers.shape
-    if samples != len(boots) - 1:
-        raise ValueError("Random numbers do not have the correct shape.")
-
-    if samples < length:
-        raise ValueError("Obs can't be reconstructed if there are fewer bootstrap samples than Monte Carlo data points.")
-
-    proj = np.vstack([np.bincount(o, minlength=length) for o in random_numbers]) / length
-
-    samples = scipy.linalg.lstsq(proj, boots[1:])[0]
-    ret = Obs([samples], [name])
-    ret._value = boots[0]
-    return ret
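A round-trip sketch pairing export_bootstrap (removed further above) with import_bootstrap; generating the resampling matrix explicitly is what makes the reconstruction reproducible (the seed and sample counts are illustrative):

```python
import numpy as np
import pyerrors as pe

obs = pe.pseudo_Obs(1.0, 0.1, 'ens', samples=100)
obs.gamma_method()

# Shared resampling matrix of shape (samples, length) for both directions.
rng = np.random.default_rng(42)
random_numbers = rng.integers(0, 100, size=(500, 100))

boots = obs.export_bootstrap(samples=500, random_numbers=random_numbers)
rec = pe.obs.import_bootstrap(boots, 'ens', random_numbers)
rec.gamma_method()
print(obs, rec)  # the mean is reproduced exactly, the error up to O(1/N)
```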

def merge_obs(list_of_obs):
-    """Combine all observables in list_of_obs into one new observable.
-    This allows to merge Obs that have been computed on multiple replica
-    of the same ensemble.
-    If you like to merge Obs that are based on several ensembles, please
-    average them yourself.
+    """Combine all observables in list_of_obs into one new observable

    Parameters
    ----------

@@ -1779,9 +1564,9 @@ def merge_obs(list_of_obs):
    """
    replist = [item for obs in list_of_obs for item in obs.names]
    if (len(replist) == len(set(replist))) is False:
-        raise ValueError('list_of_obs contains duplicate replica: %s' % (str(replist)))
+        raise Exception('list_of_obs contains duplicate replica: %s' % (str(replist)))
    if any([len(o.cov_names) for o in list_of_obs]):
-        raise ValueError('Not possible to merge data that contains covobs!')
+        raise Exception('Not possible to merge data that contains covobs!')
    new_dict = {}
    idl_dict = {}
    for o in list_of_obs:

@@ -1832,7 +1617,7 @@ def cov_Obs(means, cov, name, grad=None):
    for i in range(len(means)):
        ol.append(covobs_to_obs(Covobs(means[i], cov, name, pos=i, grad=grad)))
    if ol[0].covobs[name].N != len(means):
-        raise ValueError('You have to provide %d mean values!' % (ol[0].N))
+        raise Exception('You have to provide %d mean values!' % (ol[0].N))
    if len(ol) == 1:
        return ol[0]
    return ol

@@ -1848,7 +1633,7 @@ def _determine_gap(o, e_content, e_name):

    gap = min(gaps)
    if not np.all([gi % gap == 0 for gi in gaps]):
-        raise ValueError(f"Replica for ensemble {e_name} do not have a common spacing.", gaps)
+        raise Exception(f"Replica for ensemble {e_name} do not have a common spacing.", gaps)

    return gap


@@ -1,23 +0,0 @@
-import scipy
-import numpy as np
-from autograd.extend import primitive, defvjp
-from autograd.scipy.special import j0, y0, j1, y1, jn, yn, i0, i1, iv, ive, beta, betainc, betaln
-from autograd.scipy.special import polygamma, psi, digamma, gamma, gammaln, gammainc, gammaincc, gammasgn, rgamma, multigammaln
-from autograd.scipy.special import erf, erfc, erfinv, erfcinv, logit, expit, logsumexp
-
-
-__all__ = ["beta", "betainc", "betaln",
-           "polygamma", "psi", "digamma", "gamma", "gammaln", "gammainc", "gammaincc", "gammasgn", "rgamma", "multigammaln",
-           "kn", "j0", "y0", "j1", "y1", "jn", "yn", "i0", "i1", "iv", "ive",
-           "erf", "erfc", "erfinv", "erfcinv", "logit", "expit", "logsumexp"]
-
-
-@primitive
-def kn(n, x):
-    """Modified Bessel function of the second kind of integer order n"""
-    if int(n) != n:
-        raise TypeError("The order 'n' needs to be an integer.")
-    return scipy.special.kn(n, x)
-
-
-defvjp(kn, None, lambda ans, n, x: lambda g: - g * 0.5 * (kn(np.abs(n - 1), x) + kn(n + 1, x)))
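The defvjp call above registers the derivative identity K_n'(x) = -(K_{n-1}(x) + K_{n+1}(x)) / 2 with autograd. A quick numerical check of that rule using the develop-branch module (illustrative value of x):

```python
from autograd import grad
from pyerrors.special import kn

x = 1.5
print(grad(lambda t: kn(2, t))(x))   # autograd derivative of K_2 at x
print(-0.5 * (kn(1, x) + kn(3, x)))  # analytic recurrence, should match
```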
@@ -1 +1 @@
-__version__ = "2.15.0-dev"
+__version__ = "2.8.2"


@@ -1,6 +1,3 @@
[build-system]
requires = ["setuptools >= 63.0.0", "wheel"]
build-backend = "setuptools.build_meta"
-
-[tool.ruff.lint]
-ignore = ["F403"]
10 setup.py

@@ -24,19 +24,19 @@ setup(name='pyerrors',
      author_email='fabian.joswig@ed.ac.uk',
      license="MIT",
      packages=find_packages(),
-     python_requires='>=3.9.0',
-     install_requires=['numpy>=2.0', 'autograd>=1.7.0', 'numdifftools>=0.9.41', 'matplotlib>=3.9', 'scipy>=1.13', 'iminuit>=2.28', 'h5py>=3.11', 'lxml>=5.0', 'python-rapidjson>=1.20', 'pandas>=2.2'],
-     extras_require={'test': ['pytest', 'pytest-cov', 'pytest-benchmark', 'hypothesis', 'nbmake', 'flake8']},
+     python_requires='>=3.7.0',
+     install_requires=['numpy>=1.21,<1.25', 'autograd>=1.5', 'numdifftools>=0.9.41', 'matplotlib>=3.5', 'scipy>=1.7', 'iminuit>=2.17', 'h5py>=3.8', 'lxml>=4.9', 'python-rapidjson>=1.9', 'pandas>=1.1'],
+     extras_require={'test': ['pytest', 'pytest-cov', 'pytest-benchmark', 'hypothesis']},
      classifiers=[
          'Development Status :: 5 - Production/Stable',
          'Intended Audience :: Science/Research',
          'License :: OSI Approved :: MIT License',
          'Programming Language :: Python :: 3',
+         'Programming Language :: Python :: 3.7',
+         'Programming Language :: Python :: 3.8',
          'Programming Language :: Python :: 3.9',
          'Programming Language :: Python :: 3.10',
          'Programming Language :: Python :: 3.11',
-         'Programming Language :: Python :: 3.12',
-         'Programming Language :: Python :: 3.13',
          'Topic :: Scientific/Engineering :: Physics'
      ],
)
@@ -58,16 +58,12 @@ def test_modify_correlator():
    for pad in [0, 2]:
        corr = pe.Corr(corr_content, padding=[pad, pad])
        corr.roll(np.random.randint(100))
        corr.deriv(variant="symmetric")
        corr.deriv(variant="forward")
        corr.deriv(variant="backward")
        corr.deriv(variant="symmetric")
        corr.deriv(variant="improved")
        corr.deriv(variant="log")
        corr.deriv().deriv()
        corr.second_deriv(variant="symmetric")
        corr.second_deriv(variant="big_symmetric")
        corr.second_deriv(variant="improved")
        corr.second_deriv(variant="log")
        corr.second_deriv().second_deriv()

    for i, e in enumerate(corr.content):

@@ -129,7 +125,7 @@ def test_m_eff():
    with pytest.warns(RuntimeWarning):
        my_corr.m_eff('sinh')

-    with pytest.raises(ValueError):
+    with pytest.raises(Exception):
        my_corr.m_eff('unkown_variant')


@@ -140,7 +136,7 @@ def test_m_eff_negative_values():
    assert m_eff_log[padding + 1] is None
    m_eff_cosh = my_corr.m_eff('cosh')
    assert m_eff_cosh[padding + 1] is None
-    with pytest.raises(ValueError):
+    with pytest.raises(Exception):
        my_corr.m_eff('logsym')


@@ -155,7 +151,7 @@ def test_correlate():
    my_corr = pe.correlators.Corr([pe.pseudo_Obs(10, 0.1, 't'), pe.pseudo_Obs(0, 0.05, 't')])
    corr1 = my_corr.correlate(my_corr)
    corr2 = my_corr.correlate(my_corr[0])
-    with pytest.raises(TypeError):
+    with pytest.raises(Exception):
        corr3 = my_corr.correlate(7.3)


@@ -176,9 +172,9 @@ def test_fit_correlator():
    assert fit_res[0] == my_corr[0]
    assert fit_res[1] == my_corr[1] - my_corr[0]

-    with pytest.raises(TypeError):
+    with pytest.raises(Exception):
        my_corr.fit(f, "from 0 to 3")
-    with pytest.raises(ValueError):
+    with pytest.raises(Exception):
        my_corr.fit(f, [0, 2, 3])


@@ -200,17 +196,17 @@ def test_padded_correlator():

def test_corr_exceptions():
    obs_a = pe.Obs([np.random.normal(0.1, 0.1, 100)], ['test'])
-    obs_b = pe.Obs([np.random.normal(0.1, 0.1, 99)], ['test'])
+    obs_b= pe.Obs([np.random.normal(0.1, 0.1, 99)], ['test'])
    with pytest.raises(Exception):
        pe.Corr([obs_a, obs_b])

    obs_a = pe.Obs([np.random.normal(0.1, 0.1, 100)], ['test'])
-    obs_b = pe.Obs([np.random.normal(0.1, 0.1, 100)], ['test'], idl=[range(1, 200, 2)])
+    obs_b= pe.Obs([np.random.normal(0.1, 0.1, 100)], ['test'], idl=[range(1, 200, 2)])
    with pytest.raises(Exception):
        pe.Corr([obs_a, obs_b])

    obs_a = pe.Obs([np.random.normal(0.1, 0.1, 100)], ['test'])
-    obs_b = pe.Obs([np.random.normal(0.1, 0.1, 100)], ['test2'])
+    obs_b= pe.Obs([np.random.normal(0.1, 0.1, 100)], ['test2'])
    with pytest.raises(Exception):
        pe.Corr([obs_a, obs_b])


@@ -256,11 +252,11 @@ def test_prange():
    corr = pe.correlators.Corr(corr_content)

    corr.set_prange([2, 4])
-    with pytest.raises(ValueError):
+    with pytest.raises(Exception):
        corr.set_prange([2])
-    with pytest.raises(TypeError):
+    with pytest.raises(Exception):
        corr.set_prange([2, 2.3])
-    with pytest.raises(ValueError):
+    with pytest.raises(Exception):
        corr.set_prange([4, 1])


@@ -436,7 +432,6 @@ def test_GEVP_solver():
    sp_vecs = [v / np.sqrt((v.T @ mat2 @ v)) for v in sp_vecs]

    assert np.allclose(sp_vecs, pe.correlators._GEVP_solver(mat1, mat2), atol=1e-14)
-    assert np.allclose(np.abs(sp_vecs), np.abs(pe.correlators._GEVP_solver(mat1, mat2, method='cholesky')))


def test_GEVP_none_entries():

@@ -553,7 +548,7 @@ def test_corr_no_filtering():
    li = [-pe.pseudo_Obs(.2, .1, 'a', samples=10) for i in range(96)]
    for i in range(len(li)):
        li[i].idl['a'] = range(1, 21, 2)
-    c = pe.Corr(li)
+    c= pe.Corr(li)
    b = pe.pseudo_Obs(1, 1e-11, 'a', samples=30)
    c *= b
    assert np.all([c[0].idl == o.idl for o in c])

@@ -571,213 +566,3 @@ def test_corr_symmetric():
    assert scorr[1] == scorr[3]
    assert scorr[2] == corr[2]
    assert scorr[0] == corr[0]
-
-
-def test_error_GEVP():
-    corr = pe.input.json.load_json("tests/data/test_matrix_corr.json.gz")
-    t0, ts, state = 3, 6, 0
-    vec_regular = corr.GEVP(t0=t0)[state][ts]
-    vec_errors = corr.GEVP(t0=t0, vector_obs=True, auto_gamma=True)[state][ts]
-    vec_regular_chol = corr.GEVP(t0=t0, method='cholesky')[state][ts]
-    print(vec_errors)
-    print(type(vec_errors[0]))
-    assert(np.isclose(np.asarray([e.value for e in vec_errors]), vec_regular).all())
-    assert(all([e.dvalue > 0. for e in vec_errors]))
-
-    projected_regular = corr.projected(vec_regular).content[ts + 1][0]
-    projected_errors = corr.projected(vec_errors).content[ts + 1][0]
-    projected_regular.gamma_method()
-    projected_errors.gamma_method()
-    assert(projected_errors.dvalue > projected_regular.dvalue)
-    assert(corr.GEVP(t0, vector_obs=True)[state][42] is None)
-
-    assert(np.isclose(vec_regular_chol, vec_regular).all())
-    assert(np.isclose(corr.GEVP(t0=t0, state=state)[ts], vec_regular).all())
-
-
-def test_corr_array_ndim1_init():
-    y = [pe.pseudo_Obs(2 + np.random.normal(0.0, 0.1), .1, 't') for i in np.arange(5)]
-    cc1 = pe.Corr(y)
-    cc2 = pe.Corr(np.array(y))
-    assert np.all([o1 == o2 for o1, o2 in zip(cc1, cc2)])
-
-
-def test_corr_array_ndim3_init():
-    y = np.array([pe.pseudo_Obs(np.random.normal(2.0, 0.1), .1, 't') for i in np.arange(12)]).reshape(3, 2, 2)
-    tt1 = pe.Corr(list(y))
-    tt2 = pe.Corr(y)
-    tt3 = pe.Corr(np.array([pe.Corr(o) for o in y.reshape(3, 4).T]).reshape(2, 2))
-    assert np.all([o1 == o2 for o1, o2 in zip(tt1, tt2)])
-    assert np.all([o1 == o2 for o1, o2 in zip(tt1, tt3)])
-    assert tt1.T == y.shape[0]
-    assert tt1.N == y.shape[1] == y.shape[2]
-
-    with pytest.raises(ValueError):
-        pe.Corr(y.reshape(6, 2, 1))
-
-
-def test_two_matrix_corr_inits():
-    T = 4
-    rn = lambda : np.random.normal(0.5, 0.1)
-
-    # Generate T random CObs in a list
-    list_of_timeslices = []
-    for i in range(T):
-        re = pe.pseudo_Obs(rn(), rn(), "test")
-        im = pe.pseudo_Obs(rn(), rn(), "test")
-        list_of_timeslices.append(pe.CObs(re, im))
-
-    # First option: Correlator of matrix of correlators
-    corr = pe.Corr(list_of_timeslices)
-    mat_corr1 = pe.Corr(np.array([[corr, corr], [corr, corr]]))
-
-    # Second option: Correlator of list of arrays per timeslice
-    list_of_arrays = [np.array([[elem, elem], [elem, elem]]) for elem in list_of_timeslices]
-    mat_corr2 = pe.Corr(list_of_arrays)
-
-    for el in mat_corr1 - mat_corr2:
-        assert np.all(el == 0)
-
-
-def test_matmul_overloading():
-    N = 4
-    rn = lambda : np.random.normal(0.5, 0.1)
-
-    # Generate N^2 random CObs and assemble them in an array
-    ll = []
-    for i in range(N ** 2):
-        re = pe.pseudo_Obs(rn(), rn(), "test")
-        im = pe.pseudo_Obs(rn(), rn(), "test")
-        ll.append(pe.CObs(re, im))
-    mat = np.array(ll).reshape(N, N)
-
-    # Multiply with gamma matrix
-    corr = pe.Corr([mat] * 4, padding=[0, 1])
-
-    # __matmul__
-    mcorr = corr @ pe.dirac.gammaX
-    comp = mat @ pe.dirac.gammaX
-    for i in range(4):
-        assert np.all(mcorr[i] == comp)
-
-    # __rmatmul__
-    mcorr = pe.dirac.gammaX @ corr
-    comp = pe.dirac.gammaX @ mat
-    for i in range(4):
-        assert np.all(mcorr[i] == comp)
-
-    test_mat = pe.dirac.gamma5 + pe.dirac.gammaX
-    icorr = corr @ test_mat @ np.linalg.inv(test_mat)
-    tt = corr - icorr
-    for i in range(4):
-        assert np.all(tt[i] == 0)
-
-    # associative property
-    tt = (corr.real @ pe.dirac.gammaX + corr.imag @ (pe.dirac.gammaX * 1j)) - corr @ pe.dirac.gammaX
-    for el in tt:
-        if el is not None:
-            assert np.all(el == 0)
-
-    corr2 = corr @ corr
-    for i in range(4):
-        np.all(corr2[i] == corr[i] @ corr[i])
-
-
-def test_matrix_trace():
-    N = 4
-    rn = lambda : np.random.normal(0.5, 0.1)
-
-    # Generate N^2 random CObs and assemble them in an array
-    ll = []
-    for i in range(N ** 2):
-        re = pe.pseudo_Obs(rn(), rn(), "test")
-        im = pe.pseudo_Obs(rn(), rn(), "test")
-        ll.append(pe.CObs(re, im))
-    mat = np.array(ll).reshape(N, N)
-
-    corr = pe.Corr([mat] * 4)
-
-    # Explicitly check trace
-    for el in corr.trace():
-        el == np.sum(np.diag(mat))
-
-    # Trace is cyclic
-    for one, two in zip((pe.dirac.gammaX @ corr).trace(), (corr @ pe.dirac.gammaX).trace()):
-        assert np.all(one == two)
-
-    # Antisymmetric matrices are traceless.
-    mat = (mat - mat.T) / 2
-    corr = pe.Corr([mat] * 4)
-    for el in corr.trace():
-        assert el == 0
-
-    with pytest.raises(ValueError):
-        corr.item(0, 0).trace()
-
-
-def test_corr_roll():
-    T = 4
-    rn = lambda : np.random.normal(0.5, 0.1)
-
-    ll = []
-    for i in range(T):
-        re = pe.pseudo_Obs(rn(), rn(), "test")
-        im = pe.pseudo_Obs(rn(), rn(), "test")
-        ll.append(pe.CObs(re, im))
-
-    # Rolling by T should produce the same correlator
-    corr = pe.Corr(ll)
-    tt = corr - corr.roll(T)
-    for el in tt:
-        assert np.all(el == 0)
-
-    mcorr = pe.Corr(np.array([[corr, corr + 0.1], [corr - 0.1, 2 * corr]]))
-    tt = mcorr.roll(T) - mcorr
-    for el in tt:
-        assert np.all(el == 0)
-
-
-def test_correlator_comparison():
-    scorr = pe.Corr([pe.pseudo_Obs(0.3, 0.1, "test") for o in range(4)])
-    mcorr = pe.Corr(np.array([[scorr, scorr], [scorr, scorr]]))
-    for corr in [scorr, mcorr]:
-        assert (corr == corr).all()
-        assert np.all(corr == 1 * corr)
-        assert np.all(corr == (1 + 1e-16) * corr)
-        assert not np.all(corr == (1 + 1e-5) * corr)
-        assert np.all(corr == 1 / (1 / corr))
-        assert np.all(corr - corr == 0)
-        assert np.all(corr * 0 == 0)
-        assert np.all(0 * corr == 0)
-        assert np.all(0 * corr + scorr[2] == scorr[2])
-        assert np.all(-corr == 0 - corr)
-        assert np.all(corr ** 2 == corr * corr)
-    acorr = pe.Corr([scorr[0]] * 6)
-    assert np.all(acorr == scorr[0])
-    assert not np.all(acorr == scorr[1])
-
-    mcorr[1][0, 1] = None
-    assert not np.all(mcorr == pe.Corr(np.array([[scorr, scorr], [scorr, scorr]])))
-
-    pcorr = pe.Corr([pe.pseudo_Obs(0.25, 0.1, "test") for o in range(2)], padding=[1, 1])
-    assert np.all(pcorr == pcorr)
-    assert np.all(1 * pcorr == pcorr)
-
-
-def test_corr_item():
-    corr_aa = _gen_corr(1)
-    corr_ab = 0.5 * corr_aa
-
-    corr_mat = pe.Corr(np.array([[corr_aa, corr_ab], [corr_ab, corr_aa]]))
-    corr_mat.item(0, 0)
-    assert corr_mat[0].item(0, 1) == corr_mat.item(0, 1)[0]
-
-
-def test_complex_add_and_mul():
-    o = pe.pseudo_Obs(1.0, 0.3, "my_r345sfg16£$%&$%^%$^$", samples=47)
-    co = pe.CObs(o, 0.341 * o)
-    for obs in [o, co]:
-        cc = pe.Corr([obs for _ in range(4)])
-        cc += 2j
-        cc = cc * 4j
-        cc.real + cc.imag
Binary file not shown.

@@ -30,7 +30,7 @@ def test_grid_dirac():
                  'SigmaYZ',
                  'SigmaZT']:
        pe.dirac.Grid_gamma(gamma)
-    with pytest.raises(ValueError):
+    with pytest.raises(Exception):
        pe.dirac.Grid_gamma('Not a gamma matrix')


@@ -44,7 +44,7 @@ def test_epsilon_tensor():
             (1, 1, 3) : 0.0}
    for key, value in check.items():
        assert pe.dirac.epsilon_tensor(*key) == value
-    with pytest.raises(ValueError):
+    with pytest.raises(Exception):
        pe.dirac.epsilon_tensor(0, 1, 3)


@@ -59,5 +59,5 @@ def test_epsilon_tensor_rank4():
             (1, 2, 3, 1) : 0.0}
    for key, value in check.items():
        assert pe.dirac.epsilon_tensor_rank4(*key) == value
-    with pytest.raises(ValueError):
+    with pytest.raises(Exception):
        pe.dirac.epsilon_tensor_rank4(0, 1, 3, 4)
@ -152,127 +152,6 @@ def test_alternative_solvers():
|
|||
chisquare_values = np.array(chisquare_values)
|
||||
assert np.all(np.isclose(chisquare_values, chisquare_values[0]))
|
||||
|
||||
def test_inv_cov_matrix_input_least_squares():
|
||||
|
||||
|
||||
num_samples = 400
|
||||
N = 10
|
||||
|
||||
x = norm.rvs(size=(N, num_samples)) # generate random numbers
|
||||
|
||||
r = np.zeros((N, N))
|
||||
for i in range(N):
|
||||
for j in range(N):
|
||||
r[i, j] = np.exp(-0.8 * np.fabs(i - j)) # element in correlation matrix
|
||||
|
||||
errl = np.sqrt([3.4, 2.5, 3.6, 2.8, 4.2, 4.7, 4.9, 5.1, 3.2, 4.2]) # set y errors
|
||||
for i in range(N):
|
||||
for j in range(N):
|
||||
r[i, j] *= errl[i] * errl[j] # element in covariance matrix
|
||||
|
||||
c = cholesky(r, lower=True)
|
||||
y = np.dot(c, x)
|
||||
x = np.arange(N)
|
||||
x_dict = {}
|
||||
y_dict = {}
|
||||
for i,item in enumerate(x):
|
||||
x_dict[str(item)] = [x[i]]
|
||||
|
||||
for linear in [True, False]:
|
||||
data = []
|
||||
for i in range(N):
|
||||
if linear:
|
||||
data.append(pe.Obs([[i + 1 + o for o in y[i]]], ['ens']))
|
||||
else:
|
||||
data.append(pe.Obs([[np.exp(-(i + 1)) + np.exp(-(i + 1)) * o for o in y[i]]], ['ens']))
|
||||
|
||||
[o.gamma_method() for o in data]
|
||||
|
||||
data_dict = {}
|
||||
for i,item in enumerate(x):
|
||||
data_dict[str(item)] = [data[i]]
|
||||
|
||||
corr = pe.covariance(data, correlation=True)
|
||||
chol = np.linalg.cholesky(corr)
|
||||
covdiag = np.diag(1 / np.asarray([o.dvalue for o in data]))
|
||||
chol_inv = scipy.linalg.solve_triangular(chol, covdiag, lower=True)
|
||||
chol_inv_keys = [""]
|
||||
chol_inv_keys_combined_fit = [str(item) for i,item in enumerate(x)]
|
||||
|
||||
if linear:
|
||||
def fitf(p, x):
|
||||
return p[1] + p[0] * x
|
||||
fitf_dict = {}
|
||||
for i,item in enumerate(x):
|
||||
fitf_dict[str(item)] = fitf
|
||||
else:
|
||||
def fitf(p, x):
|
||||
return p[1] * anp.exp(-p[0] * x)
|
||||
fitf_dict = {}
|
||||
for i,item in enumerate(x):
|
||||
fitf_dict[str(item)] = fitf
|
||||
|
||||
fitpc = pe.least_squares(x, data, fitf, correlated_fit=True)
|
||||
fitp_inv_cov = pe.least_squares(x, data, fitf, correlated_fit = True, inv_chol_cov_matrix = [chol_inv,chol_inv_keys])
|
||||
fitp_inv_cov_combined_fit = pe.least_squares(x_dict, data_dict, fitf_dict, correlated_fit = True, inv_chol_cov_matrix = [chol_inv,chol_inv_keys_combined_fit])
|
||||
for i in range(2):
|
||||
diff_inv_cov = fitp_inv_cov[i] - fitpc[i]
|
||||
diff_inv_cov.gamma_method()
|
||||
assert(diff_inv_cov.is_zero(atol=0.0))
|
||||
diff_inv_cov_combined_fit = fitp_inv_cov_combined_fit[i] - fitpc[i]
|
||||
diff_inv_cov_combined_fit.gamma_method()
|
||||
assert(diff_inv_cov_combined_fit.is_zero(atol=1e-12))
|
||||
|
||||
with pytest.raises(ValueError):
|
||||
pe.least_squares(x_dict, data_dict, fitf_dict, correlated_fit = True, inv_chol_cov_matrix = [corr,chol_inv_keys_combined_fit])
|
||||
|
||||
def test_least_squares_invalid_inv_cov_matrix_input():
|
||||
xvals = []
|
||||
yvals = []
|
||||
err = 0.1
|
||||
def func_valid(a,x):
|
||||
return a[0] + a[1] * x
|
||||
for x in range(1, 8, 2):
|
||||
xvals.append(x)
|
||||
yvals.append(pe.pseudo_Obs(x + np.random.normal(0.0, err), err, 'test1') + pe.pseudo_Obs(0, err / 100, 'test2', samples=87))
|
||||
|
||||
[o.gamma_method() for o in yvals]
|
||||
|
||||
#dictionaries for a combined fit
|
||||
xvals_dict = { }
|
||||
yvals_dict = { }
|
||||
for i,item in enumerate(np.arange(1, 8, 2)):
|
||||
xvals_dict[str(item)] = [xvals[i]]
|
||||
yvals_dict[str(item)] = [yvals[i]]
|
||||
chol_inv_keys_combined_fit = ['1', '3', '5', '7']
|
||||
chol_inv_keys_combined_fit_invalid = ['2', '7', '100', '8']
|
||||
func_dict_valid = {"1": func_valid,"3": func_valid,"5": func_valid,"7": func_valid}
|
||||
|
||||
corr_valid = pe.covariance(yvals, correlation = True)
|
||||
chol = np.linalg.cholesky(corr_valid)
|
||||
covdiag = np.diag(1 / np.asarray([o.dvalue for o in yvals]))
|
||||
chol_inv_valid = scipy.linalg.solve_triangular(chol, covdiag, lower=True)
|
||||
chol_inv_keys = [""]
|
||||
pe.least_squares(xvals, yvals,func_valid, correlated_fit = True, inv_chol_cov_matrix = [chol_inv_valid,chol_inv_keys])
|
||||
pe.least_squares(xvals_dict, yvals_dict,func_dict_valid, correlated_fit = True, inv_chol_cov_matrix = [chol_inv_valid,chol_inv_keys_combined_fit])
|
||||
chol_inv_invalid_shape1 = np.zeros((len(yvals),len(yvals)-1))
|
||||
chol_inv_invalid_shape2 = np.zeros((len(yvals)+2,len(yvals)))
|
||||
|
||||
# for an uncombined fit
|
||||
with pytest.raises(TypeError):
|
||||
pe.least_squares(xvals, yvals, func_valid, correlated_fit = True, inv_chol_cov_matrix = [chol_inv_invalid_shape1,chol_inv_keys])
|
||||
with pytest.raises(TypeError):
|
||||
pe.least_squares(xvals, yvals, func_valid,correlated_fit = True, inv_chol_cov_matrix = [chol_inv_invalid_shape2,chol_inv_keys])
|
||||
with pytest.raises(ValueError):
|
||||
pe.least_squares(xvals, yvals, func_valid,correlated_fit = True, inv_chol_cov_matrix = [chol_inv_valid,chol_inv_keys_combined_fit_invalid])
|
||||
|
||||
#repeat for a combined fit
|
||||
with pytest.raises(TypeError):
|
||||
pe.least_squares(xvals_dict, yvals_dict,func_dict_valid, correlated_fit = True, inv_chol_cov_matrix = [chol_inv_invalid_shape1,chol_inv_keys_combined_fit])
|
||||
with pytest.raises(TypeError):
|
||||
pe.least_squares(xvals_dict, yvals_dict,func_dict_valid, correlated_fit = True, inv_chol_cov_matrix = [chol_inv_invalid_shape2,chol_inv_keys_combined_fit])
|
||||
with pytest.raises(ValueError):
|
||||
pe.least_squares(xvals_dict, yvals_dict,func_dict_valid, correlated_fit = True, inv_chol_cov_matrix = [chol_inv_valid,chol_inv_keys_combined_fit_invalid])
|
||||
|
||||
def test_correlated_fit():
|
||||
num_samples = 400
|
||||
|
@ -356,12 +235,12 @@ def test_fit_corr_independent():
|
|||
|
||||
|
||||
def test_linear_fit_guesses():
|
||||
for err in [1.2, 0.1, 0.001]:
|
||||
for err in [10, 0.1, 0.001]:
|
||||
xvals = []
|
||||
yvals = []
|
||||
for x in range(1, 8, 2):
|
||||
xvals.append(x)
|
||||
yvals.append(pe.pseudo_Obs(x + np.random.normal(0.0, err), err, 'test1') + pe.pseudo_Obs(0, err / 97, 'test2', samples=87))
|
||||
yvals.append(pe.pseudo_Obs(x + np.random.normal(0.0, err), err, 'test1') + pe.pseudo_Obs(0, err / 100, 'test2', samples=87))
|
||||
lin_func = lambda a, x: a[0] + a[1] * x
|
||||
with pytest.raises(Exception):
|
||||
pe.least_squares(xvals, yvals, lin_func)
|
||||
|
@ -372,7 +251,7 @@ def test_linear_fit_guesses():
|
|||
bad_guess = pe.least_squares(xvals, yvals, lin_func, initial_guess=[999, 999])
|
||||
good_guess = pe.least_squares(xvals, yvals, lin_func, initial_guess=[0, 1])
|
||||
assert np.isclose(bad_guess.chisquare, good_guess.chisquare, atol=1e-8)
|
||||
assert np.all([(go - ba).is_zero(atol=5e-5) for (go, ba) in zip(good_guess, bad_guess)])
|
||||
assert np.all([(go - ba).is_zero(atol=1e-6) for (go, ba) in zip(good_guess, bad_guess)])
|
||||
|
||||
|
||||
def test_total_least_squares():

@@ -1085,20 +964,6 @@ def test_combined_resplot_qqplot():
fr = pe.least_squares(xd, yd, fd, resplot=True, qqplot=True)
plt.close('all')

def test_combined_fit_xerr():
fitd = {
'a' : lambda p, x: p[0] * x[0] + p[1] * x[1],
'b' : lambda p, x: p[0] * x[0] + p[2] * x[1],
'c' : lambda p, x: p[0] * x[0] + p[3] * x[1],
}
yd = {
'a': [pe.cov_Obs(3 + .1 * np.random.uniform(), .1**2, 'a' + str(i)) for i in range(5)],
'b': [pe.cov_Obs(1 + .1 * np.random.uniform(), .1**2, 'b' + str(i)) for i in range(6)],
'c': [pe.cov_Obs(3 + .1 * np.random.uniform(), .1**2, 'c' + str(i)) for i in range(3)],
}
xd = {k: np.transpose([[1 + .01 * np.random.uniform(), 2] for i in range(len(yd[k]))]) for k in fitd}
pe.fits.least_squares(xd, yd, fitd)


def test_x_multidim_fit():
x1 = np.arange(1, 10)

@@ -1277,70 +1142,6 @@ def test_fit_dof():
assert cd[0] != cd[0] # Check for nan
assert np.all(np.array(cd[1:]) > 0)

N = 5

def fitf(a, x):
return a[0] + 0 * x

def fitf_multi(a, x):
return a[0] + 0 * x[0] + 0*x[1]

for priors in [None, [pe.cov_Obs(3, 1, 'p')]]:
if priors is None:
lp = 0
else:
lp = len(priors)
x = [1. for i in range(N)]
y = [pe.cov_Obs(i, .1, '%d' % (i)) for i in range(N)]
[o.gm() for o in y]
res = pe.fits.least_squares(x, y, fitf, expected_chisquare=True, priors=priors)
assert(res.dof == N - 1 + lp)
if priors is None:
assert(np.isclose(res.chisquare_by_expected_chisquare, res.chisquare_by_dof))

kl = ['a', 'b']
x = {k: [1. for i in range(N)] for k in kl}
y = {k: [pe.cov_Obs(i, .1, '%d%s' % (i, k)) for i in range(N)] for k in kl}
[[o.gm() for o in y[k]] for k in y]
res = pe.fits.least_squares(x, y, {k: fitf for k in kl}, expected_chisquare=True, priors=priors)
assert(res.dof == 2 * N - 1 + lp)
if priors is None:
assert(np.isclose(res.chisquare_by_expected_chisquare, res.chisquare_by_dof))

x = np.array([[1., 2.] for i in range(N)]).T
y = [pe.cov_Obs(i, .1, '%d' % (i)) for i in range(N)]
[o.gm() for o in y]
res = pe.fits.least_squares(x, y, fitf_multi, expected_chisquare=True, priors=priors)
assert(res.dof == N - 1 + lp)
if priors is None:
assert(np.isclose(res.chisquare_by_expected_chisquare, res.chisquare_by_dof))

x = {k: np.array([[1., 2.] for i in range(N)]).T for k in kl}
y = {k: [pe.cov_Obs(i, .1, '%d%s' % (i, k)) for i in range(N)] for k in kl}
[[o.gm() for o in y[k]] for k in y]
res = pe.fits.least_squares(x, y, {k: fitf_multi for k in kl}, expected_chisquare=True, priors=priors)

assert(res.dof == 2 * N - 1 + lp)
if priors is None:
assert(np.isclose(res.chisquare_by_expected_chisquare, res.chisquare_by_dof))


def test_combined_fit_constant_shape():
N1 = 16
N2 = 10
x = {"a": np.arange(N1),
"": np.arange(N2)}
y = {"a": [pe.pseudo_Obs(o + np.random.normal(0.0, 0.1), 0.1, "test") for o in range(N1)],
"": [pe.pseudo_Obs(o + np.random.normal(0.0, 0.1), 0.1, "test") for o in range(N2)]}
funcs = {"a": lambda a, x: a[0] + a[1] * x,
"": lambda a, x: a[1]}
with pytest.raises(ValueError):
pe.fits.least_squares(x, y, funcs, method='migrad')

funcs = {"a": lambda a, x: a[0] + a[1] * x,
"": lambda a, x: a[1] + x * 0}
pe.fits.least_squares(x, y, funcs, method='migrad')
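# Note (illustrative, not part of the upstream diff): in a combined fit every
# fit function has to return an array of the same shape as its x-block, which
# is why the constant model is written as a[1] + x * 0 above; the bare a[1]
# variant does not broadcast and is rejected with a ValueError.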
def fit_general(x, y, func, silent=False, **kwargs):
"""Performs a non-linear fit to y = func(x) and returns a list of Obs corresponding to the fit parameters.


@@ -1,51 +0,0 @@
import numpy as np
import autograd.numpy as anp
import pyerrors as pe


def test_integration():
def f(p, x):
return p[0] * x + p[1] * x**2 - p[2] / x

def F(p, x):
return p[0] * x**2 / 2. + p[1] * x**3 / 3. - anp.log(x) * p[2]

def check_ana_vs_int(p, l, u, **kwargs):
numint_full = pe.integrate.quad(f, p, l, u, **kwargs)
numint = numint_full[0]

anaint = F(p, u) - F(p, l)
diff = (numint - anaint)

if isinstance(numint, pe.Obs):
numint.gm()
anaint.gm()

assert(diff.is_zero())
else:
assert(np.isclose(0, diff))

pobs = np.array([pe.cov_Obs(1., .1**2, '0'), pe.cov_Obs(2., .2**2, '1'), pe.cov_Obs(2.2, .17**2, '2')])
lobs = pe.cov_Obs(.123, .012**2, 'l')
uobs = pe.cov_Obs(1., .05**2, 'u')

check_ana_vs_int(pobs, lobs, uobs)
check_ana_vs_int(pobs, lobs.value, uobs)
check_ana_vs_int(pobs, lobs, uobs.value)
check_ana_vs_int(pobs, lobs.value, uobs.value)
for i in range(len(pobs)):
p = [pi for pi in pobs]
p[i] = pobs[i].value
check_ana_vs_int(p, lobs, uobs)

check_ana_vs_int([pi.value for pi in pobs], lobs, uobs)
check_ana_vs_int([pi.value for pi in pobs], lobs.value, uobs.value)

check_ana_vs_int(pobs, lobs, uobs, epsabs=1.e-9, epsrel=1.236e-10, limit=100)
assert(len(pe.integrate.quad(f, pobs, lobs, uobs, full_output=True)) > 2)

r1, _ = pe.integrate.quad(F, pobs, 1, 0.1)
r2, _ = pe.integrate.quad(F, pobs, 0.1, 1)
assert r1 == -r2
iamzero, _ = pe.integrate.quad(F, pobs, 1, 1)
assert iamzero == 0
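# Note (illustrative, not part of the upstream diff): the test above documents
# the pe.integrate.quad interface fairly completely. It takes a function
# f(p, x), parameters and integration limits that may each be plain floats or
# Obs, and returns a tuple whose first entry is the integral (an Obs whenever
# any input carries an error); scipy-style keywords such as epsabs, epsrel,
# limit and full_output are passed through. A minimal call:
#
#     val, _ = pe.integrate.quad(lambda p, x: p[0] * x, [pe.cov_Obs(2., .1**2, 'c')], 0., 1.)
#     val.gm()  # error propagation through the integral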
@@ -12,7 +12,7 @@ def test_jsonio():
o = pe.pseudo_Obs(1.0, .2, 'one')
o2 = pe.pseudo_Obs(0.5, .1, 'two|r1')
o3 = pe.pseudo_Obs(0.5, .1, 'two|r2')
o4 = pe.merge_obs([o2, o3, pe.pseudo_Obs(0.5, .1, 'two|r3', samples=3221)])
o4 = pe.merge_obs([o2, o3])
otag = 'This has been merged!'
o4.tag = otag
do = o - .2 * o4

@@ -101,8 +101,8 @@ def test_json_string_reconstruction():


def test_json_corr_io():
my_list = [pe.Obs([np.random.normal(1.0, 0.1, 100), np.random.normal(1.0, 0.1, 321)], ['ens1|r1', 'ens1|r2'], idl=[range(1, 201, 2), range(321)]) for o in range(8)]
rw_list = pe.reweight(pe.Obs([np.random.normal(1.0, 0.1, 100), np.random.normal(1.0, 0.1, 321)], ['ens1|r1', 'ens1|r2'], idl=[range(1, 201, 2), range(321)]), my_list)
my_list = [pe.Obs([np.random.normal(1.0, 0.1, 100)], ['ens1']) for o in range(8)]
rw_list = pe.reweight(pe.Obs([np.random.normal(1.0, 0.1, 100)], ['ens1']), my_list)

for obs_list in [my_list, rw_list]:
for tag in [None, "test"]:

@@ -111,8 +111,7 @@ def test_json_corr_io():
for corr_tag in [None, 'my_Corr_tag']:
for prange in [None, [3, 6]]:
for gap in [False, True]:
for mult in [1., pe.cov_Obs([12.22, 1.21], [.212**2, .11**2], 'renorm')[0]]:
my_corr = mult * pe.Corr(obs_list, padding=[pad, pad], prange=prange)
my_corr = pe.Corr(obs_list, padding=[pad, pad], prange=prange)
my_corr.tag = corr_tag
if gap:
my_corr.content[4] = None

@@ -129,23 +128,13 @@ def test_json_corr_io():


def test_json_corr_2d_io():
obs_list = [np.array([
[
pe.merge_obs([pe.pseudo_Obs(1.0 + i, 0.1 * i, 'test|r2'), pe.pseudo_Obs(1.0 + i, 0.1 * i, 'test|r1', samples=321)]),
pe.merge_obs([pe.pseudo_Obs(0.0, 0.1 * i, 'test|r2'), pe.pseudo_Obs(0.0, 0.1 * i, 'test|r1', samples=321)]),
],
[
pe.merge_obs([pe.pseudo_Obs(0.0, 0.1 * i, 'test|r2'), pe.pseudo_Obs(0.0, 0.1 * i, 'test|r1', samples=321),]),
pe.merge_obs([pe.pseudo_Obs(1.0 + i, 0.1 * i, 'test|r2'), pe.pseudo_Obs(1.0 + i, 0.1 * i, 'test|r1', samples=321)]),
],
]) for i in range(4)]
obs_list = [np.array([[pe.pseudo_Obs(1.0 + i, 0.1 * i, 'test'), pe.pseudo_Obs(0.0, 0.1 * i, 'test')], [pe.pseudo_Obs(0.0, 0.1 * i, 'test'), pe.pseudo_Obs(1.0 + i, 0.1 * i, 'test')]]) for i in range(4)]

for tag in [None, "test"]:
obs_list[3][0, 1].tag = tag
for padding in [0, 1]:
for prange in [None, [3, 6]]:
for mult in [1., pe.cov_Obs([12.22, 1.21], [.212**2, .11**2], 'renorm')[0]]:
my_corr = mult * pe.Corr(obs_list, padding=[padding, padding], prange=prange)
my_corr = pe.Corr(obs_list, padding=[padding, padding], prange=prange)
my_corr.tag = tag
pe.input.json.dump_to_json(my_corr, 'corr')
recover = pe.input.json.load_json('corr')

@@ -222,7 +211,6 @@ def test_json_dict_io():
'd': pe.pseudo_Obs(.01, .001, 'testd', samples=10) * pe.cov_Obs(1, .01, 'cov1'),
'se': None,
'sf': 1.2,
'k': pe.cov_Obs(.1, .001**2, 'cov') * pe.merge_obs([pe.pseudo_Obs(1.0, 0.1, 'test|r2'), pe.pseudo_Obs(1.0, 0.1, 'test|r1', samples=321)]),
}
}

@@ -326,7 +314,7 @@ def test_dobsio():

o2 = pe.pseudo_Obs(0.5, .1, 'two|r1')
o3 = pe.pseudo_Obs(0.5, .1, 'two|r2')
o4 = pe.merge_obs([o2, o3, pe.pseudo_Obs(0.5, .1, 'two|r3', samples=3221)])
o4 = pe.merge_obs([o2, o3])
otag = 'This has been merged!'
o4.tag = otag
do = o - .2 * o4

@@ -340,7 +328,7 @@ def test_dobsio():
o5 /= co2[0]
o5.tag = 2 * otag

tt1 = pe.Obs([np.random.rand(100), np.random.rand(102)], ['t|r1', 't|r2'], idl=[range(2, 202, 2), range(22, 226, 2)])
tt1 = pe.Obs([np.random.rand(100), np.random.rand(100)], ['t|r1', 't|r2'], idl=[range(2, 202, 2), range(22, 222, 2)])
tt3 = pe.Obs([np.random.rand(102)], ['qe|r1'])

tt = tt1 + tt3

@@ -349,7 +337,7 @@ def test_dobsio():

tt4 = pe.Obs([np.random.rand(100), np.random.rand(100)], ['t|r1', 't|r2'], idl=[range(1, 101, 1), range(2, 202, 2)])

ol = [o2, o3, o4, do, o5, tt, tt4, np.log(tt4 / o5**2), np.exp(o5 + np.log(co3 / tt3 + o4) / tt), o4.reweight(o4)]
ol = [o2, o3, o4, do, o5, tt, tt4, np.log(tt4 / o5**2), np.exp(o5 + np.log(co3 / tt3 + o4) / tt)]
print(ol)
fname = 'test_rw'

@@ -374,12 +362,9 @@ def test_dobsio():


def test_reconstruct_non_linear_r_obs(tmp_path):
to = (
pe.Obs([np.random.rand(500), np.random.rand(1200)],
["e|r1", "e|r2", ],
idl=[range(1, 501), range(0, 1200)])
+ pe.Obs([np.random.rand(111)], ["my_new_ensemble_54^£$|8'[@124435%6^7&()~#"], idl=[range(1, 999, 9)])
)
to = pe.Obs([np.random.rand(500), np.random.rand(500), np.random.rand(111)],
["e|r1", "e|r2", "my_new_ensemble_54^£$|8'[@124435%6^7&()~#"],
idl=[range(1, 501), range(0, 500), range(1, 999, 9)])
to = np.log(to ** 2) / to
to.dump((tmp_path / "test_equality").as_posix())
ro = pe.input.json.load_json((tmp_path / "test_equality").as_posix())

@@ -387,12 +372,9 @@ def test_reconstruct_non_linear_r_obs(tmp_path):


def test_reconstruct_non_linear_r_obs_list(tmp_path):
to = (
pe.Obs([np.random.rand(500), np.random.rand(1200)],
["e|r1", "e|r2", ],
idl=[range(1, 501), range(0, 1200)])
+ pe.Obs([np.random.rand(111)], ["my_new_ensemble_54^£$|8'[@124435%6^7&()~#"], idl=[range(1, 999, 9)])
)
to = pe.Obs([np.random.rand(500), np.random.rand(500), np.random.rand(111)],
["e|r1", "e|r2", "my_new_ensemble_54^£$|8'[@124435%6^7&()~#"],
idl=[range(1, 501), range(0, 500), range(1, 999, 9)])
to = np.log(to ** 2) / to
for to_list in [[to, to, to], np.array([to, to, to])]:
pe.input.json.dump_to_json(to_list, (tmp_path / "test_equality_list").as_posix())
@@ -14,8 +14,8 @@ def get_real_matrix(dimension):
exponent_imag = np.random.normal(0, 1)
base_matrix[n, m] = pe.Obs([np.random.normal(1.0, 0.1, 100)], ['t'])

return base_matrix

return base_matrix

def get_complex_matrix(dimension):
base_matrix = np.empty((dimension, dimension), dtype=object)

@@ -34,7 +34,7 @@ def test_matmul():
my_list = []
length = 100 + np.random.randint(200)
for i in range(dim ** 2):
my_list.append(pe.Obs([np.random.rand(length)], ['t1']) + pe.Obs([np.random.rand(length + 1)], ['t2']))
my_list.append(pe.Obs([np.random.rand(length), np.random.rand(length + 1)], ['t1', 't2']))
my_array = const * np.array(my_list).reshape((dim, dim))
tt = pe.linalg.matmul(my_array, my_array) - my_array @ my_array
for t, e in np.ndenumerate(tt):

@@ -43,8 +43,8 @@ def test_matmul():
my_list = []
length = 100 + np.random.randint(200)
for i in range(dim ** 2):
my_list.append(pe.CObs(pe.Obs([np.random.rand(length)], ['t1']) + pe.Obs([np.random.rand(length + 1)], ['t2']),
pe.Obs([np.random.rand(length)], ['t1']) + pe.Obs([np.random.rand(length + 1)], ['t2'])))
my_list.append(pe.CObs(pe.Obs([np.random.rand(length), np.random.rand(length + 1)], ['t1', 't2']),
pe.Obs([np.random.rand(length), np.random.rand(length + 1)], ['t1', 't2'])))
my_array = np.array(my_list).reshape((dim, dim)) * const
tt = pe.linalg.matmul(my_array, my_array) - my_array @ my_array
for t, e in np.ndenumerate(tt):

@@ -109,6 +109,7 @@ def test_einsum():
assert np.all([o.imag.is_zero_within_error(0.001) for o in arr])
assert np.all([o.imag.dvalue < 0.001 for o in arr])

tt = [get_real_matrix(4), get_real_matrix(3)]
q = np.tensordot(tt[0], tt[1], 0)
c1 = tt[1] @ q

@@ -151,7 +152,7 @@ def test_multi_dot():
my_list = []
length = 1000 + np.random.randint(200)
for i in range(dim ** 2):
my_list.append(pe.Obs([np.random.rand(length)], ['t1']) + pe.Obs([np.random.rand(length + 1)], ['t2']))
my_list.append(pe.Obs([np.random.rand(length), np.random.rand(length + 1)], ['t1', 't2']))
my_array = pe.cov_Obs(1.0, 0.002, 'cov') * np.array(my_list).reshape((dim, dim))
tt = pe.linalg.matmul(my_array, my_array, my_array, my_array) - my_array @ my_array @ my_array @ my_array
for t, e in np.ndenumerate(tt):

@@ -160,8 +161,8 @@ def test_multi_dot():
my_list = []
length = 1000 + np.random.randint(200)
for i in range(dim ** 2):
my_list.append(pe.CObs(pe.Obs([np.random.rand(length)], ['t1']) + pe.Obs([np.random.rand(length + 1)], ['t2']),
pe.Obs([np.random.rand(length)], ['t1']) + pe.Obs([np.random.rand(length + 1)], ['t2'])))
my_list.append(pe.CObs(pe.Obs([np.random.rand(length), np.random.rand(length + 1)], ['t1', 't2']),
pe.Obs([np.random.rand(length), np.random.rand(length + 1)], ['t1', 't2'])))
my_array = np.array(my_list).reshape((dim, dim)) * pe.cov_Obs(1.0, 0.002, 'cov')
tt = pe.linalg.matmul(my_array, my_array, my_array, my_array) - my_array @ my_array @ my_array @ my_array
for t, e in np.ndenumerate(tt):

@@ -209,7 +210,7 @@ def test_irregular_matrix_inverse():
for idl in [range(8, 508, 10), range(250, 273), [2, 8, 19, 20, 78, 99, 828, 10548979]]:
irregular_array = []
for i in range(dim ** 2):
irregular_array.append(pe.Obs([np.random.normal(1.1, 0.2, len(idl))], ['ens1'], idl=[idl]) + pe.Obs([np.random.normal(0.25, 0.1, 10)], ['ens2'], idl=[range(1, 11)]))
irregular_array.append(pe.Obs([np.random.normal(1.1, 0.2, len(idl)), np.random.normal(0.25, 0.1, 10)], ['ens1', 'ens2'], idl=[idl, range(1, 11)]))
irregular_matrix = np.array(irregular_array).reshape((dim, dim)) * pe.cov_Obs(1.0, 0.002, 'cov') * pe.pseudo_Obs(1.0, 0.002, 'ens2|r23')

invertible_irregular_matrix = np.identity(dim) + irregular_matrix @ irregular_matrix.T

@@ -276,10 +277,10 @@ def test_matrix_functions():
for (i, j), entry in np.ndenumerate(check_inv):
entry.gamma_method()
if(i == j):
assert math.isclose(entry.value, 1.0, abs_tol=2e-9), 'value ' + str(i) + ',' + str(j) + ' ' + str(entry.value)
assert math.isclose(entry.value, 1.0, abs_tol=1e-9), 'value ' + str(i) + ',' + str(j) + ' ' + str(entry.value)
else:
assert math.isclose(entry.value, 0.0, abs_tol=2e-9), 'value ' + str(i) + ',' + str(j) + ' ' + str(entry.value)
assert math.isclose(entry.dvalue, 0.0, abs_tol=2e-9), 'dvalue ' + str(i) + ',' + str(j) + ' ' + str(entry.dvalue)
assert math.isclose(entry.value, 0.0, abs_tol=1e-9), 'value ' + str(i) + ',' + str(j) + ' ' + str(entry.value)
assert math.isclose(entry.dvalue, 0.0, abs_tol=1e-9), 'dvalue ' + str(i) + ',' + str(j) + ' ' + str(entry.dvalue)

# Check Cholesky decomposition
sym = np.dot(matrix, matrix.T)

@@ -297,10 +298,6 @@ def test_matrix_functions():
for j in range(dim):
assert tmp[j].is_zero()

# Check eigv
v2 = pe.linalg.eigv(sym)
assert np.sum(v - v2).is_zero()

# Check eig function
e2 = pe.linalg.eig(sym)
assert np.all(np.sort(e) == np.sort(e2))

@@ -358,4 +355,3 @@ def test_complex_matrix_real_entries():
my_mat[0, 1] = 4
my_mat[2, 0] = pe.Obs([np.random.normal(1.0, 0.1, 100)], ['t'])
assert np.all((my_mat @ pe.linalg.inv(my_mat) - np.identity(4)) == 0)
@@ -18,7 +18,3 @@ def test_obs_errorbar():
pe.errorbar(x_obs, y_obs, marker="x", ms=2, xerr=xerr, yerr=yerr)

plt.close('all')


def test_print_config():
pe.print_config()

@@ -1,11 +1,9 @@
import numpy as np
import autograd.numpy as anp
import autograd.numpy as np
import os
import copy
import matplotlib.pyplot as plt
import pyerrors as pe
import pytest
import pyerrors.linalg
from hypothesis import given, strategies as st

np.random.seed(0)

@@ -41,10 +39,6 @@ def test_Obs_exceptions():
pe.Obs([np.random.rand(4)], ['name'])
with pytest.raises(ValueError):
pe.Obs([np.random.rand(5)], ['1'], idl=[[5, 3, 2 ,4 ,1]])
with pytest.raises(ValueError):
pe.Obs([np.random.rand(5)], ['1'], idl=[[1, 2, 3, 3, 5]])
with pytest.raises(ValueError):
pe.Obs([np.random.rand(5)], ['1'], idl=[[1, 1, 3, 1, 5]])
with pytest.raises(TypeError):
pe.Obs([np.random.rand(5)], ['1'], idl=['t'])
with pytest.raises(ValueError):

@@ -61,9 +55,9 @@ def test_Obs_exceptions():
my_obs.plot_rep_dist()
with pytest.raises(Exception):
my_obs.plot_piechart()
with pytest.raises(TypeError):
with pytest.raises(Exception):
my_obs.gamma_method(S='2.3')
with pytest.raises(ValueError):
with pytest.raises(Exception):
my_obs.gamma_method(tau_exp=2.3)
my_obs.gamma_method()
my_obs.details()

@@ -108,7 +102,6 @@ def test_comparison():
test_obs1 = pe.pseudo_Obs(value1, 0.1, 't')
value2 = np.random.normal(0, 100)
test_obs2 = pe.pseudo_Obs(value2, 0.1, 't')
assert test_obs1 != None
assert (value1 > value2) == (test_obs1 > test_obs2)
assert (value1 < value2) == (test_obs1 < test_obs2)
assert (value1 >= value2) == (test_obs1 >= test_obs2)

@@ -133,9 +126,9 @@ def test_function_overloading():

fs = [lambda x: x[0] + x[1], lambda x: x[1] + x[0], lambda x: x[0] - x[1], lambda x: x[1] - x[0],
lambda x: x[0] * x[1], lambda x: x[1] * x[0], lambda x: x[0] / x[1], lambda x: x[1] / x[0],
lambda x: anp.exp(x[0]), lambda x: anp.sin(x[0]), lambda x: anp.cos(x[0]), lambda x: anp.tan(x[0]),
lambda x: anp.log(x[0]), lambda x: anp.sqrt(anp.abs(x[0])),
lambda x: anp.sinh(x[0]), lambda x: anp.cosh(x[0]), lambda x: anp.tanh(x[0])]
lambda x: np.exp(x[0]), lambda x: np.sin(x[0]), lambda x: np.cos(x[0]), lambda x: np.tan(x[0]),
lambda x: np.log(x[0]), lambda x: np.sqrt(np.abs(x[0])),
lambda x: np.sinh(x[0]), lambda x: np.cosh(x[0]), lambda x: np.tanh(x[0])]

for i, f in enumerate(fs):
t1 = f([a, b])

@@ -199,7 +192,7 @@ def test_gamma_method_no_windowing():
assert np.isclose(np.sqrt(np.var(obs.deltas['ens'], ddof=1) / obs.shape['ens']), obs.dvalue)
obs.gamma_method(S=1.1)
assert obs.e_tauint['ens'] > 0.5
with pytest.raises(ValueError):
with pytest.raises(Exception):
obs.gamma_method(S=-0.2)


@@ -313,9 +306,9 @@ def test_derived_observables():
test_obs = pe.pseudo_Obs(2, 0.1 * (1 + np.random.rand()), 't', int(1000 * (1 + np.random.rand())))

# Check if autograd and numgrad give the same result
d_Obs_ad = pe.derived_observable(lambda x, **kwargs: x[0] * x[1] * anp.sin(x[0] * x[1]), [test_obs, test_obs])
d_Obs_ad = pe.derived_observable(lambda x, **kwargs: x[0] * x[1] * np.sin(x[0] * x[1]), [test_obs, test_obs])
d_Obs_ad.gamma_method()
d_Obs_fd = pe.derived_observable(lambda x, **kwargs: x[0] * x[1] * anp.sin(x[0] * x[1]), [test_obs, test_obs], num_grad=True)
d_Obs_fd = pe.derived_observable(lambda x, **kwargs: x[0] * x[1] * np.sin(x[0] * x[1]), [test_obs, test_obs], num_grad=True)
d_Obs_fd.gamma_method()

assert d_Obs_ad == d_Obs_fd

@@ -333,7 +326,7 @@ def test_derived_observables():

def test_multi_ens():
names = ['A0', 'A1|r001', 'A1|r002']
test_obs = pe.Obs([np.random.rand(50)], names[:1]) + pe.Obs([np.random.rand(50), np.random.rand(50)], names[1:])
test_obs = pe.Obs([np.random.rand(50), np.random.rand(50), np.random.rand(50)], names)
assert test_obs.e_names == ['A0', 'A1']
assert test_obs.e_content['A0'] == ['A0']
assert test_obs.e_content['A1'] == ['A1|r001', 'A1|r002']

@@ -345,9 +338,6 @@ def test_multi_ens():
ensembles.append(str(i))
assert my_sum.e_names == sorted(ensembles)

with pytest.raises(ValueError):
test_obs = pe.Obs([np.random.rand(50), np.random.rand(50), np.random.rand(50)], names)


def test_multi_ens2():
names = ['ens', 'e', 'en', 'e|r010', 'E|er', 'ens|', 'Ens|34', 'ens|r548984654ez4e3t34terh']
@@ -464,18 +454,6 @@ def test_cobs_overloading():
obs / cobs


def test_pow():
data = [1, 2.341, pe.pseudo_Obs(4.8, 0.48, "test_obs"), pe.cov_Obs(1.1, 0.3 ** 2, "test_cov_obs")]

for d in data:
assert d * d == d ** 2
assert d * d * d == d ** 3

for d2 in data:
assert np.log(d ** d2) == d2 * np.log(d)
assert (d ** d2) ** (1 / d2) == d


def test_reweighting():
my_obs = pe.Obs([np.random.rand(1000)], ['t'])
assert not my_obs.reweighted

@@ -493,33 +471,26 @@ def test_reweighting():
r_obs2 = r_obs[0] * my_obs
assert r_obs2.reweighted
my_covobs = pe.cov_Obs(1.0, 0.003, 'cov')
with pytest.raises(ValueError):
with pytest.raises(Exception):
pe.reweight(my_obs, [my_covobs])
my_obs2 = pe.Obs([np.random.rand(1000)], ['t2'])
with pytest.raises(ValueError):
with pytest.raises(Exception):
pe.reweight(my_obs, [my_obs + my_obs2])
with pytest.raises(ValueError):
with pytest.raises(Exception):
pe.reweight(my_irregular_obs, [my_obs])

my_merged_obs = my_obs + pe.Obs([np.random.rand(1000)], ['q'])
with pytest.raises(ValueError):
pe.reweight(my_merged_obs, [my_merged_obs])


def test_merge_obs():
my_obs1 = pe.Obs([np.random.normal(1, .1, 100)], ['t|1'])
my_obs2 = pe.Obs([np.random.normal(1, .1, 100)], ['t|2'], idl=[range(1, 200, 2)])
my_obs1 = pe.Obs([np.random.rand(100)], ['t'])
my_obs2 = pe.Obs([np.random.rand(100)], ['q'], idl=[range(1, 200, 2)])
merged = pe.merge_obs([my_obs1, my_obs2])
diff = merged - (my_obs2 + my_obs1) / 2
assert np.isclose(0, diff.value, atol=1e-16)
with pytest.raises(ValueError):
diff = merged - my_obs2 - my_obs1
assert diff == -(my_obs1.value + my_obs2.value) / 2
with pytest.raises(Exception):
pe.merge_obs([my_obs1, my_obs1])
my_covobs = pe.cov_Obs(1.0, 0.003, 'cov')
with pytest.raises(ValueError):
with pytest.raises(Exception):
pe.merge_obs([my_obs1, my_covobs])
my_obs3 = pe.Obs([np.random.rand(100)], ['q|2'], idl=[range(1, 200, 2)])
with pytest.raises(ValueError):
pe.merge_obs([my_obs1, my_obs3])



@@ -541,26 +512,23 @@ def test_correlate():
assert corr1 == corr2

my_obs3 = pe.Obs([np.random.rand(100)], ['t'], idl=[range(2, 102)])
with pytest.raises(ValueError):
with pytest.raises(Exception):
pe.correlate(my_obs1, my_obs3)

my_obs4 = pe.Obs([np.random.rand(99)], ['t'])
with pytest.raises(ValueError):
with pytest.raises(Exception):
pe.correlate(my_obs1, my_obs4)

my_obs5 = pe.Obs([np.random.rand(100)], ['t'], idl=[range(5, 505, 5)])
my_obs6 = pe.Obs([np.random.rand(100)], ['t'], idl=[range(5, 505, 5)])
corr3 = pe.correlate(my_obs5, my_obs6)
assert my_obs5.idl == corr3.idl
my_obs7 = pe.Obs([np.random.rand(99)], ['q'])
with pytest.raises(ValueError):
pe.correlate(my_obs1, my_obs7)

my_new_obs = pe.Obs([np.random.rand(100)], ['q3'])
with pytest.raises(ValueError):
with pytest.raises(Exception):
pe.correlate(my_obs1, my_new_obs)
my_covobs = pe.cov_Obs(1.0, 0.003, 'cov')
with pytest.raises(ValueError):
with pytest.raises(Exception):
pe.correlate(my_covobs, my_covobs)
r_obs = pe.reweight(my_obs1, [my_obs1])[0]
with pytest.warns(RuntimeWarning):

@@ -579,11 +547,11 @@ def test_merge_idx():

for j in range(5):
idll = [range(1, int(round(np.random.uniform(300, 700))), int(round(np.random.uniform(1, 14)))) for i in range(10)]
assert list(pe.obs._merge_idx(idll)) == sorted(set().union(*idll))
assert pe.obs._merge_idx(idll) == sorted(set().union(*idll))

for j in range(5):
idll = [range(int(round(np.random.uniform(1, 28))), int(round(np.random.uniform(300, 700))), int(round(np.random.uniform(1, 14)))) for i in range(10)]
assert list(pe.obs._merge_idx(idll)) == sorted(set().union(*idll))
assert pe.obs._merge_idx(idll) == sorted(set().union(*idll))

idl = [list(np.arange(1, 14)) + list(range(16, 100, 4)), range(4, 604, 4), [2, 4, 5, 6, 8, 9, 12, 24], range(1, 20, 1), range(50, 789, 7)]
new_idx = pe.obs._merge_idx(idl)
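# Note (illustrative, not part of the upstream diff): as the asserts above
# encode, pe.obs._merge_idx returns the sorted union of all Monte Carlo index
# lists (as a list, or an equivalent range where possible), e.g.
#
#     list(pe.obs._merge_idx([range(1, 6, 2), [2, 4]])) == [1, 2, 3, 4, 5]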
@@ -694,14 +662,14 @@ def test_gamma_method_irregular():
assert (a.dvalue - 5 * a.ddvalue < expe and expe < a.dvalue + 5 * a.ddvalue)

arr2 = np.random.normal(1, .2, size=N)
afull = pe.Obs([arr], ['a1']) + pe.Obs([arr2], ['a2'])
afull = pe.Obs([arr, arr2], ['a1', 'a2'])

configs = np.ones_like(arr2)
for i in np.random.uniform(0, len(arr2), size=int(.8*N)):
configs[int(i)] = 0
zero_arr2 = [arr2[i] for i in range(len(arr2)) if not configs[i] == 0]
idx2 = [i + 1 for i in range(len(configs)) if configs[i] == 1]
a = pe.Obs([zero_arr], ['a1'], idl=[idx]) + pe.Obs([zero_arr2], ['a2'], idl=[idx2])
a = pe.Obs([zero_arr, zero_arr2], ['a1', 'a2'], idl=[idx, idx2])

afull.gamma_method()
a.gamma_method()

@@ -787,7 +755,7 @@ def test_gamma_method_irregular():
my_obs.gm()
idl += [range(1, 400, 4)]
my_obs = pe.Obs([dat for i in range(len(idl))], ['%s|%d' % ('A', i) for i in range(len(idl))], idl=idl)
with pytest.raises(ValueError):
with pytest.raises(Exception):
my_obs.gm()

# check cases where tau is large compared to the chain length

@@ -803,11 +771,11 @@ def test_gamma_method_irregular():
carr = gen_autocorrelated_array(arr, .8)
o = pe.Obs([carr], ['test'])
o.gamma_method()
no = np.nan * o
no = np.NaN * o
no.gamma_method()
o.idl['test'] = range(1, 1998, 2)
o.gamma_method()
no = np.nan * o
no = np.NaN * o
no.gamma_method()


@@ -865,7 +833,7 @@ def test_covariance_vs_numpy():
data1 = np.random.normal(2.5, 0.2, N)
data2 = np.random.normal(0.5, 0.08, N)
data3 = np.random.normal(-178, 5, N)
uncorr = np.vstack([data1, data2, data3])
uncorr = np.row_stack([data1, data2, data3])
corr = np.random.multivariate_normal([0.0, 17, -0.0487], [[1.0, 0.6, -0.22], [0.6, 0.8, 0.01], [-0.22, 0.01, 1.9]], N).T

for X in [uncorr, corr]:

@@ -1035,7 +1003,7 @@ def test_correlation_intersection_of_idls():


def test_covariance_non_identical_objects():
obs1 = pe.Obs([np.random.normal(1.0, 0.1, 1000), np.random.normal(1.0, 0.1, 1000)], ["ens|r1", "ens|r2"]) + pe.Obs([np.random.normal(1.0, 0.1, 732)], ['ens2'])
obs1 = pe.Obs([np.random.normal(1.0, 0.1, 1000), np.random.normal(1.0, 0.1, 1000), np.random.normal(1.0, 0.1, 732)], ["ens|r1", "ens|r2", "ens2"])
obs1.gamma_method()
obs2 = obs1 + 1e-18
obs2.gamma_method()

@@ -1088,27 +1056,6 @@ def test_covariance_reorder_non_overlapping_data():
assert np.isclose(corr1[0, 1], corr2[0, 1], atol=1e-14)


def test_sort_corr():
xd = {
'b': [1, 2, 3],
'a': [2.2, 4.4],
'c': [3.7, 5.1]
}

yd = {k : pe.cov_Obs(xd[k], [.2 * o for o in xd[k]], k) for k in xd}
key_orig = list(yd.keys())
y_all = np.concatenate([np.array(yd[key]) for key in key_orig])
[o.gm() for o in y_all]
cov = pe.covariance(y_all)

key_ls = key_sorted = sorted(key_orig)
y_sorted = np.concatenate([np.array(yd[key]) for key in key_sorted])
[o.gm() for o in y_sorted]
cov_sorted = pe.covariance(y_sorted)
retcov = pe.obs.sort_corr(cov, key_orig, yd)
assert np.sum(retcov - cov_sorted) == 0


def test_empty_obs():
o = pe.Obs([np.random.rand(100)], ['test'])
q = o + pe.Obs([], [], means=[])

@@ -1119,9 +1066,6 @@ def test_reweight_method():
obs1 = pe.pseudo_Obs(0.2, 0.01, 'test')
rw = pe.pseudo_Obs(0.999, 0.001, 'test')
assert obs1.reweight(rw) == pe.reweight(rw, [obs1])[0]
rw2 = pe.pseudo_Obs(0.999, 0.001, 'test2')
with pytest.raises(ValueError):
obs1.reweight(rw2)


def test_jackknife():

@@ -1138,7 +1082,7 @@ def test_jackknife():

assert np.allclose(tmp_jacks, my_obs.export_jackknife())
my_new_obs = my_obs + pe.Obs([full_data], ['test2'])
with pytest.raises(ValueError):
with pytest.raises(Exception):
my_new_obs.export_jackknife()


@@ -1150,20 +1094,6 @@ def test_import_jackknife():
assert my_obs == reconstructed_obs


def test_import_bootstrap():
seed = 4321
samples = 1234
length = 820
name = "test"

rng = np.random.default_rng(seed)
random_numbers = rng.integers(0, length, size=(samples, length))
obs = pe.pseudo_Obs(2.447, 0.14, name, length)
boots = obs.export_bootstrap(1234, random_numbers=random_numbers)
re_obs = pe.import_bootstrap(boots, name, random_numbers=random_numbers)
assert obs == re_obs

def test_reduce_deltas():
idx_old = range(1, 101)
deltas = [float(i) for i in idx_old]

@@ -1326,7 +1256,7 @@ def test_nan_obs():
no.gamma_method()

o.idl['test'] = [1, 5] + list(range(7, 2002, 2))
no = np.nan * o
no = np.NaN * o
no.gamma_method()


@@ -1335,10 +1265,9 @@ def test_format_uncertainty():
assert pe.obs._format_uncertainty(0.548, 2.48497, 2) == '0.5(2.5)'
assert pe.obs._format_uncertainty(0.548, 2.48497, 4) == '0.548(2.485)'
assert pe.obs._format_uncertainty(0.548, 20078.3, 9) == '0.5480(20078.3000)'
pe.obs._format_uncertainty(np.nan, 1)
pe.obs._format_uncertainty(1, np.nan)
pe.obs._format_uncertainty(np.nan, np.inf)

pe.obs._format_uncertainty(np.NaN, 1)
pe.obs._format_uncertainty(1, np.NaN)
pe.obs._format_uncertainty(np.NaN, np.inf)

def test_format():
o1 = pe.pseudo_Obs(0.348, 0.0123, "test")

@@ -1346,7 +1275,6 @@ def test_format():
assert o1.__format__("+2") == '+0.348(12)'
assert o1.__format__(" 2") == ' 0.348(12)'


def test_f_string_obs():
o1 = pe.pseudo_Obs(0.348, 0.0123, "test")
print(f"{o1}")

@@ -1355,134 +1283,7 @@ def test_f_string_obs():
print(f"{o1:-1}")
print(f"{o1: 8}")


def test_f_string_cobs():
o_real = pe.pseudo_Obs(0.348, 0.0123, "test")
o_imag = pe.pseudo_Obs(0.348, 0.0123, "test")
o1 = pe.CObs(o_real, o_imag)
print(f"{o1}")
print(f"{o1:3}")
print(f"{o1:+3}")
print(f"{o1:-1}")
print(f"{o1: 8}")


def test_compute_drho_fails():
obs = pe.input.json.load_json("tests/data/compute_drho_fails.json.gz")
obs.gm()
assert np.isclose(obs.dvalue, 0.0022150779611891094)


def test_vec_gm():
obs = pe.misc.gen_correlated_data(np.arange(3), np.array([[0.0364 , 0.03627262, 0.03615699],
[0.03627262, 0.03688438, 0.03674798],
[0.03615699, 0.03674798, 0.03732882]]), "qq", 3.8, 1000)
pe.gm(obs[0], S=0)
assert obs[0].S["qq"] == 0
pe.gm(obs, S=1.3)
assert np.all(np.vectorize(lambda x: x.S["qq"])(obs) == 1.3)
aa = np.array([obs, obs, obs])
pe.gamma_method(aa, S=2.2)
assert np.all(np.vectorize(lambda x: x.S["qq"])(aa) == 2.20)
cc = pe.Corr(obs)
pe.gm(cc, S=4.12)
assert np.all(np.vectorize(lambda x: x.S["qq"])(cc.content) == 4.12)
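# Note (illustrative, not part of the upstream diff): pe.gm is a shorthand for
# pe.gamma_method and, as exercised above, both accept a single Obs, an
# iterable or numpy array of Obs, or a Corr, applying the error analysis
# element-wise with the given parameters, e.g.
#
#     pe.gm([pe.pseudo_Obs(1.0, 0.1, 'ens') for _ in range(3)], S=1.5)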
def test_complex_addition():
o = pe.pseudo_Obs(34.12, 1e-4, "testens")
r = o + 2j
assert r.real == o
r = r * 1j
assert r.imag == o


def test_missing_replica():
N1 = 3000
N2 = 2000
O1 = np.random.normal(1.0, .1, N1 + N2)
O2 = .5 * O1[:N1]

w1 = N1 / (N1 + N2)
w2 = N2 / (N1 + N2)
m12 = np.mean(O1[N1:])
m2 = np.mean(O2)
d12 = np.std(O1[N1:]) / np.sqrt(N2) # error of <O1> from second rep
d2 = np.std(O2) / np.sqrt(N1) # error of <O2> from first rep
dval = np.sqrt((w2 * d12 / m2)**2 + (w2 * m12 * d2 / m2**2)**2) # complete error of <O1>/<O2>
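# Note (illustrative derivation, not part of the upstream diff): with
# O2 = .5 * O1[:N1] the ratio reads
#     <O1>/<O2> = (w1 * m11 + w2 * m12) / m2 = 2 * w1 + w2 * m12 / m2,
# since m11 = 2 * m2 exactly. The first-replica fluctuations of the numerator
# therefore cancel against the denominator, and standard error propagation
# leaves the two terms combined in dval above: (w2 / m2) * d12 from the second
# replica of O1 and (w2 * m12 / m2**2) * d2 from the denominator.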
# pyerrors version that should give the same result
O1dobs = pe.Obs([O1[:N1], O1[N1:]], names=['E|1', 'E|2'])
O2dobs = pe.Obs([O2], names=['E|1'])
O1O2 = O1dobs / O2dobs
O1O2.gm(S=0)

# explicit construction with different ensembles
O1a = pe.Obs([O1[:N1]], names=['E|1'])
O1b = pe.Obs([O1[N1:]], names=['F|2'])
O1O2b = (w1 * O1a + w2 * O1b) / O2dobs
O1O2b.gm(S=0)

# pyerrors version without replica (missing configs)
O1c = pe.Obs([O1], names=['E|1'])
O1O2c = O1c / O2dobs
O1O2c.gm(S=0)

for o in [O1O2, O1O2b, O1O2c]:
assert(np.isclose(dval, o.dvalue, atol=0, rtol=5e-2))

o = O1O2 * O2dobs - O1dobs
o.gm()
assert(o.is_zero())

o = O1dobs / O1O2 - O2dobs
o.gm()
assert(o.is_zero())

# bring more randomness and complexity into the game
Nl = [int(np.random.uniform(low=500, high=5000)) for i in range(4)]
wl = np.array(Nl) / sum(Nl)
O1 = np.random.normal(1.0, .1, sum(Nl))

# pyerrors replica version
datl = [O1[:Nl[0]], O1[Nl[0]:sum(Nl[:2])], O1[sum(Nl[:2]):sum(Nl[:3])], O1[sum(Nl[:3]):sum(Nl[:4])]]
O1dobs = pe.Obs(datl, names=['E|%d' % (d) for d in range(len(Nl))])
O2dobs = .5 * pe.Obs([datl[0]], names=['E|0'])
O3dobs = 2. / pe.Obs([datl[1]], names=['E|1'])
O1O2 = O1dobs / O2dobs
O1O2.gm(S=0)
O1O2O3 = O1O2 * np.sinh(O3dobs)
O1O2O3.gm(S=0)

# explicit construction with different ensembles
charl = ['E', 'F', 'G', 'H']
Ol = [pe.Obs([datl[i]], names=['%s|%d' % (charl[i], i)]) for i in range(len(Nl))]
O1O2b = sum(np.array(Ol) * wl) / O2dobs
O1O2b.gm(S=0)
i = 1
O3dobsb = 2. / pe.Obs([datl[i]], names=['%s|%d' % (charl[i], i)])
O1O2O3b = O1O2b * np.sinh(O3dobsb)
O1O2O3b.gm(S=0)

for op in [[O1O2, O1O2b], [O1O2O3, O1O2O3b]]:
assert np.isclose(op[0].value, op[1].value)
assert np.isclose(op[0].dvalue, op[1].dvalue, atol=0, rtol=5e-2)

# perform the same test using the array_mode of derived_observable
O1O2 = pyerrors.linalg.matmul(np.diag(np.diag(np.reshape(4 * [O1dobs], (2, 2)))), np.diag(np.diag(np.reshape(4 * [1. / O2dobs], (2, 2)))))
O1O2O3 = pyerrors.linalg.matmul(O1O2, np.diag(np.diag(np.sinh(np.reshape(4 * [O3dobs], (2, 2))))))
O1O2 = O1O2[0][0]
O1O2.gm(S=0)
O1O2O3 = O1O2O3[0][0]
O1O2O3.gm(S=0)

O1O2b = pyerrors.linalg.matmul(np.diag(np.diag(np.reshape(4 * [sum(np.array(Ol) * wl)], (2, 2)))), np.diag(np.diag(np.reshape(4 * [1. / O2dobs], (2, 2)))))
O1O2O3b = pyerrors.linalg.matmul(O1O2b, np.diag(np.diag(np.sinh(np.reshape(4 * [O3dobsb], (2, 2))))))
O1O2b = O1O2b[0][0]
O1O2b.gm(S=0)
O1O2O3b = O1O2O3b[0][0]
O1O2O3b.gm(S=0)

for op in [[O1O2, O1O2b], [O1O2O3, O1O2O3b]]:
assert np.isclose(op[1].value, op[0].value)
assert np.isclose(op[1].dvalue, op[0].dvalue, atol=0, rtol=5e-2)
@@ -46,9 +46,10 @@ def test_nan_df_export_import(tmp_path):
my_df.loc[1, "int"] = np.nan

for gz in [True, False]:
with pytest.warns(UserWarning, match="nan value in column int will be replaced by None"):
pe.input.pandas.dump_df(my_df, (tmp_path / 'df_output').as_posix(), gz=gz)
reconstructed_df = pe.input.pandas.load_df((tmp_path / 'df_output').as_posix(), auto_gamma=True, gz=gz)
with pytest.warns(UserWarning, match="nan value in column int will be replaced by None"):
warnings.warn("nan value in column int will be replaced by None", UserWarning)
assert reconstructed_df.loc[1, "int"] is None
assert np.all(reconstructed_df.loc[:, "float"] == my_df.loc[:, "float"])
assert np.all(reconstructed_df.loc[:, "Obs1"] == my_df.loc[:, "Obs1"])

@@ -1,6 +1,7 @@
import os
import sys
import inspect
import pyerrors as pe
import pyerrors.input.sfcf as sfin
import shutil
import pytest

@@ -13,348 +14,107 @@ sys.path.insert(0, parent_dir)
def build_test_environment(path, env_type, cfgs, reps):
shutil.copytree("tests/data/sfcf_test/data_"+env_type, (path + "/data_" + env_type))
if env_type == "o":
for i in range(2, cfgs+1):
for i in range(2,cfgs+1):
shutil.copytree(path + "/data_o/test_r0/cfg1", path + "/data_o/test_r0/cfg"+str(i))
for i in range(1, reps):
for i in range(1,reps):
shutil.copytree(path + "/data_o/test_r0", path + "/data_o/test_r"+str(i))
elif env_type == "c":
for i in range(2, cfgs+1):
for i in range(2,cfgs+1):
shutil.copy(path + "/data_c/data_c_r0/data_c_r0_n1", path + "/data_c/data_c_r0/data_c_r0_n"+str(i))
for i in range(1, reps):
for i in range(1,reps):
os.mkdir(path + "/data_c/data_c_r"+str(i))
for j in range(1, cfgs+1):
shutil.copy(path + "/data_c/data_c_r0/data_c_r0_n1", path + "/data_c/data_c_r"+str(i)+"/data_c_r"+str(i)+"_n"+str(j))
for j in range(1,cfgs+1):
shutil.copy(path + "/data_c/data_c_r0/data_c_r0_n1",path + "/data_c/data_c_r"+str(i)+"/data_c_r"+str(i)+"_n"+str(j))
elif env_type == "a":
for i in range(1, reps):
for i in range(1,reps):
for corr in ["f_1", "f_A", "F_V0"]:
shutil.copy(path + "/data_a/data_a_r0." + corr, path + "/data_a/data_a_r" + str(i) + "." + corr)
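# Note (illustrative, not part of the upstream diff): build_test_environment
# clones the checked-in sfcf sample data into a temporary directory and then
# replicates configurations (cfgs) and replica directories/files (reps) by
# copying, so each test below reads a freshly generated tree, e.g.
#
#     build_test_environment(str(tmp_path), "o", 5, 3)  # 3 replica with 5 cfgs each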
def test_o_bb(tmp_path):
build_test_environment(str(tmp_path), "o", 5, 3)
f_1 = sfin.read_sfcf(str(tmp_path) + "/data_o", "test", "f_1", quarks="lquark lquark", wf=0, wf2=0, version="2.0", corr_type="bb")
build_test_environment(str(tmp_path), "o",5,3)
f_1 = sfin.read_sfcf(str(tmp_path) + "/data_o", "test", "f_1",quarks="lquark lquark", wf = 0, wf2=0, version = "2.0", corr_type="bb")
print(f_1)
assert len(f_1) == 1
assert list(f_1[0].shape.keys()) == ["test_|r0", "test_|r1", "test_|r2"]
assert list(f_1[0].shape.keys()) == ["test_|r0","test_|r1","test_|r2"]
assert f_1[0].value == 351.1941525454502


def test_o_bi(tmp_path):
build_test_environment(str(tmp_path), "o", 5, 3)
f_A = sfin.read_sfcf(str(tmp_path) + "/data_o", "test", "f_A", quarks="lquark lquark", wf=0, version="2.0")
build_test_environment(str(tmp_path), "o",5,3)
f_A = sfin.read_sfcf(str(tmp_path) + "/data_o", "test", "f_A",quarks="lquark lquark", wf = 0, version = "2.0")
print(f_A)
assert len(f_A) == 3
assert list(f_A[0].shape.keys()) == ["test_|r0", "test_|r1", "test_|r2"]
assert list(f_A[0].shape.keys()) == ["test_|r0","test_|r1","test_|r2"]
assert f_A[0].value == 65.4711887279723
assert f_A[1].value == 1.0447210336915187
assert f_A[2].value == -41.025094911185185


def test_o_bi_files(tmp_path):
build_test_environment(str(tmp_path), "o", 10, 3)
f_A = sfin.read_sfcf(str(tmp_path) + "/data_o", "test", "f_A", quarks="lquark lquark", wf=0, version="2.0",
files=[["cfg" + str(i) for i in range(1, 11, 2)], ["cfg" + str(i) for i in range(2, 11, 2)], ["cfg" + str(i) for i in range(1, 11, 2)]])
print(f_A)
assert len(f_A) == 3
assert list(f_A[0].shape.keys()) == ["test_|r0", "test_|r1", "test_|r2"]
assert f_A[0].value == 65.4711887279723
assert f_A[1].value == 1.0447210336915187
assert f_A[2].value == -41.025094911185185


def test_o_bib(tmp_path):
build_test_environment(str(tmp_path), "o", 5, 3)
f_V0 = sfin.read_sfcf(str(tmp_path) + "/data_o", "test", "F_V0", quarks="lquark lquark", wf=0, wf2=0, version="2.0", corr_type="bib")
build_test_environment(str(tmp_path), "o",5,3)
f_V0 = sfin.read_sfcf(str(tmp_path) + "/data_o", "test", "F_V0",quarks="lquark lquark", wf = 0, wf2 = 0, version = "2.0", corr_type="bib")
print(f_V0)
assert len(f_V0) == 3
assert list(f_V0[0].shape.keys()) == ["test_|r0", "test_|r1", "test_|r2"]
assert list(f_V0[0].shape.keys()) == ["test_|r0","test_|r1","test_|r2"]
assert f_V0[0] == 683.6776090085115
assert f_V0[1] == 661.3188585582334
assert f_V0[2] == 683.6776090081005


def test_simple_multi_o(tmp_path):
build_test_environment(str(tmp_path), "o", 5, 3)
corrs = sfin.read_sfcf_multi(str(tmp_path) + "/data_o", "test", ["F_V0"], quarks_list=["lquark lquark"], wf1_list=[0], wf2_list=[0], version="2.0", corr_type_list=["bib"])
f_V0 = corrs["F_V0"]['lquark lquark']['0']['0']['0']
assert len(f_V0) == 3
assert list(f_V0[0].shape.keys()) == ["test_|r0", "test_|r1", "test_|r2"]
assert f_V0[0] == 683.6776090085115
assert f_V0[1] == 661.3188585582334
assert f_V0[2] == 683.6776090081005


def test_dict_multi_o(tmp_path):
build_test_environment(str(tmp_path), "o", 5, 3)
corrs = sfin.read_sfcf_multi(str(tmp_path) + "/data_o", "test",
["F_V0", "f_A", "f_1"], quarks_list=["lquark lquark"],
wf_list=[0], wf2_list=[0], version="2.0",
corr_type_list=["bib", "bi", "bb"], nice_output=False)
print(corrs)
f_1 = corrs["f_1"]['lquark lquark']['0']['0']['0']
f_A = corrs["f_A"]['lquark lquark']['0']['0']['0']
f_V0 = corrs["F_V0"]['lquark lquark']['0']['0']['0']

assert len(f_1) == 1
assert list(f_1[0].shape.keys()) == ["test_|r0", "test_|r1", "test_|r2"]
assert f_1[0].value == 351.1941525454502

assert len(f_A) == 3
assert list(f_A[0].shape.keys()) == ["test_|r0", "test_|r1", "test_|r2"]
assert f_A[0].value == 65.4711887279723
assert f_A[1].value == 1.0447210336915187
assert f_A[2].value == -41.025094911185185

assert len(f_V0) == 3
assert list(f_V0[0].shape.keys()) == ["test_|r0", "test_|r1", "test_|r2"]
assert f_V0[0] == 683.6776090085115
assert f_V0[1] == 661.3188585582334
assert f_V0[2] == 683.6776090081005


def test_c_bb(tmp_path):
build_test_environment(str(tmp_path), "c", 5, 3)
f_1 = sfin.read_sfcf(str(tmp_path) + "/data_c", "data_c", "f_1", quarks="lquark lquark", wf=0, wf2=0, version="2.0c", corr_type="bb")
build_test_environment(str(tmp_path), "c",5,3)
f_1 = sfin.read_sfcf(str(tmp_path) + "/data_c", "data_c", "f_1", quarks="lquark lquark", wf = 0, wf2=0, version = "2.0c", corr_type="bb")
print(f_1)
assert len(f_1) == 1
assert list(f_1[0].shape.keys()) == ["data_c_|r0", "data_c_|r1", "data_c_|r2"]
assert list(f_1[0].shape.keys()) == ["data_c_|r0","data_c_|r1","data_c_|r2"]
assert f_1[0].value == 351.1941525454502


def test_c_bi(tmp_path):
build_test_environment(str(tmp_path), "c", 5, 3)
f_A = sfin.read_sfcf(str(tmp_path) + "/data_c", "data_c", "f_A", quarks="lquark lquark", wf=0, version="2.0c")
build_test_environment(str(tmp_path), "c",5,3)
f_A = sfin.read_sfcf(str(tmp_path) + "/data_c", "data_c", "f_A", quarks="lquark lquark", wf = 0, version = "2.0c")
print(f_A)
assert len(f_A) == 3
assert list(f_A[0].shape.keys()) == ["data_c_|r0", "data_c_|r1", "data_c_|r2"]
assert list(f_A[0].shape.keys()) == ["data_c_|r0","data_c_|r1","data_c_|r2"]
assert f_A[0].value == 65.4711887279723
assert f_A[1].value == 1.0447210336915187
assert f_A[2].value == -41.025094911185185


def test_c_bi_files(tmp_path):
build_test_environment(str(tmp_path), "c", 10, 3)
f_A = sfin.read_sfcf(str(tmp_path) + "/data_c", "data_c", "f_A", quarks="lquark lquark", wf=0, version="2.0c",
files=[["data_c_r0_n" + str(i) for i in range(1, 11, 2)], ["data_c_r1_n" + str(i) for i in range(2, 11, 2)], ["data_c_r2_n" + str(i) for i in range(1, 11, 2)]])
print(f_A)
assert len(f_A) == 3
assert list(f_A[0].shape.keys()) == ["data_c_|r0", "data_c_|r1", "data_c_|r2"]
assert f_A[0].value == 65.4711887279723
assert f_A[1].value == 1.0447210336915187
assert f_A[2].value == -41.025094911185185


def test_c_bi_files_int_fail(tmp_path):
build_test_environment(str(tmp_path), "c", 10, 3)
with pytest.raises(TypeError):
sfin.read_sfcf(str(tmp_path) + "/data_c", "data_c", "f_A", quarks="lquark lquark", wf=0, version="2.0c",
files=[[range(1, 11, 2)], [range(2, 11, 2)], [range(1, 11, 2)]])


def test_c_bib(tmp_path):
build_test_environment(str(tmp_path), "c", 5, 3)
f_V0 = sfin.read_sfcf(str(tmp_path) + "/data_c", "data_c", "F_V0", quarks="lquark lquark", wf=0, wf2=0, version="2.0c", corr_type="bib")
build_test_environment(str(tmp_path), "c",5,3)
f_V0 = sfin.read_sfcf(str(tmp_path) + "/data_c", "data_c", "F_V0",quarks="lquark lquark", wf = 0, wf2 = 0, version = "2.0c", corr_type="bib")
print(f_V0)
assert len(f_V0) == 3
assert list(f_V0[0].shape.keys()) == ["data_c_|r0", "data_c_|r1", "data_c_|r2"]
assert list(f_V0[0].shape.keys()) == ["data_c_|r0","data_c_|r1","data_c_|r2"]
assert f_V0[0] == 683.6776090085115
assert f_V0[1] == 661.3188585582334
assert f_V0[2] == 683.6776090081005


def test_simple_multi_c(tmp_path):
build_test_environment(str(tmp_path), "c", 5, 3)
corrs = sfin.read_sfcf_multi(str(tmp_path) + "/data_c", "data_c", ["F_V0"], quarks_list=["lquark lquark"], wf1_list=[0], wf2_list=[0], version="2.0c", corr_type_list=["bib"])
f_V0 = corrs["F_V0"]['lquark lquark']['0']['0']['0']
assert len(f_V0) == 3
assert list(f_V0[0].shape.keys()) == ["data_c_|r0", "data_c_|r1", "data_c_|r2"]
assert f_V0[0] == 683.6776090085115
assert f_V0[1] == 661.3188585582334
assert f_V0[2] == 683.6776090081005

def test_dict_multi_c(tmp_path):
build_test_environment(str(tmp_path), "c", 5, 3)
corrs = sfin.read_sfcf_multi(str(tmp_path) + "/data_c", "data_c",
["F_V0", "f_A", "f_1"], quarks_list=["lquark lquark"],
wf_list=[0], wf2_list=[0], version="2.0c",
corr_type_list=["bib", "bi", "bb"], nice_output=False)
print(corrs)
f_1 = corrs["f_1"]['lquark lquark']['0']['0']['0']
f_A = corrs["f_A"]['lquark lquark']['0']['0']['0']
f_V0 = corrs["F_V0"]['lquark lquark']['0']['0']['0']

assert len(f_1) == 1
assert list(f_1[0].shape.keys()) == ["data_c_|r0", "data_c_|r1", "data_c_|r2"]
assert f_1[0].value == 351.1941525454502

assert len(f_A) == 3
assert list(f_A[0].shape.keys()) == ["data_c_|r0", "data_c_|r1", "data_c_|r2"]
assert f_A[0].value == 65.4711887279723
assert f_A[1].value == 1.0447210336915187
assert f_A[2].value == -41.025094911185185

assert len(f_V0) == 3
assert list(f_V0[0].shape.keys()) == ["data_c_|r0", "data_c_|r1", "data_c_|r2"]
assert f_V0[0] == 683.6776090085115
assert f_V0[1] == 661.3188585582334
assert f_V0[2] == 683.6776090081005


def test_dict_multi_wf_c(tmp_path):
build_test_environment(str(tmp_path), "c", 5, 3)
corrs = sfin.read_sfcf_multi(str(tmp_path) + "/data_c", "data_c",
["F_V0", "f_A", "f_1"], quarks_list=["lquark lquark"],
wf_list=[0, 1], wf2_list=[0, 1], version="2.0c",
corr_type_list=["bib", "bi", "bb"], nice_output=False)
rep_names = ["data_c_|r0", "data_c_|r1", "data_c_|r2"]
f_1_00 = corrs["f_1"]['lquark lquark']['0']['0']['0']
f_1_01 = corrs["f_1"]['lquark lquark']['0']['0']['1']
f_1_10 = corrs["f_1"]['lquark lquark']['0']['1']['0']
f_1_11 = corrs["f_1"]['lquark lquark']['0']['1']['1']

assert len(f_1_00) == 1
assert list(f_1_00[0].shape.keys()) == rep_names
assert f_1_00[0].value == 351.1941525454502

assert len(f_1_01) == 1
assert list(f_1_01[0].shape.keys()) == rep_names
assert f_1_01[0].value == 351.20703575855345

assert len(f_1_10) == 1
assert list(f_1_10[0].shape.keys()) == rep_names
assert f_1_10[0].value == 351.20703575855515

assert len(f_1_11) == 1
assert list(f_1_11[0].shape.keys()) == rep_names
assert f_1_11[0].value == 351.22001235609065

f_A = corrs["f_A"]['lquark lquark']['0']['0']['0']

assert len(f_A) == 3
assert list(f_A[0].shape.keys()) == rep_names
assert f_A[0].value == 65.4711887279723
assert f_A[1].value == 1.0447210336915187
assert f_A[2].value == -41.025094911185185

f_V0_00 = corrs["F_V0"]['lquark lquark']['0']['0']['0']
f_V0_01 = corrs["F_V0"]['lquark lquark']['0']['0']['1']
f_V0_10 = corrs["F_V0"]['lquark lquark']['0']['1']['0']
f_V0_11 = corrs["F_V0"]['lquark lquark']['0']['1']['1']

assert len(f_V0_00) == 3
assert list(f_V0_00[0].shape.keys()) == rep_names
assert f_V0_00[0].value == 683.6776090085115
assert f_V0_00[1].value == 661.3188585582334
assert f_V0_00[2].value == 683.6776090081005

assert len(f_V0_01) == 3
assert list(f_V0_01[0].shape.keys()) == rep_names
assert f_V0_01[0].value == 683.7028316879306
assert f_V0_01[1].value == 661.3432563640756
assert f_V0_01[2].value == 683.7028316875197

assert len(f_V0_10) == 3
assert list(f_V0_10[0].shape.keys()) == rep_names
assert f_V0_10[0].value == 683.7028316879289
assert f_V0_10[1].value == 661.343256364074
assert f_V0_10[2].value == 683.702831687518

assert len(f_V0_11) == 3
assert list(f_V0_11[0].shape.keys()) == rep_names
assert f_V0_11[0].value == 683.7280552978792
assert f_V0_11[1].value == 661.3676550700158
assert f_V0_11[2].value == 683.7280552974681

def test_a_bb(tmp_path):
|
||||
build_test_environment(str(tmp_path), "a", 5, 3)
|
||||
f_1 = sfin.read_sfcf(str(tmp_path) + "/data_a", "data_a", "f_1", quarks="lquark lquark", wf=0, wf2=0, version="2.0a", corr_type="bb")
|
||||
build_test_environment(str(tmp_path), "a",5,3)
|
||||
f_1 = sfin.read_sfcf(str(tmp_path) + "/data_a", "data_a", "f_1", quarks="lquark lquark", wf = 0, wf2=0, version = "2.0a", corr_type="bb")
|
||||
print(f_1)
|
||||
assert len(f_1) == 1
|
||||
assert list(f_1[0].shape.keys()) == ["data_a_|r0", "data_a_|r1", "data_a_|r2"]
|
||||
assert list(f_1[0].shape.keys()) == ["data_a_|r0","data_a_|r1","data_a_|r2"]
|
||||
assert f_1[0].value == 351.1941525454502
|
||||
|
||||
|
||||
def test_a_bi(tmp_path):
|
||||
build_test_environment(str(tmp_path), "a", 5, 3)
|
||||
f_A = sfin.read_sfcf(str(tmp_path) + "/data_a", "data_a", "f_A", quarks="lquark lquark", wf=0, version="2.0a")
|
||||
build_test_environment(str(tmp_path), "a",5,3)
|
||||
f_A = sfin.read_sfcf(str(tmp_path) + "/data_a", "data_a", "f_A", quarks="lquark lquark", wf = 0, version = "2.0a")
|
||||
print(f_A)
|
||||
assert len(f_A) == 3
|
||||
assert list(f_A[0].shape.keys()) == ["data_a_|r0", "data_a_|r1", "data_a_|r2"]
|
||||
assert list(f_A[0].shape.keys()) == ["data_a_|r0","data_a_|r1","data_a_|r2"]
|
||||
assert f_A[0].value == 65.4711887279723
|
||||
assert f_A[1].value == 1.0447210336915187
|
||||
assert f_A[2].value == -41.025094911185185
|
||||
|
||||
|
||||
def test_a_bi_files(tmp_path):
|
||||
build_test_environment(str(tmp_path), "a", 5, 3)
|
||||
f_A = sfin.read_sfcf(str(tmp_path) + "/data_a", "data_a", "f_A", quarks="lquark lquark", wf=0, version="2.0a", files=["data_a_r0.f_A", "data_a_r1.f_A", "data_a_r2.f_A"])
|
||||
print(f_A)
|
||||
assert len(f_A) == 3
|
||||
assert list(f_A[0].shape.keys()) == ["data_a_|r0", "data_a_|r1", "data_a_|r2"]
|
||||
assert f_A[0].value == 65.4711887279723
|
||||
assert f_A[1].value == 1.0447210336915187
|
||||
assert f_A[2].value == -41.025094911185185
|
||||
|
||||
|
||||
def test_a_bi_files_int_fail(tmp_path):
|
||||
build_test_environment(str(tmp_path), "a", 10, 3)
|
||||
with pytest.raises(TypeError):
|
||||
sfin.read_sfcf(str(tmp_path) + "/data_a", "data_a", "f_A", quarks="lquark lquark", wf=0, version="2.0a",
|
||||
files=[[range(1, 11, 2)], [range(2, 11, 2)], [range(1, 11, 2)]])
|
||||
|
||||
|
||||
def test_a_bib(tmp_path):
|
||||
build_test_environment(str(tmp_path), "a", 5, 3)
|
||||
f_V0 = sfin.read_sfcf(str(tmp_path) + "/data_a", "data_a", "F_V0", quarks="lquark lquark", wf=0, wf2=0, version="2.0a", corr_type="bib")
|
||||
build_test_environment(str(tmp_path), "a",5,3)
|
||||
f_V0 = sfin.read_sfcf(str(tmp_path) + "/data_a", "data_a", "F_V0",quarks="lquark lquark", wf = 0, wf2 = 0, version = "2.0a", corr_type="bib")
|
||||
print(f_V0)
|
||||
assert len(f_V0) == 3
|
||||
assert list(f_V0[0].shape.keys()) == ["data_a_|r0", "data_a_|r1", "data_a_|r2"]
|
||||
assert list(f_V0[0].shape.keys()) == ["data_a_|r0","data_a_|r1","data_a_|r2"]
|
||||
assert f_V0[0] == 683.6776090085115
|
||||
assert f_V0[1] == 661.3188585582334
|
||||
assert f_V0[2] == 683.6776090081005
|
||||
|
||||
|
||||
def test_simple_multi_a(tmp_path):
    build_test_environment(str(tmp_path), "a", 5, 3)
    corrs = sfin.read_sfcf_multi(str(tmp_path) + "/data_a", "data_a", ["F_V0"], quarks_list=["lquark lquark"], wf_list=[0], wf2_list=[0], version="2.0a", corr_type_list=["bib"])
    f_V0 = corrs["F_V0"]['lquark lquark']['0']['0']['0']
    assert len(f_V0) == 3
    assert list(f_V0[0].shape.keys()) == ["data_a_|r0", "data_a_|r1", "data_a_|r2"]
    assert f_V0[0] == 683.6776090085115
    assert f_V0[1] == 661.3188585582334
    assert f_V0[2] == 683.6776090081005


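# Sketch of the nested layout assumed by the lookups in these multi-read
# tests: the result of read_sfcf_multi is keyed as
#     corrs[name][quark_string][str(offset)][str(wf)][str(wf2)] -> list of Obs
# so a generic traversal (hypothetical helper, not part of pyerrors) is:
def _walk_multi(corrs):
    for name, by_quarks in corrs.items():
        for quarks, by_offset in by_quarks.items():
            for offset, by_wf in by_offset.items():
                for wf, by_wf2 in by_wf.items():
                    for wf2, obs_list in by_wf2.items():
                        yield (name, quarks, offset, wf, wf2), obs_list

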
def test_dict_multi_a(tmp_path):
    build_test_environment(str(tmp_path), "a", 5, 3)
    corrs = sfin.read_sfcf_multi(str(tmp_path) + "/data_a", "data_a",
                                 ["F_V0", "f_A", "f_1"], quarks_list=["lquark lquark"],
                                 wf_list=[0], wf2_list=[0], version="2.0a",
                                 corr_type_list=["bib", "bi", "bb"], nice_output=False)
    print(corrs)
    f_1 = corrs["f_1"]['lquark lquark']['0']['0']['0']
    f_A = corrs["f_A"]['lquark lquark']['0']['0']['0']
    f_V0 = corrs["F_V0"]['lquark lquark']['0']['0']['0']

    assert len(f_1) == 1
    assert list(f_1[0].shape.keys()) == ["data_a_|r0", "data_a_|r1", "data_a_|r2"]
    assert f_1[0].value == 351.1941525454502

    assert len(f_A) == 3
    assert list(f_A[0].shape.keys()) == ["data_a_|r0", "data_a_|r1", "data_a_|r2"]
    assert f_A[0].value == 65.4711887279723
    assert f_A[1].value == 1.0447210336915187
    assert f_A[2].value == -41.025094911185185

    assert len(f_V0) == 3
    assert list(f_V0[0].shape.keys()) == ["data_a_|r0", "data_a_|r1", "data_a_|r2"]
    assert f_V0[0] == 683.6776090085115
    assert f_V0[1] == 661.3188585582334
    assert f_V0[2] == 683.6776090081005


def test_find_corr():
    pattern = 'name ' + "f_A" + '\nquarks ' + "lquark lquark" + '\noffset ' + str(0) + '\nwf ' + str(0)
    start_read, T = sfin._find_correlator("tests/data/sfcf_test/data_c/data_c_r0/data_c_r0_n1", "2.0c", pattern, False)

@@ -369,8 +129,7 @@ def test_find_corr():
    with pytest.raises(ValueError):
        sfin._find_correlator("tests/data/sfcf_test/broken_data_c/data_c_r0/data_c_r0_n1", "2.0c", pattern, False)


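# Toy analogue of sfin._find_correlator, assuming the metadata block is a run
# of consecutive lines equal to `pattern` (the real implementation is more
# involved). It returns the index of the first line after the block, which is
# where the numerical data is taken to start.
def _find_block_start(path, pattern):
    with open(path) as f:
        lines = f.read().splitlines()
    pat = pattern.split('\n')
    for i in range(len(lines) - len(pat) + 1):
        if lines[i:i + len(pat)] == pat:
            return i + len(pat)
    raise ValueError("Correlator metadata not found")

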
def test_read_compact_file():
    rep_path = "tests/data/sfcf_test/broken_data_c/data_c_r0/"
    config_file = "data_c_r0_n1"
    start_read = 469

@@ -380,40 +139,3 @@ def test_read_compact_file():
    im = False
    with pytest.raises(Exception):
        sfin._read_compact_file(rep_path, config_file, start_read, T, b2b, name, im)


def test_find_correlator():
    file = "tests/data/sfcf_test/data_c/data_c_r0/data_c_r0_n1"
    found_start, found_T = sfin._find_correlator(file, "2.0", "name f_A\nquarks lquark lquark\noffset 0\nwf 0", False, False)
    assert found_start == 21
    assert found_T == 3


def test_get_rep_name():
    names = ['data_r0', 'data_r1', 'data_r2']
    new_names = sfin._get_rep_names(names)
    assert len(new_names) == 3
    assert new_names[0] == 'data_|r0'
    assert new_names[1] == 'data_|r1'
    assert new_names[2] == 'data_|r2'
    names = ['data_q0', 'data_q1', 'data_q2']
    new_names = sfin._get_rep_names(names, rep_sep='q')
    assert len(new_names) == 3
    assert new_names[0] == 'data_|q0'
    assert new_names[1] == 'data_|q1'
    assert new_names[2] == 'data_|q2'


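# Hedged sketch of the naming rule the assertions above encode: a '|' is
# inserted in front of the last occurrence of the replica separator
# (default 'r'), turning e.g. 'data_r0' into 'data_|r0'.
def _mangle_rep_name(name, rep_sep='r'):
    idx = name.rfind(rep_sep)
    return name[:idx] + '|' + name[idx:]

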
def test_get_appended_rep_name():
    names = ['data_r0.f_1', 'data_r1.f_1', 'data_r2.f_1']
    new_names = sfin._get_appended_rep_names(names, 'data', 'f_1')
    assert len(new_names) == 3
    assert new_names[0] == 'data_|r0'
    assert new_names[1] == 'data_|r1'
    assert new_names[2] == 'data_|r2'
    names = ['data_q0.f_1', 'data_q1.f_1', 'data_q2.f_1']
    new_names = sfin._get_appended_rep_names(names, 'data', 'f_1', rep_sep='q')
    assert len(new_names) == 3
    assert new_names[0] == 'data_|q0'
    assert new_names[1] == 'data_|q1'
    assert new_names[2] == 'data_|q2'


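# For appended-format names the same rule applies after stripping the
# '.<corr_name>' suffix; a minimal sketch (hypothetical helper):
def _mangle_appended_rep_name(name, corr_name, rep_sep='r'):
    base = name[:-len('.' + corr_name)]  # 'data_r0.f_1' -> 'data_r0'
    idx = base.rfind(rep_sep)
    return base[:idx] + '|' + base[idx:]

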
@@ -1,12 +0,0 @@
import numpy as np
import scipy
import pyerrors as pe
import pytest

from autograd import jacobian
from numdifftools import Jacobian as num_jacobian

def test_kn():
    for n in np.arange(0, 10):
        for val in np.linspace(0.1, 7.3, 10):
            assert np.isclose(num_jacobian(lambda x: scipy.special.kn(n, x))(val), jacobian(lambda x: pe.special.kn(n, x))(val), rtol=1e-10, atol=1e-10)
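# Context for the removed test above: the autograd derivative of pe.special.kn
# is checked against a numerical Jacobian. The same check can be done against
# the analytic recurrence K_n'(x) = -(K_{n-1}(x) + K_{n+1}(x)) / 2, e.g.:
#
#     import numpy as np
#     import scipy.special
#     n, x = 3, 1.7
#     analytic = -0.5 * (scipy.special.kn(n - 1, x) + scipy.special.kn(n + 1, x))
#     numeric = (scipy.special.kn(n, x + 1e-7) - scipy.special.kn(n, x - 1e-7)) / 2e-7
#     assert np.isclose(analytic, numeric, rtol=1e-6)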